Diffstat (limited to 'lib')
217 files changed, 12413 insertions, 6138 deletions
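The dominant change in this commit is mechanical: DEBUG()-guarded printing and dump() methods move from errs() to dbgs(), and the touched files gain the llvm/Support/Debug.h include that declares it. Below is a minimal sketch of the target pattern using a hypothetical pass name (none of this is code from the commit); dbgs() returns the debug output stream, which unlike errs() can be routed into a circular buffer via -debug-buffer-size:

#define DEBUG_TYPE "example-pass"      // hypothetical debug type, defined before Debug.h
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

namespace {
struct Example {
  void print(raw_ostream &OS) const { OS << "Example\n"; }

  // dump() routes through dbgs() rather than errs(), matching the
  // AliasSet/CallGraphNode/IVUsers changes in the diff below.
  void dump() const { print(dbgs()); }

  void renumber(unsigned Old, unsigned New) {
    // Printed only in +Asserts builds when -debug (or
    // -debug-only=example-pass) is passed.
    DEBUG(dbgs() << "Renumbering node " << Old << " to node " << New << '\n');
  }
};
}

In release (NDEBUG) builds the DEBUG() body compiles away entirely, so the migration only affects where +Asserts debug output goes, not what release builds emit.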
diff --git a/lib/Analysis/AliasAnalysisCounter.cpp b/lib/Analysis/AliasAnalysisCounter.cpp index 030bcd2..ae28b55 100644 --- a/lib/Analysis/AliasAnalysisCounter.cpp +++ b/lib/Analysis/AliasAnalysisCounter.cpp @@ -17,6 +17,7 @@ #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Assembly/Writer.h" #include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; diff --git a/lib/Analysis/AliasAnalysisEvaluator.cpp b/lib/Analysis/AliasAnalysisEvaluator.cpp index 6a2564c..6b0a956 100644 --- a/lib/Analysis/AliasAnalysisEvaluator.cpp +++ b/lib/Analysis/AliasAnalysisEvaluator.cpp @@ -26,6 +26,7 @@ #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Assembly/Writer.h" #include "llvm/Target/TargetData.h" +#include "llvm/Support/Debug.h" #include "llvm/Support/InstIterator.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/raw_ostream.h" diff --git a/lib/Analysis/AliasSetTracker.cpp b/lib/Analysis/AliasSetTracker.cpp index 6634600..02aff50 100644 --- a/lib/Analysis/AliasSetTracker.cpp +++ b/lib/Analysis/AliasSetTracker.cpp @@ -19,6 +19,7 @@ #include "llvm/Type.h" #include "llvm/Target/TargetData.h" #include "llvm/Assembly/Writer.h" +#include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/InstIterator.h" #include "llvm/Support/Format.h" @@ -549,8 +550,8 @@ void AliasSetTracker::print(raw_ostream &OS) const { OS << "\n"; } -void AliasSet::dump() const { print(errs()); } -void AliasSetTracker::dump() const { print(errs()); } +void AliasSet::dump() const { print(dbgs()); } +void AliasSetTracker::dump() const { print(dbgs()); } //===----------------------------------------------------------------------===// // ASTCallbackVH Class Implementation diff --git a/lib/Analysis/DbgInfoPrinter.cpp b/lib/Analysis/DbgInfoPrinter.cpp index ab92e3f..7d72b38 100644 --- a/lib/Analysis/DbgInfoPrinter.cpp +++ b/lib/Analysis/DbgInfoPrinter.cpp @@ -19,10 +19,10 @@ #include "llvm/Pass.h" #include "llvm/Function.h" #include "llvm/IntrinsicInst.h" +#include "llvm/Metadata.h" #include "llvm/Assembly/Writer.h" #include "llvm/Analysis/DebugInfo.h" #include "llvm/Analysis/Passes.h" -#include "llvm/Analysis/ValueTracking.h" #include "llvm/Support/CFG.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/raw_ostream.h" @@ -75,18 +75,16 @@ void PrintDbgInfo::printVariableDeclaration(const Value *V) { } void PrintDbgInfo::printStopPoint(const DbgStopPointInst *DSI) { - if (PrintDirectory) { - std::string dir; - GetConstantStringInfo(DSI->getDirectory(), dir); - Out << dir << "/"; - } + if (PrintDirectory) + if (MDString *Str = dyn_cast<MDString>(DSI->getDirectory())) + Out << Str->getString() << '/'; - std::string file; - GetConstantStringInfo(DSI->getFileName(), file); - Out << file << ":" << DSI->getLine(); + if (MDString *Str = dyn_cast<MDString>(DSI->getFileName())) + Out << Str->getString(); + Out << ':' << DSI->getLine(); if (unsigned Col = DSI->getColumn()) - Out << ":" << Col; + Out << ':' << Col; } void PrintDbgInfo::printFuncStart(const DbgFuncStartInst *FS) { diff --git a/lib/Analysis/DebugInfo.cpp b/lib/Analysis/DebugInfo.cpp index 1c9f500..de2d839 100644 --- a/lib/Analysis/DebugInfo.cpp +++ b/lib/Analysis/DebugInfo.cpp @@ -13,15 +13,16 @@ //===----------------------------------------------------------------------===// #include "llvm/Analysis/DebugInfo.h" +#include "llvm/Target/TargetMachine.h" // FIXME: LAYERING VIOLATION! 
#include "llvm/Constants.h" #include "llvm/DerivedTypes.h" #include "llvm/Intrinsics.h" #include "llvm/IntrinsicInst.h" #include "llvm/Instructions.h" -#include "llvm/LLVMContext.h" #include "llvm/Module.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/ADT/SmallPtrSet.h" +#include "llvm/Support/Debug.h" #include "llvm/Support/Dwarf.h" #include "llvm/Support/DebugLoc.h" #include "llvm/Support/raw_ostream.h" @@ -34,7 +35,7 @@ using namespace llvm::dwarf; /// ValidDebugInfo - Return true if V represents valid debug info value. /// FIXME : Add DIDescriptor.isValid() -bool DIDescriptor::ValidDebugInfo(MDNode *N, CodeGenOpt::Level OptLevel) { +bool DIDescriptor::ValidDebugInfo(MDNode *N, unsigned OptLevel) { if (!N) return false; @@ -45,8 +46,7 @@ bool DIDescriptor::ValidDebugInfo(MDNode *N, CodeGenOpt::Level OptLevel) { if (Version != LLVMDebugVersion && Version != LLVMDebugVersion6) return false; - unsigned Tag = DI.getTag(); - switch (Tag) { + switch (DI.getTag()) { case DW_TAG_variable: assert(DIVariable(N).Verify() && "Invalid DebugInfo value"); break; @@ -83,8 +83,8 @@ DIDescriptor::getStringField(unsigned Elt) const { if (DbgNode == 0) return StringRef(); - if (Elt < DbgNode->getNumElements()) - if (MDString *MDS = dyn_cast_or_null<MDString>(DbgNode->getElement(Elt))) + if (Elt < DbgNode->getNumOperands()) + if (MDString *MDS = dyn_cast_or_null<MDString>(DbgNode->getOperand(Elt))) return MDS->getString(); return StringRef(); @@ -94,8 +94,8 @@ uint64_t DIDescriptor::getUInt64Field(unsigned Elt) const { if (DbgNode == 0) return 0; - if (Elt < DbgNode->getNumElements()) - if (ConstantInt *CI = dyn_cast<ConstantInt>(DbgNode->getElement(Elt))) + if (Elt < DbgNode->getNumOperands()) + if (ConstantInt *CI = dyn_cast<ConstantInt>(DbgNode->getOperand(Elt))) return CI->getZExtValue(); return 0; @@ -105,8 +105,8 @@ DIDescriptor DIDescriptor::getDescriptorField(unsigned Elt) const { if (DbgNode == 0) return DIDescriptor(); - if (Elt < DbgNode->getNumElements() && DbgNode->getElement(Elt)) - return DIDescriptor(dyn_cast<MDNode>(DbgNode->getElement(Elt))); + if (Elt < DbgNode->getNumOperands() && DbgNode->getOperand(Elt)) + return DIDescriptor(dyn_cast<MDNode>(DbgNode->getOperand(Elt))); return DIDescriptor(); } @@ -115,11 +115,16 @@ GlobalVariable *DIDescriptor::getGlobalVariableField(unsigned Elt) const { if (DbgNode == 0) return 0; - if (Elt < DbgNode->getNumElements()) - return dyn_cast_or_null<GlobalVariable>(DbgNode->getElement(Elt)); + if (Elt < DbgNode->getNumOperands()) + return dyn_cast_or_null<GlobalVariable>(DbgNode->getOperand(Elt)); return 0; } +unsigned DIVariable::getNumAddrElements() const { + return DbgNode->getNumOperands()-6; +} + + //===----------------------------------------------------------------------===// // Predicates //===----------------------------------------------------------------------===// @@ -127,18 +132,14 @@ GlobalVariable *DIDescriptor::getGlobalVariableField(unsigned Elt) const { /// isBasicType - Return true if the specified tag is legal for /// DIBasicType. bool DIDescriptor::isBasicType() const { - assert (!isNull() && "Invalid descriptor!"); - unsigned Tag = getTag(); - - return Tag == dwarf::DW_TAG_base_type; + assert(!isNull() && "Invalid descriptor!"); + return getTag() == dwarf::DW_TAG_base_type; } /// isDerivedType - Return true if the specified tag is legal for DIDerivedType. 
bool DIDescriptor::isDerivedType() const { - assert (!isNull() && "Invalid descriptor!"); - unsigned Tag = getTag(); - - switch (Tag) { + assert(!isNull() && "Invalid descriptor!"); + switch (getTag()) { case dwarf::DW_TAG_typedef: case dwarf::DW_TAG_pointer_type: case dwarf::DW_TAG_reference_type: @@ -157,10 +158,8 @@ bool DIDescriptor::isDerivedType() const { /// isCompositeType - Return true if the specified tag is legal for /// DICompositeType. bool DIDescriptor::isCompositeType() const { - assert (!isNull() && "Invalid descriptor!"); - unsigned Tag = getTag(); - - switch (Tag) { + assert(!isNull() && "Invalid descriptor!"); + switch (getTag()) { case dwarf::DW_TAG_array_type: case dwarf::DW_TAG_structure_type: case dwarf::DW_TAG_union_type: @@ -176,10 +175,8 @@ bool DIDescriptor::isCompositeType() const { /// isVariable - Return true if the specified tag is legal for DIVariable. bool DIDescriptor::isVariable() const { - assert (!isNull() && "Invalid descriptor!"); - unsigned Tag = getTag(); - - switch (Tag) { + assert(!isNull() && "Invalid descriptor!"); + switch (getTag()) { case dwarf::DW_TAG_auto_variable: case dwarf::DW_TAG_arg_variable: case dwarf::DW_TAG_return_variable: @@ -197,19 +194,15 @@ bool DIDescriptor::isType() const { /// isSubprogram - Return true if the specified tag is legal for /// DISubprogram. bool DIDescriptor::isSubprogram() const { - assert (!isNull() && "Invalid descriptor!"); - unsigned Tag = getTag(); - - return Tag == dwarf::DW_TAG_subprogram; + assert(!isNull() && "Invalid descriptor!"); + return getTag() == dwarf::DW_TAG_subprogram; } /// isGlobalVariable - Return true if the specified tag is legal for /// DIGlobalVariable. bool DIDescriptor::isGlobalVariable() const { - assert (!isNull() && "Invalid descriptor!"); - unsigned Tag = getTag(); - - return Tag == dwarf::DW_TAG_variable; + assert(!isNull() && "Invalid descriptor!"); + return getTag() == dwarf::DW_TAG_variable; } /// isGlobal - Return true if the specified tag is legal for DIGlobal. @@ -220,50 +213,47 @@ bool DIDescriptor::isGlobal() const { /// isScope - Return true if the specified tag is one of the scope /// related tag. bool DIDescriptor::isScope() const { - assert (!isNull() && "Invalid descriptor!"); - unsigned Tag = getTag(); - - switch (Tag) { - case dwarf::DW_TAG_compile_unit: - case dwarf::DW_TAG_lexical_block: - case dwarf::DW_TAG_subprogram: - return true; - default: - break; + assert(!isNull() && "Invalid descriptor!"); + switch (getTag()) { + case dwarf::DW_TAG_compile_unit: + case dwarf::DW_TAG_lexical_block: + case dwarf::DW_TAG_subprogram: + case dwarf::DW_TAG_namespace: + return true; + default: + break; } return false; } /// isCompileUnit - Return true if the specified tag is DW_TAG_compile_unit. bool DIDescriptor::isCompileUnit() const { - assert (!isNull() && "Invalid descriptor!"); - unsigned Tag = getTag(); + assert(!isNull() && "Invalid descriptor!"); + return getTag() == dwarf::DW_TAG_compile_unit; +} - return Tag == dwarf::DW_TAG_compile_unit; +/// isNameSpace - Return true if the specified tag is DW_TAG_namespace. +bool DIDescriptor::isNameSpace() const { + assert(!isNull() && "Invalid descriptor!"); + return getTag() == dwarf::DW_TAG_namespace; } /// isLexicalBlock - Return true if the specified tag is DW_TAG_lexical_block. 
bool DIDescriptor::isLexicalBlock() const { - assert (!isNull() && "Invalid descriptor!"); - unsigned Tag = getTag(); - - return Tag == dwarf::DW_TAG_lexical_block; + assert(!isNull() && "Invalid descriptor!"); + return getTag() == dwarf::DW_TAG_lexical_block; } /// isSubrange - Return true if the specified tag is DW_TAG_subrange_type. bool DIDescriptor::isSubrange() const { - assert (!isNull() && "Invalid descriptor!"); - unsigned Tag = getTag(); - - return Tag == dwarf::DW_TAG_subrange_type; + assert(!isNull() && "Invalid descriptor!"); + return getTag() == dwarf::DW_TAG_subrange_type; } /// isEnumerator - Return true if the specified tag is DW_TAG_enumerator. bool DIDescriptor::isEnumerator() const { - assert (!isNull() && "Invalid descriptor!"); - unsigned Tag = getTag(); - - return Tag == dwarf::DW_TAG_enumerator; + assert(!isNull() && "Invalid descriptor!"); + return getTag() == dwarf::DW_TAG_enumerator; } //===----------------------------------------------------------------------===// @@ -278,8 +268,8 @@ DIType::DIType(MDNode *N) : DIDescriptor(N) { } unsigned DIArray::getNumElements() const { - assert (DbgNode && "Invalid DIArray"); - return DbgNode->getNumElements(); + assert(DbgNode && "Invalid DIArray"); + return DbgNode->getNumOperands(); } /// replaceAllUsesWith - Replace all uses of debug info referenced by @@ -289,7 +279,7 @@ void DIDerivedType::replaceAllUsesWith(DIDescriptor &D) { if (isNull()) return; - assert (!D.isNull() && "Can not replace with null"); + assert(!D.isNull() && "Can not replace with null"); // Since we use a TrackingVH for the node, its easy for clients to manufacture // legitimate situations where they want to replaceAllUsesWith() on something @@ -299,7 +289,7 @@ void DIDerivedType::replaceAllUsesWith(DIDescriptor &D) { if (getNode() != D.getNode()) { MDNode *Node = DbgNode; Node->replaceAllUsesWith(D.getNode()); - delete Node; + Node->destroy(); } } @@ -422,7 +412,7 @@ uint64_t DIDerivedType::getOriginalTypeSize() const { /// describes - Return true if this subprogram provides debugging /// information for the function F. bool DISubprogram::describes(const Function *F) { - assert (F && "Invalid function"); + assert(F && "Invalid function"); StringRef Name = getLinkageName(); if (Name.empty()) Name = getName(); @@ -434,24 +424,26 @@ bool DISubprogram::describes(const Function *F) { StringRef DIScope::getFilename() const { if (isLexicalBlock()) return DILexicalBlock(DbgNode).getFilename(); - else if (isSubprogram()) + if (isSubprogram()) return DISubprogram(DbgNode).getFilename(); - else if (isCompileUnit()) + if (isCompileUnit()) return DICompileUnit(DbgNode).getFilename(); - else - assert (0 && "Invalid DIScope!"); + if (isNameSpace()) + return DINameSpace(DbgNode).getFilename(); + assert(0 && "Invalid DIScope!"); return StringRef(); } StringRef DIScope::getDirectory() const { if (isLexicalBlock()) return DILexicalBlock(DbgNode).getDirectory(); - else if (isSubprogram()) + if (isSubprogram()) return DISubprogram(DbgNode).getDirectory(); - else if (isCompileUnit()) + if (isCompileUnit()) return DICompileUnit(DbgNode).getDirectory(); - else - assert (0 && "Invalid DIScope!"); + if (isNameSpace()) + return DINameSpace(DbgNode).getDirectory(); + assert(0 && "Invalid DIScope!"); return StringRef(); } @@ -462,16 +454,16 @@ StringRef DIScope::getDirectory() const { /// dump - Print descriptor. 
void DIDescriptor::dump() const { - errs() << "[" << dwarf::TagString(getTag()) << "] "; - errs().write_hex((intptr_t) &*DbgNode) << ']'; + dbgs() << "[" << dwarf::TagString(getTag()) << "] "; + dbgs().write_hex((intptr_t) &*DbgNode) << ']'; } /// dump - Print compile unit. void DICompileUnit::dump() const { if (getLanguage()) - errs() << " [" << dwarf::LanguageString(getLanguage()) << "] "; + dbgs() << " [" << dwarf::LanguageString(getLanguage()) << "] "; - errs() << " [" << getDirectory() << "/" << getFilename() << " ]"; + dbgs() << " [" << getDirectory() << "/" << getFilename() << " ]"; } /// dump - Print type. @@ -480,14 +472,14 @@ void DIType::dump() const { StringRef Res = getName(); if (!Res.empty()) - errs() << " [" << Res << "] "; + dbgs() << " [" << Res << "] "; unsigned Tag = getTag(); - errs() << " [" << dwarf::TagString(Tag) << "] "; + dbgs() << " [" << dwarf::TagString(Tag) << "] "; // TODO : Print context getCompileUnit().dump(); - errs() << " [" + dbgs() << " [" << getLineNumber() << ", " << getSizeInBits() << ", " << getAlignInBits() << ", " @@ -495,12 +487,12 @@ void DIType::dump() const { << "] "; if (isPrivate()) - errs() << " [private] "; + dbgs() << " [private] "; else if (isProtected()) - errs() << " [protected] "; + dbgs() << " [protected] "; if (isForwardDecl()) - errs() << " [fwd] "; + dbgs() << " [fwd] "; if (isBasicType()) DIBasicType(DbgNode).dump(); @@ -509,21 +501,21 @@ void DIType::dump() const { else if (isCompositeType()) DICompositeType(DbgNode).dump(); else { - errs() << "Invalid DIType\n"; + dbgs() << "Invalid DIType\n"; return; } - errs() << "\n"; + dbgs() << "\n"; } /// dump - Print basic type. void DIBasicType::dump() const { - errs() << " [" << dwarf::AttributeEncodingString(getEncoding()) << "] "; + dbgs() << " [" << dwarf::AttributeEncodingString(getEncoding()) << "] "; } /// dump - Print derived type. void DIDerivedType::dump() const { - errs() << "\n\t Derived From: "; getTypeDerivedFrom().dump(); + dbgs() << "\n\t Derived From: "; getTypeDerivedFrom().dump(); } /// dump - Print composite type. @@ -531,73 +523,73 @@ void DICompositeType::dump() const { DIArray A = getTypeArray(); if (A.isNull()) return; - errs() << " [" << A.getNumElements() << " elements]"; + dbgs() << " [" << A.getNumElements() << " elements]"; } /// dump - Print global. void DIGlobal::dump() const { StringRef Res = getName(); if (!Res.empty()) - errs() << " [" << Res << "] "; + dbgs() << " [" << Res << "] "; unsigned Tag = getTag(); - errs() << " [" << dwarf::TagString(Tag) << "] "; + dbgs() << " [" << dwarf::TagString(Tag) << "] "; // TODO : Print context getCompileUnit().dump(); - errs() << " [" << getLineNumber() << "] "; + dbgs() << " [" << getLineNumber() << "] "; if (isLocalToUnit()) - errs() << " [local] "; + dbgs() << " [local] "; if (isDefinition()) - errs() << " [def] "; + dbgs() << " [def] "; if (isGlobalVariable()) DIGlobalVariable(DbgNode).dump(); - errs() << "\n"; + dbgs() << "\n"; } /// dump - Print subprogram. 
void DISubprogram::dump() const { StringRef Res = getName(); if (!Res.empty()) - errs() << " [" << Res << "] "; + dbgs() << " [" << Res << "] "; unsigned Tag = getTag(); - errs() << " [" << dwarf::TagString(Tag) << "] "; + dbgs() << " [" << dwarf::TagString(Tag) << "] "; // TODO : Print context getCompileUnit().dump(); - errs() << " [" << getLineNumber() << "] "; + dbgs() << " [" << getLineNumber() << "] "; if (isLocalToUnit()) - errs() << " [local] "; + dbgs() << " [local] "; if (isDefinition()) - errs() << " [def] "; + dbgs() << " [def] "; - errs() << "\n"; + dbgs() << "\n"; } /// dump - Print global variable. void DIGlobalVariable::dump() const { - errs() << " ["; + dbgs() << " ["; getGlobal()->dump(); - errs() << "] "; + dbgs() << "] "; } /// dump - Print variable. void DIVariable::dump() const { StringRef Res = getName(); if (!Res.empty()) - errs() << " [" << Res << "] "; + dbgs() << " [" << Res << "] "; getCompileUnit().dump(); - errs() << " [" << getLineNumber() << "] "; + dbgs() << " [" << getLineNumber() << "] "; getType().dump(); - errs() << "\n"; + dbgs() << "\n"; // FIXME: Dump complex addresses } @@ -899,18 +891,18 @@ DISubprogram DIFactory::CreateSubprogramDefinition(DISubprogram &SPDeclaration) Value *Elts[] = { GetTagConstant(dwarf::DW_TAG_subprogram), llvm::Constant::getNullValue(Type::getInt32Ty(VMContext)), - DeclNode->getElement(2), // Context - DeclNode->getElement(3), // Name - DeclNode->getElement(4), // DisplayName - DeclNode->getElement(5), // LinkageName - DeclNode->getElement(6), // CompileUnit - DeclNode->getElement(7), // LineNo - DeclNode->getElement(8), // Type - DeclNode->getElement(9), // isLocalToUnit + DeclNode->getOperand(2), // Context + DeclNode->getOperand(3), // Name + DeclNode->getOperand(4), // DisplayName + DeclNode->getOperand(5), // LinkageName + DeclNode->getOperand(6), // CompileUnit + DeclNode->getOperand(7), // LineNo + DeclNode->getOperand(8), // Type + DeclNode->getOperand(9), // isLocalToUnit ConstantInt::get(Type::getInt1Ty(VMContext), true), - DeclNode->getElement(11), // Virtuality - DeclNode->getElement(12), // VIndex - DeclNode->getElement(13) // Containting Type + DeclNode->getOperand(11), // Virtuality + DeclNode->getOperand(12), // VIndex + DeclNode->getOperand(13) // Containting Type }; return DISubprogram(MDNode::get(VMContext, &Elts[0], 14)); } @@ -943,7 +935,7 @@ DIFactory::CreateGlobalVariable(DIDescriptor Context, StringRef Name, // Create a named metadata so that we do not lose this mdnode. NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.gv"); - NMD->addElement(Node); + NMD->addOperand(Node); return DIGlobalVariable(Node); } @@ -996,6 +988,21 @@ DILexicalBlock DIFactory::CreateLexicalBlock(DIDescriptor Context) { return DILexicalBlock(MDNode::get(VMContext, &Elts[0], 2)); } +/// CreateNameSpace - This creates new descriptor for a namespace +/// with the specified parent context. +DINameSpace DIFactory::CreateNameSpace(DIDescriptor Context, StringRef Name, + DICompileUnit CompileUnit, + unsigned LineNo) { + Value *Elts[] = { + GetTagConstant(dwarf::DW_TAG_namespace), + Context.getNode(), + MDString::get(VMContext, Name), + CompileUnit.getNode(), + ConstantInt::get(Type::getInt32Ty(VMContext), LineNo) + }; + return DINameSpace(MDNode::get(VMContext, &Elts[0], 5)); +} + /// CreateLocation - Creates a debug info location. 
DILocation DIFactory::CreateLocation(unsigned LineNo, unsigned ColumnNo, DIScope S, DILocation OrigLoc) { @@ -1088,9 +1095,7 @@ Instruction *DIFactory::InsertDbgValueIntrinsic(Value *V, Value *Offset, /// processModule - Process entire module and collect debug info. void DebugInfoFinder::processModule(Module &M) { - - MetadataContext &TheMetadata = M.getContext().getMetadata(); - unsigned MDDbgKind = TheMetadata.getMDKind("dbg"); + unsigned MDDbgKind = M.getMDKindID("dbg"); for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) for (Function::iterator FI = (*I).begin(), FE = (*I).end(); FI != FE; ++FI) @@ -1098,17 +1103,16 @@ void DebugInfoFinder::processModule(Module &M) { ++BI) { if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(BI)) processDeclare(DDI); - else if (MDDbgKind) - if (MDNode *L = TheMetadata.getMD(MDDbgKind, BI)) - processLocation(DILocation(L)); + else if (MDNode *L = BI->getMetadata(MDDbgKind)) + processLocation(DILocation(L)); } NamedMDNode *NMD = M.getNamedMetadata("llvm.dbg.gv"); if (!NMD) return; - for (unsigned i = 0, e = NMD->getNumElements(); i != e; ++i) { - DIGlobalVariable DIG(cast<MDNode>(NMD->getElement(i))); + for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) { + DIGlobalVariable DIG(cast<MDNode>(NMD->getOperand(i))); if (addGlobalVariable(DIG)) { addCompileUnit(DIG.getCompileUnit()); processType(DIG.getType()); @@ -1238,275 +1242,237 @@ bool DebugInfoFinder::addSubprogram(DISubprogram SP) { return true; } -namespace llvm { - /// findStopPoint - Find the stoppoint coressponding to this instruction, that - /// is the stoppoint that dominates this instruction. - const DbgStopPointInst *findStopPoint(const Instruction *Inst) { - if (const DbgStopPointInst *DSI = dyn_cast<DbgStopPointInst>(Inst)) - return DSI; - - const BasicBlock *BB = Inst->getParent(); - BasicBlock::const_iterator I = Inst, B; - while (BB) { - B = BB->begin(); - - // A BB consisting only of a terminator can't have a stoppoint. - while (I != B) { - --I; - if (const DbgStopPointInst *DSI = dyn_cast<DbgStopPointInst>(I)) - return DSI; - } - - // This BB didn't have a stoppoint: if there is only one predecessor, look - // for a stoppoint there. We could use getIDom(), but that would require - // dominator info. - BB = I->getParent()->getUniquePredecessor(); - if (BB) - I = BB->getTerminator(); - } +/// findStopPoint - Find the stoppoint coressponding to this instruction, that +/// is the stoppoint that dominates this instruction. +const DbgStopPointInst *llvm::findStopPoint(const Instruction *Inst) { + if (const DbgStopPointInst *DSI = dyn_cast<DbgStopPointInst>(Inst)) + return DSI; - return 0; - } + const BasicBlock *BB = Inst->getParent(); + BasicBlock::const_iterator I = Inst, B; + while (BB) { + B = BB->begin(); - /// findBBStopPoint - Find the stoppoint corresponding to first real - /// (non-debug intrinsic) instruction in this Basic Block, and return the - /// stoppoint for it. - const DbgStopPointInst *findBBStopPoint(const BasicBlock *BB) { - for(BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I) + // A BB consisting only of a terminator can't have a stoppoint. + while (I != B) { + --I; if (const DbgStopPointInst *DSI = dyn_cast<DbgStopPointInst>(I)) return DSI; + } - // Fallback to looking for stoppoint of unique predecessor. Useful if this - // BB contains no stoppoints, but unique predecessor does. - BB = BB->getUniquePredecessor(); + // This BB didn't have a stoppoint: if there is only one predecessor, look + // for a stoppoint there. 
We could use getIDom(), but that would require + // dominator info. + BB = I->getParent()->getUniquePredecessor(); if (BB) - return findStopPoint(BB->getTerminator()); - - return 0; + I = BB->getTerminator(); } - Value *findDbgGlobalDeclare(GlobalVariable *V) { - const Module *M = V->getParent(); - NamedMDNode *NMD = M->getNamedMetadata("llvm.dbg.gv"); - if (!NMD) - return 0; - - for (unsigned i = 0, e = NMD->getNumElements(); i != e; ++i) { - DIGlobalVariable DIG(cast_or_null<MDNode>(NMD->getElement(i))); - if (DIG.isNull()) - continue; - if (DIG.getGlobal() == V) - return DIG.getNode(); - } - return 0; - } - - /// Finds the llvm.dbg.declare intrinsic corresponding to this value if any. - /// It looks through pointer casts too. - const DbgDeclareInst *findDbgDeclare(const Value *V, bool stripCasts) { - if (stripCasts) { - V = V->stripPointerCasts(); - - // Look for the bitcast. - for (Value::use_const_iterator I = V->use_begin(), E =V->use_end(); - I != E; ++I) - if (isa<BitCastInst>(I)) { - const DbgDeclareInst *DDI = findDbgDeclare(*I, false); - if (DDI) return DDI; - } - return 0; - } + return 0; +} - // Find llvm.dbg.declare among uses of the instruction. - for (Value::use_const_iterator I = V->use_begin(), E =V->use_end(); - I != E; ++I) - if (const DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) - return DDI; +/// findBBStopPoint - Find the stoppoint corresponding to first real +/// (non-debug intrinsic) instruction in this Basic Block, and return the +/// stoppoint for it. +const DbgStopPointInst *llvm::findBBStopPoint(const BasicBlock *BB) { + for(BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I) + if (const DbgStopPointInst *DSI = dyn_cast<DbgStopPointInst>(I)) + return DSI; - return 0; - } + // Fallback to looking for stoppoint of unique predecessor. Useful if this + // BB contains no stoppoints, but unique predecessor does. + BB = BB->getUniquePredecessor(); + if (BB) + return findStopPoint(BB->getTerminator()); -bool getLocationInfo(const Value *V, std::string &DisplayName, - std::string &Type, unsigned &LineNo, std::string &File, - std::string &Dir) { - DICompileUnit Unit; - DIType TypeD; - - if (GlobalVariable *GV = dyn_cast<GlobalVariable>(const_cast<Value*>(V))) { - Value *DIGV = findDbgGlobalDeclare(GV); - if (!DIGV) return false; - DIGlobalVariable Var(cast<MDNode>(DIGV)); - - StringRef D = Var.getDisplayName(); - if (!D.empty()) - DisplayName = D; - LineNo = Var.getLineNumber(); - Unit = Var.getCompileUnit(); - TypeD = Var.getType(); - } else { - const DbgDeclareInst *DDI = findDbgDeclare(V); - if (!DDI) return false; - DIVariable Var(cast<MDNode>(DDI->getVariable())); - - StringRef D = Var.getName(); - if (!D.empty()) - DisplayName = D; - LineNo = Var.getLineNumber(); - Unit = Var.getCompileUnit(); - TypeD = Var.getType(); - } + return 0; +} - StringRef T = TypeD.getName(); - if (!T.empty()) - Type = T; - StringRef F = Unit.getFilename(); - if (!F.empty()) - File = F; - StringRef D = Unit.getDirectory(); - if (!D.empty()) - Dir = D; - return true; - } +Value *llvm::findDbgGlobalDeclare(GlobalVariable *V) { + const Module *M = V->getParent(); + NamedMDNode *NMD = M->getNamedMetadata("llvm.dbg.gv"); + if (!NMD) + return 0; - /// isValidDebugInfoIntrinsic - Return true if SPI is a valid debug - /// info intrinsic. 
- bool isValidDebugInfoIntrinsic(DbgStopPointInst &SPI, - CodeGenOpt::Level OptLev) { - return DIDescriptor::ValidDebugInfo(SPI.getContext(), OptLev); + for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) { + DIGlobalVariable DIG(cast_or_null<MDNode>(NMD->getOperand(i))); + if (DIG.isNull()) + continue; + if (DIG.getGlobal() == V) + return DIG.getNode(); } + return 0; +} - /// isValidDebugInfoIntrinsic - Return true if FSI is a valid debug - /// info intrinsic. - bool isValidDebugInfoIntrinsic(DbgFuncStartInst &FSI, - CodeGenOpt::Level OptLev) { - return DIDescriptor::ValidDebugInfo(FSI.getSubprogram(), OptLev); - } +/// Finds the llvm.dbg.declare intrinsic corresponding to this value if any. +/// It looks through pointer casts too. +const DbgDeclareInst *llvm::findDbgDeclare(const Value *V, bool stripCasts) { + if (stripCasts) { + V = V->stripPointerCasts(); - /// isValidDebugInfoIntrinsic - Return true if RSI is a valid debug - /// info intrinsic. - bool isValidDebugInfoIntrinsic(DbgRegionStartInst &RSI, - CodeGenOpt::Level OptLev) { - return DIDescriptor::ValidDebugInfo(RSI.getContext(), OptLev); + // Look for the bitcast. + for (Value::use_const_iterator I = V->use_begin(), E =V->use_end(); + I != E; ++I) + if (isa<BitCastInst>(I)) { + const DbgDeclareInst *DDI = findDbgDeclare(*I, false); + if (DDI) return DDI; + } + return 0; } - /// isValidDebugInfoIntrinsic - Return true if REI is a valid debug - /// info intrinsic. - bool isValidDebugInfoIntrinsic(DbgRegionEndInst &REI, - CodeGenOpt::Level OptLev) { - return DIDescriptor::ValidDebugInfo(REI.getContext(), OptLev); - } + // Find llvm.dbg.declare among uses of the instruction. + for (Value::use_const_iterator I = V->use_begin(), E =V->use_end(); + I != E; ++I) + if (const DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) + return DDI; + return 0; +} - /// isValidDebugInfoIntrinsic - Return true if DI is a valid debug - /// info intrinsic. - bool isValidDebugInfoIntrinsic(DbgDeclareInst &DI, - CodeGenOpt::Level OptLev) { - return DIDescriptor::ValidDebugInfo(DI.getVariable(), OptLev); - } +bool llvm::getLocationInfo(const Value *V, std::string &DisplayName, + std::string &Type, unsigned &LineNo, + std::string &File, std::string &Dir) { + DICompileUnit Unit; + DIType TypeD; - /// ExtractDebugLocation - Extract debug location information - /// from llvm.dbg.stoppoint intrinsic. - DebugLoc ExtractDebugLocation(DbgStopPointInst &SPI, - DebugLocTracker &DebugLocInfo) { - DebugLoc DL; - Value *Context = SPI.getContext(); - - // If this location is already tracked then use it. - DebugLocTuple Tuple(cast<MDNode>(Context), NULL, SPI.getLine(), - SPI.getColumn()); - DenseMap<DebugLocTuple, unsigned>::iterator II - = DebugLocInfo.DebugIdMap.find(Tuple); - if (II != DebugLocInfo.DebugIdMap.end()) - return DebugLoc::get(II->second); - - // Add a new location entry. - unsigned Id = DebugLocInfo.DebugLocations.size(); - DebugLocInfo.DebugLocations.push_back(Tuple); - DebugLocInfo.DebugIdMap[Tuple] = Id; - - return DebugLoc::get(Id); - } + if (GlobalVariable *GV = dyn_cast<GlobalVariable>(const_cast<Value*>(V))) { + Value *DIGV = findDbgGlobalDeclare(GV); + if (!DIGV) return false; + DIGlobalVariable Var(cast<MDNode>(DIGV)); - /// ExtractDebugLocation - Extract debug location information - /// from DILocation. 
- DebugLoc ExtractDebugLocation(DILocation &Loc, - DebugLocTracker &DebugLocInfo) { - DebugLoc DL; - MDNode *Context = Loc.getScope().getNode(); - MDNode *InlinedLoc = NULL; - if (!Loc.getOrigLocation().isNull()) - InlinedLoc = Loc.getOrigLocation().getNode(); - // If this location is already tracked then use it. - DebugLocTuple Tuple(Context, InlinedLoc, Loc.getLineNumber(), - Loc.getColumnNumber()); - DenseMap<DebugLocTuple, unsigned>::iterator II - = DebugLocInfo.DebugIdMap.find(Tuple); - if (II != DebugLocInfo.DebugIdMap.end()) - return DebugLoc::get(II->second); - - // Add a new location entry. - unsigned Id = DebugLocInfo.DebugLocations.size(); - DebugLocInfo.DebugLocations.push_back(Tuple); - DebugLocInfo.DebugIdMap[Tuple] = Id; - - return DebugLoc::get(Id); + StringRef D = Var.getDisplayName(); + if (!D.empty()) + DisplayName = D; + LineNo = Var.getLineNumber(); + Unit = Var.getCompileUnit(); + TypeD = Var.getType(); + } else { + const DbgDeclareInst *DDI = findDbgDeclare(V); + if (!DDI) return false; + DIVariable Var(cast<MDNode>(DDI->getVariable())); + + StringRef D = Var.getName(); + if (!D.empty()) + DisplayName = D; + LineNo = Var.getLineNumber(); + Unit = Var.getCompileUnit(); + TypeD = Var.getType(); } - /// ExtractDebugLocation - Extract debug location information - /// from llvm.dbg.func_start intrinsic. - DebugLoc ExtractDebugLocation(DbgFuncStartInst &FSI, - DebugLocTracker &DebugLocInfo) { - DebugLoc DL; - Value *SP = FSI.getSubprogram(); - - DISubprogram Subprogram(cast<MDNode>(SP)); - unsigned Line = Subprogram.getLineNumber(); - DICompileUnit CU(Subprogram.getCompileUnit()); - - // If this location is already tracked then use it. - DebugLocTuple Tuple(CU.getNode(), NULL, Line, /* Column */ 0); - DenseMap<DebugLocTuple, unsigned>::iterator II - = DebugLocInfo.DebugIdMap.find(Tuple); - if (II != DebugLocInfo.DebugIdMap.end()) - return DebugLoc::get(II->second); - - // Add a new location entry. - unsigned Id = DebugLocInfo.DebugLocations.size(); - DebugLocInfo.DebugLocations.push_back(Tuple); - DebugLocInfo.DebugIdMap[Tuple] = Id; - - return DebugLoc::get(Id); - } + StringRef T = TypeD.getName(); + if (!T.empty()) + Type = T; + StringRef F = Unit.getFilename(); + if (!F.empty()) + File = F; + StringRef D = Unit.getDirectory(); + if (!D.empty()) + Dir = D; + return true; +} - /// getDISubprogram - Find subprogram that is enclosing this scope. - DISubprogram getDISubprogram(MDNode *Scope) { - DIDescriptor D(Scope); - if (D.isNull()) - return DISubprogram(); - - if (D.isCompileUnit()) - return DISubprogram(); - - if (D.isSubprogram()) - return DISubprogram(Scope); - - if (D.isLexicalBlock()) - return getDISubprogram(DILexicalBlock(Scope).getContext().getNode()); - +/// ExtractDebugLocation - Extract debug location information +/// from llvm.dbg.stoppoint intrinsic. +DebugLoc llvm::ExtractDebugLocation(DbgStopPointInst &SPI, + DebugLocTracker &DebugLocInfo) { + DebugLoc DL; + Value *Context = SPI.getContext(); + + // If this location is already tracked then use it. + DebugLocTuple Tuple(cast<MDNode>(Context), NULL, SPI.getLine(), + SPI.getColumn()); + DenseMap<DebugLocTuple, unsigned>::iterator II + = DebugLocInfo.DebugIdMap.find(Tuple); + if (II != DebugLocInfo.DebugIdMap.end()) + return DebugLoc::get(II->second); + + // Add a new location entry. 
+ unsigned Id = DebugLocInfo.DebugLocations.size(); + DebugLocInfo.DebugLocations.push_back(Tuple); + DebugLocInfo.DebugIdMap[Tuple] = Id; + + return DebugLoc::get(Id); +} + +/// ExtractDebugLocation - Extract debug location information +/// from DILocation. +DebugLoc llvm::ExtractDebugLocation(DILocation &Loc, + DebugLocTracker &DebugLocInfo) { + DebugLoc DL; + MDNode *Context = Loc.getScope().getNode(); + MDNode *InlinedLoc = NULL; + if (!Loc.getOrigLocation().isNull()) + InlinedLoc = Loc.getOrigLocation().getNode(); + // If this location is already tracked then use it. + DebugLocTuple Tuple(Context, InlinedLoc, Loc.getLineNumber(), + Loc.getColumnNumber()); + DenseMap<DebugLocTuple, unsigned>::iterator II + = DebugLocInfo.DebugIdMap.find(Tuple); + if (II != DebugLocInfo.DebugIdMap.end()) + return DebugLoc::get(II->second); + + // Add a new location entry. + unsigned Id = DebugLocInfo.DebugLocations.size(); + DebugLocInfo.DebugLocations.push_back(Tuple); + DebugLocInfo.DebugIdMap[Tuple] = Id; + + return DebugLoc::get(Id); +} + +/// ExtractDebugLocation - Extract debug location information +/// from llvm.dbg.func_start intrinsic. +DebugLoc llvm::ExtractDebugLocation(DbgFuncStartInst &FSI, + DebugLocTracker &DebugLocInfo) { + DebugLoc DL; + Value *SP = FSI.getSubprogram(); + + DISubprogram Subprogram(cast<MDNode>(SP)); + unsigned Line = Subprogram.getLineNumber(); + DICompileUnit CU(Subprogram.getCompileUnit()); + + // If this location is already tracked then use it. + DebugLocTuple Tuple(CU.getNode(), NULL, Line, /* Column */ 0); + DenseMap<DebugLocTuple, unsigned>::iterator II + = DebugLocInfo.DebugIdMap.find(Tuple); + if (II != DebugLocInfo.DebugIdMap.end()) + return DebugLoc::get(II->second); + + // Add a new location entry. + unsigned Id = DebugLocInfo.DebugLocations.size(); + DebugLocInfo.DebugLocations.push_back(Tuple); + DebugLocInfo.DebugIdMap[Tuple] = Id; + + return DebugLoc::get(Id); +} + +/// getDISubprogram - Find subprogram that is enclosing this scope. +DISubprogram llvm::getDISubprogram(MDNode *Scope) { + DIDescriptor D(Scope); + if (D.isNull()) return DISubprogram(); - } - - /// getDICompositeType - Find underlying composite type. - DICompositeType getDICompositeType(DIType T) { - if (T.isNull()) - return DICompositeType(); - - if (T.isCompositeType()) - return DICompositeType(T.getNode()); - - if (T.isDerivedType()) - return getDICompositeType(DIDerivedType(T.getNode()).getTypeDerivedFrom()); - + + if (D.isCompileUnit()) + return DISubprogram(); + + if (D.isSubprogram()) + return DISubprogram(Scope); + + if (D.isLexicalBlock()) + return getDISubprogram(DILexicalBlock(Scope).getContext().getNode()); + + return DISubprogram(); +} + +/// getDICompositeType - Find underlying composite type. 
+DICompositeType llvm::getDICompositeType(DIType T) { + if (T.isNull()) return DICompositeType(); - } + + if (T.isCompositeType()) + return DICompositeType(T.getNode()); + + if (T.isDerivedType()) + return getDICompositeType(DIDerivedType(T.getNode()).getTypeDerivedFrom()); + + return DICompositeType(); } diff --git a/lib/Analysis/IPA/Andersens.cpp b/lib/Analysis/IPA/Andersens.cpp index 4d5b312..28c66af 100644 --- a/lib/Analysis/IPA/Andersens.cpp +++ b/lib/Analysis/IPA/Andersens.cpp @@ -1402,7 +1402,7 @@ void Andersens::ClumpAddressTaken() { unsigned Pos = NewPos++; Translate[i] = Pos; NewGraphNodes.push_back(GraphNodes[i]); - DEBUG(errs() << "Renumbering node " << i << " to node " << Pos << "\n"); + DEBUG(dbgs() << "Renumbering node " << i << " to node " << Pos << "\n"); } // I believe this ends up being faster than making two vectors and splicing @@ -1412,7 +1412,7 @@ void Andersens::ClumpAddressTaken() { unsigned Pos = NewPos++; Translate[i] = Pos; NewGraphNodes.push_back(GraphNodes[i]); - DEBUG(errs() << "Renumbering node " << i << " to node " << Pos << "\n"); + DEBUG(dbgs() << "Renumbering node " << i << " to node " << Pos << "\n"); } } @@ -1421,7 +1421,7 @@ void Andersens::ClumpAddressTaken() { unsigned Pos = NewPos++; Translate[i] = Pos; NewGraphNodes.push_back(GraphNodes[i]); - DEBUG(errs() << "Renumbering node " << i << " to node " << Pos << "\n"); + DEBUG(dbgs() << "Renumbering node " << i << " to node " << Pos << "\n"); } } @@ -1493,7 +1493,7 @@ void Andersens::ClumpAddressTaken() { /// receive &D from E anyway. void Andersens::HVN() { - DEBUG(errs() << "Beginning HVN\n"); + DEBUG(dbgs() << "Beginning HVN\n"); // Build a predecessor graph. This is like our constraint graph with the // edges going in the opposite direction, and there are edges for all the // constraints, instead of just copy constraints. We also build implicit @@ -1564,7 +1564,7 @@ void Andersens::HVN() { Node2DFS.clear(); Node2Deleted.clear(); Node2Visited.clear(); - DEBUG(errs() << "Finished HVN\n"); + DEBUG(dbgs() << "Finished HVN\n"); } @@ -1688,7 +1688,7 @@ void Andersens::HVNValNum(unsigned NodeIndex) { /// and is equivalent to value numbering the collapsed constraint graph /// including evaluating unions. void Andersens::HU() { - DEBUG(errs() << "Beginning HU\n"); + DEBUG(dbgs() << "Beginning HU\n"); // Build a predecessor graph. This is like our constraint graph with the // edges going in the opposite direction, and there are edges for all the // constraints, instead of just copy constraints. We also build implicit @@ -1768,7 +1768,7 @@ void Andersens::HU() { } // PEClass nodes will be deleted by the deleting of N->PointsTo in our caller. Set2PEClass.clear(); - DEBUG(errs() << "Finished HU\n"); + DEBUG(dbgs() << "Finished HU\n"); } @@ -1946,12 +1946,12 @@ void Andersens::RewriteConstraints() { // to anything. 
if (LHSLabel == 0) { DEBUG(PrintNode(&GraphNodes[LHSNode])); - DEBUG(errs() << " is a non-pointer, ignoring constraint.\n"); + DEBUG(dbgs() << " is a non-pointer, ignoring constraint.\n"); continue; } if (RHSLabel == 0) { DEBUG(PrintNode(&GraphNodes[RHSNode])); - DEBUG(errs() << " is a non-pointer, ignoring constraint.\n"); + DEBUG(dbgs() << " is a non-pointer, ignoring constraint.\n"); continue; } // This constraint may be useless, and it may become useless as we translate @@ -1999,16 +1999,16 @@ void Andersens::PrintLabels() const { if (i < FirstRefNode) { PrintNode(&GraphNodes[i]); } else if (i < FirstAdrNode) { - DEBUG(errs() << "REF("); + DEBUG(dbgs() << "REF("); PrintNode(&GraphNodes[i-FirstRefNode]); - DEBUG(errs() <<")"); + DEBUG(dbgs() <<")"); } else { - DEBUG(errs() << "ADR("); + DEBUG(dbgs() << "ADR("); PrintNode(&GraphNodes[i-FirstAdrNode]); - DEBUG(errs() <<")"); + DEBUG(dbgs() <<")"); } - DEBUG(errs() << " has pointer label " << GraphNodes[i].PointerEquivLabel + DEBUG(dbgs() << " has pointer label " << GraphNodes[i].PointerEquivLabel << " and SCC rep " << VSSCCRep[i] << " and is " << (GraphNodes[i].Direct ? "Direct" : "Not direct") << "\n"); @@ -2025,7 +2025,7 @@ void Andersens::PrintLabels() const { /// operation are stored in SDT and are later used in SolveContraints() /// and UniteNodes(). void Andersens::HCD() { - DEBUG(errs() << "Starting HCD.\n"); + DEBUG(dbgs() << "Starting HCD.\n"); HCDSCCRep.resize(GraphNodes.size()); for (unsigned i = 0; i < GraphNodes.size(); ++i) { @@ -2074,7 +2074,7 @@ void Andersens::HCD() { Node2Visited.clear(); Node2Deleted.clear(); HCDSCCRep.clear(); - DEBUG(errs() << "HCD complete.\n"); + DEBUG(dbgs() << "HCD complete.\n"); } // Component of HCD: @@ -2146,7 +2146,7 @@ void Andersens::Search(unsigned Node) { /// Optimize the constraints by performing offline variable substitution and /// other optimizations. void Andersens::OptimizeConstraints() { - DEBUG(errs() << "Beginning constraint optimization\n"); + DEBUG(dbgs() << "Beginning constraint optimization\n"); SDTActive = false; @@ -2230,7 +2230,7 @@ void Andersens::OptimizeConstraints() { // HCD complete. - DEBUG(errs() << "Finished constraint optimization\n"); + DEBUG(dbgs() << "Finished constraint optimization\n"); FirstRefNode = 0; FirstAdrNode = 0; } @@ -2238,7 +2238,7 @@ void Andersens::OptimizeConstraints() { /// Unite pointer but not location equivalent variables, now that the constraint /// graph is built. 
void Andersens::UnitePointerEquivalences() { - DEBUG(errs() << "Uniting remaining pointer equivalences\n"); + DEBUG(dbgs() << "Uniting remaining pointer equivalences\n"); for (unsigned i = 0; i < GraphNodes.size(); ++i) { if (GraphNodes[i].AddressTaken && GraphNodes[i].isRep()) { unsigned Label = GraphNodes[i].PointerEquivLabel; @@ -2247,7 +2247,7 @@ void Andersens::UnitePointerEquivalences() { UniteNodes(i, PENLEClass2Node[Label]); } } - DEBUG(errs() << "Finished remaining pointer equivalences\n"); + DEBUG(dbgs() << "Finished remaining pointer equivalences\n"); PENLEClass2Node.clear(); } @@ -2403,7 +2403,7 @@ void Andersens::SolveConstraints() { std::vector<unsigned int> RSV; #endif while( !CurrWL->empty() ) { - DEBUG(errs() << "Starting iteration #" << ++NumIters << "\n"); + DEBUG(dbgs() << "Starting iteration #" << ++NumIters << "\n"); Node* CurrNode; unsigned CurrNodeIndex; @@ -2706,11 +2706,11 @@ unsigned Andersens::UniteNodes(unsigned First, unsigned Second, SecondNode->OldPointsTo = NULL; NumUnified++; - DEBUG(errs() << "Unified Node "); + DEBUG(dbgs() << "Unified Node "); DEBUG(PrintNode(FirstNode)); - DEBUG(errs() << " and Node "); + DEBUG(dbgs() << " and Node "); DEBUG(PrintNode(SecondNode)); - DEBUG(errs() << "\n"); + DEBUG(dbgs() << "\n"); if (SDTActive) if (SDT[Second] >= 0) { @@ -2755,17 +2755,17 @@ unsigned Andersens::FindNode(unsigned NodeIndex) const { void Andersens::PrintNode(const Node *N) const { if (N == &GraphNodes[UniversalSet]) { - errs() << "<universal>"; + dbgs() << "<universal>"; return; } else if (N == &GraphNodes[NullPtr]) { - errs() << "<nullptr>"; + dbgs() << "<nullptr>"; return; } else if (N == &GraphNodes[NullObject]) { - errs() << "<null>"; + dbgs() << "<null>"; return; } if (!N->getValue()) { - errs() << "artificial" << (intptr_t) N; + dbgs() << "artificial" << (intptr_t) N; return; } @@ -2774,85 +2774,85 @@ void Andersens::PrintNode(const Node *N) const { if (Function *F = dyn_cast<Function>(V)) { if (isa<PointerType>(F->getFunctionType()->getReturnType()) && N == &GraphNodes[getReturnNode(F)]) { - errs() << F->getName() << ":retval"; + dbgs() << F->getName() << ":retval"; return; } else if (F->getFunctionType()->isVarArg() && N == &GraphNodes[getVarargNode(F)]) { - errs() << F->getName() << ":vararg"; + dbgs() << F->getName() << ":vararg"; return; } } if (Instruction *I = dyn_cast<Instruction>(V)) - errs() << I->getParent()->getParent()->getName() << ":"; + dbgs() << I->getParent()->getParent()->getName() << ":"; else if (Argument *Arg = dyn_cast<Argument>(V)) - errs() << Arg->getParent()->getName() << ":"; + dbgs() << Arg->getParent()->getName() << ":"; if (V->hasName()) - errs() << V->getName(); + dbgs() << V->getName(); else - errs() << "(unnamed)"; + dbgs() << "(unnamed)"; if (isa<GlobalValue>(V) || isa<AllocaInst>(V) || isMalloc(V)) if (N == &GraphNodes[getObject(V)]) - errs() << "<mem>"; + dbgs() << "<mem>"; } void Andersens::PrintConstraint(const Constraint &C) const { if (C.Type == Constraint::Store) { - errs() << "*"; + dbgs() << "*"; if (C.Offset != 0) - errs() << "("; + dbgs() << "("; } PrintNode(&GraphNodes[C.Dest]); if (C.Type == Constraint::Store && C.Offset != 0) - errs() << " + " << C.Offset << ")"; - errs() << " = "; + dbgs() << " + " << C.Offset << ")"; + dbgs() << " = "; if (C.Type == Constraint::Load) { - errs() << "*"; + dbgs() << "*"; if (C.Offset != 0) - errs() << "("; + dbgs() << "("; } else if (C.Type == Constraint::AddressOf) - errs() << "&"; + dbgs() << "&"; PrintNode(&GraphNodes[C.Src]); if (C.Offset != 0 && C.Type != 
Constraint::Store) - errs() << " + " << C.Offset; + dbgs() << " + " << C.Offset; if (C.Type == Constraint::Load && C.Offset != 0) - errs() << ")"; - errs() << "\n"; + dbgs() << ")"; + dbgs() << "\n"; } void Andersens::PrintConstraints() const { - errs() << "Constraints:\n"; + dbgs() << "Constraints:\n"; for (unsigned i = 0, e = Constraints.size(); i != e; ++i) PrintConstraint(Constraints[i]); } void Andersens::PrintPointsToGraph() const { - errs() << "Points-to graph:\n"; + dbgs() << "Points-to graph:\n"; for (unsigned i = 0, e = GraphNodes.size(); i != e; ++i) { const Node *N = &GraphNodes[i]; if (FindNode(i) != i) { PrintNode(N); - errs() << "\t--> same as "; + dbgs() << "\t--> same as "; PrintNode(&GraphNodes[FindNode(i)]); - errs() << "\n"; + dbgs() << "\n"; } else { - errs() << "[" << (N->PointsTo->count()) << "] "; + dbgs() << "[" << (N->PointsTo->count()) << "] "; PrintNode(N); - errs() << "\t--> "; + dbgs() << "\t--> "; bool first = true; for (SparseBitVector<>::iterator bi = N->PointsTo->begin(); bi != N->PointsTo->end(); ++bi) { if (!first) - errs() << ", "; + dbgs() << ", "; PrintNode(&GraphNodes[*bi]); first = false; } - errs() << "\n"; + dbgs() << "\n"; } } } diff --git a/lib/Analysis/IPA/CallGraph.cpp b/lib/Analysis/IPA/CallGraph.cpp index 9cd8bb8..a826177 100644 --- a/lib/Analysis/IPA/CallGraph.cpp +++ b/lib/Analysis/IPA/CallGraph.cpp @@ -17,6 +17,7 @@ #include "llvm/Instructions.h" #include "llvm/IntrinsicInst.h" #include "llvm/Support/CallSite.h" +#include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; @@ -181,7 +182,7 @@ void CallGraph::print(raw_ostream &OS, Module*) const { I->second->print(OS); } void CallGraph::dump() const { - print(errs(), 0); + print(dbgs(), 0); } //===----------------------------------------------------------------------===// @@ -232,7 +233,7 @@ void CallGraphNode::print(raw_ostream &OS) const { OS << "\n"; } -void CallGraphNode::dump() const { print(errs()); } +void CallGraphNode::dump() const { print(dbgs()); } /// removeCallEdgeFor - This method removes the edge in the node for the /// specified call site. Note that this method takes linear time, so it diff --git a/lib/Analysis/IPA/CallGraphSCCPass.cpp b/lib/Analysis/IPA/CallGraphSCCPass.cpp index a96a5c5..5504b9b 100644 --- a/lib/Analysis/IPA/CallGraphSCCPass.cpp +++ b/lib/Analysis/IPA/CallGraphSCCPass.cpp @@ -126,7 +126,7 @@ bool CGPassManager::RunPassOnSCC(Pass *P, std::vector<CallGraphNode*> &CurSCC, // The function pass(es) modified the IR, they may have clobbered the // callgraph. 
if (Changed && CallGraphUpToDate) { - DEBUG(errs() << "CGSCCPASSMGR: Pass Dirtied SCC: " + DEBUG(dbgs() << "CGSCCPASSMGR: Pass Dirtied SCC: " << P->getPassName() << '\n'); CallGraphUpToDate = false; } @@ -143,7 +143,7 @@ void CGPassManager::RefreshCallGraph(std::vector<CallGraphNode*> &CurSCC, CallGraph &CG, bool CheckingMode) { DenseMap<Value*, CallGraphNode*> CallSites; - DEBUG(errs() << "CGSCCPASSMGR: Refreshing SCC with " << CurSCC.size() + DEBUG(dbgs() << "CGSCCPASSMGR: Refreshing SCC with " << CurSCC.size() << " nodes:\n"; for (unsigned i = 0, e = CurSCC.size(); i != e; ++i) CurSCC[i]->dump(); @@ -277,11 +277,11 @@ void CGPassManager::RefreshCallGraph(std::vector<CallGraphNode*> &CurSCC, } DEBUG(if (MadeChange) { - errs() << "CGSCCPASSMGR: Refreshed SCC is now:\n"; + dbgs() << "CGSCCPASSMGR: Refreshed SCC is now:\n"; for (unsigned i = 0, e = CurSCC.size(); i != e; ++i) CurSCC[i]->dump(); } else { - errs() << "CGSCCPASSMGR: SCC Refresh didn't change call graph.\n"; + dbgs() << "CGSCCPASSMGR: SCC Refresh didn't change call graph.\n"; } ); } diff --git a/lib/Analysis/IVUsers.cpp b/lib/Analysis/IVUsers.cpp index 627dbbb..df9e31c 100644 --- a/lib/Analysis/IVUsers.cpp +++ b/lib/Analysis/IVUsers.cpp @@ -53,7 +53,7 @@ static bool containsAddRecFromDifferentLoop(const SCEV *S, Loop *L) { if (newLoop == L) return false; // if newLoop is an outer loop of L, this is OK. - if (newLoop->contains(L->getHeader())) + if (newLoop->contains(L)) return false; } return true; @@ -128,7 +128,7 @@ static bool getSCEVStartAndStride(const SCEV *&SH, Loop *L, Loop *UseLoop, if (!AddRecStride->properlyDominates(Header, DT)) return false; - DEBUG(errs() << "[" << L->getHeader()->getName() + DEBUG(dbgs() << "[" << L->getHeader()->getName() << "] Variable stride: " << *AddRec << "\n"); } @@ -148,7 +148,7 @@ static bool IVUseShouldUsePostIncValue(Instruction *User, Instruction *IV, Loop *L, LoopInfo *LI, DominatorTree *DT, Pass *P) { // If the user is in the loop, use the preinc value. - if (L->contains(User->getParent())) return false; + if (L->contains(User)) return false; BasicBlock *LatchBlock = L->getLoopLatch(); if (!LatchBlock) @@ -209,7 +209,7 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) { return false; // Non-reducible symbolic expression, bail out. // Keep things simple. Don't touch loop-variant strides. 
- if (!Stride->isLoopInvariant(L) && L->contains(I->getParent())) + if (!Stride->isLoopInvariant(L) && L->contains(I)) return false; SmallPtrSet<Instruction *, 4> UniqueUsers; @@ -233,13 +233,13 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) { if (LI->getLoopFor(User->getParent()) != L) { if (isa<PHINode>(User) || Processed.count(User) || !AddUsersIfInteresting(User)) { - DEBUG(errs() << "FOUND USER in other loop: " << *User << '\n' + DEBUG(dbgs() << "FOUND USER in other loop: " << *User << '\n' << " OF SCEV: " << *ISE << '\n'); AddUserToIVUsers = true; } } else if (Processed.count(User) || !AddUsersIfInteresting(User)) { - DEBUG(errs() << "FOUND USER: " << *User << '\n' + DEBUG(dbgs() << "FOUND USER: " << *User << '\n' << " OF SCEV: " << *ISE << '\n'); AddUserToIVUsers = true; } @@ -262,7 +262,7 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) { const SCEV *NewStart = SE->getMinusSCEV(Start, Stride); StrideUses->addUser(NewStart, User, I); StrideUses->Users.back().setIsUseOfPostIncrementedValue(true); - DEBUG(errs() << " USING POSTINC SCEV, START=" << *NewStart<< "\n"); + DEBUG(dbgs() << " USING POSTINC SCEV, START=" << *NewStart<< "\n"); } else { StrideUses->addUser(Start, User, I); } @@ -307,7 +307,6 @@ bool IVUsers::runOnLoop(Loop *l, LPPassManager &LPM) { for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) AddUsersIfInteresting(I); - Processed.clear(); return false; } @@ -325,7 +324,7 @@ const SCEV *IVUsers::getReplacementExpr(const IVStrideUse &U) const { if (U.isUseOfPostIncrementedValue()) RetVal = SE->getAddExpr(RetVal, U.getParent()->Stride); // Evaluate the expression out of the loop, if possible. - if (!L->contains(U.getUser()->getParent())) { + if (!L->contains(U.getUser())) { const SCEV *ExitVal = SE->getSCEVAtScope(RetVal, L->getParentLoop()); if (ExitVal->isLoopInvariant(L)) RetVal = ExitVal; @@ -364,12 +363,13 @@ void IVUsers::print(raw_ostream &OS, const Module *M) const { } void IVUsers::dump() const { - print(errs()); + print(dbgs()); } void IVUsers::releaseMemory() { IVUsesByStride.clear(); StrideOrder.clear(); + Processed.clear(); IVUses.clear(); } diff --git a/lib/Analysis/InstCount.cpp b/lib/Analysis/InstCount.cpp index a4b041f..bb2cf53 100644 --- a/lib/Analysis/InstCount.cpp +++ b/lib/Analysis/InstCount.cpp @@ -15,6 +15,7 @@ #include "llvm/Analysis/Passes.h" #include "llvm/Pass.h" #include "llvm/Function.h" +#include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/InstVisitor.h" #include "llvm/Support/raw_ostream.h" diff --git a/lib/Analysis/LazyValueInfo.cpp b/lib/Analysis/LazyValueInfo.cpp index 5796c6f..ff9026b 100644 --- a/lib/Analysis/LazyValueInfo.cpp +++ b/lib/Analysis/LazyValueInfo.cpp @@ -342,7 +342,7 @@ LVILatticeVal LVIQuery::getBlockValue(BasicBlock *BB) { // If we've already computed this block's value, return it. if (!BBLV.isUndefined()) { - DEBUG(errs() << " reuse BB '" << BB->getName() << "' val=" << BBLV <<'\n'); + DEBUG(dbgs() << " reuse BB '" << BB->getName() << "' val=" << BBLV <<'\n'); return BBLV; } @@ -365,7 +365,7 @@ LVILatticeVal LVIQuery::getBlockValue(BasicBlock *BB) { // If we hit overdefined, exit early. The BlockVals entry is already set // to overdefined. 
if (Result.isOverdefined()) { - DEBUG(errs() << " compute BB '" << BB->getName() + DEBUG(dbgs() << " compute BB '" << BB->getName() << "' - overdefined because of pred.\n"); return Result; } @@ -394,7 +394,7 @@ LVILatticeVal LVIQuery::getBlockValue(BasicBlock *BB) { } - DEBUG(errs() << " compute BB '" << BB->getName() + DEBUG(dbgs() << " compute BB '" << BB->getName() << "' - overdefined because inst def found.\n"); LVILatticeVal Result; @@ -471,12 +471,12 @@ LVILatticeVal LazyValueInfoCache::getValueInBlock(Value *V, BasicBlock *BB) { if (Constant *VC = dyn_cast<Constant>(V)) return LVILatticeVal::get(VC); - DEBUG(errs() << "LVI Getting block end value " << *V << " at '" + DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '" << BB->getName() << "'\n"); LVILatticeVal Result = LVIQuery(V, ValueCache[V]).getBlockValue(BB); - DEBUG(errs() << " Result = " << Result << "\n"); + DEBUG(dbgs() << " Result = " << Result << "\n"); return Result; } @@ -486,12 +486,12 @@ getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB) { if (Constant *VC = dyn_cast<Constant>(V)) return LVILatticeVal::get(VC); - DEBUG(errs() << "LVI Getting edge value " << *V << " from '" + DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '" << FromBB->getName() << "' to '" << ToBB->getName() << "'\n"); LVILatticeVal Result = LVIQuery(V, ValueCache[V]).getEdgeValue(FromBB, ToBB); - DEBUG(errs() << " Result = " << Result << "\n"); + DEBUG(dbgs() << " Result = " << Result << "\n"); return Result; } diff --git a/lib/Analysis/LoopDependenceAnalysis.cpp b/lib/Analysis/LoopDependenceAnalysis.cpp index 32d2266..bb4f46d 100644 --- a/lib/Analysis/LoopDependenceAnalysis.cpp +++ b/lib/Analysis/LoopDependenceAnalysis.cpp @@ -181,15 +181,15 @@ LoopDependenceAnalysis::DependenceResult LoopDependenceAnalysis::analyseSubscript(const SCEV *A, const SCEV *B, Subscript *S) const { - DEBUG(errs() << " Testing subscript: " << *A << ", " << *B << "\n"); + DEBUG(dbgs() << " Testing subscript: " << *A << ", " << *B << "\n"); if (A == B) { - DEBUG(errs() << " -> [D] same SCEV\n"); + DEBUG(dbgs() << " -> [D] same SCEV\n"); return Dependent; } if (!isAffine(A) || !isAffine(B)) { - DEBUG(errs() << " -> [?] not affine\n"); + DEBUG(dbgs() << " -> [?] not affine\n"); return Unknown; } @@ -204,12 +204,12 @@ LoopDependenceAnalysis::analyseSubscript(const SCEV *A, LoopDependenceAnalysis::DependenceResult LoopDependenceAnalysis::analysePair(DependencePair *P) const { - DEBUG(errs() << "Analysing:\n" << *P->A << "\n" << *P->B << "\n"); + DEBUG(dbgs() << "Analysing:\n" << *P->A << "\n" << *P->B << "\n"); // We only analyse loads and stores but no possible memory accesses by e.g. // free, call, or invoke instructions. if (!IsLoadOrStoreInst(P->A) || !IsLoadOrStoreInst(P->B)) { - DEBUG(errs() << "--> [?] no load/store\n"); + DEBUG(dbgs() << "--> [?] no load/store\n"); return Unknown; } @@ -219,12 +219,12 @@ LoopDependenceAnalysis::analysePair(DependencePair *P) const { switch (UnderlyingObjectsAlias(AA, aPtr, bPtr)) { case AliasAnalysis::MayAlias: // We can not analyse objects if we do not know about their aliasing. - DEBUG(errs() << "---> [?] may alias\n"); + DEBUG(dbgs() << "---> [?] may alias\n"); return Unknown; case AliasAnalysis::NoAlias: // If the objects noalias, they are distinct, accesses are independent. 
- DEBUG(errs() << "---> [I] no alias\n"); + DEBUG(dbgs() << "---> [I] no alias\n"); return Independent; case AliasAnalysis::MustAlias: diff --git a/lib/Analysis/LoopInfo.cpp b/lib/Analysis/LoopInfo.cpp index 34089ee..5d31c11 100644 --- a/lib/Analysis/LoopInfo.cpp +++ b/lib/Analysis/LoopInfo.cpp @@ -56,7 +56,7 @@ bool Loop::isLoopInvariant(Value *V) const { /// loop-invariant. /// bool Loop::isLoopInvariant(Instruction *I) const { - return !contains(I->getParent()); + return !contains(I); } /// makeLoopInvariant - If the given value is an instruciton inside of the diff --git a/lib/Analysis/MemoryDependenceAnalysis.cpp b/lib/Analysis/MemoryDependenceAnalysis.cpp index a0c7706..2d74709d 100644 --- a/lib/Analysis/MemoryDependenceAnalysis.cpp +++ b/lib/Analysis/MemoryDependenceAnalysis.cpp @@ -275,7 +275,8 @@ getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad, // a subsequent bitcast of the malloc call result. There can be stores to // the malloced memory between the malloc call and its bitcast uses, and we // need to continue scanning until the malloc call. - if (isa<AllocaInst>(Inst) || extractMallocCall(Inst)) { + if (isa<AllocaInst>(Inst) || + (isa<CallInst>(Inst) && extractMallocCall(Inst))) { Value *AccessPtr = MemPtr->getUnderlyingObject(); if (AccessPtr == Inst || @@ -546,9 +547,9 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) { // If we had a dirty entry for the block, update it. Otherwise, just add // a new entry. if (ExistingResult) - ExistingResult->setResult(Dep, 0); + ExistingResult->setResult(Dep); else - Cache.push_back(NonLocalDepEntry(DirtyBB, Dep, 0)); + Cache.push_back(NonLocalDepEntry(DirtyBB, Dep)); // If the block has a dependency (i.e. it isn't completely transparent to // the value), remember the association! @@ -578,7 +579,7 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) { /// void MemoryDependenceAnalysis:: getNonLocalPointerDependency(Value *Pointer, bool isLoad, BasicBlock *FromBB, - SmallVectorImpl<NonLocalDepEntry> &Result) { + SmallVectorImpl<NonLocalDepResult> &Result) { assert(isa<PointerType>(Pointer->getType()) && "Can't get pointer deps of a non-pointer!"); Result.clear(); @@ -599,9 +600,9 @@ getNonLocalPointerDependency(Value *Pointer, bool isLoad, BasicBlock *FromBB, Result, Visited, true)) return; Result.clear(); - Result.push_back(NonLocalDepEntry(FromBB, - MemDepResult::getClobber(FromBB->begin()), - Pointer)); + Result.push_back(NonLocalDepResult(FromBB, + MemDepResult::getClobber(FromBB->begin()), + Pointer)); } /// GetNonLocalInfoForBlock - Compute the memdep value for BB with @@ -656,9 +657,9 @@ GetNonLocalInfoForBlock(Value *Pointer, uint64_t PointeeSize, // If we had a dirty entry for the block, update it. Otherwise, just add // a new entry. if (ExistingResult) - ExistingResult->setResult(Dep, Pointer); + ExistingResult->setResult(Dep); else - Cache->push_back(NonLocalDepEntry(BB, Dep, Pointer)); + Cache->push_back(NonLocalDepEntry(BB, Dep)); // If the block has a dependency (i.e. 
it isn't completely transparent to // the value), remember the reverse association because we just added it @@ -726,7 +727,7 @@ SortNonLocalDepInfoCache(MemoryDependenceAnalysis::NonLocalDepInfo &Cache, bool MemoryDependenceAnalysis:: getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, uint64_t PointeeSize, bool isLoad, BasicBlock *StartBB, - SmallVectorImpl<NonLocalDepEntry> &Result, + SmallVectorImpl<NonLocalDepResult> &Result, DenseMap<BasicBlock*, Value*> &Visited, bool SkipFirstBlock) { @@ -759,11 +760,12 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, uint64_t PointeeSize, } } + Value *Addr = Pointer.getAddr(); for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end(); I != E; ++I) { - Visited.insert(std::make_pair(I->getBB(), Pointer.getAddr())); + Visited.insert(std::make_pair(I->getBB(), Addr)); if (!I->getResult().isNonLocal()) - Result.push_back(*I); + Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(), Addr)); } ++NumCacheCompleteNonLocalPtr; return false; @@ -807,7 +809,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, uint64_t PointeeSize, // If we got a Def or Clobber, add this to the list of results. if (!Dep.isNonLocal()) { - Result.push_back(NonLocalDepEntry(BB, Dep, Pointer.getAddr())); + Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr())); continue; } } @@ -889,41 +891,17 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, uint64_t PointeeSize, // a computation of the pointer in this predecessor. if (PredPtrVal == 0) { // Add the entry to the Result list. - NonLocalDepEntry Entry(Pred, - MemDepResult::getClobber(Pred->getTerminator()), - PredPtrVal); + NonLocalDepResult Entry(Pred, + MemDepResult::getClobber(Pred->getTerminator()), + PredPtrVal); Result.push_back(Entry); - // Add it to the cache for this CacheKey so that subsequent queries get - // this result. - Cache = &NonLocalPointerDeps[CacheKey].second; - MemoryDependenceAnalysis::NonLocalDepInfo::iterator It = - std::upper_bound(Cache->begin(), Cache->end(), Entry); - - if (It != Cache->begin() && (It-1)->getBB() == Pred) - --It; - - if (It == Cache->end() || It->getBB() != Pred) { - Cache->insert(It, Entry); - // Add it to the reverse map. - ReverseNonLocalPtrDeps[Pred->getTerminator()].insert(CacheKey); - } else if (!It->getResult().isDirty()) { - // noop - } else if (It->getResult().getInst() == Pred->getTerminator()) { - // Same instruction, clear the dirty marker. - It->setResult(Entry.getResult(), PredPtrVal); - } else if (It->getResult().getInst() == 0) { - // Dirty, with no instruction, just add this. - It->setResult(Entry.getResult(), PredPtrVal); - ReverseNonLocalPtrDeps[Pred->getTerminator()].insert(CacheKey); - } else { - // Otherwise, dirty with a different instruction. - RemoveFromReverseMap(ReverseNonLocalPtrDeps, - It->getResult().getInst(), CacheKey); - It->setResult(Entry.getResult(),PredPtrVal); - ReverseNonLocalPtrDeps[Pred->getTerminator()].insert(CacheKey); - } - Cache = 0; + // Since we had a phi translation failure, the cache for CacheKey won't + // include all of the entries that we need to immediately satisfy future + // queries. Mark this in NonLocalPointerDeps by setting the + // BBSkipFirstBlockPair pointer to null. This requires reuse of the + // cached value to do more work but not miss the phi trans failure. 
+ NonLocalPointerDeps[CacheKey].first = BBSkipFirstBlockPair(); continue; } @@ -961,10 +939,10 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, uint64_t PointeeSize, NumSortedEntries = Cache->size(); } - // Since we did phi translation, the "Cache" set won't contain all of the + // Since we failed phi translation, the "Cache" set won't contain all of the // results for the query. This is ok (we can still use it to accelerate // specific block queries) but we can't do the fastpath "return all - // results from the set" Clear out the indicator for this. + // results from the set". Clear out the indicator for this. CacheInfo->first = BBSkipFirstBlockPair(); // If *nothing* works, mark the pointer as being clobbered by the first @@ -983,9 +961,10 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, uint64_t PointeeSize, assert(I->getResult().isNonLocal() && "Should only be here with transparent block"); - I->setResult(MemDepResult::getClobber(BB->begin()), Pointer.getAddr()); + I->setResult(MemDepResult::getClobber(BB->begin())); ReverseNonLocalPtrDeps[BB->begin()].insert(CacheKey); - Result.push_back(*I); + Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(), + Pointer.getAddr())); break; } } @@ -1139,7 +1118,7 @@ void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) { if (DI->getResult().getInst() != RemInst) continue; // Convert to a dirty entry for the subsequent instruction. - DI->setResult(NewDirtyVal, DI->getAddress()); + DI->setResult(NewDirtyVal); if (Instruction *NextI = NewDirtyVal.getInst()) ReverseDepsToAdd.push_back(std::make_pair(NextI, *I)); @@ -1181,7 +1160,7 @@ void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) { if (DI->getResult().getInst() != RemInst) continue; // Convert to a dirty entry for the subsequent instruction. 
- DI->setResult(NewDirtyVal, DI->getAddress()); + DI->setResult(NewDirtyVal); if (Instruction *NewDirtyInst = NewDirtyVal.getInst()) ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P)); diff --git a/lib/Analysis/PHITransAddr.cpp b/lib/Analysis/PHITransAddr.cpp index 07e2919..334a188 100644 --- a/lib/Analysis/PHITransAddr.cpp +++ b/lib/Analysis/PHITransAddr.cpp @@ -14,6 +14,7 @@ #include "llvm/Analysis/PHITransAddr.h" #include "llvm/Analysis/Dominators.h" #include "llvm/Analysis/InstructionSimplify.h" +#include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; @@ -35,12 +36,12 @@ static bool CanPHITrans(Instruction *Inst) { void PHITransAddr::dump() const { if (Addr == 0) { - errs() << "PHITransAddr: null\n"; + dbgs() << "PHITransAddr: null\n"; return; } - errs() << "PHITransAddr: " << *Addr << "\n"; + dbgs() << "PHITransAddr: " << *Addr << "\n"; for (unsigned i = 0, e = InstInputs.size(); i != e; ++i) - errs() << " Input #" << i << " is " << *InstInputs[i] << "\n"; + dbgs() << " Input #" << i << " is " << *InstInputs[i] << "\n"; } diff --git a/lib/Analysis/PostDominators.cpp b/lib/Analysis/PostDominators.cpp index 69d6b47..c38e050 100644 --- a/lib/Analysis/PostDominators.cpp +++ b/lib/Analysis/PostDominators.cpp @@ -33,7 +33,7 @@ F("postdomtree", "Post-Dominator Tree Construction", true, true); bool PostDominatorTree::runOnFunction(Function &F) { DT->recalculate(F); - DEBUG(DT->print(errs())); + DEBUG(DT->print(dbgs())); return false; } diff --git a/lib/Analysis/ProfileEstimatorPass.cpp b/lib/Analysis/ProfileEstimatorPass.cpp index 8148429..cf9311a 100644 --- a/lib/Analysis/ProfileEstimatorPass.cpp +++ b/lib/Analysis/ProfileEstimatorPass.cpp @@ -87,11 +87,11 @@ static double ignoreMissing(double w) { } static void inline printEdgeError(ProfileInfo::Edge e, const char *M) { - DEBUG(errs() << "-- Edge " << e << " is not calculated, " << M << "\n"); + DEBUG(dbgs() << "-- Edge " << e << " is not calculated, " << M << "\n"); } void inline ProfileEstimatorPass::printEdgeWeight(Edge E) { - DEBUG(errs() << "-- Weight of Edge " << E << ":" + DEBUG(dbgs() << "-- Weight of Edge " << E << ":" << format("%20.20g", getEdgeWeight(E)) << "\n"); } @@ -179,7 +179,7 @@ void ProfileEstimatorPass::recurseBasicBlock(BasicBlock *BB) { // from weight. if (MinimalWeight.find(*ei) != MinimalWeight.end()) { incoming -= MinimalWeight[*ei]; - DEBUG(errs() << "Reserving " << format("%.20g",MinimalWeight[*ei]) << " at " << (*ei) << "\n"); + DEBUG(dbgs() << "Reserving " << format("%.20g",MinimalWeight[*ei]) << " at " << (*ei) << "\n"); } } else { incoming -= w; @@ -217,7 +217,7 @@ void ProfileEstimatorPass::recurseBasicBlock(BasicBlock *BB) { // Read necessary minimal weight. 
if (MinimalWeight.find(*ei) != MinimalWeight.end()) { EdgeInformation[BB->getParent()][*ei] += MinimalWeight[*ei]; - DEBUG(errs() << "Additionally " << format("%.20g",MinimalWeight[*ei]) << " at " << (*ei) << "\n"); + DEBUG(dbgs() << "Additionally " << format("%.20g",MinimalWeight[*ei]) << " at " << (*ei) << "\n"); } printEdgeWeight(*ei); @@ -232,7 +232,7 @@ void ProfileEstimatorPass::recurseBasicBlock(BasicBlock *BB) { MinimalWeight[e] = 0; } MinimalWeight[e] += w; - DEBUG(errs() << "Minimal Weight for " << e << ": " << format("%.20g",MinimalWeight[e]) << "\n"); + DEBUG(dbgs() << "Minimal Weight for " << e << ": " << format("%.20g",MinimalWeight[e]) << "\n"); Dest = Parent; } } @@ -268,7 +268,7 @@ void ProfileEstimatorPass::recurseBasicBlock(BasicBlock *BB) { // from block weight, this is readded later on. if (MinimalWeight.find(edge) != MinimalWeight.end()) { BBWeight -= MinimalWeight[edge]; - DEBUG(errs() << "Reserving " << format("%.20g",MinimalWeight[edge]) << " at " << edge << "\n"); + DEBUG(dbgs() << "Reserving " << format("%.20g",MinimalWeight[edge]) << " at " << edge << "\n"); } } } @@ -288,7 +288,7 @@ void ProfileEstimatorPass::recurseBasicBlock(BasicBlock *BB) { // Readd minimal necessary weight. if (MinimalWeight.find(*ei) != MinimalWeight.end()) { EdgeInformation[BB->getParent()][*ei] += MinimalWeight[*ei]; - DEBUG(errs() << "Additionally " << format("%.20g",MinimalWeight[*ei]) << " at " << (*ei) << "\n"); + DEBUG(dbgs() << "Additionally " << format("%.20g",MinimalWeight[*ei]) << " at " << (*ei) << "\n"); } printEdgeWeight(*ei); } @@ -319,7 +319,7 @@ bool ProfileEstimatorPass::runOnFunction(Function &F) { // Clear Minimal Edges. MinimalWeight.clear(); - DEBUG(errs() << "Working on function " << F.getNameStr() << "\n"); + DEBUG(dbgs() << "Working on function " << F.getNameStr() << "\n"); // Since the entry block is the first one and has no predecessors, the edge // (0,entry) is inserted with the starting weight of 1.
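The churn in this and the neighboring analysis files is one mechanical migration: debug output moves from errs() to dbgs() inside DEBUG(...). The point is that dbgs() is the stream reserved for -debug output (in asserts builds it can be circularly buffered via -debug-buffer-size, otherwise it degrades to errs()), while the DEBUG macro itself compiles away entirely when assertions are disabled. A minimal sketch of the idiom the patch converges on; the pass name and traceWeight helper are illustrative only, not part of this patch:

    #define DEBUG_TYPE "profile-estimator" // hypothetical; enables -debug-only=profile-estimator
    #include "llvm/Support/Debug.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    static void traceWeight(double W) { // illustrative helper, not from the patch
      // Printed only when the pass runs under -debug (or a matching
      // -debug-only); in -Asserts builds DEBUG expands to nothing.
      DEBUG(dbgs() << "weight = " << W << '\n');
    }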
@@ -366,7 +366,7 @@ bool ProfileEstimatorPass::runOnFunction(Function &F) { if (Dest != *bbi) { // If there is no circle, just set edge weight to 0 EdgeInformation[&F][e] = 0; - DEBUG(errs() << "Assuming edge weight: "); + DEBUG(dbgs() << "Assuming edge weight: "); printEdgeWeight(e); found = true; } @@ -375,7 +375,7 @@ } if (!found) { cleanup = true; - DEBUG(errs() << "No assumption possible in Function "<<F.getName()<<", setting all to zero\n"); + DEBUG(dbgs() << "No assumption possible in Function "<<F.getName()<<", setting all to zero\n"); } } } diff --git a/lib/Analysis/ProfileInfo.cpp b/lib/Analysis/ProfileInfo.cpp index c49c6e1..afd86b1 100644 --- a/lib/Analysis/ProfileInfo.cpp +++ b/lib/Analysis/ProfileInfo.cpp @@ -163,7 +163,7 @@ double ProfileInfoT<MachineFunction, MachineBasicBlock>:: template<> void ProfileInfoT<Function,BasicBlock>:: setExecutionCount(const BasicBlock *BB, double w) { - DEBUG(errs() << "Creating Block " << BB->getName() + DEBUG(dbgs() << "Creating Block " << BB->getName() << " (weight: " << format("%.20g",w) << ")\n"); BlockInformation[BB->getParent()][BB] = w; } @@ -171,7 +171,7 @@ void ProfileInfoT<Function,BasicBlock>:: template<> void ProfileInfoT<MachineFunction, MachineBasicBlock>:: setExecutionCount(const MachineBasicBlock *MBB, double w) { - DEBUG(errs() << "Creating Block " << MBB->getBasicBlock()->getName() + DEBUG(dbgs() << "Creating Block " << MBB->getBasicBlock()->getName() << " (weight: " << format("%.20g",w) << ")\n"); BlockInformation[MBB->getParent()][MBB] = w; } @@ -180,7 +180,7 @@ template<> void ProfileInfoT<Function,BasicBlock>::addEdgeWeight(Edge e, double w) { double oldw = getEdgeWeight(e); assert (oldw != MissingValue && "Adding weight to Edge with no previous weight"); - DEBUG(errs() << "Adding to Edge " << e + DEBUG(dbgs() << "Adding to Edge " << e << " (new weight: " << format("%.20g",oldw + w) << ")\n"); EdgeInformation[getFunction(e)][e] = oldw + w; } @@ -190,7 +190,7 @@ void ProfileInfoT<Function,BasicBlock>:: addExecutionCount(const BasicBlock *BB, double w) { double oldw = getExecutionCount(BB); assert (oldw != MissingValue && "Adding weight to Block with no previous weight"); - DEBUG(errs() << "Adding to Block " << BB->getName() + DEBUG(dbgs() << "Adding to Block " << BB->getName() << " (new weight: " << format("%.20g",oldw + w) << ")\n"); BlockInformation[BB->getParent()][BB] = oldw + w; } @@ -201,7 +201,7 @@ void ProfileInfoT<Function,BasicBlock>::removeBlock(const BasicBlock *BB) { BlockInformation.find(BB->getParent()); if (J == BlockInformation.end()) return; - DEBUG(errs() << "Deleting " << BB->getName() << "\n"); + DEBUG(dbgs() << "Deleting " << BB->getName() << "\n"); J->second.erase(BB); } @@ -211,7 +211,7 @@ void ProfileInfoT<Function,BasicBlock>::removeEdge(Edge e) { EdgeInformation.find(getFunction(e)); if (J == EdgeInformation.end()) return; - DEBUG(errs() << "Deleting" << e << "\n"); + DEBUG(dbgs() << "Deleting" << e << "\n"); J->second.erase(e); } @@ -221,10 +221,10 @@ void ProfileInfoT<Function,BasicBlock>:: double w; if ((w = getEdgeWeight(newedge)) == MissingValue) { w = getEdgeWeight(oldedge); - DEBUG(errs() << "Replacing " << oldedge << " with " << newedge << "\n"); + DEBUG(dbgs() << "Replacing " << oldedge << " with " << newedge << "\n"); } else { w += getEdgeWeight(oldedge); - DEBUG(errs() << "Adding " << oldedge << " to " << newedge << "\n"); + DEBUG(dbgs() << "Adding " << oldedge << " to " << newedge << "\n"); } setEdgeWeight(newedge,w);
removeEdge(oldedge); @@ -277,7 +277,7 @@ const BasicBlock *ProfileInfoT<Function,BasicBlock>:: template<> void ProfileInfoT<Function,BasicBlock>:: divertFlow(const Edge &oldedge, const Edge &newedge) { - DEBUG(errs() << "Diverting " << oldedge << " via " << newedge ); + DEBUG(dbgs() << "Diverting " << oldedge << " via " << newedge ); // First check if the old edge was taken, if not, just delete it... if (getEdgeWeight(oldedge) == 0) { @@ -291,7 +291,7 @@ void ProfileInfoT<Function,BasicBlock>:: const BasicBlock *BB = GetPath(newedge.second,oldedge.second,P,GetPathToExit | GetPathToDest); double w = getEdgeWeight (oldedge); - DEBUG(errs() << ", Weight: " << format("%.20g",w) << "\n"); + DEBUG(dbgs() << ", Weight: " << format("%.20g",w) << "\n"); do { const BasicBlock *Parent = P.find(BB)->second; Edge e = getEdge(Parent,BB); @@ -312,7 +312,7 @@ void ProfileInfoT<Function,BasicBlock>:: template<> void ProfileInfoT<Function,BasicBlock>:: replaceAllUses(const BasicBlock *RmBB, const BasicBlock *DestBB) { - DEBUG(errs() << "Replacing " << RmBB->getName() + DEBUG(dbgs() << "Replacing " << RmBB->getName() << " with " << DestBB->getName() << "\n"); const Function *F = DestBB->getParent(); std::map<const Function*, EdgeWeights>::iterator J = @@ -413,7 +413,7 @@ void ProfileInfoT<Function,BasicBlock>::splitBlock(const BasicBlock *Old, EdgeInformation.find(F); if (J == EdgeInformation.end()) return; - DEBUG(errs() << "Splitting " << Old->getName() << " to " << New->getName() << "\n"); + DEBUG(dbgs() << "Splitting " << Old->getName() << " to " << New->getName() << "\n"); std::set<Edge> Edges; for (EdgeWeights::iterator ewi = J->second.begin(), ewe = J->second.end(); @@ -444,7 +444,7 @@ void ProfileInfoT<Function,BasicBlock>::splitBlock(const BasicBlock *BB, EdgeInformation.find(F); if (J == EdgeInformation.end()) return; - DEBUG(errs() << "Splitting " << NumPreds << " Edges from " << BB->getName() + DEBUG(dbgs() << "Splitting " << NumPreds << " Edges from " << BB->getName() << " to " << NewBB->getName() << "\n"); // Collect weight that was redirected over NewBB. @@ -474,7 +474,7 @@ void ProfileInfoT<Function,BasicBlock>::splitBlock(const BasicBlock *BB, template<> void ProfileInfoT<Function,BasicBlock>::transfer(const Function *Old, const Function *New) { - DEBUG(errs() << "Replacing Function " << Old->getName() << " with " + DEBUG(dbgs() << "Replacing Function " << Old->getName() << " with " << New->getName() << "\n"); std::map<const Function*, EdgeWeights>::iterator J = EdgeInformation.find(Old); @@ -552,7 +552,7 @@ bool ProfileInfoT<Function,BasicBlock>:: } else { EdgeInformation[BB->getParent()][edgetocalc] = incount-outcount; } - DEBUG(errs() << "--Calc Edge Counter for " << edgetocalc << ": " + DEBUG(dbgs() << "--Calc Edge Counter for " << edgetocalc << ": " << format("%.20g", getEdgeWeight(edgetocalc)) << "\n"); removed = edgetocalc; return true; @@ -982,9 +982,9 @@ void ProfileInfoT<Function,BasicBlock>::repair(const Function *F) { FI = Unvisited.begin(), FE = Unvisited.end(); while(FI != FE) { const BasicBlock *BB = *FI; ++FI; - errs() << BB->getName(); + dbgs() << BB->getName(); if (FI != FE) - errs() << ","; + dbgs() << ","; } errs() << "}"; diff --git a/lib/Analysis/ProfileInfoLoaderPass.cpp b/lib/Analysis/ProfileInfoLoaderPass.cpp index cbd0430..d8c511f 100644 --- a/lib/Analysis/ProfileInfoLoaderPass.cpp +++ b/lib/Analysis/ProfileInfoLoaderPass.cpp @@ -131,7 +131,7 @@ void LoaderPass::readEdge(ProfileInfo::Edge e, // in double. 
EdgeInformation[getFunction(e)][e] += (double)weight; - DEBUG(errs() << "--Read Edge Counter for " << e + DEBUG(dbgs() << "--Read Edge Counter for " << e << " (# "<< (ReadCount-1) << "): " << (unsigned)getEdgeWeight(e) << "\n"); } else { @@ -151,7 +151,7 @@ bool LoaderPass::runOnModule(Module &M) { ReadCount = 0; for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) { if (F->isDeclaration()) continue; - DEBUG(errs()<<"Working on "<<F->getNameStr()<<"\n"); + DEBUG(dbgs()<<"Working on "<<F->getNameStr()<<"\n"); readEdge(getEdge(0,&F->getEntryBlock()), Counters); for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) { TerminatorInst *TI = BB->getTerminator(); @@ -172,7 +172,7 @@ bool LoaderPass::runOnModule(Module &M) { ReadCount = 0; for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) { if (F->isDeclaration()) continue; - DEBUG(errs()<<"Working on "<<F->getNameStr()<<"\n"); + DEBUG(dbgs()<<"Working on "<<F->getNameStr()<<"\n"); readEdge(getEdge(0,&F->getEntryBlock()), Counters); for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) { TerminatorInst *TI = BB->getTerminator(); @@ -198,10 +198,10 @@ bool LoaderPass::runOnModule(Module &M) { } if (SpanningTree.size() == size) { - DEBUG(errs()<<"{"); + DEBUG(dbgs()<<"{"); for (std::set<Edge>::iterator ei = SpanningTree.begin(), ee = SpanningTree.end(); ei != ee; ++ei) { - DEBUG(errs()<< *ei <<","); + DEBUG(dbgs()<< *ei <<","); } assert(0 && "No edge calculated!"); } diff --git a/lib/Analysis/ProfileVerifierPass.cpp b/lib/Analysis/ProfileVerifierPass.cpp index 36a80ba..a2ddc8e 100644 --- a/lib/Analysis/ProfileVerifierPass.cpp +++ b/lib/Analysis/ProfileVerifierPass.cpp @@ -102,7 +102,7 @@ namespace llvm { typename ProfileInfoT<FType, BType>::Edge E = PI->getEdge(*bbi,BB); double EdgeWeight = PI->getEdgeWeight(E); if (EdgeWeight == ProfileInfoT<FType, BType>::MissingValue) { EdgeWeight = 0; } - errs() << "calculated in-edge " << E << ": " + dbgs() << "calculated in-edge " << E << ": " << format("%20.20g",EdgeWeight) << "\n"; inWeight += EdgeWeight; inCount++; @@ -117,13 +117,13 @@ namespace llvm { typename ProfileInfoT<FType, BType>::Edge E = PI->getEdge(BB,*bbi); double EdgeWeight = PI->getEdgeWeight(E); if (EdgeWeight == ProfileInfoT<FType, BType>::MissingValue) { EdgeWeight = 0; } - errs() << "calculated out-edge " << E << ": " + dbgs() << "calculated out-edge " << E << ": " << format("%20.20g",EdgeWeight) << "\n"; outWeight += EdgeWeight; outCount++; } } - errs() << "Block " << BB->getNameStr() << " in " + dbgs() << "Block " << BB->getNameStr() << " in " << BB->getParent()->getNameStr() << ":" << "BBWeight=" << format("%20.20g",BBWeight) << "," << "inWeight=" << format("%20.20g",inWeight) << "," @@ -141,7 +141,7 @@ namespace llvm { template<class FType, class BType> void ProfileVerifierPassT<FType, BType>::debugEntry (DetailedBlockInfo *DI) { - errs() << "TROUBLE: Block " << DI->BB->getNameStr() << " in " + dbgs() << "TROUBLE: Block " << DI->BB->getNameStr() << " in " << DI->BB->getParent()->getNameStr() << ":" << "BBWeight=" << format("%20.20g",DI->BBWeight) << "," << "inWeight=" << format("%20.20g",DI->inWeight) << "," @@ -191,20 +191,20 @@ namespace llvm { } #define ASSERTMESSAGE(M) \ - { errs() << "ASSERT:" << (M) << "\n"; \ + { dbgs() << "ASSERT:" << (M) << "\n"; \ if (!DisableAssertions) assert(0 && (M)); } template<class FType, class BType> double ProfileVerifierPassT<FType, BType>::ReadOrAssert(typename ProfileInfoT<FType, BType>::Edge E) { double EdgeWeight = PI->getEdgeWeight(E); if 
(EdgeWeight == ProfileInfoT<FType, BType>::MissingValue) { - errs() << "Edge " << E << " in Function " + dbgs() << "Edge " << E << " in Function " << ProfileInfoT<FType, BType>::getFunction(E)->getNameStr() << ": "; ASSERTMESSAGE("Edge has missing value"); return 0; } else { if (EdgeWeight < 0) { - errs() << "Edge " << E << " in Function " + dbgs() << "Edge " << E << " in Function " << ProfileInfoT<FType, BType>::getFunction(E)->getNameStr() << ": "; ASSERTMESSAGE("Edge has negative value"); } @@ -218,7 +218,7 @@ namespace llvm { DetailedBlockInfo *DI) { if (Error) { DEBUG(debugEntry(DI)); - errs() << "Block " << DI->BB->getNameStr() << " in Function " + dbgs() << "Block " << DI->BB->getNameStr() << " in Function " << DI->BB->getParent()->getNameStr() << ": "; ASSERTMESSAGE(Message); } diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp index c6835ef..17dc686 100644 --- a/lib/Analysis/ScalarEvolution.cpp +++ b/lib/Analysis/ScalarEvolution.cpp @@ -75,6 +75,7 @@ #include "llvm/Target/TargetData.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/ConstantRange.h" +#include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/GetElementPtrTypeIterator.h" #include "llvm/Support/InstIterator.h" @@ -117,8 +118,8 @@ char ScalarEvolution::ID = 0; SCEV::~SCEV() {} void SCEV::dump() const { - print(errs()); - errs() << '\n'; + print(dbgs()); + dbgs() << '\n'; } bool SCEV::isZero() const { @@ -298,7 +299,7 @@ bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const { return false; // This recurrence is variant w.r.t. QueryLoop if QueryLoop contains L. - if (QueryLoop->contains(L->getHeader())) + if (QueryLoop->contains(L)) return false; // This recurrence is variant w.r.t. QueryLoop if any of its operands @@ -333,7 +334,7 @@ bool SCEVUnknown::isLoopInvariant(const Loop *L) const { // Instructions are never considered invariant in the function body // (null loop) because they are defined within the "loop". if (Instruction *I = dyn_cast<Instruction>(V)) - return L && !L->contains(I->getParent()); + return L && !L->contains(I); return true; } @@ -1457,10 +1458,13 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops, LIOps.push_back(AddRec->getStart()); SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), - AddRec->op_end()); + AddRec->op_end()); AddRecOps[0] = getAddExpr(LIOps); + // It's tempting to propagate NUW/NSW flags here, but nuw/nsw addition + // is not associative so this isn't necessarily safe. const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop()); + // If all of the other operands were loop invariant, we are done. if (Ops.size() == 1) return NewRec; @@ -1636,6 +1640,8 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops, } } + // It's tempting to propagate the NSW flag here, but nsw multiplication + // is not associative so this isn't necessarily safe. const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop()); // If all of the other operands were loop invariant, we are done. @@ -1838,10 +1844,10 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands, // Canonicalize nested AddRecs in by nesting them in order of loop depth. 
if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) { - const Loop* NestedLoop = NestedAR->getLoop(); + const Loop *NestedLoop = NestedAR->getLoop(); if (L->getLoopDepth() < NestedLoop->getLoopDepth()) { SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(), - NestedAR->op_end()); + NestedAR->op_end()); Operands[0] = NestedAR->getStart(); // AddRecs require their operands be loop-invariant with respect to their // loops. Don't perform this transformation if it would break this @@ -2441,7 +2447,7 @@ ScalarEvolution::ForgetSymbolicName(Instruction *I, const SCEV *SymName) { Instruction *I = Worklist.pop_back_val(); if (!Visited.insert(I)) continue; - std::map<SCEVCallbackVH, const SCEV*>::iterator It = + std::map<SCEVCallbackVH, const SCEV *>::iterator It = Scalars.find(static_cast<Value *>(I)); if (It != Scalars.end()) { // Short-circuit the def-use traversal if the symbolic name @@ -2592,8 +2598,9 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { /// createNodeForGEP - Expand GEP instructions into add and multiply /// operations. This allows them to be analyzed by regular SCEV code. /// -const SCEV *ScalarEvolution::createNodeForGEP(Operator *GEP) { +const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) { + bool InBounds = GEP->isInBounds(); const Type *IntPtrTy = getEffectiveSCEVType(GEP->getType()); Value *Base = GEP->getOperand(0); // Don't attempt to analyze GEPs over unsized objects. @@ -2610,18 +2617,23 @@ const SCEV *ScalarEvolution::createNodeForGEP(Operator *GEP) { // For a struct, add the member offset. unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue(); TotalOffset = getAddExpr(TotalOffset, - getFieldOffsetExpr(STy, FieldNo)); + getFieldOffsetExpr(STy, FieldNo), + /*HasNUW=*/false, /*HasNSW=*/InBounds); } else { // For an array, add the element offset, explicitly scaled. const SCEV *LocalOffset = getSCEV(Index); if (!isa<PointerType>(LocalOffset->getType())) // Getelementptr indices are signed. LocalOffset = getTruncateOrSignExtend(LocalOffset, IntPtrTy); - LocalOffset = getMulExpr(LocalOffset, getAllocSizeExpr(*GTI)); - TotalOffset = getAddExpr(TotalOffset, LocalOffset); + // Lower "inbounds" GEPs to NSW arithmetic. + LocalOffset = getMulExpr(LocalOffset, getAllocSizeExpr(*GTI), + /*HasNUW=*/false, /*HasNSW=*/InBounds); + TotalOffset = getAddExpr(TotalOffset, LocalOffset, + /*HasNUW=*/false, /*HasNSW=*/InBounds); } } - return getAddExpr(getSCEV(Base), TotalOffset); + return getAddExpr(getSCEV(Base), TotalOffset, + /*HasNUW=*/false, /*HasNSW=*/InBounds); } /// GetMinTrailingZeros - Determine the minimum number of zero bits that S is @@ -3130,7 +3142,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) { // expressions we handle are GEPs and address literals. case Instruction::GetElementPtr: - return createNodeForGEP(U); + return createNodeForGEP(cast<GEPOperator>(U)); case Instruction::PHI: return createNodeForPHI(cast<PHINode>(U)); @@ -3241,7 +3253,7 @@ ScalarEvolution::getBackedgeTakenInfo(const Loop *L) { // update the value. The temporary CouldNotCompute value tells SCEV // code elsewhere that it shouldn't attempt to request a new // backedge-taken count, which could result in infinite recursion.
- std::pair<std::map<const Loop*, BackedgeTakenInfo>::iterator, bool> Pair = + std::pair<std::map<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair = BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute())); if (Pair.second) { BackedgeTakenInfo ItCount = ComputeBackedgeTakenCount(L); @@ -3276,7 +3288,7 @@ ScalarEvolution::getBackedgeTakenInfo(const Loop *L) { Instruction *I = Worklist.pop_back_val(); if (!Visited.insert(I)) continue; - std::map<SCEVCallbackVH, const SCEV*>::iterator It = + std::map<SCEVCallbackVH, const SCEV *>::iterator It = Scalars.find(static_cast<Value *>(I)); if (It != Scalars.end()) { // SCEVUnknown for a PHI either means that it has an unrecognized @@ -3316,7 +3328,7 @@ void ScalarEvolution::forgetLoop(const Loop *L) { Instruction *I = Worklist.pop_back_val(); if (!Visited.insert(I)) continue; - std::map<SCEVCallbackVH, const SCEV*>::iterator It = + std::map<SCEVCallbackVH, const SCEV *>::iterator It = Scalars.find(static_cast<Value *>(I)); if (It != Scalars.end()) { ValuesAtScopes.erase(It->second); @@ -3333,7 +3345,7 @@ void ScalarEvolution::forgetLoop(const Loop *L) { /// of the specified loop will execute. ScalarEvolution::BackedgeTakenInfo ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) { - SmallVector<BasicBlock*, 8> ExitingBlocks; + SmallVector<BasicBlock *, 8> ExitingBlocks; L->getExitingBlocks(ExitingBlocks); // Examine all exits and pick the most conservative values. @@ -3616,10 +3628,10 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L, } default: #if 0 - errs() << "ComputeBackedgeTakenCount "; + dbgs() << "ComputeBackedgeTakenCount "; if (ExitCond->getOperand(0)->getType()->isUnsigned()) - errs() << "[unsigned] "; - errs() << *LHS << " " + dbgs() << "[unsigned] "; + dbgs() << *LHS << " " << Instruction::getOpcodeName(Instruction::ICmp) << " " << *RHS << "\n"; #endif @@ -3740,7 +3752,7 @@ ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount( if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure if (cast<ConstantInt>(Result)->getValue().isMinValue()) { #if 0 - errs() << "\n***\n*** Computed loop count " << *ItCst + dbgs() << "\n***\n*** Computed loop count " << *ItCst << "\n*** From global " << *GV << "*** BB: " << *L->getHeader() << "***\n"; #endif @@ -3774,7 +3786,7 @@ static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) { // If this is not an instruction, or if this is an instruction outside of the // loop, it can't be derived from a loop PHI. Instruction *I = dyn_cast<Instruction>(V); - if (I == 0 || !L->contains(I->getParent())) return 0; + if (I == 0 || !L->contains(I)) return 0; if (PHINode *PN = dyn_cast<PHINode>(I)) { if (L->getHeader() == I->getParent()) @@ -3839,7 +3851,7 @@ static Constant *EvaluateExpression(Value *V, Constant *PHIVal, /// involving constants, fold it. 
Constant * ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, - const APInt& BEs, + const APInt &BEs, const Loop *L) { std::map<PHINode*, Constant*>::iterator I = ConstantEvolutionLoopExitValue.find(PN); @@ -4008,7 +4020,7 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { if (!isSCEVable(Op->getType())) return V; - const SCEV* OpV = getSCEVAtScope(Op, L); + const SCEV *OpV = getSCEVAtScope(Op, L); if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) { Constant *C = SC->getValue(); if (C->getType() != Op->getType()) @@ -4091,7 +4103,7 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { // If this is a loop recurrence for a loop that does not contain L, then we // are dealing with the final value computed by the loop. if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { - if (!L || !AddRec->getLoop()->contains(L->getHeader())) { + if (!L || !AddRec->getLoop()->contains(L)) { // To evaluate this recurrence, we need to know how many times the AddRec // loop iterates. Compute this now. const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop()); @@ -4306,7 +4318,7 @@ const SCEV *ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) { const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second); if (R1) { #if 0 - errs() << "HFTZ: " << *V << " - sol#1: " << *R1 + dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1 << " sol#2: " << *R2 << "\n"; #endif // Pick the smallest positive root value. @@ -5183,7 +5195,7 @@ static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, OS << "Loop " << L->getHeader()->getName() << ": "; - SmallVector<BasicBlock*, 8> ExitBlocks; + SmallVector<BasicBlock *, 8> ExitBlocks; L->getExitBlocks(ExitBlocks); if (ExitBlocks.size() != 1) OS << "<multiple exits> "; @@ -5206,14 +5218,14 @@ OS << "\n"; } -void ScalarEvolution::print(raw_ostream &OS, const Module* ) const { +void ScalarEvolution::print(raw_ostream &OS, const Module *) const { // ScalarEvolution's implementation of the print method is to print // out SCEV values of all instructions that are interesting. Doing // this potentially causes it to create new SCEV objects though, // which technically conflicts with the const qualifier. This isn't // observable from outside the class though, so casting away the // const isn't dangerous. - ScalarEvolution &SE = *const_cast<ScalarEvolution*>(this); + ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this); OS << "Classifying expressions for: " << F->getName() << "\n"; for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) diff --git a/lib/Analysis/SparsePropagation.cpp b/lib/Analysis/SparsePropagation.cpp index d7bcac2..d8c207b 100644 --- a/lib/Analysis/SparsePropagation.cpp +++ b/lib/Analysis/SparsePropagation.cpp @@ -17,7 +17,6 @@ #include "llvm/Constants.h" #include "llvm/Function.h" #include "llvm/Instructions.h" -#include "llvm/LLVMContext.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; @@ -89,7 +88,7 @@ void SparseSolver::UpdateState(Instruction &Inst, LatticeVal V) { /// MarkBlockExecutable - This method can be used by clients to mark all of /// the blocks that are known to be intrinsically live in the processed unit.
void SparseSolver::MarkBlockExecutable(BasicBlock *BB) { - DEBUG(errs() << "Marking Block Executable: " << BB->getName() << "\n"); + DEBUG(dbgs() << "Marking Block Executable: " << BB->getName() << "\n"); BBExecutable.insert(BB); // Basic block is executable! BBWorkList.push_back(BB); // Add the block to the work list! } @@ -100,7 +99,7 @@ void SparseSolver::markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest) { if (!KnownFeasibleEdges.insert(Edge(Source, Dest)).second) return; // This edge is already known to be executable! - DEBUG(errs() << "Marking Edge Executable: " << Source->getName() + DEBUG(dbgs() << "Marking Edge Executable: " << Source->getName() << " -> " << Dest->getName() << "\n"); if (BBExecutable.count(Dest)) { @@ -155,7 +154,7 @@ void SparseSolver::getFeasibleSuccessors(TerminatorInst &TI, } // Constant condition variables mean the branch can only go a single way - Succs[C == ConstantInt::getFalse(*Context)] = true; + Succs[C->isNullValue()] = true; return; } @@ -300,7 +299,7 @@ void SparseSolver::Solve(Function &F) { Instruction *I = InstWorkList.back(); InstWorkList.pop_back(); - DEBUG(errs() << "\nPopped off I-WL: " << *I << "\n"); + DEBUG(dbgs() << "\nPopped off I-WL: " << *I << "\n"); // "I" got into the work list because it made a transition. See if any // users are both live and in need of updating. @@ -317,7 +316,7 @@ void SparseSolver::Solve(Function &F) { BasicBlock *BB = BBWorkList.back(); BBWorkList.pop_back(); - DEBUG(errs() << "\nPopped off BBWL: " << *BB); + DEBUG(dbgs() << "\nPopped off BBWL: " << *BB); // Notify all instructions in this basic block that they are newly // executable. diff --git a/lib/Analysis/Trace.cpp b/lib/Analysis/Trace.cpp index c9b303b..68a39cd 100644 --- a/lib/Analysis/Trace.cpp +++ b/lib/Analysis/Trace.cpp @@ -18,6 +18,7 @@ #include "llvm/Analysis/Trace.h" #include "llvm/Function.h" #include "llvm/Assembly/Writer.h" +#include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; @@ -46,5 +47,5 @@ void Trace::print(raw_ostream &O) const { /// output stream. /// void Trace::dump() const { - print(errs()); + print(dbgs()); } diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp index 22c6e3b..acd3119 100644 --- a/lib/Analysis/ValueTracking.cpp +++ b/lib/Analysis/ValueTracking.cpp @@ -1369,11 +1369,6 @@ bool llvm::GetConstantStringInfo(Value *V, std::string &Str, uint64_t Offset, StopAtNul); } - if (MDString *MDStr = dyn_cast<MDString>(V)) { - Str = MDStr->getString(); - return true; - } - // The GEP instruction, constant or instruction, must reference a global // variable that is a constant and is initialized. The referenced constant // initializer is the array that we'll use for optimization. diff --git a/lib/AsmParser/LLLexer.cpp b/lib/AsmParser/LLLexer.cpp index cad1d3b..8ad658d 100644 --- a/lib/AsmParser/LLLexer.cpp +++ b/lib/AsmParser/LLLexer.cpp @@ -254,7 +254,7 @@ lltok::Kind LLLexer::LexToken() { case ';': SkipLineComment(); return LexToken(); - case '!': return LexMetadata(); + case '!': return LexExclaim(); case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '-': @@ -422,11 +422,11 @@ static bool JustWhitespaceNewLine(const char *&Ptr) { return false; } -/// LexMetadata: -/// !{...} -/// !42 +/// LexExclaim: /// !foo -lltok::Kind LLLexer::LexMetadata() { +/// ! +lltok::Kind LLLexer::LexExclaim() { + // Lex a metadata name as a MetadataVar. 
if (isalpha(CurPtr[0])) { ++CurPtr; while (isalnum(CurPtr[0]) || CurPtr[0] == '-' || CurPtr[0] == '$' || @@ -434,9 +434,9 @@ lltok::Kind LLLexer::LexMetadata() { ++CurPtr; StrVal.assign(TokStart+1, CurPtr); // Skip ! - return lltok::NamedOrCustomMD; + return lltok::MetadataVar; } - return lltok::Metadata; + return lltok::exclaim; } /// LexIdentifier: Handle several related productions: diff --git a/lib/AsmParser/LLLexer.h b/lib/AsmParser/LLLexer.h index de39272..3057992 100644 --- a/lib/AsmParser/LLLexer.h +++ b/lib/AsmParser/LLLexer.h @@ -75,7 +75,7 @@ namespace llvm { lltok::Kind LexDigitOrNegative(); lltok::Kind LexPositive(); lltok::Kind LexAt(); - lltok::Kind LexMetadata(); + lltok::Kind LexExclaim(); lltok::Kind LexPercent(); lltok::Kind LexQuote(); lltok::Kind Lex0x(); diff --git a/lib/AsmParser/LLParser.cpp b/lib/AsmParser/LLParser.cpp index 0333eed..15a9832 100644 --- a/lib/AsmParser/LLParser.cpp +++ b/lib/AsmParser/LLParser.cpp @@ -18,8 +18,6 @@ #include "llvm/DerivedTypes.h" #include "llvm/InlineAsm.h" #include "llvm/Instructions.h" -#include "llvm/LLVMContext.h" -#include "llvm/Metadata.h" #include "llvm/Module.h" #include "llvm/Operator.h" #include "llvm/ValueSymbolTable.h" @@ -170,8 +168,8 @@ bool LLParser::ParseTopLevelEntities() { case lltok::LocalVar: if (ParseNamedType()) return true; break; case lltok::GlobalID: if (ParseUnnamedGlobal()) return true; break; case lltok::GlobalVar: if (ParseNamedGlobal()) return true; break; - case lltok::Metadata: if (ParseStandaloneMetadata()) return true; break; - case lltok::NamedOrCustomMD: if (ParseNamedMetadata()) return true; break; + case lltok::exclaim: if (ParseStandaloneMetadata()) return true; break; + case lltok::MetadataVar: if (ParseNamedMetadata()) return true; break; // The Global variable production with no name can have many different // optional leading prefixes, the production is: @@ -465,69 +463,61 @@ bool LLParser::ParseNamedGlobal() { // MDString: // ::= '!' STRINGCONSTANT -bool LLParser::ParseMDString(MetadataBase *&MDS) { +bool LLParser::ParseMDString(MDString *&Result) { std::string Str; if (ParseStringConstant(Str)) return true; - MDS = MDString::get(Context, Str); + Result = MDString::get(Context, Str); return false; } // MDNode: // ::= '!' MDNodeNumber -bool LLParser::ParseMDNode(MetadataBase *&Node) { +bool LLParser::ParseMDNodeID(MDNode *&Result) { // !{ ..., !42, ... } unsigned MID = 0; - if (ParseUInt32(MID)) return true; + if (ParseUInt32(MID)) return true; // Check existing MDNode. - std::map<unsigned, WeakVH>::iterator I = MetadataCache.find(MID); - if (I != MetadataCache.end()) { - Node = cast<MetadataBase>(I->second); + if (MID < NumberedMetadata.size() && NumberedMetadata[MID] != 0) { + Result = NumberedMetadata[MID]; return false; } - // Check known forward references. - std::map<unsigned, std::pair<WeakVH, LocTy> >::iterator - FI = ForwardRefMDNodes.find(MID); - if (FI != ForwardRefMDNodes.end()) { - Node = cast<MetadataBase>(FI->second.first); - return false; - } + // Create MDNode forward reference. - // Create MDNode forward reference - SmallVector<Value *, 1> Elts; + // FIXME: This is not unique enough! std::string FwdRefName = "llvm.mdnode.fwdref." 
+ utostr(MID); - Elts.push_back(MDString::get(Context, FwdRefName)); - MDNode *FwdNode = MDNode::get(Context, Elts.data(), Elts.size()); + Value *V = MDString::get(Context, FwdRefName); + MDNode *FwdNode = MDNode::get(Context, &V, 1); ForwardRefMDNodes[MID] = std::make_pair(FwdNode, Lex.getLoc()); - Node = FwdNode; + + if (NumberedMetadata.size() <= MID) + NumberedMetadata.resize(MID+1); + NumberedMetadata[MID] = FwdNode; + Result = FwdNode; return false; } -///ParseNamedMetadata: +/// ParseNamedMetadata: /// !foo = !{ !1, !2 } bool LLParser::ParseNamedMetadata() { - assert(Lex.getKind() == lltok::NamedOrCustomMD); - Lex.Lex(); + assert(Lex.getKind() == lltok::MetadataVar); std::string Name = Lex.getStrVal(); + Lex.Lex(); - if (ParseToken(lltok::equal, "expected '=' here")) + if (ParseToken(lltok::equal, "expected '=' here") || + ParseToken(lltok::exclaim, "Expected '!' here") || + ParseToken(lltok::lbrace, "Expected '{' here")) return true; - if (Lex.getKind() != lltok::Metadata) - return TokError("Expected '!' here"); - Lex.Lex(); - - if (Lex.getKind() != lltok::lbrace) - return TokError("Expected '{' here"); - Lex.Lex(); SmallVector<MetadataBase *, 8> Elts; do { - if (Lex.getKind() != lltok::Metadata) - return TokError("Expected '!' here"); - Lex.Lex(); - MetadataBase *N = 0; - if (ParseMDNode(N)) return true; + if (ParseToken(lltok::exclaim, "Expected '!' here")) + return true; + + // FIXME: This rejects MDStrings. Are they legal in a named MDNode or not? + MDNode *N = 0; + if (ParseMDNodeID(N)) return true; Elts.push_back(N); } while (EatIfPresent(lltok::comma)); @@ -541,74 +531,41 @@ /// ParseStandaloneMetadata: /// !42 = !{...} bool LLParser::ParseStandaloneMetadata() { - assert(Lex.getKind() == lltok::Metadata); + assert(Lex.getKind() == lltok::exclaim); Lex.Lex(); unsigned MetadataID = 0; - if (ParseUInt32(MetadataID)) - return true; - if (MetadataCache.find(MetadataID) != MetadataCache.end()) - return TokError("Metadata id is already used"); - if (ParseToken(lltok::equal, "expected '=' here")) - return true; LocTy TyLoc; PATypeHolder Ty(Type::getVoidTy(Context)); - if (ParseType(Ty, TyLoc)) - return true; - - if (Lex.getKind() != lltok::Metadata) - return TokError("Expected metadata here"); - - Lex.Lex(); - if (Lex.getKind() != lltok::lbrace) - return TokError("Expected '{' here"); - SmallVector<Value *, 16> Elts; - if (ParseMDNodeVector(Elts) - || ParseToken(lltok::rbrace, "expected end of metadata node")) + if (ParseUInt32(MetadataID) || + ParseToken(lltok::equal, "expected '=' here") || + ParseType(Ty, TyLoc) || + ParseToken(lltok::exclaim, "Expected '!' here") || + ParseToken(lltok::lbrace, "Expected '{' here") || + ParseMDNodeVector(Elts) || + ParseToken(lltok::rbrace, "expected end of metadata node")) return true; MDNode *Init = MDNode::get(Context, Elts.data(), Elts.size()); - MetadataCache[MetadataID] = Init; - std::map<unsigned, std::pair<WeakVH, LocTy> >::iterator + + // See if this was forward referenced, if so, handle it.
+ std::map<unsigned, std::pair<TrackingVH<MDNode>, LocTy> >::iterator FI = ForwardRefMDNodes.find(MetadataID); if (FI != ForwardRefMDNodes.end()) { - MDNode *FwdNode = cast<MDNode>(FI->second.first); - FwdNode->replaceAllUsesWith(Init); + FI->second.first->replaceAllUsesWith(Init); ForwardRefMDNodes.erase(FI); - } - - return false; -} - -/// ParseInlineMetadata: -/// !{type %instr} -/// !{...} MDNode -/// !"foo" MDString -bool LLParser::ParseInlineMetadata(Value *&V, PerFunctionState &PFS) { - assert(Lex.getKind() == lltok::Metadata && "Only for Metadata"); - V = 0; - - Lex.Lex(); - if (Lex.getKind() == lltok::lbrace) { - Lex.Lex(); - if (ParseTypeAndValue(V, PFS) || - ParseToken(lltok::rbrace, "expected end of metadata node")) - return true; + + assert(NumberedMetadata[MetadataID] == Init && "Tracking VH didn't work"); + } else { + if (MetadataID >= NumberedMetadata.size()) + NumberedMetadata.resize(MetadataID+1); - Value *Vals[] = { V }; - V = MDNode::get(Context, Vals, 1); - return false; + if (NumberedMetadata[MetadataID] != 0) + return TokError("Metadata id is already used"); + NumberedMetadata[MetadataID] = Init; } - // Standalone metadata reference - // !{ ..., !42, ... } - if (!ParseMDNode((MetadataBase *&)V)) - return false; - - // MDString: - // '!' STRINGCONSTANT - if (ParseMDString((MetadataBase *&)V)) return true; return false; } @@ -1105,29 +1062,28 @@ bool LLParser::ParseOptionalCallingConv(CallingConv::ID &CC) { return false; } -/// ParseOptionalCustomMetadata -/// ::= /* empty */ -/// ::= !dbg !42 -bool LLParser::ParseOptionalCustomMetadata() { - if (Lex.getKind() != lltok::NamedOrCustomMD) - return false; - - std::string Name = Lex.getStrVal(); - Lex.Lex(); +/// ParseInstructionMetadata +/// ::= !dbg !42 (',' !dbg !57)* +bool LLParser:: +ParseInstructionMetadata(SmallVectorImpl<std::pair<unsigned, + MDNode *> > &Result){ + do { + if (Lex.getKind() != lltok::MetadataVar) + return TokError("expected metadata after comma"); - if (Lex.getKind() != lltok::Metadata) - return TokError("Expected '!' here"); - Lex.Lex(); + std::string Name = Lex.getStrVal(); + Lex.Lex(); - MetadataBase *Node; - if (ParseMDNode(Node)) return true; + MDNode *Node; + if (ParseToken(lltok::exclaim, "expected '!' here") || + ParseMDNodeID(Node)) + return true; - MetadataContext &TheMetadata = M->getContext().getMetadata(); - unsigned MDK = TheMetadata.getMDKind(Name.c_str()); - if (!MDK) - MDK = TheMetadata.registerMDKind(Name.c_str()); - MDsOnInst.push_back(std::make_pair(MDK, cast<MDNode>(Node))); + unsigned MDK = M->getMDKindID(Name.c_str()); + Result.push_back(std::make_pair(MDK, Node)); + // If this is the end of the list, we're done. + } while (EatIfPresent(lltok::comma)); return false; } @@ -1145,33 +1101,53 @@ bool LLParser::ParseOptionalAlignment(unsigned &Alignment) { return false; } -/// ParseOptionalInfo -/// ::= OptionalInfo (',' OptionalInfo)+ -bool LLParser::ParseOptionalInfo(unsigned &Alignment) { - - // FIXME: Handle customized metadata info attached with an instruction. - do { - if (Lex.getKind() == lltok::NamedOrCustomMD) { - if (ParseOptionalCustomMetadata()) return true; - } else if (Lex.getKind() == lltok::kw_align) { +/// ParseOptionalCommaAlign +/// ::= +/// ::= ',' align 4 +/// +/// This returns with AteExtraComma set to true if it ate an excess comma at the +/// end. +bool LLParser::ParseOptionalCommaAlign(unsigned &Alignment, + bool &AteExtraComma) { + AteExtraComma = false; + while (EatIfPresent(lltok::comma)) { + // Metadata at the end is an early exit. 
+ if (Lex.getKind() == lltok::MetadataVar) { + AteExtraComma = true; + return false; + } + + if (Lex.getKind() == lltok::kw_align) { if (ParseOptionalAlignment(Alignment)) return true; } else return true; - } while (EatIfPresent(lltok::comma)); + } return false; } +/// ParseIndexList - This parses the index list for an insert/extractvalue +/// instruction. This sets AteExtraComma in the case where we eat an extra +/// comma at the end of the line and find that it is followed by metadata. +/// Clients that don't allow metadata can call the version of this function that +/// only takes one argument. +/// /// ParseIndexList /// ::= (',' uint32)+ -bool LLParser::ParseIndexList(SmallVectorImpl<unsigned> &Indices) { +/// +bool LLParser::ParseIndexList(SmallVectorImpl<unsigned> &Indices, + bool &AteExtraComma) { + AteExtraComma = false; + if (Lex.getKind() != lltok::comma) return TokError("expected ',' as start of index list"); while (EatIfPresent(lltok::comma)) { - if (Lex.getKind() == lltok::NamedOrCustomMD) - break; + if (Lex.getKind() == lltok::MetadataVar) { + AteExtraComma = true; + return false; + } unsigned Idx; if (ParseUInt32(Idx)) return true; Indices.push_back(Idx); @@ -1213,7 +1189,7 @@ PATypeHolder LLParser::HandleUpRefs(const Type *ty) { PATypeHolder Ty(ty); #if 0 - errs() << "Type '" << Ty->getDescription() + dbgs() << "Type '" << Ty->getDescription() << "' newly formed. Resolving upreferences.\n" << UpRefs.size() << " upreferences active!\n"; #endif @@ -1231,7 +1207,7 @@ PATypeHolder LLParser::HandleUpRefs(const Type *ty) { UpRefs[i].LastContainedTy) != Ty->subtype_end(); #if 0 - errs() << " UR#" << i << " - TypeContains(" << Ty->getDescription() << ", " + dbgs() << " UR#" << i << " - TypeContains(" << Ty->getDescription() << ", " << UpRefs[i].LastContainedTy->getDescription() << ") = " << (ContainsType ? "true" : "false") << " level=" << UpRefs[i].NestingLevel << "\n"; @@ -1248,7 +1224,7 @@ PATypeHolder LLParser::HandleUpRefs(const Type *ty) { continue; #if 0 - errs() << " * Resolving upreference for " << UpRefs[i].UpRefTy << "\n"; + dbgs() << " * Resolving upreference for " << UpRefs[i].UpRefTy << "\n"; #endif if (!TypeToResolve) TypeToResolve = UpRefs[i].UpRefTy; @@ -1416,17 +1392,13 @@ bool LLParser::ParseParameterList(SmallVectorImpl<ParamInfo> &ArgList, if (ParseType(ArgTy, ArgLoc)) return true; - if (Lex.getKind() == lltok::Metadata) { - if (ParseInlineMetadata(V, PFS)) - return true; - } else { - if (ParseOptionalAttrs(ArgAttrs1, 0) || - ParseValue(ArgTy, V, PFS) || - // FIXME: Should not allow attributes after the argument, remove this - // in LLVM 3.0. - ParseOptionalAttrs(ArgAttrs2, 3)) - return true; - } + // Otherwise, handle normal operands. + if (ParseOptionalAttrs(ArgAttrs1, 0) || + ParseValue(ArgTy, V, PFS) || + // FIXME: Should not allow attributes after the argument, remove this + // in LLVM 3.0. 
+ ParseOptionalAttrs(ArgAttrs2, 3)) + return true; ArgList.push_back(ParamInfo(ArgLoc, V, ArgAttrs1|ArgAttrs2)); } @@ -1931,30 +1903,33 @@ bool LLParser::ParseValID(ValID &ID) { ID.StrVal = Lex.getStrVal(); ID.Kind = ValID::t_LocalName; break; - case lltok::Metadata: { // !{...} MDNode, !"foo" MDString - ID.Kind = ValID::t_Metadata; + case lltok::exclaim: // !{...} MDNode, !"foo" MDString Lex.Lex(); - if (Lex.getKind() == lltok::lbrace) { + + if (EatIfPresent(lltok::lbrace)) { SmallVector<Value*, 16> Elts; if (ParseMDNodeVector(Elts) || ParseToken(lltok::rbrace, "expected end of metadata node")) return true; - ID.MetadataVal = MDNode::get(Context, Elts.data(), Elts.size()); + ID.MDNodeVal = MDNode::get(Context, Elts.data(), Elts.size()); + ID.Kind = ValID::t_MDNode; return false; } // Standalone metadata reference // !{ ..., !42, ... } - if (!ParseMDNode(ID.MetadataVal)) + if (Lex.getKind() == lltok::APSInt) { + if (ParseMDNodeID(ID.MDNodeVal)) return true; + ID.Kind = ValID::t_MDNode; return false; - + } + // MDString: // ::= '!' STRINGCONSTANT - if (ParseMDString(ID.MetadataVal)) return true; - ID.Kind = ValID::t_Metadata; + if (ParseMDString(ID.MDStringVal)) return true; + ID.Kind = ValID::t_MDString; return false; - } case lltok::APSInt: ID.APSIntVal = Lex.getAPSIntVal(); ID.Kind = ValID::t_APSInt; @@ -2154,8 +2129,6 @@ bool LLParser::ParseValID(ValID &ID) { ParseIndexList(Indices) || ParseToken(lltok::rparen, "expected ')' in extractvalue constantexpr")) return true; - if (Lex.getKind() == lltok::NamedOrCustomMD) - if (ParseOptionalCustomMetadata()) return true; if (!isa<StructType>(Val->getType()) && !isa<ArrayType>(Val->getType())) return Error(ID.Loc, "extractvalue operand must be array or struct"); @@ -2178,8 +2151,6 @@ bool LLParser::ParseValID(ValID &ID) { ParseIndexList(Indices) || ParseToken(lltok::rparen, "expected ')' in insertvalue constantexpr")) return true; - if (Lex.getKind() == lltok::NamedOrCustomMD) - if (ParseOptionalCustomMetadata()) return true; if (!isa<StructType>(Val0->getType()) && !isa<ArrayType>(Val0->getType())) return Error(ID.Loc, "extractvalue operand must be array or struct"); if (!ExtractValueInst::getIndexedType(Val0->getType(), Indices.begin(), @@ -2398,7 +2369,8 @@ bool LLParser::ConvertGlobalValIDToValue(const Type *Ty, ValID &ID, switch (ID.Kind) { default: llvm_unreachable("Unknown ValID!"); - case ValID::t_Metadata: + case ValID::t_MDNode: + case ValID::t_MDString: return Error(ID.Loc, "invalid use of metadata"); case ValID::t_LocalID: case ValID::t_LocalName: @@ -2468,6 +2440,30 @@ bool LLParser::ConvertGlobalValIDToValue(const Type *Ty, ValID &ID, } } +/// ConvertGlobalOrMetadataValIDToValue - Apply a type to a ValID to get a fully +/// resolved constant or metadata value. 
+bool LLParser::ConvertGlobalOrMetadataValIDToValue(const Type *Ty, ValID &ID, + Value *&V) { + switch (ID.Kind) { + case ValID::t_MDNode: + if (!Ty->isMetadataTy()) + return Error(ID.Loc, "metadata value must have metadata type"); + V = ID.MDNodeVal; + return false; + case ValID::t_MDString: + if (!Ty->isMetadataTy()) + return Error(ID.Loc, "metadata value must have metadata type"); + V = ID.MDStringVal; + return false; + default: + Constant *C; + if (ConvertGlobalValIDToValue(Ty, ID, C)) return true; + V = C; + return false; + } +} + + bool LLParser::ParseGlobalTypeAndValue(Constant *&V) { PATypeHolder Type(Type::getVoidTy(Context)); return ParseType(Type) || @@ -2504,25 +2500,20 @@ bool LLParser::ParseGlobalValueVector(SmallVectorImpl<Constant*> &Elts) { bool LLParser::ConvertValIDToValue(const Type *Ty, ValID &ID, Value *&V, PerFunctionState &PFS) { - if (ID.Kind == ValID::t_LocalID) - V = PFS.GetVal(ID.UIntVal, Ty, ID.Loc); - else if (ID.Kind == ValID::t_LocalName) - V = PFS.GetVal(ID.StrVal, Ty, ID.Loc); - else if (ID.Kind == ValID::t_InlineAsm) { + switch (ID.Kind) { + case ValID::t_LocalID: V = PFS.GetVal(ID.UIntVal, Ty, ID.Loc); break; + case ValID::t_LocalName: V = PFS.GetVal(ID.StrVal, Ty, ID.Loc); break; + case ValID::t_InlineAsm: { const PointerType *PTy = dyn_cast<PointerType>(Ty); - const FunctionType *FTy = + const FunctionType *FTy = PTy ? dyn_cast<FunctionType>(PTy->getElementType()) : 0; if (!FTy || !InlineAsm::Verify(FTy, ID.StrVal2)) return Error(ID.Loc, "invalid type for inline asm constraint string"); V = InlineAsm::get(FTy, ID.StrVal, ID.StrVal2, ID.UIntVal&1, ID.UIntVal>>1); return false; - } else if (ID.Kind == ValID::t_Metadata) { - V = ID.MetadataVal; - } else { - Constant *C; - if (ConvertGlobalValIDToValue(Ty, ID, C)) return true; - V = C; - return false; + } + default: + return ConvertGlobalOrMetadataValIDToValue(Ty, ID, V); } return V == 0; @@ -2803,6 +2794,7 @@ bool LLParser::ParseBasicBlock(PerFunctionState &PFS) { // Parse the instructions in this block until we get a terminator. Instruction *Inst; + SmallVector<std::pair<unsigned, MDNode *>, 4> MetadataOnInst; do { // This instruction may have three possibilities for a name: a) none // specified, b) name specified "%foo =", c) number specified: "%4 =". @@ -2824,16 +2816,28 @@ bool LLParser::ParseBasicBlock(PerFunctionState &PFS) { return true; } - if (ParseInstruction(Inst, BB, PFS)) return true; - if (EatIfPresent(lltok::comma)) - ParseOptionalCustomMetadata(); + switch (ParseInstruction(Inst, BB, PFS)) { + default: assert(0 && "Unknown ParseInstruction result!"); + case InstError: return true; + case InstNormal: + // With a normal result, we check to see if the instruction is followed by + // a comma and metadata. + if (EatIfPresent(lltok::comma)) + if (ParseInstructionMetadata(MetadataOnInst)) + return true; + break; + case InstExtraComma: + // If the instruction parser ate an extra comma at the end of it, it + // *must* be followed by metadata. + if (ParseInstructionMetadata(MetadataOnInst)) + return true; + break; + } // Set metadata attached with this instruction. 
- MetadataContext &TheMetadata = M->getContext().getMetadata(); - for (SmallVector<std::pair<unsigned, MDNode *>, 2>::iterator - MDI = MDsOnInst.begin(), MDE = MDsOnInst.end(); MDI != MDE; ++MDI) - TheMetadata.addMD(MDI->first, MDI->second, Inst); - MDsOnInst.clear(); + for (unsigned i = 0, e = MetadataOnInst.size(); i != e; ++i) + Inst->setMetadata(MetadataOnInst[i].first, MetadataOnInst[i].second); + MetadataOnInst.clear(); BB->getInstList().push_back(Inst); @@ -2850,8 +2854,8 @@ bool LLParser::ParseBasicBlock(PerFunctionState &PFS) { /// ParseInstruction - Parse one of the many different instructions. /// -bool LLParser::ParseInstruction(Instruction *&Inst, BasicBlock *BB, - PerFunctionState &PFS) { +int LLParser::ParseInstruction(Instruction *&Inst, BasicBlock *BB, + PerFunctionState &PFS) { lltok::Kind Token = Lex.getKind(); if (Token == lltok::Eof) return TokError("found end of file when expecting more instructions"); @@ -3015,12 +3019,12 @@ bool LLParser::ParseCmpPredicate(unsigned &P, unsigned Opc) { //===----------------------------------------------------------------------===// /// ParseRet - Parse a return instruction. -/// ::= 'ret' void (',' !dbg, !1) -/// ::= 'ret' TypeAndValue (',' !dbg, !1) -/// ::= 'ret' TypeAndValue (',' TypeAndValue)+ (',' !dbg, !1) +/// ::= 'ret' void (',' !dbg, !1)* +/// ::= 'ret' TypeAndValue (',' !dbg, !1)* +/// ::= 'ret' TypeAndValue (',' TypeAndValue)+ (',' !dbg, !1)* /// [[obsolete: LLVM 3.0]] -bool LLParser::ParseRet(Instruction *&Inst, BasicBlock *BB, - PerFunctionState &PFS) { +int LLParser::ParseRet(Instruction *&Inst, BasicBlock *BB, + PerFunctionState &PFS) { PATypeHolder Ty(Type::getVoidTy(Context)); if (ParseType(Ty, true /*void allowed*/)) return true; @@ -3032,21 +3036,22 @@ bool LLParser::ParseRet(Instruction *&Inst, BasicBlock *BB, Value *RV; if (ParseValue(Ty, RV, PFS)) return true; + bool ExtraComma = false; if (EatIfPresent(lltok::comma)) { // Parse optional custom metadata, e.g. !dbg - if (Lex.getKind() == lltok::NamedOrCustomMD) { - if (ParseOptionalCustomMetadata()) return true; + if (Lex.getKind() == lltok::MetadataVar) { + ExtraComma = true; } else { // The normal case is one return value. - // FIXME: LLVM 3.0 remove MRV support for 'ret i32 1, i32 2', requiring use - // of 'ret {i32,i32} {i32 1, i32 2}' + // FIXME: LLVM 3.0 remove MRV support for 'ret i32 1, i32 2', requiring + // use of 'ret {i32,i32} {i32 1, i32 2}' SmallVector<Value*, 8> RVs; RVs.push_back(RV); do { // If optional custom metadata, e.g. !dbg is seen then this is the // end of MRV. - if (Lex.getKind() == lltok::NamedOrCustomMD) + if (Lex.getKind() == lltok::MetadataVar) break; if (ParseTypeAndValue(RV, PFS)) return true; RVs.push_back(RV); @@ -3062,7 +3067,7 @@ bool LLParser::ParseRet(Instruction *&Inst, BasicBlock *BB, } Inst = ReturnInst::Create(Context, RV); - return false; + return ExtraComma ? 
InstExtraComma : InstNormal; } @@ -3485,7 +3490,7 @@ bool LLParser::ParseShuffleVector(Instruction *&Inst, PerFunctionState &PFS) { /// ParsePHI /// ::= 'phi' Type '[' Value ',' Value ']' (',' '[' Value ',' Value ']')* -bool LLParser::ParsePHI(Instruction *&Inst, PerFunctionState &PFS) { +int LLParser::ParsePHI(Instruction *&Inst, PerFunctionState &PFS) { PATypeHolder Ty(Type::getVoidTy(Context)); Value *Op0, *Op1; LocTy TypeLoc = Lex.getLoc(); @@ -3498,6 +3503,7 @@ bool LLParser::ParsePHI(Instruction *&Inst, PerFunctionState &PFS) { ParseToken(lltok::rsquare, "expected ']' in phi value list")) return true; + bool AteExtraComma = false; SmallVector<std::pair<Value*, BasicBlock*>, 16> PHIVals; while (1) { PHIVals.push_back(std::make_pair(Op0, cast<BasicBlock>(Op1))); @@ -3505,8 +3511,10 @@ bool LLParser::ParsePHI(Instruction *&Inst, PerFunctionState &PFS) { if (!EatIfPresent(lltok::comma)) break; - if (Lex.getKind() == lltok::NamedOrCustomMD) + if (Lex.getKind() == lltok::MetadataVar) { + AteExtraComma = true; break; + } if (ParseToken(lltok::lsquare, "expected '[' in phi value list") || ParseValue(Ty, Op0, PFS) || @@ -3516,9 +3524,6 @@ bool LLParser::ParsePHI(Instruction *&Inst, PerFunctionState &PFS) { return true; } - if (Lex.getKind() == lltok::NamedOrCustomMD) - if (ParseOptionalCustomMetadata()) return true; - if (!Ty->isFirstClassType()) return Error(TypeLoc, "phi node must have first class type"); @@ -3527,7 +3532,7 @@ bool LLParser::ParsePHI(Instruction *&Inst, PerFunctionState &PFS) { for (unsigned i = 0, e = PHIVals.size(); i != e; ++i) PN->addIncoming(PHIVals[i].first, PHIVals[i].second); Inst = PN; - return false; + return AteExtraComma ? InstExtraComma : InstNormal; } /// ParseCall @@ -3634,22 +3639,24 @@ bool LLParser::ParseCall(Instruction *&Inst, PerFunctionState &PFS, /// ParseAlloc /// ::= 'malloc' Type (',' TypeAndValue)? (',' OptionalInfo)? /// ::= 'alloca' Type (',' TypeAndValue)? (',' OptionalInfo)? -bool LLParser::ParseAlloc(Instruction *&Inst, PerFunctionState &PFS, - BasicBlock* BB, bool isAlloca) { +int LLParser::ParseAlloc(Instruction *&Inst, PerFunctionState &PFS, + BasicBlock* BB, bool isAlloca) { PATypeHolder Ty(Type::getVoidTy(Context)); Value *Size = 0; LocTy SizeLoc; unsigned Alignment = 0; if (ParseType(Ty)) return true; + bool AteExtraComma = false; if (EatIfPresent(lltok::comma)) { - if (Lex.getKind() == lltok::kw_align - || Lex.getKind() == lltok::NamedOrCustomMD) { - if (ParseOptionalInfo(Alignment)) return true; + if (Lex.getKind() == lltok::kw_align) { + if (ParseOptionalAlignment(Alignment)) return true; + } else if (Lex.getKind() == lltok::MetadataVar) { + AteExtraComma = true; } else { - if (ParseTypeAndValue(Size, SizeLoc, PFS)) return true; - if (EatIfPresent(lltok::comma)) - if (ParseOptionalInfo(Alignment)) return true; + if (ParseTypeAndValue(Size, SizeLoc, PFS) || + ParseOptionalCommaAlign(Alignment, AteExtraComma)) + return true; } } @@ -3658,7 +3665,7 @@ bool LLParser::ParseAlloc(Instruction *&Inst, PerFunctionState &PFS, if (isAlloca) { Inst = new AllocaInst(Ty, Size, Alignment); - return false; + return AteExtraComma ? InstExtraComma : InstNormal; } // Autoupgrade old malloc instruction to malloc call. @@ -3672,7 +3679,7 @@ bool LLParser::ParseAlloc(Instruction *&Inst, PerFunctionState &PFS, MallocF = cast<Function>( M->getOrInsertFunction("", Type::getInt8PtrTy(Context), IntPtrTy, NULL)); Inst = CallInst::CreateMalloc(BB, IntPtrTy, Ty, AllocSize, Size, MallocF); - return false; +return AteExtraComma ? 
InstExtraComma : InstNormal; } /// ParseFree @@ -3689,37 +3696,36 @@ bool LLParser::ParseFree(Instruction *&Inst, PerFunctionState &PFS, /// ParseLoad /// ::= 'volatile'? 'load' TypeAndValue (',' OptionalInfo)? -bool LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS, - bool isVolatile) { +int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS, + bool isVolatile) { Value *Val; LocTy Loc; unsigned Alignment = 0; - if (ParseTypeAndValue(Val, Loc, PFS)) return true; - - if (EatIfPresent(lltok::comma)) - if (ParseOptionalInfo(Alignment)) return true; + bool AteExtraComma = false; + if (ParseTypeAndValue(Val, Loc, PFS) || + ParseOptionalCommaAlign(Alignment, AteExtraComma)) + return true; if (!isa<PointerType>(Val->getType()) || !cast<PointerType>(Val->getType())->getElementType()->isFirstClassType()) return Error(Loc, "load operand must be a pointer to a first class type"); Inst = new LoadInst(Val, "", isVolatile, Alignment); - return false; + return AteExtraComma ? InstExtraComma : InstNormal; } /// ParseStore /// ::= 'volatile'? 'store' TypeAndValue ',' TypeAndValue (',' 'align' i32)? -bool LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS, - bool isVolatile) { +int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS, + bool isVolatile) { Value *Val, *Ptr; LocTy Loc, PtrLoc; unsigned Alignment = 0; + bool AteExtraComma = false; if (ParseTypeAndValue(Val, Loc, PFS) || ParseToken(lltok::comma, "expected ',' after store operand") || - ParseTypeAndValue(Ptr, PtrLoc, PFS)) + ParseTypeAndValue(Ptr, PtrLoc, PFS) || + ParseOptionalCommaAlign(Alignment, AteExtraComma)) return true; - if (EatIfPresent(lltok::comma)) - if (ParseOptionalInfo(Alignment)) return true; - if (!isa<PointerType>(Ptr->getType())) return Error(PtrLoc, "store operand must be a pointer"); if (!Val->getType()->isFirstClassType()) @@ -3728,7 +3734,7 @@ bool LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS, return Error(Loc, "stored value and pointer type do not match"); Inst = new StoreInst(Val, Ptr, isVolatile, Alignment); - return false; + return AteExtraComma ? InstExtraComma : InstNormal; } /// ParseGetResult @@ -3752,7 +3758,7 @@ bool LLParser::ParseGetResult(Instruction *&Inst, PerFunctionState &PFS) { /// ParseGetElementPtr /// ::= 'getelementptr' 'inbounds'? 
TypeAndValue (',' TypeAndValue)* -bool LLParser::ParseGetElementPtr(Instruction *&Inst, PerFunctionState &PFS) { +int LLParser::ParseGetElementPtr(Instruction *&Inst, PerFunctionState &PFS) { Value *Ptr, *Val; LocTy Loc, EltLoc; bool InBounds = EatIfPresent(lltok::kw_inbounds); @@ -3763,16 +3769,17 @@ bool LLParser::ParseGetElementPtr(Instruction *&Inst, PerFunctionState &PFS) { return Error(Loc, "base of getelementptr must be a pointer"); SmallVector<Value*, 16> Indices; + bool AteExtraComma = false; while (EatIfPresent(lltok::comma)) { - if (Lex.getKind() == lltok::NamedOrCustomMD) + if (Lex.getKind() == lltok::MetadataVar) { + AteExtraComma = true; break; + } if (ParseTypeAndValue(Val, EltLoc, PFS)) return true; if (!isa<IntegerType>(Val->getType())) return Error(EltLoc, "getelementptr index must be an integer"); Indices.push_back(Val); } - if (Lex.getKind() == lltok::NamedOrCustomMD) - if (ParseOptionalCustomMetadata()) return true; if (!GetElementPtrInst::getIndexedType(Ptr->getType(), Indices.begin(), Indices.end())) @@ -3780,19 +3787,18 @@ bool LLParser::ParseGetElementPtr(Instruction *&Inst, PerFunctionState &PFS) { Inst = GetElementPtrInst::Create(Ptr, Indices.begin(), Indices.end()); if (InBounds) cast<GetElementPtrInst>(Inst)->setIsInBounds(true); - return false; + return AteExtraComma ? InstExtraComma : InstNormal; } /// ParseExtractValue /// ::= 'extractvalue' TypeAndValue (',' uint32)+ -bool LLParser::ParseExtractValue(Instruction *&Inst, PerFunctionState &PFS) { +int LLParser::ParseExtractValue(Instruction *&Inst, PerFunctionState &PFS) { Value *Val; LocTy Loc; SmallVector<unsigned, 4> Indices; + bool AteExtraComma; if (ParseTypeAndValue(Val, Loc, PFS) || - ParseIndexList(Indices)) + ParseIndexList(Indices, AteExtraComma)) return true; - if (Lex.getKind() == lltok::NamedOrCustomMD) - if (ParseOptionalCustomMetadata()) return true; if (!isa<StructType>(Val->getType()) && !isa<ArrayType>(Val->getType())) return Error(Loc, "extractvalue operand must be array or struct"); @@ -3801,22 +3807,21 @@ bool LLParser::ParseExtractValue(Instruction *&Inst, PerFunctionState &PFS) { Indices.end())) return Error(Loc, "invalid indices for extractvalue"); Inst = ExtractValueInst::Create(Val, Indices.begin(), Indices.end()); - return false; + return AteExtraComma ? InstExtraComma : InstNormal; } /// ParseInsertValue /// ::= 'insertvalue' TypeAndValue ',' TypeAndValue (',' uint32)+ -bool LLParser::ParseInsertValue(Instruction *&Inst, PerFunctionState &PFS) { +int LLParser::ParseInsertValue(Instruction *&Inst, PerFunctionState &PFS) { Value *Val0, *Val1; LocTy Loc0, Loc1; SmallVector<unsigned, 4> Indices; + bool AteExtraComma; if (ParseTypeAndValue(Val0, Loc0, PFS) || ParseToken(lltok::comma, "expected comma after insertvalue operand") || ParseTypeAndValue(Val1, Loc1, PFS) || - ParseIndexList(Indices)) + ParseIndexList(Indices, AteExtraComma)) return true; - if (Lex.getKind() == lltok::NamedOrCustomMD) - if (ParseOptionalCustomMetadata()) return true; - + if (!isa<StructType>(Val0->getType()) && !isa<ArrayType>(Val0->getType())) return Error(Loc0, "extractvalue operand must be array or struct"); @@ -3824,7 +3829,7 @@ bool LLParser::ParseInsertValue(Instruction *&Inst, PerFunctionState &PFS) { Indices.end())) return Error(Loc0, "invalid indices for insertvalue"); Inst = InsertValueInst::Create(Val0, Val1, Indices.begin(), Indices.end()); - return false; + return AteExtraComma ? 
InstExtraComma : InstNormal; } //===----------------------------------------------------------------------===// @@ -3836,32 +3841,20 @@ bool LLParser::ParseInsertValue(Instruction *&Inst, PerFunctionState &PFS) { /// Element /// ::= 'null' | TypeAndValue bool LLParser::ParseMDNodeVector(SmallVectorImpl<Value*> &Elts) { - assert(Lex.getKind() == lltok::lbrace); - Lex.Lex(); do { - Value *V = 0; - if (Lex.getKind() == lltok::kw_null) { - Lex.Lex(); - V = 0; - } else { - PATypeHolder Ty(Type::getVoidTy(Context)); - if (ParseType(Ty)) return true; - if (Lex.getKind() == lltok::Metadata) { - Lex.Lex(); - MetadataBase *Node = 0; - if (!ParseMDNode(Node)) - V = Node; - else { - MetadataBase *MDS = 0; - if (ParseMDString(MDS)) return true; - V = MDS; - } - } else { - Constant *C; - if (ParseGlobalValue(Ty, C)) return true; - V = C; - } + // Null is a special case since it is typeless. + if (EatIfPresent(lltok::kw_null)) { + Elts.push_back(0); + continue; } + + Value *V = 0; + PATypeHolder Ty(Type::getVoidTy(Context)); + ValID ID; + if (ParseType(Ty) || ParseValID(ID) || + ConvertGlobalOrMetadataValIDToValue(Ty, ID, V)) + return true; + Elts.push_back(V); } while (EatIfPresent(lltok::comma)); diff --git a/lib/AsmParser/LLParser.h b/lib/AsmParser/LLParser.h index d14b1cb..803832f 100644 --- a/lib/AsmParser/LLParser.h +++ b/lib/AsmParser/LLParser.h @@ -17,6 +17,7 @@ #include "LLLexer.h" #include "llvm/Module.h" #include "llvm/Type.h" +#include "llvm/Support/ValueHandle.h" #include <map> namespace llvm { @@ -45,7 +46,8 @@ namespace llvm { t_EmptyArray, // No value: [] t_Constant, // Value in ConstantVal. t_InlineAsm, // Value in StrVal/StrVal2/UIntVal. - t_Metadata // Value in MetadataVal. + t_MDNode, // Value in MDNodeVal. + t_MDString // Value in MDStringVal. } Kind; LLLexer::LocTy Loc; @@ -54,7 +56,8 @@ namespace llvm { APSInt APSIntVal; APFloat APFloatVal; Constant *ConstantVal; - MetadataBase *MetadataVal; + MDNode *MDNodeVal; + MDString *MDStringVal; ValID() : APFloatVal(0.0) {} bool operator<(const ValID &RHS) const { @@ -78,10 +81,8 @@ namespace llvm { std::map<std::string, std::pair<PATypeHolder, LocTy> > ForwardRefTypes; std::map<unsigned, std::pair<PATypeHolder, LocTy> > ForwardRefTypeIDs; std::vector<PATypeHolder> NumberedTypes; - /// MetadataCache - This map keeps track of parsed metadata constants. - std::map<unsigned, WeakVH> MetadataCache; - std::map<unsigned, std::pair<WeakVH, LocTy> > ForwardRefMDNodes; - SmallVector<std::pair<unsigned, MDNode *>, 2> MDsOnInst; + std::vector<TrackingVH<MDNode> > NumberedMetadata; + std::map<unsigned, std::pair<TrackingVH<MDNode>, LocTy> > ForwardRefMDNodes; struct UpRefRecord { /// Loc - This is the location of the upref. 
LocTy Loc; @@ -169,9 +170,17 @@ namespace llvm { bool ParseOptionalVisibility(unsigned &Visibility); bool ParseOptionalCallingConv(CallingConv::ID &CC); bool ParseOptionalAlignment(unsigned &Alignment); - bool ParseOptionalCustomMetadata(); - bool ParseOptionalInfo(unsigned &Alignment); - bool ParseIndexList(SmallVectorImpl<unsigned> &Indices); + bool ParseInstructionMetadata(SmallVectorImpl<std::pair<unsigned, + MDNode *> > &); + bool ParseOptionalCommaAlign(unsigned &Alignment, bool &AteExtraComma); + bool ParseIndexList(SmallVectorImpl<unsigned> &Indices,bool &AteExtraComma); + bool ParseIndexList(SmallVectorImpl<unsigned> &Indices) { + bool AteExtraComma; + if (ParseIndexList(Indices, AteExtraComma)) return true; + if (AteExtraComma) + return TokError("expected index"); + return false; + } // Top-Level Entities bool ParseTopLevelEntities(); @@ -192,8 +201,8 @@ namespace llvm { bool ParseAlias(const std::string &Name, LocTy Loc, unsigned Visibility); bool ParseStandaloneMetadata(); bool ParseNamedMetadata(); - bool ParseMDString(MetadataBase *&S); - bool ParseMDNode(MetadataBase *&N); + bool ParseMDString(MDString *&Result); + bool ParseMDNodeID(MDNode *&Result); // Type Parsing. bool ParseType(PATypeHolder &Result, bool AllowVoid = false); @@ -210,6 +219,8 @@ namespace llvm { // Constants. bool ParseValID(ValID &ID); bool ConvertGlobalValIDToValue(const Type *Ty, ValID &ID, Constant *&V); + bool ConvertGlobalOrMetadataValIDToValue(const Type *Ty, ValID &ID, + Value *&V); bool ParseGlobalValue(const Type *Ty, Constant *&V); bool ParseGlobalTypeAndValue(Constant *&V); bool ParseGlobalValueVector(SmallVectorImpl<Constant*> &Elts); @@ -280,8 +291,6 @@ namespace llvm { return ParseTypeAndBasicBlock(BB, Loc, PFS); } - bool ParseInlineMetadata(Value *&V, PerFunctionState &PFS); - struct ParamInfo { LocTy Loc; Value *V; @@ -307,12 +316,14 @@ namespace llvm { bool ParseFunctionBody(Function &Fn); bool ParseBasicBlock(PerFunctionState &PFS); - // Instruction Parsing. - bool ParseInstruction(Instruction *&Inst, BasicBlock *BB, - PerFunctionState &PFS); + // Instruction Parsing. Each instruction parsing routine can return with a + // normal result, an error result, or return having eaten an extra comma. 
+ enum InstResult { InstNormal = 0, InstError = 1, InstExtraComma = 2 }; + int ParseInstruction(Instruction *&Inst, BasicBlock *BB, + PerFunctionState &PFS); bool ParseCmpPredicate(unsigned &Pred, unsigned Opc); - bool ParseRet(Instruction *&Inst, BasicBlock *BB, PerFunctionState &PFS); + int ParseRet(Instruction *&Inst, BasicBlock *BB, PerFunctionState &PFS); bool ParseBr(Instruction *&Inst, PerFunctionState &PFS); bool ParseSwitch(Instruction *&Inst, PerFunctionState &PFS); bool ParseIndirectBr(Instruction *&Inst, PerFunctionState &PFS); @@ -328,17 +339,17 @@ namespace llvm { bool ParseExtractElement(Instruction *&I, PerFunctionState &PFS); bool ParseInsertElement(Instruction *&I, PerFunctionState &PFS); bool ParseShuffleVector(Instruction *&I, PerFunctionState &PFS); - bool ParsePHI(Instruction *&I, PerFunctionState &PFS); + int ParsePHI(Instruction *&I, PerFunctionState &PFS); bool ParseCall(Instruction *&I, PerFunctionState &PFS, bool isTail); - bool ParseAlloc(Instruction *&I, PerFunctionState &PFS, + int ParseAlloc(Instruction *&I, PerFunctionState &PFS, BasicBlock *BB = 0, bool isAlloca = true); bool ParseFree(Instruction *&I, PerFunctionState &PFS, BasicBlock *BB); - bool ParseLoad(Instruction *&I, PerFunctionState &PFS, bool isVolatile); - bool ParseStore(Instruction *&I, PerFunctionState &PFS, bool isVolatile); + int ParseLoad(Instruction *&I, PerFunctionState &PFS, bool isVolatile); + int ParseStore(Instruction *&I, PerFunctionState &PFS, bool isVolatile); bool ParseGetResult(Instruction *&I, PerFunctionState &PFS); - bool ParseGetElementPtr(Instruction *&I, PerFunctionState &PFS); - bool ParseExtractValue(Instruction *&I, PerFunctionState &PFS); - bool ParseInsertValue(Instruction *&I, PerFunctionState &PFS); + int ParseGetElementPtr(Instruction *&I, PerFunctionState &PFS); + int ParseExtractValue(Instruction *&I, PerFunctionState &PFS); + int ParseInsertValue(Instruction *&I, PerFunctionState &PFS); bool ResolveForwardRefBlockAddresses(Function *TheFn, std::vector<std::pair<ValID, GlobalValue*> > &Refs, diff --git a/lib/AsmParser/LLToken.h b/lib/AsmParser/LLToken.h index 1165766..7f1807c 100644 --- a/lib/AsmParser/LLToken.h +++ b/lib/AsmParser/LLToken.h @@ -29,6 +29,7 @@ namespace lltok { less, greater, // < > lparen, rparen, // ( ) backslash, // \ (not /) + exclaim, // ! kw_x, kw_begin, kw_end, @@ -128,11 +129,8 @@ namespace lltok { LabelStr, // foo: GlobalVar, // @foo @"foo" LocalVar, // %foo %"foo" + MetadataVar, // !foo StringConstant, // "foo" - NamedOrCustomMD, // !foo - - // Metadata valued tokens. - Metadata, // !"foo" !{i8 42} // Type valued tokens (TyVal). 
Type, diff --git a/lib/Bitcode/Reader/BitcodeReader.cpp b/lib/Bitcode/Reader/BitcodeReader.cpp index 9916388..7dffafa 100644 --- a/lib/Bitcode/Reader/BitcodeReader.cpp +++ b/lib/Bitcode/Reader/BitcodeReader.cpp @@ -17,8 +17,6 @@ #include "llvm/DerivedTypes.h" #include "llvm/InlineAsm.h" #include "llvm/IntrinsicInst.h" -#include "llvm/LLVMContext.h" -#include "llvm/Metadata.h" #include "llvm/Module.h" #include "llvm/Operator.h" #include "llvm/AutoUpgrade.h" @@ -840,17 +838,10 @@ bool BitcodeReader::ParseMetadata() { (void) Kind; for (unsigned i = 1; i != RecordLength; ++i) Name[i-1] = Record[i]; - MetadataContext &TheMetadata = Context.getMetadata(); - unsigned ExistingKind = TheMetadata.getMDKind(Name.str()); - if (ExistingKind == 0) { - unsigned NewKind = TheMetadata.registerMDKind(Name.str()); - (void) NewKind; - assert (Kind == NewKind - && "Unable to handle custom metadata mismatch!"); - } else { - assert (ExistingKind == Kind - && "Unable to handle custom metadata mismatch!"); - } + + unsigned NewKind = TheModule->getMDKindID(Name.str()); + assert(Kind == NewKind && + "FIXME: Unable to handle custom metadata mismatch!");(void)NewKind; break; } } @@ -1580,7 +1571,6 @@ bool BitcodeReader::ParseMetadataAttachment() { if (Stream.EnterSubBlock(bitc::METADATA_ATTACHMENT_ID)) return Error("Malformed block record"); - MetadataContext &TheMetadata = Context.getMetadata(); SmallVector<uint64_t, 64> Record; while(1) { unsigned Code = Stream.ReadCode(); @@ -1606,7 +1596,7 @@ bool BitcodeReader::ParseMetadataAttachment() { for (unsigned i = 1; i != RecordLength; i = i+2) { unsigned Kind = Record[i]; Value *Node = MDValueList.getValueFwdRef(Record[i+1]); - TheMetadata.addMD(Kind, cast<MDNode>(Node), Inst); + Inst->setMetadata(Kind, cast<MDNode>(Node)); } break; } diff --git a/lib/Bitcode/Reader/BitcodeReader.h b/lib/Bitcode/Reader/BitcodeReader.h index 7b3a1ae..bb3961a 100644 --- a/lib/Bitcode/Reader/BitcodeReader.h +++ b/lib/Bitcode/Reader/BitcodeReader.h @@ -170,7 +170,7 @@ class BitcodeReader : public ModuleProvider { DenseMap<Function*, std::vector<BlockAddrRefTy> > BlockAddrFwdRefs; public: - explicit BitcodeReader(MemoryBuffer *buffer, LLVMContext& C) + explicit BitcodeReader(MemoryBuffer *buffer, LLVMContext &C) : Context(C), Buffer(buffer), ErrorString(0), ValueList(C), MDValueList(C) { HasReversedFunctionsWithBodies = false; } diff --git a/lib/Bitcode/Reader/CMakeLists.txt b/lib/Bitcode/Reader/CMakeLists.txt index a19c79a..693d431 100644 --- a/lib/Bitcode/Reader/CMakeLists.txt +++ b/lib/Bitcode/Reader/CMakeLists.txt @@ -1,7 +1,4 @@ add_llvm_library(LLVMBitReader BitReader.cpp BitcodeReader.cpp - Deserialize.cpp - DeserializeAPFloat.cpp - DeserializeAPInt.cpp - )
\ No newline at end of file + ) diff --git a/lib/Bitcode/Reader/Deserialize.cpp b/lib/Bitcode/Reader/Deserialize.cpp deleted file mode 100644 index b8e720a..0000000 --- a/lib/Bitcode/Reader/Deserialize.cpp +++ /dev/null @@ -1,450 +0,0 @@ -//==- Deserialize.cpp - Generic Object Serialization to Bitcode --*- C++ -*-==// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file defines the internal methods used for object serialization. -// -//===----------------------------------------------------------------------===// - -#include "llvm/Bitcode/Deserialize.h" -#include "llvm/Support/raw_ostream.h" -using namespace llvm; - -Deserializer::Deserializer(BitstreamReader& stream) - : Stream(stream), RecIdx(0), FreeList(NULL), AbbrevNo(0), RecordCode(0) { - - StreamStart = Stream.GetCurrentBitNo(); -} - -Deserializer::~Deserializer() { - assert (RecIdx >= Record.size() && - "Still scanning bitcode record when deserialization completed."); - -#ifdef DEBUG_BACKPATCH - for (MapTy::iterator I=BPatchMap.begin(), E=BPatchMap.end(); I!=E; ++I) - assert (I->first.hasFinalPtr() && - "Some pointers were not backpatched."); -#endif -} - - -bool Deserializer::inRecord() { - if (Record.size() > 0) { - if (RecIdx >= Record.size()) { - RecIdx = 0; - Record.clear(); - AbbrevNo = 0; - return false; - } - else - return true; - } - - return false; -} - -bool Deserializer::AdvanceStream() { - assert (!inRecord() && - "Cannot advance stream. Still processing a record."); - - if (AbbrevNo == bitc::ENTER_SUBBLOCK || - AbbrevNo >= bitc::UNABBREV_RECORD) - return true; - - while (!Stream.AtEndOfStream()) { - - uint64_t Pos = Stream.GetCurrentBitNo(); - AbbrevNo = Stream.ReadCode(); - - switch (AbbrevNo) { - case bitc::ENTER_SUBBLOCK: { - unsigned id = Stream.ReadSubBlockID(); - - // Determine the extent of the block. This is useful for jumping around - // the stream. This is hack: we read the header of the block, save - // the length, and then revert the bitstream to a location just before - // the block is entered. - uint64_t BPos = Stream.GetCurrentBitNo(); - Stream.ReadVBR(bitc::CodeLenWidth); // Skip the code size. 
- Stream.SkipToWord(); - unsigned NumWords = Stream.Read(bitc::BlockSizeWidth); - Stream.JumpToBit(BPos); - - BlockStack.push_back(Location(Pos,id,NumWords)); - break; - } - - case bitc::END_BLOCK: { - bool x = Stream.ReadBlockEnd(); - assert(!x && "Error at block end."); x=x; - BlockStack.pop_back(); - continue; - } - - case bitc::DEFINE_ABBREV: - Stream.ReadAbbrevRecord(); - continue; - - default: - break; - } - - return true; - } - - return false; -} - -void Deserializer::ReadRecord() { - - while (AdvanceStream() && AbbrevNo == bitc::ENTER_SUBBLOCK) { - assert (!BlockStack.empty()); - Stream.EnterSubBlock(BlockStack.back().BlockID); - AbbrevNo = 0; - } - - if (Stream.AtEndOfStream()) - return; - - assert (Record.empty()); - assert (AbbrevNo >= bitc::UNABBREV_RECORD); - RecordCode = Stream.ReadRecord(AbbrevNo,Record); - assert (Record.size() > 0); -} - -void Deserializer::SkipBlock() { - assert (!inRecord()); - - if (AtEnd()) - return; - - AdvanceStream(); - - assert (AbbrevNo == bitc::ENTER_SUBBLOCK); - BlockStack.pop_back(); - Stream.SkipBlock(); - - AbbrevNo = 0; - AdvanceStream(); -} - -bool Deserializer::SkipToBlock(unsigned BlockID) { - assert (!inRecord()); - - AdvanceStream(); - assert (AbbrevNo == bitc::ENTER_SUBBLOCK); - - unsigned BlockLevel = BlockStack.size(); - - while (!AtEnd() && - BlockLevel == BlockStack.size() && - getCurrentBlockID() != BlockID) - SkipBlock(); - - return !(AtEnd() || BlockLevel != BlockStack.size()); -} - -Deserializer::Location Deserializer::getCurrentBlockLocation() { - if (!inRecord()) - AdvanceStream(); - - return BlockStack.back(); -} - -bool Deserializer::JumpTo(const Location& Loc) { - - assert (!inRecord()); - - AdvanceStream(); - - assert (!BlockStack.empty() || AtEnd()); - - uint64_t LastBPos = StreamStart; - - while (!BlockStack.empty()) { - - LastBPos = BlockStack.back().BitNo; - - // Determine of the current block contains the location of the block - // we are looking for. - if (BlockStack.back().contains(Loc)) { - // We found the enclosing block. We must first POP it off to - // destroy any accumulated context within the block scope. We then - // jump to the position of the block and enter it. - Stream.JumpToBit(LastBPos); - - if (BlockStack.size() == Stream.BlockScope.size()) - Stream.PopBlockScope(); - - BlockStack.pop_back(); - - AbbrevNo = 0; - AdvanceStream(); - assert (AbbrevNo == bitc::ENTER_SUBBLOCK); - - Stream.EnterSubBlock(BlockStack.back().BlockID); - break; - } - - // This block does not contain the block we are looking for. Pop it. - if (BlockStack.size() == Stream.BlockScope.size()) - Stream.PopBlockScope(); - - BlockStack.pop_back(); - - } - - // Check if we have popped our way to the outermost scope. If so, - // we need to adjust our position. - if (BlockStack.empty()) { - assert (Stream.BlockScope.empty()); - - Stream.JumpToBit(Loc.BitNo < LastBPos ? 
StreamStart : LastBPos); - AbbrevNo = 0; - AdvanceStream(); - } - - assert (AbbrevNo == bitc::ENTER_SUBBLOCK); - assert (!BlockStack.empty()); - - while (!AtEnd() && BlockStack.back() != Loc) { - if (BlockStack.back().contains(Loc)) { - Stream.EnterSubBlock(BlockStack.back().BlockID); - AbbrevNo = 0; - AdvanceStream(); - continue; - } - else - SkipBlock(); - } - - if (AtEnd()) - return false; - - assert (BlockStack.back() == Loc); - - return true; -} - -void Deserializer::Rewind() { - while (!Stream.BlockScope.empty()) - Stream.PopBlockScope(); - - while (!BlockStack.empty()) - BlockStack.pop_back(); - - Stream.JumpToBit(StreamStart); - AbbrevNo = 0; -} - - -unsigned Deserializer::getCurrentBlockID() { - if (!inRecord()) - AdvanceStream(); - - return BlockStack.back().BlockID; -} - -unsigned Deserializer::getRecordCode() { - if (!inRecord()) { - AdvanceStream(); - assert (AbbrevNo >= bitc::UNABBREV_RECORD); - ReadRecord(); - } - - return RecordCode; -} - -bool Deserializer::FinishedBlock(Location BlockLoc) { - if (!inRecord()) - AdvanceStream(); - - for (llvm::SmallVector<Location,8>::reverse_iterator - I=BlockStack.rbegin(), E=BlockStack.rend(); I!=E; ++I) - if (*I == BlockLoc) - return false; - - return true; -} - -unsigned Deserializer::getAbbrevNo() { - if (!inRecord()) - AdvanceStream(); - - return AbbrevNo; -} - -bool Deserializer::AtEnd() { - if (inRecord()) - return false; - - if (!AdvanceStream()) - return true; - - return false; -} - -uint64_t Deserializer::ReadInt() { - // FIXME: Any error recovery/handling with incomplete or bad files? - if (!inRecord()) - ReadRecord(); - - return Record[RecIdx++]; -} - -int64_t Deserializer::ReadSInt() { - uint64_t x = ReadInt(); - int64_t magnitude = x >> 1; - return x & 0x1 ? -magnitude : magnitude; -} - -char* Deserializer::ReadCStr(char* cstr, unsigned MaxLen, bool isNullTerm) { - if (cstr == NULL) - MaxLen = 0; // Zero this just in case someone does something funny. - - unsigned len = ReadInt(); - - assert (MaxLen == 0 || (len + (isNullTerm ? 1 : 0)) <= MaxLen); - - if (!cstr) - cstr = new char[len + (isNullTerm ? 1 : 0)]; - - assert (cstr != NULL); - - for (unsigned i = 0; i < len; ++i) - cstr[i] = (char) ReadInt(); - - if (isNullTerm) - cstr[len] = '\0'; - - return cstr; -} - -void Deserializer::ReadCStr(std::vector<char>& buff, bool isNullTerm, - unsigned Idx) { - - unsigned len = ReadInt(); - - // If Idx is beyond the current before size, reduce Idx to refer to the - // element after the last element. 
- if (Idx > buff.size()) - Idx = buff.size(); - - buff.reserve(len+Idx); - buff.resize(Idx); - - for (unsigned i = 0; i < len; ++i) - buff.push_back((char) ReadInt()); - - if (isNullTerm) - buff.push_back('\0'); -} - -void Deserializer::RegisterPtr(const SerializedPtrID& PtrId, - const void* Ptr) { - - MapTy::value_type& E = BPatchMap.FindAndConstruct(BPKey(PtrId)); - - assert (!HasFinalPtr(E) && "Pointer already registered."); - -#ifdef DEBUG_BACKPATCH - errs() << "RegisterPtr: " << PtrId << " => " << Ptr << "\n"; -#endif - - SetPtr(E,Ptr); -} - -void Deserializer::ReadUIntPtr(uintptr_t& PtrRef, - const SerializedPtrID& PtrId, - bool AllowBackpatch) { - if (PtrId == 0) { - PtrRef = 0; - return; - } - - MapTy::value_type& E = BPatchMap.FindAndConstruct(BPKey(PtrId)); - - if (HasFinalPtr(E)) { - PtrRef = GetFinalPtr(E); - -#ifdef DEBUG_BACKPATCH - errs() << "ReadUintPtr: " << PtrId - << " <-- " << (void*) GetFinalPtr(E) << '\n'; -#endif - } - else { - assert (AllowBackpatch && - "Client forbids backpatching for this pointer."); - -#ifdef DEBUG_BACKPATCH - errs() << "ReadUintPtr: " << PtrId << " (NO PTR YET)\n"; -#endif - - // Register backpatch. Check the freelist for a BPNode. - BPNode* N; - - if (FreeList) { - N = FreeList; - FreeList = FreeList->Next; - } - else // No available BPNode. Allocate one. - N = (BPNode*) Allocator.Allocate<BPNode>(); - - new (N) BPNode(GetBPNode(E),PtrRef); - SetBPNode(E,N); - } -} - -uintptr_t Deserializer::ReadInternalRefPtr() { - SerializedPtrID PtrId = ReadPtrID(); - - assert (PtrId != 0 && "References cannot refer the NULL address."); - - MapTy::value_type& E = BPatchMap.FindAndConstruct(BPKey(PtrId)); - - assert (HasFinalPtr(E) && - "Cannot backpatch references. Object must be already deserialized."); - - return GetFinalPtr(E); -} - -void BPEntry::SetPtr(BPNode*& FreeList, void* P) { - BPNode* Last = NULL; - - for (BPNode* N = Head; N != NULL; N=N->Next) { - Last = N; - N->PtrRef |= reinterpret_cast<uintptr_t>(P); - } - - if (Last) { - Last->Next = FreeList; - FreeList = Head; - } - - Ptr = const_cast<void*>(P); -} - - -#define INT_READ(TYPE)\ -void SerializeTrait<TYPE>::Read(Deserializer& D, TYPE& X) {\ - X = (TYPE) D.ReadInt(); } - -INT_READ(bool) -INT_READ(unsigned char) -INT_READ(unsigned short) -INT_READ(unsigned int) -INT_READ(unsigned long) - -#define SINT_READ(TYPE)\ -void SerializeTrait<TYPE>::Read(Deserializer& D, TYPE& X) {\ - X = (TYPE) D.ReadSInt(); } - -INT_READ(signed char) -INT_READ(signed short) -INT_READ(signed int) -INT_READ(signed long) diff --git a/lib/Bitcode/Reader/DeserializeAPFloat.cpp b/lib/Bitcode/Reader/DeserializeAPFloat.cpp deleted file mode 100644 index ee24b68..0000000 --- a/lib/Bitcode/Reader/DeserializeAPFloat.cpp +++ /dev/null @@ -1,24 +0,0 @@ -//===-- SerializeAPInt.cpp - Serialization for APFloat ---------*- C++ -*--===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file implements deserialization of APFloat. 
-// -//===----------------------------------------------------------------------===// - -#include "llvm/ADT/APFloat.h" -#include "llvm/Bitcode/Deserialize.h" - -using namespace llvm; - -APFloat APFloat::ReadVal(Deserializer& D) { - APInt x; - D.Read(x); - return APFloat(x); -} - diff --git a/lib/Bitcode/Reader/DeserializeAPInt.cpp b/lib/Bitcode/Reader/DeserializeAPInt.cpp deleted file mode 100644 index 1b5b2bf..0000000 --- a/lib/Bitcode/Reader/DeserializeAPInt.cpp +++ /dev/null @@ -1,33 +0,0 @@ -//===-- DeserializeAPInt.cpp - Deserialization for APInts ------*- C++ -*--===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file implements deserialization of APInts. -// -//===----------------------------------------------------------------------===// - -#include "llvm/ADT/APInt.h" -#include "llvm/Bitcode/Deserialize.h" -#include <cassert> - -using namespace llvm; - -void APInt::Read(Deserializer& D) { - BitWidth = D.ReadInt(); - - if (isSingleWord()) - VAL = D.ReadInt(); - else { - uint32_t NumWords = D.ReadInt(); - assert (NumWords > 1); - pVal = new uint64_t[NumWords]; - assert (pVal && "Allocation in deserialization of APInt failed."); - for (unsigned i = 0; i < NumWords; ++i) - pVal[i] = D.ReadInt(); - } -} diff --git a/lib/Bitcode/Writer/BitcodeWriter.cpp b/lib/Bitcode/Writer/BitcodeWriter.cpp index af0b8ac..c78a30e 100644 --- a/lib/Bitcode/Writer/BitcodeWriter.cpp +++ b/lib/Bitcode/Writer/BitcodeWriter.cpp @@ -19,8 +19,6 @@ #include "llvm/DerivedTypes.h" #include "llvm/InlineAsm.h" #include "llvm/Instructions.h" -#include "llvm/LLVMContext.h" -#include "llvm/Metadata.h" #include "llvm/Module.h" #include "llvm/Operator.h" #include "llvm/TypeSymbolTable.h" @@ -477,10 +475,10 @@ static void WriteMDNode(const MDNode *N, const ValueEnumerator &VE, BitstreamWriter &Stream, SmallVector<uint64_t, 64> &Record) { - for (unsigned i = 0, e = N->getNumElements(); i != e; ++i) { - if (N->getElement(i)) { - Record.push_back(VE.getTypeID(N->getElement(i)->getType())); - Record.push_back(VE.getValueID(N->getElement(i))); + for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { + if (N->getOperand(i)) { + Record.push_back(VE.getTypeID(N->getOperand(i)->getType())); + Record.push_back(VE.getValueID(N->getOperand(i))); } else { Record.push_back(VE.getTypeID(Type::getVoidTy(N->getContext()))); Record.push_back(0); @@ -537,10 +535,10 @@ static void WriteModuleMetadata(const ValueEnumerator &VE, Stream.EmitRecord(bitc::METADATA_NAME, Record, 0/*TODO*/); Record.clear(); - // Write named metadata elements. - for (unsigned i = 0, e = NMD->getNumElements(); i != e; ++i) { - if (NMD->getElement(i)) - Record.push_back(VE.getValueID(NMD->getElement(i))); + // Write named metadata operands. 
+ for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) { + if (NMD->getOperand(i)) + Record.push_back(VE.getValueID(NMD->getOperand(i))); else Record.push_back(0); } @@ -561,67 +559,58 @@ static void WriteMetadataAttachment(const Function &F, // Write metadata attachments // METADATA_ATTACHMENT - [m x [value, [n x [id, mdnode]]] - MetadataContext &TheMetadata = F.getContext().getMetadata(); - typedef SmallVector<std::pair<unsigned, TrackingVH<MDNode> >, 2> MDMapTy; - MDMapTy MDs; + SmallVector<std::pair<unsigned, MDNode*>, 4> MDs; + for (Function::const_iterator BB = F.begin(), E = F.end(); BB != E; ++BB) for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I) { MDs.clear(); - TheMetadata.getMDs(I, MDs); - bool RecordedInstruction = false; - for (MDMapTy::const_iterator PI = MDs.begin(), PE = MDs.end(); - PI != PE; ++PI) { - if (RecordedInstruction == false) { - Record.push_back(VE.getInstructionID(I)); - RecordedInstruction = true; - } - Record.push_back(PI->first); - Record.push_back(VE.getValueID(PI->second)); + I->getAllMetadata(MDs); + + // If no metadata, ignore instruction. + if (MDs.empty()) continue; + + Record.push_back(VE.getInstructionID(I)); + + for (unsigned i = 0, e = MDs.size(); i != e; ++i) { + Record.push_back(MDs[i].first); + Record.push_back(VE.getValueID(MDs[i].second)); } - if (!Record.empty()) { - if (!StartedMetadataBlock) { - Stream.EnterSubblock(bitc::METADATA_ATTACHMENT_ID, 3); - StartedMetadataBlock = true; - } - Stream.EmitRecord(bitc::METADATA_ATTACHMENT, Record, 0); - Record.clear(); + if (!StartedMetadataBlock) { + Stream.EnterSubblock(bitc::METADATA_ATTACHMENT_ID, 3); + StartedMetadataBlock = true; } + Stream.EmitRecord(bitc::METADATA_ATTACHMENT, Record, 0); + Record.clear(); } if (StartedMetadataBlock) Stream.ExitBlock(); } -static void WriteModuleMetadataStore(const Module *M, - const ValueEnumerator &VE, - BitstreamWriter &Stream) { - - bool StartedMetadataBlock = false; +static void WriteModuleMetadataStore(const Module *M, BitstreamWriter &Stream) { SmallVector<uint64_t, 64> Record; // Write metadata kinds // METADATA_KIND - [n x [id, name]] - MetadataContext &TheMetadata = M->getContext().getMetadata(); - SmallVector<std::pair<unsigned, StringRef>, 4> Names; - TheMetadata.getHandlerNames(Names); - for (SmallVector<std::pair<unsigned, StringRef>, 4>::iterator - I = Names.begin(), - E = Names.end(); I != E; ++I) { - Record.push_back(I->first); - StringRef KName = I->second; - for (unsigned i = 0, e = KName.size(); i != e; ++i) - Record.push_back(KName[i]); - if (!StartedMetadataBlock) { - Stream.EnterSubblock(bitc::METADATA_BLOCK_ID, 3); - StartedMetadataBlock = true; - } + SmallVector<StringRef, 4> Names; + M->getMDKindNames(Names); + + assert(Names[0] == "" && "MDKind #0 is invalid"); + if (Names.size() == 1) return; + + Stream.EnterSubblock(bitc::METADATA_BLOCK_ID, 3); + + for (unsigned MDKindID = 1, e = Names.size(); MDKindID != e; ++MDKindID) { + Record.push_back(MDKindID); + StringRef KName = Names[MDKindID]; + Record.append(KName.begin(), KName.end()); + Stream.EmitRecord(bitc::METADATA_KIND, Record, 0); Record.clear(); } - if (StartedMetadataBlock) - Stream.ExitBlock(); + Stream.ExitBlock(); } static void WriteConstants(unsigned FirstVal, unsigned LastVal, @@ -1213,7 +1202,7 @@ static void WriteFunction(const Function &F, ValueEnumerator &VE, for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I) { WriteInstruction(*I, InstID, VE, Stream, Vals); - if (I->getType() != 
Type::getVoidTy(F.getContext())) + if (!I->getType()->isVoidTy()) ++InstID; } @@ -1466,7 +1455,7 @@ static void WriteModule(const Module *M, BitstreamWriter &Stream) { WriteFunction(*I, VE, Stream); // Emit metadata. - WriteModuleMetadataStore(M, VE, Stream); + WriteModuleMetadataStore(M, Stream); // Emit the type symbol table information. WriteTypeSymbolTable(M->getTypeSymbolTable(), VE, Stream); diff --git a/lib/Bitcode/Writer/CMakeLists.txt b/lib/Bitcode/Writer/CMakeLists.txt index ac5bb99..f097b09 100644 --- a/lib/Bitcode/Writer/CMakeLists.txt +++ b/lib/Bitcode/Writer/CMakeLists.txt @@ -2,8 +2,5 @@ add_llvm_library(LLVMBitWriter BitWriter.cpp BitcodeWriter.cpp BitcodeWriterPass.cpp - Serialize.cpp - SerializeAPFloat.cpp - SerializeAPInt.cpp ValueEnumerator.cpp ) diff --git a/lib/Bitcode/Writer/Serialize.cpp b/lib/Bitcode/Writer/Serialize.cpp deleted file mode 100644 index a6beb17..0000000 --- a/lib/Bitcode/Writer/Serialize.cpp +++ /dev/null @@ -1,115 +0,0 @@ -//==- Serialize.cpp - Generic Object Serialization to Bitcode ----*- C++ -*-==// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file defines the internal methods used for object serialization. -// -//===----------------------------------------------------------------------===// - -#include "llvm/Bitcode/Serialize.h" -#include "llvm/Support/raw_ostream.h" -#include <cstring> - -using namespace llvm; - -Serializer::Serializer(BitstreamWriter& stream) - : Stream(stream), BlockLevel(0) {} - -Serializer::~Serializer() { - if (inRecord()) - EmitRecord(); - - while (BlockLevel > 0) - Stream.ExitBlock(); - - Stream.FlushToWord(); -} - -void Serializer::EmitRecord() { - assert(Record.size() > 0 && "Cannot emit empty record."); - Stream.EmitRecord(8,Record); - Record.clear(); -} - -void Serializer::EnterBlock(unsigned BlockID,unsigned CodeLen) { - FlushRecord(); - Stream.EnterSubblock(BlockID,CodeLen); - ++BlockLevel; -} - -void Serializer::ExitBlock() { - assert (BlockLevel > 0); - --BlockLevel; - FlushRecord(); - Stream.ExitBlock(); -} - -void Serializer::EmitInt(uint64_t X) { - assert (BlockLevel > 0); - Record.push_back(X); -} - -void Serializer::EmitSInt(int64_t X) { - if (X >= 0) - EmitInt(X << 1); - else - EmitInt((-X << 1) | 1); -} - -void Serializer::EmitCStr(const char* s, const char* end) { - Record.push_back(end - s); - - while(s != end) { - Record.push_back(*s); - ++s; - } -} - -void Serializer::EmitCStr(const char* s) { - EmitCStr(s,s+strlen(s)); -} - -SerializedPtrID Serializer::getPtrId(const void* ptr) { - if (!ptr) - return 0; - - MapTy::iterator I = PtrMap.find(ptr); - - if (I == PtrMap.end()) { - unsigned id = PtrMap.size()+1; -#ifdef DEBUG_BACKPATCH - errs() << "Registered PTR: " << ptr << " => " << id << "\n"; -#endif - PtrMap[ptr] = id; - return id; - } - else return I->second; -} - -bool Serializer::isRegistered(const void* ptr) const { - MapTy::const_iterator I = PtrMap.find(ptr); - return I != PtrMap.end(); -} - - -#define INT_EMIT(TYPE)\ -void SerializeTrait<TYPE>::Emit(Serializer&S, TYPE X) { S.EmitInt(X); } - -INT_EMIT(bool) -INT_EMIT(unsigned char) -INT_EMIT(unsigned short) -INT_EMIT(unsigned int) -INT_EMIT(unsigned long) - -#define SINT_EMIT(TYPE)\ -void SerializeTrait<TYPE>::Emit(Serializer&S, TYPE X) { S.EmitSInt(X); } - -SINT_EMIT(signed char) -SINT_EMIT(signed short) -SINT_EMIT(signed int) 
-SINT_EMIT(signed long) diff --git a/lib/Bitcode/Writer/SerializeAPFloat.cpp b/lib/Bitcode/Writer/SerializeAPFloat.cpp deleted file mode 100644 index 25d954f..0000000 --- a/lib/Bitcode/Writer/SerializeAPFloat.cpp +++ /dev/null @@ -1,21 +0,0 @@ -//===-- SerializeAPInt.cpp - Serialization for APFloat ---------*- C++ -*--===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file implements serialization of APFloat. -// -//===----------------------------------------------------------------------===// - -#include "llvm/ADT/APFloat.h" -#include "llvm/Bitcode/Serialize.h" - -using namespace llvm; - -void APFloat::Emit(Serializer& S) const { - S.Emit(bitcastToAPInt()); -} diff --git a/lib/Bitcode/Writer/SerializeAPInt.cpp b/lib/Bitcode/Writer/SerializeAPInt.cpp deleted file mode 100644 index 47792c7..0000000 --- a/lib/Bitcode/Writer/SerializeAPInt.cpp +++ /dev/null @@ -1,31 +0,0 @@ -//===-- SerializeAPInt.cpp - Serialization for APInts ----------*- C++ -*--===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file implements serialization of APInts. -// -//===----------------------------------------------------------------------===// - -#include "llvm/ADT/APInt.h" -#include "llvm/Bitcode/Serialize.h" -#include <cassert> - -using namespace llvm; - -void APInt::Emit(Serializer& S) const { - S.EmitInt(BitWidth); - - if (isSingleWord()) - S.EmitInt(VAL); - else { - uint32_t NumWords = getNumWords(); - S.EmitInt(NumWords); - for (unsigned i = 0; i < NumWords; ++i) - S.EmitInt(pVal[i]); - } -} diff --git a/lib/Bitcode/Writer/ValueEnumerator.cpp b/lib/Bitcode/Writer/ValueEnumerator.cpp index d840d4a..d8128db 100644 --- a/lib/Bitcode/Writer/ValueEnumerator.cpp +++ b/lib/Bitcode/Writer/ValueEnumerator.cpp @@ -14,8 +14,6 @@ #include "ValueEnumerator.h" #include "llvm/Constants.h" #include "llvm/DerivedTypes.h" -#include "llvm/LLVMContext.h" -#include "llvm/Metadata.h" #include "llvm/Module.h" #include "llvm/TypeSymbolTable.h" #include "llvm/ValueSymbolTable.h" @@ -80,6 +78,8 @@ ValueEnumerator::ValueEnumerator(const Module *M) { // the module symbol table can refer to them... EnumerateValueSymbolTable(M->getValueSymbolTable()); + SmallVector<std::pair<unsigned, MDNode*>, 8> MDs; + // Enumerate types used by function bodies and argument lists. for (Module::const_iterator F = M->begin(), E = M->end(); F != E; ++F) { @@ -87,9 +87,6 @@ ValueEnumerator::ValueEnumerator(const Module *M) { I != E; ++I) EnumerateType(I->getType()); - MetadataContext &TheMetadata = F->getContext().getMetadata(); - typedef SmallVector<std::pair<unsigned, TrackingVH<MDNode> >, 2> MDMapTy; - MDMapTy MDs; for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB) for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I!=E;++I){ for (User::const_op_iterator OI = I->op_begin(), E = I->op_end(); @@ -103,10 +100,9 @@ ValueEnumerator::ValueEnumerator(const Module *M) { // Enumerate metadata attached with this instruction. 
MDs.clear(); - TheMetadata.getMDs(I, MDs); - for (MDMapTy::const_iterator MI = MDs.begin(), ME = MDs.end(); MI != ME; - ++MI) - EnumerateMetadata(MI->second); + I->getAllMetadata(MDs); + for (unsigned i = 0, e = MDs.size(); i != e; ++i) + EnumerateMetadata(MDs[i].second); } } @@ -216,8 +212,8 @@ void ValueEnumerator::EnumerateMetadata(const MetadataBase *MD) { MDValues.push_back(std::make_pair(MD, 1U)); MDValueMap[MD] = MDValues.size(); MDValueID = MDValues.size(); - for (unsigned i = 0, e = N->getNumElements(); i != e; ++i) { - if (Value *V = N->getElement(i)) + for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { + if (Value *V = N->getOperand(i)) EnumerateValue(V); else EnumerateType(Type::getVoidTy(MD->getContext())); @@ -226,24 +222,21 @@ void ValueEnumerator::EnumerateMetadata(const MetadataBase *MD) { } if (const NamedMDNode *N = dyn_cast<NamedMDNode>(MD)) { - for(NamedMDNode::const_elem_iterator I = N->elem_begin(), - E = N->elem_end(); I != E; ++I) { - MetadataBase *M = *I; - EnumerateValue(M); - } + for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) + EnumerateValue(N->getOperand(i)); MDValues.push_back(std::make_pair(MD, 1U)); MDValueMap[MD] = Values.size(); return; } // Add the value. + assert(isa<MDString>(MD) && "Unknown metadata kind"); MDValues.push_back(std::make_pair(MD, 1U)); MDValueID = MDValues.size(); } void ValueEnumerator::EnumerateValue(const Value *V) { - assert(V->getType() != Type::getVoidTy(V->getContext()) && - "Can't insert void values!"); + assert(!V->getType()->isVoidTy() && "Can't insert void values!"); if (const MetadataBase *MB = dyn_cast<MetadataBase>(V)) return EnumerateMetadata(MB); @@ -334,8 +327,8 @@ void ValueEnumerator::EnumerateOperandType(const Value *V) { } if (const MDNode *N = dyn_cast<MDNode>(V)) { - for (unsigned i = 0, e = N->getNumElements(); i != e; ++i) - if (Value *Elem = N->getElement(i)) + for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) + if (Value *Elem = N->getOperand(i)) EnumerateOperandType(Elem); } } else if (isa<MDString>(V) || isa<MDNode>(V)) diff --git a/lib/CodeGen/AggressiveAntiDepBreaker.cpp b/lib/CodeGen/AggressiveAntiDepBreaker.cpp index bb61682..761fbc6 100644 --- a/lib/CodeGen/AggressiveAntiDepBreaker.cpp +++ b/lib/CodeGen/AggressiveAntiDepBreaker.cpp @@ -127,11 +127,11 @@ AggressiveAntiDepBreaker(MachineFunction& MFi, CriticalPathSet |= CPSet; } - DEBUG(errs() << "AntiDep Critical-Path Registers:"); + DEBUG(dbgs() << "AntiDep Critical-Path Registers:"); DEBUG(for (int r = CriticalPathSet.find_first(); r != -1; r = CriticalPathSet.find_next(r)) - errs() << " " << TRI->getName(r)); - DEBUG(errs() << '\n'); + dbgs() << " " << TRI->getName(r)); + DEBUG(dbgs() << '\n'); } AggressiveAntiDepBreaker::~AggressiveAntiDepBreaker() { @@ -218,9 +218,9 @@ void AggressiveAntiDepBreaker::Observe(MachineInstr *MI, unsigned Count, PrescanInstruction(MI, Count, PassthruRegs); ScanInstruction(MI, Count); - DEBUG(errs() << "Observe: "); + DEBUG(dbgs() << "Observe: "); DEBUG(MI->dump()); - DEBUG(errs() << "\tRegs:"); + DEBUG(dbgs() << "\tRegs:"); unsigned *DefIndices = State->GetDefIndices(); for (unsigned Reg = 0; Reg != TRI->getNumRegs(); ++Reg) { @@ -232,14 +232,14 @@ void AggressiveAntiDepBreaker::Observe(MachineInstr *MI, unsigned Count, // schedule region). 
if (State->IsLive(Reg)) { DEBUG(if (State->GetGroup(Reg) != 0) - errs() << " " << TRI->getName(Reg) << "=g" << + dbgs() << " " << TRI->getName(Reg) << "=g" << State->GetGroup(Reg) << "->g0(region live-out)"); State->UnionGroups(Reg, 0); } else if ((DefIndices[Reg] < InsertPosIndex) && (DefIndices[Reg] >= Count)) { DefIndices[Reg] = Count; } } - DEBUG(errs() << '\n'); + DEBUG(dbgs() << '\n'); } bool AggressiveAntiDepBreaker::IsImplicitDefUse(MachineInstr *MI, @@ -333,8 +333,8 @@ void AggressiveAntiDepBreaker::HandleLastUse(unsigned Reg, unsigned KillIdx, RegRefs.erase(Reg); State->LeaveGroup(Reg); DEBUG(if (header != NULL) { - errs() << header << TRI->getName(Reg); header = NULL; }); - DEBUG(errs() << "->g" << State->GetGroup(Reg) << tag); + dbgs() << header << TRI->getName(Reg); header = NULL; }); + DEBUG(dbgs() << "->g" << State->GetGroup(Reg) << tag); } // Repeat for subregisters. for (const unsigned *Subreg = TRI->getSubRegisters(Reg); @@ -346,13 +346,13 @@ void AggressiveAntiDepBreaker::HandleLastUse(unsigned Reg, unsigned KillIdx, RegRefs.erase(SubregReg); State->LeaveGroup(SubregReg); DEBUG(if (header != NULL) { - errs() << header << TRI->getName(Reg); header = NULL; }); - DEBUG(errs() << " " << TRI->getName(SubregReg) << "->g" << + dbgs() << header << TRI->getName(Reg); header = NULL; }); + DEBUG(dbgs() << " " << TRI->getName(SubregReg) << "->g" << State->GetGroup(SubregReg) << tag); } } - DEBUG(if ((header == NULL) && (footer != NULL)) errs() << footer); + DEBUG(if ((header == NULL) && (footer != NULL)) dbgs() << footer); } void AggressiveAntiDepBreaker::PrescanInstruction(MachineInstr *MI, unsigned Count, @@ -375,20 +375,20 @@ void AggressiveAntiDepBreaker::PrescanInstruction(MachineInstr *MI, unsigned Cou HandleLastUse(Reg, Count + 1, "", "\tDead Def: ", "\n"); } - DEBUG(errs() << "\tDef Groups:"); + DEBUG(dbgs() << "\tDef Groups:"); for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { MachineOperand &MO = MI->getOperand(i); if (!MO.isReg() || !MO.isDef()) continue; unsigned Reg = MO.getReg(); if (Reg == 0) continue; - DEBUG(errs() << " " << TRI->getName(Reg) << "=g" << State->GetGroup(Reg)); + DEBUG(dbgs() << " " << TRI->getName(Reg) << "=g" << State->GetGroup(Reg)); // If MI's defs have a special allocation requirement, don't allow // any def registers to be changed. Also assume all registers // defined in a call must not be changed (ABI). if (MI->getDesc().isCall() || MI->getDesc().hasExtraDefRegAllocReq()) { - DEBUG(if (State->GetGroup(Reg) != 0) errs() << "->g0(alloc-req)"); + DEBUG(if (State->GetGroup(Reg) != 0) dbgs() << "->g0(alloc-req)"); State->UnionGroups(Reg, 0); } @@ -398,7 +398,7 @@ void AggressiveAntiDepBreaker::PrescanInstruction(MachineInstr *MI, unsigned Cou unsigned AliasReg = *Alias; if (State->IsLive(AliasReg)) { State->UnionGroups(Reg, AliasReg); - DEBUG(errs() << "->g" << State->GetGroup(Reg) << "(via " << + DEBUG(dbgs() << "->g" << State->GetGroup(Reg) << "(via " << TRI->getName(AliasReg) << ")"); } } @@ -411,7 +411,7 @@ void AggressiveAntiDepBreaker::PrescanInstruction(MachineInstr *MI, unsigned Cou RegRefs.insert(std::make_pair(Reg, RR)); } - DEBUG(errs() << '\n'); + DEBUG(dbgs() << '\n'); // Scan the register defs for this instruction and update // live-ranges. 
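(The hunks above and below in this file are a mechanical conversion of debug output from errs() to dbgs(). A minimal sketch of the pattern, assuming the usual DEBUG_TYPE convention from llvm/Support/Debug.h; the pass name "antidep-example" is hypothetical, not from the patch:

    #define DEBUG_TYPE "antidep-example"  // hypothetical -debug-only name
    #include "llvm/Support/Debug.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    static void noteRegister(unsigned Reg) {
      // Before: DEBUG(errs() << "reg: " << Reg << '\n');
      // The DEBUG() macro still provides the -debug-only gating; switching
      // the stream to dbgs() lets the text be redirected or buffered by
      // debug-output options, whereas errs() always writes straight to
      // stderr.
      DEBUG(dbgs() << "reg: " << Reg << '\n');
    }

In a release build with NDEBUG, the DEBUG(...) body compiles away entirely, so the conversion only affects asserts-enabled builds.)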
@@ -437,7 +437,7 @@ void AggressiveAntiDepBreaker::PrescanInstruction(MachineInstr *MI, unsigned Cou void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr *MI, unsigned Count) { - DEBUG(errs() << "\tUse Groups:"); + DEBUG(dbgs() << "\tUse Groups:"); std::multimap<unsigned, AggressiveAntiDepState::RegisterReference>& RegRefs = State->GetRegRefs(); @@ -449,7 +449,7 @@ void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr *MI, unsigned Reg = MO.getReg(); if (Reg == 0) continue; - DEBUG(errs() << " " << TRI->getName(Reg) << "=g" << + DEBUG(dbgs() << " " << TRI->getName(Reg) << "=g" << State->GetGroup(Reg)); // It wasn't previously live but now it is, this is a kill. Forget @@ -461,7 +461,7 @@ void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr *MI, // any use registers to be changed. Also assume all registers // used in a call must not be changed (ABI). if (MI->getDesc().isCall() || MI->getDesc().hasExtraSrcRegAllocReq()) { - DEBUG(if (State->GetGroup(Reg) != 0) errs() << "->g0(alloc-req)"); + DEBUG(if (State->GetGroup(Reg) != 0) dbgs() << "->g0(alloc-req)"); State->UnionGroups(Reg, 0); } @@ -473,12 +473,12 @@ void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr *MI, RegRefs.insert(std::make_pair(Reg, RR)); } - DEBUG(errs() << '\n'); + DEBUG(dbgs() << '\n'); // Form a group of all defs and uses of a KILL instruction to ensure // that all registers are renamed as a group. if (MI->getOpcode() == TargetInstrInfo::KILL) { - DEBUG(errs() << "\tKill Group:"); + DEBUG(dbgs() << "\tKill Group:"); unsigned FirstReg = 0; for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { @@ -488,15 +488,15 @@ void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr *MI, if (Reg == 0) continue; if (FirstReg != 0) { - DEBUG(errs() << "=" << TRI->getName(Reg)); + DEBUG(dbgs() << "=" << TRI->getName(Reg)); State->UnionGroups(FirstReg, Reg); } else { - DEBUG(errs() << " " << TRI->getName(Reg)); + DEBUG(dbgs() << " " << TRI->getName(Reg)); FirstReg = Reg; } } - DEBUG(errs() << "->g" << State->GetGroup(FirstReg) << '\n'); + DEBUG(dbgs() << "->g" << State->GetGroup(FirstReg) << '\n'); } } @@ -525,7 +525,7 @@ BitVector AggressiveAntiDepBreaker::GetRenameRegisters(unsigned Reg) { BV &= RCBV; } - DEBUG(errs() << " " << RC->getName()); + DEBUG(dbgs() << " " << RC->getName()); } return BV; @@ -552,7 +552,7 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters( // Find the "superest" register in the group. At the same time, // collect the BitVector of registers that can be used to rename // each register. 
- DEBUG(errs() << "\tRename Candidates for Group g" << AntiDepGroupIndex << ":\n"); + DEBUG(dbgs() << "\tRename Candidates for Group g" << AntiDepGroupIndex << ":\n"); std::map<unsigned, BitVector> RenameRegisterMap; unsigned SuperReg = 0; for (unsigned i = 0, e = Regs.size(); i != e; ++i) { @@ -562,15 +562,15 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters( // If Reg has any references, then collect possible rename regs if (RegRefs.count(Reg) > 0) { - DEBUG(errs() << "\t\t" << TRI->getName(Reg) << ":"); + DEBUG(dbgs() << "\t\t" << TRI->getName(Reg) << ":"); BitVector BV = GetRenameRegisters(Reg); RenameRegisterMap.insert(std::pair<unsigned, BitVector>(Reg, BV)); - DEBUG(errs() << " ::"); + DEBUG(dbgs() << " ::"); DEBUG(for (int r = BV.find_first(); r != -1; r = BV.find_next(r)) - errs() << " " << TRI->getName(r)); - DEBUG(errs() << "\n"); + dbgs() << " " << TRI->getName(r)); + DEBUG(dbgs() << "\n"); } } @@ -591,7 +591,7 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters( if (renamecnt++ % DebugDiv != DebugMod) return false; - errs() << "*** Performing rename " << TRI->getName(SuperReg) << + dbgs() << "*** Performing rename " << TRI->getName(SuperReg) << " for debug ***\n"; } #endif @@ -606,11 +606,11 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters( const TargetRegisterClass::iterator RB = SuperRC->allocation_order_begin(MF); const TargetRegisterClass::iterator RE = SuperRC->allocation_order_end(MF); if (RB == RE) { - DEBUG(errs() << "\tEmpty Super Regclass!!\n"); + DEBUG(dbgs() << "\tEmpty Super Regclass!!\n"); return false; } - DEBUG(errs() << "\tFind Registers:"); + DEBUG(dbgs() << "\tFind Registers:"); if (RenameOrder.count(SuperRC) == 0) RenameOrder.insert(RenameOrderType::value_type(SuperRC, RE)); @@ -625,7 +625,7 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters( // Don't replace a register with itself. if (NewSuperReg == SuperReg) continue; - DEBUG(errs() << " [" << TRI->getName(NewSuperReg) << ':'); + DEBUG(dbgs() << " [" << TRI->getName(NewSuperReg) << ':'); RenameMap.clear(); // For each referenced group register (which must be a SuperReg or @@ -642,12 +642,12 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters( NewReg = TRI->getSubReg(NewSuperReg, NewSubRegIdx); } - DEBUG(errs() << " " << TRI->getName(NewReg)); + DEBUG(dbgs() << " " << TRI->getName(NewReg)); // Check if Reg can be renamed to NewReg. BitVector BV = RenameRegisterMap[Reg]; if (!BV.test(NewReg)) { - DEBUG(errs() << "(no rename)"); + DEBUG(dbgs() << "(no rename)"); goto next_super_reg; } @@ -656,7 +656,7 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters( // must also check all aliases of NewReg, because we can't define a // register when any sub or super is already live. if (State->IsLive(NewReg) || (KillIndices[Reg] > DefIndices[NewReg])) { - DEBUG(errs() << "(live)"); + DEBUG(dbgs() << "(live)"); goto next_super_reg; } else { bool found = false; @@ -664,7 +664,7 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters( *Alias; ++Alias) { unsigned AliasReg = *Alias; if (State->IsLive(AliasReg) || (KillIndices[Reg] > DefIndices[AliasReg])) { - DEBUG(errs() << "(alias " << TRI->getName(AliasReg) << " live)"); + DEBUG(dbgs() << "(alias " << TRI->getName(AliasReg) << " live)"); found = true; break; } @@ -681,14 +681,14 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters( // renamed, as recorded in RenameMap. 
RenameOrder.erase(SuperRC); RenameOrder.insert(RenameOrderType::value_type(SuperRC, R)); - DEBUG(errs() << "]\n"); + DEBUG(dbgs() << "]\n"); return true; next_super_reg: - DEBUG(errs() << ']'); + DEBUG(dbgs() << ']'); } while (R != EndR); - DEBUG(errs() << '\n'); + DEBUG(dbgs() << '\n'); // No registers are free and available! return false; @@ -740,13 +740,13 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies( } #ifndef NDEBUG - DEBUG(errs() << "\n===== Aggressive anti-dependency breaking\n"); - DEBUG(errs() << "Available regs:"); + DEBUG(dbgs() << "\n===== Aggressive anti-dependency breaking\n"); + DEBUG(dbgs() << "Available regs:"); for (unsigned Reg = 0; Reg < TRI->getNumRegs(); ++Reg) { if (!State->IsLive(Reg)) - DEBUG(errs() << " " << TRI->getName(Reg)); + DEBUG(dbgs() << " " << TRI->getName(Reg)); } - DEBUG(errs() << '\n'); + DEBUG(dbgs() << '\n'); #endif // Attempt to break anti-dependence edges. Walk the instructions @@ -758,7 +758,7 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies( I != E; --Count) { MachineInstr *MI = --I; - DEBUG(errs() << "Anti: "); + DEBUG(dbgs() << "Anti: "); DEBUG(MI->dump()); std::set<unsigned> PassthruRegs; @@ -795,30 +795,30 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies( (Edge->getKind() != SDep::Output)) continue; unsigned AntiDepReg = Edge->getReg(); - DEBUG(errs() << "\tAntidep reg: " << TRI->getName(AntiDepReg)); + DEBUG(dbgs() << "\tAntidep reg: " << TRI->getName(AntiDepReg)); assert(AntiDepReg != 0 && "Anti-dependence on reg0?"); if (!AllocatableSet.test(AntiDepReg)) { // Don't break anti-dependencies on non-allocatable registers. - DEBUG(errs() << " (non-allocatable)\n"); + DEBUG(dbgs() << " (non-allocatable)\n"); continue; } else if ((ExcludeRegs != NULL) && ExcludeRegs->test(AntiDepReg)) { // Don't break anti-dependencies for critical path registers // if not on the critical path - DEBUG(errs() << " (not critical-path)\n"); + DEBUG(dbgs() << " (not critical-path)\n"); continue; } else if (PassthruRegs.count(AntiDepReg) != 0) { // If the anti-dep register liveness "passes-thru", then // don't try to change it. It will be changed along with // the use if required to break an earlier antidep. - DEBUG(errs() << " (passthru)\n"); + DEBUG(dbgs() << " (passthru)\n"); continue; } else { // No anti-dep breaking for implicit deps MachineOperand *AntiDepOp = MI->findRegisterDefOperand(AntiDepReg); assert(AntiDepOp != NULL && "Can't find index for defined register operand"); if ((AntiDepOp == NULL) || AntiDepOp->isImplicit()) { - DEBUG(errs() << " (implicit)\n"); + DEBUG(dbgs() << " (implicit)\n"); continue; } @@ -844,13 +844,13 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies( PE = PathSU->Preds.end(); P != PE; ++P) { if ((P->getSUnit() == NextSU) && (P->getKind() != SDep::Anti) && (P->getKind() != SDep::Output)) { - DEBUG(errs() << " (real dependency)\n"); + DEBUG(dbgs() << " (real dependency)\n"); AntiDepReg = 0; break; } else if ((P->getSUnit() != NextSU) && (P->getKind() == SDep::Data) && (P->getReg() == AntiDepReg)) { - DEBUG(errs() << " (other dependency)\n"); + DEBUG(dbgs() << " (other dependency)\n"); AntiDepReg = 0; break; } @@ -865,16 +865,16 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies( // Determine AntiDepReg's register group. 
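(The DIE change replaces the per-DIE abstract-compile-unit pointer with a parent link, and the next hunk makes addChild() responsible for maintaining it. A standalone sketch of that invariant, using a hypothetical Node type in place of DIE:

    #include <cassert>
    #include <vector>

    // Illustrative stand-in for DIE, showing the single-parent invariant
    // the new addChild() enforces: a child is linked to exactly one
    // parent, and re-adding it under the same parent is a harmless no-op.
    struct Node {
      Node *Parent;
      std::vector<Node*> Children;
      Node() : Parent(0) {}

      void addChild(Node *C) {
        if (C->Parent) {
          assert(C->Parent == this && "node already has a different parent");
          return;                 // already linked; don't add twice
        }
        Children.push_back(C);
        C->Parent = this;         // record the back-edge for later walks
      }
    };

With the back-edge in place, code that previously carried a CompileUnit pointer alongside each DIE can presumably recover the same information by walking Parent links up to the root.)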
const unsigned GroupIndex = State->GetGroup(AntiDepReg); if (GroupIndex == 0) { - DEBUG(errs() << " (zero group)\n"); + DEBUG(dbgs() << " (zero group)\n"); continue; } - DEBUG(errs() << '\n'); + DEBUG(dbgs() << '\n'); // Look for a suitable register to use to break the anti-dependence. std::map<unsigned, unsigned> RenameMap; if (FindSuitableFreeRegisters(GroupIndex, RenameOrder, RenameMap)) { - DEBUG(errs() << "\tBreaking anti-dependence edge on " + DEBUG(dbgs() << "\tBreaking anti-dependence edge on " << TRI->getName(AntiDepReg) << ":"); // Handle each group register... @@ -883,7 +883,7 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies( unsigned CurrReg = S->first; unsigned NewReg = S->second; - DEBUG(errs() << " " << TRI->getName(CurrReg) << "->" << + DEBUG(dbgs() << " " << TRI->getName(CurrReg) << "->" << TRI->getName(NewReg) << "(" << RegRefs.count(CurrReg) << " refs)"); @@ -917,7 +917,7 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies( } ++Broken; - DEBUG(errs() << '\n'); + DEBUG(dbgs() << '\n'); } } } diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp index 44fd176..6b24e24 100644 --- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -236,7 +236,7 @@ namespace { const MCSection *S; unsigned Alignment; SmallVector<unsigned, 4> CPEs; - SectionCPs(const MCSection *s, unsigned a) : S(s), Alignment(a) {}; + SectionCPs(const MCSection *s, unsigned a) : S(s), Alignment(a) {} }; } @@ -1905,7 +1905,6 @@ void AsmPrinter::EmitComments(const MachineInstr &MI) const { if (Newline) O << '\n'; O.PadToColumn(MAI->getCommentColumn()); O << MAI->getCommentString() << " Reload Reuse"; - Newline = true; } } } diff --git a/lib/CodeGen/AsmPrinter/DIE.cpp b/lib/CodeGen/AsmPrinter/DIE.cpp index 0e93b98..b85e11a 100644 --- a/lib/CodeGen/AsmPrinter/DIE.cpp +++ b/lib/CodeGen/AsmPrinter/DIE.cpp @@ -16,6 +16,7 @@ #include "llvm/CodeGen/AsmPrinter.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/Target/TargetData.h" +#include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Format.h" using namespace llvm; @@ -93,7 +94,7 @@ void DIEAbbrev::print(raw_ostream &O) { << '\n'; } } -void DIEAbbrev::dump() { print(errs()); } +void DIEAbbrev::dump() { print(dbgs()); } #endif //===----------------------------------------------------------------------===// @@ -164,14 +165,14 @@ void DIE::print(raw_ostream &O, unsigned IncIndent) { } void DIE::dump() { - print(errs()); + print(dbgs()); } #endif #ifndef NDEBUG void DIEValue::dump() { - print(errs()); + print(dbgs()); } #endif diff --git a/lib/CodeGen/AsmPrinter/DIE.h b/lib/CodeGen/AsmPrinter/DIE.h index cad8b89..a6dc9b6 100644 --- a/lib/CodeGen/AsmPrinter/DIE.h +++ b/lib/CodeGen/AsmPrinter/DIE.h @@ -68,6 +68,7 @@ namespace llvm { /// Data - Raw data bytes for abbreviation. /// SmallVector<DIEAbbrevData, 8> Data; + public: DIEAbbrev(unsigned T, unsigned C) : Tag(T), ChildrenFlag(C), Data() {} virtual ~DIEAbbrev() {} @@ -131,19 +132,18 @@ namespace llvm { /// std::vector<DIE *> Children; + DIE *Parent; + /// Attributes values. /// SmallVector<DIEValue*, 32> Values; - /// Abstract compile unit. - CompileUnit *AbstractCU; - // Private data for print() mutable unsigned IndentCount; public: explicit DIE(unsigned Tag) : Abbrev(Tag, dwarf::DW_CHILDREN_no), Offset(0), - Size(0), IndentCount(0) {} + Size(0), Parent (0), IndentCount(0) {} virtual ~DIE(); // Accessors. 
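Note: the DIE.h hunk above replaces the per-node AbstractCU pointer with a Parent link, making the DIE collection an explicit tree; the addChild change in the next hunk keeps that link consistent. The pattern in isolation (a generic sketch, not the LLVM class itself):

    #include <cassert>
    #include <vector>

    struct TreeNode {
      TreeNode *Parent = nullptr;
      std::vector<TreeNode *> Children;

      // addChild is idempotent: re-adding a node under the same parent is
      // a no-op, and attaching it under a different parent is a logic bug.
      void addChild(TreeNode *Child) {
        if (Child->Parent) {
          assert(Child->Parent == this && "Unexpected parent!");
          return;
        }
        Children.push_back(Child);
        Child->Parent = this;
      }
    };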
@@ -154,13 +154,12 @@ namespace llvm { unsigned getSize() const { return Size; } const std::vector<DIE *> &getChildren() const { return Children; } SmallVector<DIEValue*, 32> &getValues() { return Values; } - CompileUnit *getAbstractCompileUnit() const { return AbstractCU; } - + DIE *getParent() const { return Parent; } void setTag(unsigned Tag) { Abbrev.setTag(Tag); } void setOffset(unsigned O) { Offset = O; } void setSize(unsigned S) { Size = S; } - void setAbstractCompileUnit(CompileUnit *CU) { AbstractCU = CU; } - + void setParent(DIE *P) { Parent = P; } + /// addValue - Add a value and attributes to a DIE. /// void addValue(unsigned Attribute, unsigned Form, DIEValue *Value) { @@ -179,8 +178,13 @@ namespace llvm { /// addChild - Add a child to the DIE. /// void addChild(DIE *Child) { + if (Child->getParent()) { + assert (Child->getParent() == this && "Unexpected DIE Parent!"); + return; + } Abbrev.setChildrenFlag(dwarf::DW_CHILDREN_yes); Children.push_back(Child); + Child->setParent(this); } #ifndef NDEBUG diff --git a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp index c200a46..8a3ceb6 100644 --- a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp +++ b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp @@ -30,11 +30,6 @@ #include "llvm/System/Path.h" using namespace llvm; -static TimerGroup &getDwarfTimerGroup() { - static TimerGroup DwarfTimerGroup("Dwarf Debugging"); - return DwarfTimerGroup; -} - //===----------------------------------------------------------------------===// /// Configuration values for initial hash set sizes (log2). @@ -112,7 +107,12 @@ public: /// getDIEEntry - Returns the debug information entry for the specified /// debug variable. - DIEEntry *getDIEEntry(MDNode *N) { return GVToDIEEntryMap.lookup(N); } + DIEEntry *getDIEEntry(MDNode *N) { + ValueMap<MDNode *, DIEEntry *>::iterator I = GVToDIEEntryMap.find(N); + if (I == GVToDIEEntryMap.end()) + return NULL; + return I->second; + } /// insertDIEEntry - Insert debug information entry into the map. void insertDIEEntry(MDNode *N, DIEEntry *E) { @@ -234,7 +234,7 @@ public: #ifndef NDEBUG void DbgScope::dump() const { - raw_ostream &err = errs(); + raw_ostream &err = dbgs(); err.indent(IndentLevel); MDNode *N = Desc.getNode(); N->dump(); @@ -269,8 +269,7 @@ DwarfDebug::DwarfDebug(raw_ostream &OS, AsmPrinter *A, const MCAsmInfo *T) SectionSourceLines(), didInitial(false), shouldEmit(false), CurrentFnDbgScope(0), DebugTimer(0) { if (TimePassesIsEnabled) - DebugTimer = new Timer("Dwarf Debug Writer", - getDwarfTimerGroup()); + DebugTimer = new Timer("Dwarf Debug Writer"); } DwarfDebug::~DwarfDebug() { for (unsigned j = 0, M = DIEValues.size(); j < M; ++j) @@ -446,6 +445,23 @@ void DwarfDebug::addSourceLine(DIE *Die, const DIType *Ty) { addUInt(Die, dwarf::DW_AT_decl_line, 0, Line); } +/// addSourceLine - Add location information to specified debug information +/// entry. +void DwarfDebug::addSourceLine(DIE *Die, const DINameSpace *NS) { + // If there is no compile unit specified, don't add a line #.
+ if (NS->getCompileUnit().isNull()) + return; + + unsigned Line = NS->getLineNumber(); + StringRef FN = NS->getFilename(); + StringRef Dir = NS->getDirectory(); + + unsigned FileID = GetOrCreateSourceID(Dir, FN); + assert(FileID && "Invalid file id"); + addUInt(Die, dwarf::DW_AT_decl_file, 0, FileID); + addUInt(Die, dwarf::DW_AT_decl_line, 0, Line); +} + /* Byref variables, in Blocks, are declared by the programmer as "SomeType VarName;", but the compiler creates a __Block_byref_x_VarName struct, and gives the variable VarName @@ -745,6 +761,9 @@ void DwarfDebug::addToContextOwner(DIE *Die, DIDescriptor Context) { else if (Context.isType()) { DIE *ContextDIE = getOrCreateTypeDIE(DIType(Context.getNode())); ContextDIE->addChild(Die); + } else if (Context.isNameSpace()) { + DIE *ContextDIE = getOrCreateNameSpace(DINameSpace(Context.getNode())); + ContextDIE->addChild(Die); } else if (DIE *ContextDIE = ModuleCU->getDIE(Context.getNode())) ContextDIE->addChild(Die); else @@ -781,7 +800,6 @@ void DwarfDebug::addType(DIE *Entity, DIType Ty) { // Check for pre-existence. DIEEntry *Entry = ModuleCU->getDIEEntry(Ty.getNode()); - // If it exists then use the existing value. if (Entry) { Entity->addValue(dwarf::DW_AT_type, dwarf::DW_FORM_ref4, Entry); @@ -1030,13 +1048,6 @@ DIE *DwarfDebug::createGlobalVariableDIE(const DIGlobalVariable &GV) { addUInt(GVDie, dwarf::DW_AT_external, dwarf::DW_FORM_flag, 1); addSourceLine(GVDie, &GV); - // Add address. - DIEBlock *Block = new DIEBlock(); - addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_addr); - addObjectLabel(Block, 0, dwarf::DW_FORM_udata, - Asm->Mang->getMangledName(GV.getGlobal())); - addBlock(GVDie, dwarf::DW_AT_location, 0, Block); - return GVDie; } @@ -1285,7 +1296,6 @@ DIE *DwarfDebug::updateSubprogramScopeDIE(MDNode *SPNode) { SPDie = new DIE(dwarf::DW_TAG_subprogram); addDIEEntry(SPDie, dwarf::DW_AT_specification, dwarf::DW_FORM_ref4, SPDeclDie); - ModuleCU->addDie(SPDie); } @@ -1559,6 +1569,20 @@ unsigned DwarfDebug::GetOrCreateSourceID(StringRef DirName, StringRef FileName) return SrcId; } +/// getOrCreateNameSpace - Create a DIE for DINameSpace. +DIE *DwarfDebug::getOrCreateNameSpace(DINameSpace NS) { + DIE *NDie = ModuleCU->getDIE(NS.getNode()); + if (NDie) + return NDie; + NDie = new DIE(dwarf::DW_TAG_namespace); + ModuleCU->insertDIE(NS.getNode(), NDie); + if (!NS.getName().empty()) + addString(NDie, dwarf::DW_AT_name, dwarf::DW_FORM_string, NS.getName()); + addSourceLine(NDie, &NS); + addToContextOwner(NDie, NS.getContext()); + return NDie; +} + CompileUnit *DwarfDebug::constructCompileUnit(MDNode *N) { DICompileUnit DIUnit(N); StringRef FN = DIUnit.getFilename(); @@ -1620,6 +1644,25 @@ void DwarfDebug::constructGlobalVariableDIE(MDNode *N) { ModuleCU->insertDIE(N, VariableDie); // Add to context owner. + if (DI_GV.isDefinition() + && !DI_GV.getContext().isCompileUnit()) { + // Create specification DIE. 
+ DIE *VariableSpecDIE = new DIE(dwarf::DW_TAG_variable); + addDIEEntry(VariableSpecDIE, dwarf::DW_AT_specification, + dwarf::DW_FORM_ref4, VariableDie); + DIEBlock *Block = new DIEBlock(); + addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_addr); + addObjectLabel(Block, 0, dwarf::DW_FORM_udata, + Asm->Mang->getMangledName(DI_GV.getGlobal())); + addBlock(VariableSpecDIE, dwarf::DW_AT_location, 0, Block); + ModuleCU->addDie(VariableSpecDIE); + } else { + DIEBlock *Block = new DIEBlock(); + addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_addr); + addObjectLabel(Block, 0, dwarf::DW_FORM_udata, + Asm->Mang->getMangledName(DI_GV.getGlobal())); + addBlock(VariableDie, dwarf::DW_AT_location, 0, Block); + } addToContextOwner(VariableDie, DI_GV.getContext()); // Expose as global. FIXME - need to check external flag. @@ -1652,9 +1695,7 @@ void DwarfDebug::constructSubprogramDIE(MDNode *N) { ModuleCU->insertDIE(N, SubprogramDie); // Add to context owner. - if (SP.getContext().getNode() == SP.getCompileUnit().getNode()) - if (TopLevelDIEs.insert(SubprogramDie)) - TopLevelDIEsVector.push_back(SubprogramDie); + addToContextOwner(SubprogramDie, SP.getContext()); // Expose as global. ModuleCU->addGlobal(SP.getName(), SubprogramDie); @@ -2365,7 +2406,6 @@ void DwarfDebug::emitDebugInfo() { EmitLabel("info_end", ModuleCU->getID()); Asm->EOL(); - } /// emitAbbreviations - Emit the abbreviation section. diff --git a/lib/CodeGen/AsmPrinter/DwarfDebug.h b/lib/CodeGen/AsmPrinter/DwarfDebug.h index 12ad322..2b8164e 100644 --- a/lib/CodeGen/AsmPrinter/DwarfDebug.h +++ b/lib/CodeGen/AsmPrinter/DwarfDebug.h @@ -285,6 +285,7 @@ class DwarfDebug : public Dwarf { void addSourceLine(DIE *Die, const DIGlobal *G); void addSourceLine(DIE *Die, const DISubprogram *SP); void addSourceLine(DIE *Die, const DIType *Ty); + void addSourceLine(DIE *Die, const DINameSpace *NS); /// addAddress - Add an address attribute to a die based on the location /// provided. @@ -315,6 +316,10 @@ class DwarfDebug : public Dwarf { /// addType - Add a new type attribute to the specified entity. void addType(DIE *Entity, DIType Ty); + + /// getOrCreateNameSpace - Create a DIE for DINameSpace. + DIE *getOrCreateNameSpace(DINameSpace NS); + /// getOrCreateTypeDIE - Find existing DIE or create new DIE for the /// given DIType. DIE *getOrCreateTypeDIE(DIType Ty); diff --git a/lib/CodeGen/AsmPrinter/DwarfException.cpp b/lib/CodeGen/AsmPrinter/DwarfException.cpp index 3fd077f..d01f300 100644 --- a/lib/CodeGen/AsmPrinter/DwarfException.cpp +++ b/lib/CodeGen/AsmPrinter/DwarfException.cpp @@ -35,19 +35,13 @@ #include "llvm/ADT/StringExtras.h" using namespace llvm; -static TimerGroup &getDwarfTimerGroup() { - static TimerGroup DwarfTimerGroup("DWARF Exception"); - return DwarfTimerGroup; -} - DwarfException::DwarfException(raw_ostream &OS, AsmPrinter *A, const MCAsmInfo *T) : Dwarf(OS, A, T, "eh"), shouldEmitTable(false), shouldEmitMoves(false), shouldEmitTableModule(false), shouldEmitMovesModule(false), ExceptionTimer(0) { if (TimePassesIsEnabled) - ExceptionTimer = new Timer("DWARF Exception Writer", - getDwarfTimerGroup()); + ExceptionTimer = new Timer("DWARF Exception Writer"); } DwarfException::~DwarfException() { @@ -292,13 +286,14 @@ void DwarfException::EmitFDE(const FunctionEHFrameInfo &EHFrameInfo) { Asm->EmitULEB128Bytes(is4Byte ? 4 : 8); Asm->EOL("Augmentation size"); - // We force 32-bits here because we've encoded our LSDA in the CIE with - // `dwarf::DW_EH_PE_sdata4'. And the CIE and FDE should agree. 
if (EHFrameInfo.hasLandingPads) - EmitReference("exception", EHFrameInfo.Number, true, true); - else - Asm->EmitInt32((int)0); - + EmitReference("exception", EHFrameInfo.Number, true, false); + else { + if (is4Byte) + Asm->EmitInt32((int)0); + else + Asm->EmitInt64((int)0); + } Asm->EOL("Language Specific Data Area"); } else { Asm->EmitULEB128Bytes(0); diff --git a/lib/CodeGen/BranchFolding.cpp b/lib/CodeGen/BranchFolding.cpp index 3887e6d..92849d3 100644 --- a/lib/CodeGen/BranchFolding.cpp +++ b/lib/CodeGen/BranchFolding.cpp @@ -98,7 +98,7 @@ BranchFolder::BranchFolder(bool defaultEnableTailMerge) { /// function, updating the CFG. void BranchFolder::RemoveDeadBlock(MachineBasicBlock *MBB) { assert(MBB->pred_empty() && "MBB must be dead!"); - DEBUG(errs() << "\nRemoving MBB: " << *MBB); + DEBUG(dbgs() << "\nRemoving MBB: " << *MBB); MachineFunction *MF = MBB->getParent(); // drop all successors. @@ -636,7 +636,7 @@ unsigned BranchFolder::CreateCommonTailOnlyBlock(MachineBasicBlock *&PredBB, SameTails[commonTailIndex].getTailStartPos(); MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock(); - DEBUG(errs() << "\nSplitting BB#" << MBB->getNumber() << ", size " + DEBUG(dbgs() << "\nSplitting BB#" << MBB->getNumber() << ", size " << maxCommonTailLength); MachineBasicBlock *newMBB = SplitMBBAt(*MBB, BBI); @@ -666,18 +666,18 @@ bool BranchFolder::TryTailMergeBlocks(MachineBasicBlock *SuccBB, // this many instructions in common. unsigned minCommonTailLength = TailMergeSize; - DEBUG(errs() << "\nTryTailMergeBlocks: "; + DEBUG(dbgs() << "\nTryTailMergeBlocks: "; for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i) - errs() << "BB#" << MergePotentials[i].getBlock()->getNumber() + dbgs() << "BB#" << MergePotentials[i].getBlock()->getNumber() << (i == e-1 ? "" : ", "); - errs() << "\n"; + dbgs() << "\n"; if (SuccBB) { - errs() << " with successor BB#" << SuccBB->getNumber() << '\n'; + dbgs() << " with successor BB#" << SuccBB->getNumber() << '\n'; if (PredBB) - errs() << " which has fall-through from BB#" + dbgs() << " which has fall-through from BB#" << PredBB->getNumber() << "\n"; } - errs() << "Looking for common tails of at least " + dbgs() << "Looking for common tails of at least " << minCommonTailLength << " instruction" << (minCommonTailLength == 1 ? "" : "s") << '\n'; ); @@ -748,19 +748,19 @@ bool BranchFolder::TryTailMergeBlocks(MachineBasicBlock *SuccBB, MachineBasicBlock *MBB = SameTails[commonTailIndex].getBlock(); // MBB is common tail. Adjust all other BB's to jump to this one. // Traversal must be forwards so erases work. - DEBUG(errs() << "\nUsing common tail in BB#" << MBB->getNumber() + DEBUG(dbgs() << "\nUsing common tail in BB#" << MBB->getNumber() << " for "); for (unsigned int i=0, e = SameTails.size(); i != e; ++i) { if (commonTailIndex == i) continue; - DEBUG(errs() << "BB#" << SameTails[i].getBlock()->getNumber() + DEBUG(dbgs() << "BB#" << SameTails[i].getBlock()->getNumber() << (i == e-1 ? "" : ", ")); // Hack the end off BB i, making it jump to BB commonTailIndex instead. ReplaceTailWithBranchTo(SameTails[i].getTailStartPos(), MBB); // BB i is no longer a predecessor of SuccBB; remove it from the worklist. MergePotentials.erase(SameTails[i].getMPIter()); } - DEBUG(errs() << "\n"); + DEBUG(dbgs() << "\n"); // We leave commonTailIndex in the worklist in case there are other blocks // that match it with a smaller number of instructions. 
MadeChange = true; @@ -999,7 +999,7 @@ ReoptimizeBlock: if (PriorCond.empty() && !PriorTBB && MBB->pred_size() == 1 && PrevBB.succ_size() == 1 && !MBB->hasAddressTaken()) { - DEBUG(errs() << "\nMerging into block: " << PrevBB + DEBUG(dbgs() << "\nMerging into block: " << PrevBB << "From MBB: " << *MBB); PrevBB.splice(PrevBB.end(), MBB, MBB->begin(), MBB->end()); PrevBB.removeSuccessor(PrevBB.succ_begin());; @@ -1084,7 +1084,7 @@ ReoptimizeBlock: // Reverse the branch so we will fall through on the previous true cond. SmallVector<MachineOperand, 4> NewPriorCond(PriorCond); if (!TII->ReverseBranchCondition(NewPriorCond)) { - DEBUG(errs() << "\nMoving MBB: " << *MBB + DEBUG(dbgs() << "\nMoving MBB: " << *MBB << "To make fallthrough to: " << *PriorTBB << "\n"); TII->RemoveBranch(PrevBB); @@ -1222,7 +1222,7 @@ ReoptimizeBlock: // Analyze the branch at the end of the pred. MachineBasicBlock *PredBB = *PI; MachineFunction::iterator PredFallthrough = PredBB; ++PredFallthrough; - MachineBasicBlock *PredTBB, *PredFBB; + MachineBasicBlock *PredTBB = 0, *PredFBB = 0; SmallVector<MachineOperand, 4> PredCond; if (PredBB != MBB && !PredBB->canFallThrough() && !TII->AnalyzeBranch(*PredBB, PredTBB, PredFBB, PredCond, true) @@ -1274,7 +1274,7 @@ ReoptimizeBlock: // Okay, there is no really great place to put this block. If, however, // the block before this one would be a fall-through if this block were // removed, move this block to the end of the function. - MachineBasicBlock *PrevTBB, *PrevFBB; + MachineBasicBlock *PrevTBB = 0, *PrevFBB = 0; SmallVector<MachineOperand, 4> PrevCond; if (FallThrough != MF.end() && !TII->AnalyzeBranch(PrevBB, PrevTBB, PrevFBB, PrevCond, true) && diff --git a/lib/CodeGen/CalcSpillWeights.cpp b/lib/CodeGen/CalcSpillWeights.cpp index dcffb8a2..b8ef219 100644 --- a/lib/CodeGen/CalcSpillWeights.cpp +++ b/lib/CodeGen/CalcSpillWeights.cpp @@ -37,7 +37,7 @@ void CalculateSpillWeights::getAnalysisUsage(AnalysisUsage &au) const { bool CalculateSpillWeights::runOnMachineFunction(MachineFunction &fn) { - DEBUG(errs() << "********** Compute Spill Weights **********\n" + DEBUG(dbgs() << "********** Compute Spill Weights **********\n" << "********** Function: " << fn.getFunction()->getName() << '\n'); @@ -95,7 +95,7 @@ bool CalculateSpillWeights::runOnMachineFunction(MachineFunction &fn) { SlotIndex defIdx = lis->getInstructionIndex(mi).getDefIndex(); const LiveRange *dlr = lis->getInterval(reg).getLiveRangeContaining(defIdx); - if (dlr->end > mbbEnd) + if (dlr->end >= mbbEnd) weight *= 3.0F; } regInt.weight += weight; diff --git a/lib/CodeGen/CodePlacementOpt.cpp b/lib/CodeGen/CodePlacementOpt.cpp index ff71f6b..126700b 100644 --- a/lib/CodeGen/CodePlacementOpt.cpp +++ b/lib/CodeGen/CodePlacementOpt.cpp @@ -233,7 +233,6 @@ bool CodePlacementOpt::EliminateUnconditionalJumpsToTop(MachineFunction &MF, !BotHasFallthrough && HasFallthrough(L->getBottomBlock())) { ++NumIntraElim; - BotHasFallthrough = true; } return Changed; diff --git a/lib/CodeGen/ELF.h b/lib/CodeGen/ELF.h index e303ebb..cb5a8c0 100644 --- a/lib/CodeGen/ELF.h +++ b/lib/CodeGen/ELF.h @@ -82,14 +82,14 @@ namespace llvm { const GlobalValue *getGlobalValue() const { assert(SourceType == isGV && "This is not a global value"); return Source.GV; - }; + } // getExternalSym - If this is an external symbol which originated the // elf symbol, return a reference to it. 
const char *getExternalSymbol() const { assert(SourceType == isExtSym && "This is not an external symbol"); return Source.Ext; - }; + } // getGV - From a global value return a elf symbol to represent it static ELFSym *getGV(const GlobalValue *GV, unsigned Bind, diff --git a/lib/CodeGen/LLVMTargetMachine.cpp b/lib/CodeGen/LLVMTargetMachine.cpp index 297dd31..d5fd051 100644 --- a/lib/CodeGen/LLVMTargetMachine.cpp +++ b/lib/CodeGen/LLVMTargetMachine.cpp @@ -83,7 +83,18 @@ LLVMTargetMachine::LLVMTargetMachine(const Target &T, AsmInfo = T.createAsmInfo(TargetTriple); } +// Set the default code model for the JIT for a generic target. +// FIXME: Is small right here? or .is64Bit() ? Large : Small? +void +LLVMTargetMachine::setCodeModelForJIT() { + setCodeModel(CodeModel::Small); +} +// Set the default code model for static compilation for a generic target. +void +LLVMTargetMachine::setCodeModelForStatic() { + setCodeModel(CodeModel::Small); +} FileModel::Model LLVMTargetMachine::addPassesToEmitFile(PassManagerBase &PM, @@ -130,6 +141,9 @@ bool LLVMTargetMachine::addAssemblyEmitter(PassManagerBase &PM, bool LLVMTargetMachine::addPassesToEmitFileFinish(PassManagerBase &PM, MachineCodeEmitter *MCE, CodeGenOpt::Level OptLevel) { + // Make sure the code model is set. + setCodeModelForStatic(); + if (MCE) addSimpleCodeEmitter(PM, OptLevel, *MCE); if (PrintEmittedAsm) @@ -146,6 +160,9 @@ bool LLVMTargetMachine::addPassesToEmitFileFinish(PassManagerBase &PM, bool LLVMTargetMachine::addPassesToEmitFileFinish(PassManagerBase &PM, JITCodeEmitter *JCE, CodeGenOpt::Level OptLevel) { + // Make sure the code model is set. + setCodeModelForJIT(); + if (JCE) addSimpleCodeEmitter(PM, OptLevel, *JCE); if (PrintEmittedAsm) @@ -162,6 +179,9 @@ bool LLVMTargetMachine::addPassesToEmitFileFinish(PassManagerBase &PM, bool LLVMTargetMachine::addPassesToEmitFileFinish(PassManagerBase &PM, ObjectCodeEmitter *OCE, CodeGenOpt::Level OptLevel) { + // Make sure the code model is set. + setCodeModelForStatic(); + if (OCE) addSimpleCodeEmitter(PM, OptLevel, *OCE); if (PrintEmittedAsm) @@ -181,6 +201,9 @@ bool LLVMTargetMachine::addPassesToEmitFileFinish(PassManagerBase &PM, bool LLVMTargetMachine::addPassesToEmitMachineCode(PassManagerBase &PM, MachineCodeEmitter &MCE, CodeGenOpt::Level OptLevel) { + // Make sure the code model is set. + setCodeModelForJIT(); + // Add common CodeGen passes. if (addCommonCodeGenPasses(PM, OptLevel)) return true; @@ -203,6 +226,9 @@ bool LLVMTargetMachine::addPassesToEmitMachineCode(PassManagerBase &PM, bool LLVMTargetMachine::addPassesToEmitMachineCode(PassManagerBase &PM, JITCodeEmitter &JCE, CodeGenOpt::Level OptLevel) { + // Make sure the code model is set. + setCodeModelForJIT(); + // Add common CodeGen passes. if (addCommonCodeGenPasses(PM, OptLevel)) return true; diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp index 8806439f..452f872 100644 --- a/lib/CodeGen/LiveIntervalAnalysis.cpp +++ b/lib/CodeGen/LiveIntervalAnalysis.cpp @@ -324,8 +324,7 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb, // of the defining block, potentially live across some blocks, then is // live into some number of blocks, but gets killed. Start by adding a // range that goes from this definition to the end of the defining block. 
- LiveRange NewLR(defIndex, getMBBEndIdx(mbb).getNextIndex().getLoadIndex(), - ValNo); + LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo); DEBUG(errs() << " +" << NewLR); interval.addRange(NewLR); @@ -334,10 +333,8 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb, // live interval. for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(), E = vi.AliveBlocks.end(); I != E; ++I) { - LiveRange LR( - getMBBStartIdx(mf_->getBlockNumbered(*I)), - getMBBEndIdx(mf_->getBlockNumbered(*I)).getNextIndex().getLoadIndex(), - ValNo); + MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I); + LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo); interval.addRange(LR); DEBUG(errs() << " +" << LR); } @@ -415,19 +412,32 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb, // first redefinition of the vreg that we have seen, go back and change // the live range in the PHI block to be a different value number. if (interval.containsOneValue()) { - // Remove the old range that we now know has an incorrect number. + VNInfo *VNI = interval.getValNumInfo(0); - MachineInstr *Killer = vi.Kills[0]; - SlotIndex Start = getMBBStartIdx(Killer->getParent()); - SlotIndex End = getInstructionIndex(Killer).getDefIndex(); - DEBUG({ - errs() << " Removing [" << Start << "," << End << "] from: "; - interval.print(errs(), tri_); - errs() << "\n"; - }); - interval.removeRange(Start, End); - assert(interval.ranges.size() == 1 && - "Newly discovered PHI interval has >1 ranges."); + // Phi elimination may have reused the register for multiple identical + // phi nodes. There will be a kill per phi. Remove the old ranges that + // we now know have an incorrect number. + for (unsigned ki=0, ke=vi.Kills.size(); ki != ke; ++ki) { + MachineInstr *Killer = vi.Kills[ki]; + SlotIndex Start = getMBBStartIdx(Killer->getParent()); + SlotIndex End = getInstructionIndex(Killer).getDefIndex(); + DEBUG({ + errs() << "\n\t\trenaming [" << Start << "," << End << "] in: "; + interval.print(errs(), tri_); + }); + interval.removeRange(Start, End); + + // Replace the interval with one of a NEW value number. Note that + // this value number isn't actually defined by an instruction, weird + // huh? :) + LiveRange LR(Start, End, + interval.getNextValue(SlotIndex(Start, true), + 0, false, VNInfoAllocator)); + LR.valno->setIsPHIDef(true); + interval.addRange(LR); + LR.valno->addKill(End); + } + MachineBasicBlock *killMBB = getMBBFromIndex(VNI->def); VNI->addKill(indexes_->getTerminatorGap(killMBB)); VNI->setHasPHIKill(true); @@ -435,20 +445,6 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb, errs() << " RESULT: "; interval.print(errs(), tri_); }); - - // Replace the interval with one of a NEW value number. Note that this - // value number isn't actually defined by an instruction, weird huh? 
:) - LiveRange LR(Start, End, - interval.getNextValue(SlotIndex(getMBBStartIdx(Killer->getParent()), true), - 0, false, VNInfoAllocator)); - LR.valno->setIsPHIDef(true); - DEBUG(errs() << " replace range with " << LR); - interval.addRange(LR); - LR.valno->addKill(End); - DEBUG({ - errs() << " RESULT: "; - interval.print(errs(), tri_); - }); } // In the case of PHI elimination, each variable definition is only @@ -468,7 +464,7 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb, CopyMI = mi; ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator); - SlotIndex killIndex = getMBBEndIdx(mbb).getNextIndex().getLoadIndex(); + SlotIndex killIndex = getMBBEndIdx(mbb); LiveRange LR(defIndex, killIndex, ValNo); interval.addRange(LR); ValNo->addKill(indexes_->getTerminatorGap(mbb)); @@ -1248,7 +1244,7 @@ bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li, continue; SlotIndex KillIdx = VNI->kills[j]; - if (KillIdx > Idx && KillIdx < End) + if (KillIdx > Idx && KillIdx <= End) return true; } return false; @@ -2086,7 +2082,7 @@ LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg, VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent())); LiveRange LR( SlotIndex(getInstructionIndex(startInst).getDefIndex()), - getMBBEndIdx(startInst->getParent()).getNextIndex().getBaseIndex(), VN); + getMBBEndIdx(startInst->getParent()), VN); Interval.addRange(LR); return LR; diff --git a/lib/CodeGen/MachineBasicBlock.cpp b/lib/CodeGen/MachineBasicBlock.cpp index a58286d..74a0d57 100644 --- a/lib/CodeGen/MachineBasicBlock.cpp +++ b/lib/CodeGen/MachineBasicBlock.cpp @@ -450,14 +450,29 @@ void MachineBasicBlock::ReplaceUsesOfBlockWith(MachineBasicBlock *Old, /// CorrectExtraCFGEdges - Various pieces of code can cause excess edges in the /// CFG to be inserted. If we have proven that MBB can only branch to DestA and -/// DestB, remove any other MBB successors from the CFG. DestA and DestB can -/// be null. +/// DestB, remove any other MBB successors from the CFG. DestA and DestB can be +/// null. +/// /// Besides DestA and DestB, retain other edges leading to LandingPads /// (currently there can be only one; we don't check or require that here). /// Note it is possible that DestA and/or DestB are LandingPads. bool MachineBasicBlock::CorrectExtraCFGEdges(MachineBasicBlock *DestA, MachineBasicBlock *DestB, bool isCond) { + // The values of DestA and DestB frequently come from a call to the + // 'TargetInstrInfo::AnalyzeBranch' method. We take the meaning of the initial + // values from there. + // + // 1. If both DestA and DestB are null, then the block ends with no branches + // (it falls through to its successor). + // 2. If DestA is set, DestB is null, and isCond is false, then the block ends + // with only an unconditional branch. + // 3. If DestA is set, DestB is null, and isCond is true, then the block ends + // with a conditional branch that falls through to a successor (DestB). + // 4. If DestA and DestB are set and isCond is true, then the block ends with a + // conditional branch followed by an unconditional branch. DestA is the + // 'true' destination and DestB is the 'false' destination.
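Read as code, the four cases above classify an AnalyzeBranch-style result as follows (a self-contained paraphrase with stand-in types; the real caller works on MachineBasicBlock pointers and a condition operand vector):

    struct MBB {};   // stand-in for MachineBasicBlock
    enum class BlockEnd { FallThrough, Uncond, CondFallThrough, CondPlusUncond };

    // DestA/DestB/isCond exactly as described in the comment above.
    BlockEnd classify(MBB *DestA, MBB *DestB, bool isCond) {
      if (!DestA && !DestB)           return BlockEnd::FallThrough;     // case 1
      if (DestA && !DestB && !isCond) return BlockEnd::Uncond;          // case 2
      if (DestA && !DestB && isCond)  return BlockEnd::CondFallThrough; // case 3
      return BlockEnd::CondPlusUncond;                                  // case 4
    }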
+ bool MadeChange = false; bool AddedFallThrough = false; @@ -483,14 +498,15 @@ bool MachineBasicBlock::CorrectExtraCFGEdges(MachineBasicBlock *DestA, MachineBasicBlock::succ_iterator SI = succ_begin(); MachineBasicBlock *OrigDestA = DestA, *OrigDestB = DestB; while (SI != succ_end()) { - if (*SI == DestA) { + const MachineBasicBlock *MBB = *SI; + if (MBB == DestA) { DestA = 0; ++SI; - } else if (*SI == DestB) { + } else if (MBB == DestB) { DestB = 0; ++SI; - } else if ((*SI)->isLandingPad() && - *SI!=OrigDestA && *SI!=OrigDestB) { + } else if (MBB->isLandingPad() && + MBB != OrigDestA && MBB != OrigDestB) { ++SI; } else { // Otherwise, this is a superfluous edge, remove it. @@ -498,12 +514,12 @@ bool MachineBasicBlock::CorrectExtraCFGEdges(MachineBasicBlock *DestA, MadeChange = true; } } - if (!AddedFallThrough) { - assert(DestA == 0 && DestB == 0 && - "MachineCFG is missing edges!"); - } else if (isCond) { + + if (!AddedFallThrough) + assert(DestA == 0 && DestB == 0 && "MachineCFG is missing edges!"); + else if (isCond) assert(DestA == 0 && "MachineCFG is missing edges!"); - } + return MadeChange; } diff --git a/lib/CodeGen/MachineDominators.cpp b/lib/CodeGen/MachineDominators.cpp index 0f796f3..4088739 100644 --- a/lib/CodeGen/MachineDominators.cpp +++ b/lib/CodeGen/MachineDominators.cpp @@ -17,8 +17,10 @@ using namespace llvm; +namespace llvm { TEMPLATE_INSTANTIATION(class DomTreeNodeBase<MachineBasicBlock>); TEMPLATE_INSTANTIATION(class DominatorTreeBase<MachineBasicBlock>); +} char MachineDominatorTree::ID = 0; diff --git a/lib/CodeGen/MachineInstr.cpp b/lib/CodeGen/MachineInstr.cpp index 12b974d..a761c2d 100644 --- a/lib/CodeGen/MachineInstr.cpp +++ b/lib/CodeGen/MachineInstr.cpp @@ -15,6 +15,7 @@ #include "llvm/Constants.h" #include "llvm/Function.h" #include "llvm/InlineAsm.h" +#include "llvm/Type.h" #include "llvm/Value.h" #include "llvm/Assembly/Writer.h" #include "llvm/CodeGen/MachineFunction.h" @@ -555,8 +556,13 @@ void MachineInstr::addOperand(const MachineOperand &Op) { Operands.back().ParentMI = this; // If the operand is a register, update the operand's use list. - if (Op.isReg()) + if (Op.isReg()) { Operands.back().AddRegOperandToRegInfo(RegInfo); + // If the register operand is flagged as early, mark the operand as such + unsigned OpNo = Operands.size() - 1; + if (TID->getOperandConstraint(OpNo, TOI::EARLY_CLOBBER) != -1) + Operands[OpNo].setIsEarlyClobber(true); + } return; } } @@ -573,8 +579,12 @@ void MachineInstr::addOperand(const MachineOperand &Op) { // Do explicitly set the reginfo for this operand though, to ensure the // next/prev fields are properly nulled out. - if (Operands[OpNo].isReg()) + if (Operands[OpNo].isReg()) { Operands[OpNo].AddRegOperandToRegInfo(0); + // If the register operand is flagged as early, mark the operand as such + if (TID->getOperandConstraint(OpNo, TOI::EARLY_CLOBBER) != -1) + Operands[OpNo].setIsEarlyClobber(true); + } } else if (Operands.size()+1 <= Operands.capacity()) { // Otherwise, we have to remove register operands from their register use @@ -594,8 +604,12 @@ void MachineInstr::addOperand(const MachineOperand &Op) { Operands.insert(Operands.begin()+OpNo, Op); Operands[OpNo].ParentMI = this; - if (Operands[OpNo].isReg()) + if (Operands[OpNo].isReg()) { Operands[OpNo].AddRegOperandToRegInfo(RegInfo); + // If the register operand is flagged as early, mark the operand as such + if (TID->getOperandConstraint(OpNo, TOI::EARLY_CLOBBER) != -1) + Operands[OpNo].setIsEarlyClobber(true); + } // Re-add all the implicit ops. 
for (unsigned i = OpNo+1, e = Operands.size(); i != e; ++i) { @@ -613,6 +627,11 @@ void MachineInstr::addOperand(const MachineOperand &Op) { // Re-add all the operands. AddRegOperandsToUseLists(*RegInfo); + + // If the register operand is flagged as early, mark the operand as such + if (Operands[OpNo].isReg() + && TID->getOperandConstraint(OpNo, TOI::EARLY_CLOBBER) != -1) + Operands[OpNo].setIsEarlyClobber(true); } } @@ -1141,7 +1160,7 @@ void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM) const { // Briefly indicate whether any call clobbers were omitted. if (OmittedAnyCallClobbers) { - if (FirstOp) FirstOp = false; else OS << ","; + if (!FirstOp) OS << ","; OS << " ..."; } @@ -1159,7 +1178,7 @@ void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM) const { } if (!debugLoc.isUnknown() && MF) { - if (!HaveSemi) OS << ";"; HaveSemi = true; + if (!HaveSemi) OS << ";"; // TODO: print InlinedAtLoc information diff --git a/lib/CodeGen/MachineLICM.cpp b/lib/CodeGen/MachineLICM.cpp index 66de535..0a57ea1 100644 --- a/lib/CodeGen/MachineLICM.cpp +++ b/lib/CodeGen/MachineLICM.cpp @@ -322,7 +322,7 @@ bool MachineLICM::IsLoopInvariantInst(MachineInstr &I) { // If the loop contains the definition of an operand, then the instruction // isn't loop invariant. - if (CurLoop->contains(RegInfo->getVRegDef(Reg)->getParent())) + if (CurLoop->contains(RegInfo->getVRegDef(Reg))) return false; } diff --git a/lib/CodeGen/MachineLoopInfo.cpp b/lib/CodeGen/MachineLoopInfo.cpp index 63f4f18..d561a5b 100644 --- a/lib/CodeGen/MachineLoopInfo.cpp +++ b/lib/CodeGen/MachineLoopInfo.cpp @@ -19,12 +19,14 @@ #include "llvm/CodeGen/Passes.h" using namespace llvm; +namespace llvm { #define MLB class LoopBase<MachineBasicBlock, MachineLoop> TEMPLATE_INSTANTIATION(MLB); #undef MLB #define MLIB class LoopInfoBase<MachineBasicBlock, MachineLoop> TEMPLATE_INSTANTIATION(MLIB); #undef MLIB +} char MachineLoopInfo::ID = 0; static RegisterPass<MachineLoopInfo> diff --git a/lib/CodeGen/MachineVerifier.cpp b/lib/CodeGen/MachineVerifier.cpp index 917d053..0772319 100644 --- a/lib/CodeGen/MachineVerifier.cpp +++ b/lib/CodeGen/MachineVerifier.cpp @@ -365,24 +365,6 @@ void MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) { const TargetInstrInfo *TII = MF->getTarget().getInstrInfo(); - // Start with minimal CFG sanity checks. - MachineFunction::const_iterator MBBI = MBB; - ++MBBI; - if (MBBI != MF->end()) { - // Block is not last in function. - if (!MBB->isSuccessor(MBBI)) { - // Block does not fall through. - if (MBB->empty()) { - report("MBB doesn't fall through but is empty!", MBB); - } - } - } else { - // Block is last in function. - if (MBB->empty()) { - report("MBB is last in function but is empty!", MBB); - } - } - // Call AnalyzeBranch. If it succeeds, there several more conditions to check. MachineBasicBlock *TBB = 0, *FBB = 0; SmallVector<MachineOperand, 4> Cond; @@ -553,7 +535,8 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) { report("Explicit operand marked as implicit", MO, MONum); } } else { - if (MO->isReg() && !MO->isImplicit() && !TI.isVariadic()) + // ARM adds %reg0 operands to indicate predicates. We'll allow that. 
+ if (MO->isReg() && !MO->isImplicit() && !TI.isVariadic() && MO->getReg()) report("Extra explicit operand on non-variadic instruction", MO, MONum); } diff --git a/lib/CodeGen/PBQP/AnnotatedGraph.h b/lib/CodeGen/PBQP/AnnotatedGraph.h index 904061c..a47dce9 100644 --- a/lib/CodeGen/PBQP/AnnotatedGraph.h +++ b/lib/CodeGen/PBQP/AnnotatedGraph.h @@ -132,19 +132,19 @@ public: } NodeData& getNodeData(const NodeIterator &nodeItr) { - return getNodeEntry(nodeItr).getNodeData(); + return PGraph::getNodeEntry(nodeItr).getNodeData(); } const NodeData& getNodeData(const NodeIterator &nodeItr) const { - return getNodeEntry(nodeItr).getNodeData(); + return PGraph::getNodeEntry(nodeItr).getNodeData(); } EdgeData& getEdgeData(const EdgeIterator &edgeItr) { - return getEdgeEntry(edgeItr).getEdgeData(); + return PGraph::getEdgeEntry(edgeItr).getEdgeData(); } const EdgeEntry& getEdgeData(const EdgeIterator &edgeItr) const { - return getEdgeEntry(edgeItr).getEdgeData(); + return PGraph::getEdgeEntry(edgeItr).getEdgeData(); } SimpleGraph toSimpleGraph() const { diff --git a/lib/CodeGen/PBQP/GraphBase.h b/lib/CodeGen/PBQP/GraphBase.h index cc3e017..0c7493b 100644 --- a/lib/CodeGen/PBQP/GraphBase.h +++ b/lib/CodeGen/PBQP/GraphBase.h @@ -298,7 +298,7 @@ public: for (ConstAdjEdgeIterator adjEdgeItr = adjEdgesBegin(node1Itr), adjEdgeEnd = adjEdgesEnd(node1Itr); - adjEdgeItr != adjEdgesEnd; ++adjEdgeItr) { + adjEdgeItr != adjEdgeEnd; ++adjEdgeItr) { if ((getEdgeNode1Itr(*adjEdgeItr) == node2Itr) || (getEdgeNode2Itr(*adjEdgeItr) == node2Itr)) { return *adjEdgeItr; diff --git a/lib/CodeGen/PBQP/HeuristicSolver.h b/lib/CodeGen/PBQP/HeuristicSolver.h index e786246..1670877 100644 --- a/lib/CodeGen/PBQP/HeuristicSolver.h +++ b/lib/CodeGen/PBQP/HeuristicSolver.h @@ -536,7 +536,7 @@ private: else reductionFinished = true; } - }; + } void processR1() { diff --git a/lib/CodeGen/PHIElimination.cpp b/lib/CodeGen/PHIElimination.cpp index c62d179..58c3dec 100644 --- a/lib/CodeGen/PHIElimination.cpp +++ b/lib/CodeGen/PHIElimination.cpp @@ -35,6 +35,7 @@ using namespace llvm; STATISTIC(NumAtomic, "Number of atomic phis lowered"); STATISTIC(NumSplits, "Number of critical edges split on demand"); +STATISTIC(NumReused, "Number of reused lowered phis"); char PHIElimination::ID = 0; static RegisterPass<PHIElimination> @@ -70,7 +71,7 @@ bool llvm::PHIElimination::runOnMachineFunction(MachineFunction &Fn) { Changed |= EliminatePHINodes(Fn, *I); // Remove dead IMPLICIT_DEF instructions. - for (SmallPtrSet<MachineInstr*,4>::iterator I = ImpDefs.begin(), + for (SmallPtrSet<MachineInstr*, 4>::iterator I = ImpDefs.begin(), E = ImpDefs.end(); I != E; ++I) { MachineInstr *DefMI = *I; unsigned DefReg = DefMI->getOperand(0).getReg(); @@ -78,6 +79,12 @@ bool llvm::PHIElimination::runOnMachineFunction(MachineFunction &Fn) { DefMI->eraseFromParent(); } + // Clean up the lowered PHI instructions. + for (LoweredPHIMap::iterator I = LoweredPHIs.begin(), E = LoweredPHIs.end(); + I != E; ++I) + Fn.DeleteMachineInstr(I->first); + + LoweredPHIs.clear(); ImpDefs.clear(); VRegPHIUseCount.clear(); return Changed; @@ -168,6 +175,7 @@ llvm::PHIElimination::FindCopyInsertPoint(MachineBasicBlock &MBB, void llvm::PHIElimination::LowerAtomicPHINode( MachineBasicBlock &MBB, MachineBasicBlock::iterator AfterPHIsIt) { + ++NumAtomic; // Unlink the PHI node from the basic block, but don't delete the PHI yet. 
MachineInstr *MPhi = MBB.remove(MBB.begin()); @@ -179,6 +187,7 @@ void llvm::PHIElimination::LowerAtomicPHINode( MachineFunction &MF = *MBB.getParent(); const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(DestReg); unsigned IncomingReg = 0; + bool reusedIncoming = false; // Is IncomingReg reused from an earlier PHI? // Insert a register to register copy at the top of the current block (but // after any remaining phi nodes) which copies the new incoming register @@ -190,7 +199,18 @@ void llvm::PHIElimination::LowerAtomicPHINode( BuildMI(MBB, AfterPHIsIt, MPhi->getDebugLoc(), TII->get(TargetInstrInfo::IMPLICIT_DEF), DestReg); else { - IncomingReg = MF.getRegInfo().createVirtualRegister(RC); + // Can we reuse an earlier PHI node? This only happens for critical edges, + // typically those created by tail duplication. + unsigned &entry = LoweredPHIs[MPhi]; + if (entry) { + // An identical PHI node was already lowered. Reuse the incoming register. + IncomingReg = entry; + reusedIncoming = true; + ++NumReused; + DEBUG(errs() << "Reusing %reg" << IncomingReg << " for " << *MPhi); + } else { + entry = IncomingReg = MF.getRegInfo().createVirtualRegister(RC); + } TII->copyRegToReg(MBB, AfterPHIsIt, DestReg, IncomingReg, RC, RC); } @@ -204,8 +224,20 @@ void llvm::PHIElimination::LowerAtomicPHINode( MachineInstr *PHICopy = prior(AfterPHIsIt); if (IncomingReg) { + LiveVariables::VarInfo &VI = LV->getVarInfo(IncomingReg); + // Increment use count of the newly created virtual register. - LV->getVarInfo(IncomingReg).NumUses++; + VI.NumUses++; + + // When we are reusing the incoming register, it may already have been + // killed in this block. The old kill will also have been inserted at + // AfterPHIsIt, so it appears before the current PHICopy. + if (reusedIncoming) + if (MachineInstr *OldKill = VI.findKill(&MBB)) { + DEBUG(errs() << "Remove old kill from " << *OldKill); + LV->removeVirtualRegisterKilled(IncomingReg, OldKill); + DEBUG(MBB.dump()); + } // Add information to LiveVariables to know that the incoming value is // killed. Note that because the value is defined in several places (once @@ -228,7 +260,7 @@ void llvm::PHIElimination::LowerAtomicPHINode( // Adjust the VRegPHIUseCount map to account for the removal of this PHI node. for (unsigned i = 1; i != MPhi->getNumOperands(); i += 2) - --VRegPHIUseCount[BBVRegPair(MPhi->getOperand(i + 1).getMBB(), + --VRegPHIUseCount[BBVRegPair(MPhi->getOperand(i+1).getMBB()->getNumber(), MPhi->getOperand(i).getReg())]; // Now loop over all of the incoming arguments, changing them to copy into the @@ -266,7 +298,8 @@ void llvm::PHIElimination::LowerAtomicPHINode( FindCopyInsertPoint(opBlock, MBB, SrcReg); // Insert the copy. - TII->copyRegToReg(opBlock, InsertPos, IncomingReg, SrcReg, RC, RC); + if (!reusedIncoming && IncomingReg) + TII->copyRegToReg(opBlock, InsertPos, IncomingReg, SrcReg, RC, RC); // Now update live variable information if we have it. Otherwise we're done if (!LV) continue; @@ -283,7 +316,7 @@ void llvm::PHIElimination::LowerAtomicPHINode( // point later. // Is it used by any PHI instructions in this block? - bool ValueIsUsed = VRegPHIUseCount[BBVRegPair(&opBlock, SrcReg)] != 0; + bool ValueIsUsed = VRegPHIUseCount[BBVRegPair(opBlock.getNumber(), SrcReg)]; // Okay, if we now know that the value is not live out of the block, we can // add a kill marker in this block saying that it kills the incoming value! 
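Note: the reuse path above only works because LoweredPHIs (introduced in the PHIElimination.h hunk further down) keys on PHI contents rather than instruction identity. The hashing idea, separated from the MachineInstr plumbing (a generic sketch; the key type and helper name are made up):

    #include "llvm/ADT/DenseMap.h"
    #include "llvm/ADT/SmallVector.h"
    using namespace llvm;

    // One (predecessor block number, incoming vreg) pair per PHI operand.
    typedef std::pair<unsigned, unsigned> BBVReg;

    // Two PHIs with identical operand lists hash alike on purpose, so a
    // DenseMap keyed this way can hand back the earlier lowered copy.
    static unsigned hashPHIOperands(const SmallVectorImpl<BBVReg> &Ops) {
      unsigned Hash = 0;
      for (unsigned i = 0, e = Ops.size(); i != e; ++i)
        Hash = Hash * 37 + DenseMapInfo<BBVReg>::getHashValue(Ops[i]);
      return Hash;
    }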
@@ -293,11 +326,10 @@ void llvm::PHIElimination::LowerAtomicPHINode( // terminator instruction at the end of the block may also use the value. // In this case, we should mark *it* as being the killing block, not the // copy. - MachineBasicBlock::iterator KillInst = prior(InsertPos); + MachineBasicBlock::iterator KillInst; MachineBasicBlock::iterator Term = opBlock.getFirstTerminator(); - if (Term != opBlock.end()) { - if (Term->readsRegister(SrcReg)) - KillInst = Term; + if (Term != opBlock.end() && Term->readsRegister(SrcReg)) { + KillInst = Term; // Check that no other terminators use values. #ifndef NDEBUG @@ -308,7 +340,17 @@ void llvm::PHIElimination::LowerAtomicPHINode( "they are the first terminator in a block!"); } #endif + } else if (reusedIncoming || !IncomingReg) { + // We may have to rewind a bit if we didn't insert a copy this time. + KillInst = Term; + while (KillInst != opBlock.begin()) + if ((--KillInst)->readsRegister(SrcReg)) + break; + } else { + // We just inserted this copy. + KillInst = prior(InsertPos); } + assert(KillInst->readsRegister(SrcReg) && "Cannot find kill instruction"); // Finally, mark it killed. LV->addVirtualRegisterKilled(SrcReg, KillInst); @@ -319,9 +361,9 @@ void llvm::PHIElimination::LowerAtomicPHINode( } } - // Really delete the PHI instruction now! - MF.DeleteMachineInstr(MPhi); - ++NumAtomic; + // Really delete the PHI instruction now, if it is not in the LoweredPHIs map. + if (reusedIncoming || !IncomingReg) + MF.DeleteMachineInstr(MPhi); } /// analyzePHINodes - Gather information about the PHI nodes in here. In @@ -335,14 +377,15 @@ void llvm::PHIElimination::analyzePHINodes(const MachineFunction& Fn) { for (MachineBasicBlock::const_iterator BBI = I->begin(), BBE = I->end(); BBI != BBE && BBI->getOpcode() == TargetInstrInfo::PHI; ++BBI) for (unsigned i = 1, e = BBI->getNumOperands(); i != e; i += 2) - ++VRegPHIUseCount[BBVRegPair(BBI->getOperand(i + 1).getMBB(), + ++VRegPHIUseCount[BBVRegPair(BBI->getOperand(i+1).getMBB()->getNumber(), BBI->getOperand(i).getReg())]; } bool llvm::PHIElimination::SplitPHIEdges(MachineFunction &MF, MachineBasicBlock &MBB, LiveVariables &LV) { - if (MBB.empty() || MBB.front().getOpcode() != TargetInstrInfo::PHI) + if (MBB.empty() || MBB.front().getOpcode() != TargetInstrInfo::PHI || + MBB.isLandingPad()) return false; // Quick exit for basic blocks without PHIs. for (MachineBasicBlock::const_iterator BBI = MBB.begin(), BBE = MBB.end(); @@ -408,3 +451,34 @@ MachineBasicBlock *PHIElimination::SplitCriticalEdge(MachineBasicBlock *A, return NMBB; } + +unsigned +PHIElimination::PHINodeTraits::getHashValue(const MachineInstr *MI) { + if (!MI || MI==getEmptyKey() || MI==getTombstoneKey()) + return DenseMapInfo<MachineInstr*>::getHashValue(MI); + unsigned hash = 0; + for (unsigned ni = 1, ne = MI->getNumOperands(); ni != ne; ni += 2) + hash = hash*37 + DenseMapInfo<BBVRegPair>:: + getHashValue(BBVRegPair(MI->getOperand(ni+1).getMBB()->getNumber(), + MI->getOperand(ni).getReg())); + return hash; +} + +bool PHIElimination::PHINodeTraits::isEqual(const MachineInstr *LHS, + const MachineInstr *RHS) { + const MachineInstr *EmptyKey = getEmptyKey(); + const MachineInstr *TombstoneKey = getTombstoneKey(); + if (!LHS || !RHS || LHS==EmptyKey || RHS==EmptyKey || + LHS==TombstoneKey || RHS==TombstoneKey) + return LHS==RHS; + + unsigned ne = LHS->getNumOperands(); + if (ne != RHS->getNumOperands()) + return false; + // Ignore operand 0, the defined register. 
+ for (unsigned ni = 1; ni != ne; ni += 2) + if (LHS->getOperand(ni).getReg() != RHS->getOperand(ni).getReg() || + LHS->getOperand(ni+1).getMBB() != RHS->getOperand(ni+1).getMBB()) + return false; + return true; +} diff --git a/lib/CodeGen/PHIElimination.h b/lib/CodeGen/PHIElimination.h index b0b71ce..1bcc9dc 100644 --- a/lib/CodeGen/PHIElimination.h +++ b/lib/CodeGen/PHIElimination.h @@ -16,8 +16,6 @@ #include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/Target/TargetInstrInfo.h" -#include <map> - namespace llvm { /// Lower PHI instructions to copies. @@ -120,8 +118,8 @@ namespace llvm { return I; } - typedef std::pair<const MachineBasicBlock*, unsigned> BBVRegPair; - typedef std::map<BBVRegPair, unsigned> VRegPHIUse; + typedef std::pair<unsigned, unsigned> BBVRegPair; + typedef DenseMap<BBVRegPair, unsigned> VRegPHIUse; VRegPHIUse VRegPHIUseCount; PHIDefMap PHIDefs; @@ -129,6 +127,17 @@ namespace llvm { // Defs of PHI sources which are implicit_def. SmallPtrSet<MachineInstr*, 4> ImpDefs; + + // Lowered PHI nodes may be reused. We provide special DenseMap traits to + // match PHI nodes with identical arguments. + struct PHINodeTraits : public DenseMapInfo<MachineInstr*> { + static unsigned getHashValue(const MachineInstr *PtrVal); + static bool isEqual(const MachineInstr *LHS, const MachineInstr *RHS); + }; + + // Map reusable lowered PHI node -> incoming join register. + typedef DenseMap<MachineInstr*, unsigned, PHINodeTraits> LoweredPHIMap; + LoweredPHIMap LoweredPHIs; }; } diff --git a/lib/CodeGen/PreAllocSplitting.cpp b/lib/CodeGen/PreAllocSplitting.cpp index b0d7a47..1c5222c 100644 --- a/lib/CodeGen/PreAllocSplitting.cpp +++ b/lib/CodeGen/PreAllocSplitting.cpp @@ -378,7 +378,7 @@ PreAllocSplitting::UpdateSpillSlotInterval(VNInfo *ValNo, SlotIndex SpillIndex, SmallPtrSet<MachineBasicBlock*, 4> Processed; SlotIndex EndIdx = LIs->getMBBEndIdx(MBB); - LiveRange SLR(SpillIndex, EndIdx.getNextSlot(), CurrSValNo); + LiveRange SLR(SpillIndex, EndIdx, CurrSValNo); CurrSLI->addRange(SLR); Processed.insert(MBB); @@ -475,7 +475,7 @@ PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI, SlotIndex EndIndex = LIs->getMBBEndIdx(MBB); RetVNI = NewVNs[Walker]; - LI->addRange(LiveRange(DefIndex, EndIndex.getNextSlot(), RetVNI)); + LI->addRange(LiveRange(DefIndex, EndIndex, RetVNI)); } else if (!ContainsDefs && ContainsUses) { SmallPtrSet<MachineInstr*, 2>& BlockUses = Uses[MBB]; @@ -511,8 +511,7 @@ PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI, UseIndex = UseIndex.getUseIndex(); SlotIndex EndIndex; if (IsIntraBlock) { - EndIndex = LIs->getInstructionIndex(UseI); - EndIndex = EndIndex.getUseIndex(); + EndIndex = LIs->getInstructionIndex(UseI).getDefIndex(); } else EndIndex = LIs->getMBBEndIdx(MBB); @@ -521,7 +520,7 @@ PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI, RetVNI = PerformPHIConstruction(Walker, MBB, LI, Visited, Defs, Uses, NewVNs, LiveOut, Phis, false, true); - LI->addRange(LiveRange(UseIndex, EndIndex.getNextSlot(), RetVNI)); + LI->addRange(LiveRange(UseIndex, EndIndex, RetVNI)); // FIXME: Need to set kills properly for inter-block stuff. if (RetVNI->isKill(UseIndex)) RetVNI->removeKill(UseIndex); @@ -571,8 +570,7 @@ PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI, StartIndex = foundDef ? 
StartIndex.getDefIndex() : StartIndex.getUseIndex(); SlotIndex EndIndex; if (IsIntraBlock) { - EndIndex = LIs->getInstructionIndex(UseI); - EndIndex = EndIndex.getUseIndex(); + EndIndex = LIs->getInstructionIndex(UseI).getDefIndex(); } else EndIndex = LIs->getMBBEndIdx(MBB); @@ -582,7 +580,7 @@ PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI, RetVNI = PerformPHIConstruction(Walker, MBB, LI, Visited, Defs, Uses, NewVNs, LiveOut, Phis, false, true); - LI->addRange(LiveRange(StartIndex, EndIndex.getNextSlot(), RetVNI)); + LI->addRange(LiveRange(StartIndex, EndIndex, RetVNI)); if (foundUse && RetVNI->isKill(StartIndex)) RetVNI->removeKill(StartIndex); @@ -663,7 +661,7 @@ PreAllocSplitting::PerformPHIConstructionFallBack(MachineBasicBlock::iterator Us for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator I = IncomingVNs.begin(), E = IncomingVNs.end(); I != E; ++I) { I->second->setHasPHIKill(true); - SlotIndex KillIndex = LIs->getMBBEndIdx(I->first); + SlotIndex KillIndex(LIs->getMBBEndIdx(I->first), true); if (!I->second->isKill(KillIndex)) I->second->addKill(KillIndex); } @@ -671,11 +669,10 @@ PreAllocSplitting::PerformPHIConstructionFallBack(MachineBasicBlock::iterator Us SlotIndex EndIndex; if (IsIntraBlock) { - EndIndex = LIs->getInstructionIndex(UseI); - EndIndex = EndIndex.getUseIndex(); + EndIndex = LIs->getInstructionIndex(UseI).getDefIndex(); } else EndIndex = LIs->getMBBEndIdx(MBB); - LI->addRange(LiveRange(StartIndex, EndIndex.getNextSlot(), RetVNI)); + LI->addRange(LiveRange(StartIndex, EndIndex, RetVNI)); if (IsIntraBlock) RetVNI->addKill(EndIndex); @@ -902,8 +899,6 @@ MachineInstr* PreAllocSplitting::FoldSpill(unsigned vreg, MachineBasicBlock* MBB, int& SS, SmallPtrSet<MachineInstr*, 4>& RefsInMBB) { - MachineBasicBlock::iterator Pt = MBB->begin(); - // Go top down if RefsInMBB is empty. if (RefsInMBB.empty()) return 0; diff --git a/lib/CodeGen/PrologEpilogInserter.cpp b/lib/CodeGen/PrologEpilogInserter.cpp index e94247f..709d46a 100644 --- a/lib/CodeGen/PrologEpilogInserter.cpp +++ b/lib/CodeGen/PrologEpilogInserter.cpp @@ -860,7 +860,7 @@ void PEI::scavengeFrameVirtualRegs(MachineFunction &Fn) { // Remove all instructions up 'til the last use, since they're // just calculating the value we already have. BB->erase(I, LastUseMI); - MI = I = LastUseMI; + I = LastUseMI; // Extend the live range of the scratch register PrevLastUseMI->getOperand(PrevLastUseOp).setIsKill(false); diff --git a/lib/CodeGen/RegAllocLinearScan.cpp b/lib/CodeGen/RegAllocLinearScan.cpp index c02d47b..9e97d89 100644 --- a/lib/CodeGen/RegAllocLinearScan.cpp +++ b/lib/CodeGen/RegAllocLinearScan.cpp @@ -891,7 +891,7 @@ namespace { const RALinScan &Allocator; public: - WeightCompare(const RALinScan &Alloc) : Allocator(Alloc) {}; + WeightCompare(const RALinScan &Alloc) : Allocator(Alloc) {} typedef std::pair<unsigned, float> RegWeightPair; bool operator()(const RegWeightPair &LHS, const RegWeightPair &RHS) const { diff --git a/lib/CodeGen/RegAllocLocal.cpp b/lib/CodeGen/RegAllocLocal.cpp index 7bb020a..aea5cff 100644 --- a/lib/CodeGen/RegAllocLocal.cpp +++ b/lib/CodeGen/RegAllocLocal.cpp @@ -233,14 +233,17 @@ namespace { /// in one of several ways: if the register is available in a physical /// register already, it uses that physical register. If the value is not /// in a physical register, and if there are physical registers available, - /// it loads it into a register. 
If register pressure is high, and it is - /// possible, it tries to fold the load of the virtual register into the - /// instruction itself. It avoids doing this if register pressure is low to - /// improve the chance that subsequent instructions can use the reloaded - /// value. This method returns the modified instruction. + /// it loads it into a register: PhysReg if that is an available physical + /// register, otherwise any physical register of the right class. + /// If register pressure is high, and it is possible, it tries to fold the + /// load of the virtual register into the instruction itself. It avoids + /// doing this if register pressure is low to improve the chance that + /// subsequent instructions can use the reloaded value. This method + /// returns the modified instruction. /// MachineInstr *reloadVirtReg(MachineBasicBlock &MBB, MachineInstr *MI, - unsigned OpNum, SmallSet<unsigned, 4> &RRegs); + unsigned OpNum, SmallSet<unsigned, 4> &RRegs, + unsigned PhysReg); /// ComputeLocalLiveness - Computes liveness of registers within a basic /// block, setting the killed/dead flags as appropriate. @@ -471,15 +474,17 @@ unsigned RALocal::getReg(MachineBasicBlock &MBB, MachineInstr *I, /// one of several ways: if the register is available in a physical register /// already, it uses that physical register. If the value is not in a physical /// register, and if there are physical registers available, it loads it into a +/// register: PhysReg if that is an available physical register, otherwise any /// register. If register pressure is high, and it is possible, it tries to /// fold the load of the virtual register into the instruction itself. It /// avoids doing this if register pressure is low to improve the chance that -/// subsequent instructions can use the reloaded value. This method returns the -/// modified instruction. +/// subsequent instructions can use the reloaded value. This method returns +/// the modified instruction. /// MachineInstr *RALocal::reloadVirtReg(MachineBasicBlock &MBB, MachineInstr *MI, unsigned OpNum, - SmallSet<unsigned, 4> &ReloadedRegs) { + SmallSet<unsigned, 4> &ReloadedRegs, + unsigned PhysReg) { unsigned VirtReg = MI->getOperand(OpNum).getReg(); // If the virtual register is already available, just update the instruction @@ -494,7 +499,11 @@ MachineInstr *RALocal::reloadVirtReg(MachineBasicBlock &MBB, MachineInstr *MI, // Otherwise, we need to fold it into the current instruction, or reload it. // If we have registers available to hold the value, use them. const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(VirtReg); - unsigned PhysReg = getFreeReg(RC); + // If we already have a PhysReg (this happens when the instruction is a + // reg-to-reg copy with a PhysReg destination) use that. + if (!PhysReg || !TargetRegisterInfo::isPhysicalRegister(PhysReg) || + !isPhysRegAvailable(PhysReg)) + PhysReg = getFreeReg(RC); int FrameIndex = getStackSpaceFor(VirtReg, RC); if (PhysReg) { // Register is available, allocate it! @@ -752,6 +761,12 @@ void RALocal::AllocateBasicBlock(MachineBasicBlock &MBB) { errs() << '\n'; }); + // Determine whether this is a copy instruction. The cases where the + // source or destination are phys regs are handled specially. + unsigned SrcCopyReg, DstCopyReg, SrcCopySubReg, DstCopySubReg; + bool isCopy = TII->isMoveInstr(*MI, SrcCopyReg, DstCopyReg, + SrcCopySubReg, DstCopySubReg); + // Loop over the implicit uses, making sure that they are at the head of the // use order list, so they don't get reallocated. 
if (TID.ImplicitUses) { @@ -835,7 +850,8 @@ void RALocal::AllocateBasicBlock(MachineBasicBlock &MBB) { // here we are looking for only used operands (never def&use) if (MO.isReg() && !MO.isDef() && MO.getReg() && !MO.isImplicit() && TargetRegisterInfo::isVirtualRegister(MO.getReg())) - MI = reloadVirtReg(MBB, MI, i, ReloadedRegs); + MI = reloadVirtReg(MBB, MI, i, ReloadedRegs, + isCopy ? DstCopyReg : 0); } // If this instruction is the last user of this register, kill the @@ -948,8 +964,17 @@ void RALocal::AllocateBasicBlock(MachineBasicBlock &MBB) { unsigned DestPhysReg; // If DestVirtReg already has a value, use it. - if (!(DestPhysReg = getVirt2PhysRegMapSlot(DestVirtReg))) - DestPhysReg = getReg(MBB, MI, DestVirtReg); + if (!(DestPhysReg = getVirt2PhysRegMapSlot(DestVirtReg))) { + // If this is a copy, the source reg is a phys reg, and + // that reg is available, use that phys reg for DestPhysReg. + if (isCopy && + TargetRegisterInfo::isPhysicalRegister(SrcCopyReg) && + isPhysRegAvailable(SrcCopyReg)) { + DestPhysReg = SrcCopyReg; + assignVirtToPhysReg(DestVirtReg, DestPhysReg); + } else + DestPhysReg = getReg(MBB, MI, DestVirtReg); + } MF->getRegInfo().setPhysRegUsed(DestPhysReg); markVirtRegModified(DestVirtReg); getVirtRegLastUse(DestVirtReg) = std::make_pair((MachineInstr*)0, 0); @@ -995,9 +1020,9 @@ void RALocal::AllocateBasicBlock(MachineBasicBlock &MBB) { // Finally, if this is a noop copy instruction, zap it. (Except that if // the copy is dead, it must be kept to avoid messing up liveness info for // the register scavenger. See pr4100.) - unsigned SrcReg, DstReg, SrcSubReg, DstSubReg; - if (TII->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) && - SrcReg == DstReg && DeadDefs.empty()) + if (TII->isMoveInstr(*MI, SrcCopyReg, DstCopyReg, + SrcCopySubReg, DstCopySubReg) && + SrcCopyReg == DstCopyReg && DeadDefs.empty()) MBB.erase(MI); } diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index 2b52187..e6aa14c 100644 --- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -2755,7 +2755,34 @@ SDValue DAGCombiner::visitSRL(SDNode *N) { if (N1C && SimplifyDemandedBits(SDValue(N, 0))) return SDValue(N, 0); - return N1C ? visitShiftByConstant(N, N1C->getZExtValue()) : SDValue(); + if (N1C) { + SDValue NewSRL = visitShiftByConstant(N, N1C->getZExtValue()); + if (NewSRL.getNode()) + return NewSRL; + } + + // Here is a common situation. We want to optimize: + // + // %a = ... + // %b = and i32 %a, 2 + // %c = srl i32 %b, 1 + // brcond i32 %c ... + // + // into + // + // %a = ... + // %b = and %a, 2 + // %c = setcc eq %b, 0 + // brcond %c ... + // + // However, after the source operand of the SRL is optimized into an AND, the + // SRL itself may not be optimized further. Look for it and add the BRCOND into + // the worklist.
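The worklist re-add that opens the next hunk implements the comment above: once the AND feeding the SRL is rewritten, the BRCOND user may become simplifiable even though the SRL node itself is done. The general pattern, stripped of SelectionDAG specifics (a sketch with made-up types):

    #include <vector>

    struct Node { std::vector<Node *> Users; bool InWorklist = false; };

    // After a node changes, its users may combine in new ways; push them
    // back on the worklist so a later pass revisits them.
    void requeueUsers(Node *N, std::vector<Node *> &Worklist) {
      for (size_t i = 0, e = N->Users.size(); i != e; ++i)
        if (!N->Users[i]->InWorklist) {
          N->Users[i]->InWorklist = true;
          Worklist.push_back(N->Users[i]);
        }
    }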
+ if (N->hasOneUse() && + N->use_begin()->getOpcode() == ISD::BRCOND) + AddToWorkList(*N->use_begin()); + + return SDValue(); } SDValue DAGCombiner::visitCTLZ(SDNode *N) { @@ -3202,19 +3229,6 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) { X, DAG.getConstant(Mask, VT)); } - // Fold (zext (and x, cst)) -> (and (zext x), cst) - if (N0.getOpcode() == ISD::AND && - N0.getOperand(1).getOpcode() == ISD::Constant && - N0.getOperand(0).getOpcode() != ISD::TRUNCATE && - N0.getOperand(0).hasOneUse()) { - APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); - Mask.zext(VT.getSizeInBits()); - return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, - DAG.getNode(ISD::ZERO_EXTEND, N->getDebugLoc(), VT, - N0.getOperand(0)), - DAG.getConstant(Mask, VT)); - } - // fold (zext (load x)) -> (zext (truncate (zextload x))) if (ISD::isNON_EXTLoad(N0.getNode()) && ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) || diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp index 4ead9c9..33694f2 100644 --- a/lib/CodeGen/SelectionDAG/FastISel.cpp +++ b/lib/CodeGen/SelectionDAG/FastISel.cpp @@ -43,7 +43,6 @@ #include "llvm/GlobalVariable.h" #include "llvm/Instructions.h" #include "llvm/IntrinsicInst.h" -#include "llvm/LLVMContext.h" #include "llvm/CodeGen/FastISel.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineModuleInfo.h" @@ -334,7 +333,7 @@ bool FastISel::SelectCall(User *I) { return true; case Intrinsic::dbg_declare: { DbgDeclareInst *DI = cast<DbgDeclareInst>(I); - if (!isValidDebugInfoIntrinsic(*DI, CodeGenOpt::None) || !DW + if (!DIDescriptor::ValidDebugInfo(DI->getVariable(), CodeGenOpt::None)||!DW || !DW->ShouldEmitDwarfDebug()) return true; @@ -349,11 +348,8 @@ bool FastISel::SelectCall(User *I) { if (SI == StaticAllocaMap.end()) break; // VLAs. int FI = SI->second; if (MMI) { - MetadataContext &TheMetadata = - DI->getParent()->getContext().getMetadata(); - unsigned MDDbgKind = TheMetadata.getMDKind("dbg"); - MDNode *Dbg = TheMetadata.getMD(MDDbgKind, DI); - MMI->setVariableDbgInfo(DI->getVariable(), FI, Dbg); + if (MDNode *Dbg = DI->getMetadata("dbg")) + MMI->setVariableDbgInfo(DI->getVariable(), FI, Dbg); } return true; } @@ -548,9 +544,6 @@ FastISel::SelectInstruction(Instruction *I) { /// the CFG. void FastISel::FastEmitBranch(MachineBasicBlock *MSucc) { - MachineFunction::iterator NextMBB = - llvm::next(MachineFunction::iterator(MBB)); - if (MBB->isLayoutSuccessor(MSucc)) { // The unconditional fall-through case, which needs no instructions. 
} else { diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp index f9c05d0..474d833 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp @@ -187,7 +187,6 @@ SDValue SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT, EVT VT, DebugLoc dl, SDValue N1, SDValue N2, SmallVectorImpl<int> &Mask) const { - EVT EltVT = NVT.getVectorElementType(); unsigned NumMaskElts = VT.getVectorNumElements(); unsigned NumDestElts = NVT.getVectorNumElements(); unsigned NumEltsGrowth = NumDestElts / NumMaskElts; @@ -461,8 +460,7 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG, !ST->getMemoryVT().isVector() && "Unaligned store of unknown type."); // Get the half-size VT - EVT NewStoredVT = - (MVT::SimpleValueType)(ST->getMemoryVT().getSimpleVT().SimpleTy - 1); + EVT NewStoredVT = ST->getMemoryVT().getHalfSizedIntegerVT(*DAG.getContext()); int NumBits = NewStoredVT.getSizeInBits(); int IncrementSize = NumBits / 8; @@ -1170,8 +1168,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { Tmp2 = LegalizeOp(Ch); } else if (SrcWidth & (SrcWidth - 1)) { // If not loading a power-of-2 number of bits, expand as two loads. - assert(SrcVT.isExtended() && !SrcVT.isVector() && - "Unsupported extload!"); + assert(!SrcVT.isVector() && "Unsupported extload!"); unsigned RoundWidth = 1 << Log2_32(SrcWidth); assert(RoundWidth < SrcWidth); unsigned ExtraWidth = SrcWidth - RoundWidth; @@ -1384,8 +1381,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { SVOffset, NVT, isVolatile, Alignment); } else if (StWidth & (StWidth - 1)) { // If not storing a power-of-2 number of bits, expand as two stores. - assert(StVT.isExtended() && !StVT.isVector() && - "Unsupported truncstore!"); + assert(!StVT.isVector() && "Unsupported truncstore!"); unsigned RoundWidth = 1 << Log2_32(StWidth); assert(RoundWidth < StWidth); unsigned ExtraWidth = StWidth - RoundWidth; @@ -1869,7 +1865,7 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, 0, TLI.getLibcallCallingConv(LC), false, /*isReturnValueUsed=*/true, Callee, Args, DAG, - Node->getDebugLoc()); + Node->getDebugLoc(), DAG.GetOrdering(Node)); // Legalize the call sequence, starting with the chain. 
This will advance // the LastCALLSEQ_END to the legalized version of the CALLSEQ_END node that @@ -2274,7 +2270,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node, false, false, false, false, 0, CallingConv::C, false, /*isReturnValueUsed=*/true, DAG.getExternalSymbol("abort", TLI.getPointerTy()), - Args, DAG, dl); + Args, DAG, dl, DAG.GetOrdering(Node)); Results.push_back(CallResult.second); break; } @@ -2750,7 +2746,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node, SDValue RHS = Node->getOperand(1); SDValue BottomHalf; SDValue TopHalf; - static unsigned Ops[2][3] = + static const unsigned Ops[2][3] = { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND }, { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }}; bool isSigned = Node->getOpcode() == ISD::SMULO; @@ -2967,7 +2963,7 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node, break; case ISD::BSWAP: { unsigned DiffBits = NVT.getSizeInBits() - OVT.getSizeInBits(); - Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Tmp1); + Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, Node->getOperand(0)); Tmp1 = DAG.getNode(ISD::BSWAP, dl, NVT, Tmp1); Tmp1 = DAG.getNode(ISD::SRL, dl, NVT, Tmp1, DAG.getConstant(DiffBits, TLI.getShiftAmountTy())); diff --git a/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp index 84e39b4..2831617 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp @@ -637,7 +637,8 @@ void DAGTypeLegalizer::SoftenSetCCOperands(SDValue &NewLHS, SDValue &NewRHS, } } - EVT RetVT = MVT::i32; // FIXME: is this the correct return type? + // Use the target-specific return type for comparison lib calls. + EVT RetVT = TLI.getCmpLibcallReturnType(); SDValue Ops[2] = { LHSInt, RHSInt }; NewLHS = MakeLibCall(LC1, RetVT, Ops, 2, false/*sign irrelevant*/, dl); NewRHS = DAG.getConstant(0, RetVT); diff --git a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp index 2f4457e..bd3b97a 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp @@ -2026,8 +2026,6 @@ void DAGTypeLegalizer::IntegerExpandSetCCOperands(SDValue &NewLHS, GetExpandedInteger(NewLHS, LHSLo, LHSHi); GetExpandedInteger(NewRHS, RHSLo, RHSHi); - EVT VT = NewLHS.getValueType(); - if (CCCode == ISD::SETEQ || CCCode == ISD::SETNE) { if (RHSLo == RHSHi) { if (ConstantSDNode *RHSCST = dyn_cast<ConstantSDNode>(RHSLo)) { diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp index 003cea7..d9efd4f 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp @@ -1033,7 +1033,8 @@ SDValue DAGTypeLegalizer::MakeLibCall(RTLIB::Libcall LC, EVT RetVT, TLI.LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false, false, 0, TLI.getLibcallCallingConv(LC), false, /*isReturnValueUsed=*/true, - Callee, Args, DAG, dl); + Callee, Args, DAG, dl, + DAG.GetOrdering(DAG.getEntryNode().getNode())); return CallInfo.first; } diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp index dbd3e39..a1b6ced 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp @@ -464,7 +464,6 @@ void DAGTypeLegalizer::SplitRes_SELECT_CC(SDNode *N, SDValue &Lo, void DAGTypeLegalizer::SplitRes_UNDEF(SDNode *N, SDValue &Lo, SDValue &Hi) { EVT LoVT, HiVT; - DebugLoc dl = N->getDebugLoc();
GetSplitDestVTs(N->getValueType(0), LoVT, HiVT); Lo = DAG.getUNDEF(LoVT); Hi = DAG.getUNDEF(HiVT); diff --git a/lib/CodeGen/SelectionDAG/SDNodeOrdering.h b/lib/CodeGen/SelectionDAG/SDNodeOrdering.h new file mode 100644 index 0000000..f88b26d --- /dev/null +++ b/lib/CodeGen/SelectionDAG/SDNodeOrdering.h @@ -0,0 +1,54 @@ +//===-- llvm/CodeGen/SDNodeOrdering.h - SDNode Ordering ---------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares the SDNodeOrdering class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_SDNODEORDERING_H +#define LLVM_CODEGEN_SDNODEORDERING_H + +#include "llvm/ADT/DenseMap.h" + +namespace llvm { + +class SDNode; + +/// SDNodeOrdering - Maps each SDNode to a unique (monotonically increasing) +/// value that roughly corresponds to the order of the original LLVM +/// instructions. This is used when scheduling is turned off: we forgo the +/// normal scheduling algorithms and output the instructions according to +/// this ordering. +class SDNodeOrdering { + DenseMap<const SDNode*, unsigned> OrderMap; + + void operator=(const SDNodeOrdering&); // Do not implement. + SDNodeOrdering(const SDNodeOrdering&); // Do not implement. +public: + SDNodeOrdering() {} + + void add(const SDNode *Node, unsigned O) { + OrderMap[Node] = O; + } + void remove(const SDNode *Node) { + DenseMap<const SDNode*, unsigned>::iterator Itr = OrderMap.find(Node); + if (Itr != OrderMap.end()) + OrderMap.erase(Itr); + } + void clear() { + OrderMap.clear(); + } + unsigned getOrder(const SDNode *Node) { + return OrderMap[Node]; + } +}; + +} // end llvm namespace + +#endif diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp index b2ee8bb..d53de34 100644 --- a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp +++ b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp @@ -20,16 +20,10 @@ #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetRegisterInfo.h" #include "llvm/Target/TargetSubtarget.h" -#include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; -cl::opt<bool> -DisableInstScheduling("disable-inst-scheduling", - cl::init(false), - cl::desc("Disable instruction scheduling")); - ScheduleDAGSDNodes::ScheduleDAGSDNodes(MachineFunction &mf) : ScheduleDAG(mf) { } diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index da55e6b..77301b0 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -10,7 +10,9 @@ // This implements the SelectionDAG class. // //===----------------------------------------------------------------------===// + #include "llvm/CodeGen/SelectionDAG.h" +#include "SDNodeOrdering.h" #include "llvm/Constants.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/Function.h" @@ -48,8 +50,6 @@ #include <cmath> using namespace llvm; -extern cl::opt<bool> DisableInstScheduling; - /// makeVTList - Return an instance of the SDVTList struct initialized with the /// specified members.
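One behavior of the SDNodeOrdering header introduced above is easy to miss: getOrder() reads the map with DenseMap::operator[], which value-initializes missing entries, so a node that was never add()ed reports order 0, and callers treat 0 as "unordered". A toy model of that contract, with std::unordered_map standing in for DenseMap:

    #include <cassert>
    #include <unordered_map>

    struct Node {};

    // Same lookup semantics as SDNodeOrdering: operator[] default-constructs
    // the mapped value, so unmapped nodes read back as order 0.
    struct Ordering {
      std::unordered_map<const Node*, unsigned> OrderMap;
      void add(const Node *N, unsigned O) { OrderMap[N] = O; }
      unsigned getOrder(const Node *N) { return OrderMap[N]; }
    };

    int main() {
      Node A, B;
      Ordering Ord;
      Ord.add(&A, 7);
      assert(Ord.getOrder(&A) == 7 && Ord.getOrder(&B) == 0);
      return 0;
    }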
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) { @@ -554,9 +554,6 @@ void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes, } DeallocateNode(N); - - // Remove the ordering of this node. - if (Ordering) Ordering->remove(N); } } @@ -582,9 +579,6 @@ void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) { N->DropOperands(); DeallocateNode(N); - - // Remove the ordering of this node. - if (Ordering) Ordering->remove(N); } void SelectionDAG::DeallocateNode(SDNode *N) { @@ -703,7 +697,6 @@ SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op, AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 1); AddNodeIDCustom(ID, N); SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos); - if (Ordering) Ordering->remove(Node); return Node; } @@ -722,7 +715,6 @@ SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 2); AddNodeIDCustom(ID, N); SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos); - if (Ordering) Ordering->remove(Node); return Node; } @@ -741,7 +733,6 @@ SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, NumOps); AddNodeIDCustom(ID, N); SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos); - if (Ordering) Ordering->remove(Node); return Node; } @@ -798,10 +789,8 @@ SelectionDAG::SelectionDAG(TargetLowering &tli, FunctionLoweringInfo &fli) getVTList(MVT::Other)), Root(getEntryNode()), Ordering(0) { AllNodes.push_back(&EntryNode); - if (DisableInstScheduling) { - Ordering = new NodeOrdering(); - Ordering->add(&EntryNode); - } + if (DisableScheduling) + Ordering = new SDNodeOrdering(); } void SelectionDAG::init(MachineFunction &mf, MachineModuleInfo *mmi, @@ -840,10 +829,8 @@ void SelectionDAG::clear() { EntryNode.UseList = 0; AllNodes.push_back(&EntryNode); Root = getEntryNode(); - if (DisableInstScheduling) { - Ordering = new NodeOrdering(); - Ordering->add(&EntryNode); - } + if (DisableScheduling) + Ordering = new SDNodeOrdering(); } SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, DebugLoc DL, EVT VT) { @@ -904,17 +891,15 @@ SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT) { ID.AddPointer(&Val); void *IP = 0; SDNode *N = NULL; - if ((N = CSEMap.FindNodeOrInsertPos(ID, IP))) { - if (Ordering) Ordering->add(N); + if ((N = CSEMap.FindNodeOrInsertPos(ID, IP))) if (!VT.isVector()) return SDValue(N, 0); - } + if (!N) { N = NodeAllocator.Allocate<ConstantSDNode>(); new (N) ConstantSDNode(isT, &Val, EltVT); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - if (Ordering) Ordering->add(N); } SDValue Result(N, 0); @@ -951,17 +936,15 @@ SDValue SelectionDAG::getConstantFP(const ConstantFP& V, EVT VT, bool isTarget){ ID.AddPointer(&V); void *IP = 0; SDNode *N = NULL; - if ((N = CSEMap.FindNodeOrInsertPos(ID, IP))) { - if (Ordering) Ordering->add(N); + if ((N = CSEMap.FindNodeOrInsertPos(ID, IP))) if (!VT.isVector()) return SDValue(N, 0); - } + if (!N) { N = NodeAllocator.Allocate<ConstantFPSDNode>(); new (N) ConstantFPSDNode(isTarget, &V, EltVT); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - if (Ordering) Ordering->add(N); } SDValue Result(N, 0); @@ -1016,15 +999,13 @@ SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, ID.AddInteger(Offset); ID.AddInteger(TargetFlags); void *IP = 0; - if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { - if (Ordering) Ordering->add(E); + if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) return SDValue(E, 0); - } + SDNode *N = 
NodeAllocator.Allocate<GlobalAddressSDNode>(); new (N) GlobalAddressSDNode(Opc, GV, VT, Offset, TargetFlags); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - if (Ordering) Ordering->add(N); return SDValue(N, 0); } @@ -1034,15 +1015,13 @@ SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) { AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0); ID.AddInteger(FI); void *IP = 0; - if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { - if (Ordering) Ordering->add(E); + if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) return SDValue(E, 0); - } + SDNode *N = NodeAllocator.Allocate<FrameIndexSDNode>(); new (N) FrameIndexSDNode(FI, VT, isTarget); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - if (Ordering) Ordering->add(N); return SDValue(N, 0); } @@ -1056,15 +1035,13 @@ SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget, ID.AddInteger(JTI); ID.AddInteger(TargetFlags); void *IP = 0; - if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { - if (Ordering) Ordering->add(E); + if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) return SDValue(E, 0); - } + SDNode *N = NodeAllocator.Allocate<JumpTableSDNode>(); new (N) JumpTableSDNode(JTI, VT, isTarget, TargetFlags); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - if (Ordering) Ordering->add(N); return SDValue(N, 0); } @@ -1084,15 +1061,13 @@ SDValue SelectionDAG::getConstantPool(Constant *C, EVT VT, ID.AddPointer(C); ID.AddInteger(TargetFlags); void *IP = 0; - if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { - if (Ordering) Ordering->add(E); + if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) return SDValue(E, 0); - } + SDNode *N = NodeAllocator.Allocate<ConstantPoolSDNode>(); new (N) ConstantPoolSDNode(isTarget, C, VT, Offset, Alignment, TargetFlags); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - if (Ordering) Ordering->add(N); return SDValue(N, 0); } @@ -1113,15 +1088,13 @@ SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT, C->AddSelectionDAGCSEId(ID); ID.AddInteger(TargetFlags); void *IP = 0; - if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { - if (Ordering) Ordering->add(E); + if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) return SDValue(E, 0); - } + SDNode *N = NodeAllocator.Allocate<ConstantPoolSDNode>(); new (N) ConstantPoolSDNode(isTarget, C, VT, Offset, Alignment, TargetFlags); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - if (Ordering) Ordering->add(N); return SDValue(N, 0); } @@ -1130,15 +1103,13 @@ SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) { AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), 0, 0); ID.AddPointer(MBB); void *IP = 0; - if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { - if (Ordering) Ordering->add(E); + if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) return SDValue(E, 0); - } + SDNode *N = NodeAllocator.Allocate<BasicBlockSDNode>(); new (N) BasicBlockSDNode(MBB); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - if (Ordering) Ordering->add(N); return SDValue(N, 0); } @@ -1154,7 +1125,6 @@ SDValue SelectionDAG::getValueType(EVT VT) { N = NodeAllocator.Allocate<VTSDNode>(); new (N) VTSDNode(VT); AllNodes.push_back(N); - if (Ordering) Ordering->add(N); return SDValue(N, 0); } @@ -1164,7 +1134,6 @@ SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) { N = NodeAllocator.Allocate<ExternalSymbolSDNode>(); new (N) ExternalSymbolSDNode(false, Sym, 0, VT); AllNodes.push_back(N); - if (Ordering) Ordering->add(N); return SDValue(N, 0); } @@ -1177,7 +1146,6 @@ SDValue 
SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT, N = NodeAllocator.Allocate<ExternalSymbolSDNode>(); new (N) ExternalSymbolSDNode(true, Sym, TargetFlags, VT); AllNodes.push_back(N); - if (Ordering) Ordering->add(N); return SDValue(N, 0); } @@ -1190,8 +1158,8 @@ SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) { new (N) CondCodeSDNode(Cond); CondCodeNodes[Cond] = N; AllNodes.push_back(N); - if (Ordering) Ordering->add(N); } + return SDValue(CondCodeNodes[Cond], 0); } @@ -1283,10 +1251,8 @@ SDValue SelectionDAG::getVectorShuffle(EVT VT, DebugLoc dl, SDValue N1, ID.AddInteger(MaskVec[i]); void* IP = 0; - if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { - if (Ordering) Ordering->add(E); + if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) return SDValue(E, 0); - } // Allocate the mask array for the node out of the BumpPtrAllocator, since // SDNode doesn't have access to it. This memory will be "leaked" when @@ -1298,7 +1264,6 @@ SDValue SelectionDAG::getVectorShuffle(EVT VT, DebugLoc dl, SDValue N1, new (N) ShuffleVectorSDNode(VT, dl, N1, N2, MaskAlloc); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - if (Ordering) Ordering->add(N); return SDValue(N, 0); } @@ -1316,15 +1281,13 @@ SDValue SelectionDAG::getConvertRndSat(EVT VT, DebugLoc dl, SDValue Ops[] = { Val, DTy, STy, Rnd, Sat }; AddNodeIDNode(ID, ISD::CONVERT_RNDSAT, getVTList(VT), &Ops[0], 5); void* IP = 0; - if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { - if (Ordering) Ordering->add(E); + if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) return SDValue(E, 0); - } + CvtRndSatSDNode *N = NodeAllocator.Allocate<CvtRndSatSDNode>(); new (N) CvtRndSatSDNode(VT, dl, Ops, 5, Code); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - if (Ordering) Ordering->add(N); return SDValue(N, 0); } @@ -1333,15 +1296,13 @@ SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) { AddNodeIDNode(ID, ISD::Register, getVTList(VT), 0, 0); ID.AddInteger(RegNo); void *IP = 0; - if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { - if (Ordering) Ordering->add(E); + if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) return SDValue(E, 0); - } + SDNode *N = NodeAllocator.Allocate<RegisterSDNode>(); new (N) RegisterSDNode(RegNo, VT); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - if (Ordering) Ordering->add(N); return SDValue(N, 0); } @@ -1353,15 +1314,13 @@ SDValue SelectionDAG::getLabel(unsigned Opcode, DebugLoc dl, AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), &Ops[0], 1); ID.AddInteger(LabelID); void *IP = 0; - if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { - if (Ordering) Ordering->add(E); + if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) return SDValue(E, 0); - } + SDNode *N = NodeAllocator.Allocate<LabelSDNode>(); new (N) LabelSDNode(Opcode, dl, Root, LabelID); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - if (Ordering) Ordering->add(N); return SDValue(N, 0); } @@ -1375,15 +1334,13 @@ SDValue SelectionDAG::getBlockAddress(BlockAddress *BA, EVT VT, ID.AddPointer(BA); ID.AddInteger(TargetFlags); void *IP = 0; - if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { - if (Ordering) Ordering->add(E); + if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) return SDValue(E, 0); - } + SDNode *N = NodeAllocator.Allocate<BlockAddressSDNode>(); new (N) BlockAddressSDNode(Opc, VT, BA, TargetFlags); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - if (Ordering) Ordering->add(N); return SDValue(N, 0); } @@ -1396,16 +1353,13 @@ SDValue SelectionDAG::getSrcValue(const Value *V) { ID.AddPointer(V); void *IP = 0; - 
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { - if (Ordering) Ordering->add(E); + if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) return SDValue(E, 0); - } SDNode *N = NodeAllocator.Allocate<SrcValueSDNode>(); new (N) SrcValueSDNode(V); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - if (Ordering) Ordering->add(N); return SDValue(N, 0); } @@ -2316,16 +2270,14 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT) { FoldingSetNodeID ID; AddNodeIDNode(ID, Opcode, getVTList(VT), 0, 0); void *IP = 0; - if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { - if (Ordering) Ordering->add(E); + if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) return SDValue(E, 0); - } + SDNode *N = NodeAllocator.Allocate<SDNode>(); new (N) SDNode(Opcode, DL, getVTList(VT)); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - if (Ordering) Ordering->add(N); #ifndef NDEBUG VerifyNode(N); #endif @@ -2549,10 +2501,9 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDValue Ops[1] = { Operand }; AddNodeIDNode(ID, Opcode, VTs, Ops, 1); void *IP = 0; - if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { - if (Ordering) Ordering->add(E); + if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) return SDValue(E, 0); - } + N = NodeAllocator.Allocate<UnarySDNode>(); new (N) UnarySDNode(Opcode, DL, VTs, Operand); CSEMap.InsertNode(N, IP); @@ -2562,7 +2513,6 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, } AllNodes.push_back(N); - if (Ordering) Ordering->add(N); #ifndef NDEBUG VerifyNode(N); #endif @@ -2970,10 +2920,9 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT, FoldingSetNodeID ID; AddNodeIDNode(ID, Opcode, VTs, Ops, 2); void *IP = 0; - if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { - if (Ordering) Ordering->add(E); + if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) return SDValue(E, 0); - } + N = NodeAllocator.Allocate<BinarySDNode>(); new (N) BinarySDNode(Opcode, DL, VTs, N1, N2); CSEMap.InsertNode(N, IP); @@ -2983,7 +2932,6 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT, } AllNodes.push_back(N); - if (Ordering) Ordering->add(N); #ifndef NDEBUG VerifyNode(N); #endif @@ -3050,10 +2998,9 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT, FoldingSetNodeID ID; AddNodeIDNode(ID, Opcode, VTs, Ops, 3); void *IP = 0; - if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { - if (Ordering) Ordering->add(E); + if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) return SDValue(E, 0); - } + N = NodeAllocator.Allocate<TernarySDNode>(); new (N) TernarySDNode(Opcode, DL, VTs, N1, N2, N3); CSEMap.InsertNode(N, IP); @@ -3063,7 +3010,6 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT, } AllNodes.push_back(N); - if (Ordering) Ordering->add(N); #ifndef NDEBUG VerifyNode(N); #endif @@ -3503,7 +3449,7 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst, /*isReturnValueUsed=*/false, getExternalSymbol(TLI.getLibcallName(RTLIB::MEMCPY), TLI.getPointerTy()), - Args, *this, dl); + Args, *this, dl, GetOrdering(Chain.getNode())); return CallResult.second; } @@ -3552,7 +3498,7 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst, /*isReturnValueUsed=*/false, getExternalSymbol(TLI.getLibcallName(RTLIB::MEMMOVE), TLI.getPointerTy()), - Args, *this, dl); + Args, *this, dl, GetOrdering(Chain.getNode())); return CallResult.second; } @@ -3611,7 +3557,7 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst, /*isReturnValueUsed=*/false, 
getExternalSymbol(TLI.getLibcallName(RTLIB::MEMSET), TLI.getPointerTy()), - Args, *this, dl); + Args, *this, dl, GetOrdering(Chain.getNode())); return CallResult.second; } @@ -3659,14 +3605,12 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT, void* IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { cast<AtomicSDNode>(E)->refineAlignment(MMO); - if (Ordering) Ordering->add(E); return SDValue(E, 0); } SDNode* N = NodeAllocator.Allocate<AtomicSDNode>(); new (N) AtomicSDNode(Opcode, dl, VTs, MemVT, Chain, Ptr, Cmp, Swp, MMO); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - if (Ordering) Ordering->add(N); return SDValue(N, 0); } @@ -3724,14 +3668,12 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT, void* IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { cast<AtomicSDNode>(E)->refineAlignment(MMO); - if (Ordering) Ordering->add(E); return SDValue(E, 0); } SDNode* N = NodeAllocator.Allocate<AtomicSDNode>(); new (N) AtomicSDNode(Opcode, dl, VTs, MemVT, Chain, Ptr, Val, MMO); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - if (Ordering) Ordering->add(N); return SDValue(N, 0); } @@ -3804,7 +3746,6 @@ SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList, void *IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO); - if (Ordering) Ordering->add(E); return SDValue(E, 0); } @@ -3816,7 +3757,6 @@ SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList, new (N) MemIntrinsicSDNode(Opcode, dl, VTList, Ops, NumOps, MemVT, MMO); } AllNodes.push_back(N); - if (Ordering) Ordering->add(N); return SDValue(N, 0); } @@ -3881,14 +3821,12 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, DebugLoc dl, void *IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { cast<LoadSDNode>(E)->refineAlignment(MMO); - if (Ordering) Ordering->add(E); return SDValue(E, 0); } SDNode *N = NodeAllocator.Allocate<LoadSDNode>(); new (N) LoadSDNode(Ops, dl, VTs, AM, ExtType, MemVT, MMO); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - if (Ordering) Ordering->add(N); return SDValue(N, 0); } @@ -3959,14 +3897,12 @@ SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val, void *IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { cast<StoreSDNode>(E)->refineAlignment(MMO); - if (Ordering) Ordering->add(E); return SDValue(E, 0); } SDNode *N = NodeAllocator.Allocate<StoreSDNode>(); new (N) StoreSDNode(Ops, dl, VTs, ISD::UNINDEXED, false, VT, MMO); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - if (Ordering) Ordering->add(N); return SDValue(N, 0); } @@ -4021,14 +3957,12 @@ SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val, void *IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { cast<StoreSDNode>(E)->refineAlignment(MMO); - if (Ordering) Ordering->add(E); return SDValue(E, 0); } SDNode *N = NodeAllocator.Allocate<StoreSDNode>(); new (N) StoreSDNode(Ops, dl, VTs, ISD::UNINDEXED, true, SVT, MMO); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - if (Ordering) Ordering->add(N); return SDValue(N, 0); } @@ -4045,17 +3979,15 @@ SelectionDAG::getIndexedStore(SDValue OrigStore, DebugLoc dl, SDValue Base, ID.AddInteger(ST->getMemoryVT().getRawBits()); ID.AddInteger(ST->getRawSubclassData()); void *IP = 0; - if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { - if (Ordering) Ordering->add(E); + if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) return SDValue(E, 0); - } + SDNode *N = 
NodeAllocator.Allocate<StoreSDNode>(); new (N) StoreSDNode(Ops, dl, VTs, AM, ST->isTruncatingStore(), ST->getMemoryVT(), ST->getMemOperand()); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - if (Ordering) Ordering->add(N); return SDValue(N, 0); } @@ -4121,10 +4053,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT, AddNodeIDNode(ID, Opcode, VTs, Ops, NumOps); void *IP = 0; - if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { - if (Ordering) Ordering->add(E); + if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) return SDValue(E, 0); - } N = NodeAllocator.Allocate<SDNode>(); new (N) SDNode(Opcode, DL, VTs, Ops, NumOps); @@ -4135,7 +4065,6 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT, } AllNodes.push_back(N); - if (Ordering) Ordering->add(N); #ifndef NDEBUG VerifyNode(N); #endif @@ -4191,10 +4120,9 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList, FoldingSetNodeID ID; AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps); void *IP = 0; - if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { - if (Ordering) Ordering->add(E); + if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) return SDValue(E, 0); - } + if (NumOps == 1) { N = NodeAllocator.Allocate<UnarySDNode>(); new (N) UnarySDNode(Opcode, DL, VTList, Ops[0]); @@ -4225,7 +4153,6 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList, } } AllNodes.push_back(N); - if (Ordering) Ordering->add(N); #ifndef NDEBUG VerifyNode(N); #endif @@ -4325,6 +4252,7 @@ SDVTList SelectionDAG::getVTList(const EVT *VTs, unsigned NumVTs) { case 1: return getVTList(VTs[0]); case 2: return getVTList(VTs[0], VTs[1]); case 3: return getVTList(VTs[0], VTs[1], VTs[2]); + case 4: return getVTList(VTs[0], VTs[1], VTs[2], VTs[3]); default: break; } @@ -4688,10 +4616,8 @@ SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, if (VTs.VTs[VTs.NumVTs-1] != MVT::Flag) { FoldingSetNodeID ID; AddNodeIDNode(ID, Opc, VTs, Ops, NumOps); - if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP)) { - if (Ordering) Ordering->add(ON); + if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP)) return ON; - } } if (!RemoveNodeFromCSEMaps(N)) @@ -4755,7 +4681,6 @@ SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, if (IP) CSEMap.InsertNode(N, IP); // Memoize the new node. - if (Ordering) Ordering->add(N); return N; } @@ -4894,10 +4819,8 @@ SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc DL, SDVTList VTs, FoldingSetNodeID ID; AddNodeIDNode(ID, ~Opcode, VTs, Ops, NumOps); IP = 0; - if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { - if (Ordering) Ordering->add(E); + if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) return cast<MachineSDNode>(E); - } } // Allocate a new MachineSDNode. @@ -4919,7 +4842,6 @@ SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc DL, SDVTList VTs, CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - if (Ordering) Ordering->add(N); #ifndef NDEBUG VerifyNode(N); #endif @@ -4956,10 +4878,8 @@ SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList, FoldingSetNodeID ID; AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps); void *IP = 0; - if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) { - if (Ordering) Ordering->add(E); + if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) return E; - } } return NULL; } @@ -5291,6 +5211,18 @@ unsigned SelectionDAG::AssignTopologicalOrder() { return DAGSize; } +/// AssignOrdering - Assign an order to the SDNode. 
+void SelectionDAG::AssignOrdering(SDNode *SD, unsigned Order) { + assert(SD && "Trying to assign an order to a null node!"); + if (Ordering) + Ordering->add(SD, Order); +} + +/// GetOrdering - Get the order for the SDNode. +unsigned SelectionDAG::GetOrdering(const SDNode *SD) const { + assert(SD && "Trying to get the order of a null node!"); + return Ordering ? Ordering->getOrder(SD) : 0; +} //===----------------------------------------------------------------------===// @@ -5931,6 +5863,10 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const { if (unsigned int TF = BA->getTargetFlags()) OS << " [TF=" << TF << ']'; } + + if (G) + if (unsigned Order = G->GetOrdering(this)) + OS << " [ORD=" << Order << ']'; } void SDNode::print(raw_ostream &OS, const SelectionDAG *G) const { @@ -6126,9 +6062,6 @@ void SelectionDAG::dump() const { errs() << "\n\n"; } -void SelectionDAG::NodeOrdering::dump() const { -} - void SDNode::printr(raw_ostream &OS, const SelectionDAG *G) const { print_types(OS, G); print_details(OS, G); @@ -6139,25 +6072,31 @@ static void DumpNodesr(raw_ostream &OS, const SDNode *N, unsigned indent, const SelectionDAG *G, VisitedSDNodeSet &once) { if (!once.insert(N)) // If we've been here before, return now. return; + // Dump the current SDNode, but don't end the line yet. OS << std::string(indent, ' '); N->printr(OS, G); + // Having printed this SDNode, walk the children: for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { const SDNode *child = N->getOperand(i).getNode(); + if (i) OS << ","; OS << " "; + if (child->getNumOperands() == 0) { // This child has no grandchildren; print it inline right here. child->printr(OS, G); once.insert(child); - } else { // Just the address. FIXME: also print the child's opcode + } else { // Just the address. FIXME: also print the child's opcode. OS << (void*)child; if (unsigned RN = N->getOperand(i).getResNo()) OS << ":" << RN; } } + OS << "\n"; + // Dump children that have grandchildren on their own line(s). for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { const SDNode *child = N->getOperand(i).getNode(); diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 7568384..74d624f 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -17,6 +17,7 @@ #include "llvm/ADT/BitVector.h" #include "llvm/ADT/SmallSet.h" #include "llvm/Analysis/AliasAnalysis.h" +#include "llvm/Analysis/ConstantFolding.h" #include "llvm/Constants.h" #include "llvm/CallingConv.h" #include "llvm/DerivedTypes.h" @@ -26,7 +27,6 @@ #include "llvm/Instructions.h" #include "llvm/Intrinsics.h" #include "llvm/IntrinsicInst.h" -#include "llvm/LLVMContext.h" #include "llvm/Module.h" #include "llvm/CodeGen/FastISel.h" #include "llvm/CodeGen/GCStrategy.h" @@ -70,13 +70,13 @@ LimitFPPrecision("limit-float-precision", namespace { /// RegsForValue - This struct represents the registers (physical or virtual) - /// that a particular set of values is assigned, and the type information about - /// the value. The most common situation is to represent one value at a time, - /// but struct or array values are handled element-wise as multiple values. - /// The splitting of aggregates is performed recursively, so that we never - /// have aggregate-typed registers. The values at this point do not necessarily - /// have legal types, so each value may require one or more registers of some - /// legal type. 
+ /// that a particular set of values is assigned, and the type information + /// about the value. The most common situation is to represent one value at a + /// time, but struct or array values are handled element-wise as multiple + /// values. The splitting of aggregates is performed recursively, so that we + /// never have aggregate-typed registers. The values at this point do not + /// necessarily have legal types, so each value may require one or more + /// registers of some legal type. /// struct RegsForValue { /// TLI - The TargetLowering object. @@ -144,22 +144,23 @@ namespace { /// this value and returns the result as a ValueVTs value. This uses /// Chain/Flag as the input and updates them for the output Chain/Flag. /// If the Flag pointer is NULL, no flag is used. - SDValue getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl, - SDValue &Chain, SDValue *Flag) const; + SDValue getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl, unsigned Order, + SDValue &Chain, SDValue *Flag) const; /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the /// specified value into the registers specified by this object. This uses /// Chain/Flag as the input and updates them for the output Chain/Flag. /// If the Flag pointer is NULL, no flag is used. void getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl, - SDValue &Chain, SDValue *Flag) const; + unsigned Order, SDValue &Chain, SDValue *Flag) const; /// AddInlineAsmOperands - Add this value to the specified inlineasm node /// operand list. This adds the code marker, matching input operand index /// (if applicable), and includes the number of values added into it. void AddInlineAsmOperands(unsigned Code, bool HasMatching, unsigned MatchingIdx, - SelectionDAG &DAG, std::vector<SDValue> &Ops) const; + SelectionDAG &DAG, unsigned Order, + std::vector<SDValue> &Ops) const; }; } @@ -168,13 +169,14 @@ namespace { /// larger then ValueVT then AssertOp can be used to specify whether the extra /// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT /// (ISD::AssertSext). -static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl, +static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl, unsigned Order, const SDValue *Parts, unsigned NumParts, EVT PartVT, EVT ValueVT, ISD::NodeType AssertOp = ISD::DELETED_NODE) { assert(NumParts > 0 && "No parts to assemble!"); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); SDValue Val = Parts[0]; + if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order); if (NumParts > 1) { // Assemble the value from multiple parts. @@ -193,23 +195,32 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl, EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2); if (RoundParts > 2) { - Lo = getCopyFromParts(DAG, dl, Parts, RoundParts/2, PartVT, HalfVT); - Hi = getCopyFromParts(DAG, dl, Parts+RoundParts/2, RoundParts/2, + Lo = getCopyFromParts(DAG, dl, Order, Parts, RoundParts / 2, PartVT, HalfVT); + Hi = getCopyFromParts(DAG, dl, Order, Parts + RoundParts / 2, + RoundParts / 2, PartVT, HalfVT); } else { Lo = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[0]); Hi = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[1]); } + if (TLI.isBigEndian()) std::swap(Lo, Hi); + Val = DAG.getNode(ISD::BUILD_PAIR, dl, RoundVT, Lo, Hi); + if (DisableScheduling) { + DAG.AssignOrdering(Lo.getNode(), Order); + DAG.AssignOrdering(Hi.getNode(), Order); + DAG.AssignOrdering(Val.getNode(), Order); + } + if (RoundParts < NumParts) { // Assemble the trailing non-power-of-2 part. 
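Concretely, for the hunk that continues below: assembling three 8-bit parts into a 24-bit integer first combines the power-of-2 prefix (RoundParts = 2) and then ORs the extended odd part on top, shifted by the round width. An arithmetic model of that assembly, using plain integers instead of SDValues:

    #include <cassert>
    #include <cstdint>

    // Three little-endian 8-bit parts -> 0x667788: combine the round pair,
    // then merge the trailing odd part with the zext/shl/or sequence that
    // the code below builds as DAG nodes.
    int main() {
      uint8_t Parts[3] = {0x88, 0x77, 0x66};
      uint32_t Round = Parts[0] | (uint32_t)Parts[1] << 8; // power-of-2 chunk
      uint32_t Odd = Parts[2];                             // trailing part
      uint32_t Val = Round | Odd << 16;  // zext(Lo) | (ext(Hi) << RoundBits)
      assert(Val == 0x667788);
      return 0;
    }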
unsigned OddParts = NumParts - RoundParts; EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits); - Hi = getCopyFromParts(DAG, dl, - Parts+RoundParts, OddParts, PartVT, OddVT); + Hi = getCopyFromParts(DAG, dl, Order, + Parts + RoundParts, OddParts, PartVT, OddVT); // Combine the round and odd parts. Lo = Val; @@ -217,22 +228,28 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl, std::swap(Lo, Hi); EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits); Hi = DAG.getNode(ISD::ANY_EXTEND, dl, TotalVT, Hi); + if (DisableScheduling) DAG.AssignOrdering(Hi.getNode(), Order); Hi = DAG.getNode(ISD::SHL, dl, TotalVT, Hi, DAG.getConstant(Lo.getValueType().getSizeInBits(), TLI.getPointerTy())); + if (DisableScheduling) DAG.AssignOrdering(Hi.getNode(), Order); Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, TotalVT, Lo); + if (DisableScheduling) DAG.AssignOrdering(Lo.getNode(), Order); Val = DAG.getNode(ISD::OR, dl, TotalVT, Lo, Hi); + if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order); } } else if (ValueVT.isVector()) { // Handle a multi-element vector. EVT IntermediateVT, RegisterVT; unsigned NumIntermediates; unsigned NumRegs = - TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT, + TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT, NumIntermediates, RegisterVT); - assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!"); + assert(NumRegs == NumParts + && "Part count doesn't match vector breakdown!"); NumParts = NumRegs; // Silence a compiler warning. - assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!"); + assert(RegisterVT == PartVT + && "Part type doesn't match vector breakdown!"); assert(RegisterVT == Parts[0].getValueType() && "Part type doesn't match part!"); @@ -242,24 +259,25 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl, // If the register was not expanded, truncate or copy the value, // as appropriate. for (unsigned i = 0; i != NumParts; ++i) - Ops[i] = getCopyFromParts(DAG, dl, &Parts[i], 1, + Ops[i] = getCopyFromParts(DAG, dl, Order, &Parts[i], 1, PartVT, IntermediateVT); } else if (NumParts > 0) { - // If the intermediate type was expanded, build the intermediate operands - // from the parts. + // If the intermediate type was expanded, build the intermediate + // operands from the parts. assert(NumParts % NumIntermediates == 0 && "Must expand into a divisible number of parts!"); unsigned Factor = NumParts / NumIntermediates; for (unsigned i = 0; i != NumIntermediates; ++i) - Ops[i] = getCopyFromParts(DAG, dl, &Parts[i * Factor], Factor, + Ops[i] = getCopyFromParts(DAG, dl, Order, &Parts[i * Factor], Factor, PartVT, IntermediateVT); } - // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the intermediate - // operands. + // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the + // intermediate operands. Val = DAG.getNode(IntermediateVT.isVector() ? 
ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR, dl, ValueVT, &Ops[0], NumIntermediates); + if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order); } else if (PartVT.isFloatingPoint()) { // FP split into multiple FP parts (for ppcf128) assert(ValueVT == EVT(MVT::ppcf128) && PartVT == EVT(MVT::f64) && @@ -270,12 +288,18 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl, if (TLI.isBigEndian()) std::swap(Lo, Hi); Val = DAG.getNode(ISD::BUILD_PAIR, dl, ValueVT, Lo, Hi); + + if (DisableScheduling) { + DAG.AssignOrdering(Hi.getNode(), Order); + DAG.AssignOrdering(Lo.getNode(), Order); + DAG.AssignOrdering(Val.getNode(), Order); + } } else { // FP split into integer parts (soft fp) assert(ValueVT.isFloatingPoint() && PartVT.isInteger() && !PartVT.isVector() && "Unexpected split"); EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits()); - Val = getCopyFromParts(DAG, dl, Parts, NumParts, PartVT, IntVT); + Val = getCopyFromParts(DAG, dl, Order, Parts, NumParts, PartVT, IntVT); } } @@ -287,14 +311,20 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl, if (PartVT.isVector()) { assert(ValueVT.isVector() && "Unknown vector conversion!"); - return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val); + SDValue Res = DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), Order); + return Res; } if (ValueVT.isVector()) { assert(ValueVT.getVectorElementType() == PartVT && ValueVT.getVectorNumElements() == 1 && "Only trivial scalar-to-vector conversions should get here!"); - return DAG.getNode(ISD::BUILD_VECTOR, dl, ValueVT, Val); + SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, ValueVT, Val); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), Order); + return Res; } if (PartVT.isInteger() && @@ -306,22 +336,36 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl, if (AssertOp != ISD::DELETED_NODE) Val = DAG.getNode(AssertOp, dl, PartVT, Val, DAG.getValueType(ValueVT)); - return DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val); + if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order); + Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val); + if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order); + return Val; } else { - return DAG.getNode(ISD::ANY_EXTEND, dl, ValueVT, Val); + Val = DAG.getNode(ISD::ANY_EXTEND, dl, ValueVT, Val); + if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order); + return Val; } } if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) { - if (ValueVT.bitsLT(Val.getValueType())) + if (ValueVT.bitsLT(Val.getValueType())) { // FP_ROUND's are always exact here. 
- return DAG.getNode(ISD::FP_ROUND, dl, ValueVT, Val, - DAG.getIntPtrConstant(1)); - return DAG.getNode(ISD::FP_EXTEND, dl, ValueVT, Val); + Val = DAG.getNode(ISD::FP_ROUND, dl, ValueVT, Val, + DAG.getIntPtrConstant(1)); + if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order); + return Val; + } + + Val = DAG.getNode(ISD::FP_EXTEND, dl, ValueVT, Val); + if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order); + return Val; } - if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) - return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val); + if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) { + Val = DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val); + if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order); + return Val; + } llvm_unreachable("Unknown mismatch!"); return SDValue(); @@ -330,8 +374,9 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl, /// getCopyToParts - Create a series of nodes that contain the specified value /// split into legal parts. If the parts contain more bits than Val, then, for /// integers, ExtendKind can be used to specify how to generate the extra bits. -static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, SDValue Val, - SDValue *Parts, unsigned NumParts, EVT PartVT, +static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, unsigned Order, + SDValue Val, SDValue *Parts, unsigned NumParts, + EVT PartVT, ISD::NodeType ExtendKind = ISD::ANY_EXTEND) { const TargetLowering &TLI = DAG.getTargetLoweringInfo(); EVT PtrVT = TLI.getPointerTy(); @@ -375,6 +420,8 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, SDValue Val, } } + if (DisableScheduling) DAG.AssignOrdering(Val.getNode(), Order); + // The value may have changed - recompute ValueVT. ValueVT = Val.getValueType(); assert(NumParts * PartBits == ValueVT.getSizeInBits() && @@ -397,20 +444,33 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, SDValue Val, SDValue OddVal = DAG.getNode(ISD::SRL, dl, ValueVT, Val, DAG.getConstant(RoundBits, TLI.getPointerTy())); - getCopyToParts(DAG, dl, OddVal, Parts + RoundParts, OddParts, PartVT); + getCopyToParts(DAG, dl, Order, OddVal, Parts + RoundParts, + OddParts, PartVT); + if (TLI.isBigEndian()) // The odd parts were reversed by getCopyToParts - unreverse them. std::reverse(Parts + RoundParts, Parts + NumParts); + NumParts = RoundParts; ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits); Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val); + + if (DisableScheduling) { + DAG.AssignOrdering(OddVal.getNode(), Order); + DAG.AssignOrdering(Val.getNode(), Order); + } } // The number of parts is a power of 2. Repeatedly bisect the value using // EXTRACT_ELEMENT. 
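The bisection loop that follows can be pictured with plain integers: a 64-bit value becomes four 16-bit little-endian parts by repeatedly splitting each chunk into a low and a high half, which is exactly what the EXTRACT_ELEMENT pair computes at each step. A standalone model:

    #include <cassert>
    #include <cstdint>

    // Repeatedly bisect 0x1122334455667788 into four 16-bit parts.
    int main() {
      uint64_t Parts[4] = {0x1122334455667788ULL, 0, 0, 0};
      for (unsigned StepSize = 4; StepSize > 1; StepSize /= 2)
        for (unsigned i = 0; i < 4; i += StepSize) {
          unsigned HalfBits = StepSize * 16 / 2;         // ThisBits
          uint64_t Whole = Parts[i];
          Parts[i] = Whole & ((1ULL << HalfBits) - 1);   // EXTRACT_ELEMENT 0
          Parts[i + StepSize / 2] = Whole >> HalfBits;   // EXTRACT_ELEMENT 1
        }
      assert(Parts[0] == 0x7788 && Parts[1] == 0x5566 &&
             Parts[2] == 0x3344 && Parts[3] == 0x1122);
      return 0;
    }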
Parts[0] = DAG.getNode(ISD::BIT_CONVERT, dl, - EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits()), + EVT::getIntegerVT(*DAG.getContext(), + ValueVT.getSizeInBits()), Val); + + if (DisableScheduling) + DAG.AssignOrdering(Parts[0].getNode(), Order); + for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) { for (unsigned i = 0; i < NumParts; i += StepSize) { unsigned ThisBits = StepSize * PartBits / 2; @@ -425,11 +485,20 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, SDValue Val, ThisVT, Part0, DAG.getConstant(0, PtrVT)); + if (DisableScheduling) { + DAG.AssignOrdering(Part0.getNode(), Order); + DAG.AssignOrdering(Part1.getNode(), Order); + } + if (ThisBits == PartBits && ThisVT != PartVT) { Part0 = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Part0); Part1 = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Part1); + if (DisableScheduling) { + DAG.AssignOrdering(Part0.getNode(), Order); + DAG.AssignOrdering(Part1.getNode(), Order); + } } } } @@ -443,7 +512,7 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, SDValue Val, // Vector ValueVT. if (NumParts == 1) { if (PartVT != ValueVT) { - if (PartVT.isVector()) { + if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) { Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val); } else { assert(ValueVT.getVectorElementType() == PartVT && @@ -455,6 +524,9 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, SDValue Val, } } + if (DisableScheduling) + DAG.AssignOrdering(Val.getNode(), Order); + Parts[0] = Val; return; } @@ -472,7 +544,7 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, SDValue Val, // Split the vector into intermediate operands. SmallVector<SDValue, 8> Ops(NumIntermediates); - for (unsigned i = 0; i != NumIntermediates; ++i) + for (unsigned i = 0; i != NumIntermediates; ++i) { if (IntermediateVT.isVector()) Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, IntermediateVT, Val, @@ -483,12 +555,16 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, SDValue Val, IntermediateVT, Val, DAG.getConstant(i, PtrVT)); + if (DisableScheduling) + DAG.AssignOrdering(Ops[i].getNode(), Order); + } + // Split the intermediate operands into legal parts. if (NumParts == NumIntermediates) { // If the register was not expanded, promote or copy the value, // as appropriate. for (unsigned i = 0; i != NumParts; ++i) - getCopyToParts(DAG, dl, Ops[i], &Parts[i], 1, PartVT); + getCopyToParts(DAG, dl, Order, Ops[i], &Parts[i], 1, PartVT); } else if (NumParts > 0) { // If the intermediate type was expanded, split each the value into // legal parts. @@ -496,7 +572,7 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, SDValue Val, "Must expand into a divisible number of parts!"); unsigned Factor = NumParts / NumIntermediates; for (unsigned i = 0; i != NumIntermediates; ++i) - getCopyToParts(DAG, dl, Ops[i], &Parts[i * Factor], Factor, PartVT); + getCopyToParts(DAG, dl, Order, Ops[i], &Parts[i*Factor], Factor, PartVT); } } @@ -583,8 +659,8 @@ void SelectionDAGBuilder::visit(Instruction &I) { } void SelectionDAGBuilder::visit(unsigned Opcode, User &I) { - // Tell the DAG that we're processing a new instruction. - DAG.NewInst(); + // We're processing a new instruction. + ++SDNodeOrder; // Note: this doesn't use InstVisitor, because it has to work with // ConstantExpr's in addition to instructions. 
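The ++SDNodeOrder above is the counter that feeds DAG.AssignOrdering throughout this file: it is bumped once per visited IR instruction, and every SDNode built while lowering that instruction is stamped with the same value, which is what lets the DisableScheduling path emit nodes in source order. A toy model of the stamping discipline:

    #include <cassert>
    #include <vector>

    struct ToyNode { unsigned Order; };

    // All nodes created while visiting one instruction share its stamp.
    int main() {
      unsigned SDNodeOrder = 0;
      std::vector<ToyNode> Nodes;
      for (unsigned Inst = 0; Inst != 3; ++Inst) {
        ++SDNodeOrder;                // once per IR instruction, as in visit()
        Nodes.push_back(ToyNode{SDNodeOrder});   // AssignOrdering per node
        Nodes.push_back(ToyNode{SDNodeOrder});
      }
      assert(Nodes[0].Order == 1 && Nodes[1].Order == 1 && Nodes[4].Order == 3);
      return 0;
    }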
@@ -592,7 +668,7 @@ void SelectionDAGBuilder::visit(unsigned Opcode, User &I) { default: llvm_unreachable("Unknown instruction type encountered!"); // Build the switch statement using the Instruction.def file. #define HANDLE_INST(NUM, OPCODE, CLASS) \ - case Instruction::OPCODE:return visit##OPCODE((CLASS&)I); + case Instruction::OPCODE: return visit##OPCODE((CLASS&)I); #include "llvm/Instruction.def" } } @@ -638,8 +714,12 @@ SDValue SelectionDAGBuilder::getValue(const Value *V) { for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i) Constants.push_back(SDValue(Val, i)); } - return DAG.getMergeValues(&Constants[0], Constants.size(), - getCurDebugLoc()); + + SDValue Res = DAG.getMergeValues(&Constants[0], Constants.size(), + getCurDebugLoc()); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); + return Res; } if (isa<StructType>(C->getType()) || isa<ArrayType>(C->getType())) { @@ -661,7 +741,12 @@ SDValue SelectionDAGBuilder::getValue(const Value *V) { else Constants[i] = DAG.getConstant(0, EltVT); } - return DAG.getMergeValues(&Constants[0], NumElts, getCurDebugLoc()); + + SDValue Res = DAG.getMergeValues(&Constants[0], NumElts, + getCurDebugLoc()); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); + return Res; } if (BlockAddress *BA = dyn_cast<BlockAddress>(C)) @@ -689,8 +774,12 @@ SDValue SelectionDAGBuilder::getValue(const Value *V) { } // Create a BUILD_VECTOR node. - return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(), - VT, &Ops[0], Ops.size()); + SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(), + VT, &Ops[0], Ops.size()); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); + + return NodeMap[V] = Res; } // If this is a static alloca, generate it as the frameindex instead of @@ -707,7 +796,8 @@ SDValue SelectionDAGBuilder::getValue(const Value *V) { RegsForValue RFV(*DAG.getContext(), TLI, InReg, V->getType()); SDValue Chain = DAG.getEntryNode(); - return RFV.getCopyFromRegs(DAG, getCurDebugLoc(), Chain, NULL); + return RFV.getCopyFromRegs(DAG, getCurDebugLoc(), + SDNodeOrder, Chain, NULL); } /// Get the EVTs and ArgFlags collections that represent the return type @@ -766,7 +856,7 @@ void SelectionDAGBuilder::visitRet(ReturnInst &I) { SDValue Chain = getControlRoot(); SmallVector<ISD::OutputArg, 8> Outs; FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo(); - + if (!FLI.CanLowerReturn) { unsigned DemoteReg = FLI.DemoteRegister; const Function *F = I.getParent()->getParent(); @@ -775,12 +865,12 @@ void SelectionDAGBuilder::visitRet(ReturnInst &I) { // Leave Outs empty so that LowerReturn won't try to load return // registers the usual way. 
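For the demoted-return path the comment above introduces (the loop just below builds one store per value plus a TokenFactor): when FLI.CanLowerReturn is false, the aggregate is returned by storing through a hidden pointer held in DemoteRegister rather than through return registers. At the source level the effect is roughly this (Big and f_demoted are illustrative names, not from the patch):

    // "return s;" for an oversized struct becomes member stores through a
    // hidden sret-style pointer parameter, at the ComputeValueVTs offsets.
    struct Big { long A, B, C, D; };

    void f_demoted(Big *SRet, long X) {
      SRet->A = X;
      SRet->B = X + 1;
      SRet->C = X + 2;
      SRet->D = X + 3;
    }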
SmallVector<EVT, 1> PtrValueVTs; - ComputeValueVTs(TLI, PointerType::getUnqual(F->getReturnType()), + ComputeValueVTs(TLI, PointerType::getUnqual(F->getReturnType()), PtrValueVTs); SDValue RetPtr = DAG.getRegister(DemoteReg, PtrValueVTs[0]); SDValue RetOp = getValue(I.getOperand(0)); - + SmallVector<EVT, 4> ValueVTs; SmallVector<uint64_t, 4> Offsets; ComputeValueVTs(TLI, I.getOperand(0)->getType(), ValueVTs, &Offsets); @@ -788,22 +878,32 @@ void SelectionDAGBuilder::visitRet(ReturnInst &I) { SmallVector<SDValue, 4> Chains(NumValues); EVT PtrVT = PtrValueVTs[0]; - for (unsigned i = 0; i != NumValues; ++i) - Chains[i] = DAG.getStore(Chain, getCurDebugLoc(), - SDValue(RetOp.getNode(), RetOp.getResNo() + i), - DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT, RetPtr, - DAG.getConstant(Offsets[i], PtrVT)), - NULL, Offsets[i], false, 0); + for (unsigned i = 0; i != NumValues; ++i) { + SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT, RetPtr, + DAG.getConstant(Offsets[i], PtrVT)); + Chains[i] = + DAG.getStore(Chain, getCurDebugLoc(), + SDValue(RetOp.getNode(), RetOp.getResNo() + i), + Add, NULL, Offsets[i], false, 0); + + if (DisableScheduling) { + DAG.AssignOrdering(Add.getNode(), SDNodeOrder); + DAG.AssignOrdering(Chains[i].getNode(), SDNodeOrder); + } + } + Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other, &Chains[0], NumValues); - } - else { + + if (DisableScheduling) + DAG.AssignOrdering(Chain.getNode(), SDNodeOrder); + } else { for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) { SmallVector<EVT, 4> ValueVTs; ComputeValueVTs(TLI, I.getOperand(i)->getType(), ValueVTs); unsigned NumValues = ValueVTs.size(); if (NumValues == 0) continue; - + SDValue RetOp = getValue(I.getOperand(i)); for (unsigned j = 0, f = NumValues; j != f; ++j) { EVT VT = ValueVTs[j]; @@ -816,8 +916,8 @@ void SelectionDAGBuilder::visitRet(ReturnInst &I) { else if (F->paramHasAttr(0, Attribute::ZExt)) ExtendKind = ISD::ZERO_EXTEND; - // FIXME: C calling convention requires the return type to be promoted to - // at least 32-bit. But this is not necessary for non-C calling + // FIXME: C calling convention requires the return type to be promoted + // to at least 32-bit. But this is not necessary for non-C calling // conventions. The frontend should mark functions whose return values // require promoting with signext or zeroext attributes. if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) { @@ -829,7 +929,7 @@ void SelectionDAGBuilder::visitRet(ReturnInst &I) { unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), VT); EVT PartVT = TLI.getRegisterType(*DAG.getContext(), VT); SmallVector<SDValue, 4> Parts(NumParts); - getCopyToParts(DAG, getCurDebugLoc(), + getCopyToParts(DAG, getCurDebugLoc(), SDNodeOrder, SDValue(RetOp.getNode(), RetOp.getResNo() + j), &Parts[0], NumParts, PartVT, ExtendKind); @@ -862,6 +962,9 @@ void SelectionDAGBuilder::visitRet(ReturnInst &I) { // Update the DAG with the new chain value resulting from return lowering. DAG.setRoot(Chain); + + if (DisableScheduling) + DAG.AssignOrdering(Chain.getNode(), SDNodeOrder); } /// CopyToExportRegsIfNeeded - If the given value has virtual registers @@ -1110,10 +1213,16 @@ void SelectionDAGBuilder::visitBr(BranchInst &I) { CurMBB->addSuccessor(Succ0MBB); // If this is not a fall-through branch, emit the branch. 
- if (Succ0MBB != NextBlock) - DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), + if (Succ0MBB != NextBlock) { + SDValue V = DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, getControlRoot(), - DAG.getBasicBlock(Succ0MBB))); + DAG.getBasicBlock(Succ0MBB)); + DAG.setRoot(V); + + if (DisableScheduling) + DAG.AssignOrdering(V.getNode(), SDNodeOrder); + } + return; } @@ -1172,6 +1281,7 @@ void SelectionDAGBuilder::visitBr(BranchInst &I) { // Create a CaseBlock record representing this branch. CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()), NULL, Succ0MBB, Succ1MBB, CurMBB); + // Use visitSwitchCase to actually insert the fast branch sequence for this // cond branch. visitSwitchCase(CB); @@ -1217,6 +1327,9 @@ void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB) { } } + if (DisableScheduling) + DAG.AssignOrdering(Cond.getNode(), SDNodeOrder); + // Update successor info CurMBB->addSuccessor(CB.TrueBB); CurMBB->addSuccessor(CB.FalseBB); @@ -1234,26 +1347,36 @@ void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB) { std::swap(CB.TrueBB, CB.FalseBB); SDValue True = DAG.getConstant(1, Cond.getValueType()); Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True); + + if (DisableScheduling) + DAG.AssignOrdering(Cond.getNode(), SDNodeOrder); } + SDValue BrCond = DAG.getNode(ISD::BRCOND, dl, MVT::Other, getControlRoot(), Cond, DAG.getBasicBlock(CB.TrueBB)); + if (DisableScheduling) + DAG.AssignOrdering(BrCond.getNode(), SDNodeOrder); + // If the branch was constant folded, fix up the CFG. if (BrCond.getOpcode() == ISD::BR) { CurMBB->removeSuccessor(CB.FalseBB); - DAG.setRoot(BrCond); } else { // Otherwise, go ahead and insert the false branch. if (BrCond == getControlRoot()) CurMBB->removeSuccessor(CB.TrueBB); - if (CB.FalseBB == NextBlock) - DAG.setRoot(BrCond); - else - DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, BrCond, - DAG.getBasicBlock(CB.FalseBB))); + if (CB.FalseBB != NextBlock) { + BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond, + DAG.getBasicBlock(CB.FalseBB)); + + if (DisableScheduling) + DAG.AssignOrdering(BrCond.getNode(), SDNodeOrder); + } } + + DAG.setRoot(BrCond); } /// visitJumpTable - Emit JumpTable node in the current MBB @@ -1264,9 +1387,16 @@ void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) { SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(), JT.Reg, PTy); SDValue Table = DAG.getJumpTable(JT.JTI, PTy); - DAG.setRoot(DAG.getNode(ISD::BR_JT, getCurDebugLoc(), - MVT::Other, Index.getValue(1), - Table, Index)); + SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurDebugLoc(), + MVT::Other, Index.getValue(1), + Table, Index); + DAG.setRoot(BrJumpTable); + + if (DisableScheduling) { + DAG.AssignOrdering(Index.getNode(), SDNodeOrder); + DAG.AssignOrdering(Table.getNode(), SDNodeOrder); + DAG.AssignOrdering(BrJumpTable.getNode(), SDNodeOrder); + } } /// visitJumpTableHeader - This function emits necessary code to produce index @@ -1278,7 +1408,7 @@ void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT, // difference between smallest and largest cases. 
SDValue SwitchOp = getValue(JTH.SValue); EVT VT = SwitchOp.getValueType(); - SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp, + SDValue Sub = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp, DAG.getConstant(JTH.First, VT)); // The SDNode we just created, which holds the value being switched on minus @@ -1286,7 +1416,7 @@ void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT, // can be used as an index into the jump table in a subsequent basic block. // This value may be smaller or larger than the target's pointer type, and // therefore require extension or truncating. - SwitchOp = DAG.getZExtOrTrunc(SUB, getCurDebugLoc(), TLI.getPointerTy()); + SwitchOp = DAG.getZExtOrTrunc(Sub, getCurDebugLoc(), TLI.getPointerTy()); unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy()); SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(), @@ -1297,14 +1427,22 @@ void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT, // for the switch statement if the value being switched on exceeds the largest // case in the switch. SDValue CMP = DAG.getSetCC(getCurDebugLoc(), - TLI.getSetCCResultType(SUB.getValueType()), SUB, + TLI.getSetCCResultType(Sub.getValueType()), Sub, DAG.getConstant(JTH.Last-JTH.First,VT), ISD::SETUGT); + if (DisableScheduling) { + DAG.AssignOrdering(Sub.getNode(), SDNodeOrder); + DAG.AssignOrdering(SwitchOp.getNode(), SDNodeOrder); + DAG.AssignOrdering(CopyTo.getNode(), SDNodeOrder); + DAG.AssignOrdering(CMP.getNode(), SDNodeOrder); + } + // Set NextBlock to be the MBB immediately after the current one, if any. // This is used to avoid emitting unnecessary branches to the next block. MachineBasicBlock *NextBlock = 0; MachineFunction::iterator BBI = CurMBB; + if (++BBI != FuncInfo.MF->end()) NextBlock = BBI; @@ -1312,11 +1450,18 @@ void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT, MVT::Other, CopyTo, CMP, DAG.getBasicBlock(JT.Default)); - if (JT.MBB == NextBlock) - DAG.setRoot(BrCond); - else - DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrCond, - DAG.getBasicBlock(JT.MBB))); + if (DisableScheduling) + DAG.AssignOrdering(BrCond.getNode(), SDNodeOrder); + + if (JT.MBB != NextBlock) { + BrCond = DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrCond, + DAG.getBasicBlock(JT.MBB)); + + if (DisableScheduling) + DAG.AssignOrdering(BrCond.getNode(), SDNodeOrder); + } + + DAG.setRoot(BrCond); } /// visitBitTestHeader - This function emits necessary code to produce value @@ -1325,21 +1470,29 @@ void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B) { // Subtract the minimum value SDValue SwitchOp = getValue(B.SValue); EVT VT = SwitchOp.getValueType(); - SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp, + SDValue Sub = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp, DAG.getConstant(B.First, VT)); // Check range SDValue RangeCmp = DAG.getSetCC(getCurDebugLoc(), - TLI.getSetCCResultType(SUB.getValueType()), - SUB, DAG.getConstant(B.Range, VT), + TLI.getSetCCResultType(Sub.getValueType()), + Sub, DAG.getConstant(B.Range, VT), ISD::SETUGT); - SDValue ShiftOp = DAG.getZExtOrTrunc(SUB, getCurDebugLoc(), TLI.getPointerTy()); + SDValue ShiftOp = DAG.getZExtOrTrunc(Sub, getCurDebugLoc(), + TLI.getPointerTy()); B.Reg = FuncInfo.MakeReg(TLI.getPointerTy()); SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(), B.Reg, ShiftOp); + if (DisableScheduling) { + DAG.AssignOrdering(Sub.getNode(), SDNodeOrder); + DAG.AssignOrdering(RangeCmp.getNode(), SDNodeOrder); + 
DAG.AssignOrdering(ShiftOp.getNode(), SDNodeOrder); + DAG.AssignOrdering(CopyTo.getNode(), SDNodeOrder); + } + // Set NextBlock to be the MBB immediately after the current one, if any. // This is used to avoid emitting unnecessary branches to the next block. MachineBasicBlock *NextBlock = 0; @@ -1356,11 +1509,18 @@ void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B) { MVT::Other, CopyTo, RangeCmp, DAG.getBasicBlock(B.Default)); - if (MBB == NextBlock) - DAG.setRoot(BrRange); - else - DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, CopyTo, - DAG.getBasicBlock(MBB))); + if (DisableScheduling) + DAG.AssignOrdering(BrRange.getNode(), SDNodeOrder); + + if (MBB != NextBlock) { + BrRange = DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, CopyTo, + DAG.getBasicBlock(MBB)); + + if (DisableScheduling) + DAG.AssignOrdering(BrRange.getNode(), SDNodeOrder); + } + + DAG.setRoot(BrRange); } /// visitBitTestCase - this function produces one "bit test" @@ -1384,6 +1544,13 @@ void SelectionDAGBuilder::visitBitTestCase(MachineBasicBlock* NextMBB, AndOp, DAG.getConstant(0, TLI.getPointerTy()), ISD::SETNE); + if (DisableScheduling) { + DAG.AssignOrdering(ShiftOp.getNode(), SDNodeOrder); + DAG.AssignOrdering(SwitchVal.getNode(), SDNodeOrder); + DAG.AssignOrdering(AndOp.getNode(), SDNodeOrder); + DAG.AssignOrdering(AndCmp.getNode(), SDNodeOrder); + } + CurMBB->addSuccessor(B.TargetBB); CurMBB->addSuccessor(NextMBB); @@ -1391,6 +1558,9 @@ void SelectionDAGBuilder::visitBitTestCase(MachineBasicBlock* NextMBB, MVT::Other, getControlRoot(), AndCmp, DAG.getBasicBlock(B.TargetBB)); + if (DisableScheduling) + DAG.AssignOrdering(BrAnd.getNode(), SDNodeOrder); + // Set NextBlock to be the MBB immediately after the current one, if any. // This is used to avoid emitting unnecessary branches to the next block. MachineBasicBlock *NextBlock = 0; @@ -1398,11 +1568,15 @@ void SelectionDAGBuilder::visitBitTestCase(MachineBasicBlock* NextMBB, if (++BBI != FuncInfo.MF->end()) NextBlock = BBI; - if (NextMBB == NextBlock) - DAG.setRoot(BrAnd); - else - DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrAnd, - DAG.getBasicBlock(NextMBB))); + if (NextMBB != NextBlock) { + BrAnd = DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrAnd, + DAG.getBasicBlock(NextMBB)); + + if (DisableScheduling) + DAG.AssignOrdering(BrAnd.getNode(), SDNodeOrder); + } + + DAG.setRoot(BrAnd); } void SelectionDAGBuilder::visitInvoke(InvokeInst &I) { @@ -1425,9 +1599,13 @@ void SelectionDAGBuilder::visitInvoke(InvokeInst &I) { CurMBB->addSuccessor(LandingPad); // Drop into normal successor. 
- DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), - MVT::Other, getControlRoot(), - DAG.getBasicBlock(Return))); + SDValue Branch = DAG.getNode(ISD::BR, getCurDebugLoc(), + MVT::Other, getControlRoot(), + DAG.getBasicBlock(Return)); + DAG.setRoot(Branch); + + if (DisableScheduling) + DAG.AssignOrdering(Branch.getNode(), SDNodeOrder); } void SelectionDAGBuilder::visitUnwind(UnwindInst &I) { @@ -1669,7 +1847,7 @@ bool SelectionDAGBuilder::handleBTSplitSwitchCase(CaseRec& CR, APInt Range = ComputeRange(LEnd, RBegin); assert((Range - 2ULL).isNonNegative() && "Invalid case distance"); - double LDensity = (double)LSize.roundToDouble() / + double LDensity = (double)LSize.roundToDouble() / (LEnd - First + 1ULL).roundToDouble(); double RDensity = (double)RSize.roundToDouble() / (Last - RBegin + 1ULL).roundToDouble(); @@ -1787,8 +1965,9 @@ bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR, // Don't bother the code below if there are too many unique destinations return false; } - DEBUG(errs() << "Total number of unique destinations: " << Dests.size() << '\n' - << "Total number of comparisons: " << numCmps << '\n'); + DEBUG(errs() << "Total number of unique destinations: " + << Dests.size() << '\n' + << "Total number of comparisons: " << numCmps << '\n'); // Compute span of values. const APInt& minValue = cast<ConstantInt>(FrontCase.Low)->getValue(); @@ -1883,7 +2062,6 @@ bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR, return true; } - /// Clusterify - Transform simple list of Cases into list of CaseRange's size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases, const SwitchInst& SI) { @@ -1930,7 +2108,6 @@ size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases, void SelectionDAGBuilder::visitSwitch(SwitchInst &SI) { // Figure out which block is immediately after the current one. MachineBasicBlock *NextBlock = 0; - MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()]; // If there is only the default destination, branch to it if it is not the @@ -1940,10 +2117,16 @@ void SelectionDAGBuilder::visitSwitch(SwitchInst &SI) { // If this is not a fall-through branch, emit the branch.
CurMBB->addSuccessor(Default); - if (Default != NextBlock) - DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), - MVT::Other, getControlRoot(), - DAG.getBasicBlock(Default))); + if (Default != NextBlock) { + SDValue Res = DAG.getNode(ISD::BR, getCurDebugLoc(), + MVT::Other, getControlRoot(), + DAG.getBasicBlock(Default)); + DAG.setRoot(Res); + + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); + } + return; } @@ -1995,11 +2178,14 @@ void SelectionDAGBuilder::visitIndirectBr(IndirectBrInst &I) { for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) CurMBB->addSuccessor(FuncInfo.MBBMap[I.getSuccessor(i)]); - DAG.setRoot(DAG.getNode(ISD::BRIND, getCurDebugLoc(), - MVT::Other, getControlRoot(), - getValue(I.getAddress()))); -} + SDValue Res = DAG.getNode(ISD::BRIND, getCurDebugLoc(), + MVT::Other, getControlRoot(), + getValue(I.getAddress())); + DAG.setRoot(Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); +} void SelectionDAGBuilder::visitFSub(User &I) { // -0.0 - X --> fneg @@ -2013,17 +2199,28 @@ void SelectionDAGBuilder::visitFSub(User &I) { Constant *CNZ = ConstantVector::get(&NZ[0], NZ.size()); if (CV == CNZ) { SDValue Op2 = getValue(I.getOperand(1)); - setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(), - Op2.getValueType(), Op2)); + SDValue Res = DAG.getNode(ISD::FNEG, getCurDebugLoc(), + Op2.getValueType(), Op2); + setValue(&I, Res); + + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); + return; } } } + if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0))) if (CFP->isExactlyValue(ConstantFP::getNegativeZero(Ty)->getValueAPF())) { SDValue Op2 = getValue(I.getOperand(1)); - setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(), - Op2.getValueType(), Op2)); + SDValue Res = DAG.getNode(ISD::FNEG, getCurDebugLoc(), + Op2.getValueType(), Op2); + setValue(&I, Res); + + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); + return; } @@ -2033,9 +2230,12 @@ void SelectionDAGBuilder::visitFSub(User &I) { void SelectionDAGBuilder::visitBinary(User &I, unsigned OpCode) { SDValue Op1 = getValue(I.getOperand(0)); SDValue Op2 = getValue(I.getOperand(1)); + SDValue Res = DAG.getNode(OpCode, getCurDebugLoc(), + Op1.getValueType(), Op1, Op2); + setValue(&I, Res); - setValue(&I, DAG.getNode(OpCode, getCurDebugLoc(), - Op1.getValueType(), Op1, Op2)); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); } void SelectionDAGBuilder::visitShift(User &I, unsigned Opcode) { @@ -2068,8 +2268,15 @@ void SelectionDAGBuilder::visitShift(User &I, unsigned Opcode) { TLI.getPointerTy(), Op2); } - setValue(&I, DAG.getNode(Opcode, getCurDebugLoc(), - Op1.getValueType(), Op1, Op2)); + SDValue Res = DAG.getNode(Opcode, getCurDebugLoc(), + Op1.getValueType(), Op1, Op2); + setValue(&I, Res); + + if (DisableScheduling) { + DAG.AssignOrdering(Op1.getNode(), SDNodeOrder); + DAG.AssignOrdering(Op2.getNode(), SDNodeOrder); + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); + } } void SelectionDAGBuilder::visitICmp(User &I) { @@ -2081,9 +2288,13 @@ void SelectionDAGBuilder::visitICmp(User &I) { SDValue Op1 = getValue(I.getOperand(0)); SDValue Op2 = getValue(I.getOperand(1)); ISD::CondCode Opcode = getICmpCondCode(predicate); - + EVT DestVT = TLI.getValueType(I.getType()); - setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Opcode)); + SDValue Res = DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Opcode); + setValue(&I, Res); + + if (DisableScheduling) + 
DAG.AssignOrdering(Res.getNode(), SDNodeOrder); } void SelectionDAGBuilder::visitFCmp(User &I) { @@ -2096,37 +2307,54 @@ void SelectionDAGBuilder::visitFCmp(User &I) { SDValue Op2 = getValue(I.getOperand(1)); ISD::CondCode Condition = getFCmpCondCode(predicate); EVT DestVT = TLI.getValueType(I.getType()); - setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Condition)); + SDValue Res = DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Condition); + setValue(&I, Res); + + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); } void SelectionDAGBuilder::visitSelect(User &I) { SmallVector<EVT, 4> ValueVTs; ComputeValueVTs(TLI, I.getType(), ValueVTs); unsigned NumValues = ValueVTs.size(); - if (NumValues != 0) { - SmallVector<SDValue, 4> Values(NumValues); - SDValue Cond = getValue(I.getOperand(0)); - SDValue TrueVal = getValue(I.getOperand(1)); - SDValue FalseVal = getValue(I.getOperand(2)); + if (NumValues == 0) return; + + SmallVector<SDValue, 4> Values(NumValues); + SDValue Cond = getValue(I.getOperand(0)); + SDValue TrueVal = getValue(I.getOperand(1)); + SDValue FalseVal = getValue(I.getOperand(2)); - for (unsigned i = 0; i != NumValues; ++i) - Values[i] = DAG.getNode(ISD::SELECT, getCurDebugLoc(), - TrueVal.getNode()->getValueType(i), Cond, - SDValue(TrueVal.getNode(), TrueVal.getResNo() + i), - SDValue(FalseVal.getNode(), FalseVal.getResNo() + i)); + for (unsigned i = 0; i != NumValues; ++i) { + Values[i] = DAG.getNode(ISD::SELECT, getCurDebugLoc(), + TrueVal.getNode()->getValueType(i), Cond, + SDValue(TrueVal.getNode(), + TrueVal.getResNo() + i), + SDValue(FalseVal.getNode(), + FalseVal.getResNo() + i)); - setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(), - DAG.getVTList(&ValueVTs[0], NumValues), - &Values[0], NumValues)); + if (DisableScheduling) + DAG.AssignOrdering(Values[i].getNode(), SDNodeOrder); } -} + SDValue Res = DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(), + DAG.getVTList(&ValueVTs[0], NumValues), + &Values[0], NumValues); + setValue(&I, Res); + + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); +} void SelectionDAGBuilder::visitTrunc(User &I) { // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest). SDValue N = getValue(I.getOperand(0)); EVT DestVT = TLI.getValueType(I.getType()); - setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N)); + SDValue Res = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N); + setValue(&I, Res); + + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); } void SelectionDAGBuilder::visitZExt(User &I) { @@ -2134,7 +2362,11 @@ void SelectionDAGBuilder::visitZExt(User &I) { // ZExt also can't be a cast to bool for same reason. So, nothing much to do SDValue N = getValue(I.getOperand(0)); EVT DestVT = TLI.getValueType(I.getType()); - setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N)); + SDValue Res = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N); + setValue(&I, Res); + + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); } void SelectionDAGBuilder::visitSExt(User &I) { @@ -2142,50 +2374,78 @@ void SelectionDAGBuilder::visitSExt(User &I) { // SExt also can't be a cast to bool for same reason. 
So, nothing much to do SDValue N = getValue(I.getOperand(0)); EVT DestVT = TLI.getValueType(I.getType()); - setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(), DestVT, N)); + SDValue Res = DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(), DestVT, N); + setValue(&I, Res); + + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); } void SelectionDAGBuilder::visitFPTrunc(User &I) { // FPTrunc is never a no-op cast, no need to check SDValue N = getValue(I.getOperand(0)); EVT DestVT = TLI.getValueType(I.getType()); - setValue(&I, DAG.getNode(ISD::FP_ROUND, getCurDebugLoc(), - DestVT, N, DAG.getIntPtrConstant(0))); + SDValue Res = DAG.getNode(ISD::FP_ROUND, getCurDebugLoc(), + DestVT, N, DAG.getIntPtrConstant(0)); + setValue(&I, Res); + + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); } void SelectionDAGBuilder::visitFPExt(User &I){ // FPExt is never a no-op cast, no need to check SDValue N = getValue(I.getOperand(0)); EVT DestVT = TLI.getValueType(I.getType()); - setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurDebugLoc(), DestVT, N)); + SDValue Res = DAG.getNode(ISD::FP_EXTEND, getCurDebugLoc(), DestVT, N); + setValue(&I, Res); + + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); } void SelectionDAGBuilder::visitFPToUI(User &I) { // FPToUI is never a no-op cast, no need to check SDValue N = getValue(I.getOperand(0)); EVT DestVT = TLI.getValueType(I.getType()); - setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurDebugLoc(), DestVT, N)); + SDValue Res = DAG.getNode(ISD::FP_TO_UINT, getCurDebugLoc(), DestVT, N); + setValue(&I, Res); + + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); } void SelectionDAGBuilder::visitFPToSI(User &I) { // FPToSI is never a no-op cast, no need to check SDValue N = getValue(I.getOperand(0)); EVT DestVT = TLI.getValueType(I.getType()); - setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurDebugLoc(), DestVT, N)); + SDValue Res = DAG.getNode(ISD::FP_TO_SINT, getCurDebugLoc(), DestVT, N); + setValue(&I, Res); + + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); } void SelectionDAGBuilder::visitUIToFP(User &I) { // UIToFP is never a no-op cast, no need to check SDValue N = getValue(I.getOperand(0)); EVT DestVT = TLI.getValueType(I.getType()); - setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurDebugLoc(), DestVT, N)); + SDValue Res = DAG.getNode(ISD::UINT_TO_FP, getCurDebugLoc(), DestVT, N); + setValue(&I, Res); + + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); } void SelectionDAGBuilder::visitSIToFP(User &I){ // SIToFP is never a no-op cast, no need to check SDValue N = getValue(I.getOperand(0)); EVT DestVT = TLI.getValueType(I.getType()); - setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurDebugLoc(), DestVT, N)); + SDValue Res = DAG.getNode(ISD::SINT_TO_FP, getCurDebugLoc(), DestVT, N); + setValue(&I, Res); + + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); } void SelectionDAGBuilder::visitPtrToInt(User &I) { @@ -2194,8 +2454,11 @@ void SelectionDAGBuilder::visitPtrToInt(User &I) { SDValue N = getValue(I.getOperand(0)); EVT SrcVT = N.getValueType(); EVT DestVT = TLI.getValueType(I.getType()); - SDValue Result = DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT); - setValue(&I, Result); + SDValue Res = DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT); + setValue(&I, Res); + + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); } void SelectionDAGBuilder::visitIntToPtr(User &I) { @@
-2204,41 +2467,61 @@ void SelectionDAGBuilder::visitIntToPtr(User &I) { SDValue N = getValue(I.getOperand(0)); EVT SrcVT = N.getValueType(); EVT DestVT = TLI.getValueType(I.getType()); - setValue(&I, DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT)); + SDValue Res = DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT); + setValue(&I, Res); + + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); } void SelectionDAGBuilder::visitBitCast(User &I) { SDValue N = getValue(I.getOperand(0)); EVT DestVT = TLI.getValueType(I.getType()); - // BitCast assures us that source and destination are the same size so this - // is either a BIT_CONVERT or a no-op. - if (DestVT != N.getValueType()) - setValue(&I, DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(), - DestVT, N)); // convert types - else - setValue(&I, N); // noop cast. + // BitCast assures us that source and destination are the same size so this is + // either a BIT_CONVERT or a no-op. + if (DestVT != N.getValueType()) { + SDValue Res = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(), + DestVT, N); // convert types. + setValue(&I, Res); + + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); + } else { + setValue(&I, N); // noop cast. + } } void SelectionDAGBuilder::visitInsertElement(User &I) { SDValue InVec = getValue(I.getOperand(0)); SDValue InVal = getValue(I.getOperand(1)); SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), - TLI.getPointerTy(), - getValue(I.getOperand(2))); + TLI.getPointerTy(), + getValue(I.getOperand(2))); + SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurDebugLoc(), + TLI.getValueType(I.getType()), + InVec, InVal, InIdx); + setValue(&I, Res); - setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurDebugLoc(), - TLI.getValueType(I.getType()), - InVec, InVal, InIdx)); + if (DisableScheduling) { + DAG.AssignOrdering(InIdx.getNode(), SDNodeOrder); + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); + } } void SelectionDAGBuilder::visitExtractElement(User &I) { SDValue InVec = getValue(I.getOperand(0)); SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), - TLI.getPointerTy(), - getValue(I.getOperand(1))); - setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(), - TLI.getValueType(I.getType()), InVec, InIdx)); + TLI.getPointerTy(), + getValue(I.getOperand(1))); + SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(), + TLI.getValueType(I.getType()), InVec, InIdx); + setValue(&I, Res); + + if (DisableScheduling) { + DAG.AssignOrdering(InIdx.getNode(), SDNodeOrder); + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); + } } @@ -2260,7 +2543,7 @@ void SelectionDAGBuilder::visitShuffleVector(User &I) { // Convert the ConstantVector mask operand into an array of ints, with -1 // representing undef values. 
SmallVector<Constant*, 8> MaskElts; - cast<Constant>(I.getOperand(2))->getVectorElements(*DAG.getContext(), + cast<Constant>(I.getOperand(2))->getVectorElements(*DAG.getContext(), MaskElts); unsigned MaskNumElts = MaskElts.size(); for (unsigned i = 0; i != MaskNumElts; ++i) { @@ -2269,14 +2552,19 @@ void SelectionDAGBuilder::visitShuffleVector(User &I) { else Mask.push_back(cast<ConstantInt>(MaskElts[i])->getSExtValue()); } - + EVT VT = TLI.getValueType(I.getType()); EVT SrcVT = Src1.getValueType(); unsigned SrcNumElts = SrcVT.getVectorNumElements(); if (SrcNumElts == MaskNumElts) { - setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2, - &Mask[0])); + SDValue Res = DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2, + &Mask[0]); + setValue(&I, Res); + + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); + return; } @@ -2287,8 +2575,13 @@ void SelectionDAGBuilder::visitShuffleVector(User &I) { // lengths match. if (SrcNumElts*2 == MaskNumElts && SequentialMask(Mask, 0)) { // The shuffle is concatenating two vectors together. - setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurDebugLoc(), - VT, Src1, Src2)); + SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, getCurDebugLoc(), + VT, Src1, Src2); + setValue(&I, Res); + + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); + return; } @@ -2302,12 +2595,12 @@ void SelectionDAGBuilder::visitShuffleVector(User &I) { SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal); MOps1[0] = Src1; MOps2[0] = Src2; - - Src1 = Src1U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS, - getCurDebugLoc(), VT, + + Src1 = Src1U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS, + getCurDebugLoc(), VT, &MOps1[0], NumConcat); Src2 = Src2U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS, - getCurDebugLoc(), VT, + getCurDebugLoc(), VT, &MOps2[0], NumConcat); // Readjust mask for new input vector length. @@ -2319,8 +2612,17 @@ void SelectionDAGBuilder::visitShuffleVector(User &I) { else MappedOps.push_back(Idx + MaskNumElts - SrcNumElts); } - setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2, - &MappedOps[0])); + + SDValue Res = DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2, + &MappedOps[0]); + setValue(&I, Res); + + if (DisableScheduling) { + DAG.AssignOrdering(Src1.getNode(), SDNodeOrder); + DAG.AssignOrdering(Src2.getNode(), SDNodeOrder); + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); + } + return; } @@ -2336,7 +2638,7 @@ void SelectionDAGBuilder::visitShuffleVector(User &I) { int Input = 0; if (Idx < 0) continue; - + if (Idx >= (int)SrcNumElts) { Input = 1; Idx -= SrcNumElts; @@ -2349,7 +2651,8 @@ void SelectionDAGBuilder::visitShuffleVector(User &I) { // Check if the access is smaller than the vector size and can we find // a reasonable extract index. - int RangeUse[2] = { 2, 2 }; // 0 = Unused, 1 = Extract, 2 = Can not Extract. + int RangeUse[2] = { 2, 2 }; // 0 = Unused, 1 = Extract, 2 = Can not + // Extract. int StartIdx[2]; // StartIdx to extract from for (int Input=0; Input < 2; ++Input) { if (MinRange[Input] == (int)(SrcNumElts+1) && MaxRange[Input] == -1) { @@ -2371,20 +2674,28 @@ void SelectionDAGBuilder::visitShuffleVector(User &I) { } if (RangeUse[0] == 0 && RangeUse[1] == 0) { - setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used. + SDValue Res = DAG.getUNDEF(VT); + setValue(&I, Res); // Vectors are not used. 
+ + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); + return; } else if (RangeUse[0] < 2 && RangeUse[1] < 2) { // Extract appropriate subvector and generate a vector shuffle for (int Input=0; Input < 2; ++Input) { - SDValue& Src = Input == 0 ? Src1 : Src2; - if (RangeUse[Input] == 0) { + SDValue &Src = Input == 0 ? Src1 : Src2; + if (RangeUse[Input] == 0) Src = DAG.getUNDEF(VT); - } else { + else Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, getCurDebugLoc(), VT, Src, DAG.getIntPtrConstant(StartIdx[Input])); - } + + if (DisableScheduling) + DAG.AssignOrdering(Src.getNode(), SDNodeOrder); } + // Calculate new mask. SmallVector<int, 8> MappedOps; for (unsigned i = 0; i != MaskNumElts; ++i) { @@ -2396,8 +2707,14 @@ void SelectionDAGBuilder::visitShuffleVector(User &I) { else MappedOps.push_back(Idx - SrcNumElts - StartIdx[1] + MaskNumElts); } - setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2, - &MappedOps[0])); + + SDValue Res = DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2, + &MappedOps[0]); + setValue(&I, Res); + + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); + return; } } @@ -2413,17 +2730,29 @@ void SelectionDAGBuilder::visitShuffleVector(User &I) { Ops.push_back(DAG.getUNDEF(EltVT)); } else { int Idx = Mask[i]; + SDValue Res; + if (Idx < (int)SrcNumElts) - Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(), - EltVT, Src1, DAG.getConstant(Idx, PtrVT))); + Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(), + EltVT, Src1, DAG.getConstant(Idx, PtrVT)); else - Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(), - EltVT, Src2, - DAG.getConstant(Idx - SrcNumElts, PtrVT))); + Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(), + EltVT, Src2, + DAG.getConstant(Idx - SrcNumElts, PtrVT)); + + Ops.push_back(Res); + + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); } } - setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(), - VT, &Ops[0], Ops.size())); + + SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(), + VT, &Ops[0], Ops.size()); + setValue(&I, Res); + + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); } void SelectionDAGBuilder::visitInsertValue(InsertValueInst &I) { @@ -2462,9 +2791,13 @@ void SelectionDAGBuilder::visitInsertValue(InsertValueInst &I) { Values[i] = IntoUndef ? 
DAG.getUNDEF(AggValueVTs[i]) : SDValue(Agg.getNode(), Agg.getResNo() + i); - setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(), - DAG.getVTList(&AggValueVTs[0], NumAggValues), - &Values[0], NumAggValues)); + SDValue Res = DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(), + DAG.getVTList(&AggValueVTs[0], NumAggValues), + &Values[0], NumAggValues); + setValue(&I, Res); + + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); } void SelectionDAGBuilder::visitExtractValue(ExtractValueInst &I) { @@ -2490,11 +2823,14 @@ void SelectionDAGBuilder::visitExtractValue(ExtractValueInst &I) { DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) : SDValue(Agg.getNode(), Agg.getResNo() + i); - setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(), - DAG.getVTList(&ValValueVTs[0], NumValValues), - &Values[0], NumValValues)); -} + SDValue Res = DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(), + DAG.getVTList(&ValValueVTs[0], NumValValues), + &Values[0], NumValValues); + setValue(&I, Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); +} void SelectionDAGBuilder::visitGetElementPtr(User &I) { SDValue N = getValue(I.getOperand(0)); @@ -2510,7 +2846,11 @@ void SelectionDAGBuilder::visitGetElementPtr(User &I) { uint64_t Offset = TD->getStructLayout(StTy)->getElementOffset(Field); N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N, DAG.getIntPtrConstant(Offset)); + + if (DisableScheduling) + DAG.AssignOrdering(N.getNode(), SDNodeOrder); } + Ty = StTy->getElementType(Field); } else { Ty = cast<SequentialType>(Ty)->getElementType(); @@ -2523,14 +2863,21 @@ void SelectionDAGBuilder::visitGetElementPtr(User &I) { SDValue OffsVal; EVT PTy = TLI.getPointerTy(); unsigned PtrBits = PTy.getSizeInBits(); - if (PtrBits < 64) { + if (PtrBits < 64) OffsVal = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), TLI.getPointerTy(), DAG.getConstant(Offs, MVT::i64)); - } else + else OffsVal = DAG.getIntPtrConstant(Offs); + N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N, OffsVal); + + if (DisableScheduling) { + DAG.AssignOrdering(OffsVal.getNode(), SDNodeOrder); + DAG.AssignOrdering(N.getNode(), SDNodeOrder); + } + continue; } @@ -2556,12 +2903,19 @@ void SelectionDAGBuilder::visitGetElementPtr(User &I) { IdxN = DAG.getNode(ISD::MUL, getCurDebugLoc(), N.getValueType(), IdxN, Scale); } + + if (DisableScheduling) + DAG.AssignOrdering(IdxN.getNode(), SDNodeOrder); } N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N, IdxN); + + if (DisableScheduling) + DAG.AssignOrdering(N.getNode(), SDNodeOrder); } } + setValue(&I, N); } @@ -2578,16 +2932,20 @@ void SelectionDAGBuilder::visitAlloca(AllocaInst &I) { I.getAlignment()); SDValue AllocSize = getValue(I.getArraySize()); - + AllocSize = DAG.getNode(ISD::MUL, getCurDebugLoc(), AllocSize.getValueType(), AllocSize, DAG.getConstant(TySize, AllocSize.getValueType())); - - - + + if (DisableScheduling) + DAG.AssignOrdering(AllocSize.getNode(), SDNodeOrder); + EVT IntPtr = TLI.getPointerTy(); AllocSize = DAG.getZExtOrTrunc(AllocSize, getCurDebugLoc(), IntPtr); + if (DisableScheduling) + DAG.AssignOrdering(AllocSize.getNode(), SDNodeOrder); + // Handle alignment. If the requested alignment is less than or equal to // the stack alignment, ignore it. If the size is greater than or equal to // the stack alignment, we note this in the DYNAMIC_STACKALLOC node. 
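The visitAlloca hunk that follows rounds the dynamically computed allocation size up to the stack alignment with an ISD::ADD of StackAlign-1 followed by an ISD::AND that clears the low bits. A minimal host-code sketch of that arithmetic, assuming a power-of-two alignment (the helper name roundUpToStackAlign is ours, not the patch's):

#include <cassert>
#include <cstdint>
#include <cstdio>

// Mirrors the ADD/AND pair visitAlloca emits: round Size up to the next
// multiple of Align. The mask trick only works when Align is a power of two.
static uint64_t roundUpToStackAlign(uint64_t Size, uint64_t Align) {
  assert(Align && (Align & (Align - 1)) == 0 && "alignment must be a power of two");
  return (Size + Align - 1) & ~(Align - 1);
}

int main() {
  // A 13-byte dynamic alloca on a target with 8-byte stack alignment grows
  // to 16 bytes; an already-aligned size is left alone.
  printf("%llu\n", (unsigned long long)roundUpToStackAlign(13, 8)); // 16
  printf("%llu\n", (unsigned long long)roundUpToStackAlign(16, 8)); // 16
  return 0;
}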
@@ -2601,10 +2959,15 @@ void SelectionDAGBuilder::visitAlloca(AllocaInst &I) { AllocSize = DAG.getNode(ISD::ADD, getCurDebugLoc(), AllocSize.getValueType(), AllocSize, DAG.getIntPtrConstant(StackAlign-1)); + if (DisableScheduling) + DAG.AssignOrdering(AllocSize.getNode(), SDNodeOrder); + // Mask out the low bits for alignment purposes. AllocSize = DAG.getNode(ISD::AND, getCurDebugLoc(), AllocSize.getValueType(), AllocSize, DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1))); + if (DisableScheduling) + DAG.AssignOrdering(AllocSize.getNode(), SDNodeOrder); SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) }; SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other); @@ -2613,6 +2976,9 @@ void SelectionDAGBuilder::visitAlloca(AllocaInst &I) { setValue(&I, DSA); DAG.setRoot(DSA.getValue(1)); + if (DisableScheduling) + DAG.AssignOrdering(DSA.getNode(), SDNodeOrder); + // Inform the Frame Information that we have just allocated a variable-sized // object. FuncInfo.MF->getFrameInfo()->CreateVariableSizedObject(); @@ -2651,30 +3017,41 @@ void SelectionDAGBuilder::visitLoad(LoadInst &I) { SmallVector<SDValue, 4> Chains(NumValues); EVT PtrVT = Ptr.getValueType(); for (unsigned i = 0; i != NumValues; ++i) { + SDValue A = DAG.getNode(ISD::ADD, getCurDebugLoc(), + PtrVT, Ptr, + DAG.getConstant(Offsets[i], PtrVT)); SDValue L = DAG.getLoad(ValueVTs[i], getCurDebugLoc(), Root, - DAG.getNode(ISD::ADD, getCurDebugLoc(), - PtrVT, Ptr, - DAG.getConstant(Offsets[i], PtrVT)), - SV, Offsets[i], isVolatile, Alignment); + A, SV, Offsets[i], isVolatile, Alignment); + Values[i] = L; Chains[i] = L.getValue(1); + + if (DisableScheduling) { + DAG.AssignOrdering(A.getNode(), SDNodeOrder); + DAG.AssignOrdering(L.getNode(), SDNodeOrder); + } } if (!ConstantMemory) { SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), - MVT::Other, - &Chains[0], NumValues); + MVT::Other, &Chains[0], NumValues); if (isVolatile) DAG.setRoot(Chain); else PendingLoads.push_back(Chain); + + if (DisableScheduling) + DAG.AssignOrdering(Chain.getNode(), SDNodeOrder); } - setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(), - DAG.getVTList(&ValueVTs[0], NumValues), - &Values[0], NumValues)); -} + SDValue Res = DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(), + DAG.getVTList(&ValueVTs[0], NumValues), + &Values[0], NumValues); + setValue(&I, Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); +} void SelectionDAGBuilder::visitStore(StoreInst &I) { Value *SrcV = I.getOperand(0); @@ -2698,16 +3075,26 @@ void SelectionDAGBuilder::visitStore(StoreInst &I) { EVT PtrVT = Ptr.getValueType(); bool isVolatile = I.isVolatile(); unsigned Alignment = I.getAlignment(); - for (unsigned i = 0; i != NumValues; ++i) + + for (unsigned i = 0; i != NumValues; ++i) { + SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT, Ptr, + DAG.getConstant(Offsets[i], PtrVT)); Chains[i] = DAG.getStore(Root, getCurDebugLoc(), SDValue(Src.getNode(), Src.getResNo() + i), - DAG.getNode(ISD::ADD, getCurDebugLoc(), - PtrVT, Ptr, - DAG.getConstant(Offsets[i], PtrVT)), - PtrV, Offsets[i], isVolatile, Alignment); + Add, PtrV, Offsets[i], isVolatile, Alignment); + + if (DisableScheduling) { + DAG.AssignOrdering(Add.getNode(), SDNodeOrder); + DAG.AssignOrdering(Chains[i].getNode(), SDNodeOrder); + } + } + + SDValue Res = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), + MVT::Other, &Chains[0], NumValues); + DAG.setRoot(Res); - DAG.setRoot(DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), - MVT::Other, &Chains[0], 
NumValues)); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); } /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC @@ -2752,6 +3139,7 @@ void SelectionDAGBuilder::visitTargetIntrinsic(CallInst &I, "Intrinsic uses a non-legal type?"); } #endif // NDEBUG + if (HasChain) ValueVTs.push_back(MVT::Other); @@ -2766,16 +3154,19 @@ void SelectionDAGBuilder::visitTargetIntrinsic(CallInst &I, Info.memVT, Info.ptrVal, Info.offset, Info.align, Info.vol, Info.readMem, Info.writeMem); - } - else if (!HasChain) + } else if (!HasChain) { Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurDebugLoc(), VTs, &Ops[0], Ops.size()); - else if (I.getType() != Type::getVoidTy(*DAG.getContext())) + } else if (I.getType() != Type::getVoidTy(*DAG.getContext())) { Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurDebugLoc(), VTs, &Ops[0], Ops.size()); - else + } else { Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurDebugLoc(), VTs, &Ops[0], Ops.size()); + } + + if (DisableScheduling) + DAG.AssignOrdering(Result.getNode(), SDNodeOrder); if (HasChain) { SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1); @@ -2784,11 +3175,16 @@ void SelectionDAGBuilder::visitTargetIntrinsic(CallInst &I, else DAG.setRoot(Chain); } + if (I.getType() != Type::getVoidTy(*DAG.getContext())) { if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) { EVT VT = TLI.getValueType(PTy); Result = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(), VT, Result); + + if (DisableScheduling) + DAG.AssignOrdering(Result.getNode(), SDNodeOrder); } + setValue(&I, Result); } } @@ -2800,12 +3196,20 @@ void SelectionDAGBuilder::visitTargetIntrinsic(CallInst &I, /// /// where Op is the hexadecimal representation of floating point value. static SDValue -GetSignificand(SelectionDAG &DAG, SDValue Op, DebugLoc dl) { +GetSignificand(SelectionDAG &DAG, SDValue Op, DebugLoc dl, unsigned Order) { SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op, DAG.getConstant(0x007fffff, MVT::i32)); SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1, DAG.getConstant(0x3f800000, MVT::i32)); - return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t2); + SDValue Res = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t2); + + if (DisableScheduling) { + DAG.AssignOrdering(t1.getNode(), Order); + DAG.AssignOrdering(t2.getNode(), Order); + DAG.AssignOrdering(Res.getNode(), Order); + } + + return Res; } /// GetExponent - Get the exponent: @@ -2815,14 +3219,23 @@ GetSignificand(SelectionDAG &DAG, SDValue Op, DebugLoc dl) { /// where Op is the hexadecimal representation of floating point value. static SDValue GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI, - DebugLoc dl) { + DebugLoc dl, unsigned Order) { SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op, DAG.getConstant(0x7f800000, MVT::i32)); SDValue t1 = DAG.getNode(ISD::SRL, dl, MVT::i32, t0, DAG.getConstant(23, TLI.getPointerTy())); SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1, DAG.getConstant(127, MVT::i32)); - return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2); + SDValue Res = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2); + + if (DisableScheduling) { + DAG.AssignOrdering(t0.getNode(), Order); + DAG.AssignOrdering(t1.getNode(), Order); + DAG.AssignOrdering(t2.getNode(), Order); + DAG.AssignOrdering(Res.getNode(), Order); + } + + return Res; } /// getF32Constant - Get 32-bit floating point constant.
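GetSignificand and GetExponent above take a float apart with plain integer masks: bits 23-30 hold the biased exponent (bias 127), and OR-ing the low 23 mantissa bits with 0x3f800000 rebuilds a value in [1,2). The visitExp/visitExp2/visitPow expansions later in this diff run the same trick in reverse, adding IntegerPartOfX << 23 to a float's bit pattern to scale it by a power of two. A standalone sketch of both directions, with memcpy standing in for ISD::BIT_CONVERT (the bitsOf/floatOf helpers are ours):

#include <cstdint>
#include <cstdio>
#include <cstring>

static uint32_t bitsOf(float F) { uint32_t B; std::memcpy(&B, &F, 4); return B; }
static float floatOf(uint32_t B) { float F; std::memcpy(&F, &B, 4); return F; }

int main() {
  uint32_t B = bitsOf(8.0f); // 0x41000000

  // GetExponent: mask with 0x7f800000, shift right by 23, subtract bias 127.
  int Exponent = (int)((B & 0x7f800000u) >> 23) - 127;

  // GetSignificand: keep the 23 mantissa bits, pin the exponent field to 127
  // (an unbiased exponent of 0), yielding a value in [1,2).
  float Significand = floatOf((B & 0x007fffffu) | 0x3f800000u);

  printf("%d %f\n", Exponent, Significand); // 3 1.000000, i.e. 8 = 1.0 * 2^3

  // The reverse step from visitExp2: adding k << 23 to the bit pattern of a
  // normal float (no exponent overflow) multiplies it by 2^k.
  printf("%f\n", floatOf(bitsOf(1.5f) + (3u << 23))); // 12.000000
  return 0;
}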
@@ -2846,6 +3259,10 @@ SelectionDAGBuilder::implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op) { I.getOperand(1)); setValue(&I, L); DAG.setRoot(L.getValue(1)); + + if (DisableScheduling) + DAG.AssignOrdering(L.getNode(), SDNodeOrder); + return 0; } @@ -2859,6 +3276,10 @@ SelectionDAGBuilder::implVisitAluOverflow(CallInst &I, ISD::NodeType Op) { SDValue Result = DAG.getNode(Op, getCurDebugLoc(), VTs, Op1, Op2); setValue(&I, Result); + + if (DisableScheduling) + DAG.AssignOrdering(Result.getNode(), SDNodeOrder); + return 0; } @@ -2886,10 +3307,20 @@ SelectionDAGBuilder::visitExp(CallInst &I) { SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX); SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1); + if (DisableScheduling) { + DAG.AssignOrdering(t0.getNode(), SDNodeOrder); + DAG.AssignOrdering(IntegerPartOfX.getNode(), SDNodeOrder); + DAG.AssignOrdering(t1.getNode(), SDNodeOrder); + DAG.AssignOrdering(X.getNode(), SDNodeOrder); + } + // IntegerPartOfX <<= 23; IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX, DAG.getConstant(23, TLI.getPointerTy())); + if (DisableScheduling) + DAG.AssignOrdering(IntegerPartOfX.getNode(), SDNodeOrder); + if (LimitFloatPrecision <= 6) { // For floating-point precision of 6: // @@ -2912,6 +3343,16 @@ SelectionDAGBuilder::visitExp(CallInst &I) { TwoToFracPartOfX, IntegerPartOfX); result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t6); + + if (DisableScheduling) { + DAG.AssignOrdering(t2.getNode(), SDNodeOrder); + DAG.AssignOrdering(t3.getNode(), SDNodeOrder); + DAG.AssignOrdering(t4.getNode(), SDNodeOrder); + DAG.AssignOrdering(t5.getNode(), SDNodeOrder); + DAG.AssignOrdering(t6.getNode(), SDNodeOrder); + DAG.AssignOrdering(TwoToFracPartOfX.getNode(), SDNodeOrder); + DAG.AssignOrdering(result.getNode(), SDNodeOrder); + } } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) { // For floating-point precision of 12: // @@ -2938,6 +3379,18 @@ SelectionDAGBuilder::visitExp(CallInst &I) { TwoToFracPartOfX, IntegerPartOfX); result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t8); + + if (DisableScheduling) { + DAG.AssignOrdering(t2.getNode(), SDNodeOrder); + DAG.AssignOrdering(t3.getNode(), SDNodeOrder); + DAG.AssignOrdering(t4.getNode(), SDNodeOrder); + DAG.AssignOrdering(t5.getNode(), SDNodeOrder); + DAG.AssignOrdering(t6.getNode(), SDNodeOrder); + DAG.AssignOrdering(t7.getNode(), SDNodeOrder); + DAG.AssignOrdering(t8.getNode(), SDNodeOrder); + DAG.AssignOrdering(TwoToFracPartOfX.getNode(), SDNodeOrder); + DAG.AssignOrdering(result.getNode(), SDNodeOrder); + } } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18 // For floating-point precision of 18: // @@ -2977,12 +3430,32 @@ SelectionDAGBuilder::visitExp(CallInst &I) { TwoToFracPartOfX, IntegerPartOfX); result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t14); + + if (DisableScheduling) { + DAG.AssignOrdering(t2.getNode(), SDNodeOrder); + DAG.AssignOrdering(t3.getNode(), SDNodeOrder); + DAG.AssignOrdering(t4.getNode(), SDNodeOrder); + DAG.AssignOrdering(t5.getNode(), SDNodeOrder); + DAG.AssignOrdering(t6.getNode(), SDNodeOrder); + DAG.AssignOrdering(t7.getNode(), SDNodeOrder); + DAG.AssignOrdering(t8.getNode(), SDNodeOrder); + DAG.AssignOrdering(t9.getNode(), SDNodeOrder); + DAG.AssignOrdering(t10.getNode(), SDNodeOrder); + DAG.AssignOrdering(t11.getNode(), SDNodeOrder); + DAG.AssignOrdering(t12.getNode(), SDNodeOrder); + DAG.AssignOrdering(t13.getNode(), SDNodeOrder); + DAG.AssignOrdering(t14.getNode(), SDNodeOrder); + 
DAG.AssignOrdering(TwoToFracPartOfX.getNode(), SDNodeOrder); + DAG.AssignOrdering(result.getNode(), SDNodeOrder); + } } } else { // No special expansion. result = DAG.getNode(ISD::FEXP, dl, getValue(I.getOperand(1)).getValueType(), getValue(I.getOperand(1))); + if (DisableScheduling) + DAG.AssignOrdering(result.getNode(), SDNodeOrder); } setValue(&I, result); @@ -3000,14 +3473,20 @@ SelectionDAGBuilder::visitLog(CallInst &I) { SDValue Op = getValue(I.getOperand(1)); SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op); + if (DisableScheduling) + DAG.AssignOrdering(Op1.getNode(), SDNodeOrder); + // Scale the exponent by log(2) [0.69314718f]. - SDValue Exp = GetExponent(DAG, Op1, TLI, dl); + SDValue Exp = GetExponent(DAG, Op1, TLI, dl, SDNodeOrder); SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp, getF32Constant(DAG, 0x3f317218)); + if (DisableScheduling) + DAG.AssignOrdering(LogOfExponent.getNode(), SDNodeOrder); + // Get the significand and build it into a floating-point number with // exponent of 1. - SDValue X = GetSignificand(DAG, Op1, dl); + SDValue X = GetSignificand(DAG, Op1, dl, SDNodeOrder); if (LimitFloatPrecision <= 6) { // For floating-point precision of 6: @@ -3027,6 +3506,14 @@ SelectionDAGBuilder::visitLog(CallInst &I) { result = DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa); + + if (DisableScheduling) { + DAG.AssignOrdering(t0.getNode(), SDNodeOrder); + DAG.AssignOrdering(t1.getNode(), SDNodeOrder); + DAG.AssignOrdering(t2.getNode(), SDNodeOrder); + DAG.AssignOrdering(LogOfMantissa.getNode(), SDNodeOrder); + DAG.AssignOrdering(result.getNode(), SDNodeOrder); + } } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) { // For floating-point precision of 12: // @@ -3053,6 +3540,18 @@ SelectionDAGBuilder::visitLog(CallInst &I) { result = DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa); + + if (DisableScheduling) { + DAG.AssignOrdering(t0.getNode(), SDNodeOrder); + DAG.AssignOrdering(t1.getNode(), SDNodeOrder); + DAG.AssignOrdering(t2.getNode(), SDNodeOrder); + DAG.AssignOrdering(t3.getNode(), SDNodeOrder); + DAG.AssignOrdering(t4.getNode(), SDNodeOrder); + DAG.AssignOrdering(t5.getNode(), SDNodeOrder); + DAG.AssignOrdering(t6.getNode(), SDNodeOrder); + DAG.AssignOrdering(LogOfMantissa.getNode(), SDNodeOrder); + DAG.AssignOrdering(result.getNode(), SDNodeOrder); + } } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18 // For floating-point precision of 18: // @@ -3087,12 +3586,31 @@ SelectionDAGBuilder::visitLog(CallInst &I) { result = DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa); + + if (DisableScheduling) { + DAG.AssignOrdering(t0.getNode(), SDNodeOrder); + DAG.AssignOrdering(t1.getNode(), SDNodeOrder); + DAG.AssignOrdering(t2.getNode(), SDNodeOrder); + DAG.AssignOrdering(t3.getNode(), SDNodeOrder); + DAG.AssignOrdering(t4.getNode(), SDNodeOrder); + DAG.AssignOrdering(t5.getNode(), SDNodeOrder); + DAG.AssignOrdering(t6.getNode(), SDNodeOrder); + DAG.AssignOrdering(t7.getNode(), SDNodeOrder); + DAG.AssignOrdering(t8.getNode(), SDNodeOrder); + DAG.AssignOrdering(t9.getNode(), SDNodeOrder); + DAG.AssignOrdering(t10.getNode(), SDNodeOrder); + DAG.AssignOrdering(LogOfMantissa.getNode(), SDNodeOrder); + DAG.AssignOrdering(result.getNode(), SDNodeOrder); + } } } else { // No special expansion. 
result = DAG.getNode(ISD::FLOG, dl, getValue(I.getOperand(1)).getValueType(), getValue(I.getOperand(1))); + + if (DisableScheduling) + DAG.AssignOrdering(result.getNode(), SDNodeOrder); } setValue(&I, result); @@ -3110,12 +3628,18 @@ SelectionDAGBuilder::visitLog2(CallInst &I) { SDValue Op = getValue(I.getOperand(1)); SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op); + if (DisableScheduling) + DAG.AssignOrdering(Op1.getNode(), SDNodeOrder); + // Get the exponent. - SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl); + SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl, SDNodeOrder); + + if (DisableScheduling) + DAG.AssignOrdering(LogOfExponent.getNode(), SDNodeOrder); // Get the significand and build it into a floating-point number with // exponent of 1. - SDValue X = GetSignificand(DAG, Op1, dl); + SDValue X = GetSignificand(DAG, Op1, dl, SDNodeOrder); // Different possible minimax approximations of significand in // floating-point for various degrees of accuracy over [1,2]. @@ -3135,6 +3659,14 @@ SelectionDAGBuilder::visitLog2(CallInst &I) { result = DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa); + + if (DisableScheduling) { + DAG.AssignOrdering(t0.getNode(), SDNodeOrder); + DAG.AssignOrdering(t1.getNode(), SDNodeOrder); + DAG.AssignOrdering(t2.getNode(), SDNodeOrder); + DAG.AssignOrdering(Log2ofMantissa.getNode(), SDNodeOrder); + DAG.AssignOrdering(result.getNode(), SDNodeOrder); + } } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) { // For floating-point precision of 12: // @@ -3161,6 +3693,18 @@ SelectionDAGBuilder::visitLog2(CallInst &I) { result = DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa); + + if (DisableScheduling) { + DAG.AssignOrdering(t0.getNode(), SDNodeOrder); + DAG.AssignOrdering(t1.getNode(), SDNodeOrder); + DAG.AssignOrdering(t2.getNode(), SDNodeOrder); + DAG.AssignOrdering(t3.getNode(), SDNodeOrder); + DAG.AssignOrdering(t4.getNode(), SDNodeOrder); + DAG.AssignOrdering(t5.getNode(), SDNodeOrder); + DAG.AssignOrdering(t6.getNode(), SDNodeOrder); + DAG.AssignOrdering(Log2ofMantissa.getNode(), SDNodeOrder); + DAG.AssignOrdering(result.getNode(), SDNodeOrder); + } } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18 // For floating-point precision of 18: // @@ -3196,12 +3740,31 @@ SelectionDAGBuilder::visitLog2(CallInst &I) { result = DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa); + + if (DisableScheduling) { + DAG.AssignOrdering(t0.getNode(), SDNodeOrder); + DAG.AssignOrdering(t1.getNode(), SDNodeOrder); + DAG.AssignOrdering(t2.getNode(), SDNodeOrder); + DAG.AssignOrdering(t3.getNode(), SDNodeOrder); + DAG.AssignOrdering(t4.getNode(), SDNodeOrder); + DAG.AssignOrdering(t5.getNode(), SDNodeOrder); + DAG.AssignOrdering(t6.getNode(), SDNodeOrder); + DAG.AssignOrdering(t7.getNode(), SDNodeOrder); + DAG.AssignOrdering(t8.getNode(), SDNodeOrder); + DAG.AssignOrdering(t9.getNode(), SDNodeOrder); + DAG.AssignOrdering(t10.getNode(), SDNodeOrder); + DAG.AssignOrdering(Log2ofMantissa.getNode(), SDNodeOrder); + DAG.AssignOrdering(result.getNode(), SDNodeOrder); + } } } else { // No special expansion. 
result = DAG.getNode(ISD::FLOG2, dl, getValue(I.getOperand(1)).getValueType(), getValue(I.getOperand(1))); + + if (DisableScheduling) + DAG.AssignOrdering(result.getNode(), SDNodeOrder); } setValue(&I, result); @@ -3219,14 +3782,20 @@ SelectionDAGBuilder::visitLog10(CallInst &I) { SDValue Op = getValue(I.getOperand(1)); SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op); + if (DisableScheduling) + DAG.AssignOrdering(Op1.getNode(), SDNodeOrder); + // Scale the exponent by log10(2) [0.30102999f]. - SDValue Exp = GetExponent(DAG, Op1, TLI, dl); + SDValue Exp = GetExponent(DAG, Op1, TLI, dl, SDNodeOrder); SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp, getF32Constant(DAG, 0x3e9a209a)); + if (DisableScheduling) + DAG.AssignOrdering(LogOfExponent.getNode(), SDNodeOrder); + // Get the significand and build it into a floating-point number with // exponent of 1. - SDValue X = GetSignificand(DAG, Op1, dl); + SDValue X = GetSignificand(DAG, Op1, dl, SDNodeOrder); if (LimitFloatPrecision <= 6) { // For floating-point precision of 6: @@ -3246,6 +3815,14 @@ SelectionDAGBuilder::visitLog10(CallInst &I) { result = DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa); + + if (DisableScheduling) { + DAG.AssignOrdering(t0.getNode(), SDNodeOrder); + DAG.AssignOrdering(t1.getNode(), SDNodeOrder); + DAG.AssignOrdering(t2.getNode(), SDNodeOrder); + DAG.AssignOrdering(Log10ofMantissa.getNode(), SDNodeOrder); + DAG.AssignOrdering(result.getNode(), SDNodeOrder); + } } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) { // For floating-point precision of 12: // @@ -3268,6 +3845,16 @@ SelectionDAGBuilder::visitLog10(CallInst &I) { result = DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa); + + if (DisableScheduling) { + DAG.AssignOrdering(t0.getNode(), SDNodeOrder); + DAG.AssignOrdering(t1.getNode(), SDNodeOrder); + DAG.AssignOrdering(t2.getNode(), SDNodeOrder); + DAG.AssignOrdering(t3.getNode(), SDNodeOrder); + DAG.AssignOrdering(t4.getNode(), SDNodeOrder); + DAG.AssignOrdering(Log10ofMantissa.getNode(), SDNodeOrder); + DAG.AssignOrdering(result.getNode(), SDNodeOrder); + } } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18 // For floating-point precision of 18: // @@ -3298,12 +3885,29 @@ SelectionDAGBuilder::visitLog10(CallInst &I) { result = DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa); + + if (DisableScheduling) { + DAG.AssignOrdering(t0.getNode(), SDNodeOrder); + DAG.AssignOrdering(t1.getNode(), SDNodeOrder); + DAG.AssignOrdering(t2.getNode(), SDNodeOrder); + DAG.AssignOrdering(t3.getNode(), SDNodeOrder); + DAG.AssignOrdering(t4.getNode(), SDNodeOrder); + DAG.AssignOrdering(t5.getNode(), SDNodeOrder); + DAG.AssignOrdering(t6.getNode(), SDNodeOrder); + DAG.AssignOrdering(t7.getNode(), SDNodeOrder); + DAG.AssignOrdering(t8.getNode(), SDNodeOrder); + DAG.AssignOrdering(Log10ofMantissa.getNode(), SDNodeOrder); + DAG.AssignOrdering(result.getNode(), SDNodeOrder); + } } } else { // No special expansion. 
result = DAG.getNode(ISD::FLOG10, dl, getValue(I.getOperand(1)).getValueType(), getValue(I.getOperand(1))); + + if (DisableScheduling) + DAG.AssignOrdering(result.getNode(), SDNodeOrder); } setValue(&I, result); @@ -3322,6 +3926,9 @@ SelectionDAGBuilder::visitExp2(CallInst &I) { SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Op); + if (DisableScheduling) + DAG.AssignOrdering(IntegerPartOfX.getNode(), SDNodeOrder); + // FractionalPartOfX = x - (float)IntegerPartOfX; SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX); SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, Op, t1); @@ -3330,6 +3937,12 @@ SelectionDAGBuilder::visitExp2(CallInst &I) { IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX, DAG.getConstant(23, TLI.getPointerTy())); + if (DisableScheduling) { + DAG.AssignOrdering(t1.getNode(), SDNodeOrder); + DAG.AssignOrdering(X.getNode(), SDNodeOrder); + DAG.AssignOrdering(IntegerPartOfX.getNode(), SDNodeOrder); + } + if (LimitFloatPrecision <= 6) { // For floating-point precision of 6: // @@ -3351,6 +3964,16 @@ SelectionDAGBuilder::visitExp2(CallInst &I) { result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, TwoToFractionalPartOfX); + + if (DisableScheduling) { + DAG.AssignOrdering(t2.getNode(), SDNodeOrder); + DAG.AssignOrdering(t3.getNode(), SDNodeOrder); + DAG.AssignOrdering(t4.getNode(), SDNodeOrder); + DAG.AssignOrdering(t5.getNode(), SDNodeOrder); + DAG.AssignOrdering(t6.getNode(), SDNodeOrder); + DAG.AssignOrdering(TwoToFractionalPartOfX.getNode(), SDNodeOrder); + DAG.AssignOrdering(result.getNode(), SDNodeOrder); + } } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) { // For floating-point precision of 12: // @@ -3376,6 +3999,18 @@ SelectionDAGBuilder::visitExp2(CallInst &I) { result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, TwoToFractionalPartOfX); + + if (DisableScheduling) { + DAG.AssignOrdering(t2.getNode(), SDNodeOrder); + DAG.AssignOrdering(t3.getNode(), SDNodeOrder); + DAG.AssignOrdering(t4.getNode(), SDNodeOrder); + DAG.AssignOrdering(t5.getNode(), SDNodeOrder); + DAG.AssignOrdering(t6.getNode(), SDNodeOrder); + DAG.AssignOrdering(t7.getNode(), SDNodeOrder); + DAG.AssignOrdering(t8.getNode(), SDNodeOrder); + DAG.AssignOrdering(TwoToFractionalPartOfX.getNode(), SDNodeOrder); + DAG.AssignOrdering(result.getNode(), SDNodeOrder); + } } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18 // For floating-point precision of 18: // @@ -3412,12 +4047,33 @@ SelectionDAGBuilder::visitExp2(CallInst &I) { result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, TwoToFractionalPartOfX); + + if (DisableScheduling) { + DAG.AssignOrdering(t2.getNode(), SDNodeOrder); + DAG.AssignOrdering(t3.getNode(), SDNodeOrder); + DAG.AssignOrdering(t4.getNode(), SDNodeOrder); + DAG.AssignOrdering(t5.getNode(), SDNodeOrder); + DAG.AssignOrdering(t6.getNode(), SDNodeOrder); + DAG.AssignOrdering(t7.getNode(), SDNodeOrder); + DAG.AssignOrdering(t8.getNode(), SDNodeOrder); + DAG.AssignOrdering(t9.getNode(), SDNodeOrder); + DAG.AssignOrdering(t10.getNode(), SDNodeOrder); + DAG.AssignOrdering(t11.getNode(), SDNodeOrder); + DAG.AssignOrdering(t12.getNode(), SDNodeOrder); + DAG.AssignOrdering(t13.getNode(), SDNodeOrder); + DAG.AssignOrdering(t14.getNode(), SDNodeOrder); + DAG.AssignOrdering(TwoToFractionalPartOfX.getNode(), SDNodeOrder); + DAG.AssignOrdering(result.getNode(), SDNodeOrder); + } } } else { // No special expansion. 
result = DAG.getNode(ISD::FEXP2, dl, getValue(I.getOperand(1)).getValueType(), getValue(I.getOperand(1))); + + if (DisableScheduling) + DAG.AssignOrdering(result.getNode(), SDNodeOrder); } setValue(&I, result); @@ -3459,10 +4115,20 @@ SelectionDAGBuilder::visitPow(CallInst &I) { SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX); SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1); + if (DisableScheduling) { + DAG.AssignOrdering(t0.getNode(), SDNodeOrder); + DAG.AssignOrdering(t1.getNode(), SDNodeOrder); + DAG.AssignOrdering(IntegerPartOfX.getNode(), SDNodeOrder); + DAG.AssignOrdering(X.getNode(), SDNodeOrder); + } + // IntegerPartOfX <<= 23; IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX, DAG.getConstant(23, TLI.getPointerTy())); + if (DisableScheduling) + DAG.AssignOrdering(IntegerPartOfX.getNode(), SDNodeOrder); + if (LimitFloatPrecision <= 6) { // For floating-point precision of 6: // @@ -3484,6 +4150,16 @@ SelectionDAGBuilder::visitPow(CallInst &I) { result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, TwoToFractionalPartOfX); + + if (DisableScheduling) { + DAG.AssignOrdering(t2.getNode(), SDNodeOrder); + DAG.AssignOrdering(t3.getNode(), SDNodeOrder); + DAG.AssignOrdering(t4.getNode(), SDNodeOrder); + DAG.AssignOrdering(t5.getNode(), SDNodeOrder); + DAG.AssignOrdering(t6.getNode(), SDNodeOrder); + DAG.AssignOrdering(TwoToFractionalPartOfX.getNode(), SDNodeOrder); + DAG.AssignOrdering(result.getNode(), SDNodeOrder); + } } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) { // For floating-point precision of 12: // @@ -3509,6 +4185,18 @@ SelectionDAGBuilder::visitPow(CallInst &I) { result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, TwoToFractionalPartOfX); + + if (DisableScheduling) { + DAG.AssignOrdering(t2.getNode(), SDNodeOrder); + DAG.AssignOrdering(t3.getNode(), SDNodeOrder); + DAG.AssignOrdering(t4.getNode(), SDNodeOrder); + DAG.AssignOrdering(t5.getNode(), SDNodeOrder); + DAG.AssignOrdering(t6.getNode(), SDNodeOrder); + DAG.AssignOrdering(t7.getNode(), SDNodeOrder); + DAG.AssignOrdering(t8.getNode(), SDNodeOrder); + DAG.AssignOrdering(TwoToFractionalPartOfX.getNode(), SDNodeOrder); + DAG.AssignOrdering(result.getNode(), SDNodeOrder); + } } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18 // For floating-point precision of 18: // @@ -3545,6 +4233,24 @@ SelectionDAGBuilder::visitPow(CallInst &I) { result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, TwoToFractionalPartOfX); + + if (DisableScheduling) { + DAG.AssignOrdering(t2.getNode(), SDNodeOrder); + DAG.AssignOrdering(t3.getNode(), SDNodeOrder); + DAG.AssignOrdering(t4.getNode(), SDNodeOrder); + DAG.AssignOrdering(t5.getNode(), SDNodeOrder); + DAG.AssignOrdering(t6.getNode(), SDNodeOrder); + DAG.AssignOrdering(t7.getNode(), SDNodeOrder); + DAG.AssignOrdering(t8.getNode(), SDNodeOrder); + DAG.AssignOrdering(t9.getNode(), SDNodeOrder); + DAG.AssignOrdering(t10.getNode(), SDNodeOrder); + DAG.AssignOrdering(t11.getNode(), SDNodeOrder); + DAG.AssignOrdering(t12.getNode(), SDNodeOrder); + DAG.AssignOrdering(t13.getNode(), SDNodeOrder); + DAG.AssignOrdering(t14.getNode(), SDNodeOrder); + DAG.AssignOrdering(TwoToFractionalPartOfX.getNode(), SDNodeOrder); + DAG.AssignOrdering(result.getNode(), SDNodeOrder); + } } } else { // No special expansion. 
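[Reviewer note] For readers tracing the visitExp2/visitPow hunks above: the expansion rests on 2^x = 2^IntegerPartOfX * 2^FractionalPartOfX, where the integer part is spliced directly into the IEEE-754 single-precision exponent field (that is what the SHL by 23 and the later integer add are doing), and 2^fraction is approximated by a polynomial sized to LimitFloatPrecision. A minimal scalar sketch of the same trick; the helper name and the coefficients are illustrative stand-ins, not taken from the patch:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Scalar model of the lowered DAG above: FP_TO_SINT, FSUB, a small
// polynomial for 2^fraction, then an integer ADD of (IPart << 23) onto the
// float's bit pattern (the BIT_CONVERT / ISD::ADD / BIT_CONVERT triple).
static float Exp2Sketch(float X) {
  int IPart = (int)X;               // IntegerPartOfX
  float F = X - (float)IPart;       // FractionalPartOfX, in [0,1) for X >= 0

  // Degree-2 approximation of 2^F; illustrative coefficients rather than
  // the tuned getF32Constant() hex values the patch uses.
  float T = (0.25246442f * F + 0.73560762f) * F + 0.99753558f;

  uint32_t Bits;
  std::memcpy(&Bits, &T, sizeof(Bits));
  Bits += (uint32_t)IPart << 23;    // splice 2^IPart into the exponent field
  std::memcpy(&T, &Bits, sizeof(T));
  return T;
}

int main() {
  std::printf("%f vs. %f\n", Exp2Sketch(3.7f), std::exp2(3.7f));
  return 0;
}

The result agrees with exp2 to roughly the 6-bit precision the smallest polynomial targets; the higher-degree cases in the patch trade more FMUL/FADD nodes for tighter error bounds.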
@@ -3552,17 +4258,76 @@ SelectionDAGBuilder::visitPow(CallInst &I) { getValue(I.getOperand(1)).getValueType(), getValue(I.getOperand(1)), getValue(I.getOperand(2))); + + if (DisableScheduling) + DAG.AssignOrdering(result.getNode(), SDNodeOrder); } setValue(&I, result); } + +/// ExpandPowI - Expand a llvm.powi intrinsic. +static SDValue ExpandPowI(DebugLoc DL, SDValue LHS, SDValue RHS, + SelectionDAG &DAG) { + // If RHS is a constant, we can expand this out to a multiplication tree, + // otherwise we end up lowering to a call to __powidf2 (for example). When + // optimizing for size, we only want to do this if the expansion would produce + // a small number of multiplies, otherwise we do the full expansion. + if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) { + // Get the exponent as a positive value. + unsigned Val = RHSC->getSExtValue(); + if ((int)Val < 0) Val = -Val; + + // powi(x, 0) -> 1.0 + if (Val == 0) + return DAG.getConstantFP(1.0, LHS.getValueType()); + + Function *F = DAG.getMachineFunction().getFunction(); + if (!F->hasFnAttr(Attribute::OptimizeForSize) || + // If optimizing for size, don't insert too many multiplies. This + // inserts up to 5 multiplies. + CountPopulation_32(Val)+Log2_32(Val) < 7) { + // We use the simple binary decomposition method to generate the multiply + // sequence. There are more optimal ways to do this (for example, + // powi(x,15) generates one more multiply than it should), but this has + // the benefit of being both really simple and much better than a libcall. + SDValue Res; // Logically starts equal to 1.0 + SDValue CurSquare = LHS; + while (Val) { + if (Val & 1) { + if (Res.getNode()) + Res = DAG.getNode(ISD::FMUL, DL,Res.getValueType(), Res, CurSquare); + else + Res = CurSquare; // 1.0*CurSquare. + } + + CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(), + CurSquare, CurSquare); + Val >>= 1; + } + + // If the original was negative, invert the result, producing 1/(x*x*x). + if (RHSC->getSExtValue() < 0) + Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(), + DAG.getConstantFP(1.0, LHS.getValueType()), Res); + return Res; + } + } + + // Otherwise, expand to a libcall. + return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS); +} + + /// visitIntrinsicCall - Lower the call to the specified intrinsic function. If /// we want to emit this as a call to a named external function, return the name /// otherwise lower it and return null. const char * SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { DebugLoc dl = getCurDebugLoc(); + SDValue Res; + switch (Intrinsic) { default: // By default, turn this into a target intrinsic node. 
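[Reviewer note] The loop in ExpandPowI above is ordinary square-and-multiply: walk the exponent bit by bit from the low end, squaring a running power of x each step and folding it into the result when the bit is set, so an exponent n costs about popcount(n) + log2(n) multiplies — exactly the quantity the OptimizeForSize check compares against 7. A standalone scalar model of that loop (hypothetical helper, not LLVM API):

#include <cstdio>

// Mirrors the Res/CurSquare loop in ExpandPowI, on plain doubles.
static double PowiSketch(double X, int N) {
  unsigned Val = N < 0 ? -N : N;    // exponent magnitude
  double Res = 1.0;                 // "Res" logically starts at 1.0
  double CurSquare = X;             // X^1, X^2, X^4, X^8, ...
  while (Val) {
    if (Val & 1)
      Res *= CurSquare;             // fold in this power-of-two factor
    CurSquare *= CurSquare;
    Val >>= 1;
  }
  return N < 0 ? 1.0 / Res : Res;   // powi(x, -n) == 1 / powi(x, n)
}

int main() {
  std::printf("%g %g\n", PowiSketch(2.0, 15), PowiSketch(2.0, -2));
  return 0;  // prints 32768 0.25
}

As the comment in the hunk concedes, the final squaring is wasted for exponents like 15; the method is chosen for simplicity, and it still beats a __powidf2 libcall.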
@@ -3572,26 +4337,33 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { case Intrinsic::vaend: visitVAEnd(I); return 0; case Intrinsic::vacopy: visitVACopy(I); return 0; case Intrinsic::returnaddress: - setValue(&I, DAG.getNode(ISD::RETURNADDR, dl, TLI.getPointerTy(), - getValue(I.getOperand(1)))); + Res = DAG.getNode(ISD::RETURNADDR, dl, TLI.getPointerTy(), + getValue(I.getOperand(1))); + setValue(&I, Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; case Intrinsic::frameaddress: - setValue(&I, DAG.getNode(ISD::FRAMEADDR, dl, TLI.getPointerTy(), - getValue(I.getOperand(1)))); + Res = DAG.getNode(ISD::FRAMEADDR, dl, TLI.getPointerTy(), + getValue(I.getOperand(1))); + setValue(&I, Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; case Intrinsic::setjmp: return "_setjmp"+!TLI.usesUnderscoreSetJmp(); - break; case Intrinsic::longjmp: return "_longjmp"+!TLI.usesUnderscoreLongJmp(); - break; case Intrinsic::memcpy: { SDValue Op1 = getValue(I.getOperand(1)); SDValue Op2 = getValue(I.getOperand(2)); SDValue Op3 = getValue(I.getOperand(3)); unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue(); - DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false, - I.getOperand(1), 0, I.getOperand(2), 0)); + Res = DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false, + I.getOperand(1), 0, I.getOperand(2), 0); + DAG.setRoot(Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; } case Intrinsic::memset: { @@ -3599,8 +4371,11 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { SDValue Op2 = getValue(I.getOperand(2)); SDValue Op3 = getValue(I.getOperand(3)); unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue(); - DAG.setRoot(DAG.getMemset(getRoot(), dl, Op1, Op2, Op3, Align, - I.getOperand(1), 0)); + Res = DAG.getMemset(getRoot(), dl, Op1, Op2, Op3, Align, + I.getOperand(1), 0); + DAG.setRoot(Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; } case Intrinsic::memmove: { @@ -3616,30 +4391,36 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { Size = C->getZExtValue(); if (AA->alias(I.getOperand(1), Size, I.getOperand(2), Size) == AliasAnalysis::NoAlias) { - DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false, - I.getOperand(1), 0, I.getOperand(2), 0)); + Res = DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false, + I.getOperand(1), 0, I.getOperand(2), 0); + DAG.setRoot(Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; } - DAG.setRoot(DAG.getMemmove(getRoot(), dl, Op1, Op2, Op3, Align, - I.getOperand(1), 0, I.getOperand(2), 0)); + Res = DAG.getMemmove(getRoot(), dl, Op1, Op2, Op3, Align, + I.getOperand(1), 0, I.getOperand(2), 0); + DAG.setRoot(Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; } - case Intrinsic::dbg_stoppoint: + case Intrinsic::dbg_stoppoint: case Intrinsic::dbg_region_start: case Intrinsic::dbg_region_end: case Intrinsic::dbg_func_start: // FIXME - Remove these intrinsics once the dust settles. return 0; case Intrinsic::dbg_declare: { - if (OptLevel != CodeGenOpt::None) + if (OptLevel != CodeGenOpt::None) // FIXME: Variable debug info is not supported here.
return 0; DwarfWriter *DW = DAG.getDwarfWriter(); if (!DW) return 0; DbgDeclareInst &DI = cast<DbgDeclareInst>(I); - if (!isValidDebugInfoIntrinsic(DI, CodeGenOpt::None)) + if (!DIDescriptor::ValidDebugInfo(DI.getVariable(), CodeGenOpt::None)) return 0; MDNode *Variable = DI.getVariable(); @@ -3652,18 +4433,13 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { return 0; DenseMap<const AllocaInst*, int>::iterator SI = FuncInfo.StaticAllocaMap.find(AI); - if (SI == FuncInfo.StaticAllocaMap.end()) + if (SI == FuncInfo.StaticAllocaMap.end()) return 0; // VLAs. int FI = SI->second; - MachineModuleInfo *MMI = DAG.getMachineModuleInfo(); - if (MMI) { - MetadataContext &TheMetadata = - DI.getParent()->getContext().getMetadata(); - unsigned MDDbgKind = TheMetadata.getMDKind("dbg"); - MDNode *Dbg = TheMetadata.getMD(MDDbgKind, &DI); - MMI->setVariableDbgInfo(Variable, FI, Dbg); - } + if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) + if (MDNode *Dbg = DI.getMetadata("dbg")) + MMI->setVariableDbgInfo(Variable, FI, Dbg); return 0; } case Intrinsic::eh_exception: { @@ -3675,6 +4451,8 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { SDValue Op = DAG.getNode(ISD::EXCEPTIONADDR, dl, VTs, Ops, 1); setValue(&I, Op); DAG.setRoot(Op.getValue(1)); + if (DisableScheduling) + DAG.AssignOrdering(Op.getNode(), SDNodeOrder); return 0; } @@ -3701,7 +4479,12 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { DAG.setRoot(Op.getValue(1)); - setValue(&I, DAG.getSExtOrTrunc(Op, dl, MVT::i32)); + Res = DAG.getSExtOrTrunc(Op, dl, MVT::i32); + setValue(&I, Res); + if (DisableScheduling) { + DAG.AssignOrdering(Op.getNode(), SDNodeOrder); + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); + } return 0; } @@ -3711,14 +4494,16 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { if (MMI) { // Find the type id for the given typeinfo. GlobalVariable *GV = ExtractTypeInfo(I.getOperand(1)); - unsigned TypeID = MMI->getTypeIDFor(GV); - setValue(&I, DAG.getConstant(TypeID, MVT::i32)); + Res = DAG.getConstant(TypeID, MVT::i32); } else { // Return something different to eh_selector. 
- setValue(&I, DAG.getConstant(1, MVT::i32)); + Res = DAG.getConstant(1, MVT::i32); } + setValue(&I, Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; } @@ -3726,11 +4511,14 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { case Intrinsic::eh_return_i64: if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) { MMI->setCallsEHReturn(true); - DAG.setRoot(DAG.getNode(ISD::EH_RETURN, dl, - MVT::Other, - getControlRoot(), - getValue(I.getOperand(1)), - getValue(I.getOperand(2)))); + Res = DAG.getNode(ISD::EH_RETURN, dl, + MVT::Other, + getControlRoot(), + getValue(I.getOperand(1)), + getValue(I.getOperand(2))); + DAG.setRoot(Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); } else { setValue(&I, DAG.getConstant(0, TLI.getPointerTy())); } @@ -3740,26 +4528,28 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) { MMI->setCallsUnwindInit(true); } - return 0; - case Intrinsic::eh_dwarf_cfa: { EVT VT = getValue(I.getOperand(1)).getValueType(); SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), dl, TLI.getPointerTy()); - SDValue Offset = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, dl, TLI.getPointerTy()), CfaArg); - setValue(&I, DAG.getNode(ISD::ADD, dl, + SDValue FA = DAG.getNode(ISD::FRAMEADDR, dl, TLI.getPointerTy(), - DAG.getNode(ISD::FRAMEADDR, dl, - TLI.getPointerTy(), - DAG.getConstant(0, - TLI.getPointerTy())), - Offset)); + DAG.getConstant(0, TLI.getPointerTy())); + Res = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), + FA, Offset); + setValue(&I, Res); + if (DisableScheduling) { + DAG.AssignOrdering(CfaArg.getNode(), SDNodeOrder); + DAG.AssignOrdering(Offset.getNode(), SDNodeOrder); + DAG.AssignOrdering(FA.getNode(), SDNodeOrder); + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); + } return 0; } case Intrinsic::convertff: @@ -3784,36 +4574,48 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { case Intrinsic::convertuu: Code = ISD::CVT_UU; break; } EVT DestVT = TLI.getValueType(I.getType()); - Value* Op1 = I.getOperand(1); - setValue(&I, DAG.getConvertRndSat(DestVT, getCurDebugLoc(), getValue(Op1), - DAG.getValueType(DestVT), - DAG.getValueType(getValue(Op1).getValueType()), - getValue(I.getOperand(2)), - getValue(I.getOperand(3)), - Code)); + Value *Op1 = I.getOperand(1); + Res = DAG.getConvertRndSat(DestVT, getCurDebugLoc(), getValue(Op1), + DAG.getValueType(DestVT), + DAG.getValueType(getValue(Op1).getValueType()), + getValue(I.getOperand(2)), + getValue(I.getOperand(3)), + Code); + setValue(&I, Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; } - case Intrinsic::sqrt: - setValue(&I, DAG.getNode(ISD::FSQRT, dl, - getValue(I.getOperand(1)).getValueType(), - getValue(I.getOperand(1)))); + Res = DAG.getNode(ISD::FSQRT, dl, + getValue(I.getOperand(1)).getValueType(), + getValue(I.getOperand(1))); + setValue(&I, Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; case Intrinsic::powi: - setValue(&I, DAG.getNode(ISD::FPOWI, dl, - getValue(I.getOperand(1)).getValueType(), - getValue(I.getOperand(1)), - getValue(I.getOperand(2)))); + Res = ExpandPowI(dl, getValue(I.getOperand(1)), getValue(I.getOperand(2)), + DAG); + setValue(&I, Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; case Intrinsic::sin: - setValue(&I, 
DAG.getNode(ISD::FSIN, dl, - getValue(I.getOperand(1)).getValueType(), - getValue(I.getOperand(1)))); + Res = DAG.getNode(ISD::FSIN, dl, + getValue(I.getOperand(1)).getValueType(), + getValue(I.getOperand(1))); + setValue(&I, Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; case Intrinsic::cos: - setValue(&I, DAG.getNode(ISD::FCOS, dl, - getValue(I.getOperand(1)).getValueType(), - getValue(I.getOperand(1)))); + Res = DAG.getNode(ISD::FCOS, dl, + getValue(I.getOperand(1)).getValueType(), + getValue(I.getOperand(1))); + setValue(&I, Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; case Intrinsic::log: visitLog(I); @@ -3835,55 +4637,74 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { return 0; case Intrinsic::pcmarker: { SDValue Tmp = getValue(I.getOperand(1)); - DAG.setRoot(DAG.getNode(ISD::PCMARKER, dl, MVT::Other, getRoot(), Tmp)); + Res = DAG.getNode(ISD::PCMARKER, dl, MVT::Other, getRoot(), Tmp); + DAG.setRoot(Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; } case Intrinsic::readcyclecounter: { SDValue Op = getRoot(); - SDValue Tmp = DAG.getNode(ISD::READCYCLECOUNTER, dl, - DAG.getVTList(MVT::i64, MVT::Other), - &Op, 1); - setValue(&I, Tmp); - DAG.setRoot(Tmp.getValue(1)); + Res = DAG.getNode(ISD::READCYCLECOUNTER, dl, + DAG.getVTList(MVT::i64, MVT::Other), + &Op, 1); + setValue(&I, Res); + DAG.setRoot(Res.getValue(1)); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; } case Intrinsic::bswap: - setValue(&I, DAG.getNode(ISD::BSWAP, dl, - getValue(I.getOperand(1)).getValueType(), - getValue(I.getOperand(1)))); + Res = DAG.getNode(ISD::BSWAP, dl, + getValue(I.getOperand(1)).getValueType(), + getValue(I.getOperand(1))); + setValue(&I, Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; case Intrinsic::cttz: { SDValue Arg = getValue(I.getOperand(1)); EVT Ty = Arg.getValueType(); - SDValue result = DAG.getNode(ISD::CTTZ, dl, Ty, Arg); - setValue(&I, result); + Res = DAG.getNode(ISD::CTTZ, dl, Ty, Arg); + setValue(&I, Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; } case Intrinsic::ctlz: { SDValue Arg = getValue(I.getOperand(1)); EVT Ty = Arg.getValueType(); - SDValue result = DAG.getNode(ISD::CTLZ, dl, Ty, Arg); - setValue(&I, result); + Res = DAG.getNode(ISD::CTLZ, dl, Ty, Arg); + setValue(&I, Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; } case Intrinsic::ctpop: { SDValue Arg = getValue(I.getOperand(1)); EVT Ty = Arg.getValueType(); - SDValue result = DAG.getNode(ISD::CTPOP, dl, Ty, Arg); - setValue(&I, result); + Res = DAG.getNode(ISD::CTPOP, dl, Ty, Arg); + setValue(&I, Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; } case Intrinsic::stacksave: { SDValue Op = getRoot(); - SDValue Tmp = DAG.getNode(ISD::STACKSAVE, dl, - DAG.getVTList(TLI.getPointerTy(), MVT::Other), &Op, 1); - setValue(&I, Tmp); - DAG.setRoot(Tmp.getValue(1)); + Res = DAG.getNode(ISD::STACKSAVE, dl, + DAG.getVTList(TLI.getPointerTy(), MVT::Other), &Op, 1); + setValue(&I, Res); + DAG.setRoot(Res.getValue(1)); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; } case Intrinsic::stackrestore: { - SDValue Tmp = getValue(I.getOperand(1)); - DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, dl, MVT::Other, getRoot(), Tmp)); + Res = 
getValue(I.getOperand(1)); + Res = DAG.getNode(ISD::STACKRESTORE, dl, MVT::Other, getRoot(), Res); + DAG.setRoot(Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; } case Intrinsic::stackprotector: { @@ -3901,11 +4722,13 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { SDValue FIN = DAG.getFrameIndex(FI, PtrTy); // Store the stack protector onto the stack. - SDValue Result = DAG.getStore(getRoot(), getCurDebugLoc(), Src, FIN, - PseudoSourceValue::getFixedStack(FI), - 0, true); - setValue(&I, Result); - DAG.setRoot(Result); + Res = DAG.getStore(getRoot(), getCurDebugLoc(), Src, FIN, + PseudoSourceValue::getFixedStack(FI), + 0, true); + setValue(&I, Res); + DAG.setRoot(Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; } case Intrinsic::objectsize: { @@ -3917,10 +4740,14 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { SDValue Arg = getValue(I.getOperand(0)); EVT Ty = Arg.getValueType(); - if (CI->getZExtValue() < 2) - setValue(&I, DAG.getConstant(-1ULL, Ty)); + if (CI->getZExtValue() == 0) + Res = DAG.getConstant(-1ULL, Ty); else - setValue(&I, DAG.getConstant(0, Ty)); + Res = DAG.getConstant(0, Ty); + + setValue(&I, Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; } case Intrinsic::var_annotation: @@ -3938,15 +4765,16 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { Ops[4] = DAG.getSrcValue(I.getOperand(1)); Ops[5] = DAG.getSrcValue(F); - SDValue Tmp = DAG.getNode(ISD::TRAMPOLINE, dl, - DAG.getVTList(TLI.getPointerTy(), MVT::Other), - Ops, 6); + Res = DAG.getNode(ISD::TRAMPOLINE, dl, + DAG.getVTList(TLI.getPointerTy(), MVT::Other), + Ops, 6); - setValue(&I, Tmp); - DAG.setRoot(Tmp.getValue(1)); + setValue(&I, Res); + DAG.setRoot(Res.getValue(1)); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; } - case Intrinsic::gcroot: if (GFI) { Value *Alloca = I.getOperand(1); @@ -3956,22 +4784,22 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { GFI->addStackRoot(FI->getIndex(), TypeMap); } return 0; - case Intrinsic::gcread: case Intrinsic::gcwrite: llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!"); return 0; - - case Intrinsic::flt_rounds: { - setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, dl, MVT::i32)); + case Intrinsic::flt_rounds: + Res = DAG.getNode(ISD::FLT_ROUNDS_, dl, MVT::i32); + setValue(&I, Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; - } - - case Intrinsic::trap: { - DAG.setRoot(DAG.getNode(ISD::TRAP, dl,MVT::Other, getRoot())); + case Intrinsic::trap: + Res = DAG.getNode(ISD::TRAP, dl,MVT::Other, getRoot()); + DAG.setRoot(Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; - } - case Intrinsic::uadd_with_overflow: return implVisitAluOverflow(I, ISD::UADDO); case Intrinsic::sadd_with_overflow: @@ -3991,7 +4819,10 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { Ops[1] = getValue(I.getOperand(1)); Ops[2] = getValue(I.getOperand(2)); Ops[3] = getValue(I.getOperand(3)); - DAG.setRoot(DAG.getNode(ISD::PREFETCH, dl, MVT::Other, &Ops[0], 4)); + Res = DAG.getNode(ISD::PREFETCH, dl, MVT::Other, &Ops[0], 4); + DAG.setRoot(Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; } @@ -4001,7 +4832,10 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned 
Intrinsic) { for (int x = 1; x < 6; ++x) Ops[x] = getValue(I.getOperand(x)); - DAG.setRoot(DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, &Ops[0], 6)); + Res = DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, &Ops[0], 6); + DAG.setRoot(Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; } case Intrinsic::atomic_cmp_swap: { @@ -4016,6 +4850,8 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { I.getOperand(1)); setValue(&I, L); DAG.setRoot(L.getValue(1)); + if (DisableScheduling) + DAG.AssignOrdering(L.getNode(), SDNodeOrder); return 0; } case Intrinsic::atomic_load_add: @@ -4044,7 +4880,10 @@ SelectionDAGBuilder::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { case Intrinsic::invariant_start: case Intrinsic::lifetime_start: // Discard region information. - setValue(&I, DAG.getUNDEF(TLI.getPointerTy())); + Res = DAG.getUNDEF(TLI.getPointerTy()); + setValue(&I, Res); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), SDNodeOrder); return 0; case Intrinsic::invariant_end: case Intrinsic::lifetime_end: @@ -4143,11 +4982,10 @@ void SelectionDAGBuilder::LowerCallTo(CallSite CS, SDValue Callee, SmallVector<EVT, 4> OutVTs; SmallVector<ISD::ArgFlagsTy, 4> OutsFlags; SmallVector<uint64_t, 4> Offsets; - getReturnInfo(RetTy, CS.getAttributes().getRetAttributes(), - OutVTs, OutsFlags, TLI, &Offsets); - + getReturnInfo(RetTy, CS.getAttributes().getRetAttributes(), + OutVTs, OutsFlags, TLI, &Offsets); - bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(), + bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(), FTy->isVarArg(), OutVTs, OutsFlags, DAG); SDValue DemoteStackSlot; @@ -4219,14 +5057,16 @@ void SelectionDAGBuilder::LowerCallTo(CallSite CS, SDValue Callee, CS.getCallingConv(), isTailCall, !CS.getInstruction()->use_empty(), - Callee, Args, DAG, getCurDebugLoc()); + Callee, Args, DAG, getCurDebugLoc(), SDNodeOrder); assert((isTailCall || Result.second.getNode()) && "Non-null chain expected with non-tail call!"); assert((Result.second.getNode() || !Result.first.getNode()) && "Null value expected with tail call!"); - if (Result.first.getNode()) + if (Result.first.getNode()) { setValue(CS.getInstruction(), Result.first); - else if (!CanLowerReturn && Result.second.getNode()) { + if (DisableScheduling) + DAG.AssignOrdering(Result.first.getNode(), SDNodeOrder); + } else if (!CanLowerReturn && Result.second.getNode()) { // The instruction result is the result of loading from the // hidden sret parameter. 
SmallVector<EVT, 1> PVTs; @@ -4240,27 +5080,40 @@ void SelectionDAGBuilder::LowerCallTo(CallSite CS, SDValue Callee, SmallVector<SDValue, 4> Chains(NumValues); for (unsigned i = 0; i < NumValues; ++i) { + SDValue Add = DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT, + DemoteStackSlot, + DAG.getConstant(Offsets[i], PtrVT)); SDValue L = DAG.getLoad(OutVTs[i], getCurDebugLoc(), Result.second, - DAG.getNode(ISD::ADD, getCurDebugLoc(), PtrVT, DemoteStackSlot, - DAG.getConstant(Offsets[i], PtrVT)), - NULL, Offsets[i], false, 1); + Add, NULL, Offsets[i], false, 1); Values[i] = L; Chains[i] = L.getValue(1); } + SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other, &Chains[0], NumValues); PendingLoads.push_back(Chain); - setValue(CS.getInstruction(), DAG.getNode(ISD::MERGE_VALUES, - getCurDebugLoc(), DAG.getVTList(&OutVTs[0], NumValues), - &Values[0], NumValues)); + SDValue MV = DAG.getNode(ISD::MERGE_VALUES, + getCurDebugLoc(), + DAG.getVTList(&OutVTs[0], NumValues), + &Values[0], NumValues); + setValue(CS.getInstruction(), MV); + + if (DisableScheduling) { + DAG.AssignOrdering(Chain.getNode(), SDNodeOrder); + DAG.AssignOrdering(MV.getNode(), SDNodeOrder); + } } - // As a special case, a null chain means that a tail call has - // been emitted and the DAG root is already updated. - if (Result.second.getNode()) + + // As a special case, a null chain means that a tail call has been emitted and + // the DAG root is already updated. + if (Result.second.getNode()) { DAG.setRoot(Result.second); - else + if (DisableScheduling) + DAG.AssignOrdering(Result.second.getNode(), SDNodeOrder); + } else { HasTailCall = true; + } if (LandingPad && MMI) { // Insert a label at the end of the invoke call to mark the try range. This @@ -4274,6 +5127,140 @@ void SelectionDAGBuilder::LowerCallTo(CallSite CS, SDValue Callee, } } +/// IsOnlyUsedInZeroEqualityComparison - Return true if it only matters that the +/// value is equal or not-equal to zero. +static bool IsOnlyUsedInZeroEqualityComparison(Value *V) { + for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); + UI != E; ++UI) { + if (ICmpInst *IC = dyn_cast<ICmpInst>(*UI)) + if (IC->isEquality()) + if (Constant *C = dyn_cast<Constant>(IC->getOperand(1))) + if (C->isNullValue()) + continue; + // Unknown instruction. + return false; + } + return true; +} + +static SDValue getMemCmpLoad(Value *PtrVal, MVT LoadVT, const Type *LoadTy, + SelectionDAGBuilder &Builder) { + + // Check to see if this load can be trivially constant folded, e.g. if the + // input is from a string literal. + if (Constant *LoadInput = dyn_cast<Constant>(PtrVal)) { + // Cast pointer to the type we really want to load. + LoadInput = ConstantExpr::getBitCast(LoadInput, + PointerType::getUnqual(LoadTy)); + + if (Constant *LoadCst = ConstantFoldLoadFromConstPtr(LoadInput, Builder.TD)) + return Builder.getValue(LoadCst); + } + + // Otherwise, we have to emit the load. If the pointer is to unfoldable but + // still constant memory, the input chain can be the entry node. + SDValue Root; + bool ConstantMemory = false; + + // Do not serialize (non-volatile) loads of constant memory with anything. + if (Builder.AA->pointsToConstantMemory(PtrVal)) { + Root = Builder.DAG.getEntryNode(); + ConstantMemory = true; + } else { + // Do not serialize non-volatile loads against each other. 
+ Root = Builder.DAG.getRoot(); + } + + SDValue Ptr = Builder.getValue(PtrVal); + SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurDebugLoc(), Root, + Ptr, PtrVal /*SrcValue*/, 0/*SVOffset*/, + false /*volatile*/, 1 /* align=1 */); + + if (!ConstantMemory) + Builder.PendingLoads.push_back(LoadVal.getValue(1)); + return LoadVal; +} + + +/// visitMemCmpCall - See if we can lower a call to memcmp in an optimized form. +/// If so, return true and lower it, otherwise return false and it will be +/// lowered like a normal call. +bool SelectionDAGBuilder::visitMemCmpCall(CallInst &I) { + // Verify that the prototype makes sense. int memcmp(void*,void*,size_t) + if (I.getNumOperands() != 4) + return false; + + Value *LHS = I.getOperand(1), *RHS = I.getOperand(2); + if (!isa<PointerType>(LHS->getType()) || !isa<PointerType>(RHS->getType()) || + !isa<IntegerType>(I.getOperand(3)->getType()) || + !isa<IntegerType>(I.getType())) + return false; + + ConstantInt *Size = dyn_cast<ConstantInt>(I.getOperand(3)); + + // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS) != 0 + // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS) != 0 + if (Size && IsOnlyUsedInZeroEqualityComparison(&I)) { + bool ActuallyDoIt = true; + MVT LoadVT; + const Type *LoadTy; + switch (Size->getZExtValue()) { + default: + LoadVT = MVT::Other; + LoadTy = 0; + ActuallyDoIt = false; + break; + case 2: + LoadVT = MVT::i16; + LoadTy = Type::getInt16Ty(Size->getContext()); + break; + case 4: + LoadVT = MVT::i32; + LoadTy = Type::getInt32Ty(Size->getContext()); + break; + case 8: + LoadVT = MVT::i64; + LoadTy = Type::getInt64Ty(Size->getContext()); + break; + /* + case 16: + LoadVT = MVT::v4i32; + LoadTy = Type::getInt32Ty(Size->getContext()); + LoadTy = VectorType::get(LoadTy, 4); + break; + */ + } + + // This turns into unaligned loads. We only do this if the target natively + // supports the MVT we'll be loading or if it is small enough (<= 4) that + // we'll only produce a small number of byte loads. + + // Require that we can find a legal MVT, and only do this if the target + // supports unaligned loads of that type. Expanding into byte loads would + // bloat the code. + if (ActuallyDoIt && Size->getZExtValue() > 4) { + // TODO: Handle 5 byte compare as 4-byte + 1 byte. + // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads. + if (!TLI.isTypeLegal(LoadVT) ||!TLI.allowsUnalignedMemoryAccesses(LoadVT)) + ActuallyDoIt = false; + } + + if (ActuallyDoIt) { + SDValue LHSVal = getMemCmpLoad(LHS, LoadVT, LoadTy, *this); + SDValue RHSVal = getMemCmpLoad(RHS, LoadVT, LoadTy, *this); + + SDValue Res = DAG.getSetCC(getCurDebugLoc(), MVT::i1, LHSVal, RHSVal, + ISD::SETNE); + EVT CallVT = TLI.getValueType(I.getType(), true); + setValue(&I, DAG.getZExtOrTrunc(Res, getCurDebugLoc(), CallVT)); + return true; + } + } + + + return false; +} + void SelectionDAGBuilder::visitCall(CallInst &I) { const char *RenameFn = 0; @@ -4348,6 +5335,9 @@ void SelectionDAGBuilder::visitCall(CallInst &I) { Tmp.getValueType(), Tmp)); return; } + } else if (Name == "memcmp") { + if (visitMemCmpCall(I)) + return; } } } else if (isa<InlineAsm>(I.getOperand(0))) { @@ -4361,21 +5351,19 @@ void SelectionDAGBuilder::visitCall(CallInst &I) { else Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy()); - // Check if we can potentially perform a tail call. More detailed - // checking is be done within LowerCallTo, after more information - // about the call is known. + // Check if we can potentially perform a tail call. 
More detailed checking will + be done within LowerCallTo, after more information about the call is known. bool isTailCall = PerformTailCallOpt && I.isTailCall(); LowerCallTo(&I, Callee, isTailCall); } - /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from /// this value and returns the result as a ValueVT value. This uses /// Chain/Flag as the input and updates them for the output Chain/Flag. /// If the Flag pointer is NULL, no flag is used. SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl, - SDValue &Chain, + unsigned Order, SDValue &Chain, SDValue *Flag) const { // Assemble the legal parts into the final values. SmallVector<SDValue, 4> Values(ValueVTs.size()); @@ -4389,14 +5377,18 @@ SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl, Parts.resize(NumRegs); for (unsigned i = 0; i != NumRegs; ++i) { SDValue P; - if (Flag == 0) + if (Flag == 0) { P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT); - else { + } else { P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag); *Flag = P.getValue(2); } + Chain = P.getValue(1); + if (DisableScheduling) + DAG.AssignOrdering(P.getNode(), Order); + // If the source register was virtual and if we know something about it, // add an assert node. if (TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) && @@ -4435,6 +5427,8 @@ SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl, P = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl, RegisterVT, P, DAG.getValueType(FromVT)); + if (DisableScheduling) + DAG.AssignOrdering(P.getNode(), Order); } } } @@ -4442,15 +5436,20 @@ SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl, Parts[i] = P; } - Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), + Values[Value] = getCopyFromParts(DAG, dl, Order, Parts.begin(), NumRegs, RegisterVT, ValueVT); + if (DisableScheduling) + DAG.AssignOrdering(Values[Value].getNode(), Order); Part += NumRegs; Parts.clear(); } - return DAG.getNode(ISD::MERGE_VALUES, dl, - DAG.getVTList(&ValueVTs[0], ValueVTs.size()), - &Values[0], ValueVTs.size()); + SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl, + DAG.getVTList(&ValueVTs[0], ValueVTs.size()), + &Values[0], ValueVTs.size()); + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), Order); + return Res; } /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the @@ -4458,7 +5457,8 @@ SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl, /// Chain/Flag as the input and updates them for the output Chain/Flag. /// If the Flag pointer is NULL, no flag is used. void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl, - SDValue &Chain, SDValue *Flag) const { + unsigned Order, SDValue &Chain, + SDValue *Flag) const { // Get the list of the value's legal parts.
unsigned NumRegs = Regs.size(); SmallVector<SDValue, 8> Parts(NumRegs); @@ -4467,7 +5467,8 @@ void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl, unsigned NumParts = TLI->getNumRegisters(*DAG.getContext(), ValueVT); EVT RegisterVT = RegVTs[Value]; - getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), + getCopyToParts(DAG, dl, Order, + Val.getValue(Val.getResNo() + Value), &Parts[Part], NumParts, RegisterVT); Part += NumParts; } @@ -4476,13 +5477,17 @@ void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl, SmallVector<SDValue, 8> Chains(NumRegs); for (unsigned i = 0; i != NumRegs; ++i) { SDValue Part; - if (Flag == 0) + if (Flag == 0) { Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]); - else { + } else { Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag); *Flag = Part.getValue(1); } + Chains[i] = Part.getValue(0); + + if (DisableScheduling) + DAG.AssignOrdering(Part.getNode(), Order); } if (NumRegs == 1 || Flag) @@ -4499,6 +5504,9 @@ void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl, Chain = Chains[NumRegs-1]; else Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], NumRegs); + + if (DisableScheduling) + DAG.AssignOrdering(Chain.getNode(), Order); } /// AddInlineAsmOperands - Add this value to the specified inlineasm node @@ -4506,20 +5514,28 @@ void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl, /// values added into it. void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,unsigned MatchingIdx, - SelectionDAG &DAG, + SelectionDAG &DAG, unsigned Order, std::vector<SDValue> &Ops) const { - EVT IntPtrTy = DAG.getTargetLoweringInfo().getPointerTy(); assert(Regs.size() < (1 << 13) && "Too many inline asm outputs!"); unsigned Flag = Code | (Regs.size() << 3); if (HasMatching) Flag |= 0x80000000 | (MatchingIdx << 16); - Ops.push_back(DAG.getTargetConstant(Flag, IntPtrTy)); + SDValue Res = DAG.getTargetConstant(Flag, MVT::i32); + Ops.push_back(Res); + + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), Order); + for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) { unsigned NumRegs = TLI->getNumRegisters(*DAG.getContext(), ValueVTs[Value]); EVT RegisterVT = RegVTs[Value]; for (unsigned i = 0; i != NumRegs; ++i) { assert(Reg < Regs.size() && "Mismatch in # registers expected"); - Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT)); + SDValue Res = DAG.getRegister(Regs[Reg++], RegisterVT); + Ops.push_back(Res); + + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), Order); } } } @@ -4611,7 +5627,7 @@ public: /// getCallOperandValEVT - Return the EVT of the Value* that this operand /// corresponds to. If there is no Value* for this operand, it returns /// MVT::Other. - EVT getCallOperandValEVT(LLVMContext &Context, + EVT getCallOperandValEVT(LLVMContext &Context, const TargetLowering &TLI, const TargetData *TD) const { if (CallOperandVal == 0) return MVT::Other; @@ -4623,8 +5639,12 @@ public: // If this is an indirect operand, the operand is a pointer to the // accessed type. - if (isIndirect) - OpTy = cast<PointerType>(OpTy)->getElementType(); + if (isIndirect) { + const llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy); + if (!PtrTy) + llvm_report_error("Indirect operand for inline asm not a pointer!"); + OpTy = PtrTy->getElementType(); + } // If OpTy is not a single value, it may be a struct/union that we // can tile with integers. 
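[Reviewer note] For reference while reading AddInlineAsmOperands above: the operand-flag word it builds keeps the operand kind in the low 3 bits, the register count at bits 3-15 (hence the Regs.size() < (1 << 13) assert), and, for matching constraints, sets bit 31 and stores the matched-operand index at bits 16-30. A small standalone encoder/decoder with that layout, inferred from the shifts in this patch rather than quoted from any header:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Kind values as used in this file: 1 = REGUSE, 2 = REGDEF,
// 4 = MEM, 6 = EARLYCLOBBER REGDEF.
static uint32_t EncodeAsmOperandFlag(unsigned Kind, unsigned NumRegs,
                                     bool HasMatching, unsigned MatchingIdx) {
  assert(NumRegs < (1u << 13) && "Too many inline asm outputs!");
  uint32_t Flag = Kind | (NumRegs << 3);
  if (HasMatching)
    Flag |= 0x80000000u | (MatchingIdx << 16);   // tie to an earlier operand
  return Flag;
}

int main() {
  uint32_t F = EncodeAsmOperandFlag(/*REGDEF*/ 2, /*NumRegs*/ 2,
                                    /*HasMatching*/ true, /*Idx*/ 1);
  std::printf("kind=%u regs=%u matched=%u\n",
              F & 7, (F >> 3) & 0x1FFF, (F >> 16) & 0x7FFF);
  return 0;  // kind=2 regs=2 matched=1
}

This also explains why the patch switches the flag's constant type from the pointer-sized IntPtrTy to MVT::i32: the packed word is a fixed 32-bit layout regardless of target pointer width.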
@@ -4663,8 +5683,8 @@ private: /// GetRegistersForValue - Assign registers (virtual or physical) for the /// specified operand. We prefer to assign virtual registers, to allow the -/// register allocator handle the assignment process. However, if the asm uses -/// features that we can't model on machineinstrs, we have SDISel do the +/// register allocator to handle the assignment process. However, if the asm +/// uses features that we can't model on machineinstrs, we have SDISel do the /// allocation. This produces generally horrible, but correct, code. /// /// OpInfo describes the operand. @@ -4728,12 +5748,15 @@ GetRegistersForValue(SDISelAsmOperandInfo &OpInfo, // bitcast to the corresponding integer type. This turns an f64 value // into i64, which can be passed with two i32 values on a 32-bit // machine. - RegVT = EVT::getIntegerVT(Context, + RegVT = EVT::getIntegerVT(Context, OpInfo.ConstraintVT.getSizeInBits()); OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(), RegVT, OpInfo.CallOperand); OpInfo.ConstraintVT = RegVT; } + + if (DisableScheduling) + DAG.AssignOrdering(OpInfo.CallOperand.getNode(), SDNodeOrder); } NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT); @@ -4770,6 +5793,7 @@ GetRegistersForValue(SDISelAsmOperandInfo &OpInfo, Regs.push_back(*I); } } + OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT); const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo(); OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI); @@ -4791,7 +5815,7 @@ GetRegistersForValue(SDISelAsmOperandInfo &OpInfo, OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT); return; } - + // This is a reference to a register class that doesn't directly correspond // to an LLVM register class. Allocate NumRegs consecutive, available, // registers from the class. @@ -4853,7 +5877,7 @@ hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos, if (CType == TargetLowering::C_Memory) return true; } - + // Indirect operand accesses access memory. if (CI.isIndirect) return true; @@ -4878,9 +5902,9 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) { ConstraintInfos = IA->ParseConstraints(); bool hasMemory = hasInlineAsmMemConstraint(ConstraintInfos, TLI); - + SDValue Chain, Flag; - + // We won't need to flush pending loads if this asm doesn't touch // memory and is nonvolatile. if (hasMemory || IA->hasSideEffects()) @@ -5004,6 +6028,7 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) { // There is no longer a Value* corresponding to this operand. OpInfo.CallOperandVal = 0; + // It is now an indirect operand. OpInfo.isIndirect = true; } @@ -5013,8 +6038,8 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) { if (OpInfo.ConstraintType == TargetLowering::C_Register) GetRegistersForValue(OpInfo, OutputRegs, InputRegs); } - ConstraintInfos.clear(); + ConstraintInfos.clear(); // Second pass - Loop over all of the operands, assigning virtual or physregs // to register class operands. @@ -5088,7 +6113,8 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) { 2 /* REGDEF */ , false, 0, - DAG, AsmNodeOperands); + DAG, SDNodeOrder, + AsmNodeOperands); break; } case InlineAsm::isInput: { @@ -5130,15 +6156,15 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) { MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo(); for (unsigned i = 0, e = InlineAsm::getNumOperandRegisters(OpFlag); i != e; ++i) - MatchedRegs.Regs. 
- push_back(RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT))); + MatchedRegs.Regs.push_back + (RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT))); // Use the produced MatchedRegs object to MatchedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(), - Chain, &Flag); + SDNodeOrder, Chain, &Flag); MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/, true, OpInfo.getMatchedOperand(), - DAG, AsmNodeOperands); + DAG, SDNodeOrder, AsmNodeOperands); break; } else { assert(((OpFlag & 7) == 4) && "Unknown matching constraint!"); @@ -5198,10 +6224,11 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) { } OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(), - Chain, &Flag); + SDNodeOrder, Chain, &Flag); OpInfo.AssignedRegs.AddInlineAsmOperands(1/*REGUSE*/, false, 0, - DAG, AsmNodeOperands); + DAG, SDNodeOrder, + AsmNodeOperands); break; } case InlineAsm::isClobber: { @@ -5209,7 +6236,8 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) { // allocator is aware that the physreg got clobbered. if (!OpInfo.AssignedRegs.Regs.empty()) OpInfo.AssignedRegs.AddInlineAsmOperands(6 /* EARLYCLOBBER REGDEF */, - false, 0, DAG,AsmNodeOperands); + false, 0, DAG, SDNodeOrder, + AsmNodeOperands); break; } } @@ -5228,7 +6256,7 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) { // and set it as the value of the call. if (!RetValRegs.Regs.empty()) { SDValue Val = RetValRegs.getCopyFromRegs(DAG, getCurDebugLoc(), - Chain, &Flag); + SDNodeOrder, Chain, &Flag); // FIXME: Why don't we do this for inline asms with MRVs? if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) { @@ -5268,21 +6296,25 @@ void SelectionDAGBuilder::visitInlineAsm(CallSite CS) { RegsForValue &OutRegs = IndirectStoresToEmit[i].first; Value *Ptr = IndirectStoresToEmit[i].second; SDValue OutVal = OutRegs.getCopyFromRegs(DAG, getCurDebugLoc(), - Chain, &Flag); + SDNodeOrder, Chain, &Flag); StoresToEmit.push_back(std::make_pair(OutVal, Ptr)); } // Emit the non-flagged stores from the physregs. 
SmallVector<SDValue, 8> OutChains; - for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) - OutChains.push_back(DAG.getStore(Chain, getCurDebugLoc(), - StoresToEmit[i].first, - getValue(StoresToEmit[i].second), - StoresToEmit[i].second, 0)); + for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) { + SDValue Val = DAG.getStore(Chain, getCurDebugLoc(), + StoresToEmit[i].first, + getValue(StoresToEmit[i].second), + StoresToEmit[i].second, 0); + OutChains.push_back(Val); + } + if (!OutChains.empty()) Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other, &OutChains[0], OutChains.size()); + DAG.setRoot(Chain); } @@ -5328,8 +6360,8 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy, CallingConv::ID CallConv, bool isTailCall, bool isReturnValueUsed, SDValue Callee, - ArgListTy &Args, SelectionDAG &DAG, DebugLoc dl) { - + ArgListTy &Args, SelectionDAG &DAG, DebugLoc dl, + unsigned Order) { assert((!isTailCall || PerformTailCallOpt) && "isTailCall set when tail-call optimizations are disabled!"); @@ -5383,7 +6415,8 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy, else if (Args[i].isZExt) ExtendKind = ISD::ZERO_EXTEND; - getCopyToParts(DAG, dl, Op, &Parts[0], NumParts, PartVT, ExtendKind); + getCopyToParts(DAG, dl, Order, Op, &Parts[0], NumParts, + PartVT, ExtendKind); for (unsigned j = 0; j != NumParts; ++j) { // if it isn't first piece, alignment must be 1 @@ -5444,6 +6477,9 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy, "LowerCall emitted a value with the wrong type!"); }); + if (DisableScheduling) + DAG.AssignOrdering(Chain.getNode(), Order); + // For a tail call, the return value is merely live-out and there aren't // any nodes in the DAG representing it. Return a special value to // indicate that a tail call has been emitted and no more Instructions @@ -5468,9 +6504,11 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy, unsigned NumRegs = getNumRegisters(RetTy->getContext(), VT); SDValue ReturnValue = - getCopyFromParts(DAG, dl, &InVals[CurReg], NumRegs, RegisterVT, VT, - AssertOp); + getCopyFromParts(DAG, dl, Order, &InVals[CurReg], NumRegs, + RegisterVT, VT, AssertOp); ReturnValues.push_back(ReturnValue); + if (DisableScheduling) + DAG.AssignOrdering(ReturnValue.getNode(), Order); CurReg += NumRegs; } @@ -5483,7 +6521,8 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy, SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(&RetTys[0], RetTys.size()), &ReturnValues[0], ReturnValues.size()); - + if (DisableScheduling) + DAG.AssignOrdering(Res.getNode(), Order); return std::make_pair(Res, Chain); } @@ -5500,7 +6539,6 @@ SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) { return SDValue(); } - void SelectionDAGBuilder::CopyValueToVirtualRegister(Value *V, unsigned Reg) { SDValue Op = getValue(V); assert((Op.getOpcode() != ISD::CopyFromReg || @@ -5510,7 +6548,7 @@ void SelectionDAGBuilder::CopyValueToVirtualRegister(Value *V, unsigned Reg) { RegsForValue RFV(V->getContext(), TLI, Reg, V->getType()); SDValue Chain = DAG.getEntryNode(); - RFV.getCopyToRegs(Op, DAG, getCurDebugLoc(), Chain, 0); + RFV.getCopyToRegs(Op, DAG, getCurDebugLoc(), SDNodeOrder, Chain, 0); PendingExports.push_back(Chain); } @@ -5528,12 +6566,12 @@ void SelectionDAGISel::LowerArguments(BasicBlock *LLVMBB) { // Check whether the function can return without sret-demotion. 
SmallVector<EVT, 4> OutVTs; SmallVector<ISD::ArgFlagsTy, 4> OutsFlags; - getReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(), + getReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(), OutVTs, OutsFlags, TLI); FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo(); - FLI.CanLowerReturn = TLI.CanLowerReturn(F.getCallingConv(), F.isVarArg(), - OutVTs, OutsFlags, DAG); + FLI.CanLowerReturn = TLI.CanLowerReturn(F.getCallingConv(), F.isVarArg(), + OutVTs, OutsFlags, DAG); if (!FLI.CanLowerReturn) { // Put in an sret pointer parameter before all the other parameters. SmallVector<EVT, 1> ValueVTs; @@ -5613,12 +6651,14 @@ void SelectionDAGISel::LowerArguments(BasicBlock *LLVMBB) { "LowerFormalArguments didn't return a valid chain!"); assert(InVals.size() == Ins.size() && "LowerFormalArguments didn't emit the correct number of values!"); - DEBUG(for (unsigned i = 0, e = Ins.size(); i != e; ++i) { - assert(InVals[i].getNode() && - "LowerFormalArguments emitted a null value!"); - assert(Ins[i].VT == InVals[i].getValueType() && - "LowerFormalArguments emitted a value with the wrong type!"); - }); + DEBUG({ + for (unsigned i = 0, e = Ins.size(); i != e; ++i) { + assert(InVals[i].getNode() && + "LowerFormalArguments emitted a null value!"); + assert(Ins[i].VT == InVals[i].getValueType() && + "LowerFormalArguments emitted a value with the wrong type!"); + } + }); // Update the DAG with the new chain value resulting from argument lowering. DAG.setRoot(NewRoot); @@ -5634,20 +6674,22 @@ void SelectionDAGISel::LowerArguments(BasicBlock *LLVMBB) { EVT VT = ValueVTs[0]; EVT RegVT = TLI.getRegisterType(*CurDAG->getContext(), VT); ISD::NodeType AssertOp = ISD::DELETED_NODE; - SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, - VT, AssertOp); + SDValue ArgValue = getCopyFromParts(DAG, dl, 0, &InVals[0], 1, + RegVT, VT, AssertOp); MachineFunction& MF = SDB->DAG.getMachineFunction(); MachineRegisterInfo& RegInfo = MF.getRegInfo(); unsigned SRetReg = RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT)); FLI.DemoteRegister = SRetReg; - NewRoot = SDB->DAG.getCopyToReg(NewRoot, SDB->getCurDebugLoc(), SRetReg, ArgValue); + NewRoot = SDB->DAG.getCopyToReg(NewRoot, SDB->getCurDebugLoc(), + SRetReg, ArgValue); DAG.setRoot(NewRoot); - + // i indexes lowered arguments. Bump it past the hidden sret argument. // Idx indexes LLVM arguments. Don't touch it. ++i; } + for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I, ++Idx) { SmallVector<SDValue, 4> ArgValues; @@ -5666,19 +6708,25 @@ void SelectionDAGISel::LowerArguments(BasicBlock *LLVMBB) { else if (F.paramHasAttr(Idx, Attribute::ZExt)) AssertOp = ISD::AssertZext; - ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts, - PartVT, VT, AssertOp)); + ArgValues.push_back(getCopyFromParts(DAG, dl, 0, &InVals[i], + NumParts, PartVT, VT, + AssertOp)); } + i += NumParts; } + if (!I->use_empty()) { - SDB->setValue(I, DAG.getMergeValues(&ArgValues[0], NumValues, - SDB->getCurDebugLoc())); + SDValue Res = DAG.getMergeValues(&ArgValues[0], NumValues, + SDB->getCurDebugLoc()); + SDB->setValue(I, Res); + // If this argument is live outside of the entry block, insert a copy from // wherever we got it to the vreg that other BB's will reference it as. SDB->CopyToExportRegsIfNeeded(I); } } + assert(i == InVals.size() && "Argument register count mismatch!"); // Finally, if the target has anything special to do, allow it to do so.
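[Reviewer note] The sret-demotion path threaded through LowerCallTo and LowerArguments above is easiest to picture at the source level: when CanLowerReturn reports that a return value will not fit in registers, it is returned through a hidden pointer parameter inserted before the real arguments, and the caller reads the result back out of that stack slot (the DemoteStackSlot loads earlier in this file). A C++ illustration of the shape of the transformation, with hypothetical names:

#include <cstdio>

struct Big { long V[8]; };   // too large to return in registers

// Source-level signature:       Big Producer();
// After sret demotion, roughly: void Producer(Big *Sret);  // hidden param 0
static void ProducerDemoted(Big *Sret) {
  for (int I = 0; I != 8; ++I)
    Sret->V[I] = I;          // writes go straight to the caller's slot
}

int main() {
  Big B;                     // the caller-owned stack slot
  ProducerDemoted(&B);       // caller passes its address as argument 0
  std::printf("%ld\n", B.V[3]);  // 3 -- like the loads after the call above
  return 0;
}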
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h index 244f9b5..88a2017 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h +++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h @@ -91,11 +91,13 @@ class SelectionDAGBuilder { DenseMap<const Value*, SDValue> NodeMap; +public: /// PendingLoads - Loads are not emitted to the program immediately. We bunch /// them up and then emit token factor nodes when possible. This allows us to /// get simple disambiguation between loads without worrying about alias /// analysis. SmallVector<SDValue, 8> PendingLoads; +private: /// PendingExports - CopyToReg nodes that copy values to virtual registers /// for export to other blocks need to be emitted before any terminator @@ -104,6 +106,10 @@ class SelectionDAGBuilder { /// instructions. SmallVector<SDValue, 8> PendingExports; + /// SDNodeOrder - A unique monotonically increasing number used to order the + /// SDNodes we create. + unsigned SDNodeOrder; + /// Case - A struct to record the Value for a switch case, and the /// case's target basic block. struct Case { @@ -300,7 +306,7 @@ public: SelectionDAGBuilder(SelectionDAG &dag, TargetLowering &tli, FunctionLoweringInfo &funcinfo, CodeGenOpt::Level ol) - : CurDebugLoc(DebugLoc::getUnknownLoc()), + : CurDebugLoc(DebugLoc::getUnknownLoc()), SDNodeOrder(0), TLI(tli), DAG(dag), FuncInfo(funcinfo), OptLevel(ol), HasTailCall(false), Context(dag.getContext()) { @@ -332,6 +338,8 @@ public: DebugLoc getCurDebugLoc() const { return CurDebugLoc; } void setCurDebugLoc(DebugLoc dl) { CurDebugLoc = dl; } + unsigned getSDNodeOrder() const { return SDNodeOrder; } + void CopyValueToVirtualRegister(Value *V, unsigned Reg); void visit(Instruction &I); @@ -455,6 +463,8 @@ private: void visitStore(StoreInst &I); void visitPHI(PHINode &I) { } // PHI nodes are handled specially. void visitCall(CallInst &I); + bool visitMemCmpCall(CallInst &I); + void visitInlineAsm(CallSite CS); const char *visitIntrinsicCall(CallInst &I, unsigned Intrinsic); void visitTargetIntrinsic(CallInst &I, unsigned Intrinsic); diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp index a640c7d..05669c0 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -362,32 +362,29 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) { /// SetDebugLoc - Update MF's and SDB's DebugLocs if debug information is /// attached with this instruction. -static void SetDebugLoc(unsigned MDDbgKind, - MetadataContext &TheMetadata, - Instruction *I, +static void SetDebugLoc(unsigned MDDbgKind, Instruction *I, SelectionDAGBuilder *SDB, - FastISel *FastIS, - MachineFunction *MF) { - if (!isa<DbgInfoIntrinsic>(I)) - if (MDNode *Dbg = TheMetadata.getMD(MDDbgKind, I)) { - DILocation DILoc(Dbg); - DebugLoc Loc = ExtractDebugLocation(DILoc, MF->getDebugLocInfo()); - - SDB->setCurDebugLoc(Loc); - - if (FastIS) - FastIS->setCurDebugLoc(Loc); - - // If the function doesn't have a default debug location yet, set - // it. This is kind of a hack. 
- if (MF->getDefaultDebugLoc().isUnknown()) - MF->setDefaultDebugLoc(Loc); - } + FastISel *FastIS, MachineFunction *MF) { + if (isa<DbgInfoIntrinsic>(I)) return; + + if (MDNode *Dbg = I->getMetadata(MDDbgKind)) { + DILocation DILoc(Dbg); + DebugLoc Loc = ExtractDebugLocation(DILoc, MF->getDebugLocInfo()); + + SDB->setCurDebugLoc(Loc); + + if (FastIS) + FastIS->setCurDebugLoc(Loc); + + // If the function doesn't have a default debug location yet, set + // it. This is kind of a hack. + if (MF->getDefaultDebugLoc().isUnknown()) + MF->setDefaultDebugLoc(Loc); + } } /// ResetDebugLoc - Set MF's and SDB's DebugLocs to Unknown. -static void ResetDebugLoc(SelectionDAGBuilder *SDB, - FastISel *FastIS) { +static void ResetDebugLoc(SelectionDAGBuilder *SDB, FastISel *FastIS) { SDB->setCurDebugLoc(DebugLoc::getUnknownLoc()); if (FastIS) FastIS->setCurDebugLoc(DebugLoc::getUnknownLoc()); @@ -398,14 +395,12 @@ void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, BasicBlock::iterator End, bool &HadTailCall) { SDB->setCurrentBasicBlock(BB); - MetadataContext &TheMetadata = LLVMBB->getParent()->getContext().getMetadata(); - unsigned MDDbgKind = TheMetadata.getMDKind("dbg"); + unsigned MDDbgKind = LLVMBB->getContext().getMDKindID("dbg"); // Lower all of the non-terminator instructions. If a call is emitted // as a tail call, cease emitting nodes for this block. for (BasicBlock::iterator I = Begin; I != End && !SDB->HasTailCall; ++I) { - if (MDDbgKind) - SetDebugLoc(MDDbgKind, TheMetadata, I, SDB, 0, MF); + SetDebugLoc(MDDbgKind, I, SDB, 0, MF); if (!isa<TerminatorInst>(I)) { SDB->visit(*I); @@ -428,7 +423,7 @@ void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, HandlePHINodesInSuccessorBlocks(LLVMBB); // Lower the terminator after the copies are emitted. - SetDebugLoc(MDDbgKind, TheMetadata, LLVMBB->getTerminator(), SDB, 0, MF); + SetDebugLoc(MDDbgKind, LLVMBB->getTerminator(), SDB, 0, MF); SDB->visit(*LLVMBB->getTerminator()); ResetDebugLoc(SDB, 0); } @@ -567,9 +562,9 @@ void SelectionDAGISel::CodeGenAndEmitDAG() { if (Changed) { if (TimePassesIsEnabled) { NamedRegionTimer T("Type Legalization 2", GroupName); - Changed = CurDAG->LegalizeTypes(); + CurDAG->LegalizeTypes(); } else { - Changed = CurDAG->LegalizeTypes(); + CurDAG->LegalizeTypes(); } if (ViewDAGCombineLT) @@ -680,8 +675,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(Function &Fn, #endif ); - MetadataContext &TheMetadata = Fn.getContext().getMetadata(); - unsigned MDDbgKind = TheMetadata.getMDKind("dbg"); + unsigned MDDbgKind = Fn.getContext().getMDKindID("dbg"); // Iterate over all basic blocks in the function. for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) { @@ -779,8 +773,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(Function &Fn, break; } - if (MDDbgKind) - SetDebugLoc(MDDbgKind, TheMetadata, BI, SDB, FastIS, &MF); + SetDebugLoc(MDDbgKind, BI, SDB, FastIS, &MF); // First try normal tablegen-generated "fast" selection. if (FastIS->SelectInstruction(BI)) { @@ -1182,9 +1175,8 @@ SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops) { } // Add this to the output node. 
- EVT IntPtrTy = TLI.getPointerTy(); Ops.push_back(CurDAG->getTargetConstant(4/*MEM*/ | (SelOps.size()<< 3), - IntPtrTy)); + MVT::i32)); Ops.insert(Ops.end(), SelOps.begin(), SelOps.end()); i += 2; } diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp index 1026169..d9a5a13 100644 --- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -713,6 +713,10 @@ MVT::SimpleValueType TargetLowering::getSetCCResultType(EVT VT) const { return PointerTy.SimpleTy; } +MVT::SimpleValueType TargetLowering::getCmpLibcallReturnType() const { + return MVT::i32; // return the default value +} + /// getVectorTypeBreakdown - Vector types are broken down into some number of /// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32 /// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack. diff --git a/lib/CodeGen/SimpleRegisterCoalescing.cpp b/lib/CodeGen/SimpleRegisterCoalescing.cpp index ed407eb..6314331 100644 --- a/lib/CodeGen/SimpleRegisterCoalescing.cpp +++ b/lib/CodeGen/SimpleRegisterCoalescing.cpp @@ -1065,7 +1065,7 @@ SimpleRegisterCoalescing::isWinToJoinVRWithSrcPhysReg(MachineInstr *CopyMI, if (SuccMBB == CopyMBB) continue; if (DstInt.overlaps(li_->getMBBStartIdx(SuccMBB), - li_->getMBBEndIdx(SuccMBB).getNextIndex().getBaseIndex())) + li_->getMBBEndIdx(SuccMBB))) return false; } } @@ -1121,7 +1121,7 @@ SimpleRegisterCoalescing::isWinToJoinVRWithDstPhysReg(MachineInstr *CopyMI, if (PredMBB == SMBB) continue; if (SrcInt.overlaps(li_->getMBBStartIdx(PredMBB), - li_->getMBBEndIdx(PredMBB).getNextIndex().getBaseIndex())) + li_->getMBBEndIdx(PredMBB))) return false; } } @@ -2246,8 +2246,9 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS, continue; // Figure out the value # from the RHS. - LHSValsDefinedFromRHS[VNI]= - RHS.getLiveRangeContaining(VNI->def.getPrevSlot())->valno; + LiveRange *lr = RHS.getLiveRangeContaining(VNI->def.getPrevSlot()); + assert(lr && "Cannot find live range"); + LHSValsDefinedFromRHS[VNI] = lr->valno; } // Loop over the value numbers of the RHS, seeing if any are defined from @@ -2264,8 +2265,9 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS, continue; // Figure out the value # from the LHS. - RHSValsDefinedFromLHS[VNI]= - LHS.getLiveRangeContaining(VNI->def.getPrevSlot())->valno; + LiveRange *lr = LHS.getLiveRangeContaining(VNI->def.getPrevSlot()); + assert(lr && "Cannot find live range"); + RHSValsDefinedFromLHS[VNI] = lr->valno; } LHSValNoAssignments.resize(LHS.getNumValNums(), -1); diff --git a/lib/CodeGen/SimpleRegisterCoalescing.h b/lib/CodeGen/SimpleRegisterCoalescing.h index 605a740..f668064 100644 --- a/lib/CodeGen/SimpleRegisterCoalescing.h +++ b/lib/CodeGen/SimpleRegisterCoalescing.h @@ -33,7 +33,7 @@ namespace llvm { MachineInstr *MI; unsigned LoopDepth; CopyRec(MachineInstr *mi, unsigned depth) - : MI(mi), LoopDepth(depth) {}; + : MI(mi), LoopDepth(depth) {} }; class SimpleRegisterCoalescing : public MachineFunctionPass, @@ -85,7 +85,7 @@ namespace llvm { bool coalesceFunction(MachineFunction &mf, RegallocQuery &) { // This runs as an independent pass, so don't do anything. return false; - }; + } /// print - Implement the dump method. 
virtual void print(raw_ostream &O, const Module* = 0) const; diff --git a/lib/CodeGen/SlotIndexes.cpp b/lib/CodeGen/SlotIndexes.cpp index f85384b..782af12 100644 --- a/lib/CodeGen/SlotIndexes.cpp +++ b/lib/CodeGen/SlotIndexes.cpp @@ -92,13 +92,14 @@ bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) { functionSize = 0; unsigned index = 0; + push_back(createEntry(0, index)); + // Iterate over the function. for (MachineFunction::iterator mbbItr = mf->begin(), mbbEnd = mf->end(); mbbItr != mbbEnd; ++mbbItr) { MachineBasicBlock *mbb = &*mbbItr; // Insert an index for the MBB start. - push_back(createEntry(0, index)); SlotIndex blockStartIndex(back(), SlotIndex::LOAD); index += SlotIndex::NUM; @@ -137,16 +138,16 @@ bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) { index += SlotIndex::NUM; } - SlotIndex blockEndIndex(back(), SlotIndex::STORE); + // One blank instruction at the end. + push_back(createEntry(0, index)); + + SlotIndex blockEndIndex(back(), SlotIndex::LOAD); mbb2IdxMap.insert( std::make_pair(mbb, std::make_pair(blockStartIndex, blockEndIndex))); idx2MBBMap.push_back(IdxMBBPair(blockStartIndex, mbb)); } - // One blank instruction at the end. - push_back(createEntry(0, index)); - // Sort the Idx2MBBMap std::sort(idx2MBBMap.begin(), idx2MBBMap.end(), Idx2MBBCompare()); diff --git a/lib/CodeGen/Spiller.cpp b/lib/CodeGen/Spiller.cpp index bc246c1..bec9294 100644 --- a/lib/CodeGen/Spiller.cpp +++ b/lib/CodeGen/Spiller.cpp @@ -486,10 +486,10 @@ private: SlotIndex newKillRangeEnd = oldKillRange->end; oldKillRange->end = copyIdx.getDefIndex(); - if (newKillRangeEnd != lis->getMBBEndIdx(killMBB).getNextSlot()) { - assert(newKillRangeEnd > lis->getMBBEndIdx(killMBB).getNextSlot() && + if (newKillRangeEnd != lis->getMBBEndIdx(killMBB)) { + assert(newKillRangeEnd > lis->getMBBEndIdx(killMBB) && "PHI kill range doesn't reach kill-block end. Not sane."); - newLI->addRange(LiveRange(lis->getMBBEndIdx(killMBB).getNextSlot(), + newLI->addRange(LiveRange(lis->getMBBEndIdx(killMBB), newKillRangeEnd, newVNI)); } @@ -500,7 +500,7 @@ private: newKillVNI->addKill(lis->getMBBTerminatorGap(killMBB)); newKillVNI->setHasPHIKill(true); li->addRange(LiveRange(copyIdx.getDefIndex(), - lis->getMBBEndIdx(killMBB).getNextSlot(), + lis->getMBBEndIdx(killMBB), newKillVNI)); } diff --git a/lib/CompilerDriver/CompilationGraph.cpp b/lib/CompilerDriver/CompilationGraph.cpp index 3e6e050..56e5b9c 100644 --- a/lib/CompilerDriver/CompilationGraph.cpp +++ b/lib/CompilerDriver/CompilationGraph.cpp @@ -35,7 +35,7 @@ namespace llvmc { const std::string& LanguageMap::GetLanguage(const sys::Path& File) const { LanguageMap::const_iterator Lang = this->find(File.getSuffix()); if (Lang == this->end()) - throw std::runtime_error("Unknown suffix: " + File.getSuffix()); + throw std::runtime_error(("Unknown suffix: " + File.getSuffix()).str()); return Lang->second; } } diff --git a/lib/ExecutionEngine/JIT/JIT.cpp b/lib/ExecutionEngine/JIT/JIT.cpp index 26afa54..ebc2567 100644 --- a/lib/ExecutionEngine/JIT/JIT.cpp +++ b/lib/ExecutionEngine/JIT/JIT.cpp @@ -366,6 +366,32 @@ void JIT::deleteModuleProvider(ModuleProvider *MP, std::string *E) { } } +/// materializeFunction - make sure the given function is fully read. If the +/// module is corrupt, this returns true and fills in the optional string with +/// information about the problem. If successful, this returns false. +bool JIT::materializeFunction(Function *F, std::string *ErrInfo) { + // Read in the function if it exists in this Module.
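+ // ("Ghost" functions are those the lazy bitcode reader has declared
+ // but not yet deserialized; hasNotBeenReadFromBitcode() stays true
+ // until the owning ModuleProvider materializes the body.)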
+ if (F->hasNotBeenReadFromBitcode()) { + // Determine the module provider this function is provided by. + Module *M = F->getParent(); + ModuleProvider *MP = 0; + for (unsigned i = 0, e = Modules.size(); i != e; ++i) { + if (Modules[i]->getModule() == M) { + MP = Modules[i]; + break; + } + } + if (MP) + return MP->materializeFunction(F, ErrInfo); + + if (ErrInfo) + *ErrInfo = "Function isn't in a module we know about!"; + return true; + } + // Succeed if the function is already read. + return false; +} + /// run - Start execution with the specified function and arguments. /// GenericValue JIT::runFunction(Function *F, @@ -585,11 +611,13 @@ void JIT::runJITOnFunction(Function *F, MachineCodeInfo *MCI) { } }; MCIListener MCIL(MCI); - RegisterJITEventListener(&MCIL); + if (MCI) + RegisterJITEventListener(&MCIL); runJITOnFunctionUnlocked(F, locked); - UnregisterJITEventListener(&MCIL); + if (MCI) + UnregisterJITEventListener(&MCIL); } void JIT::runJITOnFunctionUnlocked(Function *F, const MutexGuard &locked) { @@ -607,6 +635,9 @@ void JIT::runJITOnFunctionUnlocked(Function *F, const MutexGuard &locked) { Function *PF = jitstate->getPendingFunctions(locked).back(); jitstate->getPendingFunctions(locked).pop_back(); + assert(!PF->hasAvailableExternallyLinkage() && + "Externally-defined function should not be in pending list."); + // JIT the function isAlreadyCodeGenerating = true; jitstate->getPM(locked).run(*PF); @@ -627,36 +658,19 @@ void *JIT::getPointerToFunction(Function *F) { return Addr; // Check if function already code gen'd MutexGuard locked(lock); - - // Now that this thread owns the lock, check if another thread has already - // code gen'd the function. - if (void *Addr = getPointerToGlobalIfAvailable(F)) - return Addr; - // Make sure we read in the function if it exists in this Module. - if (F->hasNotBeenReadFromBitcode()) { - // Determine the module provider this function is provided by. - Module *M = F->getParent(); - ModuleProvider *MP = 0; - for (unsigned i = 0, e = Modules.size(); i != e; ++i) { - if (Modules[i]->getModule() == M) { - MP = Modules[i]; - break; - } - } - assert(MP && "Function isn't in a module we know about!"); - - std::string ErrorMsg; - if (MP->materializeFunction(F, &ErrorMsg)) { - llvm_report_error("Error reading function '" + F->getName()+ - "' from bitcode file: " + ErrorMsg); - } - - // Now retry to get the address. - if (void *Addr = getPointerToGlobalIfAvailable(F)) - return Addr; + // Now that this thread owns the lock, make sure we read in the function if it + // exists in this Module. + std::string ErrorMsg; + if (materializeFunction(F, &ErrorMsg)) { + llvm_report_error("Error reading function '" + F->getName()+ + "' from bitcode file: " + ErrorMsg); } + // ... and check if another thread has already code gen'd the function. + if (void *Addr = getPointerToGlobalIfAvailable(F)) + return Addr; + if (F->isDeclaration() || F->hasAvailableExternallyLinkage()) { bool AbortOnFailure = !F->hasExternalWeakLinkage(); void *Addr = getPointerToNamedFunction(F->getName(), AbortOnFailure); diff --git a/lib/ExecutionEngine/JIT/JIT.h b/lib/ExecutionEngine/JIT/JIT.h index f165bd6..b6f74ff 100644 --- a/lib/ExecutionEngine/JIT/JIT.h +++ b/lib/ExecutionEngine/JIT/JIT.h @@ -104,6 +104,12 @@ public: /// the underlying module. virtual void deleteModuleProvider(ModuleProvider *P,std::string *ErrInfo = 0); + /// materializeFunction - make sure the given function is fully read. 
If the + /// module is corrupt, this returns true and fills in the optional string with + /// information about the problem. If successful, this returns false. + /// + bool materializeFunction(Function *F, std::string *ErrInfo = 0); + /// runFunction - Start execution with the specified function and arguments. /// virtual GenericValue runFunction(Function *F, diff --git a/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp b/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp index 0193486..c1051a9 100644 --- a/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp +++ b/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp @@ -429,13 +429,12 @@ unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF, // Asm->EOL("Region start"); - if (!S.EndLabel) { + if (!S.EndLabel) EndLabelPtr = (intptr_t)EndFunction; - JCE->emitInt32((intptr_t)EndFunction - BeginLabelPtr); - } else { + else EndLabelPtr = JCE->getLabelAddress(S.EndLabel); - JCE->emitInt32(EndLabelPtr - BeginLabelPtr); - } + + JCE->emitInt32(EndLabelPtr - BeginLabelPtr); //Asm->EOL("Region length"); if (!S.PadLabel) { diff --git a/lib/ExecutionEngine/JIT/JITEmitter.cpp b/lib/ExecutionEngine/JIT/JITEmitter.cpp index bbac762..ef323b5 100644 --- a/lib/ExecutionEngine/JIT/JITEmitter.cpp +++ b/lib/ExecutionEngine/JIT/JITEmitter.cpp @@ -59,6 +59,12 @@ STATISTIC(NumRetries, "Number of retries with more memory"); static JIT *TheJIT = 0; +// A declaration may stop being a declaration once it's fully read from bitcode. +// This function returns true if F is fully read and is still a declaration. +static bool isNonGhostDeclaration(const Function *F) { + return F->isDeclaration() && !F->hasNotBeenReadFromBitcode(); +} + //===----------------------------------------------------------------------===// // JIT lazy compilation code. // @@ -271,6 +277,10 @@ namespace { class JITEmitter : public JITCodeEmitter { JITMemoryManager *MemMgr; + // When outputting a function stub in the context of some other function, we + // save BufferBegin/BufferEnd/CurBufferPtr here. + uint8_t *SavedBufferBegin, *SavedBufferEnd, *SavedCurBufferPtr; + // When reattempting to JIT a function after running out of space, we store // the estimated size of the function we're trying to JIT here, so we can // ask the memory manager for at least this much space. When we @@ -396,11 +406,13 @@ namespace { void initJumpTableInfo(MachineJumpTableInfo *MJTI); void emitJumpTableInfo(MachineJumpTableInfo *MJTI); - virtual void startGVStub(BufferState &BS, const GlobalValue* GV, - unsigned StubSize, unsigned Alignment = 1); - virtual void startGVStub(BufferState &BS, void *Buffer, - unsigned StubSize); - virtual void* finishGVStub(BufferState &BS); + void startGVStub(const GlobalValue* GV, + unsigned StubSize, unsigned Alignment = 1); + void startGVStub(void *Buffer, unsigned StubSize); + void finishGVStub(); + virtual void *allocIndirectGV(const GlobalValue *GV, + const uint8_t *Buffer, size_t Size, + unsigned Alignment); /// allocateSpace - Reserves space in the current block if any, or /// allocate a new one of the given size. @@ -513,7 +525,7 @@ void *JITResolver::getLazyFunctionStub(Function *F) { // If this is an external declaration, attempt to resolve the address now // to place in the stub. - if (F->isDeclaration() && !F->hasNotBeenReadFromBitcode()) { + if (isNonGhostDeclaration(F) || F->hasAvailableExternallyLinkage()) { Actual = TheJIT->getPointerToFunction(F); // If we resolved the symbol to a null address (eg. 
a weak external) @@ -521,13 +533,12 @@ void *JITResolver::getLazyFunctionStub(Function *F) { if (!Actual) return 0; } - MachineCodeEmitter::BufferState BS; TargetJITInfo::StubLayout SL = TheJIT->getJITInfo().getStubLayout(); - JE.startGVStub(BS, F, SL.Size, SL.Alignment); + JE.startGVStub(F, SL.Size, SL.Alignment); // Codegen a new stub, calling the lazy resolver or the actual address of the // external function, if it was resolved. Stub = TheJIT->getJITInfo().emitFunctionStub(F, Actual, JE); - JE.finishGVStub(BS); + JE.finishGVStub(); if (Actual != (void*)(intptr_t)LazyResolverFn) { // If we are getting the stub for an external function, we really want the @@ -547,7 +558,7 @@ void *JITResolver::getLazyFunctionStub(Function *F) { // exist yet, add it to the JIT's work list so that we can fill in the stub // address later. if (!Actual && !TheJIT->isCompilingLazily()) - if (!F->isDeclaration() || F->hasNotBeenReadFromBitcode()) + if (!isNonGhostDeclaration(F) && !F->hasAvailableExternallyLinkage()) TheJIT->addPendingFunction(F); return Stub; @@ -579,11 +590,10 @@ void *JITResolver::getExternalFunctionStub(void *FnAddr) { void *&Stub = ExternalFnToStubMap[FnAddr]; if (Stub) return Stub; - MachineCodeEmitter::BufferState BS; TargetJITInfo::StubLayout SL = TheJIT->getJITInfo().getStubLayout(); - JE.startGVStub(BS, 0, SL.Size, SL.Alignment); + JE.startGVStub(0, SL.Size, SL.Alignment); Stub = TheJIT->getJITInfo().emitFunctionStub(0, FnAddr, JE); - JE.finishGVStub(BS); + JE.finishGVStub(); DEBUG(errs() << "JIT: Stub emitted at [" << Stub << "] for external function at '" << FnAddr << "'\n"); @@ -753,7 +763,7 @@ void *JITEmitter::getPointerToGlobal(GlobalValue *V, void *Reference, // If this is an external function pointer, we can force the JIT to // 'compile' it, which really just adds it to the map. 
- if (F->isDeclaration() && !F->hasNotBeenReadFromBitcode()) + if (isNonGhostDeclaration(F) || F->hasAvailableExternallyLinkage()) return TheJIT->getPointerToFunction(F); } @@ -1215,8 +1225,9 @@ bool JITEmitter::finishFunction(MachineFunction &F) { if (DwarfExceptionHandling || JITEmitDebugInfo) { uintptr_t ActualSize = 0; - BufferState BS; - SaveStateTo(BS); + SavedBufferBegin = BufferBegin; + SavedBufferEnd = BufferEnd; + SavedCurBufferPtr = CurBufferPtr; if (MemMgr->NeedsExactSize()) { ActualSize = DE->GetDwarfTableSizeInBytes(F, *this, FnStart, FnEnd); @@ -1232,7 +1243,9 @@ bool JITEmitter::finishFunction(MachineFunction &F) { MemMgr->endExceptionTable(F.getFunction(), BufferBegin, CurBufferPtr, FrameRegister); uint8_t *EhEnd = CurBufferPtr; - RestoreStateFrom(BS); + BufferBegin = SavedBufferBegin; + BufferEnd = SavedBufferEnd; + CurBufferPtr = SavedCurBufferPtr; if (DwarfExceptionHandling) { TheJIT->RegisterTable(FrameRegister); @@ -1438,27 +1451,39 @@ void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) { } } -void JITEmitter::startGVStub(BufferState &BS, const GlobalValue* GV, +void JITEmitter::startGVStub(const GlobalValue* GV, unsigned StubSize, unsigned Alignment) { - SaveStateTo(BS); + SavedBufferBegin = BufferBegin; + SavedBufferEnd = BufferEnd; + SavedCurBufferPtr = CurBufferPtr; BufferBegin = CurBufferPtr = MemMgr->allocateStub(GV, StubSize, Alignment); BufferEnd = BufferBegin+StubSize+1; } -void JITEmitter::startGVStub(BufferState &BS, void *Buffer, unsigned StubSize) { - SaveStateTo(BS); +void JITEmitter::startGVStub(void *Buffer, unsigned StubSize) { + SavedBufferBegin = BufferBegin; + SavedBufferEnd = BufferEnd; + SavedCurBufferPtr = CurBufferPtr; BufferBegin = CurBufferPtr = (uint8_t *)Buffer; BufferEnd = BufferBegin+StubSize+1; } -void *JITEmitter::finishGVStub(BufferState &BS) { +void JITEmitter::finishGVStub() { assert(CurBufferPtr != BufferEnd && "Stub overflowed allocated space."); NumBytes += getCurrentPCOffset(); - void *Result = BufferBegin; - RestoreStateFrom(BS); - return Result; + BufferBegin = SavedBufferBegin; + BufferEnd = SavedBufferEnd; + CurBufferPtr = SavedCurBufferPtr; +} + +void *JITEmitter::allocIndirectGV(const GlobalValue *GV, + const uint8_t *Buffer, size_t Size, + unsigned Alignment) { + uint8_t *IndGV = MemMgr->allocateStub(GV, Size, Alignment); + memcpy(IndGV, Buffer, Size); + return IndGV; } // getConstantPoolEntryAddress - Return the address of the 'ConstantNum' entry @@ -1543,14 +1568,14 @@ void JIT::updateFunctionStub(Function *F) { JITEmitter *JE = cast<JITEmitter>(getCodeEmitter()); void *Stub = JE->getJITResolver().getLazyFunctionStub(F); void *Addr = getPointerToGlobalIfAvailable(F); + assert(Addr != Stub && "Function must have non-stub address to be updated."); // Tell the target jit info to rewrite the stub at the specified address, // rather than creating a new one. - MachineCodeEmitter::BufferState BS; TargetJITInfo::StubLayout layout = getJITInfo().getStubLayout(); - JE->startGVStub(BS, Stub, layout.Size); + JE->startGVStub(Stub, layout.Size); getJITInfo().emitFunctionStub(F, Addr, *getCodeEmitter()); - JE->finishGVStub(BS); + JE->finishGVStub(); } /// freeMachineCodeForFunction - release machine code memory for given Function. 
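The startGVStub/finishGVStub changes above drop the caller-supplied BufferState and instead park the emitter's three buffer pointers in the new SavedBufferBegin/SavedBufferEnd/SavedCurBufferPtr members while a stub is written. The following is a minimal standalone sketch of that save/redirect/restore pattern, not the LLVM API itself; the class and method names (StubEmitterSketch, startStub, finishStub, StubBuf, StubSize) are hypothetical:

#include <stdint.h>

class StubEmitterSketch {
  // Active emission window, as in JITCodeEmitter.
  uint8_t *BufferBegin, *BufferEnd, *CurBufferPtr;
  // Saved copy of the interrupted function's window.
  uint8_t *SavedBufferBegin, *SavedBufferEnd, *SavedCurBufferPtr;
public:
  StubEmitterSketch()
    : BufferBegin(0), BufferEnd(0), CurBufferPtr(0),
      SavedBufferBegin(0), SavedBufferEnd(0), SavedCurBufferPtr(0) {}

  void startStub(uint8_t *StubBuf, unsigned StubSize) {
    // Park the in-progress function's emission window...
    SavedBufferBegin  = BufferBegin;
    SavedBufferEnd    = BufferEnd;
    SavedCurBufferPtr = CurBufferPtr;
    // ...and redirect all emission into the stub's buffer. The extra
    // byte mirrors the "+1" sentinel startGVStub uses so finishGVStub
    // can assert the stub never reached BufferEnd (i.e. overflowed).
    BufferBegin = CurBufferPtr = StubBuf;
    BufferEnd   = StubBuf + StubSize + 1;
  }

  void finishStub() {
    // Restore the interrupted function's emission window.
    BufferBegin  = SavedBufferBegin;
    BufferEnd    = SavedBufferEnd;
    CurBufferPtr = SavedCurBufferPtr;
  }
};

Because there is only a single saved slot, stub emission cannot nest; the patch accepts that constraint in exchange for retiring the BufferState plumbing, and its call sites only ever emit one stub at a time.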
diff --git a/lib/Linker/LinkModules.cpp b/lib/Linker/LinkModules.cpp index 8ece835..104cbe9 100644 --- a/lib/Linker/LinkModules.cpp +++ b/lib/Linker/LinkModules.cpp @@ -538,8 +538,8 @@ static void LinkNamedMDNodes(Module *Dest, Module *Src) { NamedMDNode::Create(SrcNMD, Dest); else { // Add Src elements into Dest node. - for (unsigned i = 0, e = SrcNMD->getNumElements(); i != e; ++i) - DestNMD->addElement(SrcNMD->getElement(i)); + for (unsigned i = 0, e = SrcNMD->getNumOperands(); i != e; ++i) + DestNMD->addOperand(SrcNMD->getOperand(i)); } } } @@ -664,7 +664,6 @@ static bool LinkGlobals(Module *Dest, const Module *Src, Var->eraseFromParent(); else cast<Function>(DGV)->eraseFromParent(); - DGV = NewDGV; // If the symbol table renamed the global, but it is an externally visible // symbol, DGV must be an existing global with internal linkage. Rename. diff --git a/lib/Support/APFloat.cpp b/lib/Support/APFloat.cpp index b9b323c..1e6d22f 100644 --- a/lib/Support/APFloat.cpp +++ b/lib/Support/APFloat.cpp @@ -3139,6 +3139,60 @@ APFloat::initFromAPInt(const APInt& api, bool isIEEE) llvm_unreachable(0); } +APFloat APFloat::getLargest(const fltSemantics &Sem, bool Negative) { + APFloat Val(Sem, fcNormal, Negative); + + // We want (in interchange format): + // sign = {Negative} + // exponent = 1..10 + // significand = 1..1 + + Val.exponent = Sem.maxExponent; // unbiased + + // 1-initialize all bits.... + Val.zeroSignificand(); + integerPart *significand = Val.significandParts(); + unsigned N = partCountForBits(Sem.precision); + for (unsigned i = 0; i != N; ++i) + significand[i] = ~((integerPart) 0); + + // ...and then clear the top bits for internal consistency. + significand[N-1] + &= (((integerPart) 1) << ((Sem.precision % integerPartWidth) - 1)) - 1; + + return Val; +} + +APFloat APFloat::getSmallest(const fltSemantics &Sem, bool Negative) { + APFloat Val(Sem, fcNormal, Negative); + + // We want (in interchange format): + // sign = {Negative} + // exponent = 0..0 + // significand = 0..01 + + Val.exponent = Sem.minExponent; // unbiased + Val.zeroSignificand(); + Val.significandParts()[0] = 1; + return Val; +} + +APFloat APFloat::getSmallestNormalized(const fltSemantics &Sem, bool Negative) { + APFloat Val(Sem, fcNormal, Negative); + + // We want (in interchange format): + // sign = {Negative} + // exponent = 0..0 + // significand = 10..0 + + Val.exponent = Sem.minExponent; + Val.zeroSignificand(); + Val.significandParts()[partCountForBits(Sem.precision)-1] + |= (((integerPart) 1) << ((Sem.precision % integerPartWidth) - 1)); + + return Val; +} + APFloat::APFloat(const APInt& api, bool isIEEE) { initFromAPInt(api, isIEEE); @@ -3155,3 +3209,297 @@ APFloat::APFloat(double d) APInt api = APInt(64, 0); initFromAPInt(api.doubleToBits(d)); } + +namespace { + static void append(SmallVectorImpl<char> &Buffer, + unsigned N, const char *Str) { + unsigned Start = Buffer.size(); + Buffer.set_size(Start + N); + memcpy(&Buffer[Start], Str, N); + } + + template <unsigned N> + void append(SmallVectorImpl<char> &Buffer, const char (&Str)[N]) { + append(Buffer, N, Str); + } + + /// Removes data from the given significand until it is no more + /// precise than is required for the desired precision. + void AdjustToPrecision(APInt &significand, + int &exp, unsigned FormatPrecision) { + unsigned bits = significand.getActiveBits(); + + // 196/59 is a very slight overestimate of lg_2(10). 
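+ // (Concretely: lg_2(10) = 3.32192..., while 196/59 = 3.32203..., and
+ // the "+ 58" below rounds the division up, so bitsRequired never
+ // underestimates the bits needed for FormatPrecision decimal digits.)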
+ unsigned bitsRequired = (FormatPrecision * 196 + 58) / 59; + + if (bits <= bitsRequired) return; + + unsigned tensRemovable = (bits - bitsRequired) * 59 / 196; + if (!tensRemovable) return; + + exp += tensRemovable; + + APInt divisor(significand.getBitWidth(), 1); + APInt powten(significand.getBitWidth(), 10); + while (true) { + if (tensRemovable & 1) + divisor *= powten; + tensRemovable >>= 1; + if (!tensRemovable) break; + powten *= powten; + } + + significand = significand.udiv(divisor); + + // Truncate the significand down to its active bit count, but + // don't try to drop below 32. + unsigned newPrecision = std::max(32U, significand.getActiveBits()); + significand.trunc(newPrecision); + } + + + void AdjustToPrecision(SmallVectorImpl<char> &buffer, + int &exp, unsigned FormatPrecision) { + unsigned N = buffer.size(); + if (N <= FormatPrecision) return; + + // The most significant figures are the last ones in the buffer. + unsigned FirstSignificant = N - FormatPrecision; + + // Round. + // FIXME: this probably shouldn't use 'round half up'. + + // Rounding down is just a truncation, except we also want to drop + // trailing zeros from the new result. + if (buffer[FirstSignificant - 1] < '5') { + while (buffer[FirstSignificant] == '0') + FirstSignificant++; + + exp += FirstSignificant; + buffer.erase(&buffer[0], &buffer[FirstSignificant]); + return; + } + + // Rounding up requires a decimal add-with-carry. If we continue + // the carry, the newly-introduced zeros will just be truncated. + for (unsigned I = FirstSignificant; I != N; ++I) { + if (buffer[I] == '9') { + FirstSignificant++; + } else { + buffer[I]++; + break; + } + } + + // If we carried through, we have exactly one digit of precision. + if (FirstSignificant == N) { + exp += FirstSignificant; + buffer.clear(); + buffer.push_back('1'); + return; + } + + exp += FirstSignificant; + buffer.erase(&buffer[0], &buffer[FirstSignificant]); + } +} + +void APFloat::toString(SmallVectorImpl<char> &Str, + unsigned FormatPrecision, + unsigned FormatMaxPadding) { + switch (category) { + case fcInfinity: + if (isNegative()) + return append(Str, "-Inf"); + else + return append(Str, "+Inf"); + + case fcNaN: return append(Str, "NaN"); + + case fcZero: + if (isNegative()) + Str.push_back('-'); + + if (!FormatMaxPadding) + append(Str, "0.0E+0"); + else + Str.push_back('0'); + return; + + case fcNormal: + break; + } + + if (isNegative()) + Str.push_back('-'); + + // Decompose the number into an APInt and an exponent. + int exp = exponent - ((int) semantics->precision - 1); + APInt significand(semantics->precision, + partCountForBits(semantics->precision), + significandParts()); + + // Set FormatPrecision if zero. We want to do this before we + // truncate trailing zeros, as those are part of the precision. + if (!FormatPrecision) { + // It's an interesting question whether to use the nominal + // precision or the active precision here for denormals. + + // FormatPrecision = ceil(significandBits / lg_2(10)) + FormatPrecision = (semantics->precision * 59 + 195) / 196; + } + + // Ignore trailing binary zeros. + int trailingZeros = significand.countTrailingZeros(); + exp += trailingZeros; + significand = significand.lshr(trailingZeros); + + // Change the exponent from 2^e to 10^e. + if (exp == 0) { + // Nothing to do. + } else if (exp > 0) { + // Just shift left. 
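+ // (For positive exp the value significand * 2^exp is an integer, so
+ // zero-extending by exp bits first guarantees the shift drops nothing.)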
+ significand.zext(semantics->precision + exp); + significand <<= exp; + exp = 0; + } else { /* exp < 0 */ + int texp = -exp; + + // We transform this using the identity: + // (N)(2^-e) == (N)(5^e)(10^-e) + // This means we have to multiply N (the significand) by 5^e. + // To avoid overflow, we have to operate on numbers large + // enough to store N * 5^e: + // log2(N * 5^e) == log2(N) + e * log2(5) + // <= semantics->precision + e * 137 / 59 + // (log_2(5) ~ 2.321928 < 2.322034 ~ 137/59) + + unsigned precision = semantics->precision + 137 * texp / 59; + + // Multiply significand by 5^e. + // N * 5^0101 == N * 5^(1*1) * 5^(0*2) * 5^(1*4) * 5^(0*8) + significand.zext(precision); + APInt five_to_the_i(precision, 5); + while (true) { + if (texp & 1) significand *= five_to_the_i; + + texp >>= 1; + if (!texp) break; + five_to_the_i *= five_to_the_i; + } + } + + AdjustToPrecision(significand, exp, FormatPrecision); + + llvm::SmallVector<char, 256> buffer; + + // Fill the buffer. + unsigned precision = significand.getBitWidth(); + APInt ten(precision, 10); + APInt digit(precision, 0); + + bool inTrail = true; + while (significand != 0) { + // digit <- significand % 10 + // significand <- significand / 10 + APInt::udivrem(significand, ten, significand, digit); + + unsigned d = digit.getZExtValue(); + + // Drop trailing zeros. + if (inTrail && !d) exp++; + else { + buffer.push_back((char) ('0' + d)); + inTrail = false; + } + } + + assert(!buffer.empty() && "no characters in buffer!"); + + // Drop down to FormatPrecision. + // TODO: don't do more precise calculations above than are required. + AdjustToPrecision(buffer, exp, FormatPrecision); + + unsigned NDigits = buffer.size(); + + // Check whether we should use scientific notation. + bool FormatScientific; + if (!FormatMaxPadding) + FormatScientific = true; + else { + if (exp >= 0) { + // 765e3 --> 765000 + // ^^^ + // But we shouldn't make the number look more precise than it is. + FormatScientific = ((unsigned) exp > FormatMaxPadding || + NDigits + (unsigned) exp > FormatPrecision); + } else { + // Power of the most significant digit. + int MSD = exp + (int) (NDigits - 1); + if (MSD >= 0) { + // 765e-2 == 7.65 + FormatScientific = false; + } else { + // 765e-5 == 0.00765 + // ^ ^^ + FormatScientific = ((unsigned) -MSD) > FormatMaxPadding; + } + } + } + + // Scientific formatting is pretty straightforward. + if (FormatScientific) { + exp += (NDigits - 1); + + Str.push_back(buffer[NDigits-1]); + Str.push_back('.'); + if (NDigits == 1) + Str.push_back('0'); + else + for (unsigned I = 1; I != NDigits; ++I) + Str.push_back(buffer[NDigits-1-I]); + Str.push_back('E'); + + Str.push_back(exp >= 0 ? '+' : '-'); + if (exp < 0) exp = -exp; + SmallVector<char, 6> expbuf; + do { + expbuf.push_back((char) ('0' + (exp % 10))); + exp /= 10; + } while (exp); + for (unsigned I = 0, E = expbuf.size(); I != E; ++I) + Str.push_back(expbuf[E-1-I]); + return; + } + + // Non-scientific, positive exponents. + if (exp >= 0) { + for (unsigned I = 0; I != NDigits; ++I) + Str.push_back(buffer[NDigits-1-I]); + for (unsigned I = 0; I != (unsigned) exp; ++I) + Str.push_back('0'); + return; + } + + // Non-scientific, negative exponents. + + // The number of digits to the left of the decimal point. 
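+ // (Reusing the examples above: 765e-2 gives NDigits = 3, exp = -2,
+ // so NWholeDigits = 1 and we print "7.65"; 765e-5 gives
+ // NWholeDigits = -2 and we print "0.00" before the digits.)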
+ int NWholeDigits = exp + (int) NDigits; + + unsigned I = 0; + if (NWholeDigits > 0) { + for (; I != (unsigned) NWholeDigits; ++I) + Str.push_back(buffer[NDigits-I-1]); + Str.push_back('.'); + } else { + unsigned NZeros = 1 + (unsigned) -NWholeDigits; + + Str.push_back('0'); + Str.push_back('.'); + for (unsigned Z = 1; Z != NZeros; ++Z) + Str.push_back('0'); + } + + for (; I != NDigits; ++I) + Str.push_back(buffer[NDigits-I-1]); +} diff --git a/lib/Support/APInt.cpp b/lib/Support/APInt.cpp index 56d4773..9532e1e 100644 --- a/lib/Support/APInt.cpp +++ b/lib/Support/APInt.cpp @@ -2012,8 +2012,8 @@ void APInt::udivrem(const APInt &LHS, const APInt &RHS, } if (lhsWords < rhsWords || LHS.ult(RHS)) { - Quotient = 0; // X / Y ===> 0, iff X < Y Remainder = LHS; // X % Y ===> X, iff X < Y + Quotient = 0; // X / Y ===> 0, iff X < Y return; } diff --git a/lib/Support/CMakeLists.txt b/lib/Support/CMakeLists.txt index ac736dc..f1347f9 100644 --- a/lib/Support/CMakeLists.txt +++ b/lib/Support/CMakeLists.txt @@ -3,6 +3,7 @@ add_llvm_library(LLVMSupport APInt.cpp APSInt.cpp Allocator.cpp + circular_raw_ostream.cpp CommandLine.cpp ConstantRange.cpp Debug.cpp @@ -23,6 +24,7 @@ add_llvm_library(LLVMSupport Regex.cpp SlowOperationInformer.cpp SmallPtrSet.cpp + SmallVector.cpp SourceMgr.cpp Statistic.cpp StringExtras.cpp diff --git a/lib/Support/Debug.cpp b/lib/Support/Debug.cpp index 50abe01..a035771 100644 --- a/lib/Support/Debug.cpp +++ b/lib/Support/Debug.cpp @@ -25,6 +25,9 @@ #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" +#include "llvm/Support/circular_raw_ostream.h" +#include "llvm/System/Signals.h" + using namespace llvm; // All Debug.h functionality is a no-op in NDEBUG mode. @@ -37,6 +40,16 @@ static cl::opt<bool, true> Debug("debug", cl::desc("Enable debug output"), cl::Hidden, cl::location(DebugFlag)); +// -debug-buffer-size - Buffer the last N characters of debug output +// until program termination. +static cl::opt<unsigned> +DebugBufferSize("debug-buffer-size", + cl::desc("Buffer the last N characters of debug output " + "until program termination. " + "[default 0 -- immediate print-out]"), + cl::Hidden, + cl::init(0)); + static std::string CurrentDebugType; static struct DebugOnlyOpt { void operator=(const std::string &Val) const { @@ -50,6 +63,18 @@ DebugOnly("debug-only", cl::desc("Enable a specific type of debug output"), cl::Hidden, cl::value_desc("debug string"), cl::location(DebugOnlyOptLoc), cl::ValueRequired); +// Signal handlers - dump debug output on termination. +static void debug_user_sig_handler(void *Cookie) +{ + // This is a bit sneaky. Since this is under #ifndef NDEBUG, we + // know that debug mode is enabled and dbgs() really is a + // circular_raw_ostream. If NDEBUG is defined, then dbgs() == + // errs() but this will never be invoked. + llvm::circular_raw_ostream *dbgout = + static_cast<llvm::circular_raw_ostream *>(&llvm::dbgs()); + dbgout->flushBufferWithBanner(); +} + // isCurrentDebugType - Return true if the specified string is the debug type // specified on the command line, or if none was specified on the command line // with the -debug-only=X option. @@ -66,9 +91,38 @@ void llvm::SetCurrentDebugType(const char *Type) { CurrentDebugType = Type; } +/// dbgs - Return a circular-buffered debug stream. +raw_ostream &llvm::dbgs() { + // Do one-time initialization in a thread-safe way. + static struct dbgstream { + circular_raw_ostream strm; + + dbgstream() : + strm(errs(), "*** Debug Log Output ***\n", + (!EnableDebugBuffering || !DebugFlag) ?
0 : DebugBufferSize) { + if (EnableDebugBuffering && DebugFlag && DebugBufferSize != 0) + // TODO: Add a handler for SIGUSR1-type signals so the user can + // force a debug dump. + sys::AddSignalHandler(&debug_user_sig_handler, 0); + // Otherwise we've already set the debug stream buffer size to + // zero, disabling buffering so it will output directly to errs(). + } + } thestrm; + + return thestrm.strm; +} + #else // Avoid "has no symbols" warning. namespace llvm { -int Debug_dummy = 0; + /// dbgs - Return errs(). + raw_ostream &dbgs() { + return errs(); + } } + #endif + +/// EnableDebugBuffering - Turn on signal handler installation. +/// +bool llvm::EnableDebugBuffering = false; diff --git a/lib/Support/Dwarf.cpp b/lib/Support/Dwarf.cpp index 8b688ca..d1230b9 100644 --- a/lib/Support/Dwarf.cpp +++ b/lib/Support/Dwarf.cpp @@ -12,579 +12,549 @@ //===----------------------------------------------------------------------===// #include "llvm/Support/Dwarf.h" -#include "llvm/Support/ErrorHandling.h" - -#include <cassert> - -namespace llvm { - -namespace dwarf { +using namespace llvm; +using namespace dwarf; /// TagString - Return the string for the specified tag. /// -const char *TagString(unsigned Tag) { +const char *llvm::dwarf::TagString(unsigned Tag) { switch (Tag) { - case DW_TAG_array_type: return "DW_TAG_array_type"; - case DW_TAG_class_type: return "DW_TAG_class_type"; - case DW_TAG_entry_point: return "DW_TAG_entry_point"; - case DW_TAG_enumeration_type: return "DW_TAG_enumeration_type"; - case DW_TAG_formal_parameter: return "DW_TAG_formal_parameter"; - case DW_TAG_imported_declaration: return "DW_TAG_imported_declaration"; - case DW_TAG_label: return "DW_TAG_label"; - case DW_TAG_lexical_block: return "DW_TAG_lexical_block"; - case DW_TAG_member: return "DW_TAG_member"; - case DW_TAG_pointer_type: return "DW_TAG_pointer_type"; - case DW_TAG_reference_type: return "DW_TAG_reference_type"; - case DW_TAG_compile_unit: return "DW_TAG_compile_unit"; - case DW_TAG_string_type: return "DW_TAG_string_type"; - case DW_TAG_structure_type: return "DW_TAG_structure_type"; - case DW_TAG_subroutine_type: return "DW_TAG_subroutine_type"; - case DW_TAG_typedef: return "DW_TAG_typedef"; - case DW_TAG_union_type: return "DW_TAG_union_type"; - case DW_TAG_unspecified_parameters: return "DW_TAG_unspecified_parameters"; - case DW_TAG_variant: return "DW_TAG_variant"; - case DW_TAG_common_block: return "DW_TAG_common_block"; - case DW_TAG_common_inclusion: return "DW_TAG_common_inclusion"; - case DW_TAG_inheritance: return "DW_TAG_inheritance"; - case DW_TAG_inlined_subroutine: return "DW_TAG_inlined_subroutine"; - case DW_TAG_module: return "DW_TAG_module"; - case DW_TAG_ptr_to_member_type: return "DW_TAG_ptr_to_member_type"; - case DW_TAG_set_type: return "DW_TAG_set_type"; - case DW_TAG_subrange_type: return "DW_TAG_subrange_type"; - case DW_TAG_with_stmt: return "DW_TAG_with_stmt"; - case DW_TAG_access_declaration: return "DW_TAG_access_declaration"; - case DW_TAG_base_type: return "DW_TAG_base_type"; - case DW_TAG_catch_block: return "DW_TAG_catch_block"; - case DW_TAG_const_type: return "DW_TAG_const_type"; - case DW_TAG_constant: return "DW_TAG_constant"; - case DW_TAG_enumerator: return "DW_TAG_enumerator"; - case DW_TAG_file_type: return "DW_TAG_file_type"; - case DW_TAG_friend: return "DW_TAG_friend"; - case DW_TAG_namelist: return "DW_TAG_namelist"; - case DW_TAG_namelist_item: return "DW_TAG_namelist_item"; - case DW_TAG_packed_type: return "DW_TAG_packed_type"; - case DW_TAG_subprogram:
return "DW_TAG_subprogram"; - case DW_TAG_template_type_parameter: return "DW_TAG_template_type_parameter"; - case DW_TAG_template_value_parameter: return "DW_TAG_template_value_parameter"; - case DW_TAG_thrown_type: return "DW_TAG_thrown_type"; - case DW_TAG_try_block: return "DW_TAG_try_block"; - case DW_TAG_variant_part: return "DW_TAG_variant_part"; - case DW_TAG_variable: return "DW_TAG_variable"; - case DW_TAG_volatile_type: return "DW_TAG_volatile_type"; - case DW_TAG_dwarf_procedure: return "DW_TAG_dwarf_procedure"; - case DW_TAG_restrict_type: return "DW_TAG_restrict_type"; - case DW_TAG_interface_type: return "DW_TAG_interface_type"; - case DW_TAG_namespace: return "DW_TAG_namespace"; - case DW_TAG_imported_module: return "DW_TAG_imported_module"; - case DW_TAG_unspecified_type: return "DW_TAG_unspecified_type"; - case DW_TAG_partial_unit: return "DW_TAG_partial_unit"; - case DW_TAG_imported_unit: return "DW_TAG_imported_unit"; - case DW_TAG_condition: return "DW_TAG_condition"; - case DW_TAG_shared_type: return "DW_TAG_shared_type"; - case DW_TAG_lo_user: return "DW_TAG_lo_user"; - case DW_TAG_hi_user: return "DW_TAG_hi_user"; + case DW_TAG_array_type: return "DW_TAG_array_type"; + case DW_TAG_class_type: return "DW_TAG_class_type"; + case DW_TAG_entry_point: return "DW_TAG_entry_point"; + case DW_TAG_enumeration_type: return "DW_TAG_enumeration_type"; + case DW_TAG_formal_parameter: return "DW_TAG_formal_parameter"; + case DW_TAG_imported_declaration: return "DW_TAG_imported_declaration"; + case DW_TAG_label: return "DW_TAG_label"; + case DW_TAG_lexical_block: return "DW_TAG_lexical_block"; + case DW_TAG_member: return "DW_TAG_member"; + case DW_TAG_pointer_type: return "DW_TAG_pointer_type"; + case DW_TAG_reference_type: return "DW_TAG_reference_type"; + case DW_TAG_compile_unit: return "DW_TAG_compile_unit"; + case DW_TAG_string_type: return "DW_TAG_string_type"; + case DW_TAG_structure_type: return "DW_TAG_structure_type"; + case DW_TAG_subroutine_type: return "DW_TAG_subroutine_type"; + case DW_TAG_typedef: return "DW_TAG_typedef"; + case DW_TAG_union_type: return "DW_TAG_union_type"; + case DW_TAG_unspecified_parameters: return "DW_TAG_unspecified_parameters"; + case DW_TAG_variant: return "DW_TAG_variant"; + case DW_TAG_common_block: return "DW_TAG_common_block"; + case DW_TAG_common_inclusion: return "DW_TAG_common_inclusion"; + case DW_TAG_inheritance: return "DW_TAG_inheritance"; + case DW_TAG_inlined_subroutine: return "DW_TAG_inlined_subroutine"; + case DW_TAG_module: return "DW_TAG_module"; + case DW_TAG_ptr_to_member_type: return "DW_TAG_ptr_to_member_type"; + case DW_TAG_set_type: return "DW_TAG_set_type"; + case DW_TAG_subrange_type: return "DW_TAG_subrange_type"; + case DW_TAG_with_stmt: return "DW_TAG_with_stmt"; + case DW_TAG_access_declaration: return "DW_TAG_access_declaration"; + case DW_TAG_base_type: return "DW_TAG_base_type"; + case DW_TAG_catch_block: return "DW_TAG_catch_block"; + case DW_TAG_const_type: return "DW_TAG_const_type"; + case DW_TAG_constant: return "DW_TAG_constant"; + case DW_TAG_enumerator: return "DW_TAG_enumerator"; + case DW_TAG_file_type: return "DW_TAG_file_type"; + case DW_TAG_friend: return "DW_TAG_friend"; + case DW_TAG_namelist: return "DW_TAG_namelist"; + case DW_TAG_namelist_item: return "DW_TAG_namelist_item"; + case DW_TAG_packed_type: return "DW_TAG_packed_type"; + case DW_TAG_subprogram: return "DW_TAG_subprogram"; + case DW_TAG_template_type_parameter: return "DW_TAG_template_type_parameter"; + case 
DW_TAG_template_value_parameter:return "DW_TAG_template_value_parameter"; + case DW_TAG_thrown_type: return "DW_TAG_thrown_type"; + case DW_TAG_try_block: return "DW_TAG_try_block"; + case DW_TAG_variant_part: return "DW_TAG_variant_part"; + case DW_TAG_variable: return "DW_TAG_variable"; + case DW_TAG_volatile_type: return "DW_TAG_volatile_type"; + case DW_TAG_dwarf_procedure: return "DW_TAG_dwarf_procedure"; + case DW_TAG_restrict_type: return "DW_TAG_restrict_type"; + case DW_TAG_interface_type: return "DW_TAG_interface_type"; + case DW_TAG_namespace: return "DW_TAG_namespace"; + case DW_TAG_imported_module: return "DW_TAG_imported_module"; + case DW_TAG_unspecified_type: return "DW_TAG_unspecified_type"; + case DW_TAG_partial_unit: return "DW_TAG_partial_unit"; + case DW_TAG_imported_unit: return "DW_TAG_imported_unit"; + case DW_TAG_condition: return "DW_TAG_condition"; + case DW_TAG_shared_type: return "DW_TAG_shared_type"; + case DW_TAG_lo_user: return "DW_TAG_lo_user"; + case DW_TAG_hi_user: return "DW_TAG_hi_user"; } - llvm_unreachable("Unknown Dwarf Tag"); - return ""; + return 0; } /// ChildrenString - Return the string for the specified children flag. /// -const char *ChildrenString(unsigned Children) { +const char *llvm::dwarf::ChildrenString(unsigned Children) { switch (Children) { - case DW_CHILDREN_no: return "CHILDREN_no"; - case DW_CHILDREN_yes: return "CHILDREN_yes"; + case DW_CHILDREN_no: return "CHILDREN_no"; + case DW_CHILDREN_yes: return "CHILDREN_yes"; } - llvm_unreachable("Unknown Dwarf ChildrenFlag"); - return ""; + return 0; } /// AttributeString - Return the string for the specified attribute. /// -const char *AttributeString(unsigned Attribute) { +const char *llvm::dwarf::AttributeString(unsigned Attribute) { switch (Attribute) { - case DW_AT_sibling: return "DW_AT_sibling"; - case DW_AT_location: return "DW_AT_location"; - case DW_AT_name: return "DW_AT_name"; - case DW_AT_ordering: return "DW_AT_ordering"; - case DW_AT_byte_size: return "DW_AT_byte_size"; - case DW_AT_bit_offset: return "DW_AT_bit_offset"; - case DW_AT_bit_size: return "DW_AT_bit_size"; - case DW_AT_stmt_list: return "DW_AT_stmt_list"; - case DW_AT_low_pc: return "DW_AT_low_pc"; - case DW_AT_high_pc: return "DW_AT_high_pc"; - case DW_AT_language: return "DW_AT_language"; - case DW_AT_discr: return "DW_AT_discr"; - case DW_AT_discr_value: return "DW_AT_discr_value"; - case DW_AT_visibility: return "DW_AT_visibility"; - case DW_AT_import: return "DW_AT_import"; - case DW_AT_string_length: return "DW_AT_string_length"; - case DW_AT_common_reference: return "DW_AT_common_reference"; - case DW_AT_comp_dir: return "DW_AT_comp_dir"; - case DW_AT_const_value: return "DW_AT_const_value"; - case DW_AT_containing_type: return "DW_AT_containing_type"; - case DW_AT_default_value: return "DW_AT_default_value"; - case DW_AT_inline: return "DW_AT_inline"; - case DW_AT_is_optional: return "DW_AT_is_optional"; - case DW_AT_lower_bound: return "DW_AT_lower_bound"; - case DW_AT_producer: return "DW_AT_producer"; - case DW_AT_prototyped: return "DW_AT_prototyped"; - case DW_AT_return_addr: return "DW_AT_return_addr"; - case DW_AT_start_scope: return "DW_AT_start_scope"; - case DW_AT_bit_stride: return "DW_AT_bit_stride"; - case DW_AT_upper_bound: return "DW_AT_upper_bound"; - case DW_AT_abstract_origin: return "DW_AT_abstract_origin"; - case DW_AT_accessibility: return "DW_AT_accessibility"; - case DW_AT_address_class: return "DW_AT_address_class"; - case DW_AT_artificial: return "DW_AT_artificial"; - case 
DW_AT_base_types: return "DW_AT_base_types"; - case DW_AT_calling_convention: return "DW_AT_calling_convention"; - case DW_AT_count: return "DW_AT_count"; - case DW_AT_data_member_location: return "DW_AT_data_member_location"; - case DW_AT_decl_column: return "DW_AT_decl_column"; - case DW_AT_decl_file: return "DW_AT_decl_file"; - case DW_AT_decl_line: return "DW_AT_decl_line"; - case DW_AT_declaration: return "DW_AT_declaration"; - case DW_AT_discr_list: return "DW_AT_discr_list"; - case DW_AT_encoding: return "DW_AT_encoding"; - case DW_AT_external: return "DW_AT_external"; - case DW_AT_frame_base: return "DW_AT_frame_base"; - case DW_AT_friend: return "DW_AT_friend"; - case DW_AT_identifier_case: return "DW_AT_identifier_case"; - case DW_AT_macro_info: return "DW_AT_macro_info"; - case DW_AT_namelist_item: return "DW_AT_namelist_item"; - case DW_AT_priority: return "DW_AT_priority"; - case DW_AT_segment: return "DW_AT_segment"; - case DW_AT_specification: return "DW_AT_specification"; - case DW_AT_static_link: return "DW_AT_static_link"; - case DW_AT_type: return "DW_AT_type"; - case DW_AT_use_location: return "DW_AT_use_location"; - case DW_AT_variable_parameter: return "DW_AT_variable_parameter"; - case DW_AT_virtuality: return "DW_AT_virtuality"; - case DW_AT_vtable_elem_location: return "DW_AT_vtable_elem_location"; - case DW_AT_allocated: return "DW_AT_allocated"; - case DW_AT_associated: return "DW_AT_associated"; - case DW_AT_data_location: return "DW_AT_data_location"; - case DW_AT_byte_stride: return "DW_AT_byte_stride"; - case DW_AT_entry_pc: return "DW_AT_entry_pc"; - case DW_AT_use_UTF8: return "DW_AT_use_UTF8"; - case DW_AT_extension: return "DW_AT_extension"; - case DW_AT_ranges: return "DW_AT_ranges"; - case DW_AT_trampoline: return "DW_AT_trampoline"; - case DW_AT_call_column: return "DW_AT_call_column"; - case DW_AT_call_file: return "DW_AT_call_file"; - case DW_AT_call_line: return "DW_AT_call_line"; - case DW_AT_description: return "DW_AT_description"; - case DW_AT_binary_scale: return "DW_AT_binary_scale"; - case DW_AT_decimal_scale: return "DW_AT_decimal_scale"; - case DW_AT_small: return "DW_AT_small"; - case DW_AT_decimal_sign: return "DW_AT_decimal_sign"; - case DW_AT_digit_count: return "DW_AT_digit_count"; - case DW_AT_picture_string: return "DW_AT_picture_string"; - case DW_AT_mutable: return "DW_AT_mutable"; - case DW_AT_threads_scaled: return "DW_AT_threads_scaled"; - case DW_AT_explicit: return "DW_AT_explicit"; - case DW_AT_object_pointer: return "DW_AT_object_pointer"; - case DW_AT_endianity: return "DW_AT_endianity"; - case DW_AT_elemental: return "DW_AT_elemental"; - case DW_AT_pure: return "DW_AT_pure"; - case DW_AT_recursive: return "DW_AT_recursive"; - case DW_AT_MIPS_linkage_name: return "DW_AT_MIPS_linkage_name"; - case DW_AT_sf_names: return "DW_AT_sf_names"; - case DW_AT_src_info: return "DW_AT_src_info"; - case DW_AT_mac_info: return "DW_AT_mac_info"; - case DW_AT_src_coords: return "DW_AT_src_coords"; - case DW_AT_body_begin: return "DW_AT_body_begin"; - case DW_AT_body_end: return "DW_AT_body_end"; - case DW_AT_GNU_vector: return "DW_AT_GNU_vector"; - case DW_AT_lo_user: return "DW_AT_lo_user"; - case DW_AT_hi_user: return "DW_AT_hi_user"; - case DW_AT_APPLE_optimized: return "DW_AT_APPLE_optimized"; - case DW_AT_APPLE_flags: return "DW_AT_APPLE_flags"; - case DW_AT_APPLE_isa: return "DW_AT_APPLE_isa"; - case DW_AT_APPLE_block: return "DW_AT_APPLE_block"; - case DW_AT_APPLE_major_runtime_vers: return "DW_AT_APPLE_major_runtime_vers"; - case 
DW_AT_APPLE_runtime_class: return "DW_AT_APPLE_runtime_class"; + case DW_AT_sibling: return "DW_AT_sibling"; + case DW_AT_location: return "DW_AT_location"; + case DW_AT_name: return "DW_AT_name"; + case DW_AT_ordering: return "DW_AT_ordering"; + case DW_AT_byte_size: return "DW_AT_byte_size"; + case DW_AT_bit_offset: return "DW_AT_bit_offset"; + case DW_AT_bit_size: return "DW_AT_bit_size"; + case DW_AT_stmt_list: return "DW_AT_stmt_list"; + case DW_AT_low_pc: return "DW_AT_low_pc"; + case DW_AT_high_pc: return "DW_AT_high_pc"; + case DW_AT_language: return "DW_AT_language"; + case DW_AT_discr: return "DW_AT_discr"; + case DW_AT_discr_value: return "DW_AT_discr_value"; + case DW_AT_visibility: return "DW_AT_visibility"; + case DW_AT_import: return "DW_AT_import"; + case DW_AT_string_length: return "DW_AT_string_length"; + case DW_AT_common_reference: return "DW_AT_common_reference"; + case DW_AT_comp_dir: return "DW_AT_comp_dir"; + case DW_AT_const_value: return "DW_AT_const_value"; + case DW_AT_containing_type: return "DW_AT_containing_type"; + case DW_AT_default_value: return "DW_AT_default_value"; + case DW_AT_inline: return "DW_AT_inline"; + case DW_AT_is_optional: return "DW_AT_is_optional"; + case DW_AT_lower_bound: return "DW_AT_lower_bound"; + case DW_AT_producer: return "DW_AT_producer"; + case DW_AT_prototyped: return "DW_AT_prototyped"; + case DW_AT_return_addr: return "DW_AT_return_addr"; + case DW_AT_start_scope: return "DW_AT_start_scope"; + case DW_AT_bit_stride: return "DW_AT_bit_stride"; + case DW_AT_upper_bound: return "DW_AT_upper_bound"; + case DW_AT_abstract_origin: return "DW_AT_abstract_origin"; + case DW_AT_accessibility: return "DW_AT_accessibility"; + case DW_AT_address_class: return "DW_AT_address_class"; + case DW_AT_artificial: return "DW_AT_artificial"; + case DW_AT_base_types: return "DW_AT_base_types"; + case DW_AT_calling_convention: return "DW_AT_calling_convention"; + case DW_AT_count: return "DW_AT_count"; + case DW_AT_data_member_location: return "DW_AT_data_member_location"; + case DW_AT_decl_column: return "DW_AT_decl_column"; + case DW_AT_decl_file: return "DW_AT_decl_file"; + case DW_AT_decl_line: return "DW_AT_decl_line"; + case DW_AT_declaration: return "DW_AT_declaration"; + case DW_AT_discr_list: return "DW_AT_discr_list"; + case DW_AT_encoding: return "DW_AT_encoding"; + case DW_AT_external: return "DW_AT_external"; + case DW_AT_frame_base: return "DW_AT_frame_base"; + case DW_AT_friend: return "DW_AT_friend"; + case DW_AT_identifier_case: return "DW_AT_identifier_case"; + case DW_AT_macro_info: return "DW_AT_macro_info"; + case DW_AT_namelist_item: return "DW_AT_namelist_item"; + case DW_AT_priority: return "DW_AT_priority"; + case DW_AT_segment: return "DW_AT_segment"; + case DW_AT_specification: return "DW_AT_specification"; + case DW_AT_static_link: return "DW_AT_static_link"; + case DW_AT_type: return "DW_AT_type"; + case DW_AT_use_location: return "DW_AT_use_location"; + case DW_AT_variable_parameter: return "DW_AT_variable_parameter"; + case DW_AT_virtuality: return "DW_AT_virtuality"; + case DW_AT_vtable_elem_location: return "DW_AT_vtable_elem_location"; + case DW_AT_allocated: return "DW_AT_allocated"; + case DW_AT_associated: return "DW_AT_associated"; + case DW_AT_data_location: return "DW_AT_data_location"; + case DW_AT_byte_stride: return "DW_AT_byte_stride"; + case DW_AT_entry_pc: return "DW_AT_entry_pc"; + case DW_AT_use_UTF8: return "DW_AT_use_UTF8"; + case DW_AT_extension: return "DW_AT_extension"; + case DW_AT_ranges: return 
"DW_AT_ranges"; + case DW_AT_trampoline: return "DW_AT_trampoline"; + case DW_AT_call_column: return "DW_AT_call_column"; + case DW_AT_call_file: return "DW_AT_call_file"; + case DW_AT_call_line: return "DW_AT_call_line"; + case DW_AT_description: return "DW_AT_description"; + case DW_AT_binary_scale: return "DW_AT_binary_scale"; + case DW_AT_decimal_scale: return "DW_AT_decimal_scale"; + case DW_AT_small: return "DW_AT_small"; + case DW_AT_decimal_sign: return "DW_AT_decimal_sign"; + case DW_AT_digit_count: return "DW_AT_digit_count"; + case DW_AT_picture_string: return "DW_AT_picture_string"; + case DW_AT_mutable: return "DW_AT_mutable"; + case DW_AT_threads_scaled: return "DW_AT_threads_scaled"; + case DW_AT_explicit: return "DW_AT_explicit"; + case DW_AT_object_pointer: return "DW_AT_object_pointer"; + case DW_AT_endianity: return "DW_AT_endianity"; + case DW_AT_elemental: return "DW_AT_elemental"; + case DW_AT_pure: return "DW_AT_pure"; + case DW_AT_recursive: return "DW_AT_recursive"; + case DW_AT_MIPS_linkage_name: return "DW_AT_MIPS_linkage_name"; + case DW_AT_sf_names: return "DW_AT_sf_names"; + case DW_AT_src_info: return "DW_AT_src_info"; + case DW_AT_mac_info: return "DW_AT_mac_info"; + case DW_AT_src_coords: return "DW_AT_src_coords"; + case DW_AT_body_begin: return "DW_AT_body_begin"; + case DW_AT_body_end: return "DW_AT_body_end"; + case DW_AT_GNU_vector: return "DW_AT_GNU_vector"; + case DW_AT_lo_user: return "DW_AT_lo_user"; + case DW_AT_hi_user: return "DW_AT_hi_user"; + case DW_AT_APPLE_optimized: return "DW_AT_APPLE_optimized"; + case DW_AT_APPLE_flags: return "DW_AT_APPLE_flags"; + case DW_AT_APPLE_isa: return "DW_AT_APPLE_isa"; + case DW_AT_APPLE_block: return "DW_AT_APPLE_block"; + case DW_AT_APPLE_major_runtime_vers: return "DW_AT_APPLE_major_runtime_vers"; + case DW_AT_APPLE_runtime_class: return "DW_AT_APPLE_runtime_class"; } - llvm_unreachable("Unknown Dwarf Attribute"); - return ""; + return 0; } /// FormEncodingString - Return the string for the specified form encoding. 
/// -const char *FormEncodingString(unsigned Encoding) { +const char *llvm::dwarf::FormEncodingString(unsigned Encoding) { switch (Encoding) { - case DW_FORM_addr: return "FORM_addr"; - case DW_FORM_block2: return "FORM_block2"; - case DW_FORM_block4: return "FORM_block4"; - case DW_FORM_data2: return "FORM_data2"; - case DW_FORM_data4: return "FORM_data4"; - case DW_FORM_data8: return "FORM_data8"; - case DW_FORM_string: return "FORM_string"; - case DW_FORM_block: return "FORM_block"; - case DW_FORM_block1: return "FORM_block1"; - case DW_FORM_data1: return "FORM_data1"; - case DW_FORM_flag: return "FORM_flag"; - case DW_FORM_sdata: return "FORM_sdata"; - case DW_FORM_strp: return "FORM_strp"; - case DW_FORM_udata: return "FORM_udata"; - case DW_FORM_ref_addr: return "FORM_ref_addr"; - case DW_FORM_ref1: return "FORM_ref1"; - case DW_FORM_ref2: return "FORM_ref2"; - case DW_FORM_ref4: return "FORM_ref4"; - case DW_FORM_ref8: return "FORM_ref8"; - case DW_FORM_ref_udata: return "FORM_ref_udata"; - case DW_FORM_indirect: return "FORM_indirect"; + case DW_FORM_addr: return "FORM_addr"; + case DW_FORM_block2: return "FORM_block2"; + case DW_FORM_block4: return "FORM_block4"; + case DW_FORM_data2: return "FORM_data2"; + case DW_FORM_data4: return "FORM_data4"; + case DW_FORM_data8: return "FORM_data8"; + case DW_FORM_string: return "FORM_string"; + case DW_FORM_block: return "FORM_block"; + case DW_FORM_block1: return "FORM_block1"; + case DW_FORM_data1: return "FORM_data1"; + case DW_FORM_flag: return "FORM_flag"; + case DW_FORM_sdata: return "FORM_sdata"; + case DW_FORM_strp: return "FORM_strp"; + case DW_FORM_udata: return "FORM_udata"; + case DW_FORM_ref_addr: return "FORM_ref_addr"; + case DW_FORM_ref1: return "FORM_ref1"; + case DW_FORM_ref2: return "FORM_ref2"; + case DW_FORM_ref4: return "FORM_ref4"; + case DW_FORM_ref8: return "FORM_ref8"; + case DW_FORM_ref_udata: return "FORM_ref_udata"; + case DW_FORM_indirect: return "FORM_indirect"; } - llvm_unreachable("Unknown Dwarf Form Encoding"); - return ""; + return 0; } /// OperationEncodingString - Return the string for the specified operation /// encoding. 
-const char *OperationEncodingString(unsigned Encoding) { +const char *llvm::dwarf::OperationEncodingString(unsigned Encoding) { switch (Encoding) { - case DW_OP_addr: return "OP_addr"; - case DW_OP_deref: return "OP_deref"; - case DW_OP_const1u: return "OP_const1u"; - case DW_OP_const1s: return "OP_const1s"; - case DW_OP_const2u: return "OP_const2u"; - case DW_OP_const2s: return "OP_const2s"; - case DW_OP_const4u: return "OP_const4u"; - case DW_OP_const4s: return "OP_const4s"; - case DW_OP_const8u: return "OP_const8u"; - case DW_OP_const8s: return "OP_const8s"; - case DW_OP_constu: return "OP_constu"; - case DW_OP_consts: return "OP_consts"; - case DW_OP_dup: return "OP_dup"; - case DW_OP_drop: return "OP_drop"; - case DW_OP_over: return "OP_over"; - case DW_OP_pick: return "OP_pick"; - case DW_OP_swap: return "OP_swap"; - case DW_OP_rot: return "OP_rot"; - case DW_OP_xderef: return "OP_xderef"; - case DW_OP_abs: return "OP_abs"; - case DW_OP_and: return "OP_and"; - case DW_OP_div: return "OP_div"; - case DW_OP_minus: return "OP_minus"; - case DW_OP_mod: return "OP_mod"; - case DW_OP_mul: return "OP_mul"; - case DW_OP_neg: return "OP_neg"; - case DW_OP_not: return "OP_not"; - case DW_OP_or: return "OP_or"; - case DW_OP_plus: return "OP_plus"; - case DW_OP_plus_uconst: return "OP_plus_uconst"; - case DW_OP_shl: return "OP_shl"; - case DW_OP_shr: return "OP_shr"; - case DW_OP_shra: return "OP_shra"; - case DW_OP_xor: return "OP_xor"; - case DW_OP_skip: return "OP_skip"; - case DW_OP_bra: return "OP_bra"; - case DW_OP_eq: return "OP_eq"; - case DW_OP_ge: return "OP_ge"; - case DW_OP_gt: return "OP_gt"; - case DW_OP_le: return "OP_le"; - case DW_OP_lt: return "OP_lt"; - case DW_OP_ne: return "OP_ne"; - case DW_OP_lit0: return "OP_lit0"; - case DW_OP_lit1: return "OP_lit1"; - case DW_OP_lit31: return "OP_lit31"; - case DW_OP_reg0: return "OP_reg0"; - case DW_OP_reg1: return "OP_reg1"; - case DW_OP_reg31: return "OP_reg31"; - case DW_OP_breg0: return "OP_breg0"; - case DW_OP_breg1: return "OP_breg1"; - case DW_OP_breg31: return "OP_breg31"; - case DW_OP_regx: return "OP_regx"; - case DW_OP_fbreg: return "OP_fbreg"; - case DW_OP_bregx: return "OP_bregx"; - case DW_OP_piece: return "OP_piece"; - case DW_OP_deref_size: return "OP_deref_size"; - case DW_OP_xderef_size: return "OP_xderef_size"; - case DW_OP_nop: return "OP_nop"; - case DW_OP_push_object_address: return "OP_push_object_address"; - case DW_OP_call2: return "OP_call2"; - case DW_OP_call4: return "OP_call4"; - case DW_OP_call_ref: return "OP_call_ref"; - case DW_OP_form_tls_address: return "OP_form_tls_address"; - case DW_OP_call_frame_cfa: return "OP_call_frame_cfa"; - case DW_OP_lo_user: return "OP_lo_user"; - case DW_OP_hi_user: return "OP_hi_user"; + case DW_OP_addr: return "OP_addr"; + case DW_OP_deref: return "OP_deref"; + case DW_OP_const1u: return "OP_const1u"; + case DW_OP_const1s: return "OP_const1s"; + case DW_OP_const2u: return "OP_const2u"; + case DW_OP_const2s: return "OP_const2s"; + case DW_OP_const4u: return "OP_const4u"; + case DW_OP_const4s: return "OP_const4s"; + case DW_OP_const8u: return "OP_const8u"; + case DW_OP_const8s: return "OP_const8s"; + case DW_OP_constu: return "OP_constu"; + case DW_OP_consts: return "OP_consts"; + case DW_OP_dup: return "OP_dup"; + case DW_OP_drop: return "OP_drop"; + case DW_OP_over: return "OP_over"; + case DW_OP_pick: return "OP_pick"; + case DW_OP_swap: return "OP_swap"; + case DW_OP_rot: return "OP_rot"; + case DW_OP_xderef: return "OP_xderef"; + case DW_OP_abs: return "OP_abs"; + 
case DW_OP_and: return "OP_and"; + case DW_OP_div: return "OP_div"; + case DW_OP_minus: return "OP_minus"; + case DW_OP_mod: return "OP_mod"; + case DW_OP_mul: return "OP_mul"; + case DW_OP_neg: return "OP_neg"; + case DW_OP_not: return "OP_not"; + case DW_OP_or: return "OP_or"; + case DW_OP_plus: return "OP_plus"; + case DW_OP_plus_uconst: return "OP_plus_uconst"; + case DW_OP_shl: return "OP_shl"; + case DW_OP_shr: return "OP_shr"; + case DW_OP_shra: return "OP_shra"; + case DW_OP_xor: return "OP_xor"; + case DW_OP_skip: return "OP_skip"; + case DW_OP_bra: return "OP_bra"; + case DW_OP_eq: return "OP_eq"; + case DW_OP_ge: return "OP_ge"; + case DW_OP_gt: return "OP_gt"; + case DW_OP_le: return "OP_le"; + case DW_OP_lt: return "OP_lt"; + case DW_OP_ne: return "OP_ne"; + case DW_OP_lit0: return "OP_lit0"; + case DW_OP_lit1: return "OP_lit1"; + case DW_OP_lit31: return "OP_lit31"; + case DW_OP_reg0: return "OP_reg0"; + case DW_OP_reg1: return "OP_reg1"; + case DW_OP_reg31: return "OP_reg31"; + case DW_OP_breg0: return "OP_breg0"; + case DW_OP_breg1: return "OP_breg1"; + case DW_OP_breg31: return "OP_breg31"; + case DW_OP_regx: return "OP_regx"; + case DW_OP_fbreg: return "OP_fbreg"; + case DW_OP_bregx: return "OP_bregx"; + case DW_OP_piece: return "OP_piece"; + case DW_OP_deref_size: return "OP_deref_size"; + case DW_OP_xderef_size: return "OP_xderef_size"; + case DW_OP_nop: return "OP_nop"; + case DW_OP_push_object_address: return "OP_push_object_address"; + case DW_OP_call2: return "OP_call2"; + case DW_OP_call4: return "OP_call4"; + case DW_OP_call_ref: return "OP_call_ref"; + case DW_OP_form_tls_address: return "OP_form_tls_address"; + case DW_OP_call_frame_cfa: return "OP_call_frame_cfa"; + case DW_OP_lo_user: return "OP_lo_user"; + case DW_OP_hi_user: return "OP_hi_user"; } - llvm_unreachable("Unknown Dwarf Operation Encoding"); - return ""; + return 0; } /// AttributeEncodingString - Return the string for the specified attribute /// encoding. 
-const char *AttributeEncodingString(unsigned Encoding) { +const char *llvm::dwarf::AttributeEncodingString(unsigned Encoding) { switch (Encoding) { - case DW_ATE_address: return "ATE_address"; - case DW_ATE_boolean: return "ATE_boolean"; - case DW_ATE_complex_float: return "ATE_complex_float"; - case DW_ATE_float: return "ATE_float"; - case DW_ATE_signed: return "ATE_signed"; - case DW_ATE_signed_char: return "ATE_signed_char"; - case DW_ATE_unsigned: return "ATE_unsigned"; - case DW_ATE_unsigned_char: return "ATE_unsigned_char"; - case DW_ATE_imaginary_float: return "ATE_imaginary_float"; - case DW_ATE_packed_decimal: return "ATE_packed_decimal"; - case DW_ATE_numeric_string: return "ATE_numeric_string"; - case DW_ATE_edited: return "ATE_edited"; - case DW_ATE_signed_fixed: return "ATE_signed_fixed"; - case DW_ATE_unsigned_fixed: return "ATE_unsigned_fixed"; - case DW_ATE_decimal_float: return "ATE_decimal_float"; - case DW_ATE_lo_user: return "ATE_lo_user"; - case DW_ATE_hi_user: return "ATE_hi_user"; + case DW_ATE_address: return "ATE_address"; + case DW_ATE_boolean: return "ATE_boolean"; + case DW_ATE_complex_float: return "ATE_complex_float"; + case DW_ATE_float: return "ATE_float"; + case DW_ATE_signed: return "ATE_signed"; + case DW_ATE_signed_char: return "ATE_signed_char"; + case DW_ATE_unsigned: return "ATE_unsigned"; + case DW_ATE_unsigned_char: return "ATE_unsigned_char"; + case DW_ATE_imaginary_float: return "ATE_imaginary_float"; + case DW_ATE_packed_decimal: return "ATE_packed_decimal"; + case DW_ATE_numeric_string: return "ATE_numeric_string"; + case DW_ATE_edited: return "ATE_edited"; + case DW_ATE_signed_fixed: return "ATE_signed_fixed"; + case DW_ATE_unsigned_fixed: return "ATE_unsigned_fixed"; + case DW_ATE_decimal_float: return "ATE_decimal_float"; + case DW_ATE_lo_user: return "ATE_lo_user"; + case DW_ATE_hi_user: return "ATE_hi_user"; } - llvm_unreachable("Unknown Dwarf Attribute Encoding"); - return ""; + return 0; } /// DecimalSignString - Return the string for the specified decimal sign /// attribute. -const char *DecimalSignString(unsigned Sign) { +const char *llvm::dwarf::DecimalSignString(unsigned Sign) { switch (Sign) { - case DW_DS_unsigned: return "DS_unsigned"; - case DW_DS_leading_overpunch: return "DS_leading_overpunch"; - case DW_DS_trailing_overpunch: return "DS_trailing_overpunch"; - case DW_DS_leading_separate: return "DS_leading_separate"; - case DW_DS_trailing_separate: return "DS_trailing_separate"; + case DW_DS_unsigned: return "DS_unsigned"; + case DW_DS_leading_overpunch: return "DS_leading_overpunch"; + case DW_DS_trailing_overpunch: return "DS_trailing_overpunch"; + case DW_DS_leading_separate: return "DS_leading_separate"; + case DW_DS_trailing_separate: return "DS_trailing_separate"; } - llvm_unreachable("Unknown Dwarf Decimal Sign Attribute"); - return ""; + return 0; } /// EndianityString - Return the string for the specified endianity. 
/// -const char *EndianityString(unsigned Endian) { +const char *llvm::dwarf::EndianityString(unsigned Endian) { switch (Endian) { - case DW_END_default: return "END_default"; - case DW_END_big: return "END_big"; - case DW_END_little: return "END_little"; - case DW_END_lo_user: return "END_lo_user"; - case DW_END_hi_user: return "END_hi_user"; + case DW_END_default: return "END_default"; + case DW_END_big: return "END_big"; + case DW_END_little: return "END_little"; + case DW_END_lo_user: return "END_lo_user"; + case DW_END_hi_user: return "END_hi_user"; } - llvm_unreachable("Unknown Dwarf Endianity"); - return ""; + return 0; } /// AccessibilityString - Return the string for the specified accessibility. /// -const char *AccessibilityString(unsigned Access) { +const char *llvm::dwarf::AccessibilityString(unsigned Access) { switch (Access) { - // Accessibility codes - case DW_ACCESS_public: return "ACCESS_public"; - case DW_ACCESS_protected: return "ACCESS_protected"; - case DW_ACCESS_private: return "ACCESS_private"; + // Accessibility codes + case DW_ACCESS_public: return "ACCESS_public"; + case DW_ACCESS_protected: return "ACCESS_protected"; + case DW_ACCESS_private: return "ACCESS_private"; } - llvm_unreachable("Unknown Dwarf Accessibility"); - return ""; + return 0; } /// VisibilityString - Return the string for the specified visibility. /// -const char *VisibilityString(unsigned Visibility) { +const char *llvm::dwarf::VisibilityString(unsigned Visibility) { switch (Visibility) { - case DW_VIS_local: return "VIS_local"; - case DW_VIS_exported: return "VIS_exported"; - case DW_VIS_qualified: return "VIS_qualified"; + case DW_VIS_local: return "VIS_local"; + case DW_VIS_exported: return "VIS_exported"; + case DW_VIS_qualified: return "VIS_qualified"; } - llvm_unreachable("Unknown Dwarf Visibility"); - return ""; + return 0; } /// VirtualityString - Return the string for the specified virtuality. /// -const char *VirtualityString(unsigned Virtuality) { +const char *llvm::dwarf::VirtualityString(unsigned Virtuality) { switch (Virtuality) { - case DW_VIRTUALITY_none: return "VIRTUALITY_none"; - case DW_VIRTUALITY_virtual: return "VIRTUALITY_virtual"; - case DW_VIRTUALITY_pure_virtual: return "VIRTUALITY_pure_virtual"; + case DW_VIRTUALITY_none: return "VIRTUALITY_none"; + case DW_VIRTUALITY_virtual: return "VIRTUALITY_virtual"; + case DW_VIRTUALITY_pure_virtual: return "VIRTUALITY_pure_virtual"; } - llvm_unreachable("Unknown Dwarf Virtuality"); - return ""; + return 0; } /// LanguageString - Return the string for the specified language. 
/// -const char *LanguageString(unsigned Language) { +const char *llvm::dwarf::LanguageString(unsigned Language) { switch (Language) { - case DW_LANG_C89: return "LANG_C89"; - case DW_LANG_C: return "LANG_C"; - case DW_LANG_Ada83: return "LANG_Ada83"; - case DW_LANG_C_plus_plus: return "LANG_C_plus_plus"; - case DW_LANG_Cobol74: return "LANG_Cobol74"; - case DW_LANG_Cobol85: return "LANG_Cobol85"; - case DW_LANG_Fortran77: return "LANG_Fortran77"; - case DW_LANG_Fortran90: return "LANG_Fortran90"; - case DW_LANG_Pascal83: return "LANG_Pascal83"; - case DW_LANG_Modula2: return "LANG_Modula2"; - case DW_LANG_Java: return "LANG_Java"; - case DW_LANG_C99: return "LANG_C99"; - case DW_LANG_Ada95: return "LANG_Ada95"; - case DW_LANG_Fortran95: return "LANG_Fortran95"; - case DW_LANG_PLI: return "LANG_PLI"; - case DW_LANG_ObjC: return "LANG_ObjC"; - case DW_LANG_ObjC_plus_plus: return "LANG_ObjC_plus_plus"; - case DW_LANG_UPC: return "LANG_UPC"; - case DW_LANG_D: return "LANG_D"; - case DW_LANG_lo_user: return "LANG_lo_user"; - case DW_LANG_hi_user: return "LANG_hi_user"; + case DW_LANG_C89: return "LANG_C89"; + case DW_LANG_C: return "LANG_C"; + case DW_LANG_Ada83: return "LANG_Ada83"; + case DW_LANG_C_plus_plus: return "LANG_C_plus_plus"; + case DW_LANG_Cobol74: return "LANG_Cobol74"; + case DW_LANG_Cobol85: return "LANG_Cobol85"; + case DW_LANG_Fortran77: return "LANG_Fortran77"; + case DW_LANG_Fortran90: return "LANG_Fortran90"; + case DW_LANG_Pascal83: return "LANG_Pascal83"; + case DW_LANG_Modula2: return "LANG_Modula2"; + case DW_LANG_Java: return "LANG_Java"; + case DW_LANG_C99: return "LANG_C99"; + case DW_LANG_Ada95: return "LANG_Ada95"; + case DW_LANG_Fortran95: return "LANG_Fortran95"; + case DW_LANG_PLI: return "LANG_PLI"; + case DW_LANG_ObjC: return "LANG_ObjC"; + case DW_LANG_ObjC_plus_plus: return "LANG_ObjC_plus_plus"; + case DW_LANG_UPC: return "LANG_UPC"; + case DW_LANG_D: return "LANG_D"; + case DW_LANG_lo_user: return "LANG_lo_user"; + case DW_LANG_hi_user: return "LANG_hi_user"; } - llvm_unreachable("Unknown Dwarf Language"); - return ""; + return 0; } /// CaseString - Return the string for the specified identifier case. /// -const char *CaseString(unsigned Case) { - switch (Case) { - case DW_ID_case_sensitive: return "ID_case_sensitive"; - case DW_ID_up_case: return "ID_up_case"; - case DW_ID_down_case: return "ID_down_case"; - case DW_ID_case_insensitive: return "ID_case_insensitive"; +const char *llvm::dwarf::CaseString(unsigned Case) { + switch (Case) { + case DW_ID_case_sensitive: return "ID_case_sensitive"; + case DW_ID_up_case: return "ID_up_case"; + case DW_ID_down_case: return "ID_down_case"; + case DW_ID_case_insensitive: return "ID_case_insensitive"; } - llvm_unreachable("Unknown Dwarf Identifier Case"); - return ""; + return 0; } /// ConventionString - Return the string for the specified calling convention. 
/// -const char *ConventionString(unsigned Convention) { +const char *llvm::dwarf::ConventionString(unsigned Convention) { switch (Convention) { - case DW_CC_normal: return "CC_normal"; - case DW_CC_program: return "CC_program"; - case DW_CC_nocall: return "CC_nocall"; - case DW_CC_lo_user: return "CC_lo_user"; - case DW_CC_hi_user: return "CC_hi_user"; + case DW_CC_normal: return "CC_normal"; + case DW_CC_program: return "CC_program"; + case DW_CC_nocall: return "CC_nocall"; + case DW_CC_lo_user: return "CC_lo_user"; + case DW_CC_hi_user: return "CC_hi_user"; } - llvm_unreachable("Unknown Dwarf Calling Convention"); - return ""; + return 0; } /// InlineCodeString - Return the string for the specified inline code. /// -const char *InlineCodeString(unsigned Code) { - switch (Code) { - case DW_INL_not_inlined: return "INL_not_inlined"; - case DW_INL_inlined: return "INL_inlined"; - case DW_INL_declared_not_inlined: return "INL_declared_not_inlined"; - case DW_INL_declared_inlined: return "INL_declared_inlined"; +const char *llvm::dwarf::InlineCodeString(unsigned Code) { + switch (Code) { + case DW_INL_not_inlined: return "INL_not_inlined"; + case DW_INL_inlined: return "INL_inlined"; + case DW_INL_declared_not_inlined: return "INL_declared_not_inlined"; + case DW_INL_declared_inlined: return "INL_declared_inlined"; } - llvm_unreachable("Unknown Dwarf Inline Code"); - return ""; + return 0; } /// ArrayOrderString - Return the string for the specified array order. /// -const char *ArrayOrderString(unsigned Order) { - switch (Order) { - case DW_ORD_row_major: return "ORD_row_major"; - case DW_ORD_col_major: return "ORD_col_major"; +const char *llvm::dwarf::ArrayOrderString(unsigned Order) { + switch (Order) { + case DW_ORD_row_major: return "ORD_row_major"; + case DW_ORD_col_major: return "ORD_col_major"; } - llvm_unreachable("Unknown Dwarf Array Order"); - return ""; + return 0; } /// DiscriminantString - Return the string for the specified discriminant /// descriptor. -const char *DiscriminantString(unsigned Discriminant) { - switch (Discriminant) { - case DW_DSC_label: return "DSC_label"; - case DW_DSC_range: return "DSC_range"; +const char *llvm::dwarf::DiscriminantString(unsigned Discriminant) { + switch (Discriminant) { + case DW_DSC_label: return "DSC_label"; + case DW_DSC_range: return "DSC_range"; } - llvm_unreachable("Unknown Dwarf Discriminant Descriptor"); - return ""; + return 0; } /// LNStandardString - Return the string for the specified line number standard. 
/// -const char *LNStandardString(unsigned Standard) { - switch (Standard) { - case DW_LNS_copy: return "LNS_copy"; - case DW_LNS_advance_pc: return "LNS_advance_pc"; - case DW_LNS_advance_line: return "LNS_advance_line"; - case DW_LNS_set_file: return "LNS_set_file"; - case DW_LNS_set_column: return "LNS_set_column"; - case DW_LNS_negate_stmt: return "LNS_negate_stmt"; - case DW_LNS_set_basic_block: return "LNS_set_basic_block"; - case DW_LNS_const_add_pc: return "LNS_const_add_pc"; - case DW_LNS_fixed_advance_pc: return "LNS_fixed_advance_pc"; - case DW_LNS_set_prologue_end: return "LNS_set_prologue_end"; - case DW_LNS_set_epilogue_begin: return "LNS_set_epilogue_begin"; - case DW_LNS_set_isa: return "LNS_set_isa"; +const char *llvm::dwarf::LNStandardString(unsigned Standard) { + switch (Standard) { + case DW_LNS_copy: return "LNS_copy"; + case DW_LNS_advance_pc: return "LNS_advance_pc"; + case DW_LNS_advance_line: return "LNS_advance_line"; + case DW_LNS_set_file: return "LNS_set_file"; + case DW_LNS_set_column: return "LNS_set_column"; + case DW_LNS_negate_stmt: return "LNS_negate_stmt"; + case DW_LNS_set_basic_block: return "LNS_set_basic_block"; + case DW_LNS_const_add_pc: return "LNS_const_add_pc"; + case DW_LNS_fixed_advance_pc: return "LNS_fixed_advance_pc"; + case DW_LNS_set_prologue_end: return "LNS_set_prologue_end"; + case DW_LNS_set_epilogue_begin: return "LNS_set_epilogue_begin"; + case DW_LNS_set_isa: return "LNS_set_isa"; } - llvm_unreachable("Unknown Dwarf Line Number Standard"); - return ""; + return 0; } /// LNExtendedString - Return the string for the specified line number extended /// opcode encodings. -const char *LNExtendedString(unsigned Encoding) { - switch (Encoding) { - // Line Number Extended Opcode Encodings - case DW_LNE_end_sequence: return "LNE_end_sequence"; - case DW_LNE_set_address: return "LNE_set_address"; - case DW_LNE_define_file: return "LNE_define_file"; - case DW_LNE_lo_user: return "LNE_lo_user"; - case DW_LNE_hi_user: return "LNE_hi_user"; +const char *llvm::dwarf::LNExtendedString(unsigned Encoding) { + switch (Encoding) { + // Line Number Extended Opcode Encodings + case DW_LNE_end_sequence: return "LNE_end_sequence"; + case DW_LNE_set_address: return "LNE_set_address"; + case DW_LNE_define_file: return "LNE_define_file"; + case DW_LNE_lo_user: return "LNE_lo_user"; + case DW_LNE_hi_user: return "LNE_hi_user"; } - llvm_unreachable("Unknown Dwarf Line Number Extended Opcode Encoding"); - return ""; + return 0; } /// MacinfoString - Return the string for the specified macinfo type encodings. /// -const char *MacinfoString(unsigned Encoding) { - switch (Encoding) { - // Macinfo Type Encodings - case DW_MACINFO_define: return "MACINFO_define"; - case DW_MACINFO_undef: return "MACINFO_undef"; - case DW_MACINFO_start_file: return "MACINFO_start_file"; - case DW_MACINFO_end_file: return "MACINFO_end_file"; - case DW_MACINFO_vendor_ext: return "MACINFO_vendor_ext"; +const char *llvm::dwarf::MacinfoString(unsigned Encoding) { + switch (Encoding) { + // Macinfo Type Encodings + case DW_MACINFO_define: return "MACINFO_define"; + case DW_MACINFO_undef: return "MACINFO_undef"; + case DW_MACINFO_start_file: return "MACINFO_start_file"; + case DW_MACINFO_end_file: return "MACINFO_end_file"; + case DW_MACINFO_vendor_ext: return "MACINFO_vendor_ext"; } - llvm_unreachable("Unknown Dwarf Macinfo Type Encodings"); - return ""; + return 0; } /// CallFrameString - Return the string for the specified call frame instruction /// encodings. 
-const char *CallFrameString(unsigned Encoding) { - switch (Encoding) { - case DW_CFA_advance_loc: return "CFA_advance_loc"; - case DW_CFA_offset: return "CFA_offset"; - case DW_CFA_restore: return "CFA_restore"; - case DW_CFA_set_loc: return "CFA_set_loc"; - case DW_CFA_advance_loc1: return "CFA_advance_loc1"; - case DW_CFA_advance_loc2: return "CFA_advance_loc2"; - case DW_CFA_advance_loc4: return "CFA_advance_loc4"; - case DW_CFA_offset_extended: return "CFA_offset_extended"; - case DW_CFA_restore_extended: return "CFA_restore_extended"; - case DW_CFA_undefined: return "CFA_undefined"; - case DW_CFA_same_value: return "CFA_same_value"; - case DW_CFA_register: return "CFA_register"; - case DW_CFA_remember_state: return "CFA_remember_state"; - case DW_CFA_restore_state: return "CFA_restore_state"; - case DW_CFA_def_cfa: return "CFA_def_cfa"; - case DW_CFA_def_cfa_register: return "CFA_def_cfa_register"; - case DW_CFA_def_cfa_offset: return "CFA_def_cfa_offset"; - case DW_CFA_def_cfa_expression: return "CFA_def_cfa_expression"; - case DW_CFA_expression: return "CFA_expression"; - case DW_CFA_offset_extended_sf: return "CFA_offset_extended_sf"; - case DW_CFA_def_cfa_sf: return "CFA_def_cfa_sf"; - case DW_CFA_def_cfa_offset_sf: return "CFA_def_cfa_offset_sf"; - case DW_CFA_val_offset: return "CFA_val_offset"; - case DW_CFA_val_offset_sf: return "CFA_val_offset_sf"; - case DW_CFA_val_expression: return "CFA_val_expression"; - case DW_CFA_lo_user: return "CFA_lo_user"; - case DW_CFA_hi_user: return "CFA_hi_user"; +const char *llvm::dwarf::CallFrameString(unsigned Encoding) { + switch (Encoding) { + case DW_CFA_advance_loc: return "CFA_advance_loc"; + case DW_CFA_offset: return "CFA_offset"; + case DW_CFA_restore: return "CFA_restore"; + case DW_CFA_set_loc: return "CFA_set_loc"; + case DW_CFA_advance_loc1: return "CFA_advance_loc1"; + case DW_CFA_advance_loc2: return "CFA_advance_loc2"; + case DW_CFA_advance_loc4: return "CFA_advance_loc4"; + case DW_CFA_offset_extended: return "CFA_offset_extended"; + case DW_CFA_restore_extended: return "CFA_restore_extended"; + case DW_CFA_undefined: return "CFA_undefined"; + case DW_CFA_same_value: return "CFA_same_value"; + case DW_CFA_register: return "CFA_register"; + case DW_CFA_remember_state: return "CFA_remember_state"; + case DW_CFA_restore_state: return "CFA_restore_state"; + case DW_CFA_def_cfa: return "CFA_def_cfa"; + case DW_CFA_def_cfa_register: return "CFA_def_cfa_register"; + case DW_CFA_def_cfa_offset: return "CFA_def_cfa_offset"; + case DW_CFA_def_cfa_expression: return "CFA_def_cfa_expression"; + case DW_CFA_expression: return "CFA_expression"; + case DW_CFA_offset_extended_sf: return "CFA_offset_extended_sf"; + case DW_CFA_def_cfa_sf: return "CFA_def_cfa_sf"; + case DW_CFA_def_cfa_offset_sf: return "CFA_def_cfa_offset_sf"; + case DW_CFA_val_offset: return "CFA_val_offset"; + case DW_CFA_val_offset_sf: return "CFA_val_offset_sf"; + case DW_CFA_val_expression: return "CFA_val_expression"; + case DW_CFA_lo_user: return "CFA_lo_user"; + case DW_CFA_hi_user: return "CFA_hi_user"; } - llvm_unreachable("Unknown Dwarf Call Frame Instruction Encodings"); - return ""; + return 0; } - -} // End of namespace dwarf. - -} // End of namespace llvm. diff --git a/lib/Support/MemoryBuffer.cpp b/lib/Support/MemoryBuffer.cpp index df1aa6a..9253b01 100644 --- a/lib/Support/MemoryBuffer.cpp +++ b/lib/Support/MemoryBuffer.cpp @@ -46,7 +46,7 @@ MemoryBuffer::~MemoryBuffer() { /// successfully. 
void MemoryBuffer::initCopyOf(const char *BufStart, const char *BufEnd) { size_t Size = BufEnd-BufStart; - BufferStart = (char *)malloc((Size+1) * sizeof(char)); + BufferStart = (char *)malloc(Size+1); BufferEnd = BufferStart+Size; memcpy(const_cast<char*>(BufferStart), BufStart, Size); *const_cast<char*>(BufferEnd) = 0; // Null terminate buffer. @@ -108,7 +108,7 @@ MemoryBuffer *MemoryBuffer::getMemBufferCopy(const char *StartPtr, /// the MemoryBuffer object. MemoryBuffer *MemoryBuffer::getNewUninitMemBuffer(size_t Size, StringRef BufferName) { - char *Buf = (char *)malloc((Size+1) * sizeof(char)); + char *Buf = (char *)malloc(Size+1); if (!Buf) return 0; Buf[Size] = 0; MemoryBufferMem *SB = new MemoryBufferMem(Buf, Buf+Size, BufferName); diff --git a/lib/Support/SmallVector.cpp b/lib/Support/SmallVector.cpp new file mode 100644 index 0000000..6821382 --- /dev/null +++ b/lib/Support/SmallVector.cpp @@ -0,0 +1,37 @@ +//===- llvm/ADT/SmallVector.cpp - 'Normally small' vectors ----------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the SmallVector class. +// +//===----------------------------------------------------------------------===// + +#include "llvm/ADT/SmallVector.h" +using namespace llvm; + +/// grow_pod - This is an implementation of the grow() method which only works +/// on POD-like datatypes and is out of line to reduce code duplication. +void SmallVectorBase::grow_pod(size_t MinSizeInBytes, size_t TSize) { + size_t CurSizeBytes = size_in_bytes(); + size_t NewCapacityInBytes = 2 * capacity_in_bytes(); + if (NewCapacityInBytes < MinSizeInBytes) + NewCapacityInBytes = MinSizeInBytes; + void *NewElts = operator new(NewCapacityInBytes); + + // Copy the elements over. No need to run dtors on PODs. + memcpy(NewElts, this->BeginX, CurSizeBytes); + + // If this wasn't grown from the inline copy, deallocate the old space. + if (!this->isSmall()) + operator delete(this->BeginX); + + this->EndX = (char*)NewElts+CurSizeBytes; + this->BeginX = NewElts; + this->CapacityX = (char*)this->BeginX + NewCapacityInBytes; +} + diff --git a/lib/Support/StringRef.cpp b/lib/Support/StringRef.cpp index 2d023e4..e4a9984 100644 --- a/lib/Support/StringRef.cpp +++ b/lib/Support/StringRef.cpp @@ -8,6 +8,7 @@ //===----------------------------------------------------------------------===// #include "llvm/ADT/StringRef.h" +#include "llvm/ADT/SmallVector.h" using namespace llvm; // MSVC emits references to this into the translation units which reference it. @@ -35,6 +36,45 @@ int StringRef::compare_lower(StringRef RHS) const { return Length < RHS.Length ? -1 : 1; } +// Compute the edit distance between the two given strings. +unsigned StringRef::edit_distance(llvm::StringRef Other, + bool AllowReplacements) { + // The algorithm implemented below is the "classic" + // dynamic-programming algorithm for computing the Levenshtein + // distance, which is described here: + // + // http://en.wikipedia.org/wiki/Levenshtein_distance + // + // Although the algorithm is typically described using an m x n + // array, only two rows are used at a time, so this implementation + // just keeps two separate vectors for those two rows.
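The comment above describes the classic two-row formulation of the Levenshtein recurrence; the StringRef::edit_distance body follows below. As a self-contained sketch of the same scheme (twoRowEditDistance and the sample strings are illustrative only, with std::vector standing in for the patch's SmallVector):

    #include <algorithm>
    #include <cassert>
    #include <string>
    #include <vector>

    // Two-row Levenshtein DP: 'previous' holds row y-1, 'current' holds row y.
    static unsigned twoRowEditDistance(const std::string &A, const std::string &B) {
      std::vector<unsigned> previous(B.size() + 1), current(B.size() + 1);
      for (unsigned i = 0; i <= B.size(); ++i)
        previous[i] = i;                      // Cost of building B[0..i) from "".
      for (unsigned y = 1; y <= A.size(); ++y) {
        current[0] = y;                       // Cost of erasing A[0..y).
        for (unsigned x = 1; x <= B.size(); ++x)
          current[x] = std::min(previous[x-1] + (A[y-1] == B[x-1] ? 0u : 1u),
                                std::min(current[x-1], previous[x]) + 1);
        current.swap(previous);
      }
      return previous[B.size()];
    }

    int main() {
      assert(twoRowEditDistance("flaw", "lawn") == 2); // drop 'f', append 'n'
      return 0;
    }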
+ size_type m = size(); + size_type n = Other.size(); + + SmallVector<unsigned, 32> previous(n+1, 0); + for (SmallVector<unsigned, 32>::size_type i = 0; i <= n; ++i) + previous[i] = i; + + SmallVector<unsigned, 32> current(n+1, 0); + for (size_type y = 1; y <= m; ++y) { + current.assign(n+1, 0); + current[0] = y; + for (size_type x = 1; x <= n; ++x) { + if (AllowReplacements) { + current[x] = min(previous[x-1] + ((*this)[y-1] == Other[x-1]? 0u:1u), + min(current[x-1], previous[x])+1); + } + else { + if ((*this)[y-1] == Other[x-1]) current[x] = previous[x-1]; + else current[x] = min(current[x-1], previous[x]) + 1; + } + } + current.swap(previous); + } + + return previous[n]; +} + //===----------------------------------------------------------------------===// // String Searching //===----------------------------------------------------------------------===// diff --git a/lib/Support/circular_raw_ostream.cpp b/lib/Support/circular_raw_ostream.cpp new file mode 100644 index 0000000..e52996d --- /dev/null +++ b/lib/Support/circular_raw_ostream.cpp @@ -0,0 +1,48 @@ +//===- circular_raw_ostream.cpp - Implement the circular_raw_ostream class -===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This implements support for circular buffered streams. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Support/circular_raw_ostream.h" + +#include <algorithm> + +using namespace llvm; + +void circular_raw_ostream::write_impl(const char *Ptr, size_t Size) { + if (BufferSize == 0) { + TheStream->write(Ptr, Size); + return; + } + + // Write into the buffer, wrapping if necessary. + while (Size != 0) { + unsigned Bytes = std::min(Size, BufferSize - (Cur - BufferArray)); + memcpy(Cur, Ptr, Bytes); + Size -= Bytes; + Cur += Bytes; + Ptr += Bytes; // Don't re-copy the same source bytes when wrapping. + if (Cur == BufferArray + BufferSize) { + // Reset the output pointer to the start of the buffer. + Cur = BufferArray; + Filled = true; + } + } +} + +void circular_raw_ostream::flushBufferWithBanner(void) { + if (BufferSize != 0) { + // Write out the buffer + int num = std::strlen(Banner); + TheStream->write(Banner, num); + flushBuffer(); + } +} diff --git a/lib/Support/raw_os_ostream.cpp b/lib/Support/raw_os_ostream.cpp index 3374dd7..44f2325 100644 --- a/lib/Support/raw_os_ostream.cpp +++ b/lib/Support/raw_os_ostream.cpp @@ -27,4 +27,4 @@ void raw_os_ostream::write_impl(const char *Ptr, size_t Size) { OS.write(Ptr, Size); } -uint64_t raw_os_ostream::current_pos() { return OS.tellp(); } +uint64_t raw_os_ostream::current_pos() const { return OS.tellp(); } diff --git a/lib/Support/raw_ostream.cpp b/lib/Support/raw_ostream.cpp index 0c90e77..a820210 100644 --- a/lib/Support/raw_ostream.cpp +++ b/lib/Support/raw_ostream.cpp @@ -67,7 +67,7 @@ raw_ostream::~raw_ostream() { // An out of line virtual method to provide a home for the class vtable. void raw_ostream::handle() {} -size_t raw_ostream::preferred_buffer_size() { +size_t raw_ostream::preferred_buffer_size() const { // BUFSIZ is intended to be a reasonable default. return BUFSIZ; } @@ -440,20 +440,20 @@ uint64_t raw_fd_ostream::seek(uint64_t off) { return pos; } -size_t raw_fd_ostream::preferred_buffer_size() { +size_t raw_fd_ostream::preferred_buffer_size() const { #if !defined(_MSC_VER) && !defined(__MINGW32__) // Windows has no st_blksize.
assert(FD >= 0 && "File not yet open!"); struct stat statbuf; - if (fstat(FD, &statbuf) == 0) { - // If this is a terminal, don't use buffering. Line buffering - // would be a more traditional thing to do, but it's not worth - // the complexity. - if (S_ISCHR(statbuf.st_mode) && isatty(FD)) - return 0; - // Return the preferred block size. - return statbuf.st_blksize; - } - error_detected(); + if (fstat(FD, &statbuf) != 0) + return 0; + + // If this is a terminal, don't use buffering. Line buffering + // would be a more traditional thing to do, but it's not worth + // the complexity. + if (S_ISCHR(statbuf.st_mode) && isatty(FD)) + return 0; + // Return the preferred block size. + return statbuf.st_blksize; #endif return raw_ostream::preferred_buffer_size(); } @@ -578,7 +578,9 @@ void raw_svector_ostream::write_impl(const char *Ptr, size_t Size) { SetBuffer(OS.end(), OS.capacity() - OS.size()); } -uint64_t raw_svector_ostream::current_pos() { return OS.size(); } +uint64_t raw_svector_ostream::current_pos() const { + return OS.size(); +} StringRef raw_svector_ostream::str() { flush(); @@ -601,6 +603,6 @@ raw_null_ostream::~raw_null_ostream() { void raw_null_ostream::write_impl(const char *Ptr, size_t Size) { } -uint64_t raw_null_ostream::current_pos() { +uint64_t raw_null_ostream::current_pos() const { return 0; } diff --git a/lib/System/DynamicLibrary.cpp b/lib/System/DynamicLibrary.cpp index 7eb9f5f..63baa6d 100644 --- a/lib/System/DynamicLibrary.cpp +++ b/lib/System/DynamicLibrary.cpp @@ -69,29 +69,7 @@ bool DynamicLibrary::LoadLibraryPermanently(const char *Filename, return false; } -void* DynamicLibrary::SearchForAddressOfSymbol(const char* symbolName) { - // First check symbols added via AddSymbol(). - if (ExplicitSymbols) { - std::map<std::string, void *>::iterator I = - ExplicitSymbols->find(symbolName); - std::map<std::string, void *>::iterator E = ExplicitSymbols->end(); - - if (I != E) - return I->second; - } - - // Now search the libraries. - if (OpenedHandles) { - for (std::vector<void *>::iterator I = OpenedHandles->begin(), - E = OpenedHandles->end(); I != E; ++I) { - //lt_ptr ptr = lt_dlsym(*I, symbolName); - void *ptr = dlsym(*I, symbolName); - if (ptr) { - return ptr; - } - } - } - +static void *SearchForAddressOfSpecialSymbol(const char* symbolName) { #define EXPLICIT_SYMBOL(SYM) \ extern void *SYM; if (!strcmp(symbolName, #SYM)) return &SYM @@ -128,6 +106,34 @@ void* DynamicLibrary::SearchForAddressOfSymbol(const char* symbolName) { #endif #undef EXPLICIT_SYMBOL + return 0; +} + +void* DynamicLibrary::SearchForAddressOfSymbol(const char* symbolName) { + // First check symbols added via AddSymbol(). + if (ExplicitSymbols) { + std::map<std::string, void *>::iterator I = + ExplicitSymbols->find(symbolName); + std::map<std::string, void *>::iterator E = ExplicitSymbols->end(); + + if (I != E) + return I->second; + } + + // Now search the libraries. 
+ if (OpenedHandles) { + for (std::vector<void *>::iterator I = OpenedHandles->begin(), + E = OpenedHandles->end(); I != E; ++I) { + //lt_ptr ptr = lt_dlsym(*I, symbolName); + void *ptr = dlsym(*I, symbolName); + if (ptr) { + return ptr; + } + } + } + + if (void *Result = SearchForAddressOfSpecialSymbol(symbolName)) + return Result; // This macro returns the address of a well-known, explicit symbol #define EXPLICIT_SYMBOL(SYM) \ diff --git a/lib/System/Path.cpp b/lib/System/Path.cpp index 8e1fa53..6844530 100644 --- a/lib/System/Path.cpp +++ b/lib/System/Path.cpp @@ -176,7 +176,7 @@ Path::FindLibrary(std::string& name) { return sys::Path(); } -std::string Path::GetDLLSuffix() { +StringRef Path::GetDLLSuffix() { return LTDL_SHLIB_EXT; } @@ -191,7 +191,7 @@ Path::isBitcodeFile() const { return FT == Bitcode_FileType; } -bool Path::hasMagicNumber(const std::string &Magic) const { +bool Path::hasMagicNumber(StringRef Magic) const { std::string actualMagic; if (getMagicNumber(actualMagic, static_cast<unsigned>(Magic.size()))) return Magic == actualMagic; @@ -217,8 +217,9 @@ static void getPathList(const char*path, std::vector<Path>& Paths) { Paths.push_back(tmpPath); } -static std::string getDirnameCharSep(const std::string& path, char Sep) { - +static StringRef getDirnameCharSep(StringRef path, const char *Sep) { + assert(Sep[0] != '\0' && Sep[1] == '\0' && + "Sep must be a 1-character string literal."); if (path.empty()) return "."; @@ -227,31 +228,31 @@ static std::string getDirnameCharSep(const std::string& path, char Sep) { signed pos = static_cast<signed>(path.size()) - 1; - while (pos >= 0 && path[pos] == Sep) + while (pos >= 0 && path[pos] == Sep[0]) --pos; if (pos < 0) - return path[0] == Sep ? std::string(1, Sep) : std::string("."); + return path[0] == Sep[0] ? Sep : "."; // Any slashes left? signed i = 0; - while (i < pos && path[i] != Sep) + while (i < pos && path[i] != Sep[0]) ++i; if (i == pos) // No slashes? Return "." return "."; // There is at least one slash left. Remove all trailing non-slashes. - while (pos >= 0 && path[pos] != Sep) + while (pos >= 0 && path[pos] != Sep[0]) --pos; // Remove any trailing slashes. - while (pos >= 0 && path[pos] == Sep) + while (pos >= 0 && path[pos] == Sep[0]) --pos; if (pos < 0) - return path[0] == Sep ? std::string(1, Sep) : std::string("."); + return path[0] == Sep[0] ? Sep : "."; return path.substr(0, pos+1); } diff --git a/lib/System/Unix/Path.inc b/lib/System/Unix/Path.inc index 33b26f7..a99720c 100644 --- a/lib/System/Unix/Path.inc +++ b/lib/System/Unix/Path.inc @@ -16,7 +16,6 @@ //=== is guaranteed to work on *all* UNIX variants. 
//===----------------------------------------------------------------------===// -#include "llvm/ADT/SmallVector.h" #include "Unix.h" #if HAVE_SYS_STAT_H #include <sys/stat.h> @@ -79,15 +78,15 @@ using namespace sys; const char sys::PathSeparator = ':'; -Path::Path(const std::string& p) +Path::Path(StringRef p) : path(p) {} Path::Path(const char *StrStart, unsigned StrLen) : path(StrStart, StrLen) {} Path& -Path::operator=(const std::string &that) { - path = that; +Path::operator=(StringRef that) { + path.assign(that.data(), that.size()); return *this; } @@ -378,11 +377,11 @@ Path Path::GetMainExecutable(const char *argv0, void *MainAddr) { } -std::string Path::getDirname() const { - return getDirnameCharSep(path, '/'); +StringRef Path::getDirname() const { + return getDirnameCharSep(path, "/"); } -std::string +StringRef Path::getBasename() const { // Find the last slash std::string::size_type slash = path.rfind('/'); @@ -393,12 +392,12 @@ Path::getBasename() const { std::string::size_type dot = path.rfind('.'); if (dot == std::string::npos || dot < slash) - return path.substr(slash); + return StringRef(path).substr(slash); else - return path.substr(slash, dot - slash); + return StringRef(path).substr(slash, dot - slash); } -std::string +StringRef Path::getSuffix() const { // Find the last slash std::string::size_type slash = path.rfind('/'); @@ -409,26 +408,24 @@ Path::getSuffix() const { std::string::size_type dot = path.rfind('.'); if (dot == std::string::npos || dot < slash) - return std::string(); + return StringRef(""); else - return path.substr(dot + 1); + return StringRef(path).substr(dot + 1); } -bool Path::getMagicNumber(std::string& Magic, unsigned len) const { +bool Path::getMagicNumber(std::string &Magic, unsigned len) const { assert(len < 1024 && "Request for magic string too long"); - SmallVector<char, 128> Buf; - Buf.resize(1 + len); - char* buf = Buf.data(); + char Buf[1025]; int fd = ::open(path.c_str(), O_RDONLY); if (fd < 0) return false; - ssize_t bytes_read = ::read(fd, buf, len); + ssize_t bytes_read = ::read(fd, Buf, len); ::close(fd); if (ssize_t(len) != bytes_read) { Magic.clear(); return false; } - Magic.assign(buf,len); + Magic.assign(Buf, len); return true; } @@ -481,7 +478,7 @@ Path::canExecute() const { return true; } -std::string +StringRef Path::getLast() const { // Find the last slash size_t pos = path.rfind('/'); @@ -495,12 +492,12 @@ Path::getLast() const { // Find the second to last slash size_t pos2 = path.rfind('/', pos-1); if (pos2 == std::string::npos) - return path.substr(0,pos); + return StringRef(path).substr(0,pos); else - return path.substr(pos2+1,pos-pos2-1); + return StringRef(path).substr(pos2+1,pos-pos2-1); } // Return everything after the last slash - return path.substr(pos+1); + return StringRef(path).substr(pos+1); } const FileStatus * @@ -592,7 +589,7 @@ Path::getDirectoryContents(std::set<Path>& result, std::string* ErrMsg) const { } bool -Path::set(const std::string& a_path) { +Path::set(StringRef a_path) { if (a_path.empty()) return false; std::string save(path); @@ -605,7 +602,7 @@ Path::set(const std::string& a_path) { } bool -Path::appendComponent(const std::string& name) { +Path::appendComponent(StringRef name) { if (name.empty()) return false; std::string save(path); @@ -637,7 +634,7 @@ Path::eraseComponent() { } bool -Path::appendSuffix(const std::string& suffix) { +Path::appendSuffix(StringRef suffix) { std::string save(path); path.append("."); path.append(suffix); @@ -861,18 +858,15 @@ Path::makeUnique(bool reuse_current, 
std::string* ErrMsg) { // Append an XXXXXX pattern to the end of the file for use with mkstemp, // mktemp or our own implementation. - SmallVector<char, 128> Buf; - Buf.resize(path.size()+8); - char *FNBuffer = Buf.data(); - path.copy(FNBuffer,path.size()); + std::string Buf(path); if (isDirectory()) - strcpy(FNBuffer+path.size(), "/XXXXXX"); + Buf += "/XXXXXX"; else - strcpy(FNBuffer+path.size(), "-XXXXXX"); + Buf += "-XXXXXX"; #if defined(HAVE_MKSTEMP) int TempFD; - if ((TempFD = mkstemp(FNBuffer)) == -1) + if ((TempFD = mkstemp((char*)Buf.c_str())) == -1) return MakeErrMsg(ErrMsg, path + ": can't make unique filename"); // We don't need to hold the temp file descriptor... we will trust that no one @@ -880,21 +874,21 @@ Path::makeUnique(bool reuse_current, std::string* ErrMsg) { close(TempFD); // Save the name - path = FNBuffer; + path = Buf; #elif defined(HAVE_MKTEMP) // If we don't have mkstemp, use the old and obsolete mktemp function. - if (mktemp(FNBuffer) == 0) + if (mktemp(Buf.c_str()) == 0) return MakeErrMsg(ErrMsg, path + ": can't make unique filename"); // Save the name - path = FNBuffer; + path = Buf; #else // Okay, looks like we have to do it all by our lonesome. static unsigned FCounter = 0; unsigned offset = path.size() + 1; - while ( FCounter < 999999 && exists()) { - sprintf(FNBuffer+offset,"%06u",++FCounter); - path = FNBuffer; + while (FCounter < 999999 && exists()) { + sprintf(Buf.data()+offset, "%06u", ++FCounter); + path = Buf; } if (FCounter > 999999) return MakeErrMsg(ErrMsg, diff --git a/lib/System/Unix/Process.inc b/lib/System/Unix/Process.inc index 911b8c3..cf6a47a 100644 --- a/lib/System/Unix/Process.inc +++ b/lib/System/Unix/Process.inc @@ -277,7 +277,7 @@ bool Process::ColorNeedsFlush() { COLOR(FGBG, "7", BOLD)\ } -static const char* colorcodes[2][2][8] = { +static const char colorcodes[2][2][8][10] = { { ALLCOLORS("3",""), ALLCOLORS("3","1;") }, { ALLCOLORS("4",""), ALLCOLORS("4","1;") } }; diff --git a/lib/System/Win32/Path.inc b/lib/System/Win32/Path.inc index 634fbc7..b5f6374 100644 --- a/lib/System/Win32/Path.inc +++ b/lib/System/Win32/Path.inc @@ -47,7 +47,7 @@ namespace llvm { namespace sys { const char PathSeparator = ';'; -Path::Path(const std::string& p) +Path::Path(llvm::StringRef p) : path(p) { FlipBackSlashes(path); } @@ -58,8 +58,8 @@ Path::Path(const char *StrStart, unsigned StrLen) } Path& -Path::operator=(const std::string &that) { - path = that; +Path::operator=(StringRef that) { + path.assign(that.data(), that.size()); FlipBackSlashes(path); return *this; } @@ -287,11 +287,11 @@ Path::isRootDirectory() const { return len > 0 && path[len-1] == '/'; } -std::string Path::getDirname() const { - return getDirnameCharSep(path, '/'); +StringRef Path::getDirname() const { + return getDirnameCharSep(path, "/"); } -std::string +StringRef Path::getBasename() const { // Find the last slash size_t slash = path.rfind('/'); @@ -302,12 +302,12 @@ Path::getBasename() const { size_t dot = path.rfind('.'); if (dot == std::string::npos || dot < slash) - return path.substr(slash); + return StringRef(path).substr(slash); else - return path.substr(slash, dot - slash); + return StringRef(path).substr(slash, dot - slash); } -std::string +StringRef Path::getSuffix() const { // Find the last slash size_t slash = path.rfind('/'); @@ -318,9 +318,9 @@ Path::getSuffix() const { size_t dot = path.rfind('.'); if (dot == std::string::npos || dot < slash) - return std::string(); + return StringRef(""); else - return path.substr(dot + 1); + return StringRef(path).substr(dot + 
1); } bool @@ -364,7 +364,7 @@ Path::isRegularFile() const { return true; } -std::string +StringRef Path::getLast() const { // Find the last slash size_t pos = path.rfind('/'); @@ -378,7 +378,7 @@ Path::getLast() const { return path; // Return everything after the last slash - return path.substr(pos+1); + return StringRef(path).substr(pos+1); } const FileStatus * @@ -490,7 +490,7 @@ Path::getDirectoryContents(std::set<Path>& result, std::string* ErrMsg) const { } bool -Path::set(const std::string& a_path) { +Path::set(StringRef a_path) { if (a_path.empty()) return false; std::string save(path); @@ -504,7 +504,7 @@ Path::set(const std::string& a_path) { } bool -Path::appendComponent(const std::string& name) { +Path::appendComponent(StringRef name) { if (name.empty()) return false; std::string save(path); @@ -536,7 +536,7 @@ Path::eraseComponent() { } bool -Path::appendSuffix(const std::string& suffix) { +Path::appendSuffix(StringRef suffix) { std::string save(path); path.append("."); path.append(suffix); diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp index 1aae369..7cfa097 100644 --- a/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -944,8 +944,6 @@ reMaterialize(MachineBasicBlock &MBB, unsigned DestReg, unsigned SubIdx, const MachineInstr *Orig, const TargetRegisterInfo *TRI) const { - DebugLoc dl = Orig->getDebugLoc(); - if (SubIdx && TargetRegisterInfo::isPhysicalRegister(DestReg)) { DestReg = TRI->getSubReg(DestReg, SubIdx); SubIdx = 0; diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/lib/Target/ARM/ARMBaseRegisterInfo.cpp index 9b5f79f..7aebdf4 100644 --- a/lib/Target/ARM/ARMBaseRegisterInfo.cpp +++ b/lib/Target/ARM/ARMBaseRegisterInfo.cpp @@ -1373,7 +1373,7 @@ emitPrologue(MachineFunction &MF) const { // bic r4, r4, MaxAlign // mov sp, r4 // FIXME: It will be better just to find spare register here. - BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVtgpr2gpr), ARM::R4) + BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2tgpr), ARM::R4) .addReg(ARM::SP, RegState::Kill); AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl, TII.get(ARM::t2BICri), ARM::R4) diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index 655c762..334baae 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -1273,7 +1273,8 @@ ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, LowerCallTo(Chain, (const Type *) Type::getInt32Ty(*DAG.getContext()), false, false, false, false, 0, CallingConv::C, false, /*isReturnValueUsed=*/true, - DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl); + DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl, + DAG.GetOrdering(Chain.getNode())); return CallResult.first; } @@ -3147,6 +3148,7 @@ ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, unsigned ptr = MI->getOperand(1).getReg(); unsigned incr = MI->getOperand(2).getReg(); DebugLoc dl = MI->getDebugLoc(); + bool isThumb2 = Subtarget->isThumb2(); unsigned ldrOpc, strOpc; switch (Size) { @@ -3213,6 +3215,9 @@ ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, // exitMBB: // ... BB = exitMBB; + + F->DeleteMachineInstr(MI); // The instruction is gone now. 
+ return BB; } @@ -4265,7 +4270,7 @@ ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, case 'w': if (VT == MVT::f32) return std::make_pair(0U, ARM::SPRRegisterClass); - if (VT == MVT::f64) + if (VT.getSizeInBits() == 64) return std::make_pair(0U, ARM::DPRRegisterClass); if (VT.getSizeInBits() == 128) return std::make_pair(0U, ARM::QPRRegisterClass); @@ -4302,7 +4307,7 @@ getRegClassForInlineAsmConstraint(const std::string &Constraint, ARM::S20,ARM::S21,ARM::S22,ARM::S23, ARM::S24,ARM::S25,ARM::S26,ARM::S27, ARM::S28,ARM::S29,ARM::S30,ARM::S31, 0); - if (VT == MVT::f64) + if (VT.getSizeInBits() == 64) return make_vector<unsigned>(ARM::D0, ARM::D1, ARM::D2, ARM::D3, ARM::D4, ARM::D5, ARM::D6, ARM::D7, ARM::D8, ARM::D9, ARM::D10,ARM::D11, diff --git a/lib/Target/ARM/ARMInstrFormats.td b/lib/Target/ARM/ARMInstrFormats.td index cf0edff..28b2821 100644 --- a/lib/Target/ARM/ARMInstrFormats.td +++ b/lib/Target/ARM/ARMInstrFormats.td @@ -146,11 +146,9 @@ def s_cc_out : OptionalDefOperand<OtherVT, (ops CCR), (ops (i32 CPSR))> { // ARM Instruction templates. // -class InstARM<AddrMode am, SizeFlagVal sz, IndexMode im, - Format f, Domain d, string cstr, InstrItinClass itin> +class InstTemplate<AddrMode am, SizeFlagVal sz, IndexMode im, + Format f, Domain d, string cstr, InstrItinClass itin> : Instruction { - field bits<32> Inst; - let Namespace = "ARM"; // TSFlagsFields @@ -179,6 +177,20 @@ class InstARM<AddrMode am, SizeFlagVal sz, IndexMode im, let Itinerary = itin; } +class Encoding { + field bits<32> Inst; +} + +class InstARM<AddrMode am, SizeFlagVal sz, IndexMode im, + Format f, Domain d, string cstr, InstrItinClass itin> + : InstTemplate<am, sz, im, f, d, cstr, itin>, Encoding; + +// This Encoding-less class is used by Thumb1 to specify the encoding bits later +// on by adding flavors to specific instructions. 
+class InstThumb<AddrMode am, SizeFlagVal sz, IndexMode im, + Format f, Domain d, string cstr, InstrItinClass itin> + : InstTemplate<am, sz, im, f, d, cstr, itin>; + class PseudoInst<dag oops, dag iops, InstrItinClass itin, string asm, list<dag> pattern> : InstARM<AddrModeNone, SizeSpecial, IndexModeNone, Pseudo, GenericDomain, @@ -861,7 +873,7 @@ class ARMV6Pat<dag pattern, dag result> : Pat<pattern, result> { class ThumbI<dag oops, dag iops, AddrMode am, SizeFlagVal sz, InstrItinClass itin, string asm, string cstr, list<dag> pattern> - : InstARM<am, sz, IndexModeNone, ThumbFrm, GenericDomain, cstr, itin> { + : InstThumb<am, sz, IndexModeNone, ThumbFrm, GenericDomain, cstr, itin> { let OutOperandList = oops; let InOperandList = iops; let AsmString = asm; @@ -876,9 +888,14 @@ class TI<dag oops, dag iops, InstrItinClass itin, string asm, list<dag> pattern> class TIt<dag oops, dag iops, InstrItinClass itin, string asm, list<dag> pattern> : ThumbI<oops, iops, AddrModeNone, Size2Bytes, itin, asm, "$lhs = $dst", pattern>; -// tBL, tBX instructions -class TIx2<dag oops, dag iops, InstrItinClass itin, string asm, list<dag> pattern> - : ThumbI<oops, iops, AddrModeNone, Size4Bytes, itin, asm, "", pattern>; +// tBL, tBX 32-bit instructions +class TIx2<bits<5> opcod1, bits<2> opcod2, bit opcod3, + dag oops, dag iops, InstrItinClass itin, string asm, list<dag> pattern> + : ThumbI<oops, iops, AddrModeNone, Size4Bytes, itin, asm, "", pattern>, Encoding { + let Inst{31-27} = opcod1; + let Inst{15-14} = opcod2; + let Inst{12} = opcod3; +} // BR_JT instructions class TJTI<dag oops, dag iops, InstrItinClass itin, string asm, list<dag> pattern> @@ -887,7 +904,7 @@ class TJTI<dag oops, dag iops, InstrItinClass itin, string asm, list<dag> patter // Thumb1 only class Thumb1I<dag oops, dag iops, AddrMode am, SizeFlagVal sz, InstrItinClass itin, string asm, string cstr, list<dag> pattern> - : InstARM<am, sz, IndexModeNone, ThumbFrm, GenericDomain, cstr, itin> { + : InstThumb<am, sz, IndexModeNone, ThumbFrm, GenericDomain, cstr, itin> { let OutOperandList = oops; let InOperandList = iops; let AsmString = asm; @@ -915,7 +932,7 @@ class T1It<dag oops, dag iops, InstrItinClass itin, class Thumb1sI<dag oops, dag iops, AddrMode am, SizeFlagVal sz, InstrItinClass itin, string opc, string asm, string cstr, list<dag> pattern> - : InstARM<am, sz, IndexModeNone, ThumbFrm, GenericDomain, cstr, itin> { + : InstThumb<am, sz, IndexModeNone, ThumbFrm, GenericDomain, cstr, itin> { let OutOperandList = !con(oops, (ops s_cc_out:$s)); let InOperandList = !con(iops, (ops pred:$p)); let AsmString = !strconcat(opc, !strconcat("${s}${p}", asm)); @@ -937,7 +954,7 @@ class T1sIt<dag oops, dag iops, InstrItinClass itin, class Thumb1pI<dag oops, dag iops, AddrMode am, SizeFlagVal sz, InstrItinClass itin, string opc, string asm, string cstr, list<dag> pattern> - : InstARM<am, sz, IndexModeNone, ThumbFrm, GenericDomain, cstr, itin> { + : InstThumb<am, sz, IndexModeNone, ThumbFrm, GenericDomain, cstr, itin> { let OutOperandList = oops; let InOperandList = !con(iops, (ops pred:$p)); let AsmString = !strconcat(opc, !strconcat("${p}", asm)); @@ -968,6 +985,50 @@ class T1pIs<dag oops, dag iops, InstrItinClass itin, string opc, string asm, list<dag> pattern> : Thumb1pI<oops, iops, AddrModeT1_s, Size2Bytes, itin, opc, asm, "", pattern>; +class Encoding16 : Encoding { + let Inst{31-16} = 0x0000; +} + +// A6.2 16-bit Thumb instruction encoding +class T1Encoding<bits<6> opcode> : Encoding16 { + let Inst{15-10} = opcode; +} + +// A6.2.1 Shift 
(immediate), add, subtract, move, and compare encoding. +class T1General<bits<5> opcode> : Encoding16 { + let Inst{15-14} = 0b00; + let Inst{13-9} = opcode; +} + +// A6.2.2 Data-processing encoding. +class T1DataProcessing<bits<4> opcode> : Encoding16 { + let Inst{15-10} = 0b010000; + let Inst{9-6} = opcode; +} + +// A6.2.3 Special data instructions and branch and exchange encoding. +class T1Special<bits<4> opcode> : Encoding16 { + let Inst{15-10} = 0b010001; + let Inst{9-6} = opcode; +} + +// A6.2.4 Load/store single data item encoding. +class T1LoadStore<bits<4> opA, bits<3> opB> : Encoding16 { + let Inst{15-12} = opA; + let Inst{11-9} = opB; +} +class T1LdSt<bits<3> opB> : T1LoadStore<0b0101, opB>; +class T1LdSt4Imm<bits<3> opB> : T1LoadStore<0b0110, opB>; // Immediate, 4 bytes +class T1LdSt1Imm<bits<3> opB> : T1LoadStore<0b0111, opB>; // Immediate, 1 byte +class T1LdSt2Imm<bits<3> opB> : T1LoadStore<0b1000, opB>; // Immediate, 2 bytes +class T1LdStSP<bits<3> opB> : T1LoadStore<0b1001, opB>; // SP relative + +// A6.2.5 Miscellaneous 16-bit instructions encoding. +class T1Misc<bits<7> opcode> : Encoding16 { + let Inst{15-12} = 0b1011; + let Inst{11-5} = opcode; +} + // Thumb2I - Thumb2 instruction. Almost all Thumb2 instructions are predicable. class Thumb2I<dag oops, dag iops, AddrMode am, SizeFlagVal sz, InstrItinClass itin, @@ -1034,9 +1095,18 @@ class T2Iso<dag oops, dag iops, InstrItinClass itin, class T2Ipc<dag oops, dag iops, InstrItinClass itin, string opc, string asm, list<dag> pattern> : Thumb2I<oops, iops, AddrModeT2_pc, Size4Bytes, itin, opc, asm, "", pattern>; -class T2Ii8s4<dag oops, dag iops, InstrItinClass itin, +class T2Ii8s4<bit P, bit W, bit load, dag oops, dag iops, InstrItinClass itin, string opc, string asm, list<dag> pattern> - : Thumb2I<oops, iops, AddrModeT2_i8s4, Size4Bytes, itin, opc, asm, "", pattern>; + : Thumb2I<oops, iops, AddrModeT2_i8s4, Size4Bytes, itin, opc, asm, "", + pattern> { + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b00; + let Inst{24} = P; + let Inst{23} = ?; // The U bit. + let Inst{22} = 1; + let Inst{21} = W; + let Inst{20} = load; +} class T2sI<dag oops, dag iops, InstrItinClass itin, string opc, string asm, list<dag> pattern> @@ -1055,8 +1125,9 @@ class T2Ix2<dag oops, dag iops, InstrItinClass itin, // T2Iidxldst - Thumb2 indexed load / store instructions. -class T2Iidxldst<dag oops, dag iops, AddrMode am, IndexMode im, - InstrItinClass itin, +class T2Iidxldst<bit signed, bits<2> opcod, bit load, bit pre, + dag oops, dag iops, + AddrMode am, IndexMode im, InstrItinClass itin, string opc, string asm, string cstr, list<dag> pattern> : InstARM<am, Size4Bytes, im, ThumbFrm, GenericDomain, cstr, itin> { let OutOperandList = oops; @@ -1064,6 +1135,16 @@ class T2Iidxldst<dag oops, dag iops, AddrMode am, IndexMode im, let AsmString = !strconcat(opc, !strconcat("${p}", asm)); let Pattern = pattern; list<Predicate> Predicates = [IsThumb2]; + let Inst{31-27} = 0b11111; + let Inst{26-25} = 0b00; + let Inst{24} = signed; + let Inst{23} = 0; + let Inst{22-21} = opcod; + let Inst{20} = load; + let Inst{11} = 1; + // (P, W) = (1, 1) Pre-indexed or (0, 1) Post-indexed + let Inst{10} = pre; // The P bit. + let Inst{8} = 1; // The W bit. } // Tv5Pat - Same as Pat<>, but requires V5T Thumb mode. 
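The encoding classes above pin down only the fixed opcode bits of each 16-bit instruction; the '?' positions remain operand bits, and the patch attaches these classes to defs such as tLDR and tAND further down. As a hedged illustration (plain C++, helper names mine) of how the field assignments compose, here are the fixed prefixes implied by T1DataProcessing and T1LoadStore, checked against two well-known Thumb encodings:

    #include <cstdint>

    // T1DataProcessing<opcode>: Inst{15-10} = 0b010000, Inst{9-6} = opcode.
    constexpr uint16_t t1DataProcessingBits(uint16_t opcode) {
      return (0x10u << 10) | (opcode << 6);
    }

    // T1LoadStore<opA, opB>: Inst{15-12} = opA, Inst{11-9} = opB.
    constexpr uint16_t t1LoadStoreBits(uint16_t opA, uint16_t opB) {
      return (opA << 12) | (opB << 9);
    }

    // AND (register) encodes as 0x4000, and LDR (register) -- T1LdSt<0b100>,
    // i.e. T1LoadStore<0b0101, 0b100> -- as 0x5800, matching the ARM ARM.
    static_assert(t1DataProcessingBits(0x0) == 0x4000, "tAND fixed bits");
    static_assert(t1LoadStoreBits(0x5, 0x4) == 0x5800, "tLDR fixed bits");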
diff --git a/lib/Target/ARM/ARMInstrInfo.td b/lib/Target/ARM/ARMInstrInfo.td index e14696a..da8b373 100644 --- a/lib/Target/ARM/ARMInstrInfo.td +++ b/lib/Target/ARM/ARMInstrInfo.td @@ -1740,7 +1740,7 @@ def LDREXD : AIldrex<0b01, (outs GPR:$dest, GPR:$dest2), (ins GPR:$ptr), []>; } -let mayStore = 1 in { +let mayStore = 1, Constraints = "@earlyclobber $success" in { def STREXB : AIstrex<0b10, (outs GPR:$success), (ins GPR:$src, GPR:$ptr), NoItinerary, "strexb", "\t$success, $src, [$ptr]", diff --git a/lib/Target/ARM/ARMInstrThumb.td b/lib/Target/ARM/ARMInstrThumb.td index 9306bdb..34d7d8f 100644 --- a/lib/Target/ARM/ARMInstrThumb.td +++ b/lib/Target/ARM/ARMInstrThumb.td @@ -113,7 +113,7 @@ def t_addrmode_s1 : Operand<i32>, def t_addrmode_sp : Operand<i32>, ComplexPattern<i32, 2, "SelectThumbAddrModeSP", []> { let PrintMethod = "printThumbAddrModeSPOperand"; - let MIOperandInfo = (ops tGPR:$base, i32imm:$offsimm); + let MIOperandInfo = (ops JustSP:$base, i32imm:$offsimm); } //===----------------------------------------------------------------------===// @@ -136,31 +136,46 @@ PseudoInst<(outs), (ins i32imm:$amt), NoItinerary, let isNotDuplicable = 1 in def tPICADD : TIt<(outs GPR:$dst), (ins GPR:$lhs, pclabel:$cp), IIC_iALUr, "\n$cp:\n\tadd\t$dst, pc", - [(set GPR:$dst, (ARMpic_add GPR:$lhs, imm:$cp))]>; + [(set GPR:$dst, (ARMpic_add GPR:$lhs, imm:$cp))]>, + T1Special<{0,0,?,?}> { + let Inst{6-3} = 0b1111; // A8.6.6 Rm = pc +} // PC relative add. def tADDrPCi : T1I<(outs tGPR:$dst), (ins t_imm_s4:$rhs), IIC_iALUi, - "add\t$dst, pc, $rhs", []>; + "add\t$dst, pc, $rhs", []>, + T1Encoding<{1,0,1,0,0,?}>; // A6.2 & A8.6.10 // ADD rd, sp, #imm8 def tADDrSPi : T1I<(outs tGPR:$dst), (ins GPR:$sp, t_imm_s4:$rhs), IIC_iALUi, - "add\t$dst, $sp, $rhs", []>; + "add\t$dst, $sp, $rhs", []>, + T1Encoding<{1,0,1,0,1,?}>; // A6.2 & A8.6.8 // ADD sp, sp, #imm7 def tADDspi : TIt<(outs GPR:$dst), (ins GPR:$lhs, t_imm_s4:$rhs), IIC_iALUi, - "add\t$dst, $rhs", []>; + "add\t$dst, $rhs", []>, + T1Misc<{0,0,0,0,0,?,?}>; // A6.2.5 & A8.6.8 // SUB sp, sp, #imm7 def tSUBspi : TIt<(outs GPR:$dst), (ins GPR:$lhs, t_imm_s4:$rhs), IIC_iALUi, - "sub\t$dst, $rhs", []>; + "sub\t$dst, $rhs", []>, + T1Misc<{0,0,0,0,1,?,?}>; // A6.2.5 & A8.6.215 // ADD rm, sp def tADDrSP : TIt<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs), IIC_iALUr, - "add\t$dst, $rhs", []>; + "add\t$dst, $rhs", []>, + T1Special<{0,0,?,?}> { + let Inst{6-3} = 0b1101; // A8.6.9 Encoding T1 +} // ADD sp, rm def tADDspr : TIt<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs), IIC_iALUr, - "add\t$dst, $rhs", []>; + "add\t$dst, $rhs", []>, + T1Special<{0,0,?,?}> { + // A8.6.9 Encoding T2 + let Inst{7} = 1; + let Inst{2-0} = 0b101; +} // Pseudo instruction that will expand into a tSUBspi + a copy. let usesCustomInserter = 1 in { // Expanded after instruction selection. @@ -180,22 +195,32 @@ def tANDsp : PseudoInst<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), // let isReturn = 1, isTerminator = 1, isBarrier = 1 in { - def tBX_RET : TI<(outs), (ins), IIC_Br, "bx\tlr", [(ARMretflag)]>; + def tBX_RET : TI<(outs), (ins), IIC_Br, "bx\tlr", [(ARMretflag)]>, + T1Special<{1,1,0,?}> { // A6.2.3 & A8.6.25 + let Inst{6-3} = 0b1110; // Rm = lr + } // Alternative return instruction used by vararg functions. 
- def tBX_RET_vararg : TI<(outs), (ins tGPR:$target), IIC_Br, "bx\t$target", []>; + def tBX_RET_vararg : TI<(outs), (ins tGPR:$target), IIC_Br, "bx\t$target", []>, + T1Special<{1,1,0,?}>; // A6.2.3 & A8.6.25 } // Indirect branches let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in { def tBRIND : TI<(outs), (ins GPR:$dst), IIC_Br, "mov\tpc, $dst", - [(brind GPR:$dst)]>; + [(brind GPR:$dst)]>, + T1Special<{1,0,?,?}> { + // <Rd> = pc + let Inst{7} = 1; + let Inst{2-0} = 0b111; + } } // FIXME: remove when we have a way to marking a MI with these properties. let isReturn = 1, isTerminator = 1, isBarrier = 1, mayLoad = 1, hasExtraDefRegAllocReq = 1 in def tPOP_RET : T1I<(outs), (ins pred:$p, reglist:$wb, variable_ops), IIC_Br, - "pop${p}\t$wb", []>; + "pop${p}\t$wb", []>, + T1Misc<{1,1,0,?,?,?,?}>; let isCall = 1, Defs = [R0, R1, R2, R3, R12, LR, @@ -203,25 +228,29 @@ let isCall = 1, D16, D17, D18, D19, D20, D21, D22, D23, D24, D25, D26, D27, D28, D29, D30, D31, CPSR, FPSCR] in { // Also used for Thumb2 - def tBL : TIx2<(outs), (ins i32imm:$func, variable_ops), IIC_Br, - "bl\t${func:call}", - [(ARMtcall tglobaladdr:$func)]>, + def tBL : TIx2<0b11110, 0b11, 1, + (outs), (ins i32imm:$func, variable_ops), IIC_Br, + "bl\t${func:call}", + [(ARMtcall tglobaladdr:$func)]>, Requires<[IsThumb, IsNotDarwin]>; // ARMv5T and above, also used for Thumb2 - def tBLXi : TIx2<(outs), (ins i32imm:$func, variable_ops), IIC_Br, - "blx\t${func:call}", - [(ARMcall tglobaladdr:$func)]>, + def tBLXi : TIx2<0b11110, 0b11, 0, + (outs), (ins i32imm:$func, variable_ops), IIC_Br, + "blx\t${func:call}", + [(ARMcall tglobaladdr:$func)]>, Requires<[IsThumb, HasV5T, IsNotDarwin]>; // Also used for Thumb2 def tBLXr : TI<(outs), (ins GPR:$func, variable_ops), IIC_Br, "blx\t$func", [(ARMtcall GPR:$func)]>, - Requires<[IsThumb, HasV5T, IsNotDarwin]>; + Requires<[IsThumb, HasV5T, IsNotDarwin]>, + T1Special<{1,1,1,?}>; // A6.2.3 & A8.6.24; // ARMv4T - def tBX : TIx2<(outs), (ins tGPR:$func, variable_ops), IIC_Br, + def tBX : TIx2<{?,?,?,?,?}, {?,?}, ?, + (outs), (ins tGPR:$func, variable_ops), IIC_Br, "mov\tlr, pc\n\tbx\t$func", [(ARMcall_nolink tGPR:$func)]>, Requires<[IsThumb1Only, IsNotDarwin]>; @@ -234,27 +263,31 @@ let isCall = 1, D16, D17, D18, D19, D20, D21, D22, D23, D24, D25, D26, D27, D28, D29, D30, D31, CPSR, FPSCR] in { // Also used for Thumb2 - def tBLr9 : TIx2<(outs), (ins i32imm:$func, variable_ops), IIC_Br, + def tBLr9 : TIx2<0b11110, 0b11, 1, + (outs), (ins i32imm:$func, variable_ops), IIC_Br, "bl\t${func:call}", [(ARMtcall tglobaladdr:$func)]>, Requires<[IsThumb, IsDarwin]>; // ARMv5T and above, also used for Thumb2 - def tBLXi_r9 : TIx2<(outs), (ins i32imm:$func, variable_ops), IIC_Br, + def tBLXi_r9 : TIx2<0b11110, 0b11, 0, + (outs), (ins i32imm:$func, variable_ops), IIC_Br, "blx\t${func:call}", [(ARMcall tglobaladdr:$func)]>, Requires<[IsThumb, HasV5T, IsDarwin]>; // Also used for Thumb2 def tBLXr_r9 : TI<(outs), (ins GPR:$func, variable_ops), IIC_Br, - "blx\t$func", - [(ARMtcall GPR:$func)]>, - Requires<[IsThumb, HasV5T, IsDarwin]>; + "blx\t$func", + [(ARMtcall GPR:$func)]>, + Requires<[IsThumb, HasV5T, IsDarwin]>, + T1Special<{1,1,1,?}>; // A6.2.3 & A8.6.24 // ARMv4T - def tBXr9 : TIx2<(outs), (ins tGPR:$func, variable_ops), IIC_Br, - "mov\tlr, pc\n\tbx\t$func", - [(ARMcall_nolink tGPR:$func)]>, + def tBXr9 : TIx2<{?,?,?,?,?}, {?,?}, ?, + (outs), (ins tGPR:$func, variable_ops), IIC_Br, + "mov\tlr, pc\n\tbx\t$func", + [(ARMcall_nolink tGPR:$func)]>, Requires<[IsThumb1Only, IsDarwin]>; } 
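The TIx2 parameters threaded through the call instructions above land exactly where the class definition earlier in this patch puts them: Inst{31-27} = opcod1, Inst{15-14} = opcod2, Inst{12} = opcod3, with the two Thumb halfwords concatenated into one 32-bit word. A small sketch (helper name mine) confirming the resulting fixed bits:

    #include <cstdint>

    // Compose the three TIx2 opcode fields into the 32-bit instruction word.
    constexpr uint32_t tix2Bits(uint32_t opcod1, uint32_t opcod2, uint32_t opcod3) {
      return (opcod1 << 27) | (opcod2 << 14) | (opcod3 << 12);
    }

    // tBL is TIx2<0b11110, 0b11, 1, ...> and tBLXi is TIx2<0b11110, 0b11, 0, ...>,
    // so BL and BLX (immediate) differ only in bit 12, as in the ARM ARM.
    static_assert(tix2Bits(0x1E, 0x3, 1) == 0xF000D000, "tBL fixed bits");
    static_assert(tix2Bits(0x1E, 0x3, 0) == 0xF000C000, "tBLXi fixed bits");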
@@ -262,17 +295,22 @@ let isBranch = 1, isTerminator = 1 in { let isBarrier = 1 in { let isPredicable = 1 in def tB : T1I<(outs), (ins brtarget:$target), IIC_Br, - "b\t$target", [(br bb:$target)]>; + "b\t$target", [(br bb:$target)]>, + T1Encoding<{1,1,1,0,0,?}>; // Far jump let Defs = [LR] in - def tBfar : TIx2<(outs), (ins brtarget:$target), IIC_Br, + def tBfar : TIx2<0b11110, 0b11, 1, (outs), (ins brtarget:$target), IIC_Br, "bl\t$target\t@ far jump",[]>; def tBR_JTr : T1JTI<(outs), (ins tGPR:$target, jtblock_operand:$jt, i32imm:$id), IIC_Br, "mov\tpc, $target\n\t.align\t2\n$jt", - [(ARMbrjt tGPR:$target, tjumptable:$jt, imm:$id)]>; + [(ARMbrjt tGPR:$target, tjumptable:$jt, imm:$id)]>, + Encoding16 { + let Inst{15-7} = 0b010001101; + let Inst{2-0} = 0b111; + } } } @@ -281,15 +319,18 @@ let isBranch = 1, isTerminator = 1 in { let isBranch = 1, isTerminator = 1 in def tBcc : T1I<(outs), (ins brtarget:$target, pred:$cc), IIC_Br, "b$cc\t$target", - [/*(ARMbrcond bb:$target, imm:$cc)*/]>; + [/*(ARMbrcond bb:$target, imm:$cc)*/]>, + T1Encoding<{1,1,0,1,?,?}>; // Compare and branch on zero / non-zero let isBranch = 1, isTerminator = 1 in { def tCBZ : T1I<(outs), (ins tGPR:$cmp, brtarget:$target), IIC_Br, - "cbz\t$cmp, $target", []>; + "cbz\t$cmp, $target", []>, + T1Misc<{0,0,?,1,?,?,?}>; def tCBNZ : T1I<(outs), (ins tGPR:$cmp, brtarget:$target), IIC_Br, - "cbnz\t$cmp, $target", []>; + "cbnz\t$cmp, $target", []>, + T1Misc<{1,0,?,1,?,?,?}>; } //===----------------------------------------------------------------------===// @@ -299,71 +340,85 @@ let isBranch = 1, isTerminator = 1 in { let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in def tLDR : T1pI4<(outs tGPR:$dst), (ins t_addrmode_s4:$addr), IIC_iLoadr, "ldr", "\t$dst, $addr", - [(set tGPR:$dst, (load t_addrmode_s4:$addr))]>; + [(set tGPR:$dst, (load t_addrmode_s4:$addr))]>, + T1LdSt<0b100>; def tLDRB : T1pI1<(outs tGPR:$dst), (ins t_addrmode_s1:$addr), IIC_iLoadr, "ldrb", "\t$dst, $addr", - [(set tGPR:$dst, (zextloadi8 t_addrmode_s1:$addr))]>; + [(set tGPR:$dst, (zextloadi8 t_addrmode_s1:$addr))]>, + T1LdSt<0b110>; def tLDRH : T1pI2<(outs tGPR:$dst), (ins t_addrmode_s2:$addr), IIC_iLoadr, "ldrh", "\t$dst, $addr", - [(set tGPR:$dst, (zextloadi16 t_addrmode_s2:$addr))]>; + [(set tGPR:$dst, (zextloadi16 t_addrmode_s2:$addr))]>, + T1LdSt<0b101>; let AddedComplexity = 10 in def tLDRSB : T1pI1<(outs tGPR:$dst), (ins t_addrmode_rr:$addr), IIC_iLoadr, "ldrsb", "\t$dst, $addr", - [(set tGPR:$dst, (sextloadi8 t_addrmode_rr:$addr))]>; + [(set tGPR:$dst, (sextloadi8 t_addrmode_rr:$addr))]>, + T1LdSt<0b011>; let AddedComplexity = 10 in def tLDRSH : T1pI2<(outs tGPR:$dst), (ins t_addrmode_rr:$addr), IIC_iLoadr, "ldrsh", "\t$dst, $addr", - [(set tGPR:$dst, (sextloadi16 t_addrmode_rr:$addr))]>; + [(set tGPR:$dst, (sextloadi16 t_addrmode_rr:$addr))]>, + T1LdSt<0b111>; let canFoldAsLoad = 1 in def tLDRspi : T1pIs<(outs tGPR:$dst), (ins t_addrmode_sp:$addr), IIC_iLoadi, "ldr", "\t$dst, $addr", - [(set tGPR:$dst, (load t_addrmode_sp:$addr))]>; + [(set tGPR:$dst, (load t_addrmode_sp:$addr))]>, + T1LdStSP<{1,?,?}>; // Special instruction for restore. It cannot clobber condition register // when it's expanded by eliminateCallFramePseudoInstr(). let canFoldAsLoad = 1, mayLoad = 1 in def tRestore : T1pIs<(outs tGPR:$dst), (ins t_addrmode_sp:$addr), IIC_iLoadi, - "ldr", "\t$dst, $addr", []>; + "ldr", "\t$dst, $addr", []>, + T1LdStSP<{1,?,?}>; // Load tconstpool // FIXME: Use ldr.n to work around a Darwin assembler bug. 
let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in def tLDRpci : T1pIs<(outs tGPR:$dst), (ins i32imm:$addr), IIC_iLoadi, "ldr", ".n\t$dst, $addr", - [(set tGPR:$dst, (load (ARMWrapper tconstpool:$addr)))]>; + [(set tGPR:$dst, (load (ARMWrapper tconstpool:$addr)))]>, + T1Encoding<{0,1,0,0,1,?}>; // A6.2 & A8.6.59 // Special LDR for loads from non-pc-relative constpools. let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in def tLDRcp : T1pIs<(outs tGPR:$dst), (ins i32imm:$addr), IIC_iLoadi, - "ldr", "\t$dst, $addr", []>; + "ldr", "\t$dst, $addr", []>, + T1LdStSP<{1,?,?}>; def tSTR : T1pI4<(outs), (ins tGPR:$src, t_addrmode_s4:$addr), IIC_iStorer, "str", "\t$src, $addr", - [(store tGPR:$src, t_addrmode_s4:$addr)]>; + [(store tGPR:$src, t_addrmode_s4:$addr)]>, + T1LdSt<0b000>; def tSTRB : T1pI1<(outs), (ins tGPR:$src, t_addrmode_s1:$addr), IIC_iStorer, "strb", "\t$src, $addr", - [(truncstorei8 tGPR:$src, t_addrmode_s1:$addr)]>; + [(truncstorei8 tGPR:$src, t_addrmode_s1:$addr)]>, + T1LdSt<0b010>; def tSTRH : T1pI2<(outs), (ins tGPR:$src, t_addrmode_s2:$addr), IIC_iStorer, "strh", "\t$src, $addr", - [(truncstorei16 tGPR:$src, t_addrmode_s2:$addr)]>; + [(truncstorei16 tGPR:$src, t_addrmode_s2:$addr)]>, + T1LdSt<0b001>; def tSTRspi : T1pIs<(outs), (ins tGPR:$src, t_addrmode_sp:$addr), IIC_iStorei, "str", "\t$src, $addr", - [(store tGPR:$src, t_addrmode_sp:$addr)]>; + [(store tGPR:$src, t_addrmode_sp:$addr)]>, + T1LdStSP<{0,?,?}>; let mayStore = 1 in { // Special instruction for spill. It cannot clobber condition register // when it's expanded by eliminateCallFramePseudoInstr(). def tSpill : T1pIs<(outs), (ins tGPR:$src, t_addrmode_sp:$addr), IIC_iStorei, - "str", "\t$src, $addr", []>; + "str", "\t$src, $addr", []>, + T1LdStSP<{0,?,?}>; } //===----------------------------------------------------------------------===// @@ -375,21 +430,25 @@ let mayLoad = 1, hasExtraDefRegAllocReq = 1 in def tLDM : T1I<(outs), (ins addrmode4:$addr, pred:$p, reglist:$wb, variable_ops), IIC_iLoadm, - "ldm${addr:submode}${p}\t$addr, $wb", []>; + "ldm${addr:submode}${p}\t$addr, $wb", []>, + T1Encoding<{1,1,0,0,1,?}>; // A6.2 & A8.6.53 let mayStore = 1, hasExtraSrcRegAllocReq = 1 in def tSTM : T1I<(outs), (ins addrmode4:$addr, pred:$p, reglist:$wb, variable_ops), IIC_iStorem, - "stm${addr:submode}${p}\t$addr, $wb", []>; + "stm${addr:submode}${p}\t$addr, $wb", []>, + T1Encoding<{1,1,0,0,0,?}>; // A6.2 & A8.6.189 let mayLoad = 1, Uses = [SP], Defs = [SP], hasExtraDefRegAllocReq = 1 in def tPOP : T1I<(outs), (ins pred:$p, reglist:$wb, variable_ops), IIC_Br, - "pop${p}\t$wb", []>; + "pop${p}\t$wb", []>, + T1Misc<{1,1,0,?,?,?,?}>; let mayStore = 1, Uses = [SP], Defs = [SP], hasExtraSrcRegAllocReq = 1 in def tPUSH : T1I<(outs), (ins pred:$p, reglist:$wb, variable_ops), IIC_Br, - "push${p}\t$wb", []>; + "push${p}\t$wb", []>, + T1Misc<{0,1,0,?,?,?,?}>; //===----------------------------------------------------------------------===// // Arithmetic Instructions. 
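The load/store hunks above follow the same pattern: T1LdSt and T1LdStSP plausibly share a single helper parameterized on the two opcode fields of the 16-bit load/store formats. Again a sketch under that assumption, reusing Encoding16 from the sketch above, not text from this commit:

    // Sketch only; the real classes live in ARMInstrFormats.td.
    class T1LoadStore<bits<4> opA, bits<3> opB> : Encoding16 {
      let Inst{15-12} = opA;      // Format group, e.g. 0b0101 or 0b1001.
      let Inst{11-9}  = opB;      // Picks LDR/STR/LDRB/... within the group.
    }
    class T1LdSt<bits<3> opB>   : T1LoadStore<0b0101, opB>; // register-offset forms
    class T1LdStSP<bits<3> opB> : T1LoadStore<0b1001, opB>; // SP-relative forms

Read this way, tLDR's T1LdSt<0b100> yields the 0101 100 pattern of LDR (register), and tLDRspi's T1LdStSP<{1,?,?}> sets the L bit of the SP-relative form.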
@@ -399,82 +458,98 @@ def tPUSH : T1I<(outs), (ins pred:$p, reglist:$wb, variable_ops), IIC_Br, let isCommutable = 1, Uses = [CPSR] in def tADC : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iALUr, "adc", "\t$dst, $rhs", - [(set tGPR:$dst, (adde tGPR:$lhs, tGPR:$rhs))]>; + [(set tGPR:$dst, (adde tGPR:$lhs, tGPR:$rhs))]>, + T1DataProcessing<0b0101>; // Add immediate def tADDi3 : T1sI<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs), IIC_iALUi, "add", "\t$dst, $lhs, $rhs", - [(set tGPR:$dst, (add tGPR:$lhs, imm0_7:$rhs))]>; + [(set tGPR:$dst, (add tGPR:$lhs, imm0_7:$rhs))]>, + T1General<0b01110>; def tADDi8 : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs), IIC_iALUi, "add", "\t$dst, $rhs", - [(set tGPR:$dst, (add tGPR:$lhs, imm8_255:$rhs))]>; + [(set tGPR:$dst, (add tGPR:$lhs, imm8_255:$rhs))]>, + T1General<{1,1,0,?,?}>; // Add register let isCommutable = 1 in def tADDrr : T1sI<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iALUr, "add", "\t$dst, $lhs, $rhs", - [(set tGPR:$dst, (add tGPR:$lhs, tGPR:$rhs))]>; + [(set tGPR:$dst, (add tGPR:$lhs, tGPR:$rhs))]>, + T1General<0b01100>; let neverHasSideEffects = 1 in def tADDhirr : T1pIt<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs), IIC_iALUr, - "add", "\t$dst, $rhs", []>; + "add", "\t$dst, $rhs", []>, + T1Special<{0,0,?,?}>; // And register let isCommutable = 1 in def tAND : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iALUr, "and", "\t$dst, $rhs", - [(set tGPR:$dst, (and tGPR:$lhs, tGPR:$rhs))]>; + [(set tGPR:$dst, (and tGPR:$lhs, tGPR:$rhs))]>, + T1DataProcessing<0b0000>; // ASR immediate def tASRri : T1sI<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs), IIC_iMOVsi, "asr", "\t$dst, $lhs, $rhs", - [(set tGPR:$dst, (sra tGPR:$lhs, (i32 imm:$rhs)))]>; + [(set tGPR:$dst, (sra tGPR:$lhs, (i32 imm:$rhs)))]>, + T1General<{0,1,0,?,?}>; // ASR register def tASRrr : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iMOVsr, "asr", "\t$dst, $rhs", - [(set tGPR:$dst, (sra tGPR:$lhs, tGPR:$rhs))]>; + [(set tGPR:$dst, (sra tGPR:$lhs, tGPR:$rhs))]>, + T1DataProcessing<0b0100>; // BIC register def tBIC : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iALUr, "bic", "\t$dst, $rhs", - [(set tGPR:$dst, (and tGPR:$lhs, (not tGPR:$rhs)))]>; + [(set tGPR:$dst, (and tGPR:$lhs, (not tGPR:$rhs)))]>, + T1DataProcessing<0b1110>; // CMN register let Defs = [CPSR] in { def tCMN : T1pI<(outs), (ins tGPR:$lhs, tGPR:$rhs), IIC_iCMPr, "cmn", "\t$lhs, $rhs", - [(ARMcmp tGPR:$lhs, (ineg tGPR:$rhs))]>; -def tCMNZ : T1pI<(outs), (ins tGPR:$lhs, tGPR:$rhs), IIC_iCMPr, + [(ARMcmp tGPR:$lhs, (ineg tGPR:$rhs))]>, + T1DataProcessing<0b1011>; +def tCMNz : T1pI<(outs), (ins tGPR:$lhs, tGPR:$rhs), IIC_iCMPr, "cmn", "\t$lhs, $rhs", - [(ARMcmpZ tGPR:$lhs, (ineg tGPR:$rhs))]>; + [(ARMcmpZ tGPR:$lhs, (ineg tGPR:$rhs))]>, + T1DataProcessing<0b1011>; } // CMP immediate let Defs = [CPSR] in { def tCMPi8 : T1pI<(outs), (ins tGPR:$lhs, i32imm:$rhs), IIC_iCMPi, "cmp", "\t$lhs, $rhs", - [(ARMcmp tGPR:$lhs, imm0_255:$rhs)]>; + [(ARMcmp tGPR:$lhs, imm0_255:$rhs)]>, + T1General<{1,0,1,?,?}>; def tCMPzi8 : T1pI<(outs), (ins tGPR:$lhs, i32imm:$rhs), IIC_iCMPi, "cmp", "\t$lhs, $rhs", - [(ARMcmpZ tGPR:$lhs, imm0_255:$rhs)]>; - + [(ARMcmpZ tGPR:$lhs, imm0_255:$rhs)]>, + T1General<{1,0,1,?,?}>; } // CMP register let Defs = [CPSR] in { def tCMPr : T1pI<(outs), (ins tGPR:$lhs, tGPR:$rhs), IIC_iCMPr, "cmp", "\t$lhs, $rhs", - [(ARMcmp tGPR:$lhs, tGPR:$rhs)]>; + [(ARMcmp tGPR:$lhs, tGPR:$rhs)]>, + T1DataProcessing<0b1010>; def tCMPzr : T1pI<(outs), (ins tGPR:$lhs, tGPR:$rhs), 
IIC_iCMPr, "cmp", "\t$lhs, $rhs", - [(ARMcmpZ tGPR:$lhs, tGPR:$rhs)]>; + [(ARMcmpZ tGPR:$lhs, tGPR:$rhs)]>, + T1DataProcessing<0b1010>; def tCMPhir : T1pI<(outs), (ins GPR:$lhs, GPR:$rhs), IIC_iCMPr, - "cmp", "\t$lhs, $rhs", []>; + "cmp", "\t$lhs, $rhs", []>, + T1Special<{0,1,?,?}>; def tCMPzhir : T1pI<(outs), (ins GPR:$lhs, GPR:$rhs), IIC_iCMPr, - "cmp", "\t$lhs, $rhs", []>; + "cmp", "\t$lhs, $rhs", []>, + T1Special<{0,1,?,?}>; } @@ -482,32 +557,38 @@ def tCMPzhir : T1pI<(outs), (ins GPR:$lhs, GPR:$rhs), IIC_iCMPr, let isCommutable = 1 in def tEOR : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iALUr, "eor", "\t$dst, $rhs", - [(set tGPR:$dst, (xor tGPR:$lhs, tGPR:$rhs))]>; + [(set tGPR:$dst, (xor tGPR:$lhs, tGPR:$rhs))]>, + T1DataProcessing<0b0001>; // LSL immediate def tLSLri : T1sI<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs), IIC_iMOVsi, "lsl", "\t$dst, $lhs, $rhs", - [(set tGPR:$dst, (shl tGPR:$lhs, (i32 imm:$rhs)))]>; + [(set tGPR:$dst, (shl tGPR:$lhs, (i32 imm:$rhs)))]>, + T1General<{0,0,0,?,?}>; // LSL register def tLSLrr : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iMOVsr, "lsl", "\t$dst, $rhs", - [(set tGPR:$dst, (shl tGPR:$lhs, tGPR:$rhs))]>; + [(set tGPR:$dst, (shl tGPR:$lhs, tGPR:$rhs))]>, + T1DataProcessing<0b0010>; // LSR immediate def tLSRri : T1sI<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs), IIC_iMOVsi, "lsr", "\t$dst, $lhs, $rhs", - [(set tGPR:$dst, (srl tGPR:$lhs, (i32 imm:$rhs)))]>; + [(set tGPR:$dst, (srl tGPR:$lhs, (i32 imm:$rhs)))]>, + T1General<{0,0,1,?,?}>; // LSR register def tLSRrr : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iMOVsr, "lsr", "\t$dst, $rhs", - [(set tGPR:$dst, (srl tGPR:$lhs, tGPR:$rhs))]>; + [(set tGPR:$dst, (srl tGPR:$lhs, tGPR:$rhs))]>, + T1DataProcessing<0b0011>; // move register def tMOVi8 : T1sI<(outs tGPR:$dst), (ins i32imm:$src), IIC_iMOVi, "mov", "\t$dst, $src", - [(set tGPR:$dst, imm0_255:$src)]>; + [(set tGPR:$dst, imm0_255:$src)]>, + T1General<{1,0,0,?,?}>; // TODO: A7-73: MOV(2) - mov setting flag. @@ -515,42 +596,52 @@ def tMOVi8 : T1sI<(outs tGPR:$dst), (ins i32imm:$src), IIC_iMOVi, let neverHasSideEffects = 1 in { // FIXME: Make this predicable. def tMOVr : T1I<(outs tGPR:$dst), (ins tGPR:$src), IIC_iMOVr, - "mov\t$dst, $src", []>; + "mov\t$dst, $src", []>, + T1Special<0b1000>; let Defs = [CPSR] in def tMOVSr : T1I<(outs tGPR:$dst), (ins tGPR:$src), IIC_iMOVr, - "movs\t$dst, $src", []>; + "movs\t$dst, $src", []>, Encoding16 { + let Inst{15-6} = 0b0000000000; +} // FIXME: Make these predicable. 
def tMOVgpr2tgpr : T1I<(outs tGPR:$dst), (ins GPR:$src), IIC_iMOVr, - "mov\t$dst, $src", []>; + "mov\t$dst, $src", []>, + T1Special<{1,0,0,1}>; def tMOVtgpr2gpr : T1I<(outs GPR:$dst), (ins tGPR:$src), IIC_iMOVr, - "mov\t$dst, $src", []>; + "mov\t$dst, $src", []>, + T1Special<{1,0,1,0}>; def tMOVgpr2gpr : T1I<(outs GPR:$dst), (ins GPR:$src), IIC_iMOVr, - "mov\t$dst, $src", []>; + "mov\t$dst, $src", []>, + T1Special<{1,0,1,1}>; } // neverHasSideEffects // multiply register let isCommutable = 1 in def tMUL : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iMUL32, "mul", "\t$dst, $rhs", - [(set tGPR:$dst, (mul tGPR:$lhs, tGPR:$rhs))]>; + [(set tGPR:$dst, (mul tGPR:$lhs, tGPR:$rhs))]>, + T1DataProcessing<0b1101>; // move inverse register def tMVN : T1sI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iMOVr, "mvn", "\t$dst, $src", - [(set tGPR:$dst, (not tGPR:$src))]>; + [(set tGPR:$dst, (not tGPR:$src))]>, + T1DataProcessing<0b1111>; // bitwise or register let isCommutable = 1 in def tORR : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iALUr, "orr", "\t$dst, $rhs", - [(set tGPR:$dst, (or tGPR:$lhs, tGPR:$rhs))]>; + [(set tGPR:$dst, (or tGPR:$lhs, tGPR:$rhs))]>, + T1DataProcessing<0b1100>; // swaps def tREV : T1pI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iUNAr, "rev", "\t$dst, $src", [(set tGPR:$dst, (bswap tGPR:$src))]>, - Requires<[IsThumb1Only, HasV6]>; + Requires<[IsThumb1Only, HasV6]>, + T1Misc<{1,0,1,0,0,0,?}>; def tREV16 : T1pI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iUNAr, "rev16", "\t$dst, $src", @@ -559,7 +650,8 @@ def tREV16 : T1pI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iUNAr, (or (and (shl tGPR:$src, (i32 8)), 0xFF00), (or (and (srl tGPR:$src, (i32 8)), 0xFF0000), (and (shl tGPR:$src, (i32 8)), 0xFF000000)))))]>, - Requires<[IsThumb1Only, HasV6]>; + Requires<[IsThumb1Only, HasV6]>, + T1Misc<{1,0,1,0,0,1,?}>; def tREVSH : T1pI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iUNAr, "revsh", "\t$dst, $src", @@ -567,37 +659,44 @@ def tREVSH : T1pI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iUNAr, (sext_inreg (or (srl (and tGPR:$src, 0xFF00), (i32 8)), (shl tGPR:$src, (i32 8))), i16))]>, - Requires<[IsThumb1Only, HasV6]>; + Requires<[IsThumb1Only, HasV6]>, + T1Misc<{1,0,1,0,1,1,?}>; // rotate right register def tROR : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iMOVsr, "ror", "\t$dst, $rhs", - [(set tGPR:$dst, (rotr tGPR:$lhs, tGPR:$rhs))]>; + [(set tGPR:$dst, (rotr tGPR:$lhs, tGPR:$rhs))]>, + T1DataProcessing<0b0111>; // negate register def tRSB : T1sI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iALUi, "rsb", "\t$dst, $src, #0", - [(set tGPR:$dst, (ineg tGPR:$src))]>; + [(set tGPR:$dst, (ineg tGPR:$src))]>, + T1DataProcessing<0b1001>; // Subtract with carry register let Uses = [CPSR] in def tSBC : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iALUr, "sbc", "\t$dst, $rhs", - [(set tGPR:$dst, (sube tGPR:$lhs, tGPR:$rhs))]>; + [(set tGPR:$dst, (sube tGPR:$lhs, tGPR:$rhs))]>, + T1DataProcessing<0b0110>; // Subtract immediate def tSUBi3 : T1sI<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs), IIC_iALUi, "sub", "\t$dst, $lhs, $rhs", - [(set tGPR:$dst, (add tGPR:$lhs, imm0_7_neg:$rhs))]>; + [(set tGPR:$dst, (add tGPR:$lhs, imm0_7_neg:$rhs))]>, + T1General<0b01111>; def tSUBi8 : T1sIt<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs), IIC_iALUi, "sub", "\t$dst, $rhs", - [(set tGPR:$dst, (add tGPR:$lhs, imm8_255_neg:$rhs))]>; + [(set tGPR:$dst, (add tGPR:$lhs, imm8_255_neg:$rhs))]>, + T1General<{1,1,1,?,?}>; // subtract register def tSUBrr : T1sI<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), 
IIC_iALUr, "sub", "\t$dst, $lhs, $rhs", - [(set tGPR:$dst, (sub tGPR:$lhs, tGPR:$rhs))]>; + [(set tGPR:$dst, (sub tGPR:$lhs, tGPR:$rhs))]>, + T1General<0b01101>; // TODO: A7-96: STMIA - store multiple. @@ -605,31 +704,36 @@ def tSUBrr : T1sI<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs), IIC_iALUr, def tSXTB : T1pI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iUNAr, "sxtb", "\t$dst, $src", [(set tGPR:$dst, (sext_inreg tGPR:$src, i8))]>, - Requires<[IsThumb1Only, HasV6]>; + Requires<[IsThumb1Only, HasV6]>, + T1Misc<{0,0,1,0,0,1,?}>; // sign-extend short def tSXTH : T1pI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iUNAr, "sxth", "\t$dst, $src", [(set tGPR:$dst, (sext_inreg tGPR:$src, i16))]>, - Requires<[IsThumb1Only, HasV6]>; + Requires<[IsThumb1Only, HasV6]>, + T1Misc<{0,0,1,0,0,0,?}>; // test let isCommutable = 1, Defs = [CPSR] in def tTST : T1pI<(outs), (ins tGPR:$lhs, tGPR:$rhs), IIC_iCMPr, "tst", "\t$lhs, $rhs", - [(ARMcmpZ (and tGPR:$lhs, tGPR:$rhs), 0)]>; + [(ARMcmpZ (and tGPR:$lhs, tGPR:$rhs), 0)]>, + T1DataProcessing<0b1000>; // zero-extend byte def tUXTB : T1pI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iUNAr, "uxtb", "\t$dst, $src", [(set tGPR:$dst, (and tGPR:$src, 0xFF))]>, - Requires<[IsThumb1Only, HasV6]>; + Requires<[IsThumb1Only, HasV6]>, + T1Misc<{0,0,1,0,1,1,?}>; // zero-extend short def tUXTH : T1pI<(outs tGPR:$dst), (ins tGPR:$src), IIC_iUNAr, "uxth", "\t$dst, $src", [(set tGPR:$dst, (and tGPR:$src, 0xFFFF))]>, - Requires<[IsThumb1Only, HasV6]>; + Requires<[IsThumb1Only, HasV6]>, + T1Misc<{0,0,1,0,1,0,?}>; // Conditional move tMOVCCr - Used to implement the Thumb SELECT_CC DAG operation. @@ -643,19 +747,23 @@ let usesCustomInserter = 1 in // Expanded after instruction selection. // 16-bit movcc in IT blocks for Thumb2. def tMOVCCr : T1pIt<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs), IIC_iCMOVr, - "mov", "\t$dst, $rhs", []>; + "mov", "\t$dst, $rhs", []>, + T1Special<{1,0,?,?}>; def tMOVCCi : T1pIt<(outs GPR:$dst), (ins GPR:$lhs, i32imm:$rhs), IIC_iCMOVi, - "mov", "\t$dst, $rhs", []>; + "mov", "\t$dst, $rhs", []>, + T1General<{1,0,0,?,?}>; // tLEApcrel - Load a pc-relative address into a register without offending the // assembler. def tLEApcrel : T1I<(outs tGPR:$dst), (ins i32imm:$label, pred:$p), IIC_iALUi, - "adr$p\t$dst, #$label", []>; + "adr$p\t$dst, #$label", []>, + T1Encoding<{1,0,1,0,0,?}>; // A6.2 & A8.6.10 def tLEApcrelJT : T1I<(outs tGPR:$dst), (ins i32imm:$label, nohash_imm:$id, pred:$p), - IIC_iALUi, "adr$p\t$dst, #${label}_${id}", []>; + IIC_iALUi, "adr$p\t$dst, #${label}_${id}", []>, + T1Encoding<{1,0,1,0,0,?}>; // A6.2 & A8.6.10 //===----------------------------------------------------------------------===// // TLS Instructions @@ -664,9 +772,9 @@ def tLEApcrelJT : T1I<(outs tGPR:$dst), // __aeabi_read_tp preserves the registers r1-r3. let isCall = 1, Defs = [R0, LR] in { - def tTPsoft : TIx2<(outs), (ins), IIC_Br, - "bl\t__aeabi_read_tp", - [(set R0, ARMthread_pointer)]>; + def tTPsoft : TIx2<0b11110, 0b11, 1, (outs), (ins), IIC_Br, + "bl\t__aeabi_read_tp", + [(set R0, ARMthread_pointer)]>; } // SJLJ Exception handling intrinsics diff --git a/lib/Target/ARM/ARMInstrThumb2.td b/lib/Target/ARM/ARMInstrThumb2.td index 949ce73..6f20ed4 100644 --- a/lib/Target/ARM/ARMInstrThumb2.td +++ b/lib/Target/ARM/ARMInstrThumb2.td @@ -165,234 +165,465 @@ def t2addrmode_so_reg : Operand<i32>, /// T2I_un_irs - Defines a set of (op reg, {so_imm|r|so_reg}) patterns for a /// unary operation that produces a value. These are predicable and can be /// changed to modify CPSR. 
-multiclass T2I_un_irs<string opc, PatFrag opnode, bit Cheap = 0, bit ReMat = 0>{ +multiclass T2I_un_irs<bits<4> opcod, string opc, PatFrag opnode, + bit Cheap = 0, bit ReMat = 0> { // shifted imm def i : T2sI<(outs GPR:$dst), (ins t2_so_imm:$src), IIC_iMOVi, opc, "\t$dst, $src", [(set GPR:$dst, (opnode t2_so_imm:$src))]> { let isAsCheapAsAMove = Cheap; let isReMaterializable = ReMat; + let Inst{31-27} = 0b11110; + let Inst{25} = 0; + let Inst{24-21} = opcod; + let Inst{20} = ?; // The S bit. + let Inst{19-16} = 0b1111; // Rn + let Inst{15} = 0; } // register def r : T2I<(outs GPR:$dst), (ins GPR:$src), IIC_iMOVr, opc, ".w\t$dst, $src", - [(set GPR:$dst, (opnode GPR:$src))]>; + [(set GPR:$dst, (opnode GPR:$src))]> { + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b01; + let Inst{24-21} = opcod; + let Inst{20} = ?; // The S bit. + let Inst{19-16} = 0b1111; // Rn + let Inst{14-12} = 0b000; // imm3 + let Inst{7-6} = 0b00; // imm2 + let Inst{5-4} = 0b00; // type + } // shifted register def s : T2I<(outs GPR:$dst), (ins t2_so_reg:$src), IIC_iMOVsi, opc, ".w\t$dst, $src", - [(set GPR:$dst, (opnode t2_so_reg:$src))]>; + [(set GPR:$dst, (opnode t2_so_reg:$src))]> { + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b01; + let Inst{24-21} = opcod; + let Inst{20} = ?; // The S bit. + let Inst{19-16} = 0b1111; // Rn + } } /// T2I_bin_irs - Defines a set of (op reg, {so_imm|r|so_reg}) patterns for a // binary operation that produces a value. These are predicable and can be /// changed to modify CPSR. -multiclass T2I_bin_irs<string opc, PatFrag opnode, +multiclass T2I_bin_irs<bits<4> opcod, string opc, PatFrag opnode, bit Commutable = 0, string wide =""> { // shifted imm def ri : T2sI<(outs GPR:$dst), (ins GPR:$lhs, t2_so_imm:$rhs), IIC_iALUi, opc, "\t$dst, $lhs, $rhs", - [(set GPR:$dst, (opnode GPR:$lhs, t2_so_imm:$rhs))]>; + [(set GPR:$dst, (opnode GPR:$lhs, t2_so_imm:$rhs))]> { + let Inst{31-27} = 0b11110; + let Inst{25} = 0; + let Inst{24-21} = opcod; + let Inst{20} = ?; // The S bit. + let Inst{15} = 0; + } // register def rr : T2sI<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs), IIC_iALUr, opc, !strconcat(wide, "\t$dst, $lhs, $rhs"), [(set GPR:$dst, (opnode GPR:$lhs, GPR:$rhs))]> { let isCommutable = Commutable; + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b01; + let Inst{24-21} = opcod; + let Inst{20} = ?; // The S bit. + let Inst{14-12} = 0b000; // imm3 + let Inst{7-6} = 0b00; // imm2 + let Inst{5-4} = 0b00; // type } // shifted register def rs : T2sI<(outs GPR:$dst), (ins GPR:$lhs, t2_so_reg:$rhs), IIC_iALUsi, opc, !strconcat(wide, "\t$dst, $lhs, $rhs"), - [(set GPR:$dst, (opnode GPR:$lhs, t2_so_reg:$rhs))]>; + [(set GPR:$dst, (opnode GPR:$lhs, t2_so_reg:$rhs))]> { + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b01; + let Inst{24-21} = opcod; + let Inst{20} = ?; // The S bit. + } } /// T2I_bin_w_irs - Same as T2I_bin_irs except these operations need // the ".w" prefix to indicate that they are wide. -multiclass T2I_bin_w_irs<string opc, PatFrag opnode, bit Commutable = 0> : - T2I_bin_irs<opc, opnode, Commutable, ".w">; +multiclass T2I_bin_w_irs<bits<4> opcod, string opc, PatFrag opnode, + bit Commutable = 0> : + T2I_bin_irs<opcod, opc, opnode, Commutable, ".w">; /// T2I_rbin_is - Same as T2I_bin_irs except the order of operands are /// reversed. It doesn't define the 'rr' form since it's handled by its /// T2I_bin_irs counterpart. 
-multiclass T2I_rbin_is<string opc, PatFrag opnode> { +multiclass T2I_rbin_is<bits<4> opcod, string opc, PatFrag opnode> { // shifted imm def ri : T2I<(outs GPR:$dst), (ins GPR:$rhs, t2_so_imm:$lhs), IIC_iALUi, opc, ".w\t$dst, $rhs, $lhs", - [(set GPR:$dst, (opnode t2_so_imm:$lhs, GPR:$rhs))]>; + [(set GPR:$dst, (opnode t2_so_imm:$lhs, GPR:$rhs))]> { + let Inst{31-27} = 0b11110; + let Inst{25} = 0; + let Inst{24-21} = opcod; + let Inst{20} = 0; // The S bit. + let Inst{15} = 0; + } // shifted register def rs : T2I<(outs GPR:$dst), (ins GPR:$rhs, t2_so_reg:$lhs), IIC_iALUsi, opc, "\t$dst, $rhs, $lhs", - [(set GPR:$dst, (opnode t2_so_reg:$lhs, GPR:$rhs))]>; + [(set GPR:$dst, (opnode t2_so_reg:$lhs, GPR:$rhs))]> { + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b01; + let Inst{24-21} = opcod; + let Inst{20} = 0; // The S bit. + } } /// T2I_bin_s_irs - Similar to T2I_bin_irs except it sets the 's' bit so the /// instruction modifies the CPSR register. let Defs = [CPSR] in { -multiclass T2I_bin_s_irs<string opc, PatFrag opnode, bit Commutable = 0> { +multiclass T2I_bin_s_irs<bits<4> opcod, string opc, PatFrag opnode, + bit Commutable = 0> { // shifted imm def ri : T2I<(outs GPR:$dst), (ins GPR:$lhs, t2_so_imm:$rhs), IIC_iALUi, !strconcat(opc, "s"), ".w\t$dst, $lhs, $rhs", - [(set GPR:$dst, (opnode GPR:$lhs, t2_so_imm:$rhs))]>; + [(set GPR:$dst, (opnode GPR:$lhs, t2_so_imm:$rhs))]> { + let Inst{31-27} = 0b11110; + let Inst{25} = 0; + let Inst{24-21} = opcod; + let Inst{20} = 1; // The S bit. + let Inst{15} = 0; + } // register def rr : T2I<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs), IIC_iALUr, !strconcat(opc, "s"), ".w\t$dst, $lhs, $rhs", [(set GPR:$dst, (opnode GPR:$lhs, GPR:$rhs))]> { let isCommutable = Commutable; + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b01; + let Inst{24-21} = opcod; + let Inst{20} = 1; // The S bit. + let Inst{14-12} = 0b000; // imm3 + let Inst{7-6} = 0b00; // imm2 + let Inst{5-4} = 0b00; // type } // shifted register def rs : T2I<(outs GPR:$dst), (ins GPR:$lhs, t2_so_reg:$rhs), IIC_iALUsi, !strconcat(opc, "s"), ".w\t$dst, $lhs, $rhs", - [(set GPR:$dst, (opnode GPR:$lhs, t2_so_reg:$rhs))]>; + [(set GPR:$dst, (opnode GPR:$lhs, t2_so_reg:$rhs))]> { + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b01; + let Inst{24-21} = opcod; + let Inst{20} = 1; // The S bit. + } } } /// T2I_bin_ii12rs - Defines a set of (op reg, {so_imm|imm0_4095|r|so_reg}) /// patterns for a binary operation that produces a value. -multiclass T2I_bin_ii12rs<string opc, PatFrag opnode, bit Commutable = 0> { +multiclass T2I_bin_ii12rs<bits<3> op23_21, string opc, PatFrag opnode, + bit Commutable = 0> { // shifted imm def ri : T2sI<(outs GPR:$dst), (ins GPR:$lhs, t2_so_imm:$rhs), IIC_iALUi, opc, ".w\t$dst, $lhs, $rhs", - [(set GPR:$dst, (opnode GPR:$lhs, t2_so_imm:$rhs))]>; + [(set GPR:$dst, (opnode GPR:$lhs, t2_so_imm:$rhs))]> { + let Inst{31-27} = 0b11110; + let Inst{25} = 0; + let Inst{24} = 1; + let Inst{23-21} = op23_21; + let Inst{20} = 0; // The S bit. + let Inst{15} = 0; + } // 12-bit imm def ri12 : T2sI<(outs GPR:$dst), (ins GPR:$lhs, imm0_4095:$rhs), IIC_iALUi, !strconcat(opc, "w"), "\t$dst, $lhs, $rhs", - [(set GPR:$dst, (opnode GPR:$lhs, imm0_4095:$rhs))]>; + [(set GPR:$dst, (opnode GPR:$lhs, imm0_4095:$rhs))]> { + let Inst{31-27} = 0b11110; + let Inst{25} = 1; + let Inst{24} = 0; + let Inst{23-21} = op23_21; + let Inst{20} = 0; // The S bit. 
+ let Inst{15} = 0; + } // register def rr : T2sI<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs), IIC_iALUr, opc, ".w\t$dst, $lhs, $rhs", [(set GPR:$dst, (opnode GPR:$lhs, GPR:$rhs))]> { let isCommutable = Commutable; + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b01; + let Inst{24} = 1; + let Inst{23-21} = op23_21; + let Inst{20} = 0; // The S bit. + let Inst{14-12} = 0b000; // imm3 + let Inst{7-6} = 0b00; // imm2 + let Inst{5-4} = 0b00; // type } // shifted register def rs : T2sI<(outs GPR:$dst), (ins GPR:$lhs, t2_so_reg:$rhs), IIC_iALUsi, opc, ".w\t$dst, $lhs, $rhs", - [(set GPR:$dst, (opnode GPR:$lhs, t2_so_reg:$rhs))]>; + [(set GPR:$dst, (opnode GPR:$lhs, t2_so_reg:$rhs))]> { + let Inst{31-27} = 0b11101; + let Inst{24} = 1; + let Inst{26-25} = 0b01; + let Inst{23-21} = op23_21; + let Inst{20} = 0; // The S bit. + } } /// T2I_adde_sube_irs - Defines a set of (op reg, {so_imm|r|so_reg}) patterns /// for a binary operation that produces a value and use and define the carry /// bit. It's not predicable. let Uses = [CPSR] in { -multiclass T2I_adde_sube_irs<string opc, PatFrag opnode, bit Commutable = 0> { +multiclass T2I_adde_sube_irs<bits<4> opcod, string opc, PatFrag opnode, bit Commutable = 0> { // shifted imm def ri : T2sI<(outs GPR:$dst), (ins GPR:$lhs, t2_so_imm:$rhs), IIC_iALUi, opc, "\t$dst, $lhs, $rhs", [(set GPR:$dst, (opnode GPR:$lhs, t2_so_imm:$rhs))]>, - Requires<[IsThumb2, CarryDefIsUnused]>; + Requires<[IsThumb2, CarryDefIsUnused]> { + let Inst{31-27} = 0b11110; + let Inst{25} = 0; + let Inst{24-21} = opcod; + let Inst{20} = 0; // The S bit. + let Inst{15} = 0; + } // register def rr : T2sI<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs), IIC_iALUr, opc, ".w\t$dst, $lhs, $rhs", [(set GPR:$dst, (opnode GPR:$lhs, GPR:$rhs))]>, Requires<[IsThumb2, CarryDefIsUnused]> { let isCommutable = Commutable; + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b01; + let Inst{24-21} = opcod; + let Inst{20} = 0; // The S bit. + let Inst{14-12} = 0b000; // imm3 + let Inst{7-6} = 0b00; // imm2 + let Inst{5-4} = 0b00; // type } // shifted register def rs : T2sI<(outs GPR:$dst), (ins GPR:$lhs, t2_so_reg:$rhs), IIC_iALUsi, opc, ".w\t$dst, $lhs, $rhs", [(set GPR:$dst, (opnode GPR:$lhs, t2_so_reg:$rhs))]>, - Requires<[IsThumb2, CarryDefIsUnused]>; + Requires<[IsThumb2, CarryDefIsUnused]> { + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b01; + let Inst{24-21} = opcod; + let Inst{20} = 0; // The S bit. + } // Carry setting variants // shifted imm def Sri : T2XI<(outs GPR:$dst), (ins GPR:$lhs, t2_so_imm:$rhs), IIC_iALUi, !strconcat(opc, "s\t$dst, $lhs, $rhs"), [(set GPR:$dst, (opnode GPR:$lhs, t2_so_imm:$rhs))]>, Requires<[IsThumb2, CarryDefIsUsed]> { - let Defs = [CPSR]; - } + let Defs = [CPSR]; + let Inst{31-27} = 0b11110; + let Inst{25} = 0; + let Inst{24-21} = opcod; + let Inst{20} = 1; // The S bit. + let Inst{15} = 0; + } // register def Srr : T2XI<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs), IIC_iALUr, !strconcat(opc, "s.w\t$dst, $lhs, $rhs"), [(set GPR:$dst, (opnode GPR:$lhs, GPR:$rhs))]>, Requires<[IsThumb2, CarryDefIsUsed]> { - let Defs = [CPSR]; - let isCommutable = Commutable; + let Defs = [CPSR]; + let isCommutable = Commutable; + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b01; + let Inst{24-21} = opcod; + let Inst{20} = 1; // The S bit. 
+ let Inst{14-12} = 0b000; // imm3 + let Inst{7-6} = 0b00; // imm2 + let Inst{5-4} = 0b00; // type } // shifted register def Srs : T2XI<(outs GPR:$dst), (ins GPR:$lhs, t2_so_reg:$rhs), IIC_iALUsi, !strconcat(opc, "s.w\t$dst, $lhs, $rhs"), [(set GPR:$dst, (opnode GPR:$lhs, t2_so_reg:$rhs))]>, Requires<[IsThumb2, CarryDefIsUsed]> { - let Defs = [CPSR]; + let Defs = [CPSR]; + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b01; + let Inst{24-21} = opcod; + let Inst{20} = 1; // The S bit. } } } /// T2I_rbin_s_is - Same as T2I_rbin_is except sets 's' bit. let Defs = [CPSR] in { -multiclass T2I_rbin_s_is<string opc, PatFrag opnode> { +multiclass T2I_rbin_s_is<bits<4> opcod, string opc, PatFrag opnode> { // shifted imm def ri : T2XI<(outs GPR:$dst), (ins GPR:$rhs, t2_so_imm:$lhs, cc_out:$s), IIC_iALUi, !strconcat(opc, "${s}.w\t$dst, $rhs, $lhs"), - [(set GPR:$dst, (opnode t2_so_imm:$lhs, GPR:$rhs))]>; + [(set GPR:$dst, (opnode t2_so_imm:$lhs, GPR:$rhs))]> { + let Inst{31-27} = 0b11110; + let Inst{25} = 0; + let Inst{24-21} = opcod; + let Inst{20} = 1; // The S bit. + let Inst{15} = 0; + } // shifted register def rs : T2XI<(outs GPR:$dst), (ins GPR:$rhs, t2_so_reg:$lhs, cc_out:$s), IIC_iALUsi, !strconcat(opc, "${s}\t$dst, $rhs, $lhs"), - [(set GPR:$dst, (opnode t2_so_reg:$lhs, GPR:$rhs))]>; + [(set GPR:$dst, (opnode t2_so_reg:$lhs, GPR:$rhs))]> { + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b01; + let Inst{24-21} = opcod; + let Inst{20} = 1; // The S bit. + } } } /// T2I_sh_ir - Defines a set of (op reg, {so_imm|r}) patterns for a shift / // rotate operation that produces a value. -multiclass T2I_sh_ir<string opc, PatFrag opnode> { +multiclass T2I_sh_ir<bits<2> opcod, string opc, PatFrag opnode> { // 5-bit imm def ri : T2sI<(outs GPR:$dst), (ins GPR:$lhs, i32imm:$rhs), IIC_iMOVsi, opc, ".w\t$dst, $lhs, $rhs", - [(set GPR:$dst, (opnode GPR:$lhs, imm1_31:$rhs))]>; + [(set GPR:$dst, (opnode GPR:$lhs, imm1_31:$rhs))]> { + let Inst{31-27} = 0b11101; + let Inst{26-21} = 0b010010; + let Inst{19-16} = 0b1111; // Rn + let Inst{5-4} = opcod; + } // register def rr : T2sI<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs), IIC_iMOVsr, opc, ".w\t$dst, $lhs, $rhs", - [(set GPR:$dst, (opnode GPR:$lhs, GPR:$rhs))]>; + [(set GPR:$dst, (opnode GPR:$lhs, GPR:$rhs))]> { + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0100; + let Inst{22-21} = opcod; + let Inst{15-12} = 0b1111; + let Inst{7-4} = 0b0000; + } } -/// T2I_cmp_is - Defines a set of (op r, {so_imm|r|so_reg}) cmp / test +/// T2I_cmp_irs - Defines a set of (op r, {so_imm|r|so_reg}) cmp / test /// patterns. Similar to T2I_bin_irs except the instruction does not produce /// a explicit result, only implicitly set CPSR. let Defs = [CPSR] in { -multiclass T2I_cmp_is<string opc, PatFrag opnode> { +multiclass T2I_cmp_irs<bits<4> opcod, string opc, PatFrag opnode> { // shifted imm def ri : T2I<(outs), (ins GPR:$lhs, t2_so_imm:$rhs), IIC_iCMPi, opc, ".w\t$lhs, $rhs", - [(opnode GPR:$lhs, t2_so_imm:$rhs)]>; + [(opnode GPR:$lhs, t2_so_imm:$rhs)]> { + let Inst{31-27} = 0b11110; + let Inst{25} = 0; + let Inst{24-21} = opcod; + let Inst{20} = 1; // The S bit. + let Inst{15} = 0; + let Inst{11-8} = 0b1111; // Rd + } // register def rr : T2I<(outs), (ins GPR:$lhs, GPR:$rhs), IIC_iCMPr, opc, ".w\t$lhs, $rhs", - [(opnode GPR:$lhs, GPR:$rhs)]>; + [(opnode GPR:$lhs, GPR:$rhs)]> { + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b01; + let Inst{24-21} = opcod; + let Inst{20} = 1; // The S bit. 
+ let Inst{14-12} = 0b000; // imm3 + let Inst{11-8} = 0b1111; // Rd + let Inst{7-6} = 0b00; // imm2 + let Inst{5-4} = 0b00; // type + } // shifted register def rs : T2I<(outs), (ins GPR:$lhs, t2_so_reg:$rhs), IIC_iCMPsi, opc, ".w\t$lhs, $rhs", - [(opnode GPR:$lhs, t2_so_reg:$rhs)]>; + [(opnode GPR:$lhs, t2_so_reg:$rhs)]> { + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b01; + let Inst{24-21} = opcod; + let Inst{20} = 1; // The S bit. + let Inst{11-8} = 0b1111; // Rd + } } } /// T2I_ld - Defines a set of (op r, {imm12|imm8|so_reg}) load patterns. -multiclass T2I_ld<string opc, PatFrag opnode> { +multiclass T2I_ld<bit signed, bits<2> opcod, string opc, PatFrag opnode> { def i12 : T2Ii12<(outs GPR:$dst), (ins t2addrmode_imm12:$addr), IIC_iLoadi, opc, ".w\t$dst, $addr", - [(set GPR:$dst, (opnode t2addrmode_imm12:$addr))]>; + [(set GPR:$dst, (opnode t2addrmode_imm12:$addr))]> { + let Inst{31-27} = 0b11111; + let Inst{26-25} = 0b00; + let Inst{24} = signed; + let Inst{23} = 1; + let Inst{22-21} = opcod; + let Inst{20} = 1; // load + } def i8 : T2Ii8 <(outs GPR:$dst), (ins t2addrmode_imm8:$addr), IIC_iLoadi, opc, "\t$dst, $addr", - [(set GPR:$dst, (opnode t2addrmode_imm8:$addr))]>; + [(set GPR:$dst, (opnode t2addrmode_imm8:$addr))]> { + let Inst{31-27} = 0b11111; + let Inst{26-25} = 0b00; + let Inst{24} = signed; + let Inst{23} = 0; + let Inst{22-21} = opcod; + let Inst{20} = 1; // load + let Inst{11} = 1; + // Offset: index==TRUE, wback==FALSE + let Inst{10} = 1; // The P bit. + let Inst{8} = 0; // The W bit. + } def s : T2Iso <(outs GPR:$dst), (ins t2addrmode_so_reg:$addr), IIC_iLoadr, opc, ".w\t$dst, $addr", - [(set GPR:$dst, (opnode t2addrmode_so_reg:$addr))]>; + [(set GPR:$dst, (opnode t2addrmode_so_reg:$addr))]> { + let Inst{31-27} = 0b11111; + let Inst{26-25} = 0b00; + let Inst{24} = signed; + let Inst{23} = 0; + let Inst{22-21} = opcod; + let Inst{20} = 1; // load + let Inst{11-6} = 0b000000; + } def pci : T2Ipc <(outs GPR:$dst), (ins i32imm:$addr), IIC_iLoadi, opc, ".w\t$dst, $addr", [(set GPR:$dst, (opnode (ARMWrapper tconstpool:$addr)))]> { let isReMaterializable = 1; + let Inst{31-27} = 0b11111; + let Inst{26-25} = 0b00; + let Inst{24} = signed; + let Inst{23} = ?; // add = (U == '1') + let Inst{22-21} = opcod; + let Inst{20} = 1; // load + let Inst{19-16} = 0b1111; // Rn } } /// T2I_st - Defines a set of (op r, {imm12|imm8|so_reg}) store patterns. -multiclass T2I_st<string opc, PatFrag opnode> { +multiclass T2I_st<bits<2> opcod, string opc, PatFrag opnode> { def i12 : T2Ii12<(outs), (ins GPR:$src, t2addrmode_imm12:$addr), IIC_iStorei, opc, ".w\t$src, $addr", - [(opnode GPR:$src, t2addrmode_imm12:$addr)]>; + [(opnode GPR:$src, t2addrmode_imm12:$addr)]> { + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0001; + let Inst{22-21} = opcod; + let Inst{20} = 0; // !load + } def i8 : T2Ii8 <(outs), (ins GPR:$src, t2addrmode_imm8:$addr), IIC_iStorei, opc, "\t$src, $addr", - [(opnode GPR:$src, t2addrmode_imm8:$addr)]>; + [(opnode GPR:$src, t2addrmode_imm8:$addr)]> { + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0000; + let Inst{22-21} = opcod; + let Inst{20} = 0; // !load + let Inst{11} = 1; + // Offset: index==TRUE, wback==FALSE + let Inst{10} = 1; // The P bit. + let Inst{8} = 0; // The W bit. 
+ } def s : T2Iso <(outs), (ins GPR:$src, t2addrmode_so_reg:$addr), IIC_iStorer, opc, ".w\t$src, $addr", - [(opnode GPR:$src, t2addrmode_so_reg:$addr)]>; + [(opnode GPR:$src, t2addrmode_so_reg:$addr)]> { + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0000; + let Inst{22-21} = opcod; + let Inst{20} = 0; // !load + let Inst{11-6} = 0b000000; + } } /// T2I_picld - Defines the PIC load pattern. @@ -410,25 +641,55 @@ class T2I_picst<string opc, PatFrag opnode> : /// T2I_unary_rrot - A unary operation with two forms: one whose operand is a /// register and one whose operand is a register rotated by 8/16/24. -multiclass T2I_unary_rrot<string opc, PatFrag opnode> { +multiclass T2I_unary_rrot<bits<3> opcod, string opc, PatFrag opnode> { def r : T2I<(outs GPR:$dst), (ins GPR:$src), IIC_iUNAr, opc, ".w\t$dst, $src", - [(set GPR:$dst, (opnode GPR:$src))]>; + [(set GPR:$dst, (opnode GPR:$src))]> { + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0100; + let Inst{22-20} = opcod; + let Inst{19-16} = 0b1111; // Rn + let Inst{15-12} = 0b1111; + let Inst{7} = 1; + let Inst{5-4} = 0b00; // rotate + } def r_rot : T2I<(outs GPR:$dst), (ins GPR:$src, i32imm:$rot), IIC_iUNAsi, opc, ".w\t$dst, $src, ror $rot", - [(set GPR:$dst, (opnode (rotr GPR:$src, rot_imm:$rot)))]>; + [(set GPR:$dst, (opnode (rotr GPR:$src, rot_imm:$rot)))]> { + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0100; + let Inst{22-20} = opcod; + let Inst{19-16} = 0b1111; // Rn + let Inst{15-12} = 0b1111; + let Inst{7} = 1; + let Inst{5-4} = {?,?}; // rotate + } } /// T2I_bin_rrot - A binary operation with two forms: one whose operand is a /// register and one whose operand is a register rotated by 8/16/24. -multiclass T2I_bin_rrot<string opc, PatFrag opnode> { +multiclass T2I_bin_rrot<bits<3> opcod, string opc, PatFrag opnode> { def rr : T2I<(outs GPR:$dst), (ins GPR:$LHS, GPR:$RHS), IIC_iALUr, opc, "\t$dst, $LHS, $RHS", - [(set GPR:$dst, (opnode GPR:$LHS, GPR:$RHS))]>; + [(set GPR:$dst, (opnode GPR:$LHS, GPR:$RHS))]> { + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0100; + let Inst{22-20} = opcod; + let Inst{15-12} = 0b1111; + let Inst{7} = 1; + let Inst{5-4} = 0b00; // rotate + } def rr_rot : T2I<(outs GPR:$dst), (ins GPR:$LHS, GPR:$RHS, i32imm:$rot), IIC_iALUsr, opc, "\t$dst, $LHS, $RHS, ror $rot", [(set GPR:$dst, (opnode GPR:$LHS, - (rotr GPR:$RHS, rot_imm:$rot)))]>; + (rotr GPR:$RHS, rot_imm:$rot)))]> { + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0100; + let Inst{22-20} = opcod; + let Inst{15-12} = 0b1111; + let Inst{7} = 1; + let Inst{5-4} = {?,?}; // rotate + } } //===----------------------------------------------------------------------===// @@ -442,33 +703,89 @@ multiclass T2I_bin_rrot<string opc, PatFrag opnode> { // LEApcrel - Load a pc-relative address into a register without offending the // assembler. 
def t2LEApcrel : T2XI<(outs GPR:$dst), (ins i32imm:$label, pred:$p), IIC_iALUi, - "adr$p.w\t$dst, #$label", []>; - + "adr$p.w\t$dst, #$label", []> { + let Inst{31-27} = 0b11110; + let Inst{25-24} = 0b10; + // Inst{23:21} = '11' (add = FALSE) or '00' (add = TRUE) + let Inst{22} = 0; + let Inst{20} = 0; + let Inst{19-16} = 0b1111; // Rn + let Inst{15} = 0; +} def t2LEApcrelJT : T2XI<(outs GPR:$dst), (ins i32imm:$label, nohash_imm:$id, pred:$p), IIC_iALUi, - "adr$p.w\t$dst, #${label}_${id}", []>; + "adr$p.w\t$dst, #${label}_${id}", []> { + let Inst{31-27} = 0b11110; + let Inst{25-24} = 0b10; + // Inst{23:21} = '11' (add = FALSE) or '00' (add = TRUE) + let Inst{22} = 0; + let Inst{20} = 0; + let Inst{19-16} = 0b1111; // Rn + let Inst{15} = 0; +} // ADD r, sp, {so_imm|i12} def t2ADDrSPi : T2sI<(outs GPR:$dst), (ins GPR:$sp, t2_so_imm:$imm), - IIC_iALUi, "add", ".w\t$dst, $sp, $imm", []>; + IIC_iALUi, "add", ".w\t$dst, $sp, $imm", []> { + let Inst{31-27} = 0b11110; + let Inst{25} = 0; + let Inst{24-21} = 0b1000; + let Inst{20} = ?; // The S bit. + let Inst{19-16} = 0b1101; // Rn = sp + let Inst{15} = 0; +} def t2ADDrSPi12 : T2I<(outs GPR:$dst), (ins GPR:$sp, imm0_4095:$imm), - IIC_iALUi, "addw", "\t$dst, $sp, $imm", []>; + IIC_iALUi, "addw", "\t$dst, $sp, $imm", []> { + let Inst{31-27} = 0b11110; + let Inst{25} = 1; + let Inst{24-21} = 0b0000; + let Inst{20} = 0; // The S bit. + let Inst{19-16} = 0b1101; // Rn = sp + let Inst{15} = 0; +} // ADD r, sp, so_reg def t2ADDrSPs : T2sI<(outs GPR:$dst), (ins GPR:$sp, t2_so_reg:$rhs), - IIC_iALUsi, "add", ".w\t$dst, $sp, $rhs", []>; + IIC_iALUsi, "add", ".w\t$dst, $sp, $rhs", []> { + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b01; + let Inst{24-21} = 0b1000; + let Inst{20} = ?; // The S bit. + let Inst{19-16} = 0b1101; // Rn = sp + let Inst{15} = 0; +} // SUB r, sp, {so_imm|i12} def t2SUBrSPi : T2sI<(outs GPR:$dst), (ins GPR:$sp, t2_so_imm:$imm), - IIC_iALUi, "sub", ".w\t$dst, $sp, $imm", []>; + IIC_iALUi, "sub", ".w\t$dst, $sp, $imm", []> { + let Inst{31-27} = 0b11110; + let Inst{25} = 0; + let Inst{24-21} = 0b1101; + let Inst{20} = ?; // The S bit. + let Inst{19-16} = 0b1101; // Rn = sp + let Inst{15} = 0; +} def t2SUBrSPi12 : T2I<(outs GPR:$dst), (ins GPR:$sp, imm0_4095:$imm), - IIC_iALUi, "subw", "\t$dst, $sp, $imm", []>; + IIC_iALUi, "subw", "\t$dst, $sp, $imm", []> { + let Inst{31-27} = 0b11110; + let Inst{25} = 1; + let Inst{24-21} = 0b0101; + let Inst{20} = 0; // The S bit. + let Inst{19-16} = 0b1101; // Rn = sp + let Inst{15} = 0; +} // SUB r, sp, so_reg def t2SUBrSPs : T2sI<(outs GPR:$dst), (ins GPR:$sp, t2_so_reg:$rhs), IIC_iALUsi, - "sub", "\t$dst, $sp, $rhs", []>; - + "sub", "\t$dst, $sp, $rhs", []> { + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b01; + let Inst{24-21} = 0b1101; + let Inst{20} = ?; // The S bit. + let Inst{19-16} = 0b1101; // Rn = sp + let Inst{15} = 0; +} // Pseudo instruction that will expand into a t2SUBrSPi + a copy. let usesCustomInserter = 1 in { // Expanded after instruction selection. 
@@ -487,24 +804,26 @@ def t2SUBrSPs_ : PseudoInst<(outs GPR:$dst), (ins GPR:$sp, t2_so_reg:$rhs), // Load let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in -defm t2LDR : T2I_ld<"ldr", UnOpFrag<(load node:$Src)>>; +defm t2LDR : T2I_ld<0, 0b10, "ldr", UnOpFrag<(load node:$Src)>>; // Loads with zero extension -defm t2LDRH : T2I_ld<"ldrh", UnOpFrag<(zextloadi16 node:$Src)>>; -defm t2LDRB : T2I_ld<"ldrb", UnOpFrag<(zextloadi8 node:$Src)>>; +defm t2LDRH : T2I_ld<0, 0b01, "ldrh", UnOpFrag<(zextloadi16 node:$Src)>>; +defm t2LDRB : T2I_ld<0, 0b00, "ldrb", UnOpFrag<(zextloadi8 node:$Src)>>; // Loads with sign extension -defm t2LDRSH : T2I_ld<"ldrsh", UnOpFrag<(sextloadi16 node:$Src)>>; -defm t2LDRSB : T2I_ld<"ldrsb", UnOpFrag<(sextloadi8 node:$Src)>>; +defm t2LDRSH : T2I_ld<1, 0b01, "ldrsh", UnOpFrag<(sextloadi16 node:$Src)>>; +defm t2LDRSB : T2I_ld<1, 0b00, "ldrsb", UnOpFrag<(sextloadi8 node:$Src)>>; let mayLoad = 1, hasExtraDefRegAllocReq = 1 in { // Load doubleword -def t2LDRDi8 : T2Ii8s4<(outs GPR:$dst1, GPR:$dst2), +def t2LDRDi8 : T2Ii8s4<1, 0, 1, (outs GPR:$dst1, GPR:$dst2), (ins t2addrmode_imm8s4:$addr), IIC_iLoadi, "ldrd", "\t$dst1, $addr", []>; -def t2LDRDpci : T2Ii8s4<(outs GPR:$dst1, GPR:$dst2), +def t2LDRDpci : T2Ii8s4<?, ?, 1, (outs GPR:$dst1, GPR:$dst2), (ins i32imm:$addr), IIC_iLoadi, - "ldrd", "\t$dst1, $addr", []>; + "ldrd", "\t$dst1, $addr", []> { + let Inst{19-16} = 0b1111; // Rn +} } // zextload i1 -> zextload i8 @@ -549,57 +868,57 @@ def : T2Pat<(extloadi16 (ARMWrapper tconstpool:$addr)), // Indexed loads let mayLoad = 1 in { -def t2LDR_PRE : T2Iidxldst<(outs GPR:$dst, GPR:$base_wb), +def t2LDR_PRE : T2Iidxldst<0, 0b10, 1, 1, (outs GPR:$dst, GPR:$base_wb), (ins t2addrmode_imm8:$addr), AddrModeT2_i8, IndexModePre, IIC_iLoadiu, "ldr", "\t$dst, $addr!", "$addr.base = $base_wb", []>; -def t2LDR_POST : T2Iidxldst<(outs GPR:$dst, GPR:$base_wb), +def t2LDR_POST : T2Iidxldst<0, 0b10, 1, 0, (outs GPR:$dst, GPR:$base_wb), (ins GPR:$base, t2am_imm8_offset:$offset), AddrModeT2_i8, IndexModePost, IIC_iLoadiu, "ldr", "\t$dst, [$base], $offset", "$base = $base_wb", []>; -def t2LDRB_PRE : T2Iidxldst<(outs GPR:$dst, GPR:$base_wb), +def t2LDRB_PRE : T2Iidxldst<0, 0b00, 1, 1, (outs GPR:$dst, GPR:$base_wb), (ins t2addrmode_imm8:$addr), AddrModeT2_i8, IndexModePre, IIC_iLoadiu, "ldrb", "\t$dst, $addr!", "$addr.base = $base_wb", []>; -def t2LDRB_POST : T2Iidxldst<(outs GPR:$dst, GPR:$base_wb), +def t2LDRB_POST : T2Iidxldst<0, 0b00, 1, 0, (outs GPR:$dst, GPR:$base_wb), (ins GPR:$base, t2am_imm8_offset:$offset), AddrModeT2_i8, IndexModePost, IIC_iLoadiu, "ldrb", "\t$dst, [$base], $offset", "$base = $base_wb", []>; -def t2LDRH_PRE : T2Iidxldst<(outs GPR:$dst, GPR:$base_wb), +def t2LDRH_PRE : T2Iidxldst<0, 0b01, 1, 1, (outs GPR:$dst, GPR:$base_wb), (ins t2addrmode_imm8:$addr), AddrModeT2_i8, IndexModePre, IIC_iLoadiu, "ldrh", "\t$dst, $addr!", "$addr.base = $base_wb", []>; -def t2LDRH_POST : T2Iidxldst<(outs GPR:$dst, GPR:$base_wb), +def t2LDRH_POST : T2Iidxldst<0, 0b01, 1, 0, (outs GPR:$dst, GPR:$base_wb), (ins GPR:$base, t2am_imm8_offset:$offset), AddrModeT2_i8, IndexModePost, IIC_iLoadiu, "ldrh", "\t$dst, [$base], $offset", "$base = $base_wb", []>; -def t2LDRSB_PRE : T2Iidxldst<(outs GPR:$dst, GPR:$base_wb), +def t2LDRSB_PRE : T2Iidxldst<1, 0b00, 1, 1, (outs GPR:$dst, GPR:$base_wb), (ins t2addrmode_imm8:$addr), AddrModeT2_i8, IndexModePre, IIC_iLoadiu, "ldrsb", "\t$dst, $addr!", "$addr.base = $base_wb", []>; -def t2LDRSB_POST : T2Iidxldst<(outs GPR:$dst, GPR:$base_wb), +def 
t2LDRSB_POST : T2Iidxldst<1, 0b00, 1, 0, (outs GPR:$dst, GPR:$base_wb), (ins GPR:$base, t2am_imm8_offset:$offset), AddrModeT2_i8, IndexModePost, IIC_iLoadiu, "ldrsb", "\t$dst, [$base], $offset", "$base = $base_wb", []>; -def t2LDRSH_PRE : T2Iidxldst<(outs GPR:$dst, GPR:$base_wb), +def t2LDRSH_PRE : T2Iidxldst<1, 0b01, 1, 1, (outs GPR:$dst, GPR:$base_wb), (ins t2addrmode_imm8:$addr), AddrModeT2_i8, IndexModePre, IIC_iLoadiu, "ldrsh", "\t$dst, $addr!", "$addr.base = $base_wb", []>; -def t2LDRSH_POST : T2Iidxldst<(outs GPR:$dst, GPR:$base_wb), +def t2LDRSH_POST : T2Iidxldst<1, 0b01, 1, 0, (outs GPR:$dst, GPR:$base_wb), (ins GPR:$base, t2am_imm8_offset:$offset), AddrModeT2_i8, IndexModePost, IIC_iLoadiu, "ldrsh", "\t$dst, [$base], $offset", "$base = $base_wb", @@ -607,53 +926,53 @@ def t2LDRSH_POST : T2Iidxldst<(outs GPR:$dst, GPR:$base_wb), } // Store -defm t2STR : T2I_st<"str", BinOpFrag<(store node:$LHS, node:$RHS)>>; -defm t2STRB : T2I_st<"strb", BinOpFrag<(truncstorei8 node:$LHS, node:$RHS)>>; -defm t2STRH : T2I_st<"strh", BinOpFrag<(truncstorei16 node:$LHS, node:$RHS)>>; +defm t2STR : T2I_st<0b10, "str", BinOpFrag<(store node:$LHS, node:$RHS)>>; +defm t2STRB : T2I_st<0b00, "strb", BinOpFrag<(truncstorei8 node:$LHS, node:$RHS)>>; +defm t2STRH : T2I_st<0b01, "strh", BinOpFrag<(truncstorei16 node:$LHS, node:$RHS)>>; // Store doubleword let mayLoad = 1, hasExtraSrcRegAllocReq = 1 in -def t2STRDi8 : T2Ii8s4<(outs), +def t2STRDi8 : T2Ii8s4<1, 0, 0, (outs), (ins GPR:$src1, GPR:$src2, t2addrmode_imm8s4:$addr), IIC_iStorer, "strd", "\t$src1, $addr", []>; // Indexed stores -def t2STR_PRE : T2Iidxldst<(outs GPR:$base_wb), +def t2STR_PRE : T2Iidxldst<0, 0b10, 0, 1, (outs GPR:$base_wb), (ins GPR:$src, GPR:$base, t2am_imm8_offset:$offset), AddrModeT2_i8, IndexModePre, IIC_iStoreiu, "str", "\t$src, [$base, $offset]!", "$base = $base_wb", [(set GPR:$base_wb, (pre_store GPR:$src, GPR:$base, t2am_imm8_offset:$offset))]>; -def t2STR_POST : T2Iidxldst<(outs GPR:$base_wb), +def t2STR_POST : T2Iidxldst<0, 0b10, 0, 0, (outs GPR:$base_wb), (ins GPR:$src, GPR:$base, t2am_imm8_offset:$offset), AddrModeT2_i8, IndexModePost, IIC_iStoreiu, "str", "\t$src, [$base], $offset", "$base = $base_wb", [(set GPR:$base_wb, (post_store GPR:$src, GPR:$base, t2am_imm8_offset:$offset))]>; -def t2STRH_PRE : T2Iidxldst<(outs GPR:$base_wb), +def t2STRH_PRE : T2Iidxldst<0, 0b01, 0, 1, (outs GPR:$base_wb), (ins GPR:$src, GPR:$base, t2am_imm8_offset:$offset), AddrModeT2_i8, IndexModePre, IIC_iStoreiu, "strh", "\t$src, [$base, $offset]!", "$base = $base_wb", [(set GPR:$base_wb, (pre_truncsti16 GPR:$src, GPR:$base, t2am_imm8_offset:$offset))]>; -def t2STRH_POST : T2Iidxldst<(outs GPR:$base_wb), +def t2STRH_POST : T2Iidxldst<0, 0b01, 0, 0, (outs GPR:$base_wb), (ins GPR:$src, GPR:$base, t2am_imm8_offset:$offset), AddrModeT2_i8, IndexModePost, IIC_iStoreiu, "strh", "\t$src, [$base], $offset", "$base = $base_wb", [(set GPR:$base_wb, (post_truncsti16 GPR:$src, GPR:$base, t2am_imm8_offset:$offset))]>; -def t2STRB_PRE : T2Iidxldst<(outs GPR:$base_wb), +def t2STRB_PRE : T2Iidxldst<0, 0b00, 0, 1, (outs GPR:$base_wb), (ins GPR:$src, GPR:$base, t2am_imm8_offset:$offset), AddrModeT2_i8, IndexModePre, IIC_iStoreiu, "strb", "\t$src, [$base, $offset]!", "$base = $base_wb", [(set GPR:$base_wb, (pre_truncsti8 GPR:$src, GPR:$base, t2am_imm8_offset:$offset))]>; -def t2STRB_POST : T2Iidxldst<(outs GPR:$base_wb), +def t2STRB_POST : T2Iidxldst<0, 0b00, 0, 0, (outs GPR:$base_wb), (ins GPR:$src, GPR:$base, t2am_imm8_offset:$offset), AddrModeT2_i8, 
IndexModePost, IIC_iStoreiu, "strb", "\t$src, [$base], $offset", "$base = $base_wb", @@ -670,12 +989,26 @@ def t2STRB_POST : T2Iidxldst<(outs GPR:$base_wb), let mayLoad = 1, hasExtraDefRegAllocReq = 1 in def t2LDM : T2XI<(outs), (ins addrmode4:$addr, pred:$p, reglist:$wb, variable_ops), - IIC_iLoadm, "ldm${addr:submode}${p}${addr:wide}\t$addr, $wb", []>; + IIC_iLoadm, "ldm${addr:submode}${p}${addr:wide}\t$addr, $wb", []> { + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b00; + let Inst{24-23} = {?, ?}; // IA: '01', DB: '10' + let Inst{22} = 0; + let Inst{21} = ?; // The W bit. + let Inst{20} = 1; // Load +} let mayStore = 1, hasExtraSrcRegAllocReq = 1 in def t2STM : T2XI<(outs), (ins addrmode4:$addr, pred:$p, reglist:$wb, variable_ops), - IIC_iStorem, "stm${addr:submode}${p}${addr:wide}\t$addr, $wb", []>; + IIC_iStorem, "stm${addr:submode}${p}${addr:wide}\t$addr, $wb", []> { + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b00; + let Inst{24-23} = {?, ?}; // IA: '01', DB: '10' + let Inst{22} = 0; + let Inst{21} = ?; // The W bit. + let Inst{20} = 0; // Store +} //===----------------------------------------------------------------------===// // Move Instructions. @@ -683,24 +1016,51 @@ def t2STM : T2XI<(outs), let neverHasSideEffects = 1 in def t2MOVr : T2sI<(outs GPR:$dst), (ins GPR:$src), IIC_iMOVr, - "mov", ".w\t$dst, $src", []>; + "mov", ".w\t$dst, $src", []> { + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b01; + let Inst{24-21} = 0b0010; + let Inst{20} = ?; // The S bit. + let Inst{19-16} = 0b1111; // Rn + let Inst{14-12} = 0b000; + let Inst{7-4} = 0b0000; +} // AddedComplexity to ensure isel tries t2MOVi before t2MOVi16. let isReMaterializable = 1, isAsCheapAsAMove = 1, AddedComplexity = 1 in def t2MOVi : T2sI<(outs GPR:$dst), (ins t2_so_imm:$src), IIC_iMOVi, "mov", ".w\t$dst, $src", - [(set GPR:$dst, t2_so_imm:$src)]>; + [(set GPR:$dst, t2_so_imm:$src)]> { + let Inst{31-27} = 0b11110; + let Inst{25} = 0; + let Inst{24-21} = 0b0010; + let Inst{20} = ?; // The S bit. + let Inst{19-16} = 0b1111; // Rn + let Inst{15} = 0; +} let isReMaterializable = 1, isAsCheapAsAMove = 1 in def t2MOVi16 : T2I<(outs GPR:$dst), (ins i32imm:$src), IIC_iMOVi, "movw", "\t$dst, $src", - [(set GPR:$dst, imm0_65535:$src)]>; + [(set GPR:$dst, imm0_65535:$src)]> { + let Inst{31-27} = 0b11110; + let Inst{25} = 1; + let Inst{24-21} = 0b0010; + let Inst{20} = 0; // The S bit. + let Inst{15} = 0; +} let Constraints = "$src = $dst" in def t2MOVTi16 : T2I<(outs GPR:$dst), (ins GPR:$src, i32imm:$imm), IIC_iMOVi, "movt", "\t$dst, $imm", [(set GPR:$dst, - (or (and GPR:$src, 0xffff), lo16AllZero:$imm))]>; + (or (and GPR:$src, 0xffff), lo16AllZero:$imm))]> { + let Inst{31-27} = 0b11110; + let Inst{25} = 1; + let Inst{24-21} = 0b0110; + let Inst{20} = 0; // The S bit. 
+ let Inst{15} = 0; +} def : T2Pat<(or GPR:$src, 0xffff0000), (t2MOVTi16 GPR:$src, 0xffff)>; @@ -710,12 +1070,14 @@ def : T2Pat<(or GPR:$src, 0xffff0000), (t2MOVTi16 GPR:$src, 0xffff)>; // Sign extenders -defm t2SXTB : T2I_unary_rrot<"sxtb", UnOpFrag<(sext_inreg node:$Src, i8)>>; -defm t2SXTH : T2I_unary_rrot<"sxth", UnOpFrag<(sext_inreg node:$Src, i16)>>; +defm t2SXTB : T2I_unary_rrot<0b100, "sxtb", + UnOpFrag<(sext_inreg node:$Src, i8)>>; +defm t2SXTH : T2I_unary_rrot<0b000, "sxth", + UnOpFrag<(sext_inreg node:$Src, i16)>>; -defm t2SXTAB : T2I_bin_rrot<"sxtab", +defm t2SXTAB : T2I_bin_rrot<0b100, "sxtab", BinOpFrag<(add node:$LHS, (sext_inreg node:$RHS, i8))>>; -defm t2SXTAH : T2I_bin_rrot<"sxtah", +defm t2SXTAH : T2I_bin_rrot<0b000, "sxtah", BinOpFrag<(add node:$LHS, (sext_inreg node:$RHS,i16))>>; // TODO: SXT(A){B|H}16 @@ -723,18 +1085,21 @@ defm t2SXTAH : T2I_bin_rrot<"sxtah", // Zero extenders let AddedComplexity = 16 in { -defm t2UXTB : T2I_unary_rrot<"uxtb" , UnOpFrag<(and node:$Src, 0x000000FF)>>; -defm t2UXTH : T2I_unary_rrot<"uxth" , UnOpFrag<(and node:$Src, 0x0000FFFF)>>; -defm t2UXTB16 : T2I_unary_rrot<"uxtb16", UnOpFrag<(and node:$Src, 0x00FF00FF)>>; +defm t2UXTB : T2I_unary_rrot<0b101, "uxtb", + UnOpFrag<(and node:$Src, 0x000000FF)>>; +defm t2UXTH : T2I_unary_rrot<0b001, "uxth", + UnOpFrag<(and node:$Src, 0x0000FFFF)>>; +defm t2UXTB16 : T2I_unary_rrot<0b011, "uxtb16", + UnOpFrag<(and node:$Src, 0x00FF00FF)>>; def : T2Pat<(and (shl GPR:$Src, (i32 8)), 0xFF00FF), (t2UXTB16r_rot GPR:$Src, 24)>; def : T2Pat<(and (srl GPR:$Src, (i32 8)), 0xFF00FF), (t2UXTB16r_rot GPR:$Src, 8)>; -defm t2UXTAB : T2I_bin_rrot<"uxtab", +defm t2UXTAB : T2I_bin_rrot<0b101, "uxtab", BinOpFrag<(add node:$LHS, (and node:$RHS, 0x00FF))>>; -defm t2UXTAH : T2I_bin_rrot<"uxtah", +defm t2UXTAH : T2I_bin_rrot<0b001, "uxtah", BinOpFrag<(add node:$LHS, (and node:$RHS, 0xFFFF))>>; } @@ -742,19 +1107,27 @@ defm t2UXTAH : T2I_bin_rrot<"uxtah", // Arithmetic Instructions. // -defm t2ADD : T2I_bin_ii12rs<"add", BinOpFrag<(add node:$LHS, node:$RHS)>, 1>; -defm t2SUB : T2I_bin_ii12rs<"sub", BinOpFrag<(sub node:$LHS, node:$RHS)>>; +defm t2ADD : T2I_bin_ii12rs<0b000, "add", + BinOpFrag<(add node:$LHS, node:$RHS)>, 1>; +defm t2SUB : T2I_bin_ii12rs<0b101, "sub", + BinOpFrag<(sub node:$LHS, node:$RHS)>>; // ADD and SUB with 's' bit set. No 12-bit immediate (T4) variants. -defm t2ADDS : T2I_bin_s_irs <"add", BinOpFrag<(addc node:$LHS, node:$RHS)>, 1>; -defm t2SUBS : T2I_bin_s_irs <"sub", BinOpFrag<(subc node:$LHS, node:$RHS)>>; +defm t2ADDS : T2I_bin_s_irs <0b1000, "add", + BinOpFrag<(addc node:$LHS, node:$RHS)>, 1>; +defm t2SUBS : T2I_bin_s_irs <0b1101, "sub", + BinOpFrag<(subc node:$LHS, node:$RHS)>>; -defm t2ADC : T2I_adde_sube_irs<"adc",BinOpFrag<(adde node:$LHS, node:$RHS)>,1>; -defm t2SBC : T2I_adde_sube_irs<"sbc",BinOpFrag<(sube node:$LHS, node:$RHS)>>; +defm t2ADC : T2I_adde_sube_irs<0b1010, "adc", + BinOpFrag<(adde node:$LHS, node:$RHS)>, 1>; +defm t2SBC : T2I_adde_sube_irs<0b1011, "sbc", + BinOpFrag<(sube node:$LHS, node:$RHS)>>; // RSB -defm t2RSB : T2I_rbin_is <"rsb", BinOpFrag<(sub node:$LHS, node:$RHS)>>; -defm t2RSBS : T2I_rbin_s_is <"rsb", BinOpFrag<(subc node:$LHS, node:$RHS)>>; +defm t2RSB : T2I_rbin_is <0b1110, "rsb", + BinOpFrag<(sub node:$LHS, node:$RHS)>>; +defm t2RSBS : T2I_rbin_s_is <0b1110, "rsb", + BinOpFrag<(subc node:$LHS, node:$RHS)>>; // (sub X, imm) gets canonicalized to (add X, -imm). Match this form. 
let AddedComplexity = 1 in @@ -770,54 +1143,103 @@ def : T2Pat<(add GPR:$src, imm0_4095_neg:$imm), // Shift and rotate Instructions. // -defm t2LSL : T2I_sh_ir<"lsl", BinOpFrag<(shl node:$LHS, node:$RHS)>>; -defm t2LSR : T2I_sh_ir<"lsr", BinOpFrag<(srl node:$LHS, node:$RHS)>>; -defm t2ASR : T2I_sh_ir<"asr", BinOpFrag<(sra node:$LHS, node:$RHS)>>; -defm t2ROR : T2I_sh_ir<"ror", BinOpFrag<(rotr node:$LHS, node:$RHS)>>; +defm t2LSL : T2I_sh_ir<0b00, "lsl", BinOpFrag<(shl node:$LHS, node:$RHS)>>; +defm t2LSR : T2I_sh_ir<0b01, "lsr", BinOpFrag<(srl node:$LHS, node:$RHS)>>; +defm t2ASR : T2I_sh_ir<0b10, "asr", BinOpFrag<(sra node:$LHS, node:$RHS)>>; +defm t2ROR : T2I_sh_ir<0b11, "ror", BinOpFrag<(rotr node:$LHS, node:$RHS)>>; let Uses = [CPSR] in { def t2MOVrx : T2sI<(outs GPR:$dst), (ins GPR:$src), IIC_iMOVsi, "rrx", "\t$dst, $src", - [(set GPR:$dst, (ARMrrx GPR:$src))]>; + [(set GPR:$dst, (ARMrrx GPR:$src))]> { + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b01; + let Inst{24-21} = 0b0010; + let Inst{20} = ?; // The S bit. + let Inst{19-16} = 0b1111; // Rn + let Inst{14-12} = 0b000; + let Inst{7-4} = 0b0011; +} } let Defs = [CPSR] in { def t2MOVsrl_flag : T2XI<(outs GPR:$dst), (ins GPR:$src), IIC_iMOVsi, "lsrs.w\t$dst, $src, #1", - [(set GPR:$dst, (ARMsrl_flag GPR:$src))]>; + [(set GPR:$dst, (ARMsrl_flag GPR:$src))]> { + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b01; + let Inst{24-21} = 0b0010; + let Inst{20} = 1; // The S bit. + let Inst{19-16} = 0b1111; // Rn + let Inst{5-4} = 0b01; // Shift type. + // Shift amount = Inst{14-12:7-6} = 1. + let Inst{14-12} = 0b000; + let Inst{7-6} = 0b01; +} def t2MOVsra_flag : T2XI<(outs GPR:$dst), (ins GPR:$src), IIC_iMOVsi, "asrs.w\t$dst, $src, #1", - [(set GPR:$dst, (ARMsra_flag GPR:$src))]>; + [(set GPR:$dst, (ARMsra_flag GPR:$src))]> { + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b01; + let Inst{24-21} = 0b0010; + let Inst{20} = 1; // The S bit. + let Inst{19-16} = 0b1111; // Rn + let Inst{5-4} = 0b10; // Shift type. + // Shift amount = Inst{14-12:7-6} = 1. + let Inst{14-12} = 0b000; + let Inst{7-6} = 0b01; +} } //===----------------------------------------------------------------------===// // Bitwise Instructions. 
//
-defm t2AND : T2I_bin_w_irs<"and", BinOpFrag<(and node:$LHS, node:$RHS)>, 1>;
-defm t2ORR : T2I_bin_w_irs<"orr", BinOpFrag<(or node:$LHS, node:$RHS)>, 1>;
-defm t2EOR : T2I_bin_w_irs<"eor", BinOpFrag<(xor node:$LHS, node:$RHS)>, 1>;
+defm t2AND : T2I_bin_w_irs<0b0000, "and",
+ BinOpFrag<(and node:$LHS, node:$RHS)>, 1>;
+defm t2ORR : T2I_bin_w_irs<0b0010, "orr",
+ BinOpFrag<(or node:$LHS, node:$RHS)>, 1>;
+defm t2EOR : T2I_bin_w_irs<0b0100, "eor",
+ BinOpFrag<(xor node:$LHS, node:$RHS)>, 1>;
-defm t2BIC : T2I_bin_w_irs<"bic", BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
+defm t2BIC : T2I_bin_w_irs<0b0001, "bic",
+ BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
let Constraints = "$src = $dst" in
def t2BFC : T2I<(outs GPR:$dst), (ins GPR:$src, bf_inv_mask_imm:$imm), IIC_iUNAsi, "bfc", "\t$dst, $imm",
- [(set GPR:$dst, (and GPR:$src, bf_inv_mask_imm:$imm))]>;
+ [(set GPR:$dst, (and GPR:$src, bf_inv_mask_imm:$imm))]> {
+ let Inst{31-27} = 0b11110;
+ let Inst{25} = 1;
+ let Inst{24-20} = 0b10110;
+ let Inst{19-16} = 0b1111; // Rn
+ let Inst{15} = 0;
+}
def t2SBFX : T2I<(outs GPR:$dst), (ins GPR:$src, imm0_31:$lsb, imm0_31:$width),
- IIC_iALUi, "sbfx", "\t$dst, $src, $lsb, $width", []>;
+ IIC_iALUi, "sbfx", "\t$dst, $src, $lsb, $width", []> {
+ let Inst{31-27} = 0b11110;
+ let Inst{25} = 1;
+ let Inst{24-20} = 0b10100;
+ let Inst{15} = 0;
+}
def t2UBFX : T2I<(outs GPR:$dst), (ins GPR:$src, imm0_31:$lsb, imm0_31:$width),
- IIC_iALUi, "ubfx", "\t$dst, $src, $lsb, $width", []>;
+ IIC_iALUi, "ubfx", "\t$dst, $src, $lsb, $width", []> {
+ let Inst{31-27} = 0b11110;
+ let Inst{25} = 1;
+ let Inst{24-20} = 0b11100;
+ let Inst{15} = 0;
+}
// FIXME: A8.6.18 BFI - Bitfield insert (Encoding T1)
-defm t2ORN : T2I_bin_irs<"orn", BinOpFrag<(or node:$LHS, (not node:$RHS))>>;
+defm t2ORN : T2I_bin_irs<0b0011, "orn", BinOpFrag<(or node:$LHS,
+ (not node:$RHS))>>;
// Preferred over t2EORri ra, rb, -1 because mvn has a 16-bit version
let AddedComplexity = 1 in
-defm t2MVN : T2I_un_irs <"mvn", UnOpFrag<(not node:$Src)>, 1, 1>;
+defm t2MVN : T2I_un_irs <0b0011, "mvn", UnOpFrag<(not node:$Src)>, 1, 1>;
def : T2Pat<(and GPR:$src, t2_so_imm_not:$imm),
@@ -837,81 +1259,184 @@ def : T2Pat<(t2_so_imm_not:$src),
let isCommutable = 1 in
def t2MUL: T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b), IIC_iMUL32, "mul", "\t$dst, $a, $b",
- [(set GPR:$dst, (mul GPR:$a, GPR:$b))]>;
+ [(set GPR:$dst, (mul GPR:$a, GPR:$b))]> {
+ let Inst{31-27} = 0b11111;
+ let Inst{26-23} = 0b0110;
+ let Inst{22-20} = 0b000;
+ let Inst{15-12} = 0b1111; // Ra = 0b1111 (no accumulate)
+ let Inst{7-4} = 0b0000; // Multiply
+}
def t2MLA: T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$c), IIC_iMAC32, "mla", "\t$dst, $a, $b, $c",
- [(set GPR:$dst, (add (mul GPR:$a, GPR:$b), GPR:$c))]>;
+ [(set GPR:$dst, (add (mul GPR:$a, GPR:$b), GPR:$c))]> {
+ let Inst{31-27} = 0b11111;
+ let Inst{26-23} = 0b0110;
+ let Inst{22-20} = 0b000;
+ let Inst{15-12} = {?, ?, ?, ?}; // Ra
+ let Inst{7-4} = 0b0000; // Multiply
+}
def t2MLS: T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$c), IIC_iMAC32, "mls", "\t$dst, $a, $b, $c",
- [(set GPR:$dst, (sub GPR:$c, (mul GPR:$a, GPR:$b)))]>;
+ [(set GPR:$dst, (sub GPR:$c, (mul GPR:$a, GPR:$b)))]> {
+ let Inst{31-27} = 0b11111;
+ let Inst{26-23} = 0b0110;
+ let Inst{22-20} = 0b000;
+ let Inst{15-12} = {?, ?, ?, ?}; // Ra
+ let Inst{7-4} = 0b0001; // Multiply and Subtract
+}
// Extra precision multiplies with low / high results
let neverHasSideEffects = 1 in {
let isCommutable = 1 in {
def t2SMULL : T2I<(outs GPR:$ldst, GPR:$hdst), (ins GPR:$a,
GPR:$b), IIC_iMUL64, - "smull", "\t$ldst, $hdst, $a, $b", []>; + "smull", "\t$ldst, $hdst, $a, $b", []> { + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0111; + let Inst{22-20} = 0b000; + let Inst{7-4} = 0b0000; +} def t2UMULL : T2I<(outs GPR:$ldst, GPR:$hdst), (ins GPR:$a, GPR:$b), IIC_iMUL64, - "umull", "\t$ldst, $hdst, $a, $b", []>; + "umull", "\t$ldst, $hdst, $a, $b", []> { + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0111; + let Inst{22-20} = 0b010; + let Inst{7-4} = 0b0000; } +} // isCommutable // Multiply + accumulate def t2SMLAL : T2I<(outs GPR:$ldst, GPR:$hdst), (ins GPR:$a, GPR:$b), IIC_iMAC64, - "smlal", "\t$ldst, $hdst, $a, $b", []>; + "smlal", "\t$ldst, $hdst, $a, $b", []>{ + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0111; + let Inst{22-20} = 0b100; + let Inst{7-4} = 0b0000; +} def t2UMLAL : T2I<(outs GPR:$ldst, GPR:$hdst), (ins GPR:$a, GPR:$b), IIC_iMAC64, - "umlal", "\t$ldst, $hdst, $a, $b", []>; + "umlal", "\t$ldst, $hdst, $a, $b", []>{ + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0111; + let Inst{22-20} = 0b110; + let Inst{7-4} = 0b0000; +} def t2UMAAL : T2I<(outs GPR:$ldst, GPR:$hdst), (ins GPR:$a, GPR:$b), IIC_iMAC64, - "umaal", "\t$ldst, $hdst, $a, $b", []>; + "umaal", "\t$ldst, $hdst, $a, $b", []>{ + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0111; + let Inst{22-20} = 0b110; + let Inst{7-4} = 0b0110; +} } // neverHasSideEffects // Most significant word multiply def t2SMMUL : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b), IIC_iMUL32, "smmul", "\t$dst, $a, $b", - [(set GPR:$dst, (mulhs GPR:$a, GPR:$b))]>; + [(set GPR:$dst, (mulhs GPR:$a, GPR:$b))]> { + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0110; + let Inst{22-20} = 0b101; + let Inst{15-12} = 0b1111; // Ra = 0b1111 (no accumulate) + let Inst{7-4} = 0b0000; // No Rounding (Inst{4} = 0) +} def t2SMMLA : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$c), IIC_iMAC32, "smmla", "\t$dst, $a, $b, $c", - [(set GPR:$dst, (add (mulhs GPR:$a, GPR:$b), GPR:$c))]>; + [(set GPR:$dst, (add (mulhs GPR:$a, GPR:$b), GPR:$c))]> { + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0110; + let Inst{22-20} = 0b101; + let Inst{15-12} = {?, ?, ?, ?}; // Ra + let Inst{7-4} = 0b0000; // No Rounding (Inst{4} = 0) +} def t2SMMLS : T2I <(outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$c), IIC_iMAC32, "smmls", "\t$dst, $a, $b, $c", - [(set GPR:$dst, (sub GPR:$c, (mulhs GPR:$a, GPR:$b)))]>; + [(set GPR:$dst, (sub GPR:$c, (mulhs GPR:$a, GPR:$b)))]> { + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0110; + let Inst{22-20} = 0b110; + let Inst{15-12} = {?, ?, ?, ?}; // Ra + let Inst{7-4} = 0b0000; // No Rounding (Inst{4} = 0) +} multiclass T2I_smul<string opc, PatFrag opnode> { def BB : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b), IIC_iMUL32, !strconcat(opc, "bb"), "\t$dst, $a, $b", [(set GPR:$dst, (opnode (sext_inreg GPR:$a, i16), - (sext_inreg GPR:$b, i16)))]>; + (sext_inreg GPR:$b, i16)))]> { + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0110; + let Inst{22-20} = 0b001; + let Inst{15-12} = 0b1111; // Ra = 0b1111 (no accumulate) + let Inst{7-6} = 0b00; + let Inst{5-4} = 0b00; + } def BT : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b), IIC_iMUL32, !strconcat(opc, "bt"), "\t$dst, $a, $b", [(set GPR:$dst, (opnode (sext_inreg GPR:$a, i16), - (sra GPR:$b, (i32 16))))]>; + (sra GPR:$b, (i32 16))))]> { + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0110; + let Inst{22-20} = 0b001; + let Inst{15-12} = 0b1111; // Ra = 0b1111 (no accumulate) + let Inst{7-6} = 0b00; + let Inst{5-4} = 0b01; + } def TB : T2I<(outs GPR:$dst), (ins 
GPR:$a, GPR:$b), IIC_iMUL32, !strconcat(opc, "tb"), "\t$dst, $a, $b", [(set GPR:$dst, (opnode (sra GPR:$a, (i32 16)), - (sext_inreg GPR:$b, i16)))]>; + (sext_inreg GPR:$b, i16)))]> { + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0110; + let Inst{22-20} = 0b001; + let Inst{15-12} = 0b1111; // Ra = 0b1111 (no accumulate) + let Inst{7-6} = 0b00; + let Inst{5-4} = 0b10; + } def TT : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b), IIC_iMUL32, !strconcat(opc, "tt"), "\t$dst, $a, $b", [(set GPR:$dst, (opnode (sra GPR:$a, (i32 16)), - (sra GPR:$b, (i32 16))))]>; + (sra GPR:$b, (i32 16))))]> { + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0110; + let Inst{22-20} = 0b001; + let Inst{15-12} = 0b1111; // Ra = 0b1111 (no accumulate) + let Inst{7-6} = 0b00; + let Inst{5-4} = 0b11; + } def WB : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b), IIC_iMUL16, !strconcat(opc, "wb"), "\t$dst, $a, $b", [(set GPR:$dst, (sra (opnode GPR:$a, - (sext_inreg GPR:$b, i16)), (i32 16)))]>; + (sext_inreg GPR:$b, i16)), (i32 16)))]> { + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0110; + let Inst{22-20} = 0b011; + let Inst{15-12} = 0b1111; // Ra = 0b1111 (no accumulate) + let Inst{7-6} = 0b00; + let Inst{5-4} = 0b00; + } def WT : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b), IIC_iMUL16, !strconcat(opc, "wt"), "\t$dst, $a, $b", [(set GPR:$dst, (sra (opnode GPR:$a, - (sra GPR:$b, (i32 16))), (i32 16)))]>; + (sra GPR:$b, (i32 16))), (i32 16)))]> { + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0110; + let Inst{22-20} = 0b011; + let Inst{15-12} = 0b1111; // Ra = 0b1111 (no accumulate) + let Inst{7-6} = 0b00; + let Inst{5-4} = 0b01; + } } @@ -920,32 +1445,74 @@ multiclass T2I_smla<string opc, PatFrag opnode> { !strconcat(opc, "bb"), "\t$dst, $a, $b, $acc", [(set GPR:$dst, (add GPR:$acc, (opnode (sext_inreg GPR:$a, i16), - (sext_inreg GPR:$b, i16))))]>; + (sext_inreg GPR:$b, i16))))]> { + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0110; + let Inst{22-20} = 0b001; + let Inst{15-12} = {?, ?, ?, ?}; // Ra + let Inst{7-6} = 0b00; + let Inst{5-4} = 0b00; + } def BT : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc), IIC_iMAC16, !strconcat(opc, "bt"), "\t$dst, $a, $b, $acc", [(set GPR:$dst, (add GPR:$acc, (opnode (sext_inreg GPR:$a, i16), - (sra GPR:$b, (i32 16)))))]>; + (sra GPR:$b, (i32 16)))))]> { + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0110; + let Inst{22-20} = 0b001; + let Inst{15-12} = {?, ?, ?, ?}; // Ra + let Inst{7-6} = 0b00; + let Inst{5-4} = 0b01; + } def TB : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc), IIC_iMAC16, !strconcat(opc, "tb"), "\t$dst, $a, $b, $acc", [(set GPR:$dst, (add GPR:$acc, (opnode (sra GPR:$a, (i32 16)), - (sext_inreg GPR:$b, i16))))]>; + (sext_inreg GPR:$b, i16))))]> { + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0110; + let Inst{22-20} = 0b001; + let Inst{15-12} = {?, ?, ?, ?}; // Ra + let Inst{7-6} = 0b00; + let Inst{5-4} = 0b10; + } def TT : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc), IIC_iMAC16, !strconcat(opc, "tt"), "\t$dst, $a, $b, $acc", [(set GPR:$dst, (add GPR:$acc, (opnode (sra GPR:$a, (i32 16)), - (sra GPR:$b, (i32 16)))))]>; + (sra GPR:$b, (i32 16)))))]> { + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0110; + let Inst{22-20} = 0b001; + let Inst{15-12} = {?, ?, ?, ?}; // Ra + let Inst{7-6} = 0b00; + let Inst{5-4} = 0b11; + } def WB : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc), IIC_iMAC16, !strconcat(opc, "wb"), "\t$dst, $a, $b, $acc", [(set GPR:$dst, (add GPR:$acc, (sra (opnode GPR:$a, - (sext_inreg GPR:$b, i16)), (i32 16))))]>; + 
(sext_inreg GPR:$b, i16)), (i32 16))))]> { + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0110; + let Inst{22-20} = 0b011; + let Inst{15-12} = {?, ?, ?, ?}; // Ra + let Inst{7-6} = 0b00; + let Inst{5-4} = 0b00; + } def WT : T2I<(outs GPR:$dst), (ins GPR:$a, GPR:$b, GPR:$acc), IIC_iMAC16, !strconcat(opc, "wt"), "\t$dst, $a, $b, $acc", [(set GPR:$dst, (add GPR:$acc, (sra (opnode GPR:$a, - (sra GPR:$b, (i32 16))), (i32 16))))]>; + (sra GPR:$b, (i32 16))), (i32 16))))]> { + let Inst{31-27} = 0b11111; + let Inst{26-23} = 0b0110; + let Inst{22-20} = 0b011; + let Inst{15-12} = {?, ?, ?, ?}; // Ra + let Inst{7-6} = 0b00; + let Inst{5-4} = 0b01; + } } defm t2SMUL : T2I_smul<"smul", BinOpFrag<(mul node:$LHS, node:$RHS)>>; @@ -959,24 +1526,33 @@ defm t2SMLA : T2I_smla<"smla", BinOpFrag<(mul node:$LHS, node:$RHS)>>; // Misc. Arithmetic Instructions. // -def t2CLZ : T2I<(outs GPR:$dst), (ins GPR:$src), IIC_iUNAr, - "clz", "\t$dst, $src", - [(set GPR:$dst, (ctlz GPR:$src))]>; +class T2I_misc<bits<2> op1, bits<2> op2, dag oops, dag iops, InstrItinClass itin, + string opc, string asm, list<dag> pattern> + : T2I<oops, iops, itin, opc, asm, pattern> { + let Inst{31-27} = 0b11111; + let Inst{26-22} = 0b01010; + let Inst{21-20} = op1; + let Inst{15-12} = 0b1111; + let Inst{7-6} = 0b10; + let Inst{5-4} = op2; +} + +def t2CLZ : T2I_misc<0b11, 0b00, (outs GPR:$dst), (ins GPR:$src), IIC_iUNAr, + "clz", "\t$dst, $src", [(set GPR:$dst, (ctlz GPR:$src))]>; -def t2REV : T2I<(outs GPR:$dst), (ins GPR:$src), IIC_iUNAr, - "rev", ".w\t$dst, $src", - [(set GPR:$dst, (bswap GPR:$src))]>; +def t2REV : T2I_misc<0b01, 0b00, (outs GPR:$dst), (ins GPR:$src), IIC_iUNAr, + "rev", ".w\t$dst, $src", [(set GPR:$dst, (bswap GPR:$src))]>; -def t2REV16 : T2I<(outs GPR:$dst), (ins GPR:$src), IIC_iUNAr, - "rev16", ".w\t$dst, $src", +def t2REV16 : T2I_misc<0b01, 0b01, (outs GPR:$dst), (ins GPR:$src), IIC_iUNAr, + "rev16", ".w\t$dst, $src", [(set GPR:$dst, (or (and (srl GPR:$src, (i32 8)), 0xFF), (or (and (shl GPR:$src, (i32 8)), 0xFF00), (or (and (srl GPR:$src, (i32 8)), 0xFF0000), (and (shl GPR:$src, (i32 8)), 0xFF000000)))))]>; -def t2REVSH : T2I<(outs GPR:$dst), (ins GPR:$src), IIC_iUNAr, - "revsh", ".w\t$dst, $src", +def t2REVSH : T2I_misc<0b01, 0b11, (outs GPR:$dst), (ins GPR:$src), IIC_iUNAr, + "revsh", ".w\t$dst, $src", [(set GPR:$dst, (sext_inreg (or (srl (and GPR:$src, 0xFF00), (i32 8)), @@ -986,7 +1562,13 @@ def t2PKHBT : T2I<(outs GPR:$dst), (ins GPR:$src1, GPR:$src2, i32imm:$shamt), IIC_iALUsi, "pkhbt", "\t$dst, $src1, $src2, LSL $shamt", [(set GPR:$dst, (or (and GPR:$src1, 0xFFFF), (and (shl GPR:$src2, (i32 imm:$shamt)), - 0xFFFF0000)))]>; + 0xFFFF0000)))]> { + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b01; + let Inst{24-20} = 0b01100; + let Inst{5} = 0; // BT form + let Inst{4} = 0; +} // Alternate cases for PKHBT where identities eliminate some nodes. def : T2Pat<(or (and GPR:$src1, 0xFFFF), (and GPR:$src2, 0xFFFF0000)), @@ -998,7 +1580,13 @@ def t2PKHTB : T2I<(outs GPR:$dst), (ins GPR:$src1, GPR:$src2, i32imm:$shamt), IIC_iALUsi, "pkhtb", "\t$dst, $src1, $src2, ASR $shamt", [(set GPR:$dst, (or (and GPR:$src1, 0xFFFF0000), (and (sra GPR:$src2, imm16_31:$shamt), - 0xFFFF)))]>; + 0xFFFF)))]> { + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b01; + let Inst{24-20} = 0b01100; + let Inst{5} = 1; // TB form + let Inst{4} = 0; +} // Alternate cases for PKHTB where identities eliminate some nodes. Note that // a shift amount of 0 is *not legal* here, it is PKHBT instead. 
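The "let Inst{hi-lo} = ..." blocks added throughout this hunk pin down the fixed opcode fields of each 32-bit Thumb-2 encoding while leaving the operand fields open. As a standalone illustration (plain C++, not LLVM code; the Rn/Rd/Rm field positions follow the ARM ARM for the smul family and are assumptions of this sketch, not part of the patch), here is how those fixed fields compose into an instruction word:

#include <cstdint>
#include <cstdio>

// Set bits [hi:lo] of insn to v, mirroring TableGen's "let Inst{hi-lo} = v".
static uint32_t setField(uint32_t insn, unsigned hi, unsigned lo, uint32_t v) {
  uint32_t mask = (hi - lo == 31) ? ~0u : (((1u << (hi - lo + 1)) - 1) << lo);
  return (insn & ~mask) | ((v << lo) & mask);
}

// Encode smul<x><y> rd, rn, rm from the fixed bits in the T2I_smul
// multiclass above; only Inst{5-4} varies across the bb/bt/tb/tt forms.
static uint32_t encodeSMUL(uint32_t xy, uint32_t rd, uint32_t rn, uint32_t rm) {
  uint32_t insn = 0;
  insn = setField(insn, 31, 27, 0x1F); // 0b11111
  insn = setField(insn, 26, 23, 0x6);  // 0b0110
  insn = setField(insn, 22, 20, 0x1);  // 0b001
  insn = setField(insn, 19, 16, rn);   // Rn (assumed position)
  insn = setField(insn, 15, 12, 0xF);  // Ra = 0b1111, no accumulate
  insn = setField(insn, 11, 8, rd);    // Rd (assumed position)
  insn = setField(insn, 7, 6, 0x0);    // 0b00
  insn = setField(insn, 5, 4, xy);     // 00=bb, 01=bt, 10=tb, 11=tt
  insn = setField(insn, 3, 0, rm);     // Rm (assumed position)
  return insn;
}

int main() {
  // smulbb r0, r1, r2 and smultt r0, r1, r2
  printf("0x%08x\n", (unsigned)encodeSMUL(0, 0, 1, 2));
  printf("0x%08x\n", (unsigned)encodeSMUL(3, 0, 1, 2));
  return 0;
}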
@@ -1012,15 +1600,15 @@ def : T2Pat<(or (and GPR:$src1, 0xFFFF0000), // Comparison Instructions... // -defm t2CMP : T2I_cmp_is<"cmp", - BinOpFrag<(ARMcmp node:$LHS, node:$RHS)>>; -defm t2CMPz : T2I_cmp_is<"cmp", - BinOpFrag<(ARMcmpZ node:$LHS, node:$RHS)>>; +defm t2CMP : T2I_cmp_irs<0b1101, "cmp", + BinOpFrag<(ARMcmp node:$LHS, node:$RHS)>>; +defm t2CMPz : T2I_cmp_irs<0b1101, "cmp", + BinOpFrag<(ARMcmpZ node:$LHS, node:$RHS)>>; -defm t2CMN : T2I_cmp_is<"cmn", - BinOpFrag<(ARMcmp node:$LHS,(ineg node:$RHS))>>; -defm t2CMNz : T2I_cmp_is<"cmn", - BinOpFrag<(ARMcmpZ node:$LHS,(ineg node:$RHS))>>; +defm t2CMN : T2I_cmp_irs<0b1000, "cmn", + BinOpFrag<(ARMcmp node:$LHS,(ineg node:$RHS))>>; +defm t2CMNz : T2I_cmp_irs<0b1000, "cmn", + BinOpFrag<(ARMcmpZ node:$LHS,(ineg node:$RHS))>>; def : T2Pat<(ARMcmp GPR:$src, t2_so_imm_neg:$imm), (t2CMNri GPR:$src, t2_so_imm_neg:$imm)>; @@ -1028,10 +1616,10 @@ def : T2Pat<(ARMcmp GPR:$src, t2_so_imm_neg:$imm), def : T2Pat<(ARMcmpZ GPR:$src, t2_so_imm_neg:$imm), (t2CMNri GPR:$src, t2_so_imm_neg:$imm)>; -defm t2TST : T2I_cmp_is<"tst", - BinOpFrag<(ARMcmpZ (and node:$LHS, node:$RHS), 0)>>; -defm t2TEQ : T2I_cmp_is<"teq", - BinOpFrag<(ARMcmpZ (xor node:$LHS, node:$RHS), 0)>>; +defm t2TST : T2I_cmp_irs<0b0000, "tst", + BinOpFrag<(ARMcmpZ (and node:$LHS, node:$RHS), 0)>>; +defm t2TEQ : T2I_cmp_irs<0b0100, "teq", + BinOpFrag<(ARMcmpZ (xor node:$LHS, node:$RHS), 0)>>; // A8.6.27 CBNZ, CBZ - Compare and branch on (non)zero. // Short range conditional branch. Looks awesome for loops. Need to figure @@ -1044,25 +1632,54 @@ defm t2TEQ : T2I_cmp_is<"teq", def t2MOVCCr : T2I<(outs GPR:$dst), (ins GPR:$false, GPR:$true), IIC_iCMOVr, "mov", ".w\t$dst, $true", [/*(set GPR:$dst, (ARMcmov GPR:$false, GPR:$true, imm:$cc, CCR:$ccr))*/]>, - RegConstraint<"$false = $dst">; + RegConstraint<"$false = $dst"> { + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b01; + let Inst{24-21} = 0b0010; + let Inst{20} = 0; // The S bit. + let Inst{19-16} = 0b1111; // Rn + let Inst{14-12} = 0b000; + let Inst{7-4} = 0b0000; +} def t2MOVCCi : T2I<(outs GPR:$dst), (ins GPR:$false, t2_so_imm:$true), IIC_iCMOVi, "mov", ".w\t$dst, $true", [/*(set GPR:$dst, (ARMcmov GPR:$false, t2_so_imm:$true, imm:$cc, CCR:$ccr))*/]>, - RegConstraint<"$false = $dst">; - -def t2MOVCClsl : T2I<(outs GPR:$dst), (ins GPR:$false, GPR:$true, i32imm:$rhs), - IIC_iCMOVsi, "lsl", ".w\t$dst, $true, $rhs", []>, - RegConstraint<"$false = $dst">; -def t2MOVCClsr : T2I<(outs GPR:$dst), (ins GPR:$false, GPR:$true, i32imm:$rhs), - IIC_iCMOVsi, "lsr", ".w\t$dst, $true, $rhs", []>, - RegConstraint<"$false = $dst">; -def t2MOVCCasr : T2I<(outs GPR:$dst), (ins GPR:$false, GPR:$true, i32imm:$rhs), - IIC_iCMOVsi, "asr", ".w\t$dst, $true, $rhs", []>, - RegConstraint<"$false = $dst">; -def t2MOVCCror : T2I<(outs GPR:$dst), (ins GPR:$false, GPR:$true, i32imm:$rhs), - IIC_iCMOVsi, "ror", ".w\t$dst, $true, $rhs", []>, - RegConstraint<"$false = $dst">; + RegConstraint<"$false = $dst"> { + let Inst{31-27} = 0b11110; + let Inst{25} = 0; + let Inst{24-21} = 0b0010; + let Inst{20} = 0; // The S bit. + let Inst{19-16} = 0b1111; // Rn + let Inst{15} = 0; +} + +class T2I_movcc_sh<bits<2> opcod, dag oops, dag iops, InstrItinClass itin, + string opc, string asm, list<dag> pattern> + : T2I<oops, iops, itin, opc, asm, pattern> { + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b01; + let Inst{24-21} = 0b0010; + let Inst{20} = 0; // The S bit. + let Inst{19-16} = 0b1111; // Rn + let Inst{5-4} = opcod; // Shift type. 
+} +def t2MOVCClsl : T2I_movcc_sh<0b00, (outs GPR:$dst), + (ins GPR:$false, GPR:$true, i32imm:$rhs), + IIC_iCMOVsi, "lsl", ".w\t$dst, $true, $rhs", []>, + RegConstraint<"$false = $dst">; +def t2MOVCClsr : T2I_movcc_sh<0b01, (outs GPR:$dst), + (ins GPR:$false, GPR:$true, i32imm:$rhs), + IIC_iCMOVsi, "lsr", ".w\t$dst, $true, $rhs", []>, + RegConstraint<"$false = $dst">; +def t2MOVCCasr : T2I_movcc_sh<0b10, (outs GPR:$dst), + (ins GPR:$false, GPR:$true, i32imm:$rhs), + IIC_iCMOVsi, "asr", ".w\t$dst, $true, $rhs", []>, + RegConstraint<"$false = $dst">; +def t2MOVCCror : T2I_movcc_sh<0b11, (outs GPR:$dst), + (ins GPR:$false, GPR:$true, i32imm:$rhs), + IIC_iCMOVsi, "ror", ".w\t$dst, $true, $rhs", []>, + RegConstraint<"$false = $dst">; //===----------------------------------------------------------------------===// // Atomic operations intrinsics @@ -1075,7 +1692,9 @@ def t2Int_MemBarrierV7 : AInoP<(outs), (ins), "dmb", "", [(ARMMemBarrierV7)]>, Requires<[IsThumb2]> { + let Inst{31-4} = 0xF3BF8F5; // FIXME: add support for options other than a full system DMB + let Inst{3-0} = 0b1111; } def t2Int_SyncBarrierV7 : AInoP<(outs), (ins), @@ -1083,47 +1702,76 @@ def t2Int_SyncBarrierV7 : AInoP<(outs), (ins), "dsb", "", [(ARMSyncBarrierV7)]>, Requires<[IsThumb2]> { + let Inst{31-4} = 0xF3BF8F4; // FIXME: add support for options other than a full system DSB + let Inst{3-0} = 0b1111; +} +} + +class T2I_ldrex<bits<2> opcod, dag oops, dag iops, AddrMode am, SizeFlagVal sz, + InstrItinClass itin, string opc, string asm, string cstr, + list<dag> pattern, bits<4> rt2 = 0b1111> + : Thumb2I<oops, iops, am, sz, itin, opc, asm, cstr, pattern> { + let Inst{31-27} = 0b11101; + let Inst{26-20} = 0b0001101; + let Inst{11-8} = rt2; + let Inst{7-6} = 0b01; + let Inst{5-4} = opcod; + let Inst{3-0} = 0b1111; } +class T2I_strex<bits<2> opcod, dag oops, dag iops, AddrMode am, SizeFlagVal sz, + InstrItinClass itin, string opc, string asm, string cstr, + list<dag> pattern, bits<4> rt2 = 0b1111> + : Thumb2I<oops, iops, am, sz, itin, opc, asm, cstr, pattern> { + let Inst{31-27} = 0b11101; + let Inst{26-20} = 0b0001100; + let Inst{11-8} = rt2; + let Inst{7-6} = 0b01; + let Inst{5-4} = opcod; } let mayLoad = 1 in { -def t2LDREXB : Thumb2I<(outs GPR:$dest), (ins GPR:$ptr), AddrModeNone, - Size4Bytes, NoItinerary, - "ldrexb", "\t$dest, [$ptr]", "", - []>; -def t2LDREXH : Thumb2I<(outs GPR:$dest), (ins GPR:$ptr), AddrModeNone, - Size4Bytes, NoItinerary, - "ldrexh", "\t$dest, [$ptr]", "", - []>; +def t2LDREXB : T2I_ldrex<0b00, (outs GPR:$dest), (ins GPR:$ptr), AddrModeNone, + Size4Bytes, NoItinerary, "ldrexb", "\t$dest, [$ptr]", + "", []>; +def t2LDREXH : T2I_ldrex<0b01, (outs GPR:$dest), (ins GPR:$ptr), AddrModeNone, + Size4Bytes, NoItinerary, "ldrexh", "\t$dest, [$ptr]", + "", []>; def t2LDREX : Thumb2I<(outs GPR:$dest), (ins GPR:$ptr), AddrModeNone, - Size4Bytes, NoItinerary, - "ldrex", "\t$dest, [$ptr]", "", - []>; -def t2LDREXD : Thumb2I<(outs GPR:$dest, GPR:$dest2), (ins GPR:$ptr), - AddrModeNone, Size4Bytes, NoItinerary, - "ldrexd", "\t$dest, $dest2, [$ptr]", "", - []>; -} - -let mayStore = 1 in { -def t2STREXB : Thumb2I<(outs GPR:$success), (ins GPR:$src, GPR:$ptr), - AddrModeNone, Size4Bytes, NoItinerary, - "strexb", "\t$success, $src, [$ptr]", "", - []>; -def t2STREXH : Thumb2I<(outs GPR:$success), (ins GPR:$src, GPR:$ptr), - AddrModeNone, Size4Bytes, NoItinerary, - "strexh", "\t$success, $src, [$ptr]", "", - []>; + Size4Bytes, NoItinerary, + "ldrex", "\t$dest, [$ptr]", "", + []> { + let Inst{31-27} = 0b11101; + let 
Inst{26-20} = 0b0000101; + let Inst{11-8} = 0b1111; + let Inst{7-0} = 0b00000000; // imm8 = 0 +} +def t2LDREXD : T2I_ldrex<0b11, (outs GPR:$dest, GPR:$dest2), (ins GPR:$ptr), + AddrModeNone, Size4Bytes, NoItinerary, + "ldrexd", "\t$dest, $dest2, [$ptr]", "", + [], {?, ?, ?, ?}>; +} + +let mayStore = 1, Constraints = "@earlyclobber $success" in { +def t2STREXB : T2I_strex<0b00, (outs GPR:$success), (ins GPR:$src, GPR:$ptr), + AddrModeNone, Size4Bytes, NoItinerary, + "strexb", "\t$success, $src, [$ptr]", "", []>; +def t2STREXH : T2I_strex<0b01, (outs GPR:$success), (ins GPR:$src, GPR:$ptr), + AddrModeNone, Size4Bytes, NoItinerary, + "strexh", "\t$success, $src, [$ptr]", "", []>; def t2STREX : Thumb2I<(outs GPR:$success), (ins GPR:$src, GPR:$ptr), - AddrModeNone, Size4Bytes, NoItinerary, - "strex", "\t$success, $src, [$ptr]", "", - []>; -def t2STREXD : Thumb2I<(outs GPR:$success), - (ins GPR:$src, GPR:$src2, GPR:$ptr), - AddrModeNone, Size4Bytes, NoItinerary, - "strexd", "\t$success, $src, $src2, [$ptr]", "", - []>; + AddrModeNone, Size4Bytes, NoItinerary, + "strex", "\t$success, $src, [$ptr]", "", + []> { + let Inst{31-27} = 0b11101; + let Inst{26-20} = 0b0000100; + let Inst{7-0} = 0b00000000; // imm8 = 0 +} +def t2STREXD : T2I_strex<0b11, (outs GPR:$success), + (ins GPR:$src, GPR:$src2, GPR:$ptr), + AddrModeNone, Size4Bytes, NoItinerary, + "strexd", "\t$success, $src, $src2, [$ptr]", "", [], + {?, ?, ?, ?}>; } //===----------------------------------------------------------------------===// @@ -1135,7 +1783,11 @@ let isCall = 1, Defs = [R0, R12, LR, CPSR] in { def t2TPsoft : T2XI<(outs), (ins), IIC_Br, "bl\t__aeabi_read_tp", - [(set R0, ARMthread_pointer)]>; + [(set R0, ARMthread_pointer)]> { + let Inst{31-27} = 0b11110; + let Inst{15-14} = 0b11; + let Inst{12} = 1; + } } //===----------------------------------------------------------------------===// @@ -1183,31 +1835,61 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1, mayLoad = 1, def t2LDM_RET : T2XI<(outs), (ins addrmode4:$addr, pred:$p, reglist:$wb, variable_ops), IIC_Br, "ldm${addr:submode}${p}${addr:wide}\t$addr, $wb", - []>; + []> { + let Inst{31-27} = 0b11101; + let Inst{26-25} = 0b00; + let Inst{24-23} = {?, ?}; // IA: '01', DB: '10' + let Inst{22} = 0; + let Inst{21} = ?; // The W bit. + let Inst{20} = 1; // Load +} let isBranch = 1, isTerminator = 1, isBarrier = 1 in { let isPredicable = 1 in def t2B : T2XI<(outs), (ins brtarget:$target), IIC_Br, "b.w\t$target", - [(br bb:$target)]>; + [(br bb:$target)]> { + let Inst{31-27} = 0b11110; + let Inst{15-14} = 0b10; + let Inst{12} = 1; +} let isNotDuplicable = 1, isIndirectBranch = 1 in { def t2BR_JT : T2JTI<(outs), (ins GPR:$target, GPR:$index, jt2block_operand:$jt, i32imm:$id), IIC_Br, "mov\tpc, $target\n$jt", - [(ARMbr2jt GPR:$target, GPR:$index, tjumptable:$jt, imm:$id)]>; + [(ARMbr2jt GPR:$target, GPR:$index, tjumptable:$jt, imm:$id)]> { + let Inst{31-27} = 0b11101; + let Inst{26-20} = 0b0100100; + let Inst{19-16} = 0b1111; + let Inst{14-12} = 0b000; + let Inst{11-8} = 0b1111; // Rd = pc + let Inst{7-4} = 0b0000; +} // FIXME: Add a non-pc based case that can be predicated. 
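Stepping back to the T2I_ldrex and T2I_strex classes earlier in this hunk: they assign fixed encodings to ARM's exclusive load/store family, and the new "@earlyclobber $success" constraint keeps the status result of strex from sharing a register with its sources. A rough single-threaded model of the semantics these instructions implement (plain C++, purely illustrative, not LLVM code):

#include <cstdint>

struct ExclusiveMonitor {
  const void *addr = nullptr;

  // ldrex: load the value and open the exclusive monitor on the address.
  uint32_t ldrex(const uint32_t *p) {
    addr = p;
    return *p;
  }

  // strex: store only if the monitor is still held; returns 0 on success,
  // 1 on failure, as in the $success output operand above.
  uint32_t strex(uint32_t *p, uint32_t val) {
    if (addr != p)
      return 1;       // monitor lost, store rejected
    *p = val;
    addr = nullptr;   // monitor consumed by the store
    return 0;
  }
};

// Typical use: retry loop implementing an atomic increment.
void atomicIncrement(ExclusiveMonitor &mon, uint32_t *p) {
  uint32_t failed;
  do {
    uint32_t v = mon.ldrex(p);
    failed = mon.strex(p, v + 1);
  } while (failed != 0);
}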
def t2TBB : T2JTI<(outs), (ins tb_addrmode:$index, jt2block_operand:$jt, i32imm:$id), - IIC_Br, "tbb\t$index\n$jt", []>; + IIC_Br, "tbb\t$index\n$jt", []> { + let Inst{31-27} = 0b11101; + let Inst{26-20} = 0b0001101; + let Inst{19-16} = 0b1111; // Rn = pc (table follows this instruction) + let Inst{15-8} = 0b11110000; + let Inst{7-4} = 0b0000; // B form +} def t2TBH : T2JTI<(outs), (ins tb_addrmode:$index, jt2block_operand:$jt, i32imm:$id), - IIC_Br, "tbh\t$index\n$jt", []>; + IIC_Br, "tbh\t$index\n$jt", []> { + let Inst{31-27} = 0b11101; + let Inst{26-20} = 0b0001101; + let Inst{19-16} = 0b1111; // Rn = pc (table follows this instruction) + let Inst{15-8} = 0b11110000; + let Inst{7-4} = 0b0001; // H form +} } // isNotDuplicable, isIndirectBranch } // isBranch, isTerminator, isBarrier @@ -1217,13 +1899,21 @@ def t2TBH : let isBranch = 1, isTerminator = 1 in def t2Bcc : T2I<(outs), (ins brtarget:$target), IIC_Br, "b", ".w\t$target", - [/*(ARMbrcond bb:$target, imm:$cc)*/]>; + [/*(ARMbrcond bb:$target, imm:$cc)*/]> { + let Inst{31-27} = 0b11110; + let Inst{15-14} = 0b10; + let Inst{12} = 0; +} // IT block def t2IT : Thumb2XI<(outs), (ins it_pred:$cc, it_mask:$mask), AddrModeNone, Size2Bytes, IIC_iALUx, - "it$mask\t$cc", "", []>; + "it$mask\t$cc", "", []> { + // 16-bit instruction. + let Inst{31-16} = 0x0000; + let Inst{15-8} = 0b10111111; +} //===----------------------------------------------------------------------===// // Non-Instruction Patterns diff --git a/lib/Target/ARM/ARMJITInfo.cpp b/lib/Target/ARM/ARMJITInfo.cpp index aa50cfd..bef5a06 100644 --- a/lib/Target/ARM/ARMJITInfo.cpp +++ b/lib/Target/ARM/ARMJITInfo.cpp @@ -139,17 +139,11 @@ ARMJITInfo::getLazyResolverFunction(JITCompilerFn F) { void *ARMJITInfo::emitGlobalValueIndirectSym(const GlobalValue *GV, void *Ptr, JITCodeEmitter &JCE) { - MachineCodeEmitter::BufferState BS; - JCE.startGVStub(BS, GV, 4, 4); - intptr_t Addr = (intptr_t)JCE.getCurrentPCValue(); - if (!sys::Memory::setRangeWritable((void*)Addr, 4)) { - llvm_unreachable("ERROR: Unable to mark indirect symbol writable"); - } - JCE.emitWordLE((intptr_t)Ptr); - if (!sys::Memory::setRangeExecutable((void*)Addr, 4)) { - llvm_unreachable("ERROR: Unable to mark indirect symbol executable"); - } - void *PtrAddr = JCE.finishGVStub(BS); + uint8_t Buffer[4]; + uint8_t *Cur = Buffer; + MachineCodeEmitter::emitWordLEInto(Cur, (intptr_t)Ptr); + void *PtrAddr = JCE.allocIndirectGV( + GV, Buffer, sizeof(Buffer), /*Alignment=*/4); addIndirectSymAddr(Ptr, (intptr_t)PtrAddr); return PtrAddr; } diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp index 22bd80e..b13f98a 100644 --- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp +++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp @@ -78,7 +78,7 @@ namespace { MachineBasicBlock::iterator MBBI; bool Merged; MemOpQueueEntry(int o, int p, MachineBasicBlock::iterator i) - : Offset(o), Position(p), MBBI(i), Merged(false) {}; + : Offset(o), Position(p), MBBI(i), Merged(false) {} }; typedef SmallVector<MemOpQueueEntry,8> MemOpQueue; typedef MemOpQueue::iterator MemOpQueueIter; @@ -87,6 +87,20 @@ namespace { int Offset, unsigned Base, bool BaseKill, int Opcode, ARMCC::CondCodes Pred, unsigned PredReg, unsigned Scratch, DebugLoc dl, SmallVector<std::pair<unsigned, bool>, 8> &Regs); + void MergeOpsUpdate(MachineBasicBlock &MBB, + MemOpQueue &MemOps, + unsigned memOpsBegin, + unsigned memOpsEnd, + unsigned insertAfter, + int Offset, + unsigned Base, + bool BaseKill, + int Opcode, + ARMCC::CondCodes Pred, + unsigned 
PredReg, + unsigned Scratch, + DebugLoc dl, + SmallVector<MachineBasicBlock::iterator, 4> &Merges); void MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex, unsigned Base, int Opcode, unsigned Size, ARMCC::CondCodes Pred, unsigned PredReg, @@ -248,6 +262,67 @@ ARMLoadStoreOpt::MergeOps(MachineBasicBlock &MBB, return true; } +// MergeOpsUpdate - call MergeOps and update MemOps and merges accordingly on +// success. +void ARMLoadStoreOpt:: +MergeOpsUpdate(MachineBasicBlock &MBB, + MemOpQueue &memOps, + unsigned memOpsBegin, + unsigned memOpsEnd, + unsigned insertAfter, + int Offset, + unsigned Base, + bool BaseKill, + int Opcode, + ARMCC::CondCodes Pred, + unsigned PredReg, + unsigned Scratch, + DebugLoc dl, + SmallVector<MachineBasicBlock::iterator, 4> &Merges) { + // First calculate which of the registers should be killed by the merged + // instruction. + SmallVector<std::pair<unsigned, bool>, 8> Regs; + const unsigned insertPos = memOps[insertAfter].Position; + for (unsigned i = memOpsBegin; i < memOpsEnd; ++i) { + const MachineOperand &MO = memOps[i].MBBI->getOperand(0); + unsigned Reg = MO.getReg(); + bool isKill = MO.isKill(); + + // If we are inserting the merged operation after an unmerged operation that + // uses the same register, make sure to transfer any kill flag. + for (unsigned j = memOpsEnd, e = memOps.size(); !isKill && j != e; ++j) + if (memOps[j].Position<insertPos) { + const MachineOperand &MOJ = memOps[j].MBBI->getOperand(0); + if (MOJ.getReg() == Reg && MOJ.isKill()) + isKill = true; + } + + Regs.push_back(std::make_pair(Reg, isKill)); + } + + // Try to do the merge. + MachineBasicBlock::iterator Loc = memOps[insertAfter].MBBI; + Loc++; + if (!MergeOps(MBB, Loc, Offset, Base, BaseKill, Opcode, + Pred, PredReg, Scratch, dl, Regs)) + return; + + // Merge succeeded, update records. + Merges.push_back(prior(Loc)); + for (unsigned i = memOpsBegin; i < memOpsEnd; ++i) { + // Remove kill flags from any unmerged memops that come before insertPos. + if (Regs[i-memOpsBegin].second) + for (unsigned j = memOpsEnd, e = memOps.size(); j != e; ++j) + if (memOps[j].Position<insertPos) { + MachineOperand &MOJ = memOps[j].MBBI->getOperand(0); + if (MOJ.getReg() == Regs[i-memOpsBegin].first && MOJ.isKill()) + MOJ.setIsKill(false); + } + MBB.erase(memOps[i].MBBI); + memOps[i].Merged = true; + } +} + /// MergeLDR_STR - Merge a number of load / store instructions into one or more /// load / store multiple instructions. void @@ -259,58 +334,42 @@ ARMLoadStoreOpt::MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex, bool isAM4 = isi32Load(Opcode) || isi32Store(Opcode); int Offset = MemOps[SIndex].Offset; int SOffset = Offset; - unsigned Pos = MemOps[SIndex].Position; + unsigned insertAfter = SIndex; MachineBasicBlock::iterator Loc = MemOps[SIndex].MBBI; DebugLoc dl = Loc->getDebugLoc(); - unsigned PReg = Loc->getOperand(0).getReg(); - unsigned PRegNum = ARMRegisterInfo::getRegisterNumbering(PReg); - bool isKill = Loc->getOperand(0).isKill(); + const MachineOperand &PMO = Loc->getOperand(0); + unsigned PReg = PMO.getReg(); + unsigned PRegNum = PMO.isUndef() ? 
UINT_MAX + : ARMRegisterInfo::getRegisterNumbering(PReg); - SmallVector<std::pair<unsigned,bool>, 8> Regs; - Regs.push_back(std::make_pair(PReg, isKill)); for (unsigned i = SIndex+1, e = MemOps.size(); i != e; ++i) { int NewOffset = MemOps[i].Offset; - unsigned Reg = MemOps[i].MBBI->getOperand(0).getReg(); - unsigned RegNum = ARMRegisterInfo::getRegisterNumbering(Reg); - isKill = MemOps[i].MBBI->getOperand(0).isKill(); + const MachineOperand &MO = MemOps[i].MBBI->getOperand(0); + unsigned Reg = MO.getReg(); + unsigned RegNum = MO.isUndef() ? UINT_MAX + : ARMRegisterInfo::getRegisterNumbering(Reg); // AM4 - register numbers in ascending order. // AM5 - consecutive register numbers in ascending order. if (NewOffset == Offset + (int)Size && ((isAM4 && RegNum > PRegNum) || RegNum == PRegNum+1)) { Offset += Size; - Regs.push_back(std::make_pair(Reg, isKill)); PRegNum = RegNum; } else { // Can't merge this in. Try merge the earlier ones first. - if (MergeOps(MBB, ++Loc, SOffset, Base, false, Opcode, Pred, PredReg, - Scratch, dl, Regs)) { - Merges.push_back(prior(Loc)); - for (unsigned j = SIndex; j < i; ++j) { - MBB.erase(MemOps[j].MBBI); - MemOps[j].Merged = true; - } - } + MergeOpsUpdate(MBB, MemOps, SIndex, i, insertAfter, SOffset, + Base, false, Opcode, Pred, PredReg, Scratch, dl, Merges); MergeLDR_STR(MBB, i, Base, Opcode, Size, Pred, PredReg, Scratch, MemOps, Merges); return; } - if (MemOps[i].Position > Pos) { - Pos = MemOps[i].Position; - Loc = MemOps[i].MBBI; - } + if (MemOps[i].Position > MemOps[insertAfter].Position) + insertAfter = i; } bool BaseKill = Loc->findRegisterUseOperandIdx(Base, true) != -1; - if (MergeOps(MBB, ++Loc, SOffset, Base, BaseKill, Opcode, Pred, PredReg, - Scratch, dl, Regs)) { - Merges.push_back(prior(Loc)); - for (unsigned i = SIndex, e = MemOps.size(); i != e; ++i) { - MBB.erase(MemOps[i].MBBI); - MemOps[i].Merged = true; - } - } - + MergeOpsUpdate(MBB, MemOps, SIndex, MemOps.size(), insertAfter, SOffset, + Base, BaseKill, Opcode, Pred, PredReg, Scratch, dl, Merges); return; } diff --git a/lib/Target/ARM/ARMRegisterInfo.td b/lib/Target/ARM/ARMRegisterInfo.td index d393e8d..9fbde81 100644 --- a/lib/Target/ARM/ARMRegisterInfo.td +++ b/lib/Target/ARM/ARMRegisterInfo.td @@ -367,6 +367,19 @@ def QPR_8 : RegisterClass<"ARM", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], // Condition code registers. def CCR : RegisterClass<"ARM", [i32], 32, [CPSR]>; +// Just the stack pointer (for tSTRspi and friends). +def JustSP : RegisterClass<"ARM", [i32], 32, [SP]> { + let MethodProtos = [{ + iterator allocation_order_end(const MachineFunction &MF) const; + }]; + let MethodBodies = [{ + JustSPClass::iterator + JustSPClass::allocation_order_end(const MachineFunction &MF) const { + return allocation_order_begin(MF); + } + }]; +} + //===----------------------------------------------------------------------===// // Subregister Set Definitions... now that we have all of the pieces, define the // sub registers for each register. 
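The MergeOpsUpdate hook introduced above centralizes bookkeeping that MergeLDR_STR previously open-coded: after a successful merge it records the new LDM/STM, erases the merged memops, and moves kill flags so each register stays live up to the merged instruction. A simplified standalone sketch of that flag transfer (hypothetical MemOp struct, not LLVM's types):

#include <cstddef>
#include <vector>

struct MemOp {
  unsigned Reg;
  bool IsKill;
  unsigned Position; // program order within the basic block
};

// The merged op kills Reg if the merged operand was a kill, or if an
// unmerged memop placed before the insertion point held the killing use.
bool mergedKill(const std::vector<MemOp> &Ops, std::size_t End,
                unsigned InsertPos, unsigned Reg, bool WasKill) {
  if (WasKill)
    return true;
  for (std::size_t J = End; J != Ops.size(); ++J)
    if (Ops[J].Position < InsertPos && Ops[J].Reg == Reg && Ops[J].IsKill)
      return true;
  return false;
}

// After the merge succeeds, clear kill flags on unmerged memops before the
// insertion point, since the merged instruction now carries the kill.
void clearEarlierKills(std::vector<MemOp> &Ops, std::size_t End,
                       unsigned InsertPos, unsigned Reg) {
  for (std::size_t J = End; J != Ops.size(); ++J)
    if (Ops[J].Position < InsertPos && Ops[J].Reg == Reg)
      Ops[J].IsKill = false;
}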
diff --git a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp index 894f913a..ed4667b 100644 --- a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp +++ b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp @@ -15,6 +15,7 @@ #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCInst.h" +#include "llvm/Support/Compiler.h" #include "llvm/Support/SourceMgr.h" #include "llvm/Target/TargetRegistry.h" #include "llvm/Target/TargetAsmParser.h" @@ -98,10 +99,6 @@ public: virtual bool ParseDirective(AsmToken DirectiveID); }; -} // end anonymous namespace - -namespace { - /// ARMOperand - Instances of this class represent a parsed ARM machine /// instruction. struct ARMOperand { @@ -670,7 +667,7 @@ bool ARMAsmParser::ParseDirectiveThumbFunc(SMLoc L) { const AsmToken &Tok = getLexer().getTok(); if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String)) return Error(L, "unexpected token in .syntax directive"); - StringRef SymbolName = getLexer().getTok().getIdentifier(); + StringRef ATTRIBUTE_UNUSED SymbolName = getLexer().getTok().getIdentifier(); getLexer().Lex(); // Consume the identifier token. if (getLexer().isNot(AsmToken::EndOfStatement)) diff --git a/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp b/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp index 362bbf1..931d8df 100644 --- a/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp +++ b/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp @@ -23,6 +23,7 @@ #include "ARMTargetMachine.h" #include "llvm/Constants.h" #include "llvm/Module.h" +#include "llvm/Type.h" #include "llvm/Assembly/Writer.h" #include "llvm/CodeGen/AsmPrinter.h" #include "llvm/CodeGen/DwarfWriter.h" diff --git a/lib/Target/ARM/Thumb1InstrInfo.cpp b/lib/Target/ARM/Thumb1InstrInfo.cpp index 66d3b83..e875394 100644 --- a/lib/Target/ARM/Thumb1InstrInfo.cpp +++ b/lib/Target/ARM/Thumb1InstrInfo.cpp @@ -180,7 +180,7 @@ restoreCalleeSavedRegisters(MachineBasicBlock &MBB, AddDefaultPred(MIB); MIB.addReg(0); // No write back. - bool NumRegs = 0; + bool NumRegs = false; for (unsigned i = CSI.size(); i != 0; --i) { unsigned Reg = CSI[i-1].getReg(); if (Reg == ARM::LR) { @@ -192,7 +192,7 @@ restoreCalleeSavedRegisters(MachineBasicBlock &MBB, MI = MBB.erase(MI); } MIB.addReg(Reg, getDefRegState(true)); - ++NumRegs; + NumRegs = true; } // It's illegal to emit pop instruction without operands. diff --git a/lib/Target/Alpha/AlphaISelLowering.cpp b/lib/Target/Alpha/AlphaISelLowering.cpp index b5579f4..471de7f 100644 --- a/lib/Target/Alpha/AlphaISelLowering.cpp +++ b/lib/Target/Alpha/AlphaISelLowering.cpp @@ -190,7 +190,6 @@ static SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) { EVT PtrVT = Op.getValueType(); JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); - SDValue Zero = DAG.getConstant(0, PtrVT); // FIXME there isn't really any debug info here DebugLoc dl = Op.getDebugLoc(); diff --git a/lib/Target/Alpha/AlphaJITInfo.cpp b/lib/Target/Alpha/AlphaJITInfo.cpp index b3b711e..cb8eb51 100644 --- a/lib/Target/Alpha/AlphaJITInfo.cpp +++ b/lib/Target/Alpha/AlphaJITInfo.cpp @@ -202,7 +202,6 @@ TargetJITInfo::StubLayout AlphaJITInfo::getStubLayout() { void *AlphaJITInfo::emitFunctionStub(const Function* F, void *Fn, JITCodeEmitter &JCE) { - MachineCodeEmitter::BufferState BS; //assert(Fn == AlphaCompilationCallback && "Where are you going?\n"); //Do things in a stupid slow way! 
void* Addr = (void*)(intptr_t)JCE.getCurrentPCValue(); diff --git a/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp b/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp index fc62a18..2217af4 100644 --- a/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp +++ b/lib/Target/Blackfin/BlackfinISelDAGToDAG.cpp @@ -84,7 +84,6 @@ void BlackfinDAGToDAGISel::InstructionSelect() { SDNode *BlackfinDAGToDAGISel::Select(SDValue Op) { SDNode *N = Op.getNode(); - DebugLoc dl = N->getDebugLoc(); if (N->isMachineOpcode()) return NULL; // Already selected. diff --git a/lib/Target/CBackend/CBackend.cpp b/lib/Target/CBackend/CBackend.cpp index a52ca79..1ab3c0a 100644 --- a/lib/Target/CBackend/CBackend.cpp +++ b/lib/Target/CBackend/CBackend.cpp @@ -3389,7 +3389,6 @@ void CWriter::visitInlineAsm(CallInst &CI) { // Convert over the clobber constraints. IsFirst = true; - ValueCount = 0; for (std::vector<InlineAsm::ConstraintInfo>::iterator I = Constraints.begin(), E = Constraints.end(); I != E; ++I) { if (I->Type != InlineAsm::isClobber) diff --git a/lib/Target/CellSPU/SPUISelLowering.cpp b/lib/Target/CellSPU/SPUISelLowering.cpp index 23e192e..aa7f910 100644 --- a/lib/Target/CellSPU/SPUISelLowering.cpp +++ b/lib/Target/CellSPU/SPUISelLowering.cpp @@ -118,8 +118,8 @@ namespace { TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false, 0, TLI.getLibcallCallingConv(LC), false, /*isReturnValueUsed=*/true, - Callee, Args, DAG, - Op.getDebugLoc()); + Callee, Args, DAG, Op.getDebugLoc(), + DAG.GetOrdering(InChain.getNode())); return CallInfo.first; } @@ -748,9 +748,7 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) { case ISD::UNINDEXED: { // The vector type we really want to load from the 16-byte chunk. EVT vecVT = EVT::getVectorVT(*DAG.getContext(), - VT, (128 / VT.getSizeInBits())), - stVecVT = EVT::getVectorVT(*DAG.getContext(), - StVT, (128 / StVT.getSizeInBits())); + VT, (128 / VT.getSizeInBits())); SDValue alignLoadVec; SDValue basePtr = SN->getBasePtr(); @@ -1157,11 +1155,6 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee, // Handy pointer type EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); - // Accumulate how many bytes are to be pushed on the stack, including the - // linkage area, and parameter passing area. According to the SPU ABI, - // we minimally need space for [LR] and [SP] - unsigned NumStackBytes = SPUFrameInfo::minStackSize(); - // Set up a copy of the stack pointer for use loading and storing any // arguments that may not fit in the registers available for argument // passing. @@ -1224,8 +1217,12 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee, } } - // Update number of stack bytes actually used, insert a call sequence start - NumStackBytes = (ArgOffset - SPUFrameInfo::minStackSize()); + // Accumulate how many bytes are to be pushed on the stack, including the + // linkage area, and parameter passing area. According to the SPU ABI, + // we minimally need space for [LR] and [SP]. 
+ unsigned NumStackBytes = ArgOffset - SPUFrameInfo::minStackSize(); + + // Insert a call sequence start Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumStackBytes, true)); @@ -2623,8 +2620,6 @@ static SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG) // Type to extend to MVT OpVT = Op.getValueType().getSimpleVT(); - EVT VecVT = EVT::getVectorVT(*DAG.getContext(), - OpVT, (128 / OpVT.getSizeInBits())); // Type to extend from SDValue Op0 = Op.getOperand(0); diff --git a/lib/Target/MSIL/MSILWriter.cpp b/lib/Target/MSIL/MSILWriter.cpp index 949b910..b3b91da 100644 --- a/lib/Target/MSIL/MSILWriter.cpp +++ b/lib/Target/MSIL/MSILWriter.cpp @@ -556,8 +556,7 @@ void MSILWriter::printSimpleInstruction(const char* Inst, const char* Operand) { void MSILWriter::printPHICopy(const BasicBlock* Src, const BasicBlock* Dst) { - for (BasicBlock::const_iterator I = Dst->begin(), E = Dst->end(); - isa<PHINode>(I); ++I) { + for (BasicBlock::const_iterator I = Dst->begin(); isa<PHINode>(I); ++I) { const PHINode* Phi = cast<PHINode>(I); const Value* Val = Phi->getIncomingValueForBlock(Src); if (isa<UndefValue>(Val)) continue; @@ -1696,7 +1695,7 @@ bool MSILTarget::addPassesToEmitWholeFile(PassManager &PM, if (FileType != TargetMachine::AssemblyFile) return true; MSILWriter* Writer = new MSILWriter(o); PM.add(createGCLoweringPass()); - // FIXME: Handle switch trougth native IL instruction "switch" + // FIXME: Handle switch through native IL instruction "switch" PM.add(createLowerSwitchPass()); PM.add(createCFGSimplificationPass()); PM.add(new MSILModule(Writer->UsedTypes,Writer->TD)); diff --git a/lib/Target/MSP430/AsmPrinter/MSP430MCInstLower.cpp b/lib/Target/MSP430/AsmPrinter/MSP430MCInstLower.cpp index f505b23..595b7e7 100644 --- a/lib/Target/MSP430/AsmPrinter/MSP430MCInstLower.cpp +++ b/lib/Target/MSP430/AsmPrinter/MSP430MCInstLower.cpp @@ -34,7 +34,7 @@ GetGlobalAddressSymbol(const MachineOperand &MO) const { Mang.getNameWithPrefix(Name, GV, false); switch (MO.getTargetFlags()) { - default: llvm_unreachable(0 && "Unknown target flag on GV operand"); + default: llvm_unreachable("Unknown target flag on GV operand"); case 0: break; } diff --git a/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp b/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp index 4edf422..4d40769 100644 --- a/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp +++ b/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp @@ -217,7 +217,6 @@ bool MSP430DAGToDAGISel::MatchAddressBase(SDValue N, MSP430ISelAddressMode &AM) } bool MSP430DAGToDAGISel::MatchAddress(SDValue N, MSP430ISelAddressMode &AM) { - DebugLoc dl = N.getDebugLoc(); DEBUG({ errs() << "MatchAddress: "; AM.dump(); diff --git a/lib/Target/MSP430/MSP430RegisterInfo.cpp b/lib/Target/MSP430/MSP430RegisterInfo.cpp index e85c7a2..566d902 100644 --- a/lib/Target/MSP430/MSP430RegisterInfo.cpp +++ b/lib/Target/MSP430/MSP430RegisterInfo.cpp @@ -44,15 +44,31 @@ MSP430RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { MSP430::R8W, MSP430::R9W, MSP430::R10W, MSP430::R11W, 0 }; + static const unsigned CalleeSavedRegsFP[] = { + MSP430::R5W, MSP430::R6W, MSP430::R7W, + MSP430::R8W, MSP430::R9W, MSP430::R10W, MSP430::R11W, + 0 + }; static const unsigned CalleeSavedRegsIntr[] = { MSP430::FPW, MSP430::R5W, MSP430::R6W, MSP430::R7W, MSP430::R8W, MSP430::R9W, MSP430::R10W, MSP430::R11W, MSP430::R12W, MSP430::R13W, MSP430::R14W, MSP430::R15W, 0 }; + static const unsigned CalleeSavedRegsIntrFP[] = { + MSP430::R5W, MSP430::R6W, MSP430::R7W, + MSP430::R8W, MSP430::R9W, MSP430::R10W, MSP430::R11W, + 
MSP430::R12W, MSP430::R13W, MSP430::R14W, MSP430::R15W, + 0 + }; + + if (hasFP(*MF)) + return (F->getCallingConv() == CallingConv::MSP430_INTR ? + CalleeSavedRegsIntrFP : CalleeSavedRegsFP); + else + return (F->getCallingConv() == CallingConv::MSP430_INTR ? + CalleeSavedRegsIntr : CalleeSavedRegs); - return (F->getCallingConv() == CallingConv::MSP430_INTR ? - CalleeSavedRegsIntr : CalleeSavedRegs); } const TargetRegisterClass *const * @@ -65,6 +81,12 @@ MSP430RegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const { &MSP430::GR16RegClass, &MSP430::GR16RegClass, 0 }; + static const TargetRegisterClass * const CalleeSavedRegClassesFP[] = { + &MSP430::GR16RegClass, &MSP430::GR16RegClass, + &MSP430::GR16RegClass, &MSP430::GR16RegClass, + &MSP430::GR16RegClass, &MSP430::GR16RegClass, + &MSP430::GR16RegClass, 0 + }; static const TargetRegisterClass * const CalleeSavedRegClassesIntr[] = { &MSP430::GR16RegClass, &MSP430::GR16RegClass, &MSP430::GR16RegClass, &MSP430::GR16RegClass, @@ -74,9 +96,21 @@ MSP430RegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const { &MSP430::GR16RegClass, &MSP430::GR16RegClass, 0 }; + static const TargetRegisterClass * const CalleeSavedRegClassesIntrFP[] = { + &MSP430::GR16RegClass, &MSP430::GR16RegClass, + &MSP430::GR16RegClass, &MSP430::GR16RegClass, + &MSP430::GR16RegClass, &MSP430::GR16RegClass, + &MSP430::GR16RegClass, &MSP430::GR16RegClass, + &MSP430::GR16RegClass, &MSP430::GR16RegClass, + &MSP430::GR16RegClass, 0 + }; - return (F->getCallingConv() == CallingConv::MSP430_INTR ? - CalleeSavedRegClassesIntr : CalleeSavedRegClasses); + if (hasFP(*MF)) + return (F->getCallingConv() == CallingConv::MSP430_INTR ? + CalleeSavedRegClassesIntrFP : CalleeSavedRegClassesFP); + else + return (F->getCallingConv() == CallingConv::MSP430_INTR ? + CalleeSavedRegClassesIntr : CalleeSavedRegClasses); } BitVector MSP430RegisterInfo::getReservedRegs(const MachineFunction &MF) const { @@ -102,7 +136,11 @@ MSP430RegisterInfo::getPointerRegClass(unsigned Kind) const { bool MSP430RegisterInfo::hasFP(const MachineFunction &MF) const { - return NoFramePointerElim || MF.getFrameInfo()->hasVarSizedObjects(); + const MachineFrameInfo *MFI = MF.getFrameInfo(); + + return (NoFramePointerElim || + MF.getFrameInfo()->hasVarSizedObjects() || + MFI->isFrameAddressTaken()); } bool MSP430RegisterInfo::hasReservedCallFrame(MachineFunction &MF) const { @@ -232,10 +270,10 @@ MSP430RegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF) const { // Create a frame entry for the FPW register that must be saved. 
if (hasFP(MF)) { - int FrameIdx = MF.getFrameInfo()->CreateFixedObject(2, -4, true, false); + int ATTRIBUTE_UNUSED FrameIdx = + MF.getFrameInfo()->CreateFixedObject(2, -4, true, false); assert(FrameIdx == MF.getFrameInfo()->getObjectIndexBegin() && "Slot for FPW register must be last in order to be found!"); - FrameIdx = 0; } } diff --git a/lib/Target/Mips/MipsSubtarget.h b/lib/Target/Mips/MipsSubtarget.h index 1d6f87d..2d5fd22 100644 --- a/lib/Target/Mips/MipsSubtarget.h +++ b/lib/Target/Mips/MipsSubtarget.h @@ -102,21 +102,21 @@ public: bool isMips1() const { return MipsArchVersion == Mips1; } bool isLittle() const { return IsLittle; } - bool isFP64bit() const { return IsFP64bit; }; - bool isGP64bit() const { return IsGP64bit; }; - bool isGP32bit() const { return !IsGP64bit; }; - bool isSingleFloat() const { return IsSingleFloat; }; - bool isNotSingleFloat() const { return !IsSingleFloat; }; - bool hasVFPU() const { return HasVFPU; }; - bool isLinux() const { return IsLinux; }; + bool isFP64bit() const { return IsFP64bit; } + bool isGP64bit() const { return IsGP64bit; } + bool isGP32bit() const { return !IsGP64bit; } + bool isSingleFloat() const { return IsSingleFloat; } + bool isNotSingleFloat() const { return !IsSingleFloat; } + bool hasVFPU() const { return HasVFPU; } + bool isLinux() const { return IsLinux; } /// Features related to the presence of specific instructions. - bool hasSEInReg() const { return HasSEInReg; }; - bool hasCondMov() const { return HasCondMov; }; - bool hasMulDivAdd() const { return HasMulDivAdd; }; - bool hasMinMax() const { return HasMinMax; }; - bool hasSwap() const { return HasSwap; }; - bool hasBitCount() const { return HasBitCount; }; + bool hasSEInReg() const { return HasSEInReg; } + bool hasCondMov() const { return HasCondMov; } + bool hasMulDivAdd() const { return HasMulDivAdd; } + bool hasMinMax() const { return HasMinMax; } + bool hasSwap() const { return HasSwap; } + bool hasBitCount() const { return HasBitCount; } }; } // End llvm namespace diff --git a/lib/Target/Mips/MipsTargetMachine.cpp b/lib/Target/Mips/MipsTargetMachine.cpp index b3c2313..4724ff7 100644 --- a/lib/Target/Mips/MipsTargetMachine.cpp +++ b/lib/Target/Mips/MipsTargetMachine.cpp @@ -50,11 +50,6 @@ MipsTargetMachine(const Target &T, const std::string &TT, const std::string &FS, else setRelocationModel(Reloc::Static); } - - // TODO: create an option to enable long calls, like -mlong-calls, - // that would be our CodeModel::Large. It must not work with Abicall. - if (getCodeModel() == CodeModel::Default) - setCodeModel(CodeModel::Small); } MipselTargetMachine:: diff --git a/lib/Target/PIC16/AsmPrinter/PIC16AsmPrinter.cpp b/lib/Target/PIC16/AsmPrinter/PIC16AsmPrinter.cpp index e1f2587..87f5aad 100644 --- a/lib/Target/PIC16/AsmPrinter/PIC16AsmPrinter.cpp +++ b/lib/Target/PIC16/AsmPrinter/PIC16AsmPrinter.cpp @@ -170,7 +170,16 @@ void PIC16AsmPrinter::printOperand(const MachineInstr *MI, int opNum) { switch (MO.getType()) { case MachineOperand::MO_Register: - O << getRegisterName(MO.getReg()); + { + // For indirect load/store insns, the fsr name is printed as INDF. 
+ std::string RegName = getRegisterName(MO.getReg());
+ if ((MI->getOpcode() == PIC16::load_indirect) ||
+ (MI->getOpcode() == PIC16::store_indirect))
+ {
+ RegName.replace (0, 3, "INDF");
+ }
+ O << RegName;
+ }
return;
case MachineOperand::MO_Immediate:
@@ -263,10 +272,9 @@ void PIC16AsmPrinter::printLibcallDecls() {
bool PIC16AsmPrinter::doInitialization(Module &M) {
bool Result = AsmPrinter::doInitialization(M);
- // FIXME:: This is temporary solution to generate the include file.
- // The processor should be passed to llc as in input and the header file
- // should be generated accordingly.
- O << "\n\t#include P16F1937.INC\n";
+ // Every assembly file contains these standard headers.
+ O << "\n#include p16f1xxx.inc";
+ O << "\n#include stdmacros.inc";
// Set the section names for all globals.
for (Module::global_iterator I = M.global_begin(), E = M.global_end();
diff --git a/lib/Target/PIC16/PIC16ISelDAGToDAG.h b/lib/Target/PIC16/PIC16ISelDAGToDAG.h
index 3a2f6b4..d9172f2 100644
--- a/lib/Target/PIC16/PIC16ISelDAGToDAG.h
+++ b/lib/Target/PIC16/PIC16ISelDAGToDAG.h
@@ -36,7 +36,10 @@ class VISIBILITY_HIDDEN PIC16DAGToDAGISel : public SelectionDAGISel {
public:
explicit PIC16DAGToDAGISel(PIC16TargetMachine &tm) : SelectionDAGISel(tm),
- TM(tm), PIC16Lowering(*TM.getTargetLowering()) {}
+ TM(tm), PIC16Lowering(*TM.getTargetLowering()) {
+ // Keep the PIC16-specific DAGISel to use during lowering.
+ PIC16Lowering.ISel = this;
+ }
// Pass Name
virtual const char *getPassName() const {
diff --git a/lib/Target/PIC16/PIC16ISelLowering.cpp b/lib/Target/PIC16/PIC16ISelLowering.cpp
index 71c3d37..9f093e8 100644
--- a/lib/Target/PIC16/PIC16ISelLowering.cpp
+++ b/lib/Target/PIC16/PIC16ISelLowering.cpp
@@ -226,6 +226,7 @@ PIC16TargetLowering::PIC16TargetLowering(PIC16TargetMachine &TM)
setLibcallName(RTLIB::DIV_F32, getIntrinsicName(RTLIB::DIV_F32));
// Floationg point comparison
+ setLibcallName(RTLIB::O_F32, getIntrinsicName(RTLIB::O_F32));
setLibcallName(RTLIB::UO_F32, getIntrinsicName(RTLIB::UO_F32));
setLibcallName(RTLIB::OLE_F32, getIntrinsicName(RTLIB::OLE_F32));
setLibcallName(RTLIB::OGE_F32, getIntrinsicName(RTLIB::OGE_F32));
@@ -371,6 +372,11 @@ PIC16TargetLowering::getSetCCResultType(EVT ValType) const {
return MVT::i8;
}
+MVT::SimpleValueType
+PIC16TargetLowering::getCmpLibcallReturnType() const {
+ return MVT::i8;
+}
+
/// The type legalizer framework of generating legalizer can generate libcalls
/// only when the operand/result types are illegal.
/// PIC16 needs to generate libcalls even for the legal types (i8) for some ops.
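In the PIC16AsmPrinter hunk above, indirect loads and stores have to go through the INDF registers, so the printer rewrites the leading "FSR" of the register name to "INDF" before emitting it. A standalone illustration (the FSR0/FSR1 spellings are an assumption of this example, not taken from the patch):

#include <cassert>
#include <string>

// Mirror RegName.replace(0, 3, "INDF") from printOperand: for indirect
// load/store opcodes the FSR register is printed under its INDF alias.
std::string printedRegName(std::string RegName, bool IsIndirect) {
  if (IsIndirect)
    RegName.replace(0, 3, "INDF"); // "FSR0" -> "INDF0", "FSR1" -> "INDF1"
  return RegName;
}

int main() {
  assert(printedRegName("FSR0", true) == "INDF0");
  assert(printedRegName("FSR1", false) == "FSR1");
  return 0;
}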
@@ -413,7 +419,8 @@ PIC16TargetLowering::MakePIC16Libcall(PIC16ISD::PIC16Libcall Call,
LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false, false,
0, CallingConv::C, false,
/*isReturnValueUsed=*/true,
- Callee, Args, DAG, dl);
+ Callee, Args, DAG, dl,
+ DAG.GetOrdering(DAG.getEntryNode().getNode()));
return CallInfo.first;
}
@@ -924,7 +931,7 @@ SDValue PIC16TargetLowering::ExpandLoad(SDNode *N, SelectionDAG &DAG) {
} else if (VT == MVT::i16) {
BP = DAG.getNode(ISD::BUILD_PAIR, dl, VT, PICLoads[0], PICLoads[1]);
- if (MemVT == MVT::i8)
+ if ((MemVT == MVT::i8) || (MemVT == MVT::i1))
Chain = getChain(PICLoads[0]);
else
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
@@ -936,7 +943,7 @@ SDValue PIC16TargetLowering::ExpandLoad(SDNode *N, SelectionDAG &DAG) {
BPs[1] = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i16,
PICLoads[2], PICLoads[3]);
BP = DAG.getNode(ISD::BUILD_PAIR, dl, VT, BPs[0], BPs[1]);
- if (MemVT == MVT::i8)
+ if ((MemVT == MVT::i8) || (MemVT == MVT::i1))
Chain = getChain(PICLoads[0]);
else if (MemVT == MVT::i16)
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
@@ -1270,7 +1277,6 @@ PIC16TargetLowering::LowerReturn(SDValue Chain,
std::string FuncName = F->getName();
const char *tmpName = createESName(PAN::getFrameLabel(FuncName));
- SDVTList VTs = DAG.getVTList (MVT::i8, MVT::Other);
SDValue ES = DAG.getTargetExternalSymbol(tmpName, MVT::i8);
SDValue BS = DAG.getConstant(1, MVT::i8);
SDValue RetVal;
@@ -1482,7 +1488,8 @@ bool PIC16TargetLowering::isDirectLoad(const SDValue Op) {
// operand no. of the operand to be converted in 'MemOp'. Remember, PIC16 has
// no instruction that can operation on two registers. Most insns take
// one register and one memory operand (addwf) / Constant (addlw).
-bool PIC16TargetLowering::NeedToConvertToMemOp(SDValue Op, unsigned &MemOp) {
+bool PIC16TargetLowering::NeedToConvertToMemOp(SDValue Op, unsigned &MemOp,
+ SelectionDAG &DAG) {
// If one of the operand is a constant, return false.
if (Op.getOperand(0).getOpcode() == ISD::Constant ||
Op.getOperand(1).getOpcode() == ISD::Constant)
@@ -1491,11 +1498,33 @@ bool PIC16TargetLowering::NeedToConvertToMemOp(SDValue Op, unsigned &MemOp) {
// Return false if one of the operands is already a direct
// load and that operand has only one use.
if (isDirectLoad(Op.getOperand(0))) {
- if (Op.getOperand(0).hasOneUse())
- return false;
- else
- MemOp = 0;
+ if (Op.getOperand(0).hasOneUse()) {
+ // Legal and profitable folding check uses the NodeId of DAG nodes.
+ // This NodeId is assigned by topological order. Therefore first
+ // assign topological order then perform legal and profitable check.
+ // Note: though this ordering is done before beginning with legalization,
+ // newly added nodes during the legalization process have NodeId=-1 (NewNode),
+ // therefore before performing any check proper ordering of the node is
+ // required.
+ DAG.AssignTopologicalOrder();
+
+ // Direct load operands are folded in binary operations. But before folding
+ // verify if this folding is legal. Fold only if it is legal otherwise
+ // convert this direct load to a separate memory operation.
+ if(ISel->IsLegalAndProfitableToFold(Op.getOperand(0).getNode(),
+ Op.getNode(), Op.getNode()))
+ return false;
+ else
+ MemOp = 0;
+ }
}
+
+ // For operations that are non-commutative there is no need to check
+ // the right operand because folding the right operand may result in
+ // an incorrect operation.
+ if (!
SelectionDAG::isCommutativeBinOp(Op.getOpcode())) + return true; + if (isDirectLoad(Op.getOperand(1))) { if (Op.getOperand(1).hasOneUse()) return false; @@ -1514,7 +1543,7 @@ SDValue PIC16TargetLowering::LowerBinOp(SDValue Op, SelectionDAG &DAG) { assert (Op.getValueType() == MVT::i8 && "illegal Op to lower"); unsigned MemOp = 1; - if (NeedToConvertToMemOp(Op, MemOp)) { + if (NeedToConvertToMemOp(Op, MemOp, DAG)) { // Put one value on stack. SDValue NewVal = ConvertToMemOperand (Op.getOperand(MemOp), DAG, dl); @@ -1533,7 +1562,7 @@ SDValue PIC16TargetLowering::LowerADD(SDValue Op, SelectionDAG &DAG) { assert (Op.getValueType() == MVT::i8 && "illegal add to lower"); DebugLoc dl = Op.getDebugLoc(); unsigned MemOp = 1; - if (NeedToConvertToMemOp(Op, MemOp)) { + if (NeedToConvertToMemOp(Op, MemOp, DAG)) { // Put one value on stack. SDValue NewVal = ConvertToMemOperand (Op.getOperand(MemOp), DAG, dl); @@ -1561,30 +1590,47 @@ SDValue PIC16TargetLowering::LowerSUB(SDValue Op, SelectionDAG &DAG) { DebugLoc dl = Op.getDebugLoc(); // We should have handled larger operands in type legalizer itself. assert (Op.getValueType() == MVT::i8 && "illegal sub to lower"); + unsigned MemOp = 1; + SDVTList Tys = DAG.getVTList(MVT::i8, MVT::Flag); - // Nothing to do if the first operand is already a direct load and it has - // only one use. - if (isDirectLoad(Op.getOperand(0)) && Op.getOperand(0).hasOneUse()) - return Op; - - // Put first operand on stack. - SDValue NewVal = ConvertToMemOperand (Op.getOperand(0), DAG, dl); + // Since we don't have an instruction for X - c , + // we can change it to X + (-c) + ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); + if (C && (Op.getOpcode() == ISD::SUB)) + { + return DAG.getNode(ISD::ADD, + dl, MVT::i8, Op.getOperand(0), + DAG.getConstant(0-(C->getZExtValue()), MVT::i8)); + } - SDVTList Tys = DAG.getVTList(MVT::i8, MVT::Flag); - switch (Op.getOpcode()) { - default: - assert (0 && "Opcode unknown."); - case ISD::SUBE: - return DAG.getNode(Op.getOpcode(), dl, Tys, NewVal, Op.getOperand(1), - Op.getOperand(2)); - break; - case ISD::SUBC: - return DAG.getNode(Op.getOpcode(), dl, Tys, NewVal, Op.getOperand(1)); - break; - case ISD::SUB: - return DAG.getNode(Op.getOpcode(), dl, MVT::i8, NewVal, Op.getOperand(1)); - break; - } + if (NeedToConvertToMemOp(Op, MemOp, DAG) || + (isDirectLoad(Op.getOperand(1)) && + (!isDirectLoad(Op.getOperand(0))) && + (Op.getOperand(0).getOpcode() != ISD::Constant))) + { + // Put first operand on stack. 
+ SDValue NewVal = ConvertToMemOperand (Op.getOperand(0), DAG, dl); + + switch (Op.getOpcode()) { + default: + assert (0 && "Opcode unknown."); + case ISD::SUBE: + return DAG.getNode(Op.getOpcode(), + dl, Tys, NewVal, Op.getOperand(1), + Op.getOperand(2)); + break; + case ISD::SUBC: + return DAG.getNode(Op.getOpcode(), + dl, Tys, NewVal, Op.getOperand(1)); + break; + case ISD::SUB: + return DAG.getNode(Op.getOpcode(), + dl, MVT::i8, NewVal, Op.getOperand(1)); + break; + } + } + else + return Op; } void PIC16TargetLowering::InitReservedFrameCount(const Function *F) { diff --git a/lib/Target/PIC16/PIC16ISelLowering.h b/lib/Target/PIC16/PIC16ISelLowering.h index 286ed24..afdd4b4 100644 --- a/lib/Target/PIC16/PIC16ISelLowering.h +++ b/lib/Target/PIC16/PIC16ISelLowering.h @@ -18,6 +18,7 @@ #include "PIC16.h" #include "PIC16Subtarget.h" #include "llvm/CodeGen/SelectionDAG.h" +#include "llvm/CodeGen/SelectionDAGISel.h" #include "llvm/Target/TargetLowering.h" #include <map> @@ -83,6 +84,7 @@ namespace llvm { virtual const char *getTargetNodeName(unsigned Opcode) const; /// getSetCCResultType - Return the ISD::SETCC ValueType virtual MVT::SimpleValueType getSetCCResultType(EVT ValType) const; + virtual MVT::SimpleValueType getCmpLibcallReturnType() const; SDValue LowerShift(SDValue Op, SelectionDAG &DAG); SDValue LowerMUL(SDValue Op, SelectionDAG &DAG); SDValue LowerADD(SDValue Op, SelectionDAG &DAG); @@ -216,7 +218,9 @@ namespace llvm { // This function checks if we need to put an operand of an operation on // stack and generate a load or not. - bool NeedToConvertToMemOp(SDValue Op, unsigned &MemOp); + // DAG parameter is required to access DAG information during + // analysis. + bool NeedToConvertToMemOp(SDValue Op, unsigned &MemOp, SelectionDAG &DAG); /// Subtarget - Keep a pointer to the PIC16Subtarget around so that we can /// make the right decision when generating code for different targets. @@ -239,6 +243,11 @@ namespace llvm { // Check if operation has a direct load operand. inline bool isDirectLoad(const SDValue Op); + public: + // Keep a pointer to SelectionDAGISel to access its public + // interface (It is required during legalization) + SelectionDAGISel *ISel; + private: // The frameindexes generated for spill/reload are stack based. // This maps maintain zero based indexes for these FIs. diff --git a/lib/Target/PIC16/PIC16InstrInfo.td b/lib/Target/PIC16/PIC16InstrInfo.td index 5eec6c4..24df251 100644 --- a/lib/Target/PIC16/PIC16InstrInfo.td +++ b/lib/Target/PIC16/PIC16InstrInfo.td @@ -151,7 +151,7 @@ let mayStore = 1 in class BinOpWF<bits<6> OpCode, string OpcStr, SDNode OpNode>: ByteFormat<OpCode, (outs), (ins GPR:$src, i8imm:$offset, i8mem:$ptrlo, i8imm:$ptrhi), - !strconcat(OpcStr, " $ptrlo + $offset"), + !strconcat(OpcStr, " $ptrlo + $offset, F"), [(PIC16Store (OpNode GPR:$src, (PIC16Load diraddr:$ptrlo, (i8 imm:$ptrhi), (i8 imm:$offset))), @@ -161,7 +161,7 @@ class BinOpWF<bits<6> OpCode, string OpcStr, SDNode OpNode>: // W = W Op L : Do Op of L with W and place result in W. 
let isTwoAddress = 1 in -class BinOpLW<bits<6> opcode, string OpcStr, SDNode OpNode> : +class BinOpWL<bits<6> opcode, string OpcStr, SDNode OpNode> : LiteralFormat<opcode, (outs GPR:$dst), (ins GPR:$src, i8imm:$literal), !strconcat(OpcStr, " $literal"), @@ -404,33 +404,46 @@ def subwf_cc: SUBWF<0, "subwf", PIC16Subcc>; // addlw let Defs = [STATUS] in { -def addlw_1 : BinOpLW<0, "addlw", add>; -def addlw_2 : BinOpLW<0, "addlw", addc>; +def addlw_1 : BinOpWL<0, "addlw", add>; +def addlw_2 : BinOpWL<0, "addlw", addc>; let Uses = [STATUS] in -def addlwc : BinOpLW<0, "addlwc", adde>; // With Carry. (Assembler macro). +def addlwc : BinOpWL<0, "addlwc", adde>; // With Carry. (Assembler macro). // bitwise operations involving a literal and w. -def andlw : BinOpLW<0, "andlw", and>; -def xorlw : BinOpLW<0, "xorlw", xor>; -def orlw : BinOpLW<0, "iorlw", or>; +def andlw : BinOpWL<0, "andlw", and>; +def xorlw : BinOpWL<0, "xorlw", xor>; +def orlw : BinOpWL<0, "iorlw", or>; } // sublw // W = C - W ; sub W from literal. (Without borrow). let isTwoAddress = 1 in -class SUBLW<bits<6> opcode, SDNode OpNode> : +class SUBLW<bits<6> opcode, string OpcStr, SDNode OpNode> : LiteralFormat<opcode, (outs GPR:$dst), (ins GPR:$src, i8imm:$literal), - "sublw $literal", + !strconcat(OpcStr, " $literal"), [(set GPR:$dst, (OpNode (i8 imm:$literal), GPR:$src))]>; +// subwl +// W = W - C ; sub literal from W (Without borrow). +let isTwoAddress = 1 in +class SUBWL<bits<6> opcode, string OpcStr, SDNode OpNode> : + LiteralFormat<opcode, (outs GPR:$dst), + (ins GPR:$src, i8imm:$literal), + !strconcat(OpcStr, " $literal"), + [(set GPR:$dst, (OpNode GPR:$src, (i8 imm:$literal)))]>; let Defs = [STATUS] in { -def sublw_1 : SUBLW<0, sub>; -def sublw_2 : SUBLW<0, subc>; +def sublw_1 : SUBLW<0, "sublw", sub>; +def sublw_2 : SUBLW<0, "sublw", subc>; +def sublw_3 : SUBLW<0, "sublwb", sube>; // With borrow (Assembler macro). + +def sublw_4 : SUBWL<0, "subwl", sub>; // Assembler macro replace with addlw +def sublw_5 : SUBWL<0, "subwl", subc>; // Assembler macro replace with addlw +def sublw_6 : SUBWL<0, "subwlb", sube>; // With borrow (Assembler macro). } let Defs = [STATUS], isTerminator = 1 in -def sublw_cc : SUBLW<0, PIC16Subcc>; +def sublw_cc : SUBLW<0, "sublw", PIC16Subcc>; // Call instruction. let isCall = 1, diff --git a/lib/Target/PowerPC/PPCFrameInfo.h b/lib/Target/PowerPC/PPCFrameInfo.h index 73d30bf..7587b03 100644 --- a/lib/Target/PowerPC/PPCFrameInfo.h +++ b/lib/Target/PowerPC/PPCFrameInfo.h @@ -50,7 +50,7 @@ public: return isPPC64 ? -8U : -4U; // SVR4 ABI: First slot in the general register save area. - return -4U; + return isPPC64 ? -8U : -4U; } /// getLinkageSize - Return the size of the PowerPC ABI linkage area. 
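
For reference, the sub-by-constant rewrite that the PIC16 LowerSUB change above
introduces boils down to the sketch below. The helper name FoldSubOfConstant is
illustrative only; the calls mirror the SelectionDAG API used throughout this
patch.

// Fold X - c into X + (-c), so the backend can select its add-literal
// instruction (addlw) instead of needing a subtract-literal-from-W form.
// The negation wraps modulo 256, which is exactly what the i8 add expects.
static SDValue FoldSubOfConstant(SDValue Op, SelectionDAG &DAG, DebugLoc dl) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (C && Op.getOpcode() == ISD::SUB)
    return DAG.getNode(ISD::ADD, dl, MVT::i8, Op.getOperand(0),
                       DAG.getConstant(0 - C->getZExtValue(), MVT::i8));
  return Op; // SUBE/SUBC consume a borrow and keep the memory-operand path.
}
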
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp index 30a7861..8248c94 100644 --- a/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/lib/Target/PowerPC/PPCISelLowering.cpp @@ -419,6 +419,9 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const { case PPCISD::Hi: return "PPCISD::Hi"; case PPCISD::Lo: return "PPCISD::Lo"; case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY"; + case PPCISD::TOC_RESTORE: return "PPCISD::TOC_RESTORE"; + case PPCISD::LOAD: return "PPCISD::LOAD"; + case PPCISD::LOAD_TOC: return "PPCISD::LOAD_TOC"; case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC"; case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg"; case PPCISD::SRL: return "PPCISD::SRL"; @@ -1330,7 +1333,7 @@ SDValue PPCTargetLowering::LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG) { false, false, false, false, 0, CallingConv::C, false, /*isReturnValueUsed=*/true, DAG.getExternalSymbol("__trampoline_setup", PtrVT), - Args, DAG, dl); + Args, DAG, dl, DAG.GetOrdering(Chain.getNode())); SDValue Ops[] = { CallResult.first, CallResult.second }; @@ -2428,7 +2431,7 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain, DebugLoc dl, int SPDiff, bool isTailCall, SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SmallVector<SDValue, 8> &Ops, std::vector<EVT> &NodeTys, - bool isSVR4ABI) { + bool isPPC64, bool isSVR4ABI) { EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); NodeTys.push_back(MVT::Other); // Returns a chain NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. @@ -2449,6 +2452,74 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair // to do the call, we can't use PPCISD::CALL. SDValue MTCTROps[] = {Chain, Callee, InFlag}; + + if (isSVR4ABI && isPPC64) { + // Function pointers in the 64-bit SVR4 ABI do not point to the function + // entry point, but to the function descriptor (the function entry point + // address is part of the function descriptor though). + // The function descriptor is a three doubleword structure with the + // following fields: function entry point, TOC base address and + // environment pointer. + // Thus for a call through a function pointer, the following actions need + // to be performed: + // 1. Save the TOC of the caller in the TOC save area of its stack + // frame (this is done in LowerCall_Darwin()). + // 2. Load the address of the function entry point from the function + // descriptor. + // 3. Load the TOC of the callee from the function descriptor into r2. + // 4. Load the environment pointer from the function descriptor into + // r11. + // 5. Branch to the function entry point address. + // 6. On return of the callee, the TOC of the caller needs to be + // restored (this is done in FinishCall()). + // + // All those operations are flagged together to ensure that no other + // operations can be scheduled in between. E.g. without flagging the + // operations together, a TOC access in the caller could be scheduled + // between the load of the callee TOC and the branch to the callee, which + // results in the TOC access going through the TOC of the callee instead + // of going through the TOC of the caller, which leads to incorrect code. + + // Load the address of the function entry point from the function + // descriptor. 
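+    // (Descriptor layout: the entry point is at offset 0, the TOC base at
+    // offset 8, and the environment pointer at offset 16, so this first
+    // load dereferences the callee pointer with no offset.)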
+ SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other, MVT::Flag); + SDValue LoadFuncPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, MTCTROps, + InFlag.getNode() ? 3 : 2); + Chain = LoadFuncPtr.getValue(1); + InFlag = LoadFuncPtr.getValue(2); + + // Load environment pointer into r11. + // Offset of the environment pointer within the function descriptor. + SDValue PtrOff = DAG.getIntPtrConstant(16); + + SDValue AddPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Callee, PtrOff); + SDValue LoadEnvPtr = DAG.getNode(PPCISD::LOAD, dl, VTs, Chain, AddPtr, + InFlag); + Chain = LoadEnvPtr.getValue(1); + InFlag = LoadEnvPtr.getValue(2); + + SDValue EnvVal = DAG.getCopyToReg(Chain, dl, PPC::X11, LoadEnvPtr, + InFlag); + Chain = EnvVal.getValue(0); + InFlag = EnvVal.getValue(1); + + // Load TOC of the callee into r2. We are using a target-specific load + // with r2 hard coded, because the result of a target-independent load + // would never go directly into r2, since r2 is a reserved register (which + // prevents the register allocator from allocating it), resulting in an + // additional register being allocated and an unnecessary move instruction + // being generated. + VTs = DAG.getVTList(MVT::Other, MVT::Flag); + SDValue LoadTOCPtr = DAG.getNode(PPCISD::LOAD_TOC, dl, VTs, Chain, + Callee, InFlag); + Chain = LoadTOCPtr.getValue(0); + InFlag = LoadTOCPtr.getValue(1); + + MTCTROps[0] = Chain; + MTCTROps[1] = LoadFuncPtr; + MTCTROps[2] = InFlag; + } + Chain = DAG.getNode(PPCISD::MTCTR, dl, NodeTys, MTCTROps, 2 + (InFlag.getNode() != 0)); InFlag = Chain.getValue(1); @@ -2523,6 +2594,7 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl, SmallVector<SDValue, 8> Ops; unsigned CallOpc = PrepareCall(DAG, Callee, InFlag, Chain, dl, SPDiff, isTailCall, RegsToPass, Ops, NodeTys, + PPCSubTarget.isPPC64(), PPCSubTarget.isSVR4ABI()); // When performing tail call optimization the callee pops its arguments off @@ -2569,8 +2641,23 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl, // stack frame. If caller and callee belong to the same module (and have the // same TOC), the NOP will remain unchanged. if (!isTailCall && PPCSubTarget.isSVR4ABI()&& PPCSubTarget.isPPC64()) { - // Insert NOP. - InFlag = DAG.getNode(PPCISD::NOP, dl, MVT::Flag, InFlag); + SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Flag); + if (CallOpc == PPCISD::BCTRL_SVR4) { + // This is a call through a function pointer. + // Restore the caller TOC from the save area into R2. + // See PrepareCall() for more information about calls through function + // pointers in the 64-bit SVR4 ABI. + // We are using a target-specific load with r2 hard coded, because the + // result of a target-independent load would never go directly into r2, + // since r2 is a reserved register (which prevents the register allocator + // from allocating it), resulting in an additional register being + // allocated and an unnecessary move instruction being generated. + Chain = DAG.getNode(PPCISD::TOC_RESTORE, dl, VTs, Chain, InFlag); + InFlag = Chain.getValue(1); + } else { + // Otherwise insert NOP. + InFlag = DAG.getNode(PPCISD::NOP, dl, MVT::Flag, InFlag); + } } Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), @@ -3123,6 +3210,21 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee, Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &MemOpChains[0], MemOpChains.size()); + // Check if this is an indirect call (MTCTR/BCTRL). 
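+  // The call is indirect when the callee is neither a global address nor an
+  // external symbol nor a BLA-compatible immediate, which is what the
+  // condition below tests.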
+ // See PrepareCall() for more information about calls through function + // pointers in the 64-bit SVR4 ABI. + if (!isTailCall && isPPC64 && PPCSubTarget.isSVR4ABI() && + !dyn_cast<GlobalAddressSDNode>(Callee) && + !dyn_cast<ExternalSymbolSDNode>(Callee) && + !isBLACompatibleAddress(Callee, DAG)) { + // Load r2 into a virtual register and store it to the TOC save area. + SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64); + // TOC save area offset. + SDValue PtrOff = DAG.getIntPtrConstant(40); + SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); + Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr, NULL, 0); + } + // Build a sequence of copy-to-reg nodes chained together with token chain // and flag operands which copy the outgoing args into the appropriate regs. SDValue InFlag; diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h index e45b261..cf81395 100644 --- a/lib/Target/PowerPC/PPCISelLowering.h +++ b/lib/Target/PowerPC/PPCISelLowering.h @@ -61,6 +61,21 @@ namespace llvm { TOC_ENTRY, + /// The following three target-specific nodes are used for calls through + /// function pointers in the 64-bit SVR4 ABI. + + /// Restore the TOC from the TOC save area of the current stack frame. + /// This is basically a hard coded load instruction which additionally + /// takes/produces a flag. + TOC_RESTORE, + + /// Like a regular LOAD but additionally taking/producing a flag. + LOAD, + + /// LOAD into r2 (also taking/producing a flag). Like TOC_RESTORE, this is + /// a hard coded load instruction. + LOAD_TOC, + /// OPRC, CHAIN = DYNALLOC(CHAIN, NEGSIZE, FRAME_INDEX) /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to /// compute an allocation on the stack. diff --git a/lib/Target/PowerPC/PPCInstr64Bit.td b/lib/Target/PowerPC/PPCInstr64Bit.td index ebdc58b..219efb9 100644 --- a/lib/Target/PowerPC/PPCInstr64Bit.td +++ b/lib/Target/PowerPC/PPCInstr64Bit.td @@ -559,6 +559,14 @@ def LDtoc: DSForm_1<58, 0, (outs G8RC:$rD), (ins tocentry:$disp, G8RC:$reg), "ld $rD, $disp($reg)", LdStLD, [(set G8RC:$rD, (PPCtoc_entry tglobaladdr:$disp, G8RC:$reg))]>, isPPC64; +let RST = 2, DS = 8 in +def LDinto_toc: DSForm_1<58, 0, (outs), (ins G8RC:$reg), + "ld 2, 8($reg)", LdStLD, + [(PPCload_toc G8RC:$reg)]>, isPPC64; +let RST = 2, DS = 40, RA = 1 in +def LDtoc_restore : DSForm_1<58, 0, (outs), (ins), + "ld 2, 40(1)", LdStLD, + []>, isPPC64; def LDX : XForm_1<31, 21, (outs G8RC:$rD), (ins memrr:$src), "ldx $rD, $src", LdStLD, [(set G8RC:$rD, (load xaddr:$src))]>, isPPC64; @@ -571,6 +579,13 @@ def LDU : DSForm_1<58, 1, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memrix:$addr } +def : Pat<(PPCtoc_restore), + (LDtoc_restore)>; +def : Pat<(PPCload ixaddr:$src), + (LD ixaddr:$src)>; +def : Pat<(PPCload xaddr:$src), + (LDX xaddr:$src)>; + let PPC970_Unit = 2 in { // Truncating stores. 
def STB8 : DForm_1<38, (outs), (ins G8RC:$rS, memri:$src), diff --git a/lib/Target/PowerPC/PPCInstrInfo.td b/lib/Target/PowerPC/PPCInstrInfo.td index 2b3f80d..8fe151a 100644 --- a/lib/Target/PowerPC/PPCInstrInfo.td +++ b/lib/Target/PowerPC/PPCInstrInfo.td @@ -115,6 +115,12 @@ def PPCcall_Darwin : SDNode<"PPCISD::CALL_Darwin", SDT_PPCCall, def PPCcall_SVR4 : SDNode<"PPCISD::CALL_SVR4", SDT_PPCCall, [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>; def PPCnop : SDNode<"PPCISD::NOP", SDT_PPCnop, [SDNPInFlag, SDNPOutFlag]>; +def PPCload : SDNode<"PPCISD::LOAD", SDTypeProfile<1, 1, []>, + [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>; +def PPCload_toc : SDNode<"PPCISD::LOAD_TOC", SDTypeProfile<0, 1, []>, + [SDNPHasChain, SDNPInFlag, SDNPOutFlag]>; +def PPCtoc_restore : SDNode<"PPCISD::TOC_RESTORE", SDTypeProfile<0, 0, []>, + [SDNPHasChain, SDNPInFlag, SDNPOutFlag]>; def PPCmtctr : SDNode<"PPCISD::MTCTR", SDT_PPCCall, [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>; def PPCbctrl_Darwin : SDNode<"PPCISD::BCTRL_Darwin", SDTNone, diff --git a/lib/Target/PowerPC/PPCJITInfo.cpp b/lib/Target/PowerPC/PPCJITInfo.cpp index c679bcd..be6e51e 100644 --- a/lib/Target/PowerPC/PPCJITInfo.cpp +++ b/lib/Target/PowerPC/PPCJITInfo.cpp @@ -339,7 +339,6 @@ extern "C" void sys_icache_invalidate(const void *Addr, size_t len); void *PPCJITInfo::emitFunctionStub(const Function* F, void *Fn, JITCodeEmitter &JCE) { - MachineCodeEmitter::BufferState BS; // If this is just a call to an external function, emit a branch instead of a // call. The code is the same except for one bit of the last instruction. if (Fn != (void*)(intptr_t)PPC32CompilationCallback && diff --git a/lib/Target/README.txt b/lib/Target/README.txt index e1772c2..a6e05fa 100644 --- a/lib/Target/README.txt +++ b/lib/Target/README.txt @@ -106,7 +106,17 @@ Shrink: (setlt (loadi32 P), 0) -> (setlt (loadi8 Phi), 0) //===---------------------------------------------------------------------===// -Reassociate should turn: X*X*X*X -> t=(X*X) (t*t) to eliminate a multiply. +Reassociate should turn things like: + +int factorial(int X) { + return X*X*X*X*X*X*X*X; +} + +into llvm.powi calls, allowing the code generator to produce balanced +multiplication trees. + +First, the intrinsic needs to be extended to support integers, and second the +code generator needs to be enhanced to lower these to multiplication trees. //===---------------------------------------------------------------------===// @@ -119,7 +129,32 @@ int foo(int z, int n) { return bar(z, n) + bar(2*z, 2*n); } -Reassociate should handle the example in GCC PR16157. +This is blocked on not handling X*X*X -> powi(X, 3) (see note above). The issue +is that we end up getting t = 2*X s = t*t and don't turn this into 4*X*X, +which is the same number of multiplies and is canonical, because the 2*X has +multiple uses. Here's a simple example: + +define i32 @test15(i32 %X1) { + %B = mul i32 %X1, 47 ; X1*47 + %C = mul i32 %B, %B + ret i32 %C +} + + +//===---------------------------------------------------------------------===// + +Reassociate should handle the example in GCC PR16157: + +extern int a0, a1, a2, a3, a4; extern int b0, b1, b2, b3, b4; +void f () { /* this can be optimized to four additions... */ + b4 = a4 + a3 + a2 + a1 + a0; + b3 = a3 + a2 + a1 + a0; + b2 = a2 + a1 + a0; + b1 = a1 + a0; +} + +This requires reassociating to forms of expressions that are already available, +something that reassoc doesn't think about yet. 
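+
+The already-available form would be, e.g. (four additions instead of the
+original ten):
+
+void f () {
+  b1 = a1 + a0;
+  b2 = b1 + a2;
+  b3 = b2 + a3;
+  b4 = b3 + a4;
+}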
//===---------------------------------------------------------------------===// @@ -721,47 +756,6 @@ be done safely if "b" isn't modified between the strlen and memcpy of course. //===---------------------------------------------------------------------===// -Reassociate should turn things like: - -int factorial(int X) { - return X*X*X*X*X*X*X*X; -} - -into llvm.powi calls, allowing the code generator to produce balanced -multiplication trees. - -//===---------------------------------------------------------------------===// - -We generate a horrible libcall for llvm.powi. For example, we compile: - -#include <cmath> -double f(double a) { return std::pow(a, 4); } - -into: - -__Z1fd: - subl $12, %esp - movsd 16(%esp), %xmm0 - movsd %xmm0, (%esp) - movl $4, 8(%esp) - call L___powidf2$stub - addl $12, %esp - ret - -GCC produces: - -__Z1fd: - subl $12, %esp - movsd 16(%esp), %xmm0 - mulsd %xmm0, %xmm0 - mulsd %xmm0, %xmm0 - movsd %xmm0, (%esp) - fldl (%esp) - addl $12, %esp - ret - -//===---------------------------------------------------------------------===// - We compile this program: (from GCC PR11680) http://gcc.gnu.org/bugzilla/attachment.cgi?id=4487 diff --git a/lib/Target/Target.cpp b/lib/Target/Target.cpp index cc6be9f..cddf49e 100644 --- a/lib/Target/Target.cpp +++ b/lib/Target/Target.cpp @@ -15,6 +15,7 @@ #include "llvm-c/Target.h" #include "llvm/PassManager.h" #include "llvm/Target/TargetData.h" +#include "llvm/LLVMContext.h" #include <cstring> using namespace llvm; diff --git a/lib/Target/TargetData.cpp b/lib/Target/TargetData.cpp index 9434a19..ba3cc9d 100644 --- a/lib/Target/TargetData.cpp +++ b/lib/Target/TargetData.cpp @@ -321,18 +321,24 @@ class StructLayoutMap : public AbstractTypeUser { typedef DenseMap<const StructType*, StructLayout*> LayoutInfoTy; LayoutInfoTy LayoutInfo; + void RemoveEntry(LayoutInfoTy::iterator I, bool WasAbstract) { + I->second->~StructLayout(); + free(I->second); + if (WasAbstract) + I->first->removeAbstractTypeUser(this); + LayoutInfo.erase(I); + } + + /// refineAbstractType - The callback method invoked when an abstract type is /// resolved to another type. An object must override this method to update /// its internal state to reference NewType instead of OldType. /// virtual void refineAbstractType(const DerivedType *OldTy, const Type *) { - const StructType *STy = cast<const StructType>(OldTy); - LayoutInfoTy::iterator Iter = LayoutInfo.find(STy); - Iter->second->~StructLayout(); - free(Iter->second); - LayoutInfo.erase(Iter); - OldTy->removeAbstractTypeUser(this); + LayoutInfoTy::iterator I = LayoutInfo.find(cast<const StructType>(OldTy)); + assert(I != LayoutInfo.end() && "Using type but not in map?"); + RemoveEntry(I, true); } /// typeBecameConcrete - The other case which AbstractTypeUsers must be aware @@ -341,12 +347,9 @@ class StructLayoutMap : public AbstractTypeUser { /// This method notifies ATU's when this occurs for a type. 
/// virtual void typeBecameConcrete(const DerivedType *AbsTy) { - const StructType *STy = cast<const StructType>(AbsTy); - LayoutInfoTy::iterator Iter = LayoutInfo.find(STy); - Iter->second->~StructLayout(); - free(Iter->second); - LayoutInfo.erase(Iter); - AbsTy->removeAbstractTypeUser(this); + LayoutInfoTy::iterator I = LayoutInfo.find(cast<const StructType>(AbsTy)); + assert(I != LayoutInfo.end() && "Using type but not in map?"); + RemoveEntry(I, true); } public: @@ -368,13 +371,7 @@ public: void InvalidateEntry(const StructType *Ty) { LayoutInfoTy::iterator I = LayoutInfo.find(Ty); if (I == LayoutInfo.end()) return; - - I->second->~StructLayout(); - free(I->second); - LayoutInfo.erase(I); - - if (Ty->isAbstract()) - Ty->removeAbstractTypeUser(this); + RemoveEntry(I, Ty->isAbstract()); } StructLayout *&operator[](const StructType *STy) { @@ -424,8 +421,7 @@ const StructLayout *TargetData::getStructLayout(const StructType *Ty) const { void TargetData::InvalidateStructLayoutInfo(const StructType *Ty) const { if (!LayoutMap) return; // No cache. - StructLayoutMap *STM = static_cast<StructLayoutMap*>(LayoutMap); - STM->InvalidateEntry(Ty); + static_cast<StructLayoutMap*>(LayoutMap)->InvalidateEntry(Ty); } std::string TargetData::getStringRepresentation() const { diff --git a/lib/Target/TargetMachine.cpp b/lib/Target/TargetMachine.cpp index fec59b5..46bc9a3 100644 --- a/lib/Target/TargetMachine.cpp +++ b/lib/Target/TargetMachine.cpp @@ -46,6 +46,7 @@ namespace llvm { bool DisableJumpTables; bool StrongPHIElim; bool AsmVerbosityDefault(false); + bool DisableScheduling; } static cl::opt<bool, true> @@ -197,6 +198,11 @@ EnableStrongPHIElim(cl::Hidden, "strong-phi-elim", cl::desc("Use strong PHI elimination."), cl::location(StrongPHIElim), cl::init(false)); +static cl::opt<bool, true> +DisableInstScheduling("disable-scheduling", + cl::desc("Disable instruction scheduling"), + cl::location(DisableScheduling), + cl::init(false)); //--------------------------------------------------------------------------- // TargetMachine Class diff --git a/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp b/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp index 8ec5b62..c74b97a 100644 --- a/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp +++ b/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp @@ -45,12 +45,14 @@ void X86ATTInstPrinter::printSSECC(const MCInst *MI, unsigned Op) { } /// print_pcrel_imm - This is used to print an immediate value that ends up -/// being encoded as a pc-relative value. These print slightly differently, for -/// example, a $ is not emitted. +/// being encoded as a pc-relative value (e.g. for jumps and calls). These +/// print slightly differently than normal immediates. For example, a $ is not +/// emitted. void X86ATTInstPrinter::print_pcrel_imm(const MCInst *MI, unsigned OpNo) { const MCOperand &Op = MI->getOperand(OpNo); if (Op.isImm()) - O << Op.getImm(); + // Print this as a signed 32-bit value. + O << (int)Op.getImm(); else { assert(Op.isExpr() && "unknown pcrel immediate operand"); Op.getExpr()->print(O, &MAI); diff --git a/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp b/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp index 38c0c28..1015b69 100644 --- a/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp +++ b/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp @@ -355,10 +355,6 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const { case X86::LEA64_32r: // Handle 'subreg rewriting' for the lea64_32mem operand. 
lower_lea64_32mem(&OutMI, 1); break; - case X86::MOV16r0: - OutMI.setOpcode(X86::MOV32r0); - lower_subreg32(&OutMI, 0); - break; case X86::MOVZX16rr8: OutMI.setOpcode(X86::MOVZX32rr8); lower_subreg32(&OutMI, 0); diff --git a/lib/Target/X86/CMakeLists.txt b/lib/Target/X86/CMakeLists.txt index 3ad65fb..4186fec 100644 --- a/lib/Target/X86/CMakeLists.txt +++ b/lib/Target/X86/CMakeLists.txt @@ -3,6 +3,7 @@ set(LLVM_TARGET_DEFINITIONS X86.td) tablegen(X86GenRegisterInfo.h.inc -gen-register-desc-header) tablegen(X86GenRegisterNames.inc -gen-register-enums) tablegen(X86GenRegisterInfo.inc -gen-register-desc) +tablegen(X86GenDisassemblerTables.inc -gen-disassembler) tablegen(X86GenInstrNames.inc -gen-instr-enums) tablegen(X86GenInstrInfo.inc -gen-instr-desc) tablegen(X86GenAsmWriter.inc -gen-asm-writer) diff --git a/lib/Target/X86/Disassembler/CMakeLists.txt b/lib/Target/X86/Disassembler/CMakeLists.txt index b329e89..2a83a9c 100644 --- a/lib/Target/X86/Disassembler/CMakeLists.txt +++ b/lib/Target/X86/Disassembler/CMakeLists.txt @@ -2,5 +2,6 @@ include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/ add_llvm_library(LLVMX86Disassembler X86Disassembler.cpp + X86DisassemblerDecoder.c ) add_dependencies(LLVMX86Disassembler X86CodeGenTable_gen) diff --git a/lib/Target/X86/Disassembler/X86Disassembler.cpp b/lib/Target/X86/Disassembler/X86Disassembler.cpp index 2ebbc9b..a316860 100644 --- a/lib/Target/X86/Disassembler/X86Disassembler.cpp +++ b/lib/Target/X86/Disassembler/X86Disassembler.cpp @@ -6,18 +6,465 @@ // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// +// +// This file is part of the X86 Disassembler. +// It contains code to translate the data produced by the decoder into +// MCInsts. +// Documentation for the disassembler can be found in X86Disassembler.h. +// +//===----------------------------------------------------------------------===// +#include "X86Disassembler.h" +#include "X86DisassemblerDecoder.h" + +#include "llvm/MC/MCDisassembler.h" #include "llvm/MC/MCDisassembler.h" +#include "llvm/MC/MCInst.h" #include "llvm/Target/TargetRegistry.h" -#include "X86.h" +#include "llvm/Support/MemoryObject.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" + +#include "X86GenRegisterNames.inc" + using namespace llvm; +using namespace llvm::X86Disassembler; + +namespace llvm { + +// Fill-ins to make the compiler happy. These constants are never actually +// assigned; they are just filler to make an automatically-generated switch +// statement work. +namespace X86 { + enum { + BX_SI = 500, + BX_DI = 501, + BP_SI = 502, + BP_DI = 503, + sib = 504, + sib64 = 505 + }; +} + +extern Target TheX86_32Target, TheX86_64Target; + +} + +static void translateInstruction(MCInst &target, + InternalInstruction &source); + +X86GenericDisassembler::X86GenericDisassembler(DisassemblerMode mode) : + MCDisassembler(), + fMode(mode) { +} + +X86GenericDisassembler::~X86GenericDisassembler() { +} + +/// regionReader - a callback function that wraps the readByte method from +/// MemoryObject. +/// +/// @param arg - The generic callback parameter. In this case, this should +/// be a pointer to a MemoryObject. +/// @param byte - A pointer to the byte to be read. +/// @param address - The address to be read. 
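+/// @return        - 0 if the read was successful; nonzero otherwise.  This is
+///                   MemoryObject::readByte()'s return value, passed through.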
+static int regionReader(void* arg, uint8_t* byte, uint64_t address) {
+  MemoryObject* region = static_cast<MemoryObject*>(arg);
+  return region->readByte(address, byte);
+}
+
+/// logger - a callback function that wraps the operator<< method from
+///   raw_ostream.
+///
+/// @param arg      - The generic callback parameter.  This should be a pointer
+///                   to a raw_ostream.
+/// @param log      - A string to be logged.  logger() adds a newline.
+static void logger(void* arg, const char* log) {
+  if (!arg)
+    return;
+
+  raw_ostream &vStream = *(static_cast<raw_ostream*>(arg));
+  vStream << log << "\n";
+}
+
+//
+// Public interface for the disassembler
+//
+
+bool X86GenericDisassembler::getInstruction(MCInst &instr,
+                                            uint64_t &size,
+                                            const MemoryObject &region,
+                                            uint64_t address,
+                                            raw_ostream &vStream) const {
+  InternalInstruction internalInstr;
+
+  int ret = decodeInstruction(&internalInstr,
+                              regionReader,
+                              (void*)&region,
+                              logger,
+                              (void*)&vStream,
+                              address,
+                              fMode);
+
+  if(ret) {
+    size = internalInstr.readerCursor - address;
+    return false;
+  }
+  else {
+    size = internalInstr.length;
+    translateInstruction(instr, internalInstr);
+    return true;
+  }
+}
+
+//
+// Private code that translates from struct InternalInstructions to MCInsts.
+//
+
+/// translateRegister - Translates an internal register to the appropriate LLVM
+///   register, and appends it as an operand to an MCInst.
+///
+/// @param mcInst     - The MCInst to append to.
+/// @param reg        - The Reg to append.
+static void translateRegister(MCInst &mcInst, Reg reg) {
+#define ENTRY(x) X86::x,
+  uint8_t llvmRegnums[] = {
+    ALL_REGS
+    0
+  };
+#undef ENTRY
+
+  uint8_t llvmRegnum = llvmRegnums[reg];
+  mcInst.addOperand(MCOperand::CreateReg(llvmRegnum));
+}
+
+/// translateImmediate - Appends an immediate operand to an MCInst.
+///
+/// @param mcInst       - The MCInst to append to.
+/// @param immediate    - The immediate value to append.
+static void translateImmediate(MCInst &mcInst, uint64_t immediate) {
+  mcInst.addOperand(MCOperand::CreateImm(immediate));
+}
+
+/// translateRMRegister - Translates a register stored in the R/M field of the
+///   ModR/M byte to its LLVM equivalent and appends it to an MCInst.
+/// @param mcInst       - The MCInst to append to.
+/// @param insn         - The internal instruction to extract the R/M field
+///                       from.
+static void translateRMRegister(MCInst &mcInst,
+                                InternalInstruction &insn) {
+  assert(insn.eaBase != EA_BASE_sib && insn.eaBase != EA_BASE_sib64 &&
+         "A R/M register operand may not have a SIB byte");
+
+  switch (insn.eaBase) {
+  case EA_BASE_NONE:
+    llvm_unreachable("EA_BASE_NONE for ModR/M base");
+    break;
+#define ENTRY(x) case EA_BASE_##x:
+  ALL_EA_BASES
+#undef ENTRY
+    llvm_unreachable("A R/M register operand may not have a base; "
+                     "the operand must be a register.");
+    break;
+#define ENTRY(x)                                                    \
+  case EA_REG_##x:                                                  \
+    mcInst.addOperand(MCOperand::CreateReg(X86::x)); break;
+  ALL_REGS
+#undef ENTRY
+  default:
+    llvm_unreachable("Unexpected EA base register");
+  }
+}
+
+/// translateRMMemory - Translates a memory operand stored in the Mod and R/M
+///   fields of an internal instruction (and possibly its SIB byte) to a memory
+///   operand in LLVM's format, and appends it to an MCInst.
+///
+/// @param mcInst       - The MCInst to append to.
+/// @param insn         - The instruction to extract Mod, R/M, and SIB fields
+///                       from.
+/// @param sr           - Whether or not to emit the segment register.  The
+///                       LEA instruction does not expect a segment-register
+///                       operand.
+static void translateRMMemory(MCInst &mcInst, + InternalInstruction &insn, + bool sr) { + // Addresses in an MCInst are represented as five operands: + // 1. basereg (register) The R/M base, or (if there is a SIB) the + // SIB base + // 2. scaleamount (immediate) 1, or (if there is a SIB) the specified + // scale amount + // 3. indexreg (register) x86_registerNONE, or (if there is a SIB) + // the index (which is multiplied by the + // scale amount) + // 4. displacement (immediate) 0, or the displacement if there is one + // 5. segmentreg (register) x86_registerNONE for now, but could be set + // if we have segment overrides + + MCOperand baseReg; + MCOperand scaleAmount; + MCOperand indexReg; + MCOperand displacement; + MCOperand segmentReg; + + if (insn.eaBase == EA_BASE_sib || insn.eaBase == EA_BASE_sib64) { + if (insn.sibBase != SIB_BASE_NONE) { + switch (insn.sibBase) { + default: + llvm_unreachable("Unexpected sibBase"); +#define ENTRY(x) \ + case SIB_BASE_##x: \ + baseReg = MCOperand::CreateReg(X86::x); break; + ALL_SIB_BASES +#undef ENTRY + } + } else { + baseReg = MCOperand::CreateReg(0); + } + + if (insn.sibIndex != SIB_INDEX_NONE) { + switch (insn.sibIndex) { + default: + llvm_unreachable("Unexpected sibIndex"); +#define ENTRY(x) \ + case SIB_INDEX_##x: \ + indexReg = MCOperand::CreateReg(X86::x); break; + EA_BASES_32BIT + EA_BASES_64BIT +#undef ENTRY + } + } else { + indexReg = MCOperand::CreateReg(0); + } + + scaleAmount = MCOperand::CreateImm(insn.sibScale); + } else { + switch (insn.eaBase) { + case EA_BASE_NONE: + assert(insn.eaDisplacement != EA_DISP_NONE && + "EA_BASE_NONE and EA_DISP_NONE for ModR/M base"); + + if (insn.mode == MODE_64BIT) + baseReg = MCOperand::CreateReg(X86::RIP); // Section 2.2.1.6 + else + baseReg = MCOperand::CreateReg(0); + + indexReg = MCOperand::CreateReg(0); + break; + case EA_BASE_BX_SI: + baseReg = MCOperand::CreateReg(X86::BX); + indexReg = MCOperand::CreateReg(X86::SI); + break; + case EA_BASE_BX_DI: + baseReg = MCOperand::CreateReg(X86::BX); + indexReg = MCOperand::CreateReg(X86::DI); + break; + case EA_BASE_BP_SI: + baseReg = MCOperand::CreateReg(X86::BP); + indexReg = MCOperand::CreateReg(X86::SI); + break; + case EA_BASE_BP_DI: + baseReg = MCOperand::CreateReg(X86::BP); + indexReg = MCOperand::CreateReg(X86::DI); + break; + default: + indexReg = MCOperand::CreateReg(0); + switch (insn.eaBase) { + default: + llvm_unreachable("Unexpected eaBase"); + break; + // Here, we will use the fill-ins defined above. However, + // BX_SI, BX_DI, BP_SI, and BP_DI are all handled above and + // sib and sib64 were handled in the top-level if, so they're only + // placeholders to keep the compiler happy. 
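+        // Each ENTRY expansion of ALL_EA_BASES below emits one case per
+        // effective-address base, mapping the decoder's EA_BASE_* value
+        // onto the matching LLVM register enumerator.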
+#define ENTRY(x) \ + case EA_BASE_##x: \ + baseReg = MCOperand::CreateReg(X86::x); break; + ALL_EA_BASES +#undef ENTRY +#define ENTRY(x) case EA_REG_##x: + ALL_REGS +#undef ENTRY + llvm_unreachable("A R/M memory operand may not be a register; " + "the base field must be a base."); + break; + } + } + + scaleAmount = MCOperand::CreateImm(1); + } + + displacement = MCOperand::CreateImm(insn.displacement); + + static const uint8_t segmentRegnums[SEG_OVERRIDE_max] = { + 0, // SEG_OVERRIDE_NONE + X86::CS, + X86::SS, + X86::DS, + X86::ES, + X86::FS, + X86::GS + }; + + segmentReg = MCOperand::CreateReg(segmentRegnums[insn.segmentOverride]); + + mcInst.addOperand(baseReg); + mcInst.addOperand(scaleAmount); + mcInst.addOperand(indexReg); + mcInst.addOperand(displacement); + + if (sr) + mcInst.addOperand(segmentReg); +} + +/// translateRM - Translates an operand stored in the R/M (and possibly SIB) +/// byte of an instruction to LLVM form, and appends it to an MCInst. +/// +/// @param mcInst - The MCInst to append to. +/// @param operand - The operand, as stored in the descriptor table. +/// @param insn - The instruction to extract Mod, R/M, and SIB fields +/// from. +static void translateRM(MCInst &mcInst, + OperandSpecifier &operand, + InternalInstruction &insn) { + switch (operand.type) { + default: + llvm_unreachable("Unexpected type for a R/M operand"); + case TYPE_R8: + case TYPE_R16: + case TYPE_R32: + case TYPE_R64: + case TYPE_Rv: + case TYPE_MM: + case TYPE_MM32: + case TYPE_MM64: + case TYPE_XMM: + case TYPE_XMM32: + case TYPE_XMM64: + case TYPE_XMM128: + case TYPE_DEBUGREG: + case TYPE_CR32: + case TYPE_CR64: + translateRMRegister(mcInst, insn); + break; + case TYPE_M: + case TYPE_M8: + case TYPE_M16: + case TYPE_M32: + case TYPE_M64: + case TYPE_M128: + case TYPE_M512: + case TYPE_Mv: + case TYPE_M32FP: + case TYPE_M64FP: + case TYPE_M80FP: + case TYPE_M16INT: + case TYPE_M32INT: + case TYPE_M64INT: + case TYPE_M1616: + case TYPE_M1632: + case TYPE_M1664: + translateRMMemory(mcInst, insn, true); + break; + case TYPE_LEA: + translateRMMemory(mcInst, insn, false); + break; + } +} + +/// translateFPRegister - Translates a stack position on the FPU stack to its +/// LLVM form, and appends it to an MCInst. +/// +/// @param mcInst - The MCInst to append to. +/// @param stackPos - The stack position to translate. +static void translateFPRegister(MCInst &mcInst, + uint8_t stackPos) { + assert(stackPos < 8 && "Invalid FP stack position"); + + mcInst.addOperand(MCOperand::CreateReg(X86::ST0 + stackPos)); +} + +/// translateOperand - Translates an operand stored in an internal instruction +/// to LLVM's format and appends it to an MCInst. +/// +/// @param mcInst - The MCInst to append to. +/// @param operand - The operand, as stored in the descriptor table. +/// @param insn - The internal instruction. 
+static void translateOperand(MCInst &mcInst, + OperandSpecifier &operand, + InternalInstruction &insn) { + switch (operand.encoding) { + default: + llvm_unreachable("Unhandled operand encoding during translation"); + case ENCODING_REG: + translateRegister(mcInst, insn.reg); + break; + case ENCODING_RM: + translateRM(mcInst, operand, insn); + break; + case ENCODING_CB: + case ENCODING_CW: + case ENCODING_CD: + case ENCODING_CP: + case ENCODING_CO: + case ENCODING_CT: + llvm_unreachable("Translation of code offsets isn't supported."); + case ENCODING_IB: + case ENCODING_IW: + case ENCODING_ID: + case ENCODING_IO: + case ENCODING_Iv: + case ENCODING_Ia: + translateImmediate(mcInst, + insn.immediates[insn.numImmediatesTranslated++]); + break; + case ENCODING_RB: + case ENCODING_RW: + case ENCODING_RD: + case ENCODING_RO: + translateRegister(mcInst, insn.opcodeRegister); + break; + case ENCODING_I: + translateFPRegister(mcInst, insn.opcodeModifier); + break; + case ENCODING_Rv: + translateRegister(mcInst, insn.opcodeRegister); + break; + case ENCODING_DUP: + translateOperand(mcInst, + insn.spec->operands[operand.type - TYPE_DUP0], + insn); + break; + } +} + +/// translateInstruction - Translates an internal instruction and all its +/// operands to an MCInst. +/// +/// @param mcInst - The MCInst to populate with the instruction's data. +/// @param insn - The internal instruction. +static void translateInstruction(MCInst &mcInst, + InternalInstruction &insn) { + assert(insn.spec); + + mcInst.setOpcode(insn.instructionID); + + int index; + + insn.numImmediatesTranslated = 0; + + for (index = 0; index < X86_MAX_OPERANDS; ++index) { + if (insn.spec->operands[index].encoding != ENCODING_NONE) + translateOperand(mcInst, insn.spec->operands[index], insn); + } +} static const MCDisassembler *createX86_32Disassembler(const Target &T) { - return 0; + return new X86Disassembler::X86_32Disassembler; } static const MCDisassembler *createX86_64Disassembler(const Target &T) { - return 0; + return new X86Disassembler::X86_64Disassembler; } extern "C" void LLVMInitializeX86Disassembler() { diff --git a/lib/Target/X86/Disassembler/X86Disassembler.h b/lib/Target/X86/Disassembler/X86Disassembler.h new file mode 100644 index 0000000..0e6e0b0 --- /dev/null +++ b/lib/Target/X86/Disassembler/X86Disassembler.h @@ -0,0 +1,150 @@ +//===- X86Disassembler.h - Disassembler for x86 and x86_64 ------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// The X86 disassembler is a table-driven disassembler for the 16-, 32-, and +// 64-bit X86 instruction sets. The main decode sequence for an assembly +// instruction in this disassembler is: +// +// 1. Read the prefix bytes and determine the attributes of the instruction. +// These attributes, recorded in enum attributeBits +// (X86DisassemblerDecoderCommon.h), form a bitmask. The table CONTEXTS_SYM +// provides a mapping from bitmasks to contexts, which are represented by +// enum InstructionContext (ibid.). +// +// 2. Read the opcode, and determine what kind of opcode it is. The +// disassembler distinguishes four kinds of opcodes, which are enumerated in +// OpcodeType (X86DisassemblerDecoderCommon.h): one-byte (0xnn), two-byte +// (0x0f 0xnn), three-byte-38 (0x0f 0x38 0xnn), or three-byte-3a +// (0x0f 0x3a 0xnn). 
Mandatory prefixes are treated as part of the context.
+//
+// 3. Depending on the opcode type, look in one of four ClassDecision structures
+//    (X86DisassemblerDecoderCommon.h).  Use the opcode class to determine which
+//    OpcodeDecision (ibid.) to look the opcode up in.  Looking up the opcode
+//    yields a ModRMDecision (ibid.).
+//
+// 4. Some instructions, such as escape opcodes or extended opcodes, or even
+//    instructions that have ModRM*Reg / ModRM*Mem forms in LLVM, need the
+//    ModR/M byte to complete decode.  The ModRMDecision's type is an entry from
+//    ModRMDecisionType (X86DisassemblerDecoderCommon.h) that indicates if the
+//    ModR/M byte is required and how to interpret it.
+//
+// 5. After resolving the ModRMDecision, the disassembler has a unique ID
+//    of type InstrUID (X86DisassemblerDecoderCommon.h).  Looking this ID up in
+//    INSTRUCTIONS_SYM yields the name of the instruction and the encodings and
+//    meanings of its operands.
+//
+// 6. For each operand, its encoding is an entry from OperandEncoding
+//    (X86DisassemblerDecoderCommon.h) and its type is an entry from
+//    OperandType (ibid.).  The encoding indicates how to read it from the
+//    instruction; the type indicates how to interpret the value once it has
+//    been read.  For example, a register operand could be stored in the R/M
+//    field of the ModR/M byte, the REG field of the ModR/M byte, or added to
+//    the main opcode.  This is orthogonal to its meaning (a GPR or an XMM
+//    register, for instance).  Given this information, the operands can be
+//    extracted and interpreted.
+//
+// 7. As the last step, the disassembler translates the instruction information
+//    and operands into a format understandable by the client - in this case,
+//    an MCInst for use by the MC infrastructure.
+//
+// The disassembler is broken broadly into two parts: the table emitter that
+// emits the instruction decode tables discussed above during compilation, and
+// the disassembler itself.  The table emitter is documented in more detail in
+// utils/TableGen/X86DisassemblerEmitter.h.
+//
+// X86Disassembler.h contains the public interface for the disassembler,
+// adhering to the MCDisassembler interface.
+// X86Disassembler.cpp contains the code responsible for step 7, and for
+// invoking the decoder to execute steps 1-6.
+// X86DisassemblerDecoderCommon.h contains the definitions needed by both the
+// table emitter and the disassembler.
+// X86DisassemblerDecoder.h contains the public interface of the decoder,
+// factored out into C for possible use by other projects.
+// X86DisassemblerDecoder.c contains the source code of the decoder, which is
+// responsible for steps 1-6.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef X86DISASSEMBLER_H
+#define X86DISASSEMBLER_H
+
+#define INSTRUCTION_SPECIFIER_FIELDS \
+  const char* name;
+
+#define INSTRUCTION_IDS              \
+  InstrUID* instructionIDs;
+
+#include "X86DisassemblerDecoderCommon.h"
+
+#undef INSTRUCTION_SPECIFIER_FIELDS
+#undef INSTRUCTION_IDS
+
+#include "llvm/MC/MCDisassembler.h"
+
+struct InternalInstruction;
+
+namespace llvm {
+
+class MCInst;
+class MemoryObject;
+class raw_ostream;
+
+namespace X86Disassembler {
+
+/// X86GenericDisassembler - Generic disassembler for all X86 platforms.
+///   All that each platform class should have to do is subclass the
+///   constructor and provide a different disassemblerMode value.
+class X86GenericDisassembler : public MCDisassembler {
+protected:
+  /// Constructor     - Initializes the disassembler.
+  ///
+  /// @param mode     - The X86 architecture mode to decode for.
+  X86GenericDisassembler(DisassemblerMode mode);
+public:
+  ~X86GenericDisassembler();
+
+  /// getInstruction - See MCDisassembler.
+  bool getInstruction(MCInst &instr,
+                      uint64_t &size,
+                      const MemoryObject &region,
+                      uint64_t address,
+                      raw_ostream &vStream) const;
+private:
+  DisassemblerMode fMode;
+};
+
+/// X86_16Disassembler - 16-bit X86 disassembler.
+class X86_16Disassembler : public X86GenericDisassembler {
+public:
+  X86_16Disassembler() :
+    X86GenericDisassembler(MODE_16BIT) {
+  }
+};
+
+/// X86_32Disassembler - 32-bit X86 disassembler.
+class X86_32Disassembler : public X86GenericDisassembler {
+public:
+  X86_32Disassembler() :
+    X86GenericDisassembler(MODE_32BIT) {
+  }
+};
+
+/// X86_64Disassembler - 64-bit X86 disassembler.
+class X86_64Disassembler : public X86GenericDisassembler {
+public:
+  X86_64Disassembler() :
+    X86GenericDisassembler(MODE_64BIT) {
+  }
+};
+
+} // namespace X86Disassembler
+
+} // namespace llvm
+
+#endif
diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c
new file mode 100644
index 0000000..a0a04ba
--- /dev/null
+++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c
@@ -0,0 +1,1365 @@
+/*===- X86DisassemblerDecoder.c - Disassembler decoder -------------*- C -*-==*
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is distributed under the University of Illinois Open Source
+ * License.  See LICENSE.TXT for details.
+ *
+ *===----------------------------------------------------------------------===*
+ *
+ * This file is part of the X86 Disassembler.
+ * It contains the implementation of the instruction decoder.
+ * Documentation for the disassembler can be found in X86Disassembler.h.
+ *
+ *===----------------------------------------------------------------------===*/
+
+#include <assert.h>   /* for assert()     */
+#include <stdarg.h>   /* for va_*()       */
+#include <stdio.h>    /* for vsnprintf()  */
+#include <stdlib.h>   /* for exit()       */
+#include <string.h>   /* for memset()     */
+
+#include "X86DisassemblerDecoder.h"
+
+#include "X86GenDisassemblerTables.inc"
+
+#define TRUE  1
+#define FALSE 0
+
+#ifdef __GNUC__
+#define NORETURN __attribute__((noreturn))
+#else
+#define NORETURN
+#endif
+
+#define unreachable(s)                                      \
+  do {                                                      \
+    fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__, s);  \
+    exit(-1);                                               \
+  } while (0);
+
+/*
+ * contextForAttrs - Client for the instruction context table.  Takes a set of
+ *   attributes and returns the appropriate decode context.
+ *
+ * @param attrMask  - Attributes, from the enumeration attributeBits.
+ * @return          - The InstructionContext to use when looking up an
+ *                    instruction with these attributes.
+ */
+static InstructionContext contextForAttrs(uint8_t attrMask) {
+  return CONTEXTS_SYM[attrMask];
+}
+
+/*
+ * modRMRequired - Reads the appropriate instruction table to determine whether
+ *   the ModR/M byte is required to decode a particular instruction.
+ *
+ * @param type        - The opcode type (i.e., how many bytes it has).
+ * @param insnContext - The context for the instruction, as returned by
+ *                      contextForAttrs.
+ * @param opcode      - The last byte of the instruction's opcode, not counting
+ *                      ModR/M extensions and escapes.
+ * @return            - TRUE if the ModR/M byte is required, FALSE otherwise.
+ */ +static int modRMRequired(OpcodeType type, + InstructionContext insnContext, + uint8_t opcode) { + const struct ContextDecision* decision = 0; + + switch (type) { + case ONEBYTE: + decision = &ONEBYTE_SYM; + break; + case TWOBYTE: + decision = &TWOBYTE_SYM; + break; + case THREEBYTE_38: + decision = &THREEBYTE38_SYM; + break; + case THREEBYTE_3A: + decision = &THREEBYTE3A_SYM; + break; + } + + return decision->opcodeDecisions[insnContext].modRMDecisions[opcode]. + modrm_type != MODRM_ONEENTRY; + + unreachable("Unknown opcode type"); + return 0; +} + +/* + * decode - Reads the appropriate instruction table to obtain the unique ID of + * an instruction. + * + * @param type - See modRMRequired(). + * @param insnContext - See modRMRequired(). + * @param opcode - See modRMRequired(). + * @param modRM - The ModR/M byte if required, or any value if not. + */ +static InstrUID decode(OpcodeType type, + InstructionContext insnContext, + uint8_t opcode, + uint8_t modRM) { + struct ModRMDecision* dec; + + switch (type) { + default: + unreachable("Unknown opcode type"); + case ONEBYTE: + dec = &ONEBYTE_SYM.opcodeDecisions[insnContext].modRMDecisions[opcode]; + break; + case TWOBYTE: + dec = &TWOBYTE_SYM.opcodeDecisions[insnContext].modRMDecisions[opcode]; + break; + case THREEBYTE_38: + dec = &THREEBYTE38_SYM.opcodeDecisions[insnContext].modRMDecisions[opcode]; + break; + case THREEBYTE_3A: + dec = &THREEBYTE3A_SYM.opcodeDecisions[insnContext].modRMDecisions[opcode]; + break; + } + + switch (dec->modrm_type) { + default: + unreachable("Corrupt table! Unknown modrm_type"); + case MODRM_ONEENTRY: + return dec->instructionIDs[0]; + case MODRM_SPLITRM: + if (modFromModRM(modRM) == 0x3) + return dec->instructionIDs[1]; + else + return dec->instructionIDs[0]; + case MODRM_FULL: + return dec->instructionIDs[modRM]; + } + + return 0; +} + +/* + * specifierForUID - Given a UID, returns the name and operand specification for + * that instruction. + * + * @param uid - The unique ID for the instruction. This should be returned by + * decode(); specifierForUID will not check bounds. + * @return - A pointer to the specification for that instruction. + */ +static struct InstructionSpecifier* specifierForUID(InstrUID uid) { + return &INSTRUCTIONS_SYM[uid]; +} + +/* + * consumeByte - Uses the reader function provided by the user to consume one + * byte from the instruction's memory and advance the cursor. + * + * @param insn - The instruction with the reader function to use. The cursor + * for this instruction is advanced. + * @param byte - A pointer to a pre-allocated memory buffer to be populated + * with the data read. + * @return - 0 if the read was successful; nonzero otherwise. + */ +static int consumeByte(struct InternalInstruction* insn, uint8_t* byte) { + int ret = insn->reader(insn->readerArg, byte, insn->readerCursor); + + if (!ret) + ++(insn->readerCursor); + + return ret; +} + +/* + * lookAtByte - Like consumeByte, but does not advance the cursor. + * + * @param insn - See consumeByte(). + * @param byte - See consumeByte(). + * @return - See consumeByte(). 
+ */ +static int lookAtByte(struct InternalInstruction* insn, uint8_t* byte) { + return insn->reader(insn->readerArg, byte, insn->readerCursor); +} + +static void unconsumeByte(struct InternalInstruction* insn) { + insn->readerCursor--; +} + +#define CONSUME_FUNC(name, type) \ + static int name(struct InternalInstruction* insn, type* ptr) { \ + type combined = 0; \ + unsigned offset; \ + for (offset = 0; offset < sizeof(type); ++offset) { \ + uint8_t byte; \ + int ret = insn->reader(insn->readerArg, \ + &byte, \ + insn->readerCursor + offset); \ + if (ret) \ + return ret; \ + combined = combined | ((type)byte << ((type)offset * 8)); \ + } \ + *ptr = combined; \ + insn->readerCursor += sizeof(type); \ + return 0; \ + } + +/* + * consume* - Use the reader function provided by the user to consume data + * values of various sizes from the instruction's memory and advance the + * cursor appropriately. These readers perform endian conversion. + * + * @param insn - See consumeByte(). + * @param ptr - A pointer to a pre-allocated memory of appropriate size to + * be populated with the data read. + * @return - See consumeByte(). + */ +CONSUME_FUNC(consumeInt8, int8_t) +CONSUME_FUNC(consumeInt16, int16_t) +CONSUME_FUNC(consumeInt32, int32_t) +CONSUME_FUNC(consumeUInt16, uint16_t) +CONSUME_FUNC(consumeUInt32, uint32_t) +CONSUME_FUNC(consumeUInt64, uint64_t) + +/* + * dbgprintf - Uses the logging function provided by the user to log a single + * message, typically without a carriage-return. + * + * @param insn - The instruction containing the logging function. + * @param format - See printf(). + * @param ... - See printf(). + */ +static void dbgprintf(struct InternalInstruction* insn, + const char* format, + ...) { + char buffer[256]; + va_list ap; + + if (!insn->dlog) + return; + + va_start(ap, format); + (void)vsnprintf(buffer, sizeof(buffer), format, ap); + va_end(ap); + + insn->dlog(insn->dlogArg, buffer); + + return; +} + +/* + * setPrefixPresent - Marks that a particular prefix is present at a particular + * location. + * + * @param insn - The instruction to be marked as having the prefix. + * @param prefix - The prefix that is present. + * @param location - The location where the prefix is located (in the address + * space of the instruction's reader). + */ +static void setPrefixPresent(struct InternalInstruction* insn, + uint8_t prefix, + uint64_t location) +{ + insn->prefixPresent[prefix] = 1; + insn->prefixLocations[prefix] = location; +} + +/* + * isPrefixAtLocation - Queries an instruction to determine whether a prefix is + * present at a given location. + * + * @param insn - The instruction to be queried. + * @param prefix - The prefix. + * @param location - The location to query. + * @return - Whether the prefix is at that location. + */ +static BOOL isPrefixAtLocation(struct InternalInstruction* insn, + uint8_t prefix, + uint64_t location) +{ + if (insn->prefixPresent[prefix] == 1 && + insn->prefixLocations[prefix] == location) + return TRUE; + else + return FALSE; +} + +/* + * readPrefixes - Consumes all of an instruction's prefix bytes, and marks the + * instruction as having them. Also sets the instruction's default operand, + * address, and other relevant data sizes to report operands correctly. + * + * @param insn - The instruction whose prefixes are to be read. + * @return - 0 if the instruction could be read until the end of the prefix + * bytes, and no prefixes conflicted; nonzero otherwise. 
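+ *                  Redundant prefixes within the same prefix group are merely
+ *                  logged via dbgprintf() and do not by themselves count as a
+ *                  conflict.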
+ */ +static int readPrefixes(struct InternalInstruction* insn) { + BOOL isPrefix = TRUE; + BOOL prefixGroups[4] = { FALSE }; + uint64_t prefixLocation; + uint8_t byte; + + BOOL hasAdSize = FALSE; + BOOL hasOpSize = FALSE; + + dbgprintf(insn, "readPrefixes()"); + + while (isPrefix) { + prefixLocation = insn->readerCursor; + + if (consumeByte(insn, &byte)) + return -1; + + switch (byte) { + case 0xf0: /* LOCK */ + case 0xf2: /* REPNE/REPNZ */ + case 0xf3: /* REP or REPE/REPZ */ + if (prefixGroups[0]) + dbgprintf(insn, "Redundant Group 1 prefix"); + prefixGroups[0] = TRUE; + setPrefixPresent(insn, byte, prefixLocation); + break; + case 0x2e: /* CS segment override -OR- Branch not taken */ + case 0x36: /* SS segment override -OR- Branch taken */ + case 0x3e: /* DS segment override */ + case 0x26: /* ES segment override */ + case 0x64: /* FS segment override */ + case 0x65: /* GS segment override */ + switch (byte) { + case 0x2e: + insn->segmentOverride = SEG_OVERRIDE_CS; + break; + case 0x36: + insn->segmentOverride = SEG_OVERRIDE_SS; + break; + case 0x3e: + insn->segmentOverride = SEG_OVERRIDE_DS; + break; + case 0x26: + insn->segmentOverride = SEG_OVERRIDE_ES; + break; + case 0x64: + insn->segmentOverride = SEG_OVERRIDE_FS; + break; + case 0x65: + insn->segmentOverride = SEG_OVERRIDE_GS; + break; + default: + unreachable("Unhandled override"); + } + if (prefixGroups[1]) + dbgprintf(insn, "Redundant Group 2 prefix"); + prefixGroups[1] = TRUE; + setPrefixPresent(insn, byte, prefixLocation); + break; + case 0x66: /* Operand-size override */ + if (prefixGroups[2]) + dbgprintf(insn, "Redundant Group 3 prefix"); + prefixGroups[2] = TRUE; + hasOpSize = TRUE; + setPrefixPresent(insn, byte, prefixLocation); + break; + case 0x67: /* Address-size override */ + if (prefixGroups[3]) + dbgprintf(insn, "Redundant Group 4 prefix"); + prefixGroups[3] = TRUE; + hasAdSize = TRUE; + setPrefixPresent(insn, byte, prefixLocation); + break; + default: /* Not a prefix byte */ + isPrefix = FALSE; + break; + } + + if (isPrefix) + dbgprintf(insn, "Found prefix 0x%hhx", byte); + } + + if (insn->mode == MODE_64BIT) { + if ((byte & 0xf0) == 0x40) { + uint8_t opcodeByte; + + if(lookAtByte(insn, &opcodeByte) || ((opcodeByte & 0xf0) == 0x40)) { + dbgprintf(insn, "Redundant REX prefix"); + return -1; + } + + insn->rexPrefix = byte; + insn->necessaryPrefixLocation = insn->readerCursor - 2; + + dbgprintf(insn, "Found REX prefix 0x%hhx", byte); + } else { + unconsumeByte(insn); + insn->necessaryPrefixLocation = insn->readerCursor - 1; + } + } else { + unconsumeByte(insn); + } + + if (insn->mode == MODE_16BIT) { + insn->registerSize = (hasOpSize ? 4 : 2); + insn->addressSize = (hasAdSize ? 4 : 2); + insn->displacementSize = (hasAdSize ? 4 : 2); + insn->immediateSize = (hasOpSize ? 4 : 2); + } else if (insn->mode == MODE_32BIT) { + insn->registerSize = (hasOpSize ? 2 : 4); + insn->addressSize = (hasAdSize ? 2 : 4); + insn->displacementSize = (hasAdSize ? 2 : 4); + insn->immediateSize = (hasAdSize ? 2 : 4); + } else if (insn->mode == MODE_64BIT) { + if (insn->rexPrefix && wFromREX(insn->rexPrefix)) { + insn->registerSize = 8; + insn->addressSize = (hasAdSize ? 4 : 8); + insn->displacementSize = 4; + insn->immediateSize = 4; + } else if (insn->rexPrefix) { + insn->registerSize = (hasOpSize ? 2 : 4); + insn->addressSize = (hasAdSize ? 4 : 8); + insn->displacementSize = (hasOpSize ? 2 : 4); + insn->immediateSize = (hasOpSize ? 2 : 4); + } else { + insn->registerSize = (hasOpSize ? 2 : 4); + insn->addressSize = (hasAdSize ? 
4 : 8);
+      insn->displacementSize = (hasOpSize ? 2 : 4);
+      insn->immediateSize = (hasOpSize ? 2 : 4);
+    }
+  }
+
+  return 0;
+}
+
+/*
+ * readOpcode - Reads the opcode (excepting the ModR/M byte in the case of
+ *   extended or escape opcodes).
+ *
+ * @param insn  - The instruction whose opcode is to be read.
+ * @return      - 0 if the opcode could be read successfully; nonzero otherwise.
+ */
+static int readOpcode(struct InternalInstruction* insn) {
+  /* Determine the length of the primary opcode */
+
+  uint8_t current;
+
+  dbgprintf(insn, "readOpcode()");
+
+  insn->opcodeType = ONEBYTE;
+  if (consumeByte(insn, &current))
+    return -1;
+
+  if (current == 0x0f) {
+    dbgprintf(insn, "Found a two-byte escape prefix (0x%hhx)", current);
+
+    insn->twoByteEscape = current;
+
+    if (consumeByte(insn, &current))
+      return -1;
+
+    if (current == 0x38) {
+      dbgprintf(insn, "Found a three-byte escape prefix (0x%hhx)", current);
+
+      insn->threeByteEscape = current;
+
+      if (consumeByte(insn, &current))
+        return -1;
+
+      insn->opcodeType = THREEBYTE_38;
+    } else if (current == 0x3a) {
+      dbgprintf(insn, "Found a three-byte escape prefix (0x%hhx)", current);
+
+      insn->threeByteEscape = current;
+
+      if (consumeByte(insn, &current))
+        return -1;
+
+      insn->opcodeType = THREEBYTE_3A;
+    } else {
+      dbgprintf(insn, "Didn't find a three-byte escape prefix");
+
+      insn->opcodeType = TWOBYTE;
+    }
+  }
+
+  /*
+   * At this point we have consumed the full opcode.
+   * Anything we consume from here on must be unconsumed.
+   */
+
+  insn->opcode = current;
+
+  return 0;
+}
+
+static int readModRM(struct InternalInstruction* insn);
+
+/*
+ * getIDWithAttrMask - Determines the ID of an instruction, consuming
+ *   the ModR/M byte as appropriate for extended and escape opcodes,
+ *   and using a supplied attribute mask.
+ *
+ * @param instructionID - A pointer whose target is filled in with the ID of the
+ *                        instruction.
+ * @param insn          - The instruction whose ID is to be determined.
+ * @param attrMask      - The attribute mask to search.
+ * @return              - 0 if the ModR/M could be read when needed or was not
+ *                        needed; nonzero otherwise.
+ */
+static int getIDWithAttrMask(uint16_t* instructionID,
+                             struct InternalInstruction* insn,
+                             uint8_t attrMask) {
+  BOOL hasModRMExtension;
+
+  uint8_t instructionClass;
+
+  instructionClass = contextForAttrs(attrMask);
+
+  hasModRMExtension = modRMRequired(insn->opcodeType,
+                                    instructionClass,
+                                    insn->opcode);
+
+  if (hasModRMExtension) {
+    readModRM(insn);
+
+    *instructionID = decode(insn->opcodeType,
+                            instructionClass,
+                            insn->opcode,
+                            insn->modRM);
+  } else {
+    *instructionID = decode(insn->opcodeType,
+                            instructionClass,
+                            insn->opcode,
+                            0);
+  }
+
+  return 0;
+}
+
+/*
+ * is16BitEquivalent - Determines whether two instruction names refer to
+ *   equivalent instructions but one is 16-bit whereas the other is not.
+ * + * @param orig - The instruction that is not 16-bit + * @param equiv - The instruction that is 16-bit + */ +static BOOL is16BitEquivalent(const char* orig, const char* equiv) { + off_t i; + + for(i = 0;; i++) { + if(orig[i] == '\0' && equiv[i] == '\0') + return TRUE; + if(orig[i] == '\0' || equiv[i] == '\0') + return FALSE; + if(orig[i] != equiv[i]) { + if((orig[i] == 'Q' || orig[i] == 'L') && equiv[i] == 'W') + continue; + if((orig[i] == '6' || orig[i] == '3') && equiv[i] == '1') + continue; + if((orig[i] == '4' || orig[i] == '2') && equiv[i] == '6') + continue; + return FALSE; + } + } +} + +/* + * is64BitEquivalent - Determines whether two instruction names refer to + * equivalent instructions but one is 64-bit whereas the other is not. + * + * @param orig - The instruction that is not 64-bit + * @param equiv - The instruction that is 64-bit + */ +static BOOL is64BitEquivalent(const char* orig, const char* equiv) { + off_t i; + + for(i = 0;; i++) { + if(orig[i] == '\0' && equiv[i] == '\0') + return TRUE; + if(orig[i] == '\0' || equiv[i] == '\0') + return FALSE; + if(orig[i] != equiv[i]) { + if((orig[i] == 'W' || orig[i] == 'L') && equiv[i] == 'Q') + continue; + if((orig[i] == '1' || orig[i] == '3') && equiv[i] == '6') + continue; + if((orig[i] == '6' || orig[i] == '2') && equiv[i] == '4') + continue; + return FALSE; + } + } +} + + +/* + * getID - Determines the ID of an instruction, consuming the ModR/M byte as + * appropriate for extended and escape opcodes. Determines the attributes and + * context for the instruction before doing so. + * + * @param insn - The instruction whose ID is to be determined. + * @return - 0 if the ModR/M could be read when needed or was not needed; + * nonzero otherwise. + */ +static int getID(struct InternalInstruction* insn) { + uint8_t attrMask; + uint16_t instructionID; + + dbgprintf(insn, "getID()"); + + attrMask = ATTR_NONE; + + if (insn->mode == MODE_64BIT) + attrMask |= ATTR_64BIT; + + if (insn->rexPrefix & 0x08) + attrMask |= ATTR_REXW; + + if (isPrefixAtLocation(insn, 0x66, insn->necessaryPrefixLocation)) + attrMask |= ATTR_OPSIZE; + else if (isPrefixAtLocation(insn, 0xf3, insn->necessaryPrefixLocation)) + attrMask |= ATTR_XS; + else if (isPrefixAtLocation(insn, 0xf2, insn->necessaryPrefixLocation)) + attrMask |= ATTR_XD; + + if(getIDWithAttrMask(&instructionID, insn, attrMask)) + return -1; + + /* The following clauses compensate for limitations of the tables. */ + + if ((attrMask & ATTR_XD) && (attrMask & ATTR_REXW)) { + /* + * Although for SSE instructions it is usually necessary to treat REX.W+F2 + * as F2 for decode (in the absence of a 64BIT_REXW_XD category) there is + * an occasional instruction where F2 is incidental and REX.W is the more + * significant. If the decoded instruction is 32-bit and adding REX.W + * instead of F2 changes a 32 to a 64, we adopt the new encoding. + */ + + struct InstructionSpecifier* spec; + uint16_t instructionIDWithREXw; + struct InstructionSpecifier* specWithREXw; + + spec = specifierForUID(instructionID); + + if (getIDWithAttrMask(&instructionIDWithREXw, + insn, + attrMask & (~ATTR_XD))) { + /* + * Decoding with REX.w would yield nothing; give up and return original + * decode. 
+ */ + + insn->instructionID = instructionID; + insn->spec = spec; + return 0; + } + + specWithREXw = specifierForUID(instructionIDWithREXw); + + if (is64BitEquivalent(spec->name, specWithREXw->name)) { + insn->instructionID = instructionIDWithREXw; + insn->spec = specWithREXw; + } else { + insn->instructionID = instructionID; + insn->spec = spec; + } + return 0; + } + + if (insn->prefixPresent[0x66] && !(attrMask & ATTR_OPSIZE)) { + /* + * The instruction tables make no distinction between instructions that + * allow OpSize anywhere (i.e., 16-bit operations) and that need it in a + * particular spot (i.e., many MMX operations). In general we're + * conservative, but in the specific case where OpSize is present but not + * in the right place we check if there's a 16-bit operation. + */ + + struct InstructionSpecifier* spec; + uint16_t instructionIDWithOpsize; + struct InstructionSpecifier* specWithOpsize; + + spec = specifierForUID(instructionID); + + if (getIDWithAttrMask(&instructionIDWithOpsize, + insn, + attrMask | ATTR_OPSIZE)) { + /* + * ModRM required with OpSize but not present; give up and return version + * without OpSize set + */ + + insn->instructionID = instructionID; + insn->spec = spec; + return 0; + } + + specWithOpsize = specifierForUID(instructionIDWithOpsize); + + if (is16BitEquivalent(spec->name, specWithOpsize->name)) { + insn->instructionID = instructionIDWithOpsize; + insn->spec = specWithOpsize; + } else { + insn->instructionID = instructionID; + insn->spec = spec; + } + return 0; + } + + insn->instructionID = instructionID; + insn->spec = specifierForUID(insn->instructionID); + + return 0; +} + +/* + * readSIB - Consumes the SIB byte to determine addressing information for an + * instruction. + * + * @param insn - The instruction whose SIB byte is to be read. + * @return - 0 if the SIB byte was successfully read; nonzero otherwise. + */ +static int readSIB(struct InternalInstruction* insn) { + SIBIndex sibIndexBase = 0; + SIBBase sibBaseBase = 0; + uint8_t index, base; + + dbgprintf(insn, "readSIB()"); + + if (insn->consumedSIB) + return 0; + + insn->consumedSIB = TRUE; + + switch (insn->addressSize) { + case 2: + dbgprintf(insn, "SIB-based addressing doesn't work in 16-bit mode"); + return -1; + break; + case 4: + sibIndexBase = SIB_INDEX_EAX; + sibBaseBase = SIB_BASE_EAX; + break; + case 8: + sibIndexBase = SIB_INDEX_RAX; + sibBaseBase = SIB_BASE_RAX; + break; + } + + if (consumeByte(insn, &insn->sib)) + return -1; + + index = indexFromSIB(insn->sib) | (xFromREX(insn->rexPrefix) << 3); + + switch (index) { + case 0x4: + insn->sibIndex = SIB_INDEX_NONE; + break; + default: + insn->sibIndex = (SIBIndex)(sibIndexBase + index); + if (insn->sibIndex == SIB_INDEX_sib || + insn->sibIndex == SIB_INDEX_sib64) + insn->sibIndex = SIB_INDEX_NONE; + break; + } + + switch (scaleFromSIB(insn->sib)) { + case 0: + insn->sibScale = 1; + break; + case 1: + insn->sibScale = 2; + break; + case 2: + insn->sibScale = 4; + break; + case 3: + insn->sibScale = 8; + break; + } + + base = baseFromSIB(insn->sib) | (bFromREX(insn->rexPrefix) << 3); + + switch (base) { + case 0x5: + switch (modFromModRM(insn->modRM)) { + case 0x0: + insn->eaDisplacement = EA_DISP_32; + insn->sibBase = SIB_BASE_NONE; + break; + case 0x1: + insn->eaDisplacement = EA_DISP_8; + insn->sibBase = (insn->addressSize == 4 ? + SIB_BASE_EBP : SIB_BASE_RBP); + break; + case 0x2: + insn->eaDisplacement = EA_DISP_32; + insn->sibBase = (insn->addressSize == 4 ? 
+ SIB_BASE_EBP : SIB_BASE_RBP); + break; + case 0x3: + unreachable("Cannot have Mod = 0b11 and a SIB byte"); + } + break; + default: + insn->sibBase = (SIBBase)(sibBaseBase + base); + break; + } + + return 0; +} + +/* + * readDisplacement - Consumes the displacement of an instruction. + * + * @param insn - The instruction whose displacement is to be read. + * @return - 0 if the displacement byte was successfully read; nonzero + * otherwise. + */ +static int readDisplacement(struct InternalInstruction* insn) { + int8_t d8; + int16_t d16; + int32_t d32; + + dbgprintf(insn, "readDisplacement()"); + + if (insn->consumedDisplacement) + return 0; + + insn->consumedDisplacement = TRUE; + + switch (insn->eaDisplacement) { + case EA_DISP_NONE: + insn->consumedDisplacement = FALSE; + break; + case EA_DISP_8: + if (consumeInt8(insn, &d8)) + return -1; + insn->displacement = d8; + break; + case EA_DISP_16: + if (consumeInt16(insn, &d16)) + return -1; + insn->displacement = d16; + break; + case EA_DISP_32: + if (consumeInt32(insn, &d32)) + return -1; + insn->displacement = d32; + break; + } + + insn->consumedDisplacement = TRUE; + return 0; +} + +/* + * readModRM - Consumes all addressing information (ModR/M byte, SIB byte, and + * displacement) for an instruction and interprets it. + * + * @param insn - The instruction whose addressing information is to be read. + * @return - 0 if the information was successfully read; nonzero otherwise. + */ +static int readModRM(struct InternalInstruction* insn) { + uint8_t mod, rm, reg; + + dbgprintf(insn, "readModRM()"); + + if (insn->consumedModRM) + return 0; + + consumeByte(insn, &insn->modRM); + insn->consumedModRM = TRUE; + + mod = modFromModRM(insn->modRM); + rm = rmFromModRM(insn->modRM); + reg = regFromModRM(insn->modRM); + + /* + * This goes by insn->registerSize to pick the correct register, which messes + * up if we're using (say) XMM or 8-bit register operands. That gets fixed in + * fixupReg(). + */ + switch (insn->registerSize) { + case 2: + insn->regBase = MODRM_REG_AX; + insn->eaRegBase = EA_REG_AX; + break; + case 4: + insn->regBase = MODRM_REG_EAX; + insn->eaRegBase = EA_REG_EAX; + break; + case 8: + insn->regBase = MODRM_REG_RAX; + insn->eaRegBase = EA_REG_RAX; + break; + } + + reg |= rFromREX(insn->rexPrefix) << 3; + rm |= bFromREX(insn->rexPrefix) << 3; + + insn->reg = (Reg)(insn->regBase + reg); + + switch (insn->addressSize) { + case 2: + insn->eaBaseBase = EA_BASE_BX_SI; + + switch (mod) { + case 0x0: + if (rm == 0x6) { + insn->eaBase = EA_BASE_NONE; + insn->eaDisplacement = EA_DISP_16; + if(readDisplacement(insn)) + return -1; + } else { + insn->eaBase = (EABase)(insn->eaBaseBase + rm); + insn->eaDisplacement = EA_DISP_NONE; + } + break; + case 0x1: + insn->eaBase = (EABase)(insn->eaBaseBase + rm); + insn->eaDisplacement = EA_DISP_8; + if(readDisplacement(insn)) + return -1; + break; + case 0x2: + insn->eaBase = (EABase)(insn->eaBaseBase + rm); + insn->eaDisplacement = EA_DISP_16; + if(readDisplacement(insn)) + return -1; + break; + case 0x3: + insn->eaBase = (EABase)(insn->eaRegBase + rm); + if(readDisplacement(insn)) + return -1; + break; + } + break; + case 4: + case 8: + insn->eaBaseBase = (insn->addressSize == 4 ? EA_BASE_EAX : EA_BASE_RAX); + + switch (mod) { + case 0x0: + insn->eaDisplacement = EA_DISP_NONE; /* readSIB may override this */ + switch (rm) { + case 0x4: + case 0xc: /* in case REXW.b is set */ + insn->eaBase = (insn->addressSize == 4 ? 
+ EA_BASE_sib : EA_BASE_sib64); + readSIB(insn); + if(readDisplacement(insn)) + return -1; + break; + case 0x5: + insn->eaBase = EA_BASE_NONE; + insn->eaDisplacement = EA_DISP_32; + if(readDisplacement(insn)) + return -1; + break; + default: + insn->eaBase = (EABase)(insn->eaBaseBase + rm); + break; + } + break; + case 0x1: + case 0x2: + insn->eaDisplacement = (mod == 0x1 ? EA_DISP_8 : EA_DISP_32); + switch (rm) { + case 0x4: + case 0xc: /* in case REXW.b is set */ + insn->eaBase = EA_BASE_sib; + readSIB(insn); + if(readDisplacement(insn)) + return -1; + break; + default: + insn->eaBase = (EABase)(insn->eaBaseBase + rm); + if(readDisplacement(insn)) + return -1; + break; + } + break; + case 0x3: + insn->eaDisplacement = EA_DISP_NONE; + insn->eaBase = (EABase)(insn->eaRegBase + rm); + break; + } + break; + } /* switch (insn->addressSize) */ + + return 0; +} + +#define GENERIC_FIXUP_FUNC(name, base, prefix) \ + static uint8_t name(struct InternalInstruction *insn, \ + OperandType type, \ + uint8_t index, \ + uint8_t *valid) { \ + *valid = 1; \ + switch (type) { \ + default: \ + unreachable("Unhandled register type"); \ + case TYPE_Rv: \ + return base + index; \ + case TYPE_R8: \ + if(insn->rexPrefix && \ + index >= 4 && index <= 7) { \ + return prefix##_SPL + (index - 4); \ + } else { \ + return prefix##_AL + index; \ + } \ + case TYPE_R16: \ + return prefix##_AX + index; \ + case TYPE_R32: \ + return prefix##_EAX + index; \ + case TYPE_R64: \ + return prefix##_RAX + index; \ + case TYPE_XMM128: \ + case TYPE_XMM64: \ + case TYPE_XMM32: \ + case TYPE_XMM: \ + return prefix##_XMM0 + index; \ + case TYPE_MM64: \ + case TYPE_MM32: \ + case TYPE_MM: \ + if(index > 7) \ + *valid = 0; \ + return prefix##_MM0 + index; \ + case TYPE_SEGMENTREG: \ + if(index > 5) \ + *valid = 0; \ + return prefix##_ES + index; \ + case TYPE_DEBUGREG: \ + if(index > 7) \ + *valid = 0; \ + return prefix##_DR0 + index; \ + case TYPE_CR32: \ + if(index > 7) \ + *valid = 0; \ + return prefix##_ECR0 + index; \ + case TYPE_CR64: \ + if(index > 8) \ + *valid = 0; \ + return prefix##_RCR0 + index; \ + } \ + } + +/* + * fixup*Value - Consults an operand type to determine the meaning of the + * reg or R/M field. If the operand is an XMM operand, for example, the + * field selects XMM0 rather than AX, which readModRM() would otherwise + * report. + * + * @param insn - The instruction containing the operand. + * @param type - The operand type. + * @param index - The existing value of the field as reported by readModRM(). + * @param valid - The address of a uint8_t. The target is set to 1 if the + * field is valid for the register class; 0 if not. + */ +GENERIC_FIXUP_FUNC(fixupRegValue, insn->regBase, MODRM_REG) +GENERIC_FIXUP_FUNC(fixupRMValue, insn->eaRegBase, EA_REG) + +/* + * fixupReg - Consults an operand specifier to determine which of the + * fixup*Value functions to use in correcting readModRM()'s interpretation. + * + * @param insn - See fixup*Value(). + * @param op - The operand specifier. + * @return - 0 if fixup was successful; -1 if the register returned was + * invalid for its class. 
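 *
 * A sketch of the effect, assuming an SSE instruction whose specifier
 * gives the reg operand type TYPE_XMM128: if readModRM() left insn->reg
 * at insn->regBase + 2, fixupRegValue() remaps it to MODRM_REG_XMM2.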
+ */ +static int fixupReg(struct InternalInstruction *insn, + struct OperandSpecifier *op) { + uint8_t valid; + + dbgprintf(insn, "fixupReg()"); + + switch ((OperandEncoding)op->encoding) { + default: + unreachable("Expected a REG or R/M encoding in fixupReg"); + case ENCODING_REG: + insn->reg = (Reg)fixupRegValue(insn, + (OperandType)op->type, + insn->reg - insn->regBase, + &valid); + if (!valid) + return -1; + break; + case ENCODING_RM: + if (insn->eaBase >= insn->eaRegBase) { + insn->eaBase = (EABase)fixupRMValue(insn, + (OperandType)op->type, + insn->eaBase - insn->eaRegBase, + &valid); + if (!valid) + return -1; + } + break; + } + + return 0; +} + +/* + * readOpcodeModifier - Reads an operand from the opcode field of an + * instruction. Handles AddRegFrm instructions. For extended and escape + * opcodes the modifier is taken from the ModR/M byte instead + * (MODIFIER_MODRM). + * + * @param insn - The instruction whose opcode field is to be read. + */ +static void readOpcodeModifier(struct InternalInstruction* insn) { + dbgprintf(insn, "readOpcodeModifier()"); + + if (insn->consumedOpcodeModifier) + return; + + insn->consumedOpcodeModifier = TRUE; + + switch(insn->spec->modifierType) { + default: + unreachable("Unknown modifier type."); + case MODIFIER_NONE: + unreachable("No modifier but an operand expects one."); + case MODIFIER_OPCODE: + insn->opcodeModifier = insn->opcode - insn->spec->modifierBase; + break; + case MODIFIER_MODRM: + insn->opcodeModifier = insn->modRM - insn->spec->modifierBase; + break; + } +} + +/* + * readOpcodeRegister - Reads an operand from the opcode field of an + * instruction and interprets it appropriately given the operand width. + * Handles AddRegFrm instructions. + * + * @param insn - See readOpcodeModifier(). + * @param size - The width (in bytes) of the register being specified. + * 1 means AL and friends, 2 means AX, 4 means EAX, and 8 means + * RAX. + */ +static void readOpcodeRegister(struct InternalInstruction* insn, uint8_t size) { + dbgprintf(insn, "readOpcodeRegister()"); + + readOpcodeModifier(insn); + + if (size == 0) + size = insn->registerSize; + + switch (size) { + case 1: + insn->opcodeRegister = (Reg)(MODRM_REG_AL + ((bFromREX(insn->rexPrefix) << 3) + | insn->opcodeModifier)); + if(insn->rexPrefix && + insn->opcodeRegister >= MODRM_REG_AL + 0x4 && + insn->opcodeRegister < MODRM_REG_AL + 0x8) { + insn->opcodeRegister = (Reg)(MODRM_REG_SPL + + (insn->opcodeRegister - MODRM_REG_AL - 4)); + } + + break; + case 2: + insn->opcodeRegister = (Reg)(MODRM_REG_AX + + ((bFromREX(insn->rexPrefix) << 3) + | insn->opcodeModifier)); + break; + case 4: + insn->opcodeRegister = (Reg)(MODRM_REG_EAX + + + ((bFromREX(insn->rexPrefix) << 3) + | insn->opcodeModifier)); + break; + case 8: + insn->opcodeRegister = (Reg)(MODRM_REG_RAX + + ((bFromREX(insn->rexPrefix) << 3) + | insn->opcodeModifier)); + break; + } +} + +/* + * readImmediate - Consumes an immediate operand from an instruction, given the + * desired operand size. + * + * @param insn - The instruction whose operand is to be read. + * @param size - The width (in bytes) of the operand. + * @return - 0 if the immediate was successfully consumed; nonzero + * otherwise. 
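 *
 * For example, assuming 32-bit mode with an operand-size prefix (so
 * insn->immediateSize is 2), readImmediate(insn, 0) consumes two bytes
 * via consumeUInt16() and stores them in insn->immediates[0].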
+ */ +static int readImmediate(struct InternalInstruction* insn, uint8_t size) { + uint8_t imm8; + uint16_t imm16; + uint32_t imm32; + uint64_t imm64; + + dbgprintf(insn, "readImmediate()"); + + if (insn->numImmediatesConsumed == 2) + unreachable("Already consumed two immediates"); + + if (size == 0) + size = insn->immediateSize; + else + insn->immediateSize = size; + + switch (size) { + case 1: + if (consumeByte(insn, &imm8)) + return -1; + insn->immediates[insn->numImmediatesConsumed] = imm8; + break; + case 2: + if (consumeUInt16(insn, &imm16)) + return -1; + insn->immediates[insn->numImmediatesConsumed] = imm16; + break; + case 4: + if (consumeUInt32(insn, &imm32)) + return -1; + insn->immediates[insn->numImmediatesConsumed] = imm32; + break; + case 8: + if (consumeUInt64(insn, &imm64)) + return -1; + insn->immediates[insn->numImmediatesConsumed] = imm64; + break; + } + + insn->numImmediatesConsumed++; + + return 0; +} + +/* + * readOperands - Consults the specifier for an instruction and consumes all + * operands for that instruction, interpreting them as it goes. + * + * @param insn - The instruction whose operands are to be read and interpreted. + * @return - 0 if all operands could be read; nonzero otherwise. + */ +static int readOperands(struct InternalInstruction* insn) { + int index; + + dbgprintf(insn, "readOperands()"); + + for (index = 0; index < X86_MAX_OPERANDS; ++index) { + switch (insn->spec->operands[index].encoding) { + case ENCODING_NONE: + break; + case ENCODING_REG: + case ENCODING_RM: + if (readModRM(insn)) + return -1; + if (fixupReg(insn, &insn->spec->operands[index])) + return -1; + break; + case ENCODING_CB: + case ENCODING_CW: + case ENCODING_CD: + case ENCODING_CP: + case ENCODING_CO: + case ENCODING_CT: + dbgprintf(insn, "We currently don't handle code-offset encodings"); + return -1; + case ENCODING_IB: + if (readImmediate(insn, 1)) + return -1; + break; + case ENCODING_IW: + if (readImmediate(insn, 2)) + return -1; + break; + case ENCODING_ID: + if (readImmediate(insn, 4)) + return -1; + break; + case ENCODING_IO: + if (readImmediate(insn, 8)) + return -1; + break; + case ENCODING_Iv: + readImmediate(insn, insn->immediateSize); + break; + case ENCODING_Ia: + readImmediate(insn, insn->addressSize); + break; + case ENCODING_RB: + readOpcodeRegister(insn, 1); + break; + case ENCODING_RW: + readOpcodeRegister(insn, 2); + break; + case ENCODING_RD: + readOpcodeRegister(insn, 4); + break; + case ENCODING_RO: + readOpcodeRegister(insn, 8); + break; + case ENCODING_Rv: + readOpcodeRegister(insn, 0); + break; + case ENCODING_I: + readOpcodeModifier(insn); + break; + case ENCODING_DUP: + break; + default: + dbgprintf(insn, "Encountered an operand with an unknown encoding."); + return -1; + } + } + + return 0; +} + +/* + * decodeInstruction - Reads and interprets a full instruction provided by the + * user. + * + * @param insn - A pointer to the instruction to be populated. Must be + * pre-allocated. + * @param reader - The function to be used to read the instruction's bytes. + * @param readerArg - A generic argument to be passed to the reader to store + * any internal state. + * @param logger - If non-NULL, the function to be used to write log messages + * and warnings. + * @param loggerArg - A generic argument to be passed to the logger to store + * any internal state. + * @param startLoc - The address (in the reader's address space) of the first + * byte in the instruction. 
+ * @param mode - The mode (real mode, IA-32e, or IA-32e in 64-bit mode) to + * decode the instruction in. + * @return - 0 if the instruction's memory could be read; nonzero if + * not. + */ +int decodeInstruction(struct InternalInstruction* insn, + byteReader_t reader, + void* readerArg, + dlog_t logger, + void* loggerArg, + uint64_t startLoc, + DisassemblerMode mode) { + memset(insn, 0, sizeof(struct InternalInstruction)); + + insn->reader = reader; + insn->readerArg = readerArg; + insn->dlog = logger; + insn->dlogArg = loggerArg; + insn->startLocation = startLoc; + insn->readerCursor = startLoc; + insn->mode = mode; + insn->numImmediatesConsumed = 0; + + if (readPrefixes(insn) || + readOpcode(insn) || + getID(insn) || + insn->instructionID == 0 || + readOperands(insn)) + return -1; + + insn->length = insn->readerCursor - insn->startLocation; + + dbgprintf(insn, "Read from 0x%llx to 0x%llx: length %llu", + startLoc, insn->readerCursor, insn->length); + + if (insn->length > 15) + dbgprintf(insn, "Instruction exceeds 15-byte limit"); + + return 0; +} diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h new file mode 100644 index 0000000..c03c07a --- /dev/null +++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h @@ -0,0 +1,515 @@ +/*===- X86DisassemblerDecoder.h - Disassembler decoder -------------*- C -*-==* + * + * The LLVM Compiler Infrastructure + * + * This file is distributed under the University of Illinois Open Source + * License. See LICENSE.TXT for details. + * + *===----------------------------------------------------------------------===* + * + * This file is part of the X86 Disassembler. + * It contains the public interface of the instruction decoder. + * Documentation for the disassembler can be found in X86Disassembler.h. + * + *===----------------------------------------------------------------------===*/ + +#ifndef X86DISASSEMBLERDECODER_H +#define X86DISASSEMBLERDECODER_H + +#ifdef __cplusplus +extern "C" { +#endif + +#define INSTRUCTION_SPECIFIER_FIELDS \ + const char* name; + +#define INSTRUCTION_IDS \ + InstrUID* instructionIDs; + +#include "X86DisassemblerDecoderCommon.h" + +#undef INSTRUCTION_SPECIFIER_FIELDS +#undef INSTRUCTION_IDS + +/* + * Accessor functions for various fields of an Intel instruction + */ +#define modFromModRM(modRM) ((modRM & 0xc0) >> 6) +#define regFromModRM(modRM) ((modRM & 0x38) >> 3) +#define rmFromModRM(modRM) (modRM & 0x7) +#define scaleFromSIB(sib) ((sib & 0xc0) >> 6) +#define indexFromSIB(sib) ((sib & 0x38) >> 3) +#define baseFromSIB(sib) (sib & 0x7) +#define wFromREX(rex) ((rex & 0x8) >> 3) +#define rFromREX(rex) ((rex & 0x4) >> 2) +#define xFromREX(rex) ((rex & 0x2) >> 1) +#define bFromREX(rex) (rex & 0x1) + +/* + * These enums represent Intel registers for use by the decoder. 
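 *
 * Each list below is expanded with a caller-defined ENTRY macro; the Reg
 * enum later in this file, for instance, uses the pattern
 *
 *   #define ENTRY(x) MODRM_REG_##x,
 *   ALL_REGS
 *   #undef ENTRY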
+ */ + +#define REGS_8BIT \ + ENTRY(AL) \ + ENTRY(CL) \ + ENTRY(DL) \ + ENTRY(BL) \ + ENTRY(AH) \ + ENTRY(CH) \ + ENTRY(DH) \ + ENTRY(BH) \ + ENTRY(R8B) \ + ENTRY(R9B) \ + ENTRY(R10B) \ + ENTRY(R11B) \ + ENTRY(R12B) \ + ENTRY(R13B) \ + ENTRY(R14B) \ + ENTRY(R15B) \ + ENTRY(SPL) \ + ENTRY(BPL) \ + ENTRY(SIL) \ + ENTRY(DIL) + +#define EA_BASES_16BIT \ + ENTRY(BX_SI) \ + ENTRY(BX_DI) \ + ENTRY(BP_SI) \ + ENTRY(BP_DI) \ + ENTRY(SI) \ + ENTRY(DI) \ + ENTRY(BP) \ + ENTRY(BX) \ + ENTRY(R8W) \ + ENTRY(R9W) \ + ENTRY(R10W) \ + ENTRY(R11W) \ + ENTRY(R12W) \ + ENTRY(R13W) \ + ENTRY(R14W) \ + ENTRY(R15W) + +#define REGS_16BIT \ + ENTRY(AX) \ + ENTRY(CX) \ + ENTRY(DX) \ + ENTRY(BX) \ + ENTRY(SP) \ + ENTRY(BP) \ + ENTRY(SI) \ + ENTRY(DI) \ + ENTRY(R8W) \ + ENTRY(R9W) \ + ENTRY(R10W) \ + ENTRY(R11W) \ + ENTRY(R12W) \ + ENTRY(R13W) \ + ENTRY(R14W) \ + ENTRY(R15W) + +#define EA_BASES_32BIT \ + ENTRY(EAX) \ + ENTRY(ECX) \ + ENTRY(EDX) \ + ENTRY(EBX) \ + ENTRY(sib) \ + ENTRY(EBP) \ + ENTRY(ESI) \ + ENTRY(EDI) \ + ENTRY(R8D) \ + ENTRY(R9D) \ + ENTRY(R10D) \ + ENTRY(R11D) \ + ENTRY(R12D) \ + ENTRY(R13D) \ + ENTRY(R14D) \ + ENTRY(R15D) + +#define REGS_32BIT \ + ENTRY(EAX) \ + ENTRY(ECX) \ + ENTRY(EDX) \ + ENTRY(EBX) \ + ENTRY(ESP) \ + ENTRY(EBP) \ + ENTRY(ESI) \ + ENTRY(EDI) \ + ENTRY(R8D) \ + ENTRY(R9D) \ + ENTRY(R10D) \ + ENTRY(R11D) \ + ENTRY(R12D) \ + ENTRY(R13D) \ + ENTRY(R14D) \ + ENTRY(R15D) + +#define EA_BASES_64BIT \ + ENTRY(RAX) \ + ENTRY(RCX) \ + ENTRY(RDX) \ + ENTRY(RBX) \ + ENTRY(sib64) \ + ENTRY(RBP) \ + ENTRY(RSI) \ + ENTRY(RDI) \ + ENTRY(R8) \ + ENTRY(R9) \ + ENTRY(R10) \ + ENTRY(R11) \ + ENTRY(R12) \ + ENTRY(R13) \ + ENTRY(R14) \ + ENTRY(R15) + +#define REGS_64BIT \ + ENTRY(RAX) \ + ENTRY(RCX) \ + ENTRY(RDX) \ + ENTRY(RBX) \ + ENTRY(RSP) \ + ENTRY(RBP) \ + ENTRY(RSI) \ + ENTRY(RDI) \ + ENTRY(R8) \ + ENTRY(R9) \ + ENTRY(R10) \ + ENTRY(R11) \ + ENTRY(R12) \ + ENTRY(R13) \ + ENTRY(R14) \ + ENTRY(R15) + +#define REGS_MMX \ + ENTRY(MM0) \ + ENTRY(MM1) \ + ENTRY(MM2) \ + ENTRY(MM3) \ + ENTRY(MM4) \ + ENTRY(MM5) \ + ENTRY(MM6) \ + ENTRY(MM7) + +#define REGS_XMM \ + ENTRY(XMM0) \ + ENTRY(XMM1) \ + ENTRY(XMM2) \ + ENTRY(XMM3) \ + ENTRY(XMM4) \ + ENTRY(XMM5) \ + ENTRY(XMM6) \ + ENTRY(XMM7) \ + ENTRY(XMM8) \ + ENTRY(XMM9) \ + ENTRY(XMM10) \ + ENTRY(XMM11) \ + ENTRY(XMM12) \ + ENTRY(XMM13) \ + ENTRY(XMM14) \ + ENTRY(XMM15) + +#define REGS_SEGMENT \ + ENTRY(ES) \ + ENTRY(CS) \ + ENTRY(SS) \ + ENTRY(DS) \ + ENTRY(FS) \ + ENTRY(GS) + +#define REGS_DEBUG \ + ENTRY(DR0) \ + ENTRY(DR1) \ + ENTRY(DR2) \ + ENTRY(DR3) \ + ENTRY(DR4) \ + ENTRY(DR5) \ + ENTRY(DR6) \ + ENTRY(DR7) + +#define REGS_CONTROL_32BIT \ + ENTRY(ECR0) \ + ENTRY(ECR1) \ + ENTRY(ECR2) \ + ENTRY(ECR3) \ + ENTRY(ECR4) \ + ENTRY(ECR5) \ + ENTRY(ECR6) \ + ENTRY(ECR7) + +#define REGS_CONTROL_64BIT \ + ENTRY(RCR0) \ + ENTRY(RCR1) \ + ENTRY(RCR2) \ + ENTRY(RCR3) \ + ENTRY(RCR4) \ + ENTRY(RCR5) \ + ENTRY(RCR6) \ + ENTRY(RCR7) \ + ENTRY(RCR8) + +#define ALL_EA_BASES \ + EA_BASES_16BIT \ + EA_BASES_32BIT \ + EA_BASES_64BIT + +#define ALL_SIB_BASES \ + REGS_32BIT \ + REGS_64BIT + +#define ALL_REGS \ + REGS_8BIT \ + REGS_16BIT \ + REGS_32BIT \ + REGS_64BIT \ + REGS_MMX \ + REGS_XMM \ + REGS_SEGMENT \ + REGS_DEBUG \ + REGS_CONTROL_32BIT \ + REGS_CONTROL_64BIT \ + ENTRY(RIP) + +/* + * EABase - All possible values of the base field for effective-address + * computations, a.k.a. the Mod and R/M fields of the ModR/M byte. We + * distinguish between bases (EA_BASE_*) and registers that just happen to be + * referred to when Mod == 0b11 (EA_REG_*). 
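 * For example, readModRM() maps a memory operand (Mod != 0b11) to a value
 * in the EA_BASE_* range, but maps a register operand (Mod == 0b11) to
 * insn->eaRegBase + rm, which lands in the EA_REG_* range.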
+ */ +typedef enum { + EA_BASE_NONE, +#define ENTRY(x) EA_BASE_##x, + ALL_EA_BASES +#undef ENTRY +#define ENTRY(x) EA_REG_##x, + ALL_REGS +#undef ENTRY + EA_max +} EABase; + +/* + * SIBIndex - All possible values of the SIB index field. + * Borrows entries from ALL_EA_BASES with the special case that + * sib is synonymous with NONE. + */ +typedef enum { + SIB_INDEX_NONE, +#define ENTRY(x) SIB_INDEX_##x, + ALL_EA_BASES +#undef ENTRY + SIB_INDEX_max +} SIBIndex; + +/* + * SIBBase - All possible values of the SIB base field. + */ +typedef enum { + SIB_BASE_NONE, +#define ENTRY(x) SIB_BASE_##x, + ALL_SIB_BASES +#undef ENTRY + SIB_BASE_max +} SIBBase; + +/* + * EADisplacement - Possible displacement types for effective-address + * computations. + */ +typedef enum { + EA_DISP_NONE, + EA_DISP_8, + EA_DISP_16, + EA_DISP_32 +} EADisplacement; + +/* + * Reg - All possible values of the reg field in the ModR/M byte. + */ +typedef enum { +#define ENTRY(x) MODRM_REG_##x, + ALL_REGS +#undef ENTRY + MODRM_REG_max +} Reg; + +/* + * SegmentOverride - All possible segment overrides. + */ +typedef enum { + SEG_OVERRIDE_NONE, + SEG_OVERRIDE_CS, + SEG_OVERRIDE_SS, + SEG_OVERRIDE_DS, + SEG_OVERRIDE_ES, + SEG_OVERRIDE_FS, + SEG_OVERRIDE_GS, + SEG_OVERRIDE_max +} SegmentOverride; + +typedef uint8_t BOOL; + +/* + * byteReader_t - Type for the byte reader that the consumer must provide to + * the decoder. Reads a single byte from the instruction's address space. + * @param arg - A baton that the consumer can associate with any internal + * state that it needs. + * @param byte - A pointer to a single byte in memory that should be set to + * contain the value at address. + * @param address - The address in the instruction's address space that should + * be read from. + * @return - -1 if the byte cannot be read for any reason; 0 otherwise. + */ +typedef int (*byteReader_t)(void* arg, uint8_t* byte, uint64_t address); + +/* + * dlog_t - Type for the logging function that the consumer can provide to + * get debugging output from the decoder. + * @param arg - A baton that the consumer can associate with any internal + * state that it needs. + * @param log - A string that contains the message. Will be reused after + * the logger returns. + */ +typedef void (*dlog_t)(void* arg, const char *log); + +/* + * The x86 internal instruction, which is produced by the decoder. 
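 *
 * A minimal sketch of producing one (the buffer-backed reader, the byte
 * sequence, and the handleDecoded() consumer are hypothetical; error
 * handling elided):
 *
 *   static int bufReader(void* arg, uint8_t* byte, uint64_t address) {
 *     *byte = ((const uint8_t*)arg)[address];
 *     return 0;
 *   }
 *
 *   uint8_t bytes[] = { 0x01, 0xd8 };  // add %ebx, %eax
 *   struct InternalInstruction insn;
 *   if (!decodeInstruction(&insn, bufReader, bytes, NULL, NULL, 0,
 *                          MODE_32BIT))
 *     handleDecoded(insn.instructionID, insn.length);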
+ */ +struct InternalInstruction { + /* Reader interface (C) */ + byteReader_t reader; + /* Opaque value passed to the reader */ + void* readerArg; + /* The address of the next byte to read via the reader */ + uint64_t readerCursor; + + /* Logger interface (C) */ + dlog_t dlog; + /* Opaque value passed to the logger */ + void* dlogArg; + + /* General instruction information */ + + /* The mode to disassemble for (64-bit, protected, real) */ + DisassemblerMode mode; + /* The start of the instruction, usable with the reader */ + uint64_t startLocation; + /* The length of the instruction, in bytes */ + size_t length; + + /* Prefix state */ + + /* 1 if the prefix byte corresponding to the entry is present; 0 if not */ + uint8_t prefixPresent[0x100]; + /* contains the location (for use with the reader) of the prefix byte */ + uint64_t prefixLocations[0x100]; + /* The value of the REX prefix, if present */ + uint8_t rexPrefix; + /* The location of the REX prefix */ + uint64_t rexLocation; + /* The location where a mandatory prefix would have to be (i.e., right before + the opcode, or right before the REX prefix if one is present) */ + uint64_t necessaryPrefixLocation; + /* The segment override type */ + SegmentOverride segmentOverride; + + /* Sizes of various critical pieces of data */ + uint8_t registerSize; + uint8_t addressSize; + uint8_t displacementSize; + uint8_t immediateSize; + + /* opcode state */ + + /* The value of the two-byte escape prefix (usually 0x0f) */ + uint8_t twoByteEscape; + /* The value of the three-byte escape prefix (usually 0x38 or 0x3a) */ + uint8_t threeByteEscape; + /* The last byte of the opcode, not counting any ModR/M extension */ + uint8_t opcode; + /* The ModR/M byte of the instruction, if it is an opcode extension */ + uint8_t modRMExtension; + + /* decode state */ + + /* The type of opcode, used for indexing into the array of decode tables */ + OpcodeType opcodeType; + /* The instruction ID, extracted from the decode table */ + uint16_t instructionID; + /* The specifier for the instruction, from the instruction info table */ + struct InstructionSpecifier* spec; + + /* state for additional bytes, consumed during operand decode. Pattern: + consumed___ indicates that the byte was already consumed and does not + need to be consumed again */ + + /* The ModR/M byte, which contains most register operands and some portion of + all memory operands */ + BOOL consumedModRM; + uint8_t modRM; + + /* The SIB byte, used for more complex 32- or 64-bit memory operands */ + BOOL consumedSIB; + uint8_t sib; + + /* The displacement, used for memory operands */ + BOOL consumedDisplacement; + int32_t displacement; + + /* Immediates. There can be two in some cases */ + uint8_t numImmediatesConsumed; + uint8_t numImmediatesTranslated; + uint64_t immediates[2]; + + /* A register or immediate operand encoded into the opcode */ + BOOL consumedOpcodeModifier; + uint8_t opcodeModifier; + Reg opcodeRegister; + + /* Portions of the ModR/M byte */ + + /* These fields determine the allowable values for the ModR/M fields, which + depend on operand and address widths */ + EABase eaBaseBase; + EABase eaRegBase; + Reg regBase; + + /* The Mod and R/M fields can encode a base for an effective address, or a + register. 
These are separated into two fields here */ + EABase eaBase; + EADisplacement eaDisplacement; + /* The reg field always encodes a register */ + Reg reg; + + /* SIB state */ + SIBIndex sibIndex; + uint8_t sibScale; + SIBBase sibBase; +}; + +/* decodeInstruction - Decode one instruction and store the decoding results in + * a buffer provided by the consumer. + * @param insn - The buffer to store the instruction in. Allocated by the + * consumer. + * @param reader - The byteReader_t for the bytes to be read. + * @param readerArg - An argument to pass to the reader for storing context + * specific to the consumer. May be NULL. + * @param logger - The dlog_t to be used in printing status messages from the + * disassembler. May be NULL. + * @param loggerArg - An argument to pass to the logger for storing context + * specific to the logger. May be NULL. + * @param startLoc - The address (in the reader's address space) of the first + * byte in the instruction. + * @param mode - The mode (16-bit, 32-bit, 64-bit) to decode in. + * @return - Nonzero if there was an error during decode, 0 otherwise. + */ +int decodeInstruction(struct InternalInstruction* insn, + byteReader_t reader, + void* readerArg, + dlog_t logger, + void* loggerArg, + uint64_t startLoc, + DisassemblerMode mode); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h b/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h new file mode 100644 index 0000000..c213f89 --- /dev/null +++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h @@ -0,0 +1,355 @@ +/*===- X86DisassemblerDecoderCommon.h - Disassembler decoder -------*- C -*-==* + * + * The LLVM Compiler Infrastructure + * + * This file is distributed under the University of Illinois Open Source + * License. See LICENSE.TXT for details. + * + *===----------------------------------------------------------------------===* + * + * This file is part of the X86 Disassembler. + * It contains common definitions used by both the disassembler and the table + * generator. + * Documentation for the disassembler can be found in X86Disassembler.h. + * + *===----------------------------------------------------------------------===*/ + +/* + * This header file provides those definitions that need to be shared between + * the decoder and the table generator in a C-friendly manner. + */ + +#ifndef X86DISASSEMBLERDECODERCOMMON_H +#define X86DISASSEMBLERDECODERCOMMON_H + +#include "llvm/System/DataTypes.h" + +#define INSTRUCTIONS_SYM x86DisassemblerInstrSpecifiers +#define CONTEXTS_SYM x86DisassemblerContexts +#define ONEBYTE_SYM x86DisassemblerOneByteOpcodes +#define TWOBYTE_SYM x86DisassemblerTwoByteOpcodes +#define THREEBYTE38_SYM x86DisassemblerThreeByte38Opcodes +#define THREEBYTE3A_SYM x86DisassemblerThreeByte3AOpcodes + +#define INSTRUCTIONS_STR "x86DisassemblerInstrSpecifiers" +#define CONTEXTS_STR "x86DisassemblerContexts" +#define ONEBYTE_STR "x86DisassemblerOneByteOpcodes" +#define TWOBYTE_STR "x86DisassemblerTwoByteOpcodes" +#define THREEBYTE38_STR "x86DisassemblerThreeByte38Opcodes" +#define THREEBYTE3A_STR "x86DisassemblerThreeByte3AOpcodes" + +/* + * Attributes of an instruction that must be known before the opcode can be + * processed correctly. Most of these indicate the presence of particular + * prefixes, but ATTR_64BIT is simply an attribute of the decoding context. 
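 *
 * For example (a sketch mirroring getID() in the decoder source), an
 * instruction decoded in 64-bit mode that carries both REX.W and a 0x66
 * prefix would be looked up with
 *
 *   attrMask = ATTR_64BIT | ATTR_REXW | ATTR_OPSIZE;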
+ */ +#define ATTRIBUTE_BITS \ + ENUM_ENTRY(ATTR_NONE, 0x00) \ + ENUM_ENTRY(ATTR_64BIT, 0x01) \ + ENUM_ENTRY(ATTR_XS, 0x02) \ + ENUM_ENTRY(ATTR_XD, 0x04) \ + ENUM_ENTRY(ATTR_REXW, 0x08) \ + ENUM_ENTRY(ATTR_OPSIZE, 0x10) + +#define ENUM_ENTRY(n, v) n = v, +enum attributeBits { + ATTRIBUTE_BITS + ATTR_max +}; +#undef ENUM_ENTRY + +/* + * Combinations of the above attributes that are relevant to instruction + * decode. Although other combinations are possible, they can be reduced to + * these without affecting the ultimately decoded instruction. + */ + +/* Class name Rank Rationale for rank assignment */ +#define INSTRUCTION_CONTEXTS \ + ENUM_ENTRY(IC, 0, "says nothing about the instruction") \ + ENUM_ENTRY(IC_64BIT, 1, "says the instruction applies in " \ + "64-bit mode but no more") \ + ENUM_ENTRY(IC_OPSIZE, 3, "requires an OPSIZE prefix, so " \ + "operands change width") \ + ENUM_ENTRY(IC_XD, 2, "may say something about the opcode " \ + "but not the operands") \ + ENUM_ENTRY(IC_XS, 2, "may say something about the opcode " \ + "but not the operands") \ + ENUM_ENTRY(IC_64BIT_REXW, 4, "requires a REX.W prefix, so operands "\ + "change width; overrides IC_OPSIZE") \ + ENUM_ENTRY(IC_64BIT_OPSIZE, 3, "Just as meaningful as IC_OPSIZE") \ + ENUM_ENTRY(IC_64BIT_XD, 5, "XD instructions are SSE; REX.W is " \ + "secondary") \ + ENUM_ENTRY(IC_64BIT_XS, 5, "Just as meaningful as IC_64BIT_XD") \ + ENUM_ENTRY(IC_64BIT_REXW_XS, 6, "OPSIZE could mean a different " \ + "opcode") \ + ENUM_ENTRY(IC_64BIT_REXW_XD, 6, "Just as meaningful as " \ + "IC_64BIT_REXW_XS") \ + ENUM_ENTRY(IC_64BIT_REXW_OPSIZE, 7, "The Dynamic Duo! Prefer over all " \ + "else because this changes most " \ + "operands' meaning") + +#define ENUM_ENTRY(n, r, d) n, +typedef enum { + INSTRUCTION_CONTEXTS + IC_max +} InstructionContext; +#undef ENUM_ENTRY + +/* + * Opcode types, which determine which decode table to use, both in the Intel + * manual and also for the decoder. + */ +typedef enum { + ONEBYTE = 0, + TWOBYTE = 1, + THREEBYTE_38 = 2, + THREEBYTE_3A = 3 +} OpcodeType; + +/* + * The following structs are used for the hierarchical decode table. After + * determining the instruction's class (i.e., which IC_* constant applies to + * it), the decoder reads the opcode. Some instructions require specific + * values of the ModR/M byte, so the ModR/M byte indexes into the final table. + * + * If a ModR/M byte is not required, "required" is left unset, and the values + * for each instructionID are identical. + */ + +typedef uint16_t InstrUID; + +/* + * ModRMDecisionType - describes the type of ModR/M decision, allowing the + * consumer to determine the number of entries in it. + * + * MODRM_ONEENTRY - No matter what the value of the ModR/M byte is, the decoded + * instruction is the same. + * MODRM_SPLITRM - If the ModR/M byte is between 0x00 and 0xbf, the opcode + * corresponds to one instruction; otherwise, it corresponds to + * a different instruction. + * MODRM_FULL - Potentially, each value of the ModR/M byte could correspond + * to a different instruction. + */ + +#define MODRMTYPES \ + ENUM_ENTRY(MODRM_ONEENTRY) \ + ENUM_ENTRY(MODRM_SPLITRM) \ + ENUM_ENTRY(MODRM_FULL) + +#define ENUM_ENTRY(n) n, +typedef enum { + MODRMTYPES + MODRM_max +} ModRMDecisionType; +#undef ENUM_ENTRY + +/* + * ModRMDecision - Specifies whether a ModR/M byte is needed and (if so) which + * instruction each possible value of the ModR/M byte corresponds to. Once + * this information is known, we have narrowed down to a single instruction. 
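 *
 * A sketch of the lookup the decoder performs, assuming the emitted
 * ONEBYTE_SYM table has the struct ContextDecision layout declared below:
 *
 *   struct ModRMDecision* dec =
 *     &ONEBYTE_SYM.opcodeDecisions[insnContext].modRMDecisions[opcode];
 *   InstrUID uid;
 *   switch (dec->modrm_type) {
 *   case MODRM_ONEENTRY: uid = dec->instructionIDs[0];             break;
 *   case MODRM_SPLITRM:  uid = dec->instructionIDs[modRM >= 0xc0]; break;
 *   case MODRM_FULL:     uid = dec->instructionIDs[modRM];         break;
 *   }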
+ */ +struct ModRMDecision { + uint8_t modrm_type; + + /* The macro below must be defined wherever this file is included. */ + INSTRUCTION_IDS +}; + +/* + * OpcodeDecision - Specifies which set of ModR/M->instruction tables to look at + * given a particular opcode. + */ +struct OpcodeDecision { + struct ModRMDecision modRMDecisions[256]; +}; + +/* + * ContextDecision - Specifies which opcode->instruction tables to look at given + * a particular context (set of attributes). Since there are many possible + * contexts, the decoder first uses CONTEXTS_SYM to determine which context + * applies given a specific set of attributes. Hence there are only IC_max + * entries in this table, rather than 2^(ATTR_max). + */ +struct ContextDecision { + struct OpcodeDecision opcodeDecisions[IC_max]; +}; + +/* + * Physical encodings of instruction operands. + */ + +#define ENCODINGS \ + ENUM_ENTRY(ENCODING_NONE, "") \ + ENUM_ENTRY(ENCODING_REG, "Register operand in ModR/M byte.") \ + ENUM_ENTRY(ENCODING_RM, "R/M operand in ModR/M byte.") \ + ENUM_ENTRY(ENCODING_CB, "1-byte code offset (possible new CS value)") \ + ENUM_ENTRY(ENCODING_CW, "2-byte") \ + ENUM_ENTRY(ENCODING_CD, "4-byte") \ + ENUM_ENTRY(ENCODING_CP, "6-byte") \ + ENUM_ENTRY(ENCODING_CO, "8-byte") \ + ENUM_ENTRY(ENCODING_CT, "10-byte") \ + ENUM_ENTRY(ENCODING_IB, "1-byte immediate") \ + ENUM_ENTRY(ENCODING_IW, "2-byte") \ + ENUM_ENTRY(ENCODING_ID, "4-byte") \ + ENUM_ENTRY(ENCODING_IO, "8-byte") \ + ENUM_ENTRY(ENCODING_RB, "(AL..DIL, R8L..R15L) Register code added to " \ + "the opcode byte") \ + ENUM_ENTRY(ENCODING_RW, "(AX..DI, R8W..R15W)") \ + ENUM_ENTRY(ENCODING_RD, "(EAX..EDI, R8D..R15D)") \ + ENUM_ENTRY(ENCODING_RO, "(RAX..RDI, R8..R15)") \ + ENUM_ENTRY(ENCODING_I, "Position on floating-point stack added to the " \ + "opcode byte") \ + \ + ENUM_ENTRY(ENCODING_Iv, "Immediate of operand size") \ + ENUM_ENTRY(ENCODING_Ia, "Immediate of address size") \ + ENUM_ENTRY(ENCODING_Rv, "Register code of operand size added to the " \ + "opcode byte") \ + ENUM_ENTRY(ENCODING_DUP, "Duplicate of another operand; ID is encoded " \ + "in type") + +#define ENUM_ENTRY(n, d) n, + typedef enum { + ENCODINGS + ENCODING_max + } OperandEncoding; +#undef ENUM_ENTRY + +/* + * Semantic interpretations of instruction operands. 
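 *
 * Entries below whose description names only a width (e.g. TYPE_REL16,
 * "2-byte") abbreviate the fuller description of the entry above them.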
+ */ + +#define TYPES \ + ENUM_ENTRY(TYPE_NONE, "") \ + ENUM_ENTRY(TYPE_REL8, "1-byte immediate address") \ + ENUM_ENTRY(TYPE_REL16, "2-byte") \ + ENUM_ENTRY(TYPE_REL32, "4-byte") \ + ENUM_ENTRY(TYPE_REL64, "8-byte") \ + ENUM_ENTRY(TYPE_PTR1616, "2+2-byte segment+offset address") \ + ENUM_ENTRY(TYPE_PTR1632, "2+4-byte") \ + ENUM_ENTRY(TYPE_PTR1664, "2+8-byte") \ + ENUM_ENTRY(TYPE_R8, "1-byte register operand") \ + ENUM_ENTRY(TYPE_R16, "2-byte") \ + ENUM_ENTRY(TYPE_R32, "4-byte") \ + ENUM_ENTRY(TYPE_R64, "8-byte") \ + ENUM_ENTRY(TYPE_IMM8, "1-byte immediate operand") \ + ENUM_ENTRY(TYPE_IMM16, "2-byte") \ + ENUM_ENTRY(TYPE_IMM32, "4-byte") \ + ENUM_ENTRY(TYPE_IMM64, "8-byte") \ + ENUM_ENTRY(TYPE_RM8, "1-byte register or memory operand") \ + ENUM_ENTRY(TYPE_RM16, "2-byte") \ + ENUM_ENTRY(TYPE_RM32, "4-byte") \ + ENUM_ENTRY(TYPE_RM64, "8-byte") \ + ENUM_ENTRY(TYPE_M, "Memory operand") \ + ENUM_ENTRY(TYPE_M8, "1-byte") \ + ENUM_ENTRY(TYPE_M16, "2-byte") \ + ENUM_ENTRY(TYPE_M32, "4-byte") \ + ENUM_ENTRY(TYPE_M64, "8-byte") \ + ENUM_ENTRY(TYPE_LEA, "Effective address") \ + ENUM_ENTRY(TYPE_M128, "16-byte (SSE/SSE2)") \ + ENUM_ENTRY(TYPE_M1616, "2+2-byte segment+offset address") \ + ENUM_ENTRY(TYPE_M1632, "2+4-byte") \ + ENUM_ENTRY(TYPE_M1664, "2+8-byte") \ + ENUM_ENTRY(TYPE_M16_32, "2+4-byte two-part memory operand (LIDT, LGDT)") \ + ENUM_ENTRY(TYPE_M16_16, "2+2-byte (BOUND)") \ + ENUM_ENTRY(TYPE_M32_32, "4+4-byte (BOUND)") \ + ENUM_ENTRY(TYPE_M16_64, "2+8-byte (LIDT, LGDT)") \ + ENUM_ENTRY(TYPE_MOFFS8, "1-byte memory offset (relative to segment " \ + "base)") \ + ENUM_ENTRY(TYPE_MOFFS16, "2-byte") \ + ENUM_ENTRY(TYPE_MOFFS32, "4-byte") \ + ENUM_ENTRY(TYPE_MOFFS64, "8-byte") \ + ENUM_ENTRY(TYPE_SREG, "Byte with single bit set: 0 = ES, 1 = CS, " \ + "2 = SS, 3 = DS, 4 = FS, 5 = GS") \ + ENUM_ENTRY(TYPE_M32FP, "32-bit IEEE754 memory floating-point operand") \ + ENUM_ENTRY(TYPE_M64FP, "64-bit") \ + ENUM_ENTRY(TYPE_M80FP, "80-bit extended") \ + ENUM_ENTRY(TYPE_M16INT, "2-byte memory integer operand for use in " \ + "floating-point instructions") \ + ENUM_ENTRY(TYPE_M32INT, "4-byte") \ + ENUM_ENTRY(TYPE_M64INT, "8-byte") \ + ENUM_ENTRY(TYPE_ST, "Position on the floating-point stack") \ + ENUM_ENTRY(TYPE_MM, "MMX register operand") \ + ENUM_ENTRY(TYPE_MM32, "4-byte MMX register or memory operand") \ + ENUM_ENTRY(TYPE_MM64, "8-byte") \ + ENUM_ENTRY(TYPE_XMM, "XMM register operand") \ + ENUM_ENTRY(TYPE_XMM32, "4-byte XMM register or memory operand") \ + ENUM_ENTRY(TYPE_XMM64, "8-byte") \ + ENUM_ENTRY(TYPE_XMM128, "16-byte") \ + ENUM_ENTRY(TYPE_XMM0, "Implicit use of XMM0") \ + ENUM_ENTRY(TYPE_SEGMENTREG, "Segment register operand") \ + ENUM_ENTRY(TYPE_DEBUGREG, "Debug register operand") \ + ENUM_ENTRY(TYPE_CR32, "4-byte control register operand") \ + ENUM_ENTRY(TYPE_CR64, "8-byte") \ + \ + ENUM_ENTRY(TYPE_Mv, "Memory operand of operand size") \ + ENUM_ENTRY(TYPE_Rv, "Register operand of operand size") \ + ENUM_ENTRY(TYPE_IMMv, "Immediate operand of operand size") \ + ENUM_ENTRY(TYPE_RELv, "Immediate address of operand size") \ + ENUM_ENTRY(TYPE_DUP0, "Duplicate of operand 0") \ + ENUM_ENTRY(TYPE_DUP1, "operand 1") \ + ENUM_ENTRY(TYPE_DUP2, "operand 2") \ + ENUM_ENTRY(TYPE_DUP3, "operand 3") \ + ENUM_ENTRY(TYPE_DUP4, "operand 4") \ + ENUM_ENTRY(TYPE_M512, "512-bit FPU/MMX/XMM/MXCSR state") + +#define ENUM_ENTRY(n, d) n, +typedef enum { + TYPES + TYPE_max +} OperandType; +#undef ENUM_ENTRY + +/* + * OperandSpecifier - The specification for how to extract and interpret one + * operand. 
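 *
 * For example (a hypothetical entry), the r/m operand of a register-form
 * 32-bit ADD might be described as { ENCODING_RM, TYPE_R32 }, directing
 * readOperands() to parse ModR/M addressing and telling fixupRMValue()
 * how to interpret a register in that slot.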
+ */ +struct OperandSpecifier { + OperandEncoding encoding; + OperandType type; +}; + +/* + * Indicates where the opcode modifier (if any) is to be found. Extended + * opcodes with AddRegFrm have the opcode modifier in the ModR/M byte. + */ + +#define MODIFIER_TYPES \ + ENUM_ENTRY(MODIFIER_NONE) \ + ENUM_ENTRY(MODIFIER_OPCODE) \ + ENUM_ENTRY(MODIFIER_MODRM) + +#define ENUM_ENTRY(n) n, +typedef enum { + MODIFIER_TYPES + MODIFIER_max +} ModifierType; +#undef ENUM_ENTRY + +#define X86_MAX_OPERANDS 5 + +/* + * The specification for how to extract and interpret a full instruction and + * its operands. + */ +struct InstructionSpecifier { + ModifierType modifierType; + uint8_t modifierBase; + struct OperandSpecifier operands[X86_MAX_OPERANDS]; + + /* The macro below must be defined wherever this file is included. */ + INSTRUCTION_SPECIFIER_FIELDS +}; + +/* + * Decoding mode for the Intel disassembler. 16-bit, 32-bit, and 64-bit mode + * are supported, and represent real mode, IA-32e, and IA-32e in 64-bit mode, + * respectively. + */ +typedef enum { + MODE_16BIT, + MODE_32BIT, + MODE_64BIT +} DisassemblerMode; + +#endif diff --git a/lib/Target/X86/Makefile b/lib/Target/X86/Makefile index b311a6e..6098dbf 100644 --- a/lib/Target/X86/Makefile +++ b/lib/Target/X86/Makefile @@ -15,8 +15,8 @@ BUILT_SOURCES = X86GenRegisterInfo.h.inc X86GenRegisterNames.inc \ X86GenRegisterInfo.inc X86GenInstrNames.inc \ X86GenInstrInfo.inc X86GenAsmWriter.inc X86GenAsmMatcher.inc \ X86GenAsmWriter1.inc X86GenDAGISel.inc \ - X86GenFastISel.inc \ - X86GenCallingConv.inc X86GenSubtarget.inc + X86GenDisassemblerTables.inc X86GenFastISel.inc \ + X86GenCallingConv.inc X86GenSubtarget.inc \ DIRS = AsmPrinter AsmParser Disassembler TargetInfo diff --git a/lib/Target/X86/README.txt b/lib/Target/X86/README.txt index 9b7aab8..afd9f53 100644 --- a/lib/Target/X86/README.txt +++ b/lib/Target/X86/README.txt @@ -123,20 +123,6 @@ when it can invert the result of the compare for free. //===---------------------------------------------------------------------===// -How about intrinsics? An example is: - *res = _mm_mulhi_epu16(*A, _mm_mul_epu32(*B, *C)); - -compiles to - pmuludq (%eax), %xmm0 - movl 8(%esp), %eax - movdqa (%eax), %xmm1 - pmulhuw %xmm0, %xmm1 - -The transformation probably requires a X86 specific pass or a DAG combiner -target specific hook. - -//===---------------------------------------------------------------------===// - In many cases, LLVM generates code like this: _test: @@ -1762,6 +1748,11 @@ LBB1_1: ## bb1 cmpl $150, %edi jne LBB1_1 ## bb1 +The issue is that we hoist the cast of "scaler" to long long outside of the +loop, the value comes into the loop as two values, and +RegsForValue::getCopyFromRegs doesn't know how to put an AssertSext on the +constructed BUILD_PAIR which represents the cast value. 
+ //===---------------------------------------------------------------------===// Test instructions can be eliminated by using EFLAGS values from arithmetic diff --git a/lib/Target/X86/X86.td b/lib/Target/X86/X86.td index da467fe..a6e1ca3 100644 --- a/lib/Target/X86/X86.td +++ b/lib/Target/X86/X86.td @@ -63,7 +63,7 @@ def FeatureSSE4A : SubtargetFeature<"sse4a", "HasSSE4A", "true", def FeatureAVX : SubtargetFeature<"avx", "HasAVX", "true", "Enable AVX instructions">; def FeatureFMA3 : SubtargetFeature<"fma3", "HasFMA3", "true", - "Enable three-operand fused multiple-add">; + "Enable three-operand fused multiple-add">; def FeatureFMA4 : SubtargetFeature<"fma4", "HasFMA4", "true", "Enable four-operand fused multiple-add">; diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp index a9a78be..cb82383 100644 --- a/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -50,9 +50,6 @@ #include "llvm/ADT/Statistic.h" using namespace llvm; -#include "llvm/Support/CommandLine.h" -static cl::opt<bool> AvoidDupAddrCompute("x86-avoid-dup-address", cl::Hidden); - STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor"); //===----------------------------------------------------------------------===// @@ -1275,28 +1272,7 @@ bool X86DAGToDAGISel::SelectAddr(SDValue Op, SDValue N, SDValue &Base, SDValue &Scale, SDValue &Index, SDValue &Disp, SDValue &Segment) { X86ISelAddressMode AM; - bool Done = false; - if (AvoidDupAddrCompute && !N.hasOneUse()) { - unsigned Opcode = N.getOpcode(); - if (Opcode != ISD::Constant && Opcode != ISD::FrameIndex && - Opcode != X86ISD::Wrapper && Opcode != X86ISD::WrapperRIP) { - // If we are able to fold N into addressing mode, then we'll allow it even - // if N has multiple uses. In general, addressing computation is used as - // addresses by all of its uses. But watch out for CopyToReg uses, that - // means the address computation is liveout. It will be computed by a LEA - // so we want to avoid computing the address twice. - for (SDNode::use_iterator UI = N.getNode()->use_begin(), - UE = N.getNode()->use_end(); UI != UE; ++UI) { - if (UI->getOpcode() == ISD::CopyToReg) { - MatchAddressBase(N, AM); - Done = true; - break; - } - } - } - } - - if (!Done && MatchAddress(N, AM)) + if (MatchAddress(N, AM)) return false; EVT VT = N.getValueType(); @@ -1891,27 +1867,28 @@ SDNode *X86DAGToDAGISel::Select(SDValue N) { } } - unsigned LoReg, HiReg; + unsigned LoReg, HiReg, ClrReg; unsigned ClrOpcode, SExtOpcode; + EVT ClrVT = NVT; switch (NVT.getSimpleVT().SimpleTy) { default: llvm_unreachable("Unsupported VT!"); case MVT::i8: - LoReg = X86::AL; HiReg = X86::AH; + LoReg = X86::AL; ClrReg = HiReg = X86::AH; ClrOpcode = 0; SExtOpcode = X86::CBW; break; case MVT::i16: LoReg = X86::AX; HiReg = X86::DX; - ClrOpcode = X86::MOV16r0; + ClrOpcode = X86::MOV32r0; ClrReg = X86::EDX; ClrVT = MVT::i32; SExtOpcode = X86::CWD; break; case MVT::i32: - LoReg = X86::EAX; HiReg = X86::EDX; + LoReg = X86::EAX; ClrReg = HiReg = X86::EDX; ClrOpcode = X86::MOV32r0; SExtOpcode = X86::CDQ; break; case MVT::i64: - LoReg = X86::RAX; HiReg = X86::RDX; + LoReg = X86::RAX; ClrReg = HiReg = X86::RDX; ClrOpcode = ~0U; // NOT USED. 
SExtOpcode = X86::CQO; break; @@ -1966,10 +1943,10 @@ SDNode *X86DAGToDAGISel::Select(SDValue N) { MVT::i64, Zero, ClrNode, SubRegNo), 0); } else { - ClrNode = SDValue(CurDAG->getMachineNode(ClrOpcode, dl, NVT), 0); + ClrNode = SDValue(CurDAG->getMachineNode(ClrOpcode, dl, ClrVT), 0); } - InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, HiReg, + InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg, ClrNode, InFlag).getValue(1); } } diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 0517b56..c722fbf 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -980,6 +980,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setTargetDAGCombine(ISD::SRL); setTargetDAGCombine(ISD::STORE); setTargetDAGCombine(ISD::MEMBARRIER); + setTargetDAGCombine(ISD::ZERO_EXTEND); if (Subtarget->is64Bit()) setTargetDAGCombine(ISD::MUL); @@ -4583,7 +4584,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { MVT::v4i32, Vec), Op.getOperand(1))); // Transform it so it match pextrw which produces a 32-bit result. - EVT EltVT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy+1); + EVT EltVT = MVT::i32; SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT, Op.getOperand(0), Op.getOperand(1)); SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract, @@ -5127,12 +5128,9 @@ SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag); else Tys = DAG.getVTList(Op.getValueType(), MVT::Other); - SmallVector<SDValue, 8> Ops; - Ops.push_back(Chain); - Ops.push_back(StackSlot); - Ops.push_back(DAG.getValueType(SrcVT)); + SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) }; SDValue Result = DAG.getNode(useSSE ? X86ISD::FILD_FLAG : X86ISD::FILD, dl, - Tys, &Ops[0], Ops.size()); + Tys, Ops, array_lengthof(Ops)); if (useSSE) { Chain = Result.getValue(1); @@ -5145,13 +5143,10 @@ SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8, false); SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); Tys = DAG.getVTList(MVT::Other); - SmallVector<SDValue, 8> Ops; - Ops.push_back(Chain); - Ops.push_back(Result); - Ops.push_back(StackSlot); - Ops.push_back(DAG.getValueType(Op.getValueType())); - Ops.push_back(InFlag); - Chain = DAG.getNode(X86ISD::FST, dl, Tys, &Ops[0], Ops.size()); + SDValue Ops[] = { + Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag + }; + Chain = DAG.getNode(X86ISD::FST, dl, Tys, Ops, array_lengthof(Ops)); Result = DAG.getLoad(Op.getValueType(), dl, Chain, StackSlot, PseudoSourceValue::getFixedStack(SSFI), 0); } @@ -5752,14 +5747,11 @@ SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) { SDValue Cond = EmitCmp(Op0, Op1, X86CC, DAG); // Use sbb x, x to materialize carry bit into a GPR. - // FIXME: Temporarily disabled since it breaks self-hosting. It's apparently - // miscompiling ARMISelDAGToDAG.cpp. 
- if (0 && !isFP && X86CC == X86::COND_B) { + if (X86CC == X86::COND_B) return DAG.getNode(ISD::AND, dl, MVT::i8, DAG.getNode(X86ISD::SETCC_CARRY, dl, MVT::i8, DAG.getConstant(X86CC, MVT::i8), Cond), DAG.getConstant(1, MVT::i8)); - } return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, DAG.getConstant(X86CC, MVT::i8), Cond); @@ -5949,14 +5941,10 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) { } SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Flag); - SmallVector<SDValue, 4> Ops; // X86ISD::CMOV means set the result (which is operand 1) to the RHS if // condition is true. - Ops.push_back(Op.getOperand(2)); - Ops.push_back(Op.getOperand(1)); - Ops.push_back(CC); - Ops.push_back(Cond); - return DAG.getNode(X86ISD::CMOV, dl, VTs, &Ops[0], Ops.size()); + SDValue Ops[] = { Op.getOperand(2), Op.getOperand(1), CC, Cond }; + return DAG.getNode(X86ISD::CMOV, dl, VTs, Ops, array_lengthof(Ops)); } // isAndOrOfSingleUseSetCCs - Return true if node is an ISD::AND or @@ -6196,7 +6184,8 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl, LowerCallTo(Chain, Type::getVoidTy(*DAG.getContext()), false, false, false, false, 0, CallingConv::C, false, /*isReturnValueUsed=*/false, - DAG.getExternalSymbol(bzeroEntry, IntPtr), Args, DAG, dl); + DAG.getExternalSymbol(bzeroEntry, IntPtr), Args, DAG, dl, + DAG.GetOrdering(Chain.getNode())); return CallResult.second; } @@ -6266,11 +6255,8 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl, InFlag = Chain.getValue(1); SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); - SmallVector<SDValue, 8> Ops; - Ops.push_back(Chain); - Ops.push_back(DAG.getValueType(AVT)); - Ops.push_back(InFlag); - Chain = DAG.getNode(X86ISD::REP_STOS, dl, Tys, &Ops[0], Ops.size()); + SDValue Ops[] = { Chain, DAG.getValueType(AVT), InFlag }; + Chain = DAG.getNode(X86ISD::REP_STOS, dl, Tys, Ops, array_lengthof(Ops)); if (TwoRepStos) { InFlag = Chain.getValue(1); @@ -6283,11 +6269,8 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl, Left, InFlag); InFlag = Chain.getValue(1); Tys = DAG.getVTList(MVT::Other, MVT::Flag); - Ops.clear(); - Ops.push_back(Chain); - Ops.push_back(DAG.getValueType(MVT::i8)); - Ops.push_back(InFlag); - Chain = DAG.getNode(X86ISD::REP_STOS, dl, Tys, &Ops[0], Ops.size()); + SDValue Ops[] = { Chain, DAG.getValueType(MVT::i8), InFlag }; + Chain = DAG.getNode(X86ISD::REP_STOS, dl, Tys, Ops, array_lengthof(Ops)); } else if (BytesLeft) { // Handle the last 1 - 7 bytes. unsigned Offset = SizeVal - BytesLeft; @@ -6351,11 +6334,9 @@ X86TargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl, InFlag = Chain.getValue(1); SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); - SmallVector<SDValue, 8> Ops; - Ops.push_back(Chain); - Ops.push_back(DAG.getValueType(AVT)); - Ops.push_back(InFlag); - SDValue RepMovs = DAG.getNode(X86ISD::REP_MOVS, dl, Tys, &Ops[0], Ops.size()); + SDValue Ops[] = { Chain, DAG.getValueType(AVT), InFlag }; + SDValue RepMovs = DAG.getNode(X86ISD::REP_MOVS, dl, Tys, Ops, + array_lengthof(Ops)); SmallVector<SDValue, 4> Results; Results.push_back(RepMovs); @@ -6979,12 +6960,13 @@ SDValue X86TargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) { Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op); // If src is zero (i.e. bsr sets ZF), returns NumBits. 
- SmallVector<SDValue, 4> Ops; - Ops.push_back(Op); - Ops.push_back(DAG.getConstant(NumBits+NumBits-1, OpVT)); - Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8)); - Ops.push_back(Op.getValue(1)); - Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, &Ops[0], 4); + SDValue Ops[] = { + Op, + DAG.getConstant(NumBits+NumBits-1, OpVT), + DAG.getConstant(X86::COND_E, MVT::i8), + Op.getValue(1) + }; + Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops, array_lengthof(Ops)); // Finally xor with NumBits-1. Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT)); @@ -7011,12 +6993,13 @@ SDValue X86TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) { Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op); // If src is zero (i.e. bsf sets ZF), returns NumBits. - SmallVector<SDValue, 4> Ops; - Ops.push_back(Op); - Ops.push_back(DAG.getConstant(NumBits, OpVT)); - Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8)); - Ops.push_back(Op.getValue(1)); - Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, &Ops[0], 4); + SDValue Ops[] = { + Op, + DAG.getConstant(NumBits, OpVT), + DAG.getConstant(X86::COND_E, MVT::i8), + Op.getValue(1) + }; + Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops, array_lengthof(Ops)); if (VT == MVT::i8) Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op); @@ -9349,6 +9332,32 @@ static SDValue PerformMEMBARRIERCombine(SDNode* N, SelectionDAG &DAG) { } } +static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG) { + // (i32 zext (and (i8 x86isd::setcc_carry), 1)) -> + // (and (i32 x86isd::setcc_carry), 1) + // This eliminates the zext. This transformation is necessary because + // ISD::SETCC is always legalized to i8. + DebugLoc dl = N->getDebugLoc(); + SDValue N0 = N->getOperand(0); + EVT VT = N->getValueType(0); + if (N0.getOpcode() == ISD::AND && + N0.hasOneUse() && + N0.getOperand(0).hasOneUse()) { + SDValue N00 = N0.getOperand(0); + if (N00.getOpcode() != X86ISD::SETCC_CARRY) + return SDValue(); + ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); + if (!C || C->getZExtValue() != 1) + return SDValue(); + return DAG.getNode(ISD::AND, dl, VT, + DAG.getNode(X86ISD::SETCC_CARRY, dl, VT, + N00.getOperand(0), N00.getOperand(1)), + DAG.getConstant(1, VT)); + } + + return SDValue(); +} + SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { SelectionDAG &DAG = DCI.DAG; @@ -9368,6 +9377,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, case X86ISD::BT: return PerformBTCombine(N, DAG, DCI); case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG); case ISD::MEMBARRIER: return PerformMEMBARRIERCombine(N, DAG); + case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG); } return SDValue(); diff --git a/lib/Target/X86/X86Instr64bit.td b/lib/Target/X86/X86Instr64bit.td index b6a2c05..65fbbda 100644 --- a/lib/Target/X86/X86Instr64bit.td +++ b/lib/Target/X86/X86Instr64bit.td @@ -111,6 +111,9 @@ def ADJCALLSTACKUP64 : I<0, Pseudo, (outs), (ins i32imm:$amt1, i32imm:$amt2), Requires<[In64BitMode]>; } +// Interrupt Instructions +def IRET64 : RI<0xcf, RawFrm, (outs), (ins), "iret{q}", []>; + //===----------------------------------------------------------------------===// // Call Instructions... // @@ -131,20 +134,21 @@ let isCall = 1 in // the 32-bit pcrel field that we have. 
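Back in X86ISelLowering.cpp, the re-enabled LowerSETCC path and the new PerformZExtCombine cooperate: SETCC_CARRY is the sbb-with-itself trick, and the combine re-materializes it at the wider type so the zext disappears instead of being legalized around an i8 result. A scalar sketch of what the pair computes, not LLVM code:

#include <cstdint>

// X86ISD::SETCC_CARRY for COND_B: after "cmp a, b" sets the carry flag,
// "sbb %r, %r" yields all ones when CF is set and zero otherwise; the
// trailing "and $1" then materializes a<b as 0 or 1 with no setcc and,
// given PerformZExtCombine, no separate zero extension either.
uint32_t unsigned_less_than(uint32_t a, uint32_t b) {
  uint32_t mask = (a < b) ? ~UINT32_C(0) : 0; // sbb %r, %r
  return mask & 1;                            // and $1, %r
}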
def CALL64pcrel32 : Ii32<0xE8, RawFrm, (outs), (ins i64i32imm_pcrel:$dst, variable_ops), - "call\t$dst", []>, + "call{q}\t$dst", []>, Requires<[In64BitMode, NotWin64]>; def CALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops), - "call\t{*}$dst", [(X86call GR64:$dst)]>, + "call{q}\t{*}$dst", [(X86call GR64:$dst)]>, Requires<[NotWin64]>; def CALL64m : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops), - "call\t{*}$dst", [(X86call (loadi64 addr:$dst))]>, + "call{q}\t{*}$dst", [(X86call (loadi64 addr:$dst))]>, Requires<[NotWin64]>; def FARCALL64 : RI<0xFF, MRM3m, (outs), (ins opaque80mem:$dst), "lcall{q}\t{*}$dst", []>; } - // FIXME: We need to teach codegen about single list of call-clobbered registers. + // FIXME: We need to teach codegen about single list of call-clobbered + // registers. let isCall = 1 in // All calls clobber the non-callee saved registers. RSP is marked as // a use to prevent stack-pointer assignments that appear immediately @@ -162,9 +166,10 @@ let isCall = 1 in def WINCALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst, variable_ops), "call\t{*}$dst", [(X86call GR64:$dst)]>, Requires<[IsWin64]>; - def WINCALL64m : I<0xFF, MRM2m, (outs), (ins i64mem:$dst, variable_ops), - "call\t{*}$dst", - [(X86call (loadi64 addr:$dst))]>, Requires<[IsWin64]>; + def WINCALL64m : I<0xFF, MRM2m, (outs), + (ins i64mem:$dst, variable_ops), "call\t{*}$dst", + [(X86call (loadi64 addr:$dst))]>, + Requires<[IsWin64]>; } @@ -188,6 +193,8 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in // Branches let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in { + def JMP64pcrel32 : I<0xE9, RawFrm, (outs), (ins brtarget:$dst), + "jmp{q}\t$dst", []>; def JMP64r : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst", [(brind GR64:$dst)]>; def JMP64m : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst", @@ -210,6 +217,12 @@ def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr), //===----------------------------------------------------------------------===// // Miscellaneous Instructions... 
// + +def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src), + "popcnt{q}\t{$src, $dst|$dst, $src}", []>, XS; +def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src), + "popcnt{q}\t{$src, $dst|$dst, $src}", []>, XS; + let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in def LEAVE64 : I<0xC9, RawFrm, (outs), (ins), "leave", []>; @@ -238,9 +251,9 @@ def PUSH64i32 : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm), } let Defs = [RSP, EFLAGS], Uses = [RSP], mayLoad = 1 in -def POPFQ : I<0x9D, RawFrm, (outs), (ins), "popf", []>, REX_W; +def POPFQ : I<0x9D, RawFrm, (outs), (ins), "popf{q}", []>, REX_W; let Defs = [RSP], Uses = [RSP, EFLAGS], mayStore = 1 in -def PUSHFQ : I<0x9C, RawFrm, (outs), (ins), "pushf", []>; +def PUSHFQ64 : I<0x9C, RawFrm, (outs), (ins), "pushf{q}", []>; def LEA64_32r : I<0x8D, MRMSrcMem, (outs GR32:$dst), (ins lea64_32mem:$src), @@ -309,6 +322,9 @@ def MOV64ri32 : RIi32<0xC7, MRM0r, (outs GR64:$dst), (ins i64i32imm:$src), [(set GR64:$dst, i64immSExt32:$src)]>; } +def MOV64rr_REV : RI<0x8B, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src), + "mov{q}\t{$src, $dst|$dst, $src}", []>; + let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in def MOV64rm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src), "mov{q}\t{$src, $dst|$dst, $src}", @@ -321,24 +337,36 @@ def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src), "mov{q}\t{$src, $dst|$dst, $src}", [(store i64immSExt32:$src, addr:$dst)]>; -def MOV64o8a : RIi8<0xA0, RawFrm, (outs), (ins i8imm:$src), +def MOV64o8a : RIi8<0xA0, RawFrm, (outs), (ins offset8:$src), "mov{q}\t{$src, %rax|%rax, $src}", []>; -def MOV64o32a : RIi32<0xA1, RawFrm, (outs), (ins i32imm:$src), +def MOV64o64a : RIi32<0xA1, RawFrm, (outs), (ins offset64:$src), "mov{q}\t{$src, %rax|%rax, $src}", []>; -def MOV64ao8 : RIi8<0xA2, RawFrm, (outs i8imm:$dst), (ins), +def MOV64ao8 : RIi8<0xA2, RawFrm, (outs offset8:$dst), (ins), "mov{q}\t{%rax, $dst|$dst, %rax}", []>; -def MOV64ao32 : RIi32<0xA3, RawFrm, (outs i32imm:$dst), (ins), +def MOV64ao64 : RIi32<0xA3, RawFrm, (outs offset64:$dst), (ins), "mov{q}\t{%rax, $dst|$dst, %rax}", []>; // Moves to and from segment registers def MOV64rs : RI<0x8C, MRMDestReg, (outs GR64:$dst), (ins SEGMENT_REG:$src), - "mov{w}\t{$src, $dst|$dst, $src}", []>; + "mov{q}\t{$src, $dst|$dst, $src}", []>; def MOV64ms : RI<0x8C, MRMDestMem, (outs i64mem:$dst), (ins SEGMENT_REG:$src), - "mov{w}\t{$src, $dst|$dst, $src}", []>; + "mov{q}\t{$src, $dst|$dst, $src}", []>; def MOV64sr : RI<0x8E, MRMSrcReg, (outs SEGMENT_REG:$dst), (ins GR64:$src), - "mov{w}\t{$src, $dst|$dst, $src}", []>; + "mov{q}\t{$src, $dst|$dst, $src}", []>; def MOV64sm : RI<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i64mem:$src), - "mov{w}\t{$src, $dst|$dst, $src}", []>; + "mov{q}\t{$src, $dst|$dst, $src}", []>; + +// Moves to and from debug registers +def MOV64rd : I<0x21, MRMDestReg, (outs GR64:$dst), (ins DEBUG_REG:$src), + "mov{q}\t{$src, $dst|$dst, $src}", []>, TB; +def MOV64dr : I<0x23, MRMSrcReg, (outs DEBUG_REG:$dst), (ins GR64:$src), + "mov{q}\t{$src, $dst|$dst, $src}", []>, TB; + +// Moves to and from control registers +def MOV64rc : I<0x20, MRMDestReg, (outs GR64:$dst), (ins CONTROL_REG_64:$src), + "mov{q}\t{$src, $dst|$dst, $src}", []>, TB; +def MOV64cr : I<0x22, MRMSrcReg, (outs CONTROL_REG_64:$dst), (ins GR64:$src), + "mov{q}\t{$src, $dst|$dst, $src}", []>, TB; // Sign/Zero extenders @@ -365,6 +393,16 @@ def MOVSX64rm32: RI<0x63, MRMSrcMem, (outs GR64:$dst), 
(ins i32mem:$src), "movs{lq|xd}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (sextloadi64i32 addr:$src))]>; +// movzbq and movzwq encodings for the disassembler +def MOVZX64rr8_Q : RI<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8:$src), + "movz{bq|x}\t{$src, $dst|$dst, $src}", []>, TB; +def MOVZX64rm8_Q : RI<0xB6, MRMSrcMem, (outs GR64:$dst), (ins i8mem:$src), + "movz{bq|x}\t{$src, $dst|$dst, $src}", []>, TB; +def MOVZX64rr16_Q : RI<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src), + "movz{wq|x}\t{$src, $dst|$dst, $src}", []>, TB; +def MOVZX64rm16_Q : RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src), + "movz{wq|x}\t{$src, $dst|$dst, $src}", []>, TB; + // Use movzbl instead of movzbq when the destination is a register; it's // equivalent due to implicit zero-extending, and it has a smaller encoding. def MOVZX64rr8 : I<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src), @@ -430,31 +468,36 @@ let isTwoAddress = 1 in { let isConvertibleToThreeAddress = 1 in { let isCommutable = 1 in // Register-Register Addition -def ADD64rr : RI<0x01, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), +def ADD64rr : RI<0x01, MRMDestReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), "add{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (add GR64:$src1, GR64:$src2)), (implicit EFLAGS)]>; // Register-Integer Addition -def ADD64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2), +def ADD64ri8 : RIi8<0x83, MRM0r, (outs GR64:$dst), + (ins GR64:$src1, i64i8imm:$src2), "add{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (add GR64:$src1, i64immSExt8:$src2)), (implicit EFLAGS)]>; -def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2), +def ADD64ri32 : RIi32<0x81, MRM0r, (outs GR64:$dst), + (ins GR64:$src1, i64i32imm:$src2), "add{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (add GR64:$src1, i64immSExt32:$src2)), (implicit EFLAGS)]>; } // isConvertibleToThreeAddress // Register-Memory Addition -def ADD64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), +def ADD64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), + (ins GR64:$src1, i64mem:$src2), "add{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (add GR64:$src1, (load addr:$src2))), (implicit EFLAGS)]>; // Register-Register Addition - Equivalent to the normal rr form (ADD64rr), but // differently encoded. 
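The *_REV definitions added throughout this file (ADC64rr_REV, SUB64rr_REV, AND64rr_REV, and so on) exist for the same reason as this comment: most x86 register-register ALU operations have two legal encodings, one with the source in ModRM.reg (the MRMDestReg form the compiler normally emits) and one with it in ModRM.rm (MRMSrcReg), and the disassembler needs a def for each. A standalone illustration with hand-written bytes, not tied to the LLVM API:

#include <cstdint>
#include <cstdio>

int main() {
  // Both sequences disassemble to "add %rbx, %rax" in AT&T syntax:
  const uint8_t add_01[] = { 0x48, 0x01, 0xd8 }; // REX.W 01 /r, src in ModRM.reg
  const uint8_t add_03[] = { 0x48, 0x03, 0xc3 }; // REX.W 03 /r, src in ModRM.rm
  std::printf("%02x %02x %02x and %02x %02x %02x encode the same instruction\n",
              add_01[0], add_01[1], add_01[2],
              add_03[0], add_03[1], add_03[2]);
  return 0;
}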
-def ADD64mrmrr : RI<0x03, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), +def ADD64mrmrr : RI<0x03, MRMSrcReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), "add{l}\t{$src2, $dst|$dst, $src2}", []>; } // isTwoAddress @@ -480,18 +523,26 @@ def ADC64i32 : RI<0x15, RawFrm, (outs), (ins i32imm:$src), let isTwoAddress = 1 in { let isCommutable = 1 in -def ADC64rr : RI<0x11, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), +def ADC64rr : RI<0x11, MRMDestReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), "adc{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (adde GR64:$src1, GR64:$src2))]>; -def ADC64rm : RI<0x13, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), +def ADC64rr_REV : RI<0x13, MRMSrcReg , (outs GR32:$dst), + (ins GR64:$src1, GR64:$src2), + "adc{q}\t{$src2, $dst|$dst, $src2}", []>; + +def ADC64rm : RI<0x13, MRMSrcMem , (outs GR64:$dst), + (ins GR64:$src1, i64mem:$src2), "adc{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (adde GR64:$src1, (load addr:$src2)))]>; -def ADC64ri8 : RIi8<0x83, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2), +def ADC64ri8 : RIi8<0x83, MRM2r, (outs GR64:$dst), + (ins GR64:$src1, i64i8imm:$src2), "adc{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (adde GR64:$src1, i64immSExt8:$src2))]>; -def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2), +def ADC64ri32 : RIi32<0x81, MRM2r, (outs GR64:$dst), + (ins GR64:$src1, i64i32imm:$src2), "adc{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (adde GR64:$src1, i64immSExt32:$src2))]>; } // isTwoAddress @@ -501,21 +552,29 @@ def ADC64mr : RI<0x11, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2), [(store (adde (load addr:$dst), GR64:$src2), addr:$dst)]>; def ADC64mi8 : RIi8<0x83, MRM2m, (outs), (ins i64mem:$dst, i64i8imm :$src2), "adc{q}\t{$src2, $dst|$dst, $src2}", - [(store (adde (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>; + [(store (adde (load addr:$dst), i64immSExt8:$src2), + addr:$dst)]>; def ADC64mi32 : RIi32<0x81, MRM2m, (outs), (ins i64mem:$dst, i64i32imm:$src2), "adc{q}\t{$src2, $dst|$dst, $src2}", - [(store (adde (load addr:$dst), i64immSExt8:$src2), addr:$dst)]>; + [(store (adde (load addr:$dst), i64immSExt8:$src2), + addr:$dst)]>; } // Uses = [EFLAGS] let isTwoAddress = 1 in { // Register-Register Subtraction -def SUB64rr : RI<0x29, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), +def SUB64rr : RI<0x29, MRMDestReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), "sub{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (sub GR64:$src1, GR64:$src2)), (implicit EFLAGS)]>; +def SUB64rr_REV : RI<0x2B, MRMSrcReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), + "sub{q}\t{$src2, $dst|$dst, $src2}", []>; + // Register-Memory Subtraction -def SUB64rm : RI<0x2B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), +def SUB64rm : RI<0x2B, MRMSrcMem, (outs GR64:$dst), + (ins GR64:$src1, i64mem:$src2), "sub{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (sub GR64:$src1, (load addr:$src2))), (implicit EFLAGS)]>; @@ -556,18 +615,26 @@ def SUB64mi32 : RIi32<0x81, MRM5m, (outs), (ins i64mem:$dst, i64i32imm:$src2), let Uses = [EFLAGS] in { let isTwoAddress = 1 in { -def SBB64rr : RI<0x19, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), +def SBB64rr : RI<0x19, MRMDestReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), "sbb{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (sube GR64:$src1, GR64:$src2))]>; -def SBB64rm : RI<0x1B, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), 
+def SBB64rr_REV : RI<0x1B, MRMSrcReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), + "sbb{q}\t{$src2, $dst|$dst, $src2}", []>; + +def SBB64rm : RI<0x1B, MRMSrcMem, (outs GR64:$dst), + (ins GR64:$src1, i64mem:$src2), "sbb{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (sube GR64:$src1, (load addr:$src2)))]>; -def SBB64ri8 : RIi8<0x83, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2), +def SBB64ri8 : RIi8<0x83, MRM3r, (outs GR64:$dst), + (ins GR64:$src1, i64i8imm:$src2), "sbb{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (sube GR64:$src1, i64immSExt8:$src2))]>; -def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2), +def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst), + (ins GR64:$src1, i64i32imm:$src2), "sbb{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>; } // isTwoAddress @@ -652,15 +719,19 @@ def IMUL64rmi32 : RIi32<0x69, MRMSrcMem, // GR64 = [mem64]*I32 // Unsigned division / remainder let Defs = [RAX,RDX,EFLAGS], Uses = [RAX,RDX] in { -def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src), // RDX:RAX/r64 = RAX,RDX +// RDX:RAX/r64 = RAX,RDX +def DIV64r : RI<0xF7, MRM6r, (outs), (ins GR64:$src), "div{q}\t$src", []>; // Signed division / remainder -def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src), // RDX:RAX/r64 = RAX,RDX +// RDX:RAX/r64 = RAX,RDX +def IDIV64r: RI<0xF7, MRM7r, (outs), (ins GR64:$src), "idiv{q}\t$src", []>; let mayLoad = 1 in { -def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src), // RDX:RAX/[mem64] = RAX,RDX +// RDX:RAX/[mem64] = RAX,RDX +def DIV64m : RI<0xF7, MRM6m, (outs), (ins i64mem:$src), "div{q}\t$src", []>; -def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src), // RDX:RAX/[mem64] = RAX,RDX +// RDX:RAX/[mem64] = RAX,RDX +def IDIV64m: RI<0xF7, MRM7m, (outs), (ins i64mem:$src), "idiv{q}\t$src", []>; } } @@ -694,19 +765,23 @@ def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst", // In 64-bit mode, single byte INC and DEC cannot be encoded. let isTwoAddress = 1, isConvertibleToThreeAddress = 1 in { // Can transform into LEA. -def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src), "inc{w}\t$dst", +def INC64_16r : I<0xFF, MRM0r, (outs GR16:$dst), (ins GR16:$src), + "inc{w}\t$dst", [(set GR16:$dst, (add GR16:$src, 1)), (implicit EFLAGS)]>, OpSize, Requires<[In64BitMode]>; -def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src), "inc{l}\t$dst", +def INC64_32r : I<0xFF, MRM0r, (outs GR32:$dst), (ins GR32:$src), + "inc{l}\t$dst", [(set GR32:$dst, (add GR32:$src, 1)), (implicit EFLAGS)]>, Requires<[In64BitMode]>; -def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src), "dec{w}\t$dst", +def DEC64_16r : I<0xFF, MRM1r, (outs GR16:$dst), (ins GR16:$src), + "dec{w}\t$dst", [(set GR16:$dst, (add GR16:$src, -1)), (implicit EFLAGS)]>, OpSize, Requires<[In64BitMode]>; -def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src), "dec{l}\t$dst", +def DEC64_32r : I<0xFF, MRM1r, (outs GR32:$dst), (ins GR32:$src), + "dec{l}\t$dst", [(set GR32:$dst, (add GR32:$src, -1)), (implicit EFLAGS)]>, Requires<[In64BitMode]>; @@ -743,13 +818,14 @@ def SHL64rCL : RI<0xD3, MRM4r, (outs GR64:$dst), (ins GR64:$src), "shl{q}\t{%cl, $dst|$dst, %CL}", [(set GR64:$dst, (shl GR64:$src, CL))]>; let isConvertibleToThreeAddress = 1 in // Can transform into LEA. 
-def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2), +def SHL64ri : RIi8<0xC1, MRM4r, (outs GR64:$dst), + (ins GR64:$src1, i8imm:$src2), "shl{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (shl GR64:$src1, (i8 imm:$src2)))]>; // NOTE: We don't include patterns for shifts of a register by one, because // 'add reg,reg' is cheaper. def SHL64r1 : RI<0xD1, MRM4r, (outs GR64:$dst), (ins GR64:$src1), - "shr{q}\t$dst", []>; + "shl{q}\t$dst", []>; } // isTwoAddress let Uses = [CL] in @@ -792,9 +868,10 @@ let Uses = [CL] in def SAR64rCL : RI<0xD3, MRM7r, (outs GR64:$dst), (ins GR64:$src), "sar{q}\t{%cl, $dst|$dst, %CL}", [(set GR64:$dst, (sra GR64:$src, CL))]>; -def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2), - "sar{q}\t{$src2, $dst|$dst, $src2}", - [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>; +def SAR64ri : RIi8<0xC1, MRM7r, (outs GR64:$dst), + (ins GR64:$src1, i8imm:$src2), + "sar{q}\t{$src2, $dst|$dst, $src2}", + [(set GR64:$dst, (sra GR64:$src1, (i8 imm:$src2)))]>; def SAR64r1 : RI<0xD1, MRM7r, (outs GR64:$dst), (ins GR64:$src1), "sar{q}\t$dst", [(set GR64:$dst, (sra GR64:$src1, (i8 1)))]>; @@ -826,7 +903,8 @@ def RCL64mCL : RI<0xD3, MRM2m, (outs i64mem:$dst), (ins i64mem:$src), } def RCL64ri : RIi8<0xC1, MRM2r, (outs GR64:$dst), (ins GR64:$src, i8imm:$cnt), "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>; -def RCL64mi : RIi8<0xC1, MRM2m, (outs i64mem:$dst), (ins i64mem:$src, i8imm:$cnt), +def RCL64mi : RIi8<0xC1, MRM2m, (outs i64mem:$dst), + (ins i64mem:$src, i8imm:$cnt), "rcl{q}\t{$cnt, $dst|$dst, $cnt}", []>; def RCR64r1 : RI<0xD1, MRM3r, (outs GR64:$dst), (ins GR64:$src), @@ -841,7 +919,8 @@ def RCR64mCL : RI<0xD3, MRM3m, (outs i64mem:$dst), (ins i64mem:$src), } def RCR64ri : RIi8<0xC1, MRM3r, (outs GR64:$dst), (ins GR64:$src, i8imm:$cnt), "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>; -def RCR64mi : RIi8<0xC1, MRM3m, (outs i64mem:$dst), (ins i64mem:$src, i8imm:$cnt), +def RCR64mi : RIi8<0xC1, MRM3m, (outs i64mem:$dst), + (ins i64mem:$src, i8imm:$cnt), "rcr{q}\t{$cnt, $dst|$dst, $cnt}", []>; } @@ -850,7 +929,8 @@ let Uses = [CL] in def ROL64rCL : RI<0xD3, MRM0r, (outs GR64:$dst), (ins GR64:$src), "rol{q}\t{%cl, $dst|$dst, %CL}", [(set GR64:$dst, (rotl GR64:$src, CL))]>; -def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2), +def ROL64ri : RIi8<0xC1, MRM0r, (outs GR64:$dst), + (ins GR64:$src1, i8imm:$src2), "rol{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (rotl GR64:$src1, (i8 imm:$src2)))]>; def ROL64r1 : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1), @@ -859,9 +939,9 @@ def ROL64r1 : RI<0xD1, MRM0r, (outs GR64:$dst), (ins GR64:$src1), } // isTwoAddress let Uses = [CL] in -def ROL64mCL : I<0xD3, MRM0m, (outs), (ins i64mem:$dst), - "rol{q}\t{%cl, $dst|$dst, %CL}", - [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>; +def ROL64mCL : RI<0xD3, MRM0m, (outs), (ins i64mem:$dst), + "rol{q}\t{%cl, $dst|$dst, %CL}", + [(store (rotl (loadi64 addr:$dst), CL), addr:$dst)]>; def ROL64mi : RIi8<0xC1, MRM0m, (outs), (ins i64mem:$dst, i8imm:$src), "rol{q}\t{$src, $dst|$dst, $src}", [(store (rotl (loadi64 addr:$dst), (i8 imm:$src)), addr:$dst)]>; @@ -874,7 +954,8 @@ let Uses = [CL] in def ROR64rCL : RI<0xD3, MRM1r, (outs GR64:$dst), (ins GR64:$src), "ror{q}\t{%cl, $dst|$dst, %CL}", [(set GR64:$dst, (rotr GR64:$src, CL))]>; -def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i8imm:$src2), +def ROR64ri : RIi8<0xC1, MRM1r, (outs GR64:$dst), + (ins GR64:$src1, i8imm:$src2), "ror{q}\t{$src2, $dst|$dst, 
$src2}", [(set GR64:$dst, (rotr GR64:$src1, (i8 imm:$src2)))]>; def ROR64r1 : RI<0xD1, MRM1r, (outs GR64:$dst), (ins GR64:$src1), @@ -896,23 +977,29 @@ def ROR64m1 : RI<0xD1, MRM1m, (outs), (ins i64mem:$dst), // Double shift instructions (generalizations of rotate) let isTwoAddress = 1 in { let Uses = [CL] in { -def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), +def SHLD64rrCL : RI<0xA5, MRMDestReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), "shld{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}", - [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))]>, TB; -def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), + [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, CL))]>, + TB; +def SHRD64rrCL : RI<0xAD, MRMDestReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), "shrd{q}\t{%cl, $src2, $dst|$dst, $src2, %CL}", - [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))]>, TB; + [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, CL))]>, + TB; } let isCommutable = 1 in { // FIXME: Update X86InstrInfo::commuteInstruction def SHLD64rri8 : RIi8<0xA4, MRMDestReg, - (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3), + (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2, i8imm:$src3), "shld{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}", [(set GR64:$dst, (X86shld GR64:$src1, GR64:$src2, (i8 imm:$src3)))]>, TB; def SHRD64rri8 : RIi8<0xAC, MRMDestReg, - (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, i8imm:$src3), + (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2, i8imm:$src3), "shrd{q}\t{$src3, $src2, $dst|$dst, $src2, $src3}", [(set GR64:$dst, (X86shrd GR64:$src1, GR64:$src2, (i8 imm:$src3)))]>, @@ -965,6 +1052,9 @@ def AND64rr : RI<0x21, MRMDestReg, "and{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (and GR64:$src1, GR64:$src2)), (implicit EFLAGS)]>; +def AND64rr_REV : RI<0x23, MRMSrcReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), + "and{q}\t{$src2, $dst|$dst, $src2}", []>; def AND64rm : RI<0x23, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), "and{q}\t{$src2, $dst|$dst, $src2}", @@ -1000,19 +1090,26 @@ def AND64mi32 : RIi32<0x81, MRM4m, let isTwoAddress = 1 in { let isCommutable = 1 in -def OR64rr : RI<0x09, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), +def OR64rr : RI<0x09, MRMDestReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), "or{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (or GR64:$src1, GR64:$src2)), (implicit EFLAGS)]>; -def OR64rm : RI<0x0B, MRMSrcMem , (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), +def OR64rr_REV : RI<0x0B, MRMSrcReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), + "or{q}\t{$src2, $dst|$dst, $src2}", []>; +def OR64rm : RI<0x0B, MRMSrcMem , (outs GR64:$dst), + (ins GR64:$src1, i64mem:$src2), "or{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (or GR64:$src1, (load addr:$src2))), (implicit EFLAGS)]>; -def OR64ri8 : RIi8<0x83, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2), +def OR64ri8 : RIi8<0x83, MRM1r, (outs GR64:$dst), + (ins GR64:$src1, i64i8imm:$src2), "or{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (or GR64:$src1, i64immSExt8:$src2)), (implicit EFLAGS)]>; -def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst), (ins GR64:$src1, i64i32imm:$src2), +def OR64ri32 : RIi32<0x81, MRM1r, (outs GR64:$dst), + (ins GR64:$src1, i64i32imm:$src2), "or{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (or GR64:$src1, i64immSExt32:$src2)), (implicit EFLAGS)]>; @@ -1036,15 +1133,21 @@ def OR64i32 : RIi32<0x0D, RawFrm, (outs), (ins i32imm:$src), 
let isTwoAddress = 1 in { let isCommutable = 1 in -def XOR64rr : RI<0x31, MRMDestReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), +def XOR64rr : RI<0x31, MRMDestReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), "xor{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (xor GR64:$src1, GR64:$src2)), (implicit EFLAGS)]>; -def XOR64rm : RI<0x33, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), +def XOR64rr_REV : RI<0x33, MRMSrcReg, (outs GR64:$dst), + (ins GR64:$src1, GR64:$src2), + "xor{q}\t{$src2, $dst|$dst, $src2}", []>; +def XOR64rm : RI<0x33, MRMSrcMem, (outs GR64:$dst), + (ins GR64:$src1, i64mem:$src2), "xor{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (xor GR64:$src1, (load addr:$src2))), (implicit EFLAGS)]>; -def XOR64ri8 : RIi8<0x83, MRM6r, (outs GR64:$dst), (ins GR64:$src1, i64i8imm:$src2), +def XOR64ri8 : RIi8<0x83, MRM6r, (outs GR64:$dst), + (ins GR64:$src1, i64i8imm:$src2), "xor{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (xor GR64:$src1, i64immSExt8:$src2)), (implicit EFLAGS)]>; @@ -1148,10 +1251,12 @@ def BT64rr : RI<0xA3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2), // Unlike with the register+register form, the memory+register form of the // bt instruction does not ignore the high bits of the index. From ISel's // perspective, this is pretty bizarre. Disable these instructions for now. -//def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2), -// "bt{q}\t{$src2, $src1|$src1, $src2}", +def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2), + "bt{q}\t{$src2, $src1|$src1, $src2}", // [(X86bt (loadi64 addr:$src1), GR64:$src2), -// (implicit EFLAGS)]>, TB; +// (implicit EFLAGS)] + [] + >, TB; def BT64ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2), "bt{q}\t{$src2, $src1|$src1, $src2}", @@ -1164,6 +1269,33 @@ def BT64mi8 : Ii8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2), "bt{q}\t{$src2, $src1|$src1, $src2}", [(X86bt (loadi64 addr:$src1), i64immSExt8:$src2), (implicit EFLAGS)]>, TB; + +def BTC64rr : RI<0xBB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2), + "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTC64mr : RI<0xBB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2), + "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTC64ri8 : RIi8<0xBA, MRM7r, (outs), (ins GR64:$src1, i64i8imm:$src2), + "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTC64mi8 : RIi8<0xBA, MRM7m, (outs), (ins i64mem:$src1, i64i8imm:$src2), + "btc{q}\t{$src2, $src1|$src1, $src2}", []>, TB; + +def BTR64rr : RI<0xB3, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2), + "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTR64mr : RI<0xB3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2), + "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTR64ri8 : RIi8<0xBA, MRM6r, (outs), (ins GR64:$src1, i64i8imm:$src2), + "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTR64mi8 : RIi8<0xBA, MRM6m, (outs), (ins i64mem:$src1, i64i8imm:$src2), + "btr{q}\t{$src2, $src1|$src1, $src2}", []>, TB; + +def BTS64rr : RI<0xAB, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2), + "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTS64mr : RI<0xAB, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2), + "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTS64ri8 : RIi8<0xBA, MRM5r, (outs), (ins GR64:$src1, i64i8imm:$src2), + "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64i8imm:$src2), + "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB; } // Defs = 
[EFLAGS] // Conditional moves @@ -1171,164 +1303,164 @@ let Uses = [EFLAGS], isTwoAddress = 1 in { let isCommutable = 1 in { def CMOVB64rr : RI<0x42, MRMSrcReg, // if <u, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovb\t{$src2, $dst|$dst, $src2}", + "cmovb{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_B, EFLAGS))]>, TB; def CMOVAE64rr: RI<0x43, MRMSrcReg, // if >=u, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovae\t{$src2, $dst|$dst, $src2}", + "cmovae{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_AE, EFLAGS))]>, TB; def CMOVE64rr : RI<0x44, MRMSrcReg, // if ==, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmove\t{$src2, $dst|$dst, $src2}", + "cmove{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_E, EFLAGS))]>, TB; def CMOVNE64rr: RI<0x45, MRMSrcReg, // if !=, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovne\t{$src2, $dst|$dst, $src2}", + "cmovne{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_NE, EFLAGS))]>, TB; def CMOVBE64rr: RI<0x46, MRMSrcReg, // if <=u, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovbe\t{$src2, $dst|$dst, $src2}", + "cmovbe{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_BE, EFLAGS))]>, TB; def CMOVA64rr : RI<0x47, MRMSrcReg, // if >u, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmova\t{$src2, $dst|$dst, $src2}", + "cmova{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_A, EFLAGS))]>, TB; def CMOVL64rr : RI<0x4C, MRMSrcReg, // if <s, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovl\t{$src2, $dst|$dst, $src2}", + "cmovl{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_L, EFLAGS))]>, TB; def CMOVGE64rr: RI<0x4D, MRMSrcReg, // if >=s, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovge\t{$src2, $dst|$dst, $src2}", + "cmovge{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_GE, EFLAGS))]>, TB; def CMOVLE64rr: RI<0x4E, MRMSrcReg, // if <=s, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovle\t{$src2, $dst|$dst, $src2}", + "cmovle{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_LE, EFLAGS))]>, TB; def CMOVG64rr : RI<0x4F, MRMSrcReg, // if >s, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovg\t{$src2, $dst|$dst, $src2}", + "cmovg{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_G, EFLAGS))]>, TB; def CMOVS64rr : RI<0x48, MRMSrcReg, // if signed, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovs\t{$src2, $dst|$dst, $src2}", + "cmovs{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_S, EFLAGS))]>, TB; def CMOVNS64rr: RI<0x49, MRMSrcReg, // if !signed, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovns\t{$src2, $dst|$dst, $src2}", + "cmovns{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_NS, EFLAGS))]>, TB; def CMOVP64rr : RI<0x4A, MRMSrcReg, // if parity, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovp\t{$src2, $dst|$dst, $src2}", + "cmovp{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, 
X86_COND_P, EFLAGS))]>, TB; def CMOVNP64rr : RI<0x4B, MRMSrcReg, // if !parity, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovnp\t{$src2, $dst|$dst, $src2}", + "cmovnp{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_NP, EFLAGS))]>, TB; def CMOVO64rr : RI<0x40, MRMSrcReg, // if overflow, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovo\t{$src2, $dst|$dst, $src2}", + "cmovo{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_O, EFLAGS))]>, TB; def CMOVNO64rr : RI<0x41, MRMSrcReg, // if !overflow, GR64 = GR64 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2), - "cmovno\t{$src2, $dst|$dst, $src2}", + "cmovno{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2, X86_COND_NO, EFLAGS))]>, TB; } // isCommutable = 1 def CMOVB64rm : RI<0x42, MRMSrcMem, // if <u, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovb\t{$src2, $dst|$dst, $src2}", + "cmovb{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_B, EFLAGS))]>, TB; def CMOVAE64rm: RI<0x43, MRMSrcMem, // if >=u, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovae\t{$src2, $dst|$dst, $src2}", + "cmovae{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_AE, EFLAGS))]>, TB; def CMOVE64rm : RI<0x44, MRMSrcMem, // if ==, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmove\t{$src2, $dst|$dst, $src2}", + "cmove{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_E, EFLAGS))]>, TB; def CMOVNE64rm: RI<0x45, MRMSrcMem, // if !=, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovne\t{$src2, $dst|$dst, $src2}", + "cmovne{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_NE, EFLAGS))]>, TB; def CMOVBE64rm: RI<0x46, MRMSrcMem, // if <=u, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovbe\t{$src2, $dst|$dst, $src2}", + "cmovbe{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_BE, EFLAGS))]>, TB; def CMOVA64rm : RI<0x47, MRMSrcMem, // if >u, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmova\t{$src2, $dst|$dst, $src2}", + "cmova{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_A, EFLAGS))]>, TB; def CMOVL64rm : RI<0x4C, MRMSrcMem, // if <s, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovl\t{$src2, $dst|$dst, $src2}", + "cmovl{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_L, EFLAGS))]>, TB; def CMOVGE64rm: RI<0x4D, MRMSrcMem, // if >=s, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovge\t{$src2, $dst|$dst, $src2}", + "cmovge{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_GE, EFLAGS))]>, TB; def CMOVLE64rm: RI<0x4E, MRMSrcMem, // if <=s, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovle\t{$src2, $dst|$dst, $src2}", + "cmovle{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_LE, EFLAGS))]>, TB; def CMOVG64rm : RI<0x4F, MRMSrcMem, // if >s, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovg\t{$src2, $dst|$dst, $src2}", + 
"cmovg{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_G, EFLAGS))]>, TB; def CMOVS64rm : RI<0x48, MRMSrcMem, // if signed, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovs\t{$src2, $dst|$dst, $src2}", + "cmovs{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_S, EFLAGS))]>, TB; def CMOVNS64rm: RI<0x49, MRMSrcMem, // if !signed, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovns\t{$src2, $dst|$dst, $src2}", + "cmovns{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_NS, EFLAGS))]>, TB; def CMOVP64rm : RI<0x4A, MRMSrcMem, // if parity, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovp\t{$src2, $dst|$dst, $src2}", + "cmovp{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_P, EFLAGS))]>, TB; def CMOVNP64rm : RI<0x4B, MRMSrcMem, // if !parity, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovnp\t{$src2, $dst|$dst, $src2}", + "cmovnp{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_NP, EFLAGS))]>, TB; def CMOVO64rm : RI<0x40, MRMSrcMem, // if overflow, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovo\t{$src2, $dst|$dst, $src2}", + "cmovo{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_O, EFLAGS))]>, TB; def CMOVNO64rm : RI<0x41, MRMSrcMem, // if !overflow, GR64 = [mem64] (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2), - "cmovno\t{$src2, $dst|$dst, $src2}", + "cmovno{q}\t{$src2, $dst|$dst, $src2}", [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2), X86_COND_NO, EFLAGS))]>, TB; } // isTwoAddress @@ -1337,9 +1469,9 @@ def CMOVNO64rm : RI<0x41, MRMSrcMem, // if !overflow, GR64 = [mem64] let Defs = [EFLAGS], Uses = [EFLAGS], isCodeGenOnly = 1 in def SETB_C64r : RI<0x19, MRMInitReg, (outs GR64:$dst), (ins), "sbb{q}\t$dst, $dst", - [(set GR64:$dst, (zext (X86setcc_c X86_COND_B, EFLAGS)))]>; + [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>; -def : Pat<(i64 (anyext (X86setcc_c X86_COND_B, EFLAGS))), +def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))), (SETB_C64r)>; //===----------------------------------------------------------------------===// @@ -1347,11 +1479,16 @@ def : Pat<(i64 (anyext (X86setcc_c X86_COND_B, EFLAGS))), // // f64 -> signed i64 +def CVTSD2SI64rr: RSDI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins FR64:$src), + "cvtsd2si{q}\t{$src, $dst|$dst, $src}", []>; +def CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f64mem:$src), + "cvtsd2si{q}\t{$src, $dst|$dst, $src}", []>; def Int_CVTSD2SI64rr: RSDI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src), "cvtsd2si{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (int_x86_sse2_cvtsd2si64 VR128:$src))]>; -def Int_CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f128mem:$src), +def Int_CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (outs GR64:$dst), + (ins f128mem:$src), "cvtsd2si{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (int_x86_sse2_cvtsd2si64 (load addr:$src)))]>; @@ -1365,7 +1502,8 @@ def Int_CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src), "cvttsd2si{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (int_x86_sse2_cvttsd2si64 VR128:$src))]>; -def Int_CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f128mem:$src), +def Int_CVTTSD2SI64rm: RSDI<0x2C, 
MRMSrcMem, (outs GR64:$dst), + (ins f128mem:$src), "cvttsd2si{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (int_x86_sse2_cvttsd2si64 @@ -1410,7 +1548,8 @@ let isTwoAddress = 1 in { (int_x86_sse_cvtsi642ss VR128:$src1, GR64:$src2))]>; def Int_CVTSI2SS64rm : RSSI<0x2A, MRMSrcMem, - (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2), + (outs VR128:$dst), + (ins VR128:$src1, i64mem:$src2), "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (int_x86_sse_cvtsi642ss VR128:$src1, @@ -1418,6 +1557,10 @@ let isTwoAddress = 1 in { } // f32 -> signed i64 +def CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins FR32:$src), + "cvtss2si{q}\t{$src, $dst|$dst, $src}", []>; +def CVTSS2SI64rm: RSSI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src), + "cvtss2si{q}\t{$src, $dst|$dst, $src}", []>; def Int_CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src), "cvtss2si{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, @@ -1436,10 +1579,20 @@ def Int_CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src), "cvttss2si{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (int_x86_sse_cvttss2si64 VR128:$src))]>; -def Int_CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src), +def Int_CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), + (ins f32mem:$src), "cvttss2si{q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (int_x86_sse_cvttss2si64 (load addr:$src)))]>; + +// Descriptor-table support instructions + +// LLDT is not interpreted specially in 64-bit mode because there is no sign +// extension. +def SLDT64r : RI<0x00, MRM0r, (outs GR64:$dst), (ins), + "sldt{q}\t$dst", []>, TB; +def SLDT64m : RI<0x00, MRM0m, (outs i16mem:$dst), (ins), + "sldt{q}\t$dst", []>, TB; //===----------------------------------------------------------------------===// // Alias Instructions @@ -1505,17 +1658,37 @@ def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap), let Constraints = "$val = $dst" in { let Defs = [EFLAGS] in -def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val), +def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins GR64:$val,i64mem:$ptr), "lock\n\t" "xadd\t$val, $ptr", [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))]>, TB, LOCK; -def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$ptr,GR64:$val), - "xchg\t$val, $ptr", +def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst), + (ins GR64:$val,i64mem:$ptr), + "xchg{q}\t{$val, $ptr|$ptr, $val}", [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))]>; + +def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst), (ins GR64:$val,GR64:$src), + "xchg{q}\t{$val, $src|$src, $val}", []>; } +def XADD64rr : RI<0xC1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src), + "xadd{q}\t{$src, $dst|$dst, $src}", []>, TB; +def XADD64rm : RI<0xC1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src), + "xadd{q}\t{$src, $dst|$dst, $src}", []>, TB; + +def CMPXCHG64rr : RI<0xB1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src), + "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB; +def CMPXCHG64rm : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src), + "cmpxchg{q}\t{$src, $dst|$dst, $src}", []>, TB; + +def CMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$dst), + "cmpxchg16b\t$dst", []>, TB; + +def XCHG64ar : RI<0x90, AddRegFrm, (outs), (ins GR64:$src), + "xchg{q}\t{$src, %rax|%rax, $src}", []>; + // Optimized codegen when the non-memory output is not used. let Defs = [EFLAGS] in { // FIXME: Use normal add / sub instructions and add lock prefix dynamically. 
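The LXADD64 change above also swaps the operand order so that the tied $val register comes first. Semantically the pattern is a 64-bit fetch-and-add: lock xadd stores old+val back to memory and leaves the old value in the register operand. A minimal modern-C++ statement of that semantics (illustration only; std::atomic postdates this code):

#include <atomic>
#include <cstdint>

uint64_t fetch_and_add(std::atomic<uint64_t> &loc, uint64_t val) {
  // On x86-64 this typically compiles to "lock xadd %rsi, (%rdi)":
  // memory becomes old+val, and the old contents are returned.
  return loc.fetch_add(val);
}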
@@ -1585,6 +1758,36 @@ def LAR64rm : RI<0x02, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src), def LAR64rr : RI<0x02, MRMSrcReg, (outs GR64:$dst), (ins GR32:$src), "lar{q}\t{$src, $dst|$dst, $src}", []>, TB; +def LSL64rm : RI<0x03, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src), + "lsl{q}\t{$src, $dst|$dst, $src}", []>, TB; +def LSL64rr : RI<0x03, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src), + "lsl{q}\t{$src, $dst|$dst, $src}", []>, TB; + +def SWPGS : I<0x01, RawFrm, (outs), (ins), "swpgs", []>, TB; + +def PUSHFS64 : I<0xa0, RawFrm, (outs), (ins), + "push{q}\t%fs", []>, TB; +def PUSHGS64 : I<0xa8, RawFrm, (outs), (ins), + "push{q}\t%gs", []>, TB; + +def POPFS64 : I<0xa1, RawFrm, (outs), (ins), + "pop{q}\t%fs", []>, TB; +def POPGS64 : I<0xa9, RawFrm, (outs), (ins), + "pop{q}\t%gs", []>, TB; + +def LSS64rm : RI<0xb2, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src), + "lss{q}\t{$src, $dst|$dst, $src}", []>, TB; +def LFS64rm : RI<0xb4, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src), + "lfs{q}\t{$src, $dst|$dst, $src}", []>, TB; +def LGS64rm : RI<0xb5, MRMSrcMem, (outs GR64:$dst), (ins opaque80mem:$src), + "lgs{q}\t{$src, $dst|$dst, $src}", []>, TB; + +// Specialized register support + +// no m form encodable; use SMSW16m +def SMSW64r : RI<0x01, MRM4r, (outs GR64:$dst), (ins), + "smsw{q}\t$dst", []>, TB; + // String manipulation instructions def LODSQ : RI<0xAD, RawFrm, (outs), (ins), "lodsq", []>; @@ -1722,9 +1925,9 @@ def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, X86_COND_NO, EFLAGS), def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>; // extload -// When extloading from 16-bit and smaller memory locations into 64-bit registers, -// use zero-extending loads so that the entire 64-bit register is defined, avoiding -// partial-register updates. +// When extloading from 16-bit and smaller memory locations into 64-bit +// registers, use zero-extending loads so that the entire 64-bit register is +// defined, avoiding partial-register updates. 
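For instance, a narrow load widened to 64 bits is selected as movzbl/movzwl under the patterns below, writing the full register; an illustration of the effect, not part of the patch:

#include <cstdint>

uint64_t load_byte(const uint8_t *p) {
  // Selected as MOVZX64rm8 (a movzbl): the zero-extending load defines
  // all 64 bits of the destination, so no later instruction stalls on a
  // partial-register merge.
  return *p;
}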
def : Pat<(extloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>; def : Pat<(extloadi64i8 addr:$src), (MOVZX64rm8 addr:$src)>; def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>; @@ -1995,7 +2198,8 @@ def : Pat<(parallel (store (X86add_flag (loadi64 addr:$dst), i64immSExt8:$src2), addr:$dst), (implicit EFLAGS)), (ADD64mi8 addr:$dst, i64immSExt8:$src2)>; -def : Pat<(parallel (store (X86add_flag (loadi64 addr:$dst), i64immSExt32:$src2), +def : Pat<(parallel (store (X86add_flag (loadi64 addr:$dst), + i64immSExt32:$src2), addr:$dst), (implicit EFLAGS)), (ADD64mi32 addr:$dst, i64immSExt32:$src2)>; @@ -2025,11 +2229,13 @@ def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst), GR64:$src2), (SUB64mr addr:$dst, GR64:$src2)>; // Memory-Integer Subtraction with EFLAGS result -def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst), i64immSExt8:$src2), +def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst), + i64immSExt8:$src2), addr:$dst), (implicit EFLAGS)), (SUB64mi8 addr:$dst, i64immSExt8:$src2)>; -def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst), i64immSExt32:$src2), +def : Pat<(parallel (store (X86sub_flag (loadi64 addr:$dst), + i64immSExt32:$src2), addr:$dst), (implicit EFLAGS)), (SUB64mi32 addr:$dst, i64immSExt32:$src2)>; @@ -2153,7 +2359,8 @@ def : Pat<(parallel (store (X86xor_flag (loadi64 addr:$dst), i64immSExt8:$src2), addr:$dst), (implicit EFLAGS)), (XOR64mi8 addr:$dst, i64immSExt8:$src2)>; -def : Pat<(parallel (store (X86xor_flag (loadi64 addr:$dst), i64immSExt32:$src2), +def : Pat<(parallel (store (X86xor_flag (loadi64 addr:$dst), + i64immSExt32:$src2), addr:$dst), (implicit EFLAGS)), (XOR64mi32 addr:$dst, i64immSExt32:$src2)>; @@ -2185,7 +2392,8 @@ def : Pat<(parallel (store (X86and_flag (loadi64 addr:$dst), i64immSExt8:$src2), addr:$dst), (implicit EFLAGS)), (AND64mi8 addr:$dst, i64immSExt8:$src2)>; -def : Pat<(parallel (store (X86and_flag (loadi64 addr:$dst), i64immSExt32:$src2), +def : Pat<(parallel (store (X86and_flag (loadi64 addr:$dst), + i64immSExt32:$src2), addr:$dst), (implicit EFLAGS)), (AND64mi32 addr:$dst, i64immSExt32:$src2)>; diff --git a/lib/Target/X86/X86InstrFPStack.td b/lib/Target/X86/X86InstrFPStack.td index b0b0409..71ec178 100644 --- a/lib/Target/X86/X86InstrFPStack.td +++ b/lib/Target/X86/X86InstrFPStack.td @@ -195,48 +195,67 @@ def _Fp80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src1, RFP80:$src2), TwoArgFP, // These instructions cannot address 80-bit memory. 
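That restriction is visible from C++ on targets where long double is the 80-bit x87 format: fadd and friends only have {s} (f32) and {l} (f64) memory forms, so an 80-bit operand must first be loaded with fldt. A small illustration; the generated code is typical compiler output under that assumption, not guaranteed:

// Typically compiles to fldt (load the 80-bit operand onto the x87
// stack) followed by a register-register faddp, because there is no
// memory form of fadd for the 80-bit type.
long double add80(long double acc, const long double *p) {
  return acc + *p;
}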
multiclass FPBinary<SDNode OpNode, Format fp, string asmstring> { // ST(0) = ST(0) + [mem] -def _Fp32m : FpIf32<(outs RFP32:$dst), (ins RFP32:$src1, f32mem:$src2), OneArgFPRW, +def _Fp32m : FpIf32<(outs RFP32:$dst), + (ins RFP32:$src1, f32mem:$src2), OneArgFPRW, [(set RFP32:$dst, (OpNode RFP32:$src1, (loadf32 addr:$src2)))]>; -def _Fp64m : FpIf64<(outs RFP64:$dst), (ins RFP64:$src1, f64mem:$src2), OneArgFPRW, +def _Fp64m : FpIf64<(outs RFP64:$dst), + (ins RFP64:$src1, f64mem:$src2), OneArgFPRW, [(set RFP64:$dst, (OpNode RFP64:$src1, (loadf64 addr:$src2)))]>; -def _Fp64m32: FpIf64<(outs RFP64:$dst), (ins RFP64:$src1, f32mem:$src2), OneArgFPRW, +def _Fp64m32: FpIf64<(outs RFP64:$dst), + (ins RFP64:$src1, f32mem:$src2), OneArgFPRW, [(set RFP64:$dst, (OpNode RFP64:$src1, (f64 (extloadf32 addr:$src2))))]>; -def _Fp80m32: FpI_<(outs RFP80:$dst), (ins RFP80:$src1, f32mem:$src2), OneArgFPRW, +def _Fp80m32: FpI_<(outs RFP80:$dst), + (ins RFP80:$src1, f32mem:$src2), OneArgFPRW, [(set RFP80:$dst, (OpNode RFP80:$src1, (f80 (extloadf32 addr:$src2))))]>; -def _Fp80m64: FpI_<(outs RFP80:$dst), (ins RFP80:$src1, f64mem:$src2), OneArgFPRW, +def _Fp80m64: FpI_<(outs RFP80:$dst), + (ins RFP80:$src1, f64mem:$src2), OneArgFPRW, [(set RFP80:$dst, (OpNode RFP80:$src1, (f80 (extloadf64 addr:$src2))))]>; def _F32m : FPI<0xD8, fp, (outs), (ins f32mem:$src), - !strconcat("f", !strconcat(asmstring, "{s}\t$src"))> { let mayLoad = 1; } + !strconcat("f", !strconcat(asmstring, "{s}\t$src"))> { + let mayLoad = 1; +} def _F64m : FPI<0xDC, fp, (outs), (ins f64mem:$src), - !strconcat("f", !strconcat(asmstring, "{l}\t$src"))> { let mayLoad = 1; } + !strconcat("f", !strconcat(asmstring, "{l}\t$src"))> { + let mayLoad = 1; +} // ST(0) = ST(0) + [memint] -def _FpI16m32 : FpIf32<(outs RFP32:$dst), (ins RFP32:$src1, i16mem:$src2), OneArgFPRW, +def _FpI16m32 : FpIf32<(outs RFP32:$dst), (ins RFP32:$src1, i16mem:$src2), + OneArgFPRW, [(set RFP32:$dst, (OpNode RFP32:$src1, (X86fild addr:$src2, i16)))]>; -def _FpI32m32 : FpIf32<(outs RFP32:$dst), (ins RFP32:$src1, i32mem:$src2), OneArgFPRW, +def _FpI32m32 : FpIf32<(outs RFP32:$dst), (ins RFP32:$src1, i32mem:$src2), + OneArgFPRW, [(set RFP32:$dst, (OpNode RFP32:$src1, (X86fild addr:$src2, i32)))]>; -def _FpI16m64 : FpIf64<(outs RFP64:$dst), (ins RFP64:$src1, i16mem:$src2), OneArgFPRW, +def _FpI16m64 : FpIf64<(outs RFP64:$dst), (ins RFP64:$src1, i16mem:$src2), + OneArgFPRW, [(set RFP64:$dst, (OpNode RFP64:$src1, (X86fild addr:$src2, i16)))]>; -def _FpI32m64 : FpIf64<(outs RFP64:$dst), (ins RFP64:$src1, i32mem:$src2), OneArgFPRW, +def _FpI32m64 : FpIf64<(outs RFP64:$dst), (ins RFP64:$src1, i32mem:$src2), + OneArgFPRW, [(set RFP64:$dst, (OpNode RFP64:$src1, (X86fild addr:$src2, i32)))]>; -def _FpI16m80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src1, i16mem:$src2), OneArgFPRW, +def _FpI16m80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src1, i16mem:$src2), + OneArgFPRW, [(set RFP80:$dst, (OpNode RFP80:$src1, (X86fild addr:$src2, i16)))]>; -def _FpI32m80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src1, i32mem:$src2), OneArgFPRW, +def _FpI32m80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src1, i32mem:$src2), + OneArgFPRW, [(set RFP80:$dst, (OpNode RFP80:$src1, (X86fild addr:$src2, i32)))]>; def _FI16m : FPI<0xDE, fp, (outs), (ins i16mem:$src), - !strconcat("fi", !strconcat(asmstring, "{s}\t$src"))> { let mayLoad = 1; } + !strconcat("fi", !strconcat(asmstring, "{s}\t$src"))> { + let mayLoad = 1; +} def _FI32m : FPI<0xDA, fp, (outs), (ins i32mem:$src), - !strconcat("fi", !strconcat(asmstring, "{l}\t$src"))> { let 
mayLoad = 1; } + !strconcat("fi", !strconcat(asmstring, "{l}\t$src"))> { + let mayLoad = 1; +} } defm ADD : FPBinary_rr<fadd>; @@ -279,6 +298,9 @@ def DIV_FST0r : FPST0rInst <0xF0, "fdiv\t$op">; def DIVR_FrST0 : FPrST0Inst <0xF0, "fdiv{|r}\t{%st(0), $op|$op, %ST(0)}">; def DIVR_FPrST0 : FPrST0PInst<0xF0, "fdiv{|r}p\t$op">; +def COM_FST0r : FPST0rInst <0xD0, "fcom\t$op">; +def COMP_FST0r : FPST0rInst <0xD8, "fcomp\t$op">; + // Unary operations. multiclass FPUnary<SDNode OpNode, bits<8> opcode, string asmstring> { def _Fp32 : FpIf32<(outs RFP32:$dst), (ins RFP32:$src), OneArgFPRW, @@ -305,22 +327,22 @@ def TST_F : FPI<0xE4, RawFrm, (outs), (ins), "ftst">, D9; // Versions of FP instructions that take a single memory operand. Added for the // disassembler; remove as they are included with patterns elsewhere. -def FCOM32m : FPI<0xD8, MRM2m, (outs), (ins f32mem:$src), "fcom\t$src">; -def FCOMP32m : FPI<0xD8, MRM3m, (outs), (ins f32mem:$src), "fcomp\t$src">; +def FCOM32m : FPI<0xD8, MRM2m, (outs), (ins f32mem:$src), "fcom{l}\t$src">; +def FCOMP32m : FPI<0xD8, MRM3m, (outs), (ins f32mem:$src), "fcomp{l}\t$src">; def FLDENVm : FPI<0xD9, MRM4m, (outs), (ins f32mem:$src), "fldenv\t$src">; -def FSTENVm : FPI<0xD9, MRM6m, (outs f32mem:$dst), (ins), "fstenv\t$dst">; +def FSTENVm : FPI<0xD9, MRM6m, (outs f32mem:$dst), (ins), "fnstenv\t$dst">; def FICOM32m : FPI<0xDA, MRM2m, (outs), (ins i32mem:$src), "ficom{l}\t$src">; def FICOMP32m: FPI<0xDA, MRM3m, (outs), (ins i32mem:$src), "ficomp{l}\t$src">; -def FCOM64m : FPI<0xDC, MRM2m, (outs), (ins f64mem:$src), "fcom\t$src">; -def FCOMP64m : FPI<0xDC, MRM3m, (outs), (ins f64mem:$src), "fcomp\t$src">; +def FCOM64m : FPI<0xDC, MRM2m, (outs), (ins f64mem:$src), "fcom{ll}\t$src">; +def FCOMP64m : FPI<0xDC, MRM3m, (outs), (ins f64mem:$src), "fcomp{ll}\t$src">; def FISTTP32m: FPI<0xDD, MRM1m, (outs i32mem:$dst), (ins), "fisttp{l}\t$dst">; def FRSTORm : FPI<0xDD, MRM4m, (outs f32mem:$dst), (ins), "frstor\t$dst">; -def FSAVEm : FPI<0xDD, MRM6m, (outs f32mem:$dst), (ins), "fsave\t$dst">; -def FSTSWm : FPI<0xDD, MRM7m, (outs f32mem:$dst), (ins), "fstsw\t$dst">; +def FSAVEm : FPI<0xDD, MRM6m, (outs f32mem:$dst), (ins), "fnsave\t$dst">; +def FNSTSWm : FPI<0xDD, MRM7m, (outs f32mem:$dst), (ins), "fnstsw\t$dst">; def FICOM16m : FPI<0xDE, MRM2m, (outs), (ins i16mem:$src), "ficom{w}\t$src">; def FICOMP16m: FPI<0xDE, MRM3m, (outs), (ins i16mem:$src), "ficomp{w}\t$src">; @@ -493,7 +515,8 @@ def ISTT_Fp64m80 : FpI_<(outs), (ins i64mem:$op, RFP80:$src), OneArgFP, let mayStore = 1 in { def ISTT_FP16m : FPI<0xDF, MRM1m, (outs), (ins i16mem:$dst), "fisttp{s}\t$dst">; def ISTT_FP32m : FPI<0xDB, MRM1m, (outs), (ins i32mem:$dst), "fisttp{l}\t$dst">; -def ISTT_FP64m : FPI<0xDD, MRM1m, (outs), (ins i64mem:$dst), "fisttp{ll}\t$dst">; +def ISTT_FP64m : FPI<0xDD, MRM1m, (outs), (ins i64mem:$dst), + "fisttp{ll}\t$dst">; } // FP Stack manipulation instructions. @@ -561,10 +584,15 @@ def UCOM_FIPr : FPI<0xE8, AddRegFrm, // CC = cmp ST(0) with ST(i), pop "fucomip\t{$reg, %st(0)|%ST(0), $reg}">, DF; } +def COM_FIr : FPI<0xF0, AddRegFrm, (outs), (ins RST:$reg), + "fcomi\t{$reg, %st(0)|%ST(0), $reg}">, DB; +def COM_FIPr : FPI<0xF0, AddRegFrm, (outs), (ins RST:$reg), + "fcomip\t{$reg, %st(0)|%ST(0), $reg}">, DF; + // Floating point flag ops. 
let Defs = [AX] in def FNSTSW8r : I<0xE0, RawFrm, // AX = fp flags - (outs), (ins), "fnstsw", []>, DF; + (outs), (ins), "fnstsw %ax", []>, DF; def FNSTCW16m : I<0xD9, MRM7m, // [mem16] = X87 control world (outs), (ins i16mem:$dst), "fnstcw\t$dst", @@ -574,6 +602,44 @@ let mayLoad = 1 in def FLDCW16m : I<0xD9, MRM5m, // X87 control world = [mem16] (outs), (ins i16mem:$dst), "fldcw\t$dst", []>; +// Register free + +def FFREE : FPI<0xC0, AddRegFrm, (outs), (ins RST:$reg), + "ffree\t$reg">, DD; + +// Clear exceptions + +def FNCLEX : I<0xE2, RawFrm, (outs), (ins), "fnclex", []>, DB; + +// Operandless floating-point instructions for the disassembler + +def FNOP : I<0xD0, RawFrm, (outs), (ins), "fnop", []>, D9; +def FXAM : I<0xE5, RawFrm, (outs), (ins), "fxam", []>, D9; +def FLDL2T : I<0xE9, RawFrm, (outs), (ins), "fldl2t", []>, D9; +def FLDL2E : I<0xEA, RawFrm, (outs), (ins), "fldl2e", []>, D9; +def FLDPI : I<0xEB, RawFrm, (outs), (ins), "fldpi", []>, D9; +def FLDLG2 : I<0xEC, RawFrm, (outs), (ins), "fldlg2", []>, D9; +def FLDLN2 : I<0xED, RawFrm, (outs), (ins), "fldln2", []>, D9; +def F2XM1 : I<0xF0, RawFrm, (outs), (ins), "f2xm1", []>, D9; +def FYL2X : I<0xF1, RawFrm, (outs), (ins), "fyl2x", []>, D9; +def FPTAN : I<0xF2, RawFrm, (outs), (ins), "fptan", []>, D9; +def FPATAN : I<0xF3, RawFrm, (outs), (ins), "fpatan", []>, D9; +def FXTRACT : I<0xF4, RawFrm, (outs), (ins), "fxtract", []>, D9; +def FPREM1 : I<0xF5, RawFrm, (outs), (ins), "fprem1", []>, D9; +def FDECSTP : I<0xF6, RawFrm, (outs), (ins), "fdecstp", []>, D9; +def FINCSTP : I<0xF7, RawFrm, (outs), (ins), "fincstp", []>, D9; +def FPREM : I<0xF8, RawFrm, (outs), (ins), "fprem", []>, D9; +def FYL2XP1 : I<0xF9, RawFrm, (outs), (ins), "fyl2xp1", []>, D9; +def FSINCOS : I<0xFB, RawFrm, (outs), (ins), "fsincos", []>, D9; +def FRNDINT : I<0xFC, RawFrm, (outs), (ins), "frndint", []>, D9; +def FSCALE : I<0xFD, RawFrm, (outs), (ins), "fscale", []>, D9; +def FCOMPP : I<0xD9, RawFrm, (outs), (ins), "fcompp", []>, DE; + +def FXSAVE : I<0xAE, MRM0m, (outs opaque512mem:$dst), (ins), + "fxsave\t$dst", []>, TB; +def FXRSTOR : I<0xAE, MRM1m, (outs), (ins opaque512mem:$src), + "fxrstor\t$src", []>, TB; + //===----------------------------------------------------------------------===// // Non-Instruction Patterns //===----------------------------------------------------------------------===// @@ -585,11 +651,15 @@ def : Pat<(X86fld addr:$src, f80), (LD_Fp80m addr:$src)>; // Required for CALL which return f32 / f64 / f80 values. 
def : Pat<(X86fst RFP32:$src, addr:$op, f32), (ST_Fp32m addr:$op, RFP32:$src)>; -def : Pat<(X86fst RFP64:$src, addr:$op, f32), (ST_Fp64m32 addr:$op, RFP64:$src)>; +def : Pat<(X86fst RFP64:$src, addr:$op, f32), (ST_Fp64m32 addr:$op, + RFP64:$src)>; def : Pat<(X86fst RFP64:$src, addr:$op, f64), (ST_Fp64m addr:$op, RFP64:$src)>; -def : Pat<(X86fst RFP80:$src, addr:$op, f32), (ST_Fp80m32 addr:$op, RFP80:$src)>; -def : Pat<(X86fst RFP80:$src, addr:$op, f64), (ST_Fp80m64 addr:$op, RFP80:$src)>; -def : Pat<(X86fst RFP80:$src, addr:$op, f80), (ST_FpP80m addr:$op, RFP80:$src)>; +def : Pat<(X86fst RFP80:$src, addr:$op, f32), (ST_Fp80m32 addr:$op, + RFP80:$src)>; +def : Pat<(X86fst RFP80:$src, addr:$op, f64), (ST_Fp80m64 addr:$op, + RFP80:$src)>; +def : Pat<(X86fst RFP80:$src, addr:$op, f80), (ST_FpP80m addr:$op, + RFP80:$src)>; // Floating point constant -0.0 and -1.0 def : Pat<(f32 fpimmneg0), (CHS_Fp32 (LD_Fp032))>, Requires<[FPStackf32]>; diff --git a/lib/Target/X86/X86InstrFormats.td b/lib/Target/X86/X86InstrFormats.td index 2f14bb0..a799f16 100644 --- a/lib/Target/X86/X86InstrFormats.td +++ b/lib/Target/X86/X86InstrFormats.td @@ -115,17 +115,20 @@ class I<bits<8> o, Format f, dag outs, dag ins, string asm, list<dag> pattern> let Pattern = pattern; let CodeSize = 3; } -class Ii8 <bits<8> o, Format f, dag outs, dag ins, string asm, list<dag> pattern> +class Ii8 <bits<8> o, Format f, dag outs, dag ins, string asm, + list<dag> pattern> : X86Inst<o, f, Imm8 , outs, ins, asm> { let Pattern = pattern; let CodeSize = 3; } -class Ii16<bits<8> o, Format f, dag outs, dag ins, string asm, list<dag> pattern> +class Ii16<bits<8> o, Format f, dag outs, dag ins, string asm, + list<dag> pattern> : X86Inst<o, f, Imm16, outs, ins, asm> { let Pattern = pattern; let CodeSize = 3; } -class Ii32<bits<8> o, Format f, dag outs, dag ins, string asm, list<dag> pattern> +class Ii32<bits<8> o, Format f, dag outs, dag ins, string asm, + list<dag> pattern> : X86Inst<o, f, Imm32, outs, ins, asm> { let Pattern = pattern; let CodeSize = 3; @@ -169,7 +172,8 @@ class Iseg32 <bits<8> o, Format f, dag outs, dag ins, string asm, class SSI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern> : I<o, F, outs, ins, asm, pattern>, XS, Requires<[HasSSE1]>; -class SSIi8<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern> +class SSIi8<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern> : Ii8<o, F, outs, ins, asm, pattern>, XS, Requires<[HasSSE1]>; class PSI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern> : I<o, F, outs, ins, asm, pattern>, TB, Requires<[HasSSE1]>; @@ -205,9 +209,11 @@ class PDIi8<bits<8> o, Format F, dag outs, dag ins, string asm, // S3SI - SSE3 instructions with XS prefix. // S3DI - SSE3 instructions with XD prefix. 
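The XS, XD and TB modifiers used by these classes select encoding prefixes: XS is the 0xF3 prefix, XD is 0xF2, and TB is the 0x0F two-byte opcode escape. The same base opcode therefore names up to three instructions; hand-written bytes for illustration:

#include <cstdint>

// Opcode 0x10 with different prefixes (ModRM 0xC1 = xmm0, xmm1):
const uint8_t movups_rr[] = {       0x0f, 0x10, 0xc1 }; // no prefix (TB)
const uint8_t movss_rr[]  = { 0xf3, 0x0f, 0x10, 0xc1 }; // XS = 0xF3
const uint8_t movsd_rr[]  = { 0xf2, 0x0f, 0x10, 0xc1 }; // XD = 0xF2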
-class S3SI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern> +class S3SI<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern> : I<o, F, outs, ins, asm, pattern>, XS, Requires<[HasSSE3]>; -class S3DI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern> +class S3DI<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern> : I<o, F, outs, ins, asm, pattern>, XD, Requires<[HasSSE3]>; class S3I<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern> : I<o, F, outs, ins, asm, pattern>, TB, OpSize, Requires<[HasSSE3]>; @@ -255,7 +261,7 @@ class SS42FI<bits<8> o, Format F, dag outs, dag ins, string asm, // SS42AI = SSE 4.2 instructions with TA prefix class SS42AI<bits<8> o, Format F, dag outs, dag ins, string asm, - list<dag> pattern> + list<dag> pattern> : I<o, F, outs, ins, asm, pattern>, TA, Requires<[HasSSE42]>; // X86-64 Instruction templates... @@ -297,17 +303,24 @@ class RPDI<bits<8> o, Format F, dag outs, dag ins, string asm, // MMXIi8 - MMX instructions with ImmT == Imm8 and TB prefix. // MMXID - MMX instructions with XD prefix. // MMXIS - MMX instructions with XS prefix. -class MMXI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern> +class MMXI<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern> : I<o, F, outs, ins, asm, pattern>, TB, Requires<[HasMMX]>; -class MMXI64<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern> +class MMXI64<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern> : I<o, F, outs, ins, asm, pattern>, TB, Requires<[HasMMX,In64BitMode]>; -class MMXRI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern> +class MMXRI<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern> : I<o, F, outs, ins, asm, pattern>, TB, REX_W, Requires<[HasMMX]>; -class MMX2I<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern> +class MMX2I<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern> : I<o, F, outs, ins, asm, pattern>, TB, OpSize, Requires<[HasMMX]>; -class MMXIi8<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern> +class MMXIi8<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern> : Ii8<o, F, outs, ins, asm, pattern>, TB, Requires<[HasMMX]>; -class MMXID<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern> +class MMXID<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern> : Ii8<o, F, outs, ins, asm, pattern>, XD, Requires<[HasMMX]>; -class MMXIS<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern> +class MMXIS<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern> : Ii8<o, F, outs, ins, asm, pattern>, XS, Requires<[HasMMX]>; diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp index 1947d35..e555cd1 100644 --- a/lib/Target/X86/X86InstrInfo.cpp +++ b/lib/Target/X86/X86InstrInfo.cpp @@ -1018,13 +1018,11 @@ void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB, switch (Opc) { default: break; case X86::MOV8r0: - case X86::MOV16r0: case X86::MOV32r0: { if (!isSafeToClobberEFLAGS(MBB, I)) { switch (Opc) { default: break; case X86::MOV8r0: Opc = X86::MOV8ri; break; - case X86::MOV16r0: Opc = X86::MOV16ri; break; case X86::MOV32r0: Opc = X86::MOV32ri; break; } Clone = false; @@ -1880,7 +1878,7 @@ bool X86InstrInfo::copyRegToReg(MachineBasicBlock &MBB, if (SrcReg != X86::EFLAGS) return false; if (DestRC == 
&X86::GR64RegClass || DestRC == &X86::GR64_NOSPRegClass) { - BuildMI(MBB, MI, DL, get(X86::PUSHFQ)); + BuildMI(MBB, MI, DL, get(X86::PUSHFQ64)); BuildMI(MBB, MI, DL, get(X86::POP64r), DestReg); return true; } else if (DestRC == &X86::GR32RegClass || @@ -2292,9 +2290,7 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, OpcodeTablePtr = &RegOp2MemOpTable2Addr; isTwoAddrFold = true; } else if (i == 0) { // If operand 0 - if (MI->getOpcode() == X86::MOV16r0) - NewMI = MakeM0Inst(*this, X86::MOV16mi, MOs, MI); - else if (MI->getOpcode() == X86::MOV32r0) + if (MI->getOpcode() == X86::MOV32r0) NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, MI); else if (MI->getOpcode() == X86::MOV8r0) NewMI = MakeM0Inst(*this, X86::MOV8mi, MOs, MI); @@ -2370,6 +2366,23 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, // Check switch flag if (NoFusing) return NULL; + if (!MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize)) + switch (MI->getOpcode()) { + case X86::CVTSD2SSrr: + case X86::Int_CVTSD2SSrr: + case X86::CVTSS2SDrr: + case X86::Int_CVTSS2SDrr: + case X86::RCPSSr: + case X86::RCPSSr_Int: + case X86::ROUNDSDr_Int: + case X86::ROUNDSSr_Int: + case X86::RSQRTSSr: + case X86::RSQRTSSr_Int: + case X86::SQRTSSr: + case X86::SQRTSSr_Int: + return 0; + } + const MachineFrameInfo *MFI = MF.getFrameInfo(); unsigned Size = MFI->getObjectSize(FrameIndex); unsigned Alignment = MFI->getObjectAlignment(FrameIndex); @@ -2405,6 +2418,23 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, // Check switch flag if (NoFusing) return NULL; + if (!MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize)) + switch (MI->getOpcode()) { + case X86::CVTSD2SSrr: + case X86::Int_CVTSD2SSrr: + case X86::CVTSS2SDrr: + case X86::Int_CVTSS2SDrr: + case X86::RCPSSr: + case X86::RCPSSr_Int: + case X86::ROUNDSDr_Int: + case X86::ROUNDSSr_Int: + case X86::RSQRTSSr: + case X86::RSQRTSSr_Int: + case X86::SQRTSSr: + case X86::SQRTSSr_Int: + return 0; + } + // Determine the alignment of the load. 
unsigned Alignment = 0; if (LoadMI->hasOneMemOperand()) @@ -2529,7 +2559,6 @@ bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI, } else if (OpNum == 0) { // If operand 0 switch (Opc) { case X86::MOV8r0: - case X86::MOV16r0: case X86::MOV32r0: return true; default: break; @@ -2558,7 +2587,6 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI, MemOp2RegOpTable.find((unsigned*)MI->getOpcode()); if (I == MemOp2RegOpTable.end()) return false; - DebugLoc dl = MI->getDebugLoc(); unsigned Opc = I->second.first; unsigned Index = I->second.second & 0xf; bool FoldedLoad = I->second.second & (1 << 4); diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td index 3cc1853..4d922a5 100644 --- a/lib/Target/X86/X86InstrInfo.td +++ b/lib/Target/X86/X86InstrInfo.td @@ -1,4 +1,4 @@ -//===- X86InstrInfo.td - Describe the X86 Instruction Set --*- tablegen -*-===// + // // The LLVM Compiler Infrastructure // @@ -41,6 +41,9 @@ def SDTX86BrCond : SDTypeProfile<0, 3, def SDTX86SetCC : SDTypeProfile<1, 2, [SDTCisVT<0, i8>, SDTCisVT<1, i8>, SDTCisVT<2, i32>]>; +def SDTX86SetCC_C : SDTypeProfile<1, 2, + [SDTCisInt<0>, + SDTCisVT<1, i8>, SDTCisVT<2, i32>]>; def SDTX86cas : SDTypeProfile<0, 3, [SDTCisPtrTy<0>, SDTCisInt<1>, SDTCisVT<2, i8>]>; @@ -87,7 +90,7 @@ def X86cmov : SDNode<"X86ISD::CMOV", SDTX86Cmov>; def X86brcond : SDNode<"X86ISD::BRCOND", SDTX86BrCond, [SDNPHasChain]>; def X86setcc : SDNode<"X86ISD::SETCC", SDTX86SetCC>; -def X86setcc_c : SDNode<"X86ISD::SETCC_CARRY", SDTX86SetCC>; +def X86setcc_c : SDNode<"X86ISD::SETCC_CARRY", SDTX86SetCC_C>; def X86cas : SDNode<"X86ISD::LCMPXCHG_DAG", SDTX86cas, [SDNPHasChain, SDNPInFlag, SDNPOutFlag, SDNPMayStore, @@ -196,6 +199,12 @@ class X86MemOperand<string printMethod> : Operand<iPTR> { def opaque32mem : X86MemOperand<"printopaquemem">; def opaque48mem : X86MemOperand<"printopaquemem">; def opaque80mem : X86MemOperand<"printopaquemem">; +def opaque512mem : X86MemOperand<"printopaquemem">; + +def offset8 : Operand<i64> { let PrintMethod = "print_pcrel_imm"; } +def offset16 : Operand<i64> { let PrintMethod = "print_pcrel_imm"; } +def offset32 : Operand<i64> { let PrintMethod = "print_pcrel_imm"; } +def offset64 : Operand<i64> { let PrintMethod = "print_pcrel_imm"; } def i8mem : X86MemOperand<"printi8mem">; def i16mem : X86MemOperand<"printi16mem">; @@ -289,6 +298,7 @@ def FarData : Predicate<"TM.getCodeModel() != CodeModel::Small &&" def NearData : Predicate<"TM.getCodeModel() == CodeModel::Small ||" "TM.getCodeModel() == CodeModel::Kernel">; def IsStatic : Predicate<"TM.getRelocationModel() == Reloc::Static">; +def OptForSize : Predicate<"OptForSize">; def OptForSpeed : Predicate<"!OptForSize">; def FastBTMem : Predicate<"!Subtarget->isBTMemSlow()">; def CallImmAddr : Predicate<"Subtarget->IsLegalToCallImmediateAddr(TM)">; @@ -351,7 +361,8 @@ def loadi16 : PatFrag<(ops node:$ptr), (i16 (unindexedload node:$ptr)), [{ return false; }]>; -def loadi16_anyext : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)), [{ +def loadi16_anyext : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)), +[{ LoadSDNode *LD = cast<LoadSDNode>(N); if (const Value *Src = LD->getSrcValue()) if (const PointerType *PT = dyn_cast<PointerType>(Src->getType())) @@ -539,13 +550,17 @@ def VASTART_SAVE_XMM_REGS : I<0, Pseudo, // Nop let neverHasSideEffects = 1 in { def NOOP : I<0x90, RawFrm, (outs), (ins), "nop", []>; + def NOOPW : I<0x1f, MRM0m, (outs), (ins i16mem:$zero), + "nop{w}\t$zero", []>, TB, OpSize; def NOOPL : I<0x1f, 
MRM0m, (outs), (ins i32mem:$zero), - "nopl\t$zero", []>, TB; + "nop{l}\t$zero", []>, TB; } // Trap def INT3 : I<0xcc, RawFrm, (outs), (ins), "int\t3", []>; def INT : I<0xcd, RawFrm, (outs), (ins i8imm:$trap), "int\t$trap", []>; +def IRET16 : I<0xcf, RawFrm, (outs), (ins), "iret{w}", []>, OpSize; +def IRET32 : I<0xcf, RawFrm, (outs), (ins), "iret{l}", []>; // PIC base construction. This expands to code that looks like this: // call $next_inst @@ -709,12 +724,14 @@ def ENTER : I<0xC8, RawFrm, (outs), (ins i16imm:$len, i8imm:$lvl), // Tail call stuff. let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in -def TCRETURNdi : I<0, Pseudo, (outs), (ins i32imm:$dst, i32imm:$offset, variable_ops), +def TCRETURNdi : I<0, Pseudo, (outs), + (ins i32imm:$dst, i32imm:$offset, variable_ops), "#TC_RETURN $dst $offset", []>; let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in -def TCRETURNri : I<0, Pseudo, (outs), (ins GR32:$dst, i32imm:$offset, variable_ops), +def TCRETURNri : I<0, Pseudo, (outs), + (ins GR32:$dst, i32imm:$offset, variable_ops), "#TC_RETURN $dst $offset", []>; @@ -722,7 +739,8 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in def TAILJMPd : IBr<0xE9, (ins i32imm_pcrel:$dst), "jmp\t$dst # TAILCALL", []>; let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in - def TAILJMPr : I<0xFF, MRM4r, (outs), (ins GR32:$dst), "jmp{l}\t{*}$dst # TAILCALL", + def TAILJMPr : I<0xFF, MRM4r, (outs), (ins GR32:$dst), + "jmp{l}\t{*}$dst # TAILCALL", []>; let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in def TAILJMPm : I<0xFF, MRM4m, (outs), (ins i32mem:$dst), @@ -735,6 +753,15 @@ let Defs = [EBP, ESP], Uses = [EBP, ESP], mayLoad = 1, neverHasSideEffects=1 in def LEAVE : I<0xC9, RawFrm, (outs), (ins), "leave", []>; +def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src), + "popcnt{w}\t{$src, $dst|$dst, $src}", []>, OpSize, XS; +def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src), + "popcnt{w}\t{$src, $dst|$dst, $src}", []>, OpSize, XS; +def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src), + "popcnt{l}\t{$src, $dst|$dst, $src}", []>, XS; +def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src), + "popcnt{l}\t{$src, $dst|$dst, $src}", []>, XS; + let Defs = [ESP], Uses = [ESP], neverHasSideEffects=1 in { let mayLoad = 1 in { def POP16r : I<0x58, AddRegFrm, (outs GR16:$reg), (ins), "pop{w}\t$reg", []>, @@ -770,10 +797,14 @@ def PUSH32i32 : Ii32<0x68, RawFrm, (outs), (ins i32imm:$imm), "push{l}\t$imm", []>; } -let Defs = [ESP, EFLAGS], Uses = [ESP], mayLoad = 1, neverHasSideEffects=1 in -def POPFD : I<0x9D, RawFrm, (outs), (ins), "popf", []>; -let Defs = [ESP], Uses = [ESP, EFLAGS], mayStore = 1, neverHasSideEffects=1 in -def PUSHFD : I<0x9C, RawFrm, (outs), (ins), "pushf", []>; +let Defs = [ESP, EFLAGS], Uses = [ESP], mayLoad = 1, neverHasSideEffects=1 in { +def POPF : I<0x9D, RawFrm, (outs), (ins), "popf{w}", []>, OpSize; +def POPFD : I<0x9D, RawFrm, (outs), (ins), "popf{l}", []>; +} +let Defs = [ESP], Uses = [ESP, EFLAGS], mayStore = 1, neverHasSideEffects=1 in { +def PUSHF : I<0x9C, RawFrm, (outs), (ins), "pushf{w}", []>, OpSize; +def PUSHFD : I<0x9C, RawFrm, (outs), (ins), "pushf{l}", []>; +} let isTwoAddress = 1 in // GR32 = bswap GR32 def BSWAP32r : I<0xC8, AddRegFrm, @@ -915,6 +946,13 @@ let Uses = [EAX] in def OUT32ir : Ii8<0xE7, RawFrm, (outs), (ins i16i8imm:$port), "out{l}\t{%eax, $port|$port, %EAX}", []>; +def IN8 : I<0x6C, RawFrm, (outs), (ins), + "ins{b}", []>; +def 
IN16 : I<0x6D, RawFrm, (outs), (ins), + "ins{w}", []>, OpSize; +def IN32 : I<0x6D, RawFrm, (outs), (ins), + "ins{l}", []>; + //===----------------------------------------------------------------------===// // Move Instructions... // @@ -947,18 +985,18 @@ def MOV32mi : Ii32<0xC7, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src), "mov{l}\t{$src, $dst|$dst, $src}", [(store (i32 imm:$src), addr:$dst)]>; -def MOV8o8a : Ii8 <0xA0, RawFrm, (outs), (ins i8imm:$src), +def MOV8o8a : Ii8 <0xA0, RawFrm, (outs), (ins offset8:$src), "mov{b}\t{$src, %al|%al, $src}", []>; -def MOV16o16a : Ii16 <0xA1, RawFrm, (outs), (ins i16imm:$src), +def MOV16o16a : Ii16 <0xA1, RawFrm, (outs), (ins offset16:$src), "mov{w}\t{$src, %ax|%ax, $src}", []>, OpSize; -def MOV32o32a : Ii32 <0xA1, RawFrm, (outs), (ins i32imm:$src), +def MOV32o32a : Ii32 <0xA1, RawFrm, (outs), (ins offset32:$src), "mov{l}\t{$src, %eax|%eax, $src}", []>; -def MOV8ao8 : Ii8 <0xA2, RawFrm, (outs i8imm:$dst), (ins), +def MOV8ao8 : Ii8 <0xA2, RawFrm, (outs offset8:$dst), (ins), "mov{b}\t{%al, $dst|$dst, %al}", []>; -def MOV16ao16 : Ii16 <0xA3, RawFrm, (outs i16imm:$dst), (ins), +def MOV16ao16 : Ii16 <0xA3, RawFrm, (outs offset16:$dst), (ins), "mov{w}\t{%ax, $dst|$dst, %ax}", []>, OpSize; -def MOV32ao32 : Ii32 <0xA3, RawFrm, (outs i32imm:$dst), (ins), +def MOV32ao32 : Ii32 <0xA3, RawFrm, (outs offset32:$dst), (ins), "mov{l}\t{%eax, $dst|$dst, %eax}", []>; // Moves to and from segment registers @@ -971,6 +1009,13 @@ def MOV16sr : I<0x8E, MRMSrcReg, (outs SEGMENT_REG:$dst), (ins GR16:$src), def MOV16sm : I<0x8E, MRMSrcMem, (outs SEGMENT_REG:$dst), (ins i16mem:$src), "mov{w}\t{$src, $dst|$dst, $src}", []>; +def MOV8rr_REV : I<0x8A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src), + "mov{b}\t{$src, $dst|$dst, $src}", []>; +def MOV16rr_REV : I<0x8B, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src), + "mov{w}\t{$src, $dst|$dst, $src}", []>, OpSize; +def MOV32rr_REV : I<0x8B, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src), + "mov{l}\t{$src, $dst|$dst, $src}", []>; + let canFoldAsLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in { def MOV8rm : I<0x8A, MRMSrcMem, (outs GR8 :$dst), (ins i8mem :$src), "mov{b}\t{$src, $dst|$dst, $src}", @@ -1010,6 +1055,18 @@ def MOV8rm_NOREX : I<0x8A, MRMSrcMem, (outs GR8_NOREX:$dst), (ins i8mem_NOREX:$src), "mov{b}\t{$src, $dst|$dst, $src} # NOREX", []>; +// Moves to and from debug registers +def MOV32rd : I<0x21, MRMDestReg, (outs GR32:$dst), (ins DEBUG_REG:$src), + "mov{l}\t{$src, $dst|$dst, $src}", []>, TB; +def MOV32dr : I<0x23, MRMSrcReg, (outs DEBUG_REG:$dst), (ins GR32:$src), + "mov{l}\t{$src, $dst|$dst, $src}", []>, TB; + +// Moves to and from control registers +def MOV32rc : I<0x20, MRMDestReg, (outs GR32:$dst), (ins CONTROL_REG_32:$src), + "mov{l}\t{$src, $dst|$dst, $src}", []>, TB; +def MOV32cr : I<0x22, MRMSrcReg, (outs CONTROL_REG_32:$dst), (ins GR32:$src), + "mov{l}\t{$src, $dst|$dst, $src}", []>, TB; + //===----------------------------------------------------------------------===// // Fixed-Register Multiplication and Division Instructions...
// @@ -1071,7 +1128,7 @@ def IMUL8m : I<0xF6, MRM5m, (outs), (ins i8mem :$src), let Defs = [AX,DX,EFLAGS], Uses = [AX] in def IMUL16m : I<0xF7, MRM5m, (outs), (ins i16mem:$src), "imul{w}\t$src", []>, OpSize; // AX,DX = AX*[mem16] -let Defs = [EAX,EDX], Uses = [EAX] in +let Defs = [EAX,EDX,EFLAGS], Uses = [EAX] in def IMUL32m : I<0xF7, MRM5m, (outs), (ins i32mem:$src), "imul{l}\t$src", []>; // EAX,EDX = EAX*[mem32] } @@ -1079,45 +1136,47 @@ def IMUL32m : I<0xF7, MRM5m, (outs), (ins i32mem:$src), // unsigned division/remainder let Defs = [AL,AH,EFLAGS], Uses = [AX] in -def DIV8r : I<0xF6, MRM6r, (outs), (ins GR8:$src), // AX/r8 = AL,AH +def DIV8r : I<0xF6, MRM6r, (outs), (ins GR8:$src), // AX/r8 = AL,AH "div{b}\t$src", []>; let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in -def DIV16r : I<0xF7, MRM6r, (outs), (ins GR16:$src), // DX:AX/r16 = AX,DX +def DIV16r : I<0xF7, MRM6r, (outs), (ins GR16:$src), // DX:AX/r16 = AX,DX "div{w}\t$src", []>, OpSize; let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in -def DIV32r : I<0xF7, MRM6r, (outs), (ins GR32:$src), // EDX:EAX/r32 = EAX,EDX +def DIV32r : I<0xF7, MRM6r, (outs), (ins GR32:$src), // EDX:EAX/r32 = EAX,EDX "div{l}\t$src", []>; let mayLoad = 1 in { let Defs = [AL,AH,EFLAGS], Uses = [AX] in -def DIV8m : I<0xF6, MRM6m, (outs), (ins i8mem:$src), // AX/[mem8] = AL,AH +def DIV8m : I<0xF6, MRM6m, (outs), (ins i8mem:$src), // AX/[mem8] = AL,AH "div{b}\t$src", []>; let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in -def DIV16m : I<0xF7, MRM6m, (outs), (ins i16mem:$src), // DX:AX/[mem16] = AX,DX +def DIV16m : I<0xF7, MRM6m, (outs), (ins i16mem:$src), // DX:AX/[mem16] = AX,DX "div{w}\t$src", []>, OpSize; let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in -def DIV32m : I<0xF7, MRM6m, (outs), (ins i32mem:$src), // EDX:EAX/[mem32] = EAX,EDX + // EDX:EAX/[mem32] = EAX,EDX +def DIV32m : I<0xF7, MRM6m, (outs), (ins i32mem:$src), "div{l}\t$src", []>; } // Signed division/remainder. 
let Defs = [AL,AH,EFLAGS], Uses = [AX] in -def IDIV8r : I<0xF6, MRM7r, (outs), (ins GR8:$src), // AX/r8 = AL,AH +def IDIV8r : I<0xF6, MRM7r, (outs), (ins GR8:$src), // AX/r8 = AL,AH "idiv{b}\t$src", []>; let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in -def IDIV16r: I<0xF7, MRM7r, (outs), (ins GR16:$src), // DX:AX/r16 = AX,DX +def IDIV16r: I<0xF7, MRM7r, (outs), (ins GR16:$src), // DX:AX/r16 = AX,DX "idiv{w}\t$src", []>, OpSize; let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in -def IDIV32r: I<0xF7, MRM7r, (outs), (ins GR32:$src), // EDX:EAX/r32 = EAX,EDX +def IDIV32r: I<0xF7, MRM7r, (outs), (ins GR32:$src), // EDX:EAX/r32 = EAX,EDX "idiv{l}\t$src", []>; let mayLoad = 1 in { let Defs = [AL,AH,EFLAGS], Uses = [AX] in -def IDIV8m : I<0xF6, MRM7m, (outs), (ins i8mem:$src), // AX/[mem8] = AL,AH +def IDIV8m : I<0xF6, MRM7m, (outs), (ins i8mem:$src), // AX/[mem8] = AL,AH "idiv{b}\t$src", []>; let Defs = [AX,DX,EFLAGS], Uses = [AX,DX] in -def IDIV16m: I<0xF7, MRM7m, (outs), (ins i16mem:$src), // DX:AX/[mem16] = AX,DX +def IDIV16m: I<0xF7, MRM7m, (outs), (ins i16mem:$src), // DX:AX/[mem16] = AX,DX "idiv{w}\t$src", []>, OpSize; let Defs = [EAX,EDX,EFLAGS], Uses = [EAX,EDX] in -def IDIV32m: I<0xF7, MRM7m, (outs), (ins i32mem:$src), // EDX:EAX/[mem32] = EAX,EDX +def IDIV32m: I<0xF7, MRM7m, (outs), (ins i32mem:$src), + // EDX:EAX/[mem32] = EAX,EDX "idiv{l}\t$src", []>; } @@ -1145,193 +1204,193 @@ def CMOV_GR8 : I<0, Pseudo, let isCommutable = 1 in { def CMOVB16rr : I<0x42, MRMSrcReg, // if <u, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovb\t{$src2, $dst|$dst, $src2}", + "cmovb{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_B, EFLAGS))]>, TB, OpSize; def CMOVB32rr : I<0x42, MRMSrcReg, // if <u, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovb\t{$src2, $dst|$dst, $src2}", + "cmovb{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_B, EFLAGS))]>, TB; def CMOVAE16rr: I<0x43, MRMSrcReg, // if >=u, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovae\t{$src2, $dst|$dst, $src2}", + "cmovae{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_AE, EFLAGS))]>, TB, OpSize; def CMOVAE32rr: I<0x43, MRMSrcReg, // if >=u, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovae\t{$src2, $dst|$dst, $src2}", + "cmovae{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_AE, EFLAGS))]>, TB; def CMOVE16rr : I<0x44, MRMSrcReg, // if ==, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmove\t{$src2, $dst|$dst, $src2}", + "cmove{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_E, EFLAGS))]>, TB, OpSize; def CMOVE32rr : I<0x44, MRMSrcReg, // if ==, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmove\t{$src2, $dst|$dst, $src2}", + "cmove{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_E, EFLAGS))]>, TB; def CMOVNE16rr: I<0x45, MRMSrcReg, // if !=, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovne\t{$src2, $dst|$dst, $src2}", + "cmovne{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_NE, EFLAGS))]>, TB, OpSize; def CMOVNE32rr: I<0x45, MRMSrcReg, // if !=, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovne\t{$src2, $dst|$dst, $src2}", + "cmovne{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst,
(X86cmov GR32:$src1, GR32:$src2, X86_COND_NE, EFLAGS))]>, TB; def CMOVBE16rr: I<0x46, MRMSrcReg, // if <=u, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovbe\t{$src2, $dst|$dst, $src2}", + "cmovbe{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_BE, EFLAGS))]>, TB, OpSize; def CMOVBE32rr: I<0x46, MRMSrcReg, // if <=u, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovbe\t{$src2, $dst|$dst, $src2}", + "cmovbe{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_BE, EFLAGS))]>, TB; def CMOVA16rr : I<0x47, MRMSrcReg, // if >u, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmova\t{$src2, $dst|$dst, $src2}", + "cmova{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_A, EFLAGS))]>, TB, OpSize; def CMOVA32rr : I<0x47, MRMSrcReg, // if >u, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmova\t{$src2, $dst|$dst, $src2}", + "cmova{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_A, EFLAGS))]>, TB; def CMOVL16rr : I<0x4C, MRMSrcReg, // if <s, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovl\t{$src2, $dst|$dst, $src2}", + "cmovl{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_L, EFLAGS))]>, TB, OpSize; def CMOVL32rr : I<0x4C, MRMSrcReg, // if <s, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovl\t{$src2, $dst|$dst, $src2}", + "cmovl{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_L, EFLAGS))]>, TB; def CMOVGE16rr: I<0x4D, MRMSrcReg, // if >=s, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovge\t{$src2, $dst|$dst, $src2}", + "cmovge{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_GE, EFLAGS))]>, TB, OpSize; def CMOVGE32rr: I<0x4D, MRMSrcReg, // if >=s, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovge\t{$src2, $dst|$dst, $src2}", + "cmovge{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_GE, EFLAGS))]>, TB; def CMOVLE16rr: I<0x4E, MRMSrcReg, // if <=s, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovle\t{$src2, $dst|$dst, $src2}", + "cmovle{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_LE, EFLAGS))]>, TB, OpSize; def CMOVLE32rr: I<0x4E, MRMSrcReg, // if <=s, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovle\t{$src2, $dst|$dst, $src2}", + "cmovle{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_LE, EFLAGS))]>, TB; def CMOVG16rr : I<0x4F, MRMSrcReg, // if >s, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovg\t{$src2, $dst|$dst, $src2}", + "cmovg{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_G, EFLAGS))]>, TB, OpSize; def CMOVG32rr : I<0x4F, MRMSrcReg, // if >s, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovg\t{$src2, $dst|$dst, $src2}", + "cmovg{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_G, EFLAGS))]>, TB; def CMOVS16rr : I<0x48, MRMSrcReg, // if signed, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovs\t{$src2, $dst|$dst, $src2}", + "cmovs{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_S, EFLAGS))]>, TB, OpSize; 
def CMOVS32rr : I<0x48, MRMSrcReg, // if signed, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovs\t{$src2, $dst|$dst, $src2}", + "cmovs{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_S, EFLAGS))]>, TB; def CMOVNS16rr: I<0x49, MRMSrcReg, // if !signed, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovns\t{$src2, $dst|$dst, $src2}", + "cmovns{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_NS, EFLAGS))]>, TB, OpSize; def CMOVNS32rr: I<0x49, MRMSrcReg, // if !signed, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovns\t{$src2, $dst|$dst, $src2}", + "cmovns{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_NS, EFLAGS))]>, TB; def CMOVP16rr : I<0x4A, MRMSrcReg, // if parity, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovp\t{$src2, $dst|$dst, $src2}", + "cmovp{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_P, EFLAGS))]>, TB, OpSize; def CMOVP32rr : I<0x4A, MRMSrcReg, // if parity, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovp\t{$src2, $dst|$dst, $src2}", + "cmovp{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_P, EFLAGS))]>, TB; def CMOVNP16rr : I<0x4B, MRMSrcReg, // if !parity, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovnp\t{$src2, $dst|$dst, $src2}", + "cmovnp{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_NP, EFLAGS))]>, TB, OpSize; def CMOVNP32rr : I<0x4B, MRMSrcReg, // if !parity, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovnp\t{$src2, $dst|$dst, $src2}", + "cmovnp{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_NP, EFLAGS))]>, TB; def CMOVO16rr : I<0x40, MRMSrcReg, // if overflow, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovo\t{$src2, $dst|$dst, $src2}", + "cmovo{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_O, EFLAGS))]>, TB, OpSize; def CMOVO32rr : I<0x40, MRMSrcReg, // if overflow, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovo\t{$src2, $dst|$dst, $src2}", + "cmovo{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_O, EFLAGS))]>, TB; def CMOVNO16rr : I<0x41, MRMSrcReg, // if !overflow, GR16 = GR16 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), - "cmovno\t{$src2, $dst|$dst, $src2}", + "cmovno{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, X86_COND_NO, EFLAGS))]>, TB, OpSize; def CMOVNO32rr : I<0x41, MRMSrcReg, // if !overflow, GR32 = GR32 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), - "cmovno\t{$src2, $dst|$dst, $src2}", + "cmovno{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2, X86_COND_NO, EFLAGS))]>, TB; @@ -1339,193 +1398,193 @@ def CMOVNO32rr : I<0x41, MRMSrcReg, // if !overflow, GR32 = GR32 def CMOVB16rm : I<0x42, MRMSrcMem, // if <u, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovb\t{$src2, $dst|$dst, $src2}", + "cmovb{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_B, EFLAGS))]>, TB, OpSize; def CMOVB32rm : I<0x42, MRMSrcMem, // if <u, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovb\t{$src2, $dst|$dst, $src2}", + "cmovb{l}\t{$src2, 
$dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_B, EFLAGS))]>, TB; def CMOVAE16rm: I<0x43, MRMSrcMem, // if >=u, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovae\t{$src2, $dst|$dst, $src2}", + "cmovae{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_AE, EFLAGS))]>, TB, OpSize; def CMOVAE32rm: I<0x43, MRMSrcMem, // if >=u, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovae\t{$src2, $dst|$dst, $src2}", + "cmovae{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_AE, EFLAGS))]>, TB; def CMOVE16rm : I<0x44, MRMSrcMem, // if ==, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmove\t{$src2, $dst|$dst, $src2}", + "cmove{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_E, EFLAGS))]>, TB, OpSize; def CMOVE32rm : I<0x44, MRMSrcMem, // if ==, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmove\t{$src2, $dst|$dst, $src2}", + "cmove{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_E, EFLAGS))]>, TB; def CMOVNE16rm: I<0x45, MRMSrcMem, // if !=, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovne\t{$src2, $dst|$dst, $src2}", + "cmovne{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_NE, EFLAGS))]>, TB, OpSize; def CMOVNE32rm: I<0x45, MRMSrcMem, // if !=, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovne\t{$src2, $dst|$dst, $src2}", + "cmovne{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_NE, EFLAGS))]>, TB; def CMOVBE16rm: I<0x46, MRMSrcMem, // if <=u, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovbe\t{$src2, $dst|$dst, $src2}", + "cmovbe{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_BE, EFLAGS))]>, TB, OpSize; def CMOVBE32rm: I<0x46, MRMSrcMem, // if <=u, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovbe\t{$src2, $dst|$dst, $src2}", + "cmovbe{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_BE, EFLAGS))]>, TB; def CMOVA16rm : I<0x47, MRMSrcMem, // if >u, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmova\t{$src2, $dst|$dst, $src2}", + "cmova{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_A, EFLAGS))]>, TB, OpSize; def CMOVA32rm : I<0x47, MRMSrcMem, // if >u, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmova\t{$src2, $dst|$dst, $src2}", + "cmova{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_A, EFLAGS))]>, TB; def CMOVL16rm : I<0x4C, MRMSrcMem, // if <s, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovl\t{$src2, $dst|$dst, $src2}", + "cmovl{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_L, EFLAGS))]>, TB, OpSize; def CMOVL32rm : I<0x4C, MRMSrcMem, // if <s, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovl\t{$src2, $dst|$dst, $src2}", + "cmovl{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_L, EFLAGS))]>, TB; def CMOVGE16rm: I<0x4D, MRMSrcMem, // if >=s, GR16 = 
[mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovge\t{$src2, $dst|$dst, $src2}", + "cmovge{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_GE, EFLAGS))]>, TB, OpSize; def CMOVGE32rm: I<0x4D, MRMSrcMem, // if >=s, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovge\t{$src2, $dst|$dst, $src2}", + "cmovge{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_GE, EFLAGS))]>, TB; def CMOVLE16rm: I<0x4E, MRMSrcMem, // if <=s, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovle\t{$src2, $dst|$dst, $src2}", + "cmovle{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_LE, EFLAGS))]>, TB, OpSize; def CMOVLE32rm: I<0x4E, MRMSrcMem, // if <=s, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovle\t{$src2, $dst|$dst, $src2}", + "cmovle{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_LE, EFLAGS))]>, TB; def CMOVG16rm : I<0x4F, MRMSrcMem, // if >s, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovg\t{$src2, $dst|$dst, $src2}", + "cmovg{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_G, EFLAGS))]>, TB, OpSize; def CMOVG32rm : I<0x4F, MRMSrcMem, // if >s, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovg\t{$src2, $dst|$dst, $src2}", + "cmovg{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_G, EFLAGS))]>, TB; def CMOVS16rm : I<0x48, MRMSrcMem, // if signed, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovs\t{$src2, $dst|$dst, $src2}", + "cmovs{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_S, EFLAGS))]>, TB, OpSize; def CMOVS32rm : I<0x48, MRMSrcMem, // if signed, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovs\t{$src2, $dst|$dst, $src2}", + "cmovs{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_S, EFLAGS))]>, TB; def CMOVNS16rm: I<0x49, MRMSrcMem, // if !signed, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovns\t{$src2, $dst|$dst, $src2}", + "cmovns{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_NS, EFLAGS))]>, TB, OpSize; def CMOVNS32rm: I<0x49, MRMSrcMem, // if !signed, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovns\t{$src2, $dst|$dst, $src2}", + "cmovns{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_NS, EFLAGS))]>, TB; def CMOVP16rm : I<0x4A, MRMSrcMem, // if parity, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovp\t{$src2, $dst|$dst, $src2}", + "cmovp{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_P, EFLAGS))]>, TB, OpSize; def CMOVP32rm : I<0x4A, MRMSrcMem, // if parity, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovp\t{$src2, $dst|$dst, $src2}", + "cmovp{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_P, EFLAGS))]>, TB; def CMOVNP16rm : I<0x4B, MRMSrcMem, // if !parity, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovnp\t{$src2, $dst|$dst, $src2}", + "cmovnp{w}\t{$src2, $dst|$dst, 
$src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_NP, EFLAGS))]>, TB, OpSize; def CMOVNP32rm : I<0x4B, MRMSrcMem, // if !parity, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovnp\t{$src2, $dst|$dst, $src2}", + "cmovnp{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_NP, EFLAGS))]>, TB; def CMOVO16rm : I<0x40, MRMSrcMem, // if overflow, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovo\t{$src2, $dst|$dst, $src2}", + "cmovo{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_O, EFLAGS))]>, TB, OpSize; def CMOVO32rm : I<0x40, MRMSrcMem, // if overflow, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovo\t{$src2, $dst|$dst, $src2}", + "cmovo{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_O, EFLAGS))]>, TB; def CMOVNO16rm : I<0x41, MRMSrcMem, // if !overflow, GR16 = [mem16] (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), - "cmovno\t{$src2, $dst|$dst, $src2}", + "cmovno{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2), X86_COND_NO, EFLAGS))]>, TB, OpSize; def CMOVNO32rm : I<0x41, MRMSrcMem, // if !overflow, GR32 = [mem32] (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), - "cmovno\t{$src2, $dst|$dst, $src2}", + "cmovno{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2), X86_COND_NO, EFLAGS))]>, TB; @@ -1583,11 +1642,13 @@ def INC8r : I<0xFE, MRM0r, (outs GR8 :$dst), (ins GR8 :$src), "inc{b}\t$dst", [(set GR8:$dst, (add GR8:$src, 1)), (implicit EFLAGS)]>; let isConvertibleToThreeAddress = 1, CodeSize = 1 in { // Can xform into LEA. -def INC16r : I<0x40, AddRegFrm, (outs GR16:$dst), (ins GR16:$src), "inc{w}\t$dst", +def INC16r : I<0x40, AddRegFrm, (outs GR16:$dst), (ins GR16:$src), + "inc{w}\t$dst", [(set GR16:$dst, (add GR16:$src, 1)), (implicit EFLAGS)]>, OpSize, Requires<[In32BitMode]>; -def INC32r : I<0x40, AddRegFrm, (outs GR32:$dst), (ins GR32:$src), "inc{l}\t$dst", +def INC32r : I<0x40, AddRegFrm, (outs GR32:$dst), (ins GR32:$src), + "inc{l}\t$dst", [(set GR32:$dst, (add GR32:$src, 1)), (implicit EFLAGS)]>, Requires<[In32BitMode]>; } @@ -1610,11 +1671,13 @@ def DEC8r : I<0xFE, MRM1r, (outs GR8 :$dst), (ins GR8 :$src), "dec{b}\t$dst", [(set GR8:$dst, (add GR8:$src, -1)), (implicit EFLAGS)]>; let isConvertibleToThreeAddress = 1, CodeSize = 1 in { // Can xform into LEA. -def DEC16r : I<0x48, AddRegFrm, (outs GR16:$dst), (ins GR16:$src), "dec{w}\t$dst", +def DEC16r : I<0x48, AddRegFrm, (outs GR16:$dst), (ins GR16:$src), + "dec{w}\t$dst", [(set GR16:$dst, (add GR16:$src, -1)), (implicit EFLAGS)]>, OpSize, Requires<[In32BitMode]>; -def DEC32r : I<0x48, AddRegFrm, (outs GR32:$dst), (ins GR32:$src), "dec{l}\t$dst", +def DEC32r : I<0x48, AddRegFrm, (outs GR32:$dst), (ins GR32:$src), + "dec{l}\t$dst", [(set GR32:$dst, (add GR32:$src, -1)), (implicit EFLAGS)]>, Requires<[In32BitMode]>; } @@ -1654,6 +1717,17 @@ def AND32rr : I<0x21, MRMDestReg, (implicit EFLAGS)]>; } +// AND instructions with the destination register in REG and the source register +// in R/M. Included for the disassembler. 
+def AND8rr_REV : I<0x22, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2), + "and{b}\t{$src2, $dst|$dst, $src2}", []>; +def AND16rr_REV : I<0x23, MRMSrcReg, (outs GR16:$dst), + (ins GR16:$src1, GR16:$src2), + "and{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize; +def AND32rr_REV : I<0x23, MRMSrcReg, (outs GR32:$dst), + (ins GR32:$src1, GR32:$src2), + "and{l}\t{$src2, $dst|$dst, $src2}", []>; + def AND8rm : I<0x22, MRMSrcMem, (outs GR8 :$dst), (ins GR8 :$src1, i8mem :$src2), "and{b}\t{$src2, $dst|$dst, $src2}", @@ -1753,50 +1827,73 @@ let isTwoAddress = 0 in { let isCommutable = 1 in { // X = OR Y, Z --> X = OR Z, Y -def OR8rr : I<0x08, MRMDestReg, (outs GR8 :$dst), (ins GR8 :$src1, GR8 :$src2), +def OR8rr : I<0x08, MRMDestReg, (outs GR8 :$dst), + (ins GR8 :$src1, GR8 :$src2), "or{b}\t{$src2, $dst|$dst, $src2}", [(set GR8:$dst, (or GR8:$src1, GR8:$src2)), (implicit EFLAGS)]>; -def OR16rr : I<0x09, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), +def OR16rr : I<0x09, MRMDestReg, (outs GR16:$dst), + (ins GR16:$src1, GR16:$src2), "or{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (or GR16:$src1, GR16:$src2)), (implicit EFLAGS)]>, OpSize; -def OR32rr : I<0x09, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), +def OR32rr : I<0x09, MRMDestReg, (outs GR32:$dst), + (ins GR32:$src1, GR32:$src2), "or{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (or GR32:$src1, GR32:$src2)), (implicit EFLAGS)]>; } -def OR8rm : I<0x0A, MRMSrcMem , (outs GR8 :$dst), (ins GR8 :$src1, i8mem :$src2), + +// OR instructions with the destination register in REG and the source register +// in R/M. Included for the disassembler. +def OR8rr_REV : I<0x0A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2), + "or{b}\t{$src2, $dst|$dst, $src2}", []>; +def OR16rr_REV : I<0x0B, MRMSrcReg, (outs GR16:$dst), + (ins GR16:$src1, GR16:$src2), + "or{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize; +def OR32rr_REV : I<0x0B, MRMSrcReg, (outs GR32:$dst), + (ins GR32:$src1, GR32:$src2), + "or{l}\t{$src2, $dst|$dst, $src2}", []>; + +def OR8rm : I<0x0A, MRMSrcMem , (outs GR8 :$dst), + (ins GR8 :$src1, i8mem :$src2), "or{b}\t{$src2, $dst|$dst, $src2}", [(set GR8:$dst, (or GR8:$src1, (load addr:$src2))), (implicit EFLAGS)]>; -def OR16rm : I<0x0B, MRMSrcMem , (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2), +def OR16rm : I<0x0B, MRMSrcMem , (outs GR16:$dst), + (ins GR16:$src1, i16mem:$src2), "or{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (or GR16:$src1, (load addr:$src2))), (implicit EFLAGS)]>, OpSize; -def OR32rm : I<0x0B, MRMSrcMem , (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2), +def OR32rm : I<0x0B, MRMSrcMem , (outs GR32:$dst), + (ins GR32:$src1, i32mem:$src2), "or{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (or GR32:$src1, (load addr:$src2))), (implicit EFLAGS)]>; -def OR8ri : Ii8 <0x80, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2), +def OR8ri : Ii8 <0x80, MRM1r, (outs GR8 :$dst), + (ins GR8 :$src1, i8imm:$src2), "or{b}\t{$src2, $dst|$dst, $src2}", [(set GR8:$dst, (or GR8:$src1, imm:$src2)), (implicit EFLAGS)]>; -def OR16ri : Ii16<0x81, MRM1r, (outs GR16:$dst), (ins GR16:$src1, i16imm:$src2), +def OR16ri : Ii16<0x81, MRM1r, (outs GR16:$dst), + (ins GR16:$src1, i16imm:$src2), "or{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (or GR16:$src1, imm:$src2)), (implicit EFLAGS)]>, OpSize; -def OR32ri : Ii32<0x81, MRM1r, (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2), +def OR32ri : Ii32<0x81, MRM1r, (outs GR32:$dst), + (ins GR32:$src1, i32imm:$src2), "or{l}\t{$src2, $dst|$dst, $src2}", 
[(set GR32:$dst, (or GR32:$src1, imm:$src2)), (implicit EFLAGS)]>; -def OR16ri8 : Ii8<0x83, MRM1r, (outs GR16:$dst), (ins GR16:$src1, i16i8imm:$src2), +def OR16ri8 : Ii8<0x83, MRM1r, (outs GR16:$dst), + (ins GR16:$src1, i16i8imm:$src2), "or{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (or GR16:$src1, i16immSExt8:$src2)), (implicit EFLAGS)]>, OpSize; -def OR32ri8 : Ii8<0x83, MRM1r, (outs GR32:$dst), (ins GR32:$src1, i32i8imm:$src2), +def OR32ri8 : Ii8<0x83, MRM1r, (outs GR32:$dst), + (ins GR32:$src1, i32i8imm:$src2), "or{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (or GR32:$src1, i32immSExt8:$src2)), (implicit EFLAGS)]>; @@ -1863,6 +1960,17 @@ let isCommutable = 1 in { // X = XOR Y, Z --> X = XOR Z, Y (implicit EFLAGS)]>; } // isCommutable = 1 +// XOR instructions with the destination register in REG and the source register +// in R/M. Included for the disassembler. +def XOR8rr_REV : I<0x32, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2), + "xor{b}\t{$src2, $dst|$dst, $src2}", []>; +def XOR16rr_REV : I<0x33, MRMSrcReg, (outs GR16:$dst), + (ins GR16:$src1, GR16:$src2), + "xor{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize; +def XOR32rr_REV : I<0x33, MRMSrcReg, (outs GR32:$dst), + (ins GR32:$src1, GR32:$src2), + "xor{l}\t{$src2, $dst|$dst, $src2}", []>; + def XOR8rm : I<0x32, MRMSrcMem , (outs GR8 :$dst), (ins GR8:$src1, i8mem :$src2), "xor{b}\t{$src2, $dst|$dst, $src2}", @@ -2202,7 +2310,8 @@ def RCL16mCL : I<0xD3, MRM2m, (outs i16mem:$dst), (ins i16mem:$src), } def RCL16ri : Ii8<0xC1, MRM2r, (outs GR16:$dst), (ins GR16:$src, i8imm:$cnt), "rcl{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize; -def RCL16mi : Ii8<0xC1, MRM2m, (outs i16mem:$dst), (ins i16mem:$src, i8imm:$cnt), +def RCL16mi : Ii8<0xC1, MRM2m, (outs i16mem:$dst), + (ins i16mem:$src, i8imm:$cnt), "rcl{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize; def RCL32r1 : I<0xD1, MRM2r, (outs GR32:$dst), (ins GR32:$src), @@ -2217,7 +2326,8 @@ def RCL32mCL : I<0xD3, MRM2m, (outs i32mem:$dst), (ins i32mem:$src), } def RCL32ri : Ii8<0xC1, MRM2r, (outs GR32:$dst), (ins GR32:$src, i8imm:$cnt), "rcl{l}\t{$cnt, $dst|$dst, $cnt}", []>; -def RCL32mi : Ii8<0xC1, MRM2m, (outs i32mem:$dst), (ins i32mem:$src, i8imm:$cnt), +def RCL32mi : Ii8<0xC1, MRM2m, (outs i32mem:$dst), + (ins i32mem:$src, i8imm:$cnt), "rcl{l}\t{$cnt, $dst|$dst, $cnt}", []>; def RCR8r1 : I<0xD0, MRM3r, (outs GR8:$dst), (ins GR8:$src), @@ -2247,7 +2357,8 @@ def RCR16mCL : I<0xD3, MRM3m, (outs i16mem:$dst), (ins i16mem:$src), } def RCR16ri : Ii8<0xC1, MRM3r, (outs GR16:$dst), (ins GR16:$src, i8imm:$cnt), "rcr{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize; -def RCR16mi : Ii8<0xC1, MRM3m, (outs i16mem:$dst), (ins i16mem:$src, i8imm:$cnt), +def RCR16mi : Ii8<0xC1, MRM3m, (outs i16mem:$dst), + (ins i16mem:$src, i8imm:$cnt), "rcr{w}\t{$cnt, $dst|$dst, $cnt}", []>, OpSize; def RCR32r1 : I<0xD1, MRM3r, (outs GR32:$dst), (ins GR32:$src), @@ -2262,7 +2373,8 @@ def RCR32mCL : I<0xD3, MRM3m, (outs i32mem:$dst), (ins i32mem:$src), } def RCR32ri : Ii8<0xC1, MRM3r, (outs GR32:$dst), (ins GR32:$src, i8imm:$cnt), "rcr{l}\t{$cnt, $dst|$dst, $cnt}", []>; -def RCR32mi : Ii8<0xC1, MRM3m, (outs i32mem:$dst), (ins i32mem:$src, i8imm:$cnt), +def RCR32mi : Ii8<0xC1, MRM3m, (outs i32mem:$dst), + (ins i32mem:$src, i8imm:$cnt), "rcr{l}\t{$cnt, $dst|$dst, $cnt}", []>; // FIXME: provide shorter instructions when imm8 == 1 @@ -2283,7 +2395,8 @@ def ROL8ri : Ii8<0xC0, MRM0r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2), [(set GR8:$dst, (rotl GR8:$src1, (i8 imm:$src2)))]>; def ROL16ri : Ii8<0xC1, MRM0r, (outs 
GR16:$dst), (ins GR16:$src1, i8imm:$src2), "rol{w}\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (rotl GR16:$src1, (i8 imm:$src2)))]>, OpSize; + [(set GR16:$dst, (rotl GR16:$src1, (i8 imm:$src2)))]>, + OpSize; def ROL32ri : Ii8<0xC1, MRM0r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2), "rol{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (rotl GR32:$src1, (i8 imm:$src2)))]>; @@ -2352,7 +2465,8 @@ def ROR8ri : Ii8<0xC0, MRM1r, (outs GR8 :$dst), (ins GR8 :$src1, i8imm:$src2), [(set GR8:$dst, (rotr GR8:$src1, (i8 imm:$src2)))]>; def ROR16ri : Ii8<0xC1, MRM1r, (outs GR16:$dst), (ins GR16:$src1, i8imm:$src2), "ror{w}\t{$src2, $dst|$dst, $src2}", - [(set GR16:$dst, (rotr GR16:$src1, (i8 imm:$src2)))]>, OpSize; + [(set GR16:$dst, (rotr GR16:$src1, (i8 imm:$src2)))]>, + OpSize; def ROR32ri : Ii8<0xC1, MRM1r, (outs GR32:$dst), (ins GR32:$src1, i8imm:$src2), "ror{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (rotr GR32:$src1, (i8 imm:$src2)))]>; @@ -2408,17 +2522,21 @@ let isTwoAddress = 0 in { // Double shift instructions (generalizations of rotate) let Uses = [CL] in { -def SHLD32rrCL : I<0xA5, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), +def SHLD32rrCL : I<0xA5, MRMDestReg, (outs GR32:$dst), + (ins GR32:$src1, GR32:$src2), "shld{l}\t{%cl, $src2, $dst|$dst, $src2, CL}", [(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2, CL))]>, TB; -def SHRD32rrCL : I<0xAD, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2), +def SHRD32rrCL : I<0xAD, MRMDestReg, (outs GR32:$dst), + (ins GR32:$src1, GR32:$src2), "shrd{l}\t{%cl, $src2, $dst|$dst, $src2, CL}", [(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2, CL))]>, TB; -def SHLD16rrCL : I<0xA5, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), +def SHLD16rrCL : I<0xA5, MRMDestReg, (outs GR16:$dst), + (ins GR16:$src1, GR16:$src2), "shld{w}\t{%cl, $src2, $dst|$dst, $src2, CL}", [(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2, CL))]>, TB, OpSize; -def SHRD16rrCL : I<0xAD, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2), +def SHRD16rrCL : I<0xAD, MRMDestReg, (outs GR16:$dst), + (ins GR16:$src1, GR16:$src2), "shrd{w}\t{%cl, $src2, $dst|$dst, $src2, CL}", [(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2, CL))]>, TB, OpSize; @@ -2426,25 +2544,29 @@ def SHRD16rrCL : I<0xAD, MRMDestReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$sr let isCommutable = 1 in { // These instructions commute to each other. 
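// (The identity: SHLD(a, b, c) == (a << c) | (b >> (n - c)), which equals
// SHRD(b, a, n - c) == (b >> (n - c)) | (a << c), so commuting swaps the two
// register operands, switches SHLD to SHRD, and rewrites the amount c as
// n - c.)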
def SHLD32rri8 : Ii8<0xA4, MRMDestReg, - (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$src3), + (outs GR32:$dst), + (ins GR32:$src1, GR32:$src2, i8imm:$src3), "shld{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}", [(set GR32:$dst, (X86shld GR32:$src1, GR32:$src2, (i8 imm:$src3)))]>, TB; def SHRD32rri8 : Ii8<0xAC, MRMDestReg, - (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$src3), + (outs GR32:$dst), + (ins GR32:$src1, GR32:$src2, i8imm:$src3), "shrd{l}\t{$src3, $src2, $dst|$dst, $src2, $src3}", [(set GR32:$dst, (X86shrd GR32:$src1, GR32:$src2, (i8 imm:$src3)))]>, TB; def SHLD16rri8 : Ii8<0xA4, MRMDestReg, - (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$src3), + (outs GR16:$dst), + (ins GR16:$src1, GR16:$src2, i8imm:$src3), "shld{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}", [(set GR16:$dst, (X86shld GR16:$src1, GR16:$src2, (i8 imm:$src3)))]>, TB, OpSize; def SHRD16rri8 : Ii8<0xAC, MRMDestReg, - (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$src3), + (outs GR16:$dst), + (ins GR16:$src1, GR16:$src2, i8imm:$src3), "shrd{w}\t{$src3, $src2, $dst|$dst, $src2, $src3}", [(set GR16:$dst, (X86shrd GR16:$src1, GR16:$src2, (i8 imm:$src3)))]>, @@ -2642,6 +2764,16 @@ def ADC32rr : I<0x11, MRMDestReg, (outs GR32:$dst), "adc{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (adde GR32:$src1, GR32:$src2))]>; } + +def ADC8rr_REV : I<0x12, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2), + "adc{b}\t{$src2, $dst|$dst, $src2}", []>; +def ADC16rr_REV : I<0x13, MRMSrcReg, (outs GR16:$dst), + (ins GR16:$src1, GR16:$src2), + "adc{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize; +def ADC32rr_REV : I<0x13, MRMSrcReg, (outs GR32:$dst), + (ins GR32:$src1, GR32:$src2), + "adc{l}\t{$src2, $dst|$dst, $src2}", []>; + def ADC8rm : I<0x12, MRMSrcMem , (outs GR8:$dst), (ins GR8:$src1, i8mem:$src2), "adc{b}\t{$src2, $dst|$dst, $src2}", @@ -2728,6 +2860,15 @@ def SUB32rr : I<0x29, MRMDestReg, (outs GR32:$dst), (ins GR32:$src1,GR32:$src2), [(set GR32:$dst, (sub GR32:$src1, GR32:$src2)), (implicit EFLAGS)]>; +def SUB8rr_REV : I<0x2A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2), + "sub{b}\t{$src2, $dst|$dst, $src2}", []>; +def SUB16rr_REV : I<0x2B, MRMSrcReg, (outs GR16:$dst), + (ins GR16:$src1, GR16:$src2), + "sub{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize; +def SUB32rr_REV : I<0x2B, MRMSrcReg, (outs GR32:$dst), + (ins GR32:$src1, GR32:$src2), + "sub{l}\t{$src2, $dst|$dst, $src2}", []>; + // Register-Memory Subtraction def SUB8rm : I<0x2A, MRMSrcMem, (outs GR8 :$dst), (ins GR8 :$src1, i8mem :$src2), @@ -2869,6 +3010,16 @@ let isTwoAddress = 0 in { def SBB32i32 : Ii32<0x1D, RawFrm, (outs), (ins i32imm:$src), "sbb{l}\t{$src, %eax|%eax, $src}", []>; } + +def SBB8rr_REV : I<0x1A, MRMSrcReg, (outs GR8:$dst), (ins GR8:$src1, GR8:$src2), + "sbb{b}\t{$src2, $dst|$dst, $src2}", []>; +def SBB16rr_REV : I<0x1B, MRMSrcReg, (outs GR16:$dst), + (ins GR16:$src1, GR16:$src2), + "sbb{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize; +def SBB32rr_REV : I<0x1B, MRMSrcReg, (outs GR32:$dst), + (ins GR32:$src1, GR32:$src2), + "sbb{l}\t{$src2, $dst|$dst, $src2}", []>; + def SBB8rm : I<0x1A, MRMSrcMem, (outs GR8:$dst), (ins GR8:$src1, i8mem:$src2), "sbb{b}\t{$src2, $dst|$dst, $src2}", [(set GR8:$dst, (sube GR8:$src1, (load addr:$src2)))]>; @@ -2923,7 +3074,8 @@ def IMUL16rm : I<0xAF, MRMSrcMem, (outs GR16:$dst), "imul{w}\t{$src2, $dst|$dst, $src2}", [(set GR16:$dst, (mul GR16:$src1, (load addr:$src2))), (implicit EFLAGS)]>, TB, OpSize; -def IMUL32rm : I<0xAF, MRMSrcMem, (outs GR32:$dst), (ins GR32:$src1, 
i32mem:$src2), +def IMUL32rm : I<0xAF, MRMSrcMem, (outs GR32:$dst), + (ins GR32:$src1, i32mem:$src2), "imul{l}\t{$src2, $dst|$dst, $src2}", [(set GR32:$dst, (mul GR32:$src1, (load addr:$src2))), (implicit EFLAGS)]>, TB; @@ -2955,12 +3107,12 @@ def IMUL32rri8 : Ii8<0x6B, MRMSrcReg, // GR32 = GR32*I8 (implicit EFLAGS)]>; // Memory-Integer Signed Integer Multiply -def IMUL16rmi : Ii16<0x69, MRMSrcMem, // GR16 = [mem16]*I16 +def IMUL16rmi : Ii16<0x69, MRMSrcMem, // GR16 = [mem16]*I16 (outs GR16:$dst), (ins i16mem:$src1, i16imm:$src2), "imul{w}\t{$src2, $src1, $dst|$dst, $src1, $src2}", [(set GR16:$dst, (mul (load addr:$src1), imm:$src2)), (implicit EFLAGS)]>, OpSize; -def IMUL32rmi : Ii32<0x69, MRMSrcMem, // GR32 = [mem32]*I32 +def IMUL32rmi : Ii32<0x69, MRMSrcMem, // GR32 = [mem32]*I32 (outs GR32:$dst), (ins i32mem:$src1, i32imm:$src2), "imul{l}\t{$src2, $src1, $dst|$dst, $src1, $src2}", [(set GR32:$dst, (mul (load addr:$src1), imm:$src2)), @@ -3068,11 +3220,11 @@ def SETB_C8r : I<0x18, MRMInitReg, (outs GR8:$dst), (ins), [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>; def SETB_C16r : I<0x19, MRMInitReg, (outs GR16:$dst), (ins), "sbb{w}\t$dst, $dst", - [(set GR16:$dst, (zext (X86setcc_c X86_COND_B, EFLAGS)))]>, + [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>, OpSize; def SETB_C32r : I<0x19, MRMInitReg, (outs GR32:$dst), (ins), "sbb{l}\t$dst, $dst", - [(set GR32:$dst, (zext (X86setcc_c X86_COND_B, EFLAGS)))]>; + [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>; } // isCodeGenOnly def SETEr : I<0x94, MRM0r, @@ -3371,15 +3523,21 @@ def BT32rr : I<0xA3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2), // Unlike with the register+register form, the memory+register form of the // bt instruction does not ignore the high bits of the index. From ISel's -// perspective, this is pretty bizarre. Disable these instructions for now. -//def BT16mr : I<0xA3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2), -// "bt{w}\t{$src2, $src1|$src1, $src2}", +// perspective, this is pretty bizarre. Make these instructions disassembly +// only for now. 
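+// (With a register bit offset, e.g. "bt %eax, (mem)" with %eax == 100, the
+// CPU tests bit 4 of the dword at mem+12, using the full offset as a signed
+// bit displacement; the reg+reg form instead masks the offset modulo the
+// operand width.)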
+ +def BT16mr : I<0xA3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2), + "bt{w}\t{$src2, $src1|$src1, $src2}", // [(X86bt (loadi16 addr:$src1), GR16:$src2), -// (implicit EFLAGS)]>, OpSize, TB, Requires<[FastBTMem]>; -//def BT32mr : I<0xA3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2), -// "bt{l}\t{$src2, $src1|$src1, $src2}", +// (implicit EFLAGS)] + [] + >, OpSize, TB, Requires<[FastBTMem]>; +def BT32mr : I<0xA3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2), + "bt{l}\t{$src2, $src1|$src1, $src2}", // [(X86bt (loadi32 addr:$src1), GR32:$src2), -// (implicit EFLAGS)]>, TB, Requires<[FastBTMem]>; +// (implicit EFLAGS)] + [] + >, TB, Requires<[FastBTMem]>; def BT16ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR16:$src1, i16i8imm:$src2), "bt{w}\t{$src2, $src1|$src1, $src2}", @@ -3400,12 +3558,67 @@ def BT32mi8 : Ii8<0xBA, MRM4m, (outs), (ins i32mem:$src1, i32i8imm:$src2), "bt{l}\t{$src2, $src1|$src1, $src2}", [(X86bt (loadi32 addr:$src1), i32immSExt8:$src2), (implicit EFLAGS)]>, TB; + +def BTC16rr : I<0xBB, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2), + "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB; +def BTC32rr : I<0xBB, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2), + "btc{l}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTC16mr : I<0xBB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2), + "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB; +def BTC32mr : I<0xBB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2), + "btc{l}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTC16ri8 : Ii8<0xBA, MRM7r, (outs), (ins GR16:$src1, i16i8imm:$src2), + "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB; +def BTC32ri8 : Ii8<0xBA, MRM7r, (outs), (ins GR32:$src1, i32i8imm:$src2), + "btc{l}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTC16mi8 : Ii8<0xBA, MRM7m, (outs), (ins i16mem:$src1, i16i8imm:$src2), + "btc{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB; +def BTC32mi8 : Ii8<0xBA, MRM7m, (outs), (ins i32mem:$src1, i32i8imm:$src2), + "btc{l}\t{$src2, $src1|$src1, $src2}", []>, TB; + +def BTR16rr : I<0xB3, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2), + "btr{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB; +def BTR32rr : I<0xB3, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2), + "btr{l}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTR16mr : I<0xB3, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2), + "btr{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB; +def BTR32mr : I<0xB3, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2), + "btr{l}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTR16ri8 : Ii8<0xBA, MRM6r, (outs), (ins GR16:$src1, i16i8imm:$src2), + "btr{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB; +def BTR32ri8 : Ii8<0xBA, MRM6r, (outs), (ins GR32:$src1, i32i8imm:$src2), + "btr{l}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTR16mi8 : Ii8<0xBA, MRM6m, (outs), (ins i16mem:$src1, i16i8imm:$src2), + "btr{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB; +def BTR32mi8 : Ii8<0xBA, MRM6m, (outs), (ins i32mem:$src1, i32i8imm:$src2), + "btr{l}\t{$src2, $src1|$src1, $src2}", []>, TB; + +def BTS16rr : I<0xAB, MRMDestReg, (outs), (ins GR16:$src1, GR16:$src2), + "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB; +def BTS32rr : I<0xAB, MRMDestReg, (outs), (ins GR32:$src1, GR32:$src2), + "bts{l}\t{$src2, $src1|$src1, $src2}", []>, TB; +def BTS16mr : I<0xAB, MRMDestMem, (outs), (ins i16mem:$src1, GR16:$src2), + "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB; +def BTS32mr : I<0xAB, MRMDestMem, (outs), (ins i32mem:$src1, GR32:$src2), + "bts{l}\t{$src2, 
$src1|$src1, $src2}", []>, TB;
+def BTS16ri8 : Ii8<0xBA, MRM5r, (outs), (ins GR16:$src1, i16i8imm:$src2),
+                "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
+def BTS32ri8 : Ii8<0xBA, MRM5r, (outs), (ins GR32:$src1, i32i8imm:$src2),
+                "bts{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
+def BTS16mi8 : Ii8<0xBA, MRM5m, (outs), (ins i16mem:$src1, i16i8imm:$src2),
+                "bts{w}\t{$src2, $src1|$src1, $src2}", []>, OpSize, TB;
+def BTS32mi8 : Ii8<0xBA, MRM5m, (outs), (ins i32mem:$src1, i32i8imm:$src2),
+                "bts{l}\t{$src2, $src1|$src1, $src2}", []>, TB;
 } // Defs = [EFLAGS]

 // Sign/Zero extenders

 // Use movsbl instead of movsbw; we don't care about the high 16 bits
 // of the register here. This has a smaller encoding and avoids a
-// partial-register update.
+// partial-register update. Actual movsbw included for the disassembler.
+def MOVSX16rr8W : I<0xBE, MRMSrcReg, (outs GR16:$dst), (ins GR8:$src),
+                    "movs{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+def MOVSX16rm8W : I<0xBE, MRMSrcMem, (outs GR16:$dst), (ins i8mem:$src),
+                    "movs{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
 def MOVSX16rr8 : I<0xBE, MRMSrcReg, (outs GR16:$dst), (ins GR8 :$src),
                    "", [(set GR16:$dst, (sext GR8:$src))]>, TB;
 def MOVSX16rm8 : I<0xBE, MRMSrcMem, (outs GR16:$dst), (ins i8mem :$src),
@@ -3425,7 +3638,11 @@ def MOVSX32rm16: I<0xBF, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src),

 // Use movzbl instead of movzbw; we don't care about the high 16 bits
 // of the register here. This has a smaller encoding and avoids a
-// partial-register update.
+// partial-register update. Actual movzbw included for the disassembler.
+def MOVZX16rr8W : I<0xB6, MRMSrcReg, (outs GR16:$dst), (ins GR8:$src),
+                    "movz{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+def MOVZX16rm8W : I<0xB6, MRMSrcMem, (outs GR16:$dst), (ins i8mem:$src),
+                    "movz{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
 def MOVZX16rr8 : I<0xB6, MRMSrcReg, (outs GR16:$dst), (ins GR8 :$src),
                    "", [(set GR16:$dst, (zext GR8:$src))]>, TB;
 def MOVZX16rm8 : I<0xB6, MRMSrcMem, (outs GR16:$dst), (ins i8mem :$src),
@@ -3483,15 +3700,18 @@ let Defs = [EFLAGS], isReMaterializable = 1, isAsCheapAsAMove = 1,
 def MOV8r0   : I<0x30, MRMInitReg, (outs GR8 :$dst), (ins),
                  "xor{b}\t$dst, $dst",
                  [(set GR8:$dst, 0)]>;
-// Use xorl instead of xorw since we don't care about the high 16 bits,
-// it's smaller, and it avoids a partial-register update.
-def MOV16r0  : I<0x31, MRMInitReg, (outs GR16:$dst), (ins),
-                 "", [(set GR16:$dst, 0)]>;
-def MOV32r0  : I<0x31, MRMInitReg, (outs GR32:$dst), (ins),
+
+def MOV32r0  : I<0x31, MRMInitReg, (outs GR32:$dst), (ins),
                  "xor{l}\t$dst, $dst",
                  [(set GR32:$dst, 0)]>;
 }

+// Use xorl instead of xorw since we don't care about the high 16 bits,
+// it's smaller, and it avoids a partial-register update.
+let AddedComplexity = 1 in
+def : Pat<(i16 0),
+          (EXTRACT_SUBREG (MOV32r0), x86_subreg_16bit)>;
+
 //===----------------------------------------------------------------------===//
 // Thread Local Storage Instructions
 //
@@ -3538,18 +3758,32 @@ def EH_RETURN   : I<0xC3, RawFrm, (outs), (ins GR32:$addr),

 // Atomic swap. These are just normal xchg instructions. But since a memory
 // operand is referenced, the atomicity is ensured.
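The implicit-lock behaviour the comment above relies on is easy to observe from the host side: a bare xchg with a memory operand locks the bus without a lock prefix, and std::atomic<int>::exchange typically lowers to exactly that instruction on x86. A minimal sketch, an illustration only and not part of this patch:

#include <atomic>

int main() {
  std::atomic<int> A{1};
  // On x86 this compiles to a single "xchg %reg, (mem)"; no lock prefix
  // is needed because the lock is implicit for the memory form.
  int Old = A.exchange(2);
  return Old == 1 ? 0 : 1;
}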
let Constraints = "$val = $dst" in { -def XCHG32rm : I<0x87, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val), +def XCHG32rm : I<0x87, MRMSrcMem, (outs GR32:$dst), + (ins GR32:$val, i32mem:$ptr), "xchg{l}\t{$val, $ptr|$ptr, $val}", [(set GR32:$dst, (atomic_swap_32 addr:$ptr, GR32:$val))]>; -def XCHG16rm : I<0x87, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val), +def XCHG16rm : I<0x87, MRMSrcMem, (outs GR16:$dst), + (ins GR16:$val, i16mem:$ptr), "xchg{w}\t{$val, $ptr|$ptr, $val}", [(set GR16:$dst, (atomic_swap_16 addr:$ptr, GR16:$val))]>, OpSize; -def XCHG8rm : I<0x86, MRMSrcMem, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val), +def XCHG8rm : I<0x86, MRMSrcMem, (outs GR8:$dst), (ins GR8:$val, i8mem:$ptr), "xchg{b}\t{$val, $ptr|$ptr, $val}", [(set GR8:$dst, (atomic_swap_8 addr:$ptr, GR8:$val))]>; + +def XCHG32rr : I<0x87, MRMSrcReg, (outs GR32:$dst), (ins GR32:$val, GR32:$src), + "xchg{l}\t{$val, $src|$src, $val}", []>; +def XCHG16rr : I<0x87, MRMSrcReg, (outs GR16:$dst), (ins GR16:$val, GR16:$src), + "xchg{w}\t{$val, $src|$src, $val}", []>, OpSize; +def XCHG8rr : I<0x86, MRMSrcReg, (outs GR8:$dst), (ins GR8:$val, GR8:$src), + "xchg{b}\t{$val, $src|$src, $val}", []>; } +def XCHG16ar : I<0x90, AddRegFrm, (outs), (ins GR16:$src), + "xchg{w}\t{$src, %ax|%ax, $src}", []>, OpSize; +def XCHG32ar : I<0x90, AddRegFrm, (outs), (ins GR32:$src), + "xchg{l}\t{$src, %eax|%eax, $src}", []>; + // Atomic compare and swap. let Defs = [EAX, EFLAGS], Uses = [EAX] in { def LCMPXCHG32 : I<0xB1, MRMDestMem, (outs), (ins i32mem:$ptr, GR32:$swap), @@ -3579,23 +3813,54 @@ def LCMPXCHG8 : I<0xB0, MRMDestMem, (outs), (ins i8mem:$ptr, GR8:$swap), // Atomic exchange and add let Constraints = "$val = $dst", Defs = [EFLAGS] in { -def LXADD32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val), +def LXADD32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins GR32:$val, i32mem:$ptr), "lock\n\t" "xadd{l}\t{$val, $ptr|$ptr, $val}", [(set GR32:$dst, (atomic_load_add_32 addr:$ptr, GR32:$val))]>, TB, LOCK; -def LXADD16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val), +def LXADD16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins GR16:$val, i16mem:$ptr), "lock\n\t" "xadd{w}\t{$val, $ptr|$ptr, $val}", [(set GR16:$dst, (atomic_load_add_16 addr:$ptr, GR16:$val))]>, TB, OpSize, LOCK; -def LXADD8 : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins i8mem:$ptr, GR8:$val), +def LXADD8 : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins GR8:$val, i8mem:$ptr), "lock\n\t" "xadd{b}\t{$val, $ptr|$ptr, $val}", [(set GR8:$dst, (atomic_load_add_8 addr:$ptr, GR8:$val))]>, TB, LOCK; } +def XADD8rr : I<0xC0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src), + "xadd{b}\t{$src, $dst|$dst, $src}", []>, TB; +def XADD16rr : I<0xC1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src), + "xadd{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize; +def XADD32rr : I<0xC1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src), + "xadd{l}\t{$src, $dst|$dst, $src}", []>, TB; + +def XADD8rm : I<0xC0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src), + "xadd{b}\t{$src, $dst|$dst, $src}", []>, TB; +def XADD16rm : I<0xC1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src), + "xadd{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize; +def XADD32rm : I<0xC1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src), + "xadd{l}\t{$src, $dst|$dst, $src}", []>, TB; + +def CMPXCHG8rr : I<0xB0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src), + "cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB; +def CMPXCHG16rr : I<0xB1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src), + "cmpxchg{w}\t{$src, $dst|$dst, 
$src}", []>, TB, OpSize; +def CMPXCHG32rr : I<0xB1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src), + "cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB; + +def CMPXCHG8rm : I<0xB0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src), + "cmpxchg{b}\t{$src, $dst|$dst, $src}", []>, TB; +def CMPXCHG16rm : I<0xB1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src), + "cmpxchg{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize; +def CMPXCHG32rm : I<0xB1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src), + "cmpxchg{l}\t{$src, $dst|$dst, $src}", []>, TB; + +def CMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$dst), + "cmpxchg8b\t$dst", []>, TB; + // Optimized codegen when the non-memory output is not used. // FIXME: Use normal add / sub instructions and add lock prefix dynamically. let Defs = [EFLAGS] in { @@ -3652,7 +3917,7 @@ def LOCK_SUB16mi : Ii16<0x81, MRM5m, (outs), (ins i16mem:$dst, i16imm:$src2), def LOCK_SUB32mi : Ii32<0x81, MRM5m, (outs), (ins i32mem:$dst, i32imm:$src2), "lock\n\t" "sub{l}\t{$src2, $dst|$dst, $src2}", []>, LOCK; -def LOCK_SUB16mi8 : Ii8<0x83, MRM5m, (outs), (ins i16mem:$dst, i16i8imm :$src2), +def LOCK_SUB16mi8 : Ii8<0x83, MRM5m, (outs), (ins i16mem:$dst, i16i8imm :$src2), "lock\n\t" "sub{w}\t{$src2, $dst|$dst, $src2}", []>, OpSize, LOCK; def LOCK_SUB32mi8 : Ii8<0x83, MRM5m, (outs), (ins i32mem:$dst, i32i8imm :$src2), @@ -3777,12 +4042,193 @@ def LAR32rm : I<0x02, MRMSrcMem, (outs GR32:$dst), (ins i16mem:$src), "lar{l}\t{$src, $dst|$dst, $src}", []>, TB; def LAR32rr : I<0x02, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src), "lar{l}\t{$src, $dst|$dst, $src}", []>, TB; + +def LSL16rm : I<0x03, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src), + "lsl{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize; +def LSL16rr : I<0x03, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src), + "lsl{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize; +def LSL32rm : I<0x03, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src), + "lsl{l}\t{$src, $dst|$dst, $src}", []>, TB; +def LSL32rr : I<0x03, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src), + "lsl{l}\t{$src, $dst|$dst, $src}", []>, TB; + +def INVLPG : I<0x01, RawFrm, (outs), (ins), "invlpg", []>, TB; + +def STRr : I<0x00, MRM1r, (outs GR16:$dst), (ins), + "str{w}\t{$dst}", []>, TB; +def STRm : I<0x00, MRM1m, (outs i16mem:$dst), (ins), + "str{w}\t{$dst}", []>, TB; +def LTRr : I<0x00, MRM3r, (outs), (ins GR16:$src), + "ltr{w}\t{$src}", []>, TB; +def LTRm : I<0x00, MRM3m, (outs), (ins i16mem:$src), + "ltr{w}\t{$src}", []>, TB; + +def PUSHFS16 : I<0xa0, RawFrm, (outs), (ins), + "push{w}\t%fs", []>, OpSize, TB; +def PUSHFS32 : I<0xa0, RawFrm, (outs), (ins), + "push{l}\t%fs", []>, TB; +def PUSHGS16 : I<0xa8, RawFrm, (outs), (ins), + "push{w}\t%gs", []>, OpSize, TB; +def PUSHGS32 : I<0xa8, RawFrm, (outs), (ins), + "push{l}\t%gs", []>, TB; + +def POPFS16 : I<0xa1, RawFrm, (outs), (ins), + "pop{w}\t%fs", []>, OpSize, TB; +def POPFS32 : I<0xa1, RawFrm, (outs), (ins), + "pop{l}\t%fs", []>, TB; +def POPGS16 : I<0xa9, RawFrm, (outs), (ins), + "pop{w}\t%gs", []>, OpSize, TB; +def POPGS32 : I<0xa9, RawFrm, (outs), (ins), + "pop{l}\t%gs", []>, TB; + +def LDS16rm : I<0xc5, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src), + "lds{w}\t{$src, $dst|$dst, $src}", []>, OpSize; +def LDS32rm : I<0xc5, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src), + "lds{l}\t{$src, $dst|$dst, $src}", []>; +def LSS16rm : I<0xb2, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src), + "lss{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize; +def LSS32rm : I<0xb2, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src), + 
"lss{l}\t{$src, $dst|$dst, $src}", []>, TB; +def LES16rm : I<0xc4, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src), + "les{w}\t{$src, $dst|$dst, $src}", []>, OpSize; +def LES32rm : I<0xc4, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src), + "les{l}\t{$src, $dst|$dst, $src}", []>; +def LFS16rm : I<0xb4, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src), + "lfs{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize; +def LFS32rm : I<0xb4, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src), + "lfs{l}\t{$src, $dst|$dst, $src}", []>, TB; +def LGS16rm : I<0xb5, MRMSrcMem, (outs GR16:$dst), (ins opaque32mem:$src), + "lgs{w}\t{$src, $dst|$dst, $src}", []>, TB, OpSize; +def LGS32rm : I<0xb5, MRMSrcMem, (outs GR32:$dst), (ins opaque48mem:$src), + "lgs{l}\t{$src, $dst|$dst, $src}", []>, TB; + +def VERRr : I<0x00, MRM4r, (outs), (ins GR16:$seg), + "verr\t$seg", []>, TB; +def VERRm : I<0x00, MRM4m, (outs), (ins i16mem:$seg), + "verr\t$seg", []>, TB; +def VERWr : I<0x00, MRM5r, (outs), (ins GR16:$seg), + "verw\t$seg", []>, TB; +def VERWm : I<0x00, MRM5m, (outs), (ins i16mem:$seg), + "verw\t$seg", []>, TB; + +// Descriptor-table support instructions + +def SGDTm : I<0x01, MRM0m, (outs opaque48mem:$dst), (ins), + "sgdt\t$dst", []>, TB; +def SIDTm : I<0x01, MRM1m, (outs opaque48mem:$dst), (ins), + "sidt\t$dst", []>, TB; +def SLDT16r : I<0x00, MRM0r, (outs GR16:$dst), (ins), + "sldt{w}\t$dst", []>, TB; +def SLDT16m : I<0x00, MRM0m, (outs i16mem:$dst), (ins), + "sldt{w}\t$dst", []>, TB; +def LGDTm : I<0x01, MRM2m, (outs), (ins opaque48mem:$src), + "lgdt\t$src", []>, TB; +def LIDTm : I<0x01, MRM3m, (outs), (ins opaque48mem:$src), + "lidt\t$src", []>, TB; +def LLDT16r : I<0x00, MRM2r, (outs), (ins GR16:$src), + "lldt{w}\t$src", []>, TB; +def LLDT16m : I<0x00, MRM2m, (outs), (ins i16mem:$src), + "lldt{w}\t$src", []>, TB; // String manipulation instructions def LODSB : I<0xAC, RawFrm, (outs), (ins), "lodsb", []>; def LODSW : I<0xAD, RawFrm, (outs), (ins), "lodsw", []>, OpSize; -def LODSD : I<0xAD, RawFrm, (outs), (ins), "lodsd", []>; +def LODSD : I<0xAD, RawFrm, (outs), (ins), "lods{l|d}", []>; + +def OUTSB : I<0x6E, RawFrm, (outs), (ins), "outsb", []>; +def OUTSW : I<0x6F, RawFrm, (outs), (ins), "outsw", []>, OpSize; +def OUTSD : I<0x6F, RawFrm, (outs), (ins), "outs{l|d}", []>; + +// CPU flow control instructions + +def HLT : I<0xF4, RawFrm, (outs), (ins), "hlt", []>; +def RSM : I<0xAA, RawFrm, (outs), (ins), "rsm", []>, TB; + +// FPU control instructions + +def FNINIT : I<0xE3, RawFrm, (outs), (ins), "fninit", []>, DB; + +// Flag instructions + +def CLC : I<0xF8, RawFrm, (outs), (ins), "clc", []>; +def STC : I<0xF9, RawFrm, (outs), (ins), "stc", []>; +def CLI : I<0xFA, RawFrm, (outs), (ins), "cli", []>; +def STI : I<0xFB, RawFrm, (outs), (ins), "sti", []>; +def CLD : I<0xFC, RawFrm, (outs), (ins), "cld", []>; +def STD : I<0xFD, RawFrm, (outs), (ins), "std", []>; +def CMC : I<0xF5, RawFrm, (outs), (ins), "cmc", []>; + +def CLTS : I<0x06, RawFrm, (outs), (ins), "clts", []>, TB; + +// Table lookup instructions + +def XLAT : I<0xD7, RawFrm, (outs), (ins), "xlatb", []>; + +// Specialized register support + +def WRMSR : I<0x30, RawFrm, (outs), (ins), "wrmsr", []>, TB; +def RDMSR : I<0x32, RawFrm, (outs), (ins), "rdmsr", []>, TB; +def RDPMC : I<0x33, RawFrm, (outs), (ins), "rdpmc", []>, TB; + +def SMSW16r : I<0x01, MRM4r, (outs GR16:$dst), (ins), + "smsw{w}\t$dst", []>, OpSize, TB; +def SMSW32r : I<0x01, MRM4r, (outs GR32:$dst), (ins), + "smsw{l}\t$dst", []>, TB; +// For memory operands, there is only a 16-bit form 
+def SMSW16m : I<0x01, MRM4m, (outs i16mem:$dst), (ins), + "smsw{w}\t$dst", []>, TB; + +def LMSW16r : I<0x01, MRM6r, (outs), (ins GR16:$src), + "lmsw{w}\t$src", []>, TB; +def LMSW16m : I<0x01, MRM6m, (outs), (ins i16mem:$src), + "lmsw{w}\t$src", []>, TB; + +def CPUID : I<0xA2, RawFrm, (outs), (ins), "cpuid", []>, TB; + +// Cache instructions + +def INVD : I<0x08, RawFrm, (outs), (ins), "invd", []>, TB; +def WBINVD : I<0x09, RawFrm, (outs), (ins), "wbinvd", []>, TB; + +// VMX instructions + +// 66 0F 38 80 +def INVEPT : I<0x38, RawFrm, (outs), (ins), "invept", []>, OpSize, TB; +// 66 0F 38 81 +def INVVPID : I<0x38, RawFrm, (outs), (ins), "invvpid", []>, OpSize, TB; +// 0F 01 C1 +def VMCALL : I<0x01, RawFrm, (outs), (ins), "vmcall", []>, TB; +def VMCLEARm : I<0xC7, MRM6m, (outs), (ins i64mem:$vmcs), + "vmclear\t$vmcs", []>, OpSize, TB; +// 0F 01 C2 +def VMLAUNCH : I<0x01, RawFrm, (outs), (ins), "vmlaunch", []>, TB; +// 0F 01 C3 +def VMRESUME : I<0x01, RawFrm, (outs), (ins), "vmresume", []>, TB; +def VMPTRLDm : I<0xC7, MRM6m, (outs), (ins i64mem:$vmcs), + "vmptrld\t$vmcs", []>, TB; +def VMPTRSTm : I<0xC7, MRM7m, (outs i64mem:$vmcs), (ins), + "vmptrst\t$vmcs", []>, TB; +def VMREAD64rm : I<0x78, MRMDestMem, (outs i64mem:$dst), (ins GR64:$src), + "vmread{q}\t{$src, $dst|$dst, $src}", []>, TB; +def VMREAD64rr : I<0x78, MRMDestReg, (outs GR64:$dst), (ins GR64:$src), + "vmread{q}\t{$src, $dst|$dst, $src}", []>, TB; +def VMREAD32rm : I<0x78, MRMDestMem, (outs i32mem:$dst), (ins GR32:$src), + "vmread{l}\t{$src, $dst|$dst, $src}", []>, TB; +def VMREAD32rr : I<0x78, MRMDestReg, (outs GR32:$dst), (ins GR32:$src), + "vmread{l}\t{$src, $dst|$dst, $src}", []>, TB; +def VMWRITE64rm : I<0x79, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src), + "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, TB; +def VMWRITE64rr : I<0x79, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src), + "vmwrite{q}\t{$src, $dst|$dst, $src}", []>, TB; +def VMWRITE32rm : I<0x79, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src), + "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, TB; +def VMWRITE32rr : I<0x79, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src), + "vmwrite{l}\t{$src, $dst|$dst, $src}", []>, TB; +// 0F 01 C4 +def VMXOFF : I<0x01, RawFrm, (outs), (ins), "vmxoff", []>, OpSize; +def VMXON : I<0xC7, MRM6m, (outs), (ins i64mem:$vmxon), + "vmxon\t{$vmxon}", []>, XD; //===----------------------------------------------------------------------===// // Non-Instruction Patterns @@ -4028,15 +4474,18 @@ def : Pat<(srl_su GR16:$src, (i8 8)), x86_subreg_16bit)>, Requires<[In32BitMode]>; def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))), - (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), + (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, + GR16_ABCD)), x86_subreg_8bit_hi))>, Requires<[In32BitMode]>; def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))), - (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)), + (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, + GR16_ABCD)), x86_subreg_8bit_hi))>, Requires<[In32BitMode]>; def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)), - (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)), + (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, + GR32_ABCD)), x86_subreg_8bit_hi))>, Requires<[In32BitMode]>; @@ -4185,10 +4634,10 @@ def : Pat<(store (shld (loadi16 addr:$dst), (i8 imm:$amt1), GR16:$src2, (i8 imm:$amt2)), addr:$dst), (SHLD16mri8 addr:$dst, GR16:$src2, (i8 imm:$amt1))>; -// (anyext (setcc_carry)) -> (zext 
(setcc_carry)) -def : Pat<(i16 (anyext (X86setcc_c X86_COND_B, EFLAGS))), +// (anyext (setcc_carry)) -> (setcc_carry) +def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))), (SETB_C16r)>; -def : Pat<(i32 (anyext (X86setcc_c X86_COND_B, EFLAGS))), +def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))), (SETB_C32r)>; //===----------------------------------------------------------------------===// diff --git a/lib/Target/X86/X86InstrMMX.td b/lib/Target/X86/X86InstrMMX.td index 500785b..fc40c9a 100644 --- a/lib/Target/X86/X86InstrMMX.td +++ b/lib/Target/X86/X86InstrMMX.td @@ -72,13 +72,13 @@ let Constraints = "$src1 = $dst" in { multiclass MMXI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode, ValueType OpVT, bit Commutable = 0> { def rr : MMXI<opc, MRMSrcReg, (outs VR64:$dst), - (ins VR64:$src1, VR64:$src2), + (ins VR64:$src1, VR64:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR64:$dst, (OpVT (OpNode VR64:$src1, VR64:$src2)))]> { let isCommutable = Commutable; } def rm : MMXI<opc, MRMSrcMem, (outs VR64:$dst), - (ins VR64:$src1, i64mem:$src2), + (ins VR64:$src1, i64mem:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR64:$dst, (OpVT (OpNode VR64:$src1, (bitconvert @@ -88,13 +88,13 @@ let Constraints = "$src1 = $dst" in { multiclass MMXI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId, bit Commutable = 0> { def rr : MMXI<opc, MRMSrcReg, (outs VR64:$dst), - (ins VR64:$src1, VR64:$src2), + (ins VR64:$src1, VR64:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR64:$dst, (IntId VR64:$src1, VR64:$src2))]> { let isCommutable = Commutable; } def rm : MMXI<opc, MRMSrcMem, (outs VR64:$dst), - (ins VR64:$src1, i64mem:$src2), + (ins VR64:$src1, i64mem:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR64:$dst, (IntId VR64:$src1, (bitconvert (load_mmx addr:$src2))))]>; @@ -144,9 +144,9 @@ let Constraints = "$src1 = $dst" in { //===----------------------------------------------------------------------===// def MMX_EMMS : MMXI<0x77, RawFrm, (outs), (ins), "emms", - [(int_x86_mmx_emms)]>; + [(int_x86_mmx_emms)]>; def MMX_FEMMS : MMXI<0x0E, RawFrm, (outs), (ins), "femms", - [(int_x86_mmx_femms)]>; + [(int_x86_mmx_femms)]>; //===----------------------------------------------------------------------===// // MMX Scalar Instructions @@ -155,16 +155,21 @@ def MMX_FEMMS : MMXI<0x0E, RawFrm, (outs), (ins), "femms", // Data Transfer Instructions def MMX_MOVD64rr : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR32:$src), "movd\t{$src, $dst|$dst, $src}", - [(set VR64:$dst, - (v2i32 (scalar_to_vector GR32:$src)))]>; + [(set VR64:$dst, + (v2i32 (scalar_to_vector GR32:$src)))]>; let canFoldAsLoad = 1, isReMaterializable = 1 in def MMX_MOVD64rm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst), (ins i32mem:$src), "movd\t{$src, $dst|$dst, $src}", [(set VR64:$dst, - (v2i32 (scalar_to_vector (loadi32 addr:$src))))]>; + (v2i32 (scalar_to_vector (loadi32 addr:$src))))]>; let mayStore = 1 in def MMX_MOVD64mr : MMXI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR64:$src), "movd\t{$src, $dst|$dst, $src}", []>; +def MMX_MOVD64grr : MMXI<0x7E, MRMDestReg, (outs), (ins GR32:$dst, VR64:$src), + "movd\t{$src, $dst|$dst, $src}", []>; +def MMX_MOVQ64gmr : MMXRI<0x7E, MRMDestMem, (outs), + (ins i64mem:$dst, VR64:$src), + "movq\t{$src, $dst|$dst, $src}", []>; let neverHasSideEffects = 1 in def MMX_MOVD64to64rr : MMXRI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR64:$src), @@ -181,7 +186,7 @@ def MMX_MOVD64from64rr : MMXRI<0x7E, MRMDestReg, 
def MMX_MOVD64rrv164 : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR64:$src), "movd\t{$src, $dst|$dst, $src}", [(set VR64:$dst, - (v1i64 (scalar_to_vector GR64:$src)))]>; + (v1i64 (scalar_to_vector GR64:$src)))]>; let neverHasSideEffects = 1 in def MMX_MOVQ64rr : MMXI<0x6F, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src), @@ -223,7 +228,7 @@ def MMX_MOVZDI2PDIrr : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR32:$src), (v2i32 (X86vzmovl (v2i32 (scalar_to_vector GR32:$src)))))]>; let AddedComplexity = 20 in def MMX_MOVZDI2PDIrm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst), - (ins i32mem:$src), + (ins i32mem:$src), "movd\t{$src, $dst|$dst, $src}", [(set VR64:$dst, (v2i32 (X86vzmovl (v2i32 @@ -432,21 +437,21 @@ def MMX_CVTPD2PIrr : MMX2I<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src), "cvtpd2pi\t{$src, $dst|$dst, $src}", []>; let mayLoad = 1 in def MMX_CVTPD2PIrm : MMX2I<0x2D, MRMSrcMem, (outs VR64:$dst), - (ins f128mem:$src), + (ins f128mem:$src), "cvtpd2pi\t{$src, $dst|$dst, $src}", []>; def MMX_CVTPI2PDrr : MMX2I<0x2A, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src), "cvtpi2pd\t{$src, $dst|$dst, $src}", []>; let mayLoad = 1 in def MMX_CVTPI2PDrm : MMX2I<0x2A, MRMSrcMem, (outs VR128:$dst), - (ins i64mem:$src), + (ins i64mem:$src), "cvtpi2pd\t{$src, $dst|$dst, $src}", []>; def MMX_CVTPI2PSrr : MMXI<0x2A, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src), "cvtpi2ps\t{$src, $dst|$dst, $src}", []>; let mayLoad = 1 in def MMX_CVTPI2PSrm : MMXI<0x2A, MRMSrcMem, (outs VR128:$dst), - (ins i64mem:$src), + (ins i64mem:$src), "cvtpi2ps\t{$src, $dst|$dst, $src}", []>; def MMX_CVTPS2PIrr : MMXI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src), @@ -459,7 +464,7 @@ def MMX_CVTTPD2PIrr : MMX2I<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src), "cvttpd2pi\t{$src, $dst|$dst, $src}", []>; let mayLoad = 1 in def MMX_CVTTPD2PIrm : MMX2I<0x2C, MRMSrcMem, (outs VR64:$dst), - (ins f128mem:$src), + (ins f128mem:$src), "cvttpd2pi\t{$src, $dst|$dst, $src}", []>; def MMX_CVTTPS2PIrr : MMXI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src), @@ -481,14 +486,14 @@ def MMX_PEXTRWri : MMXIi8<0xC5, MRMSrcReg, (iPTR imm:$src2)))]>; let Constraints = "$src1 = $dst" in { def MMX_PINSRWrri : MMXIi8<0xC4, MRMSrcReg, - (outs VR64:$dst), (ins VR64:$src1, GR32:$src2, - i16i8imm:$src3), + (outs VR64:$dst), + (ins VR64:$src1, GR32:$src2,i16i8imm:$src3), "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}", [(set VR64:$dst, (v4i16 (MMX_X86pinsrw (v4i16 VR64:$src1), GR32:$src2,(iPTR imm:$src3))))]>; def MMX_PINSRWrmi : MMXIi8<0xC4, MRMSrcMem, - (outs VR64:$dst), (ins VR64:$src1, i16mem:$src2, - i16i8imm:$src3), + (outs VR64:$dst), + (ins VR64:$src1, i16mem:$src2, i16i8imm:$src3), "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}", [(set VR64:$dst, (v4i16 (MMX_X86pinsrw (v4i16 VR64:$src1), diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td index 62841f8..b26e508 100644 --- a/lib/Target/X86/X86InstrSSE.td +++ b/lib/Target/X86/X86InstrSSE.td @@ -70,7 +70,7 @@ def X86pcmpgtd : SDNode<"X86ISD::PCMPGTD", SDTIntBinOp>; def X86pcmpgtq : SDNode<"X86ISD::PCMPGTQ", SDTIntBinOp>; def SDTX86CmpPTest : SDTypeProfile<0, 2, [SDTCisVT<0, v4f32>, - SDTCisVT<1, v4f32>]>; + SDTCisVT<1, v4f32>]>; def X86ptest : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>; //===----------------------------------------------------------------------===// @@ -116,12 +116,18 @@ def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{ return cast<LoadSDNode>(N)->getAlignment() >= 16; }]>; -def alignedloadfsf32 : PatFrag<(ops node:$ptr), (f32 (alignedload 
node:$ptr))>; -def alignedloadfsf64 : PatFrag<(ops node:$ptr), (f64 (alignedload node:$ptr))>; -def alignedloadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (alignedload node:$ptr))>; -def alignedloadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (alignedload node:$ptr))>; -def alignedloadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (alignedload node:$ptr))>; -def alignedloadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (alignedload node:$ptr))>; +def alignedloadfsf32 : PatFrag<(ops node:$ptr), + (f32 (alignedload node:$ptr))>; +def alignedloadfsf64 : PatFrag<(ops node:$ptr), + (f64 (alignedload node:$ptr))>; +def alignedloadv4f32 : PatFrag<(ops node:$ptr), + (v4f32 (alignedload node:$ptr))>; +def alignedloadv2f64 : PatFrag<(ops node:$ptr), + (v2f64 (alignedload node:$ptr))>; +def alignedloadv4i32 : PatFrag<(ops node:$ptr), + (v4i32 (alignedload node:$ptr))>; +def alignedloadv2i64 : PatFrag<(ops node:$ptr), + (v2i64 (alignedload node:$ptr))>; // Like 'load', but uses special alignment checks suitable for use in // memory operands in most SSE instructions, which are required to @@ -363,6 +369,11 @@ def CVTSI2SSrm : SSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src), [(set FR32:$dst, (sint_to_fp (loadi32 addr:$src)))]>; // Match intrinsics which expect XMM operand(s). +def CVTSS2SIrr: SSI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins FR32:$src), + "cvtss2si{l}\t{$src, $dst|$dst, $src}", []>; +def CVTSS2SIrm: SSI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src), + "cvtss2si{l}\t{$src, $dst|$dst, $src}", []>; + def Int_CVTSS2SIrr : SSI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src), "cvtss2si\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (int_x86_sse_cvtss2si VR128:$src))]>; @@ -441,19 +452,26 @@ def UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs), (ins FR32:$src1, f32mem:$src2), "ucomiss\t{$src2, $src1|$src1, $src2}", [(X86cmp FR32:$src1, (loadf32 addr:$src2)), (implicit EFLAGS)]>; + +def COMISSrr: PSI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2), + "comiss\t{$src2, $src1|$src1, $src2}", []>; +def COMISSrm: PSI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2), + "comiss\t{$src2, $src1|$src1, $src2}", []>; + } // Defs = [EFLAGS] // Aliases to match intrinsics which expect XMM operand(s). let Constraints = "$src1 = $dst" in { def Int_CMPSSrr : SSIi8<0xC2, MRMSrcReg, - (outs VR128:$dst), (ins VR128:$src1, VR128:$src, - SSECC:$cc), + (outs VR128:$dst), + (ins VR128:$src1, VR128:$src, SSECC:$cc), "cmp${cc}ss\t{$src, $dst|$dst, $src}", - [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1, - VR128:$src, imm:$cc))]>; + [(set VR128:$dst, (int_x86_sse_cmp_ss + VR128:$src1, + VR128:$src, imm:$cc))]>; def Int_CMPSSrm : SSIi8<0xC2, MRMSrcMem, - (outs VR128:$dst), (ins VR128:$src1, f32mem:$src, - SSECC:$cc), + (outs VR128:$dst), + (ins VR128:$src1, f32mem:$src, SSECC:$cc), "cmp${cc}ss\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1, (load addr:$src), imm:$cc))]>; @@ -806,9 +824,10 @@ multiclass sse1_fp_unop_rm<bits<8> opc, string OpcodeStr, } // Scalar operation, mem. - def SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src), + def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src), !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"), - [(set FR32:$dst, (OpNode (load addr:$src)))]>; + [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS, + Requires<[HasSSE1, OptForSize]>; // Vector operation, reg. 
def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), @@ -1098,9 +1117,10 @@ def CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f64mem:$src), def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src), "cvtsd2ss\t{$src, $dst|$dst, $src}", [(set FR32:$dst, (fround FR64:$src))]>; -def CVTSD2SSrm : SDI<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src), +def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src), "cvtsd2ss\t{$src, $dst|$dst, $src}", - [(set FR32:$dst, (fround (loadf64 addr:$src)))]>; + [(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD, + Requires<[HasSSE2, OptForSize]>; def CVTSI2SDrr : SDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR32:$src), "cvtsi2sd\t{$src, $dst|$dst, $src}", [(set FR64:$dst, (sint_to_fp GR32:$src))]>; @@ -1137,7 +1157,10 @@ def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src), def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src), "cvtss2sd\t{$src, $dst|$dst, $src}", [(set FR64:$dst, (extloadf32 addr:$src))]>, XS, - Requires<[HasSSE2]>; + Requires<[HasSSE2, OptForSize]>; + +def : Pat<(extloadf32 addr:$src), + (CVTSS2SDrr (MOVSSrm addr:$src))>, Requires<[HasSSE2, OptForSpeed]>; // Match intrinsics which expect XMM operand(s). def Int_CVTSD2SIrr : SDI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src), @@ -1205,14 +1228,14 @@ def UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), (ins FR64:$src1, f64mem:$src2), // Aliases to match intrinsics which expect XMM operand(s). let Constraints = "$src1 = $dst" in { def Int_CMPSDrr : SDIi8<0xC2, MRMSrcReg, - (outs VR128:$dst), (ins VR128:$src1, VR128:$src, - SSECC:$cc), + (outs VR128:$dst), + (ins VR128:$src1, VR128:$src, SSECC:$cc), "cmp${cc}sd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1, VR128:$src, imm:$cc))]>; def Int_CMPSDrm : SDIi8<0xC2, MRMSrcMem, - (outs VR128:$dst), (ins VR128:$src1, f64mem:$src, - SSECC:$cc), + (outs VR128:$dst), + (ins VR128:$src1, f64mem:$src, SSECC:$cc), "cmp${cc}sd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1, (load addr:$src), imm:$cc))]>; @@ -1542,9 +1565,15 @@ def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), [(set VR128:$dst, (int_x86_sse2_cvtps2dq (memop addr:$src)))]>; // SSE2 packed instructions with XS prefix +def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), + "cvttps2dq\t{$src, $dst|$dst, $src}", []>; +def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), + "cvttps2dq\t{$src, $dst|$dst, $src}", []>; + def Int_CVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "cvttps2dq\t{$src, $dst|$dst, $src}", - [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))]>, + [(set VR128:$dst, + (int_x86_sse2_cvttps2dq VR128:$src))]>, XS, Requires<[HasSSE2]>; def Int_CVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), "cvttps2dq\t{$src, $dst|$dst, $src}", @@ -1572,6 +1601,11 @@ def Int_CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src), (memop addr:$src)))]>; // SSE2 instructions without OpSize prefix +def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), + "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB; +def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src), + "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB; + def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "cvtps2pd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>, 
@@ -1582,6 +1616,12 @@ def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src), (load addr:$src)))]>, TB, Requires<[HasSSE2]>; +def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), + "cvtpd2ps\t{$src, $dst|$dst, $src}", []>; +def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), + "cvtpd2ps\t{$src, $dst|$dst, $src}", []>; + + def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "cvtpd2ps\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>; @@ -1856,31 +1896,34 @@ let Constraints = "$src1 = $dst" in { multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId, bit Commutable = 0> { - def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), + def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), + (ins VR128:$src1, VR128:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]> { let isCommutable = Commutable; } - def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2), + def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), + (ins VR128:$src1, i128mem:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR128:$dst, (IntId VR128:$src1, - (bitconvert (memopv2i64 addr:$src2))))]>; + (bitconvert (memopv2i64 + addr:$src2))))]>; } multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm, string OpcodeStr, Intrinsic IntId, Intrinsic IntId2> { - def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, - VR128:$src2), + def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), + (ins VR128:$src1, VR128:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>; - def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, - i128mem:$src2), + def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), + (ins VR128:$src1, i128mem:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR128:$dst, (IntId VR128:$src1, (bitconvert (memopv2i64 addr:$src2))))]>; - def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst), (ins VR128:$src1, - i32i8imm:$src2), + def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst), + (ins VR128:$src1, i32i8imm:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>; } @@ -1888,14 +1931,14 @@ multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm, /// PDI_binop_rm - Simple SSE2 binary operator. 
multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode, ValueType OpVT, bit Commutable = 0> { - def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, - VR128:$src2), + def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), + (ins VR128:$src1, VR128:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]> { let isCommutable = Commutable; } - def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, - i128mem:$src2), + def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), + (ins VR128:$src1, i128mem:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR128:$dst, (OpVT (OpNode VR128:$src1, (bitconvert (memopv2i64 addr:$src2)))))]>; @@ -1909,16 +1952,16 @@ multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode, multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode, bit Commutable = 0> { def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), - (ins VR128:$src1, VR128:$src2), + (ins VR128:$src1, VR128:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]> { let isCommutable = Commutable; } def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), - (ins VR128:$src1, i128mem:$src2), + (ins VR128:$src1, i128mem:$src2), !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), [(set VR128:$dst, (OpNode VR128:$src1, - (memopv2i64 addr:$src2)))]>; + (memopv2i64 addr:$src2)))]>; } } // Constraints = "$src1 = $dst" @@ -2455,6 +2498,13 @@ def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))), (MOVZPQILo2PQIrm addr:$src)>; } +// Instructions for the disassembler +// xr = XMM register +// xm = mem64 + +def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), + "movq\t{$src, $dst|$dst, $src}", []>, XS; + //===---------------------------------------------------------------------===// // SSE3 Instructions //===---------------------------------------------------------------------===// @@ -3175,13 +3225,14 @@ multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, OpSize; // Vector intrinsic operation, mem - def PSm_Int : SS4AIi8<opcps, MRMSrcMem, + def PSm_Int : Ii8<opcps, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2), !strconcat(OpcodeStr, "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"), [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src1),imm:$src2))]>, - OpSize; + TA, OpSize, + Requires<[HasSSE41]>; // Vector intrinsic operation, reg def PDr_Int : SS4AIi8<opcpd, MRMSrcReg, @@ -3661,7 +3712,7 @@ let Constraints = "$src1 = $dst" in { "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), [(set VR128:$dst, (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>, - OpSize; + OpSize; def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2, i32i8imm:$src3), !strconcat(OpcodeStr, @@ -3786,76 +3837,63 @@ let Constraints = "$src1 = $dst" in { // String/text processing instructions. 
let Defs = [EFLAGS], usesCustomInserter = 1 in { def PCMPISTRM128REG : SS42AI<0, Pseudo, (outs VR128:$dst), - (ins VR128:$src1, VR128:$src2, i8imm:$src3), - "#PCMPISTRM128rr PSEUDO!", - [(set VR128:$dst, - (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2, - imm:$src3))]>, OpSize; + (ins VR128:$src1, VR128:$src2, i8imm:$src3), + "#PCMPISTRM128rr PSEUDO!", + [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2, + imm:$src3))]>, OpSize; def PCMPISTRM128MEM : SS42AI<0, Pseudo, (outs VR128:$dst), - (ins VR128:$src1, i128mem:$src2, i8imm:$src3), - "#PCMPISTRM128rm PSEUDO!", - [(set VR128:$dst, - (int_x86_sse42_pcmpistrm128 VR128:$src1, - (load addr:$src2), - imm:$src3))]>, OpSize; + (ins VR128:$src1, i128mem:$src2, i8imm:$src3), + "#PCMPISTRM128rm PSEUDO!", + [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, (load addr:$src2), + imm:$src3))]>, OpSize; } let Defs = [XMM0, EFLAGS] in { def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs), - (ins VR128:$src1, VR128:$src2, i8imm:$src3), - "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", - []>, OpSize; + (ins VR128:$src1, VR128:$src2, i8imm:$src3), + "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize; def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs), - (ins VR128:$src1, i128mem:$src2, i8imm:$src3), - "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", - []>, OpSize; + (ins VR128:$src1, i128mem:$src2, i8imm:$src3), + "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize; } -let Defs = [EFLAGS], Uses = [EAX, EDX], - usesCustomInserter = 1 in { +let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in { def PCMPESTRM128REG : SS42AI<0, Pseudo, (outs VR128:$dst), - (ins VR128:$src1, VR128:$src3, i8imm:$src5), - "#PCMPESTRM128rr PSEUDO!", - [(set VR128:$dst, - (int_x86_sse42_pcmpestrm128 VR128:$src1, EAX, - VR128:$src3, - EDX, imm:$src5))]>, OpSize; + (ins VR128:$src1, VR128:$src3, i8imm:$src5), + "#PCMPESTRM128rr PSEUDO!", + [(set VR128:$dst, + (int_x86_sse42_pcmpestrm128 + VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>, OpSize; + def PCMPESTRM128MEM : SS42AI<0, Pseudo, (outs VR128:$dst), - (ins VR128:$src1, i128mem:$src3, i8imm:$src5), - "#PCMPESTRM128rm PSEUDO!", - [(set VR128:$dst, - (int_x86_sse42_pcmpestrm128 VR128:$src1, EAX, - (load addr:$src3), - EDX, imm:$src5))]>, OpSize; + (ins VR128:$src1, i128mem:$src3, i8imm:$src5), + "#PCMPESTRM128rm PSEUDO!", + [(set VR128:$dst, (int_x86_sse42_pcmpestrm128 + VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>, + OpSize; } let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in { def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs), - (ins VR128:$src1, VR128:$src3, i8imm:$src5), - "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", - []>, OpSize; + (ins VR128:$src1, VR128:$src3, i8imm:$src5), + "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize; def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs), - (ins VR128:$src1, i128mem:$src3, i8imm:$src5), - "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", - []>, OpSize; + (ins VR128:$src1, i128mem:$src3, i8imm:$src5), + "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize; } let Defs = [ECX, EFLAGS] in { multiclass SS42AI_pcmpistri<Intrinsic IntId128> { - def rr : SS42AI<0x63, MRMSrcReg, (outs), - (ins VR128:$src1, VR128:$src2, i8imm:$src3), - "pcmpistri\t{$src3, $src2, $src1|$src1, $src2, $src3}", - [(set ECX, - (IntId128 VR128:$src1, VR128:$src2, imm:$src3)), - (implicit EFLAGS)]>, - OpSize; + def rr : SS42AI<0x63, MRMSrcReg, (outs), + (ins 
VR128:$src1, VR128:$src2, i8imm:$src3), + "pcmpistri\t{$src3, $src2, $src1|$src1, $src2, $src3}", + [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)), + (implicit EFLAGS)]>, OpSize; def rm : SS42AI<0x63, MRMSrcMem, (outs), - (ins VR128:$src1, i128mem:$src2, i8imm:$src3), - "pcmpistri\t{$src3, $src2, $src1|$src1, $src2, $src3}", - [(set ECX, - (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)), - (implicit EFLAGS)]>, - OpSize; + (ins VR128:$src1, i128mem:$src2, i8imm:$src3), + "pcmpistri\t{$src3, $src2, $src1|$src1, $src2, $src3}", + [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)), + (implicit EFLAGS)]>, OpSize; } } @@ -3870,20 +3908,16 @@ let Defs = [ECX, EFLAGS] in { let Uses = [EAX, EDX] in { multiclass SS42AI_pcmpestri<Intrinsic IntId128> { def rr : SS42AI<0x61, MRMSrcReg, (outs), - (ins VR128:$src1, VR128:$src3, i8imm:$src5), - "pcmpestri\t{$src5, $src3, $src1|$src1, $src3, $src5}", - [(set ECX, - (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)), - (implicit EFLAGS)]>, - OpSize; + (ins VR128:$src1, VR128:$src3, i8imm:$src5), + "pcmpestri\t{$src5, $src3, $src1|$src1, $src3, $src5}", + [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)), + (implicit EFLAGS)]>, OpSize; def rm : SS42AI<0x61, MRMSrcMem, (outs), - (ins VR128:$src1, i128mem:$src3, i8imm:$src5), - "pcmpestri\t{$src5, $src3, $src1|$src1, $src3, $src5}", - [(set ECX, - (IntId128 VR128:$src1, EAX, (load addr:$src3), - EDX, imm:$src5)), - (implicit EFLAGS)]>, - OpSize; + (ins VR128:$src1, i128mem:$src3, i8imm:$src5), + "pcmpestri\t{$src5, $src3, $src1|$src1, $src3, $src5}", + [(set ECX, + (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)), + (implicit EFLAGS)]>, OpSize; } } } diff --git a/lib/Target/X86/X86JITInfo.cpp b/lib/Target/X86/X86JITInfo.cpp index ce06f0f..c69cc83 100644 --- a/lib/Target/X86/X86JITInfo.cpp +++ b/lib/Target/X86/X86JITInfo.cpp @@ -426,16 +426,19 @@ X86JITInfo::X86JITInfo(X86TargetMachine &tm) : TM(tm) { void *X86JITInfo::emitGlobalValueIndirectSym(const GlobalValue* GV, void *ptr, JITCodeEmitter &JCE) { - MachineCodeEmitter::BufferState BS; #if defined (X86_64_JIT) - JCE.startGVStub(BS, GV, 8, 8); - JCE.emitWordLE((unsigned)(intptr_t)ptr); - JCE.emitWordLE((unsigned)(((intptr_t)ptr) >> 32)); + const unsigned Alignment = 8; + uint8_t Buffer[8]; + uint8_t *Cur = Buffer; + MachineCodeEmitter::emitWordLEInto(Cur, (unsigned)(intptr_t)ptr); + MachineCodeEmitter::emitWordLEInto(Cur, (unsigned)(((intptr_t)ptr) >> 32)); #else - JCE.startGVStub(BS, GV, 4, 4); - JCE.emitWordLE((intptr_t)ptr); + const unsigned Alignment = 4; + uint8_t Buffer[4]; + uint8_t *Cur = Buffer; + MachineCodeEmitter::emitWordLEInto(Cur, (intptr_t)ptr); #endif - return JCE.finishGVStub(BS); + return JCE.allocIndirectGV(GV, Buffer, sizeof(Buffer), Alignment); } TargetJITInfo::StubLayout X86JITInfo::getStubLayout() { @@ -451,7 +454,6 @@ TargetJITInfo::StubLayout X86JITInfo::getStubLayout() { void *X86JITInfo::emitFunctionStub(const Function* F, void *Target, JITCodeEmitter &JCE) { - MachineCodeEmitter::BufferState BS; // Note, we cast to intptr_t here to silence a -pedantic warning that // complains about casting a function pointer to a normal pointer. 
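The -pedantic workaround described in the note above reproduces outside LLVM: ISO C++ provides no direct conversion between function pointers and object pointers, but round-tripping through intptr_t is conditionally supported on the platforms the JIT targets and keeps the warning quiet. A minimal sketch of the idiom, illustrative rather than the patch's own code:

#include <cstdint>

void Target() {}

int main() {
  // A direct (void *)&Target draws a -pedantic warning about casting
  // between pointer-to-function and pointer-to-object; going through
  // intptr_t first does not.
  void *P = (void *)(intptr_t)&Target;
  return P != nullptr ? 0 : 1;
}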
#if defined (X86_32_JIT) && !defined (_MSC_VER)
diff --git a/lib/Target/X86/X86RegisterInfo.td b/lib/Target/X86/X86RegisterInfo.td
index 7bf074d..6db0cc3 100644
--- a/lib/Target/X86/X86RegisterInfo.td
+++ b/lib/Target/X86/X86RegisterInfo.td
@@ -195,6 +195,36 @@ let Namespace = "X86" in {
   def ES : Register<"es">;
   def FS : Register<"fs">;
   def GS : Register<"gs">;
+
+  // Debug registers
+  def DR0 : Register<"dr0">;
+  def DR1 : Register<"dr1">;
+  def DR2 : Register<"dr2">;
+  def DR3 : Register<"dr3">;
+  def DR4 : Register<"dr4">;
+  def DR5 : Register<"dr5">;
+  def DR6 : Register<"dr6">;
+  def DR7 : Register<"dr7">;
+
+  // Control registers
+  def ECR0 : Register<"ecr0">;
+  def ECR1 : Register<"ecr1">;
+  def ECR2 : Register<"ecr2">;
+  def ECR3 : Register<"ecr3">;
+  def ECR4 : Register<"ecr4">;
+  def ECR5 : Register<"ecr5">;
+  def ECR6 : Register<"ecr6">;
+  def ECR7 : Register<"ecr7">;
+
+  def RCR0 : Register<"rcr0">;
+  def RCR1 : Register<"rcr1">;
+  def RCR2 : Register<"rcr2">;
+  def RCR3 : Register<"rcr3">;
+  def RCR4 : Register<"rcr4">;
+  def RCR5 : Register<"rcr5">;
+  def RCR6 : Register<"rcr6">;
+  def RCR7 : Register<"rcr7">;
+  def RCR8 : Register<"rcr8">;
 }

@@ -446,6 +476,22 @@ def GR64 : RegisterClass<"X86", [i64], 64,
 def SEGMENT_REG : RegisterClass<"X86", [i16], 16, [CS, DS, SS, ES, FS, GS]> {
 }

+// Debug registers.
+def DEBUG_REG : RegisterClass<"X86", [i32], 32,
+                              [DR0, DR1, DR2, DR3, DR4, DR5, DR6, DR7]> {
+}
+
+// Control registers.
+def CONTROL_REG_32 : RegisterClass<"X86", [i32], 32,
+                                   [ECR0, ECR1, ECR2, ECR3, ECR4, ECR5, ECR6,
+                                    ECR7]> {
+}
+
+def CONTROL_REG_64 : RegisterClass<"X86", [i64], 64,
+                                   [RCR0, RCR1, RCR2, RCR3, RCR4, RCR5, RCR6,
+                                    RCR7, RCR8]> {
+}
+
 // GR8_ABCD_L, GR8_ABCD_H, GR16_ABCD, GR32_ABCD, GR64_ABCD - Subclasses of
 // GR8, GR16, GR32, and GR64 which contain just the "a", "b", "c", and "d"
 // registers. On x86-32, GR16_ABCD and GR32_ABCD are classes for registers
@@ -661,7 +707,8 @@ def GR64_NOREX_NOSP : RegisterClass<"X86", [i64], 64,
   }];
   let MethodBodies = [{
     GR64_NOREX_NOSPClass::iterator
-    GR64_NOREX_NOSPClass::allocation_order_end(const MachineFunction &MF) const {
+    GR64_NOREX_NOSPClass::allocation_order_end(const MachineFunction &MF) const
+    {
       const TargetMachine &TM = MF.getTarget();
       const TargetRegisterInfo *RI = TM.getRegisterInfo();
       // Does the function dedicate RBP to being a frame ptr?
diff --git a/lib/Target/X86/X86Subtarget.h b/lib/Target/X86/X86Subtarget.h
index fb457dd..ef6dbaf 100644
--- a/lib/Target/X86/X86Subtarget.h
+++ b/lib/Target/X86/X86Subtarget.h
@@ -77,7 +77,7 @@ protected:
   /// IsBTMemSlow - True if BT (bit test) on memory operands is slow.
   bool IsBTMemSlow;
-  
+
   /// DarwinVers - Nonzero if this is a darwin platform: the numeric
   /// version of the platform, e.g. 8 = 10.4 (Tiger), 9 = 10.5 (Leopard), etc.
   unsigned char DarwinVers; // Is any darwin-x86 platform.
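For context on the hunk that follows: the target data string added for CygMing/Windows differs from the generic x86-32 string in its f64:64:64 and i64:64:64 fields, which give doubles and 64-bit integers a 64-bit ABI alignment to match MSVC/MinGW struct layout, where the Linux-style string keeps them at 32 bits. A small host-side sketch of the layout difference this encodes, assuming an x86-32 compiler (the struct name is made up for the example):

#include <cstddef>
#include <cstdio>

struct Pad { char c; double d; };

int main() {
  // i386 Linux psABI: d typically lands at offset 4 and sizeof(Pad) == 12.
  // Win32 / MinGW:    d lands at offset 8 and sizeof(Pad) == 16,
  // which is the layout the f64:64:64 field below describes.
  std::printf("offsetof(Pad, d) = %zu, sizeof(Pad) = %zu\n",
              offsetof(Pad, d), sizeof(Pad));
  return 0;
}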
@@ -169,8 +169,11 @@ public: p = "e-p:64:64-s:64-f64:64:64-i64:64:64-f80:128:128-n8:16:32:64"; else if (isTargetDarwin()) p = "e-p:32:32-f64:32:64-i64:32:64-f80:128:128-n8:16:32"; + else if (isTargetCygMing() || isTargetWindows()) + p = "e-p:32:32-f64:64:64-i64:64:64-f80:128:128-n8:16:32"; else p = "e-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32"; + return std::string(p); } diff --git a/lib/Target/X86/X86TargetMachine.cpp b/lib/Target/X86/X86TargetMachine.cpp index 0152121..962f0f7 100644 --- a/lib/Target/X86/X86TargetMachine.cpp +++ b/lib/Target/X86/X86TargetMachine.cpp @@ -91,10 +91,6 @@ X86TargetMachine::X86TargetMachine(const Target &T, const std::string &TT, assert(getRelocationModel() != Reloc::Default && "Relocation mode not picked"); - // If no code model is picked, default to small. - if (getCodeModel() == CodeModel::Default) - setCodeModel(CodeModel::Small); - // ELF and X86-64 don't have a distinct DynamicNoPIC model. DynamicNoPIC // is defined as a model for code which may be used in static or dynamic // executables but not necessarily a shared library. On X86-32 we just @@ -184,10 +180,6 @@ bool X86TargetMachine::addCodeEmitter(PassManagerBase &PM, Subtarget.setPICStyle(PICStyles::None); } - // 64-bit JIT places everything in the same buffer except external functions. - if (Subtarget.is64Bit()) - setCodeModel(CodeModel::Large); - PM.add(createX86CodeEmitterPass(*this, MCE)); return false; @@ -204,9 +196,6 @@ bool X86TargetMachine::addCodeEmitter(PassManagerBase &PM, Subtarget.setPICStyle(PICStyles::None); } - // 64-bit JIT places everything in the same buffer except external functions. - if (Subtarget.is64Bit()) - setCodeModel(CodeModel::Large); PM.add(createX86JITCodeEmitterPass(*this, JCE)); @@ -240,3 +229,23 @@ bool X86TargetMachine::addSimpleCodeEmitter(PassManagerBase &PM, PM.add(createX86ObjectCodeEmitterPass(*this, OCE)); return false; } + +void X86TargetMachine::setCodeModelForStatic() { + + if (getCodeModel() != CodeModel::Default) return; + + // For static codegen, if we're not already set, use Small codegen. + setCodeModel(CodeModel::Small); +} + + +void X86TargetMachine::setCodeModelForJIT() { + + if (getCodeModel() != CodeModel::Default) return; + + // 64-bit JIT places everything in the same buffer except external functions. + if (Subtarget.is64Bit()) + setCodeModel(CodeModel::Large); + else + setCodeModel(CodeModel::Small); +} diff --git a/lib/Target/X86/X86TargetMachine.h b/lib/Target/X86/X86TargetMachine.h index b538408..6183e91 100644 --- a/lib/Target/X86/X86TargetMachine.h +++ b/lib/Target/X86/X86TargetMachine.h @@ -38,6 +38,11 @@ class X86TargetMachine : public LLVMTargetMachine { X86ELFWriterInfo ELFWriterInfo; Reloc::Model DefRelocModel; // Reloc model before it's overridden. +private: + // We have specific defaults for X86. 
+ virtual void setCodeModelForJIT(); + virtual void setCodeModelForStatic(); + public: X86TargetMachine(const Target &T, const std::string &TT, const std::string &FS, bool is64Bit); diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp index f310456..6849e0b 100644 --- a/lib/Target/XCore/XCoreISelLowering.cpp +++ b/lib/Target/XCore/XCoreISelLowering.cpp @@ -452,7 +452,7 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) false, false, 0, CallingConv::C, false, /*isReturnValueUsed=*/true, DAG.getExternalSymbol("__misaligned_load", getPointerTy()), - Args, DAG, dl); + Args, DAG, dl, DAG.GetOrdering(Chain.getNode())); SDValue Ops[] = { CallResult.first, CallResult.second }; @@ -513,7 +513,7 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) false, false, 0, CallingConv::C, false, /*isReturnValueUsed=*/true, DAG.getExternalSymbol("__misaligned_store", getPointerTy()), - Args, DAG, dl); + Args, DAG, dl, DAG.GetOrdering(Chain.getNode())); return CallResult.second; } diff --git a/lib/Transforms/Hello/Hello.cpp b/lib/Transforms/Hello/Hello.cpp index 91534a7..eac4e17 100644 --- a/lib/Transforms/Hello/Hello.cpp +++ b/lib/Transforms/Hello/Hello.cpp @@ -56,7 +56,7 @@ namespace { // We don't modify the program, so we preserve all analyses virtual void getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesAll(); - }; + } }; } diff --git a/lib/Transforms/IPO/StripSymbols.cpp b/lib/Transforms/IPO/StripSymbols.cpp index 0b5e007..ae88d9e 100644 --- a/lib/Transforms/IPO/StripSymbols.cpp +++ b/lib/Transforms/IPO/StripSymbols.cpp @@ -24,7 +24,6 @@ #include "llvm/Constants.h" #include "llvm/DerivedTypes.h" #include "llvm/Instructions.h" -#include "llvm/LLVMContext.h" #include "llvm/Module.h" #include "llvm/Pass.h" #include "llvm/Analysis/DebugInfo.h" @@ -220,17 +219,14 @@ static bool StripDebugInfo(Module &M) { Changed = true; NMD->eraseFromParent(); } - MetadataContext &TheMetadata = M.getContext().getMetadata(); - unsigned MDDbgKind = TheMetadata.getMDKind("dbg"); - if (!MDDbgKind) - return Changed; - + + unsigned MDDbgKind = M.getMDKindID("dbg"); for (Module::iterator MI = M.begin(), ME = M.end(); MI != ME; ++MI) for (Function::iterator FI = MI->begin(), FE = MI->end(); FI != FE; ++FI) for (BasicBlock::iterator BI = FI->begin(), BE = FI->end(); BI != BE; ++BI) - TheMetadata.removeMD(MDDbgKind, BI); + BI->setMetadata(MDDbgKind, 0); return true; } diff --git a/lib/Transforms/Scalar/CodeGenPrepare.cpp b/lib/Transforms/Scalar/CodeGenPrepare.cpp index e4c4ae5..372616c 100644 --- a/lib/Transforms/Scalar/CodeGenPrepare.cpp +++ b/lib/Transforms/Scalar/CodeGenPrepare.cpp @@ -48,7 +48,7 @@ namespace { /// TLI - Keep a pointer of a TargetLowering to consult for determining /// transformation profitability. const TargetLowering *TLI; - ProfileInfo *PI; + ProfileInfo *PFI; /// BackEdges - Keep a set of all the loop back edges. /// @@ -99,7 +99,7 @@ void CodeGenPrepare::findLoopBackEdges(const Function &F) { bool CodeGenPrepare::runOnFunction(Function &F) { bool EverMadeChange = false; - PI = getAnalysisIfAvailable<ProfileInfo>(); + PFI = getAnalysisIfAvailable<ProfileInfo>(); // First pass, eliminate blocks that contain only PHI nodes and an // unconditional branch. EverMadeChange |= EliminateMostlyEmptyBlocks(F); @@ -288,9 +288,9 @@ void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) { // The PHIs are now updated, change everything that refers to BB to use // DestBB and remove BB. 
BB->replaceAllUsesWith(DestBB); - if (PI) { - PI->replaceAllUses(BB, DestBB); - PI->removeEdge(ProfileInfo::getEdge(BB, DestBB)); + if (PFI) { + PFI->replaceAllUses(BB, DestBB); + PFI->removeEdge(ProfileInfo::getEdge(BB, DestBB)); } BB->eraseFromParent(); @@ -368,9 +368,9 @@ static void SplitEdgeNicely(TerminatorInst *TI, unsigned SuccNum, // If we found a workable predecessor, change TI to branch to Succ. if (FoundMatch) { - ProfileInfo *PI = P->getAnalysisIfAvailable<ProfileInfo>(); - if (PI) - PI->splitEdge(TIBB, Dest, Pred); + ProfileInfo *PFI = P->getAnalysisIfAvailable<ProfileInfo>(); + if (PFI) + PFI->splitEdge(TIBB, Dest, Pred); Dest->removePredecessor(TIBB); TI->setSuccessor(SuccNum, Pred); return; diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp index 222792b..612b415 100644 --- a/lib/Transforms/Scalar/GVN.cpp +++ b/lib/Transforms/Scalar/GVN.cpp @@ -20,6 +20,7 @@ #include "llvm/BasicBlock.h" #include "llvm/Constants.h" #include "llvm/DerivedTypes.h" +#include "llvm/GlobalVariable.h" #include "llvm/Function.h" #include "llvm/IntrinsicInst.h" #include "llvm/LLVMContext.h" @@ -48,7 +49,6 @@ #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/SSAUpdater.h" -#include <cstdio> using namespace llvm; STATISTIC(NumGVNInstr, "Number of instructions deleted"); @@ -733,13 +733,13 @@ static RegisterPass<GVN> X("gvn", "Global Value Numbering"); void GVN::dump(DenseMap<uint32_t, Value*>& d) { - printf("{\n"); + errs() << "{\n"; for (DenseMap<uint32_t, Value*>::iterator I = d.begin(), E = d.end(); I != E; ++I) { - printf("%d\n", I->first); + errs() << I->first << "\n"; I->second->dump(); } - printf("}\n"); + errs() << "}\n"; } static bool isSafeReplacement(PHINode* p, Instruction *inst) { @@ -1278,6 +1278,32 @@ struct AvailableValueInBlock { assert(!isSimpleValue() && "Wrong accessor"); return cast<MemIntrinsic>(Val.getPointer()); } + + /// MaterializeAdjustedValue - Emit code into this block to adjust the value + /// defined here to the specified type. This handles various coercion cases. + Value *MaterializeAdjustedValue(const Type *LoadTy, + const TargetData *TD) const { + Value *Res; + if (isSimpleValue()) { + Res = getSimpleValue(); + if (Res->getType() != LoadTy) { + assert(TD && "Need target data to handle type mismatch case"); + Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(), + *TD); + + DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << " " + << *getSimpleValue() << '\n' + << *Res << '\n' << "\n\n\n"); + } + } else { + Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset, + LoadTy, BB->getTerminator(), *TD); + DEBUG(errs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset + << " " << *getMemIntrinValue() << '\n' + << *Res << '\n' << "\n\n\n"); + } + return Res; + } }; /// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock, @@ -1286,7 +1312,15 @@ struct AvailableValueInBlock { static Value *ConstructSSAForLoadSet(LoadInst *LI, SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock, const TargetData *TD, + const DominatorTree &DT, AliasAnalysis *AA) { + // Check for the fully redundant, dominating load case. In this case, we can + // just use the dominating value directly. + if (ValuesPerBlock.size() == 1 && + DT.properlyDominates(ValuesPerBlock[0].BB, LI->getParent())) + return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), TD); + + // Otherwise, we have to construct SSA form. 
SmallVector<PHINode*, 8> NewPHIs; SSAUpdater SSAUpdate(&NewPHIs); SSAUpdate.Initialize(LI); @@ -1300,28 +1334,7 @@ static Value *ConstructSSAForLoadSet(LoadInst *LI, if (SSAUpdate.HasValueForBlock(BB)) continue; - unsigned Offset = AV.Offset; - - Value *AvailableVal; - if (AV.isSimpleValue()) { - AvailableVal = AV.getSimpleValue(); - if (AvailableVal->getType() != LoadTy) { - assert(TD && "Need target data to handle type mismatch case"); - AvailableVal = GetStoreValueForLoad(AvailableVal, Offset, LoadTy, - BB->getTerminator(), *TD); - - DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << " " - << *AV.getSimpleValue() << '\n' - << *AvailableVal << '\n' << "\n\n\n"); - } - } else { - AvailableVal = GetMemInstValueForLoad(AV.getMemIntrinValue(), Offset, - LoadTy, BB->getTerminator(), *TD); - DEBUG(errs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset - << " " << *AV.getMemIntrinValue() << '\n' - << *AvailableVal << '\n' << "\n\n\n"); - } - SSAUpdate.AddAvailableValue(BB, AvailableVal); + SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, TD)); } // Perform PHI construction. @@ -1346,7 +1359,7 @@ static bool isLifetimeStart(Instruction *Inst) { bool GVN::processNonLocalLoad(LoadInst *LI, SmallVectorImpl<Instruction*> &toErase) { // Find the non-local dependencies of the load. - SmallVector<NonLocalDepEntry, 64> Deps; + SmallVector<NonLocalDepResult, 64> Deps; MD->getNonLocalPointerDependency(LI->getOperand(0), true, LI->getParent(), Deps); //DEBUG(errs() << "INVESTIGATING NONLOCAL LOAD: " @@ -1490,7 +1503,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI, DEBUG(errs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n'); // Perform PHI construction. - Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, + Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT, VN.getAliasAnalysis()); LI->replaceAllUsesWith(V); @@ -1679,7 +1692,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI, ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,NewLoad)); // Perform PHI construction. - Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, + Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT, VN.getAliasAnalysis()); LI->replaceAllUsesWith(V); if (isa<PHINode>(V)) diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp index 2912421..3aa4fd3 100644 --- a/lib/Transforms/Scalar/IndVarSimplify.cpp +++ b/lib/Transforms/Scalar/IndVarSimplify.cpp @@ -258,7 +258,7 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L, // Check that InVal is defined in the loop. Instruction *Inst = cast<Instruction>(InVal); - if (!L->contains(Inst->getParent())) + if (!L->contains(Inst)) continue; // Okay, this instruction has a user outside of the current loop diff --git a/lib/Transforms/Scalar/InstructionCombining.cpp b/lib/Transforms/Scalar/InstructionCombining.cpp index b9c536f..516d72e 100644 --- a/lib/Transforms/Scalar/InstructionCombining.cpp +++ b/lib/Transforms/Scalar/InstructionCombining.cpp @@ -75,6 +75,15 @@ STATISTIC(NumDeadInst , "Number of dead inst eliminated"); STATISTIC(NumDeadStore, "Number of dead stores eliminated"); STATISTIC(NumSunkInst , "Number of instructions sunk"); +/// SelectPatternFlavor - We can match a variety of different patterns for +/// select operations. +enum SelectPatternFlavor { + SPF_UNKNOWN = 0, + SPF_SMIN, SPF_UMIN, + SPF_SMAX, SPF_UMAX + //SPF_ABS - TODO. +}; + namespace { /// InstCombineWorklist - This is the worklist management logic for /// InstCombine. 
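For the SelectPatternFlavor enum added in the hunk above: the flavors name the source idioms (icmp X, Y) ? X : Y and its swapped-arm twin, which the MatchSelectPattern helper in a later hunk classifies. A stand-alone check of that correspondence, using nothing LLVM-specific:

#include <algorithm>
#include <cassert>

int main() {
  for (int A = -3; A <= 3; ++A)
    for (int B = -3; B <= 3; ++B) {
      // (icmp slt A, B) ? A : B is the SPF_SMIN shape.
      assert(((A < B) ? A : B) == std::min(A, B));
      // Swapping the select arms flips the flavor to SPF_SMAX.
      assert(((A < B) ? B : A) == std::max(A, B));
    }
  return 0;
}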
@@ -257,7 +266,8 @@ namespace { ConstantInt *RHS); Instruction *FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI, ConstantInt *DivRHS); - + Instruction *FoldICmpAddOpCst(ICmpInst &ICI, Value *X, ConstantInt *CI, + ICmpInst::Predicate Pred, Value *TheAdd); Instruction *FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS, ICmpInst::Predicate Cond, Instruction &I); Instruction *FoldShiftByConstant(Value *Op0, ConstantInt *Op1, @@ -280,6 +290,9 @@ namespace { Instruction *FoldSelectOpOp(SelectInst &SI, Instruction *TI, Instruction *FI); Instruction *FoldSelectIntoOp(SelectInst &SI, Value*, Value*); + Instruction *FoldSPFofSPF(Instruction *Inner, SelectPatternFlavor SPF1, + Value *A, Value *B, Instruction &Outer, + SelectPatternFlavor SPF2, Value *C); Instruction *visitSelectInst(SelectInst &SI); Instruction *visitSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI); Instruction *visitCallInst(CallInst &CI); @@ -648,6 +661,57 @@ static inline Value *dyn_castFNegVal(Value *V) { return 0; } +/// MatchSelectPattern - Pattern match integer [SU]MIN, [SU]MAX, and ABS idioms, +/// returning the kind and providing the out parameter results if we +/// successfully match. +static SelectPatternFlavor +MatchSelectPattern(Value *V, Value *&LHS, Value *&RHS) { + SelectInst *SI = dyn_cast<SelectInst>(V); + if (SI == 0) return SPF_UNKNOWN; + + ICmpInst *ICI = dyn_cast<ICmpInst>(SI->getCondition()); + if (ICI == 0) return SPF_UNKNOWN; + + LHS = ICI->getOperand(0); + RHS = ICI->getOperand(1); + + // (icmp X, Y) ? X : Y + if (SI->getTrueValue() == ICI->getOperand(0) && + SI->getFalseValue() == ICI->getOperand(1)) { + switch (ICI->getPredicate()) { + default: return SPF_UNKNOWN; // Equality. + case ICmpInst::ICMP_UGT: + case ICmpInst::ICMP_UGE: return SPF_UMAX; + case ICmpInst::ICMP_SGT: + case ICmpInst::ICMP_SGE: return SPF_SMAX; + case ICmpInst::ICMP_ULT: + case ICmpInst::ICMP_ULE: return SPF_UMIN; + case ICmpInst::ICMP_SLT: + case ICmpInst::ICMP_SLE: return SPF_SMIN; + } + } + + // (icmp X, Y) ? Y : X + if (SI->getTrueValue() == ICI->getOperand(1) && + SI->getFalseValue() == ICI->getOperand(0)) { + switch (ICI->getPredicate()) { + default: return SPF_UNKNOWN; // Equality. + case ICmpInst::ICMP_UGT: + case ICmpInst::ICMP_UGE: return SPF_UMIN; + case ICmpInst::ICMP_SGT: + case ICmpInst::ICMP_SGE: return SPF_SMIN; + case ICmpInst::ICMP_ULT: + case ICmpInst::ICMP_ULE: return SPF_UMAX; + case ICmpInst::ICMP_SLT: + case ICmpInst::ICMP_SLE: return SPF_SMAX; + } + } + + // TODO: (X > 4) ? X : 5 --> (X >= 5) ? X : 5 --> MAX(X, 5) + + return SPF_UNKNOWN; +} + /// isFreeToInvert - Return true if the specified value is free to invert (apply /// ~ to). This happens in cases where the ~ can be eliminated. static inline bool isFreeToInvert(Value *V) { @@ -732,12 +796,12 @@ static bool MultiplyOverflows(ConstantInt *C1, ConstantInt *C2, bool sign) { APInt MulExt = LHSExt * RHSExt; - if (sign) { - APInt Min = APInt::getSignedMinValue(W).sext(W * 2); - APInt Max = APInt::getSignedMaxValue(W).sext(W * 2); - return MulExt.slt(Min) || MulExt.sgt(Max); - } else + if (!sign) return MulExt.ugt(APInt::getLowBitsSet(W * 2, W)); + + APInt Min = APInt::getSignedMinValue(W).sext(W * 2); + APInt Max = APInt::getSignedMaxValue(W).sext(W * 2); + return MulExt.slt(Min) || MulExt.sgt(Max); } @@ -2736,9 +2800,13 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) { if (Op0 == Op1) // sub X, X -> 0 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType())); - // If this is a 'B = x-(-A)', change to B = x+A. 
- if (Value *V = dyn_castNegVal(Op1)) - return BinaryOperator::CreateAdd(Op0, V); + // If this is a 'B = x-(-A)', change to B = x+A. This preserves NSW/NUW. + if (Value *V = dyn_castNegVal(Op1)) { + BinaryOperator *Res = BinaryOperator::CreateAdd(Op0, V); + Res->setHasNoSignedWrap(I.hasNoSignedWrap()); + Res->setHasNoUnsignedWrap(I.hasNoUnsignedWrap()); + return Res; + } if (isa<UndefValue>(Op0)) return ReplaceInstUsesWith(I, Op0); // undef - X -> undef @@ -6356,24 +6424,26 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { // comparison into the select arms, which will cause one to be // constant folded and the select turned into a bitwise or. Value *Op1 = 0, *Op2 = 0; - if (LHSI->hasOneUse()) { - if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) { - // Fold the known value into the constant operand. - Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC); - // Insert a new ICmp of the other select operand. - Op2 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(2), - RHSC, I.getName()); - } else if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) { - // Fold the known value into the constant operand. - Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC); - // Insert a new ICmp of the other select operand. + if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) + Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC); + if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) + Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC); + + // We only want to perform this transformation if it will not lead to + // additional code. This is true if either both sides of the select + // fold to a constant (in which case the icmp is replaced with a select + // which will usually simplify) or this is the only user of the + // select (in which case we are trading a select+icmp for a simpler + // select+icmp). + if ((Op1 && Op2) || (LHSI->hasOneUse() && (Op1 || Op2))) { + if (!Op1) Op1 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(1), RHSC, I.getName()); - } - } - - if (Op1) + if (!Op2) + Op2 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(2), + RHSC, I.getName()); return SelectInst::Create(LHSI->getOperand(0), Op1, Op2); + } break; } case Instruction::Call: @@ -6452,7 +6522,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { // if (X) ... // For generality, we handle any zero-extension of any operand comparison // with a constant or another cast from the same type. - if (isa<ConstantInt>(Op1) || isa<CastInst>(Op1)) + if (isa<Constant>(Op1) || isa<CastInst>(Op1)) if (Instruction *R = visitICmpInstWithCastAndCast(I)) return R; } @@ -6598,9 +6668,112 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { } } } + + { + Value *X; ConstantInt *Cst; + // icmp X+Cst, X + if (match(Op0, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op1 == X) + return FoldICmpAddOpCst(I, X, Cst, I.getPredicate(), Op0); + + // icmp X, X+Cst + if (match(Op1, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op0 == X) + return FoldICmpAddOpCst(I, X, Cst, I.getSwappedPredicate(), Op1); + } return Changed ? &I : 0; } +/// FoldICmpAddOpCst - Fold "icmp pred (X+CI), X". +Instruction *InstCombiner::FoldICmpAddOpCst(ICmpInst &ICI, + Value *X, ConstantInt *CI, + ICmpInst::Predicate Pred, + Value *TheAdd) { + // If we have X+0, exit early (simplifying logic below) and let it get folded + // elsewhere. 
icmp X+0, X -> icmp X, X + if (CI->isZero()) { + bool isTrue = ICmpInst::isTrueWhenEqual(Pred); + return ReplaceInstUsesWith(ICI, ConstantInt::get(ICI.getType(), isTrue)); + } + + // (X+4) == X -> false. + if (Pred == ICmpInst::ICMP_EQ) + return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(X->getContext())); + + // (X+4) != X -> true. + if (Pred == ICmpInst::ICMP_NE) + return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(X->getContext())); + + // If this is an instruction (as opposed to constantexpr) get NUW/NSW info. + bool isNUW = false, isNSW = false; + if (BinaryOperator *Add = dyn_cast<BinaryOperator>(TheAdd)) { + isNUW = Add->hasNoUnsignedWrap(); + isNSW = Add->hasNoSignedWrap(); + } + + // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0, + // so the values can never be equal. Similarly for all other "or equals" + // operators. + + // (X+1) <u X --> X >u (MAXUINT-1) --> X != 255 + // (X+2) <u X --> X >u (MAXUINT-2) --> X > 253 + // (X+MAXUINT) <u X --> X >u (MAXUINT-MAXUINT) --> X != 0 + if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) { + // If this is an NUW add, then this is always false. + if (isNUW) + return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(X->getContext())); + + Value *R = ConstantExpr::getSub(ConstantInt::get(CI->getType(), -1ULL), CI); + return new ICmpInst(ICmpInst::ICMP_UGT, X, R); + } + + // (X+1) >u X --> X <u (0-1) --> X != 255 + // (X+2) >u X --> X <u (0-2) --> X <u 254 + // (X+MAXUINT) >u X --> X <u (0-MAXUINT) --> X <u 1 --> X == 0 + if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) { + // If this is an NUW add, then this is always true. + if (isNUW) + return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(X->getContext())); + return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantExpr::getNeg(CI)); + } + + unsigned BitWidth = CI->getType()->getPrimitiveSizeInBits(); + ConstantInt *SMax = ConstantInt::get(X->getContext(), + APInt::getSignedMaxValue(BitWidth)); + + // (X+ 1) <s X --> X >s (MAXSINT-1) --> X == 127 + // (X+ 2) <s X --> X >s (MAXSINT-2) --> X >s 125 + // (X+MAXSINT) <s X --> X >s (MAXSINT-MAXSINT) --> X >s 0 + // (X+MINSINT) <s X --> X >s (MAXSINT-MINSINT) --> X >s -1 + // (X+ -2) <s X --> X >s (MAXSINT- -2) --> X >s 126 + // (X+ -1) <s X --> X >s (MAXSINT- -1) --> X != 127 + if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) { + // If this is an NSW add, then we have two cases: if the constant is + // positive, then this is always false, if negative, this is always true. + if (isNSW) { + bool isTrue = CI->getValue().isNegative(); + return ReplaceInstUsesWith(ICI, ConstantInt::get(ICI.getType(), isTrue)); + } + + return new ICmpInst(ICmpInst::ICMP_SGT, X, ConstantExpr::getSub(SMax, CI)); + } + + // (X+ 1) >s X --> X <s (MAXSINT-(1-1)) --> X != 127 + // (X+ 2) >s X --> X <s (MAXSINT-(2-1)) --> X <s 126 + // (X+MAXSINT) >s X --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1 + // (X+MINSINT) >s X --> X <s (MAXSINT-(MINSINT-1)) --> X <s -2 + // (X+ -2) >s X --> X <s (MAXSINT-(-2-1)) --> X <s -126 + // (X+ -1) >s X --> X <s (MAXSINT-(-1-1)) --> X == -128 + + // If this is an NSW add, then we have two cases: if the constant is + // positive, then this is always true, if negative, this is always false.
+ if (isNSW) { + bool isTrue = !CI->getValue().isNegative(); + return ReplaceInstUsesWith(ICI, ConstantInt::get(ICI.getType(), isTrue)); + } + + assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE); + Constant *C = ConstantInt::get(X->getContext(), CI->getValue()-1); + return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantExpr::getSub(SMax, C)); +} /// FoldICmpDivCst - Fold "icmp pred, ([su]div X, DivRHS), CmpRHS" where DivRHS /// and CmpRHS are both known to be integer constants. @@ -7075,8 +7248,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI, break; case Instruction::Add: - // Fold: icmp pred (add, X, C1), C2 - + // Fold: icmp pred (add X, C1), C2 if (!ICI.isEquality()) { ConstantInt *LHSC = dyn_cast<ConstantInt>(LHSI->getOperand(1)); if (!LHSC) break; @@ -7299,19 +7471,17 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) { // If the re-extended constant didn't change... if (Res2 == CI) { - // Make sure that sign of the Cmp and the sign of the Cast are the same. - // For example, we might have: - // %A = sext i16 %X to i32 - // %B = icmp ugt i32 %A, 1330 - // It is incorrect to transform this into - // %B = icmp ugt i16 %X, 1330 - // because %A may have negative value. - // - // However, we allow this when the compare is EQ/NE, because they are - // signless. - if (isSignedExt == isSignedCmp || ICI.isEquality()) + // Deal with equality cases early. + if (ICI.isEquality()) return new ICmpInst(ICI.getPredicate(), LHSCIOp, Res1); - return 0; + + // A signed comparison of sign extended values simplifies into a + // signed comparison. + if (isSignedExt && isSignedCmp) + return new ICmpInst(ICI.getPredicate(), LHSCIOp, Res1); + + // The other three cases all fold into an unsigned comparison. + return new ICmpInst(ICI.getUnsignedPredicate(), LHSCIOp, Res1); } // The re-extended constant changed so the constant cannot be represented @@ -9372,9 +9542,6 @@ Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI, return ReplaceInstUsesWith(SI, TrueVal); /// NOTE: if we wanted to, this is where to detect integer MIN/MAX } - - /// NOTE: if we wanted to, this is where to detect integer ABS - return Changed ? &SI : 0; } @@ -9416,6 +9583,35 @@ static bool CanSelectOperandBeMappingIntoPredBlock(const Value *V, return false; } +/// FoldSPFofSPF - We have an SPF (e.g. a min or max) of an SPF of the form: +/// SPF2(SPF1(A, B), C) +Instruction *InstCombiner::FoldSPFofSPF(Instruction *Inner, + SelectPatternFlavor SPF1, + Value *A, Value *B, + Instruction &Outer, + SelectPatternFlavor SPF2, Value *C) { + if (C == A || C == B) { + // MAX(MAX(A, B), B) -> MAX(A, B) + // MIN(MIN(a, b), a) -> MIN(a, b) + if (SPF1 == SPF2) + return ReplaceInstUsesWith(Outer, Inner); + + // MAX(MIN(a, b), a) -> a + // MIN(MAX(a, b), a) -> a + if ((SPF1 == SPF_SMIN && SPF2 == SPF_SMAX) || + (SPF1 == SPF_SMAX && SPF2 == SPF_SMIN) || + (SPF1 == SPF_UMIN && SPF2 == SPF_UMAX) || + (SPF1 == SPF_UMAX && SPF2 == SPF_UMIN)) + return ReplaceInstUsesWith(Outer, C); + } + + // TODO: MIN(MIN(A, 23), 97) + return 0; +} + + + + Instruction *InstCombiner::visitSelectInst(SelectInst &SI) { Value *CondVal = SI.getCondition(); Value *TrueVal = SI.getTrueValue(); @@ -9622,9 +9818,28 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) { // See if we can fold the select into one of our operands. 
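Before the select-folding code below resumes, a note on FoldICmpAddOpCst above: its unsigned rewrites can be verified exhaustively at 8 bits. A standalone sketch (plain C++, not LLVM code) in which wrapping unsigned arithmetic models an add with no NUW/NSW flags:

  #include <cassert>
  #include <cstdint>

  int main() {
    for (int xi = 0; xi != 256; ++xi)
      for (int ci = 1; ci != 256; ++ci) {    // C != 0; X+0 is handled early
        uint8_t X = (uint8_t)xi, C = (uint8_t)ci;
        uint8_t Add = (uint8_t)(X + C);
        // (X+C) <u X  <=>  X >u (MAXUINT-C)
        assert((Add < X) == (X > (uint8_t)(0xFF - C)));
        // (X+C) >u X  <=>  X <u (0-C)
        assert((Add > X) == (X < (uint8_t)(0 - C)));
      }
    return 0;
  }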
if (SI.getType()->isInteger()) { - Instruction *FoldI = FoldSelectIntoOp(SI, TrueVal, FalseVal); - if (FoldI) + if (Instruction *FoldI = FoldSelectIntoOp(SI, TrueVal, FalseVal)) return FoldI; + + // MAX(MAX(a, b), a) -> MAX(a, b) + // MIN(MIN(a, b), a) -> MIN(a, b) + // MAX(MIN(a, b), a) -> a + // MIN(MAX(a, b), a) -> a + Value *LHS, *RHS, *LHS2, *RHS2; + if (SelectPatternFlavor SPF = MatchSelectPattern(&SI, LHS, RHS)) { + if (SelectPatternFlavor SPF2 = MatchSelectPattern(LHS, LHS2, RHS2)) + if (Instruction *R = FoldSPFofSPF(cast<Instruction>(LHS),SPF2,LHS2,RHS2, + SI, SPF, RHS)) + return R; + if (SelectPatternFlavor SPF2 = MatchSelectPattern(RHS, LHS2, RHS2)) + if (Instruction *R = FoldSPFofSPF(cast<Instruction>(RHS),SPF2,LHS2,RHS2, + SI, SPF, LHS)) + return R; + } + + // TODO. + // ABS(-X) -> ABS(X) + // ABS(ABS(X)) -> ABS(X) } // See if we can fold the select into a phi node if the condition is a select. @@ -9896,9 +10111,11 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) { Intrinsic::getDeclaration(M, MemCpyID, Tys, 1)); Changed = true; } + } + if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { // memmove(x,x,size) -> noop. - if (MMI->getSource() == MMI->getDest()) + if (MTI->getSource() == MTI->getDest()) return EraseInstFromFunction(CI); } @@ -9923,6 +10140,21 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) { if (Operand->getIntrinsicID() == Intrinsic::bswap) return ReplaceInstUsesWith(CI, Operand->getOperand(1)); break; + case Intrinsic::powi: + if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getOperand(2))) { + // powi(x, 0) -> 1.0 + if (Power->isZero()) + return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0)); + // powi(x, 1) -> x + if (Power->isOne()) + return ReplaceInstUsesWith(CI, II->getOperand(1)); + // powi(x, -1) -> 1/x + if (Power->isAllOnesValue()) + return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0), + II->getOperand(1)); + } + break; + case Intrinsic::uadd_with_overflow: { Value *LHS = II->getOperand(1), *RHS = II->getOperand(2); const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType()); @@ -11232,6 +11464,23 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) { for (unsigned PHIId = 0; PHIId != PHIsToSlice.size(); ++PHIId) { PHINode *PN = PHIsToSlice[PHIId]; + // Scan the input list of the PHI. If any input is an invoke, and if the + // input is defined in the predecessor, then we won't be able to split the + // critical edge which is required to insert a truncate. Because of this, + // we have to bail out. + for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { + InvokeInst *II = dyn_cast<InvokeInst>(PN->getIncomingValue(i)); + if (II == 0) continue; + if (II->getParent() != PN->getIncomingBlock(i)) + continue; + + // If we have an invoke, and if it's directly in the predecessor, then we + // have a critical edge where we need to put the truncate. Since we can't + // split the edge in instcombine, we have to bail out. + return 0; + } + + for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end(); UI != E; ++UI) { Instruction *User = cast<Instruction>(*UI); @@ -11314,7 +11563,9 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) { PredVal = EltPHI; EltPHI->addIncoming(PredVal, Pred); continue; - } else if (PHINode *InPHI = dyn_cast<PHINode>(PN)) { + } + + if (PHINode *InPHI = dyn_cast<PHINode>(PN)) { // If the incoming value was a PHI, and if it was one of the PHIs we // already rewrote, just use the lowered value.
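The nested min/max identities that the MatchSelectPattern/FoldSPFofSPF pair above relies on are easy to confirm with a standalone loop (plain C++, illustrative only; the PHI-slicing hunk resumes just below):

  #include <algorithm>
  #include <cassert>

  int main() {
    for (int a = -8; a <= 8; ++a)
      for (int b = -8; b <= 8; ++b) {
        assert(std::max(std::max(a, b), b) == std::max(a, b)); // MAX(MAX(a,b),b)
        assert(std::min(std::min(a, b), a) == std::min(a, b)); // MIN(MIN(a,b),a)
        assert(std::max(std::min(a, b), a) == a);              // MAX(MIN(a,b),a)
        assert(std::min(std::max(a, b), a) == a);              // MIN(MAX(a,b),a)
      }
    return 0;
  }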
if (Value *Res = ExtractedVals[LoweredPHIRecord(InPHI, Offset, Ty)]) { diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp index d58b9c9..7e6cf79 100644 --- a/lib/Transforms/Scalar/JumpThreading.cpp +++ b/lib/Transforms/Scalar/JumpThreading.cpp @@ -29,6 +29,7 @@ #include "llvm/ADT/SmallSet.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" +#include "llvm/Support/ValueHandle.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; diff --git a/lib/Transforms/Scalar/LICM.cpp b/lib/Transforms/Scalar/LICM.cpp index 42a8fdc..99f3ae0 100644 --- a/lib/Transforms/Scalar/LICM.cpp +++ b/lib/Transforms/Scalar/LICM.cpp @@ -433,7 +433,7 @@ bool LICM::isNotUsedInLoop(Instruction &I) { if (PN->getIncomingValue(i) == &I) if (CurLoop->contains(PN->getIncomingBlock(i))) return false; - } else if (CurLoop->contains(User->getParent())) { + } else if (CurLoop->contains(User)) { return false; } } @@ -831,7 +831,7 @@ void LICM::FindPromotableValuesInLoop( UI != UE; ++UI) { // Ignore instructions not in this loop. Instruction *Use = dyn_cast<Instruction>(*UI); - if (!Use || !CurLoop->contains(Use->getParent())) + if (!Use || !CurLoop->contains(Use)) continue; if (!isa<LoadInst>(Use) && !isa<StoreInst>(Use)) { diff --git a/lib/Transforms/Scalar/LoopIndexSplit.cpp b/lib/Transforms/Scalar/LoopIndexSplit.cpp index 8b6a233..1d9dd68 100644 --- a/lib/Transforms/Scalar/LoopIndexSplit.cpp +++ b/lib/Transforms/Scalar/LoopIndexSplit.cpp @@ -288,7 +288,7 @@ bool LoopIndexSplit::runOnLoop(Loop *IncomingLoop, LPPassManager &LPM_Ref) { // isUsedOutsideLoop - Returns true iff V is used outside the loop L. static bool isUsedOutsideLoop(Value *V, Loop *L) { for(Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI) - if (!L->contains(cast<Instruction>(*UI)->getParent())) + if (!L->contains(cast<Instruction>(*UI))) return true; return false; } @@ -842,7 +842,7 @@ void LoopIndexSplit::updatePHINodes(BasicBlock *ExitBB, BasicBlock *Latch, for (Value::use_iterator UI = PHV->use_begin(), E = PHV->use_end(); UI != E; ++UI) if (PHINode *U = dyn_cast<PHINode>(*UI)) - if (LP->contains(U->getParent())) { + if (LP->contains(U)) { NewV = U; break; } diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp index 85cc712..85f7368 100644 --- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp +++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp @@ -144,7 +144,7 @@ namespace { /// StrengthReduceIVUsersOfStride - Strength reduce all of the users of a /// single stride of IV. All of the users may have different starting /// values, and this may not be the only stride. 
- void StrengthReduceIVUsersOfStride(const SCEV *const &Stride, + void StrengthReduceIVUsersOfStride(const SCEV *Stride, IVUsersOfOneStride &Uses, Loop *L); void StrengthReduceIVUsers(Loop *L); @@ -157,14 +157,14 @@ namespace { bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse, const SCEV* &CondStride); bool RequiresTypeConversion(const Type *Ty, const Type *NewTy); - const SCEV *CheckForIVReuse(bool, bool, bool, const SCEV *const&, + const SCEV *CheckForIVReuse(bool, bool, bool, const SCEV *, IVExpr&, const Type*, const std::vector<BasedUser>& UsersToProcess); bool ValidScale(bool, int64_t, const std::vector<BasedUser>& UsersToProcess); bool ValidOffset(bool, int64_t, int64_t, const std::vector<BasedUser>& UsersToProcess); - const SCEV *CollectIVUsers(const SCEV *const &Stride, + const SCEV *CollectIVUsers(const SCEV *Stride, IVUsersOfOneStride &Uses, Loop *L, bool &AllUsesAreAddresses, @@ -212,8 +212,6 @@ Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) { /// specified set are trivially dead, delete them and see if this makes any of /// their operands subsequently dead. void LoopStrengthReduce::DeleteTriviallyDeadInstructions() { - if (DeadInsts.empty()) return; - while (!DeadInsts.empty()) { Instruction *I = dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val()); @@ -232,44 +230,6 @@ void LoopStrengthReduce::DeleteTriviallyDeadInstructions() { } } -/// containsAddRecFromDifferentLoop - Determine whether expression S involves a -/// subexpression that is an AddRec from a loop other than L. An outer loop -/// of L is OK, but not an inner loop nor a disjoint loop. -static bool containsAddRecFromDifferentLoop(const SCEV *S, Loop *L) { - // This is very common, put it first. - if (isa<SCEVConstant>(S)) - return false; - if (const SCEVCommutativeExpr *AE = dyn_cast<SCEVCommutativeExpr>(S)) { - for (unsigned int i=0; i< AE->getNumOperands(); i++) - if (containsAddRecFromDifferentLoop(AE->getOperand(i), L)) - return true; - return false; - } - if (const SCEVAddRecExpr *AE = dyn_cast<SCEVAddRecExpr>(S)) { - if (const Loop *newLoop = AE->getLoop()) { - if (newLoop == L) - return false; - // if newLoop is an outer loop of L, this is OK. - if (newLoop->contains(L->getHeader())) - return false; - } - return true; - } - if (const SCEVUDivExpr *DE = dyn_cast<SCEVUDivExpr>(S)) - return containsAddRecFromDifferentLoop(DE->getLHS(), L) || - containsAddRecFromDifferentLoop(DE->getRHS(), L); -#if 0 - // SCEVSDivExpr has been backed out temporarily, but will be back; we'll - // need this when it is. - if (const SCEVSDivExpr *DE = dyn_cast<SCEVSDivExpr>(S)) - return containsAddRecFromDifferentLoop(DE->getLHS(), L) || - containsAddRecFromDifferentLoop(DE->getRHS(), L); -#endif - if (const SCEVCastExpr *CE = dyn_cast<SCEVCastExpr>(S)) - return containsAddRecFromDifferentLoop(CE->getOperand(), L); - return false; -} - /// isAddressUse - Returns true if the specified instruction is using the /// specified value as an address. static bool isAddressUse(Instruction *Inst, Value *OperandVal) { @@ -362,13 +322,13 @@ namespace { // Once we rewrite the code to insert the new IVs we want, update the // operands of Inst to use the new expression 'NewBase', with 'Imm' added // to it. 
- void RewriteInstructionToUseNewBase(const SCEV *const &NewBase, + void RewriteInstructionToUseNewBase(const SCEV *NewBase, Instruction *InsertPt, SCEVExpander &Rewriter, Loop *L, Pass *P, SmallVectorImpl<WeakVH> &DeadInsts, ScalarEvolution *SE); - Value *InsertCodeForBaseAtPosition(const SCEV *const &NewBase, + Value *InsertCodeForBaseAtPosition(const SCEV *NewBase, const Type *Ty, SCEVExpander &Rewriter, Instruction *IP, @@ -378,12 +338,12 @@ } void BasedUser::dump() const { - errs() << " Base=" << *Base; - errs() << " Imm=" << *Imm; - errs() << " Inst: " << *Inst; + dbgs() << " Base=" << *Base; + dbgs() << " Imm=" << *Imm; + dbgs() << " Inst: " << *Inst; } -Value *BasedUser::InsertCodeForBaseAtPosition(const SCEV *const &NewBase, +Value *BasedUser::InsertCodeForBaseAtPosition(const SCEV *NewBase, const Type *Ty, SCEVExpander &Rewriter, Instruction *IP, @@ -407,7 +367,7 @@ Value *BasedUser::InsertCodeForBaseAtPosition(const SCEV *const &NewBase, // value of NewBase in the case that it's a different instruction from // the PHI that NewBase is computed from, or null otherwise. // -void BasedUser::RewriteInstructionToUseNewBase(const SCEV *const &NewBase, +void BasedUser::RewriteInstructionToUseNewBase(const SCEV *NewBase, Instruction *NewBasePt, SCEVExpander &Rewriter, Loop *L, Pass *P, SmallVectorImpl<WeakVH> &DeadInsts, @@ -428,7 +388,7 @@ void BasedUser::RewriteInstructionToUseNewBase(const SCEV *const &NewBase, // If this is a use outside the loop (which means after, since it is based // on a loop indvar) we use the post-incremented value, so that we don't // artificially make the preinc value live out the bottom of the loop. - if (!isUseOfPostIncrementedValue && L->contains(Inst->getParent())) { + if (!isUseOfPostIncrementedValue && L->contains(Inst)) { if (NewBasePt && isa<PHINode>(OperandValToReplace)) { InsertPt = NewBasePt; ++InsertPt; @@ -444,9 +404,9 @@ void BasedUser::RewriteInstructionToUseNewBase(const SCEV *const &NewBase, // Replace the use of the operand Value with the new Phi we just created. Inst->replaceUsesOfWith(OperandValToReplace, NewVal); - DEBUG(errs() << " Replacing with "); - DEBUG(WriteAsOperand(errs(), NewVal, /*PrintType=*/false)); - DEBUG(errs() << ", which has value " << *NewBase << " plus IMM " + DEBUG(dbgs() << " Replacing with "); + DEBUG(WriteAsOperand(dbgs(), NewVal, /*PrintType=*/false)); + DEBUG(dbgs() << ", which has value " << *NewBase << " plus IMM " << *Imm << "\n"); return; } @@ -469,7 +429,7 @@ void BasedUser::RewriteInstructionToUseNewBase(const SCEV *const &NewBase, // that case(?). Instruction *OldLoc = dyn_cast<Instruction>(OperandValToReplace); BasicBlock *PHIPred = PN->getIncomingBlock(i); - if (L->contains(OldLoc->getParent())) { + if (L->contains(OldLoc)) { // If this is a critical edge, split the edge so that we do not insert // the code on all predecessor/successor paths. We do this unless this // is the canonical backedge for this loop, as this can make some @@ -486,7 +446,7 @@ void BasedUser::RewriteInstructionToUseNewBase(const SCEV *const &NewBase, // is outside of the loop, and PredTI is in the loop, we want to // move the block to be immediately before the PHI block, not // immediately after PredTI. - if (L->contains(PHIPred) && !L->contains(PN->getParent())) + if (L->contains(PHIPred) && !L->contains(PN)) NewBB->moveBefore(PN->getParent()); // Splitting the edge can reduce the number of PHI entries we have.
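The recurring change in these hunks, L->contains(Inst->getParent()) becoming L->contains(Inst), reads as a convenience overload that forwards to the block-based query. A self-contained mock of the pattern (simplified stand-ins, not the real LLVM Loop/Instruction classes):

  #include <set>

  struct BasicBlock {};

  class Instruction {
    BasicBlock *Parent;
  public:
    explicit Instruction(BasicBlock *BB) : Parent(BB) {}
    BasicBlock *getParent() const { return Parent; }
  };

  class Loop {
    std::set<const BasicBlock*> Blocks;
  public:
    void addBlock(const BasicBlock *BB) { Blocks.insert(BB); }
    bool contains(const BasicBlock *BB) const { return Blocks.count(BB) != 0; }
    // The overload callers lean on: forwards to the block query so call
    // sites can drop the explicit ->getParent().
    bool contains(const Instruction *I) const { return contains(I->getParent()); }
  };

  int main() {
    BasicBlock BB;
    Instruction I(&BB);
    Loop L;
    L.addBlock(&BB);
    return L.contains(&I) ? 0 : 1;   // exits 0: the instruction is in the loop
  }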
@@ -498,15 +458,15 @@ void BasedUser::RewriteInstructionToUseNewBase(const SCEV *const &NewBase, Value *&Code = InsertedCode[PHIPred]; if (!Code) { // Insert the code into the end of the predecessor block. - Instruction *InsertPt = (L->contains(OldLoc->getParent())) ? + Instruction *InsertPt = (L->contains(OldLoc)) ? PHIPred->getTerminator() : OldLoc->getParent()->getTerminator(); Code = InsertCodeForBaseAtPosition(NewBase, PN->getType(), Rewriter, InsertPt, SE); - DEBUG(errs() << " Changing PHI use to "); - DEBUG(WriteAsOperand(errs(), Code, /*PrintType=*/false)); - DEBUG(errs() << ", which has value " << *NewBase << " plus IMM " + DEBUG(dbgs() << " Changing PHI use to "); + DEBUG(WriteAsOperand(dbgs(), Code, /*PrintType=*/false)); + DEBUG(dbgs() << ", which has value " << *NewBase << " plus IMM " << *Imm << "\n"); } @@ -523,7 +483,7 @@ void BasedUser::RewriteInstructionToUseNewBase(const SCEV *const &NewBase, /// fitsInAddressMode - Return true if V can be subsumed within an addressing /// mode, and does not need to be put in a register first. -static bool fitsInAddressMode(const SCEV *const &V, const Type *AccessTy, +static bool fitsInAddressMode(const SCEV *V, const Type *AccessTy, const TargetLowering *TLI, bool HasBaseReg) { if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(V)) { int64_t VC = SC->getValue()->getSExtValue(); @@ -737,7 +697,7 @@ RemoveCommonExpressionsFromUseBases(std::vector<BasedUser> &Uses, // it is clearly shared across all the IV's. If the use is outside the loop // (which means after it) we don't want to factor anything *into* the loop, // so just use 0 as the base. - if (L->contains(Uses[0].Inst->getParent())) + if (L->contains(Uses[0].Inst)) std::swap(Result, Uses[0].Base); return Result; } @@ -762,7 +722,7 @@ RemoveCommonExpressionsFromUseBases(std::vector<BasedUser> &Uses, // after the loop to affect base computation of values *inside* the loop, // because we can always add their offsets to the result IV after the loop // is done, ensuring we get good code inside the loop. - if (!L->contains(Uses[i].Inst->getParent())) + if (!L->contains(Uses[i].Inst)) continue; NumUsesInsideLoop++; @@ -818,7 +778,7 @@ RemoveCommonExpressionsFromUseBases(std::vector<BasedUser> &Uses, // and a Result in the same instruction (for example because it would // require too many registers). Check this. for (unsigned i=0; i<NumUses; ++i) { - if (!L->contains(Uses[i].Inst->getParent())) + if (!L->contains(Uses[i].Inst)) continue; // We know this is an addressing mode use; if there are any uses that // are not, FreeResult would be Zero. @@ -854,7 +814,7 @@ RemoveCommonExpressionsFromUseBases(std::vector<BasedUser> &Uses, // the final IV value coming into those uses does. Instead of trying to // remove the pieces of the common base, which might not be there, // subtract off the base to compensate for this. 
- if (!L->contains(Uses[i].Inst->getParent())) { + if (!L->contains(Uses[i].Inst)) { Uses[i].Base = SE->getMinusSCEV(Uses[i].Base, Result); continue; } @@ -975,7 +935,7 @@ bool LoopStrengthReduce::RequiresTypeConversion(const Type *Ty1, const SCEV *LoopStrengthReduce::CheckForIVReuse(bool HasBaseReg, bool AllUsesAreAddresses, bool AllUsesAreOutsideLoop, - const SCEV *const &Stride, + const SCEV *Stride, IVExpr &IV, const Type *Ty, const std::vector<BasedUser>& UsersToProcess) { if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Stride)) { @@ -1088,7 +1048,7 @@ static bool PartitionByIsUseOfPostIncrementedValue(const BasedUser &Val) { /// isNonConstantNegative - Return true if the specified scev is negated, but /// not a constant. -static bool isNonConstantNegative(const SCEV *const &Expr) { +static bool isNonConstantNegative(const SCEV *Expr) { const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Expr); if (!Mul) return false; @@ -1105,7 +1065,7 @@ static bool isNonConstantNegative(const SCEV *const &Expr) { /// base of the strided accesses, as well as the old information from Uses. We /// progressively move information from the Base field to the Imm field, until /// we eventually have the full access expression to rewrite the use. -const SCEV *LoopStrengthReduce::CollectIVUsers(const SCEV *const &Stride, +const SCEV *LoopStrengthReduce::CollectIVUsers(const SCEV *Stride, IVUsersOfOneStride &Uses, Loop *L, bool &AllUsesAreAddresses, @@ -1149,7 +1109,7 @@ const SCEV *LoopStrengthReduce::CollectIVUsers(const SCEV *const &Stride, // If the user is not in the current loop, this means it is using the exit // value of the IV. Do not put anything in the base, make sure it's all in // the immediate field to allow as much factoring as possible. - if (!L->contains(UsersToProcess[i].Inst->getParent())) { + if (!L->contains(UsersToProcess[i].Inst)) { UsersToProcess[i].Imm = SE->getAddExpr(UsersToProcess[i].Imm, UsersToProcess[i].Base); UsersToProcess[i].Base = @@ -1361,7 +1321,7 @@ LoopStrengthReduce::PrepareToStrengthReduceFully( const SCEV *CommonExprs, const Loop *L, SCEVExpander &PreheaderRewriter) { - DEBUG(errs() << " Fully reducing all users\n"); + DEBUG(dbgs() << " Fully reducing all users\n"); // Rewrite the UsersToProcess records, creating a separate PHI for each // unique Base value. 
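For readers following the StrengthReduceIVUsersOfStride plumbing, the rewrite it exists to perform looks like this at the source level: a per-iteration multiply by the stride becomes a second induction variable bumped by the stride each time around. A standalone illustration (plain C++, not LLVM code):

  #include <cassert>

  int main() {
    const int Stride = 12;
    int Before[16], After[16];
    // Before strength reduction: recompute i * Stride every iteration.
    for (int i = 0; i != 16; ++i)
      Before[i] = i * Stride;
    // After: the multiply is reduced to repeated addition on a new IV.
    int IV = 0;
    for (int i = 0; i != 16; ++i, IV += Stride)
      After[i] = IV;
    for (int i = 0; i != 16; ++i)
      assert(Before[i] == After[i]);
    return 0;
  }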
@@ -1393,7 +1353,7 @@ static Instruction *FindIVIncInsertPt(std::vector<BasedUser> &UsersToProcess, const Loop *L) { if (UsersToProcess.size() == 1 && UsersToProcess[0].isUseOfPostIncrementedValue && - L->contains(UsersToProcess[0].Inst->getParent())) + L->contains(UsersToProcess[0].Inst)) return UsersToProcess[0].Inst; return L->getLoopLatch()->getTerminator(); } @@ -1410,7 +1370,7 @@ LoopStrengthReduce::PrepareToStrengthReduceWithNewPhi( Instruction *IVIncInsertPt, const Loop *L, SCEVExpander &PreheaderRewriter) { - DEBUG(errs() << " Inserting new PHI:\n"); + DEBUG(dbgs() << " Inserting new PHI:\n"); PHINode *Phi = InsertAffinePhi(SE->getUnknown(CommonBaseV), Stride, IVIncInsertPt, L, @@ -1423,9 +1383,9 @@ LoopStrengthReduce::PrepareToStrengthReduceWithNewPhi( for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) UsersToProcess[i].Phi = Phi; - DEBUG(errs() << " IV="); - DEBUG(WriteAsOperand(errs(), Phi, /*PrintType=*/false)); - DEBUG(errs() << "\n"); + DEBUG(dbgs() << " IV="); + DEBUG(WriteAsOperand(dbgs(), Phi, /*PrintType=*/false)); + DEBUG(dbgs() << "\n"); } /// PrepareToStrengthReduceFromSmallerStride - Prepare for the given users to @@ -1438,7 +1398,7 @@ LoopStrengthReduce::PrepareToStrengthReduceFromSmallerStride( Value *CommonBaseV, const IVExpr &ReuseIV, Instruction *PreInsertPt) { - DEBUG(errs() << " Rewriting in terms of existing IV of STRIDE " + DEBUG(dbgs() << " Rewriting in terms of existing IV of STRIDE " << *ReuseIV.Stride << " and BASE " << *ReuseIV.Base << "\n"); // All the users will share the reused IV. @@ -1482,7 +1442,7 @@ static bool IsImmFoldedIntoAddrMode(GlobalValue *GV, int64_t Offset, /// stride of IV. All of the users may have different starting values, and this /// may not be the only stride. void -LoopStrengthReduce::StrengthReduceIVUsersOfStride(const SCEV *const &Stride, +LoopStrengthReduce::StrengthReduceIVUsersOfStride(const SCEV *Stride, IVUsersOfOneStride &Uses, Loop *L) { // If all the users are moved to another stride, then there is nothing to do. @@ -1547,7 +1507,7 @@ LoopStrengthReduce::StrengthReduceIVUsersOfStride(const SCEV *const &Stride, UsersToProcess, TLI); if (DoSink) { - DEBUG(errs() << " Sinking " << *Imm << " back down into uses\n"); + DEBUG(dbgs() << " Sinking " << *Imm << " back down into uses\n"); for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) UsersToProcess[i].Imm = SE->getAddExpr(UsersToProcess[i].Imm, Imm); CommonExprs = NewCommon; @@ -1559,7 +1519,7 @@ LoopStrengthReduce::StrengthReduceIVUsersOfStride(const SCEV *const &Stride, // Now that we know what we need to do, insert the PHI node itself. // - DEBUG(errs() << "LSR: Examining IVs of TYPE " << *ReplacedTy << " of STRIDE " + DEBUG(dbgs() << "LSR: Examining IVs of TYPE " << *ReplacedTy << " of STRIDE " << *Stride << ":\n" << " Common base: " << *CommonExprs << "\n"); @@ -1623,10 +1583,10 @@ LoopStrengthReduce::StrengthReduceIVUsersOfStride(const SCEV *const &Stride, if (!Base->isZero()) { BaseV = PreheaderRewriter.expandCodeFor(Base, 0, PreInsertPt); - DEBUG(errs() << " INSERTING code for BASE = " << *Base << ":"); + DEBUG(dbgs() << " INSERTING code for BASE = " << *Base << ":"); if (BaseV->hasName()) - DEBUG(errs() << " Result value name = %" << BaseV->getName()); - DEBUG(errs() << "\n"); + DEBUG(dbgs() << " Result value name = %" << BaseV->getName()); + DEBUG(dbgs() << "\n"); // If BaseV is a non-zero constant, make sure that it gets inserted into // the preheader, instead of being forward substituted into the uses. 
We @@ -1647,15 +1607,15 @@ LoopStrengthReduce::StrengthReduceIVUsersOfStride(const SCEV *const &Stride, // FIXME: Use emitted users to emit other users. BasedUser &User = UsersToProcess.back(); - DEBUG(errs() << " Examining "); + DEBUG(dbgs() << " Examining "); if (User.isUseOfPostIncrementedValue) - DEBUG(errs() << "postinc"); + DEBUG(dbgs() << "postinc"); else - DEBUG(errs() << "preinc"); - DEBUG(errs() << " use "); - DEBUG(WriteAsOperand(errs(), UsersToProcess.back().OperandValToReplace, + DEBUG(dbgs() << "preinc"); + DEBUG(dbgs() << " use "); + DEBUG(WriteAsOperand(dbgs(), UsersToProcess.back().OperandValToReplace, /*PrintType=*/false)); - DEBUG(errs() << " in Inst: " << *User.Inst); + DEBUG(dbgs() << " in Inst: " << *User.Inst); // If this instruction wants to use the post-incremented value, move it // after the post-inc and use its value instead of the PHI. @@ -1666,7 +1626,7 @@ LoopStrengthReduce::StrengthReduceIVUsersOfStride(const SCEV *const &Stride, // loop to ensure it is dominated by the increment. In case it's the // only use of the iv, the increment instruction is already before the // use. - if (L->contains(User.Inst->getParent()) && User.Inst != IVIncInsertPt) + if (L->contains(User.Inst) && User.Inst != IVIncInsertPt) User.Inst->moveBefore(IVIncInsertPt); } @@ -1728,7 +1688,7 @@ LoopStrengthReduce::StrengthReduceIVUsersOfStride(const SCEV *const &Stride, // common base, and are adding it back here. Use the same expression // as before, rather than CommonBaseV, so DAGCombiner will zap it. if (!CommonExprs->isZero()) { - if (L->contains(User.Inst->getParent())) + if (L->contains(User.Inst)) RewriteExpr = SE->getAddExpr(RewriteExpr, SE->getUnknown(CommonBaseV)); else @@ -1815,7 +1775,7 @@ namespace { const ScalarEvolution *SE; explicit StrideCompare(const ScalarEvolution *se) : SE(se) {} - bool operator()(const SCEV *const &LHS, const SCEV *const &RHS) { + bool operator()(const SCEV *LHS, const SCEV *RHS) { const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS); const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS); if (LHSC && RHSC) { @@ -2069,8 +2029,8 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond, Cond = new ICmpInst(OldCond, Predicate, NewCmpLHS, NewCmpRHS, L->getHeader()->getName() + ".termcond"); - DEBUG(errs() << " Change compare stride in Inst " << *OldCond); - DEBUG(errs() << " to " << *Cond << '\n'); + DEBUG(dbgs() << " Change compare stride in Inst " << *OldCond); + DEBUG(dbgs() << " to " << *Cond << '\n'); // Remove the old compare instruction. The old indvar is probably dead too. DeadInsts.push_back(CondUse->getOperandValToReplace()); @@ -2403,7 +2363,7 @@ static bool isUsedByExitBranch(ICmpInst *Cond, Loop *L) { static bool ShouldCountToZero(ICmpInst *Cond, IVStrideUse* &CondUse, ScalarEvolution *SE, Loop *L, const TargetLowering *TLI = 0) { - if (!L->contains(Cond->getParent())) + if (!L->contains(Cond)) return false; if (!isa<SCEVConstant>(CondUse->getOffset())) @@ -2529,7 +2489,7 @@ void LoopStrengthReduce::OptimizeLoopTermCond(Loop *L) { if (!UsePostInc) continue; - DEBUG(errs() << " Change loop exiting icmp to use postinc iv: " + DEBUG(dbgs() << " Change loop exiting icmp to use postinc iv: " << *Cond << '\n'); // It's possible for the setcc instruction to be anywhere in the loop, and @@ -2608,9 +2568,9 @@ bool LoopStrengthReduce::OptimizeLoopCountIVOfStride(const SCEV* &Stride, } // Replace the increment with a decrement. 
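Before the hunk continues, the count-down rewrite being performed here, shown at the source level (a standalone sketch under the simplest assumptions: unit stride, StartVal <= EndVal): the exit compare tests against zero, with the start value adjusted to EndVal - StartVal.

  #include <cassert>

  int main() {
    const int StartVal = 3, EndVal = 19;
    int UpIters = 0, DownIters = 0;
    for (int i = StartVal; i != EndVal; ++i)      // original: count up
      ++UpIters;
    for (int i = EndVal - StartVal; i != 0; --i)  // rewritten: count down to 0
      ++DownIters;
    assert(UpIters == DownIters);
    return 0;
  }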
- DEBUG(errs() << "LSR: Examining use "); - DEBUG(WriteAsOperand(errs(), CondOp0, /*PrintType=*/false)); - DEBUG(errs() << " in Inst: " << *Cond << '\n'); + DEBUG(dbgs() << "LSR: Examining use "); + DEBUG(WriteAsOperand(dbgs(), CondOp0, /*PrintType=*/false)); + DEBUG(dbgs() << " in Inst: " << *Cond << '\n'); BinaryOperator *Decr = BinaryOperator::Create(Instruction::Sub, Incr->getOperand(0), Incr->getOperand(1), "tmp", Incr); Incr->replaceAllUsesWith(Decr); @@ -2624,7 +2584,7 @@ bool LoopStrengthReduce::OptimizeLoopCountIVOfStride(const SCEV* &Stride, unsigned InBlock = L->contains(PHIExpr->getIncomingBlock(0)) ? 1 : 0; Value *StartVal = PHIExpr->getIncomingValue(InBlock); Value *EndVal = Cond->getOperand(1); - DEBUG(errs() << " Optimize loop counting iv to count down [" + DEBUG(dbgs() << " Optimize loop counting iv to count down [" << *EndVal << " .. " << *StartVal << "]\n"); // FIXME: check for case where both are constant. @@ -2633,7 +2593,7 @@ bool LoopStrengthReduce::OptimizeLoopCountIVOfStride(const SCEV* &Stride, EndVal, StartVal, "tmp", PreInsertPt); PHIExpr->setIncomingValue(InBlock, NewStartVal); Cond->setOperand(1, Zero); - DEBUG(errs() << " New icmp: " << *Cond << "\n"); + DEBUG(dbgs() << " New icmp: " << *Cond << "\n"); int64_t SInt = cast<SCEVConstant>(Stride)->getValue()->getSExtValue(); const SCEV *NewStride = 0; @@ -2716,9 +2676,9 @@ bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager &LPM) { return false; if (!IU->IVUsesByStride.empty()) { - DEBUG(errs() << "\nLSR on \"" << L->getHeader()->getParent()->getName() + DEBUG(dbgs() << "\nLSR on \"" << L->getHeader()->getParent()->getName() << "\" "; - L->dump()); + L->print(dbgs())); // Sort the StrideOrder so we process larger strides first. std::stable_sort(IU->StrideOrder.begin(), IU->StrideOrder.end(), @@ -2758,8 +2718,7 @@ bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager &LPM) { IVsByStride.clear(); // Clean up after ourselves - if (!DeadInsts.empty()) - DeleteTriviallyDeadInstructions(); + DeleteTriviallyDeadInstructions(); } // At this point, it is worth checking to see if any recurrence PHIs are also diff --git a/lib/Transforms/Scalar/LoopUnswitch.cpp b/lib/Transforms/Scalar/LoopUnswitch.cpp index b7adfdc..0c19133 100644 --- a/lib/Transforms/Scalar/LoopUnswitch.cpp +++ b/lib/Transforms/Scalar/LoopUnswitch.cpp @@ -877,7 +877,7 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC, for (unsigned i = 0, e = Users.size(); i != e; ++i) if (Instruction *U = cast<Instruction>(Users[i])) { - if (!L->contains(U->getParent())) + if (!L->contains(U)) continue; U->replaceUsesOfWith(LIC, Replacement); Worklist.push_back(U); @@ -888,7 +888,7 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC, // can. This case occurs when we unswitch switch statements. for (unsigned i = 0, e = Users.size(); i != e; ++i) if (Instruction *U = cast<Instruction>(Users[i])) { - if (!L->contains(U->getParent())) + if (!L->contains(U)) continue; Worklist.push_back(U); diff --git a/lib/Transforms/Scalar/Reassociate.cpp b/lib/Transforms/Scalar/Reassociate.cpp index 8466918..827b47d 100644 --- a/lib/Transforms/Scalar/Reassociate.cpp +++ b/lib/Transforms/Scalar/Reassociate.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // // This pass reassociates commutative expressions in an order that is designed -// to promote better constant propagation, GCSE, LICM, PRE... +// to promote better constant propagation, GCSE, LICM, PRE, etc. 
// // For example: 4 + (x + 5) -> x + (4 + 5) // @@ -35,8 +35,8 @@ #include "llvm/Support/raw_ostream.h" #include "llvm/ADT/PostOrderIterator.h" #include "llvm/ADT/Statistic.h" +#include "llvm/ADT/DenseMap.h" #include <algorithm> -#include <map> using namespace llvm; STATISTIC(NumLinear , "Number of insts linearized"); @@ -58,21 +58,22 @@ namespace { #ifndef NDEBUG /// PrintOps - Print out the expression identified in the Ops list. /// -static void PrintOps(Instruction *I, const std::vector<ValueEntry> &Ops) { +static void PrintOps(Instruction *I, const SmallVectorImpl<ValueEntry> &Ops) { Module *M = I->getParent()->getParent()->getParent(); errs() << Instruction::getOpcodeName(I->getOpcode()) << " " - << *Ops[0].Op->getType(); + << *Ops[0].Op->getType() << '\t'; for (unsigned i = 0, e = Ops.size(); i != e; ++i) { - WriteAsOperand(errs() << " ", Ops[i].Op, false, M); - errs() << "," << Ops[i].Rank; + errs() << "[ "; + WriteAsOperand(errs(), Ops[i].Op, false, M); + errs() << ", #" << Ops[i].Rank << "] "; } } #endif namespace { class Reassociate : public FunctionPass { - std::map<BasicBlock*, unsigned> RankMap; - std::map<AssertingVH<>, unsigned> ValueRankMap; + DenseMap<BasicBlock*, unsigned> RankMap; + DenseMap<AssertingVH<>, unsigned> ValueRankMap; bool MadeChange; public: static char ID; // Pass identification, replacement for typeid @@ -86,11 +87,13 @@ namespace { private: void BuildRankMap(Function &F); unsigned getRank(Value *V); - void ReassociateExpression(BinaryOperator *I); - void RewriteExprTree(BinaryOperator *I, std::vector<ValueEntry> &Ops, + Value *ReassociateExpression(BinaryOperator *I); + void RewriteExprTree(BinaryOperator *I, SmallVectorImpl<ValueEntry> &Ops, unsigned Idx = 0); - Value *OptimizeExpression(BinaryOperator *I, std::vector<ValueEntry> &Ops); - void LinearizeExprTree(BinaryOperator *I, std::vector<ValueEntry> &Ops); + Value *OptimizeExpression(BinaryOperator *I, + SmallVectorImpl<ValueEntry> &Ops); + Value *OptimizeAdd(Instruction *I, SmallVectorImpl<ValueEntry> &Ops); + void LinearizeExprTree(BinaryOperator *I, SmallVectorImpl<ValueEntry> &Ops); void LinearizeExpr(BinaryOperator *I); Value *RemoveFactorFromExpression(Value *V, Value *Factor); void ReassociateBB(BasicBlock *BB); @@ -107,10 +110,13 @@ FunctionPass *llvm::createReassociatePass() { return new Reassociate(); } void Reassociate::RemoveDeadBinaryOp(Value *V) { Instruction *Op = dyn_cast<Instruction>(V); - if (!Op || !isa<BinaryOperator>(Op) || !isa<CmpInst>(Op) || !Op->use_empty()) + if (!Op || !isa<BinaryOperator>(Op) || !Op->use_empty()) return; Value *LHS = Op->getOperand(0), *RHS = Op->getOperand(1); + + ValueRankMap.erase(Op); + Op->eraseFromParent(); RemoveDeadBinaryOp(LHS); RemoveDeadBinaryOp(RHS); } @@ -156,13 +162,14 @@ void Reassociate::BuildRankMap(Function &F) { } unsigned Reassociate::getRank(Value *V) { - if (isa<Argument>(V)) return ValueRankMap[V]; // Function argument... - Instruction *I = dyn_cast<Instruction>(V); - if (I == 0) return 0; // Otherwise it's a global or constant, rank 0. + if (I == 0) { + if (isa<Argument>(V)) return ValueRankMap[V]; // Function argument. + return 0; // Otherwise it's a global or constant, rank 0. + } - unsigned &CachedRank = ValueRankMap[I]; - if (CachedRank) return CachedRank; // Rank already known? + if (unsigned Rank = ValueRankMap[I]) + return Rank; // Rank already known? // If this is an expression, return the 1+MAX(rank(LHS), rank(RHS)) so that // we can reassociate expressions for code motion! 
Since we do not recurse @@ -182,7 +189,7 @@ unsigned Reassociate::getRank(Value *V) { //DEBUG(errs() << "Calculated Rank[" << V->getName() << "] = " // << Rank << "\n"); - return CachedRank = Rank; + return ValueRankMap[I] = Rank; } /// isReassociableOp - Return true if V is an instruction of the specified @@ -197,7 +204,7 @@ static BinaryOperator *isReassociableOp(Value *V, unsigned Opcode) { /// LowerNegateToMultiply - Replace 0-X with X*-1. /// static Instruction *LowerNegateToMultiply(Instruction *Neg, - std::map<AssertingVH<>, unsigned> &ValueRankMap) { + DenseMap<AssertingVH<>, unsigned> &ValueRankMap) { Constant *Cst = Constant::getAllOnesValue(Neg->getType()); Instruction *Res = BinaryOperator::CreateMul(Neg->getOperand(1), Cst, "",Neg); @@ -250,7 +257,7 @@ void Reassociate::LinearizeExpr(BinaryOperator *I) { /// caller MUST use something like RewriteExprTree to put the values back in. /// void Reassociate::LinearizeExprTree(BinaryOperator *I, - std::vector<ValueEntry> &Ops) { + SmallVectorImpl<ValueEntry> &Ops) { Value *LHS = I->getOperand(0), *RHS = I->getOperand(1); unsigned Opcode = I->getOpcode(); @@ -282,15 +289,15 @@ void Reassociate::LinearizeExprTree(BinaryOperator *I, I->setOperand(0, UndefValue::get(I->getType())); I->setOperand(1, UndefValue::get(I->getType())); return; - } else { - // Turn X+(Y+Z) -> (Y+Z)+X - std::swap(LHSBO, RHSBO); - std::swap(LHS, RHS); - bool Success = !I->swapOperands(); - assert(Success && "swapOperands failed"); - Success = false; - MadeChange = true; } + + // Turn X+(Y+Z) -> (Y+Z)+X + std::swap(LHSBO, RHSBO); + std::swap(LHS, RHS); + bool Success = !I->swapOperands(); + assert(Success && "swapOperands failed"); + Success = false; + MadeChange = true; } else if (RHSBO) { // Turn (A+B)+(C+D) -> (((A+B)+C)+D). This guarantees that the RHS is not // part of the expression tree. @@ -322,7 +329,7 @@ void Reassociate::LinearizeExprTree(BinaryOperator *I, // linearized and optimized, emit them in-order. This function is written to be // tail recursive. void Reassociate::RewriteExprTree(BinaryOperator *I, - std::vector<ValueEntry> &Ops, + SmallVectorImpl<ValueEntry> &Ops, unsigned i) { if (i+2 == Ops.size()) { if (I->getOperand(0) != Ops[i].Op || @@ -369,6 +376,9 @@ void Reassociate::RewriteExprTree(BinaryOperator *I, // that should be processed next by the reassociation pass. // static Value *NegateValue(Value *V, Instruction *BI) { + if (Constant *C = dyn_cast<Constant>(V)) + return ConstantExpr::getNeg(C); + // We are trying to expose opportunity for reassociation. One of the things // that we want to do to achieve this is to push a negation as deep into an // expression chain as possible, to expose the add instructions. In practice, // X = -(A+12+C+D) into X = -A + -12 + -C + -D = -12 + -A + -C + -D // so that later, a: Y = 12+X could get reassociated with the -12 to eliminate // the constants. We assume that instcombine will clean up the mess later if - // we introduce tons of unnecessary negation instructions... + // we introduce tons of unnecessary negation instructions. // if (Instruction *I = dyn_cast<Instruction>(V)) if (I->getOpcode() == Instruction::Add && I->hasOneUse()) { @@ -393,10 +403,36 @@ static Value *NegateValue(Value *V, Instruction *BI) { I->setName(I->getName()+".neg"); return I; } + + // Okay, we need to materialize a negated version of V with an instruction. + // Scan the use lists of V to see if we have one already.
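The algebra NegateValue leans on when pushing a negation through an add chain is distribution of negation over addition, checked here with a standalone loop (plain C++; the use-scanning code of the hunk resumes just below):

  #include <cassert>

  int main() {
    for (int A = -16; A <= 16; ++A)
      for (int C = -16; C <= 16; ++C)
        for (int D = -16; D <= 16; ++D)
          // X = -(A+12+C+D)  ==>  X = -A + -12 + -C + -D
          assert(-(A + 12 + C + D) == -A + -12 + -C + -D);
    return 0;
  }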
+ for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){ + if (!BinaryOperator::isNeg(*UI)) continue; + + // We found one! Now we have to make sure that the definition dominates + // this use. We do this by moving it to the entry block (if it is a + // non-instruction value) or right after the definition. These negates will + // be zapped by reassociate later, so we don't need much finesse here. + BinaryOperator *TheNeg = cast<BinaryOperator>(*UI); + + BasicBlock::iterator InsertPt; + if (Instruction *InstInput = dyn_cast<Instruction>(V)) { + if (InvokeInst *II = dyn_cast<InvokeInst>(InstInput)) { + InsertPt = II->getNormalDest()->begin(); + } else { + InsertPt = InstInput; + ++InsertPt; + } + while (isa<PHINode>(InsertPt)) ++InsertPt; + } else { + InsertPt = TheNeg->getParent()->getParent()->getEntryBlock().begin(); + } + TheNeg->moveBefore(InsertPt); + return TheNeg; + } // Insert a 'neg' instruction that subtracts the value from zero to get the // negation. - // return BinaryOperator::CreateNeg(V, V->getName() + ".neg", BI); } @@ -427,12 +463,12 @@ static bool ShouldBreakUpSubtract(Instruction *Sub) { /// only used by an add, transform this into (X+(0-Y)) to promote better /// reassociation. static Instruction *BreakUpSubtract(Instruction *Sub, - std::map<AssertingVH<>, unsigned> &ValueRankMap) { - // Convert a subtract into an add and a neg instruction... so that sub - // instructions can be commuted with other add instructions... + DenseMap<AssertingVH<>, unsigned> &ValueRankMap) { + // Convert a subtract into an add and a neg instruction. This allows sub + // instructions to be commuted with other add instructions. // - // Calculate the negative value of Operand 1 of the sub instruction... - // and set it as the RHS of the add instruction we just made... + // Calculate the negative value of Operand 1 of the sub instruction, + // and set it as the RHS of the add instruction we just made. // Value *NegVal = NegateValue(Sub->getOperand(1), Sub); Instruction *New = @@ -452,7 +488,7 @@ static Instruction *BreakUpSubtract(Instruction *Sub, /// by one, change this into a multiply by a constant to assist with further /// reassociation. static Instruction *ConvertShiftToMul(Instruction *Shl, - std::map<AssertingVH<>, unsigned> &ValueRankMap) { + DenseMap<AssertingVH<>, unsigned> &ValueRankMap) { // If an operand of this shift is a reassociable multiply, or if the shift // is used by a reassociable multiply or add, turn into a multiply. if (isReassociableOp(Shl->getOperand(0), Instruction::Mul) || @@ -460,11 +496,10 @@ static Instruction *ConvertShiftToMul(Instruction *Shl, (isReassociableOp(Shl->use_back(), Instruction::Mul) || isReassociableOp(Shl->use_back(), Instruction::Add)))) { Constant *MulCst = ConstantInt::get(Shl->getType(), 1); - MulCst = - ConstantExpr::getShl(MulCst, cast<Constant>(Shl->getOperand(1))); + MulCst = ConstantExpr::getShl(MulCst, cast<Constant>(Shl->getOperand(1))); - Instruction *Mul = BinaryOperator::CreateMul(Shl->getOperand(0), MulCst, - "", Shl); + Instruction *Mul = + BinaryOperator::CreateMul(Shl->getOperand(0), MulCst, "", Shl); ValueRankMap.erase(Shl); Mul->takeName(Shl); Shl->replaceAllUsesWith(Mul); @@ -475,15 +510,16 @@ static Instruction *ConvertShiftToMul(Instruction *Shl, } // Scan backwards and forwards among values with the same rank as element i to -// see if X exists. If X does not exist, return i. -static unsigned FindInOperandList(std::vector<ValueEntry> &Ops, unsigned i, +// see if X exists. If X does not exist, return i. 
This is useful when +// scanning for 'x' when we see '-x' because they both get the same rank. +static unsigned FindInOperandList(SmallVectorImpl<ValueEntry> &Ops, unsigned i, Value *X) { unsigned XRank = Ops[i].Rank; unsigned e = Ops.size(); for (unsigned j = i+1; j != e && Ops[j].Rank == XRank; ++j) if (Ops[j].Op == X) return j; - // Scan backwards + // Scan backwards. for (unsigned j = i-1; j != ~0U && Ops[j].Rank == XRank; --j) if (Ops[j].Op == X) return j; @@ -492,7 +528,7 @@ static unsigned FindInOperandList(std::vector<ValueEntry> &Ops, unsigned i, /// EmitAddTreeOfValues - Emit a tree of add instructions, summing Ops together /// and returning the result. Insert the tree before I. -static Value *EmitAddTreeOfValues(Instruction *I, std::vector<Value*> &Ops) { +static Value *EmitAddTreeOfValues(Instruction *I, SmallVectorImpl<Value*> &Ops){ if (Ops.size() == 1) return Ops.back(); Value *V1 = Ops.back(); @@ -508,32 +544,57 @@ Value *Reassociate::RemoveFactorFromExpression(Value *V, Value *Factor) { BinaryOperator *BO = isReassociableOp(V, Instruction::Mul); if (!BO) return 0; - std::vector<ValueEntry> Factors; + SmallVector<ValueEntry, 8> Factors; LinearizeExprTree(BO, Factors); bool FoundFactor = false; - for (unsigned i = 0, e = Factors.size(); i != e; ++i) + bool NeedsNegate = false; + for (unsigned i = 0, e = Factors.size(); i != e; ++i) { if (Factors[i].Op == Factor) { FoundFactor = true; Factors.erase(Factors.begin()+i); break; } + + // If this is a negative version of this factor, remove it. + if (ConstantInt *FC1 = dyn_cast<ConstantInt>(Factor)) + if (ConstantInt *FC2 = dyn_cast<ConstantInt>(Factors[i].Op)) + if (FC1->getValue() == -FC2->getValue()) { + FoundFactor = NeedsNegate = true; + Factors.erase(Factors.begin()+i); + break; + } + } + if (!FoundFactor) { // Make sure to restore the operands to the expression tree. RewriteExprTree(BO, Factors); return 0; } - if (Factors.size() == 1) return Factors[0].Op; + BasicBlock::iterator InsertPt = BO; ++InsertPt; + + // If this was just a single multiply, remove the multiply and return the only + // remaining operand. + if (Factors.size() == 1) { + ValueRankMap.erase(BO); + BO->eraseFromParent(); + V = Factors[0].Op; + } else { + RewriteExprTree(BO, Factors); + V = BO; + } - RewriteExprTree(BO, Factors); - return BO; + if (NeedsNegate) + V = BinaryOperator::CreateNeg(V, "neg", InsertPt); + + return V; } /// FindSingleUseMultiplyFactors - If V is a single-use multiply, recursively /// add its operands as factors, otherwise add V to the list of factors. static void FindSingleUseMultiplyFactors(Value *V, - std::vector<Value*> &Factors) { + SmallVectorImpl<Value*> &Factors) { BinaryOperator *BO; if ((!V->hasOneUse() && !V->use_empty()) || !(BO = dyn_cast<BinaryOperator>(V)) || @@ -547,10 +608,228 @@ static void FindSingleUseMultiplyFactors(Value *V, FindSingleUseMultiplyFactors(BO->getOperand(0), Factors); } +/// OptimizeAndOrXor - Optimize a series of operands to an 'and', 'or', or 'xor' +/// instruction. This optimizes based on identities. If it can be reduced to +/// a single Value, it is returned, otherwise the Ops list is mutated as +/// necessary. +static Value *OptimizeAndOrXor(unsigned Opcode, + SmallVectorImpl<ValueEntry> &Ops) { + // Scan the operand lists looking for X and ~X pairs, along with X,X pairs. + // If we find any, we can simplify the expression. X&~X == 0, X|~X == -1. + for (unsigned i = 0, e = Ops.size(); i != e; ++i) { + // First, check for X and ~X in the operand list. 
+ assert(i < Ops.size()); + if (BinaryOperator::isNot(Ops[i].Op)) { // Cannot occur for ^. + Value *X = BinaryOperator::getNotArgument(Ops[i].Op); + unsigned FoundX = FindInOperandList(Ops, i, X); + if (FoundX != i) { + if (Opcode == Instruction::And) // ...&X&~X = 0 + return Constant::getNullValue(X->getType()); + + if (Opcode == Instruction::Or) // ...|X|~X = -1 + return Constant::getAllOnesValue(X->getType()); + } + } + + // Next, check for duplicate pairs of values, which we assume are next to + // each other, due to our sorting criteria. + assert(i < Ops.size()); + if (i+1 != Ops.size() && Ops[i+1].Op == Ops[i].Op) { + if (Opcode == Instruction::And || Opcode == Instruction::Or) { + // Drop duplicate values for And and Or. + Ops.erase(Ops.begin()+i); + --i; --e; + ++NumAnnihil; + continue; + } + + // Drop pairs of values for Xor. + assert(Opcode == Instruction::Xor); + if (e == 2) + return Constant::getNullValue(Ops[0].Op->getType()); + + // Y ^ X^X -> Y + Ops.erase(Ops.begin()+i, Ops.begin()+i+2); + i -= 1; e -= 2; + ++NumAnnihil; + } + } + return 0; +} +/// OptimizeAdd - Optimize a series of operands to an 'add' instruction. This +/// optimizes based on identities. If it can be reduced to a single Value, it +/// is returned, otherwise the Ops list is mutated as necessary. +Value *Reassociate::OptimizeAdd(Instruction *I, + SmallVectorImpl<ValueEntry> &Ops) { + // Scan the operand lists looking for X and -X pairs. If we find any, we + // can simplify the expression. X+-X == 0. While we're at it, scan for any + // duplicates. We want to canonicalize Y+Y+Y+Z -> 3*Y+Z. + // + // TODO: We could handle "X + ~X" -> "-1" if we wanted, since "-X = ~X+1". + // + for (unsigned i = 0, e = Ops.size(); i != e; ++i) { + Value *TheOp = Ops[i].Op; + // Check to see if we've seen this operand before. If so, we factor all + // instances of the operand together. Due to our sorting criteria, we know + // that these need to be next to each other in the vector. + if (i+1 != Ops.size() && Ops[i+1].Op == TheOp) { + // Rescan the list, remove all instances of this operand from the expr. + unsigned NumFound = 0; + do { + Ops.erase(Ops.begin()+i); + ++NumFound; + } while (i != Ops.size() && Ops[i].Op == TheOp); + + DEBUG(errs() << "\nFACTORING [" << NumFound << "]: " << *TheOp << '\n'); + ++NumFactor; + + // Insert a new multiply. + Value *Mul = ConstantInt::get(cast<IntegerType>(I->getType()), NumFound); + Mul = BinaryOperator::CreateMul(TheOp, Mul, "factor", I); + + // Now that we have inserted a multiply, optimize it. This allows us to + // handle cases that require multiple factoring steps, such as this: + // (X*2) + (X*2) + (X*2) -> (X*2)*3 -> X*6 + Mul = ReassociateExpression(cast<BinaryOperator>(Mul)); + + // If every add operand was a duplicate, return the multiply. + if (Ops.empty()) + return Mul; + + // Otherwise, we had some input that didn't have the dupe, such as + // "A + A + B" -> "A*2 + B". Add the new multiply to the list of + // things being added by this operation. + Ops.insert(Ops.begin(), ValueEntry(getRank(Mul), Mul)); + + --i; + e = Ops.size(); + continue; + } + + // Check for X and -X in the operand list. + if (!BinaryOperator::isNeg(TheOp)) + continue; + + Value *X = BinaryOperator::getNegArgument(TheOp); + unsigned FoundX = FindInOperandList(Ops, i, X); + if (FoundX == i) + continue; + + // Remove X and -X from the operand list. 
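The identities driving OptimizeAndOrXor above, plus the X/-X cancellation OptimizeAdd is applying here, checked exhaustively at 8 bits (standalone C++, not part of the patch; the hunk resumes below):

  #include <cassert>
  #include <cstdint>

  int main() {
    for (int v = 0; v != 256; ++v) {
      uint8_t X = (uint8_t)v;
      assert((uint8_t)(X & ~X) == 0);     // ...&X&~X == 0
      assert((uint8_t)(X | ~X) == 0xFF);  // ...|X|~X == -1 (all ones)
      assert((uint8_t)(X ^ X) == 0);      // Y ^ X^X == Y
      assert((uint8_t)(X + -X) == 0);     // X + -X == 0
    }
    return 0;
  }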
+ if (Ops.size() == 2) + return Constant::getNullValue(X->getType()); + + Ops.erase(Ops.begin()+i); + if (i < FoundX) + --FoundX; + else + --i; // Need to back up an extra one. + Ops.erase(Ops.begin()+FoundX); + ++NumAnnihil; + --i; // Revisit element. + e -= 2; // Removed two elements. + } + + // Scan the operand list, checking to see if there are any common factors + // between operands. Consider something like A*A+A*B*C+D. We would like to + // reassociate this to A*(A+B*C)+D, which reduces the number of multiplies. + // To efficiently find this, we count the number of times a factor occurs + // for any ADD operands that are MULs. + DenseMap<Value*, unsigned> FactorOccurrences; + + // Keep track of each multiply we see, to avoid triggering on (X*4)+(X*4) + // where they are actually the same multiply. + unsigned MaxOcc = 0; + Value *MaxOccVal = 0; + for (unsigned i = 0, e = Ops.size(); i != e; ++i) { + BinaryOperator *BOp = dyn_cast<BinaryOperator>(Ops[i].Op); + if (BOp == 0 || BOp->getOpcode() != Instruction::Mul || !BOp->use_empty()) + continue; + + // Compute all of the factors of this added value. + SmallVector<Value*, 8> Factors; + FindSingleUseMultiplyFactors(BOp, Factors); + assert(Factors.size() > 1 && "Bad linearize!"); + + // Add one to FactorOccurrences for each unique factor in this op. + SmallPtrSet<Value*, 8> Duplicates; + for (unsigned i = 0, e = Factors.size(); i != e; ++i) { + Value *Factor = Factors[i]; + if (!Duplicates.insert(Factor)) continue; + + unsigned Occ = ++FactorOccurrences[Factor]; + if (Occ > MaxOcc) { MaxOcc = Occ; MaxOccVal = Factor; } + + // If Factor is a negative constant, add the negated value as a factor + // because we can percolate the negate out. Watch for minint, which + // cannot be positivified. + if (ConstantInt *CI = dyn_cast<ConstantInt>(Factor)) + if (CI->getValue().isNegative() && !CI->getValue().isMinSignedValue()) { + Factor = ConstantInt::get(CI->getContext(), -CI->getValue()); + assert(!Duplicates.count(Factor) && + "Shouldn't have two constant factors, missed a canonicalize"); + + unsigned Occ = ++FactorOccurrences[Factor]; + if (Occ > MaxOcc) { MaxOcc = Occ; MaxOccVal = Factor; } + } + } + } + + // If any factor occurred more than one time, we can pull it out. + if (MaxOcc > 1) { + DEBUG(errs() << "\nFACTORING [" << MaxOcc << "]: " << *MaxOccVal << '\n'); + ++NumFactor; + + // Create a new instruction that uses the MaxOccVal twice. If we don't do + // this, we could otherwise run into situations where removing a factor + // from an expression will drop a use of maxocc, and this can cause + // RemoveFactorFromExpression on successive values to behave differently. + Instruction *DummyInst = BinaryOperator::CreateAdd(MaxOccVal, MaxOccVal); + SmallVector<Value*, 4> NewMulOps; + for (unsigned i = 0, e = Ops.size(); i != e; ++i) { + if (Value *V = RemoveFactorFromExpression(Ops[i].Op, MaxOccVal)) { + NewMulOps.push_back(V); + Ops.erase(Ops.begin()+i); + --i; --e; + } + } + + // No need for extra uses anymore. + delete DummyInst; + + unsigned NumAddedValues = NewMulOps.size(); + Value *V = EmitAddTreeOfValues(I, NewMulOps); + + // Now that we have inserted the add tree, optimize it. This allows us to + // handle cases that require multiple factoring steps, such as this: + // A*A*B + A*A*C --> A*(A*B+A*C) --> A*(A*(B+C)) + assert(NumAddedValues > 1 && "Each occurrence should contribute a value"); + V = ReassociateExpression(cast<BinaryOperator>(V)); + + // Create the multiply. 
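The occurrence counting above generalizes to any number of add operands. A rough standalone model (strings stand in for Value* and bestCommonFactor is an invented name; the per-operand set plays the role of the pass's Duplicates set):

#include <map>
#include <set>
#include <string>
#include <vector>

// Count how often each multiply factor occurs across the add operands and
// return the most common one, mirroring the FactorOccurrences map above.
static std::string
bestCommonFactor(const std::vector<std::vector<std::string> > &AddOperands) {
  std::map<std::string, unsigned> FactorOccurrences;
  unsigned MaxOcc = 0;
  std::string MaxOccVal;
  for (size_t i = 0; i != AddOperands.size(); ++i) {
    // Deduplicate within one operand so A*A only counts "A" once here.
    std::set<std::string> Duplicates(AddOperands[i].begin(),
                                     AddOperands[i].end());
    for (std::set<std::string>::iterator F = Duplicates.begin(),
         E = Duplicates.end(); F != E; ++F) {
      unsigned Occ = ++FactorOccurrences[*F];
      if (Occ > MaxOcc) { MaxOcc = Occ; MaxOccVal = *F; }
    }
  }
  return MaxOcc > 1 ? MaxOccVal : std::string();
}
// For A*A + A*B*C (D contributes no factors), "A" occurs in two operands and
// wins, yielding the A*(A + B*C) + D shape described in the comments above.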
+ Value *V2 = BinaryOperator::CreateMul(V, MaxOccVal, "tmp", I); + + // Rerun associate on the multiply in case the inner expression turned into + // a multiply. We want to make sure that we keep things in canonical form. + V2 = ReassociateExpression(cast<BinaryOperator>(V2)); + + // If every add operand included the factor (e.g. "A*B + A*C"), then the + // entire result expression is just the multiply "A*(B+C)". + if (Ops.empty()) + return V2; + + // Otherwise, we had some input that didn't have the factor, such as + // "A*B + A*C + D" -> "A*(B+C) + D". Add the new multiply to the list of + // things being added by this operation. + Ops.insert(Ops.begin(), ValueEntry(getRank(V2), V2)); + } + + return 0; +} Value *Reassociate::OptimizeExpression(BinaryOperator *I, - std::vector<ValueEntry> &Ops) { + SmallVectorImpl<ValueEntry> &Ops) { // Now that we have the linearized expression tree, try to optimize it. // Start by folding any constants that we found. bool IterateOptimization = false; @@ -570,198 +849,53 @@ Value *Reassociate::OptimizeExpression(BinaryOperator *I, switch (Opcode) { default: break; case Instruction::And: - if (CstVal->isZero()) { // ... & 0 -> 0 - ++NumAnnihil; + if (CstVal->isZero()) // X & 0 -> 0 return CstVal; - } else if (CstVal->isAllOnesValue()) { // ... & -1 -> ... + if (CstVal->isAllOnesValue()) // X & -1 -> X Ops.pop_back(); - } break; case Instruction::Mul: - if (CstVal->isZero()) { // ... * 0 -> 0 + if (CstVal->isZero()) { // X * 0 -> 0 ++NumAnnihil; return CstVal; - } else if (cast<ConstantInt>(CstVal)->isOne()) { - Ops.pop_back(); // ... * 1 -> ... } + + if (cast<ConstantInt>(CstVal)->isOne()) + Ops.pop_back(); // X * 1 -> X break; case Instruction::Or: - if (CstVal->isAllOnesValue()) { // ... | -1 -> -1 - ++NumAnnihil; + if (CstVal->isAllOnesValue()) // X | -1 -> -1 return CstVal; - } // FALLTHROUGH! case Instruction::Add: case Instruction::Xor: - if (CstVal->isZero()) // ... [|^+] 0 -> ... + if (CstVal->isZero()) // X [|^+] 0 -> X Ops.pop_back(); break; } if (Ops.size() == 1) return Ops[0].Op; - // Handle destructive annihilation do to identities between elements in the + // Handle destructive annihilation due to identities between elements in the // argument list here. switch (Opcode) { default: break; case Instruction::And: case Instruction::Or: - case Instruction::Xor: - // Scan the operand lists looking for X and ~X pairs, along with X,X pairs. - // If we find any, we can simplify the expression. X&~X == 0, X|~X == -1. - for (unsigned i = 0, e = Ops.size(); i != e; ++i) { - // First, check for X and ~X in the operand list. - assert(i < Ops.size()); - if (BinaryOperator::isNot(Ops[i].Op)) { // Cannot occur for ^. - Value *X = BinaryOperator::getNotArgument(Ops[i].Op); - unsigned FoundX = FindInOperandList(Ops, i, X); - if (FoundX != i) { - if (Opcode == Instruction::And) { // ...&X&~X = 0 - ++NumAnnihil; - return Constant::getNullValue(X->getType()); - } else if (Opcode == Instruction::Or) { // ...|X|~X = -1 - ++NumAnnihil; - return Constant::getAllOnesValue(X->getType()); - } - } - } - - // Next, check for duplicate pairs of values, which we assume are next to - // each other, due to our sorting criteria. - assert(i < Ops.size()); - if (i+1 != Ops.size() && Ops[i+1].Op == Ops[i].Op) { - if (Opcode == Instruction::And || Opcode == Instruction::Or) { - // Drop duplicate values. 
- Ops.erase(Ops.begin()+i); - --i; --e; - IterateOptimization = true; - ++NumAnnihil; - } else { - assert(Opcode == Instruction::Xor); - if (e == 2) { - ++NumAnnihil; - return Constant::getNullValue(Ops[0].Op->getType()); - } - // ... X^X -> ... - Ops.erase(Ops.begin()+i, Ops.begin()+i+2); - i -= 1; e -= 2; - IterateOptimization = true; - ++NumAnnihil; - } - } - } + case Instruction::Xor: { + unsigned NumOps = Ops.size(); + if (Value *Result = OptimizeAndOrXor(Opcode, Ops)) + return Result; + IterateOptimization |= Ops.size() != NumOps; break; + } - case Instruction::Add: - // Scan the operand lists looking for X and -X pairs. If we find any, we - // can simplify the expression. X+-X == 0. - for (unsigned i = 0, e = Ops.size(); i != e; ++i) { - assert(i < Ops.size()); - // Check for X and -X in the operand list. - if (BinaryOperator::isNeg(Ops[i].Op)) { - Value *X = BinaryOperator::getNegArgument(Ops[i].Op); - unsigned FoundX = FindInOperandList(Ops, i, X); - if (FoundX != i) { - // Remove X and -X from the operand list. - if (Ops.size() == 2) { - ++NumAnnihil; - return Constant::getNullValue(X->getType()); - } else { - Ops.erase(Ops.begin()+i); - if (i < FoundX) - --FoundX; - else - --i; // Need to back up an extra one. - Ops.erase(Ops.begin()+FoundX); - IterateOptimization = true; - ++NumAnnihil; - --i; // Revisit element. - e -= 2; // Removed two elements. - } - } - } - } - - - // Scan the operand list, checking to see if there are any common factors - // between operands. Consider something like A*A+A*B*C+D. We would like to - // reassociate this to A*(A+B*C)+D, which reduces the number of multiplies. - // To efficiently find this, we count the number of times a factor occurs - // for any ADD operands that are MULs. - std::map<Value*, unsigned> FactorOccurrences; - unsigned MaxOcc = 0; - Value *MaxOccVal = 0; - for (unsigned i = 0, e = Ops.size(); i != e; ++i) { - if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(Ops[i].Op)) { - if (BOp->getOpcode() == Instruction::Mul && BOp->use_empty()) { - // Compute all of the factors of this added value. - std::vector<Value*> Factors; - FindSingleUseMultiplyFactors(BOp, Factors); - assert(Factors.size() > 1 && "Bad linearize!"); - - // Add one to FactorOccurrences for each unique factor in this op. - if (Factors.size() == 2) { - unsigned Occ = ++FactorOccurrences[Factors[0]]; - if (Occ > MaxOcc) { MaxOcc = Occ; MaxOccVal = Factors[0]; } - if (Factors[0] != Factors[1]) { // Don't double count A*A. - Occ = ++FactorOccurrences[Factors[1]]; - if (Occ > MaxOcc) { MaxOcc = Occ; MaxOccVal = Factors[1]; } - } - } else { - std::set<Value*> Duplicates; - for (unsigned i = 0, e = Factors.size(); i != e; ++i) { - if (Duplicates.insert(Factors[i]).second) { - unsigned Occ = ++FactorOccurrences[Factors[i]]; - if (Occ > MaxOcc) { MaxOcc = Occ; MaxOccVal = Factors[i]; } - } - } - } - } - } - } - - // If any factor occurred more than one time, we can pull it out. - if (MaxOcc > 1) { - DEBUG(errs() << "\nFACTORING [" << MaxOcc << "]: " << *MaxOccVal << "\n"); - - // Create a new instruction that uses the MaxOccVal twice. If we don't do - // this, we could otherwise run into situations where removing a factor - // from an expression will drop a use of maxocc, and this can cause - // RemoveFactorFromExpression on successive values to behave differently. 
- Instruction *DummyInst = BinaryOperator::CreateAdd(MaxOccVal, MaxOccVal); - std::vector<Value*> NewMulOps; - for (unsigned i = 0, e = Ops.size(); i != e; ++i) { - if (Value *V = RemoveFactorFromExpression(Ops[i].Op, MaxOccVal)) { - NewMulOps.push_back(V); - Ops.erase(Ops.begin()+i); - --i; --e; - } - } - - // No need for extra uses anymore. - delete DummyInst; - - unsigned NumAddedValues = NewMulOps.size(); - Value *V = EmitAddTreeOfValues(I, NewMulOps); - Value *V2 = BinaryOperator::CreateMul(V, MaxOccVal, "tmp", I); - - // Now that we have inserted V and its sole use, optimize it. This allows - // us to handle cases that require multiple factoring steps, such as this: - // A*A*B + A*A*C --> A*(A*B+A*C) --> A*(A*(B+C)) - if (NumAddedValues > 1) - ReassociateExpression(cast<BinaryOperator>(V)); - - ++NumFactor; - - if (Ops.empty()) - return V2; + case Instruction::Add: { + unsigned NumOps = Ops.size(); + if (Value *Result = OptimizeAdd(I, Ops)) + return Result; + IterateOptimization |= Ops.size() != NumOps; + } - // Add the new value to the list of things being added. - Ops.insert(Ops.begin(), ValueEntry(getRank(V2), V2)); - - // Rewrite the tree so that there is now a use of V. - RewriteExprTree(I, Ops); - return OptimizeExpression(I, Ops); - } break; //case Instruction::Mul: } @@ -826,13 +960,14 @@ void Reassociate::ReassociateBB(BasicBlock *BB) { } } -void Reassociate::ReassociateExpression(BinaryOperator *I) { +Value *Reassociate::ReassociateExpression(BinaryOperator *I) { - // First, walk the expression tree, linearizing the tree, collecting - std::vector<ValueEntry> Ops; + // First, walk the expression tree, linearizing the tree, collecting the + // operand information. + SmallVector<ValueEntry, 8> Ops; LinearizeExprTree(I, Ops); - DEBUG(errs() << "RAIn:\t"; PrintOps(I, Ops); errs() << "\n"); + DEBUG(errs() << "RAIn:\t"; PrintOps(I, Ops); errs() << '\n'); // Now that we have linearized the tree to a list and have gathered all of // the operands and their ranks, sort the operands by their rank. Use a @@ -847,10 +982,11 @@ void Reassociate::ReassociateExpression(BinaryOperator *I) { if (Value *V = OptimizeExpression(I, Ops)) { // This expression tree simplified to something that isn't a tree, // eliminate it. - DEBUG(errs() << "Reassoc to scalar: " << *V << "\n"); + DEBUG(errs() << "Reassoc to scalar: " << *V << '\n'); I->replaceAllUsesWith(V); RemoveDeadBinaryOp(I); - return; + ++NumAnnihil; + return V; } // We want to sink immediates as deeply as possible except in the case where @@ -861,22 +997,24 @@ void Reassociate::ReassociateExpression(BinaryOperator *I) { cast<Instruction>(I->use_back())->getOpcode() == Instruction::Add && isa<ConstantInt>(Ops.back().Op) && cast<ConstantInt>(Ops.back().Op)->isAllOnesValue()) { - Ops.insert(Ops.begin(), Ops.back()); - Ops.pop_back(); + ValueEntry Tmp = Ops.pop_back_val(); + Ops.insert(Ops.begin(), Tmp); } - DEBUG(errs() << "RAOut:\t"; PrintOps(I, Ops); errs() << "\n"); + DEBUG(errs() << "RAOut:\t"; PrintOps(I, Ops); errs() << '\n'); if (Ops.size() == 1) { // This expression tree simplified to something that isn't a tree, // eliminate it. I->replaceAllUsesWith(Ops[0].Op); RemoveDeadBinaryOp(I); - } else { - // Now that we ordered and optimized the expressions, splat them back into - // the expression tree, removing any unneeded nodes. - RewriteExprTree(I, Ops); + return Ops[0].Op; } + + // Now that we ordered and optimized the expressions, splat them back into + // the expression tree, removing any unneeded nodes. 
+ RewriteExprTree(I, Ops); + return I; } @@ -888,7 +1026,7 @@ bool Reassociate::runOnFunction(Function &F) { for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) ReassociateBB(FI); - // We are done with the rank map... + // We are done with the rank map. RankMap.clear(); ValueRankMap.clear(); return MadeChange; diff --git a/lib/Transforms/Scalar/SCCVN.cpp b/lib/Transforms/Scalar/SCCVN.cpp index dbc82e1..f91fbda 100644 --- a/lib/Transforms/Scalar/SCCVN.cpp +++ b/lib/Transforms/Scalar/SCCVN.cpp @@ -34,7 +34,6 @@ #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Transforms/Utils/SSAUpdater.h" -#include <cstdio> using namespace llvm; STATISTIC(NumSCCVNInstr, "Number of instructions deleted by SCCVN"); diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp index b040a27..79bb7c5 100644 --- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp +++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp @@ -74,6 +74,10 @@ namespace { private: TargetData *TD; + /// DeadInsts - Keep track of instructions we have made dead, so that + /// we can remove them after we are done working. + SmallVector<Value*, 32> DeadInsts; + /// AllocaInfo - When analyzing uses of an alloca instruction, this captures /// information about the uses. All these fields are initialized to false /// and set to true when something is learned. @@ -102,25 +106,29 @@ namespace { int isSafeAllocaToScalarRepl(AllocaInst *AI); - void isSafeUseOfAllocation(Instruction *User, AllocaInst *AI, - AllocaInfo &Info); - void isSafeElementUse(Value *Ptr, bool isFirstElt, AllocaInst *AI, - AllocaInfo &Info); - void isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocaInst *AI, - unsigned OpNo, AllocaInfo &Info); - void isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocaInst *AI, - AllocaInfo &Info); + void isSafeForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset, + AllocaInfo &Info); + void isSafeGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t &Offset, + AllocaInfo &Info); + void isSafeMemAccess(AllocaInst *AI, uint64_t Offset, uint64_t MemSize, + const Type *MemOpType, bool isStore, AllocaInfo &Info); + bool TypeHasComponent(const Type *T, uint64_t Offset, uint64_t Size); + uint64_t FindElementAndOffset(const Type *&T, uint64_t &Offset, + const Type *&IdxTy); void DoScalarReplacement(AllocaInst *AI, std::vector<AllocaInst*> &WorkList); - void CleanupGEP(GetElementPtrInst *GEP); - void CleanupAllocaUsers(AllocaInst *AI); + void DeleteDeadInstructions(); + void CleanupAllocaUsers(Value *V); AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocaInst *Base); - void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocaInst *AI, - SmallVector<AllocaInst*, 32> &NewElts); - - void RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst, + void RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset, + SmallVector<AllocaInst*, 32> &NewElts); + void RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset, + SmallVector<AllocaInst*, 32> &NewElts); + void RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset, + SmallVector<AllocaInst*, 32> &NewElts); + void RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst, AllocaInst *AI, SmallVector<AllocaInst*, 32> &NewElts); void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI, @@ -360,399 +368,350 @@ void SROA::DoScalarReplacement(AllocaInst *AI, } } - // Now that we have created the alloca instructions that we want to 
use, - // expand the getelementptr instructions to use them. - while (!AI->use_empty()) { - Instruction *User = cast<Instruction>(AI->use_back()); - if (BitCastInst *BCInst = dyn_cast<BitCastInst>(User)) { - RewriteBitCastUserOfAlloca(BCInst, AI, ElementAllocas); - BCInst->eraseFromParent(); - continue; - } - - // Replace: - // %res = load { i32, i32 }* %alloc - // with: - // %load.0 = load i32* %alloc.0 - // %insert.0 insertvalue { i32, i32 } zeroinitializer, i32 %load.0, 0 - // %load.1 = load i32* %alloc.1 - // %insert = insertvalue { i32, i32 } %insert.0, i32 %load.1, 1 - // (Also works for arrays instead of structs) - if (LoadInst *LI = dyn_cast<LoadInst>(User)) { - Value *Insert = UndefValue::get(LI->getType()); - for (unsigned i = 0, e = ElementAllocas.size(); i != e; ++i) { - Value *Load = new LoadInst(ElementAllocas[i], "load", LI); - Insert = InsertValueInst::Create(Insert, Load, i, "insert", LI); - } - LI->replaceAllUsesWith(Insert); - LI->eraseFromParent(); - continue; - } - - // Replace: - // store { i32, i32 } %val, { i32, i32 }* %alloc - // with: - // %val.0 = extractvalue { i32, i32 } %val, 0 - // store i32 %val.0, i32* %alloc.0 - // %val.1 = extractvalue { i32, i32 } %val, 1 - // store i32 %val.1, i32* %alloc.1 - // (Also works for arrays instead of structs) - if (StoreInst *SI = dyn_cast<StoreInst>(User)) { - Value *Val = SI->getOperand(0); - for (unsigned i = 0, e = ElementAllocas.size(); i != e; ++i) { - Value *Extract = ExtractValueInst::Create(Val, i, Val->getName(), SI); - new StoreInst(Extract, ElementAllocas[i], SI); - } - SI->eraseFromParent(); - continue; - } - - GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User); - // We now know that the GEP is of the form: GEP <ptr>, 0, <cst> - unsigned Idx = - (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue(); - - assert(Idx < ElementAllocas.size() && "Index out of range?"); - AllocaInst *AllocaToUse = ElementAllocas[Idx]; - - Value *RepValue; - if (GEPI->getNumOperands() == 3) { - // Do not insert a new getelementptr instruction with zero indices, only - // to have it optimized out later. - RepValue = AllocaToUse; - } else { - // We are indexing deeply into the structure, so we still need a - // getelement ptr instruction to finish the indexing. This may be - // expanded itself once the worklist is rerun. - // - SmallVector<Value*, 8> NewArgs; - NewArgs.push_back(Constant::getNullValue( - Type::getInt32Ty(AI->getContext()))); - NewArgs.append(GEPI->op_begin()+3, GEPI->op_end()); - RepValue = GetElementPtrInst::Create(AllocaToUse, NewArgs.begin(), - NewArgs.end(), "", GEPI); - RepValue->takeName(GEPI); - } - - // If this GEP is to the start of the aggregate, check for memcpys. - if (Idx == 0 && GEPI->hasAllZeroIndices()) - RewriteBitCastUserOfAlloca(GEPI, AI, ElementAllocas); - - // Move all of the users over to the new GEP. - GEPI->replaceAllUsesWith(RepValue); - // Delete the old GEP - GEPI->eraseFromParent(); - } + // Now that we have created the new alloca instructions, rewrite all the + // uses of the old alloca. + RewriteForScalarRepl(AI, AI, 0, ElementAllocas); - // Finally, delete the Alloca instruction + // Now erase any instructions that were made dead while rewriting the alloca. + DeleteDeadInstructions(); AI->eraseFromParent(); + NumReplaced++; } -/// isSafeElementUse - Check to see if this use is an allowed use for a -/// getelementptr instruction of an array aggregate allocation. isFirstElt -/// indicates whether Ptr is known to the start of the aggregate. 
-void SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocaInst *AI,
-                            AllocaInfo &Info) {
-  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
-       I != E; ++I) {
-    Instruction *User = cast<Instruction>(*I);
-    switch (User->getOpcode()) {
-    case Instruction::Load:  break;
-    case Instruction::Store:
-      // Store is ok if storing INTO the pointer, not storing the pointer
-      if (User->getOperand(0) == Ptr) return MarkUnsafe(Info);
-      break;
-    case Instruction::GetElementPtr: {
-      GetElementPtrInst *GEP = cast<GetElementPtrInst>(User);
-      bool AreAllZeroIndices = isFirstElt;
-      if (GEP->getNumOperands() > 1 &&
-          (!isa<ConstantInt>(GEP->getOperand(1)) ||
-           !cast<ConstantInt>(GEP->getOperand(1))->isZero()))
-        // Using pointer arithmetic to navigate the array.
-        return MarkUnsafe(Info);
-
-      // Verify that any array subscripts are in range.
-      for (gep_type_iterator GEPIt = gep_type_begin(GEP),
-           E = gep_type_end(GEP); GEPIt != E; ++GEPIt) {
-        // Ignore struct elements, no extra checking needed for these.
-        if (isa<StructType>(*GEPIt))
-          continue;
-
-        // This GEP indexes an array.  Verify that this is an in-range
-        // constant integer. Specifically, consider A[0][i]. We cannot know that
-        // the user isn't doing invalid things like allowing i to index an
-        // out-of-range subscript that accesses A[1].  Because of this, we have
-        // to reject SROA of any accesses into structs where any of the
-        // components are variables.
-        ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPIt.getOperand());
-        if (!IdxVal) return MarkUnsafe(Info);
-
-        // Are all indices still zero?
-        AreAllZeroIndices &= IdxVal->isZero();
-
-        if (const ArrayType *AT = dyn_cast<ArrayType>(*GEPIt)) {
-          if (IdxVal->getZExtValue() >= AT->getNumElements())
-            return MarkUnsafe(Info);
-        } else if (const VectorType *VT = dyn_cast<VectorType>(*GEPIt)) {
-          if (IdxVal->getZExtValue() >= VT->getNumElements())
-            return MarkUnsafe(Info);
-        }
+/// DeleteDeadInstructions - Erase instructions on the DeadInsts list,
+/// recursively including all their operands that become trivially dead.
+void SROA::DeleteDeadInstructions() {
+  while (!DeadInsts.empty()) {
+    Instruction *I = cast<Instruction>(DeadInsts.pop_back_val());
+
+    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
+      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
+        // Zero out the operand and see if it becomes trivially dead.
+        // (But, don't add allocas to the dead instruction list -- they are
+        // already on the worklist and will be deleted separately.)
+        *OI = 0;
+        if (isInstructionTriviallyDead(U) && !isa<AllocaInst>(U))
+          DeadInsts.push_back(U);
       }
-
-      isSafeElementUse(GEP, AreAllZeroIndices, AI, Info);
-      if (Info.isUnsafe) return;
-      break;
-    }
-    case Instruction::BitCast:
-      if (isFirstElt) {
-        isSafeUseOfBitCastedAllocation(cast<BitCastInst>(User), AI, Info);
-        if (Info.isUnsafe) return;
-        break;
-      }
-      DEBUG(errs() << "  Transformation preventing inst: " << *User << '\n');
-      return MarkUnsafe(Info);
-    case Instruction::Call:
-      if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
-        if (isFirstElt) {
-          isSafeMemIntrinsicOnAllocation(MI, AI, I.getOperandNo(), Info);
-          if (Info.isUnsafe) return;
-          break;
-        }
-      }
-      DEBUG(errs() << "  Transformation preventing inst: " << *User << '\n');
-      return MarkUnsafe(Info);
-    default:
-      DEBUG(errs() << "  Transformation preventing inst: " << *User << '\n');
-      return MarkUnsafe(Info);
-    }
-  }
-  return;  // All users look ok :)
-}
-/// AllUsersAreLoads - Return true if all users of this value are loads.
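The deferred-deletion pattern introduced above can be modeled without LLVM types. A toy sketch (Node, NumUses, and deleteDeadNodes are inventions for illustration; the real code relies on isInstructionTriviallyDead and eraseFromParent):

#include <vector>

// Toy model of the deferred-deletion worklist: removing one node may drop
// the use count of its operands to zero, queueing them in turn.
struct Node {
  std::vector<Node*> Operands;
  unsigned NumUses;
  bool Deleted;
  Node() : NumUses(0), Deleted(false) {}
};

static void deleteDeadNodes(std::vector<Node*> &DeadList) {
  while (!DeadList.empty()) {
    Node *N = DeadList.back();
    DeadList.pop_back();
    for (size_t i = 0; i != N->Operands.size(); ++i) {
      Node *Op = N->Operands[i];
      // Drop the use and see if the operand became trivially dead.
      if (Op && --Op->NumUses == 0 && !Op->Deleted)
        DeadList.push_back(Op);
    }
    // Clearing the operands makes revisiting a node harmless, the way the
    // real code zeroes out each operand before the final erase.
    N->Operands.clear();
    N->Deleted = true;
  }
}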
-static bool AllUsersAreLoads(Value *Ptr) { - for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end(); - I != E; ++I) - if (cast<Instruction>(*I)->getOpcode() != Instruction::Load) - return false; - return true; -} - -/// isSafeUseOfAllocation - Check if this user is an allowed use for an -/// aggregate allocation. -void SROA::isSafeUseOfAllocation(Instruction *User, AllocaInst *AI, - AllocaInfo &Info) { - if (BitCastInst *C = dyn_cast<BitCastInst>(User)) - return isSafeUseOfBitCastedAllocation(C, AI, Info); - - if (LoadInst *LI = dyn_cast<LoadInst>(User)) - if (!LI->isVolatile()) - return;// Loads (returning a first class aggregrate) are always rewritable - - if (StoreInst *SI = dyn_cast<StoreInst>(User)) - if (!SI->isVolatile() && SI->getOperand(0) != AI) - return;// Store is ok if storing INTO the pointer, not storing the pointer - - GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User); - if (GEPI == 0) - return MarkUnsafe(Info); - - gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI); - - // The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>". - if (I == E || - I.getOperand() != Constant::getNullValue(I.getOperand()->getType())) { - return MarkUnsafe(Info); + I->eraseFromParent(); } +} + +/// isSafeForScalarRepl - Check if instruction I is a safe use with regard to +/// performing scalar replacement of alloca AI. The results are flagged in +/// the Info parameter. Offset indicates the position within AI that is +/// referenced by this instruction. +void SROA::isSafeForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset, + AllocaInfo &Info) { + for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E; ++UI) { + Instruction *User = cast<Instruction>(*UI); - ++I; - if (I == E) return MarkUnsafe(Info); // ran out of GEP indices?? - - bool IsAllZeroIndices = true; - - // If the first index is a non-constant index into an array, see if we can - // handle it as a special case. - if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) { - if (!isa<ConstantInt>(I.getOperand())) { - IsAllZeroIndices = 0; - uint64_t NumElements = AT->getNumElements(); - - // If this is an array index and the index is not constant, we cannot - // promote... that is unless the array has exactly one or two elements in - // it, in which case we CAN promote it, but we have to canonicalize this - // out if this is the only problem. 
- if ((NumElements == 1 || NumElements == 2) && - AllUsersAreLoads(GEPI)) { + if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) { + isSafeForScalarRepl(BC, AI, Offset, Info); + } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) { + uint64_t GEPOffset = Offset; + isSafeGEP(GEPI, AI, GEPOffset, Info); + if (!Info.isUnsafe) + isSafeForScalarRepl(GEPI, AI, GEPOffset, Info); + } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) { + ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength()); + if (Length) + isSafeMemAccess(AI, Offset, Length->getZExtValue(), 0, + UI.getOperandNo() == 1, Info); + else + MarkUnsafe(Info); + } else if (LoadInst *LI = dyn_cast<LoadInst>(User)) { + if (!LI->isVolatile()) { + const Type *LIType = LI->getType(); + isSafeMemAccess(AI, Offset, TD->getTypeAllocSize(LIType), + LIType, false, Info); + } else + MarkUnsafe(Info); + } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) { + // Store is ok if storing INTO the pointer, not storing the pointer + if (!SI->isVolatile() && SI->getOperand(0) != I) { + const Type *SIType = SI->getOperand(0)->getType(); + isSafeMemAccess(AI, Offset, TD->getTypeAllocSize(SIType), + SIType, true, Info); + } else + MarkUnsafe(Info); + } else if (isa<DbgInfoIntrinsic>(UI)) { + // If one user is DbgInfoIntrinsic then check if all users are + // DbgInfoIntrinsics. + if (OnlyUsedByDbgInfoIntrinsics(I)) { Info.needsCleanup = true; - return; // Canonicalization required! + return; } - return MarkUnsafe(Info); + MarkUnsafe(Info); + } else { + DEBUG(errs() << " Transformation preventing inst: " << *User << '\n'); + MarkUnsafe(Info); } + if (Info.isUnsafe) return; } - +} + +/// isSafeGEP - Check if a GEP instruction can be handled for scalar +/// replacement. It is safe when all the indices are constant, in-bounds +/// references, and when the resulting offset corresponds to an element within +/// the alloca type. The results are flagged in the Info parameter. Upon +/// return, Offset is adjusted as specified by the GEP indices. +void SROA::isSafeGEP(GetElementPtrInst *GEPI, AllocaInst *AI, + uint64_t &Offset, AllocaInfo &Info) { + gep_type_iterator GEPIt = gep_type_begin(GEPI), E = gep_type_end(GEPI); + if (GEPIt == E) + return; + // Walk through the GEP type indices, checking the types that this indexes // into. - for (; I != E; ++I) { + for (; GEPIt != E; ++GEPIt) { // Ignore struct elements, no extra checking needed for these. - if (isa<StructType>(*I)) + if (isa<StructType>(*GEPIt)) continue; - - ConstantInt *IdxVal = dyn_cast<ConstantInt>(I.getOperand()); - if (!IdxVal) return MarkUnsafe(Info); - // Are all indices still zero? - IsAllZeroIndices &= IdxVal->isZero(); - - if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) { - // This GEP indexes an array. Verify that this is an in-range constant - // integer. Specifically, consider A[0][i]. We cannot know that the user - // isn't doing invalid things like allowing i to index an out-of-range - // subscript that accesses A[1]. Because of this, we have to reject SROA - // of any accesses into structs where any of the components are variables. 
- if (IdxVal->getZExtValue() >= AT->getNumElements()) - return MarkUnsafe(Info); - } else if (const VectorType *VT = dyn_cast<VectorType>(*I)) { - if (IdxVal->getZExtValue() >= VT->getNumElements()) - return MarkUnsafe(Info); + ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPIt.getOperand()); + if (!IdxVal) + return MarkUnsafe(Info); + } + + // Compute the offset due to this GEP and check if the alloca has a + // component element at that offset. + SmallVector<Value*, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end()); + Offset += TD->getIndexedOffset(GEPI->getPointerOperandType(), + &Indices[0], Indices.size()); + if (!TypeHasComponent(AI->getAllocatedType(), Offset, 0)) + MarkUnsafe(Info); +} + +/// isSafeMemAccess - Check if a load/store/memcpy operates on the entire AI +/// alloca or has an offset and size that corresponds to a component element +/// within it. The offset checked here may have been formed from a GEP with a +/// pointer bitcasted to a different type. +void SROA::isSafeMemAccess(AllocaInst *AI, uint64_t Offset, uint64_t MemSize, + const Type *MemOpType, bool isStore, + AllocaInfo &Info) { + // Check if this is a load/store of the entire alloca. + if (Offset == 0 && MemSize == TD->getTypeAllocSize(AI->getAllocatedType())) { + bool UsesAggregateType = (MemOpType == AI->getAllocatedType()); + // This is safe for MemIntrinsics (where MemOpType is 0), integer types + // (which are essentially the same as the MemIntrinsics, especially with + // regard to copying padding between elements), or references using the + // aggregate type of the alloca. + if (!MemOpType || isa<IntegerType>(MemOpType) || UsesAggregateType) { + if (!UsesAggregateType) { + if (isStore) + Info.isMemCpyDst = true; + else + Info.isMemCpySrc = true; + } + return; } } - - // If there are any non-simple uses of this getelementptr, make sure to reject - // them. - return isSafeElementUse(GEPI, IsAllZeroIndices, AI, Info); + // Check if the offset/size correspond to a component within the alloca type. + const Type *T = AI->getAllocatedType(); + if (TypeHasComponent(T, Offset, MemSize)) + return; + + return MarkUnsafe(Info); } -/// isSafeMemIntrinsicOnAllocation - Check if the specified memory -/// intrinsic can be promoted by SROA. At this point, we know that the operand -/// of the memintrinsic is a pointer to the beginning of the allocation. -void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocaInst *AI, - unsigned OpNo, AllocaInfo &Info) { - // If not constant length, give up. - ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength()); - if (!Length) return MarkUnsafe(Info); - - // If not the whole aggregate, give up. - if (Length->getZExtValue() != - TD->getTypeAllocSize(AI->getType()->getElementType())) - return MarkUnsafe(Info); - - // We only know about memcpy/memset/memmove. - if (!isa<MemIntrinsic>(MI)) - return MarkUnsafe(Info); - - // Otherwise, we can transform it. Determine whether this is a memcpy/set - // into or out of the aggregate. - if (OpNo == 1) - Info.isMemCpyDst = true; - else { - assert(OpNo == 2); - Info.isMemCpySrc = true; +/// TypeHasComponent - Return true if T has a component type with the +/// specified offset and size. If Size is zero, do not check the size. 
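As a concrete reading of the check that follows, take the aggregate { i32, [4 x i16] } with a 4-byte i32 and 2-byte i16 (example sizes, not from the patch):

// Query (Offset=6, Size=2) against { i32, [4 x i16] }:
//   struct level: offset 6 lands in field 1 ([4 x i16] at byte 4) -> Offset=2
//   array level:  element size 2, so 2 % 2 == 0                   -> Offset=0
//   i16 element:  Offset == 0 and EltSize == Size == 2            -> true
// Query (Offset=3, Size=2) fails instead: byte 3 is inside the i32 field at
// a nonzero offset, and the two bytes would straddle an element boundary.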
+bool SROA::TypeHasComponent(const Type *T, uint64_t Offset, uint64_t Size) { + const Type *EltTy; + uint64_t EltSize; + if (const StructType *ST = dyn_cast<StructType>(T)) { + const StructLayout *Layout = TD->getStructLayout(ST); + unsigned EltIdx = Layout->getElementContainingOffset(Offset); + EltTy = ST->getContainedType(EltIdx); + EltSize = TD->getTypeAllocSize(EltTy); + Offset -= Layout->getElementOffset(EltIdx); + } else if (const ArrayType *AT = dyn_cast<ArrayType>(T)) { + EltTy = AT->getElementType(); + EltSize = TD->getTypeAllocSize(EltTy); + if (Offset >= AT->getNumElements() * EltSize) + return false; + Offset %= EltSize; + } else { + return false; } + if (Offset == 0 && (Size == 0 || EltSize == Size)) + return true; + // Check if the component spans multiple elements. + if (Offset + Size > EltSize) + return false; + return TypeHasComponent(EltTy, Offset, Size); } -/// isSafeUseOfBitCastedAllocation - Check if all users of this bitcast -/// from an alloca are safe for SROA of that alloca. -void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocaInst *AI, - AllocaInfo &Info) { - for (Value::use_iterator UI = BC->use_begin(), E = BC->use_end(); - UI != E; ++UI) { - if (BitCastInst *BCU = dyn_cast<BitCastInst>(UI)) { - isSafeUseOfBitCastedAllocation(BCU, AI, Info); - } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) { - isSafeMemIntrinsicOnAllocation(MI, AI, UI.getOperandNo(), Info); - } else if (StoreInst *SI = dyn_cast<StoreInst>(UI)) { - if (SI->isVolatile()) - return MarkUnsafe(Info); - - // If storing the entire alloca in one chunk through a bitcasted pointer - // to integer, we can transform it. This happens (for example) when you - // cast a {i32,i32}* to i64* and store through it. This is similar to the - // memcpy case and occurs in various "byval" cases and emulated memcpys. - if (isa<IntegerType>(SI->getOperand(0)->getType()) && - TD->getTypeAllocSize(SI->getOperand(0)->getType()) == - TD->getTypeAllocSize(AI->getType()->getElementType())) { - Info.isMemCpyDst = true; - continue; - } - return MarkUnsafe(Info); - } else if (LoadInst *LI = dyn_cast<LoadInst>(UI)) { - if (LI->isVolatile()) - return MarkUnsafe(Info); - - // If loading the entire alloca in one chunk through a bitcasted pointer - // to integer, we can transform it. This happens (for example) when you - // cast a {i32,i32}* to i64* and load through it. This is similar to the - // memcpy case and occurs in various "byval" cases and emulated memcpys. - if (isa<IntegerType>(LI->getType()) && - TD->getTypeAllocSize(LI->getType()) == - TD->getTypeAllocSize(AI->getType()->getElementType())) { - Info.isMemCpySrc = true; - continue; +/// RewriteForScalarRepl - Alloca AI is being split into NewElts, so rewrite +/// the instruction I, which references it, to use the separate elements. +/// Offset indicates the position within AI that is referenced by this +/// instruction. 
+void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset, + SmallVector<AllocaInst*, 32> &NewElts) { + for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E; ++UI) { + Instruction *User = cast<Instruction>(*UI); + + if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) { + RewriteBitCast(BC, AI, Offset, NewElts); + } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) { + RewriteGEP(GEPI, AI, Offset, NewElts); + } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) { + ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength()); + uint64_t MemSize = Length->getZExtValue(); + if (Offset == 0 && + MemSize == TD->getTypeAllocSize(AI->getAllocatedType())) + RewriteMemIntrinUserOfAlloca(MI, I, AI, NewElts); + // Otherwise the intrinsic can only touch a single element and the + // address operand will be updated, so nothing else needs to be done. + } else if (LoadInst *LI = dyn_cast<LoadInst>(User)) { + const Type *LIType = LI->getType(); + if (LIType == AI->getAllocatedType()) { + // Replace: + // %res = load { i32, i32 }* %alloc + // with: + // %load.0 = load i32* %alloc.0 + // %insert.0 insertvalue { i32, i32 } zeroinitializer, i32 %load.0, 0 + // %load.1 = load i32* %alloc.1 + // %insert = insertvalue { i32, i32 } %insert.0, i32 %load.1, 1 + // (Also works for arrays instead of structs) + Value *Insert = UndefValue::get(LIType); + for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { + Value *Load = new LoadInst(NewElts[i], "load", LI); + Insert = InsertValueInst::Create(Insert, Load, i, "insert", LI); + } + LI->replaceAllUsesWith(Insert); + DeadInsts.push_back(LI); + } else if (isa<IntegerType>(LIType) && + TD->getTypeAllocSize(LIType) == + TD->getTypeAllocSize(AI->getAllocatedType())) { + // If this is a load of the entire alloca to an integer, rewrite it. + RewriteLoadUserOfWholeAlloca(LI, AI, NewElts); } - return MarkUnsafe(Info); - } else if (isa<DbgInfoIntrinsic>(UI)) { - // If one user is DbgInfoIntrinsic then check if all users are - // DbgInfoIntrinsics. - if (OnlyUsedByDbgInfoIntrinsics(BC)) { - Info.needsCleanup = true; - return; + } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) { + Value *Val = SI->getOperand(0); + const Type *SIType = Val->getType(); + if (SIType == AI->getAllocatedType()) { + // Replace: + // store { i32, i32 } %val, { i32, i32 }* %alloc + // with: + // %val.0 = extractvalue { i32, i32 } %val, 0 + // store i32 %val.0, i32* %alloc.0 + // %val.1 = extractvalue { i32, i32 } %val, 1 + // store i32 %val.1, i32* %alloc.1 + // (Also works for arrays instead of structs) + for (unsigned i = 0, e = NewElts.size(); i != e; ++i) { + Value *Extract = ExtractValueInst::Create(Val, i, Val->getName(), SI); + new StoreInst(Extract, NewElts[i], SI); + } + DeadInsts.push_back(SI); + } else if (isa<IntegerType>(SIType) && + TD->getTypeAllocSize(SIType) == + TD->getTypeAllocSize(AI->getAllocatedType())) { + // If this is a store of the entire alloca from an integer, rewrite it. + RewriteStoreUserOfWholeAlloca(SI, AI, NewElts); } - else - MarkUnsafe(Info); } - else { - return MarkUnsafe(Info); - } - if (Info.isUnsafe) return; } } -/// RewriteBitCastUserOfAlloca - BCInst (transitively) bitcasts AI, or indexes -/// to its first element. Transform users of the cast to use the new values -/// instead. 
-void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocaInst *AI, - SmallVector<AllocaInst*, 32> &NewElts) { - Value::use_iterator UI = BCInst->use_begin(), UE = BCInst->use_end(); - while (UI != UE) { - Instruction *User = cast<Instruction>(*UI++); - if (BitCastInst *BCU = dyn_cast<BitCastInst>(User)) { - RewriteBitCastUserOfAlloca(BCU, AI, NewElts); - if (BCU->use_empty()) BCU->eraseFromParent(); - continue; - } +/// RewriteBitCast - Update a bitcast reference to the alloca being replaced +/// and recursively continue updating all of its uses. +void SROA::RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset, + SmallVector<AllocaInst*, 32> &NewElts) { + RewriteForScalarRepl(BC, AI, Offset, NewElts); + if (BC->getOperand(0) != AI) + return; - if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) { - // This must be memcpy/memmove/memset of the entire aggregate. - // Split into one per element. - RewriteMemIntrinUserOfAlloca(MI, BCInst, AI, NewElts); - continue; - } - - if (StoreInst *SI = dyn_cast<StoreInst>(User)) { - // If this is a store of the entire alloca from an integer, rewrite it. - RewriteStoreUserOfWholeAlloca(SI, AI, NewElts); - continue; - } + // The bitcast references the original alloca. Replace its uses with + // references to the first new element alloca. + Instruction *Val = NewElts[0]; + if (Val->getType() != BC->getDestTy()) { + Val = new BitCastInst(Val, BC->getDestTy(), "", BC); + Val->takeName(BC); + } + BC->replaceAllUsesWith(Val); + DeadInsts.push_back(BC); +} - if (LoadInst *LI = dyn_cast<LoadInst>(User)) { - // If this is a load of the entire alloca to an integer, rewrite it. - RewriteLoadUserOfWholeAlloca(LI, AI, NewElts); - continue; - } - - // Otherwise it must be some other user of a gep of the first pointer. Just - // leave these alone. - continue; +/// FindElementAndOffset - Return the index of the element containing Offset +/// within the specified type, which must be either a struct or an array. +/// Sets T to the type of the element and Offset to the offset within that +/// element. IdxTy is set to the type of the index result to be used in a +/// GEP instruction. +uint64_t SROA::FindElementAndOffset(const Type *&T, uint64_t &Offset, + const Type *&IdxTy) { + uint64_t Idx = 0; + if (const StructType *ST = dyn_cast<StructType>(T)) { + const StructLayout *Layout = TD->getStructLayout(ST); + Idx = Layout->getElementContainingOffset(Offset); + T = ST->getContainedType(Idx); + Offset -= Layout->getElementOffset(Idx); + IdxTy = Type::getInt32Ty(T->getContext()); + return Idx; } + const ArrayType *AT = cast<ArrayType>(T); + T = AT->getElementType(); + uint64_t EltSize = TD->getTypeAllocSize(T); + Idx = Offset / EltSize; + Offset -= Idx * EltSize; + IdxTy = Type::getInt64Ty(T->getContext()); + return Idx; +} + +/// RewriteGEP - Check if this GEP instruction moves the pointer across +/// elements of the alloca that are being split apart, and if so, rewrite +/// the GEP to be relative to the new element. 
+void SROA::RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
+                      SmallVector<AllocaInst*, 32> &NewElts) {
+  uint64_t OldOffset = Offset;
+  SmallVector<Value*, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end());
+  Offset += TD->getIndexedOffset(GEPI->getPointerOperandType(),
+                                 &Indices[0], Indices.size());
+
+  RewriteForScalarRepl(GEPI, AI, Offset, NewElts);
+
+  const Type *T = AI->getAllocatedType();
+  const Type *IdxTy;
+  uint64_t OldIdx = FindElementAndOffset(T, OldOffset, IdxTy);
+  if (GEPI->getOperand(0) == AI)
+    OldIdx = ~0ULL; // Force the GEP to be rewritten.
+
+  T = AI->getAllocatedType();
+  uint64_t EltOffset = Offset;
+  uint64_t Idx = FindElementAndOffset(T, EltOffset, IdxTy);
+
+  // If this GEP does not move the pointer across elements of the alloca
+  // being split, then it does not need to be rewritten.
+  if (Idx == OldIdx)
+    return;
+
+  const Type *i32Ty = Type::getInt32Ty(AI->getContext());
+  SmallVector<Value*, 8> NewArgs;
+  NewArgs.push_back(Constant::getNullValue(i32Ty));
+  while (EltOffset != 0) {
+    uint64_t EltIdx = FindElementAndOffset(T, EltOffset, IdxTy);
+    NewArgs.push_back(ConstantInt::get(IdxTy, EltIdx));
+  }
+  Instruction *Val = NewElts[Idx];
+  if (NewArgs.size() > 1) {
+    Val = GetElementPtrInst::CreateInBounds(Val, NewArgs.begin(),
+                                            NewArgs.end(), "", GEPI);
+    Val->takeName(GEPI);
+  }
+  if (Val->getType() != GEPI->getType())
+    Val = new BitCastInst(Val, GEPI->getType(), Val->getNameStr(), GEPI);
+  GEPI->replaceAllUsesWith(Val);
+  DeadInsts.push_back(GEPI);
+}
 
 /// RewriteMemIntrinUserOfAlloca - MI is a memcpy/memset/memmove from or to AI.
 /// Rewrite it to copy or set the elements of the scalarized memory.
-void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
+void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
                                         AllocaInst *AI,
                                         SmallVector<AllocaInst*, 32> &NewElts) {
-
   // If this is a memcpy/memmove, construct the other pointer as the
   // appropriate type.  The "Other" pointer is the pointer that goes to memory
   // that doesn't have anything to do with the alloca that we are promoting. For
@@ -761,28 +720,41 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
   LLVMContext &Context = MI->getContext();
   unsigned MemAlignment = MI->getAlignment();
   if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { // memmove/memcopy
-    if (BCInst == MTI->getRawDest())
+    if (Inst == MTI->getRawDest())
       OtherPtr = MTI->getRawSource();
     else {
-      assert(BCInst == MTI->getRawSource());
+      assert(Inst == MTI->getRawSource());
       OtherPtr = MTI->getRawDest();
     }
   }
-  // Keep track of the other intrinsic argument, so it can be removed if it
-  // is dead when the intrinsic is replaced.
-  Value *PossiblyDead = OtherPtr;
-
   // If there is an other pointer, we want to convert it to the same pointer
   // type as AI has, so we can GEP through it safely.
   if (OtherPtr) {
-    // It is likely that OtherPtr is a bitcast, if so, remove it.
-    if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr))
-      OtherPtr = BC->getOperand(0);
-    // All zero GEPs are effectively bitcasts.
-    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(OtherPtr))
-      if (GEP->hasAllZeroIndices())
-        OtherPtr = GEP->getOperand(0);
+
+    // Remove bitcasts and all-zero GEPs from OtherPtr.  This is an
+    // optimization, but it's also required to detect the corner case where
+    // both pointer operands are referencing the same memory, and where
+    // OtherPtr may be a bitcast or GEP that is currently being rewritten.
(This + // function is only called for mem intrinsics that access the whole + // aggregate, so non-zero GEPs are not an issue here.) + while (1) { + if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr)) { + OtherPtr = BC->getOperand(0); + continue; + } + if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(OtherPtr)) { + // All zero GEPs are effectively bitcasts. + if (GEP->hasAllZeroIndices()) { + OtherPtr = GEP->getOperand(0); + continue; + } + } + break; + } + // If OtherPtr has already been rewritten, this intrinsic will be dead. + if (OtherPtr == NewElts[0]) + return; if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr)) if (BCE->getOpcode() == Instruction::BitCast) @@ -798,7 +770,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst, // Process each element of the aggregate. Value *TheFn = MI->getOperand(0); const Type *BytePtrTy = MI->getRawDest()->getType(); - bool SROADest = MI->getRawDest() == BCInst; + bool SROADest = MI->getRawDest() == Inst; Constant *Zero = Constant::getNullValue(Type::getInt32Ty(MI->getContext())); @@ -807,12 +779,15 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst, Value *OtherElt = 0; unsigned OtherEltAlign = MemAlignment; - if (OtherPtr) { + if (OtherPtr == AI) { + OtherElt = NewElts[i]; + OtherEltAlign = 0; + } else if (OtherPtr) { Value *Idx[2] = { Zero, ConstantInt::get(Type::getInt32Ty(MI->getContext()), i) }; - OtherElt = GetElementPtrInst::Create(OtherPtr, Idx, Idx + 2, + OtherElt = GetElementPtrInst::CreateInBounds(OtherPtr, Idx, Idx + 2, OtherPtr->getNameStr()+"."+Twine(i), - MI); + MI); uint64_t EltOffset; const PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType()); if (const StructType *ST = @@ -924,9 +899,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst, CallInst::Create(TheFn, Ops, Ops + 4, "", MI); } } - MI->eraseFromParent(); - if (PossiblyDead) - RecursivelyDeleteTriviallyDeadInstructions(PossiblyDead); + DeadInsts.push_back(MI); } /// RewriteStoreUserOfWholeAlloca - We found a store of an integer that @@ -937,15 +910,9 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI, // Extract each element out of the integer according to its structure offset // and store the element value to the individual alloca. Value *SrcVal = SI->getOperand(0); - const Type *AllocaEltTy = AI->getType()->getElementType(); + const Type *AllocaEltTy = AI->getAllocatedType(); uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy); - // If this isn't a store of an integer to the whole alloca, it may be a store - // to the first element. Just ignore the store in this case and normal SROA - // will handle it. - if (!isa<IntegerType>(SrcVal->getType()) || - TD->getTypeAllocSizeInBits(SrcVal->getType()) != AllocaSizeBits) - return; // Handle tail padding by extending the operand if (TD->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits) SrcVal = new ZExtInst(SrcVal, @@ -1050,7 +1017,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI, } } - SI->eraseFromParent(); + DeadInsts.push_back(SI); } /// RewriteLoadUserOfWholeAlloca - We found a load of the entire allocation to @@ -1059,16 +1026,9 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI, SmallVector<AllocaInst*, 32> &NewElts) { // Extract each element out of the NewElts according to its structure offset // and form the result value. 
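The shift-and-truncate arithmetic behind both whole-alloca rewrites can be checked in plain C++. A minimal sketch (little-endian layout, a { i32, i32 } shape, and the example value are assumptions here; the pass reads the real offsets from TargetData and also handles big-endian targets):

#include <cassert>
#include <cstdint>

int main() {
  // A store of one wide integer over the alloca becomes two narrow pieces.
  uint64_t SrcVal = 0x1122334455667788ULL;  // value stored over the alloca
  uint32_t Elt0 = uint32_t(SrcVal);         // element at byte offset 0
  uint32_t Elt1 = uint32_t(SrcVal >> 32);   // element at byte offset 4
  assert(Elt0 == 0x55667788u && Elt1 == 0x11223344u);

  // The load direction: shift each piece back into place and merge.
  uint64_t Reloaded = (uint64_t(Elt1) << 32) | Elt0;
  assert(Reloaded == SrcVal);
  return 0;
}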
- const Type *AllocaEltTy = AI->getType()->getElementType(); + const Type *AllocaEltTy = AI->getAllocatedType(); uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy); - // If this isn't a load of the whole alloca to an integer, it may be a load - // of the first element. Just ignore the load in this case and normal SROA - // will handle it. - if (!isa<IntegerType>(LI->getType()) || - TD->getTypeAllocSizeInBits(LI->getType()) != AllocaSizeBits) - return; - DEBUG(errs() << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << '\n' << *LI << '\n'); @@ -1139,10 +1099,9 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI, ResultVal = new TruncInst(ResultVal, LI->getType(), "", LI); LI->replaceAllUsesWith(ResultVal); - LI->eraseFromParent(); + DeadInsts.push_back(LI); } - /// HasPadding - Return true if the specified type has any structure or /// alignment padding, false otherwise. static bool HasPadding(const Type *Ty, const TargetData &TD) { @@ -1192,14 +1151,10 @@ int SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) { // the users are safe to transform. AllocaInfo Info; - for (Value::use_iterator I = AI->use_begin(), E = AI->use_end(); - I != E; ++I) { - isSafeUseOfAllocation(cast<Instruction>(*I), AI, Info); - if (Info.isUnsafe) { - DEBUG(errs() << "Cannot transform: " << *AI << "\n due to user: " - << **I << '\n'); - return 0; - } + isSafeForScalarRepl(AI, AI, 0, Info); + if (Info.isUnsafe) { + DEBUG(errs() << "Cannot transform: " << *AI << '\n'); + return 0; } // Okay, we know all the users are promotable. If the aggregate is a memcpy @@ -1208,88 +1163,28 @@ int SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) { // types, but may actually be used. In these cases, we refuse to promote the // struct. if (Info.isMemCpySrc && Info.isMemCpyDst && - HasPadding(AI->getType()->getElementType(), *TD)) + HasPadding(AI->getAllocatedType(), *TD)) return 0; // If we require cleanup, return 1, otherwise return 3. return Info.needsCleanup ? 1 : 3; } -/// CleanupGEP - GEP is used by an Alloca, which can be promoted after the GEP -/// is canonicalized here. -void SROA::CleanupGEP(GetElementPtrInst *GEPI) { - gep_type_iterator I = gep_type_begin(GEPI); - ++I; - - const ArrayType *AT = dyn_cast<ArrayType>(*I); - if (!AT) - return; - - uint64_t NumElements = AT->getNumElements(); - - if (isa<ConstantInt>(I.getOperand())) - return; - - if (NumElements == 1) { - GEPI->setOperand(2, - Constant::getNullValue(Type::getInt32Ty(GEPI->getContext()))); - return; - } - - assert(NumElements == 2 && "Unhandled case!"); - // All users of the GEP must be loads. At each use of the GEP, insert - // two loads of the appropriate indexed GEP and select between them. - Value *IsOne = new ICmpInst(GEPI, ICmpInst::ICMP_NE, I.getOperand(), - Constant::getNullValue(I.getOperand()->getType()), - "isone"); - // Insert the new GEP instructions, which are properly indexed. - SmallVector<Value*, 8> Indices(GEPI->op_begin()+1, GEPI->op_end()); - Indices[1] = Constant::getNullValue(Type::getInt32Ty(GEPI->getContext())); - Value *ZeroIdx = GetElementPtrInst::Create(GEPI->getOperand(0), - Indices.begin(), - Indices.end(), - GEPI->getName()+".0", GEPI); - Indices[1] = ConstantInt::get(Type::getInt32Ty(GEPI->getContext()), 1); - Value *OneIdx = GetElementPtrInst::Create(GEPI->getOperand(0), - Indices.begin(), - Indices.end(), - GEPI->getName()+".1", GEPI); - // Replace all loads of the variable index GEP with loads from both - // indexes and a select. 
- while (!GEPI->use_empty()) { - LoadInst *LI = cast<LoadInst>(GEPI->use_back()); - Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI); - Value *One = new LoadInst(OneIdx , LI->getName()+".1", LI); - Value *R = SelectInst::Create(IsOne, One, Zero, LI->getName(), LI); - LI->replaceAllUsesWith(R); - LI->eraseFromParent(); - } - GEPI->eraseFromParent(); -} - - /// CleanupAllocaUsers - If SROA reported that it can promote the specified /// allocation, but only if cleaned up, perform the cleanups required. -void SROA::CleanupAllocaUsers(AllocaInst *AI) { - // At this point, we know that the end result will be SROA'd and promoted, so - // we can insert ugly code if required so long as sroa+mem2reg will clean it - // up. - for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); +void SROA::CleanupAllocaUsers(Value *V) { + for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ) { User *U = *UI++; - if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) - CleanupGEP(GEPI); - else { - Instruction *I = cast<Instruction>(U); - SmallVector<DbgInfoIntrinsic *, 2> DbgInUses; - if (!isa<StoreInst>(I) && OnlyUsedByDbgInfoIntrinsics(I, &DbgInUses)) { - // Safe to remove debug info uses. - while (!DbgInUses.empty()) { - DbgInfoIntrinsic *DI = DbgInUses.back(); DbgInUses.pop_back(); - DI->eraseFromParent(); - } - I->eraseFromParent(); + Instruction *I = cast<Instruction>(U); + SmallVector<DbgInfoIntrinsic *, 2> DbgInUses; + if (!isa<StoreInst>(I) && OnlyUsedByDbgInfoIntrinsics(I, &DbgInUses)) { + // Safe to remove debug info uses. + while (!DbgInUses.empty()) { + DbgInfoIntrinsic *DI = DbgInUses.back(); DbgInUses.pop_back(); + DI->eraseFromParent(); } + I->eraseFromParent(); } } } @@ -1395,7 +1290,7 @@ bool SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy, // Compute the offset that this GEP adds to the pointer. SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end()); - uint64_t GEPOffset = TD->getIndexedOffset(GEP->getOperand(0)->getType(), + uint64_t GEPOffset = TD->getIndexedOffset(GEP->getPointerOperandType(), &Indices[0], Indices.size()); // See if all uses can be converted. if (!CanConvertToScalar(GEP, IsNotTrivial, VecTy, SawVec,Offset+GEPOffset, @@ -1457,7 +1352,7 @@ void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset) { if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) { // Compute the offset that this GEP adds to the pointer. SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end()); - uint64_t GEPOffset = TD->getIndexedOffset(GEP->getOperand(0)->getType(), + uint64_t GEPOffset = TD->getIndexedOffset(GEP->getPointerOperandType(), &Indices[0], Indices.size()); ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8); GEP->eraseFromParent(); @@ -1478,13 +1373,16 @@ void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset) { if (StoreInst *SI = dyn_cast<StoreInst>(User)) { assert(SI->getOperand(0) != Ptr && "Consistency error!"); - // FIXME: Remove once builder has Twine API. - Value *Old = Builder.CreateLoad(NewAI, - (NewAI->getName()+".in").str().c_str()); + Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in"); Value *New = ConvertScalar_InsertValue(SI->getOperand(0), Old, Offset, Builder); Builder.CreateStore(New, NewAI); SI->eraseFromParent(); + + // If the load we just inserted is now dead, then the inserted store + // overwrote the entire thing. 
+      if (Old->use_empty())
+        Old->eraseFromParent();
       continue;
     }
@@ -1504,13 +1402,16 @@ void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset) {
       for (unsigned i = 1; i != NumBytes; ++i)
         APVal |= APVal << 8;
 
-      // FIXME: Remove once builder has Twine API.
-      Value *Old = Builder.CreateLoad(NewAI,
-                                      (NewAI->getName()+".in").str().c_str());
+      Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in");
       Value *New = ConvertScalar_InsertValue(
                       ConstantInt::get(User->getContext(), APVal),
                       Old, Offset, Builder);
       Builder.CreateStore(New, NewAI);
+
+      // If the load we just inserted is now dead, then the memset overwrote
+      // the entire thing.
+      if (Old->use_empty())
+        Old->eraseFromParent();
     }
     MSI->eraseFromParent();
     continue;
diff --git a/lib/Transforms/Scalar/SimplifyCFGPass.cpp b/lib/Transforms/Scalar/SimplifyCFGPass.cpp
index e905952..a36da78 100644
--- a/lib/Transforms/Scalar/SimplifyCFGPass.cpp
+++ b/lib/Transforms/Scalar/SimplifyCFGPass.cpp
@@ -189,6 +189,77 @@ static bool RemoveUnreachableBlocksFromFn(Function &F) {
   return true;
 }
 
+/// MergeEmptyReturnBlocks - If we have more than one empty (other than phi
+/// node) return block, merge them together to promote recursive block merging.
+static bool MergeEmptyReturnBlocks(Function &F) {
+  bool Changed = false;
+
+  BasicBlock *RetBlock = 0;
+
+  // Scan all the blocks in the function, looking for empty return blocks.
+  for (Function::iterator BBI = F.begin(), E = F.end(); BBI != E; ) {
+    BasicBlock &BB = *BBI++;
+
+    // Only look at return blocks.
+    ReturnInst *Ret = dyn_cast<ReturnInst>(BB.getTerminator());
+    if (Ret == 0) continue;
+
+    // Only look at the block if it is empty or the only other thing in it is a
+    // single PHI node that is the operand to the return.
+    if (Ret != &BB.front()) {
+      // Check for something else in the block.
+      BasicBlock::iterator I = Ret;
+      --I;
+      if (!isa<PHINode>(I) || I != BB.begin() ||
+          Ret->getNumOperands() == 0 ||
+          Ret->getOperand(0) != I)
+        continue;
+    }
+
+    // If this is the first returning block, remember it and keep going.
+    if (RetBlock == 0) {
+      RetBlock = &BB;
+      continue;
+    }
+
+    // Otherwise, we found a duplicate return block.  Merge the two.
+    Changed = true;
+
+    // The case when there is no input to the return, or when the returned
+    // values agree, is trivial.  Note that they can't agree if there are
+    // phis in the blocks.
+    if (Ret->getNumOperands() == 0 ||
+        Ret->getOperand(0) ==
+          cast<ReturnInst>(RetBlock->getTerminator())->getOperand(0)) {
+      BB.replaceAllUsesWith(RetBlock);
+      BB.eraseFromParent();
+      continue;
+    }
+
+    // If the canonical return block has no PHI node, create one now.
+    PHINode *RetBlockPHI = dyn_cast<PHINode>(RetBlock->begin());
+    if (RetBlockPHI == 0) {
+      Value *InVal = cast<ReturnInst>(RetBlock->begin())->getOperand(0);
+      RetBlockPHI = PHINode::Create(Ret->getOperand(0)->getType(), "merge",
+                                    &RetBlock->front());
+
+      for (pred_iterator PI = pred_begin(RetBlock), E = pred_end(RetBlock);
+           PI != E; ++PI)
+        RetBlockPHI->addIncoming(InVal, *PI);
+      RetBlock->getTerminator()->setOperand(0, RetBlockPHI);
+    }
+
+    // Turn BB into a block that just unconditionally branches to the return
+    // block.  This handles the case when the two return blocks have a common
+    // predecessor but return different values.
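Schematically, the merge looks like this (invented block and value names, in the same comment-IR style used elsewhere in this patch; %pred1 stands for ret1's original predecessors):

//   Before:                    After (ret1 kept as the canonical block):
//     ret1:                      ret1:
//       ret i32 %a                 %merge = phi i32 [ %a, %pred1 ],
//     ret2:                                         [ %b, %ret2 ]
//       ret i32 %b                 ret i32 %merge
//                                ret2:
//                                  br label %ret1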
+ RetBlockPHI->addIncoming(Ret->getOperand(0), &BB); + BB.getTerminator()->eraseFromParent(); + BranchInst::Create(RetBlock, &BB); + } + + return Changed; +} + /// IterativeSimplifyCFG - Call SimplifyCFG on all the blocks in the function, /// iterating until no more changes are made. static bool IterativeSimplifyCFG(Function &F) { @@ -216,6 +287,7 @@ static bool IterativeSimplifyCFG(Function &F) { // bool CFGSimplifyPass::runOnFunction(Function &F) { bool EverChanged = RemoveUnreachableBlocksFromFn(F); + EverChanged |= MergeEmptyReturnBlocks(F); EverChanged |= IterativeSimplifyCFG(F); // If neither pass changed anything, we're done. diff --git a/lib/Transforms/Scalar/SimplifyLibCalls.cpp b/lib/Transforms/Scalar/SimplifyLibCalls.cpp index 6fd884b..3c28ad2 100644 --- a/lib/Transforms/Scalar/SimplifyLibCalls.cpp +++ b/lib/Transforms/Scalar/SimplifyLibCalls.cpp @@ -76,6 +76,11 @@ public: /// return value has 'intptr_t' type. Value *EmitStrLen(Value *Ptr, IRBuilder<> &B); + /// EmitStrChr - Emit a call to the strchr function to the builder, for the + /// specified pointer and character. Ptr is required to be some pointer type, + /// and the return value has 'i8*' type. + Value *EmitStrChr(Value *Ptr, char C, IRBuilder<> &B); + /// EmitMemCpy - Emit a call to the memcpy function to the builder. This /// always expects that the size has type 'intptr_t' and Dst/Src are pointers. Value *EmitMemCpy(Value *Dst, Value *Src, Value *Len, @@ -151,6 +156,26 @@ Value *LibCallOptimization::EmitStrLen(Value *Ptr, IRBuilder<> &B) { return CI; } +/// EmitStrChr - Emit a call to the strchr function to the builder, for the +/// specified pointer and character. Ptr is required to be some pointer type, +/// and the return value has 'i8*' type. +Value *LibCallOptimization::EmitStrChr(Value *Ptr, char C, IRBuilder<> &B) { + Module *M = Caller->getParent(); + AttributeWithIndex AWI = + AttributeWithIndex::get(~0u, Attribute::ReadOnly | Attribute::NoUnwind); + + const Type *I8Ptr = Type::getInt8PtrTy(*Context); + const Type *I32Ty = Type::getInt32Ty(*Context); + Constant *StrChr = M->getOrInsertFunction("strchr", AttrListPtr::get(&AWI, 1), + I8Ptr, I8Ptr, I32Ty, NULL); + CallInst *CI = B.CreateCall2(StrChr, CastToCStr(Ptr, B), + ConstantInt::get(I32Ty, C), "strchr"); + if (const Function *F = dyn_cast<Function>(StrChr->stripPointerCasts())) + CI->setCallingConv(F->getCallingConv()); + return CI; +} + + /// EmitMemCpy - Emit a call to the memcpy function to the builder. This always /// expects that the size has type 'intptr_t' and Dst/Src are pointers. Value *LibCallOptimization::EmitMemCpy(Value *Dst, Value *Src, Value *Len, @@ -880,17 +905,16 @@ struct StrLenOpt : public LibCallOptimization { if (uint64_t Len = GetStringLength(Src)) return ConstantInt::get(CI->getType(), Len-1); - // Handle strlen(p) != 0. - if (!IsOnlyUsedInZeroEqualityComparison(CI)) return 0; - // strlen(x) != 0 --> *x != 0 // strlen(x) == 0 --> *x == 0 - return B.CreateZExt(B.CreateLoad(Src, "strlenfirst"), CI->getType()); + if (IsOnlyUsedInZeroEqualityComparison(CI)) + return B.CreateZExt(B.CreateLoad(Src, "strlenfirst"), CI->getType()); + return 0; } }; //===---------------------------------------===// -// 'strto*' Optimizations +// 'strto*' Optimizations. This handles strtol, strtod, strtof, strtoul, etc. 
struct StrToOpt : public LibCallOptimization { virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { @@ -910,6 +934,52 @@ struct StrToOpt : public LibCallOptimization { } }; +//===---------------------------------------===// +// 'strstr' Optimizations + +struct StrStrOpt : public LibCallOptimization { + virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + const FunctionType *FT = Callee->getFunctionType(); + if (FT->getNumParams() != 2 || + !isa<PointerType>(FT->getParamType(0)) || + !isa<PointerType>(FT->getParamType(1)) || + !isa<PointerType>(FT->getReturnType())) + return 0; + + // fold strstr(x, x) -> x. + if (CI->getOperand(1) == CI->getOperand(2)) + return B.CreateBitCast(CI->getOperand(1), CI->getType()); + + // See if either input string is a constant string. + std::string SearchStr, ToFindStr; + bool HasStr1 = GetConstantStringInfo(CI->getOperand(1), SearchStr); + bool HasStr2 = GetConstantStringInfo(CI->getOperand(2), ToFindStr); + + // fold strstr(x, "") -> x. + if (HasStr2 && ToFindStr.empty()) + return B.CreateBitCast(CI->getOperand(1), CI->getType()); + + // If both strings are known, constant fold it. + if (HasStr1 && HasStr2) { + std::string::size_type Offset = SearchStr.find(ToFindStr); + + if (Offset == std::string::npos) // strstr("foo", "bar") -> null + return Constant::getNullValue(CI->getType()); + + // strstr("abcd", "bc") -> gep((char*)"abcd", 1) + Value *Result = CastToCStr(CI->getOperand(1), B); + Result = B.CreateConstInBoundsGEP1_64(Result, Offset, "strstr"); + return B.CreateBitCast(Result, CI->getType()); + } + + // fold strstr(x, "y") -> strchr(x, 'y'). + if (HasStr2 && ToFindStr.size() == 1) + return B.CreateBitCast(EmitStrChr(CI->getOperand(1), ToFindStr[0], B), + CI->getType()); + return 0; + } +}; + //===---------------------------------------===// // 'memcmp' Optimizations @@ -941,19 +1011,6 @@ struct MemCmpOpt : public LibCallOptimization { return B.CreateSExt(B.CreateSub(LHSV, RHSV, "chardiff"), CI->getType()); } - // memcmp(S1,S2,2) != 0 -> (*(short*)LHS ^ *(short*)RHS) != 0 - // memcmp(S1,S2,4) != 0 -> (*(int*)LHS ^ *(int*)RHS) != 0 - if ((Len == 2 || Len == 4) && IsOnlyUsedInZeroEqualityComparison(CI)) { - const Type *PTy = PointerType::getUnqual(Len == 2 ? - Type::getInt16Ty(*Context) : Type::getInt32Ty(*Context)); - LHS = B.CreateBitCast(LHS, PTy, "tmp"); - RHS = B.CreateBitCast(RHS, PTy, "tmp"); - LoadInst *LHSV = B.CreateLoad(LHS, "lhsv"); - LoadInst *RHSV = B.CreateLoad(RHS, "rhsv"); - LHSV->setAlignment(1); RHSV->setAlignment(1); // Unaligned loads. 
- return B.CreateZExt(B.CreateXor(LHSV, RHSV, "shortdiff"), CI->getType()); - } - // Constant folding: memcmp(x, y, l) -> cnst (all arguments are constant) std::string LHSStr, RHSStr; if (GetConstantStringInfo(LHS, LHSStr) && @@ -1051,7 +1108,7 @@ struct SizeOpt : public LibCallOptimization { const Type *Ty = Callee->getFunctionType()->getReturnType(); - if (Const->getZExtValue() < 2) + if (Const->getZExtValue() == 0) return Constant::getAllOnesValue(Ty); else return ConstantInt::get(Ty, 0); @@ -1071,8 +1128,8 @@ struct MemCpyChkOpt : public LibCallOptimization { if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) || !isa<PointerType>(FT->getParamType(0)) || !isa<PointerType>(FT->getParamType(1)) || - !isa<IntegerType>(FT->getParamType(3)) || - FT->getParamType(2) != TD->getIntPtrType(*Context)) + !isa<IntegerType>(FT->getParamType(3)) || + FT->getParamType(2) != TD->getIntPtrType(*Context)) return 0; ConstantInt *SizeCI = dyn_cast<ConstantInt>(CI->getOperand(4)); @@ -1099,7 +1156,7 @@ struct MemSetChkOpt : public LibCallOptimization { if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) || !isa<PointerType>(FT->getParamType(0)) || !isa<IntegerType>(FT->getParamType(1)) || - !isa<IntegerType>(FT->getParamType(3)) || + !isa<IntegerType>(FT->getParamType(3)) || FT->getParamType(2) != TD->getIntPtrType(*Context)) return 0; @@ -1129,7 +1186,7 @@ struct MemMoveChkOpt : public LibCallOptimization { if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) || !isa<PointerType>(FT->getParamType(0)) || !isa<PointerType>(FT->getParamType(1)) || - !isa<IntegerType>(FT->getParamType(3)) || + !isa<IntegerType>(FT->getParamType(3)) || FT->getParamType(2) != TD->getIntPtrType(*Context)) return 0; @@ -1675,8 +1732,8 @@ namespace { // String and Memory LibCall Optimizations StrCatOpt StrCat; StrNCatOpt StrNCat; StrChrOpt StrChr; StrCmpOpt StrCmp; StrNCmpOpt StrNCmp; StrCpyOpt StrCpy; StrNCpyOpt StrNCpy; StrLenOpt StrLen; - StrToOpt StrTo; MemCmpOpt MemCmp; MemCpyOpt MemCpy; MemMoveOpt MemMove; - MemSetOpt MemSet; + StrToOpt StrTo; StrStrOpt StrStr; + MemCmpOpt MemCmp; MemCpyOpt MemCpy; MemMoveOpt MemMove; MemSetOpt MemSet; // Math Library Optimizations PowOpt Pow; Exp2Opt Exp2; UnaryDoubleFPOpt UnaryDoubleFP; // Integer Optimizations @@ -1738,6 +1795,7 @@ void SimplifyLibCalls::InitOptimizations() { Optimizations["strtoll"] = &StrTo; Optimizations["strtold"] = &StrTo; Optimizations["strtoull"] = &StrTo; + Optimizations["strstr"] = &StrStr; Optimizations["memcmp"] = &MemCmp; Optimizations["memcpy"] = &MemCpy; Optimizations["memmove"] = &MemMove; @@ -2644,12 +2702,6 @@ bool SimplifyLibCalls::doInitialization(Module &M) { // * strcspn("",a) -> 0 // * strcspn(s,"") -> strlen(a) // -// strstr: (PR5783) -// * strstr(x,x) -> x -// * strstr(x, "") -> x -// * strstr(x, "a") -> strchr(x, 'a') -// * strstr(s1,s2) -> result (if s1 and s2 are constant strings) -// // tan, tanf, tanl: // * tan(atan(x)) -> x // diff --git a/lib/Transforms/Utils/BreakCriticalEdges.cpp b/lib/Transforms/Utils/BreakCriticalEdges.cpp index ccd97c8..19c7206 100644 --- a/lib/Transforms/Utils/BreakCriticalEdges.cpp +++ b/lib/Transforms/Utils/BreakCriticalEdges.cpp @@ -309,10 +309,10 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum, if (TIL == DestLoop) { // Both in the same loop, the NewBB joins loop. 
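    // (Note: the Loop::contains(Loop*) calls below subsume the old
    // getHeader() checks; a loop contains another loop exactly when it
    // contains that loop's header block.)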
DestLoop->addBasicBlockToLoop(NewBB, LI->getBase()); - } else if (TIL->contains(DestLoop->getHeader())) { + } else if (TIL->contains(DestLoop)) { // Edge from an outer loop to an inner loop. Add to the outer loop. TIL->addBasicBlockToLoop(NewBB, LI->getBase()); - } else if (DestLoop->contains(TIL->getHeader())) { + } else if (DestLoop->contains(TIL)) { // Edge from an inner loop to an outer loop. Add to the outer loop. DestLoop->addBasicBlockToLoop(NewBB, LI->getBase()); } else { diff --git a/lib/Transforms/Utils/CloneFunction.cpp b/lib/Transforms/Utils/CloneFunction.cpp index 162d7b3..c287747 100644 --- a/lib/Transforms/Utils/CloneFunction.cpp +++ b/lib/Transforms/Utils/CloneFunction.cpp @@ -21,6 +21,7 @@ #include "llvm/GlobalVariable.h" #include "llvm/Function.h" #include "llvm/LLVMContext.h" +#include "llvm/Metadata.h" #include "llvm/Support/CFG.h" #include "llvm/Transforms/Utils/ValueMapper.h" #include "llvm/Analysis/ConstantFolding.h" @@ -347,8 +348,7 @@ ConstantFoldMappedInstruction(const Instruction *I) { Ops.size(), TD); } -static MDNode *UpdateInlinedAtInfo(MDNode *InsnMD, MDNode *TheCallMD, - LLVMContext &Context) { +static MDNode *UpdateInlinedAtInfo(MDNode *InsnMD, MDNode *TheCallMD) { DILocation ILoc(InsnMD); if (ILoc.isNull()) return InsnMD; @@ -358,14 +358,15 @@ static MDNode *UpdateInlinedAtInfo(MDNode *InsnMD, MDNode *TheCallMD, DILocation OrigLocation = ILoc.getOrigLocation(); MDNode *NewLoc = TheCallMD; if (!OrigLocation.isNull()) - NewLoc = UpdateInlinedAtInfo(OrigLocation.getNode(), TheCallMD, Context); - - SmallVector<Value *, 4> MDVs; - MDVs.push_back(InsnMD->getElement(0)); // Line - MDVs.push_back(InsnMD->getElement(1)); // Col - MDVs.push_back(InsnMD->getElement(2)); // Scope - MDVs.push_back(NewLoc); - return MDNode::get(Context, MDVs.data(), MDVs.size()); + NewLoc = UpdateInlinedAtInfo(OrigLocation.getNode(), TheCallMD); + + Value *MDVs[] = { + InsnMD->getOperand(0), // Line + InsnMD->getOperand(1), // Col + InsnMD->getOperand(2), // Scope + NewLoc + }; + return MDNode::get(InsnMD->getContext(), MDVs, 4); } /// CloneAndPruneFunctionInto - This works exactly like CloneFunctionInto, @@ -421,12 +422,10 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, // BasicBlock::iterator I = NewBB->begin(); - LLVMContext &Context = OldFunc->getContext(); - unsigned DbgKind = Context.getMetadata().getMDKind("dbg"); + unsigned DbgKind = OldFunc->getContext().getMDKindID("dbg"); MDNode *TheCallMD = NULL; - SmallVector<Value *, 4> MDVs; if (TheCall && TheCall->hasMetadata()) - TheCallMD = Context.getMetadata().getMD(DbgKind, TheCall); + TheCallMD = TheCall->getMetadata(DbgKind); // Handle PHI nodes specially, as we have to remove references to dead // blocks. @@ -436,32 +435,38 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, for (; (PN = dyn_cast<PHINode>(I)); ++I, ++OldI) { if (I->hasMetadata()) { if (TheCallMD) { - if (MDNode *IMD = Context.getMetadata().getMD(DbgKind, I)) { - MDNode *NewMD = UpdateInlinedAtInfo(IMD, TheCallMD, Context); - Context.getMetadata().addMD(DbgKind, NewMD, I); + if (MDNode *IMD = I->getMetadata(DbgKind)) { + MDNode *NewMD = UpdateInlinedAtInfo(IMD, TheCallMD); + I->setMetadata(DbgKind, NewMD); } } else { // The cloned instruction has dbg info but the call instruction // does not have dbg info. Remove dbg info from cloned instruction. 
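        // (Attachments are dropped by storing a null node: setMetadata with a
        // null MDNode detaches the given kind from the instruction.)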
- Context.getMetadata().removeMD(DbgKind, I); + I->setMetadata(DbgKind, 0); } } PHIToResolve.push_back(cast<PHINode>(OldI)); } } + // FIXME: + // FIXME: + // FIXME: Unclone all this metadata stuff. + // FIXME: + // FIXME: + // Otherwise, remap the rest of the instructions normally. for (; I != NewBB->end(); ++I) { if (I->hasMetadata()) { if (TheCallMD) { - if (MDNode *IMD = Context.getMetadata().getMD(DbgKind, I)) { - MDNode *NewMD = UpdateInlinedAtInfo(IMD, TheCallMD, Context); - Context.getMetadata().addMD(DbgKind, NewMD, I); + if (MDNode *IMD = I->getMetadata(DbgKind)) { + MDNode *NewMD = UpdateInlinedAtInfo(IMD, TheCallMD); + I->setMetadata(DbgKind, NewMD); } } else { // The cloned instruction has dbg info but the call instruction // does not have dbg info. Remove dbg info from cloned instruction. - Context.getMetadata().removeMD(DbgKind, I); + I->setMetadata(DbgKind, 0); } } RemapInstruction(I, ValueMap); diff --git a/lib/Transforms/Utils/Local.cpp b/lib/Transforms/Utils/Local.cpp index 7a37aa3..2426e3e 100644 --- a/lib/Transforms/Utils/Local.cpp +++ b/lib/Transforms/Utils/Local.cpp @@ -20,10 +20,9 @@ #include "llvm/Instructions.h" #include "llvm/Intrinsics.h" #include "llvm/IntrinsicInst.h" -#include "llvm/LLVMContext.h" +#include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/Analysis/ConstantFolding.h" -#include "llvm/Analysis/DebugInfo.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/ProfileInfo.h" #include "llvm/Target/TargetData.h" @@ -31,6 +30,7 @@ #include "llvm/Support/Debug.h" #include "llvm/Support/GetElementPtrTypeIterator.h" #include "llvm/Support/MathExtras.h" +#include "llvm/Support/ValueHandle.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; diff --git a/lib/Transforms/Utils/LoopSimplify.cpp b/lib/Transforms/Utils/LoopSimplify.cpp index 690972d..7fcc5f7 100644 --- a/lib/Transforms/Utils/LoopSimplify.cpp +++ b/lib/Transforms/Utils/LoopSimplify.cpp @@ -109,7 +109,7 @@ X("loopsimplify", "Canonicalize natural loops", true); const PassInfo *const llvm::LoopSimplifyID = &X; Pass *llvm::createLoopSimplifyPass() { return new LoopSimplify(); } -/// runOnFunction - Run down all loops in the CFG (recursively, but we could do +/// runOnLoop - Run down all loops in the CFG (recursively, but we could do /// it in any convenient order) inserting preheaders... /// bool LoopSimplify::runOnLoop(Loop *l, LPPassManager &LPM) { @@ -305,12 +305,6 @@ ReprocessLoop: } } - // If there are duplicate phi nodes (for example, from loop rotation), - // get rid of them. 
- for (Loop::block_iterator BB = L->block_begin(), E = L->block_end(); - BB != E; ++BB) - EliminateDuplicatePHINodes(*BB); - return Changed; } diff --git a/lib/Transforms/Utils/LoopUnroll.cpp b/lib/Transforms/Utils/LoopUnroll.cpp index 6232f32..6b2c591 100644 --- a/lib/Transforms/Utils/LoopUnroll.cpp +++ b/lib/Transforms/Utils/LoopUnroll.cpp @@ -194,7 +194,7 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, LoopInfo* LI, LPPassManager* LPM) OrigPHINode.push_back(PN); if (Instruction *I = dyn_cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock))) - if (L->contains(I->getParent())) + if (L->contains(I)) LastValueMap[I] = I; } @@ -222,7 +222,7 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, LoopInfo* LI, LPPassManager* LPM) PHINode *NewPHI = cast<PHINode>(ValueMap[OrigPHINode[i]]); Value *InVal = NewPHI->getIncomingValueForBlock(LatchBlock); if (Instruction *InValI = dyn_cast<Instruction>(InVal)) - if (It > 1 && L->contains(InValI->getParent())) + if (It > 1 && L->contains(InValI)) InVal = LastValueMap[InValI]; ValueMap[OrigPHINode[i]] = InVal; New->getInstList().erase(NewPHI); @@ -244,7 +244,7 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, LoopInfo* LI, LPPassManager* LPM) UI != UE;) { Instruction *UseInst = cast<Instruction>(*UI); ++UI; - if (isa<PHINode>(UseInst) && !L->contains(UseInst->getParent())) { + if (isa<PHINode>(UseInst) && !L->contains(UseInst)) { PHINode *phi = cast<PHINode>(UseInst); Value *Incoming = phi->getIncomingValueForBlock(*BB); phi->addIncoming(Incoming, New); @@ -295,7 +295,7 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, LoopInfo* LI, LPPassManager* LPM) // If this value was defined in the loop, take the value defined by the // last iteration of the loop. if (Instruction *InValI = dyn_cast<Instruction>(InVal)) { - if (L->contains(InValI->getParent())) + if (L->contains(InValI)) InVal = LastValueMap[InVal]; } PN->addIncoming(InVal, LastIterationBB); diff --git a/lib/Transforms/Utils/SSAUpdater.cpp b/lib/Transforms/Utils/SSAUpdater.cpp index ba41bf9..9881b3c 100644 --- a/lib/Transforms/Utils/SSAUpdater.cpp +++ b/lib/Transforms/Utils/SSAUpdater.cpp @@ -149,7 +149,29 @@ Value *SSAUpdater::GetValueInMiddleOfBlock(BasicBlock *BB) { if (SingularValue != 0) return SingularValue; - // Otherwise, we do need a PHI: insert one now. + // Otherwise, we do need a PHI: check to see if we already have one available + // in this block that produces the right value. + if (isa<PHINode>(BB->begin())) { + DenseMap<BasicBlock*, Value*> ValueMapping(PredValues.begin(), + PredValues.end()); + PHINode *SomePHI; + for (BasicBlock::iterator It = BB->begin(); + (SomePHI = dyn_cast<PHINode>(It)); ++It) { + // Scan this phi to see if it is what we need. + bool Equal = true; + for (unsigned i = 0, e = SomePHI->getNumIncomingValues(); i != e; ++i) + if (ValueMapping[SomePHI->getIncomingBlock(i)] != + SomePHI->getIncomingValue(i)) { + Equal = false; + break; + } + + if (Equal) + return SomePHI; + } + } + + // Ok, we have no way out, insert a new one now. PHINode *InsertedPHI = PHINode::Create(PrototypeValue->getType(), PrototypeValue->getName(), &BB->front()); @@ -198,7 +220,7 @@ Value *SSAUpdater::GetValueAtEndOfBlockInternal(BasicBlock *BB) { // Query AvailableVals by doing an insertion of null. std::pair<AvailableValsTy::iterator, bool> InsertRes = - AvailableVals.insert(std::make_pair(BB, WeakVH())); + AvailableVals.insert(std::make_pair(BB, TrackingVH<Value>())); // Handle the case when the insertion fails because we have already seen BB. 
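  // (This happens when the predecessor walk cycles back on itself, e.g. when
  // asking for the value at the end of a loop header whose definition flows
  // around the backedge; the null entry inserted above acts as a recursion
  // marker that the code below turns into a PHI.)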
  if (!InsertRes.second) {
@@ -214,8 +236,8 @@ Value *SSAUpdater::GetValueAtEndOfBlockInternal(BasicBlock *BB) {
   // it. When we get back to the first instance of the recursion we will fill
   // in the PHI node.
   return InsertRes.first->second =
-  PHINode::Create(PrototypeValue->getType(), PrototypeValue->getName(),
-                  &BB->front());
+    PHINode::Create(PrototypeValue->getType(), PrototypeValue->getName(),
+                    &BB->front());
 }
 
 // Okay, the value isn't in the map and we just inserted a null in the entry
diff --git a/lib/VMCore/AsmWriter.cpp b/lib/VMCore/AsmWriter.cpp
index c765d96..d3c9d77 100644
--- a/lib/VMCore/AsmWriter.cpp
+++ b/lib/VMCore/AsmWriter.cpp
@@ -21,11 +21,8 @@
 #include "llvm/Constants.h"
 #include "llvm/DerivedTypes.h"
 #include "llvm/InlineAsm.h"
-#include "llvm/Instruction.h"
-#include "llvm/Instructions.h"
-#include "llvm/LLVMContext.h"
+#include "llvm/IntrinsicInst.h"
 #include "llvm/Operator.h"
-#include "llvm/Metadata.h"
 #include "llvm/Module.h"
 #include "llvm/ValueSymbolTable.h"
 #include "llvm/TypeSymbolTable.h"
@@ -60,9 +57,11 @@ static const Module *getModuleFromVal(const Value *V) {
     const Function *M = I->getParent() ? I->getParent()->getParent() : 0;
     return M ? M->getParent() : 0;
   }
-
+
   if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
     return GV->getParent();
+  if (const NamedMDNode *NMD = dyn_cast<NamedMDNode>(V))
+    return NMD->getParent();
   return 0;
 }
 
@@ -475,12 +474,6 @@ private:
   const Function* TheFunction;
   bool FunctionProcessed;
 
-  /// TheMDNode - The MDNode for which we are holding slot numbers.
-  const MDNode *TheMDNode;
-
-  /// TheNamedMDNode - The MDNode for which we are holding slot numbers.
-  const NamedMDNode *TheNamedMDNode;
-
   /// mMap - The TypePlanes map for the module level data.
   ValueMap mMap;
   unsigned mNext;
@@ -490,17 +483,13 @@ private:
   unsigned fNext;
 
   /// mdnMap - Map for MDNodes.
-  ValueMap mdnMap;
+  DenseMap<const MDNode*, unsigned> mdnMap;
   unsigned mdnNext;
 public:
   /// Construct from a module
   explicit SlotTracker(const Module *M);
   /// Construct from a function, starting out in incorp state.
   explicit SlotTracker(const Function *F);
-  /// Construct from a mdnode.
-  explicit SlotTracker(const MDNode *N);
-  /// Construct from a named mdnode.
-  explicit SlotTracker(const NamedMDNode *N);
 
   /// Return the slot number of the specified value in its type
   /// plane. If something is not in the SlotTracker, return -1.
@@ -521,10 +510,11 @@ public:
   void purgeFunction();
 
   /// MDNode map iterators.
-  ValueMap::iterator mdnBegin() { return mdnMap.begin(); }
-  ValueMap::iterator mdnEnd() { return mdnMap.end(); }
-  unsigned mdnSize() const { return mdnMap.size(); }
-  bool mdnEmpty() const { return mdnMap.empty(); }
+  typedef DenseMap<const MDNode*, unsigned>::iterator mdn_iterator;
+  mdn_iterator mdn_begin() { return mdnMap.begin(); }
+  mdn_iterator mdn_end() { return mdnMap.end(); }
+  unsigned mdn_size() const { return mdnMap.size(); }
+  bool mdn_empty() const { return mdnMap.empty(); }
 
   /// This function does the actual initialization.
   inline void initialize();
@@ -547,12 +537,6 @@ private:
   /// Add all of the function's arguments, basic blocks, and instructions.
   void processFunction();
 
-  /// Add all MDNode operands.
-  void processMDNode();
-
-  /// Add all MDNode operands.
-  void processNamedMDNode();
-
   SlotTracker(const SlotTracker &);  // DO NOT IMPLEMENT
   void operator=(const SlotTracker &);  // DO NOT IMPLEMENT
 };
 
@@ -591,27 +575,15 @@ static SlotTracker *createSlotTracker(const Value *V) {
 // Module level constructor.
Causes the contents of the Module (sans functions) // to be added to the slot table. SlotTracker::SlotTracker(const Module *M) - : TheModule(M), TheFunction(0), FunctionProcessed(false), TheMDNode(0), - TheNamedMDNode(0), mNext(0), fNext(0), mdnNext(0) { + : TheModule(M), TheFunction(0), FunctionProcessed(false), + mNext(0), fNext(0), mdnNext(0) { } // Function level constructor. Causes the contents of the Module and the one // function provided to be added to the slot table. SlotTracker::SlotTracker(const Function *F) : TheModule(F ? F->getParent() : 0), TheFunction(F), FunctionProcessed(false), - TheMDNode(0), TheNamedMDNode(0), mNext(0), fNext(0), mdnNext(0) { -} - -// Constructor to handle single MDNode. -SlotTracker::SlotTracker(const MDNode *C) - : TheModule(0), TheFunction(0), FunctionProcessed(false), TheMDNode(C), - TheNamedMDNode(0), mNext(0), fNext(0), mdnNext(0) { -} - -// Constructor to handle single NamedMDNode. -SlotTracker::SlotTracker(const NamedMDNode *N) - : TheModule(0), TheFunction(0), FunctionProcessed(false), TheMDNode(0), - TheNamedMDNode(N), mNext(0), fNext(0), mdnNext(0) { + mNext(0), fNext(0), mdnNext(0) { } inline void SlotTracker::initialize() { @@ -622,12 +594,6 @@ inline void SlotTracker::initialize() { if (TheFunction && !FunctionProcessed) processFunction(); - - if (TheMDNode) - processMDNode(); - - if (TheNamedMDNode) - processNamedMDNode(); } // Iterate through all the global variables, functions, and global @@ -640,10 +606,6 @@ void SlotTracker::processModule() { E = TheModule->global_end(); I != E; ++I) { if (!I->hasName()) CreateModuleSlot(I); - if (I->hasInitializer()) { - if (MDNode *N = dyn_cast<MDNode>(I->getInitializer())) - CreateMetadataSlot(N); - } } // Add metadata used by named metadata. @@ -651,9 +613,9 @@ void SlotTracker::processModule() { I = TheModule->named_metadata_begin(), E = TheModule->named_metadata_end(); I != E; ++I) { const NamedMDNode *NMD = I; - for (unsigned i = 0, e = NMD->getNumElements(); i != e; ++i) { - MDNode *MD = dyn_cast_or_null<MDNode>(NMD->getElement(i)); - if (MD) + for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) { + // FIXME: Change accessor to be type safe. + if (MDNode *MD = cast_or_null<MDNode>(NMD->getOperand(i))) CreateMetadataSlot(MD); } } @@ -680,30 +642,30 @@ void SlotTracker::processFunction() { ST_DEBUG("Inserting Instructions:\n"); - MetadataContext &TheMetadata = TheFunction->getContext().getMetadata(); - typedef SmallVector<std::pair<unsigned, TrackingVH<MDNode> >, 2> MDMapTy; - MDMapTy MDs; + SmallVector<std::pair<unsigned, MDNode*>, 4> MDForInst; // Add all of the basic blocks and instructions with no names. for (Function::const_iterator BB = TheFunction->begin(), E = TheFunction->end(); BB != E; ++BB) { if (!BB->hasName()) CreateFunctionSlot(BB); + for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I) { - if (I->getType() != Type::getVoidTy(TheFunction->getContext()) && - !I->hasName()) + if (!I->getType()->isVoidTy() && !I->hasName()) CreateFunctionSlot(I); - for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) - if (MDNode *N = dyn_cast_or_null<MDNode>(I->getOperand(i))) - CreateMetadataSlot(N); + + // Intrinsics can directly use metadata. + if (isa<IntrinsicInst>(I)) + for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) + if (MDNode *N = dyn_cast_or_null<MDNode>(I->getOperand(i))) + CreateMetadataSlot(N); // Process metadata attached with this instruction. 
-        MDs.clear();
-        TheMetadata.getMDs(I, MDs);
-        for (MDMapTy::const_iterator MI = MDs.begin(), ME = MDs.end(); MI != ME;
-             ++MI)
-          CreateMetadataSlot(MI->second);
+        I->getAllMetadata(MDForInst);
+        for (unsigned i = 0, e = MDForInst.size(); i != e; ++i)
+          CreateMetadataSlot(MDForInst[i].second);
+        MDForInst.clear();
     }
   }
 
@@ -712,28 +674,6 @@ void SlotTracker::processFunction() {
   ST_DEBUG("end processFunction!\n");
 }
 
-/// processMDNode - Process TheMDNode.
-void SlotTracker::processMDNode() {
-  ST_DEBUG("begin processMDNode!\n");
-  mdnNext = 0;
-  CreateMetadataSlot(TheMDNode);
-  TheMDNode = 0;
-  ST_DEBUG("end processMDNode!\n");
-}
-
-/// processNamedMDNode - Process TheNamedMDNode.
-void SlotTracker::processNamedMDNode() {
-  ST_DEBUG("begin processNamedMDNode!\n");
-  mdnNext = 0;
-  for (unsigned i = 0, e = TheNamedMDNode->getNumElements(); i != e; ++i) {
-    MDNode *MD = dyn_cast_or_null<MDNode>(TheNamedMDNode->getElement(i));
-    if (MD)
-      CreateMetadataSlot(MD);
-  }
-  TheNamedMDNode = 0;
-  ST_DEBUG("end processNamedMDNode!\n");
-}
-
 /// Clean up after incorporating a function. This is the only way to get out of
 /// the function incorporation state that affects get*Slot/Create*Slot. Function
 /// incorporation state is indicated by TheFunction != 0.
@@ -755,13 +695,13 @@ int SlotTracker::getGlobalSlot(const GlobalValue *V) {
   return MI == mMap.end() ? -1 : (int)MI->second;
 }
 
-/// getGlobalSlot - Get the slot number of a MDNode.
+/// getMetadataSlot - Get the slot number of an MDNode.
 int SlotTracker::getMetadataSlot(const MDNode *N) {
   // Check for uninitialized state and do lazy initialization.
   initialize();
 
   // Find the type plane in the module map
-  ValueMap::iterator MI = mdnMap.find(N);
+  mdn_iterator MI = mdnMap.find(N);
   return MI == mdnMap.end() ? -1 : (int)MI->second;
 }
 
@@ -781,8 +721,7 @@ int SlotTracker::getLocalSlot(const Value *V) {
 /// CreateModuleSlot - Insert the specified GlobalValue* into the slot table.
 void SlotTracker::CreateModuleSlot(const GlobalValue *V) {
   assert(V && "Can't insert a null Value into SlotTracker!");
-  assert(V->getType() != Type::getVoidTy(V->getContext()) &&
-         "Doesn't need a slot!");
+  assert(!V->getType()->isVoidTy() && "Doesn't need a slot!");
   assert(!V->hasName() && "Doesn't need a slot!");
 
   unsigned DestSlot = mNext++;
@@ -798,8 +737,7 @@ void SlotTracker::CreateModuleSlot(const GlobalValue *V) {
 
 /// CreateSlot - Create a new slot for the specified value if it has no name.
 void SlotTracker::CreateFunctionSlot(const Value *V) {
-  assert(V->getType() != Type::getVoidTy(TheFunction->getContext()) &&
-         !V->hasName() && "Doesn't need a slot!");
+  assert(!V->getType()->isVoidTy() && !V->hasName() && "Doesn't need a slot!");
 
   unsigned DestSlot = fNext++;
   fMap[V] = DestSlot;
@@ -813,24 +751,22 @@
 void SlotTracker::CreateMetadataSlot(const MDNode *N) {
   assert(N && "Can't insert a null Value into SlotTracker!");
 
-  // Don't insert if N contains an instruction.
-  for (unsigned i = 0, e = N->getNumElements(); i != e; ++i)
-    if (N->getElement(i) && isa<Instruction>(N->getElement(i)))
-      return;
+  // Don't insert if N is function-local metadata; such nodes are always
+  // printed inline.
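+  // (A node is function-local when it refers to function-level values such as
+  // instructions or arguments, so a module-level slot number would be
+  // meaningless for it.)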
+ if (N->isFunctionLocal()) + return; - ValueMap::iterator I = mdnMap.find(N); + mdn_iterator I = mdnMap.find(N); if (I != mdnMap.end()) return; unsigned DestSlot = mdnNext++; mdnMap[N] = DestSlot; - for (unsigned i = 0, e = N->getNumElements(); i != e; ++i) { - const Value *TV = N->getElement(i); - if (TV) - if (const MDNode *N2 = dyn_cast<MDNode>(TV)) - CreateMetadataSlot(N2); - } + // Recursively add any MDNodes referenced by operands. + for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) + if (const MDNode *Op = dyn_cast_or_null<MDNode>(N->getOperand(i))) + CreateMetadataSlot(Op); } //===----------------------------------------------------------------------===// @@ -846,95 +782,36 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V, static const char *getPredicateText(unsigned predicate) { const char * pred = "unknown"; switch (predicate) { - case FCmpInst::FCMP_FALSE: pred = "false"; break; - case FCmpInst::FCMP_OEQ: pred = "oeq"; break; - case FCmpInst::FCMP_OGT: pred = "ogt"; break; - case FCmpInst::FCMP_OGE: pred = "oge"; break; - case FCmpInst::FCMP_OLT: pred = "olt"; break; - case FCmpInst::FCMP_OLE: pred = "ole"; break; - case FCmpInst::FCMP_ONE: pred = "one"; break; - case FCmpInst::FCMP_ORD: pred = "ord"; break; - case FCmpInst::FCMP_UNO: pred = "uno"; break; - case FCmpInst::FCMP_UEQ: pred = "ueq"; break; - case FCmpInst::FCMP_UGT: pred = "ugt"; break; - case FCmpInst::FCMP_UGE: pred = "uge"; break; - case FCmpInst::FCMP_ULT: pred = "ult"; break; - case FCmpInst::FCMP_ULE: pred = "ule"; break; - case FCmpInst::FCMP_UNE: pred = "une"; break; - case FCmpInst::FCMP_TRUE: pred = "true"; break; - case ICmpInst::ICMP_EQ: pred = "eq"; break; - case ICmpInst::ICMP_NE: pred = "ne"; break; - case ICmpInst::ICMP_SGT: pred = "sgt"; break; - case ICmpInst::ICMP_SGE: pred = "sge"; break; - case ICmpInst::ICMP_SLT: pred = "slt"; break; - case ICmpInst::ICMP_SLE: pred = "sle"; break; - case ICmpInst::ICMP_UGT: pred = "ugt"; break; - case ICmpInst::ICMP_UGE: pred = "uge"; break; - case ICmpInst::ICMP_ULT: pred = "ult"; break; - case ICmpInst::ICMP_ULE: pred = "ule"; break; + case FCmpInst::FCMP_FALSE: pred = "false"; break; + case FCmpInst::FCMP_OEQ: pred = "oeq"; break; + case FCmpInst::FCMP_OGT: pred = "ogt"; break; + case FCmpInst::FCMP_OGE: pred = "oge"; break; + case FCmpInst::FCMP_OLT: pred = "olt"; break; + case FCmpInst::FCMP_OLE: pred = "ole"; break; + case FCmpInst::FCMP_ONE: pred = "one"; break; + case FCmpInst::FCMP_ORD: pred = "ord"; break; + case FCmpInst::FCMP_UNO: pred = "uno"; break; + case FCmpInst::FCMP_UEQ: pred = "ueq"; break; + case FCmpInst::FCMP_UGT: pred = "ugt"; break; + case FCmpInst::FCMP_UGE: pred = "uge"; break; + case FCmpInst::FCMP_ULT: pred = "ult"; break; + case FCmpInst::FCMP_ULE: pred = "ule"; break; + case FCmpInst::FCMP_UNE: pred = "une"; break; + case FCmpInst::FCMP_TRUE: pred = "true"; break; + case ICmpInst::ICMP_EQ: pred = "eq"; break; + case ICmpInst::ICMP_NE: pred = "ne"; break; + case ICmpInst::ICMP_SGT: pred = "sgt"; break; + case ICmpInst::ICMP_SGE: pred = "sge"; break; + case ICmpInst::ICMP_SLT: pred = "slt"; break; + case ICmpInst::ICMP_SLE: pred = "sle"; break; + case ICmpInst::ICMP_UGT: pred = "ugt"; break; + case ICmpInst::ICMP_UGE: pred = "uge"; break; + case ICmpInst::ICMP_ULT: pred = "ult"; break; + case ICmpInst::ICMP_ULE: pred = "ule"; break; } return pred; } -static void WriteMDNodeComment(const MDNode *Node, - formatted_raw_ostream &Out) { - if (Node->getNumElements() < 1) - return; - ConstantInt *CI = 
dyn_cast_or_null<ConstantInt>(Node->getElement(0)); - if (!CI) return; - unsigned Val = CI->getZExtValue(); - unsigned Tag = Val & ~LLVMDebugVersionMask; - if (Val >= LLVMDebugVersion) { - if (Tag == dwarf::DW_TAG_auto_variable) - Out << "; [ DW_TAG_auto_variable ]"; - else if (Tag == dwarf::DW_TAG_arg_variable) - Out << "; [ DW_TAG_arg_variable ]"; - else if (Tag == dwarf::DW_TAG_return_variable) - Out << "; [ DW_TAG_return_variable ]"; - else if (Tag == dwarf::DW_TAG_vector_type) - Out << "; [ DW_TAG_vector_type ]"; - else if (Tag == dwarf::DW_TAG_user_base) - Out << "; [ DW_TAG_user_base ]"; - else - Out << "; [" << dwarf::TagString(Tag) << " ]"; - } -} - -static void WriteMDNodes(formatted_raw_ostream &Out, TypePrinting &TypePrinter, - SlotTracker &Machine) { - SmallVector<const MDNode *, 16> Nodes; - Nodes.resize(Machine.mdnSize()); - for (SlotTracker::ValueMap::iterator I = - Machine.mdnBegin(), E = Machine.mdnEnd(); I != E; ++I) - Nodes[I->second] = cast<MDNode>(I->first); - - for (unsigned i = 0, e = Nodes.size(); i != e; ++i) { - Out << '!' << i << " = metadata "; - const MDNode *Node = Nodes[i]; - Out << "!{"; - for (unsigned mi = 0, me = Node->getNumElements(); mi != me; ++mi) { - const Value *V = Node->getElement(mi); - if (!V) - Out << "null"; - else if (const MDNode *N = dyn_cast<MDNode>(V)) { - Out << "metadata "; - Out << '!' << Machine.getMetadataSlot(N); - } - else { - TypePrinter.print(V->getType(), Out); - Out << ' '; - WriteAsOperandInternal(Out, Node->getElement(mi), - &TypePrinter, &Machine); - } - if (mi + 1 != me) - Out << ", "; - } - - Out << "}"; - WriteMDNodeComment(Node, Out); - Out << "\n"; - } -} static void WriteOptimizationInfo(raw_ostream &Out, const User *U) { if (const OverflowingBinaryOperator *OBO = @@ -1197,6 +1074,27 @@ static void WriteConstantInt(raw_ostream &Out, const Constant *CV, Out << "<placeholder or erroneous Constant>"; } +static void WriteMDNodeBodyInternal(raw_ostream &Out, const MDNode *Node, + TypePrinting *TypePrinter, + SlotTracker *Machine) { + Out << "!{"; + for (unsigned mi = 0, me = Node->getNumOperands(); mi != me; ++mi) { + const Value *V = Node->getOperand(mi); + if (V == 0) + Out << "null"; + else { + TypePrinter->print(V->getType(), Out); + Out << ' '; + WriteAsOperandInternal(Out, Node->getOperand(mi), + TypePrinter, Machine); + } + if (mi + 1 != me) + Out << ", "; + } + + Out << "}"; +} + /// WriteAsOperand - Write the name of the specified value out to the specified /// ostream. This can be useful when you just want to print int %reg126, not @@ -1232,22 +1130,9 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V, } if (const MDNode *N = dyn_cast<MDNode>(V)) { - if (Machine->getMetadataSlot(N) == -1) { + if (N->isFunctionLocal()) { // Print metadata inline, not via slot reference number. 
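      // (For example, such an operand prints as '!{i32 %x}' in place, rather
      // than as a numbered '!7' reference into the module-level list.)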
- Out << "!{"; - for (unsigned mi = 0, me = N->getNumElements(); mi != me; ++mi) { - const Value *Val = N->getElement(mi); - if (!Val) - Out << "null"; - else { - TypePrinter->print(N->getElement(0)->getType(), Out); - Out << ' '; - WriteAsOperandInternal(Out, N->getElement(0), TypePrinter, Machine); - } - if (mi + 1 != me) - Out << ", "; - } - Out << '}'; + WriteMDNodeBodyInternal(Out, N, TypePrinter, Machine); return; } @@ -1331,48 +1216,28 @@ class AssemblyWriter { TypePrinting TypePrinter; AssemblyAnnotationWriter *AnnotationWriter; std::vector<const Type*> NumberedTypes; - DenseMap<unsigned, StringRef> MDNames; - + SmallVector<StringRef, 8> MDNames; + public: inline AssemblyWriter(formatted_raw_ostream &o, SlotTracker &Mac, const Module *M, AssemblyAnnotationWriter *AAW) : Out(o), Machine(Mac), TheModule(M), AnnotationWriter(AAW) { AddModuleTypesToPrinter(TypePrinter, NumberedTypes, M); - // FIXME: Provide MDPrinter - if (M) { - MetadataContext &TheMetadata = M->getContext().getMetadata(); - SmallVector<std::pair<unsigned, StringRef>, 4> Names; - TheMetadata.getHandlerNames(Names); - for (SmallVector<std::pair<unsigned, StringRef>, 4>::iterator - I = Names.begin(), - E = Names.end(); I != E; ++I) { - MDNames[I->first] = I->second; - } - } - } - - void write(const Module *M) { printModule(M); } - - void write(const GlobalValue *G) { - if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(G)) - printGlobal(GV); - else if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(G)) - printAlias(GA); - else if (const Function *F = dyn_cast<Function>(G)) - printFunction(F); - else - llvm_unreachable("Unknown global"); + if (M) + M->getMDKindNames(MDNames); } - void write(const BasicBlock *BB) { printBasicBlock(BB); } - void write(const Instruction *I) { printInstruction(*I); } + void printMDNodeBody(const MDNode *MD); + void printNamedMDNode(const NamedMDNode *NMD); + + void printModule(const Module *M); void writeOperand(const Value *Op, bool PrintType); void writeParamOperand(const Value *Operand, Attributes Attrs); -private: - void printModule(const Module *M); + void writeAllMDNodes(); + void printTypeSymbolTable(const TypeSymbolTable &ST); void printGlobal(const GlobalVariable *GV); void printAlias(const GlobalAlias *GV); @@ -1380,6 +1245,7 @@ private: void printArgument(const Argument *FA, Attributes Attrs); void printBasicBlock(const BasicBlock *BB); void printInstruction(const Instruction &I); +private: // printInfoComment - Print a little comment after the instruction indicating // which slot it occupies. 
@@ -1391,29 +1257,30 @@ private: void AssemblyWriter::writeOperand(const Value *Operand, bool PrintType) { if (Operand == 0) { Out << "<null operand!>"; - } else { - if (PrintType) { - TypePrinter.print(Operand->getType(), Out); - Out << ' '; - } - WriteAsOperandInternal(Out, Operand, &TypePrinter, &Machine); + return; + } + if (PrintType) { + TypePrinter.print(Operand->getType(), Out); + Out << ' '; } + WriteAsOperandInternal(Out, Operand, &TypePrinter, &Machine); } void AssemblyWriter::writeParamOperand(const Value *Operand, Attributes Attrs) { if (Operand == 0) { Out << "<null operand!>"; - } else { - // Print the type - TypePrinter.print(Operand->getType(), Out); - // Print parameter attributes list - if (Attrs != Attribute::None) - Out << ' ' << Attribute::getAsString(Attrs); - Out << ' '; - // Print the operand - WriteAsOperandInternal(Out, Operand, &TypePrinter, &Machine); + return; } + + // Print the type + TypePrinter.print(Operand->getType(), Out); + // Print parameter attributes list + if (Attrs != Attribute::None) + Out << ' ' << Attribute::getAsString(Attrs); + Out << ' '; + // Print the operand + WriteAsOperandInternal(Out, Operand, &TypePrinter, &Machine); } void AssemblyWriter::printModule(const Module *M) { @@ -1486,23 +1353,31 @@ void AssemblyWriter::printModule(const Module *M) { // Output named metadata. if (!M->named_metadata_empty()) Out << '\n'; + for (Module::const_named_metadata_iterator I = M->named_metadata_begin(), - E = M->named_metadata_end(); I != E; ++I) { - const NamedMDNode *NMD = I; - Out << "!" << NMD->getName() << " = !{"; - for (unsigned i = 0, e = NMD->getNumElements(); i != e; ++i) { - if (i) Out << ", "; - MDNode *MD = dyn_cast_or_null<MDNode>(NMD->getElement(i)); - Out << '!' << Machine.getMetadataSlot(MD); - } - Out << "}\n"; - } + E = M->named_metadata_end(); I != E; ++I) + printNamedMDNode(I); // Output metadata. - if (!Machine.mdnEmpty()) Out << '\n'; - WriteMDNodes(Out, TypePrinter, Machine); + if (!Machine.mdn_empty()) { + Out << '\n'; + writeAllMDNodes(); + } +} + +void AssemblyWriter::printNamedMDNode(const NamedMDNode *NMD) { + Out << "!" << NMD->getName() << " = !{"; + for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) { + if (i) Out << ", "; + // FIXME: Change accessor to be typesafe. + // FIXME: This doesn't handle null?? + MDNode *MD = cast_or_null<MDNode>(NMD->getOperand(i)); + Out << '!' << Machine.getMetadataSlot(MD); + } + Out << "}\n"; } + static void PrintLinkage(GlobalValue::LinkageTypes LT, formatted_raw_ostream &Out) { switch (LT) { @@ -1531,7 +1406,6 @@ static void PrintLinkage(GlobalValue::LinkageTypes LT, static void PrintVisibility(GlobalValue::VisibilityTypes Vis, formatted_raw_ostream &Out) { switch (Vis) { - default: llvm_unreachable("Invalid visibility style!"); case GlobalValue::DefaultVisibility: break; case GlobalValue::HiddenVisibility: Out << "hidden "; break; case GlobalValue::ProtectedVisibility: Out << "protected "; break; @@ -1806,12 +1680,12 @@ void AssemblyWriter::printBasicBlock(const BasicBlock *BB) { /// which slot it occupies. 
/// void AssemblyWriter::printInfoComment(const Value &V) { - if (V.getType() != Type::getVoidTy(V.getContext())) { - Out.PadToColumn(50); - Out << "; <"; - TypePrinter.print(V.getType(), Out); - Out << "> [#uses=" << V.getNumUses() << ']'; // Output # uses - } + if (V.getType()->isVoidTy()) return; + + Out.PadToColumn(50); + Out << "; <"; + TypePrinter.print(V.getType(), Out); + Out << "> [#uses=" << V.getNumUses() << ']'; // Output # uses } // This member is called for each Instruction in a function.. @@ -1825,7 +1699,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) { if (I.hasName()) { PrintLLVMName(Out, &I); Out << " = "; - } else if (I.getType() != Type::getVoidTy(I.getContext())) { + } else if (!I.getType()->isVoidTy()) { // Print out the def slot taken. int SlotNum = Machine.getLocalSlot(&I); if (SlotNum == -1) @@ -2076,27 +1950,68 @@ void AssemblyWriter::printInstruction(const Instruction &I) { } } - // Print post operand alignment for load/store + // Print post operand alignment for load/store. if (isa<LoadInst>(I) && cast<LoadInst>(I).getAlignment()) { Out << ", align " << cast<LoadInst>(I).getAlignment(); } else if (isa<StoreInst>(I) && cast<StoreInst>(I).getAlignment()) { Out << ", align " << cast<StoreInst>(I).getAlignment(); } - // Print Metadata info + // Print Metadata info. if (!MDNames.empty()) { - MetadataContext &TheMetadata = I.getContext().getMetadata(); - typedef SmallVector<std::pair<unsigned, TrackingVH<MDNode> >, 2> MDMapTy; - MDMapTy MDs; - TheMetadata.getMDs(&I, MDs); - for (MDMapTy::const_iterator MI = MDs.begin(), ME = MDs.end(); MI != ME; - ++MI) - Out << ", !" << MDNames[MI->first] - << " !" << Machine.getMetadataSlot(MI->second); + SmallVector<std::pair<unsigned, MDNode*>, 4> InstMD; + I.getAllMetadata(InstMD); + for (unsigned i = 0, e = InstMD.size(); i != e; ++i) + Out << ", !" << MDNames[InstMD[i].first] + << " !" << Machine.getMetadataSlot(InstMD[i].second); } printInfoComment(I); } +static void WriteMDNodeComment(const MDNode *Node, + formatted_raw_ostream &Out) { + if (Node->getNumOperands() < 1) + return; + ConstantInt *CI = dyn_cast_or_null<ConstantInt>(Node->getOperand(0)); + if (!CI) return; + unsigned Val = CI->getZExtValue(); + unsigned Tag = Val & ~LLVMDebugVersionMask; + if (Val < LLVMDebugVersion) + return; + + Out.PadToColumn(50); + if (Tag == dwarf::DW_TAG_auto_variable) + Out << "; [ DW_TAG_auto_variable ]"; + else if (Tag == dwarf::DW_TAG_arg_variable) + Out << "; [ DW_TAG_arg_variable ]"; + else if (Tag == dwarf::DW_TAG_return_variable) + Out << "; [ DW_TAG_return_variable ]"; + else if (Tag == dwarf::DW_TAG_vector_type) + Out << "; [ DW_TAG_vector_type ]"; + else if (Tag == dwarf::DW_TAG_user_base) + Out << "; [ DW_TAG_user_base ]"; + else if (const char *TagName = dwarf::TagString(Tag)) + Out << "; [ " << TagName << " ]"; +} + +void AssemblyWriter::writeAllMDNodes() { + SmallVector<const MDNode *, 16> Nodes; + Nodes.resize(Machine.mdn_size()); + for (SlotTracker::mdn_iterator I = Machine.mdn_begin(), E = Machine.mdn_end(); + I != E; ++I) + Nodes[I->second] = cast<MDNode>(I->first); + + for (unsigned i = 0, e = Nodes.size(); i != e; ++i) { + Out << '!' 
<< i << " = metadata "; + printMDNodeBody(Nodes[i]); + } +} + +void AssemblyWriter::printMDNodeBody(const MDNode *Node) { + WriteMDNodeBodyInternal(Out, Node, &TypePrinter, &Machine); + WriteMDNodeComment(Node, Out); + Out << "\n"; +} //===----------------------------------------------------------------------===// // External Interface declarations @@ -2106,7 +2021,7 @@ void Module::print(raw_ostream &ROS, AssemblyAnnotationWriter *AAW) const { SlotTracker SlotTable(this); formatted_raw_ostream OS(ROS); AssemblyWriter W(OS, SlotTable, this, AAW); - W.write(this); + W.printModule(this); } void Type::print(raw_ostream &OS) const { @@ -2126,53 +2041,36 @@ void Value::print(raw_ostream &ROS, AssemblyAnnotationWriter *AAW) const { if (const Instruction *I = dyn_cast<Instruction>(this)) { const Function *F = I->getParent() ? I->getParent()->getParent() : 0; SlotTracker SlotTable(F); - AssemblyWriter W(OS, SlotTable, F ? F->getParent() : 0, AAW); - W.write(I); + AssemblyWriter W(OS, SlotTable, getModuleFromVal(I), AAW); + W.printInstruction(*I); } else if (const BasicBlock *BB = dyn_cast<BasicBlock>(this)) { SlotTracker SlotTable(BB->getParent()); - AssemblyWriter W(OS, SlotTable, - BB->getParent() ? BB->getParent()->getParent() : 0, AAW); - W.write(BB); + AssemblyWriter W(OS, SlotTable, getModuleFromVal(BB), AAW); + W.printBasicBlock(BB); } else if (const GlobalValue *GV = dyn_cast<GlobalValue>(this)) { SlotTracker SlotTable(GV->getParent()); AssemblyWriter W(OS, SlotTable, GV->getParent(), AAW); - W.write(GV); - } else if (const MDString *MDS = dyn_cast<MDString>(this)) { - TypePrinting TypePrinter; - TypePrinter.print(MDS->getType(), OS); - OS << ' '; - OS << "!\""; - PrintEscapedString(MDS->getString(), OS); - OS << '"'; + if (const GlobalVariable *V = dyn_cast<GlobalVariable>(GV)) + W.printGlobal(V); + else if (const Function *F = dyn_cast<Function>(GV)) + W.printFunction(F); + else + W.printAlias(cast<GlobalAlias>(GV)); } else if (const MDNode *N = dyn_cast<MDNode>(this)) { - SlotTracker SlotTable(N); - TypePrinting TypePrinter; - SlotTable.initialize(); - WriteMDNodes(OS, TypePrinter, SlotTable); + SlotTracker SlotTable((Function*)0); + AssemblyWriter W(OS, SlotTable, 0, AAW); + W.printMDNodeBody(N); } else if (const NamedMDNode *N = dyn_cast<NamedMDNode>(this)) { - SlotTracker SlotTable(N); - TypePrinting TypePrinter; - SlotTable.initialize(); - OS << "!" << N->getName() << " = !{"; - for (unsigned i = 0, e = N->getNumElements(); i != e; ++i) { - if (i) OS << ", "; - MDNode *MD = dyn_cast_or_null<MDNode>(N->getElement(i)); - if (MD) - OS << '!' << SlotTable.getMetadataSlot(MD); - else - OS << "null"; - } - OS << "}\n"; - WriteMDNodes(OS, TypePrinter, SlotTable); + SlotTracker SlotTable(N->getParent()); + AssemblyWriter W(OS, SlotTable, N->getParent(), AAW); + W.printNamedMDNode(N); } else if (const Constant *C = dyn_cast<Constant>(this)) { TypePrinting TypePrinter; TypePrinter.print(C->getType(), OS); OS << ' '; WriteConstantInt(OS, C, TypePrinter, 0); - } else if (const Argument *A = dyn_cast<Argument>(this)) { - WriteAsOperand(OS, this, true, - A->getParent() ? A->getParent()->getParent() : 0); - } else if (isa<InlineAsm>(this)) { + } else if (isa<InlineAsm>(this) || isa<MDString>(this) || + isa<Argument>(this)) { WriteAsOperand(OS, this, true, 0); } else { // Otherwise we don't know what it is. 
Call the virtual function to diff --git a/lib/VMCore/BasicBlock.cpp b/lib/VMCore/BasicBlock.cpp index c7f7f53..16437bc 100644 --- a/lib/VMCore/BasicBlock.cpp +++ b/lib/VMCore/BasicBlock.cpp @@ -35,7 +35,7 @@ LLVMContext &BasicBlock::getContext() const { // Explicit instantiation of SymbolTableListTraits since some of the methods // are not in the public header file... -template class SymbolTableListTraits<Instruction, BasicBlock>; +template class llvm::SymbolTableListTraits<Instruction, BasicBlock>; BasicBlock::BasicBlock(LLVMContext &C, const Twine &Name, Function *NewParent, diff --git a/lib/VMCore/CMakeLists.txt b/lib/VMCore/CMakeLists.txt index 9b17d4b..61d01ee 100644 --- a/lib/VMCore/CMakeLists.txt +++ b/lib/VMCore/CMakeLists.txt @@ -13,6 +13,7 @@ add_llvm_library(LLVMCore Instruction.cpp Instructions.cpp IntrinsicInst.cpp + IRBuilder.cpp LLVMContext.cpp LeakDetector.cpp Mangler.cpp diff --git a/lib/VMCore/ConstantFold.cpp b/lib/VMCore/ConstantFold.cpp index 7f713d1..2449739 100644 --- a/lib/VMCore/ConstantFold.cpp +++ b/lib/VMCore/ConstantFold.cpp @@ -1839,14 +1839,16 @@ Constant *llvm::ConstantFoldCompareInstruction(LLVMContext &Context, } } - if (!isa<ConstantExpr>(C1) && isa<ConstantExpr>(C2)) { + if ((!isa<ConstantExpr>(C1) && isa<ConstantExpr>(C2)) || + (C1->isNullValue() && !C2->isNullValue())) { // If C2 is a constant expr and C1 isn't, flip them around and fold the // other way if possible. + // Also, if C1 is null and C2 isn't, flip them around. switch (pred) { case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_NE: // No change of predicate required. - return ConstantFoldCompareInstruction(Context, pred, C2, C1); + return ConstantExpr::getICmp(pred, C2, C1); case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_SLT: @@ -1858,7 +1860,7 @@ Constant *llvm::ConstantFoldCompareInstruction(LLVMContext &Context, case ICmpInst::ICMP_SGE: // Change the predicate as necessary to swap the operands. pred = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)pred); - return ConstantFoldCompareInstruction(Context, pred, C2, C1); + return ConstantExpr::getICmp(pred, C2, C1); default: // These predicates cannot be flopped around. break; diff --git a/lib/VMCore/Constants.cpp b/lib/VMCore/Constants.cpp index a62f75b..e3c6144 100644 --- a/lib/VMCore/Constants.cpp +++ b/lib/VMCore/Constants.cpp @@ -481,15 +481,12 @@ Constant *ConstantArray::get(const ArrayType *Ty, // If this is an all-zero array, return a ConstantAggregateZero object if (!V.empty()) { Constant *C = V[0]; - if (!C->isNullValue()) { - // Implicitly locked. + if (!C->isNullValue()) return pImpl->ArrayConstants.getOrCreate(Ty, V); - } + for (unsigned i = 1, e = V.size(); i != e; ++i) - if (V[i] != C) { - // Implicitly locked. + if (V[i] != C) return pImpl->ArrayConstants.getOrCreate(Ty, V); - } } return ConstantAggregateZero::get(Ty); @@ -550,7 +547,6 @@ Constant* ConstantStruct::get(const StructType* T, // Create a ConstantAggregateZero value if all elements are zeros... for (unsigned i = 0, e = V.size(); i != e; ++i) if (!V[i]->isNullValue()) - // Implicitly locked. return pImpl->StructConstants.getOrCreate(T, V); return ConstantAggregateZero::get(T); @@ -613,7 +609,6 @@ Constant* ConstantVector::get(const VectorType* T, if (isUndef) return UndefValue::get(T); - // Implicitly locked. 
return pImpl->VectorConstants.getOrCreate(T, V); } @@ -627,6 +622,12 @@ Constant* ConstantVector::get(Constant* const* Vals, unsigned NumVals) { return get(std::vector<Constant*>(Vals, Vals+NumVals)); } +Constant* ConstantExpr::getNSWNeg(Constant* C) { + assert(C->getType()->isIntOrIntVector() && + "Cannot NEG a nonintegral value!"); + return getNSWSub(ConstantFP::getZeroValueForNegation(C->getType()), C); +} + Constant* ConstantExpr::getNSWAdd(Constant* C1, Constant* C2) { return getTy(C1->getType(), Instruction::Add, C1, C2, OverflowingBinaryOperator::NoSignedWrap); @@ -637,6 +638,11 @@ Constant* ConstantExpr::getNSWSub(Constant* C1, Constant* C2) { OverflowingBinaryOperator::NoSignedWrap); } +Constant* ConstantExpr::getNSWMul(Constant* C1, Constant* C2) { + return getTy(C1->getType(), Instruction::Mul, C1, C2, + OverflowingBinaryOperator::NoSignedWrap); +} + Constant* ConstantExpr::getExactSDiv(Constant* C1, Constant* C2) { return getTy(C1->getType(), Instruction::SDiv, C1, C2, SDivOperator::IsExact); @@ -752,14 +758,14 @@ ConstantExpr::getWithOperandReplaced(unsigned OpNo, Constant *Op) const { ConstantExpr::getGetElementPtr(Op, &Ops[0], Ops.size()); Ops[OpNo-1] = Op; return cast<GEPOperator>(this)->isInBounds() ? - ConstantExpr::getInBoundsGetElementPtr(getOperand(0), &Ops[0], Ops.size()) : + ConstantExpr::getInBoundsGetElementPtr(getOperand(0), &Ops[0],Ops.size()): ConstantExpr::getGetElementPtr(getOperand(0), &Ops[0], Ops.size()); } default: assert(getNumOperands() == 2 && "Must be binary operator?"); Op0 = (OpNo == 0) ? Op : getOperand(0); Op1 = (OpNo == 1) ? Op : getOperand(1); - return ConstantExpr::get(getOpcode(), Op0, Op1, SubclassData); + return ConstantExpr::get(getOpcode(), Op0, Op1, SubclassOptionalData); } } @@ -809,7 +815,7 @@ getWithOperands(Constant* const *Ops, unsigned NumOps) const { return ConstantExpr::getCompare(getPredicate(), Ops[0], Ops[1]); default: assert(getNumOperands() == 2 && "Must be binary operator?"); - return ConstantExpr::get(getOpcode(), Ops[0], Ops[1], SubclassData); + return ConstantExpr::get(getOpcode(), Ops[0], Ops[1], SubclassOptionalData); } } @@ -883,14 +889,12 @@ ConstantAggregateZero* ConstantAggregateZero::get(const Type* Ty) { "Cannot create an aggregate zero of non-aggregate type!"); LLVMContextImpl *pImpl = Ty->getContext().pImpl; - // Implicitly locked. return pImpl->AggZeroConstants.getOrCreate(Ty, 0); } /// destroyConstant - Remove the constant from the constant table... /// void ConstantAggregateZero::destroyConstant() { - // Implicitly locked. getType()->getContext().pImpl->AggZeroConstants.remove(this); destroyConstantImpl(); } @@ -898,7 +902,6 @@ void ConstantAggregateZero::destroyConstant() { /// destroyConstant - Remove the constant from the constant table... /// void ConstantArray::destroyConstant() { - // Implicitly locked. getType()->getContext().pImpl->ArrayConstants.remove(this); destroyConstantImpl(); } @@ -963,7 +966,6 @@ namespace llvm { // destroyConstant - Remove the constant from the constant table... // void ConstantStruct::destroyConstant() { - // Implicitly locked. getType()->getContext().pImpl->StructConstants.remove(this); destroyConstantImpl(); } @@ -971,7 +973,6 @@ void ConstantStruct::destroyConstant() { // destroyConstant - Remove the constant from the constant table... // void ConstantVector::destroyConstant() { - // Implicitly locked. 
getType()->getContext().pImpl->VectorConstants.remove(this); destroyConstantImpl(); } @@ -1007,14 +1008,12 @@ Constant *ConstantVector::getSplatValue() { // ConstantPointerNull *ConstantPointerNull::get(const PointerType *Ty) { - // Implicitly locked. return Ty->getContext().pImpl->NullPtrConstants.getOrCreate(Ty, 0); } // destroyConstant - Remove the constant from the constant table... // void ConstantPointerNull::destroyConstant() { - // Implicitly locked. getType()->getContext().pImpl->NullPtrConstants.remove(this); destroyConstantImpl(); } @@ -1126,7 +1125,6 @@ static inline Constant *getFoldedCast( std::vector<Constant*> argVec(1, C); ExprMapKeyType Key(opc, argVec); - // Implicitly locked. return pImpl->ExprConstants.getOrCreate(Ty, Key); } @@ -1372,8 +1370,6 @@ Constant *ConstantExpr::getTy(const Type *ReqTy, unsigned Opcode, ExprMapKeyType Key(Opcode, argVec, 0, Flags); LLVMContextImpl *pImpl = ReqTy->getContext().pImpl; - - // Implicitly locked. return pImpl->ExprConstants.getOrCreate(ReqTy, Key); } @@ -1524,8 +1520,6 @@ Constant *ConstantExpr::getSelectTy(const Type *ReqTy, Constant *C, ExprMapKeyType Key(Instruction::Select, argVec); LLVMContextImpl *pImpl = ReqTy->getContext().pImpl; - - // Implicitly locked. return pImpl->ExprConstants.getOrCreate(ReqTy, Key); } @@ -1553,8 +1547,6 @@ Constant *ConstantExpr::getGetElementPtrTy(const Type *ReqTy, Constant *C, const ExprMapKeyType Key(Instruction::GetElementPtr, ArgVec); LLVMContextImpl *pImpl = ReqTy->getContext().pImpl; - - // Implicitly locked. return pImpl->ExprConstants.getOrCreate(ReqTy, Key); } @@ -1584,8 +1576,6 @@ Constant *ConstantExpr::getInBoundsGetElementPtrTy(const Type *ReqTy, GEPOperator::IsInBounds); LLVMContextImpl *pImpl = ReqTy->getContext().pImpl; - - // Implicitly locked. return pImpl->ExprConstants.getOrCreate(ReqTy, Key); } @@ -1639,8 +1629,6 @@ ConstantExpr::getICmp(unsigned short pred, Constant* LHS, Constant* RHS) { const ExprMapKeyType Key(Instruction::ICmp, ArgVec, pred); LLVMContextImpl *pImpl = LHS->getType()->getContext().pImpl; - - // Implicitly locked. return pImpl->ExprConstants.getOrCreate(Type::getInt1Ty(LHS->getContext()), Key); } @@ -1662,8 +1650,6 @@ ConstantExpr::getFCmp(unsigned short pred, Constant* LHS, Constant* RHS) { const ExprMapKeyType Key(Instruction::FCmp, ArgVec, pred); LLVMContextImpl *pImpl = LHS->getType()->getContext().pImpl; - - // Implicitly locked. return pImpl->ExprConstants.getOrCreate(Type::getInt1Ty(LHS->getContext()), Key); } @@ -1672,15 +1658,13 @@ Constant *ConstantExpr::getExtractElementTy(const Type *ReqTy, Constant *Val, Constant *Idx) { if (Constant *FC = ConstantFoldExtractElementInstruction( ReqTy->getContext(), Val, Idx)) - return FC; // Fold a few common cases... + return FC; // Fold a few common cases. // Look up the constant in the table first to ensure uniqueness std::vector<Constant*> ArgVec(1, Val); ArgVec.push_back(Idx); const ExprMapKeyType Key(Instruction::ExtractElement,ArgVec); LLVMContextImpl *pImpl = ReqTy->getContext().pImpl; - - // Implicitly locked. return pImpl->ExprConstants.getOrCreate(ReqTy, Key); } @@ -1697,7 +1681,7 @@ Constant *ConstantExpr::getInsertElementTy(const Type *ReqTy, Constant *Val, Constant *Elt, Constant *Idx) { if (Constant *FC = ConstantFoldInsertElementInstruction( ReqTy->getContext(), Val, Elt, Idx)) - return FC; // Fold a few common cases... + return FC; // Fold a few common cases. 
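  // (The getOrCreate call below uniques the expression in LLVMContextImpl's
  // ExprConstants table, so structurally identical ConstantExprs are a single
  // object and compare equal by pointer.)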
// Look up the constant in the table first to ensure uniqueness std::vector<Constant*> ArgVec(1, Val); ArgVec.push_back(Elt); @@ -1705,8 +1689,6 @@ Constant *ConstantExpr::getInsertElementTy(const Type *ReqTy, Constant *Val, const ExprMapKeyType Key(Instruction::InsertElement,ArgVec); LLVMContextImpl *pImpl = ReqTy->getContext().pImpl; - - // Implicitly locked. return pImpl->ExprConstants.getOrCreate(ReqTy, Key); } @@ -1733,8 +1715,6 @@ Constant *ConstantExpr::getShuffleVectorTy(const Type *ReqTy, Constant *V1, const ExprMapKeyType Key(Instruction::ShuffleVector,ArgVec); LLVMContextImpl *pImpl = ReqTy->getContext().pImpl; - - // Implicitly locked. return pImpl->ExprConstants.getOrCreate(ReqTy, Key); } @@ -1903,9 +1883,7 @@ Constant* ConstantExpr::getAShr(Constant* C1, Constant* C2) { // destroyConstant - Remove the constant from the constant table... // void ConstantExpr::destroyConstant() { - // Implicitly locked. - LLVMContextImpl *pImpl = getType()->getContext().pImpl; - pImpl->ExprConstants.remove(this); + getType()->getContext().pImpl->ExprConstants.remove(this); destroyConstantImpl(); } @@ -2185,7 +2163,7 @@ void ConstantExpr::replaceUsesOfWithOnConstant(Value *From, Value *ToV, Constant *C2 = getOperand(1); if (C1 == From) C1 = To; if (C2 == From) C2 = To; - Replacement = ConstantExpr::get(getOpcode(), C1, C2, SubclassData); + Replacement = ConstantExpr::get(getOpcode(), C1, C2, SubclassOptionalData); } else { llvm_unreachable("Unknown ConstantExpr type!"); return; diff --git a/lib/VMCore/Dominators.cpp b/lib/VMCore/Dominators.cpp index 26c02e0..3441750 100644 --- a/lib/VMCore/Dominators.cpp +++ b/lib/VMCore/Dominators.cpp @@ -47,8 +47,8 @@ VerifyDomInfoX("verify-dom-info", cl::location(VerifyDomInfo), // //===----------------------------------------------------------------------===// -TEMPLATE_INSTANTIATION(class DomTreeNodeBase<BasicBlock>); -TEMPLATE_INSTANTIATION(class DominatorTreeBase<BasicBlock>); +TEMPLATE_INSTANTIATION(class llvm::DomTreeNodeBase<BasicBlock>); +TEMPLATE_INSTANTIATION(class llvm::DominatorTreeBase<BasicBlock>); char DominatorTree::ID = 0; static RegisterPass<DominatorTree> diff --git a/lib/VMCore/Function.cpp b/lib/VMCore/Function.cpp index 88e1fe8..e04b6d6 100644 --- a/lib/VMCore/Function.cpp +++ b/lib/VMCore/Function.cpp @@ -29,8 +29,8 @@ using namespace llvm; // Explicit instantiations of SymbolTableListTraits since some of the methods // are not in the public header file... -template class SymbolTableListTraits<Argument, Function>; -template class SymbolTableListTraits<BasicBlock, Function>; +template class llvm::SymbolTableListTraits<Argument, Function>; +template class llvm::SymbolTableListTraits<BasicBlock, Function>; //===----------------------------------------------------------------------===// // Argument Implementation @@ -160,7 +160,7 @@ Function::Function(const FunctionType *Ty, LinkageTypes Linkage, // If the function has arguments, mark them as lazily built. if (Ty->getNumParams()) - SubclassData = 1; // Set the "has lazy arguments" bit. + setValueSubclassData(1); // Set the "has lazy arguments" bit. // Make sure that we get added to a function LeakDetector::addGarbageObject(this); @@ -195,7 +195,8 @@ void Function::BuildLazyArguments() const { } // Clear the lazy arguments bit. 
- const_cast<Function*>(this)->SubclassData &= ~1; + unsigned SDC = getSubclassDataFromValue(); + const_cast<Function*>(this)->setValueSubclassData(SDC &= ~1); } size_t Function::arg_size() const { diff --git a/lib/VMCore/IRBuilder.cpp b/lib/VMCore/IRBuilder.cpp new file mode 100644 index 0000000..699bf0f --- /dev/null +++ b/lib/VMCore/IRBuilder.cpp @@ -0,0 +1,51 @@ +//===---- IRBuilder.cpp - Builder for LLVM Instrs -------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the IRBuilder class, which is used as a convenient way +// to create LLVM instructions with a consistent and simplified interface. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Support/IRBuilder.h" +#include "llvm/GlobalVariable.h" +#include "llvm/Function.h" +#include "llvm/LLVMContext.h" +using namespace llvm; + +/// CreateGlobalString - Make a new global variable with an initializer that +/// has an array of i8 type filled in with the nul-terminated string value +/// specified. If Name is specified, it is the name of the global variable +/// created. +Value *IRBuilderBase::CreateGlobalString(const char *Str, const Twine &Name) { + Constant *StrConstant = ConstantArray::get(Context, Str, true); + Module &M = *BB->getParent()->getParent(); + GlobalVariable *GV = new GlobalVariable(M, StrConstant->getType(), + true, GlobalValue::InternalLinkage, + StrConstant, "", 0, false); + GV->setName(Name); + return GV; +} + +/// SetCurrentDebugLocation - Set location information used by debugging +/// information. +void IRBuilderBase::SetCurrentDebugLocation(MDNode *L) { + if (DbgMDKind == 0) + DbgMDKind = Context.getMDKindID("dbg"); + CurDbgLocation = L; +} + +void IRBuilderBase::SetInstDebugLocation(Instruction *I) const { + if (CurDbgLocation) + I->setMetadata(DbgMDKind, CurDbgLocation); +} + +const Type *IRBuilderBase::getCurrentFunctionReturnType() const { + assert(BB && BB->getParent() && "No current function!"); + return BB->getParent()->getReturnType(); +} diff --git a/lib/VMCore/Instruction.cpp b/lib/VMCore/Instruction.cpp index ce253d6..a5500e6 100644 --- a/lib/VMCore/Instruction.cpp +++ b/lib/VMCore/Instruction.cpp @@ -11,12 +11,10 @@ // //===----------------------------------------------------------------------===// -#include "LLVMContextImpl.h" +#include "llvm/Instruction.h" #include "llvm/Type.h" #include "llvm/Instructions.h" -#include "llvm/Function.h" #include "llvm/Constants.h" -#include "llvm/GlobalVariable.h" #include "llvm/Module.h" #include "llvm/Support/CallSite.h" #include "llvm/Support/LeakDetector.h" @@ -51,10 +49,8 @@ Instruction::Instruction(const Type *ty, unsigned it, Use *Ops, unsigned NumOps, // Out of line virtual method, so the vtable, etc has a home.
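For context, here is a hypothetical use of the CreateGlobalString helper from the new IRBuilder.cpp above. The module/function boilerplate is not part of the patch and assumes the 2.6-era headers and signatures; treat it as illustrative:

    #include "llvm/BasicBlock.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/Function.h"
    #include "llvm/LLVMContext.h"
    #include "llvm/Module.h"
    #include "llvm/Support/IRBuilder.h"
    #include <vector>
    using namespace llvm;

    int main() {
      LLVMContext &Ctx = getGlobalContext();
      Module M("demo", Ctx);

      // Make a trivial "void f()" to build into.
      std::vector<const Type*> NoArgs;
      Function *F = Function::Create(
          FunctionType::get(Type::getVoidTy(Ctx), NoArgs, false),
          GlobalValue::ExternalLinkage, "f", &M);
      IRBuilder<> Builder(BasicBlock::Create(Ctx, "entry", F));

      // The new helper: emits an internal [6 x i8] global holding "hello\0"
      // and returns that GlobalVariable as a Value*.
      Value *Str = Builder.CreateGlobalString("hello", "greeting");
      (void)Str;
      Builder.CreateRetVoid();
      return 0;
    }

Note the returned value is the array-typed global itself, not an i8*; callers that want a pointer to the first character still need a GEP.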
Instruction::~Instruction() { assert(Parent == 0 && "Instruction still linked in the program!"); - if (hasMetadata()) { - LLVMContext &Context = getContext(); - Context.pImpl->TheMetadata.ValueIsDeleted(this); - } + if (hasMetadata()) + removeAllMetadata(); } @@ -464,7 +460,14 @@ bool Instruction::isSafeToSpeculativelyExecute() const { Instruction *Instruction::clone() const { Instruction *New = clone_impl(); New->SubclassOptionalData = SubclassOptionalData; - if (hasMetadata()) - getContext().pImpl->TheMetadata.ValueIsCloned(this, New); + if (!hasMetadata()) + return New; + + // Otherwise, enumerate and copy over metadata from the old instruction to the + // new one. + SmallVector<std::pair<unsigned, MDNode*>, 4> TheMDs; + getAllMetadata(TheMDs); + for (unsigned i = 0, e = TheMDs.size(); i != e; ++i) + New->setMetadata(TheMDs[i].first, TheMDs[i].second); return New; } diff --git a/lib/VMCore/Instructions.cpp b/lib/VMCore/Instructions.cpp index b03ee93..3e9950e 100644 --- a/lib/VMCore/Instructions.cpp +++ b/lib/VMCore/Instructions.cpp @@ -413,7 +413,9 @@ CallInst::CallInst(const CallInst &CI) OperandTraits<CallInst>::op_end(this) - CI.getNumOperands(), CI.getNumOperands()) { setAttributes(CI.getAttributes()); - SubclassData = CI.SubclassData; + setTailCall(CI.isTailCall()); + setCallingConv(CI.getCallingConv()); + Use *OL = OperandList; Use *InOL = CI.OperandList; for (unsigned i = 0, e = CI.getNumOperands(); i != e; ++i) @@ -637,7 +639,7 @@ InvokeInst::InvokeInst(const InvokeInst &II) - II.getNumOperands(), II.getNumOperands()) { setAttributes(II.getAttributes()); - SubclassData = II.SubclassData; + setCallingConv(II.getCallingConv()); Use *OL = OperandList, *InOL = II.OperandList; for (unsigned i = 0, e = II.getNumOperands(); i != e; ++i) OL[i] = InOL[i]; @@ -957,7 +959,7 @@ AllocaInst::~AllocaInst() { void AllocaInst::setAlignment(unsigned Align) { assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!"); - SubclassData = Log2_32(Align) + 1; + setInstructionSubclassData(Log2_32(Align) + 1); assert(getAlignment() == Align && "Alignment representation error!"); } @@ -1092,7 +1094,8 @@ LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile, void LoadInst::setAlignment(unsigned Align) { assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!"); - SubclassData = (SubclassData & 1) | ((Log2_32(Align)+1)<<1); + setInstructionSubclassData((getSubclassDataFromInstruction() & 1) | + ((Log2_32(Align)+1)<<1)); } //===----------------------------------------------------------------------===// @@ -1187,7 +1190,8 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, void StoreInst::setAlignment(unsigned Align) { assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!"); - SubclassData = (SubclassData & 1) | ((Log2_32(Align)+1)<<1); + setInstructionSubclassData((getSubclassDataFromInstruction() & 1) | + ((Log2_32(Align)+1) << 1)); } //===----------------------------------------------------------------------===// @@ -1772,6 +1776,18 @@ BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name, Op->getType(), Name, InsertAtEnd); } +BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name, + Instruction *InsertBefore) { + Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); + return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertBefore); +} + +BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name, + BasicBlock *InsertAtEnd) { + Value *zero = 
ConstantFP::getZeroValueForNegation(Op->getType()); + return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertAtEnd); +} + BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name, Instruction *InsertBefore) { Value *zero = ConstantFP::getZeroValueForNegation(Op->getType()); @@ -2708,7 +2724,7 @@ CmpInst::CmpInst(const Type *ty, OtherOps op, unsigned short predicate, InsertBefore) { Op<0>() = LHS; Op<1>() = RHS; - SubclassData = predicate; + setPredicate((Predicate)predicate); setName(Name); } @@ -2721,7 +2737,7 @@ CmpInst::CmpInst(const Type *ty, OtherOps op, unsigned short predicate, InsertAtEnd) { Op<0>() = LHS; Op<1>() = RHS; - SubclassData = predicate; + setPredicate((Predicate)predicate); setName(Name); } diff --git a/lib/VMCore/IntrinsicInst.cpp b/lib/VMCore/IntrinsicInst.cpp index 5f33d0e..5e0f42e 100644 --- a/lib/VMCore/IntrinsicInst.cpp +++ b/lib/VMCore/IntrinsicInst.cpp @@ -61,11 +61,19 @@ Value *DbgInfoIntrinsic::StripCast(Value *C) { Value *DbgStopPointInst::getFileName() const { // Once the operand indices are verified, update this assert assert(LLVMDebugVersion == (7 << 16) && "Verify operand indices"); - return getContext()->getElement(3); + return getContext()->getOperand(3); } Value *DbgStopPointInst::getDirectory() const { // Once the operand indices are verified, update this assert assert(LLVMDebugVersion == (7 << 16) && "Verify operand indices"); - return getContext()->getElement(4); + return getContext()->getOperand(4); +} + +//===----------------------------------------------------------------------===// +/// DbgValueInst - This represents the llvm.dbg.value instruction. +/// + +Value *DbgValueInst::getValue() const { + return cast<MDNode>(getOperand(1))->getOperand(0); } diff --git a/lib/VMCore/LLVMContext.cpp b/lib/VMCore/LLVMContext.cpp index 3b4a1a3..5a8ea5c 100644 --- a/lib/VMCore/LLVMContext.cpp +++ b/lib/VMCore/LLVMContext.cpp @@ -17,10 +17,7 @@ #include "llvm/Constants.h" #include "llvm/Instruction.h" #include "llvm/Support/ManagedStatic.h" -#include "llvm/Support/ValueHandle.h" #include "LLVMContextImpl.h" -#include <set> - using namespace llvm; static ManagedStatic<LLVMContext> GlobalContext; @@ -45,6 +42,3 @@ GetElementPtrConstantExpr::GetElementPtrConstantExpr OperandList[i+1] = IdxList[i]; } -MetadataContext &LLVMContext::getMetadata() { - return pImpl->TheMetadata; -} diff --git a/lib/VMCore/LLVMContextImpl.h b/lib/VMCore/LLVMContextImpl.h index 8a2378e..ccca789 100644 --- a/lib/VMCore/LLVMContextImpl.h +++ b/lib/VMCore/LLVMContextImpl.h @@ -23,10 +23,12 @@ #include "llvm/Constants.h" #include "llvm/DerivedTypes.h" #include "llvm/Assembly/Writer.h" +#include "llvm/Support/ValueHandle.h" #include "llvm/ADT/APFloat.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/FoldingSet.h" +#include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/StringMap.h" #include <vector> @@ -159,13 +161,28 @@ public: TypeMap<StructValType, StructType> StructTypes; TypeMap<IntegerValType, IntegerType> IntegerTypes; + // Opaque types are not structurally uniqued, so don't use TypeMap. + typedef SmallPtrSet<const OpaqueType*, 8> OpaqueTypesTy; + OpaqueTypesTy OpaqueTypes; + + /// ValueHandles - This map keeps track of all of the value handles that are /// watching a Value*. The Value::HasValueHandle bit is used to know // whether or not a value has an entry in this map. 
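The Load/Store/AllocaInst::setAlignment hunks above encode alignment as Log2(Align)+1 in the instruction's subclass data, keeping bit 0 free for the volatile flag; the new accessors just make that packing explicit instead of poking SubclassData directly. A toy model of the encoding, with hypothetical names:

    #include <cassert>

    // Toy model: bit 0 = volatile, bits 1.. = Log2(Align)+1 (0 = unspecified).
    struct MemAccessBits {
      unsigned Data = 0;

      void setVolatile(bool V) { Data = (Data & ~1u) | unsigned(V); }
      bool isVolatile() const { return Data & 1; }

      static unsigned log2u(unsigned V) {   // Log2_32 stand-in for powers of 2
        unsigned L = 0;
        while (V >>= 1) ++L;
        return L;
      }

      void setAlignment(unsigned Align) {
        assert(Align != 0 && (Align & (Align - 1)) == 0 &&
               "Alignment is not a power of 2!");
        Data = (Data & 1) | ((log2u(Align) + 1) << 1);
      }
      unsigned getAlignment() const {
        unsigned Enc = Data >> 1;
        return Enc ? 1u << (Enc - 1) : 0;
      }
    };

Storing the log rather than the raw value keeps even very large alignments within a few bits, which is why one small word can hold both the volatile flag and the alignment.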
typedef DenseMap<Value*, ValueHandleBase*> ValueHandlesTy; ValueHandlesTy ValueHandles; - MetadataContext TheMetadata; + /// CustomMDKindNames - Map to hold the metadata string to ID mapping. + StringMap<unsigned> CustomMDKindNames; + + typedef std::pair<unsigned, TrackingVH<MDNode> > MDPairTy; + typedef SmallVector<MDPairTy, 2> MDMapTy; + + /// MetadataStore - Collection of per-instruction metadata used in this + /// context. + DenseMap<const Instruction *, MDMapTy> MetadataStore; + + LLVMContextImpl(LLVMContext &C) : TheTrueVal(0), TheFalseVal(0), VoidTy(C, Type::VoidTyID), LabelTy(C, Type::LabelTyID), @@ -181,8 +198,7 @@ public: Int32Ty(C, 32), Int64Ty(C, 64) { } - ~LLVMContextImpl() - { + ~LLVMContextImpl() { ExprConstants.freeConstants(); ArrayConstants.freeConstants(); StructConstants.freeConstants(); @@ -201,6 +217,11 @@ public: delete I->second; } MDNodeSet.clear(); + for (OpaqueTypesTy::iterator I = OpaqueTypes.begin(), E = OpaqueTypes.end(); + I != E; ++I) { + (*I)->AbstractTypeUsers.clear(); + delete *I; + } } }; diff --git a/lib/VMCore/LeaksContext.h b/lib/VMCore/LeaksContext.h index bd10a47..abff090 100644 --- a/lib/VMCore/LeaksContext.h +++ b/lib/VMCore/LeaksContext.h @@ -46,8 +46,9 @@ struct LeakDetectorImpl { // immediately, it is added to the CachedValue Value. If it is // immediately removed, no set search need be performed. void addGarbage(const T* o) { + assert(Ts.count(o) == 0 && "Object already in set!"); if (Cache) { - assert(Ts.count(Cache) == 0 && "Object already in set!"); + assert(Cache != o && "Object already in set!"); Ts.insert(Cache); } Cache = o; diff --git a/lib/VMCore/Metadata.cpp b/lib/VMCore/Metadata.cpp index b80b6bf..8e9aab9 100644 --- a/lib/VMCore/Metadata.cpp +++ b/lib/VMCore/Metadata.cpp @@ -11,23 +11,24 @@ // //===----------------------------------------------------------------------===// -#include "LLVMContextImpl.h" #include "llvm/Metadata.h" +#include "LLVMContextImpl.h" #include "llvm/LLVMContext.h" #include "llvm/Module.h" #include "llvm/Instruction.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/StringMap.h" #include "SymbolTableListTraitsImpl.h" +#include "llvm/Support/ValueHandle.h" using namespace llvm; //===----------------------------------------------------------------------===// -// MetadataBase implementation. -// - -//===----------------------------------------------------------------------===// // MDString implementation. // + +MDString::MDString(LLVMContext &C, StringRef S) + : MetadataBase(Type::getMetadataTy(C), Value::MDStringVal), Str(S) {} + MDString *MDString::get(LLVMContext &Context, StringRef Str) { LLVMContextImpl *pImpl = Context.pImpl; StringMapEntry<MDString *> &Entry = @@ -47,23 +48,89 @@ MDString *MDString::get(LLVMContext &Context, const char *Str) { } //===----------------------------------------------------------------------===// +// MDNodeOperand implementation. +// + +// Use CallbackVH to hold MDNode operands. +namespace llvm { +class MDNodeOperand : public CallbackVH { + MDNode *Parent; +public: + MDNodeOperand(Value *V, MDNode *P) : CallbackVH(V), Parent(P) {} + ~MDNodeOperand() {} + + void set(Value *V) { + setValPtr(V); + } + + virtual void deleted(); + virtual void allUsesReplacedWith(Value *NV); +}; +} // end namespace llvm. 
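The MDNode code in the hunks that follow pairs this MDNodeOperand class with a co-allocation trick: the operand array is placement-new'd into the same malloc'd block as the node and recovered by pointer arithmetic. A standalone sketch of that layout, with hypothetical names (alignment works out here because the node's only member is pointer-sized):

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    struct Operand { void *V; };

    struct Node {
      std::size_t NumOperands;

      // The operands live immediately after the node object itself.
      Operand *op_begin() { return reinterpret_cast<Operand*>(this + 1); }

      static Node *create(void *const *Vals, std::size_t NumVals) {
        void *Mem = std::malloc(sizeof(Node) + NumVals * sizeof(Operand));
        Node *N = new (Mem) Node;                   // placement-new the header
        N->NumOperands = NumVals;
        for (std::size_t i = 0; i != NumVals; ++i)
          new (N->op_begin() + i) Operand{Vals[i]}; // placement-new each slot
        return N;
      }

      void destroy() {                              // mirrors MDNode::destroy()
        for (std::size_t i = 0; i != NumOperands; ++i)
          (op_begin() + i)->~Operand();             // run dtors in place
        this->~Node();
        std::free(this);                            // then release the one block
      }
    };

One allocation instead of two keeps nodes compact and makes the getOperandPtr helper below simple pointer arithmetic; the price is that destruction must be done by hand, which is exactly what the DestroyFlag assertion in ~MDNode polices.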
+ + +void MDNodeOperand::deleted() { + Parent->replaceOperand(this, 0); +} + +void MDNodeOperand::allUsesReplacedWith(Value *NV) { + Parent->replaceOperand(this, NV); +} + + + +//===----------------------------------------------------------------------===// // MDNode implementation. // -MDNode::MDNode(LLVMContext &C, Value *const *Vals, unsigned NumVals) - : MetadataBase(Type::getMetadataTy(C), Value::MDNodeVal) { - NodeSize = NumVals; - Node = new ElementVH[NodeSize]; - ElementVH *Ptr = Node; - for (unsigned i = 0; i != NumVals; ++i) - *Ptr++ = ElementVH(Vals[i], this); + +/// getOperandPtr - Helper function to get the MDNodeOperands co-allocated at +/// the end of the MDNode. +static MDNodeOperand *getOperandPtr(MDNode *N, unsigned Op) { + assert(Op < N->getNumOperands() && "Invalid operand number"); + return reinterpret_cast<MDNodeOperand*>(N+1)+Op; } -void MDNode::Profile(FoldingSetNodeID &ID) const { - for (unsigned i = 0, e = getNumElements(); i != e; ++i) - ID.AddPointer(getElement(i)); +MDNode::MDNode(LLVMContext &C, Value *const *Vals, unsigned NumVals, + bool isFunctionLocal) +: MetadataBase(Type::getMetadataTy(C), Value::MDNodeVal) { + NumOperands = NumVals; + + if (isFunctionLocal) + setValueSubclassData(getSubclassDataFromValue() | FunctionLocalBit); + + // Initialize the operand list, which is co-allocated on the end of the node. + for (MDNodeOperand *Op = getOperandPtr(this, 0), *E = Op+NumOperands; + Op != E; ++Op, ++Vals) + new (Op) MDNodeOperand(*Vals, this); +} + + +/// ~MDNode - Destroy MDNode. +MDNode::~MDNode() { + assert((getSubclassDataFromValue() & DestroyFlag) != 0 && + "Not being destroyed through destroy()?"); + if (!isNotUniqued()) { + LLVMContextImpl *pImpl = getType()->getContext().pImpl; + pImpl->MDNodeSet.RemoveNode(this); + } + + // Destroy the operands. + for (MDNodeOperand *Op = getOperandPtr(this, 0), *E = Op+NumOperands; + Op != E; ++Op) + Op->~MDNodeOperand(); +} + +// destroy - Delete this node. Only when there are no uses. +void MDNode::destroy() { + setValueSubclassData(getSubclassDataFromValue() | DestroyFlag); + // Placement delete, then free the memory. + this->~MDNode(); + free(this); } -MDNode *MDNode::get(LLVMContext &Context, Value*const* Vals, unsigned NumVals) { + +MDNode *MDNode::get(LLVMContext &Context, Value*const* Vals, unsigned NumVals, + bool isFunctionLocal) { LLVMContextImpl *pImpl = Context.pImpl; FoldingSetNodeID ID; for (unsigned i = 0; i != NumVals; ++i) @@ -72,56 +139,58 @@ MDNode *MDNode::get(LLVMContext &Context, Value*const* Vals, unsigned NumVals) { void *InsertPoint; MDNode *N = pImpl->MDNodeSet.FindNodeOrInsertPos(ID, InsertPoint); if (!N) { + // Co-allocate space for the node and Operands together, then placement new. + void *Ptr = malloc(sizeof(MDNode)+NumVals*sizeof(MDNodeOperand)); + N = new (Ptr) MDNode(Context, Vals, NumVals, isFunctionLocal); + // InsertPoint will have been set by the FindNodeOrInsertPos call. - N = new MDNode(Context, Vals, NumVals); pImpl->MDNodeSet.InsertNode(N, InsertPoint); } return N; } -/// ~MDNode - Destroy MDNode. -MDNode::~MDNode() { - LLVMContextImpl *pImpl = getType()->getContext().pImpl; - pImpl->MDNodeSet.RemoveNode(this); - delete [] Node; - Node = NULL; +/// getOperand - Return specified operand. +Value *MDNode::getOperand(unsigned i) const { + return *getOperandPtr(const_cast<MDNode*>(this), i); } -// Replace value from this node's element list.
-void MDNode::replaceElement(Value *From, Value *To) { - if (From == To || !getType()) - return; - LLVMContext &Context = getType()->getContext(); - LLVMContextImpl *pImpl = Context.pImpl; +void MDNode::Profile(FoldingSetNodeID &ID) const { + for (unsigned i = 0, e = getNumOperands(); i != e; ++i) + ID.AddPointer(getOperand(i)); +} - // Find value. This is a linear search, do something if it consumes - // lot of time. It is possible that to have multiple instances of - // From in this MDNode's element list. - SmallVector<unsigned, 4> Indexes; - unsigned Index = 0; - for (unsigned i = 0, e = getNumElements(); i != e; ++i, ++Index) { - Value *V = getElement(i); - if (V && V == From) - Indexes.push_back(Index); - } - if (Indexes.empty()) +// Replace value from this node's operand list. +void MDNode::replaceOperand(MDNodeOperand *Op, Value *To) { + Value *From = *Op; + + if (From == To) return; - // Remove "this" from the context map. + // Update the operand. + Op->set(To); + + // If this node is already not being uniqued (because one of the operands + // already went to null), then there is nothing else to do here. + if (isNotUniqued()) return; + + LLVMContextImpl *pImpl = getType()->getContext().pImpl; + + // Remove "this" from the context map. FoldingSet doesn't have to reprofile + // this node to remove it, so we don't care what state the operands are in. pImpl->MDNodeSet.RemoveNode(this); - // Replace From element(s) in place. - for (SmallVector<unsigned, 4>::iterator I = Indexes.begin(), E = Indexes.end(); - I != E; ++I) { - unsigned Index = *I; - Node[Index] = ElementVH(To, this); + // If we are dropping an argument to null, we choose to not unique the MDNode + // anymore. This commonly occurs during destruction, and uniquing these + // brings little reuse. + if (To == 0) { + setIsNotUniqued(); + return; } - - // Insert updated "this" into the context's folding node set. - // If a node with same element list already exist then before inserting - // updated "this" into the folding node set, replace all uses of existing - // node with updated "this" node. + + // Now that the node is out of the folding set, get ready to reinsert it. + // First, check to see if another node with the same operands already exists + // in the set. If it doesn't exist, this returns the position to insert it. FoldingSetNodeID ID; Profile(ID); void *InsertPoint; @@ -129,27 +198,31 @@ void MDNode::replaceElement(Value *From, Value *To) { if (N) { N->replaceAllUsesWith(this); - delete N; - N = 0; + N->destroy(); + N = pImpl->MDNodeSet.FindNodeOrInsertPos(ID, InsertPoint); + assert(N == 0 && "shouldn't be in the map now!"); (void)N; } - N = pImpl->MDNodeSet.FindNodeOrInsertPos(ID, InsertPoint); - if (!N) { - // InsertPoint will have been set by the FindNodeOrInsertPos call. - N = this; - pImpl->MDNodeSet.InsertNode(N, InsertPoint); - } + // InsertPoint will have been set by the FindNodeOrInsertPos call. + pImpl->MDNodeSet.InsertNode(this, InsertPoint); } //===----------------------------------------------------------------------===// // NamedMDNode implementation. 
// +static SmallVector<TrackingVH<MetadataBase>, 4> &getNMDOps(void *Operands) { + return *(SmallVector<TrackingVH<MetadataBase>, 4>*)Operands; +} + NamedMDNode::NamedMDNode(LLVMContext &C, const Twine &N, MetadataBase *const *MDs, unsigned NumMDs, Module *ParentModule) : MetadataBase(Type::getMetadataTy(C), Value::NamedMDNodeVal), Parent(0) { setName(N); - + + Operands = new SmallVector<TrackingVH<MetadataBase>, 4>(); + + SmallVector<TrackingVH<MetadataBase>, 4> &Node = getNMDOps(Operands); for (unsigned i = 0; i != NumMDs; ++i) Node.push_back(TrackingVH<MetadataBase>(MDs[i])); @@ -160,243 +233,54 @@ NamedMDNode::NamedMDNode(LLVMContext &C, const Twine &N, NamedMDNode *NamedMDNode::Create(const NamedMDNode *NMD, Module *M) { assert(NMD && "Invalid source NamedMDNode!"); SmallVector<MetadataBase *, 4> Elems; - for (unsigned i = 0, e = NMD->getNumElements(); i != e; ++i) - Elems.push_back(NMD->getElement(i)); + Elems.reserve(NMD->getNumOperands()); + + for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) + Elems.push_back(NMD->getOperand(i)); return new NamedMDNode(NMD->getContext(), NMD->getName().data(), Elems.data(), Elems.size(), M); } -/// eraseFromParent - Drop all references and remove the node from parent -/// module. -void NamedMDNode::eraseFromParent() { - getParent()->getNamedMDList().erase(this); -} - -/// dropAllReferences - Remove all uses and clear node vector. -void NamedMDNode::dropAllReferences() { - Node.clear(); -} - NamedMDNode::~NamedMDNode() { dropAllReferences(); + delete &getNMDOps(Operands); } -//===----------------------------------------------------------------------===// -// MetadataContextImpl implementation. -// -namespace llvm { -class MetadataContextImpl { -public: - typedef std::pair<unsigned, TrackingVH<MDNode> > MDPairTy; - typedef SmallVector<MDPairTy, 2> MDMapTy; - typedef DenseMap<const Instruction *, MDMapTy> MDStoreTy; - friend class BitcodeReader; -private: - - /// MetadataStore - Collection of metadata used in this context. - MDStoreTy MetadataStore; - - /// MDHandlerNames - Map to hold metadata handler names. - StringMap<unsigned> MDHandlerNames; - -public: - /// registerMDKind - Register a new metadata kind and return its ID. - /// A metadata kind can be registered only once. - unsigned registerMDKind(StringRef Name); - - /// getMDKind - Return metadata kind. If the requested metadata kind - /// is not registered then return 0. - unsigned getMDKind(StringRef Name) const; - - /// getMD - Get the metadata of given kind attached to an Instruction. - /// If the metadata is not found then return 0. - MDNode *getMD(unsigned Kind, const Instruction *Inst); - - /// getMDs - Get the metadata attached to an Instruction. - void getMDs(const Instruction *Inst, SmallVectorImpl<MDPairTy> &MDs) const; - - /// addMD - Attach the metadata of given kind to an Instruction. - void addMD(unsigned Kind, MDNode *Node, Instruction *Inst); - - /// removeMD - Remove metadata of given kind attached with an instruction. - void removeMD(unsigned Kind, Instruction *Inst); - - /// removeAllMetadata - Remove all metadata attached with an instruction. - void removeAllMetadata(Instruction *Inst); - - /// copyMD - If metadata is attached with Instruction In1 then attach - /// the same metadata to In2. - void copyMD(Instruction *In1, Instruction *In2); - - /// getHandlerNames - Populate client-supplied smallvector using custom - /// metadata name and ID. 
- void getHandlerNames(SmallVectorImpl<std::pair<unsigned, StringRef> >&) const; - - /// ValueIsDeleted - This handler is used to update metadata store - /// when a value is deleted. - void ValueIsDeleted(const Value *) {} - void ValueIsDeleted(Instruction *Inst) { - removeAllMetadata(Inst); - } - void ValueIsRAUWd(Value *V1, Value *V2); - - /// ValueIsCloned - This handler is used to update metadata store - /// when In1 is cloned to create In2. - void ValueIsCloned(const Instruction *In1, Instruction *In2); -}; -} - -/// registerMDKind - Register a new metadata kind and return its ID. -/// A metadata kind can be registered only once. -unsigned MetadataContextImpl::registerMDKind(StringRef Name) { - unsigned Count = MDHandlerNames.size(); - assert(MDHandlerNames.count(Name) == 0 && "Already registered MDKind!"); - return MDHandlerNames[Name] = Count + 1; +/// getNumOperands - Return number of NamedMDNode operands. +unsigned NamedMDNode::getNumOperands() const { + return (unsigned)getNMDOps(Operands).size(); } -/// getMDKind - Return metadata kind. If the requested metadata kind -/// is not registered then return 0. -unsigned MetadataContextImpl::getMDKind(StringRef Name) const { - StringMap<unsigned>::const_iterator I = MDHandlerNames.find(Name); - if (I == MDHandlerNames.end()) - return 0; - - return I->getValue(); +/// getOperand - Return specified operand. +MetadataBase *NamedMDNode::getOperand(unsigned i) const { + assert(i < getNumOperands() && "Invalid Operand number!"); + return getNMDOps(Operands)[i]; } -/// addMD - Attach the metadata of given kind to an Instruction. -void MetadataContextImpl::addMD(unsigned MDKind, MDNode *Node, - Instruction *Inst) { - assert(Node && "Invalid null MDNode"); - Inst->HasMetadata = true; - MDMapTy &Info = MetadataStore[Inst]; - if (Info.empty()) { - Info.push_back(std::make_pair(MDKind, Node)); - MetadataStore.insert(std::make_pair(Inst, Info)); - return; - } - - // If there is an entry for this MDKind then replace it. - for (unsigned i = 0, e = Info.size(); i != e; ++i) { - MDPairTy &P = Info[i]; - if (P.first == MDKind) { - Info[i] = std::make_pair(MDKind, Node); - return; - } - } - - // Otherwise add a new entry. - Info.push_back(std::make_pair(MDKind, Node)); +/// addOperand - Add metadata Operand. +void NamedMDNode::addOperand(MetadataBase *M) { + getNMDOps(Operands).push_back(TrackingVH<MetadataBase>(M)); } -/// removeMD - Remove metadata of given kind attached with an instruction. -void MetadataContextImpl::removeMD(unsigned Kind, Instruction *Inst) { - MDStoreTy::iterator I = MetadataStore.find(Inst); - if (I == MetadataStore.end()) - return; - - MDMapTy &Info = I->second; - for (MDMapTy::iterator MI = Info.begin(), ME = Info.end(); MI != ME; ++MI) { - MDPairTy &P = *MI; - if (P.first == Kind) { - Info.erase(MI); - return; - } - } -} - -/// removeAllMetadata - Remove all metadata attached with an instruction. -void MetadataContextImpl::removeAllMetadata(Instruction *Inst) { - MetadataStore.erase(Inst); - Inst->HasMetadata = false; -} - -/// copyMD - If metadata is attached with Instruction In1 then attach -/// the same metadata to In2. -void MetadataContextImpl::copyMD(Instruction *In1, Instruction *In2) { - assert(In1 && In2 && "Invalid instruction!"); - MDMapTy &In1Info = MetadataStore[In1]; - if (In1Info.empty()) - return; - - for (MDMapTy::iterator I = In1Info.begin(), E = In1Info.end(); I != E; ++I) - addMD(I->first, I->second, In2); -} - -/// getMD - Get the metadata of given kind attached to an Instruction. 
-/// If the metadata is not found then return 0. -MDNode *MetadataContextImpl::getMD(unsigned MDKind, const Instruction *Inst) { - MDMapTy &Info = MetadataStore[Inst]; - if (Info.empty()) - return NULL; - - for (MDMapTy::iterator I = Info.begin(), E = Info.end(); I != E; ++I) - if (I->first == MDKind) - return I->second; - return NULL; -} - -/// getMDs - Get the metadata attached to an Instruction. -void MetadataContextImpl:: -getMDs(const Instruction *Inst, SmallVectorImpl<MDPairTy> &MDs) const { - MDStoreTy::const_iterator I = MetadataStore.find(Inst); - if (I == MetadataStore.end()) - return; - MDs.resize(I->second.size()); - for (MDMapTy::const_iterator MI = I->second.begin(), ME = I->second.end(); - MI != ME; ++MI) - // MD kinds are numbered from 1. - MDs[MI->first - 1] = std::make_pair(MI->first, MI->second); -} - -/// getHandlerNames - Populate client supplied smallvector using custome -/// metadata name and ID. -void MetadataContextImpl:: -getHandlerNames(SmallVectorImpl<std::pair<unsigned, StringRef> >&Names) const { - Names.resize(MDHandlerNames.size()); - for (StringMap<unsigned>::const_iterator I = MDHandlerNames.begin(), - E = MDHandlerNames.end(); I != E; ++I) - // MD Handlers are numbered from 1. - Names[I->second - 1] = std::make_pair(I->second, I->first()); +/// eraseFromParent - Drop all references and remove the node from parent +/// module. +void NamedMDNode::eraseFromParent() { + getParent()->getNamedMDList().erase(this); } -/// ValueIsCloned - This handler is used to update metadata store -/// when In1 is cloned to create In2. -void MetadataContextImpl::ValueIsCloned(const Instruction *In1, - Instruction *In2) { - // Find Metadata handles for In1. - MDStoreTy::iterator I = MetadataStore.find(In1); - assert(I != MetadataStore.end() && "Invalid custom metadata info!"); - - // FIXME : Give all metadata handlers a chance to adjust. - - MDMapTy &In1Info = I->second; - MDMapTy In2Info; - for (MDMapTy::iterator I = In1Info.begin(), E = In1Info.end(); I != E; ++I) - addMD(I->first, I->second, In2); +/// dropAllReferences - Remove all uses and clear node vector. +void NamedMDNode::dropAllReferences() { + getNMDOps(Operands).clear(); } -/// ValueIsRAUWd - This handler is used when V1's all uses are replaced by -/// V2. -void MetadataContextImpl::ValueIsRAUWd(Value *V1, Value *V2) { - Instruction *I1 = dyn_cast<Instruction>(V1); - Instruction *I2 = dyn_cast<Instruction>(V2); - if (!I1 || !I2) - return; - - // FIXME : Give custom handlers a chance to override this. - ValueIsCloned(I1, I2); -} //===----------------------------------------------------------------------===// -// MetadataContext implementation. +// LLVMContext MDKind naming implementation. // -MetadataContext::MetadataContext() - : pImpl(new MetadataContextImpl()) { } -MetadataContext::~MetadataContext() { delete pImpl; } +#ifndef NDEBUG /// isValidName - Return true if Name is a valid custom metadata handler name. -bool MetadataContext::isValidName(StringRef MDName) { +static bool isValidName(StringRef MDName) { if (MDName.empty()) return false; @@ -410,72 +294,123 @@ bool MetadataContext::isValidName(StringRef MDName) { } return true; } +#endif -/// registerMDKind - Register a new metadata kind and return its ID. -/// A metadata kind can be registered only once. -unsigned MetadataContext::registerMDKind(StringRef Name) { - assert(isValidName(Name) && "Invalid custome metadata name!"); - return pImpl->registerMDKind(Name); -} - -/// getMDKind - Return metadata kind. 
If the requested metadata kind -/// is not registered then return 0. -unsigned MetadataContext::getMDKind(StringRef Name) const { - return pImpl->getMDKind(Name); +/// getMDKindID - Return a unique non-zero ID for the specified metadata kind. +unsigned LLVMContext::getMDKindID(StringRef Name) const { + assert(isValidName(Name) && "Invalid MDNode name"); + + unsigned &Entry = pImpl->CustomMDKindNames[Name]; + + // If this is new, assign it its ID. + if (Entry == 0) Entry = pImpl->CustomMDKindNames.size(); + return Entry; } -/// getMD - Get the metadata of given kind attached to an Instruction. -/// If the metadata is not found then return 0. -MDNode *MetadataContext::getMD(unsigned Kind, const Instruction *Inst) { - return pImpl->getMD(Kind, Inst); +/// getMDKindNames - Populate the client-supplied SmallVector with the custom +/// metadata kind names, indexed by ID. +void LLVMContext::getMDKindNames(SmallVectorImpl<StringRef> &Names) const { + Names.resize(pImpl->CustomMDKindNames.size()+1); + Names[0] = ""; + for (StringMap<unsigned>::const_iterator I = pImpl->CustomMDKindNames.begin(), + E = pImpl->CustomMDKindNames.end(); I != E; ++I) + // MD Handlers are numbered from 1. + Names[I->second] = I->first(); } -/// getMDs - Get the metadata attached to an Instruction. -void MetadataContext:: -getMDs(const Instruction *Inst, - SmallVectorImpl<std::pair<unsigned, TrackingVH<MDNode> > > &MDs) const { - return pImpl->getMDs(Inst, MDs); -} +//===----------------------------------------------------------------------===// +// Instruction Metadata method implementations. // -/// addMD - Attach the metadata of given kind to an Instruction. -void MetadataContext::addMD(unsigned Kind, MDNode *Node, Instruction *Inst) { - pImpl->addMD(Kind, Node, Inst); +void Instruction::setMetadata(const char *Kind, MDNode *Node) { + if (Node == 0 && !hasMetadata()) return; + setMetadata(getContext().getMDKindID(Kind), Node); } -/// removeMD - Remove metadata of given kind attached with an instruction. -void MetadataContext::removeMD(unsigned Kind, Instruction *Inst) { - pImpl->removeMD(Kind, Inst); +MDNode *Instruction::getMetadataImpl(const char *Kind) const { + return getMetadataImpl(getContext().getMDKindID(Kind)); } -/// removeAllMetadata - Remove all metadata attached with an instruction. -void MetadataContext::removeAllMetadata(Instruction *Inst) { - pImpl->removeAllMetadata(Inst); +/// setMetadata - Set the metadata of the specified kind to the specified +/// node. This updates/replaces metadata if already present, or removes it if +/// Node is null. +void Instruction::setMetadata(unsigned KindID, MDNode *Node) { + if (Node == 0 && !hasMetadata()) return; + + // Handle the case when we're adding/updating metadata on an instruction. + if (Node) { + LLVMContextImpl::MDMapTy &Info = getContext().pImpl->MetadataStore[this]; + assert(!Info.empty() == hasMetadata() && "HasMetadata bit is wonked"); + if (Info.empty()) { + setHasMetadata(true); + } else { + // Handle replacement of an existing value. + for (unsigned i = 0, e = Info.size(); i != e; ++i) + if (Info[i].first == KindID) { + Info[i].second = Node; + return; + } + } + + // No replacement, just add it to the list. + Info.push_back(std::make_pair(KindID, Node)); + return; + } + + // Otherwise, we're removing metadata from an instruction. + assert(hasMetadata() && getContext().pImpl->MetadataStore.count(this) && + "HasMetadata bit out of date!"); + LLVMContextImpl::MDMapTy &Info = getContext().pImpl->MetadataStore[this]; + + // Common case is removing the only entry.
+ if (Info.size() == 1 && Info[0].first == KindID) { + getContext().pImpl->MetadataStore.erase(this); + setHasMetadata(false); + return; + } + + // Handle removal of an existing value. + for (unsigned i = 0, e = Info.size(); i != e; ++i) + if (Info[i].first == KindID) { + Info[i] = Info.back(); + Info.pop_back(); + assert(!Info.empty() && "Removing last entry should be handled above"); + return; + } + // Otherwise, removing an entry that doesn't exist on the instruction. } -/// copyMD - If metadata is attached with Instruction In1 then attach -/// the same metadata to In2. -void MetadataContext::copyMD(Instruction *In1, Instruction *In2) { - pImpl->copyMD(In1, In2); +MDNode *Instruction::getMetadataImpl(unsigned KindID) const { + LLVMContextImpl::MDMapTy &Info = getContext().pImpl->MetadataStore[this]; + assert(hasMetadata() && !Info.empty() && "Shouldn't have called this"); + + for (LLVMContextImpl::MDMapTy::iterator I = Info.begin(), E = Info.end(); + I != E; ++I) + if (I->first == KindID) + return I->second; + return 0; } -/// getHandlerNames - Populate client supplied smallvector using custome -/// metadata name and ID. -void MetadataContext:: -getHandlerNames(SmallVectorImpl<std::pair<unsigned, StringRef> >&N) const { - pImpl->getHandlerNames(N); +void Instruction::getAllMetadataImpl(SmallVectorImpl<std::pair<unsigned, + MDNode*> > &Result) const { + assert(hasMetadata() && getContext().pImpl->MetadataStore.count(this) && + "Shouldn't have called this"); + const LLVMContextImpl::MDMapTy &Info = + getContext().pImpl->MetadataStore.find(this)->second; + assert(!Info.empty() && "Shouldn't have called this"); + + Result.clear(); + Result.append(Info.begin(), Info.end()); + + // Sort the resulting array so it is stable. + if (Result.size() > 1) + array_pod_sort(Result.begin(), Result.end()); } -/// ValueIsDeleted - This handler is used to update metadata store -/// when a value is deleted. -void MetadataContext::ValueIsDeleted(Instruction *Inst) { - pImpl->ValueIsDeleted(Inst); -} -void MetadataContext::ValueIsRAUWd(Value *V1, Value *V2) { - pImpl->ValueIsRAUWd(V1, V2); +/// removeAllMetadata - Remove all metadata from this instruction. +void Instruction::removeAllMetadata() { + assert(hasMetadata() && "Caller should check"); + getContext().pImpl->MetadataStore.erase(this); + setHasMetadata(false); } -/// ValueIsCloned - This handler is used to update metadata store -/// when In1 is cloned to create In2. -void MetadataContext::ValueIsCloned(const Instruction *In1, Instruction *In2) { - pImpl->ValueIsCloned(In1, In2); -} diff --git a/lib/VMCore/Module.cpp b/lib/VMCore/Module.cpp index 3efd3e3..a7f503b 100644 --- a/lib/VMCore/Module.cpp +++ b/lib/VMCore/Module.cpp @@ -47,9 +47,9 @@ GlobalAlias *ilist_traits<GlobalAlias>::createSentinel() { // Explicit instantiations of SymbolTableListTraits since some of the methods // are not in the public header file. -template class SymbolTableListTraits<GlobalVariable, Module>; -template class SymbolTableListTraits<Function, Module>; -template class SymbolTableListTraits<GlobalAlias, Module>; +template class llvm::SymbolTableListTraits<GlobalVariable, Module>; +template class llvm::SymbolTableListTraits<Function, Module>; +template class llvm::SymbolTableListTraits<GlobalAlias, Module>; //===----------------------------------------------------------------------===// // Primitive Module methods.
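Taken together, the setMetadata/getMetadataImpl hunks above implement a classic side-table: the instruction carries only a has-metadata bit, and the per-kind (kind, node) list lives in a context-owned map, with swap-and-pop removal. A distilled model using standard containers (all names hypothetical, and without the TrackingVH machinery the real store uses):

    #include <cstddef>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    struct MDNode;                          // stand-in
    struct Inst { bool HasMD = false; };    // mirrors the HasMetadata bit

    struct Context {
      typedef std::vector<std::pair<unsigned, MDNode*> > MDMap;
      std::unordered_map<const Inst*, MDMap> MetadataStore;

      void setMD(Inst &I, unsigned Kind, MDNode *N) {
        MDMap &Info = MetadataStore[&I];
        for (std::size_t i = 0; i != Info.size(); ++i)
          if (Info[i].first == Kind) { Info[i].second = N; return; }
        Info.push_back(std::make_pair(Kind, N));
        I.HasMD = true;
      }

      MDNode *getMD(const Inst &I, unsigned Kind) const {
        if (!I.HasMD) return 0;             // cheap common-case test, no hashing
        const MDMap &Info = MetadataStore.find(&I)->second;
        for (std::size_t i = 0; i != Info.size(); ++i)
          if (Info[i].first == Kind) return Info[i].second;
        return 0;
      }

      void removeMD(Inst &I, unsigned Kind) {
        if (!I.HasMD) return;
        MDMap &Info = MetadataStore[&I];
        for (std::size_t i = 0; i != Info.size(); ++i)
          if (Info[i].first == Kind) {
            Info[i] = Info.back();          // swap-and-pop; order not preserved
            Info.pop_back();
            break;
          }
        if (Info.empty()) { MetadataStore.erase(&I); I.HasMD = false; }
      }
    };

Most instructions carry no metadata, so the bit test keeps the common path free of map lookups; the same reasoning is behind Value::HasValueHandle and the ValueHandles map.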
@@ -118,6 +118,20 @@ GlobalValue *Module::getNamedValue(StringRef Name) const { return cast_or_null<GlobalValue>(getValueSymbolTable().lookup(Name)); } +/// getMDKindID - Return a unique non-zero ID for the specified metadata kind. +/// This ID is uniqued across modules in the current LLVMContext. +unsigned Module::getMDKindID(StringRef Name) const { + return Context.getMDKindID(Name); +} + +/// getMDKindNames - Populate the client-supplied SmallVector with the names for +/// custom metadata IDs registered in this LLVMContext. ID #0 is not used, +/// so it is filled in as an empty string. +void Module::getMDKindNames(SmallVectorImpl<StringRef> &Result) const { + return Context.getMDKindNames(Result); +} + + //===----------------------------------------------------------------------===// // Methods for easy access to the functions in the module. // diff --git a/lib/VMCore/PassManager.cpp b/lib/VMCore/PassManager.cpp index 52e8a82..d688385 100644 --- a/lib/VMCore/PassManager.cpp +++ b/lib/VMCore/PassManager.cpp @@ -1133,7 +1133,7 @@ bool BBPassManager::runOnFunction(Function &F) { removeDeadPasses(BP, I->getName(), ON_BASICBLOCK_MSG); } - return Changed |= doFinalization(F); + return doFinalization(F) || Changed; } // Implement doInitialization and doFinalization @@ -1355,7 +1355,7 @@ bool FPPassManager::runOnModule(Module &M) { for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) runOnFunction(*I); - return Changed |= doFinalization(M); + return doFinalization(M) || Changed; } bool FPPassManager::doInitialization(Module &M) { diff --git a/lib/VMCore/Type.cpp b/lib/VMCore/Type.cpp index 739c463..fd46aa1 100644 --- a/lib/VMCore/Type.cpp +++ b/lib/VMCore/Type.cpp @@ -79,6 +79,9 @@ void Type::destroy() const { operator delete(const_cast<Type *>(this)); return; + } else if (const OpaqueType *opaque_this = dyn_cast<OpaqueType>(this)) { + LLVMContextImpl *pImpl = this->getContext().pImpl; + pImpl->OpaqueTypes.erase(opaque_this); } // For all the other type subclasses, there is either no contained types or @@ -684,9 +687,11 @@ static bool TypesEqual(const Type *Ty, const Type *Ty2, } } +namespace llvm { // in namespace llvm so findable by ADL static bool TypesEqual(const Type *Ty, const Type *Ty2) { std::map<const Type *, const Type *> EqTypes; - return TypesEqual(Ty, Ty2, EqTypes); + return ::TypesEqual(Ty, Ty2, EqTypes); +} } // AbstractTypeHasCycleThrough - Return true there is a path from CurTy to @@ -722,8 +727,10 @@ static bool ConcreteTypeHasCycleThrough(const Type *TargetTy, const Type *CurTy, return false; } -/// TypeHasCycleThroughItself - Return true if the specified type has a cycle -/// back to itself. +/// TypeHasCycleThroughItself - Return true if the specified type has +/// a cycle back to itself. + +namespace llvm { // in namespace llvm so it's findable by ADL static bool TypeHasCycleThroughItself(const Type *Ty) { SmallPtrSet<const Type*, 128> VisitedTypes; @@ -740,6 +747,7 @@ static bool TypeHasCycleThroughItself(const Type *Ty) { } return false; } +} //===----------------------------------------------------------------------===// // Function Type Factory and Value Class... @@ -955,6 +963,20 @@ bool PointerType::isValidElementType(const Type *ElemTy) { //===----------------------------------------------------------------------===// +// Opaque Type Factory...
+// + +OpaqueType *OpaqueType::get(LLVMContext &C) { + OpaqueType *OT = new OpaqueType(C); // All opaque types are distinct + + LLVMContextImpl *pImpl = C.pImpl; + pImpl->OpaqueTypes.insert(OT); + return OT; +} + + + +//===----------------------------------------------------------------------===// // Derived Type Refinement Functions //===----------------------------------------------------------------------===// diff --git a/lib/VMCore/Value.cpp b/lib/VMCore/Value.cpp index 826e8a1..fe1219f 100644 --- a/lib/VMCore/Value.cpp +++ b/lib/VMCore/Value.cpp @@ -19,7 +19,6 @@ #include "llvm/Instructions.h" #include "llvm/Operator.h" #include "llvm/Module.h" -#include "llvm/Metadata.h" #include "llvm/ValueSymbolTable.h" #include "llvm/ADT/SmallString.h" #include "llvm/Support/Debug.h" @@ -41,7 +40,7 @@ static inline const Type *checkType(const Type *Ty) { } Value::Value(const Type *ty, unsigned scid) - : SubclassID(scid), HasValueHandle(0), HasMetadata(0), + : SubclassID(scid), HasValueHandle(0), SubclassOptionalData(0), SubclassData(0), VTy(checkType(ty)), UseList(0), Name(0) { if (isa<CallInst>(this) || isa<InvokeInst>(this)) @@ -57,11 +56,6 @@ Value::Value(const Type *ty, unsigned scid) } Value::~Value() { - if (HasMetadata) { - LLVMContext &Context = getContext(); - Context.pImpl->TheMetadata.ValueIsDeleted(this); - } - // Notify all ValueHandles (if present) that this value is going away. if (HasValueHandle) ValueHandleBase::ValueIsDeleted(this); @@ -306,10 +300,6 @@ void Value::uncheckedReplaceAllUsesWith(Value *New) { // Notify all ValueHandles (if present) that this value is going away. if (HasValueHandle) ValueHandleBase::ValueIsRAUWd(this, New); - if (HasMetadata) { - LLVMContext &Context = getContext(); - Context.pImpl->TheMetadata.ValueIsRAUWd(this, New); - } while (!use_empty()) { Use &U = *UseList; diff --git a/lib/VMCore/Verifier.cpp b/lib/VMCore/Verifier.cpp index 7aa86b7..30528bf 100644 --- a/lib/VMCore/Verifier.cpp +++ b/lib/VMCore/Verifier.cpp @@ -329,6 +329,8 @@ namespace { int VT, unsigned ArgNo, std::string &Suffix); void VerifyIntrinsicPrototype(Intrinsic::ID ID, Function *F, unsigned RetNum, unsigned ParamNum, ...); + void VerifyFunctionLocalMetadata(MDNode *N, Function *F, + SmallPtrSet<MDNode *, 32> &Visited); void VerifyParameterAttrs(Attributes Attrs, const Type *Ty, bool isReturnValue, const Value *V); void VerifyFunctionAttrs(const FunctionType *FT, const AttrListPtr &Attrs, @@ -1526,6 +1528,38 @@ void Verifier::VerifyType(const Type *Ty) { } } +/// VerifyFunctionLocalMetadata - Verify that the specified MDNode is local to +/// specified Function. +void Verifier::VerifyFunctionLocalMetadata(MDNode *N, Function *F, + SmallPtrSet<MDNode *, 32> &Visited) { + assert(N->isFunctionLocal() && "Should only be called on function-local MD"); + + // Only visit each node once. + if (!Visited.insert(N)) + return; + + for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { + Value *V = N->getOperand(i); + if (!V) continue; + + Function *ActualF = 0; + if (Instruction *I = dyn_cast<Instruction>(V)) + ActualF = I->getParent()->getParent(); + else if (BasicBlock *BB = dyn_cast<BasicBlock>(V)) + ActualF = BB->getParent(); + else if (Argument *A = dyn_cast<Argument>(V)) + ActualF = A->getParent(); + else if (MDNode *MD = dyn_cast<MDNode>(V)) + if (MD->isFunctionLocal()) + VerifyFunctionLocalMetadata(MD, F, Visited); + + // If this was an instruction, bb, or argument, verify that it is in the + // function that we expect. 
+ Assert1(ActualF == 0 || ActualF == F, + "function-local metadata used in wrong function", N); + } +} + // Flags used by TableGen to mark intrinsic parameters with the // LLVMExtendedElementVectorType and LLVMTruncatedElementVectorType classes. static const unsigned ExtendedElementVectorType = 0x40000000; @@ -1542,6 +1576,15 @@ void Verifier::visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI) { #include "llvm/Intrinsics.gen" #undef GET_INTRINSIC_VERIFIER + // If the intrinsic takes MDNode arguments, verify that they are either global + // or local to *this* function. + for (unsigned i = 0, e = CI.getNumOperands(); i != e; ++i) + if (MDNode *MD = dyn_cast<MDNode>(CI.getOperand(i))) { + if (!MD->isFunctionLocal()) continue; + SmallPtrSet<MDNode *, 32> Visited; + VerifyFunctionLocalMetadata(MD, CI.getParent()->getParent(), Visited); + } + switch (ID) { default: break;
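The verifier addition above walks function-local metadata recursively, using a Visited set so that self-referential MDNode graphs terminate. The same shape in miniature, with standard containers and hypothetical names (real operands are Values whose parent function gets checked; only child nodes are modeled here):

    #include <cstddef>
    #include <set>
    #include <vector>

    struct Fn;
    struct MD {
      bool FunctionLocal;
      const Fn *Parent;          // function this node's values belong to, if any
      std::vector<MD*> Ops;      // child nodes only, for brevity
    };

    // Return false if any reachable function-local node belongs to a function
    // other than F; the Visited set breaks cycles in the metadata graph.
    static bool verifyLocalMD(const MD *N, const Fn *F,
                              std::set<const MD*> &Visited) {
      if (!Visited.insert(N).second)
        return true;             // already checked; don't loop forever
      if (N->Parent && N->Parent != F)
        return false;
      for (std::size_t i = 0; i != N->Ops.size(); ++i) {
        const MD *Op = N->Ops[i];
        if (Op && Op->FunctionLocal && !verifyLocalMD(Op, F, Visited))
          return false;
      }
      return true;
    }

Checking insert().second before recursing is what makes the walk linear in the number of distinct nodes, even though uniqued metadata graphs may share or cycle through subnodes.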