diff options
Diffstat (limited to 'contrib/llvm/tools/clang/lib')
251 files changed, 31842 insertions, 12319 deletions
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp b/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp index 851f8d1..d41051f 100644 --- a/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp +++ b/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp @@ -31,25 +31,128 @@ using namespace clang; +unsigned ASTContext::NumImplicitDefaultConstructors; +unsigned ASTContext::NumImplicitDefaultConstructorsDeclared; +unsigned ASTContext::NumImplicitCopyConstructors; +unsigned ASTContext::NumImplicitCopyConstructorsDeclared; +unsigned ASTContext::NumImplicitCopyAssignmentOperators; +unsigned ASTContext::NumImplicitCopyAssignmentOperatorsDeclared; +unsigned ASTContext::NumImplicitDestructors; +unsigned ASTContext::NumImplicitDestructorsDeclared; + enum FloatingRank { FloatRank, DoubleRank, LongDoubleRank }; +void +ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID, + TemplateTemplateParmDecl *Parm) { + ID.AddInteger(Parm->getDepth()); + ID.AddInteger(Parm->getPosition()); + // FIXME: Parameter pack + + TemplateParameterList *Params = Parm->getTemplateParameters(); + ID.AddInteger(Params->size()); + for (TemplateParameterList::const_iterator P = Params->begin(), + PEnd = Params->end(); + P != PEnd; ++P) { + if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) { + ID.AddInteger(0); + ID.AddBoolean(TTP->isParameterPack()); + continue; + } + + if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) { + ID.AddInteger(1); + // FIXME: Parameter pack + ID.AddPointer(NTTP->getType().getAsOpaquePtr()); + continue; + } + + TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*P); + ID.AddInteger(2); + Profile(ID, TTP); + } +} + +TemplateTemplateParmDecl * +ASTContext::getCanonicalTemplateTemplateParmDecl( + TemplateTemplateParmDecl *TTP) { + // Check if we already have a canonical template template parameter. 
+ llvm::FoldingSetNodeID ID; + CanonicalTemplateTemplateParm::Profile(ID, TTP); + void *InsertPos = 0; + CanonicalTemplateTemplateParm *Canonical + = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos); + if (Canonical) + return Canonical->getParam(); + + // Build a canonical template parameter list. + TemplateParameterList *Params = TTP->getTemplateParameters(); + llvm::SmallVector<NamedDecl *, 4> CanonParams; + CanonParams.reserve(Params->size()); + for (TemplateParameterList::const_iterator P = Params->begin(), + PEnd = Params->end(); + P != PEnd; ++P) { + if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) + CanonParams.push_back( + TemplateTypeParmDecl::Create(*this, getTranslationUnitDecl(), + SourceLocation(), TTP->getDepth(), + TTP->getIndex(), 0, false, + TTP->isParameterPack())); + else if (NonTypeTemplateParmDecl *NTTP + = dyn_cast<NonTypeTemplateParmDecl>(*P)) + CanonParams.push_back( + NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(), + SourceLocation(), NTTP->getDepth(), + NTTP->getPosition(), 0, + getCanonicalType(NTTP->getType()), + 0)); + else + CanonParams.push_back(getCanonicalTemplateTemplateParmDecl( + cast<TemplateTemplateParmDecl>(*P))); + } + + TemplateTemplateParmDecl *CanonTTP + = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(), + SourceLocation(), TTP->getDepth(), + TTP->getPosition(), 0, + TemplateParameterList::Create(*this, SourceLocation(), + SourceLocation(), + CanonParams.data(), + CanonParams.size(), + SourceLocation())); + + // Get the new insert position for the node we care about. + Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos); + assert(Canonical == 0 && "Shouldn't be in the map!"); + (void)Canonical; + + // Create the canonical template template parameter entry. 
+ Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP); + CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos); + return CanonTTP; +} + ASTContext::ASTContext(const LangOptions& LOpts, SourceManager &SM, const TargetInfo &t, IdentifierTable &idents, SelectorTable &sels, Builtin::Context &builtins, bool FreeMem, unsigned size_reserve) : - GlobalNestedNameSpecifier(0), CFConstantStringTypeDecl(0), - NSConstantStringTypeDecl(0), + TemplateSpecializationTypes(this_()), + DependentTemplateSpecializationTypes(this_()), + GlobalNestedNameSpecifier(0), IsInt128Installed(false), + CFConstantStringTypeDecl(0), NSConstantStringTypeDecl(0), ObjCFastEnumerationStateTypeDecl(0), FILEDecl(0), jmp_bufDecl(0), sigjmp_bufDecl(0), BlockDescriptorType(0), BlockDescriptorExtendedType(0), + NullTypeSourceInfo(QualType()), SourceMgr(SM), LangOpts(LOpts), FreeMemory(FreeMem), Target(t), Idents(idents), Selectors(sels), BuiltinInfo(builtins), DeclarationNames(*this), ExternalSource(0), PrintingPolicy(LOpts), - LastSDM(0, 0) { + LastSDM(0, 0), + UniqueBlockByRefTypeID(0), UniqueBlockParmTypeID(0) { ObjCIdRedefinitionType = QualType(); ObjCClassRedefinitionType = QualType(); ObjCSelRedefinitionType = QualType(); @@ -88,13 +191,6 @@ ASTContext::~ASTContext() { Deallocate(&*I++); } - for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator - I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) { - // Increment in loop to prevent using deallocated memory. - if (ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second)) - R->Destroy(*this); - } - for (llvm::DenseMap<const ObjCContainerDecl*, const ASTRecordLayout*>::iterator I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; ) { @@ -104,6 +200,16 @@ ASTContext::~ASTContext() { } } + // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed + // even when using the BumpPtrAllocator because they can contain + // DenseMaps. 
+ for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator + I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) { + // Increment in loop to prevent using deallocated memory. + if (ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second)) + R->Destroy(*this); + } + // Destroy nested-name-specifiers. for (llvm::FoldingSet<NestedNameSpecifier>::iterator NNS = NestedNameSpecifiers.begin(), @@ -155,11 +261,30 @@ void ASTContext::PrintStats() const { #include "clang/AST/TypeNodes.def" fprintf(stderr, "Total bytes = %d\n", int(TotalBytes)); - + + // Implicit special member functions. + fprintf(stderr, " %u/%u implicit default constructors created\n", + NumImplicitDefaultConstructorsDeclared, + NumImplicitDefaultConstructors); + fprintf(stderr, " %u/%u implicit copy constructors created\n", + NumImplicitCopyConstructorsDeclared, + NumImplicitCopyConstructors); + fprintf(stderr, " %u/%u implicit copy assignment operators created\n", + NumImplicitCopyAssignmentOperatorsDeclared, + NumImplicitCopyAssignmentOperators); + fprintf(stderr, " %u/%u implicit destructors created\n", + NumImplicitDestructorsDeclared, NumImplicitDestructors); + + if (!FreeMemory) + BumpAlloc.PrintStats(); + if (ExternalSource.get()) { fprintf(stderr, "\n"); ExternalSource->PrintStats(); } + + if (!FreeMemory) + BumpAlloc.PrintStats(); } @@ -273,13 +398,14 @@ ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) { void ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl, - TemplateSpecializationKind TSK) { + TemplateSpecializationKind TSK, + SourceLocation PointOfInstantiation) { assert(Inst->isStaticDataMember() && "Not a static data member"); assert(Tmpl->isStaticDataMember() && "Not a static data member"); assert(!InstantiatedFromStaticDataMember[Inst] && "Already noted what static data member was instantiated from"); InstantiatedFromStaticDataMember[Inst] - = new (*this) MemberSpecializationInfo(Tmpl, TSK); + = new 
(*this) MemberSpecializationInfo(Tmpl, TSK, PointOfInstantiation); } NamedDecl * @@ -358,6 +484,16 @@ ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const { return Pos->second.end(); } +unsigned +ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const { + llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos + = OverriddenMethods.find(Method); + if (Pos == OverriddenMethods.end()) + return 0; + + return Pos->second.size(); +} + void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method, const CXXMethodDecl *Overridden) { OverriddenMethods[Method].push_back(Overridden); @@ -414,6 +550,15 @@ CharUnits ASTContext::getDeclAlign(const Decl *D, bool RefAsPointee) { T = getPointerType(RT->getPointeeType()); } if (!T->isIncompleteType() && !T->isFunctionType()) { + unsigned MinWidth = Target.getLargeArrayMinWidth(); + unsigned ArrayAlign = Target.getLargeArrayAlign(); + if (isa<VariableArrayType>(T) && MinWidth != 0) + Align = std::max(Align, ArrayAlign); + if (ConstantArrayType *CT = dyn_cast<ConstantArrayType>(T)) { + unsigned Size = getTypeSize(CT); + if (MinWidth != 0 && MinWidth <= Size) + Align = std::max(Align, ArrayAlign); + } // Incomplete or function types default to 1. while (isa<VariableArrayType>(T) || isa<IncompleteArrayType>(T)) T = cast<ArrayType>(T)->getElementType(); @@ -762,7 +907,8 @@ void ASTContext::ShallowCollectObjCIvars(const ObjCInterfaceDecl *OI, void ASTContext::CollectNonClassIvars(const ObjCInterfaceDecl *OI, llvm::SmallVectorImpl<ObjCIvarDecl*> &Ivars) { // Find ivars declared in class extension. 
- if (const ObjCCategoryDecl *CDecl = OI->getClassExtension()) { + for (const ObjCCategoryDecl *CDecl = OI->getFirstClassExtension(); CDecl; + CDecl = CDecl->getNextClassExtension()) { for (ObjCCategoryDecl::ivar_iterator I = CDecl->ivar_begin(), E = CDecl->ivar_end(); I != E; ++I) { Ivars.push_back(*I); @@ -827,7 +973,8 @@ void ASTContext::CollectInheritedProtocols(const Decl *CDecl, unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) { unsigned count = 0; // Count ivars declared in class extension. - if (const ObjCCategoryDecl *CDecl = OI->getClassExtension()) + for (const ObjCCategoryDecl *CDecl = OI->getFirstClassExtension(); CDecl; + CDecl = CDecl->getNextClassExtension()) count += CDecl->ivar_size(); // Count ivar defined in this class's implementation. This @@ -1406,7 +1553,7 @@ QualType ASTContext::getIncompleteArrayType(QualType EltTy, /// getVectorType - Return the unique reference to a vector type of /// the specified element type and size. VectorType must be a built-in type. QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts, - bool IsAltiVec, bool IsPixel) { + VectorType::AltiVecSpecific AltiVecSpec) { BuiltinType *baseType; baseType = dyn_cast<BuiltinType>(getCanonicalType(vecType).getTypePtr()); @@ -1414,8 +1561,8 @@ QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts, // Check if we've already instantiated a vector of this type. llvm::FoldingSetNodeID ID; - VectorType::Profile(ID, vecType, NumElts, Type::Vector, - IsAltiVec, IsPixel); + VectorType::Profile(ID, vecType, NumElts, Type::Vector, AltiVecSpec); + void *InsertPos = 0; if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(VTP, 0); @@ -1423,16 +1570,19 @@ QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts, // If the element type isn't canonical, this won't be a canonical type either, // so fill in the canonical type field. 
QualType Canonical; - if (!vecType.isCanonical() || IsAltiVec || IsPixel) { - Canonical = getVectorType(getCanonicalType(vecType), - NumElts, false, false); + if (!vecType.isCanonical() || (AltiVecSpec == VectorType::AltiVec)) { + // pass VectorType::NotAltiVec for AltiVecSpec to make AltiVec canonical + // vector type (except 'vector bool ...' and 'vector Pixel') the same as + // the equivalent GCC vector types + Canonical = getVectorType(getCanonicalType(vecType), NumElts, + VectorType::NotAltiVec); // Get the new insert position for the node we care about. VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP; } VectorType *New = new (*this, TypeAlignment) - VectorType(vecType, NumElts, Canonical, IsAltiVec, IsPixel); + VectorType(vecType, NumElts, Canonical, AltiVecSpec); VectorTypes.InsertNode(New, InsertPos); Types.push_back(New); return QualType(New, 0); @@ -1448,7 +1598,8 @@ QualType ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) { // Check if we've already instantiated a vector of this type. 
llvm::FoldingSetNodeID ID; - VectorType::Profile(ID, vecType, NumElts, Type::ExtVector, false, false); + VectorType::Profile(ID, vecType, NumElts, Type::ExtVector, + VectorType::NotAltiVec); void *InsertPos = 0; if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) return QualType(VTP, 0); @@ -1629,8 +1780,7 @@ QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl, assert(NeedsInjectedClassNameType(Decl)); if (Decl->TypeForDecl) { assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); - } else if (CXXRecordDecl *PrevDecl - = cast_or_null<CXXRecordDecl>(Decl->getPreviousDeclaration())) { + } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDeclaration()) { assert(PrevDecl->TypeForDecl && "previous declaration has no type"); Decl->TypeForDecl = PrevDecl->TypeForDecl; assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); @@ -1658,11 +1808,11 @@ QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) { assert(!Record->getPreviousDeclaration() && "struct/union has previous declaration"); assert(!NeedsInjectedClassNameType(Record)); - Decl->TypeForDecl = new (*this, TypeAlignment) RecordType(Record); + return getRecordType(Record); } else if (const EnumDecl *Enum = dyn_cast<EnumDecl>(Decl)) { assert(!Enum->getPreviousDeclaration() && "enum has previous declaration"); - Decl->TypeForDecl = new (*this, TypeAlignment) EnumType(Enum); + return getEnumType(Enum); } else if (const UnresolvedUsingTypenameDecl *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) { Decl->TypeForDecl = new (*this, TypeAlignment) UnresolvedUsingType(Using); @@ -1675,16 +1825,42 @@ QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) { /// getTypedefType - Return the unique reference to the type for the /// specified typename decl. 
-QualType ASTContext::getTypedefType(const TypedefDecl *Decl) { +QualType +ASTContext::getTypedefType(const TypedefDecl *Decl, QualType Canonical) { if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); - QualType Canonical = getCanonicalType(Decl->getUnderlyingType()); + if (Canonical.isNull()) + Canonical = getCanonicalType(Decl->getUnderlyingType()); Decl->TypeForDecl = new(*this, TypeAlignment) TypedefType(Type::Typedef, Decl, Canonical); Types.push_back(Decl->TypeForDecl); return QualType(Decl->TypeForDecl, 0); } +QualType ASTContext::getRecordType(const RecordDecl *Decl) { + if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); + + if (const RecordDecl *PrevDecl = Decl->getPreviousDeclaration()) + if (PrevDecl->TypeForDecl) + return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); + + Decl->TypeForDecl = new (*this, TypeAlignment) RecordType(Decl); + Types.push_back(Decl->TypeForDecl); + return QualType(Decl->TypeForDecl, 0); +} + +QualType ASTContext::getEnumType(const EnumDecl *Decl) { + if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); + + if (const EnumDecl *PrevDecl = Decl->getPreviousDeclaration()) + if (PrevDecl->TypeForDecl) + return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); + + Decl->TypeForDecl = new (*this, TypeAlignment) EnumType(Decl); + Types.push_back(Decl->TypeForDecl); + return QualType(Decl->TypeForDecl, 0); +} + /// \brief Retrieve a substitution-result type. 
QualType ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm, @@ -1763,8 +1939,7 @@ ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name, QualType ASTContext::getTemplateSpecializationType(TemplateName Template, const TemplateArgumentListInfo &Args, - QualType Canon, - bool IsCurrentInstantiation) { + QualType Canon) { unsigned NumArgs = Args.size(); llvm::SmallVector<TemplateArgument, 4> ArgVec; @@ -1773,56 +1948,18 @@ ASTContext::getTemplateSpecializationType(TemplateName Template, ArgVec.push_back(Args[i].getArgument()); return getTemplateSpecializationType(Template, ArgVec.data(), NumArgs, - Canon, IsCurrentInstantiation); + Canon); } QualType ASTContext::getTemplateSpecializationType(TemplateName Template, const TemplateArgument *Args, unsigned NumArgs, - QualType Canon, - bool IsCurrentInstantiation) { + QualType Canon) { if (!Canon.isNull()) Canon = getCanonicalType(Canon); - else { - assert(!IsCurrentInstantiation && - "current-instantiation specializations should always " - "have a canonical type"); - - // Build the canonical template specialization type. - TemplateName CanonTemplate = getCanonicalTemplateName(Template); - llvm::SmallVector<TemplateArgument, 4> CanonArgs; - CanonArgs.reserve(NumArgs); - for (unsigned I = 0; I != NumArgs; ++I) - CanonArgs.push_back(getCanonicalTemplateArgument(Args[I])); - - // Determine whether this canonical template specialization type already - // exists. - llvm::FoldingSetNodeID ID; - TemplateSpecializationType::Profile(ID, CanonTemplate, false, - CanonArgs.data(), NumArgs, *this); - - void *InsertPos = 0; - TemplateSpecializationType *Spec - = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); - - if (!Spec) { - // Allocate a new canonical template specialization type. 
- void *Mem = Allocate((sizeof(TemplateSpecializationType) + - sizeof(TemplateArgument) * NumArgs), - TypeAlignment); - Spec = new (Mem) TemplateSpecializationType(*this, CanonTemplate, false, - CanonArgs.data(), NumArgs, - Canon); - Types.push_back(Spec); - TemplateSpecializationTypes.InsertNode(Spec, InsertPos); - } - - if (Canon.isNull()) - Canon = QualType(Spec, 0); - assert(Canon->isDependentType() && - "Non-dependent template-id type must have a canonical type"); - } + else + Canon = getCanonicalTemplateSpecializationType(Template, Args, NumArgs); // Allocate the (non-canonical) template specialization type, but don't // try to unique it: these types typically have location information that @@ -1831,8 +1968,7 @@ ASTContext::getTemplateSpecializationType(TemplateName Template, sizeof(TemplateArgument) * NumArgs), TypeAlignment); TemplateSpecializationType *Spec - = new (Mem) TemplateSpecializationType(*this, Template, - IsCurrentInstantiation, + = new (Mem) TemplateSpecializationType(Template, Args, NumArgs, Canon); @@ -1841,6 +1977,44 @@ ASTContext::getTemplateSpecializationType(TemplateName Template, } QualType +ASTContext::getCanonicalTemplateSpecializationType(TemplateName Template, + const TemplateArgument *Args, + unsigned NumArgs) { + // Build the canonical template specialization type. + TemplateName CanonTemplate = getCanonicalTemplateName(Template); + llvm::SmallVector<TemplateArgument, 4> CanonArgs; + CanonArgs.reserve(NumArgs); + for (unsigned I = 0; I != NumArgs; ++I) + CanonArgs.push_back(getCanonicalTemplateArgument(Args[I])); + + // Determine whether this canonical template specialization type already + // exists. + llvm::FoldingSetNodeID ID; + TemplateSpecializationType::Profile(ID, CanonTemplate, + CanonArgs.data(), NumArgs, *this); + + void *InsertPos = 0; + TemplateSpecializationType *Spec + = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); + + if (!Spec) { + // Allocate a new canonical template specialization type. 
+ void *Mem = Allocate((sizeof(TemplateSpecializationType) + + sizeof(TemplateArgument) * NumArgs), + TypeAlignment); + Spec = new (Mem) TemplateSpecializationType(CanonTemplate, + CanonArgs.data(), NumArgs, + QualType()); + Types.push_back(Spec); + TemplateSpecializationTypes.InsertNode(Spec, InsertPos); + } + + assert(Spec->isDependentType() && + "Non-dependent template-id type must have a canonical type"); + return QualType(Spec, 0); +} + +QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS, QualType NamedType) { @@ -1898,44 +2072,69 @@ QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword, } QualType -ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword, +ASTContext::getDependentTemplateSpecializationType( + ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS, - const TemplateSpecializationType *TemplateId, - QualType Canon) { + const IdentifierInfo *Name, + const TemplateArgumentListInfo &Args) { + // TODO: avoid this copy + llvm::SmallVector<TemplateArgument, 16> ArgCopy; + for (unsigned I = 0, E = Args.size(); I != E; ++I) + ArgCopy.push_back(Args[I].getArgument()); + return getDependentTemplateSpecializationType(Keyword, NNS, Name, + ArgCopy.size(), + ArgCopy.data()); +} + +QualType +ASTContext::getDependentTemplateSpecializationType( + ElaboratedTypeKeyword Keyword, + NestedNameSpecifier *NNS, + const IdentifierInfo *Name, + unsigned NumArgs, + const TemplateArgument *Args) { assert(NNS->isDependent() && "nested-name-specifier must be dependent"); llvm::FoldingSetNodeID ID; - DependentNameType::Profile(ID, Keyword, NNS, TemplateId); + DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS, + Name, NumArgs, Args); void *InsertPos = 0; - DependentNameType *T - = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos); + DependentTemplateSpecializationType *T + = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); if (T) return QualType(T, 0); - if 
(Canon.isNull()) { - NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); - QualType CanonType = getCanonicalType(QualType(TemplateId, 0)); - ElaboratedTypeKeyword CanonKeyword = Keyword; - if (Keyword == ETK_None) - CanonKeyword = ETK_Typename; - if (CanonNNS != NNS || CanonKeyword != Keyword || - CanonType != QualType(TemplateId, 0)) { - const TemplateSpecializationType *CanonTemplateId - = CanonType->getAs<TemplateSpecializationType>(); - assert(CanonTemplateId && - "Canonical type must also be a template specialization type"); - Canon = getDependentNameType(CanonKeyword, CanonNNS, CanonTemplateId); - } + NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); + + ElaboratedTypeKeyword CanonKeyword = Keyword; + if (Keyword == ETK_None) CanonKeyword = ETK_Typename; - DependentNameType *CheckT - = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos); - assert(!CheckT && "Typename canonical type is broken"); (void)CheckT; + bool AnyNonCanonArgs = false; + llvm::SmallVector<TemplateArgument, 16> CanonArgs(NumArgs); + for (unsigned I = 0; I != NumArgs; ++I) { + CanonArgs[I] = getCanonicalTemplateArgument(Args[I]); + if (!CanonArgs[I].structurallyEquals(Args[I])) + AnyNonCanonArgs = true; } - T = new (*this) DependentNameType(Keyword, NNS, TemplateId, Canon); + QualType Canon; + if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) { + Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS, + Name, NumArgs, + CanonArgs.data()); + + // Find the insert position again. 
+ DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); + } + + void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) + + sizeof(TemplateArgument) * NumArgs), + TypeAlignment); + T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS, + Name, NumArgs, Args, Canon); Types.push_back(T); - DependentNameTypes.InsertNode(T, InsertPos); + DependentTemplateSpecializationTypes.InsertNode(T, InsertPos); return QualType(T, 0); } @@ -2326,6 +2525,48 @@ QualType ASTContext::getUnqualifiedArrayType(QualType T, SourceRange()); } +/// UnwrapSimilarPointerTypes - If T1 and T2 are pointer types that +/// may be similar (C++ 4.4), replaces T1 and T2 with the type that +/// they point to and return true. If T1 and T2 aren't pointer types +/// or pointer-to-member types, or if they are not similar at this +/// level, returns false and leaves T1 and T2 unchanged. Top-level +/// qualifiers on T1 and T2 are ignored. This function will typically +/// be called in a loop that successively "unwraps" pointer and +/// pointer-to-member types to compare them at each level. 
+bool ASTContext::UnwrapSimilarPointerTypes(QualType &T1, QualType &T2) { + const PointerType *T1PtrType = T1->getAs<PointerType>(), + *T2PtrType = T2->getAs<PointerType>(); + if (T1PtrType && T2PtrType) { + T1 = T1PtrType->getPointeeType(); + T2 = T2PtrType->getPointeeType(); + return true; + } + + const MemberPointerType *T1MPType = T1->getAs<MemberPointerType>(), + *T2MPType = T2->getAs<MemberPointerType>(); + if (T1MPType && T2MPType && + hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0), + QualType(T2MPType->getClass(), 0))) { + T1 = T1MPType->getPointeeType(); + T2 = T2MPType->getPointeeType(); + return true; + } + + if (getLangOptions().ObjC1) { + const ObjCObjectPointerType *T1OPType = T1->getAs<ObjCObjectPointerType>(), + *T2OPType = T2->getAs<ObjCObjectPointerType>(); + if (T1OPType && T2OPType) { + T1 = T1OPType->getPointeeType(); + T2 = T2OPType->getPointeeType(); + return true; + } + } + + // FIXME: Block pointers, too? + + return false; +} + DeclarationName ASTContext::getNameForTemplate(TemplateName Name) { if (TemplateDecl *TD = Name.getAsTemplateDecl()) return TD->getDeclName(); @@ -2344,10 +2585,14 @@ DeclarationName ASTContext::getNameForTemplate(TemplateName Name) { } TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) { - // If this template name refers to a template, the canonical - // template name merely stores the template itself. - if (TemplateDecl *Template = Name.getAsTemplateDecl()) + if (TemplateDecl *Template = Name.getAsTemplateDecl()) { + if (TemplateTemplateParmDecl *TTP + = dyn_cast<TemplateTemplateParmDecl>(Template)) + Template = getCanonicalTemplateTemplateParmDecl(TTP); + + // The canonical template name is the canonical template declaration. 
return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl())); + } assert(!Name.getAsOverloadedTemplate()); @@ -2856,6 +3101,10 @@ QualType ASTContext::getObjCFastEnumerationStateType() { Field->setAccess(AS_public); ObjCFastEnumerationStateTypeDecl->addDecl(Field); } + if (getLangOptions().CPlusPlus) + if (CXXRecordDecl *CXXRD = + dyn_cast<CXXRecordDecl>(ObjCFastEnumerationStateTypeDecl)) + CXXRD->setEmpty(false); ObjCFastEnumerationStateTypeDecl->completeDefinition(); } @@ -2981,7 +3230,6 @@ QualType ASTContext::BuildByRefType(const char *DeclName, QualType Ty) { bool HasCopyAndDispose = BlockRequiresCopying(Ty); // FIXME: Move up - static unsigned int UniqueBlockByRefTypeID = 0; llvm::SmallString<36> Name; llvm::raw_svector_ostream(Name) << "__Block_byref_" << ++UniqueBlockByRefTypeID << '_' << DeclName; @@ -3033,7 +3281,6 @@ QualType ASTContext::getBlockParmType( llvm::SmallVectorImpl<const Expr *> &Layout) { // FIXME: Move up - static unsigned int UniqueBlockParmTypeID = 0; llvm::SmallString<36> Name; llvm::raw_svector_ostream(Name) << "__block_literal_" << ++UniqueBlockParmTypeID; @@ -3122,7 +3369,7 @@ CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) { CharUnits sz = getTypeSizeInChars(type); // Make all integer and enum types at least as large as an int - if (sz.isPositive() && type->isIntegralType()) + if (sz.isPositive() && type->isIntegralOrEnumerationType()) sz = std::max(sz, getTypeSizeInChars(IntTy)); // Treat arrays as pointers, since that's how they're passed in. else if (type->isArrayType()) @@ -3143,7 +3390,7 @@ void ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr, QualType BlockTy = Expr->getType()->getAs<BlockPointerType>()->getPointeeType(); // Encode result type. - getObjCEncodingForType(cast<FunctionType>(BlockTy)->getResultType(), S); + getObjCEncodingForType(BlockTy->getAs<FunctionType>()->getResultType(), S); // Compute size of all parameters. // Start with computing size of a pointer in number of bytes. 
// FIXME: There might(should) be a better way of doing this computation! @@ -3376,13 +3623,74 @@ void ASTContext::getObjCEncodingForType(QualType T, std::string& S, true /* outermost type */); } +static char ObjCEncodingForPrimitiveKind(const ASTContext *C, QualType T) { + switch (T->getAs<BuiltinType>()->getKind()) { + default: assert(0 && "Unhandled builtin type kind"); + case BuiltinType::Void: return 'v'; + case BuiltinType::Bool: return 'B'; + case BuiltinType::Char_U: + case BuiltinType::UChar: return 'C'; + case BuiltinType::UShort: return 'S'; + case BuiltinType::UInt: return 'I'; + case BuiltinType::ULong: + return + (const_cast<ASTContext *>(C))->getIntWidth(T) == 32 ? 'L' : 'Q'; + case BuiltinType::UInt128: return 'T'; + case BuiltinType::ULongLong: return 'Q'; + case BuiltinType::Char_S: + case BuiltinType::SChar: return 'c'; + case BuiltinType::Short: return 's'; + case BuiltinType::WChar: + case BuiltinType::Int: return 'i'; + case BuiltinType::Long: + return + (const_cast<ASTContext *>(C))->getIntWidth(T) == 32 ? 'l' : 'q'; + case BuiltinType::LongLong: return 'q'; + case BuiltinType::Int128: return 't'; + case BuiltinType::Float: return 'f'; + case BuiltinType::Double: return 'd'; + case BuiltinType::LongDouble: return 'd'; + } +} + static void EncodeBitField(const ASTContext *Context, std::string& S, - const FieldDecl *FD) { + QualType T, const FieldDecl *FD) { const Expr *E = FD->getBitWidth(); assert(E && "bitfield width not there - getObjCEncodingForTypeImpl"); ASTContext *Ctx = const_cast<ASTContext*>(Context); - unsigned N = E->EvaluateAsInt(*Ctx).getZExtValue(); S += 'b'; + // The NeXT runtime encodes bit fields as b followed by the number of bits. + // The GNU runtime requires more information; bitfields are encoded as b, + // then the offset (in bits) of the first element, then the type of the + // bitfield, then the size in bits. 
For example, in this structure: + // + // struct + // { + // int integer; + // int flags:2; + // }; + // On a 32-bit system, the encoding for flags would be b2 for the NeXT + // runtime, but b32i2 for the GNU runtime. The reason for this extra + // information is not especially sensible, but we're stuck with it for + // compatibility with GCC, although providing it breaks anything that + // actually uses runtime introspection and wants to work on both runtimes... + if (!Ctx->getLangOptions().NeXTRuntime) { + const RecordDecl *RD = FD->getParent(); + const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD); + // FIXME: This same linear search is also used in ExprConstant - it might + // be better if the FieldDecl stored its offset. We'd be increasing the + // size of the object slightly, but saving some time every time it is used. + unsigned i = 0; + for (RecordDecl::field_iterator Field = RD->field_begin(), + FieldEnd = RD->field_end(); + Field != FieldEnd; (void)++Field, ++i) { + if (*Field == FD) + break; + } + S += llvm::utostr(RL.getFieldOffset(i)); + S += ObjCEncodingForPrimitiveKind(Context, T); + } + unsigned N = E->EvaluateAsInt(*Ctx).getZExtValue(); S += llvm::utostr(N); } @@ -3393,40 +3701,10 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S, const FieldDecl *FD, bool OutermostType, bool EncodingProperty) { - if (const BuiltinType *BT = T->getAs<BuiltinType>()) { + if (T->getAs<BuiltinType>()) { if (FD && FD->isBitField()) - return EncodeBitField(this, S, FD); - char encoding; - switch (BT->getKind()) { - default: assert(0 && "Unhandled builtin type kind"); - case BuiltinType::Void: encoding = 'v'; break; - case BuiltinType::Bool: encoding = 'B'; break; - case BuiltinType::Char_U: - case BuiltinType::UChar: encoding = 'C'; break; - case BuiltinType::UShort: encoding = 'S'; break; - case BuiltinType::UInt: encoding = 'I'; break; - case BuiltinType::ULong: - encoding = - (const_cast<ASTContext *>(this))->getIntWidth(T) == 32 ? 
'L' : 'Q'; - break; - case BuiltinType::UInt128: encoding = 'T'; break; - case BuiltinType::ULongLong: encoding = 'Q'; break; - case BuiltinType::Char_S: - case BuiltinType::SChar: encoding = 'c'; break; - case BuiltinType::Short: encoding = 's'; break; - case BuiltinType::Int: encoding = 'i'; break; - case BuiltinType::Long: - encoding = - (const_cast<ASTContext *>(this))->getIntWidth(T) == 32 ? 'l' : 'q'; - break; - case BuiltinType::LongLong: encoding = 'q'; break; - case BuiltinType::Int128: encoding = 't'; break; - case BuiltinType::Float: encoding = 'f'; break; - case BuiltinType::Double: encoding = 'd'; break; - case BuiltinType::LongDouble: encoding = 'd'; break; - } - - S += encoding; + return EncodeBitField(this, S, T, FD); + S += ObjCEncodingForPrimitiveKind(this, T); return; } @@ -3585,7 +3863,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S, if (T->isEnumeralType()) { if (FD && FD->isBitField()) - EncodeBitField(this, S, FD); + EncodeBitField(this, S, T, FD); else S += 'i'; return; @@ -4728,7 +5006,7 @@ QualType ASTContext::getCorrespondingUnsignedType(QualType T) { // Turn <4 x signed int> -> <4 x unsigned int> if (const VectorType *VTy = T->getAs<VectorType>()) return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()), - VTy->getNumElements(), VTy->isAltiVec(), VTy->isPixel()); + VTy->getNumElements(), VTy->getAltiVecSpecific()); // For enums, we return the unsigned version of the base type. if (const EnumType *ETy = T->getAs<EnumType>()) @@ -4886,7 +5164,8 @@ static QualType DecodeTypeFromStr(const char *&Str, ASTContext &Context, QualType ElementType = DecodeTypeFromStr(Str, Context, Error, false); // FIXME: Don't know what to do about AltiVec. 
- Type = Context.getVectorType(ElementType, NumElements, false, false); + Type = Context.getVectorType(ElementType, NumElements, + VectorType::NotAltiVec); break; } case 'X': { diff --git a/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp b/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp index 6ed08d1..8d347d1 100644 --- a/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp +++ b/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp @@ -73,6 +73,7 @@ namespace { // FIXME: TemplateSpecializationType QualType VisitElaboratedType(ElaboratedType *T); // FIXME: DependentNameType + // FIXME: DependentTemplateSpecializationType QualType VisitObjCInterfaceType(ObjCInterfaceType *T); QualType VisitObjCObjectType(ObjCObjectType *T); QualType VisitObjCObjectPointerType(ObjCObjectPointerType *T); @@ -439,9 +440,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, return false; if (Vec1->getNumElements() != Vec2->getNumElements()) return false; - if (Vec1->isAltiVec() != Vec2->isAltiVec()) - return false; - if (Vec1->isPixel() != Vec2->isPixel()) + if (Vec1->getAltiVecSpecific() != Vec2->getAltiVecSpecific()) return false; break; } @@ -619,14 +618,32 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, if (!IsStructurallyEquivalent(Typename1->getIdentifier(), Typename2->getIdentifier())) return false; - if (!IsStructurallyEquivalent(Context, - QualType(Typename1->getTemplateId(), 0), - QualType(Typename2->getTemplateId(), 0))) - return false; break; } + case Type::DependentTemplateSpecialization: { + const DependentTemplateSpecializationType *Spec1 = + cast<DependentTemplateSpecializationType>(T1); + const DependentTemplateSpecializationType *Spec2 = + cast<DependentTemplateSpecializationType>(T2); + if (!IsStructurallyEquivalent(Context, + Spec1->getQualifier(), + Spec2->getQualifier())) + return false; + if (!IsStructurallyEquivalent(Spec1->getIdentifier(), + Spec2->getIdentifier())) + return false; + if (Spec1->getNumArgs() != 
Spec2->getNumArgs()) + return false; + for (unsigned I = 0, N = Spec1->getNumArgs(); I != N; ++I) { + if (!IsStructurallyEquivalent(Context, + Spec1->getArg(I), Spec2->getArg(I))) + return false; + } + break; + } + case Type::ObjCInterface: { const ObjCInterfaceType *Iface1 = cast<ObjCInterfaceType>(T1); const ObjCInterfaceType *Iface2 = cast<ObjCInterfaceType>(T2); @@ -1172,8 +1189,7 @@ QualType ASTNodeImporter::VisitVectorType(VectorType *T) { return Importer.getToContext().getVectorType(ToElementType, T->getNumElements(), - T->isAltiVec(), - T->isPixel()); + T->getAltiVecSpecific()); } QualType ASTNodeImporter::VisitExtVectorType(ExtVectorType *T) { @@ -1687,7 +1703,7 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) { // Create the record declaration. RecordDecl *D2 = AdoptDecl; if (!D2) { - if (CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(D)) { + if (isa<CXXRecordDecl>(D)) { CXXRecordDecl *D2CXX = CXXRecordDecl::Create(Importer.getToContext(), D->getTagKind(), DC, Loc, @@ -1695,30 +1711,6 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) { Importer.Import(D->getTagKeywordLoc())); D2 = D2CXX; D2->setAccess(D->getAccess()); - - if (D->isDefinition()) { - // Add base classes. 
- llvm::SmallVector<CXXBaseSpecifier *, 4> Bases; - for (CXXRecordDecl::base_class_iterator - Base1 = D1CXX->bases_begin(), - FromBaseEnd = D1CXX->bases_end(); - Base1 != FromBaseEnd; - ++Base1) { - QualType T = Importer.Import(Base1->getType()); - if (T.isNull()) - return 0; - - Bases.push_back( - new (Importer.getToContext()) - CXXBaseSpecifier(Importer.Import(Base1->getSourceRange()), - Base1->isVirtual(), - Base1->isBaseOfClass(), - Base1->getAccessSpecifierAsWritten(), - T)); - } - if (!Bases.empty()) - D2CXX->setBases(Bases.data(), Bases.size()); - } } else { D2 = RecordDecl::Create(Importer.getToContext(), D->getTagKind(), DC, Loc, @@ -1739,6 +1731,33 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) { if (D->isDefinition()) { D2->startDefinition(); + + // Add base classes. + if (CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(D2)) { + CXXRecordDecl *D1CXX = cast<CXXRecordDecl>(D); + + llvm::SmallVector<CXXBaseSpecifier *, 4> Bases; + for (CXXRecordDecl::base_class_iterator + Base1 = D1CXX->bases_begin(), + FromBaseEnd = D1CXX->bases_end(); + Base1 != FromBaseEnd; + ++Base1) { + QualType T = Importer.Import(Base1->getType()); + if (T.isNull()) + return 0; + + Bases.push_back( + new (Importer.getToContext()) + CXXBaseSpecifier(Importer.Import(Base1->getSourceRange()), + Base1->isVirtual(), + Base1->isBaseOfClass(), + Base1->getAccessSpecifierAsWritten(), + T)); + } + if (!Bases.empty()) + D2CXX->setBases(Bases.data(), Bases.size()); + } + ImportDeclContext(D); D2->completeDefinition(); } @@ -2598,8 +2617,8 @@ Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) { } // Import the type. - QualType T = Importer.Import(D->getType()); - if (T.isNull()) + TypeSourceInfo *T = Importer.Import(D->getTypeSourceInfo()); + if (!T) return 0; // Create the new property. 
@@ -2614,6 +2633,8 @@ Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) { LexicalDC->addDecl(ToProperty); ToProperty->setPropertyAttributes(D->getPropertyAttributes()); + ToProperty->setPropertyAttributesAsWritten( + D->getPropertyAttributesAsWritten()); ToProperty->setGetterName(Importer.Import(D->getGetterName())); ToProperty->setSetterName(Importer.Import(D->getSetterName())); ToProperty->setGetterMethodDecl( diff --git a/contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp b/contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp index 0fab22c..b09ba895 100644 --- a/contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp +++ b/contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp @@ -24,7 +24,7 @@ void Attr::Destroy(ASTContext &C) { C.Deallocate((void*)this); } -AttrWithString::AttrWithString(Attr::Kind AK, ASTContext &C, llvm::StringRef s) +AttrWithString::AttrWithString(attr::Kind AK, ASTContext &C, llvm::StringRef s) : Attr(AK) { assert(!s.empty()); StrLen = s.size(); @@ -51,7 +51,7 @@ void FormatAttr::setType(ASTContext &C, llvm::StringRef type) { } NonNullAttr::NonNullAttr(ASTContext &C, unsigned* arg_nums, unsigned size) - : Attr(NonNull), ArgNums(0), Size(0) { + : Attr(attr::NonNull), ArgNums(0), Size(0) { if (size == 0) return; assert(arg_nums); @@ -93,6 +93,7 @@ DEF_SIMPLE_ATTR_CLONE(NSReturnsNotRetained) DEF_SIMPLE_ATTR_CLONE(NSReturnsRetained) DEF_SIMPLE_ATTR_CLONE(NoDebug) DEF_SIMPLE_ATTR_CLONE(NoInline) +DEF_SIMPLE_ATTR_CLONE(NoInstrumentFunction) DEF_SIMPLE_ATTR_CLONE(NoReturn) DEF_SIMPLE_ATTR_CLONE(NoThrow) DEF_SIMPLE_ATTR_CLONE(ObjCException) @@ -200,6 +201,10 @@ Attr *ReqdWorkGroupSizeAttr::clone(ASTContext &C) const { return ::new (C) ReqdWorkGroupSizeAttr(X, Y, Z); } +Attr *InitPriorityAttr::clone(ASTContext &C) const { + return ::new (C) InitPriorityAttr(Priority); +} + Attr *MSP430InterruptAttr::clone(ASTContext &C) const { return ::new (C) MSP430InterruptAttr(Number); } diff --git a/contrib/llvm/tools/clang/lib/AST/CMakeLists.txt 
b/contrib/llvm/tools/clang/lib/AST/CMakeLists.txt index bce3646..407ed95 100644 --- a/contrib/llvm/tools/clang/lib/AST/CMakeLists.txt +++ b/contrib/llvm/tools/clang/lib/AST/CMakeLists.txt @@ -18,6 +18,7 @@ add_clang_library(clangAST DeclPrinter.cpp DeclTemplate.cpp Expr.cpp + ExprClassification.cpp ExprConstant.cpp ExprCXX.cpp FullExpr.cpp @@ -39,4 +40,5 @@ add_clang_library(clangAST TypePrinter.cpp ) -add_dependencies(clangAST ClangDiagnosticAST ClangStmtNodes) +add_dependencies(clangAST ClangARMNeon ClangAttrClasses ClangAttrList + ClangDiagnosticAST ClangDeclNodes ClangStmtNodes) diff --git a/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp b/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp index d616e42..c563c37 100644 --- a/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp +++ b/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp @@ -90,6 +90,9 @@ bool CXXRecordDecl::isDerivedFrom(CXXRecordDecl *Base, CXXBasePaths &Paths) cons } bool CXXRecordDecl::isVirtuallyDerivedFrom(CXXRecordDecl *Base) const { + if (!getNumVBases()) + return false; + CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/false, /*DetectVirtual=*/false); @@ -559,22 +562,23 @@ void FinalOverriderCollector::Collect(const CXXRecordDecl *RD, for (; OverMethods.first != OverMethods.second; ++OverMethods.first) { const CXXMethodDecl *CanonOM = cast<CXXMethodDecl>((*OverMethods.first)->getCanonicalDecl()); + + // C++ [class.virtual]p2: + // A virtual member function C::vf of a class object S is + // a final overrider unless the most derived class (1.8) + // of which S is a base class subobject (if any) declares + // or inherits another member function that overrides vf. + // + // Treating this object like the most derived class, we + // replace any overrides from base classes with this + // overriding virtual function. 
+ Overriders[CanonOM].replaceAll( + UniqueVirtualMethod(CanonM, SubobjectNumber, + InVirtualSubobject)); + if (CanonOM->begin_overridden_methods() - == CanonOM->end_overridden_methods()) { - // C++ [class.virtual]p2: - // A virtual member function C::vf of a class object S is - // a final overrider unless the most derived class (1.8) - // of which S is a base class subobject (if any) declares - // or inherits another member function that overrides vf. - // - // Treating this object like the most derived class, we - // replace any overrides from base classes with this - // overriding virtual function. - Overriders[CanonOM].replaceAll( - UniqueVirtualMethod(CanonM, SubobjectNumber, - InVirtualSubobject)); + == CanonOM->end_overridden_methods()) continue; - } // Continue recursion to the methods that this virtual method // overrides. @@ -582,6 +586,12 @@ void FinalOverriderCollector::Collect(const CXXRecordDecl *RD, CanonOM->end_overridden_methods())); } } + + // C++ [class.virtual]p2: + // For convenience we say that any virtual function overrides itself. 
+ Overriders[CanonM].add(SubobjectNumber, + UniqueVirtualMethod(CanonM, SubobjectNumber, + InVirtualSubobject)); } } diff --git a/contrib/llvm/tools/clang/lib/AST/Decl.cpp b/contrib/llvm/tools/clang/lib/AST/Decl.cpp index ffdcb47..149938f 100644 --- a/contrib/llvm/tools/clang/lib/AST/Decl.cpp +++ b/contrib/llvm/tools/clang/lib/AST/Decl.cpp @@ -523,6 +523,14 @@ bool NamedDecl::isCXXInstanceMember() const { // DeclaratorDecl Implementation //===----------------------------------------------------------------------===// +template <typename DeclT> +static SourceLocation getTemplateOrInnerLocStart(const DeclT *decl) { + if (decl->getNumTemplateParameterLists() > 0) + return decl->getTemplateParameterList(0)->getTemplateLoc(); + else + return decl->getInnerLocStart(); +} + DeclaratorDecl::~DeclaratorDecl() {} void DeclaratorDecl::Destroy(ASTContext &C) { if (hasExtInfo()) @@ -531,15 +539,8 @@ void DeclaratorDecl::Destroy(ASTContext &C) { } SourceLocation DeclaratorDecl::getTypeSpecStartLoc() const { - if (DeclInfo) { - TypeLoc TL = getTypeSourceInfo()->getTypeLoc(); - while (true) { - TypeLoc NextTL = TL.getNextTypeLoc(); - if (!NextTL) - return TL.getLocalSourceRange().getBegin(); - TL = NextTL; - } - } + TypeSourceInfo *TSI = getTypeSourceInfo(); + if (TSI) return TSI->getTypeLoc().getBeginLoc(); return SourceLocation(); } @@ -573,6 +574,40 @@ void DeclaratorDecl::setQualifierInfo(NestedNameSpecifier *Qualifier, } } +SourceLocation DeclaratorDecl::getOuterLocStart() const { + return getTemplateOrInnerLocStart(this); +} + +void +QualifierInfo::setTemplateParameterListsInfo(ASTContext &Context, + unsigned NumTPLists, + TemplateParameterList **TPLists) { + assert((NumTPLists == 0 || TPLists != 0) && + "Empty array of template parameters with positive size!"); + assert((NumTPLists == 0 || NNS) && + "Nonempty array of template parameters with no qualifier!"); + + // Free previous template parameters (if any). 
+ if (NumTemplParamLists > 0) { + Context.Deallocate(TemplParamLists); + TemplParamLists = 0; + NumTemplParamLists = 0; + } + // Set info on matched template parameter lists (if any). + if (NumTPLists > 0) { + TemplParamLists = new (Context) TemplateParameterList*[NumTPLists]; + NumTemplParamLists = NumTPLists; + for (unsigned i = NumTPLists; i-- > 0; ) + TemplParamLists[i] = TPLists[i]; + } +} + +void QualifierInfo::Destroy(ASTContext &Context) { + // FIXME: Deallocate template parameter lists themselves! + if (TemplParamLists) + Context.Deallocate(TemplParamLists); +} + //===----------------------------------------------------------------------===// // VarDecl Implementation //===----------------------------------------------------------------------===// @@ -613,14 +648,17 @@ void VarDecl::Destroy(ASTContext& C) { VarDecl::~VarDecl() { } -SourceRange VarDecl::getSourceRange() const { +SourceLocation VarDecl::getInnerLocStart() const { SourceLocation Start = getTypeSpecStartLoc(); if (Start.isInvalid()) Start = getLocation(); - + return Start; +} + +SourceRange VarDecl::getSourceRange() const { if (getInit()) - return SourceRange(Start, getInit()->getLocEnd()); - return SourceRange(Start, getLocation()); + return SourceRange(getOuterLocStart(), getInit()->getLocEnd()); + return SourceRange(getOuterLocStart(), getLocation()); } bool VarDecl::isExternC() const { @@ -678,7 +716,15 @@ VarDecl::DefinitionKind VarDecl::isThisDeclarationADefinition() const { // AST for 'extern "C" int foo;' is annotated with 'extern'. 
if (hasExternalStorage()) return DeclarationOnly; - + + if (getStorageClassAsWritten() == Extern || + getStorageClassAsWritten() == PrivateExtern) { + for (const VarDecl *PrevVar = getPreviousDeclaration(); + PrevVar; PrevVar = PrevVar->getPreviousDeclaration()) { + if (PrevVar->getLinkage() == InternalLinkage && PrevVar->hasInit()) + return DeclarationOnly; + } + } // C99 6.9.2p2: // A declaration of an object that has file scope without an initializer, // and without a storage class specifier or the scs 'static', constitutes @@ -697,7 +743,7 @@ VarDecl *VarDecl::getActingDefinition() { if (Kind != TentativeDefinition) return 0; - VarDecl *LastTentative = false; + VarDecl *LastTentative = 0; VarDecl *First = getFirstDeclaration(); for (redecl_iterator I = First->redecls_begin(), E = First->redecls_end(); I != E; ++I) { @@ -907,6 +953,17 @@ bool FunctionDecl::isVariadic() const { return false; } +bool FunctionDecl::hasBody(const FunctionDecl *&Definition) const { + for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I) { + if (I->Body) { + Definition = *I; + return true; + } + } + + return false; +} + Stmt *FunctionDecl::getBody(const FunctionDecl *&Definition) const { for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I) { if (I->Body) { @@ -1107,11 +1164,11 @@ bool FunctionDecl::isInlined() const { } const FunctionDecl *PatternDecl = getTemplateInstantiationPattern(); - Stmt *Pattern = 0; + bool HasPattern = false; if (PatternDecl) - Pattern = PatternDecl->getBody(PatternDecl); + HasPattern = PatternDecl->hasBody(PatternDecl); - if (Pattern && PatternDecl) + if (HasPattern && PatternDecl) return PatternDecl->isInlined(); return false; @@ -1197,6 +1254,23 @@ const IdentifierInfo *FunctionDecl::getLiteralIdentifier() const { return 0; } +FunctionDecl::TemplatedKind FunctionDecl::getTemplatedKind() const { + if (TemplateOrSpecialization.isNull()) + return TK_NonTemplate; + if (TemplateOrSpecialization.is<FunctionTemplateDecl 
*>()) + return TK_FunctionTemplate; + if (TemplateOrSpecialization.is<MemberSpecializationInfo *>()) + return TK_MemberSpecialization; + if (TemplateOrSpecialization.is<FunctionTemplateSpecializationInfo *>()) + return TK_FunctionTemplateSpecialization; + if (TemplateOrSpecialization.is + <DependentFunctionTemplateSpecializationInfo*>()) + return TK_DependentFunctionTemplateSpecialization; + + assert(false && "Did we miss a TemplateOrSpecialization type?"); + return TK_NonTemplate; +} + FunctionDecl *FunctionDecl::getInstantiatedFromMemberFunction() const { if (MemberSpecializationInfo *Info = getMemberSpecializationInfo()) return cast<FunctionDecl>(Info->getInstantiatedFrom()); @@ -1239,15 +1313,15 @@ bool FunctionDecl::isImplicitlyInstantiable() const { // Find the actual template from which we will instantiate. const FunctionDecl *PatternDecl = getTemplateInstantiationPattern(); - Stmt *Pattern = 0; + bool HasPattern = false; if (PatternDecl) - Pattern = PatternDecl->getBody(PatternDecl); + HasPattern = PatternDecl->hasBody(PatternDecl); // C++0x [temp.explicit]p9: // Except for inline functions, other explicit instantiation declarations // have the effect of suppressing the implicit instantiation of the entity // to which they refer. 
- if (!Pattern || !PatternDecl) + if (!HasPattern || !PatternDecl) return true; return PatternDecl->isInlined(); @@ -1304,7 +1378,8 @@ FunctionDecl::setFunctionTemplateSpecialization(FunctionTemplateDecl *Template, const TemplateArgumentList *TemplateArgs, void *InsertPos, TemplateSpecializationKind TSK, - const TemplateArgumentListInfo *TemplateArgsAsWritten) { + const TemplateArgumentListInfo *TemplateArgsAsWritten, + SourceLocation PointOfInstantiation) { assert(TSK != TSK_Undeclared && "Must specify the type of function template specialization"); FunctionTemplateSpecializationInfo *Info @@ -1317,6 +1392,7 @@ FunctionDecl::setFunctionTemplateSpecialization(FunctionTemplateDecl *Template, Info->Template.setInt(TSK - 1); Info->TemplateArguments = TemplateArgs; Info->TemplateArgumentsAsWritten = TemplateArgsAsWritten; + Info->PointOfInstantiation = PointOfInstantiation; TemplateOrSpecialization = Info; // Insert this function template specialization into the set of known @@ -1336,6 +1412,28 @@ FunctionDecl::setFunctionTemplateSpecialization(FunctionTemplateDecl *Template, } void +FunctionDecl::setFunctionTemplateSpecialization(FunctionTemplateDecl *Template, + unsigned NumTemplateArgs, + const TemplateArgument *TemplateArgs, + TemplateSpecializationKind TSK, + unsigned NumTemplateArgsAsWritten, + TemplateArgumentLoc *TemplateArgsAsWritten, + SourceLocation LAngleLoc, + SourceLocation RAngleLoc, + SourceLocation PointOfInstantiation) { + ASTContext &Ctx = getASTContext(); + TemplateArgumentList *TemplArgs + = new (Ctx) TemplateArgumentList(Ctx, TemplateArgs, NumTemplateArgs); + TemplateArgumentListInfo *TemplArgsInfo + = new (Ctx) TemplateArgumentListInfo(LAngleLoc, RAngleLoc); + for (unsigned i=0; i != NumTemplateArgsAsWritten; ++i) + TemplArgsInfo->addArgument(TemplateArgsAsWritten[i]); + + setFunctionTemplateSpecialization(Template, TemplArgs, /*InsertPos=*/0, TSK, + TemplArgsInfo, PointOfInstantiation); +} + +void 
FunctionDecl::setDependentTemplateSpecialization(ASTContext &Context, const UnresolvedSetImpl &Templates, const TemplateArgumentListInfo &TemplateArgs) { @@ -1427,7 +1525,7 @@ bool FunctionDecl::isOutOfLine() const { // class template, check whether that member function was defined out-of-line. if (FunctionDecl *FD = getInstantiatedFromMemberFunction()) { const FunctionDecl *Definition; - if (FD->getBody(Definition)) + if (FD->hasBody(Definition)) return Definition->isOutOfLine(); } @@ -1435,7 +1533,7 @@ bool FunctionDecl::isOutOfLine() const { // check whether that function template was defined out-of-line. if (FunctionTemplateDecl *FunTmpl = getPrimaryTemplate()) { const FunctionDecl *Definition; - if (FunTmpl->getTemplatedDecl()->getBody(Definition)) + if (FunTmpl->getTemplatedDecl()->hasBody(Definition)) return Definition->isOutOfLine(); } @@ -1472,9 +1570,13 @@ void TagDecl::Destroy(ASTContext &C) { TypeDecl::Destroy(C); } +SourceLocation TagDecl::getOuterLocStart() const { + return getTemplateOrInnerLocStart(this); +} + SourceRange TagDecl::getSourceRange() const { SourceLocation E = RBraceLoc.isValid() ? 
RBraceLoc : getLocation(); - return SourceRange(TagKeywordLoc, E); + return SourceRange(getOuterLocStart(), E); } TagDecl* TagDecl::getCanonicalDecl() { @@ -1569,6 +1671,10 @@ EnumDecl *EnumDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L, return Enum; } +EnumDecl *EnumDecl::Create(ASTContext &C, EmptyShell Empty) { + return new (C) EnumDecl(0, SourceLocation(), 0, 0, SourceLocation()); +} + void EnumDecl::Destroy(ASTContext& C) { TagDecl::Destroy(C); } @@ -1608,6 +1714,11 @@ RecordDecl *RecordDecl::Create(ASTContext &C, TagKind TK, DeclContext *DC, return R; } +RecordDecl *RecordDecl::Create(ASTContext &C, EmptyShell Empty) { + return new (C) RecordDecl(Record, TTK_Struct, 0, SourceLocation(), 0, 0, + SourceLocation()); +} + RecordDecl::~RecordDecl() { } diff --git a/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp b/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp index 42a3726..d4f997d 100644 --- a/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp +++ b/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp @@ -35,16 +35,18 @@ using namespace clang; // Statistics //===----------------------------------------------------------------------===// -#define DECL(Derived, Base) static int n##Derived##s = 0; -#include "clang/AST/DeclNodes.def" +#define DECL(DERIVED, BASE) static int n##DERIVED##s = 0; +#define ABSTRACT_DECL(DECL) +#include "clang/AST/DeclNodes.inc" static bool StatSwitch = false; const char *Decl::getDeclKindName() const { switch (DeclKind) { - default: assert(0 && "Declaration not in DeclNodes.def!"); -#define DECL(Derived, Base) case Derived: return #Derived; -#include "clang/AST/DeclNodes.def" + default: assert(0 && "Declaration not in DeclNodes.inc!"); +#define DECL(DERIVED, BASE) case DERIVED: return #DERIVED; +#define ABSTRACT_DECL(DECL) +#include "clang/AST/DeclNodes.inc" } } @@ -60,9 +62,10 @@ void Decl::setInvalidDecl(bool Invalid) { const char *DeclContext::getDeclKindName() const { switch (DeclKind) { - default: assert(0 && "Declaration context not 
in DeclNodes.def!"); -#define DECL(Derived, Base) case Decl::Derived: return #Derived; -#include "clang/AST/DeclNodes.def" + default: assert(0 && "Declaration context not in DeclNodes.inc!"); +#define DECL(DERIVED, BASE) case Decl::DERIVED: return #DERIVED; +#define ABSTRACT_DECL(DECL) +#include "clang/AST/DeclNodes.inc" } } @@ -75,28 +78,31 @@ void Decl::PrintStats() { fprintf(stderr, "*** Decl Stats:\n"); int totalDecls = 0; -#define DECL(Derived, Base) totalDecls += n##Derived##s; -#include "clang/AST/DeclNodes.def" +#define DECL(DERIVED, BASE) totalDecls += n##DERIVED##s; +#define ABSTRACT_DECL(DECL) +#include "clang/AST/DeclNodes.inc" fprintf(stderr, " %d decls total.\n", totalDecls); int totalBytes = 0; -#define DECL(Derived, Base) \ - if (n##Derived##s > 0) { \ - totalBytes += (int)(n##Derived##s * sizeof(Derived##Decl)); \ - fprintf(stderr, " %d " #Derived " decls, %d each (%d bytes)\n", \ - n##Derived##s, (int)sizeof(Derived##Decl), \ - (int)(n##Derived##s * sizeof(Derived##Decl))); \ +#define DECL(DERIVED, BASE) \ + if (n##DERIVED##s > 0) { \ + totalBytes += (int)(n##DERIVED##s * sizeof(DERIVED##Decl)); \ + fprintf(stderr, " %d " #DERIVED " decls, %d each (%d bytes)\n", \ + n##DERIVED##s, (int)sizeof(DERIVED##Decl), \ + (int)(n##DERIVED##s * sizeof(DERIVED##Decl))); \ } -#include "clang/AST/DeclNodes.def" +#define ABSTRACT_DECL(DECL) +#include "clang/AST/DeclNodes.inc" fprintf(stderr, "Total bytes = %d\n", totalBytes); } -void Decl::addDeclKind(Kind k) { +void Decl::add(Kind k) { switch (k) { - default: assert(0 && "Declaration not in DeclNodes.def!"); -#define DECL(Derived, Base) case Derived: ++n##Derived##s; break; -#include "clang/AST/DeclNodes.def" + default: assert(0 && "Declaration not in DeclNodes.inc!"); +#define DECL(DERIVED, BASE) case DERIVED: ++n##DERIVED##s; break; +#define ABSTRACT_DECL(DECL) +#include "clang/AST/DeclNodes.inc" } } @@ -206,17 +212,17 @@ ASTContext &Decl::getASTContext() const { return 
getTranslationUnitDecl()->getASTContext(); } -bool Decl::isUsed() const { +bool Decl::isUsed(bool CheckUsedAttr) const { if (Used) return true; // Check for used attribute. - if (hasAttr<UsedAttr>()) + if (CheckUsedAttr && hasAttr<UsedAttr>()) return true; // Check redeclarations for used attribute. for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I) { - if (I->hasAttr<UsedAttr>() || I->Used) + if ((CheckUsedAttr && I->hasAttr<UsedAttr>()) || I->Used) return true; } @@ -285,6 +291,7 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) { // Never have names. case Friend: case FriendTemplate: + case AccessSpec: case LinkageSpec: case FileScopeAsm: case StaticAssert: @@ -307,9 +314,20 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) { return 0; } +void Decl::initAttrs(Attr *attrs) { + assert(!HasAttrs && "Decl already contains attrs."); + + Attr *&AttrBlank = getASTContext().getDeclAttrs(this); + assert(AttrBlank == 0 && "HasAttrs was wrong?"); + + AttrBlank = attrs; + HasAttrs = true; +} + void Decl::addAttr(Attr *NewAttr) { Attr *&ExistingAttr = getASTContext().getDeclAttrs(this); + assert(NewAttr->getNext() == 0 && "Chain of attributes will be truncated!"); NewAttr->setNext(ExistingAttr); ExistingAttr = NewAttr; @@ -354,7 +372,6 @@ void Decl::swapAttrs(Decl *RHS) { RHS->HasAttrs = true; } - void Decl::Destroy(ASTContext &C) { // Free attributes for this decl. 
if (HasAttrs) { @@ -392,16 +409,18 @@ void Decl::Destroy(ASTContext &C) { Decl *Decl::castFromDeclContext (const DeclContext *D) { Decl::Kind DK = D->getDeclKind(); switch(DK) { -#define DECL_CONTEXT(Name) \ - case Decl::Name: \ - return static_cast<Name##Decl*>(const_cast<DeclContext*>(D)); -#define DECL_CONTEXT_BASE(Name) -#include "clang/AST/DeclNodes.def" +#define DECL(NAME, BASE) +#define DECL_CONTEXT(NAME) \ + case Decl::NAME: \ + return static_cast<NAME##Decl*>(const_cast<DeclContext*>(D)); +#define DECL_CONTEXT_BASE(NAME) +#include "clang/AST/DeclNodes.inc" default: -#define DECL_CONTEXT_BASE(Name) \ - if (DK >= Decl::Name##First && DK <= Decl::Name##Last) \ - return static_cast<Name##Decl*>(const_cast<DeclContext*>(D)); -#include "clang/AST/DeclNodes.def" +#define DECL(NAME, BASE) +#define DECL_CONTEXT_BASE(NAME) \ + if (DK >= first##NAME && DK <= last##NAME) \ + return static_cast<NAME##Decl*>(const_cast<DeclContext*>(D)); +#include "clang/AST/DeclNodes.inc" assert(false && "a decl that inherits DeclContext isn't handled"); return 0; } @@ -410,46 +429,51 @@ Decl *Decl::castFromDeclContext (const DeclContext *D) { DeclContext *Decl::castToDeclContext(const Decl *D) { Decl::Kind DK = D->getKind(); switch(DK) { -#define DECL_CONTEXT(Name) \ - case Decl::Name: \ - return static_cast<Name##Decl*>(const_cast<Decl*>(D)); -#define DECL_CONTEXT_BASE(Name) -#include "clang/AST/DeclNodes.def" +#define DECL(NAME, BASE) +#define DECL_CONTEXT(NAME) \ + case Decl::NAME: \ + return static_cast<NAME##Decl*>(const_cast<Decl*>(D)); +#define DECL_CONTEXT_BASE(NAME) +#include "clang/AST/DeclNodes.inc" default: -#define DECL_CONTEXT_BASE(Name) \ - if (DK >= Decl::Name##First && DK <= Decl::Name##Last) \ - return static_cast<Name##Decl*>(const_cast<Decl*>(D)); -#include "clang/AST/DeclNodes.def" +#define DECL(NAME, BASE) +#define DECL_CONTEXT_BASE(NAME) \ + if (DK >= first##NAME && DK <= last##NAME) \ + return static_cast<NAME##Decl*>(const_cast<Decl*>(D)); +#include 
"clang/AST/DeclNodes.inc" assert(false && "a decl that inherits DeclContext isn't handled"); return 0; } } -CompoundStmt* Decl::getCompoundBody() const { - return dyn_cast_or_null<CompoundStmt>(getBody()); -} - SourceLocation Decl::getBodyRBrace() const { - Stmt *Body = getBody(); - if (!Body) + // Special handling of FunctionDecl to avoid de-serializing the body from PCH. + // FunctionDecl stores EndRangeLoc for this purpose. + if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(this)) { + const FunctionDecl *Definition; + if (FD->hasBody(Definition)) + return Definition->getSourceRange().getEnd(); return SourceLocation(); - if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Body)) - return CS->getRBracLoc(); - assert(isa<CXXTryStmt>(Body) && - "Body can only be CompoundStmt or CXXTryStmt"); - return cast<CXXTryStmt>(Body)->getSourceRange().getEnd(); + } + + if (Stmt *Body = getBody()) + return Body->getSourceRange().getEnd(); + + return SourceLocation(); } #ifndef NDEBUG void Decl::CheckAccessDeclContext() const { + // FIXME: Disable this until rdar://8146294 "access specifier for inner class + // templates is not set or checked" is fixed. + return; // Suppress this check if any of the following hold: // 1. this is the translation unit (and thus has no parent) // 2. this is a template parameter (and thus doesn't belong to its context) - // 3. this is a ParmVarDecl (which can be in a record context during - // the brief period between its creation and the creation of the - // FunctionDecl) - // 4. the context is not a record + // 3. the context is not a record + // 4. 
it's invalid if (isa<TranslationUnitDecl>(this) || + isa<TemplateTypeParmDecl>(this) || !isa<CXXRecordDecl>(getDeclContext()) || isInvalidDecl()) return; @@ -466,16 +490,18 @@ void Decl::CheckAccessDeclContext() const { bool DeclContext::classof(const Decl *D) { switch (D->getKind()) { -#define DECL_CONTEXT(Name) case Decl::Name: -#define DECL_CONTEXT_BASE(Name) -#include "clang/AST/DeclNodes.def" +#define DECL(NAME, BASE) +#define DECL_CONTEXT(NAME) case Decl::NAME: +#define DECL_CONTEXT_BASE(NAME) +#include "clang/AST/DeclNodes.inc" return true; default: -#define DECL_CONTEXT_BASE(Name) \ - if (D->getKind() >= Decl::Name##First && \ - D->getKind() <= Decl::Name##Last) \ +#define DECL(NAME, BASE) +#define DECL_CONTEXT_BASE(NAME) \ + if (D->getKind() >= Decl::first##NAME && \ + D->getKind() <= Decl::last##NAME) \ return true; -#include "clang/AST/DeclNodes.def" +#include "clang/AST/DeclNodes.inc" return false; } } @@ -537,7 +563,7 @@ bool DeclContext::isTransparentContext() const { return true; // FIXME: Check for C++0x scoped enums else if (DeclKind == Decl::LinkageSpec) return true; - else if (DeclKind >= Decl::RecordFirst && DeclKind <= Decl::RecordLast) + else if (DeclKind >= Decl::firstRecord && DeclKind <= Decl::lastRecord) return cast<RecordDecl>(this)->isAnonymousStructOrUnion(); else if (DeclKind == Decl::Namespace) return false; // FIXME: Check for C++0x inline namespaces @@ -581,7 +607,7 @@ DeclContext *DeclContext::getPrimaryContext() { return this; default: - if (DeclKind >= Decl::TagFirst && DeclKind <= Decl::TagLast) { + if (DeclKind >= Decl::firstTag && DeclKind <= Decl::lastTag) { // If this is a tag type that has a definition or is currently // being defined, that definition is our primary context. 
TagDecl *Tag = cast<TagDecl>(this); @@ -602,7 +628,7 @@ DeclContext *DeclContext::getPrimaryContext() { return Tag; } - assert(DeclKind >= Decl::FunctionFirst && DeclKind <= Decl::FunctionLast && + assert(DeclKind >= Decl::firstFunction && DeclKind <= Decl::lastFunction && "Unknown DeclContext kind"); return this; } @@ -626,9 +652,8 @@ DeclContext::LoadLexicalDeclsFromExternalStorage() const { ExternalASTSource *Source = getParentASTContext().getExternalSource(); assert(hasExternalLexicalStorage() && Source && "No external storage?"); - llvm::SmallVector<uint32_t, 64> Decls; - if (Source->ReadDeclsLexicallyInContext(const_cast<DeclContext *>(this), - Decls)) + llvm::SmallVector<Decl*, 64> Decls; + if (Source->FindExternalLexicalDecls(this, Decls)) return; // There is no longer any lexical storage in this context @@ -642,7 +667,7 @@ DeclContext::LoadLexicalDeclsFromExternalStorage() const { Decl *FirstNewDecl = 0; Decl *PrevDecl = 0; for (unsigned I = 0, N = Decls.size(); I != N; ++I) { - Decl *D = Source->GetDecl(Decls[I]); + Decl *D = Decls[I]; if (PrevDecl) PrevDecl->NextDeclInContext = D; else @@ -659,28 +684,83 @@ DeclContext::LoadLexicalDeclsFromExternalStorage() const { LastDecl = PrevDecl; } -void -DeclContext::LoadVisibleDeclsFromExternalStorage() const { - DeclContext *This = const_cast<DeclContext *>(this); - ExternalASTSource *Source = getParentASTContext().getExternalSource(); - assert(hasExternalVisibleStorage() && Source && "No external storage?"); +DeclContext::lookup_result +ExternalASTSource::SetNoExternalVisibleDeclsForName(const DeclContext *DC, + DeclarationName Name) { + ASTContext &Context = DC->getParentASTContext(); + StoredDeclsMap *Map; + if (!(Map = DC->LookupPtr)) + Map = DC->CreateStoredDeclsMap(Context); + + StoredDeclsList &List = (*Map)[Name]; + assert(List.isNull()); + (void) List; + + return DeclContext::lookup_result(); +} - llvm::SmallVector<VisibleDeclaration, 64> Decls; - if (Source->ReadDeclsVisibleInContext(This, Decls)) - 
return; +DeclContext::lookup_result +ExternalASTSource::SetExternalVisibleDeclsForName(const DeclContext *DC, + const VisibleDeclaration &VD) { + ASTContext &Context = DC->getParentASTContext(); + StoredDeclsMap *Map; + if (!(Map = DC->LookupPtr)) + Map = DC->CreateStoredDeclsMap(Context); + + StoredDeclsList &List = (*Map)[VD.Name]; + List.setFromDeclIDs(VD.Declarations); + return List.getLookupResult(Context); +} - // There is no longer any visible storage in this context - ExternalVisibleStorage = false; +DeclContext::lookup_result +ExternalASTSource::SetExternalVisibleDeclsForName(const DeclContext *DC, + DeclarationName Name, + llvm::SmallVectorImpl<NamedDecl*> &Decls) { + ASTContext &Context = DC->getParentASTContext();; - // Load the declaration IDs for all of the names visible in this - // context. - assert(!LookupPtr && "Have a lookup map before de-serialization?"); - StoredDeclsMap *Map = CreateStoredDeclsMap(getParentASTContext()); + StoredDeclsMap *Map; + if (!(Map = DC->LookupPtr)) + Map = DC->CreateStoredDeclsMap(Context); + + StoredDeclsList &List = (*Map)[Name]; + for (unsigned I = 0, N = Decls.size(); I != N; ++I) { + if (List.isNull()) + List.setOnlyValue(Decls[I]); + else + List.AddSubsequentDecl(Decls[I]); + } + + return List.getLookupResult(Context); +} + +void ExternalASTSource::SetExternalVisibleDecls(const DeclContext *DC, + const llvm::SmallVectorImpl<VisibleDeclaration> &Decls) { + // There is no longer any visible storage in this context. + DC->ExternalVisibleStorage = false; + + assert(!DC->LookupPtr && "Have a lookup map before de-serialization?"); + StoredDeclsMap *Map = DC->CreateStoredDeclsMap(DC->getParentASTContext()); for (unsigned I = 0, N = Decls.size(); I != N; ++I) { (*Map)[Decls[I].Name].setFromDeclIDs(Decls[I].Declarations); } } +void ExternalASTSource::SetExternalVisibleDecls(const DeclContext *DC, + const llvm::SmallVectorImpl<NamedDecl*> &Decls) { + // There is no longer any visible storage in this context. 
+ DC->ExternalVisibleStorage = false; + + assert(!DC->LookupPtr && "Have a lookup map before de-serialization?"); + StoredDeclsMap &Map = *DC->CreateStoredDeclsMap(DC->getParentASTContext()); + for (unsigned I = 0, N = Decls.size(); I != N; ++I) { + StoredDeclsList &List = Map[Decls[I]->getDeclName()]; + if (List.isNull()) + List.setOnlyValue(Decls[I]); + else + List.AddSubsequentDecl(Decls[I]); + } +} + DeclContext::decl_iterator DeclContext::decls_begin() const { if (hasExternalLexicalStorage()) LoadLexicalDeclsFromExternalStorage(); @@ -801,8 +881,17 @@ DeclContext::lookup(DeclarationName Name) { if (PrimaryContext != this) return PrimaryContext->lookup(Name); - if (hasExternalVisibleStorage()) - LoadVisibleDeclsFromExternalStorage(); + if (hasExternalVisibleStorage()) { + // Check to see if we've already cached the lookup results. + if (LookupPtr) { + StoredDeclsMap::iterator I = LookupPtr->find(Name); + if (I != LookupPtr->end()) + return I->second.getLookupResult(getParentASTContext()); + } + + ExternalASTSource *Source = getParentASTContext().getExternalSource(); + return Source->FindExternalVisibleDeclsByName(this, Name); + } /// If there is no lookup data structure, build one now by walking /// all of the linked DeclContexts (in declaration order!) and @@ -858,9 +947,10 @@ void DeclContext::makeDeclVisibleInContext(NamedDecl *D, bool Recoverable) { } // If we already have a lookup data structure, perform the insertion - // into it. Otherwise, be lazy and don't build that structure until - // someone asks for it. - if (LookupPtr || !Recoverable) + // into it. If we haven't deserialized externally stored decls, deserialize + // them so we can add the decl. Otherwise, be lazy and don't build that + // structure until someone asks for it. 
+ if (LookupPtr || !Recoverable || hasExternalVisibleStorage()) makeDeclVisibleInContextImpl(D); // If we are a transparent context, insert into our parent context, @@ -880,6 +970,12 @@ void DeclContext::makeDeclVisibleInContextImpl(NamedDecl *D) { if (isa<ClassTemplateSpecializationDecl>(D)) return; + // If there is an external AST source, load any declarations it knows about + // with this declaration's name. + if (ExternalASTSource *Source = getParentASTContext().getExternalSource()) + if (hasExternalVisibleStorage()) + Source->FindExternalVisibleDeclsByName(this, D->getDeclName()); + ASTContext *C = 0; if (!LookupPtr) { C = &getParentASTContext(); @@ -932,7 +1028,7 @@ void StoredDeclsList::materializeDecls(ASTContext &Context) { ExternalASTSource *Source = Context.getExternalSource(); assert(Source && "No external AST source available!"); - Data = reinterpret_cast<uintptr_t>(Source->GetDecl(DeclID)); + Data = reinterpret_cast<uintptr_t>(Source->GetExternalDecl(DeclID)); break; } @@ -944,7 +1040,7 @@ void StoredDeclsList::materializeDecls(ASTContext &Context) { assert(Source && "No external AST source available!"); for (unsigned I = 0, N = Vector.size(); I != N; ++I) - Vector[I] = reinterpret_cast<uintptr_t>(Source->GetDecl(Vector[I])); + Vector[I] = reinterpret_cast<uintptr_t>(Source->GetExternalDecl(Vector[I])); Data = (Data & ~0x03) | DK_Decl_Vector; break; diff --git a/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp b/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp index cd7afd9..dd0fe08 100644 --- a/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp +++ b/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp @@ -32,6 +32,8 @@ CXXRecordDecl::DefinitionData::DefinitionData(CXXRecordDecl *D) Abstract(false), HasTrivialConstructor(true), HasTrivialCopyConstructor(true), HasTrivialCopyAssignment(true), HasTrivialDestructor(true), ComputedVisibleConversions(false), + DeclaredDefaultConstructor(false), DeclaredCopyConstructor(false), + DeclaredCopyAssignment(false), 
DeclaredDestructor(false), Bases(0), NumBases(0), VBases(0), NumVBases(0), Definition(D), FirstFriend(0) { } @@ -58,6 +60,11 @@ CXXRecordDecl *CXXRecordDecl::Create(ASTContext &C, TagKind TK, DeclContext *DC, return R; } +CXXRecordDecl *CXXRecordDecl::Create(ASTContext &C, EmptyShell Empty) { + return new (C) CXXRecordDecl(CXXRecord, TTK_Struct, 0, SourceLocation(), 0, 0, + SourceLocation()); +} + CXXRecordDecl::~CXXRecordDecl() { } @@ -159,6 +166,29 @@ bool CXXRecordDecl::hasConstCopyConstructor(ASTContext &Context) const { return getCopyConstructor(Context, Qualifiers::Const) != 0; } +/// \brief Perform a simplistic form of overload resolution that only considers +/// cv-qualifiers on a single parameter, and return the best overload candidate +/// (if there is one). +static CXXMethodDecl * +GetBestOverloadCandidateSimple( + const llvm::SmallVectorImpl<std::pair<CXXMethodDecl *, Qualifiers> > &Cands) { + if (Cands.empty()) + return 0; + if (Cands.size() == 1) + return Cands[0].first; + + unsigned Best = 0, N = Cands.size(); + for (unsigned I = 1; I != N; ++I) + if (Cands[Best].second.isSupersetOf(Cands[I].second)) + Best = I; + + for (unsigned I = 1; I != N; ++I) + if (Cands[Best].second.isSupersetOf(Cands[I].second)) + return 0; + + return Cands[Best].first; +} + CXXConstructorDecl *CXXRecordDecl::getCopyConstructor(ASTContext &Context, unsigned TypeQuals) const{ QualType ClassType @@ -167,6 +197,7 @@ CXXConstructorDecl *CXXRecordDecl::getCopyConstructor(ASTContext &Context, = Context.DeclarationNames.getCXXConstructorName( Context.getCanonicalType(ClassType)); unsigned FoundTQs; + llvm::SmallVector<std::pair<CXXMethodDecl *, Qualifiers>, 4> Found; DeclContext::lookup_const_iterator Con, ConEnd; for (llvm::tie(Con, ConEnd) = this->lookup(ConstructorName); Con != ConEnd; ++Con) { @@ -175,61 +206,68 @@ CXXConstructorDecl *CXXRecordDecl::getCopyConstructor(ASTContext &Context, if (isa<FunctionTemplateDecl>(*Con)) continue; - if 
(cast<CXXConstructorDecl>(*Con)->isCopyConstructor(FoundTQs)) { + CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(*Con); + if (Constructor->isCopyConstructor(FoundTQs)) { if (((TypeQuals & Qualifiers::Const) == (FoundTQs & Qualifiers::Const)) || (!(TypeQuals & Qualifiers::Const) && (FoundTQs & Qualifiers::Const))) - return cast<CXXConstructorDecl>(*Con); - + Found.push_back(std::make_pair( + const_cast<CXXConstructorDecl *>(Constructor), + Qualifiers::fromCVRMask(FoundTQs))); } } - return 0; + + return cast_or_null<CXXConstructorDecl>( + GetBestOverloadCandidateSimple(Found)); } -bool CXXRecordDecl::hasConstCopyAssignment(ASTContext &Context, - const CXXMethodDecl *& MD) const { - QualType ClassType = Context.getCanonicalType(Context.getTypeDeclType( - const_cast<CXXRecordDecl*>(this))); - DeclarationName OpName =Context.DeclarationNames.getCXXOperatorName(OO_Equal); - +CXXMethodDecl *CXXRecordDecl::getCopyAssignmentOperator(bool ArgIsConst) const { + ASTContext &Context = getASTContext(); + QualType Class = Context.getTypeDeclType(const_cast<CXXRecordDecl *>(this)); + DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(OO_Equal); + + llvm::SmallVector<std::pair<CXXMethodDecl *, Qualifiers>, 4> Found; DeclContext::lookup_const_iterator Op, OpEnd; - for (llvm::tie(Op, OpEnd) = this->lookup(OpName); - Op != OpEnd; ++Op) { + for (llvm::tie(Op, OpEnd) = this->lookup(Name); Op != OpEnd; ++Op) { // C++ [class.copy]p9: // A user-declared copy assignment operator is a non-static non-template // member function of class X with exactly one parameter of type X, X&, // const X&, volatile X& or const volatile X&. 
const CXXMethodDecl* Method = dyn_cast<CXXMethodDecl>(*Op); - if (!Method) + if (!Method || Method->isStatic() || Method->getPrimaryTemplate()) continue; - - if (Method->isStatic()) - continue; - if (Method->getPrimaryTemplate()) - continue; - const FunctionProtoType *FnType = - Method->getType()->getAs<FunctionProtoType>(); + + const FunctionProtoType *FnType + = Method->getType()->getAs<FunctionProtoType>(); assert(FnType && "Overloaded operator has no prototype."); // Don't assert on this; an invalid decl might have been left in the AST. if (FnType->getNumArgs() != 1 || FnType->isVariadic()) continue; - bool AcceptsConst = true; + QualType ArgType = FnType->getArgType(0); + Qualifiers Quals; if (const LValueReferenceType *Ref = ArgType->getAs<LValueReferenceType>()) { ArgType = Ref->getPointeeType(); - // Is it a non-const lvalue reference? - if (!ArgType.isConstQualified()) - AcceptsConst = false; + // If we have a const argument and we have a reference to a non-const, + // this function does not match. + if (ArgIsConst && !ArgType.isConstQualified()) + continue; + + Quals = ArgType.getQualifiers(); + } else { + // By-value copy-assignment operators are treated like const X& + // copy-assignment operators. + Quals = Qualifiers::fromCVRMask(Qualifiers::Const); } - if (!Context.hasSameUnqualifiedType(ArgType, ClassType)) + + if (!Context.hasSameUnqualifiedType(ArgType, Class)) continue; - MD = Method; - // We have a single argument of type cv X or cv X&, i.e. we've found the - // copy assignment operator. Return whether it accepts const arguments. - return AcceptsConst; + + // Save this copy-assignment operator. It might be "the one". + Found.push_back(std::make_pair(const_cast<CXXMethodDecl *>(Method), Quals)); } - assert(isInvalidDecl() && - "No copy assignment operator declared in valid code."); - return false; + + // Use a simplistic form of overload resolution to find the candidate. 
+ return GetBestOverloadCandidateSimple(Found); } void @@ -239,6 +277,9 @@ CXXRecordDecl::addedConstructor(ASTContext &Context, // Note that we have a user-declared constructor. data().UserDeclaredConstructor = true; + // Note that we have no need of an implicitly-declared default constructor. + data().DeclaredDefaultConstructor = true; + // C++ [dcl.init.aggr]p1: // An aggregate is an array or a class (clause 9) with no // user-declared constructors (12.1) [...]. @@ -258,11 +299,13 @@ CXXRecordDecl::addedConstructor(ASTContext &Context, // suppress the implicit declaration of a copy constructor. if (ConDecl->isCopyConstructor()) { data().UserDeclaredCopyConstructor = true; - + data().DeclaredCopyConstructor = true; + // C++ [class.copy]p6: // A copy constructor is trivial if it is implicitly declared. // FIXME: C++0x: don't do this for "= default" copy constructors. data().HasTrivialCopyConstructor = false; + } } @@ -294,7 +337,8 @@ void CXXRecordDecl::addedAssignmentOperator(ASTContext &Context, // Suppress the implicit declaration of a copy constructor. data().UserDeclaredCopyAssignment = true; - + data().DeclaredCopyAssignment = true; + // C++ [class.copy]p11: // A copy assignment operator is trivial if it is implicitly declared. // FIXME: C++0x: don't do this for "= default" copy operators. 
@@ -546,7 +590,8 @@ CXXRecordDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK) { } CXXConstructorDecl * -CXXRecordDecl::getDefaultConstructor(ASTContext &Context) { +CXXRecordDecl::getDefaultConstructor() { + ASTContext &Context = getASTContext(); QualType ClassType = Context.getTypeDeclType(this); DeclarationName ConstructorName = Context.DeclarationNames.getCXXConstructorName( @@ -566,7 +611,8 @@ CXXRecordDecl::getDefaultConstructor(ASTContext &Context) { return 0; } -CXXDestructorDecl *CXXRecordDecl::getDestructor(ASTContext &Context) const { +CXXDestructorDecl *CXXRecordDecl::getDestructor() const { + ASTContext &Context = getASTContext(); QualType ClassType = Context.getTypeDeclType(this); DeclarationName Name @@ -670,6 +716,10 @@ CXXMethodDecl::method_iterator CXXMethodDecl::end_overridden_methods() const { return getASTContext().overridden_methods_end(this); } +unsigned CXXMethodDecl::size_overridden_methods() const { + return getASTContext().overridden_methods_size(this); +} + QualType CXXMethodDecl::getThisType(ASTContext &C) const { // C++ 9.3.2p1: The type of this in a member function of a class X is X*. 
// If the member function is declared const, the type of this is const X*, @@ -693,7 +743,7 @@ bool CXXMethodDecl::hasInlineBody() const { CheckFn = this; const FunctionDecl *fn; - return CheckFn->getBody(fn) && !fn->isOutOfLine(); + return CheckFn->hasBody(fn) && !fn->isOutOfLine(); } CXXBaseOrMemberInitializer:: diff --git a/contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp b/contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp index ab3552d..99bfe40 100644 --- a/contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp +++ b/contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp @@ -39,3 +39,7 @@ FriendDecl *FriendDecl::Create(ASTContext &C, DeclContext *DC, cast<CXXRecordDecl>(DC)->pushFriendDecl(FD); return FD; } + +FriendDecl *FriendDecl::Create(ASTContext &C, EmptyShell Empty) { + return new (C) FriendDecl(Empty); +} diff --git a/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp b/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp index dc4aacd..adb0e7d 100644 --- a/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp +++ b/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp @@ -223,17 +223,24 @@ void ObjCInterfaceDecl::mergeClassExtensionProtocolList( setProtocolList(ProtocolRefs.data(), NumProtoRefs, ProtocolLocs.data(), C); } -/// getClassExtension - Find class extension of the given class. -// FIXME. can speed it up, if need be. -ObjCCategoryDecl* ObjCInterfaceDecl::getClassExtension() const { - const ObjCInterfaceDecl* ClassDecl = this; - for (ObjCCategoryDecl *CDecl = ClassDecl->getCategoryList(); CDecl; +/// getFirstClassExtension - Find first class extension of the given class. +ObjCCategoryDecl* ObjCInterfaceDecl::getFirstClassExtension() const { + for (ObjCCategoryDecl *CDecl = getCategoryList(); CDecl; CDecl = CDecl->getNextClassCategory()) if (CDecl->IsClassExtension()) return CDecl; return 0; } +/// getNextClassCategory - Find next class extension in list of categories. 
+const ObjCCategoryDecl* ObjCCategoryDecl::getNextClassExtension() const { + for (const ObjCCategoryDecl *CDecl = getNextClassCategory(); CDecl; + CDecl = CDecl->getNextClassCategory()) + if (CDecl->IsClassExtension()) + return CDecl; + return 0; +} + ObjCIvarDecl *ObjCInterfaceDecl::lookupInstanceVariable(IdentifierInfo *ID, ObjCInterfaceDecl *&clsDeclared) { ObjCInterfaceDecl* ClassDecl = this; @@ -242,11 +249,13 @@ ObjCIvarDecl *ObjCInterfaceDecl::lookupInstanceVariable(IdentifierInfo *ID, clsDeclared = ClassDecl; return I; } - if (const ObjCCategoryDecl *CDecl = ClassDecl->getClassExtension()) + for (const ObjCCategoryDecl *CDecl = ClassDecl->getFirstClassExtension(); + CDecl; CDecl = CDecl->getNextClassExtension()) { if (ObjCIvarDecl *I = CDecl->getIvarDecl(ID)) { clsDeclared = ClassDecl; return I; } + } ClassDecl = ClassDecl->getSuperClass(); } @@ -887,7 +896,7 @@ ObjCPropertyDecl *ObjCPropertyDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L, IdentifierInfo *Id, SourceLocation AtLoc, - QualType T, + TypeSourceInfo *T, PropertyControl propControl) { return new (C) ObjCPropertyDecl(DC, L, Id, AtLoc, T); } diff --git a/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp b/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp index 53949247..765772d 100644 --- a/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp +++ b/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp @@ -183,7 +183,7 @@ void DeclPrinter::Print(AccessSpecifier AS) { case AS_none: assert(0 && "No access specifier!"); break; case AS_public: Out << "public"; break; case AS_protected: Out << "protected"; break; - case AS_private: Out << " private"; break; + case AS_private: Out << "private"; break; } } @@ -195,9 +195,6 @@ void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) { if (Indent) Indentation += Policy.Indentation; - bool PrintAccess = isa<CXXRecordDecl>(DC); - AccessSpecifier CurAS = AS_none; - llvm::SmallVector<Decl*, 2> Decls; for (DeclContext::decl_iterator D = 
DC->decls_begin(), DEnd = DC->decls_end(); D != DEnd; ++D) { @@ -205,21 +202,14 @@ void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) { // Skip over implicit declarations in pretty-printing mode. if (D->isImplicit()) continue; // FIXME: Ugly hack so we don't pretty-print the builtin declaration - // of __builtin_va_list. There should be some other way to check that. - if (isa<NamedDecl>(*D) && cast<NamedDecl>(*D)->getNameAsString() == - "__builtin_va_list") - continue; - } - - if (PrintAccess) { - AccessSpecifier AS = D->getAccess(); - - if (AS != CurAS) { - if (Indent) - this->Indent(Indentation - Policy.Indentation); - Print(AS); - Out << ":\n"; - CurAS = AS; + // of __builtin_va_list or __[u]int128_t. There should be some other way + // to check that. + if (NamedDecl *ND = dyn_cast<NamedDecl>(*D)) { + if (IdentifierInfo *II = ND->getIdentifier()) { + if (II->isStr("__builtin_va_list") || + II->isStr("__int128_t") || II->isStr("__uint128_t")) + continue; + } } } @@ -251,6 +241,16 @@ void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) { Decls.push_back(*D); continue; } + + if (isa<AccessSpecDecl>(*D)) { + Indentation -= Policy.Indentation; + this->Indent(); + Print(D->getAccess()); + Out << ":\n"; + Indentation += Policy.Indentation; + continue; + } + this->Indent(); Visit(*D); @@ -406,7 +406,8 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) { FieldDecl *FD = BMInitializer->getMember(); Out << FD; } else { - Out << QualType(BMInitializer->getBaseClass(), 0).getAsString(); + Out << QualType(BMInitializer->getBaseClass(), + 0).getAsString(Policy); } Out << "("; @@ -653,7 +654,11 @@ void DeclPrinter::VisitTemplateDecl(TemplateDecl *D) { Out << "> "; - Visit(D->getTemplatedDecl()); + if (isa<TemplateTemplateParmDecl>(D)) { + Out << "class " << D->getName(); + } else { + Visit(D->getTemplatedDecl()); + } } //---------------------------------------------------------------------------- diff --git 
a/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp b/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp index 26e291c..9e1d79d 100644 --- a/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp +++ b/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp @@ -162,37 +162,19 @@ ClassTemplateDecl *ClassTemplateDecl::Create(ASTContext &C, TemplateParameterList *Params, NamedDecl *Decl, ClassTemplateDecl *PrevDecl) { - Common *CommonPtr; - if (PrevDecl) - CommonPtr = PrevDecl->CommonPtr; - else { - CommonPtr = new (C) Common; - C.AddDeallocation(DeallocateCommon, CommonPtr); - } - - return new (C) ClassTemplateDecl(DC, L, Name, Params, Decl, PrevDecl, - CommonPtr); -} - -ClassTemplateDecl::~ClassTemplateDecl() { - assert(CommonPtr == 0 && "ClassTemplateDecl must be explicitly destroyed"); + ClassTemplateDecl *New = new (C) ClassTemplateDecl(DC, L, Name, Params, Decl); + New->setPreviousDeclaration(PrevDecl); + return New; } void ClassTemplateDecl::Destroy(ASTContext& C) { - if (!PreviousDeclaration) { - CommonPtr->~Common(); - C.Deallocate((void*)CommonPtr); - } - CommonPtr = 0; - - this->~ClassTemplateDecl(); - C.Deallocate((void*)this); + Decl::Destroy(C); } void ClassTemplateDecl::getPartialSpecializations( llvm::SmallVectorImpl<ClassTemplatePartialSpecializationDecl *> &PS) { llvm::FoldingSet<ClassTemplatePartialSpecializationDecl> &PartialSpecs - = CommonPtr->PartialSpecializations; + = getPartialSpecializations(); PS.clear(); PS.resize(PartialSpecs.size()); for (llvm::FoldingSet<ClassTemplatePartialSpecializationDecl>::iterator @@ -219,7 +201,8 @@ ClassTemplateDecl::findPartialSpecialization(QualType T) { } QualType -ClassTemplateDecl::getInjectedClassNameSpecialization(ASTContext &Context) { +ClassTemplateDecl::getInjectedClassNameSpecialization() { + Common *CommonPtr = getCommonPtr(); if (!CommonPtr->InjectedClassNameType.isNull()) return CommonPtr->InjectedClassNameType; @@ -227,7 +210,7 @@ ClassTemplateDecl::getInjectedClassNameSpecialization(ASTContext &Context) { // 
corresponding to template parameter packs should be pack // expansions. We already say that in 14.6.2.1p2, so it would be // better to fix that redundancy. - + ASTContext &Context = getASTContext(); TemplateParameterList *Params = getTemplateParameters(); llvm::SmallVector<TemplateArgument, 16> TemplateArgs; TemplateArgs.reserve(Params->size()); @@ -240,7 +223,7 @@ ClassTemplateDecl::getInjectedClassNameSpecialization(ASTContext &Context) { } else if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*Param)) { Expr *E = new (Context) DeclRefExpr(NTTP, - NTTP->getType().getNonReferenceType(), + NTTP->getType().getNonLValueExprType(Context), NTTP->getLocation()); TemplateArgs.push_back(TemplateArgument(E)); } else { @@ -256,6 +239,20 @@ ClassTemplateDecl::getInjectedClassNameSpecialization(ASTContext &Context) { return CommonPtr->InjectedClassNameType; } +ClassTemplateDecl::Common *ClassTemplateDecl::getCommonPtr() { + // Find the first declaration of this function template. 
+ ClassTemplateDecl *First = this; + while (First->getPreviousDeclaration()) + First = First->getPreviousDeclaration(); + + if (First->CommonOrPrev.isNull()) { + Common *CommonPtr = new (getASTContext()) Common; + getASTContext().AddDeallocation(DeallocateCommon, CommonPtr); + First->CommonOrPrev = CommonPtr; + } + return First->CommonOrPrev.get<Common*>(); +} + //===----------------------------------------------------------------------===// // TemplateTypeParm Allocation/Deallocation Method Implementations //===----------------------------------------------------------------------===// @@ -269,6 +266,12 @@ TemplateTypeParmDecl::Create(ASTContext &C, DeclContext *DC, return new (C) TemplateTypeParmDecl(DC, L, Id, Typename, Type, ParameterPack); } +TemplateTypeParmDecl * +TemplateTypeParmDecl::Create(ASTContext &C, EmptyShell Empty) { + return new (C) TemplateTypeParmDecl(0, SourceLocation(), 0, false, + QualType(), false); +} + SourceLocation TemplateTypeParmDecl::getDefaultArgumentLoc() const { return DefaultArgument->getTypeLoc().getSourceRange().getBegin(); } @@ -294,8 +297,9 @@ NonTypeTemplateParmDecl::Create(ASTContext &C, DeclContext *DC, } SourceLocation NonTypeTemplateParmDecl::getDefaultArgumentLoc() const { - return DefaultArgument? DefaultArgument->getSourceRange().getBegin() - : SourceLocation(); + return hasDefaultArgument() + ? getDefaultArgument()->getSourceRange().getBegin() + : SourceLocation(); } //===----------------------------------------------------------------------===// @@ -393,6 +397,13 @@ TemplateArgumentList::TemplateArgumentList(ASTContext &Context, } } +TemplateArgumentList::TemplateArgumentList(ASTContext &Context, + const TemplateArgument *Args, + unsigned NumArgs) + : NumFlatArguments(0), NumStructuredArguments(0) { + init(Context, Args, NumArgs); +} + /// Produces a shallow copy of the given template argument list. This /// assumes that the input argument list outlives it. 
This takes the list as /// a pointer to avoid looking like a copy constructor, since this really @@ -403,6 +414,23 @@ TemplateArgumentList::TemplateArgumentList(const TemplateArgumentList *Other) StructuredArguments(Other->StructuredArguments.getPointer(), false), NumStructuredArguments(Other->NumStructuredArguments) { } +void TemplateArgumentList::init(ASTContext &Context, + const TemplateArgument *Args, + unsigned NumArgs) { +assert(NumFlatArguments == 0 && NumStructuredArguments == 0 && + "Already initialized!"); + +NumFlatArguments = NumStructuredArguments = NumArgs; +TemplateArgument *NewArgs = new (Context) TemplateArgument[NumArgs]; +std::copy(Args, Args+NumArgs, NewArgs); +FlatArguments.setPointer(NewArgs); +FlatArguments.setInt(1); // Owns the pointer. + +// Just reuse the flat arguments array. +StructuredArguments.setPointer(NewArgs); +StructuredArguments.setInt(0); // Doesn't own the pointer. +} + void TemplateArgumentList::Destroy(ASTContext &C) { if (FlatArguments.getInt()) C.Deallocate((void*)FlatArguments.getPointer()); @@ -425,11 +453,17 @@ ClassTemplateSpecializationDecl(ASTContext &Context, Kind DK, TagKind TK, SpecializedTemplate->getIdentifier(), PrevDecl), SpecializedTemplate(SpecializedTemplate), - TypeAsWritten(0), + ExplicitInfo(0), TemplateArgs(Context, Builder, /*TakeArgs=*/true), SpecializationKind(TSK_Undeclared) { } +ClassTemplateSpecializationDecl::ClassTemplateSpecializationDecl(Kind DK) + : CXXRecordDecl(DK, TTK_Struct, 0, SourceLocation(), 0, 0), + ExplicitInfo(0), + SpecializationKind(TSK_Undeclared) { +} + ClassTemplateSpecializationDecl * ClassTemplateSpecializationDecl::Create(ASTContext &Context, TagKind TK, DeclContext *DC, SourceLocation L, @@ -447,7 +481,15 @@ ClassTemplateSpecializationDecl::Create(ASTContext &Context, TagKind TK, return Result; } +ClassTemplateSpecializationDecl * +ClassTemplateSpecializationDecl::Create(ASTContext &Context, EmptyShell Empty) { + return + new 
(Context)ClassTemplateSpecializationDecl(ClassTemplateSpecialization); +} + void ClassTemplateSpecializationDecl::Destroy(ASTContext &C) { + delete ExplicitInfo; + if (SpecializedPartialSpecialization *PartialSpec = SpecializedTemplate.dyn_cast<SpecializedPartialSpecialization*>()) C.Deallocate(PartialSpec); @@ -508,6 +550,25 @@ Create(ASTContext &Context, TagKind TK,DeclContext *DC, SourceLocation L, return Result; } +ClassTemplatePartialSpecializationDecl * +ClassTemplatePartialSpecializationDecl::Create(ASTContext &Context, + EmptyShell Empty) { + return new (Context)ClassTemplatePartialSpecializationDecl(); +} + +void ClassTemplatePartialSpecializationDecl:: +initTemplateArgsAsWritten(const TemplateArgumentListInfo &ArgInfos) { + assert(ArgsAsWritten == 0 && "ArgsAsWritten already set"); + unsigned N = ArgInfos.size(); + TemplateArgumentLoc *ClonedArgs + = new (getASTContext()) TemplateArgumentLoc[N]; + for (unsigned I = 0; I != N; ++I) + ClonedArgs[I] = ArgInfos[I]; + + ArgsAsWritten = ClonedArgs; + NumArgsAsWritten = N; +} + //===----------------------------------------------------------------------===// // FriendTemplateDecl Implementation //===----------------------------------------------------------------------===// diff --git a/contrib/llvm/tools/clang/lib/AST/Expr.cpp b/contrib/llvm/tools/clang/lib/AST/Expr.cpp index c38cec3..6524a31 100644 --- a/contrib/llvm/tools/clang/lib/AST/Expr.cpp +++ b/contrib/llvm/tools/clang/lib/AST/Expr.cpp @@ -37,7 +37,7 @@ bool Expr::isKnownToHaveBooleanValue() const { // If this value has _Bool type, it is obvious 0/1. if (getType()->isBooleanType()) return true; // If this is a non-scalar-integer type, we don't care enough to try. 
- if (!getType()->isIntegralType()) return false; + if (!getType()->isIntegralOrEnumerationType()) return false; if (const ParenExpr *PE = dyn_cast<ParenExpr>(this)) return PE->getSubExpr()->isKnownToHaveBooleanValue(); @@ -52,7 +52,9 @@ bool Expr::isKnownToHaveBooleanValue() const { } } - if (const CastExpr *CE = dyn_cast<CastExpr>(this)) + // Only look through implicit casts. If the user writes + // '(int) (a && b)' treat it as an arbitrary int. + if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(this)) return CE->getSubExpr()->isKnownToHaveBooleanValue(); if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(this)) { @@ -111,10 +113,14 @@ void ExplicitTemplateArgumentList::copyInto( Info.addArgument(getTemplateArgs()[I]); } +std::size_t ExplicitTemplateArgumentList::sizeFor(unsigned NumTemplateArgs) { + return sizeof(ExplicitTemplateArgumentList) + + sizeof(TemplateArgumentLoc) * NumTemplateArgs; +} + std::size_t ExplicitTemplateArgumentList::sizeFor( const TemplateArgumentListInfo &Info) { - return sizeof(ExplicitTemplateArgumentList) + - sizeof(TemplateArgumentLoc) * Info.size(); + return sizeFor(Info.size()); } void DeclRefExpr::computeDependence() { @@ -158,7 +164,7 @@ void DeclRefExpr::computeDependence() { // (VD) - a constant with integral or enumeration type and is // initialized with an expression that is value-dependent. 
else if (VarDecl *Var = dyn_cast<VarDecl>(D)) { - if (Var->getType()->isIntegralType() && + if (Var->getType()->isIntegralOrEnumerationType() && Var->getType().getCVRQualifiers() == Qualifiers::Const) { if (const Expr *Init = Var->getAnyInitializer()) if (Init->isValueDependent()) @@ -222,6 +228,19 @@ DeclRefExpr *DeclRefExpr::Create(ASTContext &Context, TemplateArgs, T); } +DeclRefExpr *DeclRefExpr::CreateEmpty(ASTContext &Context, bool HasQualifier, + unsigned NumTemplateArgs) { + std::size_t Size = sizeof(DeclRefExpr); + if (HasQualifier) + Size += sizeof(NameQualifier); + + if (NumTemplateArgs) + Size += ExplicitTemplateArgumentList::sizeFor(NumTemplateArgs); + + void *Mem = Context.Allocate(Size, llvm::alignof<DeclRefExpr>()); + return new (Mem) DeclRefExpr(EmptyShell()); +} + SourceRange DeclRefExpr::getSourceRange() const { // FIXME: Does not handle multi-token names well, e.g., operator[]. SourceRange R(Loc); @@ -557,7 +576,10 @@ QualType CallExpr::getCallReturnType() const { CalleeType = FnTypePtr->getPointeeType(); else if (const BlockPointerType *BPT = CalleeType->getAs<BlockPointerType>()) CalleeType = BPT->getPointeeType(); - + else if (const MemberPointerType *MPT + = CalleeType->getAs<MemberPointerType>()) + CalleeType = MPT->getPointeeType(); + const FunctionType *FnType = CalleeType->getAs<FunctionType>(); return FnType->getResultType(); } @@ -662,6 +684,8 @@ const char *CastExpr::getCastKindName() const { return "Unknown"; case CastExpr::CK_BitCast: return "BitCast"; + case CastExpr::CK_LValueBitCast: + return "LValueBitCast"; case CastExpr::CK_NoOp: return "NoOp"; case CastExpr::CK_BaseToDerived: @@ -968,7 +992,8 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1, switch (BO->getOpcode()) { default: break; - // Consider ',', '||', '&&' to have side effects if the LHS or RHS does. + // Consider the RHS of comma for side effects. LHS was checked by + // Sema::CheckCommaOperands. 
case BinaryOperator::Comma: // ((foo = <blah>), 0) is an idiom for hiding the result (and // lvalue-ness) of an assignment written in a macro. @@ -976,10 +1001,14 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1, dyn_cast<IntegerLiteral>(BO->getRHS()->IgnoreParens())) if (IE->getValue() == 0) return false; + return BO->getRHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx); + // Consider '||', '&&' to have side effects if the LHS or RHS does. case BinaryOperator::LAnd: case BinaryOperator::LOr: - return (BO->getLHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx) || - BO->getRHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx)); + if (!BO->getLHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx) || + !BO->getRHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx)) + return false; + break; } if (BO->isAssignmentOp()) return false; @@ -1140,378 +1169,6 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1, } } -/// DeclCanBeLvalue - Determine whether the given declaration can be -/// an lvalue. This is a helper routine for isLvalue. -static bool DeclCanBeLvalue(const NamedDecl *Decl, ASTContext &Ctx) { - // C++ [temp.param]p6: - // A non-type non-reference template-parameter is not an lvalue. - if (const NonTypeTemplateParmDecl *NTTParm - = dyn_cast<NonTypeTemplateParmDecl>(Decl)) - return NTTParm->getType()->isReferenceType(); - - return isa<VarDecl>(Decl) || isa<FieldDecl>(Decl) || - // C++ 3.10p2: An lvalue refers to an object or function. - (Ctx.getLangOptions().CPlusPlus && - (isa<FunctionDecl>(Decl) || isa<FunctionTemplateDecl>(Decl))); -} - -/// isLvalue - C99 6.3.2.1: an lvalue is an expression with an object type or an -/// incomplete type other than void. 
Nonarray expressions that can be lvalues: -/// - name, where name must be a variable -/// - e[i] -/// - (e), where e must be an lvalue -/// - e.name, where e must be an lvalue -/// - e->name -/// - *e, the type of e cannot be a function type -/// - string-constant -/// - (__real__ e) and (__imag__ e) where e is an lvalue [GNU extension] -/// - reference type [C++ [expr]] -/// -Expr::isLvalueResult Expr::isLvalue(ASTContext &Ctx) const { - assert(!TR->isReferenceType() && "Expressions can't have reference type."); - - isLvalueResult Res = isLvalueInternal(Ctx); - if (Res != LV_Valid || Ctx.getLangOptions().CPlusPlus) - return Res; - - // first, check the type (C99 6.3.2.1). Expressions with function - // type in C are not lvalues, but they can be lvalues in C++. - if (TR->isFunctionType() || TR == Ctx.OverloadTy) - return LV_NotObjectType; - - // Allow qualified void which is an incomplete type other than void (yuck). - if (TR->isVoidType() && !Ctx.getCanonicalType(TR).hasQualifiers()) - return LV_IncompleteVoidType; - - return LV_Valid; -} - -// Check whether the expression can be sanely treated like an l-value -Expr::isLvalueResult Expr::isLvalueInternal(ASTContext &Ctx) const { - switch (getStmtClass()) { - case ObjCIsaExprClass: - case StringLiteralClass: // C99 6.5.1p4 - case ObjCEncodeExprClass: // @encode behaves like its string in every way. - return LV_Valid; - case ArraySubscriptExprClass: // C99 6.5.3p4 (e1[e2] == (*((e1)+(e2)))) - // For vectors, make sure base is an lvalue (i.e. not a function call). 
- if (cast<ArraySubscriptExpr>(this)->getBase()->getType()->isVectorType()) - return cast<ArraySubscriptExpr>(this)->getBase()->isLvalue(Ctx); - return LV_Valid; - case DeclRefExprClass: { // C99 6.5.1p2 - const NamedDecl *RefdDecl = cast<DeclRefExpr>(this)->getDecl(); - if (DeclCanBeLvalue(RefdDecl, Ctx)) - return LV_Valid; - break; - } - case BlockDeclRefExprClass: { - const BlockDeclRefExpr *BDR = cast<BlockDeclRefExpr>(this); - if (isa<VarDecl>(BDR->getDecl())) - return LV_Valid; - break; - } - case MemberExprClass: { - const MemberExpr *m = cast<MemberExpr>(this); - if (Ctx.getLangOptions().CPlusPlus) { // C++ [expr.ref]p4: - NamedDecl *Member = m->getMemberDecl(); - // C++ [expr.ref]p4: - // If E2 is declared to have type "reference to T", then E1.E2 - // is an lvalue. - if (ValueDecl *Value = dyn_cast<ValueDecl>(Member)) - if (Value->getType()->isReferenceType()) - return LV_Valid; - - // -- If E2 is a static data member [...] then E1.E2 is an lvalue. - if (isa<VarDecl>(Member) && Member->getDeclContext()->isRecord()) - return LV_Valid; - - // -- If E2 is a non-static data member [...]. If E1 is an - // lvalue, then E1.E2 is an lvalue. - if (isa<FieldDecl>(Member)) { - if (m->isArrow()) - return LV_Valid; - return m->getBase()->isLvalue(Ctx); - } - - // -- If it refers to a static member function [...], then - // E1.E2 is an lvalue. - // -- Otherwise, if E1.E2 refers to a non-static member - // function [...], then E1.E2 is not an lvalue. - if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Member)) - return Method->isStatic()? LV_Valid : LV_MemberFunction; - - // -- If E2 is a member enumerator [...], the expression E1.E2 - // is not an lvalue. - if (isa<EnumConstantDecl>(Member)) - return LV_InvalidExpression; - - // Not an lvalue. 
- return LV_InvalidExpression; - } - - // C99 6.5.2.3p4 - if (m->isArrow()) - return LV_Valid; - Expr *BaseExp = m->getBase(); - if (BaseExp->getStmtClass() == ObjCPropertyRefExprClass || - BaseExp->getStmtClass() == ObjCImplicitSetterGetterRefExprClass) - return LV_SubObjCPropertySetting; - return - BaseExp->isLvalue(Ctx); - } - case UnaryOperatorClass: - if (cast<UnaryOperator>(this)->getOpcode() == UnaryOperator::Deref) - return LV_Valid; // C99 6.5.3p4 - - if (cast<UnaryOperator>(this)->getOpcode() == UnaryOperator::Real || - cast<UnaryOperator>(this)->getOpcode() == UnaryOperator::Imag || - cast<UnaryOperator>(this)->getOpcode() == UnaryOperator::Extension) - return cast<UnaryOperator>(this)->getSubExpr()->isLvalue(Ctx); // GNU. - - if (Ctx.getLangOptions().CPlusPlus && // C++ [expr.pre.incr]p1 - (cast<UnaryOperator>(this)->getOpcode() == UnaryOperator::PreInc || - cast<UnaryOperator>(this)->getOpcode() == UnaryOperator::PreDec)) - return LV_Valid; - break; - case ImplicitCastExprClass: - if (cast<ImplicitCastExpr>(this)->isLvalueCast()) - return LV_Valid; - - // If this is a conversion to a class temporary, make a note of - // that. - if (Ctx.getLangOptions().CPlusPlus && getType()->isRecordType()) - return LV_ClassTemporary; - - break; - case ParenExprClass: // C99 6.5.1p5 - return cast<ParenExpr>(this)->getSubExpr()->isLvalue(Ctx); - case BinaryOperatorClass: - case CompoundAssignOperatorClass: { - const BinaryOperator *BinOp = cast<BinaryOperator>(this); - - if (Ctx.getLangOptions().CPlusPlus && // C++ [expr.comma]p1 - BinOp->getOpcode() == BinaryOperator::Comma) - return BinOp->getRHS()->isLvalue(Ctx); - - // C++ [expr.mptr.oper]p6 - // The result of a .* expression is an lvalue only if its first operand is - // an lvalue and its second operand is a pointer to data member. 
- if (BinOp->getOpcode() == BinaryOperator::PtrMemD && - !BinOp->getType()->isFunctionType()) - return BinOp->getLHS()->isLvalue(Ctx); - - // The result of an ->* expression is an lvalue only if its second operand - // is a pointer to data member. - if (BinOp->getOpcode() == BinaryOperator::PtrMemI && - !BinOp->getType()->isFunctionType()) { - QualType Ty = BinOp->getRHS()->getType(); - if (Ty->isMemberPointerType() && !Ty->isMemberFunctionPointerType()) - return LV_Valid; - } - - if (!BinOp->isAssignmentOp()) - return LV_InvalidExpression; - - if (Ctx.getLangOptions().CPlusPlus) - // C++ [expr.ass]p1: - // The result of an assignment operation [...] is an lvalue. - return LV_Valid; - - - // C99 6.5.16: - // An assignment expression [...] is not an lvalue. - return LV_InvalidExpression; - } - case CallExprClass: - case CXXOperatorCallExprClass: - case CXXMemberCallExprClass: { - // C++0x [expr.call]p10 - // A function call is an lvalue if and only if the result type - // is an lvalue reference. - QualType ReturnType = cast<CallExpr>(this)->getCallReturnType(); - if (ReturnType->isLValueReferenceType()) - return LV_Valid; - - // If the function is returning a class temporary, make a note of - // that. - if (Ctx.getLangOptions().CPlusPlus && ReturnType->isRecordType()) - return LV_ClassTemporary; - - break; - } - case CompoundLiteralExprClass: // C99 6.5.2.5p5 - // FIXME: Is this what we want in C++? - return LV_Valid; - case ChooseExprClass: - // __builtin_choose_expr is an lvalue if the selected operand is. - return cast<ChooseExpr>(this)->getChosenSubExpr(Ctx)->isLvalue(Ctx); - case ExtVectorElementExprClass: - if (cast<ExtVectorElementExpr>(this)->containsDuplicateElements()) - return LV_DuplicateVectorComponents; - return LV_Valid; - case ObjCIvarRefExprClass: // ObjC instance variables are lvalues. - return LV_Valid; - case ObjCPropertyRefExprClass: // FIXME: check if read-only property. 
- return LV_Valid; - case ObjCImplicitSetterGetterRefExprClass: - // FIXME: check if read-only property. - return LV_Valid; - case PredefinedExprClass: - return LV_Valid; - case UnresolvedLookupExprClass: - case UnresolvedMemberExprClass: - return LV_Valid; - case CXXDefaultArgExprClass: - return cast<CXXDefaultArgExpr>(this)->getExpr()->isLvalue(Ctx); - case CStyleCastExprClass: - case CXXFunctionalCastExprClass: - case CXXStaticCastExprClass: - case CXXDynamicCastExprClass: - case CXXReinterpretCastExprClass: - case CXXConstCastExprClass: - // The result of an explicit cast is an lvalue if the type we are - // casting to is an lvalue reference type. See C++ [expr.cast]p1, - // C++ [expr.static.cast]p2, C++ [expr.dynamic.cast]p2, - // C++ [expr.reinterpret.cast]p1, C++ [expr.const.cast]p1. - if (cast<ExplicitCastExpr>(this)->getTypeAsWritten()-> - isLValueReferenceType()) - return LV_Valid; - - // If this is a conversion to a class temporary, make a note of - // that. - if (Ctx.getLangOptions().CPlusPlus && - cast<ExplicitCastExpr>(this)->getTypeAsWritten()->isRecordType()) - return LV_ClassTemporary; - - break; - case CXXTypeidExprClass: - // C++ 5.2.8p1: The result of a typeid expression is an lvalue of ... - return LV_Valid; - case CXXBindTemporaryExprClass: - return cast<CXXBindTemporaryExpr>(this)->getSubExpr()-> - isLvalueInternal(Ctx); - case CXXBindReferenceExprClass: - // Something that's bound to a reference is always an lvalue. - return LV_Valid; - case ConditionalOperatorClass: { - // Complicated handling is only for C++. - if (!Ctx.getLangOptions().CPlusPlus) - return LV_InvalidExpression; - - // Sema should have taken care to ensure that a CXXTemporaryObjectExpr is - // everywhere there's an object converted to an rvalue. Also, any other - // casts should be wrapped by ImplicitCastExprs. There's just the special - // case involving throws to work out. 
- const ConditionalOperator *Cond = cast<ConditionalOperator>(this); - Expr *True = Cond->getTrueExpr(); - Expr *False = Cond->getFalseExpr(); - // C++0x 5.16p2 - // If either the second or the third operand has type (cv) void, [...] - // the result [...] is an rvalue. - if (True->getType()->isVoidType() || False->getType()->isVoidType()) - return LV_InvalidExpression; - - // Both sides must be lvalues for the result to be an lvalue. - if (True->isLvalue(Ctx) != LV_Valid || False->isLvalue(Ctx) != LV_Valid) - return LV_InvalidExpression; - - // That's it. - return LV_Valid; - } - - case Expr::CXXExprWithTemporariesClass: - return cast<CXXExprWithTemporaries>(this)->getSubExpr()->isLvalue(Ctx); - - case Expr::ObjCMessageExprClass: - if (const ObjCMethodDecl *Method - = cast<ObjCMessageExpr>(this)->getMethodDecl()) - if (Method->getResultType()->isLValueReferenceType()) - return LV_Valid; - break; - - case Expr::CXXConstructExprClass: - case Expr::CXXTemporaryObjectExprClass: - case Expr::CXXZeroInitValueExprClass: - return LV_ClassTemporary; - - default: - break; - } - return LV_InvalidExpression; -} - -/// isModifiableLvalue - C99 6.3.2.1: an lvalue that does not have array type, -/// does not have an incomplete type, does not have a const-qualified type, and -/// if it is a structure or union, does not have any member (including, -/// recursively, any member or element of all contained aggregates or unions) -/// with a const-qualified type. -Expr::isModifiableLvalueResult -Expr::isModifiableLvalue(ASTContext &Ctx, SourceLocation *Loc) const { - isLvalueResult lvalResult = isLvalue(Ctx); - - switch (lvalResult) { - case LV_Valid: - // C++ 3.10p11: Functions cannot be modified, but pointers to - // functions can be modifiable. 
- if (Ctx.getLangOptions().CPlusPlus && TR->isFunctionType()) - return MLV_NotObjectType; - break; - - case LV_NotObjectType: return MLV_NotObjectType; - case LV_IncompleteVoidType: return MLV_IncompleteVoidType; - case LV_DuplicateVectorComponents: return MLV_DuplicateVectorComponents; - case LV_InvalidExpression: - // If the top level is a C-style cast, and the subexpression is a valid - // lvalue, then this is probably a use of the old-school "cast as lvalue" - // GCC extension. We don't support it, but we want to produce good - // diagnostics when it happens so that the user knows why. - if (const CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(IgnoreParens())) { - if (CE->getSubExpr()->isLvalue(Ctx) == LV_Valid) { - if (Loc) - *Loc = CE->getLParenLoc(); - return MLV_LValueCast; - } - } - return MLV_InvalidExpression; - case LV_MemberFunction: return MLV_MemberFunction; - case LV_SubObjCPropertySetting: return MLV_SubObjCPropertySetting; - case LV_ClassTemporary: - return MLV_ClassTemporary; - } - - // The following is illegal: - // void takeclosure(void (^C)(void)); - // void func() { int x = 1; takeclosure(^{ x = 7; }); } - // - if (const BlockDeclRefExpr *BDR = dyn_cast<BlockDeclRefExpr>(this)) { - if (!BDR->isByRef() && isa<VarDecl>(BDR->getDecl())) - return MLV_NotBlockQualified; - } - - // Assigning to an 'implicit' property? - if (const ObjCImplicitSetterGetterRefExpr* Expr = - dyn_cast<ObjCImplicitSetterGetterRefExpr>(this)) { - if (Expr->getSetterMethod() == 0) - return MLV_NoSetterProperty; - } - - QualType CT = Ctx.getCanonicalType(getType()); - - if (CT.isConstQualified()) - return MLV_ConstQualified; - if (CT->isArrayType()) - return MLV_ArrayType; - if (CT->isIncompleteType()) - return MLV_IncompleteType; - - if (const RecordType *r = CT->getAs<RecordType>()) { - if (r->hasConstFields()) - return MLV_ConstQualified; - } - - return MLV_Valid; -} - /// isOBJCGCCandidate - Check if an expression is objc gc'able. 
/// returns true, if it is; false otherwise. bool Expr::isOBJCGCCandidate(ASTContext &Ctx) const { @@ -1596,7 +1253,7 @@ Expr *Expr::IgnoreParenNoopCasts(ASTContext &Ctx) { if (CastExpr *P = dyn_cast<CastExpr>(E)) { // We ignore integer <-> casts that are of the same width, ptr<->ptr and - // ptr<->int casts of the same width. We also ignore all identify casts. + // ptr<->int casts of the same width. We also ignore all identity casts. Expr *SE = P->getSubExpr(); if (Ctx.hasSameUnqualifiedType(E->getType(), SE->getType())) { @@ -1604,8 +1261,10 @@ Expr *Expr::IgnoreParenNoopCasts(ASTContext &Ctx) { continue; } - if ((E->getType()->isPointerType() || E->getType()->isIntegralType()) && - (SE->getType()->isPointerType() || SE->getType()->isIntegralType()) && + if ((E->getType()->isPointerType() || + E->getType()->isIntegralType(Ctx)) && + (SE->getType()->isPointerType() || + SE->getType()->isIntegralType(Ctx)) && Ctx.getTypeSize(E->getType()) == Ctx.getTypeSize(SE->getType())) { E = SE; continue; @@ -1795,7 +1454,7 @@ bool Expr::isNullPointerConstant(ASTContext &Ctx, // If the unthinkable happens, fall through to the safest alternative. 
case NPC_ValueDependentIsNull: - return isTypeDependent() || getType()->isIntegralType(); + return isTypeDependent() || getType()->isIntegralType(Ctx); case NPC_ValueDependentIsNotNull: return false; diff --git a/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp b/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp index d1a2b26..c2548ec 100644 --- a/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp +++ b/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp @@ -74,27 +74,27 @@ Stmt::child_iterator CXXDefaultArgExpr::child_end() { return child_iterator(); } -// CXXZeroInitValueExpr -Stmt::child_iterator CXXZeroInitValueExpr::child_begin() { +// CXXScalarValueInitExpr +Stmt::child_iterator CXXScalarValueInitExpr::child_begin() { return child_iterator(); } -Stmt::child_iterator CXXZeroInitValueExpr::child_end() { +Stmt::child_iterator CXXScalarValueInitExpr::child_end() { return child_iterator(); } // CXXNewExpr CXXNewExpr::CXXNewExpr(ASTContext &C, bool globalNew, FunctionDecl *operatorNew, Expr **placementArgs, unsigned numPlaceArgs, - bool parenTypeId, Expr *arraySize, + SourceRange TypeIdParens, Expr *arraySize, CXXConstructorDecl *constructor, bool initializer, Expr **constructorArgs, unsigned numConsArgs, FunctionDecl *operatorDelete, QualType ty, SourceLocation startLoc, SourceLocation endLoc) : Expr(CXXNewExprClass, ty, ty->isDependentType(), ty->isDependentType()), - GlobalNew(globalNew), ParenTypeId(parenTypeId), + GlobalNew(globalNew), Initializer(initializer), SubExprs(0), OperatorNew(operatorNew), OperatorDelete(operatorDelete), Constructor(constructor), - StartLoc(startLoc), EndLoc(endLoc) { + TypeIdParens(TypeIdParens), StartLoc(startLoc), EndLoc(endLoc) { AllocateArgsArray(C, arraySize != 0, numPlaceArgs, numConsArgs); unsigned i = 0; @@ -190,6 +190,18 @@ UnresolvedLookupExpr::Create(ASTContext &C, bool Dependent, return ULE; } +UnresolvedLookupExpr * +UnresolvedLookupExpr::CreateEmpty(ASTContext &C, unsigned NumTemplateArgs) { + std::size_t size = 
sizeof(UnresolvedLookupExpr); + if (NumTemplateArgs != 0) + size += ExplicitTemplateArgumentList::sizeFor(NumTemplateArgs); + + void *Mem = C.Allocate(size, llvm::alignof<UnresolvedLookupExpr>()); + UnresolvedLookupExpr *E = new (Mem) UnresolvedLookupExpr(EmptyShell()); + E->HasExplicitTemplateArgs = NumTemplateArgs != 0; + return E; +} + OverloadExpr::OverloadExpr(StmtClass K, ASTContext &C, QualType T, bool Dependent, NestedNameSpecifier *Qualifier, SourceRange QRange, DeclarationName Name, @@ -197,19 +209,28 @@ OverloadExpr::OverloadExpr(StmtClass K, ASTContext &C, QualType T, UnresolvedSetIterator Begin, UnresolvedSetIterator End) : Expr(K, T, Dependent, Dependent), - Results(0), NumResults(End - Begin), Name(Name), Qualifier(Qualifier), + Results(0), NumResults(0), Name(Name), Qualifier(Qualifier), QualifierRange(QRange), NameLoc(NameLoc), HasExplicitTemplateArgs(HasTemplateArgs) { + initializeResults(C, Begin, End); +} + +void OverloadExpr::initializeResults(ASTContext &C, + UnresolvedSetIterator Begin, + UnresolvedSetIterator End) { + assert(Results == 0 && "Results already initialized!"); + NumResults = End - Begin; if (NumResults) { Results = static_cast<DeclAccessPair *>( C.Allocate(sizeof(DeclAccessPair) * NumResults, llvm::alignof<DeclAccessPair>())); memcpy(Results, &*Begin.getIterator(), - (End - Begin) * sizeof(DeclAccessPair)); + NumResults * sizeof(DeclAccessPair)); } } + bool OverloadExpr::ComputeDependence(UnresolvedSetIterator Begin, UnresolvedSetIterator End, const TemplateArgumentListInfo *Args) { @@ -269,6 +290,19 @@ DependentScopeDeclRefExpr::Create(ASTContext &C, return DRE; } +DependentScopeDeclRefExpr * +DependentScopeDeclRefExpr::CreateEmpty(ASTContext &C, + unsigned NumTemplateArgs) { + std::size_t size = sizeof(DependentScopeDeclRefExpr); + if (NumTemplateArgs) + size += ExplicitTemplateArgumentList::sizeFor(NumTemplateArgs); + void *Mem = C.Allocate(size); + + return new (Mem) DependentScopeDeclRefExpr(QualType(), 0, SourceRange(), + 
DeclarationName(),SourceLocation(), + NumTemplateArgs != 0); +} + StmtIterator DependentScopeDeclRefExpr::child_begin() { return child_iterator(); } @@ -535,14 +569,6 @@ CXXConstructExpr::CXXConstructExpr(ASTContext &C, StmtClass SC, QualType T, } } -CXXConstructExpr::CXXConstructExpr(EmptyShell Empty, ASTContext &C, - unsigned numargs) - : Expr(CXXConstructExprClass, Empty), Args(0), NumArgs(numargs) -{ - if (NumArgs) - Args = new (C) Stmt*[NumArgs]; -} - void CXXConstructExpr::DoDestroy(ASTContext &C) { DestroyChildren(C); if (Args) @@ -656,6 +682,14 @@ CXXUnresolvedConstructExpr::Create(ASTContext &C, Args, NumArgs, RParenLoc); } +CXXUnresolvedConstructExpr * +CXXUnresolvedConstructExpr::CreateEmpty(ASTContext &C, unsigned NumArgs) { + Stmt::EmptyShell Empty; + void *Mem = C.Allocate(sizeof(CXXUnresolvedConstructExpr) + + sizeof(Expr *) * NumArgs); + return new (Mem) CXXUnresolvedConstructExpr(Empty, NumArgs); +} + Stmt::child_iterator CXXUnresolvedConstructExpr::child_begin() { return child_iterator(reinterpret_cast<Stmt **>(this + 1)); } @@ -714,6 +748,29 @@ CXXDependentScopeMemberExpr::Create(ASTContext &C, Member, MemberLoc, TemplateArgs); } +CXXDependentScopeMemberExpr * +CXXDependentScopeMemberExpr::CreateEmpty(ASTContext &C, + unsigned NumTemplateArgs) { + if (NumTemplateArgs == 0) + return new (C) CXXDependentScopeMemberExpr(C, 0, QualType(), + 0, SourceLocation(), 0, + SourceRange(), 0, + DeclarationName(), + SourceLocation()); + + std::size_t size = sizeof(CXXDependentScopeMemberExpr) + + ExplicitTemplateArgumentList::sizeFor(NumTemplateArgs); + void *Mem = C.Allocate(size, llvm::alignof<CXXDependentScopeMemberExpr>()); + CXXDependentScopeMemberExpr *E + = new (Mem) CXXDependentScopeMemberExpr(C, 0, QualType(), + 0, SourceLocation(), 0, + SourceRange(), 0, + DeclarationName(), + SourceLocation(), 0); + E->HasExplicitTemplateArgs = true; + return E; +} + Stmt::child_iterator CXXDependentScopeMemberExpr::child_begin() { return child_iterator(&Base); } @@ 
-770,6 +827,18 @@ UnresolvedMemberExpr::Create(ASTContext &C, bool Dependent, Member, MemberLoc, TemplateArgs, Begin, End); } +UnresolvedMemberExpr * +UnresolvedMemberExpr::CreateEmpty(ASTContext &C, unsigned NumTemplateArgs) { + std::size_t size = sizeof(UnresolvedMemberExpr); + if (NumTemplateArgs != 0) + size += ExplicitTemplateArgumentList::sizeFor(NumTemplateArgs); + + void *Mem = C.Allocate(size, llvm::alignof<UnresolvedMemberExpr>()); + UnresolvedMemberExpr *E = new (Mem) UnresolvedMemberExpr(EmptyShell()); + E->HasExplicitTemplateArgs = NumTemplateArgs != 0; + return E; +} + CXXRecordDecl *UnresolvedMemberExpr::getNamingClass() const { // Unlike for UnresolvedLookupExpr, it is very easy to re-derive this. diff --git a/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp b/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp new file mode 100644 index 0000000..60ac347 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp @@ -0,0 +1,471 @@ +//===--- ExprClassification.cpp - Expression AST Node Implementation ------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements Expr::classify. 
+// +//===----------------------------------------------------------------------===// + +#include "llvm/Support/ErrorHandling.h" +#include "clang/AST/Expr.h" +#include "clang/AST/ExprCXX.h" +#include "clang/AST/ExprObjC.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclTemplate.h" +using namespace clang; + +typedef Expr::Classification Cl; + +static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E); +static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D); +static Cl::Kinds ClassifyUnnamed(ASTContext &Ctx, QualType T); +static Cl::Kinds ClassifyMemberExpr(ASTContext &Ctx, const MemberExpr *E); +static Cl::Kinds ClassifyBinaryOp(ASTContext &Ctx, const BinaryOperator *E); +static Cl::Kinds ClassifyConditional(ASTContext &Ctx, + const ConditionalOperator *E); +static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E, + Cl::Kinds Kind, SourceLocation &Loc); + +Cl Expr::ClassifyImpl(ASTContext &Ctx, SourceLocation *Loc) const { + assert(!TR->isReferenceType() && "Expressions can't have reference type."); + + Cl::Kinds kind = ClassifyInternal(Ctx, this); + // C99 6.3.2.1: An lvalue is an expression with an object type or an + // incomplete type other than void. + if (!Ctx.getLangOptions().CPlusPlus) { + // Thus, no functions. + if (TR->isFunctionType() || TR == Ctx.OverloadTy) + kind = Cl::CL_Function; + // No void either, but qualified void is OK because it is "other than void". + else if (TR->isVoidType() && !Ctx.getCanonicalType(TR).hasQualifiers()) + kind = Cl::CL_Void; + } + + Cl::ModifiableType modifiable = Cl::CM_Untested; + if (Loc) + modifiable = IsModifiable(Ctx, this, kind, *Loc); + return Classification(kind, modifiable); +} + +static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) { + // This function takes the first stab at classifying expressions. 
+ const LangOptions &Lang = Ctx.getLangOptions(); + + switch (E->getStmtClass()) { + // First come the expressions that are always lvalues, unconditionally. + + case Expr::ObjCIsaExprClass: + // C++ [expr.prim.general]p1: A string literal is an lvalue. + case Expr::StringLiteralClass: + // @encode is equivalent to its string + case Expr::ObjCEncodeExprClass: + // __func__ and friends are too. + case Expr::PredefinedExprClass: + // Property references are lvalues + case Expr::ObjCPropertyRefExprClass: + case Expr::ObjCImplicitSetterGetterRefExprClass: + // C++ [expr.typeid]p1: The result of a typeid expression is an lvalue of... + case Expr::CXXTypeidExprClass: + // Unresolved lookups get classified as lvalues. + // FIXME: Is this wise? Should they get their own kind? + case Expr::UnresolvedLookupExprClass: + case Expr::UnresolvedMemberExprClass: + // ObjC instance variables are lvalues + // FIXME: ObjC++0x might have different rules + case Expr::ObjCIvarRefExprClass: + // C99 6.5.2.5p5 says that compound literals are lvalues. + // FIXME: C++ might have a different opinion. + case Expr::CompoundLiteralExprClass: + return Cl::CL_LValue; + + // Next come the complicated cases. + + // C++ [expr.sub]p1: The result is an lvalue of type "T". + // However, subscripting vector types is more like member access. + case Expr::ArraySubscriptExprClass: + if (cast<ArraySubscriptExpr>(E)->getBase()->getType()->isVectorType()) + return ClassifyInternal(Ctx, cast<ArraySubscriptExpr>(E)->getBase()); + return Cl::CL_LValue; + + // C++ [expr.prim.general]p3: The result is an lvalue if the entity is a + // function or variable and a prvalue otherwise. + case Expr::DeclRefExprClass: + return ClassifyDecl(Ctx, cast<DeclRefExpr>(E)->getDecl()); + // We deal with names referenced from blocks the same way. + case Expr::BlockDeclRefExprClass: + return ClassifyDecl(Ctx, cast<BlockDeclRefExpr>(E)->getDecl()); + + // Member access is complex. 
+ case Expr::MemberExprClass: + return ClassifyMemberExpr(Ctx, cast<MemberExpr>(E)); + + case Expr::UnaryOperatorClass: + switch (cast<UnaryOperator>(E)->getOpcode()) { + // C++ [expr.unary.op]p1: The unary * operator performs indirection: + // [...] the result is an lvalue referring to the object or function + // to which the expression points. + case UnaryOperator::Deref: + return Cl::CL_LValue; + + // GNU extensions, simply look through them. + case UnaryOperator::Real: + case UnaryOperator::Imag: + case UnaryOperator::Extension: + return ClassifyInternal(Ctx, cast<UnaryOperator>(E)->getSubExpr()); + + // C++ [expr.pre.incr]p1: The result is the updated operand; it is an + // lvalue, [...] + // Not so in C. + case UnaryOperator::PreInc: + case UnaryOperator::PreDec: + return Lang.CPlusPlus ? Cl::CL_LValue : Cl::CL_PRValue; + + default: + return Cl::CL_PRValue; + } + + // Implicit casts are lvalues if they're lvalue casts. Other than that, we + // only specifically record class temporaries. + case Expr::ImplicitCastExprClass: + if (cast<ImplicitCastExpr>(E)->isLvalueCast()) + return Cl::CL_LValue; + return Lang.CPlusPlus && E->getType()->isRecordType() ? + Cl::CL_ClassTemporary : Cl::CL_PRValue; + + // C++ [expr.prim.general]p4: The presence of parentheses does not affect + // whether the expression is an lvalue. + case Expr::ParenExprClass: + return ClassifyInternal(Ctx, cast<ParenExpr>(E)->getSubExpr()); + + case Expr::BinaryOperatorClass: + case Expr::CompoundAssignOperatorClass: + // C doesn't have any binary expressions that are lvalues. + if (Lang.CPlusPlus) + return ClassifyBinaryOp(Ctx, cast<BinaryOperator>(E)); + return Cl::CL_PRValue; + + case Expr::CallExprClass: + case Expr::CXXOperatorCallExprClass: + case Expr::CXXMemberCallExprClass: + return ClassifyUnnamed(Ctx, cast<CallExpr>(E)->getCallReturnType()); + + // __builtin_choose_expr is equivalent to the chosen expression. 
+ case Expr::ChooseExprClass: + return ClassifyInternal(Ctx, cast<ChooseExpr>(E)->getChosenSubExpr(Ctx)); + + // Extended vector element access is an lvalue unless there are duplicates + // in the shuffle expression. + case Expr::ExtVectorElementExprClass: + return cast<ExtVectorElementExpr>(E)->containsDuplicateElements() ? + Cl::CL_DuplicateVectorComponents : Cl::CL_LValue; + + // Simply look at the actual default argument. + case Expr::CXXDefaultArgExprClass: + return ClassifyInternal(Ctx, cast<CXXDefaultArgExpr>(E)->getExpr()); + + // Same idea for temporary binding. + case Expr::CXXBindTemporaryExprClass: + return ClassifyInternal(Ctx, cast<CXXBindTemporaryExpr>(E)->getSubExpr()); + + // And the temporary lifetime guard. + case Expr::CXXExprWithTemporariesClass: + return ClassifyInternal(Ctx, cast<CXXExprWithTemporaries>(E)->getSubExpr()); + + // Casts depend completely on the target type. All casts work the same. + case Expr::CStyleCastExprClass: + case Expr::CXXFunctionalCastExprClass: + case Expr::CXXStaticCastExprClass: + case Expr::CXXDynamicCastExprClass: + case Expr::CXXReinterpretCastExprClass: + case Expr::CXXConstCastExprClass: + // Only in C++ can casts be interesting at all. + if (!Lang.CPlusPlus) return Cl::CL_PRValue; + return ClassifyUnnamed(Ctx, cast<ExplicitCastExpr>(E)->getTypeAsWritten()); + + case Expr::ConditionalOperatorClass: + // Once again, only C++ is interesting. + if (!Lang.CPlusPlus) return Cl::CL_PRValue; + return ClassifyConditional(Ctx, cast<ConditionalOperator>(E)); + + // ObjC message sends are effectively function calls, if the target function + // is known. + case Expr::ObjCMessageExprClass: + if (const ObjCMethodDecl *Method = + cast<ObjCMessageExpr>(E)->getMethodDecl()) { + return ClassifyUnnamed(Ctx, Method->getResultType()); + } + + // Some C++ expressions are always class temporaries. 
+ case Expr::CXXConstructExprClass: + case Expr::CXXTemporaryObjectExprClass: + case Expr::CXXScalarValueInitExprClass: + return Cl::CL_ClassTemporary; + + // Everything we haven't handled is a prvalue. + default: + return Cl::CL_PRValue; + } +} + +/// ClassifyDecl - Return the classification of an expression referencing the +/// given declaration. +static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D) { + // C++ [expr.prim.general]p6: The result is an lvalue if the entity is a + // function, variable, or data member and a prvalue otherwise. + // In C, functions are not lvalues. + // In addition, NonTypeTemplateParmDecl derives from VarDecl but isn't an + // lvalue unless it's a reference type (C++ [temp.param]p6), so we need to + // special-case this. + bool islvalue; + if (const NonTypeTemplateParmDecl *NTTParm = + dyn_cast<NonTypeTemplateParmDecl>(D)) + islvalue = NTTParm->getType()->isReferenceType(); + else + islvalue = isa<VarDecl>(D) || isa<FieldDecl>(D) || + (Ctx.getLangOptions().CPlusPlus && + (isa<FunctionDecl>(D) || isa<FunctionTemplateDecl>(D))); + + return islvalue ? Cl::CL_LValue : Cl::CL_PRValue; +} + +/// ClassifyUnnamed - Return the classification of an expression yielding an +/// unnamed value of the given type. This applies in particular to function +/// calls and casts. +static Cl::Kinds ClassifyUnnamed(ASTContext &Ctx, QualType T) { + // In C, function calls are always rvalues. + if (!Ctx.getLangOptions().CPlusPlus) return Cl::CL_PRValue; + + // C++ [expr.call]p10: A function call is an lvalue if the result type is an + // lvalue reference type or an rvalue reference to function type, an xvalue + // if the result type is an rvalue refernence to object type, and a prvalue + // otherwise. + if (T->isLValueReferenceType()) + return Cl::CL_LValue; + const RValueReferenceType *RV = T->getAs<RValueReferenceType>(); + if (!RV) // Could still be a class temporary, though. + return T->isRecordType() ? 
Cl::CL_ClassTemporary : Cl::CL_PRValue; + + return RV->getPointeeType()->isFunctionType() ? Cl::CL_LValue : Cl::CL_XValue; +} + +static Cl::Kinds ClassifyMemberExpr(ASTContext &Ctx, const MemberExpr *E) { + // Handle C first, it's easier. + if (!Ctx.getLangOptions().CPlusPlus) { + // C99 6.5.2.3p3 + // For dot access, the expression is an lvalue if the first part is. For + // arrow access, it always is an lvalue. + if (E->isArrow()) + return Cl::CL_LValue; + // ObjC property accesses are not lvalues, but get special treatment. + Expr *Base = E->getBase(); + if (isa<ObjCPropertyRefExpr>(Base) || + isa<ObjCImplicitSetterGetterRefExpr>(Base)) + return Cl::CL_SubObjCPropertySetting; + return ClassifyInternal(Ctx, Base); + } + + NamedDecl *Member = E->getMemberDecl(); + // C++ [expr.ref]p3: E1->E2 is converted to the equivalent form (*(E1)).E2. + // C++ [expr.ref]p4: If E2 is declared to have type "reference to T", then + // E1.E2 is an lvalue. + if (ValueDecl *Value = dyn_cast<ValueDecl>(Member)) + if (Value->getType()->isReferenceType()) + return Cl::CL_LValue; + + // Otherwise, one of the following rules applies. + // -- If E2 is a static member [...] then E1.E2 is an lvalue. + if (isa<VarDecl>(Member) && Member->getDeclContext()->isRecord()) + return Cl::CL_LValue; + + // -- If E2 is a non-static data member [...]. If E1 is an lvalue, then + // E1.E2 is an lvalue; if E1 is an xvalue, then E1.E2 is an xvalue; + // otherwise, it is a prvalue. + if (isa<FieldDecl>(Member)) { + // *E1 is an lvalue + if (E->isArrow()) + return Cl::CL_LValue; + return ClassifyInternal(Ctx, E->getBase()); + } + + // -- If E2 is a [...] member function, [...] + // -- If it refers to a static member function [...], then E1.E2 is an + // lvalue; [...] + // -- Otherwise [...] E1.E2 is a prvalue. + if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Member)) + return Method->isStatic() ? 
Cl::CL_LValue : Cl::CL_MemberFunction; + + // -- If E2 is a member enumerator [...], the expression E1.E2 is a prvalue. + // So is everything else we haven't handled yet. + return Cl::CL_PRValue; +} + +static Cl::Kinds ClassifyBinaryOp(ASTContext &Ctx, const BinaryOperator *E) { + assert(Ctx.getLangOptions().CPlusPlus && + "This is only relevant for C++."); + // C++ [expr.ass]p1: All [...] return an lvalue referring to the left operand. + if (E->isAssignmentOp()) + return Cl::CL_LValue; + + // C++ [expr.comma]p1: the result is of the same value category as its right + // operand, [...]. + if (E->getOpcode() == BinaryOperator::Comma) + return ClassifyInternal(Ctx, E->getRHS()); + + // C++ [expr.mptr.oper]p6: The result of a .* expression whose second operand + // is a pointer to a data member is of the same value category as its first + // operand. + if (E->getOpcode() == BinaryOperator::PtrMemD) + return E->getType()->isFunctionType() ? Cl::CL_MemberFunction : + ClassifyInternal(Ctx, E->getLHS()); + + // C++ [expr.mptr.oper]p6: The result of an ->* expression is an lvalue if its + // second operand is a pointer to data member and a prvalue otherwise. + if (E->getOpcode() == BinaryOperator::PtrMemI) + return E->getType()->isFunctionType() ? + Cl::CL_MemberFunction : Cl::CL_LValue; + + // All other binary operations are prvalues. + return Cl::CL_PRValue; +} + +static Cl::Kinds ClassifyConditional(ASTContext &Ctx, + const ConditionalOperator *E) { + assert(Ctx.getLangOptions().CPlusPlus && + "This is only relevant for C++."); + + Expr *True = E->getTrueExpr(); + Expr *False = E->getFalseExpr(); + // C++ [expr.cond]p2 + // If either the second or the third operand has type (cv) void, [...] + // the result [...] is a prvalue. + if (True->getType()->isVoidType() || False->getType()->isVoidType()) + return Cl::CL_PRValue; + + // Note that at this point, we have already performed all conversions + // according to [expr.cond]p3. 
+ // C++ [expr.cond]p4: If the second and third operands are glvalues of the + // same value category [...], the result is of that [...] value category. + // C++ [expr.cond]p5: Otherwise, the result is a prvalue. + Cl::Kinds LCl = ClassifyInternal(Ctx, True), + RCl = ClassifyInternal(Ctx, False); + return LCl == RCl ? LCl : Cl::CL_PRValue; +} + +static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E, + Cl::Kinds Kind, SourceLocation &Loc) { + // As a general rule, we only care about lvalues. But there are some rvalues + // for which we want to generate special results. + if (Kind == Cl::CL_PRValue) { + // For the sake of better diagnostics, we want to specifically recognize + // use of the GCC cast-as-lvalue extension. + if (const CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(E->IgnoreParens())){ + if (CE->getSubExpr()->Classify(Ctx).isLValue()) { + Loc = CE->getLParenLoc(); + return Cl::CM_LValueCast; + } + } + } + if (Kind != Cl::CL_LValue) + return Cl::CM_RValue; + + // This is the lvalue case. + // Functions are lvalues in C++, but not modifiable. (C++ [basic.lval]p6) + if (Ctx.getLangOptions().CPlusPlus && E->getType()->isFunctionType()) + return Cl::CM_Function; + + // You cannot assign to a variable outside a block from within the block if + // it is not marked __block, e.g. + // void takeclosure(void (^C)(void)); + // void func() { int x = 1; takeclosure(^{ x = 7; }); } + if (const BlockDeclRefExpr *BDR = dyn_cast<BlockDeclRefExpr>(E)) { + if (!BDR->isByRef() && isa<VarDecl>(BDR->getDecl())) + return Cl::CM_NotBlockQualified; + } + + // Assignment to a property in ObjC is an implicit setter access. But a + // setter might not exist. + if (const ObjCImplicitSetterGetterRefExpr *Expr = + dyn_cast<ObjCImplicitSetterGetterRefExpr>(E)) { + if (Expr->getSetterMethod() == 0) + return Cl::CM_NoSetterProperty; + } + + CanQualType CT = Ctx.getCanonicalType(E->getType()); + // Const stuff is obviously not modifiable. 
+ if (CT.isConstQualified()) + return Cl::CM_ConstQualified; + // Arrays are not modifiable, only their elements are. + if (CT->isArrayType()) + return Cl::CM_ArrayType; + // Incomplete types are not modifiable. + if (CT->isIncompleteType()) + return Cl::CM_IncompleteType; + + // Records with any const fields (recursively) are not modifiable. + if (const RecordType *R = CT->getAs<RecordType>()) { + assert(!Ctx.getLangOptions().CPlusPlus && + "C++ struct assignment should be resolved by the " + "copy assignment operator."); + if (R->hasConstFields()) + return Cl::CM_ConstQualified; + } + + return Cl::CM_Modifiable; +} + +Expr::isLvalueResult Expr::isLvalue(ASTContext &Ctx) const { + Classification VC = Classify(Ctx); + switch (VC.getKind()) { + case Cl::CL_LValue: return LV_Valid; + case Cl::CL_XValue: return LV_InvalidExpression; + case Cl::CL_Function: return LV_NotObjectType; + case Cl::CL_Void: return LV_IncompleteVoidType; + case Cl::CL_DuplicateVectorComponents: return LV_DuplicateVectorComponents; + case Cl::CL_MemberFunction: return LV_MemberFunction; + case Cl::CL_SubObjCPropertySetting: return LV_SubObjCPropertySetting; + case Cl::CL_ClassTemporary: return LV_ClassTemporary; + case Cl::CL_PRValue: return LV_InvalidExpression; + } + llvm_unreachable("Unhandled kind"); +} + +Expr::isModifiableLvalueResult +Expr::isModifiableLvalue(ASTContext &Ctx, SourceLocation *Loc) const { + SourceLocation dummy; + Classification VC = ClassifyModifiable(Ctx, Loc ? 
*Loc : dummy); + switch (VC.getKind()) { + case Cl::CL_LValue: break; + case Cl::CL_XValue: return MLV_InvalidExpression; + case Cl::CL_Function: return MLV_NotObjectType; + case Cl::CL_Void: return MLV_IncompleteVoidType; + case Cl::CL_DuplicateVectorComponents: return MLV_DuplicateVectorComponents; + case Cl::CL_MemberFunction: return MLV_MemberFunction; + case Cl::CL_SubObjCPropertySetting: return MLV_SubObjCPropertySetting; + case Cl::CL_ClassTemporary: return MLV_ClassTemporary; + case Cl::CL_PRValue: + return VC.getModifiable() == Cl::CM_LValueCast ? + MLV_LValueCast : MLV_InvalidExpression; + } + assert(VC.getKind() == Cl::CL_LValue && "Unhandled kind"); + switch (VC.getModifiable()) { + case Cl::CM_Untested: llvm_unreachable("Did not test modifiability"); + case Cl::CM_Modifiable: return MLV_Valid; + case Cl::CM_RValue: llvm_unreachable("CM_RValue and CL_LValue don't match"); + case Cl::CM_Function: return MLV_NotObjectType; + case Cl::CM_LValueCast: + llvm_unreachable("CM_LValueCast and CL_LValue don't match"); + case Cl::CM_NotBlockQualified: return MLV_NotBlockQualified; + case Cl::CM_NoSetterProperty: return MLV_NoSetterProperty; + case Cl::CM_ConstQualified: return MLV_ConstQualified; + case Cl::CM_ArrayType: return MLV_ArrayType; + case Cl::CM_IncompleteType: return MLV_IncompleteType; + } + llvm_unreachable("Unhandled modifiable type"); +} diff --git a/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp b/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp index dc61401..3c97420 100644 --- a/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp +++ b/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp @@ -157,7 +157,7 @@ static bool EvalPointerValueAsBool(LValue& Value, bool& Result) { static bool HandleConversionToBool(const Expr* E, bool& Result, EvalInfo &Info) { - if (E->getType()->isIntegralType()) { + if (E->getType()->isIntegralOrEnumerationType()) { APSInt IntResult; if (!EvaluateInteger(E, IntResult, Info)) return false; @@ -542,7 +542,7 @@ bool 
PointerExprEvaluator::VisitCastExpr(CastExpr* E) { SubExpr->getType()->isBlockPointerType()) return Visit(SubExpr); - if (SubExpr->getType()->isIntegralType()) { + if (SubExpr->getType()->isIntegralOrEnumerationType()) { APValue Value; if (!EvaluateIntegerOrLValue(SubExpr, Value, Info)) break; @@ -563,6 +563,7 @@ bool PointerExprEvaluator::VisitCastExpr(CastExpr* E) { case CastExpr::CK_NoOp: case CastExpr::CK_BitCast: + case CastExpr::CK_LValueBitCast: case CastExpr::CK_AnyPointerToObjCPointerCast: case CastExpr::CK_AnyPointerToBlockPointerCast: return Visit(SubExpr); @@ -746,25 +747,46 @@ VectorExprEvaluator::VisitInitListExpr(const InitListExpr *E) { QualType EltTy = VT->getElementType(); llvm::SmallVector<APValue, 4> Elements; - for (unsigned i = 0; i < NumElements; i++) { + // If a vector is initialized with a single element, that value + // becomes every element of the vector, not just the first. + // This is the behavior described in the IBM AltiVec documentation. + if (NumInits == 1) { + APValue InitValue; if (EltTy->isIntegerType()) { llvm::APSInt sInt(32); - if (i < NumInits) { - if (!EvaluateInteger(E->getInit(i), sInt, Info)) - return APValue(); - } else { - sInt = Info.Ctx.MakeIntValue(0, EltTy); - } - Elements.push_back(APValue(sInt)); + if (!EvaluateInteger(E->getInit(0), sInt, Info)) + return APValue(); + InitValue = APValue(sInt); } else { llvm::APFloat f(0.0); - if (i < NumInits) { - if (!EvaluateFloat(E->getInit(i), f, Info)) - return APValue(); + if (!EvaluateFloat(E->getInit(0), f, Info)) + return APValue(); + InitValue = APValue(f); + } + for (unsigned i = 0; i < NumElements; i++) { + Elements.push_back(InitValue); + } + } else { + for (unsigned i = 0; i < NumElements; i++) { + if (EltTy->isIntegerType()) { + llvm::APSInt sInt(32); + if (i < NumInits) { + if (!EvaluateInteger(E->getInit(i), sInt, Info)) + return APValue(); + } else { + sInt = Info.Ctx.MakeIntValue(0, EltTy); + } + Elements.push_back(APValue(sInt)); } else { - f = 
APFloat::getZero(Info.Ctx.getFloatTypeSemantics(EltTy)); + llvm::APFloat f(0.0); + if (i < NumInits) { + if (!EvaluateFloat(E->getInit(i), f, Info)) + return APValue(); + } else { + f = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(EltTy)); + } + Elements.push_back(APValue(f)); } - Elements.push_back(APValue(f)); } } return APValue(&Elements[0], Elements.size()); @@ -818,7 +840,8 @@ public: : Info(info), Result(result) {} bool Success(const llvm::APSInt &SI, const Expr *E) { - assert(E->getType()->isIntegralType() && "Invalid evaluation result."); + assert(E->getType()->isIntegralOrEnumerationType() && + "Invalid evaluation result."); assert(SI.isSigned() == E->getType()->isSignedIntegerType() && "Invalid evaluation result."); assert(SI.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) && @@ -828,7 +851,8 @@ public: } bool Success(const llvm::APInt &I, const Expr *E) { - assert(E->getType()->isIntegralType() && "Invalid evaluation result."); + assert(E->getType()->isIntegralOrEnumerationType() && + "Invalid evaluation result."); assert(I.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) && "Invalid evaluation result."); Result = APValue(APSInt(I)); @@ -837,7 +861,8 @@ public: } bool Success(uint64_t Value, const Expr *E) { - assert(E->getType()->isIntegralType() && "Invalid evaluation result."); + assert(E->getType()->isIntegralOrEnumerationType() && + "Invalid evaluation result."); Result = APValue(Info.Ctx.MakeIntValue(Value, E->getType())); return true; } @@ -914,7 +939,7 @@ public: return Success(0, E); } - bool VisitCXXZeroInitValueExpr(const CXXZeroInitValueExpr *E) { + bool VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) { return Success(0, E); } @@ -943,12 +968,12 @@ private: } // end anonymous namespace static bool EvaluateIntegerOrLValue(const Expr* E, APValue &Result, EvalInfo &Info) { - assert(E->getType()->isIntegralType()); + assert(E->getType()->isIntegralOrEnumerationType()); return IntExprEvaluator(Info, 
Result).Visit(const_cast<Expr*>(E)); } static bool EvaluateInteger(const Expr* E, APSInt &Result, EvalInfo &Info) { - assert(E->getType()->isIntegralType()); + assert(E->getType()->isIntegralOrEnumerationType()); APValue Val; if (!EvaluateIntegerOrLValue(E, Val, Info) || !Val.isInt()) @@ -1314,8 +1339,8 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) { return Success(Result, E); } } - if (!LHSTy->isIntegralType() || - !RHSTy->isIntegralType()) { + if (!LHSTy->isIntegralOrEnumerationType() || + !RHSTy->isIntegralOrEnumerationType()) { // We can't continue from here for non-integral types, and they // could potentially confuse the following operations. return false; @@ -1570,7 +1595,7 @@ bool IntExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) { } // Only handle integral operations... - if (!E->getSubExpr()->getType()->isIntegralType()) + if (!E->getSubExpr()->getType()->isIntegralOrEnumerationType()) return false; // Get the operand value into 'Result'. @@ -1613,7 +1638,7 @@ bool IntExprEvaluator::VisitCastExpr(CastExpr *E) { } // Handle simple integer->integer casts. 
- if (SrcType->isIntegralType()) { + if (SrcType->isIntegralOrEnumerationType()) { if (!Visit(SubExpr)) return false; @@ -1732,7 +1757,7 @@ public: bool VisitBinaryOperator(const BinaryOperator *E); bool VisitFloatingLiteral(const FloatingLiteral *E); bool VisitCastExpr(CastExpr *E); - bool VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E); + bool VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E); bool VisitConditionalOperator(ConditionalOperator *E); bool VisitChooseExpr(const ChooseExpr *E) @@ -1908,7 +1933,7 @@ bool FloatExprEvaluator::VisitFloatingLiteral(const FloatingLiteral *E) { bool FloatExprEvaluator::VisitCastExpr(CastExpr *E) { Expr* SubExpr = E->getSubExpr(); - if (SubExpr->getType()->isIntegralType()) { + if (SubExpr->getType()->isIntegralOrEnumerationType()) { APSInt IntResult; if (!EvaluateInteger(SubExpr, IntResult, Info)) return false; @@ -1928,7 +1953,7 @@ bool FloatExprEvaluator::VisitCastExpr(CastExpr *E) { return false; } -bool FloatExprEvaluator::VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) { +bool FloatExprEvaluator::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) { Result = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(E->getType())); return true; } @@ -2186,6 +2211,8 @@ bool Expr::Evaluate(EvalResult &Result, ASTContext &Ctx) const { } else if (E->getType()->isIntegerType()) { if (!IntExprEvaluator(Info, Info.EvalResult.Val).Visit(const_cast<Expr*>(E))) return false; + if (Result.Val.isLValue() && !IsGlobalLValue(Result.Val.getLValueBase())) + return false; } else if (E->getType()->hasPointerRepresentation()) { LValue LV; if (!EvaluatePointer(E, LV, Info)) @@ -2316,7 +2343,7 @@ static ICEDiag CheckEvalInICE(const Expr* E, ASTContext &Ctx) { static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) { assert(!E->isValueDependent() && "Should not see value dependent exprs!"); - if (!E->getType()->isIntegralType()) { + if (!E->getType()->isIntegralOrEnumerationType()) { return ICEDiag(2, E->getLocStart()); } @@ -2384,7 
+2411,7 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) { case Expr::IntegerLiteralClass: case Expr::CharacterLiteralClass: case Expr::CXXBoolLiteralExprClass: - case Expr::CXXZeroInitValueExprClass: + case Expr::CXXScalarValueInitExprClass: case Expr::TypesCompatibleExprClass: case Expr::UnaryTypeTraitExprClass: return NoDiag(); @@ -2579,7 +2606,7 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) { case Expr::CXXReinterpretCastExprClass: case Expr::CXXConstCastExprClass: { const Expr *SubExpr = cast<CastExpr>(E)->getSubExpr(); - if (SubExpr->getType()->isIntegralType()) + if (SubExpr->getType()->isIntegralOrEnumerationType()) return CheckICE(SubExpr, Ctx); if (isa<FloatingLiteral>(SubExpr->IgnoreParens())) return NoDiag(); diff --git a/contrib/llvm/tools/clang/lib/AST/Makefile b/contrib/llvm/tools/clang/lib/AST/Makefile index ede2577..7a1672b 100644 --- a/contrib/llvm/tools/clang/lib/AST/Makefile +++ b/contrib/llvm/tools/clang/lib/AST/Makefile @@ -11,11 +11,9 @@ # ##===----------------------------------------------------------------------===## -LEVEL = ../../../.. +CLANG_LEVEL := ../.. LIBRARYNAME := clangAST BUILD_ARCHIVE = 1 -CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include - -include $(LEVEL)/Makefile.common +include $(CLANG_LEVEL)/Makefile diff --git a/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp b/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp index 983a287..88d71ce 100644 --- a/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp +++ b/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp @@ -23,6 +23,35 @@ using namespace clang; namespace { +/// BaseSubobjectInfo - Represents a single base subobject in a complete class. +/// For a class hierarchy like +/// +/// class A { }; +/// class B : A { }; +/// class C : A, B { }; +/// +/// The BaseSubobjectInfo graph for C will have three BaseSubobjectInfo +/// instances, one for B and two for A. 
+/// +/// If a base is virtual, it will only have one BaseSubobjectInfo allocated. +struct BaseSubobjectInfo { + /// Class - The class for this base info. + const CXXRecordDecl *Class; + + /// IsVirtual - Whether the BaseInfo represents a virtual base or not. + bool IsVirtual; + + /// Bases - Information about the base subobjects. + llvm::SmallVector<BaseSubobjectInfo*, 4> Bases; + + /// PrimaryVirtualBaseInfo - Holds the base info for the primary virtual base + /// of this base info (if one exists). + BaseSubobjectInfo *PrimaryVirtualBaseInfo; + + // FIXME: Document. + const BaseSubobjectInfo *Derived; +}; + /// EmptySubobjectMap - Keeps track of which empty subobjects exist at different /// offsets while laying out a C++ class. class EmptySubobjectMap { @@ -36,30 +65,41 @@ class EmptySubobjectMap { typedef llvm::DenseMap<uint64_t, ClassVectorTy> EmptyClassOffsetsMapTy; EmptyClassOffsetsMapTy EmptyClassOffsets; + /// MaxEmptyClassOffset - The highest offset known to contain an empty + /// base subobject. + uint64_t MaxEmptyClassOffset; + /// ComputeEmptySubobjectSizes - Compute the size of the largest base or /// member subobject that is empty. 
void ComputeEmptySubobjectSizes(); + + bool CanPlaceSubobjectAtOffset(const CXXRecordDecl *RD, + uint64_t Offset) const; - struct BaseInfo { - const CXXRecordDecl *Class; - bool IsVirtual; - - const CXXRecordDecl *PrimaryVirtualBase; - - llvm::SmallVector<BaseInfo*, 4> Bases; - const BaseInfo *Derived; - }; + void AddSubobjectAtOffset(const CXXRecordDecl *RD, uint64_t Offset); - llvm::DenseMap<const CXXRecordDecl *, BaseInfo *> VirtualBaseInfo; - llvm::DenseMap<const CXXRecordDecl *, BaseInfo *> NonVirtualBaseInfo; + bool CanPlaceBaseSubobjectAtOffset(const BaseSubobjectInfo *Info, + uint64_t Offset); + void UpdateEmptyBaseSubobjects(const BaseSubobjectInfo *Info, + uint64_t Offset, bool PlacingEmptyBase); - BaseInfo *ComputeBaseInfo(const CXXRecordDecl *RD, bool IsVirtual, - const BaseInfo *Derived); - void ComputeBaseInfo(); + bool CanPlaceFieldSubobjectAtOffset(const CXXRecordDecl *RD, + const CXXRecordDecl *Class, + uint64_t Offset) const; + bool CanPlaceFieldSubobjectAtOffset(const FieldDecl *FD, + uint64_t Offset) const; - bool CanPlaceBaseSubobjectAtOffset(const BaseInfo *Info, uint64_t Offset); - void UpdateEmptyBaseSubobjects(const BaseInfo *Info, uint64_t Offset); + void UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD, + const CXXRecordDecl *Class, + uint64_t Offset); + void UpdateEmptyFieldSubobjects(const FieldDecl *FD, uint64_t Offset); + /// AnyEmptySubobjectsBeyondOffset - Returns whether there are any empty + /// subobjects beyond the given offset. + bool AnyEmptySubobjectsBeyondOffset(uint64_t Offset) const { + return Offset <= MaxEmptyClassOffset; + } + public: /// This holds the size of the largest empty subobject (either a base /// or a member). 
Will be zero if the record being built doesn't contain @@ -67,18 +107,21 @@ public: uint64_t SizeOfLargestEmptySubobject; EmptySubobjectMap(ASTContext &Context, const CXXRecordDecl *Class) - : Context(Context), Class(Class), SizeOfLargestEmptySubobject(0) { + : Context(Context), Class(Class), MaxEmptyClassOffset(0), + SizeOfLargestEmptySubobject(0) { ComputeEmptySubobjectSizes(); - - ComputeBaseInfo(); } /// CanPlaceBaseAtOffset - Return whether the given base class can be placed /// at the given offset. /// Returns false if placing the record will result in two components /// (direct or indirect) of the same type having the same offset. - bool CanPlaceBaseAtOffset(const CXXRecordDecl *RD, bool BaseIsVirtual, + bool CanPlaceBaseAtOffset(const BaseSubobjectInfo *Info, uint64_t Offset); + + /// CanPlaceFieldAtOffset - Return whether a field can be placed at the given + /// offset. + bool CanPlaceFieldAtOffset(const FieldDecl *FD, uint64_t Offset); }; void EmptySubobjectMap::ComputeEmptySubobjectSizes() { @@ -130,93 +173,67 @@ void EmptySubobjectMap::ComputeEmptySubobjectSizes() { } } -EmptySubobjectMap::BaseInfo * -EmptySubobjectMap::ComputeBaseInfo(const CXXRecordDecl *RD, bool IsVirtual, - const BaseInfo *Derived) { - BaseInfo *Info; - - if (IsVirtual) { - BaseInfo *&InfoSlot = VirtualBaseInfo[RD]; - if (InfoSlot) { - assert(InfoSlot->Class == RD && "Wrong class for virtual base info!"); - return InfoSlot; - } +bool +EmptySubobjectMap::CanPlaceSubobjectAtOffset(const CXXRecordDecl *RD, + uint64_t Offset) const { + // We only need to check empty bases. 
+ if (!RD->isEmpty()) + return true; - InfoSlot = new (Context) BaseInfo; - Info = InfoSlot; - } else { - Info = new (Context) BaseInfo; - } - - Info->Class = RD; - Info->IsVirtual = IsVirtual; - Info->Derived = Derived; - Info->PrimaryVirtualBase = 0; + EmptyClassOffsetsMapTy::const_iterator I = EmptyClassOffsets.find(Offset); + if (I == EmptyClassOffsets.end()) + return true; - if (RD->getNumVBases()) { - // Check if this class has a primary virtual base. - const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); - if (Layout.getPrimaryBaseWasVirtual()) { - Info->PrimaryVirtualBase = Layout.getPrimaryBase(); - assert(Info->PrimaryVirtualBase && - "Didn't have a primary virtual base!"); - } - } + const ClassVectorTy& Classes = I->second; + if (std::find(Classes.begin(), Classes.end(), RD) == Classes.end()) + return true; - for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), - E = RD->bases_end(); I != E; ++I) { - bool IsVirtual = I->isVirtual(); - - const CXXRecordDecl *BaseDecl = - cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl()); - - Info->Bases.push_back(ComputeBaseInfo(BaseDecl, IsVirtual, Info)); - } - - return Info; + // There is already an empty class of the same type at this offset. + return false; } + +void EmptySubobjectMap::AddSubobjectAtOffset(const CXXRecordDecl *RD, + uint64_t Offset) { + // We only care about empty bases. + if (!RD->isEmpty()) + return; -void EmptySubobjectMap::ComputeBaseInfo() { - for (CXXRecordDecl::base_class_const_iterator I = Class->bases_begin(), - E = Class->bases_end(); I != E; ++I) { - bool IsVirtual = I->isVirtual(); - - const CXXRecordDecl *BaseDecl = - cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl()); - - BaseInfo *Info = ComputeBaseInfo(BaseDecl, IsVirtual, /*Derived=*/0); - if (IsVirtual) { - // ComputeBaseInfo has already added this base for us. 
- continue; - } + ClassVectorTy& Classes = EmptyClassOffsets[Offset]; + assert(std::find(Classes.begin(), Classes.end(), RD) == Classes.end() && + "Duplicate empty class detected!"); - // Add the base info to the map of non-virtual bases. - assert(!NonVirtualBaseInfo.count(BaseDecl) && - "Non-virtual base already exists!"); - NonVirtualBaseInfo.insert(std::make_pair(BaseDecl, Info)); - } + Classes.push_back(RD); + + // Update the empty class offset. + MaxEmptyClassOffset = std::max(MaxEmptyClassOffset, Offset); } bool -EmptySubobjectMap::CanPlaceBaseSubobjectAtOffset(const BaseInfo *Info, +EmptySubobjectMap::CanPlaceBaseSubobjectAtOffset(const BaseSubobjectInfo *Info, uint64_t Offset) { + // We don't have to keep looking past the maximum offset that's known to + // contain an empty class. + if (!AnyEmptySubobjectsBeyondOffset(Offset)) + return true; + + if (!CanPlaceSubobjectAtOffset(Info->Class, Offset)) + return false; + // Traverse all non-virtual bases. + const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class); for (unsigned I = 0, E = Info->Bases.size(); I != E; ++I) { - BaseInfo* Base = Info->Bases[I]; + BaseSubobjectInfo* Base = Info->Bases[I]; if (Base->IsVirtual) continue; - const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class); uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class); if (!CanPlaceBaseSubobjectAtOffset(Base, BaseOffset)) return false; } - if (Info->PrimaryVirtualBase) { - BaseInfo *PrimaryVirtualBaseInfo = - VirtualBaseInfo.lookup(Info->PrimaryVirtualBase); - assert(PrimaryVirtualBaseInfo && "Didn't find base info!"); + if (Info->PrimaryVirtualBaseInfo) { + BaseSubobjectInfo *PrimaryVirtualBaseInfo = Info->PrimaryVirtualBaseInfo; if (Info == PrimaryVirtualBaseInfo->Derived) { if (!CanPlaceBaseSubobjectAtOffset(PrimaryVirtualBaseInfo, Offset)) @@ -224,62 +241,277 @@ EmptySubobjectMap::CanPlaceBaseSubobjectAtOffset(const BaseInfo *Info, } } - // FIXME: Member variables. 
+ // Traverse all member variables. + unsigned FieldNo = 0; + for (CXXRecordDecl::field_iterator I = Info->Class->field_begin(), + E = Info->Class->field_end(); I != E; ++I, ++FieldNo) { + const FieldDecl *FD = *I; + + uint64_t FieldOffset = Offset + Layout.getFieldOffset(FieldNo); + if (!CanPlaceFieldSubobjectAtOffset(FD, FieldOffset)) + return false; + } + return true; } -void EmptySubobjectMap::UpdateEmptyBaseSubobjects(const BaseInfo *Info, - uint64_t Offset) { - if (Info->Class->isEmpty()) { - // FIXME: Record that there is an empty class at this offset. +void EmptySubobjectMap::UpdateEmptyBaseSubobjects(const BaseSubobjectInfo *Info, + uint64_t Offset, + bool PlacingEmptyBase) { + if (!PlacingEmptyBase && Offset >= SizeOfLargestEmptySubobject) { + // We know that the only empty subobjects that can conflict with empty + // subobject of non-empty bases, are empty bases that can be placed at + // offset zero. Because of this, we only need to keep track of empty base + // subobjects with offsets less than the size of the largest empty + // subobject for our class. + return; } - + + AddSubobjectAtOffset(Info->Class, Offset); + // Traverse all non-virtual bases. 
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class); for (unsigned I = 0, E = Info->Bases.size(); I != E; ++I) { - BaseInfo* Base = Info->Bases[I]; + BaseSubobjectInfo* Base = Info->Bases[I]; if (Base->IsVirtual) continue; - - const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class); + uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class); - - UpdateEmptyBaseSubobjects(Base, BaseOffset); + UpdateEmptyBaseSubobjects(Base, BaseOffset, PlacingEmptyBase); } - if (Info->PrimaryVirtualBase) { - BaseInfo *PrimaryVirtualBaseInfo = - VirtualBaseInfo.lookup(Info->PrimaryVirtualBase); - assert(PrimaryVirtualBaseInfo && "Didn't find base info!"); + if (Info->PrimaryVirtualBaseInfo) { + BaseSubobjectInfo *PrimaryVirtualBaseInfo = Info->PrimaryVirtualBaseInfo; if (Info == PrimaryVirtualBaseInfo->Derived) - UpdateEmptyBaseSubobjects(PrimaryVirtualBaseInfo, Offset); + UpdateEmptyBaseSubobjects(PrimaryVirtualBaseInfo, Offset, + PlacingEmptyBase); + } + + // Traverse all member variables. + unsigned FieldNo = 0; + for (CXXRecordDecl::field_iterator I = Info->Class->field_begin(), + E = Info->Class->field_end(); I != E; ++I, ++FieldNo) { + const FieldDecl *FD = *I; + + uint64_t FieldOffset = Offset + Layout.getFieldOffset(FieldNo); + UpdateEmptyFieldSubobjects(FD, FieldOffset); } - - // FIXME: Member variables. } -bool EmptySubobjectMap::CanPlaceBaseAtOffset(const CXXRecordDecl *RD, - bool BaseIsVirtual, +bool EmptySubobjectMap::CanPlaceBaseAtOffset(const BaseSubobjectInfo *Info, uint64_t Offset) { // If we know this class doesn't have any empty subobjects we don't need to // bother checking. if (!SizeOfLargestEmptySubobject) return true; - BaseInfo *Info; + if (!CanPlaceBaseSubobjectAtOffset(Info, Offset)) + return false; + + // We are able to place the base at this offset. Make sure to update the + // empty base subobject map. 
+ UpdateEmptyBaseSubobjects(Info, Offset, Info->Class->isEmpty()); + return true; +} + +bool +EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const CXXRecordDecl *RD, + const CXXRecordDecl *Class, + uint64_t Offset) const { + // We don't have to keep looking past the maximum offset that's known to + // contain an empty class. + if (!AnyEmptySubobjectsBeyondOffset(Offset)) + return true; + + if (!CanPlaceSubobjectAtOffset(RD, Offset)) + return false; - if (BaseIsVirtual) - Info = VirtualBaseInfo.lookup(RD); - else - Info = NonVirtualBaseInfo.lookup(RD); + const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); + + // Traverse all non-virtual bases. + for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), + E = RD->bases_end(); I != E; ++I) { + if (I->isVirtual()) + continue; + + const CXXRecordDecl *BaseDecl = + cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl()); + + uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl); + if (!CanPlaceFieldSubobjectAtOffset(BaseDecl, Class, BaseOffset)) + return false; + } + + if (RD == Class) { + // This is the most derived class, traverse virtual bases as well. + for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(), + E = RD->vbases_end(); I != E; ++I) { + const CXXRecordDecl *VBaseDecl = + cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl()); + + uint64_t VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl); + if (!CanPlaceFieldSubobjectAtOffset(VBaseDecl, Class, VBaseOffset)) + return false; + } + } + + // Traverse all member variables. 
+ unsigned FieldNo = 0; + for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end(); + I != E; ++I, ++FieldNo) { + const FieldDecl *FD = *I; + + uint64_t FieldOffset = Offset + Layout.getFieldOffset(FieldNo); + + if (!CanPlaceFieldSubobjectAtOffset(FD, FieldOffset)) + return false; + } + + return true; +} + +bool EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const FieldDecl *FD, + uint64_t Offset) const { + // We don't have to keep looking past the maximum offset that's known to + // contain an empty class. + if (!AnyEmptySubobjectsBeyondOffset(Offset)) + return true; - if (!CanPlaceBaseSubobjectAtOffset(Info, Offset)) + QualType T = FD->getType(); + if (const RecordType *RT = T->getAs<RecordType>()) { + const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl()); + return CanPlaceFieldSubobjectAtOffset(RD, RD, Offset); + } + + // If we have an array type we need to look at every element. + if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T)) { + QualType ElemTy = Context.getBaseElementType(AT); + const RecordType *RT = ElemTy->getAs<RecordType>(); + if (!RT) + return true; + + const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl()); + const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); + + uint64_t NumElements = Context.getConstantArrayElementCount(AT); + uint64_t ElementOffset = Offset; + for (uint64_t I = 0; I != NumElements; ++I) { + // We don't have to keep looking past the maximum offset that's known to + // contain an empty class. + if (!AnyEmptySubobjectsBeyondOffset(ElementOffset)) + return true; + + if (!CanPlaceFieldSubobjectAtOffset(RD, RD, ElementOffset)) + return false; + + ElementOffset += Layout.getSize(); + } + } + + return true; +} + +bool +EmptySubobjectMap::CanPlaceFieldAtOffset(const FieldDecl *FD, uint64_t Offset) { + if (!CanPlaceFieldSubobjectAtOffset(FD, Offset)) return false; - UpdateEmptyBaseSubobjects(Info, Offset); + // We are able to place the member variable at this offset. 
+ // Make sure to update the empty base subobject map. + UpdateEmptyFieldSubobjects(FD, Offset); return true; } +void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD, + const CXXRecordDecl *Class, + uint64_t Offset) { + // We know that the only empty subobjects that can conflict with empty + // field subobjects are subobjects of empty bases that can be placed at offset + // zero. Because of this, we only need to keep track of empty field + // subobjects with offsets less than the size of the largest empty + // subobject for our class. + if (Offset >= SizeOfLargestEmptySubobject) + return; + + AddSubobjectAtOffset(RD, Offset); + + const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); + + // Traverse all non-virtual bases. + for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), + E = RD->bases_end(); I != E; ++I) { + if (I->isVirtual()) + continue; + + const CXXRecordDecl *BaseDecl = + cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl()); + + uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl); + UpdateEmptyFieldSubobjects(BaseDecl, Class, BaseOffset); + } + + if (RD == Class) { + // This is the most derived class, traverse virtual bases as well. + for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(), + E = RD->vbases_end(); I != E; ++I) { + const CXXRecordDecl *VBaseDecl = + cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl()); + + uint64_t VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl); + UpdateEmptyFieldSubobjects(VBaseDecl, Class, VBaseOffset); + } + } + + // Traverse all member variables. 
+ unsigned FieldNo = 0; + for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end(); + I != E; ++I, ++FieldNo) { + const FieldDecl *FD = *I; + + uint64_t FieldOffset = Offset + Layout.getFieldOffset(FieldNo); + + UpdateEmptyFieldSubobjects(FD, FieldOffset); + } +} + +void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const FieldDecl *FD, + uint64_t Offset) { + QualType T = FD->getType(); + if (const RecordType *RT = T->getAs<RecordType>()) { + const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl()); + UpdateEmptyFieldSubobjects(RD, RD, Offset); + return; + } + + // If we have an array type we need to update every element. + if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T)) { + QualType ElemTy = Context.getBaseElementType(AT); + const RecordType *RT = ElemTy->getAs<RecordType>(); + if (!RT) + return; + + const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl()); + const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); + + uint64_t NumElements = Context.getConstantArrayElementCount(AT); + uint64_t ElementOffset = Offset; + + for (uint64_t I = 0; I != NumElements; ++I) { + // We know that the only empty subobjects that can conflict with empty + // field subobjects are subobjects of empty bases that can be placed at + // offset zero. Because of this, we only need to keep track of empty field + // subobjects with offsets less than the size of the largest empty + // subobject for our class. + if (ElementOffset >= SizeOfLargestEmptySubobject) + return; + + UpdateEmptyFieldSubobjects(RD, RD, ElementOffset); + ElementOffset += Layout.getSize(); + } + } +} + class RecordLayoutBuilder { // FIXME: Remove this and make the appropriate fields public. friend class clang::ASTContext; @@ -346,10 +578,6 @@ class RecordLayoutBuilder { /// avoid visiting virtual bases more than once. llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases; - /// EmptyClassOffsets - A map from offsets to empty record decls. 
- typedef std::multimap<uint64_t, const CXXRecordDecl *> EmptyClassOffsetsTy; - EmptyClassOffsetsTy EmptyClassOffsets; - RecordLayoutBuilder(ASTContext &Context, EmptySubobjectMap *EmptySubobjects) : Context(Context), EmptySubobjects(EmptySubobjects), Size(0), Alignment(8), Packed(false), IsUnion(false), IsMac68kAlign(false), @@ -366,9 +594,29 @@ class RecordLayoutBuilder { void LayoutWideBitField(uint64_t FieldSize, uint64_t TypeSize); void LayoutBitField(const FieldDecl *D); - /// ComputeEmptySubobjectSizes - Compute the size of the largest base or - /// member subobject that is empty. - void ComputeEmptySubobjectSizes(const CXXRecordDecl *RD); + /// BaseSubobjectInfoAllocator - Allocator for BaseSubobjectInfo objects. + llvm::SpecificBumpPtrAllocator<BaseSubobjectInfo> BaseSubobjectInfoAllocator; + + typedef llvm::DenseMap<const CXXRecordDecl *, BaseSubobjectInfo *> + BaseSubobjectInfoMapTy; + + /// VirtualBaseInfo - Map from all the (direct or indirect) virtual bases + /// of the class we're laying out to their base subobject info. + BaseSubobjectInfoMapTy VirtualBaseInfo; + + /// NonVirtualBaseInfo - Map from all the direct non-virtual bases of the + /// class we're laying out to their base subobject info. + BaseSubobjectInfoMapTy NonVirtualBaseInfo; + + /// ComputeBaseSubobjectInfo - Compute the base subobject information for the + /// bases of the given class. + void ComputeBaseSubobjectInfo(const CXXRecordDecl *RD); + + /// ComputeBaseSubobjectInfo - Compute the base subobject information for a + /// single class and all of its base classes. + BaseSubobjectInfo *ComputeBaseSubobjectInfo(const CXXRecordDecl *RD, + bool IsVirtual, + BaseSubobjectInfo *Derived); /// DeterminePrimaryBase - Determine the primary base of the given class. void DeterminePrimaryBase(const CXXRecordDecl *RD); @@ -387,43 +635,21 @@ class RecordLayoutBuilder { void LayoutNonVirtualBases(const CXXRecordDecl *RD); /// LayoutNonVirtualBase - Lays out a single non-virtual base. 
- void LayoutNonVirtualBase(const CXXRecordDecl *Base); + void LayoutNonVirtualBase(const BaseSubobjectInfo *Base); - void AddPrimaryVirtualBaseOffsets(const CXXRecordDecl *RD, uint64_t Offset, - const CXXRecordDecl *MostDerivedClass); + void AddPrimaryVirtualBaseOffsets(const BaseSubobjectInfo *Info, + uint64_t Offset); /// LayoutVirtualBases - Lays out all the virtual bases. void LayoutVirtualBases(const CXXRecordDecl *RD, const CXXRecordDecl *MostDerivedClass); /// LayoutVirtualBase - Lays out a single virtual base. - void LayoutVirtualBase(const CXXRecordDecl *Base); + void LayoutVirtualBase(const BaseSubobjectInfo *Base); /// LayoutBase - Will lay out a base and return the offset where it was /// placed, in bits. - uint64_t LayoutBase(const CXXRecordDecl *Base, bool BaseIsVirtual); - - /// canPlaceRecordAtOffset - Return whether a record (either a base class - /// or a field) can be placed at the given offset. - /// Returns false if placing the record will result in two components - /// (direct or indirect) of the same type having the same offset. - bool canPlaceRecordAtOffset(const CXXRecordDecl *RD, uint64_t Offset, - bool CheckVBases) const; - - /// canPlaceFieldAtOffset - Return whether a field can be placed at the given - /// offset. - bool canPlaceFieldAtOffset(const FieldDecl *FD, uint64_t Offset) const; - - /// UpdateEmptyClassOffsets - Called after a record (either a base class - /// or a field) has been placed at the given offset. Will update the - /// EmptyClassOffsets map if the class is empty or has any empty bases or - /// fields. - void UpdateEmptyClassOffsets(const CXXRecordDecl *RD, uint64_t Offset, - bool UpdateVBases); - - /// UpdateEmptyClassOffsets - Called after a field has been placed at the - /// given offset. - void UpdateEmptyClassOffsets(const FieldDecl *FD, uint64_t Offset); + uint64_t LayoutBase(const BaseSubobjectInfo *Base); /// InitializeLayout - Initialize record layout for the given record decl. 
void InitializeLayout(const Decl *D); @@ -575,14 +801,127 @@ void RecordLayoutBuilder::DeterminePrimaryBase(const CXXRecordDecl *RD) { UpdateAlignment(Context.Target.getPointerAlign(0)); } +BaseSubobjectInfo * +RecordLayoutBuilder::ComputeBaseSubobjectInfo(const CXXRecordDecl *RD, + bool IsVirtual, + BaseSubobjectInfo *Derived) { + BaseSubobjectInfo *Info; + + if (IsVirtual) { + // Check if we already have info about this virtual base. + BaseSubobjectInfo *&InfoSlot = VirtualBaseInfo[RD]; + if (InfoSlot) { + assert(InfoSlot->Class == RD && "Wrong class for virtual base info!"); + return InfoSlot; + } + + // We don't, create it. + InfoSlot = new (BaseSubobjectInfoAllocator.Allocate()) BaseSubobjectInfo; + Info = InfoSlot; + } else { + Info = new (BaseSubobjectInfoAllocator.Allocate()) BaseSubobjectInfo; + } + + Info->Class = RD; + Info->IsVirtual = IsVirtual; + Info->Derived = 0; + Info->PrimaryVirtualBaseInfo = 0; + + const CXXRecordDecl *PrimaryVirtualBase = 0; + BaseSubobjectInfo *PrimaryVirtualBaseInfo = 0; + + // Check if this base has a primary virtual base. + if (RD->getNumVBases()) { + const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); + if (Layout.getPrimaryBaseWasVirtual()) { + // This base does have a primary virtual base. + PrimaryVirtualBase = Layout.getPrimaryBase(); + assert(PrimaryVirtualBase && "Didn't have a primary virtual base!"); + + // Now check if we have base subobject info about this primary base. + PrimaryVirtualBaseInfo = VirtualBaseInfo.lookup(PrimaryVirtualBase); + + if (PrimaryVirtualBaseInfo) { + if (PrimaryVirtualBaseInfo->Derived) { + // We did have info about this primary base, and it turns out that it + // has already been claimed as a primary virtual base for another + // base. + PrimaryVirtualBase = 0; + } else { + // We can claim this base as our primary base. + Info->PrimaryVirtualBaseInfo = PrimaryVirtualBaseInfo; + PrimaryVirtualBaseInfo->Derived = Info; + } + } + } + } + + // Now go through all direct bases. 
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), + E = RD->bases_end(); I != E; ++I) { + bool IsVirtual = I->isVirtual(); + + const CXXRecordDecl *BaseDecl = + cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl()); + + Info->Bases.push_back(ComputeBaseSubobjectInfo(BaseDecl, IsVirtual, Info)); + } + + if (PrimaryVirtualBase && !PrimaryVirtualBaseInfo) { + // Traversing the bases must have created the base info for our primary + // virtual base. + PrimaryVirtualBaseInfo = VirtualBaseInfo.lookup(PrimaryVirtualBase); + assert(PrimaryVirtualBaseInfo && + "Did not create a primary virtual base!"); + + // Claim the primary virtual base as our primary virtual base. + Info->PrimaryVirtualBaseInfo = PrimaryVirtualBaseInfo; + PrimaryVirtualBaseInfo->Derived = Info; + } + + return Info; +} + +void RecordLayoutBuilder::ComputeBaseSubobjectInfo(const CXXRecordDecl *RD) { + for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), + E = RD->bases_end(); I != E; ++I) { + bool IsVirtual = I->isVirtual(); + + const CXXRecordDecl *BaseDecl = + cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl()); + + // Compute the base subobject info for this base. + BaseSubobjectInfo *Info = ComputeBaseSubobjectInfo(BaseDecl, IsVirtual, 0); + + if (IsVirtual) { + // ComputeBaseInfo has already added this base for us. + assert(VirtualBaseInfo.count(BaseDecl) && + "Did not add virtual base!"); + } else { + // Add the base info to the map of non-virtual bases. + assert(!NonVirtualBaseInfo.count(BaseDecl) && + "Non-virtual base already exists!"); + NonVirtualBaseInfo.insert(std::make_pair(BaseDecl, Info)); + } + } +} + void RecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD) { - // First, determine the primary base class. + // Then, determine the primary base class. DeterminePrimaryBase(RD); + // Compute base subobject info. + ComputeBaseSubobjectInfo(RD); + // If we have a primary base class, lay it out. 
if (PrimaryBase) { if (PrimaryBaseIsVirtual) { + // If the primary virtual base was a primary virtual base of some other + // base class we'll have to steal it. + BaseSubobjectInfo *PrimaryBaseInfo = VirtualBaseInfo.lookup(PrimaryBase); + PrimaryBaseInfo->Derived = 0; + // We have a virtual primary base, insert it as an indirect primary base. IndirectPrimaryBases.insert(PrimaryBase); @@ -590,9 +929,15 @@ RecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD) { "vbase already visited!"); VisitedVirtualBases.insert(PrimaryBase); - LayoutVirtualBase(PrimaryBase); - } else - LayoutNonVirtualBase(PrimaryBase); + LayoutVirtualBase(PrimaryBaseInfo); + } else { + BaseSubobjectInfo *PrimaryBaseInfo = + NonVirtualBaseInfo.lookup(PrimaryBase); + assert(PrimaryBaseInfo && + "Did not find base info for non-virtual primary base!"); + + LayoutNonVirtualBase(PrimaryBaseInfo); + } } // Now lay out the non-virtual bases. @@ -603,81 +948,64 @@ RecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD) { if (I->isVirtual()) continue; - const CXXRecordDecl *Base = + const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl()); // Skip the primary base. - if (Base == PrimaryBase && !PrimaryBaseIsVirtual) + if (BaseDecl == PrimaryBase && !PrimaryBaseIsVirtual) continue; // Lay out the base. - LayoutNonVirtualBase(Base); + BaseSubobjectInfo *BaseInfo = NonVirtualBaseInfo.lookup(BaseDecl); + assert(BaseInfo && "Did not find base info for non-virtual base!"); + + LayoutNonVirtualBase(BaseInfo); } } -void RecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *Base) { +void RecordLayoutBuilder::LayoutNonVirtualBase(const BaseSubobjectInfo *Base) { // Layout the base. - uint64_t Offset = LayoutBase(Base, /*BaseIsVirtual=*/false); + uint64_t Offset = LayoutBase(Base); // Add its base class offset. 
- if (!Bases.insert(std::make_pair(Base, Offset)).second) - assert(false && "Added same base offset more than once!"); + assert(!Bases.count(Base->Class) && "base offset already exists!"); + Bases.insert(std::make_pair(Base->Class, Offset)); + + AddPrimaryVirtualBaseOffsets(Base, Offset); } void -RecordLayoutBuilder::AddPrimaryVirtualBaseOffsets(const CXXRecordDecl *RD, - uint64_t Offset, - const CXXRecordDecl *MostDerivedClass) { - // We already have the offset for the primary base of the most derived class. - if (RD != MostDerivedClass) { - const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); - const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase(); - - // If this is a primary virtual base and we haven't seen it before, add it. - if (PrimaryBase && Layout.getPrimaryBaseWasVirtual() && - !VBases.count(PrimaryBase)) - VBases.insert(std::make_pair(PrimaryBase, Offset)); +RecordLayoutBuilder::AddPrimaryVirtualBaseOffsets(const BaseSubobjectInfo *Info, + uint64_t Offset) { + // This base isn't interesting, it has no virtual bases. + if (!Info->Class->getNumVBases()) + return; + + // First, check if we have a virtual primary base to add offsets for. + if (Info->PrimaryVirtualBaseInfo) { + assert(Info->PrimaryVirtualBaseInfo->IsVirtual && + "Primary virtual base is not virtual!"); + if (Info->PrimaryVirtualBaseInfo->Derived == Info) { + // Add the offset. + assert(!VBases.count(Info->PrimaryVirtualBaseInfo->Class) && + "primary vbase offset already exists!"); + VBases.insert(std::make_pair(Info->PrimaryVirtualBaseInfo->Class, + Offset)); + + // Traverse the primary virtual base. 
+ AddPrimaryVirtualBaseOffsets(Info->PrimaryVirtualBaseInfo, Offset); + } } - for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), - E = RD->bases_end(); I != E; ++I) { - assert(!I->getType()->isDependentType() && - "Cannot layout class with dependent bases."); - - const CXXRecordDecl *BaseDecl = - cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl()); - - if (!BaseDecl->getNumVBases()) { - // This base isn't interesting since it doesn't have any virtual bases. + // Now go through all direct non-virtual bases. + const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class); + for (unsigned I = 0, E = Info->Bases.size(); I != E; ++I) { + const BaseSubobjectInfo *Base = Info->Bases[I]; + if (Base->IsVirtual) continue; - } - - // Compute the offset of this base. - uint64_t BaseOffset; - - if (I->isVirtual()) { - // If we don't know this vbase yet, don't visit it. It will be visited - // later. - if (!VBases.count(BaseDecl)) { - continue; - } - - // Check if we've already visited this base. - if (!VisitedVirtualBases.insert(BaseDecl)) - continue; - // We want the vbase offset from the class we're currently laying out. - BaseOffset = VBases[BaseDecl]; - } else if (RD == MostDerivedClass) { - // We want the base offset from the class we're currently laying out. 
- assert(Bases.count(BaseDecl) && "Did not find base!"); - BaseOffset = Bases[BaseDecl]; - } else { - const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); - BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl); - } - - AddPrimaryVirtualBaseOffsets(BaseDecl, BaseOffset, MostDerivedClass); + uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class); + AddPrimaryVirtualBaseOffsets(Base, BaseOffset); } } @@ -701,53 +1029,54 @@ RecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD, assert(!I->getType()->isDependentType() && "Cannot layout class with dependent bases."); - const CXXRecordDecl *Base = + const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl()); if (I->isVirtual()) { - if (PrimaryBase != Base || !PrimaryBaseIsVirtual) { - bool IndirectPrimaryBase = IndirectPrimaryBases.count(Base); + if (PrimaryBase != BaseDecl || !PrimaryBaseIsVirtual) { + bool IndirectPrimaryBase = IndirectPrimaryBases.count(BaseDecl); // Only lay out the virtual base if it's not an indirect primary base. if (!IndirectPrimaryBase) { // Only visit virtual bases once. - if (!VisitedVirtualBases.insert(Base)) + if (!VisitedVirtualBases.insert(BaseDecl)) continue; - LayoutVirtualBase(Base); + const BaseSubobjectInfo *BaseInfo = VirtualBaseInfo.lookup(BaseDecl); + assert(BaseInfo && "Did not find virtual base info!"); + LayoutVirtualBase(BaseInfo); } } } - if (!Base->getNumVBases()) { + if (!BaseDecl->getNumVBases()) { // This base isn't interesting since it doesn't have any virtual bases. continue; } - LayoutVirtualBases(Base, MostDerivedClass); + LayoutVirtualBases(BaseDecl, MostDerivedClass); } } -void RecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *Base) { +void RecordLayoutBuilder::LayoutVirtualBase(const BaseSubobjectInfo *Base) { + assert(!Base->Derived && "Trying to lay out a primary virtual base!"); + // Layout the base. 
- uint64_t Offset = LayoutBase(Base, /*BaseIsVirtual=*/true); + uint64_t Offset = LayoutBase(Base); // Add its base class offset. - if (!VBases.insert(std::make_pair(Base, Offset)).second) - assert(false && "Added same vbase offset more than once!"); + assert(!VBases.count(Base->Class) && "vbase offset already exists!"); + VBases.insert(std::make_pair(Base->Class, Offset)); + + AddPrimaryVirtualBaseOffsets(Base, Offset); } -uint64_t RecordLayoutBuilder::LayoutBase(const CXXRecordDecl *Base, - bool BaseIsVirtual) { - const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base); +uint64_t RecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) { + const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base->Class); // If we have an empty base class, try to place it at offset 0. - if (Base->isEmpty() && - EmptySubobjects->CanPlaceBaseAtOffset(Base, BaseIsVirtual, 0) && - canPlaceRecordAtOffset(Base, 0, /*CheckVBases=*/false)) { - // We were able to place the class at offset 0. - UpdateEmptyClassOffsets(Base, 0, /*UpdateVBases=*/false); - + if (Base->Class->isEmpty() && + EmptySubobjects->CanPlaceBaseAtOffset(Base, 0)) { Size = std::max(Size, Layout.getSize()); return 0; @@ -759,15 +1088,10 @@ uint64_t RecordLayoutBuilder::LayoutBase(const CXXRecordDecl *Base, uint64_t Offset = llvm::RoundUpToAlignment(DataSize, BaseAlign); // Try to place the base. - while (true) { - if (EmptySubobjects->CanPlaceBaseAtOffset(Base, BaseIsVirtual, Offset) && - canPlaceRecordAtOffset(Base, Offset, /*CheckVBases=*/false)) - break; - + while (!EmptySubobjects->CanPlaceBaseAtOffset(Base, Offset)) Offset += BaseAlign; - } - if (!Base->isEmpty()) { + if (!Base->Class->isEmpty()) { // Update the data size. DataSize = Offset + Layout.getNonVirtualSize(); @@ -778,173 +1102,9 @@ uint64_t RecordLayoutBuilder::LayoutBase(const CXXRecordDecl *Base, // Remember max struct/class alignment. 
UpdateAlignment(BaseAlign); - UpdateEmptyClassOffsets(Base, Offset, /*UpdateVBases=*/false); return Offset; } -bool -RecordLayoutBuilder::canPlaceRecordAtOffset(const CXXRecordDecl *RD, - uint64_t Offset, - bool CheckVBases) const { - // Look for an empty class with the same type at the same offset. - for (EmptyClassOffsetsTy::const_iterator I = - EmptyClassOffsets.lower_bound(Offset), - E = EmptyClassOffsets.upper_bound(Offset); I != E; ++I) { - - if (I->second == RD) - return false; - } - - const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); - - // Check bases. - for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), - E = RD->bases_end(); I != E; ++I) { - assert(!I->getType()->isDependentType() && - "Cannot layout class with dependent bases."); - if (I->isVirtual()) - continue; - - const CXXRecordDecl *BaseDecl = - cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl()); - - uint64_t BaseOffset = Layout.getBaseClassOffset(BaseDecl); - - if (!canPlaceRecordAtOffset(BaseDecl, Offset + BaseOffset, - /*CheckVBases=*/false)) - return false; - } - - // Check fields. - unsigned FieldNo = 0; - for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end(); - I != E; ++I, ++FieldNo) { - const FieldDecl *FD = *I; - - uint64_t FieldOffset = Layout.getFieldOffset(FieldNo); - - if (!canPlaceFieldAtOffset(FD, Offset + FieldOffset)) - return false; - } - - if (CheckVBases) { - // FIXME: virtual bases. 
- } - - return true; -} - -bool RecordLayoutBuilder::canPlaceFieldAtOffset(const FieldDecl *FD, - uint64_t Offset) const { - QualType T = FD->getType(); - if (const RecordType *RT = T->getAs<RecordType>()) { - if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl())) - return canPlaceRecordAtOffset(RD, Offset, /*CheckVBases=*/true); - } - - if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T)) { - QualType ElemTy = Context.getBaseElementType(AT); - const RecordType *RT = ElemTy->getAs<RecordType>(); - if (!RT) - return true; - const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); - if (!RD) - return true; - - const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); - - uint64_t NumElements = Context.getConstantArrayElementCount(AT); - uint64_t ElementOffset = Offset; - for (uint64_t I = 0; I != NumElements; ++I) { - if (!canPlaceRecordAtOffset(RD, ElementOffset, /*CheckVBases=*/true)) - return false; - - ElementOffset += Layout.getSize(); - } - } - - return true; -} - -void RecordLayoutBuilder::UpdateEmptyClassOffsets(const CXXRecordDecl *RD, - uint64_t Offset, - bool UpdateVBases) { - if (RD->isEmpty()) - EmptyClassOffsets.insert(std::make_pair(Offset, RD)); - - const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); - - // Update bases. - for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), - E = RD->bases_end(); I != E; ++I) { - assert(!I->getType()->isDependentType() && - "Cannot layout class with dependent bases."); - if (I->isVirtual()) - continue; - - const CXXRecordDecl *Base = - cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl()); - - uint64_t BaseClassOffset = Layout.getBaseClassOffset(Base); - UpdateEmptyClassOffsets(Base, Offset + BaseClassOffset, - /*UpdateVBases=*/false); - } - - // Update fields. 
- unsigned FieldNo = 0; - for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end(); - I != E; ++I, ++FieldNo) { - const FieldDecl *FD = *I; - - uint64_t FieldOffset = Layout.getFieldOffset(FieldNo); - UpdateEmptyClassOffsets(FD, Offset + FieldOffset); - } - - const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase(); - - if (UpdateVBases) { - // FIXME: Update virtual bases. - } else if (PrimaryBase && Layout.getPrimaryBaseWasVirtual()) { - // We always want to update the offsets of a primary virtual base. - assert(Layout.getVBaseClassOffset(PrimaryBase) == 0 && - "primary base class offset must always be 0!"); - UpdateEmptyClassOffsets(PrimaryBase, Offset, /*UpdateVBases=*/false); - } -} - -void -RecordLayoutBuilder::UpdateEmptyClassOffsets(const FieldDecl *FD, - uint64_t Offset) { - QualType T = FD->getType(); - - if (const RecordType *RT = T->getAs<RecordType>()) { - if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl())) { - UpdateEmptyClassOffsets(RD, Offset, /*UpdateVBases=*/true); - return; - } - } - - if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T)) { - QualType ElemTy = Context.getBaseElementType(AT); - const RecordType *RT = ElemTy->getAs<RecordType>(); - if (!RT) - return; - const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); - if (!RD) - return; - - const ASTRecordLayout &Info = Context.getASTRecordLayout(RD); - - uint64_t NumElements = Context.getConstantArrayElementCount(AT); - uint64_t ElementOffset = Offset; - - for (uint64_t I = 0; I != NumElements; ++I) { - UpdateEmptyClassOffsets(RD, ElementOffset, /*UpdateVBases=*/true); - ElementOffset += Info.getSize(); - } - } -} - void RecordLayoutBuilder::InitializeLayout(const Decl *D) { if (const RecordDecl *RD = dyn_cast<RecordDecl>(D)) IsUnion = RD->isUnion(); @@ -992,7 +1152,6 @@ void RecordLayoutBuilder::Layout(const CXXRecordDecl *RD) { LayoutVirtualBases(RD, RD); VisitedVirtualBases.clear(); - AddPrimaryVirtualBaseOffsets(RD, 0, 
RD); // Finally, round the size of the total struct up to the alignment of the // struct itself. @@ -1137,7 +1296,7 @@ void RecordLayoutBuilder::LayoutBitField(const FieldDecl *D) { // Check if we need to add padding to give the field the correct alignment. if (FieldSize == 0 || (FieldOffset & (FieldAlign-1)) + FieldSize > TypeSize) - FieldOffset = (FieldOffset + (FieldAlign-1)) & ~(FieldAlign-1); + FieldOffset = llvm::RoundUpToAlignment(FieldOffset, FieldAlign); // Padding members don't affect overall alignment. if (!D->getIdentifier()) @@ -1208,17 +1367,12 @@ void RecordLayoutBuilder::LayoutField(const FieldDecl *D) { // Round up the current record size to the field's alignment boundary. FieldOffset = llvm::RoundUpToAlignment(FieldOffset, FieldAlign); - if (!IsUnion) { - while (true) { - // Check if we can place the field at this offset. - if (canPlaceFieldAtOffset(D, FieldOffset)) - break; - + if (!IsUnion && EmptySubobjects) { + // Check if we can place the field at this offset. + while (!EmptySubobjects->CanPlaceFieldAtOffset(D, FieldOffset)) { // We couldn't place the field at the offset. Try again at a new offset. FieldOffset += FieldAlign; } - - UpdateEmptyClassOffsets(D, FieldOffset); } // Place this field at the current location. @@ -1261,8 +1415,6 @@ void RecordLayoutBuilder::UpdateAlignment(unsigned NewAlignment) { const CXXMethodDecl * RecordLayoutBuilder::ComputeKeyFunction(const CXXRecordDecl *RD) { - assert(RD->isDynamicClass() && "Class does not have any virtual methods!"); - // If a class isn't polymorphic it doesn't have a key function. 
if (!RD->isPolymorphic()) return 0; diff --git a/contrib/llvm/tools/clang/lib/AST/Stmt.cpp b/contrib/llvm/tools/clang/lib/AST/Stmt.cpp index 80f5695..6dbe8f4 100644 --- a/contrib/llvm/tools/clang/lib/AST/Stmt.cpp +++ b/contrib/llvm/tools/clang/lib/AST/Stmt.cpp @@ -499,14 +499,101 @@ void DeclStmt::DoDestroy(ASTContext &C) { DG.getDeclGroup().Destroy(C); } +IfStmt::IfStmt(ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond, + Stmt *then, SourceLocation EL, Stmt *elsev) + : Stmt(IfStmtClass), IfLoc(IL), ElseLoc(EL) +{ + setConditionVariable(C, var); + SubExprs[COND] = reinterpret_cast<Stmt*>(cond); + SubExprs[THEN] = then; + SubExprs[ELSE] = elsev; +} + +VarDecl *IfStmt::getConditionVariable() const { + if (!SubExprs[VAR]) + return 0; + + DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]); + return cast<VarDecl>(DS->getSingleDecl()); +} + +void IfStmt::setConditionVariable(ASTContext &C, VarDecl *V) { + if (!V) { + SubExprs[VAR] = 0; + return; + } + + SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), + V->getSourceRange().getBegin(), + V->getSourceRange().getEnd()); +} + void IfStmt::DoDestroy(ASTContext &C) { BranchDestroy(C, this, SubExprs, END_EXPR); } +ForStmt::ForStmt(ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar, + Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP, + SourceLocation RP) + : Stmt(ForStmtClass), ForLoc(FL), LParenLoc(LP), RParenLoc(RP) +{ + SubExprs[INIT] = Init; + setConditionVariable(C, condVar); + SubExprs[COND] = reinterpret_cast<Stmt*>(Cond); + SubExprs[INC] = reinterpret_cast<Stmt*>(Inc); + SubExprs[BODY] = Body; +} + +VarDecl *ForStmt::getConditionVariable() const { + if (!SubExprs[CONDVAR]) + return 0; + + DeclStmt *DS = cast<DeclStmt>(SubExprs[CONDVAR]); + return cast<VarDecl>(DS->getSingleDecl()); +} + +void ForStmt::setConditionVariable(ASTContext &C, VarDecl *V) { + if (!V) { + SubExprs[CONDVAR] = 0; + return; + } + + SubExprs[CONDVAR] = new (C) DeclStmt(DeclGroupRef(V), + V->getSourceRange().getBegin(), + 
V->getSourceRange().getEnd()); +} + void ForStmt::DoDestroy(ASTContext &C) { BranchDestroy(C, this, SubExprs, END_EXPR); } +SwitchStmt::SwitchStmt(ASTContext &C, VarDecl *Var, Expr *cond) + : Stmt(SwitchStmtClass), FirstCase(0) +{ + setConditionVariable(C, Var); + SubExprs[COND] = reinterpret_cast<Stmt*>(cond); + SubExprs[BODY] = NULL; +} + +VarDecl *SwitchStmt::getConditionVariable() const { + if (!SubExprs[VAR]) + return 0; + + DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]); + return cast<VarDecl>(DS->getSingleDecl()); +} + +void SwitchStmt::setConditionVariable(ASTContext &C, VarDecl *V) { + if (!V) { + SubExprs[VAR] = 0; + return; + } + + SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), + V->getSourceRange().getBegin(), + V->getSourceRange().getEnd()); +} + void SwitchStmt::DoDestroy(ASTContext &C) { // Destroy the SwitchCase statements in this switch. In the normal // case, this loop will merely decrement the reference counts from @@ -521,6 +608,35 @@ void SwitchStmt::DoDestroy(ASTContext &C) { BranchDestroy(C, this, SubExprs, END_EXPR); } +WhileStmt::WhileStmt(ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body, + SourceLocation WL) +: Stmt(WhileStmtClass) +{ + setConditionVariable(C, Var); + SubExprs[COND] = reinterpret_cast<Stmt*>(cond); + SubExprs[BODY] = body; + WhileLoc = WL; +} + +VarDecl *WhileStmt::getConditionVariable() const { + if (!SubExprs[VAR]) + return 0; + + DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]); + return cast<VarDecl>(DS->getSingleDecl()); +} + +void WhileStmt::setConditionVariable(ASTContext &C, VarDecl *V) { + if (!V) { + SubExprs[VAR] = 0; + return; + } + + SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), + V->getSourceRange().getBegin(), + V->getSourceRange().getEnd()); +} + void WhileStmt::DoDestroy(ASTContext &C) { BranchDestroy(C, this, SubExprs, END_EXPR); } @@ -572,26 +688,26 @@ Stmt::child_iterator LabelStmt::child_end() { return &SubStmt+1; } // IfStmt Stmt::child_iterator IfStmt::child_begin() { - return child_iterator(Var, 
&SubExprs[0]); + return &SubExprs[0]; } Stmt::child_iterator IfStmt::child_end() { - return child_iterator(0, &SubExprs[0]+END_EXPR); + return &SubExprs[0]+END_EXPR; } // SwitchStmt Stmt::child_iterator SwitchStmt::child_begin() { - return child_iterator(Var, &SubExprs[0]); + return &SubExprs[0]; } Stmt::child_iterator SwitchStmt::child_end() { - return child_iterator(0, &SubExprs[0]+END_EXPR); + return &SubExprs[0]+END_EXPR; } // WhileStmt Stmt::child_iterator WhileStmt::child_begin() { - return child_iterator(Var, &SubExprs[0]); + return &SubExprs[0]; } Stmt::child_iterator WhileStmt::child_end() { - return child_iterator(0, &SubExprs[0]+END_EXPR); + return &SubExprs[0]+END_EXPR; } // DoStmt @@ -600,10 +716,10 @@ Stmt::child_iterator DoStmt::child_end() { return &SubExprs[0]+END_EXPR; } // ForStmt Stmt::child_iterator ForStmt::child_begin() { - return child_iterator(CondVar, &SubExprs[0]); + return &SubExprs[0]; } Stmt::child_iterator ForStmt::child_end() { - return child_iterator(0, &SubExprs[0]+END_EXPR); + return &SubExprs[0]+END_EXPR; } // ObjCForCollectionStmt diff --git a/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp b/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp index 9bef49c..7043c35 100644 --- a/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp +++ b/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp @@ -682,7 +682,7 @@ void StmtPrinter::VisitUnaryOperator(UnaryOperator *Node) { bool StmtPrinter::PrintOffsetOfDesignator(Expr *E) { if (isa<UnaryOperator>(E)) { // Base case, print the type and comma. 
- OS << E->getType().getAsString() << ", "; + OS << E->getType().getAsString(Policy) << ", "; return true; } else if (ArraySubscriptExpr *ASE = dyn_cast<ArraySubscriptExpr>(E)) { PrintOffsetOfDesignator(ASE->getLHS()); @@ -706,7 +706,7 @@ void StmtPrinter::VisitUnaryOffsetOf(UnaryOperator *Node) { void StmtPrinter::VisitOffsetOfExpr(OffsetOfExpr *Node) { OS << "__builtin_offsetof("; - OS << Node->getTypeSourceInfo()->getType().getAsString() << ", "; + OS << Node->getTypeSourceInfo()->getType().getAsString(Policy) << ", "; bool PrintedSomething = false; for (unsigned i = 0, n = Node->getNumComponents(); i < n; ++i) { OffsetOfExpr::OffsetOfNode ON = Node->getComponent(i); @@ -740,7 +740,7 @@ void StmtPrinter::VisitOffsetOfExpr(OffsetOfExpr *Node) { void StmtPrinter::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *Node) { OS << (Node->isSizeOf() ? "sizeof" : "__alignof"); if (Node->isArgumentType()) - OS << "(" << Node->getArgumentType().getAsString() << ")"; + OS << "(" << Node->getArgumentType().getAsString(Policy) << ")"; else { OS << " "; PrintExpr(Node->getArgumentExpr()); @@ -802,11 +802,11 @@ void StmtPrinter::VisitExplicitCastExpr(ExplicitCastExpr *) { assert(0 && "ExplicitCastExpr is an abstract class"); } void StmtPrinter::VisitCStyleCastExpr(CStyleCastExpr *Node) { - OS << "(" << Node->getType().getAsString() << ")"; + OS << "(" << Node->getType().getAsString(Policy) << ")"; PrintExpr(Node->getSubExpr()); } void StmtPrinter::VisitCompoundLiteralExpr(CompoundLiteralExpr *Node) { - OS << "(" << Node->getType().getAsString() << ")"; + OS << "(" << Node->getType().getAsString(Policy) << ")"; PrintExpr(Node->getInitializer()); } void StmtPrinter::VisitImplicitCastExpr(ImplicitCastExpr *Node) { @@ -852,8 +852,8 @@ void StmtPrinter::VisitStmtExpr(StmtExpr *E) { void StmtPrinter::VisitTypesCompatibleExpr(TypesCompatibleExpr *Node) { OS << "__builtin_types_compatible_p("; - OS << Node->getArgType1().getAsString() << ","; - OS << Node->getArgType2().getAsString() << ")"; + 
OS << Node->getArgType1().getAsString(Policy) << ","; + OS << Node->getArgType2().getAsString(Policy) << ")"; } void StmtPrinter::VisitChooseExpr(ChooseExpr *Node) { @@ -947,7 +947,7 @@ void StmtPrinter::VisitVAArgExpr(VAArgExpr *Node) { OS << "__builtin_va_arg("; PrintExpr(Node->getSubExpr()); OS << ", "; - OS << Node->getType().getAsString(); + OS << Node->getType().getAsString(Policy); OS << ")"; } @@ -1002,7 +1002,7 @@ void StmtPrinter::VisitCXXMemberCallExpr(CXXMemberCallExpr *Node) { void StmtPrinter::VisitCXXNamedCastExpr(CXXNamedCastExpr *Node) { OS << Node->getCastName() << '<'; - OS << Node->getTypeAsWritten().getAsString() << ">("; + OS << Node->getTypeAsWritten().getAsString(Policy) << ">("; PrintExpr(Node->getSubExpr()); OS << ")"; } @@ -1026,7 +1026,7 @@ void StmtPrinter::VisitCXXConstCastExpr(CXXConstCastExpr *Node) { void StmtPrinter::VisitCXXTypeidExpr(CXXTypeidExpr *Node) { OS << "typeid("; if (Node->isTypeOperand()) { - OS << Node->getTypeOperand().getAsString(); + OS << Node->getTypeOperand().getAsString(Policy); } else { PrintExpr(Node->getExprOperand()); } @@ -1059,7 +1059,7 @@ void StmtPrinter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *Node) { } void StmtPrinter::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *Node) { - OS << Node->getType().getAsString(); + OS << Node->getType().getAsString(Policy); OS << "("; PrintExpr(Node->getSubExpr()); OS << ")"; @@ -1074,7 +1074,7 @@ void StmtPrinter::VisitCXXBindReferenceExpr(CXXBindReferenceExpr *Node) { } void StmtPrinter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *Node) { - OS << Node->getType().getAsString(); + OS << Node->getType().getAsString(Policy); OS << "("; for (CXXTemporaryObjectExpr::arg_iterator Arg = Node->arg_begin(), ArgEnd = Node->arg_end(); @@ -1086,8 +1086,8 @@ void StmtPrinter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *Node) { OS << ")"; } -void StmtPrinter::VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *Node) { - OS << Node->getType().getAsString() << "()"; 
+void StmtPrinter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *Node) { + OS << Node->getType().getAsString(Policy) << "()"; } void StmtPrinter::VisitCXXNewExpr(CXXNewExpr *E) { @@ -1177,7 +1177,7 @@ void StmtPrinter::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) { void StmtPrinter::VisitCXXUnresolvedConstructExpr( CXXUnresolvedConstructExpr *Node) { - OS << Node->getTypeAsWritten().getAsString(); + OS << Node->getTypeAsWritten().getAsString(Policy); OS << "("; for (CXXUnresolvedConstructExpr::arg_iterator Arg = Node->arg_begin(), ArgEnd = Node->arg_end(); @@ -1254,7 +1254,7 @@ static const char *getTypeTraitName(UnaryTypeTrait UTT) { void StmtPrinter::VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E) { OS << getTypeTraitName(E->getTrait()) << "(" - << E->getQueriedType().getAsString() << ")"; + << E->getQueriedType().getAsString(Policy) << ")"; } // Obj-C @@ -1265,7 +1265,7 @@ void StmtPrinter::VisitObjCStringLiteral(ObjCStringLiteral *Node) { } void StmtPrinter::VisitObjCEncodeExpr(ObjCEncodeExpr *Node) { - OS << "@encode(" << Node->getEncodedType().getAsString() << ')'; + OS << "@encode(" << Node->getEncodedType().getAsString(Policy) << ')'; } void StmtPrinter::VisitObjCSelectorExpr(ObjCSelectorExpr *Node) { diff --git a/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp b/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp index ac3a9ee..cff86a4 100644 --- a/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp +++ b/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp @@ -211,9 +211,11 @@ void StmtProfiler::VisitExpr(Expr *S) { void StmtProfiler::VisitDeclRefExpr(DeclRefExpr *S) { VisitExpr(S); - VisitNestedNameSpecifier(S->getQualifier()); + if (!Canonical) + VisitNestedNameSpecifier(S->getQualifier()); VisitDecl(S->getDecl()); - VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs()); + if (!Canonical) + VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs()); } void StmtProfiler::VisitPredefinedExpr(PredefinedExpr *S) { @@ -307,7 
+309,8 @@ void StmtProfiler::VisitCallExpr(CallExpr *S) { void StmtProfiler::VisitMemberExpr(MemberExpr *S) { VisitExpr(S); VisitDecl(S->getMemberDecl()); - VisitNestedNameSpecifier(S->getQualifier()); + if (!Canonical) + VisitNestedNameSpecifier(S->getQualifier()); ID.AddBoolean(S->isArrow()); } @@ -428,6 +431,8 @@ void StmtProfiler::VisitBlockDeclRefExpr(BlockDeclRefExpr *S) { VisitDecl(S->getDecl()); ID.AddBoolean(S->isByRef()); ID.AddBoolean(S->isConstQualAdded()); + if (S->getCopyConstructorExpr()) + Visit(S->getCopyConstructorExpr()); } static Stmt::StmtClass DecodeOperatorCall(CXXOperatorCallExpr *S, @@ -719,7 +724,7 @@ void StmtProfiler::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *S) { VisitCXXConstructExpr(S); } -void StmtProfiler::VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *S) { +void StmtProfiler::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *S) { VisitExpr(S); } diff --git a/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp b/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp index 1c775ef..02e6488 100644 --- a/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp +++ b/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp @@ -90,6 +90,33 @@ void TemplateArgument::Profile(llvm::FoldingSetNodeID &ID, } } +bool TemplateArgument::structurallyEquals(const TemplateArgument &Other) const { + if (getKind() != Other.getKind()) return false; + + switch (getKind()) { + case Null: + case Type: + case Declaration: + case Template: + case Expression: + return TypeOrValue == Other.TypeOrValue; + + case Integral: + return getIntegralType() == Other.getIntegralType() && + *getAsIntegral() == *Other.getAsIntegral(); + + case Pack: + if (Args.NumArgs != Other.Args.NumArgs) return false; + for (unsigned I = 0, E = Args.NumArgs; I != E; ++I) + if (!Args.Args[I].structurallyEquals(Other.Args.Args[I])) + return false; + return true; + } + + // Suppress warnings. 
+ return false; +} + //===----------------------------------------------------------------------===// // TemplateArgumentLoc Implementation //===----------------------------------------------------------------------===// diff --git a/contrib/llvm/tools/clang/lib/AST/TemplateName.cpp b/contrib/llvm/tools/clang/lib/AST/TemplateName.cpp index 14722f7..ef7b315 100644 --- a/contrib/llvm/tools/clang/lib/AST/TemplateName.cpp +++ b/contrib/llvm/tools/clang/lib/AST/TemplateName.cpp @@ -21,6 +21,17 @@ using namespace clang; using namespace llvm; +TemplateName::NameKind TemplateName::getKind() const { + if (Storage.is<TemplateDecl *>()) + return Template; + if (Storage.is<OverloadedTemplateStorage *>()) + return OverloadedTemplate; + if (Storage.is<QualifiedTemplateName *>()) + return QualifiedTemplate; + assert(Storage.is<DependentTemplateName *>() && "There's a case unhandled!"); + return DependentTemplate; +} + TemplateDecl *TemplateName::getAsTemplateDecl() const { if (TemplateDecl *Template = Storage.dyn_cast<TemplateDecl *>()) return Template; diff --git a/contrib/llvm/tools/clang/lib/AST/Type.cpp b/contrib/llvm/tools/clang/lib/AST/Type.cpp index 1aab65e..d7929304 100644 --- a/contrib/llvm/tools/clang/lib/AST/Type.cpp +++ b/contrib/llvm/tools/clang/lib/AST/Type.cpp @@ -439,17 +439,49 @@ bool Type::isIntegerType() const { return false; } -bool Type::isIntegralType() const { +/// \brief Determine whether this type is an integral type. +/// +/// This routine determines whether the given type is an integral type per +/// C++ [basic.fundamental]p7. Although the C standard does not define the +/// term "integral type", it has a similar term "integer type", and in C++ +/// the two terms are equivalent. However, C's "integer type" includes +/// enumeration types, while C++'s "integer type" does not. The \c ASTContext +/// parameter is used to determine whether we should be following the C or +/// C++ rules when determining whether this type is an integral/integer type. 
+/// +/// For cases where C permits "an integer type" and C++ permits "an integral +/// type", use this routine. +/// +/// For cases where C permits "an integer type" and C++ permits "an integral +/// or enumeration type", use \c isIntegralOrEnumerationType() instead. +/// +/// \param Ctx The context in which this type occurs. +/// +/// \returns true if the type is considered an integral type, false otherwise. +bool Type::isIntegralType(ASTContext &Ctx) const { if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) return BT->getKind() >= BuiltinType::Bool && BT->getKind() <= BuiltinType::Int128; - if (const TagType *TT = dyn_cast<TagType>(CanonicalType)) - if (TT->getDecl()->isEnum() && TT->getDecl()->isDefinition()) - return true; // Complete enum types are integral. - // FIXME: In C++, enum types are never integral. + + if (!Ctx.getLangOptions().CPlusPlus) + if (const TagType *TT = dyn_cast<TagType>(CanonicalType)) + if (TT->getDecl()->isEnum() && TT->getDecl()->isDefinition()) + return true; // Complete enum types are integral in C. 
+ return false; } +bool Type::isIntegralOrEnumerationType() const { + if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) + return BT->getKind() >= BuiltinType::Bool && + BT->getKind() <= BuiltinType::Int128; + + if (isa<EnumType>(CanonicalType)) + return true; + + return false; +} + bool Type::isEnumeralType() const { if (const TagType *TT = dyn_cast<TagType>(CanonicalType)) return TT->getDecl()->isEnum(); @@ -531,16 +563,19 @@ bool Type::isFloatingType() const { BT->getKind() <= BuiltinType::LongDouble; if (const ComplexType *CT = dyn_cast<ComplexType>(CanonicalType)) return CT->getElementType()->isFloatingType(); + return false; +} + +bool Type::hasFloatingRepresentation() const { if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType)) return VT->getElementType()->isFloatingType(); - return false; + else + return isFloatingType(); } bool Type::isRealFloatingType() const { if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) return BT->isFloatingPoint(); - if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType)) - return VT->getElementType()->isRealFloatingType(); return false; } @@ -550,8 +585,6 @@ bool Type::isRealType() const { BT->getKind() <= BuiltinType::LongDouble; if (const TagType *TT = dyn_cast<TagType>(CanonicalType)) return TT->getDecl()->isEnum() && TT->getDecl()->isDefinition(); - if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType)) - return VT->getElementType()->isRealType(); return false; } @@ -563,7 +596,7 @@ bool Type::isArithmeticType() const { // GCC allows forward declaration of enum types (forbid by C99 6.7.2.3p2). // If a body isn't seen by the time we get here, return false. 
return ET->getDecl()->isDefinition(); - return isa<ComplexType>(CanonicalType) || isa<VectorType>(CanonicalType); + return isa<ComplexType>(CanonicalType); } bool Type::isScalarType() const { @@ -768,6 +801,7 @@ bool Type::isSpecifierType() const { case TemplateSpecialization: case Elaborated: case DependentName: + case DependentTemplateSpecialization: case ObjCInterface: case ObjCObject: case ObjCObjectPointer: // FIXME: object pointers aren't really specifiers @@ -856,12 +890,56 @@ TypeWithKeyword::getKeywordName(ElaboratedTypeKeyword Keyword) { } } +ElaboratedType::~ElaboratedType() {} +DependentNameType::~DependentNameType() {} +DependentTemplateSpecializationType::~DependentTemplateSpecializationType() {} + +void DependentTemplateSpecializationType::Destroy(ASTContext &C) { + for (unsigned Arg = 0; Arg < NumArgs; ++Arg) { + // FIXME: Not all expressions get cloned, so we can't yet perform + // this destruction. + // if (Expr *E = getArg(Arg).getAsExpr()) + // E->Destroy(C); + } +} + +DependentTemplateSpecializationType::DependentTemplateSpecializationType( + ElaboratedTypeKeyword Keyword, + NestedNameSpecifier *NNS, const IdentifierInfo *Name, + unsigned NumArgs, const TemplateArgument *Args, + QualType Canon) + : TypeWithKeyword(Keyword, DependentTemplateSpecialization, Canon, true), + NNS(NNS), Name(Name), NumArgs(NumArgs) { + assert(NNS && NNS->isDependent() && + "DependentTemplateSpecializatonType requires dependent qualifier"); + for (unsigned I = 0; I != NumArgs; ++I) + new (&getArgBuffer()[I]) TemplateArgument(Args[I]); +} + +void +DependentTemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID, + ASTContext &Context, + ElaboratedTypeKeyword Keyword, + NestedNameSpecifier *Qualifier, + const IdentifierInfo *Name, + unsigned NumArgs, + const TemplateArgument *Args) { + ID.AddInteger(Keyword); + ID.AddPointer(Qualifier); + ID.AddPointer(Name); + for (unsigned Idx = 0; Idx < NumArgs; ++Idx) + Args[Idx].Profile(ID, Context); +} + bool 
Type::isElaboratedTypeSpecifier() const { ElaboratedTypeKeyword Keyword; if (const ElaboratedType *Elab = dyn_cast<ElaboratedType>(this)) Keyword = Elab->getKeyword(); else if (const DependentNameType *DepName = dyn_cast<DependentNameType>(this)) Keyword = DepName->getKeyword(); + else if (const DependentTemplateSpecializationType *DepTST = + dyn_cast<DependentTemplateSpecializationType>(this)) + Keyword = DepTST->getKeyword(); else return false; @@ -914,6 +992,22 @@ const char *BuiltinType::getName(const LangOptions &LO) const { void FunctionType::ANCHOR() {} // Key function for FunctionType. +QualType QualType::getNonLValueExprType(ASTContext &Context) const { + if (const ReferenceType *RefType = getTypePtr()->getAs<ReferenceType>()) + return RefType->getPointeeType(); + + // C++0x [basic.lval]: + // Class prvalues can have cv-qualified types; non-class prvalues always + // have cv-unqualified types. + // + // See also C99 6.3.2.1p2. + if (!Context.getLangOptions().CPlusPlus || + (!getTypePtr()->isDependentType() && !getTypePtr()->isRecordType())) + return getUnqualifiedType(); + + return *this; +} + llvm::StringRef FunctionType::getNameForCallConv(CallingConv CC) { switch (CC) { case CC_Default: llvm_unreachable("no name for default cc"); @@ -1085,14 +1179,12 @@ anyDependentTemplateArguments(const TemplateArgument *Args, unsigned N) { } TemplateSpecializationType:: -TemplateSpecializationType(ASTContext &Context, TemplateName T, - bool IsCurrentInstantiation, +TemplateSpecializationType(TemplateName T, const TemplateArgument *Args, unsigned NumArgs, QualType Canon) : Type(TemplateSpecialization, Canon.isNull()? 
QualType(this, 0) : Canon, T.isDependent() || anyDependentTemplateArguments(Args, NumArgs)), - ContextAndCurrentInstantiation(&Context, IsCurrentInstantiation), Template(T), NumArgs(NumArgs) { assert((!Canon.isNull() || T.isDependent() || anyDependentTemplateArguments(Args, NumArgs)) && @@ -1113,25 +1205,12 @@ void TemplateSpecializationType::Destroy(ASTContext& C) { } } -TemplateSpecializationType::iterator -TemplateSpecializationType::end() const { - return begin() + getNumArgs(); -} - -const TemplateArgument & -TemplateSpecializationType::getArg(unsigned Idx) const { - assert(Idx < getNumArgs() && "Template argument out of range"); - return getArgs()[Idx]; -} - void TemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID, TemplateName T, - bool IsCurrentInstantiation, const TemplateArgument *Args, unsigned NumArgs, ASTContext &Context) { - ID.AddBoolean(IsCurrentInstantiation); T.Profile(ID); for (unsigned Idx = 0; Idx < NumArgs; ++Idx) Args[Idx].Profile(ID, Context); diff --git a/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp b/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp index 35a7e09..a08ee1a 100644 --- a/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp +++ b/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp @@ -227,12 +227,13 @@ void TypePrinter::PrintDependentSizedExtVector( } void TypePrinter::PrintVector(const VectorType *T, std::string &S) { - if (T->isAltiVec()) { - if (T->isPixel()) + if (T->getAltiVecSpecific() != VectorType::NotAltiVec) { + if (T->getAltiVecSpecific() == VectorType::Pixel) S = "__vector __pixel " + S; else { Print(T->getElementType(), S); - S = "__vector " + S; + S = ((T->getAltiVecSpecific() == VectorType::Bool) + ? 
"__vector __bool " : "__vector ") + S; } } else { // FIXME: We prefer to print the size directly here, but have no way @@ -452,11 +453,13 @@ void TypePrinter::PrintTag(TagDecl *D, std::string &InnerString) { if (!HasKindDecoration) OS << " " << D->getKindName(); - PresumedLoc PLoc = D->getASTContext().getSourceManager().getPresumedLoc( - D->getLocation()); - OS << " at " << PLoc.getFilename() - << ':' << PLoc.getLine() - << ':' << PLoc.getColumn(); + if (D->getLocation().isValid()) { + PresumedLoc PLoc = D->getASTContext().getSourceManager().getPresumedLoc( + D->getLocation()); + OS << " at " << PLoc.getFilename() + << ':' << PLoc.getLine() + << ':' << PLoc.getColumn(); + } } OS << '>'; @@ -578,15 +581,31 @@ void TypePrinter::PrintDependentName(const DependentNameType *T, std::string &S) T->getQualifier()->print(OS, Policy); - if (const IdentifierInfo *Ident = T->getIdentifier()) - OS << Ident->getName(); - else if (const TemplateSpecializationType *Spec = T->getTemplateId()) { - Spec->getTemplateName().print(OS, Policy, true); - OS << TemplateSpecializationType::PrintTemplateArgumentList( - Spec->getArgs(), - Spec->getNumArgs(), + OS << T->getIdentifier()->getName(); + } + + if (S.empty()) + S.swap(MyString); + else + S = MyString + ' ' + S; +} + +void TypePrinter::PrintDependentTemplateSpecialization( + const DependentTemplateSpecializationType *T, std::string &S) { + std::string MyString; + { + llvm::raw_string_ostream OS(MyString); + + OS << TypeWithKeyword::getKeywordName(T->getKeyword()); + if (T->getKeyword() != ETK_None) + OS << " "; + + T->getQualifier()->print(OS, Policy); + OS << T->getIdentifier()->getName(); + OS << TemplateSpecializationType::PrintTemplateArgumentList( + T->getArgs(), + T->getNumArgs(), Policy); - } } if (S.empty()) diff --git a/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp b/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp index 6f2cb41..08543aa 100644 --- a/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp +++ 
b/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp @@ -171,8 +171,8 @@ private: void autoCreateBlock() { if (!Block) Block = createBlock(); } CFGBlock *createBlock(bool add_successor = true); bool FinishBlock(CFGBlock* B); - CFGBlock *addStmt(Stmt *S, AddStmtChoice asc = AddStmtChoice::AlwaysAdd) { - return Visit(S, asc); + CFGBlock *addStmt(Stmt *S) { + return Visit(S, AddStmtChoice::AlwaysAdd); } void AppendStmt(CFGBlock *B, Stmt *S, @@ -538,6 +538,15 @@ CFGBlock *CFGBuilder::VisitBinaryOperator(BinaryOperator *B, addStmt(B->getRHS()); return addStmt(B->getLHS()); } + else if (B->isAssignmentOp()) { + if (asc.alwaysAdd()) { + autoCreateBlock(); + AppendStmt(Block, B, asc); + } + + Visit(B->getRHS()); + return Visit(B->getLHS(), AddStmtChoice::AsLValueNotAlwaysAdd); + } return VisitStmt(B, asc); } @@ -612,8 +621,12 @@ CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, AddStmtChoice asc) { if (!CanThrow(C->getCallee())) AddEHEdge = false; - if (!NoReturn && !AddEHEdge) - return VisitStmt(C, AddStmtChoice::AlwaysAdd); + if (!NoReturn && !AddEHEdge) { + if (asc.asLValue()) + return VisitStmt(C, AddStmtChoice::AlwaysAddAsLValue); + else + return VisitStmt(C, AddStmtChoice::AlwaysAdd); + } if (Block) { Succ = Block; @@ -651,13 +664,13 @@ CFGBlock *CFGBuilder::VisitChooseExpr(ChooseExpr *C, Succ = ConfluenceBlock; Block = NULL; - CFGBlock* LHSBlock = addStmt(C->getLHS(), asc); + CFGBlock* LHSBlock = Visit(C->getLHS(), asc); if (!FinishBlock(LHSBlock)) return 0; Succ = ConfluenceBlock; Block = NULL; - CFGBlock* RHSBlock = addStmt(C->getRHS(), asc); + CFGBlock* RHSBlock = Visit(C->getRHS(), asc); if (!FinishBlock(RHSBlock)) return 0; @@ -709,7 +722,7 @@ CFGBlock *CFGBuilder::VisitConditionalOperator(ConditionalOperator *C, Block = NULL; CFGBlock* LHSBlock = NULL; if (C->getLHS()) { - LHSBlock = addStmt(C->getLHS(), asc); + LHSBlock = Visit(C->getLHS(), asc); if (!FinishBlock(LHSBlock)) return 0; Block = NULL; @@ -717,7 +730,7 @@ CFGBlock 
*CFGBuilder::VisitConditionalOperator(ConditionalOperator *C, // Create the block for the RHS expression. Succ = ConfluenceBlock; - CFGBlock* RHSBlock = addStmt(C->getRHS(), asc); + CFGBlock* RHSBlock = Visit(C->getRHS(), asc); if (!FinishBlock(RHSBlock)) return 0; diff --git a/contrib/llvm/tools/clang/lib/Analysis/CMakeLists.txt b/contrib/llvm/tools/clang/lib/Analysis/CMakeLists.txt index a8e3708..f2916c2 100644 --- a/contrib/llvm/tools/clang/lib/Analysis/CMakeLists.txt +++ b/contrib/llvm/tools/clang/lib/Analysis/CMakeLists.txt @@ -9,4 +9,5 @@ add_clang_library(clangAnalysis UninitializedValues.cpp ) -add_dependencies(clangAnalysis ClangDiagnosticAnalysis ClangStmtNodes) +add_dependencies(clangAnalysis ClangAttrClasses ClangAttrList + ClangDiagnosticAnalysis ClangDeclNodes ClangStmtNodes) diff --git a/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp b/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp index 01a36a1..4efe25e 100644 --- a/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp +++ b/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp @@ -256,17 +256,21 @@ void TransferFuncs::VisitAssign(BinaryOperator* B) { // Assigning to a variable? if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(LHS->IgnoreParens())) { + // Assignments to references don't kill the ref's address + if (DR->getDecl()->getType()->isReferenceType()) { + VisitDeclRefExpr(DR); + } else { + // Update liveness inforamtion. + unsigned bit = AD.getIdx(DR->getDecl()); + LiveState.getDeclBit(bit) = Dead | AD.AlwaysLive.getDeclBit(bit); - // Update liveness inforamtion. - unsigned bit = AD.getIdx(DR->getDecl()); - LiveState.getDeclBit(bit) = Dead | AD.AlwaysLive.getDeclBit(bit); - - if (AD.Observer) { AD.Observer->ObserverKill(DR); } + if (AD.Observer) { AD.Observer->ObserverKill(DR); } - // Handle things like +=, etc., which also generate "uses" - // of a variable. Do this just by visiting the subexpression. 
- if (B->getOpcode() != BinaryOperator::Assign) - VisitDeclRefExpr(DR); + // Handle things like +=, etc., which also generate "uses" + // of a variable. Do this just by visiting the subexpression. + if (B->getOpcode() != BinaryOperator::Assign) + VisitDeclRefExpr(DR); + } } else // Not assigning to a variable. Process LHS as usual. Visit(LHS); diff --git a/contrib/llvm/tools/clang/lib/Analysis/Makefile b/contrib/llvm/tools/clang/lib/Analysis/Makefile index 9b47380..03bf7a6 100644 --- a/contrib/llvm/tools/clang/lib/Analysis/Makefile +++ b/contrib/llvm/tools/clang/lib/Analysis/Makefile @@ -11,11 +11,9 @@ # ##===----------------------------------------------------------------------===## -LEVEL = ../../../.. +CLANG_LEVEL := ../.. LIBRARYNAME := clangAnalysis BUILD_ARCHIVE = 1 -CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include - -include $(LEVEL)/Makefile.common +include $(CLANG_LEVEL)/Makefile diff --git a/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp b/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp index 0b111e9..631fde6 100644 --- a/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp +++ b/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp @@ -14,12 +14,16 @@ #include "clang/Analysis/Analyses/PrintfFormatString.h" #include "clang/AST/ASTContext.h" +#include "clang/AST/Type.h" +#include "llvm/Support/raw_ostream.h" using clang::analyze_printf::ArgTypeResult; using clang::analyze_printf::FormatSpecifier; using clang::analyze_printf::FormatStringHandler; using clang::analyze_printf::OptionalAmount; using clang::analyze_printf::PositionContext; +using clang::analyze_printf::ConversionSpecifier; +using clang::analyze_printf::LengthModifier; using namespace clang; @@ -35,7 +39,6 @@ public: const FormatSpecifier &fs) : FS(fs), Start(start), Stop(false) {} - const char *getStart() const { return Start; } bool shouldStop() const { return Stop; } bool hasValue() const { return Start != 0; } @@ -80,7 
+83,8 @@ static OptionalAmount ParseAmount(const char *&Beg, const char *E) { } if (hasDigits) - return OptionalAmount(OptionalAmount::Constant, accumulator, Beg); + return OptionalAmount(OptionalAmount::Constant, accumulator, Beg, I - Beg, + false); break; } @@ -92,7 +96,7 @@ static OptionalAmount ParseNonPositionAmount(const char *&Beg, const char *E, unsigned &argIndex) { if (*Beg == '*') { ++Beg; - return OptionalAmount(OptionalAmount::Arg, argIndex++, Beg); + return OptionalAmount(OptionalAmount::Arg, argIndex++, Beg, 0, false); } return ParseAmount(Beg, E); @@ -120,6 +124,8 @@ static OptionalAmount ParsePositionAmount(FormatStringHandler &H, assert(Amt.getHowSpecified() == OptionalAmount::Constant); if (*I == '$') { + // Handle positional arguments + // Special case: '*0$', since this is an easy mistake. if (Amt.getConstantAmount() == 0) { H.HandleZeroPosition(Beg, I - Beg + 1); @@ -130,7 +136,7 @@ static OptionalAmount ParsePositionAmount(FormatStringHandler &H, Beg = ++I; return OptionalAmount(OptionalAmount::Arg, Amt.getConstantAmount() - 1, - Tmp); + Tmp, 0, true); } H.HandleInvalidPosition(Beg, I - Beg, p); @@ -173,7 +179,6 @@ static bool ParseFieldWidth(FormatStringHandler &H, FormatSpecifier &FS, return false; } - static bool ParseArgPosition(FormatStringHandler &H, FormatSpecifier &FS, const char *Start, const char *&Beg, const char *E) { @@ -258,11 +263,11 @@ static FormatSpecifierResult ParseFormatSpecifier(FormatStringHandler &H, for ( ; I != E; ++I) { switch (*I) { default: hasMore = false; break; - case '-': FS.setIsLeftJustified(); break; - case '+': FS.setHasPlusPrefix(); break; - case ' ': FS.setHasSpacePrefix(); break; - case '#': FS.setHasAlternativeForm(); break; - case '0': FS.setHasLeadingZeros(); break; + case '-': FS.setIsLeftJustified(I); break; + case '+': FS.setHasPlusPrefix(I); break; + case ' ': FS.setHasSpacePrefix(I); break; + case '#': FS.setHasAlternativeForm(I); break; + case '0': FS.setHasLeadingZeros(I); break; } if 
(!hasMore) break; @@ -305,24 +310,28 @@ static FormatSpecifierResult ParseFormatSpecifier(FormatStringHandler &H, } // Look for the length modifier. - LengthModifier lm = None; + LengthModifier::Kind lmKind = LengthModifier::None; + const char *lmPosition = I; switch (*I) { default: break; case 'h': ++I; - lm = (I != E && *I == 'h') ? ++I, AsChar : AsShort; + lmKind = (I != E && *I == 'h') ? + ++I, LengthModifier::AsChar : LengthModifier::AsShort; break; case 'l': ++I; - lm = (I != E && *I == 'l') ? ++I, AsLongLong : AsLong; + lmKind = (I != E && *I == 'l') ? + ++I, LengthModifier::AsLongLong : LengthModifier::AsLong; break; - case 'j': lm = AsIntMax; ++I; break; - case 'z': lm = AsSizeT; ++I; break; - case 't': lm = AsPtrDiff; ++I; break; - case 'L': lm = AsLongDouble; ++I; break; - case 'q': lm = AsLongLong; ++I; break; + case 'j': lmKind = LengthModifier::AsIntMax; ++I; break; + case 'z': lmKind = LengthModifier::AsSizeT; ++I; break; + case 't': lmKind = LengthModifier::AsPtrDiff; ++I; break; + case 'L': lmKind = LengthModifier::AsLongDouble; ++I; break; + case 'q': lmKind = LengthModifier::AsLongLong; ++I; break; } + LengthModifier lm(lmPosition, lmKind); FS.setLengthModifier(lm); if (I == E) { @@ -423,95 +432,111 @@ FormatStringHandler::~FormatStringHandler() {} //===----------------------------------------------------------------------===// bool ArgTypeResult::matchesType(ASTContext &C, QualType argTy) const { - assert(isValid()); - - if (K == UnknownTy) - return true; - - if (K == SpecificTy) { - argTy = C.getCanonicalType(argTy).getUnqualifiedType(); - - if (T == argTy) + switch (K) { + case InvalidTy: + assert(false && "ArgTypeResult must be valid"); return true; - if (const BuiltinType *BT = argTy->getAs<BuiltinType>()) - switch (BT->getKind()) { - default: - break; - case BuiltinType::Char_S: - case BuiltinType::SChar: - return T == C.UnsignedCharTy; - case BuiltinType::Char_U: - case BuiltinType::UChar: - return T == C.SignedCharTy; - case 
BuiltinType::Short: - return T == C.UnsignedShortTy; - case BuiltinType::UShort: - return T == C.ShortTy; - case BuiltinType::Int: - return T == C.UnsignedIntTy; - case BuiltinType::UInt: - return T == C.IntTy; - case BuiltinType::Long: - return T == C.UnsignedLongTy; - case BuiltinType::ULong: - return T == C.LongTy; - case BuiltinType::LongLong: - return T == C.UnsignedLongLongTy; - case BuiltinType::ULongLong: - return T == C.LongLongTy; - } - - return false; - } + case UnknownTy: + return true; - if (K == CStrTy) { - const PointerType *PT = argTy->getAs<PointerType>(); - if (!PT) + case SpecificTy: { + argTy = C.getCanonicalType(argTy).getUnqualifiedType(); + if (T == argTy) + return true; + if (const BuiltinType *BT = argTy->getAs<BuiltinType>()) + switch (BT->getKind()) { + default: + break; + case BuiltinType::Char_S: + case BuiltinType::SChar: + return T == C.UnsignedCharTy; + case BuiltinType::Char_U: + case BuiltinType::UChar: + return T == C.SignedCharTy; + case BuiltinType::Short: + return T == C.UnsignedShortTy; + case BuiltinType::UShort: + return T == C.ShortTy; + case BuiltinType::Int: + return T == C.UnsignedIntTy; + case BuiltinType::UInt: + return T == C.IntTy; + case BuiltinType::Long: + return T == C.UnsignedLongTy; + case BuiltinType::ULong: + return T == C.LongTy; + case BuiltinType::LongLong: + return T == C.UnsignedLongLongTy; + case BuiltinType::ULongLong: + return T == C.LongLongTy; + } return false; + } - QualType pointeeTy = PT->getPointeeType(); - - if (const BuiltinType *BT = pointeeTy->getAs<BuiltinType>()) - switch (BT->getKind()) { - case BuiltinType::Void: - case BuiltinType::Char_U: - case BuiltinType::UChar: - case BuiltinType::Char_S: - case BuiltinType::SChar: - return true; - default: - break; - } - - return false; - } + case CStrTy: { + const PointerType *PT = argTy->getAs<PointerType>(); + if (!PT) + return false; + QualType pointeeTy = PT->getPointeeType(); + if (const BuiltinType *BT = pointeeTy->getAs<BuiltinType>()) + 
switch (BT->getKind()) { + case BuiltinType::Void: + case BuiltinType::Char_U: + case BuiltinType::UChar: + case BuiltinType::Char_S: + case BuiltinType::SChar: + return true; + default: + break; + } - if (K == WCStrTy) { - const PointerType *PT = argTy->getAs<PointerType>(); - if (!PT) return false; + } + + case WCStrTy: { + const PointerType *PT = argTy->getAs<PointerType>(); + if (!PT) + return false; + QualType pointeeTy = + C.getCanonicalType(PT->getPointeeType()).getUnqualifiedType(); + return pointeeTy == C.getWCharType(); + } - QualType pointeeTy = - C.getCanonicalType(PT->getPointeeType()).getUnqualifiedType(); + case CPointerTy: + return argTy->getAs<PointerType>() != NULL || + argTy->getAs<ObjCObjectPointerType>() != NULL; - return pointeeTy == C.getWCharType(); + case ObjCPointerTy: + return argTy->getAs<ObjCObjectPointerType>() != NULL; } + // FIXME: Should be unreachable, but Clang is currently emitting + // a warning. return false; } QualType ArgTypeResult::getRepresentativeType(ASTContext &C) const { - assert(isValid()); - if (K == SpecificTy) - return T; - if (K == CStrTy) - return C.getPointerType(C.CharTy); - if (K == WCStrTy) - return C.getPointerType(C.getWCharType()); - if (K == ObjCPointerTy) - return C.ObjCBuiltinIdTy; + switch (K) { + case InvalidTy: + assert(false && "No representative type for Invalid ArgTypeResult"); + // Fall-through. + case UnknownTy: + return QualType(); + case SpecificTy: + return T; + case CStrTy: + return C.getPointerType(C.CharTy); + case WCStrTy: + return C.getPointerType(C.getWCharType()); + case ObjCPointerTy: + return C.ObjCBuiltinIdTy; + case CPointerTy: + return C.VoidPtrTy; + } + // FIXME: Should be unreachable, but Clang is currently emitting + // a warning. return QualType(); } @@ -524,6 +549,99 @@ ArgTypeResult OptionalAmount::getArgType(ASTContext &Ctx) const { } //===----------------------------------------------------------------------===// +// Methods on ConversionSpecifier. 
+//===----------------------------------------------------------------------===// +const char *ConversionSpecifier::toString() const { + switch (kind) { + case bArg: return "b"; + case dArg: return "d"; + case iArg: return "i"; + case oArg: return "o"; + case uArg: return "u"; + case xArg: return "x"; + case XArg: return "X"; + case fArg: return "f"; + case FArg: return "F"; + case eArg: return "e"; + case EArg: return "E"; + case gArg: return "g"; + case GArg: return "G"; + case aArg: return "a"; + case AArg: return "A"; + case IntAsCharArg: return "c"; + case CStrArg: return "s"; + case VoidPtrArg: return "p"; + case OutIntPtrArg: return "n"; + case PercentArg: return "%"; + case InvalidSpecifier: return NULL; + + // MacOS X unicode extensions. + case CArg: return "C"; + case UnicodeStrArg: return "S"; + + // Objective-C specific specifiers. + case ObjCObjArg: return "@"; + + // GlibC specific specifiers. + case PrintErrno: return "m"; + } + return NULL; +} + +//===----------------------------------------------------------------------===// +// Methods on LengthModifier. +//===----------------------------------------------------------------------===// + +const char *LengthModifier::toString() const { + switch (kind) { + case AsChar: + return "hh"; + case AsShort: + return "h"; + case AsLong: // or AsWideChar + return "l"; + case AsLongLong: + return "ll"; + case AsIntMax: + return "j"; + case AsSizeT: + return "z"; + case AsPtrDiff: + return "t"; + case AsLongDouble: + return "L"; + case None: + return ""; + } + return NULL; +} + +//===----------------------------------------------------------------------===// +// Methods on OptionalAmount. 
+//===----------------------------------------------------------------------===// + +void OptionalAmount::toString(llvm::raw_ostream &os) const { + switch (hs) { + case Invalid: + case NotSpecified: + return; + case Arg: + if (UsesDotPrefix) + os << "."; + if (usesPositionalArg()) + os << "*" << getPositionalArgIndex() << "$"; + else + os << "*"; + break; + case Constant: + if (UsesDotPrefix) + os << "."; + os << amt; + break; + } +} + +//===----------------------------------------------------------------------===// // Methods on FormatSpecifier. //===----------------------------------------------------------------------===// @@ -532,57 +650,60 @@ ArgTypeResult FormatSpecifier::getArgType(ASTContext &Ctx) const { return ArgTypeResult::Invalid(); if (CS.isIntArg()) - switch (LM) { - case AsLongDouble: + switch (LM.getKind()) { + case LengthModifier::AsLongDouble: return ArgTypeResult::Invalid(); - case None: return Ctx.IntTy; - case AsChar: return Ctx.SignedCharTy; - case AsShort: return Ctx.ShortTy; - case AsLong: return Ctx.LongTy; - case AsLongLong: return Ctx.LongLongTy; - case AsIntMax: + case LengthModifier::None: return Ctx.IntTy; + case LengthModifier::AsChar: return Ctx.SignedCharTy; + case LengthModifier::AsShort: return Ctx.ShortTy; + case LengthModifier::AsLong: return Ctx.LongTy; + case LengthModifier::AsLongLong: return Ctx.LongLongTy; + case LengthModifier::AsIntMax: // FIXME: Return unknown for now. 
return ArgTypeResult(); - case AsSizeT: return Ctx.getSizeType(); - case AsPtrDiff: return Ctx.getPointerDiffType(); + case LengthModifier::AsSizeT: return Ctx.getSizeType(); + case LengthModifier::AsPtrDiff: return Ctx.getPointerDiffType(); } if (CS.isUIntArg()) - switch (LM) { - case AsLongDouble: + switch (LM.getKind()) { + case LengthModifier::AsLongDouble: return ArgTypeResult::Invalid(); - case None: return Ctx.UnsignedIntTy; - case AsChar: return Ctx.UnsignedCharTy; - case AsShort: return Ctx.UnsignedShortTy; - case AsLong: return Ctx.UnsignedLongTy; - case AsLongLong: return Ctx.UnsignedLongLongTy; - case AsIntMax: + case LengthModifier::None: return Ctx.UnsignedIntTy; + case LengthModifier::AsChar: return Ctx.UnsignedCharTy; + case LengthModifier::AsShort: return Ctx.UnsignedShortTy; + case LengthModifier::AsLong: return Ctx.UnsignedLongTy; + case LengthModifier::AsLongLong: return Ctx.UnsignedLongLongTy; + case LengthModifier::AsIntMax: // FIXME: Return unknown for now. return ArgTypeResult(); - case AsSizeT: + case LengthModifier::AsSizeT: // FIXME: How to get the corresponding unsigned // version of size_t? return ArgTypeResult(); - case AsPtrDiff: + case LengthModifier::AsPtrDiff: // FIXME: How to get the corresponding unsigned // version of ptrdiff_t? return ArgTypeResult(); } if (CS.isDoubleArg()) { - if (LM == AsLongDouble) + if (LM.getKind() == LengthModifier::AsLongDouble) return Ctx.LongDoubleTy; return Ctx.DoubleTy; } switch (CS.getKind()) { case ConversionSpecifier::CStrArg: - return ArgTypeResult(LM == AsWideChar ? ArgTypeResult::WCStrTy : ArgTypeResult::CStrTy); + return ArgTypeResult(LM.getKind() == LengthModifier::AsWideChar ? + ArgTypeResult::WCStrTy : ArgTypeResult::CStrTy); case ConversionSpecifier::UnicodeStrArg: // FIXME: This appears to be Mac OS X specific. 
return ArgTypeResult::WCStrTy; case ConversionSpecifier::CArg: return Ctx.WCharTy; + case ConversionSpecifier::VoidPtrArg: + return ArgTypeResult::CPointerTy; default: break; } @@ -591,3 +712,329 @@ ArgTypeResult FormatSpecifier::getArgType(ASTContext &Ctx) const { return ArgTypeResult(); } +bool FormatSpecifier::fixType(QualType QT) { + // Handle strings first (char *, wchar_t *) + if (QT->isPointerType() && (QT->getPointeeType()->isAnyCharacterType())) { + CS.setKind(ConversionSpecifier::CStrArg); + + // Disable irrelevant flags + HasAlternativeForm = 0; + HasLeadingZeroes = 0; + + // Set the long length modifier for wide characters + if (QT->getPointeeType()->isWideCharType()) + LM.setKind(LengthModifier::AsWideChar); + + return true; + } + + // We can only work with builtin types. + if (!QT->isBuiltinType()) + return false; + + // Everything else should be a base type + const BuiltinType *BT = QT->getAs<BuiltinType>(); + + // Set length modifier + switch (BT->getKind()) { + default: + // The rest of the conversions are either optional or for non-builtin types + LM.setKind(LengthModifier::None); + break; + + case BuiltinType::WChar: + case BuiltinType::Long: + case BuiltinType::ULong: + LM.setKind(LengthModifier::AsLong); + break; + + case BuiltinType::LongLong: + case BuiltinType::ULongLong: + LM.setKind(LengthModifier::AsLongLong); + break; + + case BuiltinType::LongDouble: + LM.setKind(LengthModifier::AsLongDouble); + break; + } + + // Set conversion specifier and disable any flags which do not apply to it. 
+ if (QT->isAnyCharacterType()) { + CS.setKind(ConversionSpecifier::IntAsCharArg); + Precision.setHowSpecified(OptionalAmount::NotSpecified); + HasAlternativeForm = 0; + HasLeadingZeroes = 0; + HasPlusPrefix = 0; + } + // Test for Floating type first as LongDouble can pass isUnsignedIntegerType + else if (QT->isRealFloatingType()) { + CS.setKind(ConversionSpecifier::fArg); + } + else if (QT->isPointerType()) { + CS.setKind(ConversionSpecifier::VoidPtrArg); + Precision.setHowSpecified(OptionalAmount::NotSpecified); + HasAlternativeForm = 0; + HasLeadingZeroes = 0; + HasPlusPrefix = 0; + } + else if (QT->isSignedIntegerType()) { + CS.setKind(ConversionSpecifier::dArg); + HasAlternativeForm = 0; + } + else if (QT->isUnsignedIntegerType()) { + CS.setKind(ConversionSpecifier::uArg); + HasAlternativeForm = 0; + HasPlusPrefix = 0; + } + else { + return false; + } + + return true; +} + +void FormatSpecifier::toString(llvm::raw_ostream &os) const { + // Whilst some features have no defined order, we are using the order + // appearing in the C99 standard (ISO/IEC 9899:1999 (E) §7.19.6.1) + os << "%"; + + // Positional args + if (usesPositionalArg()) { + os << getPositionalArgIndex() << "$"; + } + + // Conversion flags + if (IsLeftJustified) os << "-"; + if (HasPlusPrefix) os << "+"; + if (HasSpacePrefix) os << " "; + if (HasAlternativeForm) os << "#"; + if (HasLeadingZeroes) os << "0"; + + // Minimum field width + FieldWidth.toString(os); + // Precision + Precision.toString(os); + // Length modifier + os << LM.toString(); + // Conversion specifier + os << CS.toString(); +} + +bool FormatSpecifier::hasValidPlusPrefix() const { + if (!HasPlusPrefix) + return true; + + // The plus prefix only makes sense for signed conversions + switch (CS.getKind()) { + case ConversionSpecifier::dArg: + case ConversionSpecifier::iArg: + case ConversionSpecifier::fArg: + case ConversionSpecifier::FArg: + case ConversionSpecifier::eArg: + case ConversionSpecifier::EArg: + case 
ConversionSpecifier::gArg: + case ConversionSpecifier::GArg: + case ConversionSpecifier::aArg: + case ConversionSpecifier::AArg: + return true; + + default: + return false; + } +} + +bool FormatSpecifier::hasValidAlternativeForm() const { + if (!HasAlternativeForm) + return true; + + // Alternate form flag only valid with the oxaAeEfFgG conversions + switch (CS.getKind()) { + case ConversionSpecifier::oArg: + case ConversionSpecifier::xArg: + case ConversionSpecifier::aArg: + case ConversionSpecifier::AArg: + case ConversionSpecifier::eArg: + case ConversionSpecifier::EArg: + case ConversionSpecifier::fArg: + case ConversionSpecifier::FArg: + case ConversionSpecifier::gArg: + case ConversionSpecifier::GArg: + return true; + + default: + return false; + } +} + +bool FormatSpecifier::hasValidLeadingZeros() const { + if (!HasLeadingZeroes) + return true; + + // Leading zeroes flag only valid with the diouxXaAeEfFgG conversions + switch (CS.getKind()) { + case ConversionSpecifier::dArg: + case ConversionSpecifier::iArg: + case ConversionSpecifier::oArg: + case ConversionSpecifier::uArg: + case ConversionSpecifier::xArg: + case ConversionSpecifier::XArg: + case ConversionSpecifier::aArg: + case ConversionSpecifier::AArg: + case ConversionSpecifier::eArg: + case ConversionSpecifier::EArg: + case ConversionSpecifier::fArg: + case ConversionSpecifier::FArg: + case ConversionSpecifier::gArg: + case ConversionSpecifier::GArg: + return true; + + default: + return false; + } +} + +bool FormatSpecifier::hasValidSpacePrefix() const { + if (!HasSpacePrefix) + return true; + + // The space prefix only makes sense for signed conversions + switch (CS.getKind()) { + case ConversionSpecifier::dArg: + case ConversionSpecifier::iArg: + case ConversionSpecifier::fArg: + case ConversionSpecifier::FArg: + case ConversionSpecifier::eArg: + case ConversionSpecifier::EArg: + case ConversionSpecifier::gArg: + case ConversionSpecifier::GArg: + case ConversionSpecifier::aArg: + case 
ConversionSpecifier::AArg: + return true; + + default: + return false; + } +} + +bool FormatSpecifier::hasValidLeftJustified() const { + if (!IsLeftJustified) + return true; + + // The left justified flag is valid for all conversions except n + switch (CS.getKind()) { + case ConversionSpecifier::OutIntPtrArg: + return false; + + default: + return true; + } +} + +bool FormatSpecifier::hasValidLengthModifier() const { + switch (LM.getKind()) { + case LengthModifier::None: + return true; + + // Handle most integer flags + case LengthModifier::AsChar: + case LengthModifier::AsShort: + case LengthModifier::AsLongLong: + case LengthModifier::AsIntMax: + case LengthModifier::AsSizeT: + case LengthModifier::AsPtrDiff: + switch (CS.getKind()) { + case ConversionSpecifier::dArg: + case ConversionSpecifier::iArg: + case ConversionSpecifier::oArg: + case ConversionSpecifier::uArg: + case ConversionSpecifier::xArg: + case ConversionSpecifier::XArg: + case ConversionSpecifier::OutIntPtrArg: + return true; + default: + return false; + } + + // Handle 'l' flag + case LengthModifier::AsLong: + switch (CS.getKind()) { + case ConversionSpecifier::dArg: + case ConversionSpecifier::iArg: + case ConversionSpecifier::oArg: + case ConversionSpecifier::uArg: + case ConversionSpecifier::xArg: + case ConversionSpecifier::XArg: + case ConversionSpecifier::aArg: + case ConversionSpecifier::AArg: + case ConversionSpecifier::fArg: + case ConversionSpecifier::FArg: + case ConversionSpecifier::eArg: + case ConversionSpecifier::EArg: + case ConversionSpecifier::gArg: + case ConversionSpecifier::GArg: + case ConversionSpecifier::OutIntPtrArg: + case ConversionSpecifier::IntAsCharArg: + case ConversionSpecifier::CStrArg: + return true; + default: + return false; + } + + case LengthModifier::AsLongDouble: + switch (CS.getKind()) { + case ConversionSpecifier::aArg: + case ConversionSpecifier::AArg: + case ConversionSpecifier::fArg: + case ConversionSpecifier::FArg: + case ConversionSpecifier::eArg: + 
case ConversionSpecifier::EArg: + case ConversionSpecifier::gArg: + case ConversionSpecifier::GArg: + return true; + default: + return false; + } + } + return false; +} + +bool FormatSpecifier::hasValidPrecision() const { + if (Precision.getHowSpecified() == OptionalAmount::NotSpecified) + return true; + + // Precision is only valid with the diouxXaAeEfFgGs conversions + switch (CS.getKind()) { + case ConversionSpecifier::dArg: + case ConversionSpecifier::iArg: + case ConversionSpecifier::oArg: + case ConversionSpecifier::uArg: + case ConversionSpecifier::xArg: + case ConversionSpecifier::XArg: + case ConversionSpecifier::aArg: + case ConversionSpecifier::AArg: + case ConversionSpecifier::eArg: + case ConversionSpecifier::EArg: + case ConversionSpecifier::fArg: + case ConversionSpecifier::FArg: + case ConversionSpecifier::gArg: + case ConversionSpecifier::GArg: + case ConversionSpecifier::CStrArg: + return true; + + default: + return false; + } +} +bool FormatSpecifier::hasValidFieldWidth() const { + if (FieldWidth.getHowSpecified() == OptionalAmount::NotSpecified) + return true; + + // The field width is valid for all conversions except n + switch (CS.getKind()) { + case ConversionSpecifier::OutIntPtrArg: + return false; + + default: + return true; + } +} diff --git a/contrib/llvm/tools/clang/lib/Basic/CMakeLists.txt b/contrib/llvm/tools/clang/lib/Basic/CMakeLists.txt index 1a89acc..87bf834 100644 --- a/contrib/llvm/tools/clang/lib/Basic/CMakeLists.txt +++ b/contrib/llvm/tools/clang/lib/Basic/CMakeLists.txt @@ -25,6 +25,8 @@ if (Subversion_FOUND AND EXISTS "${CLANG_SOURCE_DIR}/.svn") endif() add_dependencies(clangBasic + ClangARMNeon + ClangAttrList ClangDiagnosticAnalysis ClangDiagnosticAST ClangDiagnosticCommon @@ -34,3 +36,4 @@ add_dependencies(clangBasic ClangDiagnosticLex ClangDiagnosticParse ClangDiagnosticSema) + diff --git a/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp b/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp index 2fd985f..641d87b 100644 
--- a/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp +++ b/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp @@ -250,6 +250,7 @@ Diagnostic::Diagnostic(DiagnosticClient *client) : Client(client) { ErrorsAsFatal = false; SuppressSystemWarnings = false; SuppressAllDiagnostics = false; + ShowOverloads = Ovl_All; ExtBehavior = Ext_Ignore; ErrorOccurred = false; @@ -1042,8 +1043,7 @@ StoredDiagnostic::StoredDiagnostic(Diagnostic::Level Level, StoredDiagnostic::StoredDiagnostic(Diagnostic::Level Level, const DiagnosticInfo &Info) - : Level(Level), Loc(Info.getLocation()) -{ + : Level(Level), Loc(Info.getLocation()) { llvm::SmallString<64> Message; Info.FormatDiagnostic(Message); this->Message.assign(Message.begin(), Message.end()); @@ -1130,6 +1130,7 @@ void StoredDiagnostic::Serialize(llvm::raw_ostream &OS) const { WriteSourceLocation(OS, SM, R->getBegin()); WriteSourceLocation(OS, SM, R->getEnd()); + WriteUnsigned(OS, R->isTokenRange()); } } @@ -1158,6 +1159,7 @@ void StoredDiagnostic::Serialize(llvm::raw_ostream &OS) const { for (fixit_iterator F = fixit_begin(), FEnd = fixit_end(); F != FEnd; ++F) { WriteSourceLocation(OS, SM, F->RemoveRange.getBegin()); WriteSourceLocation(OS, SM, F->RemoveRange.getEnd()); + WriteUnsigned(OS, F->RemoveRange.isTokenRange()); WriteSourceLocation(OS, SM, F->InsertionLoc); WriteString(OS, F->CodeToInsert); } @@ -1271,11 +1273,14 @@ StoredDiagnostic::Deserialize(FileManager &FM, SourceManager &SM, return Diag; for (unsigned I = 0; I != NumSourceRanges; ++I) { SourceLocation Begin, End; + unsigned IsTokenRange; if (ReadSourceLocation(FM, SM, Memory, MemoryEnd, Begin) || - ReadSourceLocation(FM, SM, Memory, MemoryEnd, End)) + ReadSourceLocation(FM, SM, Memory, MemoryEnd, End) || + ReadUnsigned(Memory, MemoryEnd, IsTokenRange)) return Diag; - Diag.Ranges.push_back(SourceRange(Begin, End)); + Diag.Ranges.push_back(CharSourceRange(SourceRange(Begin, End), + IsTokenRange)); } // Read the fix-it hints. 
@@ -1284,9 +1289,10 @@ StoredDiagnostic::Deserialize(FileManager &FM, SourceManager &SM, return Diag; for (unsigned I = 0; I != NumFixIts; ++I) { SourceLocation RemoveBegin, RemoveEnd, InsertionLoc; - unsigned InsertLen = 0; + unsigned InsertLen = 0, RemoveIsTokenRange; if (ReadSourceLocation(FM, SM, Memory, MemoryEnd, RemoveBegin) || ReadSourceLocation(FM, SM, Memory, MemoryEnd, RemoveEnd) || + ReadUnsigned(Memory, MemoryEnd, RemoveIsTokenRange) || ReadSourceLocation(FM, SM, Memory, MemoryEnd, InsertionLoc) || ReadUnsigned(Memory, MemoryEnd, InsertLen) || Memory + InsertLen > MemoryEnd) { @@ -1295,7 +1301,8 @@ StoredDiagnostic::Deserialize(FileManager &FM, SourceManager &SM, } FixItHint Hint; - Hint.RemoveRange = SourceRange(RemoveBegin, RemoveEnd); + Hint.RemoveRange = CharSourceRange(SourceRange(RemoveBegin, RemoveEnd), + RemoveIsTokenRange); Hint.InsertionLoc = InsertionLoc; Hint.CodeToInsert.assign(Memory, Memory + InsertLen); Memory += InsertLen; diff --git a/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp b/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp index c4296c3..3c91a0f 100644 --- a/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp +++ b/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp @@ -331,8 +331,8 @@ const FileEntry *FileManager::getFile(const char *NameStart, } const FileEntry * -FileManager::getVirtualFile(const llvm::StringRef &Filename, - off_t Size, time_t ModificationTime) { +FileManager::getVirtualFile(llvm::StringRef Filename, off_t Size, + time_t ModificationTime) { const char *NameStart = Filename.begin(), *NameEnd = Filename.end(); ++NumFileLookups; diff --git a/contrib/llvm/tools/clang/lib/Basic/Makefile b/contrib/llvm/tools/clang/lib/Basic/Makefile index 58ac7eb..51b8ac1 100644 --- a/contrib/llvm/tools/clang/lib/Basic/Makefile +++ b/contrib/llvm/tools/clang/lib/Basic/Makefile @@ -11,16 +11,11 @@ # ##===----------------------------------------------------------------------===## -LEVEL = ../../../.. +CLANG_LEVEL := ../.. 
LIBRARYNAME := clangBasic BUILD_ARCHIVE = 1 -CPPFLAGS += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include -ifdef CLANG_VENDOR -CPPFLAGS += -DCLANG_VENDOR='"$(CLANG_VENDOR) "' -endif - -include $(LEVEL)/Makefile.common +include $(CLANG_LEVEL)/Makefile SVN_REVISION := $(shell $(LLVM_SRC_ROOT)/utils/GetSourceVersion $(PROJ_SRC_DIR)/../..) diff --git a/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp b/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp index 6692e64..7fcf372 100644 --- a/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp +++ b/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp @@ -34,6 +34,8 @@ TargetInfo::TargetInfo(const std::string &T) : Triple(T) { DoubleAlign = 64; LongDoubleWidth = 64; LongDoubleAlign = 64; + LargeArrayMinWidth = 0; + LargeArrayAlign = 0; SizeType = UnsignedLong; PtrDiffType = SignedLong; IntMaxType = SignedLongLong; @@ -53,6 +55,9 @@ TargetInfo::TargetInfo(const std::string &T) : Triple(T) { "i64:64:64-f32:32:32-f64:64:64-n32"; UserLabelPrefix = "_"; HasAlignMac68kSupport = false; + + // Default to no types using fpret. + RealTypeUsesObjCFPRet = 0; } // Out of line virtual dtor for TargetInfo. @@ -282,6 +287,8 @@ bool TargetInfo::validateOutputConstraint(ConstraintInfo &Info) const { Info.setAllowsRegister(); Info.setAllowsMemory(); break; + case ',': // FIXME: Until we handle multiple alternative constraints, + return true; // ignore everything after the first comma. } Name++; @@ -375,6 +382,8 @@ bool TargetInfo::validateInputConstraint(ConstraintInfo *OutputConstraints, Info.setAllowsRegister(); Info.setAllowsMemory(); break; + case ',': // FIXME: Until we handle multiple alternative constraints, + return true; // ignore everything after the first comma. 
} Name++; diff --git a/contrib/llvm/tools/clang/lib/Basic/Targets.cpp b/contrib/llvm/tools/clang/lib/Basic/Targets.cpp index 92fd417..fdf63e7 100644 --- a/contrib/llvm/tools/clang/lib/Basic/Targets.cpp +++ b/contrib/llvm/tools/clang/lib/Basic/Targets.cpp @@ -150,7 +150,7 @@ protected: public: DarwinTargetInfo(const std::string& triple) : OSTargetInfo<Target>(triple) { - this->TLSSupported = false; + this->TLSSupported = llvm::Triple(triple).getDarwinMajorNumber() > 10; } virtual std::string isValidSectionSpecifier(llvm::StringRef SR) const { @@ -160,6 +160,12 @@ public: return llvm::MCSectionMachO::ParseSectionSpecifier(SR, Segment, Section, TAA, StubSize); } + + virtual const char *getStaticInitSectionSpecifier() const { + // FIXME: We should return 0 when building kexts. + return "__TEXT,__StaticInit,regular,pure_instructions"; + } + }; @@ -206,6 +212,30 @@ public: } }; +// Minix Target +template<typename Target> +class MinixTargetInfo : public OSTargetInfo<Target> { +protected: + virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, + MacroBuilder &Builder) const { + // Minix defines + + Builder.defineMacro("__minix", "3"); + Builder.defineMacro("_EM_WSIZE", "4"); + Builder.defineMacro("_EM_PSIZE", "4"); + Builder.defineMacro("_EM_SSIZE", "2"); + Builder.defineMacro("_EM_LSIZE", "4"); + Builder.defineMacro("_EM_FSIZE", "4"); + Builder.defineMacro("_EM_DSIZE", "8"); + DefineStd(Builder, "unix", Opts); + } +public: + MinixTargetInfo(const std::string &triple) + : OSTargetInfo<Target>(triple) { + this->UserLabelPrefix = ""; + } +}; + // Linux target template<typename Target> class LinuxTargetInfo : public OSTargetInfo<Target> { @@ -299,13 +329,20 @@ protected: Builder.defineMacro("__CELLOS_LV2__"); Builder.defineMacro("__ELF__"); Builder.defineMacro("__LP32__"); + Builder.defineMacro("_ARCH_PPC64"); + Builder.defineMacro("__powerpc64__"); } public: PS3PPUTargetInfo(const std::string& triple) : OSTargetInfo<Target>(triple) { 
this->UserLabelPrefix = ""; this->LongWidth = this->LongAlign = this->PointerWidth = this->PointerAlign = 32; + this->IntMaxType = TargetInfo::SignedLongLong; + this->UIntMaxType = TargetInfo::UnsignedLongLong; + this->Int64Type = TargetInfo::SignedLongLong; this->SizeType = TargetInfo::UnsignedInt; + this->DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-" + "i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32"; } }; @@ -413,12 +450,98 @@ public: switch (*Name) { default: return false; case 'O': // Zero - return true; + break; case 'b': // Base register case 'f': // Floating point register Info.setAllowsRegister(); - return true; + break; + // FIXME: The following are added to allow parsing. + // I just took a guess at what the actions should be. + // Also, is more specific checking needed? I.e. specific registers? + case 'd': // Floating point register (containing 64-bit value) + case 'v': // Altivec vector register + Info.setAllowsRegister(); + break; + case 'w': + switch (Name[1]) { + case 'd':// VSX vector register to hold vector double data + case 'f':// VSX vector register to hold vector float data + case 's':// VSX vector register to hold scalar float data + case 'a':// Any VSX register + break; + default: + return false; + } + Info.setAllowsRegister(); + Name++; // Skip over 'w'. 
+ break; + case 'h': // `MQ', `CTR', or `LINK' register + case 'q': // `MQ' register + case 'c': // `CTR' register + case 'l': // `LINK' register + case 'x': // `CR' register (condition register) number 0 + case 'y': // `CR' register (condition register) + case 'z': // `XER[CA]' carry bit (part of the XER register) + Info.setAllowsRegister(); + break; + case 'I': // Signed 16-bit constant + case 'J': // Unsigned 16-bit constant shifted left 16 bits + // (use `L' instead for SImode constants) + case 'K': // Unsigned 16-bit constant + case 'L': // Signed 16-bit constant shifted left 16 bits + case 'M': // Constant larger than 31 + case 'N': // Exact power of 2 + case 'P': // Constant whose negation is a signed 16-bit constant + case 'G': // Floating point constant that can be loaded into a + // register with one instruction per word + case 'H': // Integer/Floating point constant that can be loaded + // into a register using three instructions + break; + case 'm': // Memory operand. Note that on PowerPC targets, m can + // include addresses that update the base register. It + // is therefore only safe to use `m' in an asm statement + // if that asm statement accesses the operand exactly once. + // The asm statement must also use `%U<opno>' as a + // placeholder for the �update� flag in the corresponding + // load or store instruction. For example: + // asm ("st%U0 %1,%0" : "=m" (mem) : "r" (val)); + // is correct but: + // asm ("st %1,%0" : "=m" (mem) : "r" (val)); + // is not. Use es rather than m if you don't want the base + // register to be updated. + case 'e': + if (Name[1] != 's') + return false; + // es: A �stable� memory operand; that is, one which does not + // include any automodification of the base register. Unlike + // `m', this constraint can be used in asm statements that + // might access the operand several times, or that might not + // access it at all. + Info.setAllowsMemory(); + Name++; // Skip over 'e'. 
+ break; + case 'Q': // Memory operand that is an offset from a register (it is + // usually better to use `m' or `es' in asm statements) + case 'Z': // Memory operand that is an indexed or indirect from a + // register (it is usually better to use `m' or `es' in + // asm statements) + Info.setAllowsMemory(); + Info.setAllowsRegister(); + break; + case 'R': // AIX TOC entry + case 'a': // Address operand that is an indexed or indirect from a + // register (`p' is preferable for asm statements) + case 'S': // Constant suitable as a 64-bit mask operand + case 'T': // Constant suitable as a 32-bit mask operand + case 'U': // System V Release 4 small data area reference + case 't': // AND masks that can be performed by two rldic{l, r} + // instructions + case 'W': // Vector constant that does not require memory + case 'j': // Vector constant that is all zeros. + break; + // End FIXME. } + return true; } virtual const char *getClobbers() const { return ""; @@ -600,6 +723,27 @@ public: }; } // end anonymous namespace. + +namespace { +class DarwinPPCTargetInfo : + public DarwinTargetInfo<PPCTargetInfo> { +public: + DarwinPPCTargetInfo(const std::string& triple) + : DarwinTargetInfo<PPCTargetInfo>(triple) { + HasAlignMac68kSupport = true; + } +}; + +class DarwinPPC64TargetInfo : + public DarwinTargetInfo<PPC64TargetInfo> { +public: + DarwinPPC64TargetInfo(const std::string& triple) + : DarwinTargetInfo<PPC64TargetInfo>(triple) { + HasAlignMac68kSupport = true; + } +}; +} // end anonymous namespace. + namespace { // MBlaze abstract base class class MBlazeTargetInfo : public TargetInfo { @@ -1101,6 +1245,11 @@ public: PtrDiffType = SignedInt; IntPtrType = SignedInt; RegParmMax = 3; + + // Use fpret for all types. 
+ RealTypeUsesObjCFPRet = ((1 << TargetInfo::Float) | + (1 << TargetInfo::Double) | + (1 << TargetInfo::LongDouble)); } virtual const char *getVAListDeclaration() const { return "typedef char* __builtin_va_list;"; @@ -1257,6 +1406,8 @@ public: LongWidth = LongAlign = PointerWidth = PointerAlign = 64; LongDoubleWidth = 128; LongDoubleAlign = 128; + LargeArrayMinWidth = 128; + LargeArrayAlign = 128; IntMaxType = SignedLong; UIntMaxType = UnsignedLong; Int64Type = SignedLong; @@ -1265,6 +1416,9 @@ public: DescriptionString = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-" "i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-" "a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"; + + // Use fpret only for long double. + RealTypeUsesObjCFPRet = (1 << TargetInfo::LongDouble); } virtual const char *getVAListDeclaration() const { return "typedef struct __va_list_tag {" @@ -2294,6 +2448,8 @@ static TargetInfo *AllocateTarget(const std::string &T) { case llvm::Triple::arm: case llvm::Triple::thumb: switch (os) { + case llvm::Triple::Linux: + return new LinuxTargetInfo<ARMTargetInfo>(T); case llvm::Triple::Darwin: return new DarwinARMTargetInfo(T); case llvm::Triple::FreeBSD: @@ -2327,14 +2483,14 @@ static TargetInfo *AllocateTarget(const std::string &T) { case llvm::Triple::ppc: if (os == llvm::Triple::Darwin) - return new DarwinTargetInfo<PPCTargetInfo>(T); + return new DarwinPPCTargetInfo(T); else if (os == llvm::Triple::FreeBSD) return new FreeBSDTargetInfo<PPC32TargetInfo>(T); return new PPC32TargetInfo(T); case llvm::Triple::ppc64: if (os == llvm::Triple::Darwin) - return new DarwinTargetInfo<PPC64TargetInfo>(T); + return new DarwinPPC64TargetInfo(T); else if (os == llvm::Triple::Lv2) return new PS3PPUTargetInfo<PPC64TargetInfo>(T); else if (os == llvm::Triple::FreeBSD) @@ -2377,6 +2533,8 @@ static TargetInfo *AllocateTarget(const std::string &T) { return new OpenBSDI386TargetInfo(T); case llvm::Triple::FreeBSD: return new FreeBSDTargetInfo<X86_32TargetInfo>(T); + case 
llvm::Triple::Minix: + return new MinixTargetInfo<X86_32TargetInfo>(T); case llvm::Triple::Solaris: return new SolarisTargetInfo<X86_32TargetInfo>(T); case llvm::Triple::Cygwin: @@ -2444,6 +2602,12 @@ TargetInfo *TargetInfo::CreateTargetInfo(Diagnostic &Diags, return 0; } + // Set the target C++ ABI. + if (!Target->setCXXABI(Opts.CXXABI)) { + Diags.Report(diag::err_target_unknown_cxxabi) << Opts.CXXABI; + return 0; + } + // Compute the default target features, we need the target to handle this // because features may have dependencies on one another. llvm::StringMap<bool> Features; diff --git a/contrib/llvm/tools/clang/lib/Frontend/AnalysisConsumer.cpp b/contrib/llvm/tools/clang/lib/Checker/AnalysisConsumer.cpp index 6a47279..524f37e 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/AnalysisConsumer.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/AnalysisConsumer.cpp @@ -11,7 +11,7 @@ // //===----------------------------------------------------------------------===// -#include "clang/Frontend/AnalysisConsumer.h" +#include "clang/Checker/AnalysisConsumer.h" #include "clang/AST/ASTConsumer.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" @@ -27,9 +27,11 @@ #include "clang/Checker/BugReporter/BugReporter.h" #include "clang/Checker/PathSensitive/GRExprEngine.h" #include "clang/Checker/PathSensitive/GRTransferFuncs.h" +#include "clang/Checker/PathDiagnosticClients.h" +#include "GRExprEngineExperimentalChecks.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/SourceManager.h" -#include "clang/Frontend/PathDiagnosticClients.h" +#include "clang/Frontend/AnalyzerOptions.h" #include "clang/Lex/Preprocessor.h" #include "llvm/Support/raw_ostream.h" #include "llvm/System/Path.h" @@ -79,8 +81,6 @@ public: const Preprocessor &PP; const std::string OutDir; AnalyzerOptions Opts; - bool declDisplayed; - // PD is owned by AnalysisManager. 
PathDiagnosticClient *PD; @@ -94,7 +94,7 @@ public: const std::string& outdir, const AnalyzerOptions& opts) : Ctx(0), PP(pp), OutDir(outdir), - Opts(opts), declDisplayed(false), PD(0) { + Opts(opts), PD(0) { DigestAnalyzerOptions(); } @@ -137,10 +137,9 @@ public: } void DisplayFunction(const Decl *D) { - if (!Opts.AnalyzerDisplayProgress || declDisplayed) + if (!Opts.AnalyzerDisplayProgress) return; - declDisplayed = true; SourceManager &SM = Mgr->getASTContext().getSourceManager(); PresumedLoc Loc = SM.getPresumedLoc(D->getLocation()); llvm::errs() << "ANALYZE: " << Loc.getFilename(); @@ -181,7 +180,7 @@ public: } virtual void HandleTranslationUnit(ASTContext &C); - void HandleCode(Decl *D, Stmt* Body, Actions& actions); + void HandleCode(Decl *D, Actions& actions); }; } // end anonymous namespace @@ -209,7 +208,8 @@ void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) { if (!Opts.AnalyzeSpecificFunction.empty() && FD->getDeclName().getAsString() != Opts.AnalyzeSpecificFunction) break; - HandleCode(FD, FD->getBody(), FunctionActions); + DisplayFunction(FD); + HandleCode(FD, FunctionActions); } break; } @@ -221,14 +221,15 @@ void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) { if (!Opts.AnalyzeSpecificFunction.empty() && Opts.AnalyzeSpecificFunction != MD->getSelector().getAsString()) break; - HandleCode(MD, MD->getBody(), ObjCMethodActions); + DisplayFunction(MD); + HandleCode(MD, ObjCMethodActions); } break; } case Decl::ObjCImplementation: { ObjCImplementationDecl* ID = cast<ObjCImplementationDecl>(*I); - HandleCode(ID, 0, ObjCImplementationActions); + HandleCode(ID, ObjCImplementationActions); for (ObjCImplementationDecl::method_iterator MI = ID->meth_begin(), ME = ID->meth_end(); MI != ME; ++MI) { @@ -236,7 +237,7 @@ void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) { if (!Opts.AnalyzeSpecificFunction.empty() && Opts.AnalyzeSpecificFunction != (*MI)->getSelector().getAsString()) break; - HandleCode(*MI, (*MI)->getBody(), 
ObjCMethodActions); + HandleCode(*MI, ObjCMethodActions); } } break; @@ -269,7 +270,7 @@ static void FindBlocks(DeclContext *D, llvm::SmallVectorImpl<Decl*> &WL) { FindBlocks(DC, WL); } -void AnalysisConsumer::HandleCode(Decl *D, Stmt* Body, Actions& actions) { +void AnalysisConsumer::HandleCode(Decl *D, Actions& actions) { // Don't run the actions if an error has occured with parsing the file. Diagnostic &Diags = PP.getDiagnostics(); @@ -278,8 +279,9 @@ void AnalysisConsumer::HandleCode(Decl *D, Stmt* Body, Actions& actions) { // Don't run the actions on declarations in header files unless // otherwise specified. - if (!Opts.AnalyzeAll && - !Ctx->getSourceManager().isFromMainFile(D->getLocation())) + SourceManager &SM = Ctx->getSourceManager(); + SourceLocation SL = SM.getInstantiationLoc(D->getLocation()); + if (!Opts.AnalyzeAll && !SM.isFromMainFile(SL)) return; // Clear the AnalysisManager of old AnalysisContexts. @@ -289,7 +291,7 @@ void AnalysisConsumer::HandleCode(Decl *D, Stmt* Body, Actions& actions) { llvm::SmallVector<Decl*, 10> WL; WL.push_back(D); - if (Body && Opts.AnalyzeNestedBlocks) + if (D->hasBody() && Opts.AnalyzeNestedBlocks) FindBlocks(cast<DeclContext>(D), WL); for (Actions::iterator I = actions.begin(), E = actions.end(); I != E; ++I) @@ -339,6 +341,9 @@ static void ActionGRExprEngine(AnalysisConsumer &C, AnalysisManager& mgr, if (C.Opts.EnableExperimentalChecks) RegisterExperimentalChecks(Eng); + if (C.Opts.EnableIdempotentOperationChecker) + RegisterIdempotentOperationChecker(Eng); + // Set the graph auditor. 
llvm::OwningPtr<ExplodedNode::Auditor> Auditor; if (mgr.shouldVisualizeUbigraph()) { diff --git a/contrib/llvm/tools/clang/lib/Checker/AttrNonNullChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/AttrNonNullChecker.cpp index 309a74c..d0bccb2 100644 --- a/contrib/llvm/tools/clang/lib/Checker/AttrNonNullChecker.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/AttrNonNullChecker.cpp @@ -60,9 +60,10 @@ void AttrNonNullChecker::PreVisitCallExpr(CheckerContext &C, if (!Att->isNonNull(idx)) continue; - const SVal &V = state->getSVal(*I); - const DefinedSVal *DV = dyn_cast<DefinedSVal>(&V); + SVal V = state->getSVal(*I); + DefinedSVal *DV = dyn_cast<DefinedSVal>(&V); + // If the value is unknown or undefined, we can't perform this check. if (!DV) continue; diff --git a/contrib/llvm/tools/clang/lib/Checker/BasicConstraintManager.cpp b/contrib/llvm/tools/clang/lib/Checker/BasicConstraintManager.cpp index e89546e..eee5c59 100644 --- a/contrib/llvm/tools/clang/lib/Checker/BasicConstraintManager.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/BasicConstraintManager.cpp @@ -54,22 +54,28 @@ public: ISetFactory(statemgr.getAllocator()) {} const GRState* AssumeSymNE(const GRState* state, SymbolRef sym, - const llvm::APSInt& V); + const llvm::APSInt& V, + const llvm::APSInt& Adjustment); const GRState* AssumeSymEQ(const GRState* state, SymbolRef sym, - const llvm::APSInt& V); + const llvm::APSInt& V, + const llvm::APSInt& Adjustment); const GRState* AssumeSymLT(const GRState* state, SymbolRef sym, - const llvm::APSInt& V); + const llvm::APSInt& V, + const llvm::APSInt& Adjustment); const GRState* AssumeSymGT(const GRState* state, SymbolRef sym, - const llvm::APSInt& V); + const llvm::APSInt& V, + const llvm::APSInt& Adjustment); const GRState* AssumeSymGE(const GRState* state, SymbolRef sym, - const llvm::APSInt& V); + const llvm::APSInt& V, + const llvm::APSInt& Adjustment); const GRState* AssumeSymLE(const GRState* state, SymbolRef sym, - const llvm::APSInt& V); + const 
llvm::APSInt& V, + const llvm::APSInt& Adjustment); const GRState* AddEQ(const GRState* state, SymbolRef sym, const llvm::APSInt& V); @@ -94,46 +100,52 @@ ConstraintManager* clang::CreateBasicConstraintManager(GRStateManager& statemgr, return new BasicConstraintManager(statemgr, subengine); } + const GRState* BasicConstraintManager::AssumeSymNE(const GRState *state, SymbolRef sym, - const llvm::APSInt& V) { - // First, determine if sym == X, where X != V. + const llvm::APSInt &V, + const llvm::APSInt &Adjustment) { + // First, determine if sym == X, where X+Adjustment != V. + llvm::APSInt Adjusted = V-Adjustment; if (const llvm::APSInt* X = getSymVal(state, sym)) { - bool isFeasible = (*X != V); + bool isFeasible = (*X != Adjusted); return isFeasible ? state : NULL; } - // Second, determine if sym != V. - if (isNotEqual(state, sym, V)) + // Second, determine if sym+Adjustment != V. + if (isNotEqual(state, sym, Adjusted)) return state; // If we reach here, sym is not a constant and we don't know if it is != V. // Make that assumption. - return AddNE(state, sym, V); + return AddNE(state, sym, Adjusted); } -const GRState *BasicConstraintManager::AssumeSymEQ(const GRState *state, - SymbolRef sym, - const llvm::APSInt &V) { - // First, determine if sym == X, where X != V. +const GRState* +BasicConstraintManager::AssumeSymEQ(const GRState *state, SymbolRef sym, + const llvm::APSInt &V, + const llvm::APSInt &Adjustment) { + // First, determine if sym == X, where X+Adjustment != V. + llvm::APSInt Adjusted = V-Adjustment; if (const llvm::APSInt* X = getSymVal(state, sym)) { - bool isFeasible = *X == V; + bool isFeasible = (*X == Adjusted); return isFeasible ? state : NULL; } - // Second, determine if sym != V. - if (isNotEqual(state, sym, V)) + // Second, determine if sym+Adjustment != V. + if (isNotEqual(state, sym, Adjusted)) return NULL; // If we reach here, sym is not a constant and we don't know if it is == V. // Make that assumption. 
- return AddEQ(state, sym, V); + return AddEQ(state, sym, Adjusted); } -// These logic will be handled in another ConstraintManager. -const GRState *BasicConstraintManager::AssumeSymLT(const GRState *state, - SymbolRef sym, - const llvm::APSInt& V) { +// The logic for these will be handled in another ConstraintManager. +const GRState* +BasicConstraintManager::AssumeSymLT(const GRState *state, SymbolRef sym, + const llvm::APSInt &V, + const llvm::APSInt &Adjustment) { // Is 'V' the smallest possible value? if (V == llvm::APSInt::getMinValue(V.getBitWidth(), V.isUnsigned())) { // sym cannot be any value less than 'V'. This path is infeasible. @@ -141,13 +153,13 @@ const GRState *BasicConstraintManager::AssumeSymLT(const GRState *state, } // FIXME: For now have assuming x < y be the same as assuming sym != V; - return AssumeSymNE(state, sym, V); + return AssumeSymNE(state, sym, V, Adjustment); } -const GRState *BasicConstraintManager::AssumeSymGT(const GRState *state, - SymbolRef sym, - const llvm::APSInt& V) { - +const GRState* +BasicConstraintManager::AssumeSymGT(const GRState *state, SymbolRef sym, + const llvm::APSInt &V, + const llvm::APSInt &Adjustment) { // Is 'V' the largest possible value? if (V == llvm::APSInt::getMaxValue(V.getBitWidth(), V.isUnsigned())) { // sym cannot be any value greater than 'V'. This path is infeasible. @@ -155,56 +167,60 @@ const GRState *BasicConstraintManager::AssumeSymGT(const GRState *state, } // FIXME: For now have assuming x > y be the same as assuming sym != V; - return AssumeSymNE(state, sym, V); + return AssumeSymNE(state, sym, V, Adjustment); } -const GRState *BasicConstraintManager::AssumeSymGE(const GRState *state, - SymbolRef sym, - const llvm::APSInt &V) { - - // Reject a path if the value of sym is a constant X and !(X >= V). 
+const GRState* +BasicConstraintManager::AssumeSymGE(const GRState *state, SymbolRef sym, + const llvm::APSInt &V, + const llvm::APSInt &Adjustment) { + // Reject a path if the value of sym is a constant X and !(X+Adj >= V). if (const llvm::APSInt *X = getSymVal(state, sym)) { - bool isFeasible = *X >= V; + bool isFeasible = (*X >= V-Adjustment); return isFeasible ? state : NULL; } // Sym is not a constant, but it is worth looking to see if V is the // maximum integer value. if (V == llvm::APSInt::getMaxValue(V.getBitWidth(), V.isUnsigned())) { - // If we know that sym != V, then this condition is infeasible since - // there is no other value greater than V. - bool isFeasible = !isNotEqual(state, sym, V); + llvm::APSInt Adjusted = V-Adjustment; + + // If we know that sym != V (after adjustment), then this condition + // is infeasible since there is no other value greater than V. + bool isFeasible = !isNotEqual(state, sym, Adjusted); // If the path is still feasible then as a consequence we know that - // 'sym == V' because we cannot have 'sym > V' (no larger values). + // 'sym+Adjustment == V' because there are no larger values. // Add this constraint. - return isFeasible ? AddEQ(state, sym, V) : NULL; + return isFeasible ? AddEQ(state, sym, Adjusted) : NULL; } return state; } const GRState* -BasicConstraintManager::AssumeSymLE(const GRState* state, SymbolRef sym, - const llvm::APSInt& V) { - - // Reject a path if the value of sym is a constant X and !(X <= V). +BasicConstraintManager::AssumeSymLE(const GRState *state, SymbolRef sym, + const llvm::APSInt &V, + const llvm::APSInt &Adjustment) { + // Reject a path if the value of sym is a constant X and !(X+Adj <= V). if (const llvm::APSInt* X = getSymVal(state, sym)) { - bool isFeasible = *X <= V; + bool isFeasible = (*X <= V-Adjustment); return isFeasible ? state : NULL; } // Sym is not a constant, but it is worth looking to see if V is the // minimum integer value. 
if (V == llvm::APSInt::getMinValue(V.getBitWidth(), V.isUnsigned())) { - // If we know that sym != V, then this condition is infeasible since - // there is no other value less than V. - bool isFeasible = !isNotEqual(state, sym, V); + llvm::APSInt Adjusted = V-Adjustment; + + // If we know that sym != V (after adjustment), then this condition + // is infeasible since there is no other value less than V. + bool isFeasible = !isNotEqual(state, sym, Adjusted); // If the path is still feasible then as a consequence we know that - // 'sym == V' because we cannot have 'sym < V' (no smaller values). + // 'sym+Adjustment == V' because there are no smaller values. // Add this constraint. - return isFeasible ? AddEQ(state, sym, V) : NULL; + return isFeasible ? AddEQ(state, sym, Adjusted) : NULL; } return state; @@ -213,7 +229,7 @@ BasicConstraintManager::AssumeSymLE(const GRState* state, SymbolRef sym, const GRState* BasicConstraintManager::AddEQ(const GRState* state, SymbolRef sym, const llvm::APSInt& V) { // Create a new state with the old binding replaced. - return state->set<ConstEq>(sym, &V); + return state->set<ConstEq>(sym, &state->getBasicVals().getValue(V)); } const GRState* BasicConstraintManager::AddNE(const GRState* state, SymbolRef sym, @@ -224,7 +240,7 @@ const GRState* BasicConstraintManager::AddNE(const GRState* state, SymbolRef sym GRState::IntSetTy S = T ? *T : ISetFactory.GetEmptySet(); // Now add V to the NE set. - S = ISetFactory.Add(S, &V); + S = ISetFactory.Add(S, &state->getBasicVals().getValue(V)); // Create a new state with the old binding replaced. return state->set<ConstNotEq>(sym, S); @@ -243,7 +259,7 @@ bool BasicConstraintManager::isNotEqual(const GRState* state, SymbolRef sym, const ConstNotEqTy::data_type* T = state->get<ConstNotEq>(sym); // See if V is present in the NE-set. - return T ? T->contains(&V) : false; + return T ? 
T->contains(&state->getBasicVals().getValue(V)) : false; } bool BasicConstraintManager::isEqual(const GRState* state, SymbolRef sym, diff --git a/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.cpp b/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.cpp index b852e2a..ecb2d1c 100644 --- a/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.cpp @@ -415,59 +415,72 @@ clang::CreateAuditCFNumberCreate(ASTContext& Ctx, BugReporter& BR) { } //===----------------------------------------------------------------------===// -// CFRetain/CFRelease auditing for null arguments. +// CFRetain/CFRelease checking for null arguments. //===----------------------------------------------------------------------===// namespace { -class AuditCFRetainRelease : public GRSimpleAPICheck { +class CFRetainReleaseChecker : public CheckerVisitor<CFRetainReleaseChecker> { APIMisuse *BT; - - // FIXME: Either this should be refactored into GRSimpleAPICheck, or - // it should always be passed with a call to Audit. The latter - // approach makes this class more stateless. 
- ASTContext& Ctx; IdentifierInfo *Retain, *Release; - BugReporter& BR; public: - AuditCFRetainRelease(ASTContext& ctx, BugReporter& br) - : BT(0), Ctx(ctx), - Retain(&Ctx.Idents.get("CFRetain")), Release(&Ctx.Idents.get("CFRelease")), - BR(br){} + CFRetainReleaseChecker(ASTContext& Ctx): BT(NULL), + Retain(&Ctx.Idents.get("CFRetain")), Release(&Ctx.Idents.get("CFRelease")) + {} - ~AuditCFRetainRelease() {} + static void *getTag() { static int x = 0; return &x; } - bool Audit(ExplodedNode* N, GRStateManager&); + void PreVisitCallExpr(CheckerContext& C, const CallExpr* CE); }; } // end anonymous namespace -bool AuditCFRetainRelease::Audit(ExplodedNode* N, GRStateManager&) { - const CallExpr* CE = cast<CallExpr>(cast<PostStmt>(N->getLocation()).getStmt()); - +void CFRetainReleaseChecker::PreVisitCallExpr(CheckerContext& C, + const CallExpr* CE) { // If the CallExpr doesn't have exactly 1 argument just give up checking. if (CE->getNumArgs() != 1) - return false; + return; - // Check if we called CFRetain/CFRelease. - const GRState* state = N->getState(); + // Get the function declaration of the callee. + const GRState* state = C.getState(); SVal X = state->getSVal(CE->getCallee()); const FunctionDecl* FD = X.getAsFunctionDecl(); if (!FD) - return false; + return; + // Check if we called CFRetain/CFRelease. const IdentifierInfo *FuncII = FD->getIdentifier(); if (!(FuncII == Retain || FuncII == Release)) - return false; + return; + + // FIXME: The rest of this just checks that the argument is non-null. + // It should probably be refactored and combined with AttrNonNullChecker. + + // Get the argument's value. + const Expr *Arg = CE->getArg(0); + SVal ArgVal = state->getSVal(Arg); + DefinedSVal *DefArgVal = dyn_cast<DefinedSVal>(&ArgVal); + if (!DefArgVal) + return; + + // Get a NULL value. + ValueManager &ValMgr = C.getValueManager(); + DefinedSVal Zero = cast<DefinedSVal>(ValMgr.makeZeroVal(Arg->getType())); + + // Make an expression asserting that they're equal. 
+ SValuator &SVator = ValMgr.getSValuator(); + DefinedOrUnknownSVal ArgIsNull = SVator.EvalEQ(state, Zero, *DefArgVal); + + // Are they equal? + const GRState *stateTrue, *stateFalse; + llvm::tie(stateTrue, stateFalse) = state->Assume(ArgIsNull); + + if (stateTrue && !stateFalse) { + ExplodedNode *N = C.GenerateSink(stateTrue); + if (!N) + return; - // Finally, check if the argument is NULL. - // FIXME: We should be able to bifurcate the state here, as a successful - // check will result in the value not being NULL afterwards. - // FIXME: Need a way to register vistors for the BugReporter. Would like - // to benefit from the same diagnostics that regular null dereference - // reporting has. - if (state->getStateManager().isEqual(state, CE->getArg(0), 0)) { if (!BT) BT = new APIMisuse("null passed to CFRetain/CFRelease"); @@ -475,19 +488,16 @@ bool AuditCFRetainRelease::Audit(ExplodedNode* N, GRStateManager&) { ? "Null pointer argument in call to CFRetain" : "Null pointer argument in call to CFRelease"; - RangedBugReport *report = new RangedBugReport(*BT, description, N); - report->addRange(CE->getArg(0)->getSourceRange()); - BR.EmitReport(report); - return true; - } - - return false; -} + EnhancedBugReport *report = new EnhancedBugReport(*BT, description, N); + report->addRange(Arg->getSourceRange()); + report->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue, Arg); + C.EmitReport(report); + return; + } -GRSimpleAPICheck* -clang::CreateAuditCFRetainRelease(ASTContext& Ctx, BugReporter& BR) { - return new AuditCFRetainRelease(Ctx, BR); + // From here on, we know the argument is non-null. 
+ C.addTransition(stateFalse); } //===----------------------------------------------------------------------===// @@ -569,9 +579,10 @@ void clang::RegisterAppleChecks(GRExprEngine& Eng, const Decl &D) { Eng.AddCheck(CreateBasicObjCFoundationChecks(Ctx, BR), Stmt::ObjCMessageExprClass); Eng.AddCheck(CreateAuditCFNumberCreate(Ctx, BR), Stmt::CallExprClass); - Eng.AddCheck(CreateAuditCFRetainRelease(Ctx, BR), Stmt::CallExprClass); RegisterNSErrorChecks(BR, Eng, D); RegisterNSAutoreleasePoolChecks(Eng); + + Eng.registerCheck(new CFRetainReleaseChecker(Ctx)); Eng.registerCheck(new ClassReleaseChecker(Ctx)); } diff --git a/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.h b/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.h index 679c6dc..8fb0570 100644 --- a/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.h +++ b/contrib/llvm/tools/clang/lib/Checker/BasicObjCFoundationChecks.h @@ -30,9 +30,6 @@ GRSimpleAPICheck *CreateBasicObjCFoundationChecks(ASTContext& Ctx, GRSimpleAPICheck *CreateAuditCFNumberCreate(ASTContext& Ctx, BugReporter& BR); -GRSimpleAPICheck *CreateAuditCFRetainRelease(ASTContext& Ctx, - BugReporter& BR); - void RegisterNSErrorChecks(BugReporter& BR, GRExprEngine &Eng, const Decl &D); void RegisterNSAutoreleasePoolChecks(GRExprEngine &Eng); diff --git a/contrib/llvm/tools/clang/lib/Checker/BasicStore.cpp b/contrib/llvm/tools/clang/lib/Checker/BasicStore.cpp index 5be5ca6..62c8d9c 100644 --- a/contrib/llvm/tools/clang/lib/Checker/BasicStore.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/BasicStore.cpp @@ -46,9 +46,14 @@ public: SVal Retrieve(Store store, Loc loc, QualType T = QualType()); - Store InvalidateRegion(Store store, const MemRegion *R, const Expr *E, + Store InvalidateRegion(Store store, const MemRegion *R, const Expr *E, unsigned Count, InvalidatedSymbols *IS); + Store InvalidateRegions(Store store, const MemRegion * const *Begin, + const MemRegion * const *End, const Expr *E, + unsigned Count, 
InvalidatedSymbols *IS, + bool invalidateGlobals); + Store scanForIvars(Stmt *B, const Decl* SelfDecl, const MemRegion *SelfRegion, Store St); @@ -72,9 +77,9 @@ public: /// RemoveDeadBindings - Scans a BasicStore of 'state' for dead values. /// It updatees the GRState object in place with the values removed. - const GRState *RemoveDeadBindings(GRState &state, Stmt* Loc, - const StackFrameContext *LCtx, - SymbolReaper& SymReaper, + const GRState *RemoveDeadBindings(GRState &state, + const StackFrameContext *LCtx, + SymbolReaper& SymReaper, llvm::SmallVectorImpl<const MemRegion*>& RegionRoots); void iterBindings(Store store, BindingsHandler& f); @@ -144,9 +149,30 @@ SVal BasicStoreManager::LazyRetrieve(Store store, const TypedRegion *R) { // Globals and parameters start with symbolic values. // Local variables initially are undefined. + + // Non-static globals may have had their values reset by InvalidateRegions. + const MemSpaceRegion *MS = VR->getMemorySpace(); + if (isa<NonStaticGlobalSpaceRegion>(MS)) { + BindingsTy B = GetBindings(store); + // FIXME: Copy-and-pasted from RegionStore.cpp. + if (BindingsTy::data_type *Val = B.lookup(MS)) { + if (SymbolRef parentSym = Val->getAsSymbol()) + return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R); + + if (Val->isZeroConstant()) + return ValMgr.makeZeroVal(T); + + if (Val->isUnknownOrUndef()) + return *Val; + + assert(0 && "Unknown default value."); + } + } + if (VR->hasGlobalsOrParametersStorage() || isa<UnknownSpaceRegion>(VR->getMemorySpace())) return ValMgr.getRegionValueSymbolVal(R); + return UndefinedVal(); } @@ -194,6 +220,14 @@ Store BasicStoreManager::Bind(Store store, Loc loc, SVal V) { return store; const MemRegion* R = cast<loc::MemRegionVal>(loc).getRegion(); + + // Special case: a default symbol assigned to the NonStaticGlobalsSpaceRegion + // that is used to derive other symbols. 
+ if (isa<NonStaticGlobalSpaceRegion>(R)) { + BindingsTy B = GetBindings(store); + return VBFactory.Add(B, R, V).getRoot(); + } + ASTContext &C = StateMgr.getContext(); // Special case: handle store of pointer values (Loc) to pointers via @@ -251,7 +285,7 @@ Store BasicStoreManager::Remove(Store store, Loc loc) { } } -const GRState *BasicStoreManager::RemoveDeadBindings(GRState &state, Stmt* Loc, +const GRState *BasicStoreManager::RemoveDeadBindings(GRState &state, const StackFrameContext *LCtx, SymbolReaper& SymReaper, llvm::SmallVectorImpl<const MemRegion*>& RegionRoots) @@ -263,14 +297,14 @@ const GRState *BasicStoreManager::RemoveDeadBindings(GRState &state, Stmt* Loc, // Iterate over the variable bindings. for (BindingsTy::iterator I=B.begin(), E=B.end(); I!=E ; ++I) { if (const VarRegion *VR = dyn_cast<VarRegion>(I.getKey())) { - if (SymReaper.isLive(Loc, VR)) + if (SymReaper.isLive(VR)) RegionRoots.push_back(VR); else continue; } - else if (isa<ObjCIvarRegion>(I.getKey())) { + else if (isa<ObjCIvarRegion>(I.getKey()) || + isa<NonStaticGlobalSpaceRegion>(I.getKey())) RegionRoots.push_back(I.getKey()); - } else continue; @@ -292,7 +326,8 @@ const GRState *BasicStoreManager::RemoveDeadBindings(GRState &state, Stmt* Loc, SymReaper.markLive(SymR->getSymbol()); break; } - else if (isa<VarRegion>(MR) || isa<ObjCIvarRegion>(MR)) { + else if (isa<VarRegion>(MR) || isa<ObjCIvarRegion>(MR) || + isa<NonStaticGlobalSpaceRegion>(MR)) { if (Marked.count(MR)) break; @@ -475,7 +510,8 @@ void BasicStoreManager::iterBindings(Store store, BindingsHandler& f) { BindingsTy B = GetBindings(store); for (BindingsTy::iterator I=B.begin(), E=B.end(); I != E; ++I) - f.HandleBinding(*this, store, I.getKey(), I.getData()); + if (!f.HandleBinding(*this, store, I.getKey(), I.getData())) + return; } @@ -485,6 +521,49 @@ StoreManager::BindingsHandler::~BindingsHandler() {} // Binding invalidation. 
//===----------------------------------------------------------------------===// + +Store BasicStoreManager::InvalidateRegions(Store store, + const MemRegion * const *I, + const MemRegion * const *End, + const Expr *E, unsigned Count, + InvalidatedSymbols *IS, + bool invalidateGlobals) { + if (invalidateGlobals) { + BindingsTy B = GetBindings(store); + for (BindingsTy::iterator I=B.begin(), End=B.end(); I != End; ++I) { + const MemRegion *R = I.getKey(); + if (isa<NonStaticGlobalSpaceRegion>(R->getMemorySpace())) + store = InvalidateRegion(store, R, E, Count, IS); + } + } + + for ( ; I != End ; ++I) { + const MemRegion *R = *I; + // Don't invalidate globals twice. + if (invalidateGlobals) { + if (isa<NonStaticGlobalSpaceRegion>(R->getMemorySpace())) + continue; + } + store = InvalidateRegion(store, *I, E, Count, IS); + } + + // FIXME: This is copy-and-paste from RegionStore.cpp. + if (invalidateGlobals) { + // Bind the non-static globals memory space to a new symbol that we will + // use to derive the bindings for all non-static globals. + const GlobalsSpaceRegion *GS = MRMgr.getGlobalsRegion(); + SVal V = + ValMgr.getConjuredSymbolVal(/* SymbolTag = */ (void*) GS, E, + /* symbol type, doesn't matter */ Ctx.IntTy, + Count); + + store = Bind(store, loc::MemRegionVal(GS), V); + } + + return store; +} + + Store BasicStoreManager::InvalidateRegion(Store store, const MemRegion *R, const Expr *E, diff --git a/contrib/llvm/tools/clang/lib/Checker/BugReporter.cpp b/contrib/llvm/tools/clang/lib/Checker/BugReporter.cpp index 3bcc03f..0422d80 100644 --- a/contrib/llvm/tools/clang/lib/Checker/BugReporter.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/BugReporter.cpp @@ -925,7 +925,7 @@ public: // statement (if it doesn't already exist). // FIXME: Should handle CXXTryStmt if analyser starts supporting C++. 
if (const CompoundStmt *CS = - PDB.getCodeDecl().getCompoundBody()) + dyn_cast_or_null<CompoundStmt>(PDB.getCodeDecl().getBody())) if (!CS->body_empty()) { SourceLocation Loc = (*CS->body_begin())->getLocStart(); rawAddEdge(PathDiagnosticLocation(Loc, PDB.getSourceManager())); @@ -1403,7 +1403,7 @@ MakeReportGraph(const ExplodedGraph* G, // Create a new (third!) graph with a single path. This is the graph // that will be returned to the caller. - ExplodedGraph *GNew = new ExplodedGraph(GTrim->getContext()); + ExplodedGraph *GNew = new ExplodedGraph(); // Sometimes the trimmed graph can contain a cycle. Perform a reverse BFS // to the root node, and then construct a new graph that contains only diff --git a/contrib/llvm/tools/clang/lib/Checker/BuiltinFunctionChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/BuiltinFunctionChecker.cpp index 9c8b516..057e474 100644 --- a/contrib/llvm/tools/clang/lib/Checker/BuiltinFunctionChecker.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/BuiltinFunctionChecker.cpp @@ -57,15 +57,24 @@ bool BuiltinFunctionChecker::EvalCallExpr(CheckerContext &C,const CallExpr *CE){ case Builtin::BI__builtin_alloca: { // FIXME: Refactor into StoreManager itself? MemRegionManager& RM = C.getStoreManager().getRegionManager(); - const MemRegion* R = + const AllocaRegion* R = RM.getAllocaRegion(CE, C.getNodeBuilder().getCurrentBlockCount(), C.getPredecessor()->getLocationContext()); // Set the extent of the region in bytes. This enables us to use the // SVal of the argument directly. If we save the extent in bits, we // cannot represent values like symbol*8. 
- SVal Extent = state->getSVal(*(CE->arg_begin())); - state = C.getStoreManager().setExtent(state, R, Extent); + DefinedOrUnknownSVal Size = + cast<DefinedOrUnknownSVal>(state->getSVal(*(CE->arg_begin()))); + + ValueManager& ValMgr = C.getValueManager(); + DefinedOrUnknownSVal Extent = R->getExtent(ValMgr); + + SValuator& SVator = ValMgr.getSValuator(); + DefinedOrUnknownSVal ExtentMatchesSizeArg = + SVator.EvalEQ(state, Extent, Size); + state = state->Assume(ExtentMatchesSizeArg, true); + C.GenerateNode(state->BindExpr(CE, loc::MemRegionVal(R))); return true; } diff --git a/contrib/llvm/tools/clang/lib/Checker/CFRefCount.cpp b/contrib/llvm/tools/clang/lib/Checker/CFRefCount.cpp index 42e6f67..3c74cd8 100644 --- a/contrib/llvm/tools/clang/lib/Checker/CFRefCount.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/CFRefCount.cpp @@ -228,111 +228,111 @@ public: ErrorOverAutorelease, ErrorReturnedNotOwned }; - + private: Kind kind; RetEffect::ObjKind okind; unsigned Cnt; unsigned ACnt; QualType T; - + RefVal(Kind k, RetEffect::ObjKind o, unsigned cnt, unsigned acnt, QualType t) : kind(k), okind(o), Cnt(cnt), ACnt(acnt), T(t) {} - + RefVal(Kind k, unsigned cnt = 0) : kind(k), okind(RetEffect::AnyObj), Cnt(cnt), ACnt(0) {} - + public: Kind getKind() const { return kind; } - + RetEffect::ObjKind getObjKind() const { return okind; } - + unsigned getCount() const { return Cnt; } unsigned getAutoreleaseCount() const { return ACnt; } unsigned getCombinedCounts() const { return Cnt + ACnt; } void clearCounts() { Cnt = 0; ACnt = 0; } void setCount(unsigned i) { Cnt = i; } void setAutoreleaseCount(unsigned i) { ACnt = i; } - + QualType getType() const { return T; } - + // Useful predicates. 
- + static bool isError(Kind k) { return k >= ERROR_START; } - + static bool isLeak(Kind k) { return k >= ERROR_LEAK_START; } - + bool isOwned() const { return getKind() == Owned; } - + bool isNotOwned() const { return getKind() == NotOwned; } - + bool isReturnedOwned() const { return getKind() == ReturnedOwned; } - + bool isReturnedNotOwned() const { return getKind() == ReturnedNotOwned; } - + bool isNonLeakError() const { Kind k = getKind(); return isError(k) && !isLeak(k); } - + static RefVal makeOwned(RetEffect::ObjKind o, QualType t, unsigned Count = 1) { return RefVal(Owned, o, Count, 0, t); } - + static RefVal makeNotOwned(RetEffect::ObjKind o, QualType t, unsigned Count = 0) { return RefVal(NotOwned, o, Count, 0, t); } - + // Comparison, profiling, and pretty-printing. - + bool operator==(const RefVal& X) const { return kind == X.kind && Cnt == X.Cnt && T == X.T && ACnt == X.ACnt; } - + RefVal operator-(size_t i) const { return RefVal(getKind(), getObjKind(), getCount() - i, getAutoreleaseCount(), getType()); } - + RefVal operator+(size_t i) const { return RefVal(getKind(), getObjKind(), getCount() + i, getAutoreleaseCount(), getType()); } - + RefVal operator^(Kind k) const { return RefVal(k, getObjKind(), getCount(), getAutoreleaseCount(), getType()); } - + RefVal autorelease() const { return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount()+1, getType()); } - + void Profile(llvm::FoldingSetNodeID& ID) const { ID.AddInteger((unsigned) kind); ID.AddInteger(Cnt); ID.AddInteger(ACnt); ID.Add(T); } - + void print(llvm::raw_ostream& Out) const; }; void RefVal::print(llvm::raw_ostream& Out) const { if (!T.isNull()) Out << "Tracked Type:" << T.getAsString() << '\n'; - + switch (getKind()) { default: assert(false); case Owned: { @@ -341,69 +341,69 @@ void RefVal::print(llvm::raw_ostream& Out) const { if (cnt) Out << " (+ " << cnt << ")"; break; } - + case NotOwned: { Out << "NotOwned"; unsigned cnt = getCount(); if (cnt) Out << " (+ " << cnt << 
")"; break; } - + case ReturnedOwned: { Out << "ReturnedOwned"; unsigned cnt = getCount(); if (cnt) Out << " (+ " << cnt << ")"; break; } - + case ReturnedNotOwned: { Out << "ReturnedNotOwned"; unsigned cnt = getCount(); if (cnt) Out << " (+ " << cnt << ")"; break; } - + case Released: Out << "Released"; break; - + case ErrorDeallocGC: Out << "-dealloc (GC)"; break; - + case ErrorDeallocNotOwned: Out << "-dealloc (not-owned)"; break; - + case ErrorLeak: Out << "Leaked"; break; - + case ErrorLeakReturned: Out << "Leaked (Bad naming)"; break; - + case ErrorGCLeakReturned: Out << "Leaked (GC-ed at return)"; break; - + case ErrorUseAfterRelease: Out << "Use-After-Release [ERROR]"; break; - + case ErrorReleaseNotOwned: Out << "Release of Not-Owned [ERROR]"; break; - + case RefVal::ErrorOverAutorelease: Out << "Over autoreleased"; break; - + case RefVal::ErrorReturnedNotOwned: Out << "Non-owned object returned instead of owned"; break; } - + if (ACnt) { Out << " [ARC +" << ACnt << ']'; } @@ -897,7 +897,7 @@ public: RetainSummary *getInstanceMethodSummary(const ObjCMessageExpr *ME, const GRState *state, const LocationContext *LC); - + RetainSummary* getInstanceMethodSummary(const ObjCMessageExpr* ME, const ObjCInterfaceDecl* ID) { return getInstanceMethodSummary(ME->getSelector(), 0, @@ -927,7 +927,7 @@ public: break; } - return getClassMethodSummary(ME->getSelector(), + return getClassMethodSummary(ME->getSelector(), Class? Class->getIdentifier() : 0, Class, ME->getMethodDecl(), ME->getType()); @@ -1419,16 +1419,16 @@ RetainSummaryManager::getInstanceMethodSummary(const ObjCMessageExpr *ME, if (Receiver) { receiverV = state->getSValAsScalarOrLoc(Receiver); - + // FIXME: Eventually replace the use of state->get<RefBindings> with // a generic API for reasoning about the Objective-C types of symbolic // objects. 
if (SymbolRef Sym = receiverV.getAsLocSymbol()) if (const RefVal *T = state->get<RefBindings>(Sym)) - if (const ObjCObjectPointerType* PT = + if (const ObjCObjectPointerType* PT = T->getType()->getAs<ObjCObjectPointerType>()) ID = PT->getInterfaceDecl(); - + // FIXME: this is a hack. This may or may not be the actual method // that is called. if (!ID) { @@ -1444,7 +1444,7 @@ RetainSummaryManager::getInstanceMethodSummary(const ObjCMessageExpr *ME, // FIXME: The receiver could be a reference to a class, meaning that // we should use the class method. RetainSummary *Summ = getInstanceMethodSummary(ME, ID); - + // Special-case: are we sending a mesage to "self"? // This is a hack. When we have full-IP this should be removed. if (isa<ObjCMethodDecl>(LC->getDecl()) && Receiver) { @@ -1461,7 +1461,7 @@ RetainSummaryManager::getInstanceMethodSummary(const ObjCMessageExpr *ME, } } } - + return Summ ? Summ : getDefaultSummary(); } @@ -1849,7 +1849,7 @@ public: GRExprEngine& Engine, GRStmtNodeBuilder& Builder, ExplodedNode* Pred, - Stmt* S, const GRState* state, + const GRState* state, SymbolReaper& SymReaper); std::pair<ExplodedNode*, const GRState *> @@ -2619,7 +2619,7 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst, SymbolRef ErrorSym = 0; llvm::SmallVector<const MemRegion*, 10> RegionsToInvalidate; - + for (ExprIterator I = arg_beg; I != arg_end; ++I, ++idx) { SVal V = state->getSValAsScalarOrLoc(*I); SymbolRef Sym = V.getAsLocSymbol(); @@ -2659,7 +2659,7 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst, // approriately delegated to the respective StoreManagers while // still allowing us to do checker-specific logic (e.g., // invalidating reference counts), probably via callbacks. 
- if (ER->getElementType()->isIntegralType()) { + if (ER->getElementType()->isIntegralOrEnumerationType()) { const MemRegion *superReg = ER->getSuperRegion(); if (isa<VarRegion>(superReg) || isa<FieldRegion>(superReg) || isa<ObjCIvarRegion>(superReg)) @@ -2667,7 +2667,7 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst, } // FIXME: What about layers of ElementRegions? } - + // Mark this region for invalidation. We batch invalidate regions // below for efficiency. RegionsToInvalidate.push_back(R); @@ -2687,37 +2687,39 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst, goto tryAgain; } } - + // Block calls result in all captured values passed-via-reference to be // invalidated. if (const BlockDataRegion *BR = dyn_cast_or_null<BlockDataRegion>(Callee)) { RegionsToInvalidate.push_back(BR); } - + // Invalidate regions we designed for invalidation use the batch invalidation // API. - if (!RegionsToInvalidate.empty()) { - // FIXME: We can have collisions on the conjured symbol if the - // expression *I also creates conjured symbols. We probably want - // to identify conjured symbols by an expression pair: the enclosing - // expression (the context) and the expression itself. This should - // disambiguate conjured symbols. - unsigned Count = Builder.getCurrentBlockCount(); - StoreManager& StoreMgr = Eng.getStateManager().getStoreManager(); - - - StoreManager::InvalidatedSymbols IS; - Store store = state->getStore(); - store = StoreMgr.InvalidateRegions(store, RegionsToInvalidate.data(), - RegionsToInvalidate.data() + - RegionsToInvalidate.size(), - Ex, Count, &IS); - state = state->makeWithStore(store); - for (StoreManager::InvalidatedSymbols::iterator I = IS.begin(), - E = IS.end(); I!=E; ++I) { - // Remove any existing reference-count binding. - state = state->remove<RefBindings>(*I); - } + + // FIXME: We can have collisions on the conjured symbol if the + // expression *I also creates conjured symbols. 
We probably want + // to identify conjured symbols by an expression pair: the enclosing + // expression (the context) and the expression itself. This should + // disambiguate conjured symbols. + unsigned Count = Builder.getCurrentBlockCount(); + StoreManager& StoreMgr = Eng.getStateManager().getStoreManager(); + StoreManager::InvalidatedSymbols IS; + Store store = state->getStore(); + + // NOTE: Even if RegionsToInvalidate is empty, we must still invalidate + // global variables. + store = StoreMgr.InvalidateRegions(store, RegionsToInvalidate.data(), + RegionsToInvalidate.data() + + RegionsToInvalidate.size(), + Ex, Count, &IS, + /* invalidateGlobals = */ true); + + state = state->makeWithStore(store); + for (StoreManager::InvalidatedSymbols::iterator I = IS.begin(), + E = IS.end(); I!=E; ++I) { + // Remove any existing reference-count binding. + state = state->remove<RefBindings>(*I); } // Evaluate the effect on the message receiver. @@ -2862,7 +2864,7 @@ void CFRefCount::EvalCall(ExplodedNodeSet& Dst, ExplodedNode* Pred) { RetainSummary *Summ = 0; - + // FIXME: Better support for blocks. For now we stop tracking anything // that is passed to blocks. // FIXME: Need to handle variables that are "captured" by the block. 
@@ -3400,10 +3402,9 @@ void CFRefCount::EvalDeadSymbols(ExplodedNodeSet& Dst, GRExprEngine& Eng, GRStmtNodeBuilder& Builder, ExplodedNode* Pred, - Stmt* S, const GRState* state, SymbolReaper& SymReaper) { - + Stmt *S = Builder.getStmt(); RefBindings B = state->get<RefBindings>(); // Update counts from autorelease pools @@ -3501,7 +3502,7 @@ class RetainReleaseChecker public: RetainReleaseChecker(CFRefCount *tf) : TF(tf) {} static void* getTag() { static int x = 0; return &x; } - + void PostVisitBlockExpr(CheckerContext &C, const BlockExpr *BE); }; } // end anonymous namespace @@ -3509,29 +3510,29 @@ public: void RetainReleaseChecker::PostVisitBlockExpr(CheckerContext &C, const BlockExpr *BE) { - + // Scan the BlockDecRefExprs for any object the retain/release checker - // may be tracking. + // may be tracking. if (!BE->hasBlockDeclRefExprs()) return; - + const GRState *state = C.getState(); const BlockDataRegion *R = cast<BlockDataRegion>(state->getSVal(BE).getAsRegion()); - + BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(), E = R->referenced_vars_end(); - + if (I == E) return; - + // FIXME: For now we invalidate the tracking of all symbols passed to blocks // via captured variables, even though captured variables result in a copy // and in implicit increment/decrement of a retain count. 
llvm::SmallVector<const MemRegion*, 10> Regions; const LocationContext *LC = C.getPredecessor()->getLocationContext(); MemRegionManager &MemMgr = C.getValueManager().getRegionManager(); - + for ( ; I != E; ++I) { const VarRegion *VR = *I; if (VR->getSuperRegion() == R) { @@ -3539,7 +3540,7 @@ void RetainReleaseChecker::PostVisitBlockExpr(CheckerContext &C, } Regions.push_back(VR); } - + state = state->scanReachableSymbols<StopTrackingCallback>(Regions.data(), Regions.data() + Regions.size()).getState(); @@ -3552,28 +3553,28 @@ void RetainReleaseChecker::PostVisitBlockExpr(CheckerContext &C, void CFRefCount::RegisterChecks(GRExprEngine& Eng) { BugReporter &BR = Eng.getBugReporter(); - + useAfterRelease = new UseAfterRelease(this); BR.Register(useAfterRelease); - + releaseNotOwned = new BadRelease(this); BR.Register(releaseNotOwned); - + deallocGC = new DeallocGC(this); BR.Register(deallocGC); - + deallocNotOwned = new DeallocNotOwned(this); BR.Register(deallocNotOwned); - + overAutorelease = new OverAutorelease(this); BR.Register(overAutorelease); - + returnNotOwnedForOwned = new ReturnedNotOwnedForOwned(this); BR.Register(returnNotOwnedForOwned); - + // First register "return" leaks. const char* name = 0; - + if (isGCEnabled()) name = "Leak of returned object when using garbage collection"; else if (getLangOptions().getGCMode() == LangOptions::HybridGC) @@ -3583,12 +3584,12 @@ void CFRefCount::RegisterChecks(GRExprEngine& Eng) { assert(getLangOptions().getGCMode() == LangOptions::NonGC); name = "Leak of returned object"; } - + // Leaks should not be reported if they are post-dominated by a sink. leakAtReturn = new LeakAtReturn(this, name); leakAtReturn->setSuppressOnSink(true); BR.Register(leakAtReturn); - + // Second, register leaks within a function/method. 
if (isGCEnabled()) name = "Leak of object when using garbage collection"; @@ -3599,15 +3600,15 @@ void CFRefCount::RegisterChecks(GRExprEngine& Eng) { assert(getLangOptions().getGCMode() == LangOptions::NonGC); name = "Leak"; } - + // Leaks should not be reported if they are post-dominated by sinks. leakWithinFunction = new LeakWithinFunction(this, name); leakWithinFunction->setSuppressOnSink(true); BR.Register(leakWithinFunction); - + // Save the reference to the BugReporter. this->BR = &BR; - + // Register the RetainReleaseChecker with the GRExprEngine object. // Functionality in CFRefCount will be migrated to RetainReleaseChecker // over time. diff --git a/contrib/llvm/tools/clang/lib/Checker/CMakeLists.txt b/contrib/llvm/tools/clang/lib/Checker/CMakeLists.txt index 9c6adc6..259346a 100644 --- a/contrib/llvm/tools/clang/lib/Checker/CMakeLists.txt +++ b/contrib/llvm/tools/clang/lib/Checker/CMakeLists.txt @@ -3,6 +3,7 @@ set(LLVM_NO_RTTI 1) add_clang_library(clangChecker AdjustedReturnValueChecker.cpp AggExprVisitor.cpp + AnalysisConsumer.cpp ArrayBoundChecker.cpp AttrNonNullChecker.cpp BasicConstraintManager.cpp @@ -12,63 +13,70 @@ add_clang_library(clangChecker BugReporter.cpp BugReporterVisitors.cpp BuiltinFunctionChecker.cpp + CFRefCount.cpp CallAndMessageChecker.cpp CallInliner.cpp CastSizeChecker.cpp CastToStructChecker.cpp - CFRefCount.cpp CheckDeadStores.cpp - Checker.cpp CheckObjCDealloc.cpp CheckObjCInstMethSignature.cpp CheckSecuritySyntaxOnly.cpp CheckSizeofPointer.cpp + Checker.cpp CocoaConventions.cpp + CStringChecker.cpp DereferenceChecker.cpp DivZeroChecker.cpp Environment.cpp ExplodedGraph.cpp FixedAddressChecker.cpp FlatStore.cpp + FrontendActions.cpp GRBlockCounter.cpp - GRCoreEngine.cpp GRCXXExprEngine.cpp + GRCoreEngine.cpp GRExprEngine.cpp GRExprEngineExperimentalChecks.cpp GRState.cpp + HTMLDiagnostics.cpp + IdempotentOperationChecker.cpp LLVMConventionsChecker.cpp MacOSXAPIChecker.cpp MallocChecker.cpp ManagerRegistry.cpp MemRegion.cpp - 
NoReturnFunctionChecker.cpp NSAutoreleasePoolChecker.cpp NSErrorChecker.cpp - ObjCUnusedIVarsChecker.cpp + NoReturnFunctionChecker.cpp OSAtomicChecker.cpp + ObjCUnusedIVarsChecker.cpp PathDiagnostic.cpp + PlistDiagnostics.cpp PointerArithChecker.cpp PointerSubChecker.cpp PthreadLockChecker.cpp RangeConstraintManager.cpp RegionStore.cpp ReturnPointerRangeChecker.cpp - ReturnStackAddressChecker.cpp ReturnUndefChecker.cpp + SVals.cpp + SValuator.cpp SimpleConstraintManager.cpp SimpleSValuator.cpp + StackAddrLeakChecker.cpp Store.cpp - SVals.cpp - SValuator.cpp + StreamChecker.cpp SymbolManager.cpp UndefBranchChecker.cpp UndefCapturedBlockVarChecker.cpp + UndefResultChecker.cpp UndefinedArraySubscriptChecker.cpp UndefinedAssignmentChecker.cpp - UndefResultChecker.cpp UnixAPIChecker.cpp - ValueManager.cpp VLASizeChecker.cpp + ValueManager.cpp ) -add_dependencies(clangChecker ClangStmtNodes) +add_dependencies(clangChecker ClangAttrClasses ClangAttrList ClangDeclNodes + ClangStmtNodes) diff --git a/contrib/llvm/tools/clang/lib/Checker/CStringChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/CStringChecker.cpp new file mode 100644 index 0000000..a92d409 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/Checker/CStringChecker.cpp @@ -0,0 +1,525 @@ +//= CStringChecker.h - Checks calls to C string functions ----------*- C++ -*-// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This defines CStringChecker, which is an assortment of checks on calls +// to functions in <string.h>. 
+// +//===----------------------------------------------------------------------===// + +#include "GRExprEngineExperimentalChecks.h" +#include "clang/Checker/BugReporter/BugType.h" +#include "clang/Checker/PathSensitive/CheckerVisitor.h" +#include "llvm/ADT/StringSwitch.h" + +using namespace clang; + +namespace { +class CStringChecker : public CheckerVisitor<CStringChecker> { + BugType *BT_Null, *BT_Bounds, *BT_Overlap; +public: + CStringChecker() + : BT_Null(0), BT_Bounds(0), BT_Overlap(0) {} + static void *getTag() { static int tag; return &tag; } + + bool EvalCallExpr(CheckerContext &C, const CallExpr *CE); + + typedef void (CStringChecker::*FnCheck)(CheckerContext &, const CallExpr *); + + void EvalMemcpy(CheckerContext &C, const CallExpr *CE); + void EvalMemmove(CheckerContext &C, const CallExpr *CE); + void EvalBcopy(CheckerContext &C, const CallExpr *CE); + void EvalCopyCommon(CheckerContext &C, const GRState *state, + const Expr *Size, const Expr *Source, const Expr *Dest, + bool Restricted = false); + + void EvalMemcmp(CheckerContext &C, const CallExpr *CE); + + // Utility methods + std::pair<const GRState*, const GRState*> + AssumeZero(CheckerContext &C, const GRState *state, SVal V, QualType Ty); + + const GRState *CheckNonNull(CheckerContext &C, const GRState *state, + const Expr *S, SVal l); + const GRState *CheckLocation(CheckerContext &C, const GRState *state, + const Expr *S, SVal l); + const GRState *CheckBufferAccess(CheckerContext &C, const GRState *state, + const Expr *Size, + const Expr *FirstBuf, + const Expr *SecondBuf = NULL); + const GRState *CheckOverlap(CheckerContext &C, const GRState *state, + const Expr *Size, const Expr *First, + const Expr *Second); + void EmitOverlapBug(CheckerContext &C, const GRState *state, + const Stmt *First, const Stmt *Second); +}; +} //end anonymous namespace + +void clang::RegisterCStringChecker(GRExprEngine &Eng) { + Eng.registerCheck(new CStringChecker()); +} + 
+//===----------------------------------------------------------------------===// +// Individual checks and utility methods. +//===----------------------------------------------------------------------===// + +std::pair<const GRState*, const GRState*> +CStringChecker::AssumeZero(CheckerContext &C, const GRState *state, SVal V, + QualType Ty) { + DefinedSVal *Val = dyn_cast<DefinedSVal>(&V); + if (!Val) + return std::pair<const GRState*, const GRState *>(state, state); + + ValueManager &ValMgr = C.getValueManager(); + SValuator &SV = ValMgr.getSValuator(); + + DefinedOrUnknownSVal Zero = ValMgr.makeZeroVal(Ty); + DefinedOrUnknownSVal ValIsZero = SV.EvalEQ(state, *Val, Zero); + + return state->Assume(ValIsZero); +} + +const GRState *CStringChecker::CheckNonNull(CheckerContext &C, + const GRState *state, + const Expr *S, SVal l) { + // If a previous check has failed, propagate the failure. + if (!state) + return NULL; + + const GRState *stateNull, *stateNonNull; + llvm::tie(stateNull, stateNonNull) = AssumeZero(C, state, l, S->getType()); + + if (stateNull && !stateNonNull) { + ExplodedNode *N = C.GenerateSink(stateNull); + if (!N) + return NULL; + + if (!BT_Null) + BT_Null = new BuiltinBug("API", + "Null pointer argument in call to byte string function"); + + // Generate a report for this bug. + BuiltinBug *BT = static_cast<BuiltinBug*>(BT_Null); + EnhancedBugReport *report = new EnhancedBugReport(*BT, + BT->getDescription(), N); + + report->addRange(S->getSourceRange()); + report->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue, S); + C.EmitReport(report); + return NULL; + } + + // From here on, assume that the value is non-null. + assert(stateNonNull); + return stateNonNull; +} + +// FIXME: This was originally copied from ArrayBoundChecker.cpp. Refactor? +const GRState *CStringChecker::CheckLocation(CheckerContext &C, + const GRState *state, + const Expr *S, SVal l) { + // If a previous check has failed, propagate the failure. 
+ if (!state) + return NULL; + + // Check for out of bound array element access. + const MemRegion *R = l.getAsRegion(); + if (!R) + return state; + + const ElementRegion *ER = dyn_cast<ElementRegion>(R); + if (!ER) + return state; + + assert(ER->getValueType(C.getASTContext()) == C.getASTContext().CharTy && + "CheckLocation should only be called with char* ElementRegions"); + + // Get the size of the array. + const SubRegion *Super = cast<SubRegion>(ER->getSuperRegion()); + ValueManager &ValMgr = C.getValueManager(); + SVal Extent = ValMgr.convertToArrayIndex(Super->getExtent(ValMgr)); + DefinedOrUnknownSVal Size = cast<DefinedOrUnknownSVal>(Extent); + + // Get the index of the accessed element. + DefinedOrUnknownSVal &Idx = cast<DefinedOrUnknownSVal>(ER->getIndex()); + + const GRState *StInBound = state->AssumeInBound(Idx, Size, true); + const GRState *StOutBound = state->AssumeInBound(Idx, Size, false); + if (StOutBound && !StInBound) { + ExplodedNode *N = C.GenerateSink(StOutBound); + if (!N) + return NULL; + + if (!BT_Bounds) + BT_Bounds = new BuiltinBug("Out-of-bound array access", + "Byte string function accesses out-of-bound array element " + "(buffer overflow)"); + + // FIXME: It would be nice to eventually make this diagnostic more clear, + // e.g., by referencing the original declaration or by saying *why* this + // reference is outside the range. + + // Generate a report for this bug. + BuiltinBug *BT = static_cast<BuiltinBug*>(BT_Bounds); + RangedBugReport *report = new RangedBugReport(*BT, BT->getDescription(), N); + + report->addRange(S->getSourceRange()); + C.EmitReport(report); + return NULL; + } + + // Array bound check succeeded. From this point forward the array bound + // should always succeed. + return StInBound; +} + +const GRState *CStringChecker::CheckBufferAccess(CheckerContext &C, + const GRState *state, + const Expr *Size, + const Expr *FirstBuf, + const Expr *SecondBuf) { + // If a previous check has failed, propagate the failure. 
+ if (!state) + return NULL; + + ValueManager &VM = C.getValueManager(); + SValuator &SV = VM.getSValuator(); + ASTContext &Ctx = C.getASTContext(); + + QualType SizeTy = Ctx.getSizeType(); + QualType PtrTy = Ctx.getPointerType(Ctx.CharTy); + + // Check that the first buffer is non-null. + SVal BufVal = state->getSVal(FirstBuf); + state = CheckNonNull(C, state, FirstBuf, BufVal); + if (!state) + return NULL; + + // Get the access length and make sure it is known. + SVal LengthVal = state->getSVal(Size); + NonLoc *Length = dyn_cast<NonLoc>(&LengthVal); + if (!Length) + return state; + + // Compute the offset of the last element to be accessed: size-1. + NonLoc One = cast<NonLoc>(VM.makeIntVal(1, SizeTy)); + NonLoc LastOffset = cast<NonLoc>(SV.EvalBinOpNN(state, BinaryOperator::Sub, + *Length, One, SizeTy)); + + // Check that the first buffer is sufficently long. + Loc BufStart = cast<Loc>(SV.EvalCast(BufVal, PtrTy, FirstBuf->getType())); + SVal BufEnd + = SV.EvalBinOpLN(state, BinaryOperator::Add, BufStart, LastOffset, PtrTy); + state = CheckLocation(C, state, FirstBuf, BufEnd); + + // If the buffer isn't large enough, abort. + if (!state) + return NULL; + + // If there's a second buffer, check it as well. + if (SecondBuf) { + BufVal = state->getSVal(SecondBuf); + state = CheckNonNull(C, state, SecondBuf, BufVal); + if (!state) + return NULL; + + BufStart = cast<Loc>(SV.EvalCast(BufVal, PtrTy, SecondBuf->getType())); + BufEnd + = SV.EvalBinOpLN(state, BinaryOperator::Add, BufStart, LastOffset, PtrTy); + state = CheckLocation(C, state, SecondBuf, BufEnd); + } + + // Large enough or not, return this state! + return state; +} + +const GRState *CStringChecker::CheckOverlap(CheckerContext &C, + const GRState *state, + const Expr *Size, + const Expr *First, + const Expr *Second) { + // Do a simple check for overlap: if the two arguments are from the same + // buffer, see if the end of the first is greater than the start of the second + // or vice versa. 
+ + // If a previous check has failed, propagate the failure. + if (!state) + return NULL; + + ValueManager &VM = state->getStateManager().getValueManager(); + SValuator &SV = VM.getSValuator(); + ASTContext &Ctx = VM.getContext(); + const GRState *stateTrue, *stateFalse; + + // Get the buffer values and make sure they're known locations. + SVal FirstVal = state->getSVal(First); + SVal SecondVal = state->getSVal(Second); + + Loc *FirstLoc = dyn_cast<Loc>(&FirstVal); + if (!FirstLoc) + return state; + + Loc *SecondLoc = dyn_cast<Loc>(&SecondVal); + if (!SecondLoc) + return state; + + // Are the two values the same? + DefinedOrUnknownSVal EqualTest = SV.EvalEQ(state, *FirstLoc, *SecondLoc); + llvm::tie(stateTrue, stateFalse) = state->Assume(EqualTest); + + if (stateTrue && !stateFalse) { + // If the values are known to be equal, that's automatically an overlap. + EmitOverlapBug(C, stateTrue, First, Second); + return NULL; + } + + // Assume the two expressions are not equal. + assert(stateFalse); + state = stateFalse; + + // Which value comes first? + QualType CmpTy = Ctx.IntTy; + SVal Reverse = SV.EvalBinOpLL(state, BinaryOperator::GT, + *FirstLoc, *SecondLoc, CmpTy); + DefinedOrUnknownSVal *ReverseTest = dyn_cast<DefinedOrUnknownSVal>(&Reverse); + if (!ReverseTest) + return state; + + llvm::tie(stateTrue, stateFalse) = state->Assume(*ReverseTest); + + if (stateTrue) { + if (stateFalse) { + // If we don't know which one comes first, we can't perform this test. + return state; + } else { + // Switch the values so that FirstVal is before SecondVal. + Loc *tmpLoc = FirstLoc; + FirstLoc = SecondLoc; + SecondLoc = tmpLoc; + + // Switch the Exprs as well, so that they still correspond. + const Expr *tmpExpr = First; + First = Second; + Second = tmpExpr; + } + } + + // Get the length, and make sure it too is known. 
+ SVal LengthVal = state->getSVal(Size); + NonLoc *Length = dyn_cast<NonLoc>(&LengthVal); + if (!Length) + return state; + + // Convert the first buffer's start address to char*. + // Bail out if the cast fails. + QualType CharPtrTy = Ctx.getPointerType(Ctx.CharTy); + SVal FirstStart = SV.EvalCast(*FirstLoc, CharPtrTy, First->getType()); + Loc *FirstStartLoc = dyn_cast<Loc>(&FirstStart); + if (!FirstStartLoc) + return state; + + // Compute the end of the first buffer. Bail out if THAT fails. + SVal FirstEnd = SV.EvalBinOpLN(state, BinaryOperator::Add, + *FirstStartLoc, *Length, CharPtrTy); + Loc *FirstEndLoc = dyn_cast<Loc>(&FirstEnd); + if (!FirstEndLoc) + return state; + + // Is the end of the first buffer past the start of the second buffer? + SVal Overlap = SV.EvalBinOpLL(state, BinaryOperator::GT, + *FirstEndLoc, *SecondLoc, CmpTy); + DefinedOrUnknownSVal *OverlapTest = dyn_cast<DefinedOrUnknownSVal>(&Overlap); + if (!OverlapTest) + return state; + + llvm::tie(stateTrue, stateFalse) = state->Assume(*OverlapTest); + + if (stateTrue && !stateFalse) { + // Overlap! + EmitOverlapBug(C, stateTrue, First, Second); + return NULL; + } + + // Assume the two expressions don't overlap. + assert(stateFalse); + return stateFalse; +} + +void CStringChecker::EmitOverlapBug(CheckerContext &C, const GRState *state, + const Stmt *First, const Stmt *Second) { + ExplodedNode *N = C.GenerateSink(state); + if (!N) + return; + + if (!BT_Overlap) + BT_Overlap = new BugType("Unix API", "Improper arguments"); + + // Generate a report for this bug. + RangedBugReport *report = + new RangedBugReport(*BT_Overlap, + "Arguments must not be overlapping buffers", N); + report->addRange(First->getSourceRange()); + report->addRange(Second->getSourceRange()); + + C.EmitReport(report); +} + +//===----------------------------------------------------------------------===// +// Evaluation of individual function calls. 
+//===----------------------------------------------------------------------===// + +void CStringChecker::EvalCopyCommon(CheckerContext &C, const GRState *state, + const Expr *Size, const Expr *Dest, + const Expr *Source, bool Restricted) { + // See if the size argument is zero. + SVal SizeVal = state->getSVal(Size); + QualType SizeTy = Size->getType(); + + const GRState *StZeroSize, *StNonZeroSize; + llvm::tie(StZeroSize, StNonZeroSize) = AssumeZero(C, state, SizeVal, SizeTy); + + // If the size is zero, there won't be any actual memory access. + if (StZeroSize) + C.addTransition(StZeroSize); + + // If the size can be nonzero, we have to check the other arguments. + if (StNonZeroSize) { + state = StNonZeroSize; + state = CheckBufferAccess(C, state, Size, Dest, Source); + if (Restricted) + state = CheckOverlap(C, state, Size, Dest, Source); + if (state) + C.addTransition(state); + } +} + + +void CStringChecker::EvalMemcpy(CheckerContext &C, const CallExpr *CE) { + // void *memcpy(void *restrict dst, const void *restrict src, size_t n); + // The return value is the address of the destination buffer. + const Expr *Dest = CE->getArg(0); + const GRState *state = C.getState(); + state = state->BindExpr(CE, state->getSVal(Dest)); + EvalCopyCommon(C, state, CE->getArg(2), Dest, CE->getArg(1), true); +} + +void CStringChecker::EvalMemmove(CheckerContext &C, const CallExpr *CE) { + // void *memmove(void *dst, const void *src, size_t n); + // The return value is the address of the destination buffer. 
+ const Expr *Dest = CE->getArg(0); + const GRState *state = C.getState(); + state = state->BindExpr(CE, state->getSVal(Dest)); + EvalCopyCommon(C, state, CE->getArg(2), Dest, CE->getArg(1)); +} + +void CStringChecker::EvalBcopy(CheckerContext &C, const CallExpr *CE) { + // void bcopy(const void *src, void *dst, size_t n); + EvalCopyCommon(C, C.getState(), CE->getArg(2), CE->getArg(1), CE->getArg(0)); +} + +void CStringChecker::EvalMemcmp(CheckerContext &C, const CallExpr *CE) { + // int memcmp(const void *s1, const void *s2, size_t n); + const Expr *Left = CE->getArg(0); + const Expr *Right = CE->getArg(1); + const Expr *Size = CE->getArg(2); + + const GRState *state = C.getState(); + ValueManager &ValMgr = C.getValueManager(); + SValuator &SV = ValMgr.getSValuator(); + + // See if the size argument is zero. + SVal SizeVal = state->getSVal(Size); + QualType SizeTy = Size->getType(); + + const GRState *StZeroSize, *StNonZeroSize; + llvm::tie(StZeroSize, StNonZeroSize) = AssumeZero(C, state, SizeVal, SizeTy); + + // If the size can be zero, the result will be 0 in that case, and we don't + // have to check either of the buffers. + if (StZeroSize) { + state = StZeroSize; + state = state->BindExpr(CE, ValMgr.makeZeroVal(CE->getType())); + C.addTransition(state); + } + + // If the size can be nonzero, we have to check the other arguments. + if (StNonZeroSize) { + state = StNonZeroSize; + + // If we know the two buffers are the same, we know the result is 0. + // First, get the two buffers' addresses. Another checker will have already + // made sure they're not undefined. + DefinedOrUnknownSVal LV = cast<DefinedOrUnknownSVal>(state->getSVal(Left)); + DefinedOrUnknownSVal RV = cast<DefinedOrUnknownSVal>(state->getSVal(Right)); + + // See if they are the same. 
+ DefinedOrUnknownSVal SameBuf = SV.EvalEQ(state, LV, RV); + const GRState *StSameBuf, *StNotSameBuf; + llvm::tie(StSameBuf, StNotSameBuf) = state->Assume(SameBuf); + + // If the two arguments might be the same buffer, we know the result is zero, + // and we only need to check one size. + if (StSameBuf) { + state = StSameBuf; + state = CheckBufferAccess(C, state, Size, Left); + if (state) { + state = StSameBuf->BindExpr(CE, ValMgr.makeZeroVal(CE->getType())); + C.addTransition(state); + } + } + + // If the two arguments might be different buffers, we have to check the + // size of both of them. + if (StNotSameBuf) { + state = StNotSameBuf; + state = CheckBufferAccess(C, state, Size, Left, Right); + if (state) { + // The return value is the comparison result, which we don't know. + unsigned Count = C.getNodeBuilder().getCurrentBlockCount(); + SVal CmpV = ValMgr.getConjuredSymbolVal(NULL, CE, CE->getType(), Count); + state = state->BindExpr(CE, CmpV); + C.addTransition(state); + } + } + } +} + +//===----------------------------------------------------------------------===// +// The driver method. +//===----------------------------------------------------------------------===// + +bool CStringChecker::EvalCallExpr(CheckerContext &C, const CallExpr *CE) { + // Get the callee. All the functions we care about are C functions + // with simple identifiers. + const GRState *state = C.getState(); + const Expr *Callee = CE->getCallee(); + const FunctionDecl *FD = state->getSVal(Callee).getAsFunctionDecl(); + + if (!FD) + return false; + + // Get the name of the callee. If it's a builtin, strip off the prefix. 
+ llvm::StringRef Name = FD->getName(); + if (Name.startswith("__builtin_")) + Name = Name.substr(10); + + FnCheck EvalFunction = llvm::StringSwitch<FnCheck>(Name) + .Cases("memcpy", "__memcpy_chk", &CStringChecker::EvalMemcpy) + .Cases("memcmp", "bcmp", &CStringChecker::EvalMemcmp) + .Cases("memmove", "__memmove_chk", &CStringChecker::EvalMemmove) + .Case("bcopy", &CStringChecker::EvalBcopy) + .Default(NULL); + + // If the callee isn't a string function, let another checker handle it. + if (!EvalFunction) + return false; + + // Check and evaluate the call. + (this->*EvalFunction)(C, CE); + return true; +} diff --git a/contrib/llvm/tools/clang/lib/Checker/CallInliner.cpp b/contrib/llvm/tools/clang/lib/Checker/CallInliner.cpp index 88e1a05..c47e06c 100644 --- a/contrib/llvm/tools/clang/lib/Checker/CallInliner.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/CallInliner.cpp @@ -42,7 +42,7 @@ bool CallInliner::EvalCallExpr(CheckerContext &C, const CallExpr *CE) { if (!FD) return false; - if (!FD->getBody(FD)) + if (!FD->hasBody(FD)) return false; // Now we have the definition of the callee, create a CallEnter node. 
diff --git a/contrib/llvm/tools/clang/lib/Checker/CastSizeChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/CastSizeChecker.cpp index 754d775..a502c10 100644 --- a/contrib/llvm/tools/clang/lib/Checker/CastSizeChecker.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/CastSizeChecker.cpp @@ -44,7 +44,8 @@ void CastSizeChecker::PreVisitCastExpr(CheckerContext &C, const CastExpr *CE) { QualType ToPointeeTy = ToPTy->getPointeeType(); - const MemRegion *R = C.getState()->getSVal(E).getAsRegion(); + const GRState *state = C.getState(); + const MemRegion *R = state->getSVal(E).getAsRegion(); if (R == 0) return; @@ -52,17 +53,21 @@ void CastSizeChecker::PreVisitCastExpr(CheckerContext &C, const CastExpr *CE) { if (SR == 0) return; - llvm::Optional<SVal> V = - C.getEngine().getStoreManager().getExtent(C.getState(), SR); - if (!V) - return; + ValueManager &ValMgr = C.getValueManager(); + SVal Extent = SR->getExtent(ValMgr); - const nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(V); - if (!CI) + SValuator &SVator = ValMgr.getSValuator(); + const llvm::APSInt *ExtentInt = SVator.getKnownValue(state, Extent); + if (!ExtentInt) return; - CharUnits RegionSize = CharUnits::fromQuantity(CI->getValue().getSExtValue()); + CharUnits RegionSize = CharUnits::fromQuantity(ExtentInt->getSExtValue()); CharUnits TypeSize = C.getASTContext().getTypeSizeInChars(ToPointeeTy); + + // Ignore void, and a few other un-sizeable types. 
+ if (TypeSize.isZero()) + return; + if (RegionSize % TypeSize != 0) { if (ExplodedNode *N = C.GenerateSink()) { if (!BT) diff --git a/contrib/llvm/tools/clang/lib/Checker/CheckSecuritySyntaxOnly.cpp b/contrib/llvm/tools/clang/lib/Checker/CheckSecuritySyntaxOnly.cpp index 74e12b1..af85c2f 100644 --- a/contrib/llvm/tools/clang/lib/Checker/CheckSecuritySyntaxOnly.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/CheckSecuritySyntaxOnly.cpp @@ -191,8 +191,8 @@ void WalkAST::CheckLoopConditionForFloat(const ForStmt *FS) { const DeclRefExpr *drRHS = dyn_cast<DeclRefExpr>(B->getRHS()->IgnoreParens()); // Does at least one of the variables have a floating point type? - drLHS = drLHS && drLHS->getType()->isFloatingType() ? drLHS : NULL; - drRHS = drRHS && drRHS->getType()->isFloatingType() ? drRHS : NULL; + drLHS = drLHS && drLHS->getType()->isRealFloatingType() ? drLHS : NULL; + drRHS = drRHS && drRHS->getType()->isRealFloatingType() ? drRHS : NULL; if (!drLHS && !drRHS) return; diff --git a/contrib/llvm/tools/clang/lib/Checker/Environment.cpp b/contrib/llvm/tools/clang/lib/Checker/Environment.cpp index addfc21..48152ce 100644 --- a/contrib/llvm/tools/clang/lib/Checker/Environment.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/Environment.cpp @@ -125,7 +125,7 @@ static bool isBlockExprInCallers(const Stmt *E, const LocationContext *LC) { // - Mark the region in DRoots if the binding is a loc::MemRegionVal. Environment -EnvironmentManager::RemoveDeadBindings(Environment Env, const Stmt *S, +EnvironmentManager::RemoveDeadBindings(Environment Env, SymbolReaper &SymReaper, const GRState *ST, llvm::SmallVectorImpl<const MemRegion*> &DRoots) { @@ -163,7 +163,7 @@ EnvironmentManager::RemoveDeadBindings(Environment Env, const Stmt *S, if (!C.isBlkExpr(BlkExpr)) continue; - if (SymReaper.isLive(S, BlkExpr)) { + if (SymReaper.isLive(BlkExpr)) { // Copy the binding to the new map. 
NewEnv.ExprBindings = F.Add(NewEnv.ExprBindings, BlkExpr, X); diff --git a/contrib/llvm/tools/clang/lib/Checker/FlatStore.cpp b/contrib/llvm/tools/clang/lib/Checker/FlatStore.cpp index 7f1c579..64575b3c9 100644 --- a/contrib/llvm/tools/clang/lib/Checker/FlatStore.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/FlatStore.cpp @@ -44,7 +44,7 @@ public: } SVal ArrayToPointer(Loc Array); - const GRState *RemoveDeadBindings(GRState &state, Stmt* Loc, + const GRState *RemoveDeadBindings(GRState &state, const StackFrameContext *LCtx, SymbolReaper& SymReaper, llvm::SmallVectorImpl<const MemRegion*>& RegionRoots){ @@ -59,6 +59,11 @@ public: Store InvalidateRegion(Store store, const MemRegion *R, const Expr *E, unsigned Count, InvalidatedSymbols *IS); + + Store InvalidateRegions(Store store, const MemRegion * const *I, + const MemRegion * const *E, const Expr *Ex, + unsigned Count, InvalidatedSymbols *IS, + bool invalidateGlobals); void print(Store store, llvm::raw_ostream& Out, const char* nl, const char *sep); @@ -141,9 +146,20 @@ Store FlatStoreManager::BindDeclWithNoInit(Store store, const VarRegion *VR) { return store; } +Store FlatStoreManager::InvalidateRegions(Store store, + const MemRegion * const *I, + const MemRegion * const *E, + const Expr *Ex, unsigned Count, + InvalidatedSymbols *IS, + bool invalidateGlobals) { + assert(false && "Not implemented"); + return store; +} + Store FlatStoreManager::InvalidateRegion(Store store, const MemRegion *R, const Expr *E, unsigned Count, InvalidatedSymbols *IS) { + assert(false && "Not implemented"); return store; } diff --git a/contrib/llvm/tools/clang/lib/Checker/FrontendActions.cpp b/contrib/llvm/tools/clang/lib/Checker/FrontendActions.cpp new file mode 100644 index 0000000..d9a54a0 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/Checker/FrontendActions.cpp @@ -0,0 +1,21 @@ +//===--- FrontendActions.cpp ----------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is 
distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "clang/Checker/FrontendActions.h" +#include "clang/Frontend/CompilerInstance.h" +#include "clang/Checker/AnalysisConsumer.h" +using namespace clang; + +ASTConsumer *AnalysisAction::CreateASTConsumer(CompilerInstance &CI, + llvm::StringRef InFile) { + return CreateAnalysisConsumer(CI.getPreprocessor(), + CI.getFrontendOpts().OutputFile, + CI.getAnalyzerOpts()); +} + diff --git a/contrib/llvm/tools/clang/lib/Checker/GRCoreEngine.cpp b/contrib/llvm/tools/clang/lib/Checker/GRCoreEngine.cpp index 23a87d3..a816186 100644 --- a/contrib/llvm/tools/clang/lib/Checker/GRCoreEngine.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/GRCoreEngine.cpp @@ -221,6 +221,7 @@ bool GRCoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps) { } } + SubEngine.ProcessEndWorklist(WList->hasWork() || BlockAborted); return WList->hasWork(); } @@ -257,7 +258,10 @@ void GRCoreEngine::HandleBlockEdge(const BlockEdge& L, ExplodedNode* Pred) { // FIXME: Should we allow ProcessBlockEntrance to also manipulate state? 
if (ProcessBlockEntrance(Blk, Pred, WList->getBlockCounter())) - GenerateNode(BlockEntrance(Blk, Pred->getLocationContext()), Pred->State, Pred); + GenerateNode(BlockEntrance(Blk, Pred->getLocationContext()), + Pred->State, Pred); + else + BlockAborted = true; } void GRCoreEngine::HandleBlockEntrance(const BlockEntrance& L, diff --git a/contrib/llvm/tools/clang/lib/Checker/GRExprEngine.cpp b/contrib/llvm/tools/clang/lib/Checker/GRExprEngine.cpp index 2417658..07fee9d 100644 --- a/contrib/llvm/tools/clang/lib/Checker/GRExprEngine.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/GRExprEngine.cpp @@ -172,15 +172,39 @@ public: void GRExprEngine::CheckerVisit(Stmt *S, ExplodedNodeSet &Dst, ExplodedNodeSet &Src, bool isPrevisit) { - if (Checkers.empty()) { + // Determine if we already have a cached 'CheckersOrdered' vector + // specifically tailored for the provided <Stmt kind, isPrevisit>. This + // can reduce the number of checkers actually called. + CheckersOrdered *CO = &Checkers; + llvm::OwningPtr<CheckersOrdered> NewCO; + + const std::pair<unsigned, unsigned> &K = + std::make_pair((unsigned)S->getStmtClass(), isPrevisit ? 1U : 0U); + + CheckersOrdered *& CO_Ref = COCache[K]; + + if (!CO_Ref) { + // If we have no previously cached CheckersOrdered vector for this + // statement kind, then create one. + NewCO.reset(new CheckersOrdered); + } + else { + // Use the already cached set. + CO = CO_Ref; + } + + if (CO->empty()) { + // If there are no checkers, return early without doing any + // more work. 
Dst.insert(Src); return; } ExplodedNodeSet Tmp; ExplodedNodeSet *PrevSet = &Src; + unsigned checkersEvaluated = 0; - for (CheckersOrdered::iterator I=Checkers.begin(),E=Checkers.end(); I!=E;++I){ + for (CheckersOrdered::iterator I=CO->begin(), E=CO->end(); I!=E; ++I){ ExplodedNodeSet *CurrSet = 0; if (I+1 == E) CurrSet = &Dst; @@ -190,12 +214,30 @@ void GRExprEngine::CheckerVisit(Stmt *S, ExplodedNodeSet &Dst, } void *tag = I->first; Checker *checker = I->second; + bool respondsToCallback = true; for (ExplodedNodeSet::iterator NI = PrevSet->begin(), NE = PrevSet->end(); - NI != NE; ++NI) - checker->GR_Visit(*CurrSet, *Builder, *this, S, *NI, tag, isPrevisit); + NI != NE; ++NI) { + + checker->GR_Visit(*CurrSet, *Builder, *this, S, *NI, tag, isPrevisit, + respondsToCallback); + + } + PrevSet = CurrSet; + + if (NewCO.get()) { + ++checkersEvaluated; + if (respondsToCallback) + NewCO->push_back(*I); + } } + + // If we built NewCO, check if we called all the checkers. This is important + // so that we know that we accurately determined the entire set of checkers + // that responds to this callback. + if (NewCO.get() && checkersEvaluated == Checkers.size()) + CO_Ref = NewCO.take(); // Don't autotransition. The CheckerContext objects should do this // automatically. @@ -312,18 +354,20 @@ static void RegisterInternalChecks(GRExprEngine &Eng) { // automatically. Note that the check itself is owned by the GRExprEngine // object. RegisterAdjustedReturnValueChecker(Eng); - RegisterAttrNonNullChecker(Eng); + // CallAndMessageChecker should be registered before AttrNonNullChecker, + // where we assume arguments are not undefined. 
RegisterCallAndMessageChecker(Eng); + RegisterAttrNonNullChecker(Eng); RegisterDereferenceChecker(Eng); RegisterVLASizeChecker(Eng); RegisterDivZeroChecker(Eng); - RegisterReturnStackAddressChecker(Eng); RegisterReturnUndefChecker(Eng); RegisterUndefinedArraySubscriptChecker(Eng); RegisterUndefinedAssignmentChecker(Eng); RegisterUndefBranchChecker(Eng); RegisterUndefCapturedBlockVarChecker(Eng); RegisterUndefResultChecker(Eng); + RegisterStackAddrLeakChecker(Eng); // This is not a checker yet. RegisterNoReturnFunctionChecker(Eng); @@ -335,10 +379,10 @@ static void RegisterInternalChecks(GRExprEngine &Eng) { GRExprEngine::GRExprEngine(AnalysisManager &mgr, GRTransferFuncs *tf) : AMgr(mgr), - CoreEngine(mgr.getASTContext(), *this), + CoreEngine(*this), G(CoreEngine.getGraph()), Builder(NULL), - StateMgr(G.getContext(), mgr.getStoreManagerCreator(), + StateMgr(getContext(), mgr.getStoreManagerCreator(), mgr.getConstraintManagerCreator(), G.getAllocator(), *this), SymMgr(StateMgr.getSymbolManager()), @@ -346,7 +390,7 @@ GRExprEngine::GRExprEngine(AnalysisManager &mgr, GRTransferFuncs *tf) SVator(ValMgr.getSValuator()), CurrentStmt(NULL), NSExceptionII(NULL), NSExceptionInstanceRaiseSelectors(NULL), - RaiseSel(GetNullarySelector("raise", G.getContext())), + RaiseSel(GetNullarySelector("raise", getContext())), BR(mgr, *this), TF(tf) { // Register internal checks. RegisterInternalChecks(*this); @@ -359,8 +403,14 @@ GRExprEngine::GRExprEngine(AnalysisManager &mgr, GRTransferFuncs *tf) GRExprEngine::~GRExprEngine() { BR.FlushReports(); delete [] NSExceptionInstanceRaiseSelectors; + + // Delete the set of checkers. 
for (CheckersOrdered::iterator I=Checkers.begin(), E=Checkers.end(); I!=E;++I) delete I->second; + + for (CheckersOrderedCache::iterator I=COCache.begin(), E=COCache.end(); + I!=E;++I) + delete I->second; } //===----------------------------------------------------------------------===// @@ -464,6 +514,13 @@ const GRState *GRExprEngine::ProcessAssume(const GRState *state, SVal cond, return TF->EvalAssume(state, cond, assumption); } +void GRExprEngine::ProcessEndWorklist(bool hasWorkRemaining) { + for (CheckersOrdered::iterator I = Checkers.begin(), E = Checkers.end(); + I != E; ++I) { + I->second->VisitEndAnalysis(G, BR, hasWorkRemaining); + } +} + void GRExprEngine::ProcessStmt(CFGElement CE, GRStmtNodeBuilder& builder) { CurrentStmt = CE.getStmt(); PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(), @@ -480,10 +537,10 @@ void GRExprEngine::ProcessStmt(CFGElement CE, GRStmtNodeBuilder& builder) { // Create the cleaned state. const ExplodedNode *BasePred = Builder->getBasePredecessor(); - SymbolReaper SymReaper(BasePred->getLocationContext(), SymMgr); + SymbolReaper SymReaper(BasePred->getLocationContext(), CurrentStmt, SymMgr); CleanedState = AMgr.shouldPurgeDead() - ? StateMgr.RemoveDeadBindings(EntryNode->getState(), CurrentStmt, + ? StateMgr.RemoveDeadBindings(EntryNode->getState(), BasePred->getLocationContext()->getCurrentStackFrame(), SymReaper) : EntryNode->getState(); @@ -502,7 +559,7 @@ void GRExprEngine::ProcessStmt(CFGElement CE, GRStmtNodeBuilder& builder) { // FIXME: This should soon be removed. 
ExplodedNodeSet Tmp2; - getTF().EvalDeadSymbols(Tmp2, *this, *Builder, EntryNode, CurrentStmt, + getTF().EvalDeadSymbols(Tmp2, *this, *Builder, EntryNode, CleanedState, SymReaper); if (Checkers.empty()) @@ -598,7 +655,7 @@ void GRExprEngine::Visit(Stmt* S, ExplodedNode* Pred, ExplodedNodeSet& Dst) { case Stmt::CXXTryStmtClass: case Stmt::CXXTypeidExprClass: case Stmt::CXXUnresolvedConstructExprClass: - case Stmt::CXXZeroInitValueExprClass: + case Stmt::CXXScalarValueInitExprClass: case Stmt::DependentScopeDeclRefExprClass: case Stmt::UnaryTypeTraitExprClass: case Stmt::UnresolvedLookupExprClass: @@ -627,10 +684,14 @@ void GRExprEngine::Visit(Stmt* S, ExplodedNode* Pred, ExplodedNodeSet& Dst) { llvm_unreachable("Stmt should not be in analyzer evaluation loop"); break; + case Stmt::GNUNullExprClass: { + MakeNode(Dst, S, Pred, GetState(Pred)->BindExpr(S, ValMgr.makeNull())); + break; + } + // Cases not handled yet; but will handle some day. case Stmt::DesignatedInitExprClass: case Stmt::ExtVectorElementExprClass: - case Stmt::GNUNullExprClass: case Stmt::ImaginaryLiteralClass: case Stmt::ImplicitValueInitExprClass: case Stmt::ObjCAtCatchStmtClass: @@ -901,7 +962,7 @@ void GRExprEngine::VisitLValue(Expr* Ex, ExplodedNode* Pred, // C++ stuff we don't support yet. case Stmt::CXXExprWithTemporariesClass: case Stmt::CXXMemberCallExprClass: - case Stmt::CXXZeroInitValueExprClass: { + case Stmt::CXXScalarValueInitExprClass: { SaveAndRestore<bool> OldSink(Builder->BuildSinks); Builder->BuildSinks = true; MakeNode(Dst, Ex, Pred, GetState(Pred)); @@ -998,16 +1059,21 @@ void GRExprEngine::VisitLValue(Expr* Ex, ExplodedNode* Pred, CreateCXXTemporaryObject(Ex, Pred, Dst); return; - default: + default: { // Arbitrary subexpressions can return aggregate temporaries that // can be used in a lvalue context. We need to enhance our support // of such temporaries in both the environment and the store, so right // now we just do a regular visit. 
- assert ((Ex->getType()->isAggregateType()) && - "Other kinds of expressions with non-aggregate/union types do" - " not have lvalues."); + + // NOTE: Do not use 'isAggregateType()' here as CXXRecordDecls that + // are non-pod are not aggregates. + assert ((isa<RecordType>(Ex->getType().getDesugaredType()) || + isa<ArrayType>(Ex->getType().getDesugaredType())) && + "Other kinds of expressions with non-aggregate/union/class types" + " do not have lvalues."); Visit(Ex, Pred, Dst); + } } } @@ -1819,7 +1885,7 @@ bool GRExprEngine::InlineCall(ExplodedNodeSet &Dst, const CallExpr *CE, if (!FD) return false; - if (!FD->getBody(FD)) + if (!FD->hasBody(FD)) return false; // Now we have the definition of the callee, create a CallEnter node. @@ -1940,7 +2006,8 @@ void GRExprEngine::VisitCall(CallExpr* CE, ExplodedNode* Pred, // Finally, perform the post-condition check of the CallExpr and store // the created nodes in 'Dst'. - + // If the callee returns a reference and we want an rvalue, skip this check + // and do the load. if (!(!asLValue && CalleeReturnsReference(CE))) { CheckerVisit(CE, Dst, DstTmp3, false); return; @@ -2371,6 +2438,7 @@ void GRExprEngine::VisitCast(CastExpr *CastE, Expr *Ex, ExplodedNode *Pred, case CastExpr::CK_Unknown: case CastExpr::CK_ArrayToPointerDecay: case CastExpr::CK_BitCast: + case CastExpr::CK_LValueBitCast: case CastExpr::CK_IntegralCast: case CastExpr::CK_IntegralToPointer: case CastExpr::CK_PointerToIntegral: @@ -2380,7 +2448,7 @@ void GRExprEngine::VisitCast(CastExpr *CastE, Expr *Ex, ExplodedNode *Pred, case CastExpr::CK_AnyPointerToObjCPointerCast: case CastExpr::CK_AnyPointerToBlockPointerCast: case CastExpr::CK_DerivedToBase: - case CastExpr::CK_UncheckedDerivedToBase: + case CastExpr::CK_UncheckedDerivedToBase: { // Delegate to SValuator to process. 
for (ExplodedNodeSet::iterator I = S2.begin(), E = S2.end(); I != E; ++I) { ExplodedNode* N = *I; @@ -2391,10 +2459,24 @@ void GRExprEngine::VisitCast(CastExpr *CastE, Expr *Ex, ExplodedNode *Pred, MakeNode(Dst, CastE, N, state); } return; - - default: - llvm::errs() << "Cast kind " << CastE->getCastKind() << " not handled.\n"; - assert(0); + } + + // Various C++ casts that are not handled yet. + case CastExpr::CK_Dynamic: + case CastExpr::CK_ToUnion: + case CastExpr::CK_BaseToDerived: + case CastExpr::CK_NullToMemberPointer: + case CastExpr::CK_BaseToDerivedMemberPointer: + case CastExpr::CK_DerivedToBaseMemberPointer: + case CastExpr::CK_UserDefinedConversion: + case CastExpr::CK_ConstructorConversion: + case CastExpr::CK_VectorSplat: + case CastExpr::CK_MemberPointerToBoolean: { + SaveAndRestore<bool> OldSink(Builder->BuildSinks); + Builder->BuildSinks = true; + MakeNode(Dst, CastE, Pred, GetState(Pred)); + return; + } } } @@ -2615,9 +2697,38 @@ void GRExprEngine::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr* Ex, // sizeof(void) == 1 byte. amt = CharUnits::One(); } - else if (!T.getTypePtr()->isConstantSizeType()) { - // FIXME: Add support for VLAs. - Dst.Add(Pred); + else if (!T->isConstantSizeType()) { + assert(T->isVariableArrayType() && "Unknown non-constant-sized type."); + + // FIXME: Add support for VLA type arguments, not just VLA expressions. + // When that happens, we should probably refactor VLASizeChecker's code. + if (Ex->isArgumentType()) { + Dst.Add(Pred); + return; + } + + // Get the size by getting the extent of the sub-expression. + // First, visit the sub-expression to find its region. + Expr *Arg = Ex->getArgumentExpr(); + ExplodedNodeSet Tmp; + VisitLValue(Arg, Pred, Tmp); + + for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) { + const GRState* state = GetState(*I); + const MemRegion *MR = state->getSVal(Arg).getAsRegion(); + + // If the subexpression can't be resolved to a region, we don't know + // anything about its size. 
Just leave the state as is and continue. + if (!MR) { + Dst.Add(*I); + continue; + } + + // The result is the extent of the VLA. + SVal Extent = cast<SubRegion>(MR)->getExtent(ValMgr); + MakeNode(Dst, Ex, *I, state->BindExpr(Ex, Extent)); + } + return; } else if (T->getAs<ObjCObjectType>()) { @@ -2749,7 +2860,7 @@ void GRExprEngine::VisitUnaryOperator(UnaryOperator* U, ExplodedNode* Pred, return; } - case UnaryOperator::Plus: assert (!asLValue); // FALL-THROUGH. + case UnaryOperator::Plus: assert(!asLValue); // FALL-THROUGH. case UnaryOperator::Extension: { // Unary "+" is a no-op, similar to a parentheses. We still have places @@ -2759,7 +2870,11 @@ void GRExprEngine::VisitUnaryOperator(UnaryOperator* U, ExplodedNode* Pred, Expr* Ex = U->getSubExpr()->IgnoreParens(); ExplodedNodeSet Tmp; - Visit(Ex, Pred, Tmp); + + if (asLValue) + VisitLValue(Ex, Pred, Tmp); + else + Visit(Ex, Pred, Tmp); for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) { const GRState* state = GetState(*I); diff --git a/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.cpp b/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.cpp index 6066a1c..d138e81 100644 --- a/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.cpp @@ -23,6 +23,8 @@ void clang::RegisterExperimentalChecks(GRExprEngine &Eng) { // within GRExprEngine. 
RegisterPthreadLockChecker(Eng); RegisterMallocChecker(Eng); + RegisterStreamChecker(Eng); + RegisterCStringChecker(Eng); } void clang::RegisterExperimentalInternalChecks(GRExprEngine &Eng) { @@ -38,4 +40,5 @@ void clang::RegisterExperimentalInternalChecks(GRExprEngine &Eng) { RegisterCastToStructChecker(Eng); RegisterCastSizeChecker(Eng); RegisterArrayBoundChecker(Eng); + } diff --git a/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.h b/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.h index 9a9da32..7d1eb77 100644 --- a/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.h +++ b/contrib/llvm/tools/clang/lib/Checker/GRExprEngineExperimentalChecks.h @@ -19,8 +19,11 @@ namespace clang { class GRExprEngine; +void RegisterCStringChecker(GRExprEngine &Eng); void RegisterPthreadLockChecker(GRExprEngine &Eng); void RegisterMallocChecker(GRExprEngine &Eng); +void RegisterStreamChecker(GRExprEngine &Eng); +void RegisterIdempotentOperationChecker(GRExprEngine &Eng); } // end clang namespace #endif diff --git a/contrib/llvm/tools/clang/lib/Checker/GRExprEngineInternalChecks.h b/contrib/llvm/tools/clang/lib/Checker/GRExprEngineInternalChecks.h index 335b85e..f91a759 100644 --- a/contrib/llvm/tools/clang/lib/Checker/GRExprEngineInternalChecks.h +++ b/contrib/llvm/tools/clang/lib/Checker/GRExprEngineInternalChecks.h @@ -34,8 +34,8 @@ void RegisterNoReturnFunctionChecker(GRExprEngine &Eng); void RegisterPointerArithChecker(GRExprEngine &Eng); void RegisterPointerSubChecker(GRExprEngine &Eng); void RegisterReturnPointerRangeChecker(GRExprEngine &Eng); -void RegisterReturnStackAddressChecker(GRExprEngine &Eng); void RegisterReturnUndefChecker(GRExprEngine &Eng); +void RegisterStackAddrLeakChecker(GRExprEngine &Eng); void RegisterUndefBranchChecker(GRExprEngine &Eng); void RegisterUndefCapturedBlockVarChecker(GRExprEngine &Eng); void RegisterUndefResultChecker(GRExprEngine &Eng); diff --git 
a/contrib/llvm/tools/clang/lib/Checker/GRState.cpp b/contrib/llvm/tools/clang/lib/Checker/GRState.cpp index b16e922..9e584b5 100644 --- a/contrib/llvm/tools/clang/lib/Checker/GRState.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/GRState.cpp @@ -34,7 +34,7 @@ GRStateManager::~GRStateManager() { } const GRState* -GRStateManager::RemoveDeadBindings(const GRState* state, Stmt* Loc, +GRStateManager::RemoveDeadBindings(const GRState* state, const StackFrameContext *LCtx, SymbolReaper& SymReaper) { @@ -47,11 +47,11 @@ GRStateManager::RemoveDeadBindings(const GRState* state, Stmt* Loc, llvm::SmallVector<const MemRegion*, 10> RegionRoots; GRState NewState = *state; - NewState.Env = EnvMgr.RemoveDeadBindings(NewState.Env, Loc, SymReaper, + NewState.Env = EnvMgr.RemoveDeadBindings(NewState.Env, SymReaper, state, RegionRoots); // Clean up the store. - const GRState *s = StoreMgr->RemoveDeadBindings(NewState, Loc, LCtx, + const GRState *s = StoreMgr->RemoveDeadBindings(NewState, LCtx, SymReaper, RegionRoots); return ConstraintMgr->RemoveDeadBindings(s, SymReaper); @@ -343,28 +343,3 @@ bool GRState::scanReachableSymbols(const MemRegion * const *I, } return true; } - -//===----------------------------------------------------------------------===// -// Queries. 
-//===----------------------------------------------------------------------===// - -bool GRStateManager::isEqual(const GRState* state, const Expr* Ex, - const llvm::APSInt& Y) { - - SVal V = state->getSVal(Ex); - - if (loc::ConcreteInt* X = dyn_cast<loc::ConcreteInt>(&V)) - return X->getValue() == Y; - - if (nonloc::ConcreteInt* X = dyn_cast<nonloc::ConcreteInt>(&V)) - return X->getValue() == Y; - - if (SymbolRef Sym = V.getAsSymbol()) - return ConstraintMgr->isEqual(state, Sym, Y); - - return false; -} - -bool GRStateManager::isEqual(const GRState* state, const Expr* Ex, uint64_t x) { - return isEqual(state, Ex, getBasicVals().getValue(x, Ex->getType())); -} diff --git a/contrib/llvm/tools/clang/lib/Frontend/HTMLDiagnostics.cpp b/contrib/llvm/tools/clang/lib/Checker/HTMLDiagnostics.cpp index 022a34d..ff9867f 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/HTMLDiagnostics.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/HTMLDiagnostics.cpp @@ -11,7 +11,7 @@ // //===----------------------------------------------------------------------===// -#include "clang/Frontend/PathDiagnosticClients.h" +#include "clang/Checker/PathDiagnosticClients.h" #include "clang/Checker/BugReporter/PathDiagnostic.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Decl.h" @@ -294,8 +294,8 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D, llvm::raw_fd_ostream os(H.c_str(), ErrorMsg); if (!ErrorMsg.empty()) { - (llvm::errs() << "warning: could not create file '" << F.str() - << "'\n").flush(); + llvm::errs() << "warning: could not create file '" << F.str() + << "'\n"; return; } diff --git a/contrib/llvm/tools/clang/lib/Checker/IdempotentOperationChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/IdempotentOperationChecker.cpp new file mode 100644 index 0000000..6ed1841 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/Checker/IdempotentOperationChecker.cpp @@ -0,0 +1,454 @@ +//==- IdempotentOperationChecker.cpp - Idempotent Operations ----*- C++ -*-==// +// +// The LLVM 
Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines a set of path-sensitive checks for idempotent and/or +// tautological operations. Each potential operation is checked along all paths +// to see if every path results in a pointless operation. +// +-------------------------------------------+ +// |Table of idempotent/tautological operations| +// +-------------------------------------------+ +//+--------------------------------------------------------------------------+ +//|Operator | x op x | x op 1 | 1 op x | x op 0 | 0 op x | x op ~0 | ~0 op x | +//+--------------------------------------------------------------------------+ +// +, += | | | | x | x | | +// -, -= | | | | x | -x | | +// *, *= | | x | x | 0 | 0 | | +// /, /= | 1 | x | | N/A | 0 | | +// &, &= | x | | | 0 | 0 | x | x +// |, |= | x | | | x | x | ~0 | ~0 +// ^, ^= | 0 | | | x | x | | +// <<, <<= | | | | x | 0 | | +// >>, >>= | | | | x | 0 | | +// || | 1 | 1 | 1 | x | x | 1 | 1 +// && | 1 | x | x | 0 | 0 | x | x +// = | x | | | | | | +// == | 1 | | | | | | +// >= | 1 | | | | | | +// <= | 1 | | | | | | +// > | 0 | | | | | | +// < | 0 | | | | | | +// != | 0 | | | | | | +//===----------------------------------------------------------------------===// +// +// Ways to reduce false positives (that need to be implemented): +// - Don't flag downsizing casts +// - Improved handling of static/global variables +// - Per-block marking of incomplete analysis +// - Handling ~0 values +// - False positives involving silencing unused variable warnings +// +// Other things TODO: +// - Improved error messages +// - Handle mixed assumptions (which assumptions can belong together?) 
+// - Finer grained false positive control (levels) + +#include "GRExprEngineExperimentalChecks.h" +#include "clang/Checker/BugReporter/BugType.h" +#include "clang/Checker/PathSensitive/CheckerVisitor.h" +#include "clang/Checker/PathSensitive/SVals.h" +#include "clang/AST/Stmt.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/Support/ErrorHandling.h" + +using namespace clang; + +namespace { +class IdempotentOperationChecker + : public CheckerVisitor<IdempotentOperationChecker> { + public: + static void *getTag(); + void PreVisitBinaryOperator(CheckerContext &C, const BinaryOperator *B); + void VisitEndAnalysis(ExplodedGraph &G, BugReporter &B, + bool hasWorkRemaining); + + private: + // Our assumption about a particular operation. + enum Assumption { Possible, Impossible, Equal, LHSis1, RHSis1, LHSis0, + RHSis0 }; + + void UpdateAssumption(Assumption &A, const Assumption &New); + + /// contains* - Useful recursive methods to see if a statement contains an + /// element somewhere. Used in static analysis to reduce false positives. 
+ static bool containsMacro(const Stmt *S); + static bool containsEnum(const Stmt *S); + static bool containsBuiltinOffsetOf(const Stmt *S); + static bool containsZeroConstant(const Stmt *S); + static bool containsOneConstant(const Stmt *S); + template <class T> static bool containsStmt(const Stmt *S) { + if (isa<T>(S)) + return true; + + for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end(); + ++I) + if (const Stmt *child = *I) + if (containsStmt<T>(child)) + return true; + + return false; + } + + // Hash table + typedef llvm::DenseMap<const BinaryOperator *, Assumption> AssumptionMap; + AssumptionMap hash; +}; +} + +void *IdempotentOperationChecker::getTag() { + static int x = 0; + return &x; +} + +void clang::RegisterIdempotentOperationChecker(GRExprEngine &Eng) { + Eng.registerCheck(new IdempotentOperationChecker()); +} + +void IdempotentOperationChecker::PreVisitBinaryOperator( + CheckerContext &C, + const BinaryOperator *B) { + // Find or create an entry in the hash for this BinaryOperator instance + AssumptionMap::iterator i = hash.find(B); + Assumption &A = i == hash.end() ? hash[B] : i->second; + + // If we had to create an entry, initialise the value to Possible + if (i == hash.end()) + A = Possible; + + // If we already have visited this node on a path that does not contain an + // idempotent operation, return immediately. + if (A == Impossible) + return; + + // Skip binary operators containing common false positives + if (containsMacro(B) || containsEnum(B) || containsStmt<SizeOfAlignOfExpr>(B) + || containsZeroConstant(B) || containsOneConstant(B) + || containsBuiltinOffsetOf(B)) { + A = Impossible; + return; + } + + const Expr *LHS = B->getLHS(); + const Expr *RHS = B->getRHS(); + + const GRState *state = C.getState(); + + SVal LHSVal = state->getSVal(LHS); + SVal RHSVal = state->getSVal(RHS); + + // If either value is unknown, we can't be 100% sure of all paths. 
+ if (LHSVal.isUnknownOrUndef() || RHSVal.isUnknownOrUndef()) { + A = Impossible; + return; + } + BinaryOperator::Opcode Op = B->getOpcode(); + + // Dereference the LHS SVal if this is an assign operation + switch (Op) { + default: + break; + + // Fall through intentional + case BinaryOperator::AddAssign: + case BinaryOperator::SubAssign: + case BinaryOperator::MulAssign: + case BinaryOperator::DivAssign: + case BinaryOperator::AndAssign: + case BinaryOperator::OrAssign: + case BinaryOperator::XorAssign: + case BinaryOperator::ShlAssign: + case BinaryOperator::ShrAssign: + case BinaryOperator::Assign: + // Assign statements have one extra level of indirection + if (!isa<Loc>(LHSVal)) { + A = Impossible; + return; + } + LHSVal = state->getSVal(cast<Loc>(LHSVal)); + } + + + // We now check for various cases which result in an idempotent operation. + + // x op x + switch (Op) { + default: + break; // We don't care about any other operators. + + // Fall through intentional + case BinaryOperator::SubAssign: + case BinaryOperator::DivAssign: + case BinaryOperator::AndAssign: + case BinaryOperator::OrAssign: + case BinaryOperator::XorAssign: + case BinaryOperator::Assign: + case BinaryOperator::Sub: + case BinaryOperator::Div: + case BinaryOperator::And: + case BinaryOperator::Or: + case BinaryOperator::Xor: + case BinaryOperator::LOr: + case BinaryOperator::LAnd: + if (LHSVal != RHSVal) + break; + UpdateAssumption(A, Equal); + return; + } + + // x op 1 + switch (Op) { + default: + break; // We don't care about any other operators. + + // Fall through intentional + case BinaryOperator::MulAssign: + case BinaryOperator::DivAssign: + case BinaryOperator::Mul: + case BinaryOperator::Div: + case BinaryOperator::LOr: + case BinaryOperator::LAnd: + if (!RHSVal.isConstant(1)) + break; + UpdateAssumption(A, RHSis1); + return; + } + + // 1 op x + switch (Op) { + default: + break; // We don't care about any other operators. 
+ + // Fall through intentional + case BinaryOperator::MulAssign: + case BinaryOperator::Mul: + case BinaryOperator::LOr: + case BinaryOperator::LAnd: + if (!LHSVal.isConstant(1)) + break; + UpdateAssumption(A, LHSis1); + return; + } + + // x op 0 + switch (Op) { + default: + break; // We don't care about any other operators. + + // Fall through intentional + case BinaryOperator::AddAssign: + case BinaryOperator::SubAssign: + case BinaryOperator::MulAssign: + case BinaryOperator::AndAssign: + case BinaryOperator::OrAssign: + case BinaryOperator::XorAssign: + case BinaryOperator::Add: + case BinaryOperator::Sub: + case BinaryOperator::Mul: + case BinaryOperator::And: + case BinaryOperator::Or: + case BinaryOperator::Xor: + case BinaryOperator::Shl: + case BinaryOperator::Shr: + case BinaryOperator::LOr: + case BinaryOperator::LAnd: + if (!RHSVal.isConstant(0)) + break; + UpdateAssumption(A, RHSis0); + return; + } + + // 0 op x + switch (Op) { + default: + break; // We don't care about any other operators. + + // Fall through intentional + //case BinaryOperator::AddAssign: // Common false positive + case BinaryOperator::SubAssign: // Check only if unsigned + case BinaryOperator::MulAssign: + case BinaryOperator::DivAssign: + case BinaryOperator::AndAssign: + //case BinaryOperator::OrAssign: // Common false positive + //case BinaryOperator::XorAssign: // Common false positive + case BinaryOperator::ShlAssign: + case BinaryOperator::ShrAssign: + case BinaryOperator::Add: + case BinaryOperator::Sub: + case BinaryOperator::Mul: + case BinaryOperator::Div: + case BinaryOperator::And: + case BinaryOperator::Or: + case BinaryOperator::Xor: + case BinaryOperator::Shl: + case BinaryOperator::Shr: + case BinaryOperator::LOr: + case BinaryOperator::LAnd: + if (!LHSVal.isConstant(0)) + break; + UpdateAssumption(A, LHSis0); + return; + } + + // If we get to this point, there has been a valid use of this operation. 
+ A = Impossible; +} + +void IdempotentOperationChecker::VisitEndAnalysis(ExplodedGraph &G, + BugReporter &B, + bool hasWorkRemaining) { + // If there is any work remaining we cannot be 100% sure about our warnings + if (hasWorkRemaining) + return; + + // Iterate over the hash to see if we have any paths with definite + // idempotent operations. + for (AssumptionMap::const_iterator i = + hash.begin(); i != hash.end(); ++i) { + if (i->second != Impossible) { + // Select the error message. + const char *msg = 0; + switch (i->second) { + case Equal: + msg = "idempotent operation; both operands are always equal in value"; + break; + case LHSis1: + msg = "idempotent operation; the left operand is always 1"; + break; + case RHSis1: + msg = "idempotent operation; the right operand is always 1"; + break; + case LHSis0: + msg = "idempotent operation; the left operand is always 0"; + break; + case RHSis0: + msg = "idempotent operation; the right operand is always 0"; + break; + case Possible: + llvm_unreachable("Operation was never marked with an assumption"); + case Impossible: + llvm_unreachable(0); + } + + // Create the SourceRange Arrays + SourceRange S[2] = { i->first->getLHS()->getSourceRange(), + i->first->getRHS()->getSourceRange() }; + B.EmitBasicReport("Idempotent operation", msg, i->first->getOperatorLoc(), + S, 2); + } + } +} + +// Updates the current assumption given the new assumption +inline void IdempotentOperationChecker::UpdateAssumption(Assumption &A, + const Assumption &New) { + switch (A) { + // If we don't currently have an assumption, set it + case Possible: + A = New; + return; + + // If we have determined that a valid state happened, ignore the new + // assumption. + case Impossible: + return; + + // Any other case means that we had a different assumption last time. We don't + // currently support mixing assumptions for diagnostic reasons, so we set + // our assumption to be impossible. 
+ default: + A = Impossible; + return; + } +} + +// Recursively find any substatements containing macros +bool IdempotentOperationChecker::containsMacro(const Stmt *S) { + if (S->getLocStart().isMacroID()) + return true; + + if (S->getLocEnd().isMacroID()) + return true; + + for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end(); + ++I) + if (const Stmt *child = *I) + if (containsMacro(child)) + return true; + + return false; +} + +// Recursively find any substatements containing enum constants +bool IdempotentOperationChecker::containsEnum(const Stmt *S) { + const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S); + + if (DR && isa<EnumConstantDecl>(DR->getDecl())) + return true; + + for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end(); + ++I) + if (const Stmt *child = *I) + if (containsEnum(child)) + return true; + + return false; +} + +// Recursively find any substatements containing __builtin_offset_of +bool IdempotentOperationChecker::containsBuiltinOffsetOf(const Stmt *S) { + const UnaryOperator *UO = dyn_cast<UnaryOperator>(S); + + if (UO && UO->getOpcode() == UnaryOperator::OffsetOf) + return true; + + for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end(); + ++I) + if (const Stmt *child = *I) + if (containsBuiltinOffsetOf(child)) + return true; + + return false; +} + +bool IdempotentOperationChecker::containsZeroConstant(const Stmt *S) { + const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(S); + if (IL && IL->getValue() == 0) + return true; + + const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(S); + if (FL && FL->getValue().isZero()) + return true; + + for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end(); + ++I) + if (const Stmt *child = *I) + if (containsZeroConstant(child)) + return true; + + return false; +} + +bool IdempotentOperationChecker::containsOneConstant(const Stmt *S) { + const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(S); + if (IL && IL->getValue() == 1) + 
return true; + + const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(S); + const llvm::APFloat one(1.0); + if (FL && FL->getValue().compare(one) == llvm::APFloat::cmpEqual) + return true; + + for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end(); + ++I) + if (const Stmt *child = *I) + if (containsOneConstant(child)) + return true; + + return false; +} + diff --git a/contrib/llvm/tools/clang/lib/Checker/LLVMConventionsChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/LLVMConventionsChecker.cpp index 39ded43..c121257 100644 --- a/contrib/llvm/tools/clang/lib/Checker/LLVMConventionsChecker.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/LLVMConventionsChecker.cpp @@ -34,13 +34,15 @@ static bool IsLLVMStringRef(QualType T) { "class llvm::StringRef"; } -static bool InStdNamespace(const Decl *D) { +/// Check whether the declaration is semantically inside the top-level +/// namespace named by ns. +static bool InNamespace(const Decl *D, llvm::StringRef NS) { const DeclContext *DC = D->getDeclContext(); const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(D->getDeclContext()); if (!ND) return false; const IdentifierInfo *II = ND->getIdentifier(); - if (!II || II->getName() != "std") + if (!II || !II->getName().equals(NS)) return false; DC = ND->getDeclContext(); return isa<TranslationUnitDecl>(DC); @@ -56,50 +58,26 @@ static bool IsStdString(QualType T) { const TypedefDecl *TD = TT->getDecl(); - if (!InStdNamespace(TD)) + if (!InNamespace(TD, "std")) return false; return TD->getName() == "string"; } -static bool InClangNamespace(const Decl *D) { - const DeclContext *DC = D->getDeclContext(); - const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(D->getDeclContext()); - if (!ND) - return false; - const IdentifierInfo *II = ND->getIdentifier(); - if (!II || II->getName() != "clang") - return false; - DC = ND->getDeclContext(); - return isa<TranslationUnitDecl>(DC); -} - -static bool InLLVMNamespace(const Decl *D) { - const DeclContext *DC = 
D->getDeclContext(); - const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(D->getDeclContext()); - if (!ND) - return false; - const IdentifierInfo *II = ND->getIdentifier(); - if (!II || II->getName() != "llvm") - return false; - DC = ND->getDeclContext(); - return isa<TranslationUnitDecl>(DC); -} - static bool IsClangType(const RecordDecl *RD) { - return RD->getName() == "Type" && InClangNamespace(RD); + return RD->getName() == "Type" && InNamespace(RD, "clang"); } static bool IsClangDecl(const RecordDecl *RD) { - return RD->getName() == "Decl" && InClangNamespace(RD); + return RD->getName() == "Decl" && InNamespace(RD, "clang"); } static bool IsClangStmt(const RecordDecl *RD) { - return RD->getName() == "Stmt" && InClangNamespace(RD); + return RD->getName() == "Stmt" && InNamespace(RD, "clang"); } -static bool isClangAttr(const RecordDecl *RD) { - return RD->getName() == "Attr" && InClangNamespace(RD); +static bool IsClangAttr(const RecordDecl *RD) { + return RD->getName() == "Attr" && InNamespace(RD, "clang"); } static bool IsStdVector(QualType T) { @@ -110,7 +88,7 @@ static bool IsStdVector(QualType T) { TemplateName TM = TS->getTemplateName(); TemplateDecl *TD = TM.getAsTemplateDecl(); - if (!TD || !InStdNamespace(TD)) + if (!TD || !InNamespace(TD, "std")) return false; return TD->getName() == "vector"; @@ -124,7 +102,7 @@ static bool IsSmallVector(QualType T) { TemplateName TM = TS->getTemplateName(); TemplateDecl *TD = TM.getAsTemplateDecl(); - if (!TD || !InLLVMNamespace(TD)) + if (!TD || !InNamespace(TD, "llvm")) return false; return TD->getName() == "SmallVector"; @@ -214,7 +192,7 @@ static bool AllocatesMemory(QualType T) { // This type checking could be sped up via dynamic programming. 
static bool IsPartOfAST(const CXXRecordDecl *R) { - if (IsClangStmt(R) || IsClangType(R) || IsClangDecl(R) || isClangAttr(R)) + if (IsClangStmt(R) || IsClangType(R) || IsClangDecl(R) || IsClangAttr(R)) return true; for (CXXRecordDecl::base_class_const_iterator I = R->bases_begin(), @@ -316,7 +294,7 @@ static void ScanCodeDecls(DeclContext *DC, BugReporter &BR) { Decl *D = *I; - if (D->getBody()) + if (D->hasBody()) CheckStringRefAssignedTemporary(D, BR); if (CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(D)) diff --git a/contrib/llvm/tools/clang/lib/Checker/Makefile b/contrib/llvm/tools/clang/lib/Checker/Makefile index c45ab29..1bc6529 100644 --- a/contrib/llvm/tools/clang/lib/Checker/Makefile +++ b/contrib/llvm/tools/clang/lib/Checker/Makefile @@ -11,11 +11,9 @@ # ##===----------------------------------------------------------------------===## -LEVEL = ../../../.. +CLANG_LEVEL := ../.. LIBRARYNAME := clangChecker BUILD_ARCHIVE = 1 -CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include - -include $(LEVEL)/Makefile.common +include $(CLANG_LEVEL)/Makefile diff --git a/contrib/llvm/tools/clang/lib/Checker/MallocChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/MallocChecker.cpp index 086dbd8..dcc21ca 100644 --- a/contrib/llvm/tools/clang/lib/Checker/MallocChecker.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/MallocChecker.cpp @@ -59,15 +59,16 @@ class MallocChecker : public CheckerVisitor<MallocChecker> { BuiltinBug *BT_DoubleFree; BuiltinBug *BT_Leak; BuiltinBug *BT_UseFree; - IdentifierInfo *II_malloc, *II_free, *II_realloc; + BuiltinBug *BT_BadFree; + IdentifierInfo *II_malloc, *II_free, *II_realloc, *II_calloc; public: MallocChecker() - : BT_DoubleFree(0), BT_Leak(0), BT_UseFree(0), - II_malloc(0), II_free(0), II_realloc(0) {} + : BT_DoubleFree(0), BT_Leak(0), BT_UseFree(0), BT_BadFree(0), + II_malloc(0), II_free(0), II_realloc(0), II_calloc(0) {} static void *getTag(); bool EvalCallExpr(CheckerContext &C, const CallExpr *CE); - void 
EvalDeadSymbols(CheckerContext &C,const Stmt *S,SymbolReaper &SymReaper); + void EvalDeadSymbols(CheckerContext &C, SymbolReaper &SymReaper); void EvalEndPath(GREndPathNodeBuilder &B, void *tag, GRExprEngine &Eng); void PreVisitReturnStmt(CheckerContext &C, const ReturnStmt *S); const GRState *EvalAssume(const GRState *state, SVal Cond, bool Assumption); @@ -76,12 +77,24 @@ public: private: void MallocMem(CheckerContext &C, const CallExpr *CE); const GRState *MallocMemAux(CheckerContext &C, const CallExpr *CE, - const Expr *SizeEx, const GRState *state); + const Expr *SizeEx, SVal Init, + const GRState *state) { + return MallocMemAux(C, CE, state->getSVal(SizeEx), Init, state); + } + const GRState *MallocMemAux(CheckerContext &C, const CallExpr *CE, + SVal SizeEx, SVal Init, + const GRState *state); + void FreeMem(CheckerContext &C, const CallExpr *CE); const GRState *FreeMemAux(CheckerContext &C, const CallExpr *CE, const GRState *state); void ReallocMem(CheckerContext &C, const CallExpr *CE); + void CallocMem(CheckerContext &C, const CallExpr *CE); + + bool SummarizeValue(llvm::raw_ostream& os, SVal V); + bool SummarizeRegion(llvm::raw_ostream& os, const MemRegion *MR); + void ReportBadFree(CheckerContext &C, SVal ArgVal, SourceRange range); }; } // end anonymous namespace @@ -120,6 +133,8 @@ bool MallocChecker::EvalCallExpr(CheckerContext &C, const CallExpr *CE) { II_free = &Ctx.Idents.get("free"); if (!II_realloc) II_realloc = &Ctx.Idents.get("realloc"); + if (!II_calloc) + II_calloc = &Ctx.Idents.get("calloc"); if (FD->getIdentifier() == II_malloc) { MallocMem(C, CE); @@ -136,30 +151,44 @@ bool MallocChecker::EvalCallExpr(CheckerContext &C, const CallExpr *CE) { return true; } + if (FD->getIdentifier() == II_calloc) { + CallocMem(C, CE); + return true; + } + return false; } void MallocChecker::MallocMem(CheckerContext &C, const CallExpr *CE) { - const GRState *state = MallocMemAux(C, CE, CE->getArg(0), C.getState()); + const GRState *state = MallocMemAux(C, 
CE, CE->getArg(0), UndefinedVal(), + C.getState()); C.addTransition(state); } const GRState *MallocChecker::MallocMemAux(CheckerContext &C, const CallExpr *CE, - const Expr *SizeEx, + SVal Size, SVal Init, const GRState *state) { unsigned Count = C.getNodeBuilder().getCurrentBlockCount(); ValueManager &ValMgr = C.getValueManager(); + // Set the return value. SVal RetVal = ValMgr.getConjuredSymbolVal(NULL, CE, CE->getType(), Count); + state = state->BindExpr(CE, RetVal); - SVal Size = state->getSVal(SizeEx); + // Fill the region with the initialization value. + state = state->bindDefault(RetVal, Init); - state = C.getEngine().getStoreManager().setExtent(state, RetVal.getAsRegion(), - Size); + // Set the region's extent equal to the Size parameter. + const SymbolicRegion *R = cast<SymbolicRegion>(RetVal.getAsRegion()); + DefinedOrUnknownSVal Extent = R->getExtent(ValMgr); + DefinedOrUnknownSVal DefinedSize = cast<DefinedOrUnknownSVal>(Size); + + SValuator &SVator = ValMgr.getSValuator(); + DefinedOrUnknownSVal ExtentMatchesSize = + SVator.EvalEQ(state, Extent, DefinedSize); + state = state->Assume(ExtentMatchesSize, true); - state = state->BindExpr(CE, RetVal); - SymbolRef Sym = RetVal.getAsLocSymbol(); assert(Sym); // Set the symbol's state to Allocated. @@ -175,18 +204,59 @@ void MallocChecker::FreeMem(CheckerContext &C, const CallExpr *CE) { const GRState *MallocChecker::FreeMemAux(CheckerContext &C, const CallExpr *CE, const GRState *state) { - SVal ArgVal = state->getSVal(CE->getArg(0)); + const Expr *ArgExpr = CE->getArg(0); + SVal ArgVal = state->getSVal(ArgExpr); // If ptr is NULL, no operation is preformed. if (ArgVal.isZeroConstant()) return state; + + // Unknown values could easily be okay + // Undefined values are handled elsewhere + if (ArgVal.isUnknownOrUndef()) + return state; - SymbolRef Sym = ArgVal.getAsLocSymbol(); - + const MemRegion *R = ArgVal.getAsRegion(); + + // Nonlocs can't be freed, of course. 
+ // Non-region locations (labels and fixed addresses) also shouldn't be freed. + if (!R) { + ReportBadFree(C, ArgVal, ArgExpr->getSourceRange()); + return NULL; + } + + R = R->StripCasts(); + + // Blocks might show up as heap data, but should not be free()d + if (isa<BlockDataRegion>(R)) { + ReportBadFree(C, ArgVal, ArgExpr->getSourceRange()); + return NULL; + } + + const MemSpaceRegion *MS = R->getMemorySpace(); + + // Parameters, locals, statics, and globals shouldn't be freed. + if (!(isa<UnknownSpaceRegion>(MS) || isa<HeapSpaceRegion>(MS))) { + // FIXME: at the time this code was written, malloc() regions were + // represented by conjured symbols, which are all in UnknownSpaceRegion. + // This means that there isn't actually anything from HeapSpaceRegion + // that should be freed, even though we allow it here. + // Of course, free() can work on memory allocated outside the current + // function, so UnknownSpaceRegion is always a possibility. + // False negatives are better than false positives. + + ReportBadFree(C, ArgVal, ArgExpr->getSourceRange()); + return NULL; + } + + const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R); // Various cases could lead to non-symbol values here. - if (!Sym) + // For now, ignore them. + if (!SR) return state; + SymbolRef Sym = SR->getSymbol(); + const RefState *RS = state->get<RegionState>(Sym); // If the symbol has not been tracked, return. 
This is possible when free() is @@ -214,6 +284,135 @@ const GRState *MallocChecker::FreeMemAux(CheckerContext &C, const CallExpr *CE, return state->set<RegionState>(Sym, RefState::getReleased(CE)); } +bool MallocChecker::SummarizeValue(llvm::raw_ostream& os, SVal V) { + if (nonloc::ConcreteInt *IntVal = dyn_cast<nonloc::ConcreteInt>(&V)) + os << "an integer (" << IntVal->getValue() << ")"; + else if (loc::ConcreteInt *ConstAddr = dyn_cast<loc::ConcreteInt>(&V)) + os << "a constant address (" << ConstAddr->getValue() << ")"; + else if (loc::GotoLabel *Label = dyn_cast<loc::GotoLabel>(&V)) + os << "the address of the label '" + << Label->getLabel()->getID()->getName() + << "'"; + else + return false; + + return true; +} + +bool MallocChecker::SummarizeRegion(llvm::raw_ostream& os, + const MemRegion *MR) { + switch (MR->getKind()) { + case MemRegion::FunctionTextRegionKind: { + const FunctionDecl *FD = cast<FunctionTextRegion>(MR)->getDecl(); + if (FD) + os << "the address of the function '" << FD << "'"; + else + os << "the address of a function"; + return true; + } + case MemRegion::BlockTextRegionKind: + os << "block text"; + return true; + case MemRegion::BlockDataRegionKind: + // FIXME: where the block came from? 
+ os << "a block"; + return true; + default: { + const MemSpaceRegion *MS = MR->getMemorySpace(); + + switch (MS->getKind()) { + case MemRegion::StackLocalsSpaceRegionKind: { + const VarRegion *VR = dyn_cast<VarRegion>(MR); + const VarDecl *VD; + if (VR) + VD = VR->getDecl(); + else + VD = NULL; + + if (VD) + os << "the address of the local variable '" << VD->getName() << "'"; + else + os << "the address of a local stack variable"; + return true; + } + case MemRegion::StackArgumentsSpaceRegionKind: { + const VarRegion *VR = dyn_cast<VarRegion>(MR); + const VarDecl *VD; + if (VR) + VD = VR->getDecl(); + else + VD = NULL; + + if (VD) + os << "the address of the parameter '" << VD->getName() << "'"; + else + os << "the address of a parameter"; + return true; + } + case MemRegion::NonStaticGlobalSpaceRegionKind: + case MemRegion::StaticGlobalSpaceRegionKind: { + const VarRegion *VR = dyn_cast<VarRegion>(MR); + const VarDecl *VD; + if (VR) + VD = VR->getDecl(); + else + VD = NULL; + + if (VD) { + if (VD->isStaticLocal()) + os << "the address of the static variable '" << VD->getName() << "'"; + else + os << "the address of the global variable '" << VD->getName() << "'"; + } else + os << "the address of a global variable"; + return true; + } + default: + return false; + } + } + } +} + +void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal, + SourceRange range) { + ExplodedNode *N = C.GenerateSink(); + if (N) { + if (!BT_BadFree) + BT_BadFree = new BuiltinBug("Bad free"); + + llvm::SmallString<100> buf; + llvm::raw_svector_ostream os(buf); + + const MemRegion *MR = ArgVal.getAsRegion(); + if (MR) { + while (const ElementRegion *ER = dyn_cast<ElementRegion>(MR)) + MR = ER->getSuperRegion(); + + // Special case for alloca() + if (isa<AllocaRegion>(MR)) + os << "Argument to free() was allocated by alloca(), not malloc()"; + else { + os << "Argument to free() is "; + if (SummarizeRegion(os, MR)) + os << ", which is not memory allocated by malloc()"; + else + os << 
"not memory allocated by malloc()"; + } + } else { + os << "Argument to free() is "; + if (SummarizeValue(os, ArgVal)) + os << ", which is not memory allocated by malloc()"; + else + os << "not memory allocated by malloc()"; + } + + EnhancedBugReport *R = new EnhancedBugReport(*BT_BadFree, os.str(), N); + R->addRange(range); + C.EmitReport(R); + } +} + void MallocChecker::ReallocMem(CheckerContext &C, const CallExpr *CE) { const GRState *state = C.getState(); const Expr *Arg0 = CE->getArg(0); @@ -234,7 +433,8 @@ void MallocChecker::ReallocMem(CheckerContext &C, const CallExpr *CE) { if (Sym) stateEqual = stateEqual->set<RegionState>(Sym, RefState::getReleased(CE)); - const GRState *stateMalloc = MallocMemAux(C, CE, CE->getArg(1), stateEqual); + const GRState *stateMalloc = MallocMemAux(C, CE, CE->getArg(1), + UndefinedVal(), stateEqual); C.addTransition(stateMalloc); } @@ -256,15 +456,31 @@ void MallocChecker::ReallocMem(CheckerContext &C, const CallExpr *CE) { if (stateFree) { // FIXME: We should copy the content of the original buffer. 
const GRState *stateRealloc = MallocMemAux(C, CE, CE->getArg(1), - stateFree); + UnknownVal(), stateFree); C.addTransition(stateRealloc); } } } } -void MallocChecker::EvalDeadSymbols(CheckerContext &C, const Stmt *S, - SymbolReaper &SymReaper) { +void MallocChecker::CallocMem(CheckerContext &C, const CallExpr *CE) { + const GRState *state = C.getState(); + + ValueManager &ValMgr = C.getValueManager(); + SValuator &SVator = C.getSValuator(); + + SVal Count = state->getSVal(CE->getArg(0)); + SVal EleSize = state->getSVal(CE->getArg(1)); + SVal TotalSize = SVator.EvalBinOp(state, BinaryOperator::Mul, Count, EleSize, + ValMgr.getContext().getSizeType()); + + SVal Zero = ValMgr.makeZeroVal(ValMgr.getContext().CharTy); + + state = MallocMemAux(C, CE, TotalSize, Zero, state); + C.addTransition(state); +} + +void MallocChecker::EvalDeadSymbols(CheckerContext &C,SymbolReaper &SymReaper) { for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(), E = SymReaper.dead_end(); I != E; ++I) { SymbolRef Sym = *I; diff --git a/contrib/llvm/tools/clang/lib/Checker/MemRegion.cpp b/contrib/llvm/tools/clang/lib/Checker/MemRegion.cpp index 575458c..9cfeb7a 100644 --- a/contrib/llvm/tools/clang/lib/Checker/MemRegion.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/MemRegion.cpp @@ -14,6 +14,7 @@ //===----------------------------------------------------------------------===// #include "clang/Checker/PathSensitive/MemRegion.h" +#include "clang/Checker/PathSensitive/ValueManager.h" #include "clang/Analysis/AnalysisContext.h" #include "clang/Analysis/Support/BumpVector.h" #include "clang/AST/CharUnits.h" @@ -29,22 +30,22 @@ template<typename RegionTy> struct MemRegionManagerTrait; template <typename RegionTy, typename A1> RegionTy* MemRegionManager::getRegion(const A1 a1) { - + const typename MemRegionManagerTrait<RegionTy>::SuperRegionTy *superRegion = MemRegionManagerTrait<RegionTy>::getSuperRegion(*this, a1); - + llvm::FoldingSetNodeID ID; RegionTy::ProfileRegion(ID, a1, superRegion); 
void* InsertPos; RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID, InsertPos)); - + if (!R) { R = (RegionTy*) A.Allocate<RegionTy>(); new (R) RegionTy(a1, superRegion); Regions.InsertNode(R, InsertPos); } - + return R; } @@ -56,72 +57,72 @@ RegionTy* MemRegionManager::getSubRegion(const A1 a1, void* InsertPos; RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID, InsertPos)); - + if (!R) { R = (RegionTy*) A.Allocate<RegionTy>(); new (R) RegionTy(a1, superRegion); Regions.InsertNode(R, InsertPos); } - + return R; } template <typename RegionTy, typename A1, typename A2> RegionTy* MemRegionManager::getRegion(const A1 a1, const A2 a2) { - + const typename MemRegionManagerTrait<RegionTy>::SuperRegionTy *superRegion = MemRegionManagerTrait<RegionTy>::getSuperRegion(*this, a1, a2); - + llvm::FoldingSetNodeID ID; RegionTy::ProfileRegion(ID, a1, a2, superRegion); void* InsertPos; RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID, InsertPos)); - + if (!R) { R = (RegionTy*) A.Allocate<RegionTy>(); new (R) RegionTy(a1, a2, superRegion); Regions.InsertNode(R, InsertPos); } - + return R; } template <typename RegionTy, typename A1, typename A2> RegionTy* MemRegionManager::getSubRegion(const A1 a1, const A2 a2, const MemRegion *superRegion) { - + llvm::FoldingSetNodeID ID; RegionTy::ProfileRegion(ID, a1, a2, superRegion); void* InsertPos; RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID, InsertPos)); - + if (!R) { R = (RegionTy*) A.Allocate<RegionTy>(); new (R) RegionTy(a1, a2, superRegion); Regions.InsertNode(R, InsertPos); } - + return R; } template <typename RegionTy, typename A1, typename A2, typename A3> RegionTy* MemRegionManager::getSubRegion(const A1 a1, const A2 a2, const A3 a3, const MemRegion *superRegion) { - + llvm::FoldingSetNodeID ID; RegionTy::ProfileRegion(ID, a1, a2, a3, superRegion); void* InsertPos; RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID, InsertPos)); - + if (!R) 
{ R = (RegionTy*) A.Allocate<RegionTy>(); new (R) RegionTy(a1, a2, a3, superRegion); Regions.InsertNode(R, InsertPos); } - + return R; } @@ -171,6 +172,53 @@ const StackFrameContext *VarRegion::getStackFrame() const { } //===----------------------------------------------------------------------===// +// Region extents. +//===----------------------------------------------------------------------===// + +DefinedOrUnknownSVal DeclRegion::getExtent(ValueManager& ValMgr) const { + ASTContext& Ctx = ValMgr.getContext(); + QualType T = getDesugaredValueType(Ctx); + + if (isa<VariableArrayType>(T)) + return nonloc::SymbolVal(ValMgr.getSymbolManager().getExtentSymbol(this)); + if (isa<IncompleteArrayType>(T)) + return UnknownVal(); + + CharUnits Size = Ctx.getTypeSizeInChars(T); + QualType SizeTy = Ctx.getSizeType(); + return ValMgr.makeIntVal(Size.getQuantity(), SizeTy); +} + +DefinedOrUnknownSVal FieldRegion::getExtent(ValueManager& ValMgr) const { + DefinedOrUnknownSVal Extent = DeclRegion::getExtent(ValMgr); + + // A zero-length array at the end of a struct often stands for dynamically- + // allocated extra memory. + if (Extent.isZeroConstant()) { + ASTContext& Ctx = ValMgr.getContext(); + QualType T = getDesugaredValueType(Ctx); + + if (isa<ConstantArrayType>(T)) + return UnknownVal(); + } + + return Extent; +} + +DefinedOrUnknownSVal AllocaRegion::getExtent(ValueManager& ValMgr) const { + return nonloc::SymbolVal(ValMgr.getSymbolManager().getExtentSymbol(this)); +} + +DefinedOrUnknownSVal SymbolicRegion::getExtent(ValueManager& ValMgr) const { + return nonloc::SymbolVal(ValMgr.getSymbolManager().getExtentSymbol(this)); +} + +DefinedOrUnknownSVal StringRegion::getExtent(ValueManager& ValMgr) const { + QualType SizeTy = ValMgr.getContext().getSizeType(); + return ValMgr.makeIntVal(getStringLiteral()->getByteLength()+1, SizeTy); +} + +//===----------------------------------------------------------------------===// // FoldingSet profiling. 
//===----------------------------------------------------------------------===// @@ -183,6 +231,11 @@ void StackSpaceRegion::Profile(llvm::FoldingSetNodeID &ID) const { ID.AddPointer(getStackFrame()); } +void StaticGlobalSpaceRegion::Profile(llvm::FoldingSetNodeID &ID) const { + ID.AddInteger((unsigned)getKind()); + ID.AddPointer(getCodeRegion()); +} + void StringRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, const StringLiteral* Str, const MemRegion* superRegion) { @@ -226,7 +279,7 @@ void CXXThisRegion::ProfileRegion(llvm::FoldingSetNodeID &ID, void CXXThisRegion::Profile(llvm::FoldingSetNodeID &ID) const { CXXThisRegion::ProfileRegion(ID, ThisPointerTy, superRegion); } - + void DeclRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, const Decl* D, const MemRegion* superRegion, Kind k) { ID.AddInteger((unsigned) k); @@ -349,7 +402,6 @@ void BlockDataRegion::dumpToStream(llvm::raw_ostream& os) const { os << "block_data{" << BC << '}'; } - void CompoundLiteralRegion::dumpToStream(llvm::raw_ostream& os) const { // FIXME: More elaborate pretty-printing. os << "{ " << (void*) CL << " }"; @@ -368,6 +420,10 @@ void FieldRegion::dumpToStream(llvm::raw_ostream& os) const { os << superRegion << "->" << getDecl(); } +void NonStaticGlobalSpaceRegion::dumpToStream(llvm::raw_ostream &os) const { + os << "NonStaticGlobalSpaceRegion"; +} + void ObjCIvarRegion::dumpToStream(llvm::raw_ostream& os) const { os << "ivar{" << superRegion << ',' << getDecl() << '}'; } @@ -392,6 +448,10 @@ void RegionRawOffset::dumpToStream(llvm::raw_ostream& os) const { os << "raw_offset{" << getRegion() << ',' << getByteOffset() << '}'; } +void StaticGlobalSpaceRegion::dumpToStream(llvm::raw_ostream &os) const { + os << "StaticGlobalsMemSpace{" << CR << '}'; +} + //===----------------------------------------------------------------------===// // MemRegionManager methods. 
//===----------------------------------------------------------------------===// @@ -412,7 +472,7 @@ const REG *MemRegionManager::LazyAllocate(REG*& region, ARG a) { region = (REG*) A.Allocate<REG>(); new (region) REG(this, a); } - + return region; } @@ -442,8 +502,18 @@ MemRegionManager::getStackArgumentsRegion(const StackFrameContext *STC) { return R; } -const GlobalsSpaceRegion *MemRegionManager::getGlobalsRegion() { - return LazyAllocate(globals); +const GlobalsSpaceRegion +*MemRegionManager::getGlobalsRegion(const CodeTextRegion *CR) { + if (!CR) + return LazyAllocate(globals); + + StaticGlobalSpaceRegion *&R = StaticsGlobalSpaceRegions[CR]; + if (R) + return R; + + R = A.Allocate<StaticGlobalSpaceRegion>(); + new (R) StaticGlobalSpaceRegion(this, CR); + return R; } const HeapSpaceRegion *MemRegionManager::getHeapRegion() { @@ -462,7 +532,7 @@ const MemSpaceRegion *MemRegionManager::getCodeRegion() { // Constructing regions. //===----------------------------------------------------------------------===// -const StringRegion* MemRegionManager::getStringRegion(const StringLiteral* Str) { +const StringRegion* MemRegionManager::getStringRegion(const StringLiteral* Str){ return getSubRegion<StringRegion>(Str, getGlobalsRegion()); } @@ -470,7 +540,9 @@ const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D, const LocationContext *LC) { const MemRegion *sReg = 0; - if (D->hasLocalStorage()) { + if (D->hasGlobalStorage() && !D->isStaticLocal()) + sReg = getGlobalsRegion(); + else { // FIXME: Once we implement scope handling, we will need to properly lookup // 'D' to the proper LocationContext. const DeclContext *DC = D->getDeclContext(); @@ -479,15 +551,32 @@ const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D, if (!STC) sReg = getUnknownRegion(); else { - sReg = isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D) - ? 
static_cast<const MemRegion*>(getStackArgumentsRegion(STC)) - : static_cast<const MemRegion*>(getStackLocalsRegion(STC)); + if (D->hasLocalStorage()) { + sReg = isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D) + ? static_cast<const MemRegion*>(getStackArgumentsRegion(STC)) + : static_cast<const MemRegion*>(getStackLocalsRegion(STC)); + } + else { + assert(D->isStaticLocal()); + const Decl *D = STC->getDecl(); + if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) + sReg = getGlobalsRegion(getFunctionTextRegion(FD)); + else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) { + const BlockTextRegion *BTR = + getBlockTextRegion(BD, + C.getCanonicalType(BD->getSignatureAsWritten()->getType()), + STC->getAnalysisContext()); + sReg = getGlobalsRegion(BTR); + } + else { + // FIXME: For ObjC-methods, we need a new CodeTextRegion. For now + // just use the main global memspace. + sReg = getGlobalsRegion(); + } + } } } - else { - sReg = getGlobalsRegion(); - } - + return getSubRegion<VarRegion>(D, sReg); } @@ -500,10 +589,10 @@ const BlockDataRegion * MemRegionManager::getBlockDataRegion(const BlockTextRegion *BC, const LocationContext *LC) { const MemRegion *sReg = 0; - - if (LC) { + + if (LC) { // FIXME: Once we implement scope handling, we want the parent region - // to be the scope. + // to be the scope. 
const StackFrameContext *STC = LC->getCurrentStackFrame(); assert(STC); sReg = getStackLocalsRegion(STC); @@ -520,9 +609,9 @@ MemRegionManager::getBlockDataRegion(const BlockTextRegion *BC, const CompoundLiteralRegion* MemRegionManager::getCompoundLiteralRegion(const CompoundLiteralExpr* CL, const LocationContext *LC) { - + const MemRegion *sReg = 0; - + if (CL->isFileScope()) sReg = getGlobalsRegion(); else { @@ -530,7 +619,7 @@ MemRegionManager::getCompoundLiteralRegion(const CompoundLiteralExpr* CL, assert(STC); sReg = getStackLocalsRegion(STC); } - + return getSubRegion<CompoundLiteralRegion>(CL, sReg); } @@ -749,24 +838,24 @@ void BlockDataRegion::LazyInitializeReferencedVars() { AnalysisContext *AC = getCodeRegion()->getAnalysisContext(); AnalysisContext::referenced_decls_iterator I, E; llvm::tie(I, E) = AC->getReferencedBlockVars(BC->getDecl()); - + if (I == E) { ReferencedVars = (void*) 0x1; return; } - + MemRegionManager &MemMgr = *getMemRegionManager(); llvm::BumpPtrAllocator &A = MemMgr.getAllocator(); BumpVectorContext BC(A); - + typedef BumpVector<const MemRegion*> VarVec; VarVec *BV = (VarVec*) A.Allocate<VarVec>(); new (BV) VarVec(BC, E - I); - + for ( ; I != E; ++I) { const VarDecl *VD = *I; const VarRegion *VR = 0; - + if (!VD->getAttr<BlocksAttr>() && VD->hasLocalStorage()) VR = MemMgr.getVarRegion(VD, this); else { @@ -776,11 +865,11 @@ void BlockDataRegion::LazyInitializeReferencedVars() { VR = MemMgr.getVarRegion(VD, MemMgr.getUnknownRegion()); } } - + assert(VR); BV->push_back(VR, BC); } - + ReferencedVars = BV; } @@ -790,7 +879,7 @@ BlockDataRegion::referenced_vars_begin() const { BumpVector<const MemRegion*> *Vec = static_cast<BumpVector<const MemRegion*>*>(ReferencedVars); - + return BlockDataRegion::referenced_vars_iterator(Vec == (void*) 0x1 ? 
NULL : Vec->begin()); } @@ -801,7 +890,7 @@ BlockDataRegion::referenced_vars_end() const { BumpVector<const MemRegion*> *Vec = static_cast<BumpVector<const MemRegion*>*>(ReferencedVars); - + return BlockDataRegion::referenced_vars_iterator(Vec == (void*) 0x1 ? NULL : Vec->end()); } diff --git a/contrib/llvm/tools/clang/lib/Checker/OSAtomicChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/OSAtomicChecker.cpp index e743528..1ea1bd9 100644 --- a/contrib/llvm/tools/clang/lib/Checker/OSAtomicChecker.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/OSAtomicChecker.cpp @@ -100,7 +100,13 @@ bool OSAtomicChecker::EvalOSAtomicCompareAndSwap(CheckerContext &C, const GRState *state = C.getState(); ExplodedNodeSet Tmp; SVal location = state->getSVal(theValueExpr); - // Here we should use the value type of the region as the load type. + // Here we should use the value type of the region as the load type, because + // we are simulating the semantics of the function, not the semantics of + // passing argument. So the type of theValue expr is not we are loading. + // But usually the type of the varregion is not the type we want either, + // we still need to do a CastRetrievedVal in store manager. So actually this + // LoadTy specifying can be omitted. But we put it here to emphasize the + // semantics. 
QualType LoadTy; if (const TypedRegion *TR = dyn_cast_or_null<TypedRegion>(location.getAsRegion())) { diff --git a/contrib/llvm/tools/clang/lib/Checker/PathDiagnostic.cpp b/contrib/llvm/tools/clang/lib/Checker/PathDiagnostic.cpp index 963923c..cf05a7d 100644 --- a/contrib/llvm/tools/clang/lib/Checker/PathDiagnostic.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/PathDiagnostic.cpp @@ -107,7 +107,7 @@ void PathDiagnosticClient::HandleDiagnostic(Diagnostic::Level DiagLevel, new PathDiagnosticEventPiece(Info.getLocation(), StrC.str()); for (unsigned i = 0, e = Info.getNumRanges(); i != e; ++i) - P->addRange(Info.getRange(i)); + P->addRange(Info.getRange(i).getAsRange()); for (unsigned i = 0, e = Info.getNumFixItHints(); i != e; ++i) P->addFixItHint(Info.getFixItHint(i)); D->push_front(P); @@ -181,15 +181,8 @@ PathDiagnosticRange PathDiagnosticLocation::asRange() const { if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) return MD->getSourceRange(); if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { - // FIXME: We would like to always get the function body, even - // when it needs to be de-serialized, but getting the - // ASTContext here requires significant changes. 
- if (Stmt *Body = FD->getBody()) { - if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Body)) - return CS->getSourceRange(); - else - return cast<CXXTryStmt>(Body)->getSourceRange(); - } + if (Stmt *Body = FD->getBody()) + return Body->getSourceRange(); } else { SourceLocation L = D->getLocation(); diff --git a/contrib/llvm/tools/clang/lib/Frontend/PlistDiagnostics.cpp b/contrib/llvm/tools/clang/lib/Checker/PlistDiagnostics.cpp index 5706a07..13accbb 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/PlistDiagnostics.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/PlistDiagnostics.cpp @@ -11,7 +11,7 @@ // //===----------------------------------------------------------------------===// -#include "clang/Frontend/PathDiagnosticClients.h" +#include "clang/Checker/PathDiagnosticClients.h" #include "clang/Checker/BugReporter/PathDiagnostic.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/FileManager.h" diff --git a/contrib/llvm/tools/clang/lib/Checker/RangeConstraintManager.cpp b/contrib/llvm/tools/clang/lib/Checker/RangeConstraintManager.cpp index c904c33..2a35d32 100644 --- a/contrib/llvm/tools/clang/lib/Checker/RangeConstraintManager.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/RangeConstraintManager.cpp @@ -105,97 +105,69 @@ public: return ranges.isSingleton() ? ranges.begin()->getConcreteValue() : 0; } - /// AddEQ - Create a new RangeSet with the additional constraint that the - /// value be equal to V. - RangeSet AddEQ(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) { - // Search for a range that includes 'V'. If so, return a new RangeSet - // representing { [V, V] }. - for (PrimRangeSet::iterator i = begin(), e = end(); i!=e; ++i) - if (i->Includes(V)) - return RangeSet(F, V, V); - - return RangeSet(F); - } - - /// AddNE - Create a new RangeSet with the additional constraint that the - /// value be not be equal to V. 
- RangeSet AddNE(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) { - PrimRangeSet newRanges = ranges; - - // FIXME: We can perhaps enhance ImmutableSet to do this search for us - // in log(N) time using the sorted property of the internal AVL tree. - for (iterator i = begin(), e = end(); i != e; ++i) { - if (i->Includes(V)) { - // Remove the old range. - newRanges = F.Remove(newRanges, *i); - // Split the old range into possibly one or two ranges. - if (V != i->From()) - newRanges = F.Add(newRanges, Range(i->From(), BV.Sub1(V))); - if (V != i->To()) - newRanges = F.Add(newRanges, Range(BV.Add1(V), i->To())); - // All of the ranges are non-overlapping, so we can stop. +private: + void IntersectInRange(BasicValueFactory &BV, Factory &F, + const llvm::APSInt &Lower, + const llvm::APSInt &Upper, + PrimRangeSet &newRanges, + PrimRangeSet::iterator &i, + PrimRangeSet::iterator &e) const { + // There are six cases for each range R in the set: + // 1. R is entirely before the intersection range. + // 2. R is entirely after the intersection range. + // 3. R contains the entire intersection range. + // 4. R starts before the intersection range and ends in the middle. + // 5. R starts in the middle of the intersection range and ends after it. + // 6. R is entirely contained in the intersection range. + // These correspond to each of the conditions below. + for (/* i = begin(), e = end() */; i != e; ++i) { + if (i->To() < Lower) { + continue; + } + if (i->From() > Upper) { break; } - } - - return newRanges; - } - - /// AddNE - Create a new RangeSet with the additional constraint that the - /// value be less than V. 
- RangeSet AddLT(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) { - PrimRangeSet newRanges = F.GetEmptySet(); - - for (iterator i = begin(), e = end() ; i != e ; ++i) { - if (i->Includes(V) && i->From() < V) - newRanges = F.Add(newRanges, Range(i->From(), BV.Sub1(V))); - else if (i->To() < V) - newRanges = F.Add(newRanges, *i); - } - - return newRanges; - } - - RangeSet AddLE(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) { - PrimRangeSet newRanges = F.GetEmptySet(); - for (iterator i = begin(), e = end(); i != e; ++i) { - // Strictly we should test for includes *V + 1, but no harm is - // done by this formulation - if (i->Includes(V)) - newRanges = F.Add(newRanges, Range(i->From(), V)); - else if (i->To() <= V) - newRanges = F.Add(newRanges, *i); - } - - return newRanges; - } - - RangeSet AddGT(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) { - PrimRangeSet newRanges = F.GetEmptySet(); - - for (PrimRangeSet::iterator i = begin(), e = end(); i != e; ++i) { - if (i->Includes(V) && i->To() > V) - newRanges = F.Add(newRanges, Range(BV.Add1(V), i->To())); - else if (i->From() > V) - newRanges = F.Add(newRanges, *i); + if (i->Includes(Lower)) { + if (i->Includes(Upper)) { + newRanges = F.Add(newRanges, Range(BV.getValue(Lower), + BV.getValue(Upper))); + break; + } else + newRanges = F.Add(newRanges, Range(BV.getValue(Lower), i->To())); + } else { + if (i->Includes(Upper)) { + newRanges = F.Add(newRanges, Range(i->From(), BV.getValue(Upper))); + break; + } else + newRanges = F.Add(newRanges, *i); + } } - - return newRanges; } - RangeSet AddGE(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) { +public: + // Returns a set containing the values in the receiving set, intersected with + // the closed range [Lower, Upper]. Unlike the Range type, this range uses + // modular arithmetic, corresponding to the common treatment of C integer + // overflow. 
Thus, if the Lower bound is greater than the Upper bound, the + // range is taken to wrap around. This is equivalent to taking the + // intersection with the two ranges [Min, Upper] and [Lower, Max], + // or, alternatively, /removing/ all integers between Upper and Lower. + RangeSet Intersect(BasicValueFactory &BV, Factory &F, + const llvm::APSInt &Lower, + const llvm::APSInt &Upper) const { PrimRangeSet newRanges = F.GetEmptySet(); - for (PrimRangeSet::iterator i = begin(), e = end(); i != e; ++i) { - // Strictly we should test for includes *V - 1, but no harm is - // done by this formulation - if (i->Includes(V)) - newRanges = F.Add(newRanges, Range(V, i->To())); - else if (i->From() >= V) - newRanges = F.Add(newRanges, *i); + PrimRangeSet::iterator i = begin(), e = end(); + if (Lower <= Upper) + IntersectInRange(BV, F, Lower, Upper, newRanges, i, e); + else { + // The order of the next two statements is important! + // IntersectInRange() does not reset the iteration state for i and e. + // Therefore, the lower range most be handled first. 
+ IntersectInRange(BV, F, BV.getMinValue(Upper), Upper, newRanges, i, e); + IntersectInRange(BV, F, Lower, BV.getMaxValue(Lower), newRanges, i, e); } - return newRanges; } @@ -237,23 +209,29 @@ public: RangeConstraintManager(GRSubEngine &subengine) : SimpleConstraintManager(subengine) {} - const GRState* AssumeSymNE(const GRState* St, SymbolRef sym, - const llvm::APSInt& V); + const GRState* AssumeSymNE(const GRState* state, SymbolRef sym, + const llvm::APSInt& Int, + const llvm::APSInt& Adjustment); - const GRState* AssumeSymEQ(const GRState* St, SymbolRef sym, - const llvm::APSInt& V); + const GRState* AssumeSymEQ(const GRState* state, SymbolRef sym, + const llvm::APSInt& Int, + const llvm::APSInt& Adjustment); - const GRState* AssumeSymLT(const GRState* St, SymbolRef sym, - const llvm::APSInt& V); + const GRState* AssumeSymLT(const GRState* state, SymbolRef sym, + const llvm::APSInt& Int, + const llvm::APSInt& Adjustment); - const GRState* AssumeSymGT(const GRState* St, SymbolRef sym, - const llvm::APSInt& V); + const GRState* AssumeSymGT(const GRState* state, SymbolRef sym, + const llvm::APSInt& Int, + const llvm::APSInt& Adjustment); - const GRState* AssumeSymGE(const GRState* St, SymbolRef sym, - const llvm::APSInt& V); + const GRState* AssumeSymGE(const GRState* state, SymbolRef sym, + const llvm::APSInt& Int, + const llvm::APSInt& Adjustment); - const GRState* AssumeSymLE(const GRState* St, SymbolRef sym, - const llvm::APSInt& V); + const GRState* AssumeSymLE(const GRState* state, SymbolRef sym, + const llvm::APSInt& Int, + const llvm::APSInt& Adjustment); const llvm::APSInt* getSymVal(const GRState* St, SymbolRef sym) const; @@ -303,10 +281,6 @@ RangeConstraintManager::RemoveDeadBindings(const GRState* state, return state->set<ConstraintRange>(CR); } -//===------------------------------------------------------------------------=== -// AssumeSymX methods: public interface for RangeConstraintManager. 
-//===------------------------------------------------------------------------===/ - RangeSet RangeConstraintManager::GetRange(const GRState *state, SymbolRef sym) { if (ConstraintRangeTy::data_type* V = state->get<ConstraintRange>(sym)) @@ -323,20 +297,127 @@ RangeConstraintManager::GetRange(const GRState *state, SymbolRef sym) { // AssumeSymX methods: public interface for RangeConstraintManager. //===------------------------------------------------------------------------===/ -#define AssumeX(OP)\ -const GRState*\ -RangeConstraintManager::AssumeSym ## OP(const GRState* state, SymbolRef sym,\ - const llvm::APSInt& V){\ - const RangeSet& R = GetRange(state, sym).Add##OP(state->getBasicVals(), F, V);\ - return !R.isEmpty() ? state->set<ConstraintRange>(sym, R) : NULL;\ +// The syntax for ranges below is mathematical, using [x, y] for closed ranges +// and (x, y) for open ranges. These ranges are modular, corresponding with +// a common treatment of C integer overflow. This means that these methods +// do not have to worry about overflow; RangeSet::Intersect can handle such a +// "wraparound" range. +// As an example, the range [UINT_MAX-1, 3) contains five values: UINT_MAX-1, +// UINT_MAX, 0, 1, and 2. + +const GRState* +RangeConstraintManager::AssumeSymNE(const GRState* state, SymbolRef sym, + const llvm::APSInt& Int, + const llvm::APSInt& Adjustment) { + BasicValueFactory &BV = state->getBasicVals(); + + llvm::APSInt Lower = Int-Adjustment; + llvm::APSInt Upper = Lower; + --Lower; + ++Upper; + + // [Int-Adjustment+1, Int-Adjustment-1] + // Notice that the lower bound is greater than the upper bound. + RangeSet New = GetRange(state, sym).Intersect(BV, F, Upper, Lower); + return New.isEmpty() ? 
NULL : state->set<ConstraintRange>(sym, New); } -AssumeX(EQ) -AssumeX(NE) -AssumeX(LT) -AssumeX(GT) -AssumeX(LE) -AssumeX(GE) +const GRState* +RangeConstraintManager::AssumeSymEQ(const GRState* state, SymbolRef sym, + const llvm::APSInt& Int, + const llvm::APSInt& Adjustment) { + // [Int-Adjustment, Int-Adjustment] + BasicValueFactory &BV = state->getBasicVals(); + llvm::APSInt AdjInt = Int-Adjustment; + RangeSet New = GetRange(state, sym).Intersect(BV, F, AdjInt, AdjInt); + return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New); +} + +const GRState* +RangeConstraintManager::AssumeSymLT(const GRState* state, SymbolRef sym, + const llvm::APSInt& Int, + const llvm::APSInt& Adjustment) { + BasicValueFactory &BV = state->getBasicVals(); + + QualType T = state->getSymbolManager().getType(sym); + const llvm::APSInt &Min = BV.getMinValue(T); + + // Special case for Int == Min. This is always false. + if (Int == Min) + return NULL; + + llvm::APSInt Lower = Min-Adjustment; + llvm::APSInt Upper = Int-Adjustment; + --Upper; + + RangeSet New = GetRange(state, sym).Intersect(BV, F, Lower, Upper); + return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New); +} + +const GRState* +RangeConstraintManager::AssumeSymGT(const GRState* state, SymbolRef sym, + const llvm::APSInt& Int, + const llvm::APSInt& Adjustment) { + BasicValueFactory &BV = state->getBasicVals(); + + QualType T = state->getSymbolManager().getType(sym); + const llvm::APSInt &Max = BV.getMaxValue(T); + + // Special case for Int == Max. This is always false. + if (Int == Max) + return NULL; + + llvm::APSInt Lower = Int-Adjustment; + llvm::APSInt Upper = Max-Adjustment; + ++Lower; + + RangeSet New = GetRange(state, sym).Intersect(BV, F, Lower, Upper); + return New.isEmpty() ? 
NULL : state->set<ConstraintRange>(sym, New); +} + +const GRState* +RangeConstraintManager::AssumeSymGE(const GRState* state, SymbolRef sym, + const llvm::APSInt& Int, + const llvm::APSInt& Adjustment) { + BasicValueFactory &BV = state->getBasicVals(); + + QualType T = state->getSymbolManager().getType(sym); + const llvm::APSInt &Min = BV.getMinValue(T); + + // Special case for Int == Min. This is always feasible. + if (Int == Min) + return state; + + const llvm::APSInt &Max = BV.getMaxValue(T); + + llvm::APSInt Lower = Int-Adjustment; + llvm::APSInt Upper = Max-Adjustment; + + RangeSet New = GetRange(state, sym).Intersect(BV, F, Lower, Upper); + return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New); +} + +const GRState* +RangeConstraintManager::AssumeSymLE(const GRState* state, SymbolRef sym, + const llvm::APSInt& Int, + const llvm::APSInt& Adjustment) { + BasicValueFactory &BV = state->getBasicVals(); + + QualType T = state->getSymbolManager().getType(sym); + const llvm::APSInt &Max = BV.getMaxValue(T); + + // Special case for Int == Max. This is always feasible. + if (Int == Max) + return state; + + const llvm::APSInt &Min = BV.getMinValue(T); + + llvm::APSInt Lower = Min-Adjustment; + llvm::APSInt Upper = Int-Adjustment; + + RangeSet New = GetRange(state, sym).Intersect(BV, F, Lower, Upper); + return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New); +} //===------------------------------------------------------------------------=== // Pretty-printing. 
diff --git a/contrib/llvm/tools/clang/lib/Checker/RegionStore.cpp b/contrib/llvm/tools/clang/lib/Checker/RegionStore.cpp index c4072fd..74a7fee 100644 --- a/contrib/llvm/tools/clang/lib/Checker/RegionStore.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/RegionStore.cpp @@ -118,22 +118,6 @@ public: } //===----------------------------------------------------------------------===// -// Region "Extents" -//===----------------------------------------------------------------------===// -// -// MemRegions represent chunks of memory with a size (their "extent"). This -// GDM entry tracks the extents for regions. Extents are in bytes. -// -namespace { class RegionExtents {}; } -static int RegionExtentsIndex = 0; -namespace clang { - template<> struct GRStateTrait<RegionExtents> - : public GRStatePartialTrait<llvm::ImmutableMap<const MemRegion*, SVal> > { - static void* GDMIndex() { return &RegionExtentsIndex; } - }; -} - -//===----------------------------------------------------------------------===// // Utility functions. //===----------------------------------------------------------------------===// @@ -244,14 +228,16 @@ public: Store InvalidateRegion(Store store, const MemRegion *R, const Expr *E, unsigned Count, InvalidatedSymbols *IS) { - return RegionStoreManager::InvalidateRegions(store, &R, &R+1, E, Count, IS); + return RegionStoreManager::InvalidateRegions(store, &R, &R+1, E, Count, IS, + false); } Store InvalidateRegions(Store store, const MemRegion * const *Begin, const MemRegion * const *End, const Expr *E, unsigned Count, - InvalidatedSymbols *IS); + InvalidatedSymbols *IS, + bool invalidateGlobals); public: // Made public for helper classes. @@ -280,6 +266,14 @@ public: // Part of public interface to class. Store Bind(Store store, Loc LV, SVal V); + // BindDefault is only used to initialize a region with a default value. 
+ Store BindDefault(Store store, const MemRegion *R, SVal V) { + RegionBindings B = GetRegionBindings(store); + assert(!Lookup(B, R, BindingKey::Default)); + assert(!Lookup(B, R, BindingKey::Direct)); + return Add(B, R, BindingKey::Default, V).getRoot(); + } + Store BindCompoundLiteral(Store store, const CompoundLiteralExpr* CL, const LocationContext *LC, SVal V); @@ -339,6 +333,12 @@ public: // Part of public interface to class. SVal RetrieveArray(Store store, const TypedRegion* R); + /// Used to lazily generate derived symbols for bindings that are defined + /// implicitly by default bindings in a super region. + Optional<SVal> RetrieveDerivedDefaultValue(RegionBindings B, + const MemRegion *superR, + const TypedRegion *R, QualType Ty); + /// Get the state and region whose binding this region R corresponds to. std::pair<Store, const MemRegion*> GetLazyBinding(RegionBindings B, const MemRegion *R); @@ -352,7 +352,7 @@ public: // Part of public interface to class. /// RemoveDeadBindings - Scans the RegionStore of 'state' for dead values. /// It returns a new Store with these values removed. - const GRState *RemoveDeadBindings(GRState &state, Stmt* Loc, + const GRState *RemoveDeadBindings(GRState &state, const StackFrameContext *LCtx, SymbolReaper& SymReaper, llvm::SmallVectorImpl<const MemRegion*>& RegionRoots); @@ -364,18 +364,7 @@ public: // Part of public interface to class. // Region "extents". //===------------------------------------------------------------------===// - const GRState *setExtent(const GRState *state,const MemRegion* R,SVal Extent){ - return state->set<RegionExtents>(R, Extent); - } - - Optional<SVal> getExtent(const GRState *state, const MemRegion *R) { - const SVal *V = state->get<RegionExtents>(R); - if (V) - return *V; - else - return Optional<SVal>(); - } - + // FIXME: This method will soon be eliminated; see the note in Store.h. 
DefinedOrUnknownSVal getSizeInElements(const GRState *state, const MemRegion* R, QualType EleTy); @@ -391,12 +380,17 @@ public: // Part of public interface to class. const char *sep); void iterBindings(Store store, BindingsHandler& f) { - // FIXME: Implement. - } - - // FIXME: Remove. - BasicValueFactory& getBasicVals() { - return StateMgr.getBasicVals(); + RegionBindings B = GetRegionBindings(store); + for (RegionBindings::iterator I=B.begin(), E=B.end(); I!=E; ++I) { + const BindingKey &K = I.getKey(); + if (!K.isDirect()) + continue; + if (const SubRegion *R = dyn_cast<SubRegion>(I.getKey().getRegion())) { + // FIXME: Possibly incorporate the offset? + if (!f.HandleBinding(*this, store, R, I.getData())) + return; + } + } } // FIXME: Remove. @@ -483,12 +477,13 @@ public: RegionBindings getRegionBindings() const { return B; } - void AddToCluster(BindingKey K) { + RegionCluster &AddToCluster(BindingKey K) { const MemRegion *R = K.getRegion(); const MemRegion *baseR = R->getBaseRegion(); RegionCluster &C = getCluster(baseR); C.push_back(K, BVC); static_cast<DERIVED*>(this)->VisitAddedToCluster(baseR, C); + return C; } bool isVisited(const MemRegion *R) { @@ -504,15 +499,20 @@ public: return *CRef; } - void GenerateClusters() { + void GenerateClusters(bool includeGlobals = false) { // Scan the entire set of bindings and make the region clusters. for (RegionBindings::iterator RI = B.begin(), RE = B.end(); RI != RE; ++RI){ - AddToCluster(RI.getKey()); + RegionCluster &C = AddToCluster(RI.getKey()); if (const MemRegion *R = RI.getData().getAsRegion()) { // Generate a cluster, but don't add the region to the cluster // if there aren't any bindings. 
getCluster(R->getBaseRegion()); } + if (includeGlobals) { + const MemRegion *R = RI.getKey().getRegion(); + if (isa<NonStaticGlobalSpaceRegion>(R->getMemorySpace())) + AddToWorkList(R, C); + } } } @@ -615,8 +615,8 @@ void InvalidateRegionsWorker::VisitBinding(SVal V) { RegionBindings B = RegionStoreManager::GetRegionBindings(LCS->getStore()); for (RegionBindings::iterator RI = B.begin(), RE = B.end(); RI != RE; ++RI){ - const MemRegion *baseR = RI.getKey().getRegion(); - if (cast<SubRegion>(baseR)->isSubRegionOf(LazyR)) + const SubRegion *baseR = dyn_cast<SubRegion>(RI.getKey().getRegion()); + if (baseR && baseR->isSubRegionOf(LazyR)) VisitBinding(RI.getData()); } @@ -706,13 +706,14 @@ Store RegionStoreManager::InvalidateRegions(Store store, const MemRegion * const *I, const MemRegion * const *E, const Expr *Ex, unsigned Count, - InvalidatedSymbols *IS) { + InvalidatedSymbols *IS, + bool invalidateGlobals) { InvalidateRegionsWorker W(*this, StateMgr, RegionStoreManager::GetRegionBindings(store), Ex, Count, IS); // Scan the bindings and generate the clusters. - W.GenerateClusters(); + W.GenerateClusters(invalidateGlobals); // Add I .. E to the worklist. for ( ; I != E; ++I) @@ -721,7 +722,20 @@ Store RegionStoreManager::InvalidateRegions(Store store, W.RunWorkList(); // Return the new bindings. - return W.getRegionBindings().getRoot(); + RegionBindings B = W.getRegionBindings(); + + if (invalidateGlobals) { + // Bind the non-static globals memory space to a new symbol that we will + // use to derive the bindings for all non-static globals. 
+ const GlobalsSpaceRegion *GS = MRMgr.getGlobalsRegion(); + SVal V = + ValMgr.getConjuredSymbolVal(/* SymbolTag = */ (void*) GS, Ex, + /* symbol type, doesn't matter */ Ctx.IntTy, + Count); + B = Add(B, BindingKey::Make(GS, BindingKey::Default), V); + } + + return B.getRoot(); } //===----------------------------------------------------------------------===// @@ -731,82 +745,19 @@ Store RegionStoreManager::InvalidateRegions(Store store, DefinedOrUnknownSVal RegionStoreManager::getSizeInElements(const GRState *state, const MemRegion *R, QualType EleTy) { + SVal Size = cast<SubRegion>(R)->getExtent(ValMgr); + SValuator &SVator = ValMgr.getSValuator(); + const llvm::APSInt *SizeInt = SVator.getKnownValue(state, Size); + if (!SizeInt) + return UnknownVal(); - switch (R->getKind()) { - case MemRegion::CXXThisRegionKind: - assert(0 && "Cannot get size of 'this' region"); - case MemRegion::GenericMemSpaceRegionKind: - case MemRegion::StackLocalsSpaceRegionKind: - case MemRegion::StackArgumentsSpaceRegionKind: - case MemRegion::HeapSpaceRegionKind: - case MemRegion::GlobalsSpaceRegionKind: - case MemRegion::UnknownSpaceRegionKind: - assert(0 && "Cannot index into a MemSpace"); - return UnknownVal(); - - case MemRegion::FunctionTextRegionKind: - case MemRegion::BlockTextRegionKind: - case MemRegion::BlockDataRegionKind: - // Technically this can happen if people do funny things with casts. - return UnknownVal(); - - // Not yet handled. 
- case MemRegion::AllocaRegionKind: - case MemRegion::CompoundLiteralRegionKind: - case MemRegion::ElementRegionKind: - case MemRegion::FieldRegionKind: - case MemRegion::ObjCIvarRegionKind: - case MemRegion::CXXObjectRegionKind: - return UnknownVal(); - - case MemRegion::SymbolicRegionKind: { - const SVal *Size = state->get<RegionExtents>(R); - if (!Size) - return UnknownVal(); - const nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(Size); - if (!CI) - return UnknownVal(); - - CharUnits RegionSize = - CharUnits::fromQuantity(CI->getValue().getSExtValue()); - CharUnits EleSize = getContext().getTypeSizeInChars(EleTy); - assert(RegionSize % EleSize == 0); - - return ValMgr.makeIntVal(RegionSize / EleSize, false); - } - - case MemRegion::StringRegionKind: { - const StringLiteral* Str = cast<StringRegion>(R)->getStringLiteral(); - // We intentionally made the size value signed because it participates in - // operations with signed indices. - return ValMgr.makeIntVal(Str->getByteLength()+1, false); - } - - case MemRegion::VarRegionKind: { - const VarRegion* VR = cast<VarRegion>(R); - // Get the type of the variable. - QualType T = VR->getDesugaredValueType(getContext()); - - // FIXME: Handle variable-length arrays. - if (isa<VariableArrayType>(T)) - return UnknownVal(); - - if (const ConstantArrayType* CAT = dyn_cast<ConstantArrayType>(T)) { - // return the size as signed integer. - return ValMgr.makeIntVal(CAT->getSize(), false); - } - - // Clients can reinterpret ordinary variables as arrays, possibly of - // another type. The width is rounded down to ensure that an access is - // entirely within bounds. 
- CharUnits VarSize = getContext().getTypeSizeInChars(T); - CharUnits EleSize = getContext().getTypeSizeInChars(EleTy); - return ValMgr.makeIntVal(VarSize / EleSize, false); - } - } + CharUnits RegionSize = CharUnits::fromQuantity(SizeInt->getSExtValue()); + CharUnits EleSize = getContext().getTypeSizeInChars(EleTy); - assert(0 && "Unreachable"); - return UnknownVal(); + // If a variable is reinterpreted as a type that doesn't fit into a larger + // type evenly, round it down. + // This is a signed value, since it's used in arithmetic with signed indices. + return ValMgr.makeIntVal(RegionSize / EleSize, false); } //===----------------------------------------------------------------------===// @@ -849,6 +800,19 @@ SVal RegionStoreManager::EvalBinOp(BinaryOperator::Opcode Op, Loc L, NonLoc R, if (!isa<loc::MemRegionVal>(L)) return UnknownVal(); + // Special case for zero RHS. + if (R.isZeroConstant()) { + switch (Op) { + default: + // Handle it normally. + break; + case BinaryOperator::Add: + case BinaryOperator::Sub: + // FIXME: does this need to be casted to match resultTy? + return L; + } + } + const MemRegion* MR = cast<loc::MemRegionVal>(L).getRegion(); const ElementRegion *ER = 0; @@ -870,8 +834,7 @@ SVal RegionStoreManager::EvalBinOp(BinaryOperator::Opcode Op, Loc L, NonLoc R, } case MemRegion::AllocaRegionKind: { const AllocaRegion *AR = cast<AllocaRegion>(MR); - QualType T = getContext().CharTy; // Create an ElementRegion of bytes. - QualType EleTy = T->getAs<PointerType>()->getPointeeType(); + QualType EleTy = getContext().CharTy; // Create an ElementRegion of bytes. 
SVal ZeroIdx = ValMgr.makeZeroArrayIndex(); ER = MRMgr.getElementRegion(EleTy, ZeroIdx, AR, getContext()); break; @@ -907,7 +870,8 @@ SVal RegionStoreManager::EvalBinOp(BinaryOperator::Opcode Op, Loc L, NonLoc R, case MemRegion::StackLocalsSpaceRegionKind: case MemRegion::StackArgumentsSpaceRegionKind: case MemRegion::HeapSpaceRegionKind: - case MemRegion::GlobalsSpaceRegionKind: + case MemRegion::NonStaticGlobalSpaceRegionKind: + case MemRegion::StaticGlobalSpaceRegionKind: case MemRegion::UnknownSpaceRegionKind: assert(0 && "Cannot perform pointer arithmetic on a MemSpace"); return UnknownVal(); @@ -946,7 +910,8 @@ SVal RegionStoreManager::EvalBinOp(BinaryOperator::Opcode Op, Loc L, NonLoc R, //===----------------------------------------------------------------------===// Optional<SVal> RegionStoreManager::getDirectBinding(RegionBindings B, - const MemRegion *R) { + const MemRegion *R) { + if (const SVal *V = Lookup(B, R, BindingKey::Direct)) return *V; @@ -1009,8 +974,13 @@ SVal RegionStoreManager::Retrieve(Store store, Loc L, QualType T) { const MemRegion *MR = cast<loc::MemRegionVal>(L).getRegion(); - if (isa<AllocaRegion>(MR) || isa<SymbolicRegion>(MR)) + if (isa<AllocaRegion>(MR) || isa<SymbolicRegion>(MR)) { + if (T.isNull()) { + const SymbolicRegion *SR = cast<SymbolicRegion>(MR); + T = SR->getSymbol()->getType(getContext()); + } MR = GetElementZeroRegion(MR, T); + } if (isa<CodeTextRegion>(MR)) { assert(0 && "Why load from a code text region?"); @@ -1172,27 +1142,33 @@ SVal RegionStoreManager::RetrieveElement(Store store, } } - // Check if the immediate super region has a direct binding. - if (const Optional<SVal> &V = getDirectBinding(B, superR)) { - if (SymbolRef parentSym = V->getAsSymbol()) - return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R); - - if (V->isUnknownOrUndef()) - return *V; - - // Handle LazyCompoundVals for the immediate super region. Other cases - // are handled in 'RetrieveFieldOrElementCommon'. 
- if (const nonloc::LazyCompoundVal *LCV = - dyn_cast<nonloc::LazyCompoundVal>(V)) { - - R = MRMgr.getElementRegionWithSuper(R, LCV->getRegion()); - return RetrieveElement(LCV->getStore(), R); + // Handle the case where we are indexing into a larger scalar object. + // For example, this handles: + // int x = ... + // char *y = &x; + // return *y; + // FIXME: This is a hack, and doesn't do anything really intelligent yet. + const RegionRawOffset &O = R->getAsRawOffset(); + if (const TypedRegion *baseR = dyn_cast_or_null<TypedRegion>(O.getRegion())) { + QualType baseT = baseR->getValueType(Ctx); + if (baseT->isScalarType()) { + QualType elemT = R->getElementType(); + if (elemT->isScalarType()) { + if (Ctx.getTypeSizeInChars(baseT) >= Ctx.getTypeSizeInChars(elemT)) { + if (const Optional<SVal> &V = getDirectBinding(B, superR)) { + if (SymbolRef parentSym = V->getAsSymbol()) + return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R); + + if (V->isUnknownOrUndef()) + return *V; + // Other cases: give up. We are indexing into a larger object + // that has some value, but we don't know how to handle that yet. + return UnknownVal(); + } + } + } } - - // Other cases: give up. 
- return UnknownVal(); } - return RetrieveFieldOrElementCommon(store, R, R->getElementType(), superR); } @@ -1208,6 +1184,28 @@ SVal RegionStoreManager::RetrieveField(Store store, return RetrieveFieldOrElementCommon(store, R, Ty, R->getSuperRegion()); } +Optional<SVal> +RegionStoreManager::RetrieveDerivedDefaultValue(RegionBindings B, + const MemRegion *superR, + const TypedRegion *R, + QualType Ty) { + + if (const Optional<SVal> &D = getDefaultBinding(B, superR)) { + if (SymbolRef parentSym = D->getAsSymbol()) + return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R); + + if (D->isZeroConstant()) + return ValMgr.makeZeroVal(Ty); + + if (D->isUnknownOrUndef()) + return *D; + + assert(0 && "Unknown default value"); + } + + return Optional<SVal>(); +} + SVal RegionStoreManager::RetrieveFieldOrElementCommon(Store store, const TypedRegion *R, QualType Ty, @@ -1219,18 +1217,8 @@ SVal RegionStoreManager::RetrieveFieldOrElementCommon(Store store, RegionBindings B = GetRegionBindings(store); while (superR) { - if (const Optional<SVal> &D = getDefaultBinding(B, superR)) { - if (SymbolRef parentSym = D->getAsSymbol()) - return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R); - - if (D->isZeroConstant()) - return ValMgr.makeZeroVal(Ty); - - if (D->isUnknown()) - return *D; - - assert(0 && "Unknown default value"); - } + if (const Optional<SVal> &D = RetrieveDerivedDefaultValue(B, superR, R, Ty)) + return *D; // If our super region is a field or element itself, walk up the region // hierarchy to see if there is a default value installed in an ancestor. @@ -1311,7 +1299,7 @@ SVal RegionStoreManager::RetrieveVar(Store store, const VarRegion *R) { return ValMgr.getRegionValueSymbolVal(R); if (isa<GlobalsSpaceRegion>(MS)) { - if (VD->isFileVarDecl()) { + if (isa<NonStaticGlobalSpaceRegion>(MS)) { // Is 'VD' declared constant? If so, retrieve the constant value. 
QualType CT = Ctx.getCanonicalType(T); if (CT.isConstQualified()) { @@ -1326,6 +1314,9 @@ SVal RegionStoreManager::RetrieveVar(Store store, const VarRegion *R) { } } + if (const Optional<SVal> &V = RetrieveDerivedDefaultValue(B, MS, R, CT)) + return V.getValue(); + return ValMgr.getRegionValueSymbolVal(R); } @@ -1449,6 +1440,7 @@ Store RegionStoreManager::BindCompoundLiteral(Store store, V); } + Store RegionStoreManager::setImplicitDefaultValue(Store store, const MemRegion *R, QualType T) { @@ -1691,15 +1683,14 @@ class RemoveDeadBindingsWorker : public ClusterAnalysis<RemoveDeadBindingsWorker> { llvm::SmallVector<const SymbolicRegion*, 12> Postponed; SymbolReaper &SymReaper; - Stmt *Loc; const StackFrameContext *CurrentLCtx; - + public: RemoveDeadBindingsWorker(RegionStoreManager &rm, GRStateManager &stateMgr, RegionBindings b, SymbolReaper &symReaper, - Stmt *loc, const StackFrameContext *LCtx) + const StackFrameContext *LCtx) : ClusterAnalysis<RemoveDeadBindingsWorker>(rm, stateMgr, b), - SymReaper(symReaper), Loc(loc), CurrentLCtx(LCtx) {} + SymReaper(symReaper), CurrentLCtx(LCtx) {} // Called by ClusterAnalysis. void VisitAddedToCluster(const MemRegion *baseR, RegionCluster &C); @@ -1715,7 +1706,7 @@ void RemoveDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR, RegionCluster &C) { if (const VarRegion *VR = dyn_cast<VarRegion>(baseR)) { - if (SymReaper.isLive(Loc, VR)) + if (SymReaper.isLive(VR)) AddToWorkList(baseR, C); return; @@ -1730,9 +1721,14 @@ void RemoveDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR, return; } + if (isa<NonStaticGlobalSpaceRegion>(baseR)) { + AddToWorkList(baseR, C); + return; + } + // CXXThisRegion in the current or parent location context is live. 
if (const CXXThisRegion *TR = dyn_cast<CXXThisRegion>(baseR)) { - const StackArgumentsSpaceRegion *StackReg = + const StackArgumentsSpaceRegion *StackReg = cast<StackArgumentsSpaceRegion>(TR->getSuperRegion()); const StackFrameContext *RegCtx = StackReg->getStackFrame(); if (RegCtx == CurrentLCtx || RegCtx->isParentOf(CurrentLCtx)) @@ -1754,8 +1750,8 @@ void RemoveDeadBindingsWorker::VisitBinding(SVal V) { const MemRegion *LazyR = LCS->getRegion(); RegionBindings B = RegionStoreManager::GetRegionBindings(LCS->getStore()); for (RegionBindings::iterator RI = B.begin(), RE = B.end(); RI != RE; ++RI){ - const MemRegion *baseR = RI.getKey().getRegion(); - if (cast<SubRegion>(baseR)->isSubRegionOf(LazyR)) + const SubRegion *baseR = dyn_cast<SubRegion>(RI.getKey().getRegion()); + if (baseR && baseR->isSubRegionOf(LazyR)) VisitBinding(RI.getData()); } return; @@ -1822,13 +1818,13 @@ bool RemoveDeadBindingsWorker::UpdatePostponed() { return changed; } -const GRState *RegionStoreManager::RemoveDeadBindings(GRState &state, Stmt* Loc, +const GRState *RegionStoreManager::RemoveDeadBindings(GRState &state, const StackFrameContext *LCtx, SymbolReaper& SymReaper, llvm::SmallVectorImpl<const MemRegion*>& RegionRoots) { RegionBindings B = GetRegionBindings(state.getStore()); - RemoveDeadBindingsWorker W(*this, StateMgr, B, SymReaper, Loc, LCtx); + RemoveDeadBindingsWorker W(*this, StateMgr, B, SymReaper, LCtx); W.GenerateClusters(); // Enqueue the region roots onto the worklist. @@ -1862,13 +1858,6 @@ const GRState *RegionStoreManager::RemoveDeadBindings(GRState &state, Stmt* Loc, } state.setStore(B.getRoot()); const GRState *s = StateMgr.getPersistentState(state); - // Remove the extents of dead symbolic regions. 
- llvm::ImmutableMap<const MemRegion*,SVal> Extents = s->get<RegionExtents>(); - for (llvm::ImmutableMap<const MemRegion *, SVal>::iterator I=Extents.begin(), - E = Extents.end(); I != E; ++I) { - if (!W.isVisited(I->first)) - s = s->remove<RegionExtents>(I->first); - } return s; } @@ -1887,9 +1876,9 @@ GRState const *RegionStoreManager::EnterStackFrame(GRState const *state, SVal ArgVal = state->getSVal(*AI); store = Bind(store, ValMgr.makeLoc(MRMgr.getVarRegion(*PI,frame)),ArgVal); } - } else if (const CXXConstructExpr *CE = + } else if (const CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(frame->getCallSite())) { - CXXConstructExpr::const_arg_iterator AI = CE->arg_begin(), + CXXConstructExpr::const_arg_iterator AI = CE->arg_begin(), AE = CE->arg_end(); // Copy the arg expression value to the arg variables. diff --git a/contrib/llvm/tools/clang/lib/Checker/ReturnStackAddressChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/ReturnStackAddressChecker.cpp deleted file mode 100644 index 35b1cde..0000000 --- a/contrib/llvm/tools/clang/lib/Checker/ReturnStackAddressChecker.cpp +++ /dev/null @@ -1,125 +0,0 @@ -//== ReturnStackAddressChecker.cpp ------------------------------*- C++ -*--==// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file defines ReturnStackAddressChecker, which is a path-sensitive -// check which looks for the addresses of stack variables being returned to -// callers. 
-// -//===----------------------------------------------------------------------===// - -#include "GRExprEngineInternalChecks.h" -#include "clang/Checker/BugReporter/BugType.h" -#include "clang/Checker/PathSensitive/GRExprEngine.h" -#include "clang/Checker/PathSensitive/CheckerVisitor.h" -#include "clang/Basic/SourceManager.h" -#include "llvm/ADT/SmallString.h" - -using namespace clang; - -namespace { -class ReturnStackAddressChecker : - public CheckerVisitor<ReturnStackAddressChecker> { - BuiltinBug *BT; -public: - ReturnStackAddressChecker() : BT(0) {} - static void *getTag(); - void PreVisitReturnStmt(CheckerContext &C, const ReturnStmt *RS); -private: - void EmitStackError(CheckerContext &C, const MemRegion *R, const Expr *RetE); -}; -} - -void clang::RegisterReturnStackAddressChecker(GRExprEngine &Eng) { - Eng.registerCheck(new ReturnStackAddressChecker()); -} - -void *ReturnStackAddressChecker::getTag() { - static int x = 0; return &x; -} - -void ReturnStackAddressChecker::EmitStackError(CheckerContext &C, - const MemRegion *R, - const Expr *RetE) { - ExplodedNode *N = C.GenerateSink(); - - if (!N) - return; - - if (!BT) - BT = new BuiltinBug("Return of address to stack-allocated memory"); - - // Generate a report for this bug. - llvm::SmallString<512> buf; - llvm::raw_svector_ostream os(buf); - SourceRange range; - - // Get the base region, stripping away fields and elements. - R = R->getBaseRegion(); - - // Check if the region is a compound literal. 
- if (const CompoundLiteralRegion* CR = dyn_cast<CompoundLiteralRegion>(R)) { - const CompoundLiteralExpr* CL = CR->getLiteralExpr(); - os << "Address of stack memory associated with a compound literal " - "declared on line " - << C.getSourceManager().getInstantiationLineNumber(CL->getLocStart()) - << " returned to caller"; - range = CL->getSourceRange(); - } - else if (const AllocaRegion* AR = dyn_cast<AllocaRegion>(R)) { - const Expr* ARE = AR->getExpr(); - SourceLocation L = ARE->getLocStart(); - range = ARE->getSourceRange(); - os << "Address of stack memory allocated by call to alloca() on line " - << C.getSourceManager().getInstantiationLineNumber(L) - << " returned to caller"; - } - else if (const BlockDataRegion *BR = dyn_cast<BlockDataRegion>(R)) { - const BlockDecl *BD = BR->getCodeRegion()->getDecl(); - SourceLocation L = BD->getLocStart(); - range = BD->getSourceRange(); - os << "Address of stack-allocated block declared on line " - << C.getSourceManager().getInstantiationLineNumber(L) - << " returned to caller"; - } - else if (const VarRegion *VR = dyn_cast<VarRegion>(R)) { - os << "Address of stack memory associated with local variable '" - << VR->getString() << "' returned"; - range = VR->getDecl()->getSourceRange(); - } - else { - assert(false && "Invalid region in ReturnStackAddressChecker."); - return; - } - - RangedBugReport *report = new RangedBugReport(*BT, os.str(), N); - report->addRange(RetE->getSourceRange()); - if (range.isValid()) - report->addRange(range); - - C.EmitReport(report); -} - -void ReturnStackAddressChecker::PreVisitReturnStmt(CheckerContext &C, - const ReturnStmt *RS) { - - const Expr *RetE = RS->getRetValue(); - if (!RetE) - return; - - SVal V = C.getState()->getSVal(RetE); - const MemRegion *R = V.getAsRegion(); - - if (!R || !R->hasStackStorage()) - return; - - if (R->hasStackStorage()) { - EmitStackError(C, R, RetE); - return; - } -} diff --git a/contrib/llvm/tools/clang/lib/Checker/SVals.cpp 
b/contrib/llvm/tools/clang/lib/Checker/SVals.cpp index d756be7..7a99e86 100644 --- a/contrib/llvm/tools/clang/lib/Checker/SVals.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/SVals.cpp @@ -200,15 +200,19 @@ bool SVal::isConstant() const { return isa<nonloc::ConcreteInt>(this) || isa<loc::ConcreteInt>(this); } -bool SVal::isZeroConstant() const { +bool SVal::isConstant(int I) const { if (isa<loc::ConcreteInt>(*this)) - return cast<loc::ConcreteInt>(*this).getValue() == 0; + return cast<loc::ConcreteInt>(*this).getValue() == I; else if (isa<nonloc::ConcreteInt>(*this)) - return cast<nonloc::ConcreteInt>(*this).getValue() == 0; + return cast<nonloc::ConcreteInt>(*this).getValue() == I; else return false; } +bool SVal::isZeroConstant() const { + return isConstant(0); +} + //===----------------------------------------------------------------------===// // Transfer function dispatch for Non-Locs. diff --git a/contrib/llvm/tools/clang/lib/Checker/SValuator.cpp b/contrib/llvm/tools/clang/lib/Checker/SValuator.cpp index 542fc1b..a7e15fc 100644 --- a/contrib/llvm/tools/clang/lib/Checker/SValuator.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/SValuator.cpp @@ -29,15 +29,15 @@ SVal SValuator::EvalBinOp(const GRState *ST, BinaryOperator::Opcode Op, if (isa<Loc>(L)) { if (isa<Loc>(R)) - return EvalBinOpLL(Op, cast<Loc>(L), cast<Loc>(R), T); + return EvalBinOpLL(ST, Op, cast<Loc>(L), cast<Loc>(R), T); return EvalBinOpLN(ST, Op, cast<Loc>(L), cast<NonLoc>(R), T); } if (isa<Loc>(R)) { - // Support pointer arithmetic where the increment/decrement operand - // is on the left and the pointer on the right. - assert(Op == BinaryOperator::Add || Op == BinaryOperator::Sub); + // Support pointer arithmetic where the addend is on the left + // and the pointer on the right. + assert(Op == BinaryOperator::Add); // Commute the operands. 
return EvalBinOpLN(ST, Op, cast<Loc>(R), cast<NonLoc>(L), T); diff --git a/contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.cpp b/contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.cpp index 8c423a9..321381b 100644 --- a/contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.cpp @@ -35,12 +35,11 @@ bool SimpleConstraintManager::canReasonAbout(SVal X) const { case BinaryOperator::Or: case BinaryOperator::Xor: return false; - // We don't reason yet about arithmetic constraints on symbolic values. + // We don't reason yet about these arithmetic constraints on + // symbolic values. case BinaryOperator::Mul: case BinaryOperator::Div: case BinaryOperator::Rem: - case BinaryOperator::Add: - case BinaryOperator::Sub: case BinaryOperator::Shl: case BinaryOperator::Shr: return false; @@ -90,12 +89,11 @@ const GRState *SimpleConstraintManager::AssumeAux(const GRState *state, while (SubR) { // FIXME: now we only find the first symbolic region. if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(SubR)) { + const llvm::APSInt &zero = BasicVals.getZeroWithPtrWidth(); if (Assumption) - return AssumeSymNE(state, SymR->getSymbol(), - BasicVals.getZeroWithPtrWidth()); + return AssumeSymNE(state, SymR->getSymbol(), zero, zero); else - return AssumeSymEQ(state, SymR->getSymbol(), - BasicVals.getZeroWithPtrWidth()); + return AssumeSymEQ(state, SymR->getSymbol(), zero, zero); } SubR = dyn_cast<SubRegion>(SubR->getSuperRegion()); } @@ -121,11 +119,27 @@ const GRState *SimpleConstraintManager::Assume(const GRState *state, return SU.ProcessAssume(state, cond, assumption); } +static BinaryOperator::Opcode NegateComparison(BinaryOperator::Opcode op) { + // FIXME: This should probably be part of BinaryOperator, since this isn't + // the only place it's used. (This code was copied from SimpleSValuator.cpp.) 
+ switch (op) { + default: + assert(false && "Invalid opcode."); + case BinaryOperator::LT: return BinaryOperator::GE; + case BinaryOperator::GT: return BinaryOperator::LE; + case BinaryOperator::LE: return BinaryOperator::GT; + case BinaryOperator::GE: return BinaryOperator::LT; + case BinaryOperator::EQ: return BinaryOperator::NE; + case BinaryOperator::NE: return BinaryOperator::EQ; + } +} + const GRState *SimpleConstraintManager::AssumeAux(const GRState *state, NonLoc Cond, bool Assumption) { - // We cannot reason about SymIntExpr and SymSymExpr. + // We cannot reason about SymSymExprs, + // and can only reason about some SymIntExprs. if (!canReasonAbout(Cond)) { // Just return the current state indicating that the path is feasible. // This may be an over-approximation of what is possible. @@ -144,30 +158,35 @@ const GRState *SimpleConstraintManager::AssumeAux(const GRState *state, SymbolRef sym = SV.getSymbol(); QualType T = SymMgr.getType(sym); const llvm::APSInt &zero = BasicVals.getValue(0, T); - - return Assumption ? AssumeSymNE(state, sym, zero) - : AssumeSymEQ(state, sym, zero); + if (Assumption) + return AssumeSymNE(state, sym, zero, zero); + else + return AssumeSymEQ(state, sym, zero, zero); } case nonloc::SymExprValKind: { nonloc::SymExprVal V = cast<nonloc::SymExprVal>(Cond); - if (const SymIntExpr *SE = dyn_cast<SymIntExpr>(V.getSymbolicExpression())){ - // FIXME: This is a hack. It silently converts the RHS integer to be - // of the same type as on the left side. This should be removed once - // we support truncation/extension of symbolic values. 
- GRStateManager &StateMgr = state->getStateManager(); - ASTContext &Ctx = StateMgr.getContext(); - QualType LHSType = SE->getLHS()->getType(Ctx); - BasicValueFactory &BasicVals = StateMgr.getBasicVals(); - const llvm::APSInt &RHS = BasicVals.Convert(LHSType, SE->getRHS()); - SymIntExpr SENew(SE->getLHS(), SE->getOpcode(), RHS, SE->getType(Ctx)); - - return AssumeSymInt(state, Assumption, &SENew); + + // For now, we only handle expressions whose RHS is an integer. + // All other expressions are assumed to be feasible. + const SymIntExpr *SE = dyn_cast<SymIntExpr>(V.getSymbolicExpression()); + if (!SE) + return state; + + BinaryOperator::Opcode op = SE->getOpcode(); + // Implicitly compare non-comparison expressions to 0. + if (!BinaryOperator::isComparisonOp(op)) { + QualType T = SymMgr.getType(SE); + const llvm::APSInt &zero = BasicVals.getValue(0, T); + op = (Assumption ? BinaryOperator::NE : BinaryOperator::EQ); + return AssumeSymRel(state, SE, op, zero); } - // For all other symbolic expressions, over-approximate and consider - // the constraint feasible. - return state; + // From here on out, op is the real comparison we'll be testing. + if (!Assumption) + op = NegateComparison(op); + + return AssumeSymRel(state, SE->getLHS(), op, SE->getRHS()); } case nonloc::ConcreteIntKind: { @@ -182,43 +201,98 @@ const GRState *SimpleConstraintManager::AssumeAux(const GRState *state, } // end switch } -const GRState *SimpleConstraintManager::AssumeSymInt(const GRState *state, - bool Assumption, - const SymIntExpr *SE) { +const GRState *SimpleConstraintManager::AssumeSymRel(const GRState *state, + const SymExpr *LHS, + BinaryOperator::Opcode op, + const llvm::APSInt& Int) { + assert(BinaryOperator::isComparisonOp(op) && + "Non-comparison ops should be rewritten as comparisons to zero."); + + // We only handle simple comparisons of the form "$sym == constant" + // or "($sym+constant1) == constant2". + // The adjustment is "constant1" in the above expression. 
It's used to + // "slide" the solution range around for modular arithmetic. For example, + // x < 4 has the solution [0, 3]. x+2 < 4 has the solution [0-2, 3-2], which + // in modular arithmetic is [0, 1] U [UINT_MAX-1, UINT_MAX]. It's up to + // the subclasses of SimpleConstraintManager to handle the adjustment. + llvm::APSInt Adjustment; + + // First check if the LHS is a simple symbol reference. + SymbolRef Sym = dyn_cast<SymbolData>(LHS); + if (Sym) { + Adjustment = 0; + } else { + // Next, see if it's a "($sym+constant1)" expression. + const SymIntExpr *SE = dyn_cast<SymIntExpr>(LHS); + + // We don't handle "($sym1+$sym2)". + // Give up and assume the constraint is feasible. + if (!SE) + return state; + + // We don't handle "(<expr>+constant1)". + // Give up and assume the constraint is feasible. + Sym = dyn_cast<SymbolData>(SE->getLHS()); + if (!Sym) + return state; + + // Get the constant out of the expression "($sym+constant1)". + switch (SE->getOpcode()) { + case BinaryOperator::Add: + Adjustment = SE->getRHS(); + break; + case BinaryOperator::Sub: + Adjustment = -SE->getRHS(); + break; + default: + // We don't handle non-additive operators. + // Give up and assume the constraint is feasible. + return state; + } + } + + // FIXME: This next section is a hack. It silently converts the integers to + // be of the same type as the symbol, which is not always correct. Really the + // comparisons should be performed using the Int's type, then mapped back to + // the symbol's range of values. + GRStateManager &StateMgr = state->getStateManager(); + ASTContext &Ctx = StateMgr.getContext(); + + QualType T = Sym->getType(Ctx); + assert(T->isIntegerType() || Loc::IsLocType(T)); + unsigned bitwidth = Ctx.getTypeSize(T); + bool isSymUnsigned = T->isUnsignedIntegerType() || Loc::IsLocType(T); + // Convert the adjustment. + Adjustment.setIsUnsigned(isSymUnsigned); + Adjustment.extOrTrunc(bitwidth); - // Here we assume that LHS is a symbol. 
This is consistent with the - // rest of the constraint manager logic. - SymbolRef Sym = cast<SymbolData>(SE->getLHS()); - const llvm::APSInt &Int = SE->getRHS(); + // Convert the right-hand side integer. + llvm::APSInt ConvertedInt(Int, isSymUnsigned); + ConvertedInt.extOrTrunc(bitwidth); - switch (SE->getOpcode()) { + switch (op) { default: // No logic yet for other operators. Assume the constraint is feasible. return state; case BinaryOperator::EQ: - return Assumption ? AssumeSymEQ(state, Sym, Int) - : AssumeSymNE(state, Sym, Int); + return AssumeSymEQ(state, Sym, ConvertedInt, Adjustment); case BinaryOperator::NE: - return Assumption ? AssumeSymNE(state, Sym, Int) - : AssumeSymEQ(state, Sym, Int); + return AssumeSymNE(state, Sym, ConvertedInt, Adjustment); + case BinaryOperator::GT: - return Assumption ? AssumeSymGT(state, Sym, Int) - : AssumeSymLE(state, Sym, Int); + return AssumeSymGT(state, Sym, ConvertedInt, Adjustment); case BinaryOperator::GE: - return Assumption ? AssumeSymGE(state, Sym, Int) - : AssumeSymLT(state, Sym, Int); + return AssumeSymGE(state, Sym, ConvertedInt, Adjustment); case BinaryOperator::LT: - return Assumption ? AssumeSymLT(state, Sym, Int) - : AssumeSymGE(state, Sym, Int); + return AssumeSymLT(state, Sym, ConvertedInt, Adjustment); case BinaryOperator::LE: - return Assumption ? 
AssumeSymLE(state, Sym, Int) - : AssumeSymGT(state, Sym, Int); + return AssumeSymLE(state, Sym, ConvertedInt, Adjustment); } // end switch } diff --git a/contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.h b/contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.h index 5f20e00..45057e6 100644 --- a/contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.h +++ b/contrib/llvm/tools/clang/lib/Checker/SimpleConstraintManager.h @@ -38,8 +38,10 @@ public: const GRState *Assume(const GRState *state, NonLoc Cond, bool Assumption); - const GRState *AssumeSymInt(const GRState *state, bool Assumption, - const SymIntExpr *SE); + const GRState *AssumeSymRel(const GRState *state, + const SymExpr *LHS, + BinaryOperator::Opcode op, + const llvm::APSInt& Int); const GRState *AssumeInBound(const GRState *state, DefinedSVal Idx, DefinedSVal UpperBound, @@ -51,23 +53,31 @@ protected: // Interface that subclasses must implement. //===------------------------------------------------------------------===// + // Each of these is of the form "$sym+Adj <> V", where "<>" is the comparison + // operation for the method being invoked. 
virtual const GRState *AssumeSymNE(const GRState *state, SymbolRef sym, - const llvm::APSInt& V) = 0; + const llvm::APSInt& V, + const llvm::APSInt& Adjustment) = 0; virtual const GRState *AssumeSymEQ(const GRState *state, SymbolRef sym, - const llvm::APSInt& V) = 0; + const llvm::APSInt& V, + const llvm::APSInt& Adjustment) = 0; virtual const GRState *AssumeSymLT(const GRState *state, SymbolRef sym, - const llvm::APSInt& V) = 0; + const llvm::APSInt& V, + const llvm::APSInt& Adjustment) = 0; virtual const GRState *AssumeSymGT(const GRState *state, SymbolRef sym, - const llvm::APSInt& V) = 0; + const llvm::APSInt& V, + const llvm::APSInt& Adjustment) = 0; virtual const GRState *AssumeSymLE(const GRState *state, SymbolRef sym, - const llvm::APSInt& V) = 0; + const llvm::APSInt& V, + const llvm::APSInt& Adjustment) = 0; virtual const GRState *AssumeSymGE(const GRState *state, SymbolRef sym, - const llvm::APSInt& V) = 0; + const llvm::APSInt& V, + const llvm::APSInt& Adjustment) = 0; //===------------------------------------------------------------------===// // Internal implementation. diff --git a/contrib/llvm/tools/clang/lib/Checker/SimpleSValuator.cpp b/contrib/llvm/tools/clang/lib/Checker/SimpleSValuator.cpp index dd38a43..3bc4ee7 100644 --- a/contrib/llvm/tools/clang/lib/Checker/SimpleSValuator.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/SimpleSValuator.cpp @@ -30,10 +30,17 @@ public: virtual SVal EvalComplement(NonLoc val); virtual SVal EvalBinOpNN(const GRState *state, BinaryOperator::Opcode op, NonLoc lhs, NonLoc rhs, QualType resultTy); - virtual SVal EvalBinOpLL(BinaryOperator::Opcode op, Loc lhs, Loc rhs, - QualType resultTy); + virtual SVal EvalBinOpLL(const GRState *state, BinaryOperator::Opcode op, + Loc lhs, Loc rhs, QualType resultTy); virtual SVal EvalBinOpLN(const GRState *state, BinaryOperator::Opcode op, Loc lhs, NonLoc rhs, QualType resultTy); + + /// getKnownValue - Evaluates a given SVal. 
If the SVal has only one possible + /// (integer) value, that value is returned. Otherwise, returns NULL. + virtual const llvm::APSInt *getKnownValue(const GRState *state, SVal V); + + SVal MakeSymIntVal(const SymExpr *LHS, BinaryOperator::Opcode op, + const llvm::APSInt &RHS, QualType resultTy); }; } // end anonymous namespace @@ -170,45 +177,93 @@ static BinaryOperator::Opcode NegateComparison(BinaryOperator::Opcode op) { } } -// Equality operators for Locs. -// FIXME: All this logic will be revamped when we have MemRegion::getLocation() -// implemented. - -static SVal EvalEquality(ValueManager &ValMgr, Loc lhs, Loc rhs, bool isEqual, - QualType resultTy) { +static BinaryOperator::Opcode ReverseComparison(BinaryOperator::Opcode op) { + switch (op) { + default: + assert(false && "Invalid opcode."); + case BinaryOperator::LT: return BinaryOperator::GT; + case BinaryOperator::GT: return BinaryOperator::LT; + case BinaryOperator::LE: return BinaryOperator::GE; + case BinaryOperator::GE: return BinaryOperator::LE; + case BinaryOperator::EQ: + case BinaryOperator::NE: + return op; + } +} - switch (lhs.getSubKind()) { - default: - assert(false && "EQ/NE not implemented for this Loc."); - return UnknownVal(); +SVal SimpleSValuator::MakeSymIntVal(const SymExpr *LHS, + BinaryOperator::Opcode op, + const llvm::APSInt &RHS, + QualType resultTy) { + bool isIdempotent = false; - case loc::ConcreteIntKind: { - if (SymbolRef rSym = rhs.getAsSymbol()) - return ValMgr.makeNonLoc(rSym, - isEqual ? BinaryOperator::EQ - : BinaryOperator::NE, - cast<loc::ConcreteInt>(lhs).getValue(), - resultTy); - break; - } - case loc::MemRegionKind: { - if (SymbolRef lSym = lhs.getAsLocSymbol()) { - if (isa<loc::ConcreteInt>(rhs)) { - return ValMgr.makeNonLoc(lSym, - isEqual ? BinaryOperator::EQ - : BinaryOperator::NE, - cast<loc::ConcreteInt>(rhs).getValue(), - resultTy); - } - } - break; + // Check for a few special cases with known reductions first. 
+ switch (op) { + default: + // We can't reduce this case; just treat it normally. + break; + case BinaryOperator::Mul: + // a*0 and a*1 + if (RHS == 0) + return ValMgr.makeIntVal(0, resultTy); + else if (RHS == 1) + isIdempotent = true; + break; + case BinaryOperator::Div: + // a/0 and a/1 + if (RHS == 0) + // This is also handled elsewhere. + return UndefinedVal(); + else if (RHS == 1) + isIdempotent = true; + break; + case BinaryOperator::Rem: + // a%0 and a%1 + if (RHS == 0) + // This is also handled elsewhere. + return UndefinedVal(); + else if (RHS == 1) + return ValMgr.makeIntVal(0, resultTy); + break; + case BinaryOperator::Add: + case BinaryOperator::Sub: + case BinaryOperator::Shl: + case BinaryOperator::Shr: + case BinaryOperator::Xor: + // a+0, a-0, a<<0, a>>0, a^0 + if (RHS == 0) + isIdempotent = true; + break; + case BinaryOperator::And: + // a&0 and a&(~0) + if (RHS == 0) + return ValMgr.makeIntVal(0, resultTy); + else if (RHS.isAllOnesValue()) + isIdempotent = true; + break; + case BinaryOperator::Or: + // a|0 and a|(~0) + if (RHS == 0) + isIdempotent = true; + else if (RHS.isAllOnesValue()) { + BasicValueFactory &BVF = ValMgr.getBasicValueFactory(); + const llvm::APSInt &Result = BVF.Convert(resultTy, RHS); + return nonloc::ConcreteInt(Result); } + break; + } - case loc::GotoLabelKind: - break; + // Idempotent ops (like a*1) can still change the type of an expression. + // Wrap the LHS up in a NonLoc again and let EvalCastNL do the dirty work. + if (isIdempotent) { + if (SymbolRef LHSSym = dyn_cast<SymbolData>(LHS)) + return EvalCastNL(nonloc::SymbolVal(LHSSym), resultTy); + return EvalCastNL(nonloc::SymExprVal(LHS), resultTy); } - return ValMgr.makeTruthVal(isEqual ? lhs == rhs : lhs != rhs, resultTy); + // If we reach this point, the expression cannot be simplified. + // Make a SymExprVal for the entire thing. 
+ return ValMgr.makeNonLoc(LHS, op, RHS, resultTy); } SVal SimpleSValuator::EvalBinOpNN(const GRState *state, @@ -228,6 +283,12 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state, case BinaryOperator::GT: case BinaryOperator::NE: return ValMgr.makeTruthVal(false, resultTy); + case BinaryOperator::Xor: + case BinaryOperator::Sub: + return ValMgr.makeIntVal(0, resultTy); + case BinaryOperator::Or: + case BinaryOperator::And: + return EvalCastNL(lhs, resultTy); } while (1) { @@ -238,7 +299,8 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state, Loc lhsL = cast<nonloc::LocAsInteger>(lhs).getLoc(); switch (rhs.getSubKind()) { case nonloc::LocAsIntegerKind: - return EvalBinOpLL(op, lhsL, cast<nonloc::LocAsInteger>(rhs).getLoc(), + return EvalBinOpLL(state, op, lhsL, + cast<nonloc::LocAsInteger>(rhs).getLoc(), resultTy); case nonloc::ConcreteIntKind: { // Transform the integer into a location and compare. @@ -246,7 +308,7 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state, llvm::APSInt i = cast<nonloc::ConcreteInt>(rhs).getValue(); i.setIsUnsigned(true); i.extOrTrunc(Ctx.getTypeSize(Ctx.VoidPtrTy)); - return EvalBinOpLL(op, lhsL, ValMgr.makeLoc(i), resultTy); + return EvalBinOpLL(state, op, lhsL, ValMgr.makeLoc(i), resultTy); } default: switch (op) { @@ -261,87 +323,136 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state, } } case nonloc::SymExprValKind: { - // Logical not? - if (!(op == BinaryOperator::EQ && rhs.isZeroConstant())) + nonloc::SymExprVal *selhs = cast<nonloc::SymExprVal>(&lhs); + + // Only handle LHS of the form "$sym op constant", at least for now. + const SymIntExpr *symIntExpr = + dyn_cast<SymIntExpr>(selhs->getSymbolicExpression()); + + if (!symIntExpr) return UnknownVal(); - const SymExpr *symExpr = - cast<nonloc::SymExprVal>(lhs).getSymbolicExpression(); + // Is this a logical not? (!x is represented as x == 0.) + if (op == BinaryOperator::EQ && rhs.isZeroConstant()) { + // We know how to negate certain expressions. 
Simplify them here. - // Only handle ($sym op constant) for now. - if (const SymIntExpr *symIntExpr = dyn_cast<SymIntExpr>(symExpr)) { BinaryOperator::Opcode opc = symIntExpr->getOpcode(); switch (opc) { - case BinaryOperator::LAnd: - case BinaryOperator::LOr: - assert(false && "Logical operators handled by branching logic."); - return UnknownVal(); - case BinaryOperator::Assign: - case BinaryOperator::MulAssign: - case BinaryOperator::DivAssign: - case BinaryOperator::RemAssign: - case BinaryOperator::AddAssign: - case BinaryOperator::SubAssign: - case BinaryOperator::ShlAssign: - case BinaryOperator::ShrAssign: - case BinaryOperator::AndAssign: - case BinaryOperator::XorAssign: - case BinaryOperator::OrAssign: - case BinaryOperator::Comma: - assert(false && "'=' and ',' operators handled by GRExprEngine."); - return UnknownVal(); - case BinaryOperator::PtrMemD: - case BinaryOperator::PtrMemI: - assert(false && "Pointer arithmetic not handled here."); - return UnknownVal(); - case BinaryOperator::Mul: - case BinaryOperator::Div: - case BinaryOperator::Rem: - case BinaryOperator::Add: - case BinaryOperator::Sub: - case BinaryOperator::Shl: - case BinaryOperator::Shr: - case BinaryOperator::And: - case BinaryOperator::Xor: - case BinaryOperator::Or: - // Not handled yet. - return UnknownVal(); - case BinaryOperator::LT: - case BinaryOperator::GT: - case BinaryOperator::LE: - case BinaryOperator::GE: - case BinaryOperator::EQ: - case BinaryOperator::NE: - opc = NegateComparison(opc); - assert(symIntExpr->getType(ValMgr.getContext()) == resultTy); - return ValMgr.makeNonLoc(symIntExpr->getLHS(), opc, - symIntExpr->getRHS(), resultTy); + default: + // We don't know how to negate this operation. + // Just handle it as if it were a normal comparison to 0. 
+ break; + case BinaryOperator::LAnd: + case BinaryOperator::LOr: + assert(false && "Logical operators handled by branching logic."); + return UnknownVal(); + case BinaryOperator::Assign: + case BinaryOperator::MulAssign: + case BinaryOperator::DivAssign: + case BinaryOperator::RemAssign: + case BinaryOperator::AddAssign: + case BinaryOperator::SubAssign: + case BinaryOperator::ShlAssign: + case BinaryOperator::ShrAssign: + case BinaryOperator::AndAssign: + case BinaryOperator::XorAssign: + case BinaryOperator::OrAssign: + case BinaryOperator::Comma: + assert(false && "'=' and ',' operators handled by GRExprEngine."); + return UnknownVal(); + case BinaryOperator::PtrMemD: + case BinaryOperator::PtrMemI: + assert(false && "Pointer arithmetic not handled here."); + return UnknownVal(); + case BinaryOperator::LT: + case BinaryOperator::GT: + case BinaryOperator::LE: + case BinaryOperator::GE: + case BinaryOperator::EQ: + case BinaryOperator::NE: + // Negate the comparison and make a value. + opc = NegateComparison(opc); + assert(symIntExpr->getType(ValMgr.getContext()) == resultTy); + return ValMgr.makeNonLoc(symIntExpr->getLHS(), opc, + symIntExpr->getRHS(), resultTy); } } + + // For now, only handle expressions whose RHS is a constant. + const nonloc::ConcreteInt *rhsInt = dyn_cast<nonloc::ConcreteInt>(&rhs); + if (!rhsInt) + return UnknownVal(); + + // If both the LHS and the current expression are additive, + // fold their constants. + if (BinaryOperator::isAdditiveOp(op)) { + BinaryOperator::Opcode lop = symIntExpr->getOpcode(); + if (BinaryOperator::isAdditiveOp(lop)) { + BasicValueFactory &BVF = ValMgr.getBasicValueFactory(); + + // resultTy may not be the best type to convert to, but it's + // probably the best choice in expressions with mixed type + // (such as x+1U+2LL). The rules for implicit conversions should + // choose a reasonable type to preserve the expression, and will + // at least match how the value is going to be used. 
+ const llvm::APSInt &first = + BVF.Convert(resultTy, symIntExpr->getRHS()); + const llvm::APSInt &second = + BVF.Convert(resultTy, rhsInt->getValue()); + + const llvm::APSInt *newRHS; + if (lop == op) + newRHS = BVF.EvaluateAPSInt(BinaryOperator::Add, first, second); + else + newRHS = BVF.EvaluateAPSInt(BinaryOperator::Sub, first, second); + return MakeSymIntVal(symIntExpr->getLHS(), lop, *newRHS, resultTy); + } + } + + // Otherwise, make a SymExprVal out of the expression. + return MakeSymIntVal(symIntExpr, op, rhsInt->getValue(), resultTy); } case nonloc::ConcreteIntKind: { + const nonloc::ConcreteInt& lhsInt = cast<nonloc::ConcreteInt>(lhs); + if (isa<nonloc::ConcreteInt>(rhs)) { - const nonloc::ConcreteInt& lhsInt = cast<nonloc::ConcreteInt>(lhs); return lhsInt.evalBinOp(ValMgr, op, cast<nonloc::ConcreteInt>(rhs)); - } - else { + } else { + const llvm::APSInt& lhsValue = lhsInt.getValue(); + // Swap the left and right sides and flip the operator if doing so // allows us to better reason about the expression (this is a form // of expression canonicalization). + // While we're at it, catch some special cases for non-commutative ops. NonLoc tmp = rhs; rhs = lhs; lhs = tmp; switch (op) { - case BinaryOperator::LT: op = BinaryOperator::GT; continue; - case BinaryOperator::GT: op = BinaryOperator::LT; continue; - case BinaryOperator::LE: op = BinaryOperator::GE; continue; - case BinaryOperator::GE: op = BinaryOperator::LE; continue; + case BinaryOperator::LT: + case BinaryOperator::GT: + case BinaryOperator::LE: + case BinaryOperator::GE: + op = ReverseComparison(op); + continue; case BinaryOperator::EQ: case BinaryOperator::NE: case BinaryOperator::Add: case BinaryOperator::Mul: + case BinaryOperator::And: + case BinaryOperator::Xor: + case BinaryOperator::Or: continue; + case BinaryOperator::Shr: + if (lhsValue.isAllOnesValue() && lhsValue.isSigned()) + // At this point lhs and rhs have been swapped. 
+ return rhs; + // FALL-THROUGH + case BinaryOperator::Shl: + if (lhsValue == 0) + // At this point lhs and rhs have been swapped. + return rhs; + return UnknownVal(); default: return UnknownVal(); } @@ -377,9 +488,9 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state, } if (isa<nonloc::ConcreteInt>(rhs)) { - return ValMgr.makeNonLoc(slhs->getSymbol(), op, - cast<nonloc::ConcreteInt>(rhs).getValue(), - resultTy); + return MakeSymIntVal(slhs->getSymbol(), op, + cast<nonloc::ConcreteInt>(rhs).getValue(), + resultTy); } return UnknownVal(); @@ -388,21 +499,301 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state, } } -SVal SimpleSValuator::EvalBinOpLL(BinaryOperator::Opcode op, Loc lhs, Loc rhs, +// FIXME: all this logic will change if/when we have MemRegion::getLocation(). +SVal SimpleSValuator::EvalBinOpLL(const GRState *state, + BinaryOperator::Opcode op, + Loc lhs, Loc rhs, QualType resultTy) { - switch (op) { + // Only comparisons and subtractions are valid operations on two pointers. + // See [C99 6.5.5 through 6.5.14] or [C++0x 5.6 through 5.15]. + // However, if a pointer is casted to an integer, EvalBinOpNN may end up + // calling this function with another operation (PR7527). We don't attempt to + // model this for now, but it could be useful, particularly when the + // "location" is actually an integer value that's been passed through a void*. + if (!(BinaryOperator::isComparisonOp(op) || op == BinaryOperator::Sub)) + return UnknownVal(); + + // Special cases for when both sides are identical. 
+ if (lhs == rhs) { + switch (op) { default: + assert(false && "Unimplemented operation for two identical values"); return UnknownVal(); + case BinaryOperator::Sub: + return ValMgr.makeZeroVal(resultTy); case BinaryOperator::EQ: + case BinaryOperator::LE: + case BinaryOperator::GE: + return ValMgr.makeTruthVal(true, resultTy); case BinaryOperator::NE: - return EvalEquality(ValMgr, lhs, rhs, op == BinaryOperator::EQ, resultTy); case BinaryOperator::LT: case BinaryOperator::GT: - // FIXME: Generalize. For now, just handle the trivial case where - // the two locations are identical. - if (lhs == rhs) + return ValMgr.makeTruthVal(false, resultTy); + } + } + + switch (lhs.getSubKind()) { + default: + assert(false && "Ordering not implemented for this Loc."); + return UnknownVal(); + + case loc::GotoLabelKind: + // The only thing we know about labels is that they're non-null. + if (rhs.isZeroConstant()) { + switch (op) { + default: + break; + case BinaryOperator::Sub: + return EvalCastL(lhs, resultTy); + case BinaryOperator::EQ: + case BinaryOperator::LE: + case BinaryOperator::LT: return ValMgr.makeTruthVal(false, resultTy); + case BinaryOperator::NE: + case BinaryOperator::GT: + case BinaryOperator::GE: + return ValMgr.makeTruthVal(true, resultTy); + } + } + // There may be two labels for the same location, and a function region may + // have the same address as a label at the start of the function (depending + // on the ABI). + // FIXME: we can probably do a comparison against other MemRegions, though. + // FIXME: is there a way to tell if two labels refer to the same location? + return UnknownVal(); + + case loc::ConcreteIntKind: { + // If one of the operands is a symbol and the other is a constant, + // build an expression for use by the constraint manager. + if (SymbolRef rSym = rhs.getAsLocSymbol()) { + // We can only build expressions with symbols on the left, + // so we need a reversible operator. 
+ if (!BinaryOperator::isComparisonOp(op)) + return UnknownVal(); + + const llvm::APSInt &lVal = cast<loc::ConcreteInt>(lhs).getValue(); + return ValMgr.makeNonLoc(rSym, ReverseComparison(op), lVal, resultTy); + } + + // If both operands are constants, just perform the operation. + if (loc::ConcreteInt *rInt = dyn_cast<loc::ConcreteInt>(&rhs)) { + BasicValueFactory &BVF = ValMgr.getBasicValueFactory(); + SVal ResultVal = cast<loc::ConcreteInt>(lhs).EvalBinOp(BVF, op, *rInt); + if (Loc *Result = dyn_cast<Loc>(&ResultVal)) + return EvalCastL(*Result, resultTy); + else + return UnknownVal(); + } + + // Special case comparisons against NULL. + // This must come after the test if the RHS is a symbol, which is used to + // build constraints. The address of any non-symbolic region is guaranteed + // to be non-NULL, as is any label. + assert(isa<loc::MemRegionVal>(rhs) || isa<loc::GotoLabel>(rhs)); + if (lhs.isZeroConstant()) { + switch (op) { + default: + break; + case BinaryOperator::EQ: + case BinaryOperator::GT: + case BinaryOperator::GE: + return ValMgr.makeTruthVal(false, resultTy); + case BinaryOperator::NE: + case BinaryOperator::LT: + case BinaryOperator::LE: + return ValMgr.makeTruthVal(true, resultTy); + } + } + + // Comparing an arbitrary integer to a region or label address is + // completely unknowable. + return UnknownVal(); + } + case loc::MemRegionKind: { + if (loc::ConcreteInt *rInt = dyn_cast<loc::ConcreteInt>(&rhs)) { + // If one of the operands is a symbol and the other is a constant, + // build an expression for use by the constraint manager. + if (SymbolRef lSym = lhs.getAsLocSymbol()) + return MakeSymIntVal(lSym, op, rInt->getValue(), resultTy); + + // Special case comparisons to NULL. + // This must come after the test if the LHS is a symbol, which is used to + // build constraints. The address of any non-symbolic region is guaranteed + // to be non-NULL. 
+ if (rInt->isZeroConstant()) { + switch (op) { + default: + break; + case BinaryOperator::Sub: + return EvalCastL(lhs, resultTy); + case BinaryOperator::EQ: + case BinaryOperator::LT: + case BinaryOperator::LE: + return ValMgr.makeTruthVal(false, resultTy); + case BinaryOperator::NE: + case BinaryOperator::GT: + case BinaryOperator::GE: + return ValMgr.makeTruthVal(true, resultTy); + } + } + + // Comparing a region to an arbitrary integer is completely unknowable. + return UnknownVal(); + } + + // Get both values as regions, if possible. + const MemRegion *LeftMR = lhs.getAsRegion(); + assert(LeftMR && "MemRegionKind SVal doesn't have a region!"); + + const MemRegion *RightMR = rhs.getAsRegion(); + if (!RightMR) + // The RHS is probably a label, which in theory could address a region. + // FIXME: we can probably make a more useful statement about non-code + // regions, though. + return UnknownVal(); + + // If both values wrap regions, see if they're from different base regions. + const MemRegion *LeftBase = LeftMR->getBaseRegion(); + const MemRegion *RightBase = RightMR->getBaseRegion(); + if (LeftBase != RightBase && + !isa<SymbolicRegion>(LeftBase) && !isa<SymbolicRegion>(RightBase)) { + switch (op) { + default: + return UnknownVal(); + case BinaryOperator::EQ: + return ValMgr.makeTruthVal(false, resultTy); + case BinaryOperator::NE: + return ValMgr.makeTruthVal(true, resultTy); + } + } + + // The two regions are from the same base region. See if they're both a + // type of region we know how to compare. + + // FIXME: If/when there is a getAsRawOffset() for FieldRegions, this + // ElementRegion path and the FieldRegion path below should be unified. + if (const ElementRegion *LeftER = dyn_cast<ElementRegion>(LeftMR)) { + // First see if the right region is also an ElementRegion. + const ElementRegion *RightER = dyn_cast<ElementRegion>(RightMR); + if (!RightER) + return UnknownVal(); + + // Next, see if the two ERs have the same super-region and matching types. 
+ // FIXME: This should do something useful even if the types don't match, + // though if both indexes are constant the RegionRawOffset path will + // give the correct answer. + if (LeftER->getSuperRegion() == RightER->getSuperRegion() && + LeftER->getElementType() == RightER->getElementType()) { + // Get the left index and cast it to the correct type. + // If the index is unknown or undefined, bail out here. + SVal LeftIndexVal = LeftER->getIndex(); + NonLoc *LeftIndex = dyn_cast<NonLoc>(&LeftIndexVal); + if (!LeftIndex) + return UnknownVal(); + LeftIndexVal = EvalCastNL(*LeftIndex, resultTy); + LeftIndex = dyn_cast<NonLoc>(&LeftIndexVal); + if (!LeftIndex) + return UnknownVal(); + + // Do the same for the right index. + SVal RightIndexVal = RightER->getIndex(); + NonLoc *RightIndex = dyn_cast<NonLoc>(&RightIndexVal); + if (!RightIndex) + return UnknownVal(); + RightIndexVal = EvalCastNL(*RightIndex, resultTy); + RightIndex = dyn_cast<NonLoc>(&RightIndexVal); + if (!RightIndex) + return UnknownVal(); + + // Actually perform the operation. + // EvalBinOpNN expects the two indexes to already be the right type. + return EvalBinOpNN(state, op, *LeftIndex, *RightIndex, resultTy); + } + + // If the element indexes aren't comparable, see if the raw offsets are. 
+ RegionRawOffset LeftOffset = LeftER->getAsRawOffset(); + RegionRawOffset RightOffset = RightER->getAsRawOffset(); + + if (LeftOffset.getRegion() != NULL && + LeftOffset.getRegion() == RightOffset.getRegion()) { + int64_t left = LeftOffset.getByteOffset(); + int64_t right = RightOffset.getByteOffset(); + + switch (op) { + default: + return UnknownVal(); + case BinaryOperator::LT: + return ValMgr.makeTruthVal(left < right, resultTy); + case BinaryOperator::GT: + return ValMgr.makeTruthVal(left > right, resultTy); + case BinaryOperator::LE: + return ValMgr.makeTruthVal(left <= right, resultTy); + case BinaryOperator::GE: + return ValMgr.makeTruthVal(left >= right, resultTy); + case BinaryOperator::EQ: + return ValMgr.makeTruthVal(left == right, resultTy); + case BinaryOperator::NE: + return ValMgr.makeTruthVal(left != right, resultTy); + } + } + + // If we get here, we have no way of comparing the ElementRegions. return UnknownVal(); + } + + // See if both regions are fields of the same structure. + // FIXME: This doesn't handle nesting, inheritance, or Objective-C ivars. + if (const FieldRegion *LeftFR = dyn_cast<FieldRegion>(LeftMR)) { + // Only comparisons are meaningful here! + if (!BinaryOperator::isComparisonOp(op)) + return UnknownVal(); + + // First see if the right region is also a FieldRegion. + const FieldRegion *RightFR = dyn_cast<FieldRegion>(RightMR); + if (!RightFR) + return UnknownVal(); + + // Next, see if the two FRs have the same super-region. + // FIXME: This doesn't handle casts yet, and simply stripping the casts + // doesn't help. + if (LeftFR->getSuperRegion() != RightFR->getSuperRegion()) + return UnknownVal(); + + const FieldDecl *LeftFD = LeftFR->getDecl(); + const FieldDecl *RightFD = RightFR->getDecl(); + const RecordDecl *RD = LeftFD->getParent(); + + // Make sure the two FRs are from the same kind of record. Just in case! + // FIXME: This is probably where inheritance would be a problem. 
+ if (RD != RightFD->getParent()) + return UnknownVal(); + + // We know for sure that the two fields are not the same, since that + // would have given us the same SVal. + if (op == BinaryOperator::EQ) + return ValMgr.makeTruthVal(false, resultTy); + if (op == BinaryOperator::NE) + return ValMgr.makeTruthVal(true, resultTy); + + // Iterate through the fields and see which one comes first. + // [C99 6.7.2.1.13] "Within a structure object, the non-bit-field + // members and the units in which bit-fields reside have addresses that + // increase in the order in which they are declared." + bool leftFirst = (op == BinaryOperator::LT || op == BinaryOperator::LE); + for (RecordDecl::field_iterator I = RD->field_begin(), + E = RD->field_end(); I!=E; ++I) { + if (*I == LeftFD) + return ValMgr.makeTruthVal(leftFirst, resultTy); + if (*I == RightFD) + return ValMgr.makeTruthVal(!leftFirst, resultTy); + } + + assert(false && "Fields not found in parent record's definition"); + } + + // If we get here, we have no way of comparing the regions. + return UnknownVal(); + } } } @@ -414,7 +805,7 @@ SVal SimpleSValuator::EvalBinOpLN(const GRState *state, // triggered, but transfer functions like those for OSCommpareAndSwapBarrier32 // can generate comparisons that trigger this code. // FIXME: Are all locations guaranteed to have pointer width? 
- if (BinaryOperator::isEqualityOp(op)) { + if (BinaryOperator::isComparisonOp(op)) { if (nonloc::ConcreteInt *rhsInt = dyn_cast<nonloc::ConcreteInt>(&rhs)) { const llvm::APSInt *x = &rhsInt->getValue(); ASTContext &ctx = ValMgr.getContext(); @@ -423,7 +814,7 @@ SVal SimpleSValuator::EvalBinOpLN(const GRState *state, if (x->isSigned()) x = &ValMgr.getBasicValueFactory().getValue(*x, true); - return EvalBinOpLL(op, lhs, loc::ConcreteInt(*x), resultTy); + return EvalBinOpLL(state, op, lhs, loc::ConcreteInt(*x), resultTy); } } } @@ -432,3 +823,21 @@ SVal SimpleSValuator::EvalBinOpLN(const GRState *state, return state->getStateManager().getStoreManager().EvalBinOp(op, lhs, rhs, resultTy); } + +const llvm::APSInt *SimpleSValuator::getKnownValue(const GRState *state, + SVal V) { + if (V.isUnknownOrUndef()) + return NULL; + + if (loc::ConcreteInt* X = dyn_cast<loc::ConcreteInt>(&V)) + return &X->getValue(); + + if (nonloc::ConcreteInt* X = dyn_cast<nonloc::ConcreteInt>(&V)) + return &X->getValue(); + + if (SymbolRef Sym = V.getAsSymbol()) + return state->getSymVal(Sym); + + // FIXME: Add support for SymExprs. + return NULL; +} diff --git a/contrib/llvm/tools/clang/lib/Checker/StackAddrLeakChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/StackAddrLeakChecker.cpp new file mode 100644 index 0000000..f4a9db6 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/Checker/StackAddrLeakChecker.cpp @@ -0,0 +1,204 @@ +//=== StackAddrLeakChecker.cpp ------------------------------------*- C++ -*--// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines stack address leak checker, which checks if an invalid +// stack address is stored into a global or heap location. See CERT DCL30-C. 
+// +//===----------------------------------------------------------------------===// + +#include "GRExprEngineInternalChecks.h" +#include "clang/Checker/BugReporter/BugType.h" +#include "clang/Checker/PathSensitive/CheckerVisitor.h" +#include "clang/Checker/PathSensitive/GRState.h" +#include "clang/Basic/SourceManager.h" +#include "llvm/ADT/SmallString.h" +using namespace clang; + +namespace { +class StackAddrLeakChecker : public CheckerVisitor<StackAddrLeakChecker> { + BuiltinBug *BT_stackleak; + BuiltinBug *BT_returnstack; + +public: + StackAddrLeakChecker() : BT_stackleak(0), BT_returnstack(0) {} + static void *getTag() { + static int x; + return &x; + } + void PreVisitReturnStmt(CheckerContext &C, const ReturnStmt *RS); + void EvalEndPath(GREndPathNodeBuilder &B, void *tag, GRExprEngine &Eng); +private: + void EmitStackError(CheckerContext &C, const MemRegion *R, const Expr *RetE); + SourceRange GenName(llvm::raw_ostream &os, const MemRegion *R, + SourceManager &SM); +}; +} + +void clang::RegisterStackAddrLeakChecker(GRExprEngine &Eng) { + Eng.registerCheck(new StackAddrLeakChecker()); +} + +SourceRange StackAddrLeakChecker::GenName(llvm::raw_ostream &os, + const MemRegion *R, + SourceManager &SM) { + // Get the base region, stripping away fields and elements. + R = R->getBaseRegion(); + SourceRange range; + os << "Address of "; + + // Check if the region is a compound literal. 
+ if (const CompoundLiteralRegion* CR = dyn_cast<CompoundLiteralRegion>(R)) { + const CompoundLiteralExpr* CL = CR->getLiteralExpr(); + os << "stack memory associated with a compound literal " + "declared on line " + << SM.getInstantiationLineNumber(CL->getLocStart()) + << " returned to caller"; + range = CL->getSourceRange(); + } + else if (const AllocaRegion* AR = dyn_cast<AllocaRegion>(R)) { + const Expr* ARE = AR->getExpr(); + SourceLocation L = ARE->getLocStart(); + range = ARE->getSourceRange(); + os << "stack memory allocated by call to alloca() on line " + << SM.getInstantiationLineNumber(L); + } + else if (const BlockDataRegion *BR = dyn_cast<BlockDataRegion>(R)) { + const BlockDecl *BD = BR->getCodeRegion()->getDecl(); + SourceLocation L = BD->getLocStart(); + range = BD->getSourceRange(); + os << "stack-allocated block declared on line " + << SM.getInstantiationLineNumber(L); + } + else if (const VarRegion *VR = dyn_cast<VarRegion>(R)) { + os << "stack memory associated with local variable '" + << VR->getString() << '\''; + range = VR->getDecl()->getSourceRange(); + } + else { + assert(false && "Invalid region in ReturnStackAddressChecker."); + } + + return range; +} + +void StackAddrLeakChecker::EmitStackError(CheckerContext &C, const MemRegion *R, + const Expr *RetE) { + ExplodedNode *N = C.GenerateSink(); + + if (!N) + return; + + if (!BT_returnstack) + BT_returnstack=new BuiltinBug("Return of address to stack-allocated memory"); + + // Generate a report for this bug. 
+ llvm::SmallString<512> buf; + llvm::raw_svector_ostream os(buf); + SourceRange range = GenName(os, R, C.getSourceManager()); + os << " returned to caller"; + RangedBugReport *report = new RangedBugReport(*BT_returnstack, os.str(), N); + report->addRange(RetE->getSourceRange()); + if (range.isValid()) + report->addRange(range); + + C.EmitReport(report); +} + +void StackAddrLeakChecker::PreVisitReturnStmt(CheckerContext &C, + const ReturnStmt *RS) { + + const Expr *RetE = RS->getRetValue(); + if (!RetE) + return; + + SVal V = C.getState()->getSVal(RetE); + const MemRegion *R = V.getAsRegion(); + + if (!R || !R->hasStackStorage()) + return; + + if (R->hasStackStorage()) { + EmitStackError(C, R, RetE); + return; + } +} + +void StackAddrLeakChecker::EvalEndPath(GREndPathNodeBuilder &B, void *tag, + GRExprEngine &Eng) { + SaveAndRestore<bool> OldHasGen(B.HasGeneratedNode); + const GRState *state = B.getState(); + + // Iterate over all bindings to global variables and see if it contains + // a memory region in the stack space. + class CallBack : public StoreManager::BindingsHandler { + private: + const StackFrameContext *CurSFC; + public: + llvm::SmallVector<std::pair<const MemRegion*, const MemRegion*>, 10> V; + + CallBack(const LocationContext *LCtx) + : CurSFC(LCtx->getCurrentStackFrame()) {} + + bool HandleBinding(StoreManager &SMgr, Store store, + const MemRegion *region, SVal val) { + + if (!isa<GlobalsSpaceRegion>(region->getMemorySpace())) + return true; + + const MemRegion *vR = val.getAsRegion(); + if (!vR) + return true; + + if (const StackSpaceRegion *SSR = + dyn_cast<StackSpaceRegion>(vR->getMemorySpace())) { + // If the global variable holds a location in the current stack frame, + // record the binding to emit a warning. 
+ if (SSR->getStackFrame() == CurSFC) + V.push_back(std::make_pair(region, vR)); + } + + return true; + } + }; + + CallBack cb(B.getPredecessor()->getLocationContext()); + state->getStateManager().getStoreManager().iterBindings(state->getStore(),cb); + + if (cb.V.empty()) + return; + + // Generate an error node. + ExplodedNode *N = B.generateNode(state, tag, B.getPredecessor()); + if (!N) + return; + + if (!BT_stackleak) + BT_stackleak = + new BuiltinBug("Stack address stored into global variable", + "Stack address was saved into a global variable. " + "This is dangerous because the address will become " + "invalid after returning from the function"); + + for (unsigned i = 0, e = cb.V.size(); i != e; ++i) { + // Generate a report for this bug. + llvm::SmallString<512> buf; + llvm::raw_svector_ostream os(buf); + SourceRange range = GenName(os, cb.V[i].second, + Eng.getContext().getSourceManager()); + os << " is still referred to by the global variable '"; + const VarRegion *VR = cast<VarRegion>(cb.V[i].first->getBaseRegion()); + os << VR->getDecl()->getNameAsString() + << "' upon returning to the caller. 
This will be a dangling reference"; + RangedBugReport *report = new RangedBugReport(*BT_stackleak, os.str(), N); + if (range.isValid()) + report->addRange(range); + + Eng.getBugReporter().EmitReport(report); + } +} diff --git a/contrib/llvm/tools/clang/lib/Checker/Store.cpp b/contrib/llvm/tools/clang/lib/Checker/Store.cpp index c12065b..b128331 100644 --- a/contrib/llvm/tools/clang/lib/Checker/Store.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/Store.cpp @@ -91,7 +91,8 @@ const MemRegion *StoreManager::CastRegion(const MemRegion *R, QualType CastToTy) case MemRegion::StackArgumentsSpaceRegionKind: case MemRegion::HeapSpaceRegionKind: case MemRegion::UnknownSpaceRegionKind: - case MemRegion::GlobalsSpaceRegionKind: { + case MemRegion::NonStaticGlobalSpaceRegionKind: + case MemRegion::StaticGlobalSpaceRegionKind: { assert(0 && "Invalid region cast"); break; } @@ -232,17 +233,6 @@ SVal StoreManager::CastRetrievedVal(SVal V, const TypedRegion *R, return V; } -Store StoreManager::InvalidateRegions(Store store, - const MemRegion * const *I, - const MemRegion * const *End, - const Expr *E, unsigned Count, - InvalidatedSymbols *IS) { - for ( ; I != End ; ++I) - store = InvalidateRegion(store, *I, E, Count, IS); - - return store; -} - SVal StoreManager::getLValueFieldOrIvar(const Decl* D, SVal Base) { if (Base.isUnknownOrUndef()) return Base; diff --git a/contrib/llvm/tools/clang/lib/Checker/StreamChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/StreamChecker.cpp new file mode 100644 index 0000000..c527ca2 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/Checker/StreamChecker.cpp @@ -0,0 +1,287 @@ +//===-- StreamChecker.cpp -----------------------------------------*- C++ -*--// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file defines checkers that model and check stream handling functions. +// +//===----------------------------------------------------------------------===// + +#include "GRExprEngineExperimentalChecks.h" +#include "clang/Checker/BugReporter/BugType.h" +#include "clang/Checker/PathSensitive/CheckerVisitor.h" +#include "clang/Checker/PathSensitive/GRState.h" +#include "clang/Checker/PathSensitive/GRStateTrait.h" +#include "clang/Checker/PathSensitive/SymbolManager.h" +#include "llvm/ADT/ImmutableMap.h" + +using namespace clang; + +namespace { + +class StreamChecker : public CheckerVisitor<StreamChecker> { + IdentifierInfo *II_fopen, *II_fread, *II_fwrite, + *II_fseek, *II_ftell, *II_rewind, *II_fgetpos, *II_fsetpos, + *II_clearerr, *II_feof, *II_ferror, *II_fileno; + BuiltinBug *BT_nullfp, *BT_illegalwhence; + +public: + StreamChecker() + : II_fopen(0), II_fread(0), II_fwrite(0), + II_fseek(0), II_ftell(0), II_rewind(0), II_fgetpos(0), II_fsetpos(0), + II_clearerr(0), II_feof(0), II_ferror(0), II_fileno(0), + BT_nullfp(0), BT_illegalwhence(0) {} + + static void *getTag() { + static int x; + return &x; + } + + virtual bool EvalCallExpr(CheckerContext &C, const CallExpr *CE); + +private: + void Fopen(CheckerContext &C, const CallExpr *CE); + void Fread(CheckerContext &C, const CallExpr *CE); + void Fwrite(CheckerContext &C, const CallExpr *CE); + void Fseek(CheckerContext &C, const CallExpr *CE); + void Ftell(CheckerContext &C, const CallExpr *CE); + void Rewind(CheckerContext &C, const CallExpr *CE); + void Fgetpos(CheckerContext &C, const CallExpr *CE); + void Fsetpos(CheckerContext &C, const CallExpr *CE); + void Clearerr(CheckerContext &C, const CallExpr *CE); + void Feof(CheckerContext &C, const CallExpr *CE); + void Ferror(CheckerContext &C, const CallExpr *CE); + void Fileno(CheckerContext &C, const CallExpr *CE); + + // Return true indicates the stream pointer is 
NULL. + const GRState *CheckNullStream(SVal SV, const GRState *state, + CheckerContext &C); +}; + +} // end anonymous namespace + +void clang::RegisterStreamChecker(GRExprEngine &Eng) { + Eng.registerCheck(new StreamChecker()); +} + +bool StreamChecker::EvalCallExpr(CheckerContext &C, const CallExpr *CE) { + const GRState *state = C.getState(); + const Expr *Callee = CE->getCallee(); + SVal L = state->getSVal(Callee); + const FunctionDecl *FD = L.getAsFunctionDecl(); + if (!FD) + return false; + + ASTContext &Ctx = C.getASTContext(); + if (!II_fopen) + II_fopen = &Ctx.Idents.get("fopen"); + if (!II_fread) + II_fread = &Ctx.Idents.get("fread"); + if (!II_fwrite) + II_fwrite = &Ctx.Idents.get("fwrite"); + if (!II_fseek) + II_fseek = &Ctx.Idents.get("fseek"); + if (!II_ftell) + II_ftell = &Ctx.Idents.get("ftell"); + if (!II_rewind) + II_rewind = &Ctx.Idents.get("rewind"); + if (!II_fgetpos) + II_fgetpos = &Ctx.Idents.get("fgetpos"); + if (!II_fsetpos) + II_fsetpos = &Ctx.Idents.get("fsetpos"); + if (!II_clearerr) + II_clearerr = &Ctx.Idents.get("clearerr"); + if (!II_feof) + II_feof = &Ctx.Idents.get("feof"); + if (!II_ferror) + II_ferror = &Ctx.Idents.get("ferror"); + if (!II_fileno) + II_fileno = &Ctx.Idents.get("fileno"); + + if (FD->getIdentifier() == II_fopen) { + Fopen(C, CE); + return true; + } + if (FD->getIdentifier() == II_fread) { + Fread(C, CE); + return true; + } + if (FD->getIdentifier() == II_fwrite) { + Fwrite(C, CE); + return true; + } + if (FD->getIdentifier() == II_fseek) { + Fseek(C, CE); + return true; + } + if (FD->getIdentifier() == II_ftell) { + Ftell(C, CE); + return true; + } + if (FD->getIdentifier() == II_rewind) { + Rewind(C, CE); + return true; + } + if (FD->getIdentifier() == II_fgetpos) { + Fgetpos(C, CE); + return true; + } + if (FD->getIdentifier() == II_fsetpos) { + Fsetpos(C, CE); + return true; + } + if (FD->getIdentifier() == II_clearerr) { + Clearerr(C, CE); + return true; + } + if (FD->getIdentifier() == II_feof) { + Feof(C, 
CE); + return true; + } + if (FD->getIdentifier() == II_ferror) { + Ferror(C, CE); + return true; + } + if (FD->getIdentifier() == II_fileno) { + Fileno(C, CE); + return true; + } + + return false; +} + +void StreamChecker::Fopen(CheckerContext &C, const CallExpr *CE) { + const GRState *state = C.getState(); + unsigned Count = C.getNodeBuilder().getCurrentBlockCount(); + ValueManager &ValMgr = C.getValueManager(); + DefinedSVal RetVal = cast<DefinedSVal>(ValMgr.getConjuredSymbolVal(0, CE, + Count)); + state = state->BindExpr(CE, RetVal); + + ConstraintManager &CM = C.getConstraintManager(); + // Bifurcate the state into two: one with a valid FILE* pointer, the other + // with a NULL. + const GRState *stateNotNull, *stateNull; + llvm::tie(stateNotNull, stateNull) = CM.AssumeDual(state, RetVal); + + C.addTransition(stateNotNull); + C.addTransition(stateNull); +} + +void StreamChecker::Fread(CheckerContext &C, const CallExpr *CE) { + const GRState *state = C.getState(); + if (!CheckNullStream(state->getSVal(CE->getArg(3)), state, C)) + return; +} + +void StreamChecker::Fwrite(CheckerContext &C, const CallExpr *CE) { + const GRState *state = C.getState(); + if (!CheckNullStream(state->getSVal(CE->getArg(3)), state, C)) + return; +} + +void StreamChecker::Fseek(CheckerContext &C, const CallExpr *CE) { + const GRState *state = C.getState(); + if (!(state = CheckNullStream(state->getSVal(CE->getArg(0)), state, C))) + return; + // Check the legality of the 'whence' argument of 'fseek'. 
+ SVal Whence = state->getSVal(CE->getArg(2)); + bool WhenceIsLegal = true; + const nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(&Whence); + if (!CI) + WhenceIsLegal = false; + + int64_t x = CI->getValue().getSExtValue(); + if (!(x == 0 || x == 1 || x == 2)) + WhenceIsLegal = false; + + if (!WhenceIsLegal) { + if (ExplodedNode *N = C.GenerateSink(state)) { + if (!BT_illegalwhence) + BT_illegalwhence = new BuiltinBug("Illegal whence argument", + "The whence argument to fseek() should be " + "SEEK_SET, SEEK_END, or SEEK_CUR."); + BugReport *R = new BugReport(*BT_illegalwhence, + BT_illegalwhence->getDescription(), N); + C.EmitReport(R); + } + return; + } + + C.addTransition(state); +} + +void StreamChecker::Ftell(CheckerContext &C, const CallExpr *CE) { + const GRState *state = C.getState(); + if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C)) + return; +} + +void StreamChecker::Rewind(CheckerContext &C, const CallExpr *CE) { + const GRState *state = C.getState(); + if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C)) + return; +} + +void StreamChecker::Fgetpos(CheckerContext &C, const CallExpr *CE) { + const GRState *state = C.getState(); + if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C)) + return; +} + +void StreamChecker::Fsetpos(CheckerContext &C, const CallExpr *CE) { + const GRState *state = C.getState(); + if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C)) + return; +} + +void StreamChecker::Clearerr(CheckerContext &C, const CallExpr *CE) { + const GRState *state = C.getState(); + if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C)) + return; +} + +void StreamChecker::Feof(CheckerContext &C, const CallExpr *CE) { + const GRState *state = C.getState(); + if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C)) + return; +} + +void StreamChecker::Ferror(CheckerContext &C, const CallExpr *CE) { + const GRState *state = C.getState(); + if (!CheckNullStream(state->getSVal(CE->getArg(0)), 
state, C)) + return; +} + +void StreamChecker::Fileno(CheckerContext &C, const CallExpr *CE) { + const GRState *state = C.getState(); + if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C)) + return; +} + +const GRState *StreamChecker::CheckNullStream(SVal SV, const GRState *state, + CheckerContext &C) { + const DefinedSVal *DV = dyn_cast<DefinedSVal>(&SV); + if (!DV) + return 0; + + ConstraintManager &CM = C.getConstraintManager(); + const GRState *stateNotNull, *stateNull; + llvm::tie(stateNotNull, stateNull) = CM.AssumeDual(state, *DV); + + if (!stateNotNull && stateNull) { + if (ExplodedNode *N = C.GenerateSink(stateNull)) { + if (!BT_nullfp) + BT_nullfp = new BuiltinBug("NULL stream pointer", + "Stream pointer might be NULL."); + BugReport *R =new BugReport(*BT_nullfp, BT_nullfp->getDescription(), N); + C.EmitReport(R); + } + return 0; + } + return stateNotNull; +} diff --git a/contrib/llvm/tools/clang/lib/Checker/SymbolManager.cpp b/contrib/llvm/tools/clang/lib/Checker/SymbolManager.cpp index f3a803c..c2b557e 100644 --- a/contrib/llvm/tools/clang/lib/Checker/SymbolManager.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/SymbolManager.cpp @@ -74,6 +74,10 @@ void SymbolDerived::dumpToStream(llvm::raw_ostream& os) const { << getParentSymbol() << ',' << getRegion() << '}'; } +void SymbolExtent::dumpToStream(llvm::raw_ostream& os) const { + os << "extent_$" << getSymbolID() << '{' << getRegion() << '}'; +} + void SymbolRegionValue::dumpToStream(llvm::raw_ostream& os) const { os << "reg_$" << getSymbolID() << "<" << R << ">"; } @@ -130,6 +134,22 @@ SymbolManager::getDerivedSymbol(SymbolRef parentSymbol, return cast<SymbolDerived>(SD); } +const SymbolExtent* +SymbolManager::getExtentSymbol(const SubRegion *R) { + llvm::FoldingSetNodeID profile; + SymbolExtent::Profile(profile, R); + void* InsertPos; + SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos); + if (!SD) { + SD = (SymExpr*) BPAlloc.Allocate<SymbolExtent>(); + new (SD) 
SymbolExtent(SymbolCounter, R); + DataSet.InsertNode(SD, InsertPos); + ++SymbolCounter; + } + + return cast<SymbolExtent>(SD); +} + const SymIntExpr *SymbolManager::getSymIntExpr(const SymExpr *lhs, BinaryOperator::Opcode op, const llvm::APSInt& v, @@ -170,11 +190,14 @@ QualType SymbolConjured::getType(ASTContext&) const { return T; } - QualType SymbolDerived::getType(ASTContext& Ctx) const { return R->getValueType(Ctx); } +QualType SymbolExtent::getType(ASTContext& Ctx) const { + return Ctx.getSizeType(); +} + QualType SymbolRegionValue::getType(ASTContext& C) const { return R->getValueType(C); } @@ -210,16 +233,25 @@ bool SymbolReaper::isLive(SymbolRef sym) { return false; } + if (const SymbolExtent *extent = dyn_cast<SymbolExtent>(sym)) { + const MemRegion *Base = extent->getRegion()->getBaseRegion(); + if (const VarRegion *VR = dyn_cast<VarRegion>(Base)) + return isLive(VR); + if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(Base)) + return isLive(SR->getSymbol()); + return false; + } + // Interogate the symbol. It may derive from an input value to // the analyzed function/method. return isa<SymbolRegionValue>(sym); } -bool SymbolReaper::isLive(const Stmt* Loc, const Stmt* ExprVal) const { +bool SymbolReaper::isLive(const Stmt* ExprVal) const { return LCtx->getLiveVariables()->isLive(Loc, ExprVal); } -bool SymbolReaper::isLive(const Stmt *Loc, const VarRegion *VR) const { +bool SymbolReaper::isLive(const VarRegion *VR) const { const StackFrameContext *SFC = VR->getStackFrame(); if (SFC == LCtx->getCurrentStackFrame()) diff --git a/contrib/llvm/tools/clang/lib/Checker/VLASizeChecker.cpp b/contrib/llvm/tools/clang/lib/Checker/VLASizeChecker.cpp index cea9d19..936991d 100644 --- a/contrib/llvm/tools/clang/lib/Checker/VLASizeChecker.cpp +++ b/contrib/llvm/tools/clang/lib/Checker/VLASizeChecker.cpp @@ -9,10 +9,13 @@ // // This defines VLASizeChecker, a builtin check in GRExprEngine that // performs checks for declaration of VLA of undefined or zero size. 
+// In addition, VLASizeChecker is responsible for defining the extent +// of the MemRegion that represents a VLA. // //===----------------------------------------------------------------------===// #include "GRExprEngineInternalChecks.h" +#include "clang/AST/CharUnits.h" #include "clang/Checker/BugReporter/BugType.h" #include "clang/Checker/PathSensitive/CheckerVisitor.h" #include "clang/Checker/PathSensitive/GRExprEngine.h" @@ -42,9 +45,9 @@ void VLASizeChecker::PreVisitDeclStmt(CheckerContext &C, const DeclStmt *DS) { const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl()); if (!VD) return; - - const VariableArrayType *VLA - = C.getASTContext().getAsVariableArrayType(VD->getType()); + + ASTContext &Ctx = C.getASTContext(); + const VariableArrayType *VLA = Ctx.getAsVariableArrayType(VD->getType()); if (!VLA) return; @@ -70,9 +73,14 @@ void VLASizeChecker::PreVisitDeclStmt(CheckerContext &C, const DeclStmt *DS) { C.EmitReport(report); return; } + + // See if the size value is known. It can't be undefined because we would have + // warned about that already. + if (sizeV.isUnknown()) + return; // Check if the size is zero. - DefinedOrUnknownSVal sizeD = cast<DefinedOrUnknownSVal>(sizeV); + DefinedSVal sizeD = cast<DefinedSVal>(sizeV); const GRState *stateNotZero, *stateZero; llvm::tie(stateNotZero, stateZero) = state->Assume(sizeD); @@ -92,5 +100,36 @@ void VLASizeChecker::PreVisitDeclStmt(CheckerContext &C, const DeclStmt *DS) { } // From this point on, assume that the size is not zero. - C.addTransition(stateNotZero); + state = stateNotZero; + + // VLASizeChecker is responsible for defining the extent of the array being + // declared. We do this by multiplying the array length by the element size, + // then matching that with the array region's extent symbol. + + // Convert the array length to size_t. 
+ ValueManager &ValMgr = C.getValueManager(); + SValuator &SV = ValMgr.getSValuator(); + QualType SizeTy = Ctx.getSizeType(); + NonLoc ArrayLength = cast<NonLoc>(SV.EvalCast(sizeD, SizeTy, SE->getType())); + + // Get the element size. + CharUnits EleSize = Ctx.getTypeSizeInChars(VLA->getElementType()); + SVal EleSizeVal = ValMgr.makeIntVal(EleSize.getQuantity(), SizeTy); + + // Multiply the array length by the element size. + SVal ArraySizeVal = SV.EvalBinOpNN(state, BinaryOperator::Mul, ArrayLength, + cast<NonLoc>(EleSizeVal), SizeTy); + + // Finally, Assume that the array's extent matches the given size. + const LocationContext *LC = C.getPredecessor()->getLocationContext(); + DefinedOrUnknownSVal Extent = state->getRegion(VD, LC)->getExtent(ValMgr); + DefinedOrUnknownSVal ArraySize = cast<DefinedOrUnknownSVal>(ArraySizeVal); + DefinedOrUnknownSVal SizeIsKnown = SV.EvalEQ(state, Extent, ArraySize); + state = state->Assume(SizeIsKnown, true); + + // Assume should not fail at this point. + assert(state); + + // Remember our assumptions! + C.addTransition(state); } diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h index 1ab2f55..85524ac 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h +++ b/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h @@ -11,11 +11,9 @@ #define CLANG_CODEGEN_ABIINFO_H #include "clang/AST/Type.h" - -#include <cassert> +#include "llvm/Type.h" namespace llvm { - class Type; class Value; class LLVMContext; } @@ -70,7 +68,7 @@ namespace clang { private: Kind TheKind; - const llvm::Type *TypeData; + llvm::PATypeHolder TypeData; unsigned UIntData; bool BoolData; @@ -136,7 +134,11 @@ namespace clang { virtual void computeInfo(CodeGen::CGFunctionInfo &FI, ASTContext &Ctx, - llvm::LLVMContext &VMContext) const = 0; + llvm::LLVMContext &VMContext, + // This is the preferred type for argument lowering + // which can be used to generate better IR. 
+ const llvm::Type *const *PrefTypes = 0, + unsigned NumPrefTypes = 0) const = 0; /// EmitVAArg - Emit the target dependent code to load a value of /// \arg Ty from the va_list pointed to by \arg VAListAddr. diff --git a/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp b/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp new file mode 100644 index 0000000..69efe43 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp @@ -0,0 +1,339 @@ +//===--- BackendUtil.cpp - LLVM Backend Utilities -------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "clang/CodeGen/BackendUtil.h" +#include "clang/Basic/Diagnostic.h" +#include "clang/Basic/TargetOptions.h" +#include "clang/Frontend/CodeGenOptions.h" +#include "clang/Frontend/FrontendDiagnostic.h" +#include "llvm/Module.h" +#include "llvm/PassManager.h" +#include "llvm/Assembly/PrintModulePass.h" +#include "llvm/Bitcode/ReaderWriter.h" +#include "llvm/CodeGen/RegAllocRegistry.h" +#include "llvm/CodeGen/SchedulerRegistry.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/FormattedStream.h" +#include "llvm/Support/PrettyStackTrace.h" +#include "llvm/Support/StandardPasses.h" +#include "llvm/Support/Timer.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Target/SubtargetFeature.h" +#include "llvm/Target/TargetData.h" +#include "llvm/Target/TargetMachine.h" +#include "llvm/Target/TargetOptions.h" +#include "llvm/Target/TargetRegistry.h" +using namespace clang; +using namespace llvm; + +namespace { + +class EmitAssemblyHelper { + Diagnostic &Diags; + const CodeGenOptions &CodeGenOpts; + const TargetOptions &TargetOpts; + Module *TheModule; + + Timer CodeGenerationTime; + + mutable FunctionPassManager *CodeGenPasses; + mutable PassManager *PerModulePasses; 
+ mutable FunctionPassManager *PerFunctionPasses; + +private: + FunctionPassManager *getCodeGenPasses() const { + if (!CodeGenPasses) { + CodeGenPasses = new FunctionPassManager(TheModule); + CodeGenPasses->add(new TargetData(TheModule)); + } + return CodeGenPasses; + } + + PassManager *getPerModulePasses() const { + if (!PerModulePasses) { + PerModulePasses = new PassManager(); + PerModulePasses->add(new TargetData(TheModule)); + } + return PerModulePasses; + } + + FunctionPassManager *getPerFunctionPasses() const { + if (!PerFunctionPasses) { + PerFunctionPasses = new FunctionPassManager(TheModule); + PerFunctionPasses->add(new TargetData(TheModule)); + } + return PerFunctionPasses; + } + + void CreatePasses(); + + /// AddEmitPasses - Add passes necessary to emit assembly or LLVM IR. + /// + /// \return True on success. + bool AddEmitPasses(BackendAction Action, formatted_raw_ostream &OS); + +public: + EmitAssemblyHelper(Diagnostic &_Diags, + const CodeGenOptions &CGOpts, const TargetOptions &TOpts, + Module *M) + : Diags(_Diags), CodeGenOpts(CGOpts), TargetOpts(TOpts), + TheModule(M), CodeGenerationTime("Code Generation Time"), + CodeGenPasses(0), PerModulePasses(0), PerFunctionPasses(0) {} + + ~EmitAssemblyHelper() { + delete CodeGenPasses; + delete PerModulePasses; + delete PerFunctionPasses; + } + + void EmitAssembly(BackendAction Action, raw_ostream *OS); +}; + +} + +void EmitAssemblyHelper::CreatePasses() { + unsigned OptLevel = CodeGenOpts.OptimizationLevel; + CodeGenOptions::InliningMethod Inlining = CodeGenOpts.Inlining; + + // Handle disabling of LLVM optimization, where we want to preserve the + // internal module before any optimization. + if (CodeGenOpts.DisableLLVMOpts) { + OptLevel = 0; + Inlining = CodeGenOpts.NoInlining; + } + + // In -O0 if checking is disabled, we don't even have per-function passes. 
+ if (CodeGenOpts.VerifyModule) + getPerFunctionPasses()->add(createVerifierPass()); + + // Assume that standard function passes aren't run for -O0. + if (OptLevel > 0) + llvm::createStandardFunctionPasses(getPerFunctionPasses(), OptLevel); + + llvm::Pass *InliningPass = 0; + switch (Inlining) { + case CodeGenOptions::NoInlining: break; + case CodeGenOptions::NormalInlining: { + // Set the inline threshold following llvm-gcc. + // + // FIXME: Derive these constants in a principled fashion. + unsigned Threshold = 225; + if (CodeGenOpts.OptimizeSize) + Threshold = 75; + else if (OptLevel > 2) + Threshold = 275; + InliningPass = createFunctionInliningPass(Threshold); + break; + } + case CodeGenOptions::OnlyAlwaysInlining: + InliningPass = createAlwaysInlinerPass(); // Respect always_inline + break; + } + + // For now we always create per module passes. + llvm::createStandardModulePasses(getPerModulePasses(), OptLevel, + CodeGenOpts.OptimizeSize, + CodeGenOpts.UnitAtATime, + CodeGenOpts.UnrollLoops, + CodeGenOpts.SimplifyLibCalls, + /*HaveExceptions=*/true, + InliningPass); +} + +bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action, + formatted_raw_ostream &OS) { + // Create the TargetMachine for generating code. + std::string Error; + std::string Triple = TheModule->getTargetTriple(); + const llvm::Target *TheTarget = TargetRegistry::lookupTarget(Triple, Error); + if (!TheTarget) { + Diags.Report(diag::err_fe_unable_to_create_target) << Error; + return false; + } + + // FIXME: Expose these capabilities via actual APIs!!!! Aside from just + // being gross, this is also totally broken if we ever care about + // concurrency. + + // Set frame pointer elimination mode. 
+ if (!CodeGenOpts.DisableFPElim) { + llvm::NoFramePointerElim = false; + llvm::NoFramePointerElimNonLeaf = false; + } else if (CodeGenOpts.OmitLeafFramePointer) { + llvm::NoFramePointerElim = false; + llvm::NoFramePointerElimNonLeaf = true; + } else { + llvm::NoFramePointerElim = true; + llvm::NoFramePointerElimNonLeaf = true; + } + + // Set float ABI type. + if (CodeGenOpts.FloatABI == "soft") + llvm::FloatABIType = llvm::FloatABI::Soft; + else if (CodeGenOpts.FloatABI == "hard") + llvm::FloatABIType = llvm::FloatABI::Hard; + else { + assert(CodeGenOpts.FloatABI.empty() && "Invalid float abi!"); + llvm::FloatABIType = llvm::FloatABI::Default; + } + + NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS; + llvm::UseSoftFloat = CodeGenOpts.SoftFloat; + UnwindTablesMandatory = CodeGenOpts.UnwindTables; + + TargetMachine::setAsmVerbosityDefault(CodeGenOpts.AsmVerbose); + + TargetMachine::setFunctionSections(CodeGenOpts.FunctionSections); + TargetMachine::setDataSections (CodeGenOpts.DataSections); + + // FIXME: Parse this earlier. + if (CodeGenOpts.RelocationModel == "static") { + TargetMachine::setRelocationModel(llvm::Reloc::Static); + } else if (CodeGenOpts.RelocationModel == "pic") { + TargetMachine::setRelocationModel(llvm::Reloc::PIC_); + } else { + assert(CodeGenOpts.RelocationModel == "dynamic-no-pic" && + "Invalid PIC model!"); + TargetMachine::setRelocationModel(llvm::Reloc::DynamicNoPIC); + } + // FIXME: Parse this earlier. 
+ if (CodeGenOpts.CodeModel == "small") { + TargetMachine::setCodeModel(llvm::CodeModel::Small); + } else if (CodeGenOpts.CodeModel == "kernel") { + TargetMachine::setCodeModel(llvm::CodeModel::Kernel); + } else if (CodeGenOpts.CodeModel == "medium") { + TargetMachine::setCodeModel(llvm::CodeModel::Medium); + } else if (CodeGenOpts.CodeModel == "large") { + TargetMachine::setCodeModel(llvm::CodeModel::Large); + } else { + assert(CodeGenOpts.CodeModel.empty() && "Invalid code model!"); + TargetMachine::setCodeModel(llvm::CodeModel::Default); + } + + std::vector<const char *> BackendArgs; + BackendArgs.push_back("clang"); // Fake program name. + if (!CodeGenOpts.DebugPass.empty()) { + BackendArgs.push_back("-debug-pass"); + BackendArgs.push_back(CodeGenOpts.DebugPass.c_str()); + } + if (!CodeGenOpts.LimitFloatPrecision.empty()) { + BackendArgs.push_back("-limit-float-precision"); + BackendArgs.push_back(CodeGenOpts.LimitFloatPrecision.c_str()); + } + if (llvm::TimePassesIsEnabled) + BackendArgs.push_back("-time-passes"); + BackendArgs.push_back(0); + llvm::cl::ParseCommandLineOptions(BackendArgs.size() - 1, + const_cast<char **>(&BackendArgs[0])); + + std::string FeaturesStr; + if (TargetOpts.CPU.size() || TargetOpts.Features.size()) { + SubtargetFeatures Features; + Features.setCPU(TargetOpts.CPU); + for (std::vector<std::string>::const_iterator + it = TargetOpts.Features.begin(), + ie = TargetOpts.Features.end(); it != ie; ++it) + Features.AddFeature(*it); + FeaturesStr = Features.getString(); + } + TargetMachine *TM = TheTarget->createTargetMachine(Triple, FeaturesStr); + + if (CodeGenOpts.RelaxAll) + TM->setMCRelaxAll(true); + + // Create the code generator passes. 
+ FunctionPassManager *PM = getCodeGenPasses(); + CodeGenOpt::Level OptLevel = CodeGenOpt::Default; + + switch (CodeGenOpts.OptimizationLevel) { + default: break; + case 0: OptLevel = CodeGenOpt::None; break; + case 3: OptLevel = CodeGenOpt::Aggressive; break; + } + + // Normal mode, emit a .s or .o file by running the code generator. Note, + // this also adds codegenerator level optimization passes. + TargetMachine::CodeGenFileType CGFT = TargetMachine::CGFT_AssemblyFile; + if (Action == Backend_EmitObj) + CGFT = TargetMachine::CGFT_ObjectFile; + else if (Action == Backend_EmitMCNull) + CGFT = TargetMachine::CGFT_Null; + else + assert(Action == Backend_EmitAssembly && "Invalid action!"); + if (TM->addPassesToEmitFile(*PM, OS, CGFT, OptLevel, + /*DisableVerify=*/!CodeGenOpts.VerifyModule)) { + Diags.Report(diag::err_fe_unable_to_interface_with_target); + return false; + } + + return true; +} + +void EmitAssemblyHelper::EmitAssembly(BackendAction Action, raw_ostream *OS) { + TimeRegion Region(llvm::TimePassesIsEnabled ? &CodeGenerationTime : 0); + llvm::formatted_raw_ostream FormattedOS; + + CreatePasses(); + switch (Action) { + case Backend_EmitNothing: + break; + + case Backend_EmitBC: + getPerModulePasses()->add(createBitcodeWriterPass(*OS)); + break; + + case Backend_EmitLL: + FormattedOS.setStream(*OS, formatted_raw_ostream::PRESERVE_STREAM); + getPerModulePasses()->add(createPrintModulePass(&FormattedOS)); + break; + + default: + FormattedOS.setStream(*OS, formatted_raw_ostream::PRESERVE_STREAM); + if (!AddEmitPasses(Action, FormattedOS)) + return; + } + + // Run passes. For now we do all passes at once, but eventually we + // would like to have the option of streaming code generation. 
+ + if (PerFunctionPasses) { + PrettyStackTraceString CrashInfo("Per-function optimization"); + + PerFunctionPasses->doInitialization(); + for (Module::iterator I = TheModule->begin(), + E = TheModule->end(); I != E; ++I) + if (!I->isDeclaration()) + PerFunctionPasses->run(*I); + PerFunctionPasses->doFinalization(); + } + + if (PerModulePasses) { + PrettyStackTraceString CrashInfo("Per-module optimization passes"); + PerModulePasses->run(*TheModule); + } + + if (CodeGenPasses) { + PrettyStackTraceString CrashInfo("Code generation"); + + CodeGenPasses->doInitialization(); + for (Module::iterator I = TheModule->begin(), + E = TheModule->end(); I != E; ++I) + if (!I->isDeclaration()) + CodeGenPasses->run(*I); + CodeGenPasses->doFinalization(); + } +} + +void clang::EmitBackendOutput(Diagnostic &Diags, const CodeGenOptions &CGOpts, + const TargetOptions &TOpts, Module *M, + BackendAction Action, raw_ostream *OS) { + EmitAssemblyHelper AsmHelper(Diags, CGOpts, TOpts, M); + + AsmHelper.EmitAssembly(Action, OS); +} diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp index de58597..cb9e636 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp @@ -228,7 +228,7 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) { // block literal. 
// __invoke llvm::Function *Fn - = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, CurFuncDecl, + = CodeGenFunction(CGM).GenerateBlockFunction(CurGD, BE, Info, CurFuncDecl, LocalDeclMap); BlockHasCopyDispose |= Info.BlockHasCopyDispose; Elts[3] = Fn; @@ -253,7 +253,7 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) { CodeGenTypes &Types = CGM.getTypes(); const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, Args, FunctionType::ExtInfo()); - if (CGM.ReturnTypeUsesSret(FnInfo)) + if (CGM.ReturnTypeUsesSRet(FnInfo)) flags |= BLOCK_USE_STRET; } const llvm::IntegerType *IntTy = cast<llvm::IntegerType>( @@ -296,8 +296,11 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) { const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E); QualType Ty = E->getType(); if (BDRE && BDRE->isByRef()) { - Types[i+BlockFields] = llvm::PointerType::get(BuildByRefType(BDRE->getDecl()), 0); - } else + Types[i+BlockFields] = + llvm::PointerType::get(BuildByRefType(BDRE->getDecl()), 0); + } else if (BDRE && BDRE->getDecl()->getType()->isReferenceType()) { + Types[i+BlockFields] = llvm::PointerType::get(ConvertType(Ty), 0); + } else Types[i+BlockFields] = ConvertType(Ty); } @@ -358,11 +361,23 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) { Builder.CreateStore(Loc, Addr); continue; } else { - E = new (getContext()) DeclRefExpr(const_cast<ValueDecl*>(VD), - VD->getType(), - SourceLocation()); + if (BDRE->getCopyConstructorExpr()) { + E = BDRE->getCopyConstructorExpr(); + PushDestructorCleanup(E->getType(), Addr); + } + else { + E = new (getContext()) DeclRefExpr(const_cast<ValueDecl*>(VD), + VD->getType().getNonReferenceType(), + SourceLocation()); + if (VD->getType()->isReferenceType()) { + E = new (getContext()) + UnaryOperator(const_cast<Expr*>(E), UnaryOperator::AddrOf, + getContext().getPointerType(E->getType()), + SourceLocation()); + } + } + } } - } if (BDRE->isByRef()) { E = new 
(getContext()) @@ -386,8 +401,7 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) { llvm::Value *BlockLiteral = LoadBlockStruct(); Loc = Builder.CreateGEP(BlockLiteral, - llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), - offset.getQuantity()), + llvm::ConstantInt::get(Int64Ty, offset.getQuantity()), "block.literal"); Ty = llvm::PointerType::get(Ty, 0); Loc = Builder.CreateBitCast(Loc, Ty); @@ -599,13 +613,13 @@ void CodeGenFunction::AllocateBlockDecl(const BlockDeclRefExpr *E) { llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const ValueDecl *VD, bool IsByRef) { + CharUnits offset = BlockDecls[VD]; assert(!offset.isZero() && "getting address of unallocated decl"); llvm::Value *BlockLiteral = LoadBlockStruct(); llvm::Value *V = Builder.CreateGEP(BlockLiteral, - llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), - offset.getQuantity()), + llvm::ConstantInt::get(Int64Ty, offset.getQuantity()), "block.literal"); if (IsByRef) { const llvm::Type *PtrStructTy @@ -626,9 +640,10 @@ llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const ValueDecl *VD, V = Builder.CreateLoad(V); } else { const llvm::Type *Ty = CGM.getTypes().ConvertType(VD->getType()); - Ty = llvm::PointerType::get(Ty, 0); V = Builder.CreateBitCast(V, Ty); + if (VD->getType()->isReferenceType()) + V = Builder.CreateLoad(V, "ref.tmp"); } return V; } @@ -680,7 +695,7 @@ BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) { CGBlockInfo Info(n); llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap; llvm::Function *Fn - = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, 0, LocalDeclMap); + = CodeGenFunction(CGM).GenerateBlockFunction(GlobalDecl(), BE, Info, 0, LocalDeclMap); assert(Info.BlockSize == BlockLiteralSize && "no imports allowed for global block"); @@ -719,7 +734,7 @@ llvm::Value *CodeGenFunction::LoadBlockStruct() { } llvm::Function * -CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr, 
+CodeGenFunction::GenerateBlockFunction(GlobalDecl GD, const BlockExpr *BExpr, CGBlockInfo &Info, const Decl *OuterFuncDecl, llvm::DenseMap<const Decl*, llvm::Value*> ldm) { @@ -792,18 +807,29 @@ CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr, const llvm::FunctionType *LTy = Types.GetFunctionType(FI, IsVariadic); MangleBuffer Name; - CGM.getMangledName(Name, BD); + CGM.getMangledName(GD, Name, BD); llvm::Function *Fn = llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage, Name.getString(), &CGM.getModule()); CGM.SetInternalFunctionAttributes(BD, Fn, FI); + QualType FnType(BlockFunctionType, 0); + bool HasPrototype = isa<FunctionProtoType>(BlockFunctionType); + + IdentifierInfo *ID = &getContext().Idents.get(Name.getString()); + CurCodeDecl = FunctionDecl::Create(getContext(), + getContext().getTranslationUnitDecl(), + SourceLocation(), ID, FnType, + 0, + FunctionDecl::Static, + FunctionDecl::None, + false, HasPrototype); + StartFunction(BD, ResultType, Fn, Args, BExpr->getBody()->getLocEnd()); CurFuncDecl = OuterFuncDecl; - CurCodeDecl = BD; // If we have a C++ 'this' reference, go ahead and force it into // existence now. 
@@ -985,8 +1011,7 @@ GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T, llvm::Value *Dstv = Builder.CreateStructGEP(DstObj, index); Dstv = Builder.CreateBitCast(Dstv, PtrToInt8Ty); - llvm::Value *N = llvm::ConstantInt::get( - llvm::Type::getInt32Ty(T->getContext()), flag); + llvm::Value *N = llvm::ConstantInt::get(CGF.Int32Ty, flag); llvm::Value *F = getBlockObjectAssign(); Builder.CreateCall3(F, Dstv, Srcv, N); } @@ -1138,8 +1163,7 @@ GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) { flag |= BLOCK_BYREF_CALLER; - llvm::Value *N = llvm::ConstantInt::get( - llvm::Type::getInt32Ty(T->getContext()), flag); + llvm::Value *N = llvm::ConstantInt::get(CGF.Int32Ty, flag); llvm::Value *F = getBlockObjectAssign(); Builder.CreateCall3(F, DstObj, SrcObj, N); @@ -1241,7 +1265,7 @@ llvm::Value *BlockFunction::getBlockObjectDispose() { std::vector<const llvm::Type*> ArgTys; const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext); ArgTys.push_back(PtrToInt8Ty); - ArgTys.push_back(llvm::Type::getInt32Ty(VMContext)); + ArgTys.push_back(CGF.Int32Ty); FTy = llvm::FunctionType::get(ResultType, ArgTys, false); CGM.BlockObjectDispose = CGM.CreateRuntimeFunction(FTy, "_Block_object_dispose"); @@ -1256,7 +1280,7 @@ llvm::Value *BlockFunction::getBlockObjectAssign() { const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext); ArgTys.push_back(PtrToInt8Ty); ArgTys.push_back(PtrToInt8Ty); - ArgTys.push_back(llvm::Type::getInt32Ty(VMContext)); + ArgTys.push_back(CGF.Int32Ty); FTy = llvm::FunctionType::get(ResultType, ArgTys, false); CGM.BlockObjectAssign = CGM.CreateRuntimeFunction(FTy, "_Block_object_assign"); @@ -1268,7 +1292,7 @@ void BlockFunction::BuildBlockRelease(llvm::Value *V, int flag) { llvm::Value *F = getBlockObjectDispose(); llvm::Value *N; V = Builder.CreateBitCast(V, PtrToInt8Ty); - N = llvm::ConstantInt::get(llvm::Type::getInt32Ty(V->getContext()), flag); + N = llvm::ConstantInt::get(CGF.Int32Ty, flag); 
Builder.CreateCall2(F, V, N); } @@ -1276,7 +1300,7 @@ ASTContext &BlockFunction::getContext() const { return CGM.getContext(); } BlockFunction::BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf, CGBuilderTy &B) - : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()), Builder(B) { + : CGM(cgm), VMContext(cgm.getLLVMContext()), CGF(cgf), Builder(B) { PtrToInt8Ty = llvm::PointerType::getUnqual( llvm::Type::getInt8Ty(VMContext)); diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h index e9b2bd5..772a62c 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h @@ -99,7 +99,7 @@ public: llvm::Value *BlockObjectAssign; llvm::Value *BlockObjectDispose; - const llvm::Type *PtrToInt8Ty; + const llvm::PointerType *PtrToInt8Ty; std::map<uint64_t, llvm::Constant *> AssignCache; std::map<uint64_t, llvm::Constant *> DestroyCache; @@ -121,13 +121,14 @@ public: class BlockFunction : public BlockBase { CodeGenModule &CGM; - CodeGenFunction &CGF; ASTContext &getContext() const; protected: llvm::LLVMContext &VMContext; public: + CodeGenFunction &CGF; + const llvm::PointerType *PtrToInt8Ty; struct HelperInfo { int index; @@ -180,7 +181,7 @@ public: /// BlockDecls - Offsets for all Decls in BlockDeclRefExprs. llvm::DenseMap<const Decl*, CharUnits> BlockDecls; - + /// BlockCXXThisOffset - The offset of the C++ 'this' value within /// the block structure. CharUnits BlockCXXThisOffset; diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h b/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h index ed56bd9..8120217 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h @@ -14,12 +14,14 @@ namespace clang { namespace CodeGen { - // Don't preserve names on values in an optimized build. + +// Don't preserve names on values in an optimized build. 
#ifdef NDEBUG - typedef llvm::IRBuilder<false> CGBuilderTy; +typedef llvm::IRBuilder<false> CGBuilderTy; #else - typedef llvm::IRBuilder<> CGBuilderTy; +typedef llvm::IRBuilder<> CGBuilderTy; #endif + } // end namespace CodeGen } // end namespace clang diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp index dd505c2..fff4bac 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp @@ -14,6 +14,7 @@ #include "TargetInfo.h" #include "CodeGenFunction.h" #include "CodeGenModule.h" +#include "CGObjCRuntime.h" #include "clang/Basic/TargetInfo.h" #include "clang/AST/APValue.h" #include "clang/AST/ASTContext.h" @@ -84,11 +85,6 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF, return RValue::get(CGF.Builder.CreateBinOp(Op, Result, Args[1])); } -static llvm::ConstantInt *getInt32(llvm::LLVMContext &Context, int32_t Value) { - return llvm::ConstantInt::get(llvm::Type::getInt32Ty(Context), Value); -} - - /// EmitFAbs - Emit a call to fabs/fabsf/fabsl, depending on the type of ValTy, /// which must be a scalar floating point type. static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) { @@ -283,9 +279,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0)); // FIXME: Technically these constants should of type 'int', yes? RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) : - llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0); + llvm::ConstantInt::get(Int32Ty, 0); Locality = (E->getNumArgs() > 2) ? 
EmitScalarExpr(E->getArg(2)) : - llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 3); + llvm::ConstantInt::get(Int32Ty, 3); Value *F = CGM.getIntrinsic(Intrinsic::prefetch, 0, 0); return RValue::get(Builder.CreateCall3(F, Address, RW, Locality)); } @@ -395,12 +391,68 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, V = Builder.CreateAnd(Eq, IsNotInf, "and"); return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()))); } + + case Builtin::BI__builtin_fpclassify: { + Value *V = EmitScalarExpr(E->getArg(5)); + const llvm::Type *Ty = ConvertType(E->getArg(5)->getType()); + + // Create Result + BasicBlock *Begin = Builder.GetInsertBlock(); + BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn); + Builder.SetInsertPoint(End); + PHINode *Result = + Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), + "fpclassify_result"); + + // if (V==0) return FP_ZERO + Builder.SetInsertPoint(Begin); + Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty), + "iszero"); + Value *ZeroLiteral = EmitScalarExpr(E->getArg(4)); + BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn); + Builder.CreateCondBr(IsZero, End, NotZero); + Result->addIncoming(ZeroLiteral, Begin); + + // if (V != V) return FP_NAN + Builder.SetInsertPoint(NotZero); + Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp"); + Value *NanLiteral = EmitScalarExpr(E->getArg(0)); + BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn); + Builder.CreateCondBr(IsNan, End, NotNan); + Result->addIncoming(NanLiteral, NotZero); + + // if (fabs(V) == infinity) return FP_INFINITY + Builder.SetInsertPoint(NotNan); + Value *VAbs = EmitFAbs(*this, V, E->getArg(5)->getType()); + Value *IsInf = + Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()), + "isinf"); + Value *InfLiteral = EmitScalarExpr(E->getArg(1)); + BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn); + Builder.CreateCondBr(IsInf, End, 
NotInf); + Result->addIncoming(InfLiteral, NotNan); + + // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL + Builder.SetInsertPoint(NotInf); + APFloat Smallest = APFloat::getSmallestNormalized( + getContext().getFloatTypeSemantics(E->getArg(5)->getType())); + Value *IsNormal = + Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest), + "isnormal"); + Value *NormalResult = + Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)), + EmitScalarExpr(E->getArg(3))); + Builder.CreateBr(End); + Result->addIncoming(NormalResult, NotInf); + + // return Result + Builder.SetInsertPoint(End); + return RValue::get(Result); + } case Builtin::BIalloca: case Builtin::BI__builtin_alloca: { - // FIXME: LLVM IR Should allow alloca with an i64 size! Value *Size = EmitScalarExpr(E->getArg(0)); - Size = Builder.CreateIntCast(Size, llvm::Type::getInt32Ty(VMContext), false, "tmp"); return RValue::get(Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), Size, "tmp")); } case Builtin::BIbzero: @@ -411,7 +463,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, Address, llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0), SizeVal, - llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1), + llvm::ConstantInt::get(Int32Ty, 1), llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0)); return RValue::get(Address); } @@ -423,10 +475,20 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, Builder.CreateCall5(CGM.getMemCpyFn(Address->getType(), SrcAddr->getType(), SizeVal->getType()), Address, SrcAddr, SizeVal, - llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1), + llvm::ConstantInt::get(Int32Ty, 1), llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0)); return RValue::get(Address); } + + case Builtin::BI__builtin_objc_memmove_collectable: { + Value *Address = EmitScalarExpr(E->getArg(0)); + Value *SrcAddr = EmitScalarExpr(E->getArg(1)); + Value *SizeVal = EmitScalarExpr(E->getArg(2)); + 
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, + Address, SrcAddr, SizeVal); + return RValue::get(Address); + } + case Builtin::BImemmove: case Builtin::BI__builtin_memmove: { Value *Address = EmitScalarExpr(E->getArg(0)); @@ -435,7 +497,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, Builder.CreateCall5(CGM.getMemMoveFn(Address->getType(), SrcAddr->getType(), SizeVal->getType()), Address, SrcAddr, SizeVal, - llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1), + llvm::ConstantInt::get(Int32Ty, 1), llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0)); return RValue::get(Address); } @@ -448,7 +510,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), llvm::Type::getInt8Ty(VMContext)), SizeVal, - llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1), + llvm::ConstantInt::get(Int32Ty, 1), llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0)); return RValue::get(Address); } @@ -464,21 +526,18 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, int32_t Offset = 0; Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa, 0, 0); - return RValue::get(Builder.CreateCall(F, getInt32(VMContext, Offset))); + return RValue::get(Builder.CreateCall(F, + llvm::ConstantInt::get(Int32Ty, Offset))); } case Builtin::BI__builtin_return_address: { Value *Depth = EmitScalarExpr(E->getArg(0)); - Depth = Builder.CreateIntCast(Depth, - llvm::Type::getInt32Ty(VMContext), - false, "tmp"); + Depth = Builder.CreateIntCast(Depth, Int32Ty, false, "tmp"); Value *F = CGM.getIntrinsic(Intrinsic::returnaddress, 0, 0); return RValue::get(Builder.CreateCall(F, Depth)); } case Builtin::BI__builtin_frame_address: { Value *Depth = EmitScalarExpr(E->getArg(0)); - Depth = Builder.CreateIntCast(Depth, - llvm::Type::getInt32Ty(VMContext), - false, "tmp"); + Depth = Builder.CreateIntCast(Depth, Int32Ty, false, "tmp"); Value *F = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0); 
return RValue::get(Builder.CreateCall(F, Depth)); } @@ -551,36 +610,45 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, return RValue::get(Result); // Otherwise, ask the codegen data what to do. - const llvm::IntegerType *Int64Ty = llvm::IntegerType::get(C, 64); if (getTargetHooks().extendPointerWithSExt()) return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext")); else return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext")); } -#if 0 - // FIXME: Finish/enable when LLVM backend support stabilizes case Builtin::BI__builtin_setjmp: { + // Buffer is a void**. Value *Buf = EmitScalarExpr(E->getArg(0)); - // Store the frame pointer to the buffer - Value *FrameAddrF = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0); + + // Store the frame pointer to the setjmp buffer. Value *FrameAddr = - Builder.CreateCall(FrameAddrF, - Constant::getNullValue(llvm::Type::getInt32Ty(VMContext))); + Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress), + ConstantInt::get(Int32Ty, 0)); Builder.CreateStore(FrameAddr, Buf); - // Call the setjmp intrinsic - Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp, 0, 0); - const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext); - Buf = Builder.CreateBitCast(Buf, DestType); + + // Store the stack pointer to the setjmp buffer. + Value *StackAddr = + Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave)); + Value *StackSaveSlot = + Builder.CreateGEP(Buf, ConstantInt::get(Int32Ty, 2)); + Builder.CreateStore(StackAddr, StackSaveSlot); + + // Call LLVM's EH setjmp, which is lightweight. 
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp); + Buf = Builder.CreateBitCast(Buf, llvm::Type::getInt8PtrTy(VMContext)); return RValue::get(Builder.CreateCall(F, Buf)); } case Builtin::BI__builtin_longjmp: { - Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp, 0, 0); Value *Buf = EmitScalarExpr(E->getArg(0)); - const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext); - Buf = Builder.CreateBitCast(Buf, DestType); - return RValue::get(Builder.CreateCall(F, Buf)); + Buf = Builder.CreateBitCast(Buf, llvm::Type::getInt8PtrTy(VMContext)); + + // Call LLVM's EH longjmp, which is lightweight. + Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf); + + // longjmp doesn't return; mark this as unreachable + Value *V = Builder.CreateUnreachable(); + Builder.ClearInsertionPoint(); + return RValue::get(V); } -#endif case Builtin::BI__sync_fetch_and_add: case Builtin::BI__sync_fetch_and_sub: case Builtin::BI__sync_fetch_and_or: @@ -870,14 +938,703 @@ Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID, } } +const llvm::VectorType *GetNeonType(LLVMContext &C, unsigned type, bool q) { + switch (type) { + default: break; + case 0: + case 5: return llvm::VectorType::get(llvm::Type::getInt8Ty(C), 8 << (int)q); + case 6: + case 7: + case 1: return llvm::VectorType::get(llvm::Type::getInt16Ty(C),4 << (int)q); + case 2: return llvm::VectorType::get(llvm::Type::getInt32Ty(C),2 << (int)q); + case 3: return llvm::VectorType::get(llvm::Type::getInt64Ty(C),1 << (int)q); + case 4: return llvm::VectorType::get(llvm::Type::getFloatTy(C),2 << (int)q); + }; + return 0; +} + +Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) { + unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements(); + SmallVector<Constant*, 16> Indices(nElts, C); + Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size()); + return Builder.CreateShuffleVector(V, V, SV, "lane"); +} + +Value *CodeGenFunction::EmitNeonCall(Function *F, 
SmallVectorImpl<Value*> &Ops, + const char *name, bool splat, + unsigned shift, bool rightshift) { + unsigned j = 0; + for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end(); + ai != ae; ++ai, ++j) + if (shift > 0 && shift == j) + Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift); + else + Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name); + + if (splat) { + Ops[j-1] = EmitNeonSplat(Ops[j-1], cast<Constant>(Ops[j])); + Ops.resize(j); + } + return Builder.CreateCall(F, Ops.begin(), Ops.end(), name); +} + +Value *CodeGenFunction::EmitNeonShiftVector(Value *V, const llvm::Type *Ty, + bool neg) { + ConstantInt *CI = cast<ConstantInt>(V); + int SV = CI->getSExtValue(); + + const llvm::VectorType *VTy = cast<llvm::VectorType>(Ty); + llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV); + SmallVector<llvm::Constant*, 16> CV(VTy->getNumElements(), C); + return llvm::ConstantVector::get(CV.begin(), CV.size()); +} + Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E) { + if (BuiltinID == ARM::BI__clear_cache) { + const FunctionDecl *FD = E->getDirectCallee(); + Value *a = EmitScalarExpr(E->getArg(0)); + Value *b = EmitScalarExpr(E->getArg(1)); + const llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType()); + const llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty); + llvm::StringRef Name = FD->getName(); + return Builder.CreateCall2(CGM.CreateRuntimeFunction(FTy, Name), + a, b); + } + + // Determine the type of this overloaded NEON intrinsic. 
+ assert(BuiltinID > ARM::BI__builtin_thread_pointer); + + llvm::SmallVector<Value*, 4> Ops; + for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) + Ops.push_back(EmitScalarExpr(E->getArg(i))); + + llvm::APSInt Result; + const Expr *Arg = E->getArg(E->getNumArgs()-1); + if (!Arg->isIntegerConstantExpr(Result, getContext())) + return 0; + + unsigned type = Result.getZExtValue(); + bool usgn = type & 0x08; + bool quad = type & 0x10; + bool poly = (type & 0x7) == 5 || (type & 0x7) == 6; + bool splat = false; + + const llvm::VectorType *VTy = GetNeonType(VMContext, type & 0x7, quad); + const llvm::Type *Ty = VTy; + if (!Ty) + return 0; + + unsigned Int; switch (BuiltinID) { default: return 0; + case ARM::BI__builtin_neon_vaba_v: + case ARM::BI__builtin_neon_vabaq_v: + Int = usgn ? Intrinsic::arm_neon_vabau : Intrinsic::arm_neon_vabas; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vaba"); + case ARM::BI__builtin_neon_vabal_v: + Int = usgn ? Intrinsic::arm_neon_vabalu : Intrinsic::arm_neon_vabals; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vabal"); + case ARM::BI__builtin_neon_vabd_v: + case ARM::BI__builtin_neon_vabdq_v: + Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vabd"); + case ARM::BI__builtin_neon_vabdl_v: + Int = usgn ? Intrinsic::arm_neon_vabdlu : Intrinsic::arm_neon_vabdls; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vabdl"); + case ARM::BI__builtin_neon_vabs_v: + case ARM::BI__builtin_neon_vabsq_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, &Ty, 1), + Ops, "vabs"); + case ARM::BI__builtin_neon_vaddhn_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vaddhn, &Ty, 1), + Ops, "vaddhn"); + case ARM::BI__builtin_neon_vaddl_v: + Int = usgn ? 
Intrinsic::arm_neon_vaddlu : Intrinsic::arm_neon_vaddls; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vaddl"); + case ARM::BI__builtin_neon_vaddw_v: + Int = usgn ? Intrinsic::arm_neon_vaddws : Intrinsic::arm_neon_vaddwu; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vaddw"); + case ARM::BI__builtin_neon_vcale_v: + std::swap(Ops[0], Ops[1]); + case ARM::BI__builtin_neon_vcage_v: { + Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacged, &Ty, 1); + return EmitNeonCall(F, Ops, "vcage"); + } + case ARM::BI__builtin_neon_vcaleq_v: + std::swap(Ops[0], Ops[1]); + case ARM::BI__builtin_neon_vcageq_v: { + Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq, &Ty, 1); + return EmitNeonCall(F, Ops, "vcage"); + } + case ARM::BI__builtin_neon_vcalt_v: + std::swap(Ops[0], Ops[1]); + case ARM::BI__builtin_neon_vcagt_v: { + Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtd, &Ty, 1); + return EmitNeonCall(F, Ops, "vcagt"); + } + case ARM::BI__builtin_neon_vcaltq_v: + std::swap(Ops[0], Ops[1]); + case ARM::BI__builtin_neon_vcagtq_v: { + Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq, &Ty, 1); + return EmitNeonCall(F, Ops, "vcagt"); + } + case ARM::BI__builtin_neon_vcls_v: + case ARM::BI__builtin_neon_vclsq_v: { + Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcls, &Ty, 1); + return EmitNeonCall(F, Ops, "vcls"); + } + case ARM::BI__builtin_neon_vclz_v: + case ARM::BI__builtin_neon_vclzq_v: { + Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vclz, &Ty, 1); + return EmitNeonCall(F, Ops, "vclz"); + } + case ARM::BI__builtin_neon_vcnt_v: + case ARM::BI__builtin_neon_vcntq_v: { + Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcnt, &Ty, 1); + return EmitNeonCall(F, Ops, "vcnt"); + } + // FIXME: intrinsics for f16<->f32 convert missing from ARM target. 
+ case ARM::BI__builtin_neon_vcvt_f32_v: + case ARM::BI__builtin_neon_vcvtq_f32_v: { + Ops[0] = Builder.CreateBitCast(Ops[0], Ty); + Ty = GetNeonType(VMContext, 4, quad); + return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt") + : Builder.CreateSIToFP(Ops[0], Ty, "vcvt"); + } + case ARM::BI__builtin_neon_vcvt_s32_v: + case ARM::BI__builtin_neon_vcvt_u32_v: + case ARM::BI__builtin_neon_vcvtq_s32_v: + case ARM::BI__builtin_neon_vcvtq_u32_v: { + Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(VMContext, 4, quad)); + return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt") + : Builder.CreateFPToSI(Ops[0], Ty, "vcvt"); + } + case ARM::BI__builtin_neon_vcvt_n_f32_v: + case ARM::BI__builtin_neon_vcvtq_n_f32_v: { + const llvm::Type *Tys[2] = { GetNeonType(VMContext, 4, quad), Ty }; + Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp : Intrinsic::arm_neon_vcvtfxs2fp; + Function *F = CGM.getIntrinsic(Int, Tys, 2); + return EmitNeonCall(F, Ops, "vcvt_n"); + } + case ARM::BI__builtin_neon_vcvt_n_s32_v: + case ARM::BI__builtin_neon_vcvt_n_u32_v: + case ARM::BI__builtin_neon_vcvtq_n_s32_v: + case ARM::BI__builtin_neon_vcvtq_n_u32_v: { + const llvm::Type *Tys[2] = { Ty, GetNeonType(VMContext, 4, quad) }; + Int = usgn ? 
Intrinsic::arm_neon_vcvtfp2fxu : Intrinsic::arm_neon_vcvtfp2fxs; + Function *F = CGM.getIntrinsic(Int, Tys, 2); + return EmitNeonCall(F, Ops, "vcvt_n"); + } + case ARM::BI__builtin_neon_vext_v: + case ARM::BI__builtin_neon_vextq_v: { + ConstantInt *C = dyn_cast<ConstantInt>(Ops[2]); + int CV = C->getSExtValue(); + SmallVector<Constant*, 16> Indices; + for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) + Indices.push_back(ConstantInt::get(Int32Ty, i+CV)); + + Ops[0] = Builder.CreateBitCast(Ops[0], Ty); + Ops[1] = Builder.CreateBitCast(Ops[1], Ty); + Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size()); + return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext"); + } + case ARM::BI__builtin_neon_vget_lane_i8: + case ARM::BI__builtin_neon_vget_lane_i16: + case ARM::BI__builtin_neon_vget_lane_i32: + case ARM::BI__builtin_neon_vget_lane_i64: + case ARM::BI__builtin_neon_vget_lane_f32: + case ARM::BI__builtin_neon_vgetq_lane_i8: + case ARM::BI__builtin_neon_vgetq_lane_i16: + case ARM::BI__builtin_neon_vgetq_lane_i32: + case ARM::BI__builtin_neon_vgetq_lane_i64: + case ARM::BI__builtin_neon_vgetq_lane_f32: + return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)), + "vget_lane"); + case ARM::BI__builtin_neon_vhadd_v: + case ARM::BI__builtin_neon_vhaddq_v: + Int = usgn ? Intrinsic::arm_neon_vhaddu : Intrinsic::arm_neon_vhadds; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vhadd"); + case ARM::BI__builtin_neon_vhsub_v: + case ARM::BI__builtin_neon_vhsubq_v: + Int = usgn ? 
Intrinsic::arm_neon_vhsubu : Intrinsic::arm_neon_vhsubs; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vhsub"); + case ARM::BI__builtin_neon_vld1_v: + case ARM::BI__builtin_neon_vld1q_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, &Ty, 1), + Ops, "vld1"); + case ARM::BI__builtin_neon_vld1_lane_v: + case ARM::BI__builtin_neon_vld1q_lane_v: + Ops[1] = Builder.CreateBitCast(Ops[1], Ty); + Ty = llvm::PointerType::getUnqual(VTy->getElementType()); + Ops[0] = Builder.CreateBitCast(Ops[0], Ty); + Ops[0] = Builder.CreateLoad(Ops[0]); + return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane"); + case ARM::BI__builtin_neon_vld1_dup_v: + case ARM::BI__builtin_neon_vld1q_dup_v: { + Value *V = UndefValue::get(Ty); + Ty = llvm::PointerType::getUnqual(VTy->getElementType()); + Ops[0] = Builder.CreateBitCast(Ops[0], Ty); + Ops[0] = Builder.CreateLoad(Ops[0]); + llvm::Constant *CI = ConstantInt::get(Int32Ty, 0); + Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI); + return EmitNeonSplat(Ops[0], CI); + } + case ARM::BI__builtin_neon_vld2_v: + case ARM::BI__builtin_neon_vld2q_v: { + Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, &Ty, 1); + Ops[1] = Builder.CreateCall(F, Ops[1], "vld2"); + Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); + Ops[0] = Builder.CreateBitCast(Ops[0], Ty); + return Builder.CreateStore(Ops[1], Ops[0]); + } + case ARM::BI__builtin_neon_vld3_v: + case ARM::BI__builtin_neon_vld3q_v: { + Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, &Ty, 1); + Ops[1] = Builder.CreateCall(F, Ops[1], "vld3"); + Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); + Ops[0] = Builder.CreateBitCast(Ops[0], Ty); + return Builder.CreateStore(Ops[1], Ops[0]); + } + case ARM::BI__builtin_neon_vld4_v: + case ARM::BI__builtin_neon_vld4q_v: { + Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, &Ty, 1); + Ops[1] = Builder.CreateCall(F, Ops[1], "vld4"); + Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); + 
Ops[0] = Builder.CreateBitCast(Ops[0], Ty); + return Builder.CreateStore(Ops[1], Ops[0]); + } + case ARM::BI__builtin_neon_vld2_lane_v: + case ARM::BI__builtin_neon_vld2q_lane_v: { + Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2lane, &Ty, 1); + Ops[2] = Builder.CreateBitCast(Ops[2], Ty); + Ops[3] = Builder.CreateBitCast(Ops[3], Ty); + Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld2_lane"); + Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); + Ops[0] = Builder.CreateBitCast(Ops[0], Ty); + return Builder.CreateStore(Ops[1], Ops[0]); + } + case ARM::BI__builtin_neon_vld3_lane_v: + case ARM::BI__builtin_neon_vld3q_lane_v: { + Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3lane, &Ty, 1); + Ops[2] = Builder.CreateBitCast(Ops[2], Ty); + Ops[3] = Builder.CreateBitCast(Ops[3], Ty); + Ops[4] = Builder.CreateBitCast(Ops[4], Ty); + Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld3_lane"); + Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); + Ops[0] = Builder.CreateBitCast(Ops[0], Ty); + return Builder.CreateStore(Ops[1], Ops[0]); + } + case ARM::BI__builtin_neon_vld4_lane_v: + case ARM::BI__builtin_neon_vld4q_lane_v: { + Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4lane, &Ty, 1); + Ops[2] = Builder.CreateBitCast(Ops[2], Ty); + Ops[3] = Builder.CreateBitCast(Ops[3], Ty); + Ops[4] = Builder.CreateBitCast(Ops[4], Ty); + Ops[5] = Builder.CreateBitCast(Ops[5], Ty); + Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld3_lane"); + Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); + Ops[0] = Builder.CreateBitCast(Ops[0], Ty); + return Builder.CreateStore(Ops[1], Ops[0]); + } + case ARM::BI__builtin_neon_vld2_dup_v: + case ARM::BI__builtin_neon_vld3_dup_v: + case ARM::BI__builtin_neon_vld4_dup_v: { + switch (BuiltinID) { + case ARM::BI__builtin_neon_vld2_dup_v: + Int = Intrinsic::arm_neon_vld2lane; + break; + case ARM::BI__builtin_neon_vld3_dup_v: + Int = Intrinsic::arm_neon_vld2lane; + break; + case 
ARM::BI__builtin_neon_vld4_dup_v: + Int = Intrinsic::arm_neon_vld2lane; + break; + default: assert(0 && "unknown vld_dup intrinsic?"); + } + Function *F = CGM.getIntrinsic(Int, &Ty, 1); + const llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType()); + + SmallVector<Value*, 6> Args; + Args.push_back(Ops[1]); + Args.append(STy->getNumElements(), UndefValue::get(Ty)); - case ARM::BI__builtin_thread_pointer: { - Value *AtomF = CGM.getIntrinsic(Intrinsic::arm_thread_pointer, 0, 0); - return Builder.CreateCall(AtomF); + llvm::Constant *CI = ConstantInt::get(Int32Ty, 0); + Args.push_back(CI); + + Ops[1] = Builder.CreateCall(F, Args.begin(), Args.end(), "vld_dup"); + // splat lane 0 to all elts in each vector of the result. + for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { + Value *Val = Builder.CreateExtractValue(Ops[1], i); + Value *Elt = Builder.CreateBitCast(Val, Ty); + Elt = EmitNeonSplat(Elt, CI); + Elt = Builder.CreateBitCast(Elt, Val->getType()); + Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i); + } + Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); + Ops[0] = Builder.CreateBitCast(Ops[0], Ty); + return Builder.CreateStore(Ops[1], Ops[0]); + } + case ARM::BI__builtin_neon_vmax_v: + case ARM::BI__builtin_neon_vmaxq_v: + Int = usgn ? Intrinsic::arm_neon_vmaxu : Intrinsic::arm_neon_vmaxs; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmax"); + case ARM::BI__builtin_neon_vmin_v: + case ARM::BI__builtin_neon_vminq_v: + Int = usgn ? Intrinsic::arm_neon_vminu : Intrinsic::arm_neon_vmins; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmin"); + case ARM::BI__builtin_neon_vmlal_lane_v: + splat = true; + case ARM::BI__builtin_neon_vmlal_v: + Int = usgn ? Intrinsic::arm_neon_vmlalu : Intrinsic::arm_neon_vmlals; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmlal", splat); + case ARM::BI__builtin_neon_vmlsl_lane_v: + splat = true; + case ARM::BI__builtin_neon_vmlsl_v: + Int = usgn ? 
Intrinsic::arm_neon_vmlslu : Intrinsic::arm_neon_vmlsls; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmlsl", splat); + case ARM::BI__builtin_neon_vmovl_v: + Int = usgn ? Intrinsic::arm_neon_vmovlu : Intrinsic::arm_neon_vmovls; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmovl"); + case ARM::BI__builtin_neon_vmovn_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmovn, &Ty, 1), + Ops, "vmovn"); + case ARM::BI__builtin_neon_vmull_lane_v: + splat = true; + case ARM::BI__builtin_neon_vmull_v: + Int = usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls; + Int = poly ? (unsigned)Intrinsic::arm_neon_vmullp : Int; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmlal", splat); + case ARM::BI__builtin_neon_vpadal_v: + case ARM::BI__builtin_neon_vpadalq_v: + Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpadal"); + case ARM::BI__builtin_neon_vpadd_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vpadd, &Ty, 1), + Ops, "vpadd"); + case ARM::BI__builtin_neon_vpaddl_v: + case ARM::BI__builtin_neon_vpaddlq_v: + Int = usgn ? Intrinsic::arm_neon_vpaddlu : Intrinsic::arm_neon_vpaddls; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpaddl"); + case ARM::BI__builtin_neon_vpmax_v: + Int = usgn ? Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpmax"); + case ARM::BI__builtin_neon_vpmin_v: + Int = usgn ? Intrinsic::arm_neon_vpminu : Intrinsic::arm_neon_vpmins; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpmin"); + case ARM::BI__builtin_neon_vqabs_v: + case ARM::BI__builtin_neon_vqabsq_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqabs, &Ty, 1), + Ops, "vqabs"); + case ARM::BI__builtin_neon_vqadd_v: + case ARM::BI__builtin_neon_vqaddq_v: + Int = usgn ? 
Intrinsic::arm_neon_vqaddu : Intrinsic::arm_neon_vqadds; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqadd"); + case ARM::BI__builtin_neon_vqdmlal_lane_v: + splat = true; + case ARM::BI__builtin_neon_vqdmlal_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlal, &Ty, 1), + Ops, "vqdmlal", splat); + case ARM::BI__builtin_neon_vqdmlsl_lane_v: + splat = true; + case ARM::BI__builtin_neon_vqdmlsl_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlsl, &Ty, 1), + Ops, "vqdmlsl", splat); + case ARM::BI__builtin_neon_vqdmulh_lane_v: + case ARM::BI__builtin_neon_vqdmulhq_lane_v: + splat = true; + case ARM::BI__builtin_neon_vqdmulh_v: + case ARM::BI__builtin_neon_vqdmulhq_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmulh, &Ty, 1), + Ops, "vqdmulh", splat); + case ARM::BI__builtin_neon_vqdmull_lane_v: + splat = true; + case ARM::BI__builtin_neon_vqdmull_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, &Ty, 1), + Ops, "vqdmull", splat); + case ARM::BI__builtin_neon_vqmovn_v: + Int = usgn ? Intrinsic::arm_neon_vqmovnu : Intrinsic::arm_neon_vqmovns; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqmovn"); + case ARM::BI__builtin_neon_vqmovun_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqmovnsu, &Ty, 1), + Ops, "vqdmull"); + case ARM::BI__builtin_neon_vqneg_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqneg, &Ty, 1), + Ops, "vqneg"); + case ARM::BI__builtin_neon_vqrdmulh_lane_v: + case ARM::BI__builtin_neon_vqrdmulhq_lane_v: + splat = true; + case ARM::BI__builtin_neon_vqrdmulh_v: + case ARM::BI__builtin_neon_vqrdmulhq_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrdmulh, &Ty, 1), + Ops, "vqrdmulh", splat); + case ARM::BI__builtin_neon_vqrshl_v: + case ARM::BI__builtin_neon_vqrshlq_v: + Int = usgn ? 
Intrinsic::arm_neon_vqrshiftu : Intrinsic::arm_neon_vqrshifts; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqrshl"); + case ARM::BI__builtin_neon_vqrshrn_n_v: + Int = usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqrshrn_n", false, + 1, true); + case ARM::BI__builtin_neon_vqrshrun_n_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, &Ty, 1), + Ops, "vqrshrun_n", false, 1, true); + case ARM::BI__builtin_neon_vqshl_v: + case ARM::BI__builtin_neon_vqshlq_v: + Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshl"); + case ARM::BI__builtin_neon_vqshl_n_v: + case ARM::BI__builtin_neon_vqshlq_n_v: + Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshl_n", false, + 1, false); + case ARM::BI__builtin_neon_vqshlu_n_v: + case ARM::BI__builtin_neon_vqshluq_n_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftsu, &Ty, 1), + Ops, "vqshlu", 1, false); + case ARM::BI__builtin_neon_vqshrn_n_v: + Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshrn_n", false, + 1, true); + case ARM::BI__builtin_neon_vqshrun_n_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, &Ty, 1), + Ops, "vqshrun_n", false, 1, true); + case ARM::BI__builtin_neon_vqsub_v: + case ARM::BI__builtin_neon_vqsubq_v: + Int = usgn ? 
Intrinsic::arm_neon_vqsubu : Intrinsic::arm_neon_vqsubs; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqsub"); + case ARM::BI__builtin_neon_vraddhn_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vraddhn, &Ty, 1), + Ops, "vraddhn"); + case ARM::BI__builtin_neon_vrecpe_v: + case ARM::BI__builtin_neon_vrecpeq_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, &Ty, 1), + Ops, "vrecpe"); + case ARM::BI__builtin_neon_vrecps_v: + case ARM::BI__builtin_neon_vrecpsq_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecps, &Ty, 1), + Ops, "vrecps"); + case ARM::BI__builtin_neon_vrhadd_v: + case ARM::BI__builtin_neon_vrhaddq_v: + Int = usgn ? Intrinsic::arm_neon_vrhaddu : Intrinsic::arm_neon_vrhadds; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrhadd"); + case ARM::BI__builtin_neon_vrshl_v: + case ARM::BI__builtin_neon_vrshlq_v: + Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrshl"); + case ARM::BI__builtin_neon_vrshrn_n_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, &Ty, 1), + Ops, "vrshrn_n", false, 1, true); + case ARM::BI__builtin_neon_vrshr_n_v: + case ARM::BI__builtin_neon_vrshrq_n_v: + Int = usgn ? 
Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrshr_n", false, + 1, true); + case ARM::BI__builtin_neon_vrsqrte_v: + case ARM::BI__builtin_neon_vrsqrteq_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrte, &Ty, 1), + Ops, "vrsqrte"); + case ARM::BI__builtin_neon_vrsqrts_v: + case ARM::BI__builtin_neon_vrsqrtsq_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrts, &Ty, 1), + Ops, "vrsqrts"); + case ARM::BI__builtin_neon_vrsra_n_v: + case ARM::BI__builtin_neon_vrsraq_n_v: + Ops[0] = Builder.CreateBitCast(Ops[0], Ty); + Ops[1] = Builder.CreateBitCast(Ops[1], Ty); + Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true); + Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts; + Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, &Ty, 1), Ops[1], Ops[2]); + return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n"); + case ARM::BI__builtin_neon_vrsubhn_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsubhn, &Ty, 1), + Ops, "vrsubhn"); + case ARM::BI__builtin_neon_vset_lane_i8: + case ARM::BI__builtin_neon_vset_lane_i16: + case ARM::BI__builtin_neon_vset_lane_i32: + case ARM::BI__builtin_neon_vset_lane_i64: + case ARM::BI__builtin_neon_vset_lane_f32: + case ARM::BI__builtin_neon_vsetq_lane_i8: + case ARM::BI__builtin_neon_vsetq_lane_i16: + case ARM::BI__builtin_neon_vsetq_lane_i32: + case ARM::BI__builtin_neon_vsetq_lane_i64: + case ARM::BI__builtin_neon_vsetq_lane_f32: + Ops.push_back(EmitScalarExpr(E->getArg(2))); + return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane"); + case ARM::BI__builtin_neon_vshl_v: + case ARM::BI__builtin_neon_vshlq_v: + Int = usgn ? Intrinsic::arm_neon_vshiftu : Intrinsic::arm_neon_vshifts; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vshl"); + case ARM::BI__builtin_neon_vshll_n_v: + Int = usgn ? 
Intrinsic::arm_neon_vshiftlu : Intrinsic::arm_neon_vshiftls; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vshll", false, 1); + case ARM::BI__builtin_neon_vshl_n_v: + case ARM::BI__builtin_neon_vshlq_n_v: + Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false); + return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1], "vshl_n"); + case ARM::BI__builtin_neon_vshrn_n_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftn, &Ty, 1), + Ops, "vshrn_n", false, 1, true); + case ARM::BI__builtin_neon_vshr_n_v: + case ARM::BI__builtin_neon_vshrq_n_v: + Ops[0] = Builder.CreateBitCast(Ops[0], Ty); + Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false); + if (usgn) + return Builder.CreateLShr(Ops[0], Ops[1], "vshr_n"); + else + return Builder.CreateAShr(Ops[0], Ops[1], "vshr_n"); + case ARM::BI__builtin_neon_vsri_n_v: + case ARM::BI__builtin_neon_vsriq_n_v: + poly = true; + case ARM::BI__builtin_neon_vsli_n_v: + case ARM::BI__builtin_neon_vsliq_n_v: + Ops[2] = EmitNeonShiftVector(Ops[2], Ty, poly); + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, &Ty, 1), + Ops, "vsli_n"); + case ARM::BI__builtin_neon_vsra_n_v: + case ARM::BI__builtin_neon_vsraq_n_v: + Ops[0] = Builder.CreateBitCast(Ops[0], Ty); + Ops[1] = Builder.CreateBitCast(Ops[1], Ty); + Ops[2] = EmitNeonShiftVector(Ops[2], Ty, false); + if (usgn) + Ops[1] = Builder.CreateLShr(Ops[1], Ops[2], "vsra_n"); + else + Ops[1] = Builder.CreateAShr(Ops[1], Ops[2], "vsra_n"); + return Builder.CreateAdd(Ops[0], Ops[1]); + case ARM::BI__builtin_neon_vst1_v: + case ARM::BI__builtin_neon_vst1q_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, &Ty, 1), + Ops, ""); + case ARM::BI__builtin_neon_vst1_lane_v: + case ARM::BI__builtin_neon_vst1q_lane_v: + Ops[1] = Builder.CreateBitCast(Ops[1], Ty); + Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]); + Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); + return Builder.CreateStore(Ops[1], Builder.CreateBitCast(Ops[0], 
Ty)); + case ARM::BI__builtin_neon_vst2_v: + case ARM::BI__builtin_neon_vst2q_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2, &Ty, 1), + Ops, ""); + case ARM::BI__builtin_neon_vst2_lane_v: + case ARM::BI__builtin_neon_vst2q_lane_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2lane, &Ty, 1), + Ops, ""); + case ARM::BI__builtin_neon_vst3_v: + case ARM::BI__builtin_neon_vst3q_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3, &Ty, 1), + Ops, ""); + case ARM::BI__builtin_neon_vst3_lane_v: + case ARM::BI__builtin_neon_vst3q_lane_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3lane, &Ty, 1), + Ops, ""); + case ARM::BI__builtin_neon_vst4_v: + case ARM::BI__builtin_neon_vst4q_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4, &Ty, 1), + Ops, ""); + case ARM::BI__builtin_neon_vst4_lane_v: + case ARM::BI__builtin_neon_vst4q_lane_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, &Ty, 1), + Ops, ""); + case ARM::BI__builtin_neon_vsubhn_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vsubhn, &Ty, 1), + Ops, "vsubhn"); + case ARM::BI__builtin_neon_vsubl_v: + Int = usgn ? Intrinsic::arm_neon_vsublu : Intrinsic::arm_neon_vsubls; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vsubl"); + case ARM::BI__builtin_neon_vsubw_v: + Int = usgn ? 
Intrinsic::arm_neon_vsubws : Intrinsic::arm_neon_vsubwu; + return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vsubw"); + case ARM::BI__builtin_neon_vtbl1_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1), + Ops, "vtbl1"); + case ARM::BI__builtin_neon_vtbl2_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2), + Ops, "vtbl2"); + case ARM::BI__builtin_neon_vtbl3_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3), + Ops, "vtbl3"); + case ARM::BI__builtin_neon_vtbl4_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4), + Ops, "vtbl4"); + case ARM::BI__builtin_neon_vtbx1_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1), + Ops, "vtbx1"); + case ARM::BI__builtin_neon_vtbx2_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2), + Ops, "vtbx2"); + case ARM::BI__builtin_neon_vtbx3_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3), + Ops, "vtbx3"); + case ARM::BI__builtin_neon_vtbx4_v: + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4), + Ops, "vtbx4"); + case ARM::BI__builtin_neon_vtst_v: + case ARM::BI__builtin_neon_vtstq_v: { + Ops[0] = Builder.CreateBitCast(Ops[0], Ty); + Ops[1] = Builder.CreateBitCast(Ops[1], Ty); + Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]); + Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0], + ConstantAggregateZero::get(Ty)); + return Builder.CreateSExt(Ops[0], Ty, "vtst"); + } + case ARM::BI__builtin_neon_vtrn_v: + case ARM::BI__builtin_neon_vtrnq_v: { + Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); + Ops[1] = Builder.CreateBitCast(Ops[1], Ty); + Ops[2] = Builder.CreateBitCast(Ops[2], Ty); + Value *SV; + + for (unsigned vi = 0; vi != 2; ++vi) { + SmallVector<Constant*, 16> Indices; + for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { + Indices.push_back(ConstantInt::get(Int32Ty, i+vi)); + Indices.push_back(ConstantInt::get(Int32Ty, i+e+vi)); + } + Value *Addr = 
Builder.CreateConstInBoundsGEP1_32(Ops[0], vi); + SV = llvm::ConstantVector::get(Indices.begin(), Indices.size()); + SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn"); + SV = Builder.CreateStore(SV, Addr); + } + return SV; + } + case ARM::BI__builtin_neon_vuzp_v: + case ARM::BI__builtin_neon_vuzpq_v: { + Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); + Ops[1] = Builder.CreateBitCast(Ops[1], Ty); + Ops[2] = Builder.CreateBitCast(Ops[2], Ty); + Value *SV; + + for (unsigned vi = 0; vi != 2; ++vi) { + SmallVector<Constant*, 16> Indices; + for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) + Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi)); + + Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi); + SV = llvm::ConstantVector::get(Indices.begin(), Indices.size()); + SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp"); + SV = Builder.CreateStore(SV, Addr); + } + return SV; + } + case ARM::BI__builtin_neon_vzip_v: + case ARM::BI__builtin_neon_vzipq_v: { + Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); + Ops[1] = Builder.CreateBitCast(Ops[1], Ty); + Ops[2] = Builder.CreateBitCast(Ops[2], Ty); + Value *SV; + + for (unsigned vi = 0; vi != 2; ++vi) { + SmallVector<Constant*, 16> Indices; + for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { + Indices.push_back(ConstantInt::get(Int32Ty, (i >> 1))); + Indices.push_back(ConstantInt::get(Int32Ty, (i >> 1)+e)); + } + Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi); + SV = llvm::ConstantVector::get(Indices.begin(), Indices.size()); + SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip"); + SV = Builder.CreateStore(SV, Addr); + } + return SV; } } } @@ -900,9 +1657,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, case X86::BI__builtin_ia32_psrldi128: case X86::BI__builtin_ia32_psrlqi128: case X86::BI__builtin_ia32_psrlwi128: { - Ops[1] = Builder.CreateZExt(Ops[1], 
llvm::Type::getInt64Ty(VMContext), "zext"); - const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::getInt64Ty(VMContext), 2); - llvm::Value *Zero = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0); + Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext"); + const llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2); + llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0); Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty), Ops[1], Zero, "insert"); Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "bitcast"); @@ -955,8 +1712,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, case X86::BI__builtin_ia32_psrldi: case X86::BI__builtin_ia32_psrlqi: case X86::BI__builtin_ia32_psrlwi: { - Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::getInt64Ty(VMContext), "zext"); - const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::getInt64Ty(VMContext), 1); + Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext"); + const llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 1); Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast"); const char *name = 0; Intrinsic::ID ID = Intrinsic::not_intrinsic; @@ -1009,16 +1766,16 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, } case X86::BI__builtin_ia32_ldmxcsr: { const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext); - Value *One = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1); - Value *Tmp = Builder.CreateAlloca(llvm::Type::getInt32Ty(VMContext), One, "tmp"); + Value *One = llvm::ConstantInt::get(Int32Ty, 1); + Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp"); Builder.CreateStore(Ops[0], Tmp); return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr), Builder.CreateBitCast(Tmp, PtrTy)); } case X86::BI__builtin_ia32_stmxcsr: { const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext); - Value *One = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1); - Value *Tmp = Builder.CreateAlloca(llvm::Type::getInt32Ty(VMContext), 
One, "tmp"); + Value *One = llvm::ConstantInt::get(Int32Ty, 1); + Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp"); One = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr), Builder.CreateBitCast(Tmp, PtrTy)); return Builder.CreateLoad(Tmp, "stmxcsr"); @@ -1033,16 +1790,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, } case X86::BI__builtin_ia32_storehps: case X86::BI__builtin_ia32_storelps: { - const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext); - llvm::Type *PtrTy = llvm::PointerType::getUnqual(EltTy); - llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2); + llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty); + llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2); // cast val v2i64 Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast"); // extract (0, 1) unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1; - llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Index); + llvm::Value *Idx = llvm::ConstantInt::get(Int32Ty, Index); Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract"); // cast pointer to i64 & store @@ -1055,11 +1811,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, // If palignr is shifting the pair of input vectors less than 9 bytes, // emit a shuffle instruction. if (shiftVal <= 8) { - const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext); - llvm::SmallVector<llvm::Constant*, 8> Indices; for (unsigned i = 0; i != 8; ++i) - Indices.push_back(llvm::ConstantInt::get(IntTy, shiftVal + i)); + Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i)); Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size()); return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr"); @@ -1069,8 +1823,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, // than 16 bytes, emit a logical right shift of the destination. 
if (shiftVal < 16) { // MMX has these as 1 x i64 vectors for some odd optimization reasons. - const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext); - const llvm::Type *VecTy = llvm::VectorType::get(EltTy, 1); + const llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1); Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast"); Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8); @@ -1089,11 +1842,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, // If palignr is shifting the pair of input vectors less than 17 bytes, // emit a shuffle instruction. if (shiftVal <= 16) { - const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext); - llvm::SmallVector<llvm::Constant*, 16> Indices; for (unsigned i = 0; i != 16; ++i) - Indices.push_back(llvm::ConstantInt::get(IntTy, shiftVal + i)); + Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i)); Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size()); return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr"); @@ -1102,12 +1853,10 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, // If palignr is shifting the pair of input vectors more than 16 but less // than 32 bytes, emit a logical right shift of the destination. 
if (shiftVal < 32) { - const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext); - const llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2); - const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext); + const llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2); Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast"); - Ops[1] = llvm::ConstantInt::get(IntTy, (shiftVal-16) * 8); + Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8); // create i32 constant llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq); @@ -1132,6 +1881,48 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID, switch (BuiltinID) { default: return 0; + // vec_ld, vec_lvsl, vec_lvsr + case PPC::BI__builtin_altivec_lvx: + case PPC::BI__builtin_altivec_lvxl: + case PPC::BI__builtin_altivec_lvebx: + case PPC::BI__builtin_altivec_lvehx: + case PPC::BI__builtin_altivec_lvewx: + case PPC::BI__builtin_altivec_lvsl: + case PPC::BI__builtin_altivec_lvsr: + { + Ops[1] = Builder.CreateBitCast(Ops[1], llvm::Type::getInt8PtrTy(VMContext)); + + Ops[0] = Builder.CreateGEP(Ops[1], Ops[0], "tmp"); + Ops.pop_back(); + + switch (BuiltinID) { + default: assert(0 && "Unsupported ld/lvsl/lvsr intrinsic!"); + case PPC::BI__builtin_altivec_lvx: + ID = Intrinsic::ppc_altivec_lvx; + break; + case PPC::BI__builtin_altivec_lvxl: + ID = Intrinsic::ppc_altivec_lvxl; + break; + case PPC::BI__builtin_altivec_lvebx: + ID = Intrinsic::ppc_altivec_lvebx; + break; + case PPC::BI__builtin_altivec_lvehx: + ID = Intrinsic::ppc_altivec_lvehx; + break; + case PPC::BI__builtin_altivec_lvewx: + ID = Intrinsic::ppc_altivec_lvewx; + break; + case PPC::BI__builtin_altivec_lvsl: + ID = Intrinsic::ppc_altivec_lvsl; + break; + case PPC::BI__builtin_altivec_lvsr: + ID = Intrinsic::ppc_altivec_lvsr; + break; + } + llvm::Function *F = CGM.getIntrinsic(ID); + return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), ""); + } + // vec_st case PPC::BI__builtin_altivec_stvx: case PPC::BI__builtin_altivec_stvxl: 
@@ -1140,12 +1931,11 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID, case PPC::BI__builtin_altivec_stvewx: { Ops[2] = Builder.CreateBitCast(Ops[2], llvm::Type::getInt8PtrTy(VMContext)); - Ops[1] = !isa<Constant>(Ops[1]) || !cast<Constant>(Ops[1])->isNullValue() - ? Builder.CreateGEP(Ops[2], Ops[1], "tmp") : Ops[2]; + Ops[1] = Builder.CreateGEP(Ops[2], Ops[1], "tmp"); Ops.pop_back(); switch (BuiltinID) { - default: assert(0 && "Unsupported vavg intrinsic!"); + default: assert(0 && "Unsupported st intrinsic!"); case PPC::BI__builtin_altivec_stvx: ID = Intrinsic::ppc_altivec_stvx; break; diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp index 5258779..7b7be9a 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp @@ -23,7 +23,7 @@ #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/StmtCXX.h" -#include "clang/CodeGen/CodeGenOptions.h" +#include "clang/Frontend/CodeGenOptions.h" #include "llvm/ADT/StringExtras.h" using namespace clang; using namespace CodeGen; @@ -97,8 +97,8 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) { /// If we don't have a definition for the destructor yet, don't /// emit. We can't emit aliases to declarations; that's just not /// how aliases work. - const CXXDestructorDecl *BaseD = UniqueBase->getDestructor(getContext()); - if (!BaseD->isImplicit() && !BaseD->getBody()) + const CXXDestructorDecl *BaseD = UniqueBase->getDestructor(); + if (!BaseD->isImplicit() && !BaseD->hasBody()) return true; // If the base is at a non-zero offset, give up. @@ -166,8 +166,7 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl, new llvm::GlobalAlias(AliasType, Linkage, "", Aliasee, &getModule()); // Switch any previous uses to the alias. 
- MangleBuffer MangledName; - getMangledName(MangledName, AliasDecl); + llvm::StringRef MangledName = getMangledName(AliasDecl); llvm::GlobalValue *Entry = GetGlobalValue(MangledName); if (Entry) { assert(Entry->isDeclaration() && "definition already exists for alias"); @@ -177,7 +176,7 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl, Entry->replaceAllUsesWith(Alias); Entry->eraseFromParent(); } else { - Alias->setName(MangledName.getString()); + Alias->setName(MangledName); } // Finally, set up the alias with its proper name and attributes. @@ -218,8 +217,9 @@ void CodeGenModule::EmitCXXConstructor(const CXXConstructorDecl *D, llvm::GlobalValue * CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *D, CXXCtorType Type) { - MangleBuffer Name; - getMangledCXXCtorName(Name, D, Type); + GlobalDecl GD(D, Type); + + llvm::StringRef Name = getMangledName(GD); if (llvm::GlobalValue *V = GetGlobalValue(Name)) return V; @@ -227,18 +227,7 @@ CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *D, const llvm::FunctionType *FTy = getTypes().GetFunctionType(getTypes().getFunctionInfo(D, Type), FPT->isVariadic()); - return cast<llvm::Function>( - GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(D, Type))); -} - -void CodeGenModule::getMangledName(MangleBuffer &Buffer, const BlockDecl *BD) { - getMangleContext().mangleBlock(BD, Buffer.getBuffer()); -} - -void CodeGenModule::getMangledCXXCtorName(MangleBuffer &Name, - const CXXConstructorDecl *D, - CXXCtorType Type) { - getMangleContext().mangleCXXCtor(D, Type, Name.getBuffer()); + return cast<llvm::Function>(GetOrCreateLLVMFunction(Name, FTy, GD)); } void CodeGenModule::EmitCXXDestructors(const CXXDestructorDecl *D) { @@ -286,22 +275,54 @@ void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *D, llvm::GlobalValue * CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *D, CXXDtorType Type) { - MangleBuffer Name; - getMangledCXXDtorName(Name, D, Type); + GlobalDecl GD(D, 
Type); + + llvm::StringRef Name = getMangledName(GD); if (llvm::GlobalValue *V = GetGlobalValue(Name)) return V; const llvm::FunctionType *FTy = getTypes().GetFunctionType(getTypes().getFunctionInfo(D, Type), false); - return cast<llvm::Function>( - GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(D, Type))); + return cast<llvm::Function>(GetOrCreateLLVMFunction(Name, FTy, GD)); } -void CodeGenModule::getMangledCXXDtorName(MangleBuffer &Name, - const CXXDestructorDecl *D, - CXXDtorType Type) { - getMangleContext().mangleCXXDtor(D, Type, Name.getBuffer()); +llvm::Constant * +CodeGenModule::GetCXXMemberFunctionPointerValue(const CXXMethodDecl *MD) { + assert(MD->isInstance() && "Member function must not be static!"); + + MD = MD->getCanonicalDecl(); + + const llvm::Type *PtrDiffTy = Types.ConvertType(Context.getPointerDiffType()); + + // Get the function pointer (or index if this is a virtual function). + if (MD->isVirtual()) { + uint64_t Index = VTables.getMethodVTableIndex(MD); + + // FIXME: We shouldn't use / 8 here. + uint64_t PointerWidthInBytes = Context.Target.getPointerWidth(0) / 8; + + // Itanium C++ ABI 2.3: + // For a non-virtual function, this field is a simple function pointer. + // For a virtual function, it is 1 plus the virtual table offset + // (in bytes) of the function, represented as a ptrdiff_t. + return llvm::ConstantInt::get(PtrDiffTy, (Index * PointerWidthInBytes) + 1); + } + + const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); + const llvm::Type *Ty; + // Check whether the function has a computable LLVM signature. + if (!CodeGenTypes::VerifyFuncTypeComplete(FPT)) { + // The function has a computable LLVM signature; use the correct type. + Ty = Types.GetFunctionType(Types.getFunctionInfo(MD), FPT->isVariadic()); + } else { + // Use an arbitrary non-function type to tell GetAddrOfFunction that the + // function type is incomplete. 
+ Ty = PtrDiffTy; + } + + llvm::Constant *FuncPtr = GetAddrOfFunction(MD, Ty); + return llvm::ConstantExpr::getPtrToInt(FuncPtr, PtrDiffTy); } static llvm::Value *BuildVirtualCall(CodeGenFunction &CGF, uint64_t VTableIndex, diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h index a7e1871..e1bbb0a 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h @@ -31,6 +31,7 @@ public: /// Creates an instance of a C++ ABI class. CXXABI *CreateItaniumCXXABI(CodeGenModule &CGM); +CXXABI *CreateMicrosoftCXXABI(CodeGenModule &CGM); } } diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp index 73cee3c..3d1e143 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp @@ -13,26 +13,22 @@ //===----------------------------------------------------------------------===// #include "CGCall.h" +#include "ABIInfo.h" #include "CodeGenFunction.h" #include "CodeGenModule.h" #include "clang/Basic/TargetInfo.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclObjC.h" -#include "clang/CodeGen/CodeGenOptions.h" +#include "clang/Frontend/CodeGenOptions.h" #include "llvm/Attributes.h" #include "llvm/Support/CallSite.h" #include "llvm/Target/TargetData.h" - -#include "ABIInfo.h" - using namespace clang; using namespace CodeGen; /***/ -// FIXME: Use iterator and sidestep silly type array creation. 
- static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) { switch (CC) { default: return llvm::CallingConv::C; @@ -65,29 +61,31 @@ static CanQualType GetReturnType(QualType RetTy) { } const CGFunctionInfo & -CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP) { +CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP, + bool IsRecursive) { return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(), llvm::SmallVector<CanQualType, 16>(), - FTNP->getExtInfo()); + FTNP->getExtInfo(), IsRecursive); } /// \param Args - contains any initial parameters besides those /// in the formal type static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT, llvm::SmallVectorImpl<CanQualType> &ArgTys, - CanQual<FunctionProtoType> FTP) { + CanQual<FunctionProtoType> FTP, + bool IsRecursive = false) { // FIXME: Kill copy. for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i) ArgTys.push_back(FTP->getArgType(i)); CanQualType ResTy = FTP->getResultType().getUnqualifiedType(); - return CGT.getFunctionInfo(ResTy, ArgTys, - FTP->getExtInfo()); + return CGT.getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo(), IsRecursive); } const CGFunctionInfo & -CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP) { +CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP, + bool IsRecursive) { llvm::SmallVector<CanQualType, 16> ArgTys; - return ::getFunctionInfo(*this, ArgTys, FTP); + return ::getFunctionInfo(*this, ArgTys, FTP, IsRecursive); } static CallingConv getCallingConventionForDecl(const Decl *D) { @@ -220,7 +218,8 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy, const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy, const llvm::SmallVectorImpl<CanQualType> &ArgTys, - const FunctionType::ExtInfo &Info) { + const FunctionType::ExtInfo &Info, + bool IsRecursive) { #ifndef NDEBUG for (llvm::SmallVectorImpl<CanQualType>::const_iterator I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I) @@ -240,35 +239,65 @@ 
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy, return *FI; // Construct the function info. - FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getRegParm(), ResTy, ArgTys); + FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getRegParm(), ResTy, + ArgTys.data(), ArgTys.size()); FunctionInfos.InsertNode(FI, InsertPos); + // ABI lowering wants to know what our preferred type for the argument is in + // various situations, pass it in. + llvm::SmallVector<const llvm::Type *, 8> PreferredArgTypes; + for (llvm::SmallVectorImpl<CanQualType>::const_iterator + I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I) { + // If this is being called from the guts of the ConvertType loop, make sure + // to call ConvertTypeRecursive so we don't get into issues with cyclic + // pointer type structures. + PreferredArgTypes.push_back(ConvertTypeRecursive(*I)); + } + // Compute ABI information. - getABIInfo().computeInfo(*FI, getContext(), TheModule.getContext()); - + getABIInfo().computeInfo(*FI, getContext(), TheModule.getContext(), + PreferredArgTypes.data(), PreferredArgTypes.size()); + + // If this is a top-level call and ConvertTypeRecursive hit unresolved pointer + // types, resolve them now. These pointers may point to this function, which + // we *just* filled in the FunctionInfo for. + if (!IsRecursive && !PointersToResolve.empty()) { + // Use PATypeHolder's so that our preferred types don't dangle under + // refinement. 
+ llvm::SmallVector<llvm::PATypeHolder, 8> Handles(PreferredArgTypes.begin(), + PreferredArgTypes.end()); + HandleLateResolvedPointers(); + PreferredArgTypes.clear(); + PreferredArgTypes.append(Handles.begin(), Handles.end()); + } + + return *FI; } CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention, - bool _NoReturn, - unsigned _RegParm, + bool _NoReturn, unsigned _RegParm, CanQualType ResTy, - const llvm::SmallVectorImpl<CanQualType> &ArgTys) + const CanQualType *ArgTys, + unsigned NumArgTys) : CallingConvention(_CallingConvention), EffectiveCallingConvention(_CallingConvention), NoReturn(_NoReturn), RegParm(_RegParm) { - NumArgs = ArgTys.size(); - Args = new ArgInfo[1 + NumArgs]; + NumArgs = NumArgTys; + + // FIXME: Coallocate with the CGFunctionInfo object. + Args = new ArgInfo[1 + NumArgTys]; Args[0].type = ResTy; - for (unsigned i = 0; i < NumArgs; ++i) + for (unsigned i = 0; i != NumArgTys; ++i) Args[1 + i].type = ArgTys[i]; } /***/ void CodeGenTypes::GetExpandedTypes(QualType Ty, - std::vector<const llvm::Type*> &ArgTys) { + std::vector<const llvm::Type*> &ArgTys, + bool IsRecursive) { const RecordType *RT = Ty->getAsStructureType(); assert(RT && "Can only expand structure types."); const RecordDecl *RD = RT->getDecl(); @@ -283,9 +312,9 @@ void CodeGenTypes::GetExpandedTypes(QualType Ty, QualType FT = FD->getType(); if (CodeGenFunction::hasAggregateLLVMType(FT)) { - GetExpandedTypes(FT, ArgTys); + GetExpandedTypes(FT, ArgTys, IsRecursive); } else { - ArgTys.push_back(ConvertType(FT)); + ArgTys.push_back(ConvertType(FT, IsRecursive)); } } } @@ -345,6 +374,71 @@ CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV, } } +/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are +/// accessing some number of bytes out of it, try to gep into the struct to get +/// at its inner goodness. Dive as deep as possible without entering an element +/// with an in-memory size smaller than DstSize. 
+static llvm::Value * +EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr, + const llvm::StructType *SrcSTy, + uint64_t DstSize, CodeGenFunction &CGF) { + // We can't dive into a zero-element struct. + if (SrcSTy->getNumElements() == 0) return SrcPtr; + + const llvm::Type *FirstElt = SrcSTy->getElementType(0); + + // If the first elt is at least as large as what we're looking for, or if the + // first element is the same size as the whole struct, we can enter it. + uint64_t FirstEltSize = + CGF.CGM.getTargetData().getTypeAllocSize(FirstElt); + if (FirstEltSize < DstSize && + FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy)) + return SrcPtr; + + // GEP into the first element. + SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive"); + + // If the first element is a struct, recurse. + const llvm::Type *SrcTy = + cast<llvm::PointerType>(SrcPtr->getType())->getElementType(); + if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) + return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF); + + return SrcPtr; +} + +/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both +/// are either integers or pointers. This does a truncation of the value if it +/// is too large or a zero extension if it is too small. +static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, + const llvm::Type *Ty, + CodeGenFunction &CGF) { + if (Val->getType() == Ty) + return Val; + + if (isa<llvm::PointerType>(Val->getType())) { + // If this is Pointer->Pointer avoid conversion to and from int. + if (isa<llvm::PointerType>(Ty)) + return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val"); + + // Convert the pointer to an integer so we can play with its width. 
+ Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi"); + } + + const llvm::Type *DestIntTy = Ty; + if (isa<llvm::PointerType>(DestIntTy)) + DestIntTy = CGF.IntPtrTy; + + if (Val->getType() != DestIntTy) + Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii"); + + if (isa<llvm::PointerType>(Ty)) + Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip"); + return Val; +} + + + /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as /// a pointer to an object of type \arg Ty. /// @@ -356,9 +450,28 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr, CodeGenFunction &CGF) { const llvm::Type *SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType(); - uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy); + + // If SrcTy and Ty are the same, just do a load. + if (SrcTy == Ty) + return CGF.Builder.CreateLoad(SrcPtr); + uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty); + + if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) { + SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF); + SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType(); + } + + uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy); + // If the source and destination are integer or pointer types, just do an + // extension or truncation to the desired type. + if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) && + (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) { + llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr); + return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF); + } + // If load is legal, just bitcast the src pointer. if (SrcSize >= DstSize) { // Generally SrcSize is never greater than DstSize, since this means we are @@ -373,18 +486,18 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr, // FIXME: Use better alignment / avoid requiring aligned load. 
Load->setAlignment(1); return Load; - } else { - // Otherwise do coercion through memory. This is stupid, but - // simple. - llvm::Value *Tmp = CGF.CreateTempAlloca(Ty); - llvm::Value *Casted = - CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy)); - llvm::StoreInst *Store = - CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted); - // FIXME: Use better alignment / avoid requiring aligned store. - Store->setAlignment(1); - return CGF.Builder.CreateLoad(Tmp); } + + // Otherwise do coercion through memory. This is stupid, but + // simple. + llvm::Value *Tmp = CGF.CreateTempAlloca(Ty); + llvm::Value *Casted = + CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy)); + llvm::StoreInst *Store = + CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted); + // FIXME: Use better alignment / avoid requiring aligned store. + Store->setAlignment(1); + return CGF.Builder.CreateLoad(Tmp); } /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src, @@ -399,8 +512,27 @@ static void CreateCoercedStore(llvm::Value *Src, const llvm::Type *SrcTy = Src->getType(); const llvm::Type *DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType(); - + if (SrcTy == DstTy) { + CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile); + return; + } + uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy); + + if (const llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) { + DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF); + DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType(); + } + + // If the source and destination are integer or pointer types, just do an + // extension or truncation to the desired type. 
+ if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) && + (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) { + Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF); + CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile); + return; + } + uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy); // If store is legal, just bitcast the src pointer. @@ -432,10 +564,28 @@ static void CreateCoercedStore(llvm::Value *Src, /***/ -bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) { +bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) { return FI.getReturnInfo().isIndirect(); } +bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) { + if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) { + switch (BT->getKind()) { + default: + return false; + case BuiltinType::Float: + return getContext().Target.useObjCFPRetForRealType(TargetInfo::Float); + case BuiltinType::Double: + return getContext().Target.useObjCFPRetForRealType(TargetInfo::Double); + case BuiltinType::LongDouble: + return getContext().Target.useObjCFPRetForRealType( + TargetInfo::LongDouble); + } + } + + return false; +} + const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) { const CGFunctionInfo &FI = getFunctionInfo(GD); @@ -445,11 +595,12 @@ const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) { cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>()) Variadic = FPT->isVariadic(); - return GetFunctionType(FI, Variadic); + return GetFunctionType(FI, Variadic, false); } const llvm::FunctionType * -CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) { +CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic, + bool IsRecursive) { std::vector<const llvm::Type*> ArgTys; const llvm::Type *ResultType = 0; @@ -462,13 +613,13 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) { case ABIArgInfo::Extend: case ABIArgInfo::Direct: - 
ResultType = ConvertType(RetTy); + ResultType = ConvertType(RetTy, IsRecursive); break; case ABIArgInfo::Indirect: { assert(!RetAI.getIndirectAlign() && "Align unused on indirect return."); ResultType = llvm::Type::getVoidTy(getLLVMContext()); - const llvm::Type *STy = ConvertType(RetTy); + const llvm::Type *STy = ConvertType(RetTy, IsRecursive); ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace())); break; } @@ -490,24 +641,34 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) { case ABIArgInfo::Ignore: break; - case ABIArgInfo::Coerce: - ArgTys.push_back(AI.getCoerceToType()); + case ABIArgInfo::Coerce: { + // If the coerce-to type is a first class aggregate, flatten it. Either + // way is semantically identical, but fast-isel and the optimizer + // generally likes scalar values better than FCAs. + const llvm::Type *ArgTy = AI.getCoerceToType(); + if (const llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgTy)) { + for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) + ArgTys.push_back(STy->getElementType(i)); + } else { + ArgTys.push_back(ArgTy); + } break; + } case ABIArgInfo::Indirect: { // indirect arguments are always on the stack, which is addr space #0. 
- const llvm::Type *LTy = ConvertTypeForMem(it->type); + const llvm::Type *LTy = ConvertTypeForMem(it->type, IsRecursive); ArgTys.push_back(llvm::PointerType::getUnqual(LTy)); break; } case ABIArgInfo::Extend: case ABIArgInfo::Direct: - ArgTys.push_back(ConvertType(it->type)); + ArgTys.push_back(ConvertType(it->type, IsRecursive)); break; case ABIArgInfo::Expand: - GetExpandedTypes(it->type, ArgTys); + GetExpandedTypes(it->type, ArgTys, IsRecursive); break; } } @@ -515,28 +676,12 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) { return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic); } -static bool HasIncompleteReturnTypeOrArgumentTypes(const FunctionProtoType *T) { - if (const TagType *TT = T->getResultType()->getAs<TagType>()) { - if (!TT->getDecl()->isDefinition()) - return true; - } - - for (unsigned i = 0, e = T->getNumArgs(); i != e; ++i) { - if (const TagType *TT = T->getArgType(i)->getAs<TagType>()) { - if (!TT->getDecl()->isDefinition()) - return true; - } - } - - return false; -} - const llvm::Type * CodeGenTypes::GetFunctionTypeForVTable(const CXXMethodDecl *MD) { const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); - if (!HasIncompleteReturnTypeOrArgumentTypes(FPT)) - return GetFunctionType(getFunctionInfo(MD), FPT->isVariadic()); + if (!VerifyFuncTypeComplete(FPT)) + return GetFunctionType(getFunctionInfo(MD), FPT->isVariadic(), false); return llvm::OpaqueType::get(getLLVMContext()); } @@ -557,6 +702,12 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI, if (TargetDecl) { if (TargetDecl->hasAttr<NoThrowAttr>()) FuncAttrs |= llvm::Attribute::NoUnwind; + else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) { + const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>(); + if (FPT && FPT->hasEmptyExceptionSpec()) + FuncAttrs |= llvm::Attribute::NoUnwind; + } + if (TargetDecl->hasAttr<NoReturnAttr>()) FuncAttrs |= llvm::Attribute::NoReturn; if 
(TargetDecl->hasAttr<ConstAttr>()) @@ -626,7 +777,12 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI, switch (AI.getKind()) { case ABIArgInfo::Coerce: - break; + if (const llvm::StructType *STy = + dyn_cast<llvm::StructType>(AI.getCoerceToType())) + Index += STy->getNumElements(); + else + ++Index; + continue; // Skip index increment. case ABIArgInfo::Indirect: if (AI.getIndirectByVal()) @@ -666,7 +822,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI, // FIXME: This is rather inefficient. Do we ever actually need to do // anything here? The result should be just reconstructed on the other // side, so extension should be a non-issue. - getTypes().GetExpandedTypes(ParamType, Tys); + getTypes().GetExpandedTypes(ParamType, Tys, false); Index += Tys.size(); continue; } @@ -687,7 +843,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, // initialize the return value. TODO: it might be nice to have // a more general mechanism for this that didn't require synthesized // return statements. - if (const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) { + if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) { if (FD->hasImplicitReturnZero()) { QualType RetTy = FD->getResultType().getUnqualifiedType(); const llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy); @@ -703,7 +859,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function::arg_iterator AI = Fn->arg_begin(); // Name the struct return argument. - if (CGM.ReturnTypeUsesSret(FI)) { + if (CGM.ReturnTypeUsesSRet(FI)) { AI->setName("agg.result"); ++AI; } @@ -719,7 +875,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, switch (ArgI.getKind()) { case ABIArgInfo::Indirect: { - llvm::Value* V = AI; + llvm::Value *V = AI; if (hasAggregateLLVMType(Ty)) { // Do nothing, aggregates and complex variables are accessed by // reference. 
@@ -739,7 +895,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, case ABIArgInfo::Extend: case ABIArgInfo::Direct: { assert(AI != Fn->arg_end() && "Argument mismatch!"); - llvm::Value* V = AI; + llvm::Value *V = AI; if (hasAggregateLLVMType(Ty)) { // Create a temporary alloca to hold the argument; the rest of // codegen expects to access aggregates & complex values by @@ -789,12 +945,35 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, continue; case ABIArgInfo::Coerce: { - assert(AI != Fn->arg_end() && "Argument mismatch!"); // FIXME: This is very wasteful; EmitParmDecl is just going to drop the // result in a new alloca anyway, so we could just store into that // directly if we broke the abstraction down more. - llvm::Value *V = CreateMemTemp(Ty, "coerce"); - CreateCoercedStore(AI, V, /*DestIsVolatile=*/false, *this); + llvm::AllocaInst *Alloca = CreateMemTemp(Ty, "coerce"); + Alloca->setAlignment(getContext().getDeclAlign(Arg).getQuantity()); + llvm::Value *V = Alloca; + + // If the coerce-to type is a first class aggregate, we flatten it and + // pass the elements. Either way is semantically identical, but fast-isel + // and the optimizer generally likes scalar values better than FCAs. + if (const llvm::StructType *STy = + dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) { + llvm::Value *Ptr = V; + Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy)); + + for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { + assert(AI != Fn->arg_end() && "Argument mismatch!"); + AI->setName(Arg->getName() + ".coerce" + llvm::Twine(i)); + llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i); + Builder.CreateStore(AI++, EltPtr); + } + } else { + // Simple case, just do a coerced store of the argument into the alloca. 
+ assert(AI != Fn->arg_end() && "Argument mismatch!"); + AI->setName(Arg->getName() + ".coerce"); + CreateCoercedStore(AI++, V, /*DestIsVolatile=*/false, *this); + } + + // Match to what EmitParmDecl is expecting for this type. if (!CodeGenFunction::hasAggregateLLVMType(Ty)) { V = EmitLoadOfScalar(V, false, Ty); @@ -805,7 +984,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, } } EmitParmDecl(*Arg, V); - break; + continue; // Skip ++AI increment, already done. } } @@ -814,52 +993,73 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, assert(AI == Fn->arg_end() && "Argument mismatch!"); } -void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI, - llvm::Value *ReturnValue) { - llvm::Value *RV = 0; - +void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) { // Functions with no result always return void. - if (ReturnValue) { - QualType RetTy = FI.getReturnType(); - const ABIArgInfo &RetAI = FI.getReturnInfo(); - - switch (RetAI.getKind()) { - case ABIArgInfo::Indirect: - if (RetTy->isAnyComplexType()) { - ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false); - StoreComplexToAddr(RT, CurFn->arg_begin(), false); - } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) { - // Do nothing; aggregrates get evaluated directly into the destination. - } else { - EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(), - false, RetTy); - } - break; - - case ABIArgInfo::Extend: - case ABIArgInfo::Direct: - // The internal return value temp always will have - // pointer-to-return-type type. 
- RV = Builder.CreateLoad(ReturnValue); - break; + if (ReturnValue == 0) { + Builder.CreateRetVoid(); + return; + } - case ABIArgInfo::Ignore: - break; + llvm::MDNode *RetDbgInfo = 0; + llvm::Value *RV = 0; + QualType RetTy = FI.getReturnType(); + const ABIArgInfo &RetAI = FI.getReturnInfo(); - case ABIArgInfo::Coerce: - RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this); - break; + switch (RetAI.getKind()) { + case ABIArgInfo::Indirect: + if (RetTy->isAnyComplexType()) { + ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false); + StoreComplexToAddr(RT, CurFn->arg_begin(), false); + } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) { + // Do nothing; aggregrates get evaluated directly into the destination. + } else { + EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(), + false, RetTy); + } + break; - case ABIArgInfo::Expand: - assert(0 && "Invalid ABI kind for return argument"); + case ABIArgInfo::Extend: + case ABIArgInfo::Direct: { + // The internal return value temp always will have pointer-to-return-type + // type, just do a load. + + // If the instruction right before the insertion point is a store to the + // return value, we can elide the load, zap the store, and usually zap the + // alloca. + llvm::BasicBlock *InsertBB = Builder.GetInsertBlock(); + llvm::StoreInst *SI = 0; + if (InsertBB->empty() || + !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) || + SI->getPointerOperand() != ReturnValue || SI->isVolatile()) { + RV = Builder.CreateLoad(ReturnValue); + } else { + // Get the stored value and nuke the now-dead store. + RetDbgInfo = SI->getDbgMetadata(); + RV = SI->getValueOperand(); + SI->eraseFromParent(); + + // If that was the only use of the return value, nuke it as well now. 
+ if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) { + cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent(); + ReturnValue = 0; + } } + break; } + case ABIArgInfo::Ignore: + break; - if (RV) { - Builder.CreateRet(RV); - } else { - Builder.CreateRetVoid(); + case ABIArgInfo::Coerce: + RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this); + break; + + case ABIArgInfo::Expand: + assert(0 && "Invalid ABI kind for return argument"); } + + llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid(); + if (RetDbgInfo) + Ret->setDbgMetadata(RetDbgInfo); } RValue CodeGenFunction::EmitDelegateCallArg(const VarDecl *Param) { @@ -894,11 +1094,29 @@ RValue CodeGenFunction::EmitDelegateCallArg(const VarDecl *Param) { RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) { if (ArgType->isReferenceType()) - return EmitReferenceBindingToExpr(E); + return EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0); return EmitAnyExprToTemp(E); } +/// Emits a call or invoke instruction to the given function, depending +/// on the current state of the EH stack. +llvm::CallSite +CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee, + llvm::Value * const *ArgBegin, + llvm::Value * const *ArgEnd, + const llvm::Twine &Name) { + llvm::BasicBlock *InvokeDest = getInvokeDest(); + if (!InvokeDest) + return Builder.CreateCall(Callee, ArgBegin, ArgEnd, Name); + + llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont"); + llvm::InvokeInst *Invoke = Builder.CreateInvoke(Callee, ContBB, InvokeDest, + ArgBegin, ArgEnd, Name); + EmitBlock(ContBB); + return Invoke; +} + RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, llvm::Value *Callee, ReturnValueSlot ReturnValue, @@ -916,7 +1134,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, // If the call returns a temporary with struct return, create a temporary // alloca to hold the result, unless one is given to us. 
- if (CGM.ReturnTypeUsesSret(CallInfo)) { + if (CGM.ReturnTypeUsesSRet(CallInfo)) { llvm::Value *Value = ReturnValue.getValue(); if (!Value) Value = CreateMemTemp(RetTy); @@ -973,8 +1191,24 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false); } else SrcPtr = RV.getAggregateAddr(); - Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(), - *this)); + + // If the coerce-to type is a first class aggregate, we flatten it and + // pass the elements. Either way is semantically identical, but fast-isel + // and the optimizer generally likes scalar values better than FCAs. + if (const llvm::StructType *STy = + dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) { + SrcPtr = Builder.CreateBitCast(SrcPtr, + llvm::PointerType::getUnqual(STy)); + for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { + llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i); + Args.push_back(Builder.CreateLoad(EltPtr)); + } + } else { + // In the simple case, just pass the coerced loaded value. 
+ Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(), + *this)); + } + break; } @@ -1014,15 +1248,18 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, } - llvm::BasicBlock *InvokeDest = getInvokeDest(); unsigned CallingConv; CodeGen::AttributeListType AttributeList; CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv); llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(), AttributeList.end()); + llvm::BasicBlock *InvokeDest = 0; + if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) + InvokeDest = getInvokeDest(); + llvm::CallSite CS; - if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) { + if (!InvokeDest) { CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size()); } else { llvm::BasicBlock *Cont = createBasicBlock("invoke.cont"); @@ -1030,9 +1267,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, Args.data(), Args.data()+Args.size()); EmitBlock(Cont); } - if (callOrInvoke) { + if (callOrInvoke) *callOrInvoke = CS.getInstruction(); - } CS.setAttributes(Attrs); CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv)); diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h index 31c8aac..41e707a 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h @@ -83,11 +83,9 @@ namespace CodeGen { typedef const ArgInfo *const_arg_iterator; typedef ArgInfo *arg_iterator; - CGFunctionInfo(unsigned CallingConvention, - bool NoReturn, - unsigned RegParm, - CanQualType ResTy, - const llvm::SmallVectorImpl<CanQualType> &ArgTys); + CGFunctionInfo(unsigned CallingConvention, bool NoReturn, + unsigned RegParm, CanQualType ResTy, + const CanQualType *ArgTys, unsigned NumArgTys); ~CGFunctionInfo() { delete[] Args; } const_arg_iterator arg_begin() const { return Args + 1; } diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp 
b/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp index bebea54..c50fe90 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp @@ -340,9 +340,9 @@ static void EmitBaseInitializer(CodeGenFunction &CGF, if (CGF.Exceptions && !BaseClassDecl->hasTrivialDestructor()) { // FIXME: Is this OK for C++0x delegating constructors? - CodeGenFunction::EHCleanupBlock Cleanup(CGF); + CodeGenFunction::CleanupBlock Cleanup(CGF, EHCleanup); - CXXDestructorDecl *DD = BaseClassDecl->getDestructor(CGF.getContext()); + CXXDestructorDecl *DD = BaseClassDecl->getDestructor(); CGF.EmitCXXDestructorCall(DD, Dtor_Base, isBaseVirtual, V); } } @@ -354,7 +354,7 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF, QualType T, unsigned Index) { if (Index == MemberInit->getNumArrayIndices()) { - CodeGenFunction::CleanupScope Cleanups(CGF); + CodeGenFunction::RunCleanupsScope Cleanups(CGF); llvm::Value *Dest = LHS.getAddress(); if (ArrayIndexVar) { @@ -410,7 +410,7 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF, llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc"); { - CodeGenFunction::CleanupScope Cleanups(CGF); + CodeGenFunction::RunCleanupsScope Cleanups(CGF); // Inside the loop body recurse to emit the inner loop or, eventually, the // constructor call. @@ -461,13 +461,12 @@ static void EmitMemberInitializer(CodeGenFunction &CGF, // was implicitly generated, we shouldn't be zeroing memory. 
RValue RHS; if (FieldType->isReferenceType()) { - RHS = CGF.EmitReferenceBindingToExpr(MemberInit->getInit(), - /*IsInitializer=*/true); + RHS = CGF.EmitReferenceBindingToExpr(MemberInit->getInit(), Field); CGF.EmitStoreThroughLValue(RHS, LHS, FieldType); } else if (FieldType->isArrayType() && !MemberInit->getInit()) { CGF.EmitNullInitialization(LHS.getAddress(), Field->getType()); } else if (!CGF.hasAggregateLLVMType(Field->getType())) { - RHS = RValue::get(CGF.EmitScalarExpr(MemberInit->getInit(), true)); + RHS = RValue::get(CGF.EmitScalarExpr(MemberInit->getInit())); CGF.EmitStoreThroughLValue(RHS, LHS, FieldType); } else if (MemberInit->getInit()->getType()->isAnyComplexType()) { CGF.EmitComplexExprIntoAddr(MemberInit->getInit(), LHS.getAddress(), @@ -535,12 +534,12 @@ static void EmitMemberInitializer(CodeGenFunction &CGF, CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl()); if (!RD->hasTrivialDestructor()) { // FIXME: Is this OK for C++0x delegating constructors? - CodeGenFunction::EHCleanupBlock Cleanup(CGF); + CodeGenFunction::CleanupBlock Cleanup(CGF, EHCleanup); llvm::Value *ThisPtr = CGF.LoadCXXThis(); LValue LHS = CGF.EmitLValueForField(ThisPtr, Field, 0); - CXXDestructorDecl *DD = RD->getDestructor(CGF.getContext()); + CXXDestructorDecl *DD = RD->getDestructor(); CGF.EmitCXXDestructorCall(DD, Dtor_Complete, /*ForVirtualBase=*/false, LHS.getAddress()); } @@ -607,13 +606,11 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) { // Enter the function-try-block before the constructor prologue if // applicable. - CXXTryStmtInfo TryInfo; bool IsTryBody = (Body && isa<CXXTryStmt>(Body)); - if (IsTryBody) - TryInfo = EnterCXXTryStmt(*cast<CXXTryStmt>(Body)); + EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true); - unsigned CleanupStackSize = CleanupEntries.size(); + EHScopeStack::stable_iterator CleanupDepth = EHStack.stable_begin(); // Emit the constructor prologue, i.e. the base and member // initializers. 
@@ -629,10 +626,10 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) { // initializers, which includes (along the exceptional path) the // destructors for those members and bases that were fully // constructed. - EmitCleanupBlocks(CleanupStackSize); + PopCleanupBlocks(CleanupDepth); if (IsTryBody) - ExitCXXTryStmt(*cast<CXXTryStmt>(Body), TryInfo); + ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true); } /// EmitCtorPrologue - This routine generates necessary code to initialize @@ -649,9 +646,6 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD, B != E; ++B) { CXXBaseOrMemberInitializer *Member = (*B); - assert(LiveTemporaries.empty() && - "Should not have any live temporaries at initializer start!"); - if (Member->isBaseInitializer()) EmitBaseInitializer(*this, ClassDecl, Member, CtorType); else @@ -660,12 +654,8 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD, InitializeVTablePointers(ClassDecl); - for (unsigned I = 0, E = MemberInitializers.size(); I != E; ++I) { - assert(LiveTemporaries.empty() && - "Should not have any live temporaries at initializer start!"); - + for (unsigned I = 0, E = MemberInitializers.size(); I != E; ++I) EmitMemberInitializer(*this, ClassDecl, MemberInitializers[I], CD, Args); - } } /// EmitDestructorBody - Emits the body of the current destructor. @@ -679,14 +669,33 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) { // anything else --- unless we're in a deleting destructor, in which // case we're just going to call the complete destructor and then // call operator delete() on the way out. - CXXTryStmtInfo TryInfo; bool isTryBody = (DtorType != Dtor_Deleting && Body && isa<CXXTryStmt>(Body)); if (isTryBody) - TryInfo = EnterCXXTryStmt(*cast<CXXTryStmt>(Body)); + EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true); - llvm::BasicBlock *DtorEpilogue = createBasicBlock("dtor.epilogue"); - PushCleanupBlock(DtorEpilogue); + // Emit the destructor epilogue now. 
If this is a complete + // destructor with a function-try-block, perform the base epilogue + // as well. + // + // FIXME: This isn't really right, because an exception in the + // non-EH epilogue should jump to the appropriate place in the + // EH epilogue. + { + CleanupBlock Cleanup(*this, NormalCleanup); + + if (isTryBody && DtorType == Dtor_Complete) + EmitDtorEpilogue(Dtor, Dtor_Base); + EmitDtorEpilogue(Dtor, DtorType); + + if (Exceptions) { + Cleanup.beginEHCleanup(); + + if (isTryBody && DtorType == Dtor_Complete) + EmitDtorEpilogue(Dtor, Dtor_Base); + EmitDtorEpilogue(Dtor, DtorType); + } + } bool SkipBody = false; // should get jump-threaded @@ -725,27 +734,12 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) { // nothing to do besides what's in the epilogue } - // Jump to the cleanup block. - CleanupBlockInfo Info = PopCleanupBlock(); - assert(Info.CleanupBlock == DtorEpilogue && "Block mismatch!"); - EmitBlock(DtorEpilogue); - - // Emit the destructor epilogue now. If this is a complete - // destructor with a function-try-block, perform the base epilogue - // as well. - if (isTryBody && DtorType == Dtor_Complete) - EmitDtorEpilogue(Dtor, Dtor_Base); - EmitDtorEpilogue(Dtor, DtorType); - - // Link up the cleanup information. - if (Info.SwitchBlock) - EmitBlock(Info.SwitchBlock); - if (Info.EndBlock) - EmitBlock(Info.EndBlock); + // We're done with the epilogue cleanup. + PopCleanupBlock(); // Exit the try if applicable. if (isTryBody) - ExitCXXTryStmt(*cast<CXXTryStmt>(Body), TryInfo); + ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true); } /// EmitDtorEpilogue - Emit all code that comes at the end of class's @@ -784,7 +778,7 @@ void CodeGenFunction::EmitDtorEpilogue(const CXXDestructorDecl *DD, // Ignore trivial destructors. 
if (BaseClassDecl->hasTrivialDestructor()) continue; - const CXXDestructorDecl *D = BaseClassDecl->getDestructor(getContext()); + const CXXDestructorDecl *D = BaseClassDecl->getDestructor(); llvm::Value *V = GetAddressOfDirectBaseInCompleteClass(LoadCXXThis(), ClassDecl, BaseClassDecl, @@ -839,10 +833,10 @@ void CodeGenFunction::EmitDtorEpilogue(const CXXDestructorDecl *DD, BasePtr = llvm::PointerType::getUnqual(BasePtr); llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(), BasePtr); - EmitCXXAggrDestructorCall(FieldClassDecl->getDestructor(getContext()), + EmitCXXAggrDestructorCall(FieldClassDecl->getDestructor(), Array, BaseAddrPtr); } else - EmitCXXDestructorCall(FieldClassDecl->getDestructor(getContext()), + EmitCXXDestructorCall(FieldClassDecl->getDestructor(), Dtor_Complete, /*ForVirtualBase=*/false, LHS.getAddress()); } @@ -863,7 +857,7 @@ void CodeGenFunction::EmitDtorEpilogue(const CXXDestructorDecl *DD, if (BaseClassDecl->hasTrivialDestructor()) continue; - const CXXDestructorDecl *D = BaseClassDecl->getDestructor(getContext()); + const CXXDestructorDecl *D = BaseClassDecl->getDestructor(); llvm::Value *V = GetAddressOfDirectBaseInCompleteClass(LoadCXXThis(), ClassDecl, BaseClassDecl, @@ -940,7 +934,7 @@ CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D, // Keep track of the current number of live temporaries. { - CXXTemporariesCleanupScope Scope(*this); + RunCleanupsScope Scope(*this); EmitCXXConstructorCall(D, Ctor_Complete, /*ForVirtualBase=*/false, Address, ArgBeg, ArgEnd); @@ -1033,51 +1027,6 @@ CodeGenFunction::EmitCXXAggrDestructorCall(const CXXDestructorDecl *D, EmitBlock(AfterFor, true); } -/// GenerateCXXAggrDestructorHelper - Generates a helper function which when -/// invoked, calls the default destructor on array elements in reverse order of -/// construction. 
-llvm::Constant * -CodeGenFunction::GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D, - const ArrayType *Array, - llvm::Value *This) { - FunctionArgList Args; - ImplicitParamDecl *Dst = - ImplicitParamDecl::Create(getContext(), 0, - SourceLocation(), 0, - getContext().getPointerType(getContext().VoidTy)); - Args.push_back(std::make_pair(Dst, Dst->getType())); - - llvm::SmallString<16> Name; - llvm::raw_svector_ostream(Name) << "__tcf_" << (++UniqueAggrDestructorCount); - QualType R = getContext().VoidTy; - const CGFunctionInfo &FI - = CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo()); - const llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI, false); - llvm::Function *Fn = - llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage, - Name.str(), - &CGM.getModule()); - IdentifierInfo *II = &CGM.getContext().Idents.get(Name.str()); - FunctionDecl *FD = FunctionDecl::Create(getContext(), - getContext().getTranslationUnitDecl(), - SourceLocation(), II, R, 0, - FunctionDecl::Static, - FunctionDecl::None, - false, true); - StartFunction(FD, R, Fn, Args, SourceLocation()); - QualType BaseElementTy = getContext().getBaseElementType(Array); - const llvm::Type *BasePtr = ConvertType(BaseElementTy); - BasePtr = llvm::PointerType::getUnqual(BasePtr); - llvm::Value *BaseAddrPtr = Builder.CreateBitCast(This, BasePtr); - EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr); - FinishFunction(); - llvm::Type *Ptr8Ty = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext), - 0); - llvm::Constant *m = llvm::ConstantExpr::getBitCast(Fn, Ptr8Ty); - return m; -} - - void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type, bool ForVirtualBase, @@ -1160,6 +1109,23 @@ void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD, EmitCXXMemberCall(DD, Callee, ReturnValueSlot(), This, VTT, 0, 0); } +void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) { + CXXRecordDecl *ClassDecl = 
T->getAsCXXRecordDecl(); + if (!ClassDecl) return; + if (ClassDecl->hasTrivialDestructor()) return; + + const CXXDestructorDecl *D = ClassDecl->getDestructor(); + + CleanupBlock Scope(*this, NormalCleanup); + + EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false, Addr); + + if (Exceptions) { + Scope.beginEHCleanup(); + EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false, Addr); + } +} + llvm::Value * CodeGenFunction::GetVirtualBaseClassOffset(llvm::Value *This, const CXXRecordDecl *ClassDecl, diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp index c9bcb1b..4e15895 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp @@ -21,7 +21,7 @@ #include "clang/Basic/SourceManager.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/Version.h" -#include "clang/CodeGen/CodeGenOptions.h" +#include "clang/Frontend/CodeGenOptions.h" #include "llvm/Constants.h" #include "llvm/DerivedTypes.h" #include "llvm/Instructions.h" @@ -536,6 +536,19 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method, Context.getPointerType(Context.getTagDeclType(Method->getParent())); llvm::DIType ThisPtrType = DebugFactory.CreateArtificialType(getOrCreateType(ThisPtr, Unit)); + + unsigned Quals = Method->getTypeQualifiers(); + if (Quals & Qualifiers::Const) + ThisPtrType = + DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_const_type, + Unit, "", Unit, + 0, 0, 0, 0, 0, ThisPtrType); + if (Quals & Qualifiers::Volatile) + ThisPtrType = + DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_volatile_type, + Unit, "", Unit, + 0, 0, 0, 0, 0, ThisPtrType); + TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType; Elts.push_back(ThisPtrType); @@ -567,9 +580,9 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method, // Since a single ctor/dtor corresponds to multiple functions, it doesn't // make sense to give a single 
ctor/dtor a linkage name. - MangleBuffer MethodLinkageName; + llvm::StringRef MethodLinkageName; if (!IsCtorOrDtor) - CGM.getMangledName(MethodLinkageName, Method); + MethodLinkageName = CGM.getMangledName(Method); // Get the location for the method. llvm::DIFile MethodDefUnit = getOrCreateFile(Method->getLocation()); @@ -598,7 +611,7 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method, MethodLinkageName, MethodDefUnit, MethodLine, MethodTy, /*isLocalToUnit=*/false, - Method->isThisDeclarationADefinition(), + /* isDefintion=*/ false, Virtuality, VIndex, ContainingType); // Don't cache ctors or dtors since we have to emit multiple functions for @@ -758,22 +771,30 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty, // its members. Finally, we create a descriptor for the complete type (which // may refer to the forward decl if the struct is recursive) and replace all // uses of the forward declaration with the final definition. + llvm::DIDescriptor FDContext = + getContextDescriptor(dyn_cast<Decl>(RD->getDeclContext()), Unit); + + // If this is just a forward declaration, construct an appropriately + // marked node and just return it. + if (!RD->getDefinition()) { + llvm::DICompositeType FwdDecl = + DebugFactory.CreateCompositeType(Tag, FDContext, RD->getName(), + DefUnit, Line, 0, 0, 0, + llvm::DIType::FlagFwdDecl, + llvm::DIType(), llvm::DIArray()); + + return FwdDecl; + } // A RD->getName() is not unique. However, the debug info descriptors // are uniqued so use type name to ensure uniquness. llvm::SmallString<128> FwdDeclName; llvm::raw_svector_ostream(FwdDeclName) << "fwd.type." << FwdDeclCount++; - llvm::DIDescriptor FDContext = - getContextDescriptor(dyn_cast<Decl>(RD->getDeclContext()), Unit); llvm::DICompositeType FwdDecl = DebugFactory.CreateCompositeType(Tag, FDContext, FwdDeclName, DefUnit, Line, 0, 0, 0, 0, llvm::DIType(), llvm::DIArray()); - // If this is just a forward declaration, return it. 
- if (!RD->getDefinition()) - return FwdDecl; - llvm::MDNode *MN = FwdDecl; llvm::TrackingVH<llvm::MDNode> FwdDeclNode = MN; // Otherwise, insert it into the TypeCache so that recursive uses will find @@ -1289,7 +1310,7 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType, CGBuilderTy &Builder) { llvm::StringRef Name; - MangleBuffer LinkageName; + llvm::StringRef LinkageName; const Decl *D = GD.getDecl(); if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { @@ -1307,11 +1328,11 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType, } Name = getFunctionName(FD); // Use mangled name as linkage name for c/c++ functions. - CGM.getMangledName(LinkageName, GD); + LinkageName = CGM.getMangledName(GD); } else { // Use llvm function name as linkage name. Name = Fn->getName(); - LinkageName.setString(Name); + LinkageName = Name; } if (!Name.empty() && Name[0] == '\01') Name = Name.substr(1); @@ -1477,7 +1498,7 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag, llvm::DIVariable D = DebugFactory.CreateVariable(Tag, llvm::DIDescriptor(RegionStack.back()), VD->getName(), - Unit, Line, Ty); + Unit, Line, Ty, CGM.getLangOptions().Optimize); // Insert an llvm.dbg.declare into the current block. 
llvm::Instruction *Call = DebugFactory.InsertDeclare(Storage, D, Builder.GetInsertBlock()); diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp index 07edca0..1a62ea9 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp @@ -20,7 +20,7 @@ #include "clang/AST/DeclObjC.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/TargetInfo.h" -#include "clang/CodeGen/CodeGenOptions.h" +#include "clang/Frontend/CodeGenOptions.h" #include "llvm/GlobalVariable.h" #include "llvm/Intrinsics.h" #include "llvm/Target/TargetData.h" @@ -38,7 +38,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) { case Decl::ClassTemplatePartialSpecialization: case Decl::TemplateTypeParm: case Decl::UnresolvedUsingValue: - case Decl::NonTypeTemplateParm: + case Decl::NonTypeTemplateParm: case Decl::CXXMethod: case Decl::CXXConstructor: case Decl::CXXDestructor: @@ -59,6 +59,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) { case Decl::ObjCImplementation: case Decl::ObjCProperty: case Decl::ObjCCompatibleAlias: + case Decl::AccessSpec: case Decl::LinkageSpec: case Decl::ObjCPropertyImpl: case Decl::ObjCClass: @@ -138,16 +139,14 @@ static std::string GetStaticDeclName(CodeGenFunction &CGF, const VarDecl &D, const char *Separator) { CodeGenModule &CGM = CGF.CGM; if (CGF.getContext().getLangOptions().CPlusPlus) { - MangleBuffer Name; - CGM.getMangledName(Name, &D); - return Name.getString().str(); + llvm::StringRef Name = CGM.getMangledName(&D); + return Name.str(); } std::string ContextName; if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CGF.CurFuncDecl)) { - MangleBuffer Name; - CGM.getMangledName(Name, FD); - ContextName = Name.getString().str(); + llvm::StringRef Name = CGM.getMangledName(FD); + ContextName = Name.str(); } else if (isa<ObjCMethodDecl>(CGF.CurFuncDecl)) ContextName = CGF.CurFn->getName(); else @@ -328,10 +327,10 @@ const llvm::Type 
*CodeGenFunction::BuildByRefType(const ValueDecl *D) { Types.push_back(llvm::PointerType::getUnqual(ByRefTypeHolder)); // int32_t __flags; - Types.push_back(llvm::Type::getInt32Ty(VMContext)); + Types.push_back(Int32Ty); // int32_t __size; - Types.push_back(llvm::Type::getInt32Ty(VMContext)); + Types.push_back(Int32Ty); bool HasCopyAndDispose = BlockRequiresCopying(Ty); if (HasCopyAndDispose) { @@ -389,10 +388,63 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) { return Info.first; } +namespace { + struct CallArrayDtor : EHScopeStack::LazyCleanup { + CallArrayDtor(const CXXDestructorDecl *Dtor, + const ConstantArrayType *Type, + llvm::Value *Loc) + : Dtor(Dtor), Type(Type), Loc(Loc) {} + + const CXXDestructorDecl *Dtor; + const ConstantArrayType *Type; + llvm::Value *Loc; + + void Emit(CodeGenFunction &CGF, bool IsForEH) { + QualType BaseElementTy = CGF.getContext().getBaseElementType(Type); + const llvm::Type *BasePtr = CGF.ConvertType(BaseElementTy); + BasePtr = llvm::PointerType::getUnqual(BasePtr); + llvm::Value *BaseAddrPtr = CGF.Builder.CreateBitCast(Loc, BasePtr); + CGF.EmitCXXAggrDestructorCall(Dtor, Type, BaseAddrPtr); + } + }; + + struct CallVarDtor : EHScopeStack::LazyCleanup { + CallVarDtor(const CXXDestructorDecl *Dtor, + llvm::Value *NRVOFlag, + llvm::Value *Loc) + : Dtor(Dtor), NRVOFlag(NRVOFlag), Loc(Loc) {} + + const CXXDestructorDecl *Dtor; + llvm::Value *NRVOFlag; + llvm::Value *Loc; + + void Emit(CodeGenFunction &CGF, bool IsForEH) { + // Along the exceptions path we always execute the dtor. + bool NRVO = !IsForEH && NRVOFlag; + + llvm::BasicBlock *SkipDtorBB = 0; + if (NRVO) { + // If we exited via NRVO, we skip the destructor call. 
+ llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused"); + SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor"); + llvm::Value *DidNRVO = CGF.Builder.CreateLoad(NRVOFlag, "nrvo.val"); + CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB); + CGF.EmitBlock(RunDtorBB); + } + + CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, + /*ForVirtualBase=*/false, Loc); + + if (NRVO) CGF.EmitBlock(SkipDtorBB); + } + }; +} + /// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a /// variable declaration with auto, register, or no storage class specifier. /// These turn into simple stack objects, or GlobalValues depending on target. -void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) { +void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D, + SpecialInitFn *SpecialInit) { QualType Ty = D.getType(); bool isByRef = D.hasAttr<BlocksAttr>(); bool needsDispose = false; @@ -490,7 +542,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) { { // Push a cleanup block and restore the stack there. - DelayedCleanupBlock scope(*this); + CleanupBlock scope(*this, NormalCleanup); V = Builder.CreateLoad(Stack, "tmp"); llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore); @@ -505,10 +557,6 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) { llvm::Value *VLASize = EmitVLASize(Ty); - // Downcast the VLA size expression - VLASize = Builder.CreateIntCast(VLASize, llvm::Type::getInt32Ty(VMContext), - false, "tmp"); - // Allocate memory for the array. 
llvm::AllocaInst *VLA = Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), VLASize, "vla"); @@ -573,18 +621,18 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) { int isa = 0; if (flag&BLOCK_FIELD_IS_WEAK) isa = 1; - V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), isa); + V = llvm::ConstantInt::get(Int32Ty, isa); V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa"); Builder.CreateStore(V, isa_field); Builder.CreateStore(DeclPtr, forwarding_field); - V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), flags); + V = llvm::ConstantInt::get(Int32Ty, flags); Builder.CreateStore(V, flags_field); const llvm::Type *V1; V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType(); - V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), + V = llvm::ConstantInt::get(Int32Ty, CGM.GetTargetTypeStoreSize(V1).getQuantity()); Builder.CreateStore(V, size_field); @@ -602,7 +650,9 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) { } } - if (Init) { + if (SpecialInit) { + SpecialInit(*this, D, DeclPtr); + } else if (Init) { llvm::Value *Loc = DeclPtr; if (isByRef) Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D), @@ -618,8 +668,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) { assert(Init != 0 && "Wasn't a simple constant init?"); llvm::Value *AlignVal = - llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), - Align.getQuantity()); + llvm::ConstantInt::get(Int32Ty, Align.getQuantity()); const llvm::Type *IntPtr = llvm::IntegerType::get(VMContext, LLVMPointerWidth); llvm::Value *SizeVal = @@ -658,7 +707,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) { Loc, SrcPtr, SizeVal, AlignVal, NotVolatile); } } else if (Ty->isReferenceType()) { - RValue RV = EmitReferenceBindingToExpr(Init, /*IsInitializer=*/true); + RValue RV = EmitReferenceBindingToExpr(Init, &D); EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Ty); } else if 
(!hasAggregateLLVMType(Init->getType())) { llvm::Value *V = EmitScalarExpr(Init); @@ -669,7 +718,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) { EmitAggExpr(Init, Loc, isVolatile); } } - + // Handle CXX destruction of variables. QualType DtorTy(Ty); while (const ArrayType *Array = getContext().getAsArrayType(DtorTy)) @@ -684,60 +733,16 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) { Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D), D.getNameAsString()); - const CXXDestructorDecl *D = ClassDecl->getDestructor(getContext()); + const CXXDestructorDecl *D = ClassDecl->getDestructor(); assert(D && "EmitLocalBlockVarDecl - destructor is nul"); if (const ConstantArrayType *Array = getContext().getAsConstantArrayType(Ty)) { - { - DelayedCleanupBlock Scope(*this); - QualType BaseElementTy = getContext().getBaseElementType(Array); - const llvm::Type *BasePtr = ConvertType(BaseElementTy); - BasePtr = llvm::PointerType::getUnqual(BasePtr); - llvm::Value *BaseAddrPtr = - Builder.CreateBitCast(Loc, BasePtr); - EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr); - - // Make sure to jump to the exit block. - EmitBranch(Scope.getCleanupExitBlock()); - } - if (Exceptions) { - EHCleanupBlock Cleanup(*this); - QualType BaseElementTy = getContext().getBaseElementType(Array); - const llvm::Type *BasePtr = ConvertType(BaseElementTy); - BasePtr = llvm::PointerType::getUnqual(BasePtr); - llvm::Value *BaseAddrPtr = - Builder.CreateBitCast(Loc, BasePtr); - EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr); - } + EHStack.pushLazyCleanup<CallArrayDtor>(NormalAndEHCleanup, + D, Array, Loc); } else { - { - // Normal destruction. - DelayedCleanupBlock Scope(*this); - - if (NRVO) { - // If we exited via NRVO, we skip the destructor call. 
- llvm::BasicBlock *NoNRVO = createBasicBlock("nrvo.unused"); - Builder.CreateCondBr(Builder.CreateLoad(NRVOFlag, "nrvo.val"), - Scope.getCleanupExitBlock(), - NoNRVO); - EmitBlock(NoNRVO); - } - - // We don't call the destructor along the normal edge if we're - // applying the NRVO. - EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false, - Loc); - - // Make sure to jump to the exit block. - EmitBranch(Scope.getCleanupExitBlock()); - } - - if (Exceptions) { - EHCleanupBlock Cleanup(*this); - EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false, - Loc); - } + EHStack.pushLazyCleanup<CallVarDtor>(NormalAndEHCleanup, + D, NRVOFlag, Loc); } } } @@ -758,17 +763,19 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) { // // To fix this we insert a bitcast here. QualType ArgTy = Info.arg_begin()->type; - { - DelayedCleanupBlock scope(*this); - CallArgList Args; - Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr, - ConvertType(ArgTy))), - getContext().getPointerType(D.getType()))); - EmitCall(Info, F, ReturnValueSlot(), Args); - } + CleanupBlock CleanupScope(*this, NormalCleanup); + + // Normal cleanup. + CallArgList Args; + Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr, + ConvertType(ArgTy))), + getContext().getPointerType(D.getType()))); + EmitCall(Info, F, ReturnValueSlot(), Args); + + // EH cleanup. 
if (Exceptions) { - EHCleanupBlock Cleanup(*this); + CleanupScope.beginEHCleanup(); CallArgList Args; Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr, @@ -779,15 +786,16 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) { } if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly) { - { - DelayedCleanupBlock scope(*this); - llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding"); - V = Builder.CreateLoad(V); - BuildBlockRelease(V); - } + CleanupBlock CleanupScope(*this, NormalCleanup); + + llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding"); + V = Builder.CreateLoad(V); + BuildBlockRelease(V); + // FIXME: Turn this on and audit the codegen if (0 && Exceptions) { - EHCleanupBlock Cleanup(*this); + CleanupScope.beginEHCleanup(); + llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding"); V = Builder.CreateLoad(V); BuildBlockRelease(V); diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp index f94ddd9..ec3f386 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp @@ -12,7 +12,7 @@ //===----------------------------------------------------------------------===// #include "CodeGenFunction.h" -#include "clang/CodeGen/CodeGenOptions.h" +#include "clang/Frontend/CodeGenOptions.h" #include "llvm/Intrinsics.h" using namespace clang; @@ -66,16 +66,15 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D, if (RD->hasTrivialDestructor()) return; - CXXDestructorDecl *Dtor = RD->getDestructor(Context); + CXXDestructorDecl *Dtor = RD->getDestructor(); llvm::Constant *DtorFn; if (Array) { DtorFn = - CodeGenFunction(CGM).GenerateCXXAggrDestructorHelper(Dtor, - Array, - DeclPtr); + CodeGenFunction(CGM).GenerateCXXAggrDestructorHelper(Dtor, Array, + DeclPtr); const llvm::Type *Int8PtrTy = - llvm::Type::getInt8PtrTy(CGM.getLLVMContext()); + 
llvm::Type::getInt8PtrTy(CGM.getLLVMContext()); DeclPtr = llvm::Constant::getNullValue(Int8PtrTy); } else DtorFn = CGM.GetAddrOfCXXDestructor(Dtor, Dtor_Complete); @@ -94,13 +93,9 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D, EmitDeclDestroy(*this, D, DeclPtr); return; } - if (Init->isLvalue(getContext()) == Expr::LV_Valid) { - RValue RV = EmitReferenceBindingToExpr(Init, /*IsInitializer=*/true); - EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, T); - return; - } - ErrorUnsupported(Init, - "global variable that binds reference to a non-lvalue"); + + RValue RV = EmitReferenceBindingToExpr(Init, &D); + EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, T); } void @@ -144,6 +139,25 @@ CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn, Builder.CreateCall(AtExitFn, &Args[0], llvm::array_endof(Args)); } +static llvm::Function * +CreateGlobalInitOrDestructFunction(CodeGenModule &CGM, + const llvm::FunctionType *FTy, + llvm::StringRef Name) { + llvm::Function *Fn = + llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage, + Name, &CGM.getModule()); + + // Set the section if needed. + if (const char *Section = + CGM.getContext().Target.getStaticInitSectionSpecifier()) + Fn->setSection(Section); + + if (!CGM.getLangOptions().Exceptions) + Fn->setDoesNotThrow(); + + return Fn; +} + void CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D) { const llvm::FunctionType *FTy @@ -152,17 +166,22 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D) { // Create a variable initialization function. 
llvm::Function *Fn = - llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage, - "__cxx_global_var_init", &TheModule); + CreateGlobalInitOrDestructFunction(*this, FTy, "__cxx_global_var_init"); CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D); - CXXGlobalInits.push_back(Fn); + if (D->hasAttr<InitPriorityAttr>()) { + unsigned int order = D->getAttr<InitPriorityAttr>()->getPriority(); + OrderGlobalInits Key(order, PrioritizedCXXGlobalInits.size()); + PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn)); + } + else + CXXGlobalInits.push_back(Fn); } void CodeGenModule::EmitCXXGlobalInitFunc() { - if (CXXGlobalInits.empty()) + if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty()) return; const llvm::FunctionType *FTy @@ -170,21 +189,30 @@ CodeGenModule::EmitCXXGlobalInitFunc() { false); // Create our global initialization function. - llvm::Function *Fn = - llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage, - "_GLOBAL__I_a", &TheModule); - - CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, - &CXXGlobalInits[0], - CXXGlobalInits.size()); + llvm::Function *Fn = + CreateGlobalInitOrDestructFunction(*this, FTy, "_GLOBAL__I_a"); + + if (!PrioritizedCXXGlobalInits.empty()) { + llvm::SmallVector<llvm::Constant*, 8> LocalCXXGlobalInits; + llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(), + PrioritizedCXXGlobalInits.end()); + for (unsigned i = 0; i < PrioritizedCXXGlobalInits.size(); i++) { + llvm::Function *Fn = PrioritizedCXXGlobalInits[i].second; + LocalCXXGlobalInits.push_back(Fn); + } + for (unsigned i = 0; i < CXXGlobalInits.size(); i++) + LocalCXXGlobalInits.push_back(CXXGlobalInits[i]); + CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, + &LocalCXXGlobalInits[0], + LocalCXXGlobalInits.size()); + } + else + CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, + &CXXGlobalInits[0], + CXXGlobalInits.size()); AddGlobalCtor(Fn); } -void CodeGenModule::AddCXXDtorEntry(llvm::Constant *DtorFn, - llvm::Constant 
*Object) { - CXXGlobalDtors.push_back(std::make_pair(DtorFn, Object)); -} - void CodeGenModule::EmitCXXGlobalDtorFunc() { if (CXXGlobalDtors.empty()) return; @@ -195,8 +223,7 @@ void CodeGenModule::EmitCXXGlobalDtorFunc() { // Create our global destructor function. llvm::Function *Fn = - llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage, - "_GLOBAL__D_a", &TheModule); + CreateGlobalInitOrDestructFunction(*this, FTy, "_GLOBAL__D_a"); CodeGenFunction(*this).GenerateCXXGlobalDtorFunc(Fn, CXXGlobalDtors); AddGlobalDtor(Fn); @@ -226,14 +253,14 @@ void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn, } void CodeGenFunction::GenerateCXXGlobalDtorFunc(llvm::Function *Fn, - const std::vector<std::pair<llvm::Constant*, llvm::Constant*> > + const std::vector<std::pair<llvm::WeakVH, llvm::Constant*> > &DtorsAndObjects) { StartFunction(GlobalDecl(), getContext().VoidTy, Fn, FunctionArgList(), SourceLocation()); // Emit the dtors, in reverse order from construction. for (unsigned i = 0, e = DtorsAndObjects.size(); i != e; ++i) { - llvm::Constant *Callee = DtorsAndObjects[e - i - 1].first; + llvm::Value *Callee = DtorsAndObjects[e - i - 1].first; llvm::CallInst *CI = Builder.CreateCall(Callee, DtorsAndObjects[e - i - 1].second); // Make sure the call and the callee agree on calling convention. @@ -301,7 +328,6 @@ CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D, CGM.getMangleContext().mangleGuardVariable(&D, GuardVName); // Create the guard variable. - const llvm::Type *Int64Ty = llvm::Type::getInt64Ty(VMContext); llvm::GlobalValue *GuardVariable = new llvm::GlobalVariable(CGM.getModule(), Int64Ty, false, GV->getLinkage(), @@ -324,8 +350,6 @@ CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D, EmitBlock(InitCheckBlock); // Variables used when coping with thread-safe statics and exceptions. - llvm::BasicBlock *SavedLandingPad = 0; - llvm::BasicBlock *LandingPad = 0; if (ThreadsafeStatics) { // Call __cxa_guard_acquire. 
V = Builder.CreateCall(getGuardAcquireFn(*this), GuardVariable); @@ -335,10 +359,10 @@ CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D, Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"), InitBlock, EndBlock); + // Call __cxa_guard_abort along the exceptional edge. if (Exceptions) { - SavedLandingPad = getInvokeDest(); - LandingPad = createBasicBlock("guard.lpad"); - setInvokeDest(LandingPad); + CleanupBlock Cleanup(*this, EHCleanup); + Builder.CreateCall(getGuardAbortFn(*this), GuardVariable); } EmitBlock(InitBlock); @@ -346,17 +370,14 @@ CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D, if (D.getType()->isReferenceType()) { QualType T = D.getType(); - // We don't want to pass true for IsInitializer here, because a static - // reference to a temporary does not extend its lifetime. - RValue RV = EmitReferenceBindingToExpr(D.getInit(), - /*IsInitializer=*/false); + RValue RV = EmitReferenceBindingToExpr(D.getInit(), &D); EmitStoreOfScalar(RV.getScalarVal(), GV, /*Volatile=*/false, T); } else EmitDeclInit(*this, D, GV); if (ThreadsafeStatics) { - // Call __cxa_guard_release. + // Call __cxa_guard_release. This cannot throw. Builder.CreateCall(getGuardReleaseFn(*this), GuardVariable); } else { llvm::Value *One = @@ -368,57 +389,39 @@ CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D, if (!D.getType()->isReferenceType()) EmitDeclDestroy(*this, D, GV); - if (ThreadsafeStatics && Exceptions) { - // If an exception is thrown during initialization, call __cxa_guard_abort - // along the exceptional edge. - EmitBranch(EndBlock); - - // Construct the landing pad. - EmitBlock(LandingPad); - - // Personality function and LLVM intrinsics. 
- llvm::Constant *Personality = - CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty - (VMContext), - true), - "__gxx_personality_v0"); - Personality = llvm::ConstantExpr::getBitCast(Personality, PtrToInt8Ty); - llvm::Value *llvm_eh_exception = - CGM.getIntrinsic(llvm::Intrinsic::eh_exception); - llvm::Value *llvm_eh_selector = - CGM.getIntrinsic(llvm::Intrinsic::eh_selector); - - // Exception object - llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc"); - llvm::Value *RethrowPtr = CreateTempAlloca(Exc->getType(), "_rethrow"); - - // Call the selector function. - const llvm::PointerType *PtrToInt8Ty - = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); - llvm::Constant *Null = llvm::ConstantPointerNull::get(PtrToInt8Ty); - llvm::Value* SelectorArgs[3] = { Exc, Personality, Null }; - Builder.CreateCall(llvm_eh_selector, SelectorArgs, SelectorArgs + 3, - "selector"); - Builder.CreateStore(Exc, RethrowPtr); - - // Call __cxa_guard_abort along the exceptional edge. - Builder.CreateCall(getGuardAbortFn(*this), GuardVariable); - - setInvokeDest(SavedLandingPad); - - // Rethrow the current exception. - if (getInvokeDest()) { - llvm::BasicBlock *Cont = createBasicBlock("invoke.cont"); - Builder.CreateInvoke(getUnwindResumeOrRethrowFn(), Cont, - getInvokeDest(), - Builder.CreateLoad(RethrowPtr)); - EmitBlock(Cont); - } else - Builder.CreateCall(getUnwindResumeOrRethrowFn(), - Builder.CreateLoad(RethrowPtr)); - - Builder.CreateUnreachable(); - } - EmitBlock(EndBlock); } + +/// GenerateCXXAggrDestructorHelper - Generates a helper function which when +/// invoked, calls the default destructor on array elements in reverse order of +/// construction. 
+llvm::Function * +CodeGenFunction::GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D, + const ArrayType *Array, + llvm::Value *This) { + FunctionArgList Args; + ImplicitParamDecl *Dst = + ImplicitParamDecl::Create(getContext(), 0, + SourceLocation(), 0, + getContext().getPointerType(getContext().VoidTy)); + Args.push_back(std::make_pair(Dst, Dst->getType())); + + const CGFunctionInfo &FI = + CGM.getTypes().getFunctionInfo(getContext().VoidTy, Args, + FunctionType::ExtInfo()); + const llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI, false); + llvm::Function *Fn = + CreateGlobalInitOrDestructFunction(CGM, FTy, "__cxx_global_array_dtor"); + + StartFunction(GlobalDecl(), getContext().VoidTy, Fn, Args, SourceLocation()); + + QualType BaseElementTy = getContext().getBaseElementType(Array); + const llvm::Type *BasePtr = ConvertType(BaseElementTy)->getPointerTo(); + llvm::Value *BaseAddrPtr = Builder.CreateBitCast(This, BasePtr); + + EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr); + + FinishFunction(); + + return Fn; +} diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp index ddc1c77..4980aad 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp @@ -14,11 +14,194 @@ #include "clang/AST/StmtCXX.h" #include "llvm/Intrinsics.h" +#include "llvm/Support/CallSite.h" #include "CodeGenFunction.h" +#include "CGException.h" + using namespace clang; using namespace CodeGen; +/// Push an entry of the given size onto this protected-scope stack. 
+char *EHScopeStack::allocate(size_t Size) { + if (!StartOfBuffer) { + unsigned Capacity = 1024; + while (Capacity < Size) Capacity *= 2; + StartOfBuffer = new char[Capacity]; + StartOfData = EndOfBuffer = StartOfBuffer + Capacity; + } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) { + unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer; + unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer); + + unsigned NewCapacity = CurrentCapacity; + do { + NewCapacity *= 2; + } while (NewCapacity < UsedCapacity + Size); + + char *NewStartOfBuffer = new char[NewCapacity]; + char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity; + char *NewStartOfData = NewEndOfBuffer - UsedCapacity; + memcpy(NewStartOfData, StartOfData, UsedCapacity); + delete [] StartOfBuffer; + StartOfBuffer = NewStartOfBuffer; + EndOfBuffer = NewEndOfBuffer; + StartOfData = NewStartOfData; + } + + assert(StartOfBuffer + Size <= StartOfData); + StartOfData -= Size; + return StartOfData; +} + +EHScopeStack::stable_iterator +EHScopeStack::getEnclosingEHCleanup(iterator it) const { + assert(it != end()); + do { + if (isa<EHCleanupScope>(*it)) { + if (cast<EHCleanupScope>(*it).isEHCleanup()) + return stabilize(it); + return cast<EHCleanupScope>(*it).getEnclosingEHCleanup(); + } + if (isa<EHLazyCleanupScope>(*it)) { + if (cast<EHLazyCleanupScope>(*it).isEHCleanup()) + return stabilize(it); + return cast<EHLazyCleanupScope>(*it).getEnclosingEHCleanup(); + } + ++it; + } while (it != end()); + return stable_end(); +} + + +void *EHScopeStack::pushLazyCleanup(CleanupKind Kind, size_t Size) { + assert(((Size % sizeof(void*)) == 0) && "cleanup type is misaligned"); + char *Buffer = allocate(EHLazyCleanupScope::getSizeForCleanupSize(Size)); + bool IsNormalCleanup = Kind != EHCleanup; + bool IsEHCleanup = Kind != NormalCleanup; + EHLazyCleanupScope *Scope = + new (Buffer) EHLazyCleanupScope(IsNormalCleanup, + IsEHCleanup, + Size, + BranchFixups.size(), + InnermostNormalCleanup, + 
InnermostEHCleanup); + if (IsNormalCleanup) + InnermostNormalCleanup = stable_begin(); + if (IsEHCleanup) + InnermostEHCleanup = stable_begin(); + + return Scope->getCleanupBuffer(); +} + +void EHScopeStack::pushCleanup(llvm::BasicBlock *NormalEntry, + llvm::BasicBlock *NormalExit, + llvm::BasicBlock *EHEntry, + llvm::BasicBlock *EHExit) { + char *Buffer = allocate(EHCleanupScope::getSize()); + new (Buffer) EHCleanupScope(BranchFixups.size(), + InnermostNormalCleanup, + InnermostEHCleanup, + NormalEntry, NormalExit, EHEntry, EHExit); + if (NormalEntry) + InnermostNormalCleanup = stable_begin(); + if (EHEntry) + InnermostEHCleanup = stable_begin(); +} + +void EHScopeStack::popCleanup() { + assert(!empty() && "popping exception stack when not empty"); + + if (isa<EHLazyCleanupScope>(*begin())) { + EHLazyCleanupScope &Cleanup = cast<EHLazyCleanupScope>(*begin()); + InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup(); + InnermostEHCleanup = Cleanup.getEnclosingEHCleanup(); + StartOfData += Cleanup.getAllocatedSize(); + } else { + assert(isa<EHCleanupScope>(*begin())); + EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin()); + InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup(); + InnermostEHCleanup = Cleanup.getEnclosingEHCleanup(); + StartOfData += EHCleanupScope::getSize(); + } + + // Check whether we can shrink the branch-fixups stack. + if (!BranchFixups.empty()) { + // If we no longer have any normal cleanups, all the fixups are + // complete. + if (!hasNormalCleanups()) + BranchFixups.clear(); + + // Otherwise we can still trim out unnecessary nulls. 
+ else + popNullFixups(); + } +} + +EHFilterScope *EHScopeStack::pushFilter(unsigned NumFilters) { + char *Buffer = allocate(EHFilterScope::getSizeForNumFilters(NumFilters)); + CatchDepth++; + return new (Buffer) EHFilterScope(NumFilters); +} + +void EHScopeStack::popFilter() { + assert(!empty() && "popping exception stack when not empty"); + + EHFilterScope &Filter = cast<EHFilterScope>(*begin()); + StartOfData += EHFilterScope::getSizeForNumFilters(Filter.getNumFilters()); + + assert(CatchDepth > 0 && "mismatched filter push/pop"); + CatchDepth--; +} + +EHCatchScope *EHScopeStack::pushCatch(unsigned NumHandlers) { + char *Buffer = allocate(EHCatchScope::getSizeForNumHandlers(NumHandlers)); + CatchDepth++; + return new (Buffer) EHCatchScope(NumHandlers); +} + +void EHScopeStack::pushTerminate() { + char *Buffer = allocate(EHTerminateScope::getSize()); + CatchDepth++; + new (Buffer) EHTerminateScope(); +} + +/// Remove any 'null' fixups on the stack. However, we can't pop more +/// fixups than the fixup depth on the innermost normal cleanup, or +/// else fixups that we try to add to that cleanup will end up in the +/// wrong place. We *could* try to shrink fixup depths, but that's +/// actually a lot of work for little benefit. +void EHScopeStack::popNullFixups() { + // We expect this to only be called when there's still an innermost + // normal cleanup; otherwise there really shouldn't be any fixups. 
+ assert(hasNormalCleanups()); + + EHScopeStack::iterator it = find(InnermostNormalCleanup); + unsigned MinSize; + if (isa<EHCleanupScope>(*it)) + MinSize = cast<EHCleanupScope>(*it).getFixupDepth(); + else + MinSize = cast<EHLazyCleanupScope>(*it).getFixupDepth(); + assert(BranchFixups.size() >= MinSize && "fixup stack out of order"); + + while (BranchFixups.size() > MinSize && + BranchFixups.back().Destination == 0) + BranchFixups.pop_back(); +} + +void EHScopeStack::resolveBranchFixups(llvm::BasicBlock *Dest) { + assert(Dest && "null block passed to resolveBranchFixups"); + + if (BranchFixups.empty()) return; + assert(hasNormalCleanups() && + "branch fixups exist with no normal cleanups on stack"); + + for (unsigned I = 0, E = BranchFixups.size(); I != E; ++I) + if (BranchFixups[I].Destination == Dest) + BranchFixups[I].Destination = 0; + + popNullFixups(); +} + static llvm::Constant *getAllocateExceptionFn(CodeGenFunction &CGF) { // void *__cxa_allocate_exception(size_t thrown_size); const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType()); @@ -66,8 +249,19 @@ static llvm::Constant *getReThrowFn(CodeGenFunction &CGF) { return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow"); } +static llvm::Constant *getGetExceptionPtrFn(CodeGenFunction &CGF) { + // void *__cxa_get_exception_ptr(void*); + const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); + std::vector<const llvm::Type*> Args(1, Int8PtrTy); + + const llvm::FunctionType *FTy = + llvm::FunctionType::get(Int8PtrTy, Args, false); + + return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr"); +} + static llvm::Constant *getBeginCatchFn(CodeGenFunction &CGF) { - // void* __cxa_begin_catch(); + // void *__cxa_begin_catch(void*); const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); std::vector<const llvm::Type*> Args(1, Int8PtrTy); @@ -123,25 +317,114 @@ static llvm::Constant *getTerminateFn(CodeGenFunction &CGF) { 
CGF.CGM.getLangOptions().CPlusPlus ? "_ZSt9terminatev" : "abort"); } -static llvm::Constant *getPersonalityFn(CodeGenModule &CGM) { - const char *PersonalityFnName = "__gcc_personality_v0"; - LangOptions Opts = CGM.getLangOptions(); - if (Opts.CPlusPlus) - PersonalityFnName = "__gxx_personality_v0"; - else if (Opts.ObjC1) { - if (Opts.NeXTRuntime) { - if (Opts.ObjCNonFragileABI) - PersonalityFnName = "__gcc_personality_v0"; - } else - PersonalityFnName = "__gnu_objc_personality_v0"; +static const char *getCPersonalityFn(CodeGenFunction &CGF) { + return "__gcc_personality_v0"; +} + +static const char *getObjCPersonalityFn(CodeGenFunction &CGF) { + if (CGF.CGM.getLangOptions().NeXTRuntime) { + if (CGF.CGM.getLangOptions().ObjCNonFragileABI) + return "__objc_personality_v0"; + else + return getCPersonalityFn(CGF); + } else { + return "__gnu_objc_personality_v0"; } +} + +static const char *getCXXPersonalityFn(CodeGenFunction &CGF) { + if (CGF.CGM.getLangOptions().SjLjExceptions) + return "__gxx_personality_sj0"; + else + return "__gxx_personality_v0"; +} + +/// Determines the personality function to use when both C++ +/// and Objective-C exceptions are being caught. +static const char *getObjCXXPersonalityFn(CodeGenFunction &CGF) { + // The ObjC personality defers to the C++ personality for non-ObjC + // handlers. Unlike the C++ case, we use the same personality + // function on targets using (backend-driven) SJLJ EH. + if (CGF.CGM.getLangOptions().NeXTRuntime) { + if (CGF.CGM.getLangOptions().ObjCNonFragileABI) + return "__objc_personality_v0"; + + // In the fragile ABI, just use C++ exception handling and hope + // they're not doing crazy exception mixing. + else + return getCXXPersonalityFn(CGF); + } + + // I'm pretty sure the GNU runtime doesn't support mixed EH. + // TODO: we don't necessarily need mixed EH here; remember what + // kind of exceptions we actually try to catch in this function. 
+ CGF.CGM.ErrorUnsupported(CGF.CurCodeDecl, + "the GNU Objective C runtime does not support " + "catching C++ and Objective C exceptions in the " + "same function"); + // Use the C++ personality just to avoid returning null. + return getCXXPersonalityFn(CGF); +} + +static llvm::Constant *getPersonalityFn(CodeGenFunction &CGF) { + const char *Name; + const LangOptions &Opts = CGF.CGM.getLangOptions(); + if (Opts.CPlusPlus && Opts.ObjC1) + Name = getObjCXXPersonalityFn(CGF); + else if (Opts.CPlusPlus) + Name = getCXXPersonalityFn(CGF); + else if (Opts.ObjC1) + Name = getObjCPersonalityFn(CGF); + else + Name = getCPersonalityFn(CGF); llvm::Constant *Personality = - CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty( - CGM.getLLVMContext()), - true), - PersonalityFnName); - return llvm::ConstantExpr::getBitCast(Personality, CGM.PtrToInt8Ty); + CGF.CGM.CreateRuntimeFunction(llvm::FunctionType::get( + llvm::Type::getInt32Ty( + CGF.CGM.getLLVMContext()), + true), + Name); + return llvm::ConstantExpr::getBitCast(Personality, CGF.CGM.PtrToInt8Ty); +} + +/// Returns the value to inject into a selector to indicate the +/// presence of a catch-all. +static llvm::Constant *getCatchAllValue(CodeGenFunction &CGF) { + // Possibly we should use @llvm.eh.catch.all.value here. + return llvm::ConstantPointerNull::get(CGF.CGM.PtrToInt8Ty); +} + +/// Returns the value to inject into a selector to indicate the +/// presence of a cleanup. +static llvm::Constant *getCleanupValue(CodeGenFunction &CGF) { + return llvm::ConstantInt::get(CGF.Builder.getInt32Ty(), 0); +} + +namespace { + /// A cleanup to free the exception object if its initialization + /// throws. 
+ struct FreeExceptionCleanup : EHScopeStack::LazyCleanup { + FreeExceptionCleanup(llvm::Value *ShouldFreeVar, + llvm::Value *ExnLocVar) + : ShouldFreeVar(ShouldFreeVar), ExnLocVar(ExnLocVar) {} + + llvm::Value *ShouldFreeVar; + llvm::Value *ExnLocVar; + + void Emit(CodeGenFunction &CGF, bool IsForEH) { + llvm::BasicBlock *FreeBB = CGF.createBasicBlock("free-exnobj"); + llvm::BasicBlock *DoneBB = CGF.createBasicBlock("free-exnobj.done"); + + llvm::Value *ShouldFree = CGF.Builder.CreateLoad(ShouldFreeVar, + "should-free-exnobj"); + CGF.Builder.CreateCondBr(ShouldFree, FreeBB, DoneBB); + CGF.EmitBlock(FreeBB); + llvm::Value *ExnLocLocal = CGF.Builder.CreateLoad(ExnLocVar, "exnobj"); + CGF.Builder.CreateCall(getFreeExceptionFn(CGF), ExnLocLocal) + ->setDoesNotThrow(); + CGF.EmitBlock(DoneBB); + } + }; } // Emits an exception expression into the given location. This @@ -166,21 +449,14 @@ static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *E, llvm::AllocaInst *ExnLocVar = CGF.CreateTempAlloca(ExnLoc->getType(), "exnobj.var"); - llvm::BasicBlock *SavedInvokeDest = CGF.getInvokeDest(); - { - CodeGenFunction::EHCleanupBlock Cleanup(CGF); - llvm::BasicBlock *FreeBB = CGF.createBasicBlock("free-exnobj"); - llvm::BasicBlock *DoneBB = CGF.createBasicBlock("free-exnobj.done"); - - llvm::Value *ShouldFree = CGF.Builder.CreateLoad(ShouldFreeVar, - "should-free-exnobj"); - CGF.Builder.CreateCondBr(ShouldFree, FreeBB, DoneBB); - CGF.EmitBlock(FreeBB); - llvm::Value *ExnLocLocal = CGF.Builder.CreateLoad(ExnLocVar, "exnobj"); - CGF.Builder.CreateCall(getFreeExceptionFn(CGF), ExnLocLocal); - CGF.EmitBlock(DoneBB); - } - llvm::BasicBlock *Cleanup = CGF.getInvokeDest(); + // Make sure the exception object is cleaned up if there's an + // exception during initialization. + // FIXME: stmt expressions might require this to be a normal + // cleanup, too. 
+ CGF.EHStack.pushLazyCleanup<FreeExceptionCleanup>(EHCleanup, + ShouldFreeVar, + ExnLocVar); + EHScopeStack::stable_iterator Cleanup = CGF.EHStack.stable_begin(); CGF.Builder.CreateStore(ExnLoc, ExnLocVar); CGF.Builder.CreateStore(llvm::ConstantInt::getTrue(CGF.getLLVMContext()), @@ -203,74 +479,38 @@ static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *E, CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(CGF.getLLVMContext()), ShouldFreeVar); - // Pop the cleanup block if it's still the top of the cleanup stack. - // Otherwise, temporaries have been created and our cleanup will get - // properly removed in time. - // TODO: this is not very resilient. - if (CGF.getInvokeDest() == Cleanup) - CGF.setInvokeDest(SavedInvokeDest); -} - -// CopyObject - Utility to copy an object. Calls copy constructor as necessary. -// N is casted to the right type. -static void CopyObject(CodeGenFunction &CGF, QualType ObjectType, - bool WasPointer, bool WasPointerReference, - llvm::Value *E, llvm::Value *N) { - // Store the throw exception in the exception object. 
- if (WasPointer || !CGF.hasAggregateLLVMType(ObjectType)) { - llvm::Value *Value = E; - if (!WasPointer) - Value = CGF.Builder.CreateLoad(Value); - const llvm::Type *ValuePtrTy = Value->getType()->getPointerTo(0); - if (WasPointerReference) { - llvm::Value *Tmp = CGF.CreateTempAlloca(Value->getType(), "catch.param"); - CGF.Builder.CreateStore(Value, Tmp); - Value = Tmp; - ValuePtrTy = Value->getType()->getPointerTo(0); - } - N = CGF.Builder.CreateBitCast(N, ValuePtrTy); - CGF.Builder.CreateStore(Value, N); - } else { - const llvm::Type *Ty = CGF.ConvertType(ObjectType)->getPointerTo(0); - const CXXRecordDecl *RD; - RD = cast<CXXRecordDecl>(ObjectType->getAs<RecordType>()->getDecl()); - llvm::Value *This = CGF.Builder.CreateBitCast(N, Ty); - if (RD->hasTrivialCopyConstructor()) { - CGF.EmitAggregateCopy(This, E, ObjectType); - } else if (CXXConstructorDecl *CopyCtor - = RD->getCopyConstructor(CGF.getContext(), 0)) { - llvm::Value *Src = E; - - // Stolen from EmitClassAggrMemberwiseCopy - llvm::Value *Callee = CGF.CGM.GetAddrOfCXXConstructor(CopyCtor, - Ctor_Complete); - CallArgList CallArgs; - CallArgs.push_back(std::make_pair(RValue::get(This), - CopyCtor->getThisType(CGF.getContext()))); - - // Push the Src ptr. - CallArgs.push_back(std::make_pair(RValue::get(Src), - CopyCtor->getParamDecl(0)->getType())); - - const FunctionProtoType *FPT - = CopyCtor->getType()->getAs<FunctionProtoType>(); - CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(CallArgs, FPT), - Callee, ReturnValueSlot(), CallArgs, CopyCtor); - } else - llvm_unreachable("uncopyable object"); + // Technically, the exception object is like a temporary; it has to + // be cleaned up when its full-expression is complete. + // Unfortunately, the AST represents full-expressions by creating a + // CXXExprWithTemporaries, which it only does when there are actually + // temporaries. 
+ // + // If any cleanups have been added since we pushed ours, they must + // be from temporaries; this will get popped at the same time. + // Otherwise we need to pop ours off. FIXME: this is very brittle. + if (Cleanup == CGF.EHStack.stable_begin()) + CGF.PopCleanupBlock(); +} + +llvm::Value *CodeGenFunction::getExceptionSlot() { + if (!ExceptionSlot) { + const llvm::Type *i8p = llvm::Type::getInt8PtrTy(getLLVMContext()); + ExceptionSlot = CreateTempAlloca(i8p, "exn.slot"); } + return ExceptionSlot; } void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) { if (!E->getSubExpr()) { if (getInvokeDest()) { - llvm::BasicBlock *Cont = createBasicBlock("invoke.cont"); - Builder.CreateInvoke(getReThrowFn(*this), Cont, getInvokeDest()) + Builder.CreateInvoke(getReThrowFn(*this), + getUnreachableBlock(), + getInvokeDest()) ->setDoesNotReturn(); - EmitBlock(Cont); - } else + } else { Builder.CreateCall(getReThrowFn(*this))->setDoesNotReturn(); - Builder.CreateUnreachable(); + Builder.CreateUnreachable(); + } // Clear the insertion point to indicate we are in unreachable code. 
Builder.ClearInsertionPoint(); @@ -284,10 +524,11 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) { uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity(); llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(*this); - llvm::Value *ExceptionPtr = + llvm::CallInst *ExceptionPtr = Builder.CreateCall(AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception"); + ExceptionPtr->setDoesNotThrow(); EmitAnyExprToExn(*this, E->getSubExpr(), ExceptionPtr); @@ -301,7 +542,7 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) { if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) { CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl()); if (!Record->hasTrivialDestructor()) { - CXXDestructorDecl *DtorD = Record->getDestructor(getContext()); + CXXDestructorDecl *DtorD = Record->getDestructor(); Dtor = CGM.GetAddrOfCXXDestructor(DtorD, Dtor_Complete); Dtor = llvm::ConstantExpr::getBitCast(Dtor, Int8PtrTy); } @@ -309,18 +550,17 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) { if (!Dtor) Dtor = llvm::Constant::getNullValue(Int8PtrTy); if (getInvokeDest()) { - llvm::BasicBlock *Cont = createBasicBlock("invoke.cont"); llvm::InvokeInst *ThrowCall = - Builder.CreateInvoke3(getThrowFn(*this), Cont, getInvokeDest(), + Builder.CreateInvoke3(getThrowFn(*this), + getUnreachableBlock(), getInvokeDest(), ExceptionPtr, TypeInfo, Dtor); ThrowCall->setDoesNotReturn(); - EmitBlock(Cont); } else { llvm::CallInst *ThrowCall = Builder.CreateCall3(getThrowFn(*this), ExceptionPtr, TypeInfo, Dtor); ThrowCall->setDoesNotReturn(); + Builder.CreateUnreachable(); } - Builder.CreateUnreachable(); // Clear the insertion point to indicate we are in unreachable code. 
Builder.ClearInsertionPoint(); @@ -346,80 +586,15 @@ void CodeGenFunction::EmitStartEHSpec(const Decl *D) { if (!Proto->hasExceptionSpec()) return; - llvm::Constant *Personality = getPersonalityFn(CGM); - llvm::Value *llvm_eh_exception = - CGM.getIntrinsic(llvm::Intrinsic::eh_exception); - llvm::Value *llvm_eh_selector = - CGM.getIntrinsic(llvm::Intrinsic::eh_selector); - const llvm::IntegerType *Int8Ty; - const llvm::PointerType *PtrToInt8Ty; - Int8Ty = llvm::Type::getInt8Ty(VMContext); - // C string type. Used in lots of places. - PtrToInt8Ty = llvm::PointerType::getUnqual(Int8Ty); - llvm::Constant *Null = llvm::ConstantPointerNull::get(PtrToInt8Ty); - llvm::SmallVector<llvm::Value*, 8> SelectorArgs; - - llvm::BasicBlock *PrevLandingPad = getInvokeDest(); - llvm::BasicBlock *EHSpecHandler = createBasicBlock("ehspec.handler"); - llvm::BasicBlock *Match = createBasicBlock("match"); - llvm::BasicBlock *Unwind = 0; - - assert(PrevLandingPad == 0 && "EHSpec has invoke context"); - (void)PrevLandingPad; - - llvm::BasicBlock *Cont = createBasicBlock("cont"); - - EmitBranchThroughCleanup(Cont); - - // Emit the statements in the try {} block - setInvokeDest(EHSpecHandler); - - EmitBlock(EHSpecHandler); - // Exception object - llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc"); - llvm::Value *RethrowPtr = CreateTempAlloca(Exc->getType(), "_rethrow"); - - SelectorArgs.push_back(Exc); - SelectorArgs.push_back(Personality); - SelectorArgs.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), - Proto->getNumExceptions()+1)); - - for (unsigned i = 0; i < Proto->getNumExceptions(); ++i) { - QualType Ty = Proto->getExceptionType(i); - QualType ExceptType - = Ty.getNonReferenceType().getUnqualifiedType(); - llvm::Value *EHType = CGM.GetAddrOfRTTIDescriptor(ExceptType, true); - SelectorArgs.push_back(EHType); - } - if (Proto->getNumExceptions()) - SelectorArgs.push_back(Null); - - // Find which handler was matched. 
- llvm::Value *Selector - = Builder.CreateCall(llvm_eh_selector, SelectorArgs.begin(), - SelectorArgs.end(), "selector"); - if (Proto->getNumExceptions()) { - Unwind = createBasicBlock("Unwind"); - - Builder.CreateStore(Exc, RethrowPtr); - Builder.CreateCondBr(Builder.CreateICmpSLT(Selector, - llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), - 0)), - Match, Unwind); - - EmitBlock(Match); - } - Builder.CreateCall(getUnexpectedFn(*this), Exc)->setDoesNotReturn(); - Builder.CreateUnreachable(); + unsigned NumExceptions = Proto->getNumExceptions(); + EHFilterScope *Filter = EHStack.pushFilter(NumExceptions); - if (Proto->getNumExceptions()) { - EmitBlock(Unwind); - Builder.CreateCall(getUnwindResumeOrRethrowFn(), - Builder.CreateLoad(RethrowPtr)); - Builder.CreateUnreachable(); + for (unsigned I = 0; I != NumExceptions; ++I) { + QualType Ty = Proto->getExceptionType(I); + QualType ExceptType = Ty.getNonReferenceType().getUnqualifiedType(); + llvm::Value *EHType = CGM.GetAddrOfRTTIDescriptor(ExceptType, true); + Filter->setFilter(I, EHType); } - - EmitBlock(Cont); } void CodeGenFunction::EmitEndEHSpec(const Decl *D) { @@ -436,317 +611,936 @@ void CodeGenFunction::EmitEndEHSpec(const Decl *D) { if (!Proto->hasExceptionSpec()) return; - setInvokeDest(0); + EHStack.popFilter(); } void CodeGenFunction::EmitCXXTryStmt(const CXXTryStmt &S) { - CXXTryStmtInfo Info = EnterCXXTryStmt(S); + EnterCXXTryStmt(S); EmitStmt(S.getTryBlock()); - ExitCXXTryStmt(S, Info); + ExitCXXTryStmt(S); +} + +void CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { + unsigned NumHandlers = S.getNumHandlers(); + EHCatchScope *CatchScope = EHStack.pushCatch(NumHandlers); + + for (unsigned I = 0; I != NumHandlers; ++I) { + const CXXCatchStmt *C = S.getHandler(I); + + llvm::BasicBlock *Handler = createBasicBlock("catch"); + if (C->getExceptionDecl()) { + // FIXME: Dropping the reference type on the type into makes it + // impossible to correctly implement 
catch-by-reference + // semantics for pointers. Unfortunately, this is what all + // existing compilers do, and it's not clear that the standard + // personality routine is capable of doing this right. See C++ DR 388: + // http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#388 + QualType CaughtType = C->getCaughtType(); + CaughtType = CaughtType.getNonReferenceType().getUnqualifiedType(); + llvm::Value *TypeInfo = CGM.GetAddrOfRTTIDescriptor(CaughtType, true); + CatchScope->setHandler(I, TypeInfo, Handler); + } else { + // No exception decl indicates '...', a catch-all. + CatchScope->setCatchAllHandler(I, Handler); + } + } } -CodeGenFunction::CXXTryStmtInfo -CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S) { - CXXTryStmtInfo Info; - Info.SavedLandingPad = getInvokeDest(); - Info.HandlerBlock = createBasicBlock("try.handler"); - Info.FinallyBlock = createBasicBlock("finally"); +/// Check whether this is a non-EH scope, i.e. a scope which doesn't +/// affect exception handling. Currently, the only non-EH scopes are +/// normal-only cleanup scopes. +static bool isNonEHScope(const EHScope &S) { + switch (S.getKind()) { + case EHScope::Cleanup: + return !cast<EHCleanupScope>(S).isEHCleanup(); + case EHScope::LazyCleanup: + return !cast<EHLazyCleanupScope>(S).isEHCleanup(); + case EHScope::Filter: + case EHScope::Catch: + case EHScope::Terminate: + return false; + } - PushCleanupBlock(Info.FinallyBlock); - setInvokeDest(Info.HandlerBlock); + // Suppress warning. + return false; +} - return Info; +llvm::BasicBlock *CodeGenFunction::getInvokeDestImpl() { + assert(EHStack.requiresLandingPad()); + assert(!EHStack.empty()); + + if (!Exceptions) + return 0; + + // Check the innermost scope for a cached landing pad. If this is + // a non-EH cleanup, we'll check enclosing scopes in EmitLandingPad. + llvm::BasicBlock *LP = EHStack.begin()->getCachedLandingPad(); + if (LP) return LP; + + // Build the landing pad for this scope. 
+ LP = EmitLandingPad(); + assert(LP); + + // Cache the landing pad on the innermost scope. If this is a + // non-EH scope, cache the landing pad on the enclosing scope, too. + for (EHScopeStack::iterator ir = EHStack.begin(); true; ++ir) { + ir->setCachedLandingPad(LP); + if (!isNonEHScope(*ir)) break; + } + + return LP; } -void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, - CXXTryStmtInfo TryInfo) { - // Pointer to the personality function - llvm::Constant *Personality = getPersonalityFn(CGM); - llvm::Value *llvm_eh_exception = - CGM.getIntrinsic(llvm::Intrinsic::eh_exception); - llvm::Value *llvm_eh_selector = - CGM.getIntrinsic(llvm::Intrinsic::eh_selector); - - llvm::BasicBlock *PrevLandingPad = TryInfo.SavedLandingPad; - llvm::BasicBlock *TryHandler = TryInfo.HandlerBlock; - llvm::BasicBlock *FinallyBlock = TryInfo.FinallyBlock; - llvm::BasicBlock *FinallyRethrow = createBasicBlock("finally.throw"); - llvm::BasicBlock *FinallyEnd = createBasicBlock("finally.end"); - - // Jump to end if there is no exception - EmitBranchThroughCleanup(FinallyEnd); - - llvm::BasicBlock *TerminateHandler = getTerminateHandler(); - - // Emit the handlers - EmitBlock(TryHandler); - - const llvm::IntegerType *Int8Ty; - const llvm::PointerType *PtrToInt8Ty; - Int8Ty = llvm::Type::getInt8Ty(VMContext); - // C string type. Used in lots of places. - PtrToInt8Ty = llvm::PointerType::getUnqual(Int8Ty); - llvm::Constant *Null = llvm::ConstantPointerNull::get(PtrToInt8Ty); - llvm::SmallVector<llvm::Value*, 8> SelectorArgs; +llvm::BasicBlock *CodeGenFunction::EmitLandingPad() { + assert(EHStack.requiresLandingPad()); + + // This function contains a hack to work around a design flaw in + // LLVM's EH IR which breaks semantics after inlining. This same + // hack is implemented in llvm-gcc. 
+ // + // The LLVM EH abstraction is basically a thin veneer over the + // traditional GCC zero-cost design: for each range of instructions + // in the function, there is (at most) one "landing pad" with an + // associated chain of EH actions. A language-specific personality + // function interprets this chain of actions and (1) decides whether + // or not to resume execution at the landing pad and (2) if so, + // provides an integer indicating why it's stopping. In LLVM IR, + // the association of a landing pad with a range of instructions is + // achieved via an invoke instruction, the chain of actions becomes + // the arguments to the @llvm.eh.selector call, and the selector + // call returns the integer indicator. Other than the required + // presence of two intrinsic function calls in the landing pad, + // the IR exactly describes the layout of the output code. + // + // A principal advantage of this design is that it is completely + // language-agnostic; in theory, the LLVM optimizers can treat + // landing pads neutrally, and targets need only know how to lower + // the intrinsics to have a functioning exceptions system (assuming + // that platform exceptions follow something approximately like the + // GCC design). Unfortunately, landing pads cannot be combined in a + // language-agnostic way: given selectors A and B, there is no way + // to make a single landing pad which faithfully represents the + // semantics of propagating an exception first through A, then + // through B, without knowing how the personality will interpret the + // (lowered form of the) selectors. This means that inlining has no + // choice but to crudely chain invokes (i.e., to ignore invokes in + // the inlined function, but to turn all unwindable calls into + // invokes), which is only semantically valid if every unwind stops + // at every landing pad. + // + // Therefore, the invoke-inline hack is to guarantee that every + // landing pad has a catch-all. 
+ const bool UseInvokeInlineHack = true; + + for (EHScopeStack::iterator ir = EHStack.begin(); ; ) { + assert(ir != EHStack.end() && + "stack requiring landing pad is nothing but non-EH scopes?"); + + // If this is a terminate scope, just use the singleton terminate + // landing pad. + if (isa<EHTerminateScope>(*ir)) + return getTerminateLandingPad(); + + // If this isn't an EH scope, iterate; otherwise break out. + if (!isNonEHScope(*ir)) break; + ++ir; + + // We haven't checked this scope for a cached landing pad yet. + if (llvm::BasicBlock *LP = ir->getCachedLandingPad()) + return LP; + } + + // Save the current IR generation state. + CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP(); + + // Create and configure the landing pad. + llvm::BasicBlock *LP = createBasicBlock("lpad"); + EmitBlock(LP); + + // Save the exception pointer. It's safe to use a single exception + // pointer per function because EH cleanups can never have nested + // try/catches. + llvm::CallInst *Exn = + Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_exception), "exn"); + Exn->setDoesNotThrow(); + Builder.CreateStore(Exn, getExceptionSlot()); + + // Build the selector arguments. + llvm::SmallVector<llvm::Value*, 8> EHSelector; + EHSelector.push_back(Exn); + EHSelector.push_back(getPersonalityFn(*this)); + + // Accumulate all the handlers in scope. + llvm::DenseMap<llvm::Value*, JumpDest> EHHandlers; + JumpDest CatchAll; + bool HasEHCleanup = false; + bool HasEHFilter = false; + llvm::SmallVector<llvm::Value*, 8> EHFilters; + for (EHScopeStack::iterator I = EHStack.begin(), E = EHStack.end(); + I != E; ++I) { + + switch (I->getKind()) { + case EHScope::LazyCleanup: + if (!HasEHCleanup) + HasEHCleanup = cast<EHLazyCleanupScope>(*I).isEHCleanup(); + // We otherwise don't care about cleanups. + continue; + + case EHScope::Cleanup: + if (!HasEHCleanup) + HasEHCleanup = cast<EHCleanupScope>(*I).isEHCleanup(); + // We otherwise don't care about cleanups. 
+ continue; + + case EHScope::Filter: { + assert(I.next() == EHStack.end() && "EH filter is not end of EH stack"); + assert(!CatchAll.Block && "EH filter reached after catch-all"); + + // Filter scopes get added to the selector in wierd ways. + EHFilterScope &Filter = cast<EHFilterScope>(*I); + HasEHFilter = true; + + // Add all the filter values which we aren't already explicitly + // catching. + for (unsigned I = 0, E = Filter.getNumFilters(); I != E; ++I) { + llvm::Value *FV = Filter.getFilter(I); + if (!EHHandlers.count(FV)) + EHFilters.push_back(FV); + } + goto done; + } + + case EHScope::Terminate: + // Terminate scopes are basically catch-alls. + assert(!CatchAll.Block); + CatchAll.Block = getTerminateHandler(); + CatchAll.ScopeDepth = EHStack.getEnclosingEHCleanup(I); + goto done; + + case EHScope::Catch: + break; + } + + EHCatchScope &Catch = cast<EHCatchScope>(*I); + for (unsigned HI = 0, HE = Catch.getNumHandlers(); HI != HE; ++HI) { + EHCatchScope::Handler Handler = Catch.getHandler(HI); + + // Catch-all. We should only have one of these per catch. + if (!Handler.Type) { + assert(!CatchAll.Block); + CatchAll.Block = Handler.Block; + CatchAll.ScopeDepth = EHStack.getEnclosingEHCleanup(I); + continue; + } + + // Check whether we already have a handler for this type. + JumpDest &Dest = EHHandlers[Handler.Type]; + if (Dest.Block) continue; + + EHSelector.push_back(Handler.Type); + Dest.Block = Handler.Block; + Dest.ScopeDepth = EHStack.getEnclosingEHCleanup(I); + } + + // Stop if we found a catch-all. + if (CatchAll.Block) break; + } + + done: + unsigned LastToEmitInLoop = EHSelector.size(); + + // If we have a catch-all, add null to the selector. + if (CatchAll.Block) { + EHSelector.push_back(getCatchAllValue(CGF)); + + // If we have an EH filter, we need to add those handlers in the + // right place in the selector, which is to say, at the end. 
+ } else if (HasEHFilter) { + // Create a filter expression: an integer constant saying how many + // filters there are (+1 to avoid ambiguity with 0 for cleanup), + // followed by the filter types. The personality routine only + // lands here if the filter doesn't match. + EHSelector.push_back(llvm::ConstantInt::get(Builder.getInt32Ty(), + EHFilters.size() + 1)); + EHSelector.append(EHFilters.begin(), EHFilters.end()); + + // Also check whether we need a cleanup. + if (UseInvokeInlineHack || HasEHCleanup) + EHSelector.push_back(UseInvokeInlineHack + ? getCatchAllValue(CGF) + : getCleanupValue(CGF)); + + // Otherwise, signal that we at least have cleanups. + } else if (UseInvokeInlineHack || HasEHCleanup) { + EHSelector.push_back(UseInvokeInlineHack + ? getCatchAllValue(CGF) + : getCleanupValue(CGF)); + } else { + assert(LastToEmitInLoop > 2); + LastToEmitInLoop--; + } + + assert(EHSelector.size() >= 3 && "selector call has only two arguments!"); + + // Tell the backend how to generate the landing pad. + llvm::CallInst *Selection = + Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_selector), + EHSelector.begin(), EHSelector.end(), "eh.selector"); + Selection->setDoesNotThrow(); + + // Select the right handler. llvm::Value *llvm_eh_typeid_for = CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for); - // Exception object - llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc"); - llvm::Value *RethrowPtr = CreateTempAlloca(Exc->getType(), "_rethrow"); - - SelectorArgs.push_back(Exc); - SelectorArgs.push_back(Personality); - - bool HasCatchAll = false; - for (unsigned i = 0; i<S.getNumHandlers(); ++i) { - const CXXCatchStmt *C = S.getHandler(i); - VarDecl *CatchParam = C->getExceptionDecl(); - if (CatchParam) { - // C++ [except.handle]p3 indicates that top-level cv-qualifiers - // are ignored. 
- QualType CaughtType = C->getCaughtType().getNonReferenceType(); - llvm::Value *EHTypeInfo - = CGM.GetAddrOfRTTIDescriptor(CaughtType.getUnqualifiedType(), true); - SelectorArgs.push_back(EHTypeInfo); + + // The results of llvm_eh_typeid_for aren't reliable --- at least + // not locally --- so we basically have to do this as an 'if' chain. + // We walk through the first N-1 catch clauses, testing and chaining, + // and then fall into the final clause (which is either a cleanup, a + // filter (possibly with a cleanup), a catch-all, or another catch). + for (unsigned I = 2; I != LastToEmitInLoop; ++I) { + llvm::Value *Type = EHSelector[I]; + JumpDest Dest = EHHandlers[Type]; + assert(Dest.Block && "no handler entry for value in selector?"); + + // Figure out where to branch on a match. As a debug code-size + // optimization, if the scope depth matches the innermost cleanup, + // we branch directly to the catch handler. + llvm::BasicBlock *Match = Dest.Block; + bool MatchNeedsCleanup = Dest.ScopeDepth != EHStack.getInnermostEHCleanup(); + if (MatchNeedsCleanup) + Match = createBasicBlock("eh.match"); + + llvm::BasicBlock *Next = createBasicBlock("eh.next"); + + // Check whether the exception matches. + llvm::CallInst *Id + = Builder.CreateCall(llvm_eh_typeid_for, + Builder.CreateBitCast(Type, CGM.PtrToInt8Ty)); + Id->setDoesNotThrow(); + Builder.CreateCondBr(Builder.CreateICmpEQ(Selection, Id), + Match, Next); + + // Emit match code if necessary. + if (MatchNeedsCleanup) { + EmitBlock(Match); + EmitBranchThroughEHCleanup(Dest); + } + + // Continue to the next match. + EmitBlock(Next); + } + + // Emit the final case in the selector. + // This might be a catch-all.... + if (CatchAll.Block) { + assert(isa<llvm::ConstantPointerNull>(EHSelector.back())); + EmitBranchThroughEHCleanup(CatchAll); + + // ...or an EH filter... + } else if (HasEHFilter) { + llvm::Value *SavedSelection = Selection; + + // First, unwind out to the outermost scope if necessary. 
+ if (EHStack.hasEHCleanups()) { + // The end here might not dominate the beginning, so we might need to + // save the selector if we need it. + llvm::AllocaInst *SelectorVar = 0; + if (HasEHCleanup) { + SelectorVar = CreateTempAlloca(Builder.getInt32Ty(), "selector.var"); + Builder.CreateStore(Selection, SelectorVar); + } + + llvm::BasicBlock *CleanupContBB = createBasicBlock("ehspec.cleanup.cont"); + EmitBranchThroughEHCleanup(JumpDest(CleanupContBB, EHStack.stable_end())); + EmitBlock(CleanupContBB); + + if (HasEHCleanup) + SavedSelection = Builder.CreateLoad(SelectorVar, "ehspec.saved-selector"); + } + + // If there was a cleanup, we'll need to actually check whether we + // landed here because the filter triggered. + if (UseInvokeInlineHack || HasEHCleanup) { + llvm::BasicBlock *RethrowBB = createBasicBlock("cleanup"); + llvm::BasicBlock *UnexpectedBB = createBasicBlock("ehspec.unexpected"); + + llvm::Constant *Zero = llvm::ConstantInt::get(Builder.getInt32Ty(), 0); + llvm::Value *FailsFilter = + Builder.CreateICmpSLT(SavedSelection, Zero, "ehspec.fails"); + Builder.CreateCondBr(FailsFilter, UnexpectedBB, RethrowBB); + + // The rethrow block is where we land if this was a cleanup. + // TODO: can this be _Unwind_Resume if the InvokeInlineHack is off? + EmitBlock(RethrowBB); + Builder.CreateCall(getUnwindResumeOrRethrowFn(), + Builder.CreateLoad(getExceptionSlot())) + ->setDoesNotReturn(); + Builder.CreateUnreachable(); + + EmitBlock(UnexpectedBB); + } + + // Call __cxa_call_unexpected. This doesn't need to be an invoke + // because __cxa_call_unexpected magically filters exceptions + // according to the last landing pad the exception was thrown + // into. Seriously. + Builder.CreateCall(getUnexpectedFn(*this), + Builder.CreateLoad(getExceptionSlot())) + ->setDoesNotReturn(); + Builder.CreateUnreachable(); + + // ...or a normal catch handler... 
+ } else if (!UseInvokeInlineHack && !HasEHCleanup) { + llvm::Value *Type = EHSelector.back(); + EmitBranchThroughEHCleanup(EHHandlers[Type]); + + // ...or a cleanup. + } else { + // We emit a jump to a notional label at the outermost unwind state. + llvm::BasicBlock *Unwind = createBasicBlock("eh.resume"); + JumpDest Dest(Unwind, EHStack.stable_end()); + EmitBranchThroughEHCleanup(Dest); + + // The unwind block. We have to reload the exception here because + // we might have unwound through arbitrary blocks, so the landing + // pad might not dominate. + EmitBlock(Unwind); + + // This can always be a call because we necessarily didn't find + // anything on the EH stack which needs our help. + Builder.CreateCall(getUnwindResumeOrRethrowFn(), + Builder.CreateLoad(getExceptionSlot())) + ->setDoesNotReturn(); + Builder.CreateUnreachable(); + } + + // Restore the old IR generation state. + Builder.restoreIP(SavedIP); + + return LP; +} + +namespace { + /// A cleanup to call __cxa_end_catch. In many cases, the caught + /// exception type lets us state definitively that the thrown exception + /// type does not have a destructor. In particular: + /// - Catch-alls tell us nothing, so we have to conservatively + /// assume that the thrown exception might have a destructor. + /// - Catches by reference behave according to their base types. + /// - Catches of non-record types will only trigger for exceptions + /// of non-record types, which never have destructors. + /// - Catches of record types can trigger for arbitrary subclasses + /// of the caught type, so we have to assume the actual thrown + /// exception type might have a throwing destructor, even if the + /// caught type's destructor is trivial or nothrow. 
+ struct CallEndCatch : EHScopeStack::LazyCleanup { + CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {} + bool MightThrow; + + void Emit(CodeGenFunction &CGF, bool IsForEH) { + if (!MightThrow) { + CGF.Builder.CreateCall(getEndCatchFn(CGF))->setDoesNotThrow(); + return; + } + + CGF.EmitCallOrInvoke(getEndCatchFn(CGF), 0, 0); + } + }; +} + +/// Emits a call to __cxa_begin_catch and enters a cleanup to call +/// __cxa_end_catch. +/// +/// \param EndMightThrow - true if __cxa_end_catch might throw +static llvm::Value *CallBeginCatch(CodeGenFunction &CGF, + llvm::Value *Exn, + bool EndMightThrow) { + llvm::CallInst *Call = CGF.Builder.CreateCall(getBeginCatchFn(CGF), Exn); + Call->setDoesNotThrow(); + + CGF.EHStack.pushLazyCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow); + + return Call; +} + +/// A "special initializer" callback for initializing a catch +/// parameter during catch initialization. +static void InitCatchParam(CodeGenFunction &CGF, + const VarDecl &CatchParam, + llvm::Value *ParamAddr) { + // Load the exception from where the landing pad saved it. + llvm::Value *Exn = CGF.Builder.CreateLoad(CGF.getExceptionSlot(), "exn"); + + CanQualType CatchType = + CGF.CGM.getContext().getCanonicalType(CatchParam.getType()); + const llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType); + + // If we're catching by reference, we can just cast the object + // pointer to the appropriate pointer. + if (isa<ReferenceType>(CatchType)) { + bool EndCatchMightThrow = cast<ReferenceType>(CatchType)->getPointeeType() + ->isRecordType(); + + // __cxa_begin_catch returns the adjusted object pointer. + llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow); + llvm::Value *ExnCast = + CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref"); + CGF.Builder.CreateStore(ExnCast, ParamAddr); + return; + } + + // Non-aggregates (plus complexes). 
+ bool IsComplex = false; + if (!CGF.hasAggregateLLVMType(CatchType) || + (IsComplex = CatchType->isAnyComplexType())) { + llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false); + + // If the catch type is a pointer type, __cxa_begin_catch returns + // the pointer by value. + if (CatchType->hasPointerRepresentation()) { + llvm::Value *CastExn = + CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted"); + CGF.Builder.CreateStore(CastExn, ParamAddr); + return; + } + + // Otherwise, it returns a pointer into the exception object. + + const llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok + llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy); + + if (IsComplex) { + CGF.StoreComplexToAddr(CGF.LoadComplexFromAddr(Cast, /*volatile*/ false), + ParamAddr, /*volatile*/ false); } else { - // null indicates catch all - SelectorArgs.push_back(Null); - HasCatchAll = true; + llvm::Value *ExnLoad = CGF.Builder.CreateLoad(Cast, "exn.scalar"); + CGF.EmitStoreOfScalar(ExnLoad, ParamAddr, /*volatile*/ false, CatchType); } + return; } - // We use a cleanup unless there was already a catch all. - if (!HasCatchAll) { - SelectorArgs.push_back(Null); + // FIXME: this *really* needs to be done via a proper, Sema-emitted + // initializer expression. + + CXXRecordDecl *RD = CatchType.getTypePtr()->getAsCXXRecordDecl(); + assert(RD && "aggregate catch type was not a record!"); + + const llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok + + if (RD->hasTrivialCopyConstructor()) { + llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, true); + llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy); + CGF.EmitAggregateCopy(ParamAddr, Cast, CatchType); + return; } - // Find which handler was matched. 
- llvm::Value *Selector - = Builder.CreateCall(llvm_eh_selector, SelectorArgs.begin(), - SelectorArgs.end(), "selector"); - for (unsigned i = 0; i<S.getNumHandlers(); ++i) { - const CXXCatchStmt *C = S.getHandler(i); - VarDecl *CatchParam = C->getExceptionDecl(); - Stmt *CatchBody = C->getHandlerBlock(); - - llvm::BasicBlock *Next = 0; - - if (SelectorArgs[i+2] != Null) { - llvm::BasicBlock *Match = createBasicBlock("match"); - Next = createBasicBlock("catch.next"); - const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext()); - llvm::Value *Id - = Builder.CreateCall(llvm_eh_typeid_for, - Builder.CreateBitCast(SelectorArgs[i+2], - Int8PtrTy)); - Builder.CreateCondBr(Builder.CreateICmpEQ(Selector, Id), - Match, Next); - EmitBlock(Match); + // We have to call __cxa_get_exception_ptr to get the adjusted + // pointer before copying. + llvm::CallInst *AdjustedExn = + CGF.Builder.CreateCall(getGetExceptionPtrFn(CGF), Exn); + AdjustedExn->setDoesNotThrow(); + llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy); + + CXXConstructorDecl *CD = RD->getCopyConstructor(CGF.getContext(), 0); + assert(CD && "record has no copy constructor!"); + llvm::Value *CopyCtor = CGF.CGM.GetAddrOfCXXConstructor(CD, Ctor_Complete); + + CallArgList CallArgs; + CallArgs.push_back(std::make_pair(RValue::get(ParamAddr), + CD->getThisType(CGF.getContext()))); + CallArgs.push_back(std::make_pair(RValue::get(Cast), + CD->getParamDecl(0)->getType())); + + const FunctionProtoType *FPT + = CD->getType()->getAs<FunctionProtoType>(); + + // Call the copy ctor in a terminate scope. + CGF.EHStack.pushTerminate(); + CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(CallArgs, FPT), + CopyCtor, ReturnValueSlot(), CallArgs, CD); + CGF.EHStack.popTerminate(); + + // Finally we can call __cxa_begin_catch. + CallBeginCatch(CGF, Exn, true); +} + +/// Begins a catch statement by initializing the catch variable and +/// calling __cxa_begin_catch. 
+static void BeginCatch(CodeGenFunction &CGF, + const CXXCatchStmt *S) { + // We have to be very careful with the ordering of cleanups here: + // C++ [except.throw]p4: + // The destruction [of the exception temporary] occurs + // immediately after the destruction of the object declared in + // the exception-declaration in the handler. + // + // So the precise ordering is: + // 1. Construct catch variable. + // 2. __cxa_begin_catch + // 3. Enter __cxa_end_catch cleanup + // 4. Enter dtor cleanup + // + // We do this by initializing the exception variable with a + // "special initializer", InitCatchParam. Delegation sequence: + // - ExitCXXTryStmt opens a RunCleanupsScope + // - EmitLocalBlockVarDecl creates the variable and debug info + // - InitCatchParam initializes the variable from the exception + // - CallBeginCatch calls __cxa_begin_catch + // - CallBeginCatch enters the __cxa_end_catch cleanup + // - EmitLocalBlockVarDecl enters the variable destructor cleanup + // - EmitCXXTryStmt emits the code for the catch body + // - EmitCXXTryStmt close the RunCleanupsScope + + VarDecl *CatchParam = S->getExceptionDecl(); + if (!CatchParam) { + llvm::Value *Exn = CGF.Builder.CreateLoad(CGF.getExceptionSlot(), "exn"); + CallBeginCatch(CGF, Exn, true); + return; + } + + // Emit the local. + CGF.EmitLocalBlockVarDecl(*CatchParam, &InitCatchParam); +} + +namespace { + struct CallRethrow : EHScopeStack::LazyCleanup { + void Emit(CodeGenFunction &CGF, bool IsForEH) { + CGF.EmitCallOrInvoke(getReThrowFn(CGF), 0, 0); + } + }; +} + +void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) { + unsigned NumHandlers = S.getNumHandlers(); + EHCatchScope &CatchScope = cast<EHCatchScope>(*EHStack.begin()); + assert(CatchScope.getNumHandlers() == NumHandlers); + + // Copy the handler blocks off before we pop the EH stack. Emitting + // the handlers might scribble on this memory. 
+ llvm::SmallVector<EHCatchScope::Handler, 8> Handlers(NumHandlers); + memcpy(Handlers.data(), CatchScope.begin(), + NumHandlers * sizeof(EHCatchScope::Handler)); + EHStack.popCatch(); + + // The fall-through block. + llvm::BasicBlock *ContBB = createBasicBlock("try.cont"); + + // We just emitted the body of the try; jump to the continue block. + if (HaveInsertPoint()) + Builder.CreateBr(ContBB); + + // Determine if we need an implicit rethrow for all these catch handlers. + bool ImplicitRethrow = false; + if (IsFnTryBlock) + ImplicitRethrow = isa<CXXDestructorDecl>(CurCodeDecl) || + isa<CXXConstructorDecl>(CurCodeDecl); + + for (unsigned I = 0; I != NumHandlers; ++I) { + llvm::BasicBlock *CatchBlock = Handlers[I].Block; + EmitBlock(CatchBlock); + + // Catch the exception if this isn't a catch-all. + const CXXCatchStmt *C = S.getHandler(I); + + // Enter a cleanup scope, including the catch variable and the + // end-catch. + RunCleanupsScope CatchScope(*this); + + // Initialize the catch variable and set up the cleanups. + BeginCatch(*this, C); + + // If there's an implicit rethrow, push a normal "cleanup" to call + // _cxa_rethrow. This needs to happen before __cxa_end_catch is + // called, and so it is pushed after BeginCatch. + if (ImplicitRethrow) + EHStack.pushLazyCleanup<CallRethrow>(NormalCleanup); + + // Perform the body of the catch. + EmitStmt(C->getHandlerBlock()); + + // Fall out through the catch cleanups. + CatchScope.ForceCleanup(); + + // Branch out of the try. + if (HaveInsertPoint()) + Builder.CreateBr(ContBB); + } + + EmitBlock(ContBB); +} + +/// Enters a finally block for an implementation using zero-cost +/// exceptions. This is mostly general, but hard-codes some +/// language/ABI-specific behavior in the catch-all sections. 
+CodeGenFunction::FinallyInfo +CodeGenFunction::EnterFinallyBlock(const Stmt *Body, + llvm::Constant *BeginCatchFn, + llvm::Constant *EndCatchFn, + llvm::Constant *RethrowFn) { + assert((BeginCatchFn != 0) == (EndCatchFn != 0) && + "begin/end catch functions not paired"); + assert(RethrowFn && "rethrow function is required"); + + // The rethrow function has one of the following two types: + // void (*)() + // void (*)(void*) + // In the latter case we need to pass it the exception object. + // But we can't use the exception slot because the @finally might + // have a landing pad (which would overwrite the exception slot). + const llvm::FunctionType *RethrowFnTy = + cast<llvm::FunctionType>( + cast<llvm::PointerType>(RethrowFn->getType()) + ->getElementType()); + llvm::Value *SavedExnVar = 0; + if (RethrowFnTy->getNumParams()) + SavedExnVar = CreateTempAlloca(Builder.getInt8PtrTy(), "finally.exn"); + + // A finally block is a statement which must be executed on any edge + // out of a given scope. Unlike a cleanup, the finally block may + // contain arbitrary control flow leading out of itself. In + // addition, finally blocks should always be executed, even if there + // are no catch handlers higher on the stack. Therefore, we + // surround the protected scope with a combination of a normal + // cleanup (to catch attempts to break out of the block via normal + // control flow) and an EH catch-all (semantically "outside" any try + // statement to which the finally block might have been attached). + // The finally block itself is generated in the context of a cleanup + // which conditionally leaves the catch-all. + + FinallyInfo Info; + + // Jump destination for performing the finally block on an exception + // edge. We'll never actually reach this block, so unreachable is + // fine. + JumpDest RethrowDest = getJumpDestInCurrentScope(getUnreachableBlock()); + + // Whether the finally block is being executed for EH purposes. 
+ llvm::AllocaInst *ForEHVar = CreateTempAlloca(CGF.Builder.getInt1Ty(), + "finally.for-eh"); + InitTempAlloca(ForEHVar, llvm::ConstantInt::getFalse(getLLVMContext())); + + // Enter a normal cleanup which will perform the @finally block. + { + CodeGenFunction::CleanupBlock Cleanup(*this, NormalCleanup); + + // Enter a cleanup to call the end-catch function if one was provided. + if (EndCatchFn) { + CodeGenFunction::CleanupBlock FinallyExitCleanup(CGF, NormalAndEHCleanup); + + llvm::BasicBlock *EndCatchBB = createBasicBlock("finally.endcatch"); + llvm::BasicBlock *CleanupContBB = createBasicBlock("finally.cleanup.cont"); + + llvm::Value *ShouldEndCatch = + Builder.CreateLoad(ForEHVar, "finally.endcatch"); + Builder.CreateCondBr(ShouldEndCatch, EndCatchBB, CleanupContBB); + EmitBlock(EndCatchBB); + EmitCallOrInvoke(EndCatchFn, 0, 0); // catch-all, so might throw + EmitBlock(CleanupContBB); } - llvm::BasicBlock *MatchEnd = createBasicBlock("match.end"); - llvm::BasicBlock *MatchHandler = createBasicBlock("match.handler"); - - PushCleanupBlock(MatchEnd); - setInvokeDest(MatchHandler); - - llvm::Value *ExcObject = Builder.CreateCall(getBeginCatchFn(*this), Exc); - - { - CleanupScope CatchScope(*this); - // Bind the catch parameter if it exists. 
- if (CatchParam) { - QualType CatchType = CatchParam->getType().getNonReferenceType(); - setInvokeDest(TerminateHandler); - bool WasPointer = true; - bool WasPointerReference = false; - CatchType = CGM.getContext().getCanonicalType(CatchType); - if (CatchType.getTypePtr()->isPointerType()) { - if (isa<ReferenceType>(CatchParam->getType())) - WasPointerReference = true; - } else { - if (!isa<ReferenceType>(CatchParam->getType())) - WasPointer = false; - CatchType = getContext().getPointerType(CatchType); - } - ExcObject = Builder.CreateBitCast(ExcObject, ConvertType(CatchType)); - EmitLocalBlockVarDecl(*CatchParam); - // FIXME: we need to do this sooner so that the EH region for the - // cleanup doesn't start until after the ctor completes, use a decl - // init? - CopyObject(*this, CatchParam->getType().getNonReferenceType(), - WasPointer, WasPointerReference, ExcObject, - GetAddrOfLocalVar(CatchParam)); - setInvokeDest(MatchHandler); + // Emit the finally block. + EmitStmt(Body); + + // If the end of the finally is reachable, check whether this was + // for EH. If so, rethrow. + if (HaveInsertPoint()) { + llvm::BasicBlock *RethrowBB = createBasicBlock("finally.rethrow"); + llvm::BasicBlock *ContBB = createBasicBlock("finally.cont"); + + llvm::Value *ShouldRethrow = + Builder.CreateLoad(ForEHVar, "finally.shouldthrow"); + Builder.CreateCondBr(ShouldRethrow, RethrowBB, ContBB); + + EmitBlock(RethrowBB); + if (SavedExnVar) { + llvm::Value *Args[] = { Builder.CreateLoad(SavedExnVar) }; + EmitCallOrInvoke(RethrowFn, Args, Args+1); + } else { + EmitCallOrInvoke(RethrowFn, 0, 0); } + Builder.CreateUnreachable(); - EmitStmt(CatchBody); + EmitBlock(ContBB); } - EmitBranchThroughCleanup(FinallyEnd); - - EmitBlock(MatchHandler); - - llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc"); - // We are required to emit this call to satisfy LLVM, even - // though we don't use the result. 
- llvm::Value *Args[] = { - Exc, Personality, - llvm::ConstantInt::getNullValue(llvm::Type::getInt32Ty(VMContext)) - }; - Builder.CreateCall(llvm_eh_selector, &Args[0], llvm::array_endof(Args)); - Builder.CreateStore(Exc, RethrowPtr); - EmitBranchThroughCleanup(FinallyRethrow); - - CodeGenFunction::CleanupBlockInfo Info = PopCleanupBlock(); - - EmitBlock(MatchEnd); - - llvm::BasicBlock *Cont = createBasicBlock("invoke.cont"); - Builder.CreateInvoke(getEndCatchFn(*this), - Cont, TerminateHandler, - &Args[0], &Args[0]); - EmitBlock(Cont); - if (Info.SwitchBlock) - EmitBlock(Info.SwitchBlock); - if (Info.EndBlock) - EmitBlock(Info.EndBlock); - - Exc = Builder.CreateCall(llvm_eh_exception, "exc"); - Builder.CreateStore(Exc, RethrowPtr); - EmitBranchThroughCleanup(FinallyRethrow); - - if (Next) - EmitBlock(Next); + // Leave the end-catch cleanup. As an optimization, pretend that + // the fallthrough path was inaccessible; we've dynamically proven + // that we're not in the EH case along that path. + if (EndCatchFn) { + CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP(); + PopCleanupBlock(); + Builder.restoreIP(SavedIP); + } + + // Now make sure we actually have an insertion point or the + // cleanup gods will hate us. + EnsureInsertPoint(); } - if (!HasCatchAll) { - Builder.CreateStore(Exc, RethrowPtr); - EmitBranchThroughCleanup(FinallyRethrow); + + // Enter a catch-all scope. + llvm::BasicBlock *CatchAllBB = createBasicBlock("finally.catchall"); + CGBuilderTy::InsertPoint SavedIP = Builder.saveIP(); + Builder.SetInsertPoint(CatchAllBB); + + // If there's a begin-catch function, call it. + if (BeginCatchFn) { + Builder.CreateCall(BeginCatchFn, Builder.CreateLoad(getExceptionSlot())) + ->setDoesNotThrow(); } - CodeGenFunction::CleanupBlockInfo Info = PopCleanupBlock(); + // If we need to remember the exception pointer to rethrow later, do so. 
+ if (SavedExnVar) { + llvm::Value *SavedExn = Builder.CreateLoad(getExceptionSlot()); + Builder.CreateStore(SavedExn, SavedExnVar); + } - setInvokeDest(PrevLandingPad); + // Tell the finally block that we're in EH. + Builder.CreateStore(llvm::ConstantInt::getTrue(getLLVMContext()), ForEHVar); - EmitBlock(FinallyBlock); + // Thread a jump through the finally cleanup. + EmitBranchThroughCleanup(RethrowDest); - if (Info.SwitchBlock) - EmitBlock(Info.SwitchBlock); - if (Info.EndBlock) - EmitBlock(Info.EndBlock); + Builder.restoreIP(SavedIP); - // Branch around the rethrow code. - EmitBranch(FinallyEnd); + EHCatchScope *CatchScope = EHStack.pushCatch(1); + CatchScope->setCatchAllHandler(0, CatchAllBB); - EmitBlock(FinallyRethrow); - // FIXME: Eventually we can chain the handlers together and just do a call - // here. - if (getInvokeDest()) { - llvm::BasicBlock *Cont = createBasicBlock("invoke.cont"); - Builder.CreateInvoke(getUnwindResumeOrRethrowFn(), Cont, - getInvokeDest(), - Builder.CreateLoad(RethrowPtr)); - EmitBlock(Cont); - } else - Builder.CreateCall(getUnwindResumeOrRethrowFn(), - Builder.CreateLoad(RethrowPtr)); + return Info; +} - Builder.CreateUnreachable(); +void CodeGenFunction::ExitFinallyBlock(FinallyInfo &Info) { + // Leave the finally catch-all. + EHCatchScope &Catch = cast<EHCatchScope>(*EHStack.begin()); + llvm::BasicBlock *CatchAllBB = Catch.getHandler(0).Block; + EHStack.popCatch(); + + // And leave the normal cleanup. + PopCleanupBlock(); - EmitBlock(FinallyEnd); -} - -CodeGenFunction::EHCleanupBlock::~EHCleanupBlock() { - CGF.setInvokeDest(PreviousInvokeDest); - - llvm::BasicBlock *EndOfCleanup = CGF.Builder.GetInsertBlock(); - - // Jump to the beginning of the cleanup. - CGF.Builder.SetInsertPoint(CleanupHandler, CleanupHandler->begin()); - - // The libstdc++ personality function. - // TODO: generalize to work with other libraries. 
- llvm::Constant *Personality = getPersonalityFn(CGF.CGM); - - // %exception = call i8* @llvm.eh.exception() - // Magic intrinsic which tells gives us a handle to the caught - // exception. - llvm::Value *llvm_eh_exception = - CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_exception); - llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc"); - - llvm::Constant *Null = llvm::ConstantPointerNull::get(CGF.PtrToInt8Ty); - - // %ignored = call i32 @llvm.eh.selector(i8* %exception, - // i8* @__gxx_personality_v0, - // i8* null) - // Magic intrinsic which tells LLVM that this invoke landing pad is - // just a cleanup block. - llvm::Value *Args[] = { Exc, Personality, Null }; - llvm::Value *llvm_eh_selector = - CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector); - CGF.Builder.CreateCall(llvm_eh_selector, &Args[0], llvm::array_endof(Args)); - - // And then we fall through into the code that the user put there. - // Jump back to the end of the cleanup. - CGF.Builder.SetInsertPoint(EndOfCleanup); - - // Rethrow the exception. - if (CGF.getInvokeDest()) { - llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont"); - CGF.Builder.CreateInvoke(CGF.getUnwindResumeOrRethrowFn(), Cont, - CGF.getInvokeDest(), Exc); - CGF.EmitBlock(Cont); - } else - CGF.Builder.CreateCall(CGF.getUnwindResumeOrRethrowFn(), Exc); + CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP(); + EmitBlock(CatchAllBB, true); + + Builder.restoreIP(SavedIP); +} + +llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() { + if (TerminateLandingPad) + return TerminateLandingPad; + + CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP(); + + // This will get inserted at the end of the function. + TerminateLandingPad = createBasicBlock("terminate.lpad"); + Builder.SetInsertPoint(TerminateLandingPad); + + // Tell the backend that this is a landing pad. 
+ llvm::CallInst *Exn = + Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_exception), "exn"); + Exn->setDoesNotThrow(); + + // Tell the backend what the exception table should be: + // nothing but a catch-all. + llvm::Value *Args[3] = { Exn, getPersonalityFn(*this), + getCatchAllValue(*this) }; + Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_selector), + Args, Args+3, "eh.selector") + ->setDoesNotThrow(); + + llvm::CallInst *TerminateCall = Builder.CreateCall(getTerminateFn(*this)); + TerminateCall->setDoesNotReturn(); + TerminateCall->setDoesNotThrow(); CGF.Builder.CreateUnreachable(); - // Resume inserting where we started, but put the new cleanup - // handler in place. - if (PreviousInsertionBlock) - CGF.Builder.SetInsertPoint(PreviousInsertionBlock); - else - CGF.Builder.ClearInsertionPoint(); + // Restore the saved insertion state. + Builder.restoreIP(SavedIP); - if (CGF.Exceptions) - CGF.setInvokeDest(CleanupHandler); + return TerminateLandingPad; } llvm::BasicBlock *CodeGenFunction::getTerminateHandler() { if (TerminateHandler) return TerminateHandler; - // We don't want to change anything at the current location, so - // save it aside and clear the insert point. - llvm::BasicBlock *SavedInsertBlock = Builder.GetInsertBlock(); - llvm::BasicBlock::iterator SavedInsertPoint = Builder.GetInsertPoint(); - Builder.ClearInsertionPoint(); - - llvm::Constant *Personality = getPersonalityFn(CGM); - llvm::Value *llvm_eh_exception = - CGM.getIntrinsic(llvm::Intrinsic::eh_exception); - llvm::Value *llvm_eh_selector = - CGM.getIntrinsic(llvm::Intrinsic::eh_selector); + CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP(); - // Set up terminate handler + // Set up the terminate handler. This block is inserted at the very + // end of the function by FinishFunction. 
TerminateHandler = createBasicBlock("terminate.handler"); - EmitBlock(TerminateHandler); - llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc"); - // We are required to emit this call to satisfy LLVM, even - // though we don't use the result. - llvm::Value *Args[] = { - Exc, Personality, - llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1) - }; - Builder.CreateCall(llvm_eh_selector, &Args[0], llvm::array_endof(Args)); - llvm::CallInst *TerminateCall = - Builder.CreateCall(getTerminateFn(*this)); + Builder.SetInsertPoint(TerminateHandler); + llvm::CallInst *TerminateCall = Builder.CreateCall(getTerminateFn(*this)); TerminateCall->setDoesNotReturn(); TerminateCall->setDoesNotThrow(); Builder.CreateUnreachable(); // Restore the saved insertion state. - Builder.SetInsertPoint(SavedInsertBlock, SavedInsertPoint); + Builder.restoreIP(SavedIP); return TerminateHandler; } + +CodeGenFunction::CleanupBlock::CleanupBlock(CodeGenFunction &CGF, + CleanupKind Kind) + : CGF(CGF), SavedIP(CGF.Builder.saveIP()), NormalCleanupExitBB(0) { + llvm::BasicBlock *EntryBB = CGF.createBasicBlock("cleanup"); + CGF.Builder.SetInsertPoint(EntryBB); + + switch (Kind) { + case NormalAndEHCleanup: + NormalCleanupEntryBB = EHCleanupEntryBB = EntryBB; + break; + + case NormalCleanup: + NormalCleanupEntryBB = EntryBB; + EHCleanupEntryBB = 0; + break; + + case EHCleanup: + NormalCleanupEntryBB = 0; + EHCleanupEntryBB = EntryBB; + CGF.EHStack.pushTerminate(); + break; + } +} + +void CodeGenFunction::CleanupBlock::beginEHCleanup() { + assert(EHCleanupEntryBB == 0 && "already started an EH cleanup"); + NormalCleanupExitBB = CGF.Builder.GetInsertBlock(); + assert(NormalCleanupExitBB && "end of normal cleanup is unreachable"); + + EHCleanupEntryBB = CGF.createBasicBlock("eh.cleanup"); + CGF.Builder.SetInsertPoint(EHCleanupEntryBB); + CGF.EHStack.pushTerminate(); +} + +CodeGenFunction::CleanupBlock::~CleanupBlock() { + llvm::BasicBlock *EHCleanupExitBB = 0; + + // If we're currently 
writing the EH cleanup... + if (EHCleanupEntryBB) { + // Set the EH cleanup exit block. + EHCleanupExitBB = CGF.Builder.GetInsertBlock(); + assert(EHCleanupExitBB && "end of EH cleanup is unreachable"); + + // If we're actually writing both at once, set the normal exit, too. + if (EHCleanupEntryBB == NormalCleanupEntryBB) + NormalCleanupExitBB = EHCleanupExitBB; + + // Otherwise, we must have pushed a terminate handler. + else + CGF.EHStack.popTerminate(); + + // Otherwise, just set the normal cleanup exit block. + } else { + NormalCleanupExitBB = CGF.Builder.GetInsertBlock(); + assert(NormalCleanupExitBB && "end of normal cleanup is unreachable"); + } + + CGF.EHStack.pushCleanup(NormalCleanupEntryBB, NormalCleanupExitBB, + EHCleanupEntryBB, EHCleanupExitBB); + + CGF.Builder.restoreIP(SavedIP); +} + +EHScopeStack::LazyCleanup::~LazyCleanup() { + llvm_unreachable("LazyCleanup is indestructable"); +} diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGException.h b/contrib/llvm/tools/clang/lib/CodeGen/CGException.h new file mode 100644 index 0000000..80739cd --- /dev/null +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGException.h @@ -0,0 +1,428 @@ +//===-- CGException.h - Classes for exceptions IR generation ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// These classes support the generation of LLVM IR for exceptions in +// C++ and Objective C. +// +//===----------------------------------------------------------------------===// + +#ifndef CLANG_CODEGEN_CGEXCEPTION_H +#define CLANG_CODEGEN_CGEXCEPTION_H + +/// EHScopeStack is defined in CodeGenFunction.h, but its +/// implementation is in this file and in CGException.cpp. 
+#include "CodeGenFunction.h" + +namespace llvm { + class Value; + class BasicBlock; +} + +namespace clang { +namespace CodeGen { + +/// A protected scope for zero-cost EH handling. +class EHScope { + llvm::BasicBlock *CachedLandingPad; + + unsigned K : 3; + +protected: + enum { BitsRemaining = 29 }; + +public: + enum Kind { Cleanup, LazyCleanup, Catch, Terminate, Filter }; + + EHScope(Kind K) : CachedLandingPad(0), K(K) {} + + Kind getKind() const { return static_cast<Kind>(K); } + + llvm::BasicBlock *getCachedLandingPad() const { + return CachedLandingPad; + } + + void setCachedLandingPad(llvm::BasicBlock *Block) { + CachedLandingPad = Block; + } +}; + +/// A scope which attempts to handle some, possibly all, types of +/// exceptions. +/// +/// Objective C @finally blocks are represented using a cleanup scope +/// after the catch scope. +class EHCatchScope : public EHScope { + unsigned NumHandlers : BitsRemaining; + + // In effect, we have a flexible array member + // Handler Handlers[0]; + // But that's only standard in C99, not C++, so we have to do + // annoying pointer arithmetic instead. + +public: + struct Handler { + /// A type info value, or null (C++ null, not an LLVM null pointer) + /// for a catch-all. + llvm::Value *Type; + + /// The catch handler for this type. 
+ llvm::BasicBlock *Block; + + static Handler make(llvm::Value *Type, llvm::BasicBlock *Block) { + Handler Temp; + Temp.Type = Type; + Temp.Block = Block; + return Temp; + } + }; + +private: + Handler *getHandlers() { + return reinterpret_cast<Handler*>(this+1); + } + + const Handler *getHandlers() const { + return reinterpret_cast<const Handler*>(this+1); + } + +public: + static size_t getSizeForNumHandlers(unsigned N) { + return sizeof(EHCatchScope) + N * sizeof(Handler); + } + + EHCatchScope(unsigned NumHandlers) + : EHScope(Catch), NumHandlers(NumHandlers) { + } + + unsigned getNumHandlers() const { + return NumHandlers; + } + + void setCatchAllHandler(unsigned I, llvm::BasicBlock *Block) { + setHandler(I, /*catchall*/ 0, Block); + } + + void setHandler(unsigned I, llvm::Value *Type, llvm::BasicBlock *Block) { + assert(I < getNumHandlers()); + getHandlers()[I] = Handler::make(Type, Block); + } + + const Handler &getHandler(unsigned I) const { + assert(I < getNumHandlers()); + return getHandlers()[I]; + } + + typedef const Handler *iterator; + iterator begin() const { return getHandlers(); } + iterator end() const { return getHandlers() + getNumHandlers(); } + + static bool classof(const EHScope *Scope) { + return Scope->getKind() == Catch; + } +}; + +/// A cleanup scope which generates the cleanup blocks lazily. +class EHLazyCleanupScope : public EHScope { + /// Whether this cleanup needs to be run along normal edges. + bool IsNormalCleanup : 1; + + /// Whether this cleanup needs to be run along exception edges. + bool IsEHCleanup : 1; + + /// The amount of extra storage needed by the LazyCleanup. + /// Always a multiple of the scope-stack alignment. + unsigned CleanupSize : 12; + + /// The number of fixups required by enclosing scopes (not including + /// this one). If this is the top cleanup scope, all the fixups + /// from this index onwards belong to this scope. 
+ unsigned FixupDepth : BitsRemaining - 14; + + /// The nearest normal cleanup scope enclosing this one. + EHScopeStack::stable_iterator EnclosingNormal; + + /// The nearest EH cleanup scope enclosing this one. + EHScopeStack::stable_iterator EnclosingEH; + + /// The dual entry/exit block along the normal edge. This is lazily + /// created if needed before the cleanup is popped. + llvm::BasicBlock *NormalBlock; + + /// The dual entry/exit block along the EH edge. This is lazily + /// created if needed before the cleanup is popped. + llvm::BasicBlock *EHBlock; + +public: + /// Gets the size required for a lazy cleanup scope with the given + /// cleanup-data requirements. + static size_t getSizeForCleanupSize(size_t Size) { + return sizeof(EHLazyCleanupScope) + Size; + } + + size_t getAllocatedSize() const { + return sizeof(EHLazyCleanupScope) + CleanupSize; + } + + EHLazyCleanupScope(bool IsNormal, bool IsEH, unsigned CleanupSize, + unsigned FixupDepth, + EHScopeStack::stable_iterator EnclosingNormal, + EHScopeStack::stable_iterator EnclosingEH) + : EHScope(EHScope::LazyCleanup), + IsNormalCleanup(IsNormal), IsEHCleanup(IsEH), + CleanupSize(CleanupSize), FixupDepth(FixupDepth), + EnclosingNormal(EnclosingNormal), EnclosingEH(EnclosingEH), + NormalBlock(0), EHBlock(0) + {} + + bool isNormalCleanup() const { return IsNormalCleanup; } + llvm::BasicBlock *getNormalBlock() const { return NormalBlock; } + void setNormalBlock(llvm::BasicBlock *BB) { NormalBlock = BB; } + + bool isEHCleanup() const { return IsEHCleanup; } + llvm::BasicBlock *getEHBlock() const { return EHBlock; } + void setEHBlock(llvm::BasicBlock *BB) { EHBlock = BB; } + + unsigned getFixupDepth() const { return FixupDepth; } + EHScopeStack::stable_iterator getEnclosingNormalCleanup() const { + return EnclosingNormal; + } + EHScopeStack::stable_iterator getEnclosingEHCleanup() const { + return EnclosingEH; + } + + size_t getCleanupSize() const { return CleanupSize; } + void *getCleanupBuffer() { return 
this + 1; } + + EHScopeStack::LazyCleanup *getCleanup() { + return reinterpret_cast<EHScopeStack::LazyCleanup*>(getCleanupBuffer()); + } + + static bool classof(const EHScope *Scope) { + return (Scope->getKind() == LazyCleanup); + } +}; + +/// A scope which needs to execute some code if we try to unwind --- +/// either normally, via the EH mechanism, or both --- through it. +class EHCleanupScope : public EHScope { + /// The number of fixups required by enclosing scopes (not including + /// this one). If this is the top cleanup scope, all the fixups + /// from this index onwards belong to this scope. + unsigned FixupDepth : BitsRemaining; + + /// The nearest normal cleanup scope enclosing this one. + EHScopeStack::stable_iterator EnclosingNormal; + + /// The nearest EH cleanup scope enclosing this one. + EHScopeStack::stable_iterator EnclosingEH; + + llvm::BasicBlock *NormalEntry; + llvm::BasicBlock *NormalExit; + llvm::BasicBlock *EHEntry; + llvm::BasicBlock *EHExit; + +public: + static size_t getSize() { return sizeof(EHCleanupScope); } + + EHCleanupScope(unsigned FixupDepth, + EHScopeStack::stable_iterator EnclosingNormal, + EHScopeStack::stable_iterator EnclosingEH, + llvm::BasicBlock *NormalEntry, llvm::BasicBlock *NormalExit, + llvm::BasicBlock *EHEntry, llvm::BasicBlock *EHExit) + : EHScope(Cleanup), FixupDepth(FixupDepth), + EnclosingNormal(EnclosingNormal), EnclosingEH(EnclosingEH), + NormalEntry(NormalEntry), NormalExit(NormalExit), + EHEntry(EHEntry), EHExit(EHExit) { + assert((NormalEntry != 0) == (NormalExit != 0)); + assert((EHEntry != 0) == (EHExit != 0)); + } + + bool isNormalCleanup() const { return NormalEntry != 0; } + bool isEHCleanup() const { return EHEntry != 0; } + + llvm::BasicBlock *getNormalEntry() const { return NormalEntry; } + llvm::BasicBlock *getNormalExit() const { return NormalExit; } + llvm::BasicBlock *getEHEntry() const { return EHEntry; } + llvm::BasicBlock *getEHExit() const { return EHExit; } + unsigned getFixupDepth() const { 
return FixupDepth; } + EHScopeStack::stable_iterator getEnclosingNormalCleanup() const { + return EnclosingNormal; + } + EHScopeStack::stable_iterator getEnclosingEHCleanup() const { + return EnclosingEH; + } + + static bool classof(const EHScope *Scope) { + return Scope->getKind() == Cleanup; + } +}; + +/// An exceptions scope which filters exceptions thrown through it. +/// Only exceptions matching the filter types will be permitted to be +/// thrown. +/// +/// This is used to implement C++ exception specifications. +class EHFilterScope : public EHScope { + unsigned NumFilters : BitsRemaining; + + // Essentially ends in a flexible array member: + // llvm::Value *FilterTypes[0]; + + llvm::Value **getFilters() { + return reinterpret_cast<llvm::Value**>(this+1); + } + + llvm::Value * const *getFilters() const { + return reinterpret_cast<llvm::Value* const *>(this+1); + } + +public: + EHFilterScope(unsigned NumFilters) : + EHScope(Filter), NumFilters(NumFilters) {} + + static size_t getSizeForNumFilters(unsigned NumFilters) { + return sizeof(EHFilterScope) + NumFilters * sizeof(llvm::Value*); + } + + unsigned getNumFilters() const { return NumFilters; } + + void setFilter(unsigned I, llvm::Value *FilterValue) { + assert(I < getNumFilters()); + getFilters()[I] = FilterValue; + } + + llvm::Value *getFilter(unsigned I) const { + assert(I < getNumFilters()); + return getFilters()[I]; + } + + static bool classof(const EHScope *Scope) { + return Scope->getKind() == Filter; + } +}; + +/// An exceptions scope which calls std::terminate if any exception +/// reaches it. +class EHTerminateScope : public EHScope { +public: + EHTerminateScope() : EHScope(Terminate) {} + static size_t getSize() { return sizeof(EHTerminateScope); } + + static bool classof(const EHScope *Scope) { + return Scope->getKind() == Terminate; + } +}; + +/// A non-stable pointer into the scope stack. 
+class EHScopeStack::iterator { + char *Ptr; + + friend class EHScopeStack; + explicit iterator(char *Ptr) : Ptr(Ptr) {} + +public: + iterator() : Ptr(0) {} + + EHScope *get() const { + return reinterpret_cast<EHScope*>(Ptr); + } + + EHScope *operator->() const { return get(); } + EHScope &operator*() const { return *get(); } + + iterator &operator++() { + switch (get()->getKind()) { + case EHScope::Catch: + Ptr += EHCatchScope::getSizeForNumHandlers( + static_cast<const EHCatchScope*>(get())->getNumHandlers()); + break; + + case EHScope::Filter: + Ptr += EHFilterScope::getSizeForNumFilters( + static_cast<const EHFilterScope*>(get())->getNumFilters()); + break; + + case EHScope::LazyCleanup: + Ptr += static_cast<const EHLazyCleanupScope*>(get()) + ->getAllocatedSize(); + break; + + case EHScope::Cleanup: + Ptr += EHCleanupScope::getSize(); + break; + + case EHScope::Terminate: + Ptr += EHTerminateScope::getSize(); + break; + } + + return *this; + } + + iterator next() { + iterator copy = *this; + ++copy; + return copy; + } + + iterator operator++(int) { + iterator copy = *this; + operator++(); + return copy; + } + + bool operator==(iterator other) const { return Ptr == other.Ptr; } + bool operator!=(iterator other) const { return Ptr != other.Ptr; } +}; + +inline EHScopeStack::iterator EHScopeStack::begin() const { + return iterator(StartOfData); +} + +inline EHScopeStack::iterator EHScopeStack::end() const { + return iterator(EndOfBuffer); +} + +inline void EHScopeStack::popCatch() { + assert(!empty() && "popping exception stack when not empty"); + + assert(isa<EHCatchScope>(*begin())); + StartOfData += EHCatchScope::getSizeForNumHandlers( + cast<EHCatchScope>(*begin()).getNumHandlers()); + + assert(CatchDepth > 0 && "mismatched catch/terminate push/pop"); + CatchDepth--; +} + +inline void EHScopeStack::popTerminate() { + assert(!empty() && "popping exception stack when not empty"); + + assert(isa<EHTerminateScope>(*begin())); + StartOfData += 
EHTerminateScope::getSize(); + + assert(CatchDepth > 0 && "mismatched catch/terminate push/pop"); + CatchDepth--; +} + +inline EHScopeStack::iterator EHScopeStack::find(stable_iterator sp) const { + assert(sp.isValid() && "finding invalid savepoint"); + assert(sp.Size <= stable_begin().Size && "finding savepoint after pop"); + return iterator(EndOfBuffer - sp.Size); +} + +inline EHScopeStack::stable_iterator +EHScopeStack::stabilize(iterator ir) const { + assert(StartOfData <= ir.Ptr && ir.Ptr <= EndOfBuffer); + return stable_iterator(EndOfBuffer - ir.Ptr); +} + +} +} + +#endif diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp index d67618b..43bab9f 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp @@ -19,7 +19,7 @@ #include "clang/AST/ASTContext.h" #include "clang/AST/DeclObjC.h" #include "llvm/Intrinsics.h" -#include "clang/CodeGen/CodeGenOptions.h" +#include "clang/Frontend/CodeGenOptions.h" #include "llvm/Target/TargetData.h" using namespace clang; using namespace CodeGen; @@ -44,8 +44,8 @@ void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var, Block->getInstList().insertAfter(&*AllocaInsertPt, Store); } -llvm::Value *CodeGenFunction::CreateIRTemp(QualType Ty, - const llvm::Twine &Name) { +llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty, + const llvm::Twine &Name) { llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name); // FIXME: Should we prefer the preferred type alignment here? 
CharUnits Align = getContext().getTypeAlignInChars(Ty); @@ -53,8 +53,8 @@ llvm::Value *CodeGenFunction::CreateIRTemp(QualType Ty, return Alloc; } -llvm::Value *CodeGenFunction::CreateMemTemp(QualType Ty, - const llvm::Twine &Name) { +llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty, + const llvm::Twine &Name) { llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name); // FIXME: Should we prefer the preferred type alignment here? CharUnits Align = getContext().getTypeAlignInChars(Ty); @@ -168,49 +168,62 @@ struct SubobjectAdjustment { } }; -RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E, - bool IsInitializer) { - bool ShouldDestroyTemporaries = false; - unsigned OldNumLiveTemporaries = 0; +static llvm::Value * +CreateReferenceTemporary(CodeGenFunction& CGF, QualType Type, + const NamedDecl *InitializedDecl) { + if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) { + if (VD->hasGlobalStorage()) { + llvm::SmallString<256> Name; + CGF.CGM.getMangleContext().mangleReferenceTemporary(VD, Name); + + const llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type); + + // Create the reference temporary. + llvm::GlobalValue *RefTemp = + new llvm::GlobalVariable(CGF.CGM.getModule(), + RefTempTy, /*isConstant=*/false, + llvm::GlobalValue::InternalLinkage, + llvm::Constant::getNullValue(RefTempTy), + Name.str()); + return RefTemp; + } + } + + return CGF.CreateMemTemp(Type, "ref.tmp"); +} +static llvm::Value * +EmitExprForReferenceBinding(CodeGenFunction& CGF, const Expr* E, + llvm::Value *&ReferenceTemporary, + const CXXDestructorDecl *&ReferenceTemporaryDtor, + const NamedDecl *InitializedDecl) { if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E)) E = DAE->getExpr(); - + if (const CXXExprWithTemporaries *TE = dyn_cast<CXXExprWithTemporaries>(E)) { - ShouldDestroyTemporaries = true; - - // Keep track of the current cleanup stack depth. 
- OldNumLiveTemporaries = LiveTemporaries.size(); - - E = TE->getSubExpr(); + CodeGenFunction::RunCleanupsScope Scope(CGF); + + return EmitExprForReferenceBinding(CGF, TE->getSubExpr(), + ReferenceTemporary, + ReferenceTemporaryDtor, + InitializedDecl); } - - RValue Val; - if (E->isLvalue(getContext()) == Expr::LV_Valid) { - // Emit the expr as an lvalue. - LValue LV = EmitLValue(E); - if (LV.isSimple()) { - if (ShouldDestroyTemporaries) { - // Pop temporaries. - while (LiveTemporaries.size() > OldNumLiveTemporaries) - PopCXXTemporary(); - } - - return RValue::get(LV.getAddress()); - } - - Val = EmitLoadOfLValue(LV, E->getType()); + + RValue RV; + if (E->isLvalue(CGF.getContext()) == Expr::LV_Valid) { + // Emit the expression as an lvalue. + LValue LV = CGF.EmitLValue(E); + + if (LV.isSimple()) + return LV.getAddress(); - if (ShouldDestroyTemporaries) { - // Pop temporaries. - while (LiveTemporaries.size() > OldNumLiveTemporaries) - PopCXXTemporary(); - } + // We have to load the lvalue. + RV = CGF.EmitLoadOfLValue(LV, E->getType()); } else { QualType ResultTy = E->getType(); - + llvm::SmallVector<SubobjectAdjustment, 2> Adjustments; - do { + while (true) { if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) { E = PE->getSubExpr(); continue; @@ -233,7 +246,7 @@ RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E, continue; } } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) { - if (ME->getBase()->isLvalue(getContext()) != Expr::LV_Valid && + if (ME->getBase()->isLvalue(CGF.getContext()) != Expr::LV_Valid && ME->getBase()->getType()->isRecordType()) { if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) { E = ME->getBase(); @@ -246,63 +259,46 @@ RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E, // Nothing changed. break; - } while (true); - - Val = EmitAnyExprToTemp(E, /*IsAggLocVolatile=*/false, - IsInitializer); - - if (ShouldDestroyTemporaries) { - // Pop temporaries. 
- while (LiveTemporaries.size() > OldNumLiveTemporaries) - PopCXXTemporary(); - } + } - if (IsInitializer) { - // We might have to destroy the temporary variable. + // Create a reference temporary if necessary. + if (CGF.hasAggregateLLVMType(E->getType()) && + !E->getType()->isAnyComplexType()) + ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(), + InitializedDecl); + + RV = CGF.EmitAnyExpr(E, ReferenceTemporary, /*IsAggLocVolatile=*/false, + /*IgnoreResult=*/false, InitializedDecl); + + if (InitializedDecl) { + // Get the destructor for the reference temporary. if (const RecordType *RT = E->getType()->getAs<RecordType>()) { - if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) { - if (!ClassDecl->hasTrivialDestructor()) { - const CXXDestructorDecl *Dtor = - ClassDecl->getDestructor(getContext()); - - { - DelayedCleanupBlock Scope(*this); - EmitCXXDestructorCall(Dtor, Dtor_Complete, - /*ForVirtualBase=*/false, - Val.getAggregateAddr()); - - // Make sure to jump to the exit block. - EmitBranch(Scope.getCleanupExitBlock()); - } - if (Exceptions) { - EHCleanupBlock Cleanup(*this); - EmitCXXDestructorCall(Dtor, Dtor_Complete, - /*ForVirtualBase=*/false, - Val.getAggregateAddr()); - } - } - } + CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl()); + if (!ClassDecl->hasTrivialDestructor()) + ReferenceTemporaryDtor = ClassDecl->getDestructor(); } } - + // Check if need to perform derived-to-base casts and/or field accesses, to // get from the temporary object we created (and, potentially, for which we // extended the lifetime) to the subobject we're binding the reference to. 
if (!Adjustments.empty()) { - llvm::Value *Object = Val.getAggregateAddr(); + llvm::Value *Object = RV.getAggregateAddr(); for (unsigned I = Adjustments.size(); I != 0; --I) { SubobjectAdjustment &Adjustment = Adjustments[I-1]; switch (Adjustment.Kind) { case SubobjectAdjustment::DerivedToBaseAdjustment: - Object = GetAddressOfBaseClass(Object, - Adjustment.DerivedToBase.DerivedClass, - *Adjustment.DerivedToBase.BasePath, - /*NullCheckValue=*/false); + Object = + CGF.GetAddressOfBaseClass(Object, + Adjustment.DerivedToBase.DerivedClass, + *Adjustment.DerivedToBase.BasePath, + /*NullCheckValue=*/false); break; case SubobjectAdjustment::FieldAdjustment: { unsigned CVR = Adjustment.Field.CVRQualifiers; - LValue LV = EmitLValueForField(Object, Adjustment.Field.Field, CVR); + LValue LV = + CGF.EmitLValueForField(Object, Adjustment.Field.Field, CVR); if (LV.isSimple()) { Object = LV.getAddress(); break; @@ -312,36 +308,72 @@ RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E, // the object we're binding to. QualType T = Adjustment.Field.Field->getType().getNonReferenceType() .getUnqualifiedType(); - Object = CreateTempAlloca(ConvertType(T), "lv"); - EmitStoreThroughLValue(EmitLoadOfLValue(LV, T), - LValue::MakeAddr(Object, - Qualifiers::fromCVRMask(CVR)), - T); + Object = CreateReferenceTemporary(CGF, T, InitializedDecl); + LValue TempLV = LValue::MakeAddr(Object, + Qualifiers::fromCVRMask(CVR)); + CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV, T), TempLV, T); break; } + } } - const llvm::Type *ResultPtrTy - = llvm::PointerType::get(ConvertType(ResultTy), 0); - Object = Builder.CreateBitCast(Object, ResultPtrTy, "temp"); - return RValue::get(Object); + const llvm::Type *ResultPtrTy = CGF.ConvertType(ResultTy)->getPointerTo(); + return CGF.Builder.CreateBitCast(Object, ResultPtrTy, "temp"); } } - if (Val.isAggregate()) { - Val = RValue::get(Val.getAggregateAddr()); - } else { - // Create a temporary variable that we can bind the reference to. 
- llvm::Value *Temp = CreateMemTemp(E->getType(), "reftmp"); - if (Val.isScalar()) - EmitStoreOfScalar(Val.getScalarVal(), Temp, false, E->getType()); - else - StoreComplexToAddr(Val.getComplexVal(), Temp, false); - Val = RValue::get(Temp); + if (RV.isAggregate()) + return RV.getAggregateAddr(); + + // Create a temporary variable that we can bind the reference to. + ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(), + InitializedDecl); + + if (RV.isScalar()) + CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary, + /*Volatile=*/false, E->getType()); + else + CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary, + /*Volatile=*/false); + return ReferenceTemporary; +} + +RValue +CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E, + const NamedDecl *InitializedDecl) { + llvm::Value *ReferenceTemporary = 0; + const CXXDestructorDecl *ReferenceTemporaryDtor = 0; + llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary, + ReferenceTemporaryDtor, + InitializedDecl); + + if (!ReferenceTemporaryDtor) + return RValue::get(Value); + + // Make sure to call the destructor for the reference temporary. 
+ if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) { + if (VD->hasGlobalStorage()) { + llvm::Constant *DtorFn = + CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete); + CGF.EmitCXXGlobalDtorRegistration(DtorFn, + cast<llvm::Constant>(ReferenceTemporary)); + + return RValue::get(Value); + } + } + + CleanupBlock Cleanup(*this, NormalCleanup); + EmitCXXDestructorCall(ReferenceTemporaryDtor, Dtor_Complete, + /*ForVirtualBase=*/false, ReferenceTemporary); + + if (Exceptions) { + Cleanup.beginEHCleanup(); + EmitCXXDestructorCall(ReferenceTemporaryDtor, Dtor_Complete, + /*ForVirtualBase=*/false, ReferenceTemporary); } - return Val; + return RValue::get(Value); } @@ -359,118 +391,28 @@ void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) { if (!CatchUndefined) return; - const llvm::Type *Size_tTy - = llvm::IntegerType::get(VMContext, LLVMPointerWidth); Address = Builder.CreateBitCast(Address, PtrToInt8Ty); - llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, &Size_tTy, 1); - const llvm::IntegerType *Int1Ty = llvm::IntegerType::get(VMContext, 1); + const llvm::Type *IntPtrT = IntPtrTy; + llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, &IntPtrT, 1); + const llvm::IntegerType *Int1Ty = llvm::Type::getInt1Ty(VMContext); // In time, people may want to control this and use a 1 here. 
llvm::Value *Arg = llvm::ConstantInt::get(Int1Ty, 0); llvm::Value *C = Builder.CreateCall2(F, Address, Arg); llvm::BasicBlock *Cont = createBasicBlock(); llvm::BasicBlock *Check = createBasicBlock(); - llvm::Value *NegativeOne = llvm::ConstantInt::get(Size_tTy, -1ULL); + llvm::Value *NegativeOne = llvm::ConstantInt::get(IntPtrTy, -1ULL); Builder.CreateCondBr(Builder.CreateICmpEQ(C, NegativeOne), Cont, Check); EmitBlock(Check); Builder.CreateCondBr(Builder.CreateICmpUGE(C, - llvm::ConstantInt::get(Size_tTy, Size)), + llvm::ConstantInt::get(IntPtrTy, Size)), Cont, getTrapBB()); EmitBlock(Cont); } -llvm::Value *CodeGenFunction:: -EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, - bool isInc, bool isPre) { - QualType ValTy = E->getSubExpr()->getType(); - llvm::Value *InVal = EmitLoadOfLValue(LV, ValTy).getScalarVal(); - - int AmountVal = isInc ? 1 : -1; - - if (ValTy->isPointerType() && - ValTy->getAs<PointerType>()->isVariableArrayType()) { - // The amount of the addition/subtraction needs to account for the VLA size - ErrorUnsupported(E, "VLA pointer inc/dec"); - } - - llvm::Value *NextVal; - if (const llvm::PointerType *PT = - dyn_cast<llvm::PointerType>(InVal->getType())) { - llvm::Constant *Inc = - llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), AmountVal); - if (!isa<llvm::FunctionType>(PT->getElementType())) { - QualType PTEE = ValTy->getPointeeType(); - if (const ObjCObjectType *OIT = PTEE->getAs<ObjCObjectType>()) { - // Handle interface types, which are not represented with a concrete - // type. 
- int size = getContext().getTypeSize(OIT) / 8; - if (!isInc) - size = -size; - Inc = llvm::ConstantInt::get(Inc->getType(), size); - const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext); - InVal = Builder.CreateBitCast(InVal, i8Ty); - NextVal = Builder.CreateGEP(InVal, Inc, "add.ptr"); - llvm::Value *lhs = LV.getAddress(); - lhs = Builder.CreateBitCast(lhs, llvm::PointerType::getUnqual(i8Ty)); - LV = LValue::MakeAddr(lhs, MakeQualifiers(ValTy)); - } else - NextVal = Builder.CreateInBoundsGEP(InVal, Inc, "ptrincdec"); - } else { - const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext); - NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp"); - NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec"); - NextVal = Builder.CreateBitCast(NextVal, InVal->getType()); - } - } else if (InVal->getType() == llvm::Type::getInt1Ty(VMContext) && isInc) { - // Bool++ is an interesting case, due to promotion rules, we get: - // Bool++ -> Bool = Bool+1 -> Bool = (int)Bool+1 -> - // Bool = ((int)Bool+1) != 0 - // An interesting aspect of this is that increment is always true. - // Decrement does not have this property. - NextVal = llvm::ConstantInt::getTrue(VMContext); - } else if (isa<llvm::IntegerType>(InVal->getType())) { - NextVal = llvm::ConstantInt::get(InVal->getType(), AmountVal); - - // Signed integer overflow is undefined behavior. - if (ValTy->isSignedIntegerType()) - NextVal = Builder.CreateNSWAdd(InVal, NextVal, isInc ? "inc" : "dec"); - else - NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec"); - } else { - // Add the inc/dec to the real part. 
- if (InVal->getType()->isFloatTy()) - NextVal = - llvm::ConstantFP::get(VMContext, - llvm::APFloat(static_cast<float>(AmountVal))); - else if (InVal->getType()->isDoubleTy()) - NextVal = - llvm::ConstantFP::get(VMContext, - llvm::APFloat(static_cast<double>(AmountVal))); - else { - llvm::APFloat F(static_cast<float>(AmountVal)); - bool ignored; - F.convert(Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero, - &ignored); - NextVal = llvm::ConstantFP::get(VMContext, F); - } - NextVal = Builder.CreateFAdd(InVal, NextVal, isInc ? "inc" : "dec"); - } - - // Store the updated result through the lvalue. - if (LV.isBitField()) - EmitStoreThroughBitfieldLValue(RValue::get(NextVal), LV, ValTy, &NextVal); - else - EmitStoreThroughLValue(RValue::get(NextVal), LV, ValTy); - - // If this is a postinc, return the value read from memory, otherwise use the - // updated value. - return isPre ? NextVal : InVal; -} - - CodeGenFunction::ComplexPairTy CodeGenFunction:: EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre) { @@ -568,6 +510,8 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) { switch (E->getStmtClass()) { default: return EmitUnsupportedLValue(E, "l-value expression"); + case Expr::ObjCSelectorExprClass: + return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E)); case Expr::ObjCIsaExprClass: return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E)); case Expr::BinaryOperatorClass: @@ -600,8 +544,8 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) { return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E)); case Expr::CXXExprWithTemporariesClass: return EmitCXXExprWithTemporariesLValue(cast<CXXExprWithTemporaries>(E)); - case Expr::CXXZeroInitValueExprClass: - return EmitNullInitializationLValue(cast<CXXZeroInitValueExpr>(E)); + case Expr::CXXScalarValueInitExprClass: + return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E)); case Expr::CXXDefaultArgExprClass: return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr()); case 
Expr::CXXTypeidExprClass: @@ -816,8 +760,7 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV, const VectorType *ExprVT = ExprType->getAs<VectorType>(); if (!ExprVT) { unsigned InIdx = getAccessedFieldNo(0, Elts); - llvm::Value *Elt = llvm::ConstantInt::get( - llvm::Type::getInt32Ty(VMContext), InIdx); + llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx); return RValue::get(Builder.CreateExtractElement(Vec, Elt, "tmp")); } @@ -827,8 +770,7 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV, llvm::SmallVector<llvm::Constant*, 4> Mask; for (unsigned i = 0; i != NumResultElts; ++i) { unsigned InIdx = getAccessedFieldNo(i, Elts); - Mask.push_back(llvm::ConstantInt::get( - llvm::Type::getInt32Ty(VMContext), InIdx)); + Mask.push_back(llvm::ConstantInt::get(Int32Ty, InIdx)); } llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size()); @@ -1044,8 +986,7 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src, llvm::SmallVector<llvm::Constant*, 4> Mask(NumDstElts); for (unsigned i = 0; i != NumSrcElts; ++i) { unsigned InIdx = getAccessedFieldNo(i, Elts); - Mask[InIdx] = llvm::ConstantInt::get( - llvm::Type::getInt32Ty(VMContext), i); + Mask[InIdx] = llvm::ConstantInt::get(Int32Ty, i); } llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size()); @@ -1058,7 +999,6 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src, // FIXME: since we're shuffling with undef, can we just use the indices // into that? This could be simpler. llvm::SmallVector<llvm::Constant*, 4> ExtMask; - const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext); unsigned i; for (i = 0; i != NumSrcElts; ++i) ExtMask.push_back(llvm::ConstantInt::get(Int32Ty, i)); @@ -1089,7 +1029,6 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src, } else { // If the Src is a scalar (not a vector) it must be updating one element. 
unsigned InIdx = getAccessedFieldNo(0, Elts); - const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext); llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx); Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp"); } @@ -1401,6 +1340,22 @@ llvm::BasicBlock *CodeGenFunction::getTrapBB() { return TrapBB; } +/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an +/// array to pointer, return the array subexpression. +static const Expr *isSimpleArrayDecayOperand(const Expr *E) { + // If this isn't just an array->pointer decay, bail out. + const CastExpr *CE = dyn_cast<CastExpr>(E); + if (CE == 0 || CE->getCastKind() != CastExpr::CK_ArrayToPointerDecay) + return 0; + + // If this is a decay from variable width array, bail out. + const Expr *SubExpr = CE->getSubExpr(); + if (SubExpr->getType()->isVariableArrayType()) + return 0; + + return SubExpr; +} + LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) { // The index must always be an integer, which is not an aggregate. Emit it. llvm::Value *Idx = EmitScalarExpr(E->getIdx()); @@ -1413,25 +1368,19 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) { // Emit the vector as an lvalue to get its address. LValue LHS = EmitLValue(E->getBase()); assert(LHS.isSimple() && "Can only subscript lvalue vectors here!"); - Idx = Builder.CreateIntCast(Idx, - llvm::Type::getInt32Ty(VMContext), IdxSigned, "vidx"); + Idx = Builder.CreateIntCast(Idx, CGF.Int32Ty, IdxSigned, "vidx"); return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType().getCVRQualifiers()); } - // The base must be a pointer, which is not an aggregate. Emit it. - llvm::Value *Base = EmitScalarExpr(E->getBase()); - // Extend or truncate the index type to 32 or 64-bits. 
- unsigned IdxBitwidth = cast<llvm::IntegerType>(Idx->getType())->getBitWidth(); - if (IdxBitwidth != LLVMPointerWidth) - Idx = Builder.CreateIntCast(Idx, - llvm::IntegerType::get(VMContext, LLVMPointerWidth), + if (!Idx->getType()->isIntegerTy(LLVMPointerWidth)) + Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom"); - + // FIXME: As llvm implements the object size checking, this can come out. if (CatchUndefined) { - if (const ImplicitCastExpr *ICE=dyn_cast<ImplicitCastExpr>(E->getBase())) { + if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E->getBase())){ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) { if (ICE->getCastKind() == CastExpr::CK_ArrayToPointerDecay) { if (const ConstantArrayType *CAT @@ -1463,9 +1412,13 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) { Idx = Builder.CreateUDiv(Idx, llvm::ConstantInt::get(Idx->getType(), BaseTypeSize.getQuantity())); + + // The base must be a pointer, which is not an aggregate. Emit it. + llvm::Value *Base = EmitScalarExpr(E->getBase()); + Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx"); - } else if (const ObjCObjectType *OIT = - E->getType()->getAs<ObjCObjectType>()) { + } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){ + // Indexing over an interface, as in "NSString *P; P[4];" llvm::Value *InterfaceSize = llvm::ConstantInt::get(Idx->getType(), getContext().getTypeSizeInChars(OIT).getQuantity()); @@ -1473,10 +1426,27 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) { Idx = Builder.CreateMul(Idx, InterfaceSize); const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext); + + // The base must be a pointer, which is not an aggregate. Emit it. 
+ llvm::Value *Base = EmitScalarExpr(E->getBase()); Address = Builder.CreateGEP(Builder.CreateBitCast(Base, i8PTy), Idx, "arrayidx"); Address = Builder.CreateBitCast(Address, Base->getType()); + } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) { + // If this is A[i] where A is an array, the frontend will have decayed the + // base to be a ArrayToPointerDecay implicit cast. While correct, it is + // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a + // "gep x, i" here. Emit one "gep A, 0, i". + assert(Array->getType()->isArrayType() && + "Array to pointer decay must have array source type!"); + llvm::Value *ArrayPtr = EmitLValue(Array).getAddress(); + llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0); + llvm::Value *Args[] = { Zero, Idx }; + + Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, Args+2, "arrayidx"); } else { + // The base must be a pointer, which is not an aggregate. Emit it. + llvm::Value *Base = EmitScalarExpr(E->getBase()); Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx"); } @@ -1501,17 +1471,15 @@ llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext, llvm::SmallVector<unsigned, 4> &Elts) { llvm::SmallVector<llvm::Constant*, 4> CElts; + const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext); for (unsigned i = 0, e = Elts.size(); i != e; ++i) - CElts.push_back(llvm::ConstantInt::get( - llvm::Type::getInt32Ty(VMContext), Elts[i])); + CElts.push_back(llvm::ConstantInt::get(Int32Ty, Elts[i])); return llvm::ConstantVector::get(&CElts[0], CElts.size()); } LValue CodeGenFunction:: EmitExtVectorElementExpr(const ExtVectorElementExpr *E) { - const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext); - // Emit the base vector as an l-value. 
LValue Base; @@ -1816,10 +1784,18 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { cast<CXXRecordDecl>(DerivedClassTy->getDecl()); LValue LV = EmitLValue(E->getSubExpr()); + llvm::Value *This; + if (LV.isPropertyRef()) { + RValue RV = EmitLoadOfPropertyRefLValue(LV, E->getSubExpr()->getType()); + assert (!RV.isScalar() && "EmitCastLValue"); + This = RV.getAggregateAddr(); + } + else + This = LV.getAddress(); // Perform the derived-to-base conversion llvm::Value *Base = - GetAddressOfBaseClass(LV.getAddress(), DerivedClassDecl, + GetAddressOfBaseClass(This, DerivedClassDecl, E->getBasePath(), /*NullCheckValue=*/false); return LValue::MakeAddr(Base, MakeQualifiers(E->getType())); @@ -1840,7 +1816,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { return LValue::MakeAddr(Derived, MakeQualifiers(E->getType())); } - case CastExpr::CK_BitCast: { + case CastExpr::CK_LValueBitCast: { // This must be a reinterpret_cast (or c-style equivalent). const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E); @@ -1853,7 +1829,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { } LValue CodeGenFunction::EmitNullInitializationLValue( - const CXXZeroInitValueExpr *E) { + const CXXScalarValueInitExpr *E) { QualType Ty = E->getType(); LValue LV = LValue::MakeAddr(CreateMemTemp(Ty), MakeQualifiers(Ty)); EmitNullInitialization(LV.getAddress(), Ty); @@ -1966,15 +1942,28 @@ CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) { LValue CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) { LValue LV = EmitLValue(E->getSubExpr()); - PushCXXTemporary(E->getTemporary(), LV.getAddress()); + EmitCXXTemporary(E->getTemporary(), LV.getAddress()); return LV; } LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) { - // Can only get l-value for message expression returning aggregate type RValue RV = EmitObjCMessageExpr(E); - // FIXME: can this be volatile? 
- return LValue::MakeAddr(RV.getAggregateAddr(), MakeQualifiers(E->getType())); + + if (!RV.isScalar()) + return LValue::MakeAddr(RV.getAggregateAddr(), + MakeQualifiers(E->getType())); + + assert(E->getMethodDecl()->getResultType()->isReferenceType() && + "Can't have a scalar return unless the return type is a " + "reference type!"); + + return LValue::MakeAddr(RV.getScalarVal(), MakeQualifiers(E->getType())); +} + +LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) { + llvm::Value *V = + CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true); + return LValue::MakeAddr(V, MakeQualifiers(E->getType())); } llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface, diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp index a4e64fb..219a5f9 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp @@ -127,7 +127,7 @@ public: void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E); void VisitCXXConstructExpr(const CXXConstructExpr *E); void VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E); - void VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E); + void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E); void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); } void VisitVAArgExpr(VAArgExpr *E); @@ -177,11 +177,16 @@ bool AggExprEmitter::TypeRequiresGCollection(QualType T) { /// directly into the return value slot. If GC does interfere, a final /// move will be performed. 
void AggExprEmitter::EmitGCMove(const Expr *E, RValue Src) { - if (!RequiresGCollection) return; - - CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, DestPtr, + if (RequiresGCollection) { + std::pair<uint64_t, unsigned> TypeInfo = + CGF.getContext().getTypeInfo(E->getType()); + unsigned long size = TypeInfo.first/8; + const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType()); + llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size); + CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, DestPtr, Src.getAggregateAddr(), - E->getType()); + SizeVal); + } } /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired. @@ -198,9 +203,14 @@ void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) { } if (RequiresGCollection) { + std::pair<uint64_t, unsigned> TypeInfo = + CGF.getContext().getTypeInfo(E->getType()); + unsigned long size = TypeInfo.first/8; + const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType()); + llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size); CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, DestPtr, Src.getAggregateAddr(), - E->getType()); + SizeVal); return; } // If the result of the assignment is used, copy the LHS there also. @@ -297,6 +307,10 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) { break; } + case CastExpr::CK_LValueBitCast: + llvm_unreachable("there are no lvalue bit-casts on aggregates"); + break; + case CastExpr::CK_BitCast: { // This must be a member function pointer cast. Visit(E->getSubExpr()); @@ -396,35 +410,11 @@ void AggExprEmitter::VisitUnaryAddrOf(const UnaryOperator *E) { const llvm::Type *PtrDiffTy = CGF.ConvertType(CGF.getContext().getPointerDiffType()); - llvm::Value *DstPtr = Builder.CreateStructGEP(DestPtr, 0, "dst.ptr"); - llvm::Value *FuncPtr; - - if (MD->isVirtual()) { - int64_t Index = CGF.CGM.getVTables().getMethodVTableIndex(MD); - - // FIXME: We shouldn't use / 8 here. 
- uint64_t PointerWidthInBytes = - CGF.CGM.getContext().Target.getPointerWidth(0) / 8; - - // Itanium C++ ABI 2.3: - // For a non-virtual function, this field is a simple function pointer. - // For a virtual function, it is 1 plus the virtual table offset - // (in bytes) of the function, represented as a ptrdiff_t. - FuncPtr = llvm::ConstantInt::get(PtrDiffTy, - (Index * PointerWidthInBytes) + 1); - } else { - const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); - const llvm::Type *Ty = - CGF.CGM.getTypes().GetFunctionType(CGF.CGM.getTypes().getFunctionInfo(MD), - FPT->isVariadic()); - llvm::Constant *Fn = CGF.CGM.GetAddrOfFunction(MD, Ty); - FuncPtr = llvm::ConstantExpr::getPtrToInt(Fn, PtrDiffTy); - } + llvm::Value *FuncPtr = CGF.CGM.GetCXXMemberFunctionPointerValue(MD); Builder.CreateStore(FuncPtr, DstPtr, VolatileDest); llvm::Value *AdjPtr = Builder.CreateStructGEP(DestPtr, 1, "dst.adj"); - // The adjustment will always be 0. Builder.CreateStore(llvm::ConstantInt::get(PtrDiffTy, 0), AdjPtr, VolatileDest); @@ -546,17 +536,15 @@ void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) { // Don't make this a live temporary if we're emitting an initializer expr. if (!IsInitializer) - CGF.PushCXXTemporary(E->getTemporary(), Val); + CGF.EmitCXXTemporary(E->getTemporary(), Val); } void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) { llvm::Value *Val = DestPtr; - if (!Val) { - // Create a temporary variable. + if (!Val) // Create a temporary variable. 
Val = CGF.CreateMemTemp(E->getType(), "tmp"); - } if (E->requiresZeroInitialization()) EmitNullInitializationToLValue(LValue::MakeAddr(Val, @@ -573,7 +561,7 @@ void AggExprEmitter::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) { CGF.EmitCXXExprWithTemporaries(E, Val, VolatileDest, IsInitializer); } -void AggExprEmitter::VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) { +void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) { llvm::Value *Val = DestPtr; if (!Val) { @@ -602,7 +590,7 @@ AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV, QualType T) { if (isa<ImplicitValueInitExpr>(E)) { EmitNullInitializationToLValue(LV, T); } else if (T->isReferenceType()) { - RValue RV = CGF.EmitReferenceBindingToExpr(E, /*IsInitializer=*/false); + RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0); CGF.EmitStoreThroughLValue(RV, LV, T); } else if (T->isAnyComplexType()) { CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false); @@ -822,18 +810,11 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr, // equal, but other compilers do this optimization, and almost every memcpy // implementation handles this case safely. If there is a libc that does not // safely handle this, we can add a target hook. - const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext); - if (DestPtr->getType() != BP) - DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp"); - if (SrcPtr->getType() != BP) - SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp"); // Get size and alignment info for this aggregate. std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty); // FIXME: Handle variable sized types. - const llvm::Type *IntPtr = - llvm::IntegerType::get(VMContext, LLVMPointerWidth); // FIXME: If we have a volatile struct, the optimizer can remove what might // appear to be `extra' memory ops: @@ -847,25 +828,46 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr, // // we need to use a different call here. 
We use isVolatile to indicate when // either the source or the destination is volatile. - const llvm::Type *I1Ty = llvm::Type::getInt1Ty(VMContext); - const llvm::Type *I8Ty = llvm::Type::getInt8Ty(VMContext); - const llvm::Type *I32Ty = llvm::Type::getInt32Ty(VMContext); const llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType()); - const llvm::Type *DBP = llvm::PointerType::get(I8Ty, DPT->getAddressSpace()); - if (DestPtr->getType() != DBP) - DestPtr = Builder.CreateBitCast(DestPtr, DBP, "tmp"); + const llvm::Type *DBP = + llvm::Type::getInt8PtrTy(VMContext, DPT->getAddressSpace()); + DestPtr = Builder.CreateBitCast(DestPtr, DBP, "tmp"); const llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType()); - const llvm::Type *SBP = llvm::PointerType::get(I8Ty, SPT->getAddressSpace()); - if (SrcPtr->getType() != SBP) - SrcPtr = Builder.CreateBitCast(SrcPtr, SBP, "tmp"); - + const llvm::Type *SBP = + llvm::Type::getInt8PtrTy(VMContext, SPT->getAddressSpace()); + SrcPtr = Builder.CreateBitCast(SrcPtr, SBP, "tmp"); + + if (const RecordType *RecordTy = Ty->getAs<RecordType>()) { + RecordDecl *Record = RecordTy->getDecl(); + if (Record->hasObjectMember()) { + unsigned long size = TypeInfo.first/8; + const llvm::Type *SizeTy = ConvertType(getContext().getSizeType()); + llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size); + CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr, + SizeVal); + return; + } + } else if (getContext().getAsArrayType(Ty)) { + QualType BaseType = getContext().getBaseElementType(Ty); + if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) { + if (RecordTy->getDecl()->hasObjectMember()) { + unsigned long size = TypeInfo.first/8; + const llvm::Type *SizeTy = ConvertType(getContext().getSizeType()); + llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size); + CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr, + SizeVal); + return; + } + } + } + 
Builder.CreateCall5(CGM.getMemCpyFn(DestPtr->getType(), SrcPtr->getType(), - IntPtr), + IntPtrTy), DestPtr, SrcPtr, // TypeInfo.first describes size in bits. - llvm::ConstantInt::get(IntPtr, TypeInfo.first/8), - llvm::ConstantInt::get(I32Ty, TypeInfo.second/8), - llvm::ConstantInt::get(I1Ty, isVolatile)); + llvm::ConstantInt::get(IntPtrTy, TypeInfo.first/8), + Builder.getInt32(TypeInfo.second/8), + Builder.getInt1(isVolatile)); } diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp index f93c79c..69e5f0e 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp @@ -275,10 +275,7 @@ CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress(); QualType Ty = E->getType(); - if (ClassDecl->hasObjectMember()) - CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, This, Src, Ty); - else - EmitAggregateCopy(This, Src, Ty); + EmitAggregateCopy(This, Src, Ty); return RValue::get(This); } } @@ -484,6 +481,79 @@ static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context, return V; } +static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const CXXNewExpr *E, + llvm::Value *NewPtr) { + + assert(E->getNumConstructorArgs() == 1 && + "Can only have one argument to initializer of POD type."); + + const Expr *Init = E->getConstructorArg(0); + QualType AllocType = E->getAllocatedType(); + + if (!CGF.hasAggregateLLVMType(AllocType)) + CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr, + AllocType.isVolatileQualified(), AllocType); + else if (AllocType->isAnyComplexType()) + CGF.EmitComplexExprIntoAddr(Init, NewPtr, + AllocType.isVolatileQualified()); + else + CGF.EmitAggExpr(Init, NewPtr, AllocType.isVolatileQualified()); +} + +void +CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E, + llvm::Value *NewPtr, + llvm::Value *NumElements) { + // We have a POD type. 
+ if (E->getNumConstructorArgs() == 0) + return; + + const llvm::Type *SizeTy = ConvertType(getContext().getSizeType()); + + // Create a temporary for the loop index and initialize it with 0. + llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index"); + llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy); + Builder.CreateStore(Zero, IndexPtr); + + // Start the loop with a block that tests the condition. + llvm::BasicBlock *CondBlock = createBasicBlock("for.cond"); + llvm::BasicBlock *AfterFor = createBasicBlock("for.end"); + + EmitBlock(CondBlock); + + llvm::BasicBlock *ForBody = createBasicBlock("for.body"); + + // Generate: if (loop-index < number-of-elements fall to the loop body, + // otherwise, go to the block after the for-loop. + llvm::Value *Counter = Builder.CreateLoad(IndexPtr); + llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless"); + // If the condition is true, execute the body. + Builder.CreateCondBr(IsLess, ForBody, AfterFor); + + EmitBlock(ForBody); + + llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc"); + // Inside the loop body, emit the constructor call on the array element. + Counter = Builder.CreateLoad(IndexPtr); + llvm::Value *Address = Builder.CreateInBoundsGEP(NewPtr, Counter, + "arrayidx"); + StoreAnyExprIntoOneUnit(*this, E, Address); + + EmitBlock(ContinueBlock); + + // Emit the increment of the loop counter. + llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1); + Counter = Builder.CreateLoad(IndexPtr); + NextVal = Builder.CreateAdd(Counter, NextVal, "inc"); + Builder.CreateStore(NextVal, IndexPtr); + + // Finally, branch back up to the condition for the next iteration. + EmitBranch(CondBlock); + + // Emit the fall-through block. 
+ EmitBlock(AfterFor, true); +} + static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E, llvm::Value *NewPtr, llvm::Value *NumElements) { @@ -495,35 +565,32 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E, E->constructor_arg_end()); return; } + else { + CGF.EmitNewArrayInitializer(E, NewPtr, NumElements); + return; + } } - - QualType AllocType = E->getAllocatedType(); if (CXXConstructorDecl *Ctor = E->getConstructor()) { + // Per C++ [expr.new]p15, if we have an initializer, then we're performing + // direct initialization. C++ [dcl.init]p5 requires that we + // zero-initialize storage if there are no user-declared constructors. + if (E->hasInitializer() && + !Ctor->getParent()->hasUserDeclaredConstructor() && + !Ctor->getParent()->isEmpty()) + CGF.EmitNullInitialization(NewPtr, E->getAllocatedType()); + CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false, NewPtr, E->constructor_arg_begin(), E->constructor_arg_end()); return; } - // We have a POD type. 
if (E->getNumConstructorArgs() == 0) return; - - assert(E->getNumConstructorArgs() == 1 && - "Can only have one argument to initializer of POD type."); - - const Expr *Init = E->getConstructorArg(0); - - if (!CGF.hasAggregateLLVMType(AllocType)) - CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr, - AllocType.isVolatileQualified(), AllocType); - else if (AllocType->isAnyComplexType()) - CGF.EmitComplexExprIntoAddr(Init, NewPtr, - AllocType.isVolatileQualified()); - else - CGF.EmitAggExpr(Init, NewPtr, AllocType.isVolatileQualified()); + + StoreAnyExprIntoOneUnit(CGF, E, NewPtr); } llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) { @@ -770,7 +837,7 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) { if (const RecordType *RT = DeleteTy->getAs<RecordType>()) { if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl())) { if (!RD->hasTrivialDestructor()) { - const CXXDestructorDecl *Dtor = RD->getDestructor(getContext()); + const CXXDestructorDecl *Dtor = RD->getDestructor(); if (E->isArrayForm()) { llvm::Value *AllocatedObjectPtr; llvm::Value *NumElements; diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp index 0a0c914..0927319 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp @@ -131,14 +131,14 @@ public: // FIXME: CompoundLiteralExpr - ComplexPairTy EmitCast(Expr *Op, QualType DestTy); + ComplexPairTy EmitCast(CastExpr::CastKind CK, Expr *Op, QualType DestTy); ComplexPairTy VisitImplicitCastExpr(ImplicitCastExpr *E) { // Unlike for scalars, we don't have to worry about function->ptr demotion // here. 
- return EmitCast(E->getSubExpr(), E->getType()); + return EmitCast(E->getCastKind(), E->getSubExpr(), E->getType()); } ComplexPairTy VisitCastExpr(CastExpr *E) { - return EmitCast(E->getSubExpr(), E->getType()); + return EmitCast(E->getCastKind(), E->getSubExpr(), E->getType()); } ComplexPairTy VisitCallExpr(const CallExpr *E); ComplexPairTy VisitStmtExpr(const StmtExpr *E); @@ -181,7 +181,7 @@ public: ComplexPairTy VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) { return CGF.EmitCXXExprWithTemporaries(E).getComplexVal(); } - ComplexPairTy VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) { + ComplexPairTy VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) { assert(E->getType()->isAnyComplexType() && "Expected complex type!"); QualType Elem = E->getType()->getAs<ComplexType>()->getElementType(); llvm::Constant *Null = llvm::Constant::getNullValue(CGF.ConvertType(Elem)); @@ -339,11 +339,22 @@ ComplexPairTy ComplexExprEmitter::EmitComplexToComplexCast(ComplexPairTy Val, return Val; } -ComplexPairTy ComplexExprEmitter::EmitCast(Expr *Op, QualType DestTy) { +ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op, + QualType DestTy) { // Two cases here: cast from (complex to complex) and (scalar to complex). if (Op->getType()->isAnyComplexType()) return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy); + // FIXME: We should be looking at all of the cast kinds here, not + // cherry-picking the ones we have test cases for. + if (CK == CastExpr::CK_LValueBitCast) { + llvm::Value *V = CGF.EmitLValue(Op).getAddress(); + V = Builder.CreateBitCast(V, + CGF.ConvertType(CGF.getContext().getPointerType(DestTy))); + // FIXME: Are the qualifiers correct here? 
+ return EmitLoadOfComplex(V, DestTy.isVolatileQualified()); + } + // C99 6.3.1.7: When a value of real type is converted to a complex type, the // real part of the complex result value is determined by the rules of // conversion to the corresponding real type and the imaginary part of the @@ -521,22 +532,22 @@ EmitCompoundAssign(const CompoundAssignOperator *E, // improve codegen a little. It is possible for the RHS to be complex or // scalar. OpInfo.Ty = E->getComputationResultType(); - OpInfo.RHS = EmitCast(E->getRHS(), OpInfo.Ty); + OpInfo.RHS = EmitCast(CastExpr::CK_Unknown, E->getRHS(), OpInfo.Ty); - LValue LHSLV = CGF.EmitLValue(E->getLHS()); + LValue LHS = CGF.EmitLValue(E->getLHS()); // We know the LHS is a complex lvalue. ComplexPairTy LHSComplexPair; - if (LHSLV.isPropertyRef()) - LHSComplexPair = - CGF.EmitObjCPropertyGet(LHSLV.getPropertyRefExpr()).getComplexVal(); - else if (LHSLV.isKVCRef()) - LHSComplexPair = - CGF.EmitObjCPropertyGet(LHSLV.getKVCRefExpr()).getComplexVal(); + if (LHS.isPropertyRef()) + LHSComplexPair = + CGF.EmitObjCPropertyGet(LHS.getPropertyRefExpr()).getComplexVal(); + else if (LHS.isKVCRef()) + LHSComplexPair = + CGF.EmitObjCPropertyGet(LHS.getKVCRefExpr()).getComplexVal(); else - LHSComplexPair = EmitLoadOfComplex(LHSLV.getAddress(), - LHSLV.isVolatileQualified()); + LHSComplexPair = EmitLoadOfComplex(LHS.getAddress(), + LHS.isVolatileQualified()); - OpInfo.LHS=EmitComplexToComplexCast(LHSComplexPair, LHSTy, OpInfo.Ty); + OpInfo.LHS = EmitComplexToComplexCast(LHSComplexPair, LHSTy, OpInfo.Ty); // Expand the binary operator. ComplexPairTy Result = (this->*Func)(OpInfo); @@ -545,23 +556,26 @@ EmitCompoundAssign(const CompoundAssignOperator *E, Result = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy); // Store the result value into the LHS lvalue. 
- if (LHSLV.isPropertyRef()) - CGF.EmitObjCPropertySet(LHSLV.getPropertyRefExpr(), + if (LHS.isPropertyRef()) + CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(), RValue::getComplex(Result)); - else if (LHSLV.isKVCRef()) - CGF.EmitObjCPropertySet(LHSLV.getKVCRefExpr(), RValue::getComplex(Result)); + else if (LHS.isKVCRef()) + CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(), RValue::getComplex(Result)); else - EmitStoreOfComplex(Result, LHSLV.getAddress(), LHSLV.isVolatileQualified()); - // And now return the LHS + EmitStoreOfComplex(Result, LHS.getAddress(), LHS.isVolatileQualified()); + + // Restore the Ignore* flags. IgnoreReal = ignreal; IgnoreImag = ignimag; IgnoreRealAssign = ignreal; IgnoreImagAssign = ignimag; - if (LHSLV.isPropertyRef()) - return CGF.EmitObjCPropertyGet(LHSLV.getPropertyRefExpr()).getComplexVal(); - else if (LHSLV.isKVCRef()) - return CGF.EmitObjCPropertyGet(LHSLV.getKVCRefExpr()).getComplexVal(); - return EmitLoadOfComplex(LHSLV.getAddress(), LHSLV.isVolatileQualified()); + + // Objective-C property assignment never reloads the value following a store. + if (LHS.isPropertyRef() || LHS.isKVCRef()) + return Result; + + // Otherwise, reload the value. + return EmitLoadOfComplex(LHS.getAddress(), LHS.isVolatileQualified()); } ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) { @@ -569,8 +583,8 @@ ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) { TestAndClearIgnoreImag(); bool ignreal = TestAndClearIgnoreRealAssign(); bool ignimag = TestAndClearIgnoreImagAssign(); - assert(CGF.getContext().getCanonicalType(E->getLHS()->getType()) == - CGF.getContext().getCanonicalType(E->getRHS()->getType()) && + assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(), + E->getRHS()->getType()) && "Invalid assignment"); // Emit the RHS. 
ComplexPairTy Val = Visit(E->getRHS()); @@ -578,31 +592,26 @@ ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) { // Compute the address to store into. LValue LHS = CGF.EmitLValue(E->getLHS()); - // Store into it, if simple. - if (LHS.isSimple()) { - EmitStoreOfComplex(Val, LHS.getAddress(), LHS.isVolatileQualified()); - - // And now return the LHS - IgnoreReal = ignreal; - IgnoreImag = ignimag; - IgnoreRealAssign = ignreal; - IgnoreImagAssign = ignimag; - return EmitLoadOfComplex(LHS.getAddress(), LHS.isVolatileQualified()); - } - - // Otherwise we must have a property setter (no complex vector/bitfields). + // Store the result value into the LHS lvalue. if (LHS.isPropertyRef()) CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(), RValue::getComplex(Val)); - else + else if (LHS.isKVCRef()) CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(), RValue::getComplex(Val)); + else + EmitStoreOfComplex(Val, LHS.getAddress(), LHS.isVolatileQualified()); - // There is no reload after a store through a method, but we need to restore - // the Ignore* flags. + // Restore the Ignore* flags. IgnoreReal = ignreal; IgnoreImag = ignimag; IgnoreRealAssign = ignreal; IgnoreImagAssign = ignimag; - return Val; + + // Objective-C property assignment never reloads the value following a store. + if (LHS.isPropertyRef() || LHS.isKVCRef()) + return Val; + + // Otherwise, reload the value. 
+ return EmitLoadOfComplex(LHS.getAddress(), LHS.isVolatileQualified()); } ComplexPairTy ComplexExprEmitter::VisitBinComma(const BinaryOperator *E) { diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp index 551a47a..bbd256c 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp @@ -52,8 +52,8 @@ private: bool AppendField(const FieldDecl *Field, uint64_t FieldOffset, llvm::Constant *InitExpr); - bool AppendBitField(const FieldDecl *Field, uint64_t FieldOffset, - llvm::Constant *InitExpr); + void AppendBitField(const FieldDecl *Field, uint64_t FieldOffset, + llvm::ConstantInt *InitExpr); void AppendPadding(uint64_t NumBytes); @@ -123,14 +123,9 @@ AppendField(const FieldDecl *Field, uint64_t FieldOffset, return true; } -bool ConstStructBuilder:: - AppendBitField(const FieldDecl *Field, uint64_t FieldOffset, - llvm::Constant *InitCst) { - llvm::ConstantInt *CI = cast_or_null<llvm::ConstantInt>(InitCst); - // FIXME: Can this ever happen? - if (!CI) - return false; - +void ConstStructBuilder::AppendBitField(const FieldDecl *Field, + uint64_t FieldOffset, + llvm::ConstantInt *CI) { if (FieldOffset > NextFieldOffsetInBytes * 8) { // We need to add padding. uint64_t NumBytes = @@ -195,16 +190,43 @@ bool ConstStructBuilder:: Tmp = Tmp.shl(8 - BitsInPreviousByte); } - // Or in the bits that go into the previous byte. - if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(Elements.back())) + // 'or' in the bits that go into the previous byte. 
+ llvm::Value *LastElt = Elements.back(); + if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(LastElt)) Tmp |= Val->getValue(); - else - assert(isa<llvm::UndefValue>(Elements.back())); + else { + assert(isa<llvm::UndefValue>(LastElt)); + // If there is an undef field that we're adding to, it can either be a + // scalar undef (in which case, we just replace it with our field) or it + // is an array. If it is an array, we have to pull one byte off the + // array so that the other undef bytes stay around. + if (!isa<llvm::IntegerType>(LastElt->getType())) { + // The undef padding will be a multibyte array, create a new smaller + // padding and then an hole for our i8 to get plopped into. + assert(isa<llvm::ArrayType>(LastElt->getType()) && + "Expected array padding of undefs"); + const llvm::ArrayType *AT = cast<llvm::ArrayType>(LastElt->getType()); + assert(AT->getElementType()->isIntegerTy(8) && + AT->getNumElements() != 0 && + "Expected non-empty array padding of undefs"); + + // Remove the padding array. + NextFieldOffsetInBytes -= AT->getNumElements(); + Elements.pop_back(); + + // Add the padding back in two chunks. + AppendPadding(AT->getNumElements()-1); + AppendPadding(1); + assert(isa<llvm::UndefValue>(Elements.back()) && + Elements.back()->getType()->isIntegerTy(8) && + "Padding addition didn't work right"); + } + } Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp); if (FitsCompletelyInPreviousByte) - return true; + return; } while (FieldValue.getBitWidth() > 8) { @@ -248,7 +270,6 @@ bool ConstStructBuilder:: Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), FieldValue)); NextFieldOffsetInBytes++; - return true; } void ConstStructBuilder::AppendPadding(uint64_t NumBytes) { @@ -346,8 +367,8 @@ bool ConstStructBuilder::Build(InitListExpr *ILE) { return false; } else { // Otherwise we have a bitfield. 
- if (!AppendBitField(*Field, Layout.getFieldOffset(FieldNo), EltInit)) - return false; + AppendBitField(*Field, Layout.getFieldOffset(FieldNo), + cast<llvm::ConstantInt>(EltInit)); } } @@ -443,30 +464,8 @@ public: CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType()); llvm::Constant *Values[2]; - - // Get the function pointer (or index if this is a virtual function). - if (MD->isVirtual()) { - uint64_t Index = CGM.getVTables().getMethodVTableIndex(MD); - // FIXME: We shouldn't use / 8 here. - uint64_t PointerWidthInBytes = - CGM.getContext().Target.getPointerWidth(0) / 8; - - // Itanium C++ ABI 2.3: - // For a non-virtual function, this field is a simple function pointer. - // For a virtual function, it is 1 plus the virtual table offset - // (in bytes) of the function, represented as a ptrdiff_t. - Values[0] = llvm::ConstantInt::get(PtrDiffTy, - (Index * PointerWidthInBytes) + 1); - } else { - const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); - const llvm::Type *Ty = - CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD), - FPT->isVariadic()); - - llvm::Constant *FuncPtr = CGM.GetAddrOfFunction(MD, Ty); - Values[0] = llvm::ConstantExpr::getPtrToInt(FuncPtr, PtrDiffTy); - } + Values[0] = CGM.GetCXXMemberFunctionPointerValue(MD); // The adjustment will always be 0. 
Values[1] = llvm::ConstantInt::get(PtrDiffTy, 0); @@ -930,7 +929,7 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E, llvm::Constant *C = llvm::ConstantInt::get(VMContext, Result.Val.getInt()); - if (C->getType() == llvm::Type::getInt1Ty(VMContext)) { + if (C->getType()->isIntegerTy(1)) { const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType()); C = llvm::ConstantExpr::getZExt(C, BoolTy); } @@ -977,7 +976,7 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E, } llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E)); - if (C && C->getType() == llvm::Type::getInt1Ty(VMContext)) { + if (C && C->getType()->isIntegerTy(1)) { const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType()); C = llvm::ConstantExpr::getZExt(C, BoolTy); } @@ -1009,7 +1008,11 @@ FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T, // Go through all bases and fill in any null pointer to data members. for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), E = RD->bases_end(); I != E; ++I) { - assert(!I->isVirtual() && "Should not see virtual bases here!"); + if (I->isVirtual()) { + // FIXME: We should initialize null pointer to data members in virtual + // bases here. + continue; + } const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl()); @@ -1088,7 +1091,11 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) { // Go through all bases and fill in any null pointer to data members. for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), E = RD->bases_end(); I != E; ++I) { - assert(!I->isVirtual() && "Should not see virtual bases here!"); + if (I->isVirtual()) { + // FIXME: We should initialize null pointer to data members in virtual + // bases here. 
+ continue; + } const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl()); @@ -1131,6 +1138,11 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) { for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end(); I != E; ++I) { const FieldDecl *FD = *I; + + // Ignore bit fields. + if (FD->isBitField()) + continue; + unsigned FieldNo = Layout.getLLVMFieldNo(FD); Elements[FieldNo] = EmitNullConstant(FD->getType()); } diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp index 2108414..ef38209 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp @@ -40,7 +40,8 @@ struct BinOpInfo { Value *LHS; Value *RHS; QualType Ty; // Computation Type. - const BinaryOperator *E; + BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform + const Expr *E; // Entire expr, for error unsupported. May not be binop. }; namespace { @@ -125,7 +126,7 @@ public: Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) { return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue()); } - Value *VisitCXXZeroInitValueExpr(const CXXZeroInitValueExpr *E) { + Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) { return EmitNullValue(E->getType()); } Value *VisitGNUNullExpr(const GNUNullExpr *E) { @@ -212,22 +213,27 @@ public: Value *VisitBlockDeclRefExpr(const BlockDeclRefExpr *E); // Unary Operators. 
- Value *VisitPrePostIncDec(const UnaryOperator *E, bool isInc, bool isPre) { - LValue LV = EmitLValue(E->getSubExpr()); - return CGF.EmitScalarPrePostIncDec(E, LV, isInc, isPre); - } Value *VisitUnaryPostDec(const UnaryOperator *E) { - return VisitPrePostIncDec(E, false, false); + LValue LV = EmitLValue(E->getSubExpr()); + return EmitScalarPrePostIncDec(E, LV, false, false); } Value *VisitUnaryPostInc(const UnaryOperator *E) { - return VisitPrePostIncDec(E, true, false); + LValue LV = EmitLValue(E->getSubExpr()); + return EmitScalarPrePostIncDec(E, LV, true, false); } Value *VisitUnaryPreDec(const UnaryOperator *E) { - return VisitPrePostIncDec(E, false, true); + LValue LV = EmitLValue(E->getSubExpr()); + return EmitScalarPrePostIncDec(E, LV, false, true); } Value *VisitUnaryPreInc(const UnaryOperator *E) { - return VisitPrePostIncDec(E, true, true); + LValue LV = EmitLValue(E->getSubExpr()); + return EmitScalarPrePostIncDec(E, LV, true, true); } + + llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, + bool isInc, bool isPre); + + Value *VisitUnaryAddrOf(const UnaryOperator *E) { return EmitLValue(E->getSubExpr()).getAddress(); } @@ -291,9 +297,17 @@ public: // Binary Operators. 
Value *EmitMul(const BinOpInfo &Ops) { - if (CGF.getContext().getLangOptions().OverflowChecking - && Ops.Ty->isSignedIntegerType()) - return EmitOverflowCheckedBinOp(Ops); + if (Ops.Ty->isSignedIntegerType()) { + switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) { + case LangOptions::SOB_Undefined: + return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul"); + case LangOptions::SOB_Defined: + return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul"); + case LangOptions::SOB_Trapping: + return EmitOverflowCheckedBinOp(Ops); + } + } + if (Ops.LHS->getType()->isFPOrFPVectorTy()) return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul"); return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul"); @@ -320,7 +334,7 @@ public: BinOpInfo EmitBinOps(const BinaryOperator *E); LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E, Value *(ScalarExprEmitter::*F)(const BinOpInfo &), - Value *&BitFieldResult); + Value *&Result); Value *EmitCompoundAssign(const CompoundAssignOperator *E, Value *(ScalarExprEmitter::*F)(const BinOpInfo &)); @@ -435,8 +449,6 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType, if (DstType->isVoidType()) return 0; - llvm::LLVMContext &VMContext = CGF.getLLVMContext(); - // Handle conversions to bool first, they are special: comparisons against 0. if (DstType->isBooleanType()) return EmitConversionToBool(Src, SrcType); @@ -458,8 +470,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType, assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?"); // First, convert to the correct width so that we control the kind of // extension. 
- const llvm::Type *MiddleTy = - llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth); + const llvm::Type *MiddleTy = CGF.IntPtrTy; bool InputSigned = SrcType->isSignedIntegerType(); llvm::Value* IntResult = Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv"); @@ -481,16 +492,14 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType, // Insert the element in element zero of an undef vector llvm::Value *UnV = llvm::UndefValue::get(DstTy); - llvm::Value *Idx = - llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0); + llvm::Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, 0); UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp"); // Splat the element across to all elements llvm::SmallVector<llvm::Constant*, 16> Args; unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements(); for (unsigned i = 0; i < NumElements; i++) - Args.push_back(llvm::ConstantInt::get( - llvm::Type::getInt32Ty(VMContext), 0)); + Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 0)); llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements); llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat"); @@ -578,12 +587,104 @@ Value *ScalarExprEmitter::VisitExpr(Expr *E) { } Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) { - llvm::SmallVector<llvm::Constant*, 32> indices; - for (unsigned i = 2; i < E->getNumSubExprs(); i++) { - indices.push_back(cast<llvm::Constant>(CGF.EmitScalarExpr(E->getExpr(i)))); + // Vector Mask Case + if (E->getNumSubExprs() == 2 || + (E->getNumSubExprs() == 3 && E->getExpr(2)->getType()->isVectorType())) { + Value *LHS = CGF.EmitScalarExpr(E->getExpr(0)); + Value *RHS = CGF.EmitScalarExpr(E->getExpr(1)); + Value *Mask; + + const llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType()); + unsigned LHSElts = LTy->getNumElements(); + + if (E->getNumSubExprs() == 3) { + Mask = CGF.EmitScalarExpr(E->getExpr(2)); + + // Shuffle LHS & RHS into one input vector. 
+ llvm::SmallVector<llvm::Constant*, 32> concat; + for (unsigned i = 0; i != LHSElts; ++i) { + concat.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 2*i)); + concat.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 2*i+1)); + } + + Value* CV = llvm::ConstantVector::get(concat.begin(), concat.size()); + LHS = Builder.CreateShuffleVector(LHS, RHS, CV, "concat"); + LHSElts *= 2; + } else { + Mask = RHS; + } + + const llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType()); + llvm::Constant* EltMask; + + // Treat vec3 like vec4. + if ((LHSElts == 6) && (E->getNumSubExprs() == 3)) + EltMask = llvm::ConstantInt::get(MTy->getElementType(), + (1 << llvm::Log2_32(LHSElts+2))-1); + else if ((LHSElts == 3) && (E->getNumSubExprs() == 2)) + EltMask = llvm::ConstantInt::get(MTy->getElementType(), + (1 << llvm::Log2_32(LHSElts+1))-1); + else + EltMask = llvm::ConstantInt::get(MTy->getElementType(), + (1 << llvm::Log2_32(LHSElts))-1); + + // Mask off the high bits of each shuffle index. + llvm::SmallVector<llvm::Constant *, 32> MaskV; + for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) + MaskV.push_back(EltMask); + + Value* MaskBits = llvm::ConstantVector::get(MaskV.begin(), MaskV.size()); + Mask = Builder.CreateAnd(Mask, MaskBits, "mask"); + + // newv = undef + // mask = mask & maskbits + // for each elt + // n = extract mask i + // x = extract val n + // newv = insert newv, x, i + const llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(), + MTy->getNumElements()); + Value* NewV = llvm::UndefValue::get(RTy); + for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) { + Value *Indx = llvm::ConstantInt::get(CGF.Int32Ty, i); + Indx = Builder.CreateExtractElement(Mask, Indx, "shuf_idx"); + Indx = Builder.CreateZExt(Indx, CGF.Int32Ty, "idx_zext"); + + // Handle vec3 special since the index will be off by one for the RHS. 
+ if ((LHSElts == 6) && (E->getNumSubExprs() == 3)) { + Value *cmpIndx, *newIndx; + cmpIndx = Builder.CreateICmpUGT(Indx, + llvm::ConstantInt::get(CGF.Int32Ty, 3), + "cmp_shuf_idx"); + newIndx = Builder.CreateSub(Indx, llvm::ConstantInt::get(CGF.Int32Ty,1), + "shuf_idx_adj"); + Indx = Builder.CreateSelect(cmpIndx, newIndx, Indx, "sel_shuf_idx"); + } + Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt"); + NewV = Builder.CreateInsertElement(NewV, VExt, Indx, "shuf_ins"); + } + return NewV; } + Value* V1 = CGF.EmitScalarExpr(E->getExpr(0)); Value* V2 = CGF.EmitScalarExpr(E->getExpr(1)); + + // Handle vec3 special since the index will be off by one for the RHS. + llvm::SmallVector<llvm::Constant*, 32> indices; + for (unsigned i = 2; i < E->getNumSubExprs(); i++) { + llvm::Constant *C = cast<llvm::Constant>(CGF.EmitScalarExpr(E->getExpr(i))); + const llvm::VectorType *VTy = cast<llvm::VectorType>(V1->getType()); + if (VTy->getNumElements() == 3) { + if (llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(C)) { + uint64_t cVal = CI->getZExtValue(); + if (cVal > 3) { + C = llvm::ConstantInt::get(C->getType(), cVal-1); + } + } + } + indices.push_back(C); + } + Value* SV = llvm::ConstantVector::get(indices.begin(), indices.size()); return Builder.CreateShuffleVector(V1, V2, SV, "shuffle"); } @@ -614,10 +715,7 @@ Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) { Value *Base = Visit(E->getBase()); Value *Idx = Visit(E->getIdx()); bool IdxSigned = E->getIdx()->getType()->isSignedIntegerType(); - Idx = Builder.CreateIntCast(Idx, - llvm::Type::getInt32Ty(CGF.getLLVMContext()), - IdxSigned, - "vecidxcast"); + Idx = Builder.CreateIntCast(Idx, CGF.Int32Ty, IdxSigned, "vecidxcast"); return Builder.CreateExtractElement(Base, Idx, "vecext"); } @@ -646,7 +744,6 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { return Visit(E->getInit(0)); unsigned ResElts = VType->getNumElements(); - const llvm::Type *I32Ty = 
llvm::Type::getInt32Ty(CGF.getLLVMContext()); // Loop over initializers collecting the Value for each, and remembering // whether the source was swizzle (ExtVectorElementExpr). This will allow @@ -677,7 +774,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { // insert into undef -> shuffle (src, undef) Args.push_back(C); for (unsigned j = 1; j != ResElts; ++j) - Args.push_back(llvm::UndefValue::get(I32Ty)); + Args.push_back(llvm::UndefValue::get(CGF.Int32Ty)); LHS = EI->getVectorOperand(); RHS = V; @@ -686,11 +783,11 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { // insert into undefshuffle && size match -> shuffle (v, src) llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V); for (unsigned j = 0; j != CurIdx; ++j) - Args.push_back(getMaskElt(SVV, j, 0, I32Ty)); - Args.push_back(llvm::ConstantInt::get(I32Ty, + Args.push_back(getMaskElt(SVV, j, 0, CGF.Int32Ty)); + Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, ResElts + C->getZExtValue())); for (unsigned j = CurIdx + 1; j != ResElts; ++j) - Args.push_back(llvm::UndefValue::get(I32Ty)); + Args.push_back(llvm::UndefValue::get(CGF.Int32Ty)); LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0); RHS = EI->getVectorOperand(); @@ -704,7 +801,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { } } } - Value *Idx = llvm::ConstantInt::get(I32Ty, CurIdx); + Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, CurIdx); V = Builder.CreateInsertElement(V, Init, Idx, "vecinit"); VIsUndefShuffle = false; ++CurIdx; @@ -728,15 +825,15 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { // this shuffle directly into it. 
if (VIsUndefShuffle) { Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0, - I32Ty)); + CGF.Int32Ty)); } else { - Args.push_back(llvm::ConstantInt::get(I32Ty, j)); + Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j)); } } for (unsigned j = 0, je = InitElts; j != je; ++j) - Args.push_back(getMaskElt(SVI, j, Offset, I32Ty)); + Args.push_back(getMaskElt(SVI, j, Offset, CGF.Int32Ty)); for (unsigned j = CurIdx + InitElts; j != ResElts; ++j) - Args.push_back(llvm::UndefValue::get(I32Ty)); + Args.push_back(llvm::UndefValue::get(CGF.Int32Ty)); if (VIsUndefShuffle) V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0); @@ -749,20 +846,20 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { // to the vector initializer into V. if (Args.empty()) { for (unsigned j = 0; j != InitElts; ++j) - Args.push_back(llvm::ConstantInt::get(I32Ty, j)); + Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j)); for (unsigned j = InitElts; j != ResElts; ++j) - Args.push_back(llvm::UndefValue::get(I32Ty)); + Args.push_back(llvm::UndefValue::get(CGF.Int32Ty)); llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], ResElts); Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT), Mask, "vext"); Args.clear(); for (unsigned j = 0; j != CurIdx; ++j) - Args.push_back(llvm::ConstantInt::get(I32Ty, j)); + Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j)); for (unsigned j = 0; j != InitElts; ++j) - Args.push_back(llvm::ConstantInt::get(I32Ty, j+Offset)); + Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j+Offset)); for (unsigned j = CurIdx + InitElts; j != ResElts; ++j) - Args.push_back(llvm::UndefValue::get(I32Ty)); + Args.push_back(llvm::UndefValue::get(CGF.Int32Ty)); } // If V is undef, make sure it ends up on the RHS of the shuffle to aid @@ -781,7 +878,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) { // Emit remaining default initializers for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) { - Value *Idx = 
llvm::ConstantInt::get(I32Ty, CurIdx); + Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, CurIdx); llvm::Value *Init = llvm::Constant::getNullValue(EltTy); V = Builder.CreateInsertElement(V, Init, Idx, "vecinit"); } @@ -828,6 +925,15 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) { //assert(0 && "Unknown cast kind!"); break; + case CastExpr::CK_LValueBitCast: { + Value *V = EmitLValue(E).getAddress(); + V = Builder.CreateBitCast(V, + ConvertType(CGF.getContext().getPointerType(DestTy))); + // FIXME: Are the qualifiers correct here? + return EmitLoadOfLValue(LValue::MakeAddr(V, CGF.MakeQualifiers(DestTy)), + DestTy); + } + case CastExpr::CK_AnyPointerToObjCPointerCast: case CastExpr::CK_AnyPointerToBlockPointerCast: case CastExpr::CK_BitCast: { @@ -905,13 +1011,13 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) { std::swap(DerivedDecl, BaseDecl); if (llvm::Constant *Adj = - CGF.CGM.GetNonVirtualBaseClassOffset(DerivedDecl, - CE->getBasePath())) { + CGF.CGM.GetNonVirtualBaseClassOffset(DerivedDecl, CE->getBasePath())){ if (CE->getCastKind() == CastExpr::CK_DerivedToBaseMemberPointer) - Src = Builder.CreateSub(Src, Adj, "adj"); + Src = Builder.CreateNSWSub(Src, Adj, "adj"); else - Src = Builder.CreateAdd(Src, Adj, "adj"); + Src = Builder.CreateNSWAdd(Src, Adj, "adj"); } + return Src; } @@ -924,8 +1030,7 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) { // First, convert to the correct width so that we control the kind of // extension. 
- const llvm::Type *MiddleTy = - llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth); + const llvm::Type *MiddleTy = CGF.IntPtrTy; bool InputSigned = E->getType()->isSignedIntegerType(); llvm::Value* IntResult = Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv"); @@ -946,16 +1051,14 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) { // Insert the element in element zero of an undef vector llvm::Value *UnV = llvm::UndefValue::get(DstTy); - llvm::Value *Idx = - llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0); + llvm::Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, 0); UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp"); // Splat the element across to all elements llvm::SmallVector<llvm::Constant*, 16> Args; unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements(); for (unsigned i = 0; i < NumElements; i++) - Args.push_back(llvm::ConstantInt::get( - llvm::Type::getInt32Ty(VMContext), 0)); + Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 0)); llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements); llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat"); @@ -1020,12 +1123,126 @@ Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) { // Unary Operators //===----------------------------------------------------------------------===// +llvm::Value *ScalarExprEmitter:: +EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, + bool isInc, bool isPre) { + + QualType ValTy = E->getSubExpr()->getType(); + llvm::Value *InVal = EmitLoadOfLValue(LV, ValTy); + + int AmountVal = isInc ? 
1 : -1; + + if (ValTy->isPointerType() && + ValTy->getAs<PointerType>()->isVariableArrayType()) { + // The amount of the addition/subtraction needs to account for the VLA size + CGF.ErrorUnsupported(E, "VLA pointer inc/dec"); + } + + llvm::Value *NextVal; + if (const llvm::PointerType *PT = + dyn_cast<llvm::PointerType>(InVal->getType())) { + llvm::Constant *Inc = llvm::ConstantInt::get(CGF.Int32Ty, AmountVal); + if (!isa<llvm::FunctionType>(PT->getElementType())) { + QualType PTEE = ValTy->getPointeeType(); + if (const ObjCObjectType *OIT = PTEE->getAs<ObjCObjectType>()) { + // Handle interface types, which are not represented with a concrete + // type. + int size = CGF.getContext().getTypeSize(OIT) / 8; + if (!isInc) + size = -size; + Inc = llvm::ConstantInt::get(Inc->getType(), size); + const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext); + InVal = Builder.CreateBitCast(InVal, i8Ty); + NextVal = Builder.CreateGEP(InVal, Inc, "add.ptr"); + llvm::Value *lhs = LV.getAddress(); + lhs = Builder.CreateBitCast(lhs, llvm::PointerType::getUnqual(i8Ty)); + LV = LValue::MakeAddr(lhs, CGF.MakeQualifiers(ValTy)); + } else + NextVal = Builder.CreateInBoundsGEP(InVal, Inc, "ptrincdec"); + } else { + const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext); + NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp"); + NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec"); + NextVal = Builder.CreateBitCast(NextVal, InVal->getType()); + } + } else if (InVal->getType()->isIntegerTy(1) && isInc) { + // Bool++ is an interesting case, due to promotion rules, we get: + // Bool++ -> Bool = Bool+1 -> Bool = (int)Bool+1 -> + // Bool = ((int)Bool+1) != 0 + // An interesting aspect of this is that increment is always true. + // Decrement does not have this property. 
+ NextVal = llvm::ConstantInt::getTrue(VMContext); + } else if (isa<llvm::IntegerType>(InVal->getType())) { + NextVal = llvm::ConstantInt::get(InVal->getType(), AmountVal); + + if (!ValTy->isSignedIntegerType()) + // Unsigned integer inc is always two's complement. + NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec"); + else { + switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) { + case LangOptions::SOB_Undefined: + NextVal = Builder.CreateNSWAdd(InVal, NextVal, isInc ? "inc" : "dec"); + break; + case LangOptions::SOB_Defined: + NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec"); + break; + case LangOptions::SOB_Trapping: + BinOpInfo BinOp; + BinOp.LHS = InVal; + BinOp.RHS = NextVal; + BinOp.Ty = E->getType(); + BinOp.Opcode = BinaryOperator::Add; + BinOp.E = E; + return EmitOverflowCheckedBinOp(BinOp); + } + } + } else { + // Add the inc/dec to the real part. + if (InVal->getType()->isFloatTy()) + NextVal = + llvm::ConstantFP::get(VMContext, + llvm::APFloat(static_cast<float>(AmountVal))); + else if (InVal->getType()->isDoubleTy()) + NextVal = + llvm::ConstantFP::get(VMContext, + llvm::APFloat(static_cast<double>(AmountVal))); + else { + llvm::APFloat F(static_cast<float>(AmountVal)); + bool ignored; + F.convert(CGF.Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero, + &ignored); + NextVal = llvm::ConstantFP::get(VMContext, F); + } + NextVal = Builder.CreateFAdd(InVal, NextVal, isInc ? "inc" : "dec"); + } + + // Store the updated result through the lvalue. + if (LV.isBitField()) + CGF.EmitStoreThroughBitfieldLValue(RValue::get(NextVal), LV, ValTy, &NextVal); + else + CGF.EmitStoreThroughLValue(RValue::get(NextVal), LV, ValTy); + + // If this is a postinc, return the value read from memory, otherwise use the + // updated value. + return isPre ? 
NextVal : InVal; +} + + + Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) { TestAndClearIgnoreResultAssign(); - Value *Op = Visit(E->getSubExpr()); - if (Op->getType()->isFPOrFPVectorTy()) - return Builder.CreateFNeg(Op, "neg"); - return Builder.CreateNeg(Op, "neg"); + // Emit unary minus with EmitSub so we handle overflow cases etc. + BinOpInfo BinOp; + BinOp.RHS = Visit(E->getSubExpr()); + + if (BinOp.RHS->getType()->isFPOrFPVectorTy()) + BinOp.LHS = llvm::ConstantFP::getZeroValueForNegation(BinOp.RHS->getType()); + else + BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType()); + BinOp.Ty = E->getType(); + BinOp.Opcode = BinaryOperator::Sub; + BinOp.E = E; + return EmitSub(BinOp); } Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) { @@ -1126,6 +1343,7 @@ BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) { Result.LHS = Visit(E->getLHS()); Result.RHS = Visit(E->getRHS()); Result.Ty = E->getType(); + Result.Opcode = E->getOpcode(); Result.E = E; return Result; } @@ -1133,9 +1351,8 @@ BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) { LValue ScalarExprEmitter::EmitCompoundAssignLValue( const CompoundAssignOperator *E, Value *(ScalarExprEmitter::*Func)(const BinOpInfo &), - Value *&BitFieldResult) { + Value *&Result) { QualType LHSTy = E->getLHS()->getType(); - BitFieldResult = 0; BinOpInfo OpInfo; if (E->getComputationResultType()->isAnyComplexType()) { @@ -1144,7 +1361,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue( // actually need the imaginary part of the RHS for multiplication and // division.) CGF.ErrorUnsupported(E, "complex compound assignment"); - llvm::UndefValue::get(CGF.ConvertType(E->getType())); + Result = llvm::UndefValue::get(CGF.ConvertType(E->getType())); return LValue(); } @@ -1152,6 +1369,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue( // first, plus this should improve codegen a little. 
OpInfo.RHS = Visit(E->getRHS()); OpInfo.Ty = E->getComputationResultType(); + OpInfo.Opcode = E->getOpcode(); OpInfo.E = E; // Load/convert the LHS. LValue LHSLV = EmitCheckedLValue(E->getLHS()); @@ -1160,7 +1378,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue( E->getComputationLHSType()); // Expand the binary operator. - Value *Result = (this->*Func)(OpInfo); + Result = (this->*Func)(OpInfo); // Convert the result back to the LHS type. Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy); @@ -1169,30 +1387,35 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue( // specially because the result is altered by the store, i.e., [C99 6.5.16p1] // 'An assignment expression has the value of the left operand after the // assignment...'. - if (LHSLV.isBitField()) { - if (!LHSLV.isVolatileQualified()) { - CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy, - &Result); - BitFieldResult = Result; - return LHSLV; - } else - CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy); - } else + if (LHSLV.isBitField()) + CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy, + &Result); + else CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV, LHSTy); + return LHSLV; } Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E, Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) { bool Ignore = TestAndClearIgnoreResultAssign(); - Value *BitFieldResult; - LValue LHSLV = EmitCompoundAssignLValue(E, Func, BitFieldResult); - if (BitFieldResult) - return BitFieldResult; - + Value *RHS; + LValue LHS = EmitCompoundAssignLValue(E, Func, RHS); + + // If the result is clearly ignored, return now. if (Ignore) return 0; - return EmitLoadOfLValue(LHSLV, E->getType()); + + // Objective-C property assignment never reloads the value following a store. + if (LHS.isPropertyRef() || LHS.isKVCRef()) + return RHS; + + // If the lvalue is non-volatile, return the computed value of the assignment. 
+ if (!LHS.isVolatileQualified()) + return RHS; + + // Otherwise, reload the value. + return EmitLoadOfLValue(LHS, E->getType()); } @@ -1217,7 +1440,7 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) { unsigned IID; unsigned OpID = 0; - switch (Ops.E->getOpcode()) { + switch (Ops.Opcode) { case BinaryOperator::Add: case BinaryOperator::AddAssign: OpID = 1; @@ -1265,20 +1488,20 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) { // long long *__overflow_handler)(long long a, long long b, char op, // char width) std::vector<const llvm::Type*> handerArgTypes; - handerArgTypes.push_back(llvm::Type::getInt64Ty(VMContext)); - handerArgTypes.push_back(llvm::Type::getInt64Ty(VMContext)); + handerArgTypes.push_back(CGF.Int64Ty); + handerArgTypes.push_back(CGF.Int64Ty); handerArgTypes.push_back(llvm::Type::getInt8Ty(VMContext)); handerArgTypes.push_back(llvm::Type::getInt8Ty(VMContext)); - llvm::FunctionType *handlerTy = llvm::FunctionType::get( - llvm::Type::getInt64Ty(VMContext), handerArgTypes, false); + llvm::FunctionType *handlerTy = + llvm::FunctionType::get(CGF.Int64Ty, handerArgTypes, false); llvm::Value *handlerFunction = CGF.CGM.getModule().getOrInsertGlobal("__overflow_handler", llvm::PointerType::getUnqual(handlerTy)); handlerFunction = Builder.CreateLoad(handlerFunction); llvm::Value *handlerResult = Builder.CreateCall4(handlerFunction, - Builder.CreateSExt(Ops.LHS, llvm::Type::getInt64Ty(VMContext)), - Builder.CreateSExt(Ops.RHS, llvm::Type::getInt64Ty(VMContext)), + Builder.CreateSExt(Ops.LHS, CGF.Int64Ty), + Builder.CreateSExt(Ops.RHS, CGF.Int64Ty), llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), OpID), llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), cast<llvm::IntegerType>(opTy)->getBitWidth())); @@ -1300,49 +1523,56 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) { Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) { if (!Ops.Ty->isAnyPointerType()) { - 
if (CGF.getContext().getLangOptions().OverflowChecking && - Ops.Ty->isSignedIntegerType()) - return EmitOverflowCheckedBinOp(Ops); - + if (Ops.Ty->isSignedIntegerType()) { + switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) { + case LangOptions::SOB_Undefined: + return Builder.CreateNSWAdd(Ops.LHS, Ops.RHS, "add"); + case LangOptions::SOB_Defined: + return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add"); + case LangOptions::SOB_Trapping: + return EmitOverflowCheckedBinOp(Ops); + } + } + if (Ops.LHS->getType()->isFPOrFPVectorTy()) return Builder.CreateFAdd(Ops.LHS, Ops.RHS, "add"); - // Signed integer overflow is undefined behavior. - if (Ops.Ty->isSignedIntegerType()) - return Builder.CreateNSWAdd(Ops.LHS, Ops.RHS, "add"); - return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add"); } + // Must have binary (not unary) expr here. Unary pointer decrement doesn't + // use this path. + const BinaryOperator *BinOp = cast<BinaryOperator>(Ops.E); + if (Ops.Ty->isPointerType() && Ops.Ty->getAs<PointerType>()->isVariableArrayType()) { // The amount of the addition needs to account for the VLA size - CGF.ErrorUnsupported(Ops.E, "VLA pointer addition"); + CGF.ErrorUnsupported(BinOp, "VLA pointer addition"); } + Value *Ptr, *Idx; Expr *IdxExp; - const PointerType *PT = Ops.E->getLHS()->getType()->getAs<PointerType>(); + const PointerType *PT = BinOp->getLHS()->getType()->getAs<PointerType>(); const ObjCObjectPointerType *OPT = - Ops.E->getLHS()->getType()->getAs<ObjCObjectPointerType>(); + BinOp->getLHS()->getType()->getAs<ObjCObjectPointerType>(); if (PT || OPT) { Ptr = Ops.LHS; Idx = Ops.RHS; - IdxExp = Ops.E->getRHS(); + IdxExp = BinOp->getRHS(); } else { // int + pointer - PT = Ops.E->getRHS()->getType()->getAs<PointerType>(); - OPT = Ops.E->getRHS()->getType()->getAs<ObjCObjectPointerType>(); + PT = BinOp->getRHS()->getType()->getAs<PointerType>(); + OPT = BinOp->getRHS()->getType()->getAs<ObjCObjectPointerType>(); assert((PT || OPT) && "Invalid add expr"); Ptr = 
Ops.RHS; Idx = Ops.LHS; - IdxExp = Ops.E->getLHS(); + IdxExp = BinOp->getLHS(); } unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth(); if (Width < CGF.LLVMPointerWidth) { // Zero or sign extend the pointer value based on whether the index is // signed or not. - const llvm::Type *IdxType = - llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth); + const llvm::Type *IdxType = CGF.IntPtrTy; if (IdxExp->getType()->isSignedIntegerType()) Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext"); else @@ -1376,30 +1606,37 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) { Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) { if (!isa<llvm::PointerType>(Ops.LHS->getType())) { - if (CGF.getContext().getLangOptions().OverflowChecking - && Ops.Ty->isSignedIntegerType()) - return EmitOverflowCheckedBinOp(Ops); - + if (Ops.Ty->isSignedIntegerType()) { + switch (CGF.getContext().getLangOptions().getSignedOverflowBehavior()) { + case LangOptions::SOB_Undefined: + return Builder.CreateNSWSub(Ops.LHS, Ops.RHS, "sub"); + case LangOptions::SOB_Defined: + return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub"); + case LangOptions::SOB_Trapping: + return EmitOverflowCheckedBinOp(Ops); + } + } + if (Ops.LHS->getType()->isFPOrFPVectorTy()) return Builder.CreateFSub(Ops.LHS, Ops.RHS, "sub"); - // Signed integer overflow is undefined behavior. - if (Ops.Ty->isSignedIntegerType()) - return Builder.CreateNSWSub(Ops.LHS, Ops.RHS, "sub"); - return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub"); } - if (Ops.E->getLHS()->getType()->isPointerType() && - Ops.E->getLHS()->getType()->getAs<PointerType>()->isVariableArrayType()) { + // Must have binary (not unary) expr here. Unary pointer increment doesn't + // use this path. 
+ const BinaryOperator *BinOp = cast<BinaryOperator>(Ops.E); + + if (BinOp->getLHS()->getType()->isPointerType() && + BinOp->getLHS()->getType()->getAs<PointerType>()->isVariableArrayType()) { // The amount of the addition needs to account for the VLA size for // ptr-int // The amount of the division needs to account for the VLA size for // ptr-ptr. - CGF.ErrorUnsupported(Ops.E, "VLA pointer subtraction"); + CGF.ErrorUnsupported(BinOp, "VLA pointer subtraction"); } - const QualType LHSType = Ops.E->getLHS()->getType(); + const QualType LHSType = BinOp->getLHS()->getType(); const QualType LHSElementType = LHSType->getPointeeType(); if (!isa<llvm::PointerType>(Ops.RHS->getType())) { // pointer - int @@ -1408,9 +1645,8 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) { if (Width < CGF.LLVMPointerWidth) { // Zero or sign extend the pointer value based on whether the index is // signed or not. - const llvm::Type *IdxType = - llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth); - if (Ops.E->getRHS()->getType()->isSignedIntegerType()) + const llvm::Type *IdxType = CGF.IntPtrTy; + if (BinOp->getRHS()->getType()->isSignedIntegerType()) Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext"); else Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext"); @@ -1615,17 +1851,25 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { // because the result is altered by the store, i.e., [C99 6.5.16p1] // 'An assignment expression has the value of the left operand after // the assignment...'. 
- if (LHS.isBitField()) { - if (!LHS.isVolatileQualified()) { - CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType(), - &RHS); - return RHS; - } else - CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType()); - } else + if (LHS.isBitField()) + CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType(), + &RHS); + else CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS, E->getType()); + + // If the result is clearly ignored, return now. if (Ignore) return 0; + + // Objective-C property assignment never reloads the value following a store. + if (LHS.isPropertyRef() || LHS.isKVCRef()) + return RHS; + + // If the lvalue is non-volatile, return the computed value of the assignment. + if (!LHS.isVolatileQualified()) + return RHS; + + // Otherwise, reload the value. return EmitLoadOfLValue(LHS, E->getType()); } @@ -1925,6 +2169,13 @@ Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src, DstTy); } + +llvm::Value *CodeGenFunction:: +EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, + bool isInc, bool isPre) { + return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre); +} + LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) { llvm::Value *V; // object->isa or (*object).isa @@ -1958,12 +2209,12 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) { LValue CodeGenFunction::EmitCompoundAssignOperatorLValue( const CompoundAssignOperator *E) { ScalarExprEmitter Scalar(*this); - Value *BitFieldResult = 0; + Value *Result = 0; switch (E->getOpcode()) { #define COMPOUND_OP(Op) \ case BinaryOperator::Op##Assign: \ return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \ - BitFieldResult) + Result) COMPOUND_OP(Mul); COMPOUND_OP(Div); COMPOUND_OP(Rem); diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp index 7c842a9..e735a61 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp +++ 
b/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp @@ -90,11 +90,14 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E, CallArgList Args; EmitCallArgs(Args, E->getMethodDecl(), E->arg_begin(), E->arg_end()); + QualType ResultType = + E->getMethodDecl() ? E->getMethodDecl()->getResultType() : E->getType(); + if (isSuperMessage) { // super is only valid in an Objective-C method const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl); bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext()); - return Runtime.GenerateMessageSendSuper(*this, Return, E->getType(), + return Runtime.GenerateMessageSendSuper(*this, Return, ResultType, E->getSelector(), OMD->getClassInterface(), isCategoryImpl, @@ -104,7 +107,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E, E->getMethodDecl()); } - return Runtime.GenerateMessageSend(*this, Return, E->getType(), + return Runtime.GenerateMessageSend(*this, Return, ResultType, E->getSelector(), Receiver, Args, OID, E->getMethodDecl()); @@ -458,7 +461,7 @@ void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP, LoadObjCSelf(), Ivar, 0); const RecordType *RT = FieldType->getAs<RecordType>(); CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl()); - CXXDestructorDecl *Dtor = FieldClassDecl->getDestructor(getContext()); + CXXDestructorDecl *Dtor = FieldClassDecl->getDestructor(); if (!Dtor->isTrivial()) { if (Array) { const llvm::Type *BasePtr = ConvertType(FieldType); @@ -595,7 +598,8 @@ void CodeGenFunction::EmitObjCPropertySet(const Expr *Exp, Args); } else if (const ObjCImplicitSetterGetterRefExpr *E = dyn_cast<ObjCImplicitSetterGetterRefExpr>(Exp)) { - Selector S = E->getSetterMethod()->getSelector(); + const ObjCMethodDecl *SetterMD = E->getSetterMethod(); + Selector S = SetterMD->getSelector(); CallArgList Args; llvm::Value *Receiver; if (E->getInterfaceDecl()) { @@ -606,7 +610,8 @@ void CodeGenFunction::EmitObjCPropertySet(const Expr *Exp, 
return; } else Receiver = EmitScalarExpr(E->getBase()); - Args.push_back(std::make_pair(Src, E->getType())); + ObjCMethodDecl::param_iterator P = SetterMD->param_begin(); + Args.push_back(std::make_pair(Src, (*P)->getType())); CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(), getContext().VoidTy, S, Receiver, @@ -778,8 +783,8 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){ llvm::ConstantInt::get(UnsignedLongLTy, 1)); Builder.CreateStore(Counter, CounterPtr); - llvm::BasicBlock *LoopEnd = createBasicBlock("loopend"); - llvm::BasicBlock *AfterBody = createBasicBlock("afterbody"); + JumpDest LoopEnd = getJumpDestInCurrentScope("loopend"); + JumpDest AfterBody = getJumpDestInCurrentScope("afterbody"); BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody)); @@ -787,7 +792,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){ BreakContinueStack.pop_back(); - EmitBlock(AfterBody); + EmitBlock(AfterBody.Block); llvm::BasicBlock *FetchMore = createBasicBlock("fetchmore"); @@ -823,11 +828,11 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){ LV.getAddress()); } - EmitBlock(LoopEnd); + EmitBlock(LoopEnd.Block); } void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) { - CGM.getObjCRuntime().EmitTryOrSynchronizedStmt(*this, S); + CGM.getObjCRuntime().EmitTryStmt(*this, S); } void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) { @@ -836,7 +841,9 @@ void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) { void CodeGenFunction::EmitObjCAtSynchronizedStmt( const ObjCAtSynchronizedStmt &S) { - CGM.getObjCRuntime().EmitTryOrSynchronizedStmt(*this, S); + CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S); } CGObjCRuntime::~CGObjCRuntime() {} + + diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp index 6c25afe..f3c80bc 100644 --- 
a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp @@ -17,6 +17,7 @@ #include "CGObjCRuntime.h" #include "CodeGenModule.h" #include "CodeGenFunction.h" +#include "CGException.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Decl.h" @@ -162,7 +163,8 @@ public: const ObjCMethodDecl *Method); virtual llvm::Value *GetClass(CGBuilderTy &Builder, const ObjCInterfaceDecl *OID); - virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel); + virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel, + bool lval = false); virtual llvm::Value *GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl *Method); @@ -179,8 +181,10 @@ public: virtual llvm::Function *GetCopyStructFunction(); virtual llvm::Constant *EnumerationMutationFunction(); - virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, - const Stmt &S); + virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF, + const ObjCAtTryStmt &S); + virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF, + const ObjCAtSynchronizedStmt &S); virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF, const ObjCAtThrowStmt &S); virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF, @@ -197,7 +201,7 @@ public: virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF, llvm::Value *DestPtr, llvm::Value *SrcPtr, - QualType Ty); + llvm::Value *Size); virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF, QualType ObjectTy, llvm::Value *BaseValue, @@ -360,14 +364,16 @@ llvm::Value *CGObjCGNU::GetClass(CGBuilderTy &Builder, return Builder.CreateCall(ClassLookupFn, ClassName); } -llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, Selector Sel) { +llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, Selector Sel, + bool lval) { llvm::GlobalAlias *&US = UntypedSelectors[Sel.getAsString()]; if (US == 0) US = new llvm::GlobalAlias(llvm::PointerType::getUnqual(SelectorTy), 
llvm::GlobalValue::PrivateLinkage, ".objc_untyped_selector_alias"+Sel.getAsString(), NULL, &TheModule); - + if (lval) + return US; return Builder.CreateLoad(US); } @@ -624,8 +630,8 @@ CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF, // to be on the stack / in those registers at the time) on most platforms, // and generates a SegV on SPARC. With LLVM it corrupts the stack. bool isPointerSizedReturn = false; - if (ResultType->isAnyPointerType() || ResultType->isIntegralType() || - ResultType->isVoidType()) + if (ResultType->isAnyPointerType() || + ResultType->isIntegralOrEnumerationType() || ResultType->isVoidType()) isPointerSizedReturn = true; llvm::BasicBlock *startBB = 0; @@ -1848,245 +1854,167 @@ llvm::Constant *CGObjCGNU::EnumerationMutationFunction() { return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation"); } -void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, - const Stmt &S) { - // Pointer to the personality function - llvm::Constant *Personality = - CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty(VMContext), - true), - "__gnu_objc_personality_v0"); - Personality = llvm::ConstantExpr::getBitCast(Personality, PtrTy); - std::vector<const llvm::Type*> Params; - Params.push_back(PtrTy); - llvm::Value *RethrowFn = - CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), - Params, false), "_Unwind_Resume"); - - bool isTry = isa<ObjCAtTryStmt>(S); - llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try"); - llvm::BasicBlock *PrevLandingPad = CGF.getInvokeDest(); - llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler"); - llvm::BasicBlock *CatchInCatch = CGF.createBasicBlock("catch.rethrow"); - llvm::BasicBlock *FinallyBlock = CGF.createBasicBlock("finally"); - llvm::BasicBlock *FinallyRethrow = CGF.createBasicBlock("finally.throw"); - llvm::BasicBlock *FinallyEnd = CGF.createBasicBlock("finally.end"); - - // @synchronized() - if (!isTry) { - 
std::vector<const llvm::Type*> Args(1, IdTy); - llvm::FunctionType *FTy = - llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false); - llvm::Value *SyncEnter = CGM.CreateRuntimeFunction(FTy, "objc_sync_enter"); - llvm::Value *SyncArg = - CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr()); - SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy); - CGF.Builder.CreateCall(SyncEnter, SyncArg); - } +void CGObjCGNU::EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF, + const ObjCAtSynchronizedStmt &S) { + std::vector<const llvm::Type*> Args(1, IdTy); + llvm::FunctionType *FTy = + llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false); + // Evaluate the lock operand. This should dominate the cleanup. + llvm::Value *SyncArg = + CGF.EmitScalarExpr(S.getSynchExpr()); - // Push an EH context entry, used for handling rethrows and jumps - // through finally. - CGF.PushCleanupBlock(FinallyBlock); - - // Emit the statements in the @try {} block - CGF.setInvokeDest(TryHandler); - - CGF.EmitBlock(TryBlock); - CGF.EmitStmt(isTry ? 
cast<ObjCAtTryStmt>(S).getTryBody() - : cast<ObjCAtSynchronizedStmt>(S).getSynchBody()); - - // Jump to @finally if there is no exception - CGF.EmitBranchThroughCleanup(FinallyEnd); - - // Emit the handlers - CGF.EmitBlock(TryHandler); - - // Get the correct versions of the exception handling intrinsics - llvm::Value *llvm_eh_exception = - CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_exception); - llvm::Value *llvm_eh_selector = - CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector); - llvm::Value *llvm_eh_typeid_for = - CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for); - - // Exception object - llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc"); - llvm::Value *RethrowPtr = CGF.CreateTempAlloca(Exc->getType(), "_rethrow"); - - llvm::SmallVector<llvm::Value*, 8> ESelArgs; - llvm::SmallVector<std::pair<const VarDecl*, const Stmt*>, 8> Handlers; - - ESelArgs.push_back(Exc); - ESelArgs.push_back(Personality); - - bool HasCatchAll = false; - // Only @try blocks are allowed @catch blocks, but both can have @finally - if (isTry) { - if (cast<ObjCAtTryStmt>(S).getNumCatchStmts()) { - const ObjCAtTryStmt &AtTry = cast<ObjCAtTryStmt>(S); - CGF.setInvokeDest(CatchInCatch); - - for (unsigned I = 0, N = AtTry.getNumCatchStmts(); I != N; ++I) { - const ObjCAtCatchStmt *CatchStmt = AtTry.getCatchStmt(I); - const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl(); - Handlers.push_back(std::make_pair(CatchDecl, - CatchStmt->getCatchBody())); - - // @catch() and @catch(id) both catch any ObjC exception - if (!CatchDecl || CatchDecl->getType()->isObjCIdType() - || CatchDecl->getType()->isObjCQualifiedIdType()) { - // Use i8* null here to signal this is a catch all, not a cleanup. - ESelArgs.push_back(NULLPtr); - HasCatchAll = true; - // No further catches after this one will ever by reached - break; - } - - // All other types should be Objective-C interface pointer types. 
- const ObjCObjectPointerType *OPT = - CatchDecl->getType()->getAs<ObjCObjectPointerType>(); - assert(OPT && "Invalid @catch type."); - const ObjCInterfaceDecl *IDecl = - OPT->getObjectType()->getInterface(); - assert(IDecl && "Invalid @catch type."); - llvm::Value *EHType = - MakeConstantString(IDecl->getNameAsString()); - ESelArgs.push_back(EHType); - } - } - } + // Acquire the lock. + llvm::Value *SyncEnter = CGM.CreateRuntimeFunction(FTy, "objc_sync_enter"); + SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy); + CGF.Builder.CreateCall(SyncEnter, SyncArg); - // We use a cleanup unless there was already a catch all. - if (!HasCatchAll) { - ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0)); - Handlers.push_back(std::make_pair((const ParmVarDecl*) 0, (const Stmt*) 0)); + // Register an all-paths cleanup to release the lock. + { + CodeGenFunction::CleanupBlock ReleaseScope(CGF, NormalAndEHCleanup); + + llvm::Value *SyncExit = CGM.CreateRuntimeFunction(FTy, "objc_sync_exit"); + SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy); + CGF.Builder.CreateCall(SyncExit, SyncArg); } - // Find which handler was matched. - llvm::Value *ESelector = CGF.Builder.CreateCall(llvm_eh_selector, - ESelArgs.begin(), ESelArgs.end(), "selector"); + // Emit the body of the statement. + CGF.EmitStmt(S.getSynchBody()); - for (unsigned i = 0, e = Handlers.size(); i != e; ++i) { - const VarDecl *CatchParam = Handlers[i].first; - const Stmt *CatchBody = Handlers[i].second; + // Pop the lock-release cleanup. + CGF.PopCleanupBlock(); +} - llvm::BasicBlock *Next = 0; +namespace { + struct CatchHandler { + const VarDecl *Variable; + const Stmt *Body; + llvm::BasicBlock *Block; + llvm::Value *TypeInfo; + }; +} - // The last handler always matches. 
- if (i + 1 != e) { - assert(CatchParam && "Only last handler can be a catch all."); +void CGObjCGNU::EmitTryStmt(CodeGen::CodeGenFunction &CGF, + const ObjCAtTryStmt &S) { + // Unlike the Apple non-fragile runtimes, which also uses + // unwind-based zero cost exceptions, the GNU Objective C runtime's + // EH support isn't a veneer over C++ EH. Instead, exception + // objects are created by __objc_exception_throw and destroyed by + // the personality function; this avoids the need for bracketing + // catch handlers with calls to __blah_begin_catch/__blah_end_catch + // (or even _Unwind_DeleteException), but probably doesn't + // interoperate very well with foreign exceptions. + + // Jump destination for falling out of catch bodies. + CodeGenFunction::JumpDest Cont; + if (S.getNumCatchStmts()) + Cont = CGF.getJumpDestInCurrentScope("eh.cont"); + + // We handle @finally statements by pushing them as a cleanup + // before entering the catch. + CodeGenFunction::FinallyInfo FinallyInfo; + if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt()) { + std::vector<const llvm::Type*> Args(1, IdTy); + llvm::FunctionType *FTy = + llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false); + llvm::Constant *Rethrow = + CGM.CreateRuntimeFunction(FTy, "objc_exception_throw"); - // Test whether this block matches the type for the selector and branch - // to Match if it does, or to the next BB if it doesn't. 
- llvm::BasicBlock *Match = CGF.createBasicBlock("match"); - Next = CGF.createBasicBlock("catch.next"); - llvm::Value *Id = CGF.Builder.CreateCall(llvm_eh_typeid_for, - CGF.Builder.CreateBitCast(ESelArgs[i+2], PtrTy)); - CGF.Builder.CreateCondBr(CGF.Builder.CreateICmpEQ(ESelector, Id), Match, - Next); + FinallyInfo = CGF.EnterFinallyBlock(Finally->getFinallyBody(), 0, 0, + Rethrow); + } - CGF.EmitBlock(Match); - } + llvm::SmallVector<CatchHandler, 8> Handlers; - if (CatchBody) { - llvm::Value *ExcObject = CGF.Builder.CreateBitCast(Exc, - CGF.ConvertType(CatchParam->getType())); - - // Bind the catch parameter if it exists. - if (CatchParam) { - // CatchParam is a ParmVarDecl because of the grammar - // construction used to handle this, but for codegen purposes - // we treat this as a local decl. - CGF.EmitLocalBlockVarDecl(*CatchParam); - CGF.Builder.CreateStore(ExcObject, CGF.GetAddrOfLocalVar(CatchParam)); - } + // Enter the catch, if there is one. + if (S.getNumCatchStmts()) { + for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) { + const ObjCAtCatchStmt *CatchStmt = S.getCatchStmt(I); + const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl(); - CGF.ObjCEHValueStack.push_back(ExcObject); - CGF.EmitStmt(CatchBody); - CGF.ObjCEHValueStack.pop_back(); + Handlers.push_back(CatchHandler()); + CatchHandler &Handler = Handlers.back(); + Handler.Variable = CatchDecl; + Handler.Body = CatchStmt->getCatchBody(); + Handler.Block = CGF.createBasicBlock("catch"); - CGF.EmitBranchThroughCleanup(FinallyEnd); + // @catch() and @catch(id) both catch any ObjC exception. + // Treat them as catch-alls. + // FIXME: this is what this code was doing before, but should 'id' + // really be catching foreign exceptions? 
+ if (!CatchDecl + || CatchDecl->getType()->isObjCIdType() + || CatchDecl->getType()->isObjCQualifiedIdType()) { - if (Next) - CGF.EmitBlock(Next); - } else { - assert(!Next && "catchup should be last handler."); + Handler.TypeInfo = 0; // catch-all + + // Don't consider any other catches. + break; + } - CGF.Builder.CreateStore(Exc, RethrowPtr); - CGF.EmitBranchThroughCleanup(FinallyRethrow); + // All other types should be Objective-C interface pointer types. + const ObjCObjectPointerType *OPT = + CatchDecl->getType()->getAs<ObjCObjectPointerType>(); + assert(OPT && "Invalid @catch type."); + const ObjCInterfaceDecl *IDecl = + OPT->getObjectType()->getInterface(); + assert(IDecl && "Invalid @catch type."); + Handler.TypeInfo = MakeConstantString(IDecl->getNameAsString()); } + + EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size()); + for (unsigned I = 0, E = Handlers.size(); I != E; ++I) + Catch->setHandler(I, Handlers[I].TypeInfo, Handlers[I].Block); } - // The @finally block is a secondary landing pad for any exceptions thrown in - // @catch() blocks - CGF.EmitBlock(CatchInCatch); - Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc"); - ESelArgs.clear(); - ESelArgs.push_back(Exc); - ESelArgs.push_back(Personality); - // If there is a @catch or @finally clause in outside of this one then we - // need to make sure that we catch and rethrow it. - if (PrevLandingPad) { - ESelArgs.push_back(NULLPtr); - } else { - ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0)); - } - CGF.Builder.CreateCall(llvm_eh_selector, ESelArgs.begin(), ESelArgs.end(), - "selector"); - CGF.Builder.CreateCall(llvm_eh_typeid_for, - CGF.Builder.CreateIntToPtr(ESelArgs[2], PtrTy)); - CGF.Builder.CreateStore(Exc, RethrowPtr); - CGF.EmitBranchThroughCleanup(FinallyRethrow); + + // Emit the try body. + CGF.EmitStmt(S.getTryBody()); - CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock(); + // Leave the try. 
+ if (S.getNumCatchStmts()) + CGF.EHStack.popCatch(); - CGF.setInvokeDest(PrevLandingPad); + // Remember where we were. + CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP(); - CGF.EmitBlock(FinallyBlock); + // Emit the handlers. + for (unsigned I = 0, E = Handlers.size(); I != E; ++I) { + CatchHandler &Handler = Handlers[I]; + CGF.EmitBlock(Handler.Block); + llvm::Value *Exn = CGF.Builder.CreateLoad(CGF.getExceptionSlot()); - if (isTry) { - if (const ObjCAtFinallyStmt* FinallyStmt = - cast<ObjCAtTryStmt>(S).getFinallyStmt()) - CGF.EmitStmt(FinallyStmt->getFinallyBody()); - } else { - // Emit 'objc_sync_exit(expr)' as finally's sole statement for - // @synchronized. - std::vector<const llvm::Type*> Args(1, IdTy); - llvm::FunctionType *FTy = - llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false); - llvm::Value *SyncExit = CGM.CreateRuntimeFunction(FTy, "objc_sync_exit"); - llvm::Value *SyncArg = - CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr()); - SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy); - CGF.Builder.CreateCall(SyncExit, SyncArg); - } + // Bind the catch parameter if it exists. + if (const VarDecl *CatchParam = Handler.Variable) { + const llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType()); + Exn = CGF.Builder.CreateBitCast(Exn, CatchType); - if (Info.SwitchBlock) - CGF.EmitBlock(Info.SwitchBlock); - if (Info.EndBlock) - CGF.EmitBlock(Info.EndBlock); + CGF.EmitLocalBlockVarDecl(*CatchParam); + CGF.Builder.CreateStore(Exn, CGF.GetAddrOfLocalVar(CatchParam)); + } - // Branch around the rethrow code. 
- CGF.EmitBranch(FinallyEnd); + CGF.ObjCEHValueStack.push_back(Exn); + CGF.EmitStmt(Handler.Body); + CGF.ObjCEHValueStack.pop_back(); - CGF.EmitBlock(FinallyRethrow); + CGF.EmitBranchThroughCleanup(Cont); + } - llvm::Value *ExceptionObject = CGF.Builder.CreateLoad(RethrowPtr); - llvm::BasicBlock *UnwindBB = CGF.getInvokeDest(); - if (!UnwindBB) { - CGF.Builder.CreateCall(RethrowFn, ExceptionObject); - // Exception always thrown, next instruction is never reached. - CGF.Builder.CreateUnreachable(); - } else { - // If there is a @catch block outside this scope, we invoke instead of - // calling because we may return to this function. This is very slow, but - // some people still do it. It would be nice to add an optimised path for - // this. - CGF.Builder.CreateInvoke(RethrowFn, UnwindBB, UnwindBB, &ExceptionObject, - &ExceptionObject+1); - } + // Go back to the try-statement fallthrough. + CGF.Builder.restoreIP(SavedIP); + + // Pop out of the finally. + if (S.getFinallyStmt()) + CGF.ExitFinallyBlock(FinallyInfo); - CGF.EmitBlock(FinallyEnd); + if (Cont.Block) { + if (Cont.Block->use_empty()) + delete Cont.Block; + else { + CGF.EmitBranch(Cont.Block); + CGF.EmitBlock(Cont.Block); + } + } } void CGObjCGNU::EmitThrowStmt(CodeGen::CodeGenFunction &CGF, @@ -2174,17 +2102,12 @@ void CGObjCGNU::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF, void CGObjCGNU::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF, llvm::Value *DestPtr, llvm::Value *SrcPtr, - QualType Ty) { + llvm::Value *Size) { CGBuilderTy B = CGF.Builder; DestPtr = EnforceType(B, DestPtr, IdTy); SrcPtr = EnforceType(B, SrcPtr, PtrToIdTy); - std::pair<uint64_t, unsigned> TypeInfo = CGM.getContext().getTypeInfo(Ty); - unsigned long size = TypeInfo.first/8; - // FIXME: size_t - llvm::Value *N = llvm::ConstantInt::get(LongTy, size); - - B.CreateCall3(MemMoveFn, DestPtr, SrcPtr, N); + B.CreateCall3(MemMoveFn, DestPtr, SrcPtr, Size); } llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable( diff --git 
a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp index d3bafd7..01ead9e 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp @@ -16,13 +16,14 @@ #include "CGRecordLayout.h" #include "CodeGenModule.h" #include "CodeGenFunction.h" +#include "CGException.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/RecordLayout.h" #include "clang/AST/StmtObjC.h" #include "clang/Basic/LangOptions.h" -#include "clang/CodeGen/CodeGenOptions.h" +#include "clang/Frontend/CodeGenOptions.h" #include "llvm/Intrinsics.h" #include "llvm/LLVMContext.h" @@ -31,6 +32,7 @@ #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallPtrSet.h" +#include "llvm/Support/CallSite.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Target/TargetData.h" #include <cstdio> @@ -440,6 +442,15 @@ public: return CGM.CreateRuntimeFunction(FTy, "objc_exception_throw"); } + /// ExceptionRethrowFn - LLVM objc_exception_rethrow function. + llvm::Constant *getExceptionRethrowFn() { + // void objc_exception_rethrow(void) + std::vector<const llvm::Type*> Args; + llvm::FunctionType *FTy = + llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, true); + return CGM.CreateRuntimeFunction(FTy, "objc_exception_rethrow"); + } + /// SyncEnterFn - LLVM object_sync_enter function. llvm::Constant *getSyncEnterFn() { // void objc_sync_enter (id) @@ -843,6 +854,9 @@ protected: /// MethodVarNames - uniqued method variable names. llvm::DenseMap<Selector, llvm::GlobalVariable*> MethodVarNames; + /// DefinedCategoryNames - list of category names in form Class_Category. + llvm::SetVector<std::string> DefinedCategoryNames; + /// MethodVarTypes - uniqued method type signatures. We have to use /// a StringMap here because have no other unique reference. 
llvm::StringMap<llvm::GlobalVariable*> MethodVarTypes; @@ -1120,7 +1134,8 @@ private: /// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy, /// for the given selector. - llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel); + llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel, + bool lval=false); public: CGObjCMac(CodeGen::CodeGenModule &cgm); @@ -1151,7 +1166,8 @@ public: virtual llvm::Value *GetClass(CGBuilderTy &Builder, const ObjCInterfaceDecl *ID); - virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel); + virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel, + bool lval = false); /// The NeXT/Apple runtimes do not support typed selectors; just emit an /// untyped one. @@ -1170,8 +1186,11 @@ public: virtual llvm::Constant *GetCopyStructFunction(); virtual llvm::Constant *EnumerationMutationFunction(); - virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, - const Stmt &S); + virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF, + const ObjCAtTryStmt &S); + virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF, + const ObjCAtSynchronizedStmt &S); + void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, const Stmt &S); virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF, const ObjCAtThrowStmt &S); virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF, @@ -1187,7 +1206,7 @@ public: llvm::Value *src, llvm::Value *dest); virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF, llvm::Value *dest, llvm::Value *src, - QualType Ty); + llvm::Value *size); virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF, QualType ObjectTy, @@ -1319,7 +1338,8 @@ private: /// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy, /// for the given selector. 
- llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel); + llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel, + bool lval=false); /// GetInterfaceEHType - Get the cached ehtype for the given Objective-C /// interface. The return value has type EHTypePtrTy. @@ -1382,8 +1402,9 @@ public: virtual llvm::Value *GetClass(CGBuilderTy &Builder, const ObjCInterfaceDecl *ID); - virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel) - { return EmitSelector(Builder, Sel); } + virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel, + bool lvalue = false) + { return EmitSelector(Builder, Sel, lvalue); } /// The NeXT/Apple runtimes do not support typed selectors; just emit an /// untyped one. @@ -1412,8 +1433,10 @@ public: return ObjCTypes.getEnumerationMutationFn(); } - virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, - const Stmt &S); + virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF, + const ObjCAtTryStmt &S); + virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF, + const ObjCAtSynchronizedStmt &S); virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF, const ObjCAtThrowStmt &S); virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF, @@ -1429,7 +1452,7 @@ public: llvm::Value *src, llvm::Value *dest); virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF, llvm::Value *dest, llvm::Value *src, - QualType Ty); + llvm::Value *size); virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF, QualType ObjectTy, llvm::Value *BaseValue, @@ -1483,8 +1506,9 @@ llvm::Value *CGObjCMac::GetClass(CGBuilderTy &Builder, } /// GetSelector - Return the pointer to the unique'd string for this selector. 
-llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, Selector Sel) { - return EmitSelector(Builder, Sel); +llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, Selector Sel, + bool lval) { + return EmitSelector(Builder, Sel, lval); } llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl *Method) { @@ -1620,30 +1644,23 @@ CGObjCCommonMac::EmitLegacyMessageSend(CodeGen::CodeGenFunction &CGF, const llvm::FunctionType *FTy = Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false); + if (Method) + assert(CGM.getContext().getCanonicalType(Method->getResultType()) == + CGM.getContext().getCanonicalType(ResultType) && + "Result type mismatch!"); + llvm::Constant *Fn = NULL; - if (CGM.ReturnTypeUsesSret(FnInfo)) { + if (CGM.ReturnTypeUsesSRet(FnInfo)) { Fn = (ObjCABI == 2) ? ObjCTypes.getSendStretFn2(IsSuper) : ObjCTypes.getSendStretFn(IsSuper); - } else if (ResultType->isFloatingType()) { - if (ObjCABI == 2) { - if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) { - BuiltinType::Kind k = BT->getKind(); - Fn = (k == BuiltinType::LongDouble) ? ObjCTypes.getSendFpretFn2(IsSuper) - : ObjCTypes.getSendFn2(IsSuper); - } else { - Fn = ObjCTypes.getSendFn2(IsSuper); - } - } else - // FIXME. This currently matches gcc's API for x86-32. May need to change - // for others if we have their API. - Fn = ObjCTypes.getSendFpretFn(IsSuper); + } else if (CGM.ReturnTypeUsesFPRet(ResultType)) { + Fn = (ObjCABI == 2) ? ObjCTypes.getSendFpretFn2(IsSuper) + : ObjCTypes.getSendFpretFn(IsSuper); } else { Fn = (ObjCABI == 2) ? 
ObjCTypes.getSendFn2(IsSuper) : ObjCTypes.getSendFn(IsSuper); } - assert(Fn && "EmitLegacyMessageSend - unknown API"); - Fn = llvm::ConstantExpr::getBitCast(Fn, - llvm::PointerType::getUnqual(FTy)); + Fn = llvm::ConstantExpr::getBitCast(Fn, llvm::PointerType::getUnqual(FTy)); return CGF.EmitCall(FnInfo, Fn, Return, ActualArgs); } @@ -1909,10 +1926,18 @@ llvm::Constant *CGObjCCommonMac::EmitPropertyList(llvm::Twine Name, Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy, Prop)); } - if (const ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(OCD)) + if (const ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(OCD)) { for (ObjCInterfaceDecl::protocol_iterator P = OID->protocol_begin(), E = OID->protocol_end(); P != E; ++P) - PushProtocolProperties(PropertySet, Properties, Container, (*P), ObjCTypes); + PushProtocolProperties(PropertySet, Properties, Container, (*P), + ObjCTypes); + } + else if (const ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(OCD)) { + for (ObjCCategoryDecl::protocol_iterator P = CD->protocol_begin(), + E = CD->protocol_end(); P != E; ++P) + PushProtocolProperties(PropertySet, Properties, Container, (*P), + ObjCTypes); + } // Return null for empty list. if (Properties.empty()) @@ -2049,6 +2074,7 @@ void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) { "__OBJC,__category,regular,no_dead_strip", 4, true); DefinedCategories.push_back(GV); + DefinedCategoryNames.insert(ExtName.str()); } // FIXME: Get from somewhere? 
@@ -2494,11 +2520,52 @@ llvm::Constant *CGObjCMac::EnumerationMutationFunction() { return ObjCTypes.getEnumerationMutationFn(); } +void CGObjCMac::EmitTryStmt(CodeGenFunction &CGF, const ObjCAtTryStmt &S) { + return EmitTryOrSynchronizedStmt(CGF, S); +} + +void CGObjCMac::EmitSynchronizedStmt(CodeGenFunction &CGF, + const ObjCAtSynchronizedStmt &S) { + return EmitTryOrSynchronizedStmt(CGF, S); +} + /* Objective-C setjmp-longjmp (sjlj) Exception Handling -- + A catch buffer is a setjmp buffer plus: + - a pointer to the exception that was caught + - a pointer to the previous exception data buffer + - two pointers of reserved storage + Therefore catch buffers form a stack, with a pointer to the top + of the stack kept in thread-local storage. + + objc_exception_try_enter pushes a catch buffer onto the EH stack. + objc_exception_try_exit pops the given catch buffer, which is + required to be the top of the EH stack. + objc_exception_throw pops the top of the EH stack, writes the + thrown exception into the appropriate field, and longjmps + to the setjmp buffer. It crashes the process (with a printf + and an abort()) if there are no catch buffers on the stack. + objc_exception_extract just reads the exception pointer out of the + catch buffer. + + There's no reason an implementation couldn't use a light-weight + setjmp here --- something like __builtin_setjmp, but API-compatible + with the heavyweight setjmp. This will be more important if we ever + want to implement correct ObjC/C++ exception interactions for the + fragile ABI. + + Note that for this use of setjmp/longjmp to be correct, we may need + to mark some local variables volatile: if a non-volatile local + variable is modified between the setjmp and the longjmp, it has + indeterminate value. For the purposes of LLVM IR, it may be + sufficient to make loads and stores within the @try (to variables + declared outside the @try) volatile. 
This is necessary for + optimized correctness, but is not currently being done; this is + being tracked as rdar://problem/8160285 + The basic framework for a @try-catch-finally is as follows: { objc_exception_data d; @@ -2560,37 +2627,33 @@ llvm::Constant *CGObjCMac::EnumerationMutationFunction() { Rethrows and Jumps-Through-Finally -- - Support for implicit rethrows and jumping through the finally block is - handled by storing the current exception-handling context in - ObjCEHStack. - - In order to implement proper @finally semantics, we support one basic - mechanism for jumping through the finally block to an arbitrary - destination. Constructs which generate exits from a @try or @catch - block use this mechanism to implement the proper semantics by chaining - jumps, as necessary. - - This mechanism works like the one used for indirect goto: we - arbitrarily assign an ID to each destination and store the ID for the - destination in a variable prior to entering the finally block. At the - end of the finally block we simply create a switch to the proper - destination. - - Code gen for @synchronized(expr) stmt; - Effectively generating code for: - objc_sync_enter(expr); - @try stmt @finally { objc_sync_exit(expr); } + '@throw;' is supported by pushing the currently-caught exception + onto ObjCEHStack while the @catch blocks are emitted. + + Branches through the @finally block are handled with an ordinary + normal cleanup. We do not register an EH cleanup; fragile-ABI ObjC + exceptions are not compatible with C++ exceptions, and this is + hardly the only place where this will go wrong. + + @synchronized(expr) { stmt; } is emitted as if it were: + id synch_value = expr; + objc_sync_enter(synch_value); + @try { stmt; } @finally { objc_sync_exit(synch_value); } */ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, const Stmt &S) { bool isTry = isa<ObjCAtTryStmt>(S); - // Create various blocks we refer to for handling @finally. 
- llvm::BasicBlock *FinallyBlock = CGF.createBasicBlock("finally"); - llvm::BasicBlock *FinallyExit = CGF.createBasicBlock("finally.exit"); - llvm::BasicBlock *FinallyNoExit = CGF.createBasicBlock("finally.noexit"); - llvm::BasicBlock *FinallyRethrow = CGF.createBasicBlock("finally.throw"); - llvm::BasicBlock *FinallyEnd = CGF.createBasicBlock("finally.end"); + + // A destination for the fall-through edges of the catch handlers to + // jump to. + CodeGenFunction::JumpDest FinallyEnd = + CGF.getJumpDestInCurrentScope("finally.end"); + + // A destination for the rethrow edge of the catch handlers to jump + // to. + CodeGenFunction::JumpDest FinallyRethrow = + CGF.getJumpDestInCurrentScope("finally.rethrow"); // For @synchronized, call objc_sync_enter(sync.expr). The // evaluation of the expression must occur before we enter the @@ -2601,75 +2664,139 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, SyncArg = CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr()); SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy); - CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg); + CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg) + ->setDoesNotThrow(); } - // Push an EH context entry, used for handling rethrows and jumps - // through finally. - CGF.PushCleanupBlock(FinallyBlock); - - if (CGF.ObjCEHValueStack.empty()) - CGF.ObjCEHValueStack.push_back(0); - // If This is a nested @try, caught exception is that of enclosing @try. - else - CGF.ObjCEHValueStack.push_back(CGF.ObjCEHValueStack.back()); // Allocate memory for the exception data and rethrow pointer. 
llvm::Value *ExceptionData = CGF.CreateTempAlloca(ObjCTypes.ExceptionDataTy, "exceptiondata.ptr"); llvm::Value *RethrowPtr = CGF.CreateTempAlloca(ObjCTypes.ObjectPtrTy, "_rethrow"); - llvm::Value *CallTryExitPtr = CGF.CreateTempAlloca( - llvm::Type::getInt1Ty(VMContext), + + // Create a flag indicating whether the cleanup needs to call + // objc_exception_try_exit. This is true except when + // - no catches match and we're branching through the cleanup + // just to rethrow the exception, or + // - a catch matched and we're falling out of the catch handler. + llvm::Value *CallTryExitVar = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "_call_try_exit"); CGF.Builder.CreateStore(llvm::ConstantInt::getTrue(VMContext), - CallTryExitPtr); + CallTryExitVar); + + // Push a normal cleanup to leave the try scope. + { + CodeGenFunction::CleanupBlock FinallyScope(CGF, NormalCleanup); + + // Check whether we need to call objc_exception_try_exit. + // In optimized code, this branch will always be folded. + llvm::BasicBlock *FinallyCallExit = + CGF.createBasicBlock("finally.call_exit"); + llvm::BasicBlock *FinallyNoCallExit = + CGF.createBasicBlock("finally.no_call_exit"); + CGF.Builder.CreateCondBr(CGF.Builder.CreateLoad(CallTryExitVar), + FinallyCallExit, FinallyNoCallExit); + + CGF.EmitBlock(FinallyCallExit); + CGF.Builder.CreateCall(ObjCTypes.getExceptionTryExitFn(), ExceptionData) + ->setDoesNotThrow(); + + CGF.EmitBlock(FinallyNoCallExit); + + if (isTry) { + if (const ObjCAtFinallyStmt* FinallyStmt = + cast<ObjCAtTryStmt>(S).getFinallyStmt()) + CGF.EmitStmt(FinallyStmt->getFinallyBody()); + + // ~CleanupBlock requires there to be an exit block. + CGF.EnsureInsertPoint(); + } else { + // Emit objc_sync_exit(expr); as finally's sole statement for + // @synchronized. + CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg) + ->setDoesNotThrow(); + } + } - // Enter a new try block and call setjmp. 
- CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData); - llvm::Value *JmpBufPtr = CGF.Builder.CreateStructGEP(ExceptionData, 0, - "jmpbufarray"); - JmpBufPtr = CGF.Builder.CreateStructGEP(JmpBufPtr, 0, "tmp"); - llvm::Value *SetJmpResult = CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(), - JmpBufPtr, "result"); + // Enter a try block: + // - Call objc_exception_try_enter to push ExceptionData on top of + // the EH stack. + CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData) + ->setDoesNotThrow(); + // - Call setjmp on the exception data buffer. + llvm::Constant *Zero = llvm::ConstantInt::get(CGF.Builder.getInt32Ty(), 0); + llvm::Value *GEPIndexes[] = { Zero, Zero, Zero }; + llvm::Value *SetJmpBuffer = + CGF.Builder.CreateGEP(ExceptionData, GEPIndexes, GEPIndexes+3, "setjmp_buffer"); + llvm::CallInst *SetJmpResult = + CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(), SetJmpBuffer, "setjmp_result"); + SetJmpResult->setDoesNotThrow(); + + // If setjmp returned 0, enter the protected block; otherwise, + // branch to the handler. llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try"); llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler"); - CGF.Builder.CreateCondBr(CGF.Builder.CreateIsNotNull(SetJmpResult, "threw"), - TryHandler, TryBlock); + llvm::Value *DidCatch = + CGF.Builder.CreateIsNull(SetJmpResult, "did_catch_exception"); + CGF.Builder.CreateCondBr(DidCatch, TryBlock, TryHandler); - // Emit the @try block. + // Emit the protected block. CGF.EmitBlock(TryBlock); CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody() - : cast<ObjCAtSynchronizedStmt>(S).getSynchBody()); + : cast<ObjCAtSynchronizedStmt>(S).getSynchBody()); CGF.EmitBranchThroughCleanup(FinallyEnd); - // Emit the "exception in @try" block. + // Emit the exception handler block. CGF.EmitBlock(TryHandler); // Retrieve the exception object. We may emit multiple blocks but // nothing can cross this so the value is already in SSA form. 
- llvm::Value *Caught = + llvm::CallInst *Caught = CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(), ExceptionData, "caught"); - CGF.ObjCEHValueStack.back() = Caught; - if (!isTry) { - CGF.Builder.CreateStore(Caught, RethrowPtr); + Caught->setDoesNotThrow(); + + // Remember the exception to rethrow. + CGF.Builder.CreateStore(Caught, RethrowPtr); + + // Note: at this point, objc_exception_throw already popped the + // catch handler, so anything that branches to the cleanup needs + // to set CallTryExitVar to false. + + // For a @synchronized (or a @try with no catches), just branch + // through the cleanup to the rethrow block. + if (!isTry || !cast<ObjCAtTryStmt>(S).getNumCatchStmts()) { + // Tell the cleanup not to re-pop the exit. CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext), - CallTryExitPtr); + CallTryExitVar); + CGF.EmitBranchThroughCleanup(FinallyRethrow); - } else if (cast<ObjCAtTryStmt>(S).getNumCatchStmts()) { + + // Otherwise, we have to match against the caught exceptions. + } else { + // Push the exception to rethrow onto the EH value stack for the + // benefit of any @throws in the handlers. + CGF.ObjCEHValueStack.push_back(Caught); + const ObjCAtTryStmt* AtTryStmt = cast<ObjCAtTryStmt>(&S); // Enter a new exception try block (in case a @catch block throws - // an exception). - CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData); + // an exception). Now CallTryExitVar (currently true) is back in + // synch with reality. 
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData) + ->setDoesNotThrow(); - llvm::Value *SetJmpResult = CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(), - JmpBufPtr, "result"); - llvm::Value *Threw = CGF.Builder.CreateIsNotNull(SetJmpResult, "threw"); + llvm::CallInst *SetJmpResult = + CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(), SetJmpBuffer, + "setjmp.result"); + SetJmpResult->setDoesNotThrow(); + + llvm::Value *Threw = + CGF.Builder.CreateIsNotNull(SetJmpResult, "did_catch_exception"); llvm::BasicBlock *CatchBlock = CGF.createBasicBlock("catch"); - llvm::BasicBlock *CatchHandler = CGF.createBasicBlock("catch.handler"); + llvm::BasicBlock *CatchHandler = CGF.createBasicBlock("catch_for_catch"); CGF.Builder.CreateCondBr(Threw, CatchHandler, CatchBlock); CGF.EmitBlock(CatchBlock); @@ -2680,7 +2807,6 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, bool AllMatched = false; for (unsigned I = 0, N = AtTryStmt->getNumCatchStmts(); I != N; ++I) { const ObjCAtCatchStmt *CatchStmt = AtTryStmt->getCatchStmt(I); - llvm::BasicBlock *NextCatchBlock = CGF.createBasicBlock("catch"); const VarDecl *CatchParam = CatchStmt->getCatchParamDecl(); const ObjCObjectPointerType *OPT = 0; @@ -2691,47 +2817,67 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, } else { OPT = CatchParam->getType()->getAs<ObjCObjectPointerType>(); - // catch(id e) always matches. + // catch(id e) always matches under this ABI, since only + // ObjC exceptions end up here in the first place. // FIXME: For the time being we also match id<X>; this should // be rejected by Sema instead. if (OPT && (OPT->isObjCIdType() || OPT->isObjCQualifiedIdType())) AllMatched = true; } + // If this is a catch-all, we don't need to test anything. 
if (AllMatched) { + CodeGenFunction::RunCleanupsScope CatchVarCleanups(CGF); + if (CatchParam) { CGF.EmitLocalBlockVarDecl(*CatchParam); assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?"); + + // These types work out because ConvertType(id) == i8*. CGF.Builder.CreateStore(Caught, CGF.GetAddrOfLocalVar(CatchParam)); } CGF.EmitStmt(CatchStmt->getCatchBody()); + + // The scope of the catch variable ends right here. + CatchVarCleanups.ForceCleanup(); + CGF.EmitBranchThroughCleanup(FinallyEnd); break; } assert(OPT && "Unexpected non-object pointer type in @catch"); const ObjCObjectType *ObjTy = OPT->getObjectType(); + + // FIXME: @catch (Class c) ? ObjCInterfaceDecl *IDecl = ObjTy->getInterface(); assert(IDecl && "Catch parameter must have Objective-C type!"); // Check if the @catch block matches the exception object. llvm::Value *Class = EmitClassRef(CGF.Builder, IDecl); - llvm::Value *Match = + llvm::CallInst *Match = CGF.Builder.CreateCall2(ObjCTypes.getExceptionMatchFn(), Class, Caught, "match"); + Match->setDoesNotThrow(); - llvm::BasicBlock *MatchedBlock = CGF.createBasicBlock("matched"); + llvm::BasicBlock *MatchedBlock = CGF.createBasicBlock("match"); + llvm::BasicBlock *NextCatchBlock = CGF.createBasicBlock("catch.next"); CGF.Builder.CreateCondBr(CGF.Builder.CreateIsNotNull(Match, "matched"), MatchedBlock, NextCatchBlock); // Emit the @catch block. CGF.EmitBlock(MatchedBlock); + + // Collect any cleanups for the catch variable. The scope lasts until + // the end of the catch body. + CodeGenFunction::RunCleanupsScope CatchVarCleanups(CGF); + CGF.EmitLocalBlockVarDecl(*CatchParam); assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?"); + // Initialize the catch variable. 
llvm::Value *Tmp = CGF.Builder.CreateBitCast(Caught, CGF.ConvertType(CatchParam->getType()), @@ -2739,11 +2885,17 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, CGF.Builder.CreateStore(Tmp, CGF.GetAddrOfLocalVar(CatchParam)); CGF.EmitStmt(CatchStmt->getCatchBody()); + + // We're done with the catch variable. + CatchVarCleanups.ForceCleanup(); + CGF.EmitBranchThroughCleanup(FinallyEnd); CGF.EmitBlock(NextCatchBlock); } + CGF.ObjCEHValueStack.pop_back(); + if (!AllMatched) { // None of the handlers caught the exception, so store it to be // rethrown at the end of the @finally block. @@ -2753,59 +2905,34 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, // Emit the exception handler for the @catch blocks. CGF.EmitBlock(CatchHandler); - CGF.Builder.CreateStore( - CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(), - ExceptionData), - RethrowPtr); - CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext), - CallTryExitPtr); - CGF.EmitBranchThroughCleanup(FinallyRethrow); - } else { + + // Rethrow the new exception, not the old one. + Caught = CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(), + ExceptionData); + Caught->setDoesNotThrow(); CGF.Builder.CreateStore(Caught, RethrowPtr); + + // Don't pop the catch handler; the throw already did. CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext), - CallTryExitPtr); + CallTryExitVar); CGF.EmitBranchThroughCleanup(FinallyRethrow); } - // Pop the exception-handling stack entry. It is important to do - // this now, because the code in the @finally block is not in this - // context. - CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock(); - - CGF.ObjCEHValueStack.pop_back(); - - // Emit the @finally block. - CGF.EmitBlock(FinallyBlock); - llvm::Value* CallTryExit = CGF.Builder.CreateLoad(CallTryExitPtr, "tmp"); + // Pop the cleanup. 
+ CGF.PopCleanupBlock(); + CGF.EmitBlock(FinallyEnd.Block); - CGF.Builder.CreateCondBr(CallTryExit, FinallyExit, FinallyNoExit); - - CGF.EmitBlock(FinallyExit); - CGF.Builder.CreateCall(ObjCTypes.getExceptionTryExitFn(), ExceptionData); - - CGF.EmitBlock(FinallyNoExit); - if (isTry) { - if (const ObjCAtFinallyStmt* FinallyStmt = - cast<ObjCAtTryStmt>(S).getFinallyStmt()) - CGF.EmitStmt(FinallyStmt->getFinallyBody()); - } else { - // Emit objc_sync_exit(expr); as finally's sole statement for - // @synchronized. - CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg); + // Emit the rethrow block. + CGF.Builder.ClearInsertionPoint(); + CGF.EmitBlock(FinallyRethrow.Block, true); + if (CGF.HaveInsertPoint()) { + CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), + CGF.Builder.CreateLoad(RethrowPtr)) + ->setDoesNotThrow(); + CGF.Builder.CreateUnreachable(); } - // Emit the switch block - if (Info.SwitchBlock) - CGF.EmitBlock(Info.SwitchBlock); - if (Info.EndBlock) - CGF.EmitBlock(Info.EndBlock); - - CGF.EmitBlock(FinallyRethrow); - CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), - CGF.Builder.CreateLoad(RethrowPtr)); - CGF.Builder.CreateUnreachable(); - - CGF.EmitBlock(FinallyEnd); + CGF.Builder.SetInsertPoint(FinallyEnd.Block); } void CGObjCMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF, @@ -2822,7 +2949,8 @@ void CGObjCMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF, ExceptionAsObject = CGF.ObjCEHValueStack.back(); } - CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), ExceptionAsObject); + CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), ExceptionAsObject) + ->setDoesNotReturn(); CGF.Builder.CreateUnreachable(); // Clear the insertion point to indicate we are in unreachable code. 
@@ -2929,15 +3057,11 @@ void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF, void CGObjCMac::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF, llvm::Value *DestPtr, llvm::Value *SrcPtr, - QualType Ty) { - // Get size info for this aggregate. - std::pair<uint64_t, unsigned> TypeInfo = CGM.getContext().getTypeInfo(Ty); - unsigned long size = TypeInfo.first/8; + llvm::Value *size) { SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy); DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy); - llvm::Value *N = llvm::ConstantInt::get(ObjCTypes.LongTy, size); CGF.Builder.CreateCall3(ObjCTypes.GcMemmoveCollectableFn(), - DestPtr, SrcPtr, N); + DestPtr, SrcPtr, size); return; } @@ -2997,12 +3121,14 @@ void CGObjCCommonMac::EmitImageInfo() { // We never allow @synthesize of a superclass property. flags |= eImageInfo_CorrectedSynthesize; + const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext); + // Emitted as int[2]; llvm::Constant *values[2] = { - llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), version), - llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), flags) + llvm::ConstantInt::get(Int32Ty, version), + llvm::ConstantInt::get(Int32Ty, flags) }; - llvm::ArrayType *AT = llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext), 2); + llvm::ArrayType *AT = llvm::ArrayType::get(Int32Ty, 2); const char *Section; if (ObjCABI == 1) @@ -3102,7 +3228,8 @@ llvm::Value *CGObjCMac::EmitClassRef(CGBuilderTy &Builder, return Builder.CreateLoad(Entry, "tmp"); } -llvm::Value *CGObjCMac::EmitSelector(CGBuilderTy &Builder, Selector Sel) { +llvm::Value *CGObjCMac::EmitSelector(CGBuilderTy &Builder, Selector Sel, + bool lvalue) { llvm::GlobalVariable *&Entry = SelectorReferences[Sel]; if (!Entry) { @@ -3115,6 +3242,8 @@ llvm::Value *CGObjCMac::EmitSelector(CGBuilderTy &Builder, Selector Sel) { 4, true); } + if (lvalue) + return Entry; return Builder.CreateLoad(Entry, "tmp"); } @@ -3632,8 +3761,14 @@ void 
CGObjCMac::FinishModule() { OS << "\t.objc_class_name_" << (*I)->getName() << "=0\n" << "\t.globl .objc_class_name_" << (*I)->getName() << "\n"; for (llvm::SetVector<IdentifierInfo*>::iterator I = LazySymbols.begin(), - e = LazySymbols.end(); I != e; ++I) + e = LazySymbols.end(); I != e; ++I) { OS << "\t.lazy_reference .objc_class_name_" << (*I)->getName() << "\n"; + } + + for (size_t i = 0; i < DefinedCategoryNames.size(); ++i) { + OS << "\t.objc_category_name_" << DefinedCategoryNames[i] << "=0\n" + << "\t.globl .objc_category_name_" << DefinedCategoryNames[i] << "\n"; + } CGM.getModule().setModuleInlineAsm(OS.str()); } @@ -3949,8 +4084,9 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm) llvm::Type::getInt8PtrTy(VMContext), 4); ExceptionDataTy = - llvm::StructType::get(VMContext, llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext), - SetJmpBufferSize), + llvm::StructType::get(VMContext, + llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext), + SetJmpBufferSize), StackPtrTy, NULL); CGM.getModule().addTypeName("struct._objc_exception_data", ExceptionDataTy); @@ -5147,7 +5283,7 @@ CodeGen::RValue CGObjCNonFragileABIMac::EmitMessageSend( FunctionType::ExtInfo()); llvm::Constant *Fn = 0; std::string Name("\01l_"); - if (CGM.ReturnTypeUsesSret(FnInfo)) { + if (CGM.ReturnTypeUsesSRet(FnInfo)) { #if 0 // unlike what is documented. gcc never generates this API!! 
if (Receiver->getType() == ObjCTypes.ObjectPtrTy) { @@ -5164,14 +5300,9 @@ CodeGen::RValue CGObjCNonFragileABIMac::EmitMessageSend( Fn = ObjCTypes.getMessageSendStretFixupFn(); Name += "objc_msgSend_stret_fixup"; } - } else if (!IsSuper && ResultType->isFloatingType()) { - if (ResultType->isSpecificBuiltinType(BuiltinType::LongDouble)) { - Fn = ObjCTypes.getMessageSendFpretFixupFn(); - Name += "objc_msgSend_fpret_fixup"; - } else { - Fn = ObjCTypes.getMessageSendFixupFn(); - Name += "objc_msgSend_fixup"; - } + } else if (!IsSuper && CGM.ReturnTypeUsesFPRet(ResultType)) { + Fn = ObjCTypes.getMessageSendFpretFixupFn(); + Name += "objc_msgSend_fpret_fixup"; } else { #if 0 // unlike what is documented. gcc never generates this API!! @@ -5403,7 +5534,7 @@ CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF, } llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CGBuilderTy &Builder, - Selector Sel) { + Selector Sel, bool lval) { llvm::GlobalVariable *&Entry = SelectorReferences[Sel]; if (!Entry) { @@ -5418,6 +5549,8 @@ llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CGBuilderTy &Builder, CGM.AddUsedGlobal(Entry); } + if (lval) + return Entry; return Builder.CreateLoad(Entry, "tmp"); } /// EmitObjCIvarAssign - Code gen for assigning to a __strong object. @@ -5467,15 +5600,11 @@ void CGObjCNonFragileABIMac::EmitGCMemmoveCollectable( CodeGen::CodeGenFunction &CGF, llvm::Value *DestPtr, llvm::Value *SrcPtr, - QualType Ty) { - // Get size info for this aggregate. 
- std::pair<uint64_t, unsigned> TypeInfo = CGM.getContext().getTypeInfo(Ty); - unsigned long size = TypeInfo.first/8; + llvm::Value *Size) { SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy); DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy); - llvm::Value *N = llvm::ConstantInt::get(ObjCTypes.LongTy, size); CGF.Builder.CreateCall3(ObjCTypes.GcMemmoveCollectableFn(), - DestPtr, SrcPtr, N); + DestPtr, SrcPtr, Size); return; } @@ -5535,75 +5664,92 @@ void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF, } void -CGObjCNonFragileABIMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, - const Stmt &S) { - bool isTry = isa<ObjCAtTryStmt>(S); - llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try"); - llvm::BasicBlock *PrevLandingPad = CGF.getInvokeDest(); - llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler"); - llvm::BasicBlock *FinallyBlock = CGF.createBasicBlock("finally"); - llvm::BasicBlock *FinallyRethrow = CGF.createBasicBlock("finally.throw"); - llvm::BasicBlock *FinallyEnd = CGF.createBasicBlock("finally.end"); +CGObjCNonFragileABIMac::EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF, + const ObjCAtSynchronizedStmt &S) { + // Evaluate the lock operand. This should dominate the cleanup. + llvm::Value *SyncArg = CGF.EmitScalarExpr(S.getSynchExpr()); - // For @synchronized, call objc_sync_enter(sync.expr). The - // evaluation of the expression must occur before we enter the - // @synchronized. We can safely avoid a temp here because jumps into - // @synchronized are illegal & this will dominate uses. - llvm::Value *SyncArg = 0; - if (!isTry) { - SyncArg = - CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr()); - SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy); - CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg); + // Acquire the lock. 
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy); + CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg) + ->setDoesNotThrow(); + + // Register an all-paths cleanup to release the lock. + { + CodeGenFunction::CleanupBlock ReleaseScope(CGF, NormalAndEHCleanup); + + CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg) + ->setDoesNotThrow(); } - // Push an EH context entry, used for handling rethrows and jumps - // through finally. - CGF.PushCleanupBlock(FinallyBlock); + // Emit the body of the statement. + CGF.EmitStmt(S.getSynchBody()); - CGF.setInvokeDest(TryHandler); + // Pop the lock-release cleanup. + CGF.PopCleanupBlock(); +} - CGF.EmitBlock(TryBlock); - CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody() - : cast<ObjCAtSynchronizedStmt>(S).getSynchBody()); - CGF.EmitBranchThroughCleanup(FinallyEnd); +namespace { + struct CatchHandler { + const VarDecl *Variable; + const Stmt *Body; + llvm::BasicBlock *Block; + llvm::Value *TypeInfo; + }; - // Emit the exception handler. 
+ struct CallObjCEndCatch : EHScopeStack::LazyCleanup { + CallObjCEndCatch(bool MightThrow, llvm::Value *Fn) : + MightThrow(MightThrow), Fn(Fn) {} + bool MightThrow; + llvm::Value *Fn; - CGF.EmitBlock(TryHandler); + void Emit(CodeGenFunction &CGF, bool IsForEH) { + if (!MightThrow) { + CGF.Builder.CreateCall(Fn)->setDoesNotThrow(); + return; + } + + CGF.EmitCallOrInvoke(Fn, 0, 0); + } + }; +} - llvm::Value *llvm_eh_exception = - CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_exception); - llvm::Value *llvm_eh_selector = - CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector); - llvm::Value *llvm_eh_typeid_for = - CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for); - llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc"); - llvm::Value *RethrowPtr = CGF.CreateTempAlloca(Exc->getType(), "_rethrow"); - - llvm::SmallVector<llvm::Value*, 8> SelectorArgs; - SelectorArgs.push_back(Exc); - SelectorArgs.push_back(ObjCTypes.getEHPersonalityPtr()); - - // Construct the lists of (type, catch body) to handle. - llvm::SmallVector<std::pair<const VarDecl*, const Stmt*>, 8> Handlers; - bool HasCatchAll = false; - if (isTry) { - const ObjCAtTryStmt &AtTry = cast<ObjCAtTryStmt>(S); - for (unsigned I = 0, N = AtTry.getNumCatchStmts(); I != N; ++I) { - const ObjCAtCatchStmt *CatchStmt = AtTry.getCatchStmt(I); +void CGObjCNonFragileABIMac::EmitTryStmt(CodeGen::CodeGenFunction &CGF, + const ObjCAtTryStmt &S) { + // Jump destination for falling out of catch bodies. + CodeGenFunction::JumpDest Cont; + if (S.getNumCatchStmts()) + Cont = CGF.getJumpDestInCurrentScope("eh.cont"); + + CodeGenFunction::FinallyInfo FinallyInfo; + if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt()) + FinallyInfo = CGF.EnterFinallyBlock(Finally->getFinallyBody(), + ObjCTypes.getObjCBeginCatchFn(), + ObjCTypes.getObjCEndCatchFn(), + ObjCTypes.getExceptionRethrowFn()); + + llvm::SmallVector<CatchHandler, 8> Handlers; + + // Enter the catch, if there is one. 
+ if (S.getNumCatchStmts()) { + for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) { + const ObjCAtCatchStmt *CatchStmt = S.getCatchStmt(I); const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl(); - Handlers.push_back(std::make_pair(CatchDecl, CatchStmt->getCatchBody())); - // catch(...) always matches. + Handlers.push_back(CatchHandler()); + CatchHandler &Handler = Handlers.back(); + Handler.Variable = CatchDecl; + Handler.Body = CatchStmt->getCatchBody(); + Handler.Block = CGF.createBasicBlock("catch"); + + // @catch(...) always matches. if (!CatchDecl) { - // Use i8* null here to signal this is a catch all, not a cleanup. - llvm::Value *Null = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy); - SelectorArgs.push_back(Null); - HasCatchAll = true; + Handler.TypeInfo = 0; // catch-all + // Don't consider any other catches. break; } + // There's a particular fixed type info for 'id'. if (CatchDecl->getType()->isObjCIdType() || CatchDecl->getType()->isObjCQualifiedIdType()) { llvm::Value *IDEHType = @@ -5614,7 +5760,7 @@ CGObjCNonFragileABIMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, false, llvm::GlobalValue::ExternalLinkage, 0, "OBJC_EHTYPE_id"); - SelectorArgs.push_back(IDEHType); + Handler.TypeInfo = IDEHType; } else { // All other types should be Objective-C interface pointer types. const ObjCObjectPointerType *PT = @@ -5622,207 +5768,101 @@ CGObjCNonFragileABIMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, assert(PT && "Invalid @catch type."); const ObjCInterfaceType *IT = PT->getInterfaceType(); assert(IT && "Invalid @catch type."); - llvm::Value *EHType = GetInterfaceEHType(IT->getDecl(), false); - SelectorArgs.push_back(EHType); + Handler.TypeInfo = GetInterfaceEHType(IT->getDecl(), false); } } - } - // We use a cleanup unless there was already a catch all. 
- if (!HasCatchAll) { - // Even though this is a cleanup, treat it as a catch all to avoid the C++ - // personality behavior of terminating the process if only cleanups are - // found in the exception handling stack. - SelectorArgs.push_back(llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy)); - Handlers.push_back(std::make_pair((const ParmVarDecl*) 0, (const Stmt*) 0)); + EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size()); + for (unsigned I = 0, E = Handlers.size(); I != E; ++I) + Catch->setHandler(I, Handlers[I].TypeInfo, Handlers[I].Block); } + + // Emit the try body. + CGF.EmitStmt(S.getTryBody()); - llvm::Value *Selector = - CGF.Builder.CreateCall(llvm_eh_selector, - SelectorArgs.begin(), SelectorArgs.end(), - "selector"); - for (unsigned i = 0, e = Handlers.size(); i != e; ++i) { - const VarDecl *CatchParam = Handlers[i].first; - const Stmt *CatchBody = Handlers[i].second; - - llvm::BasicBlock *Next = 0; - - // The last handler always matches. - if (i + 1 != e) { - assert(CatchParam && "Only last handler can be a catch all."); + // Leave the try. + if (S.getNumCatchStmts()) + CGF.EHStack.popCatch(); - llvm::BasicBlock *Match = CGF.createBasicBlock("match"); - Next = CGF.createBasicBlock("catch.next"); - llvm::Value *Id = - CGF.Builder.CreateCall(llvm_eh_typeid_for, - CGF.Builder.CreateBitCast(SelectorArgs[i+2], - ObjCTypes.Int8PtrTy)); - CGF.Builder.CreateCondBr(CGF.Builder.CreateICmpEQ(Selector, Id), - Match, Next); + // Remember where we were. + CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP(); - CGF.EmitBlock(Match); - } + // Emit the handlers. + for (unsigned I = 0, E = Handlers.size(); I != E; ++I) { + CatchHandler &Handler = Handlers[I]; - if (CatchBody) { - llvm::BasicBlock *MatchEnd = CGF.createBasicBlock("match.end"); - - // Cleanups must call objc_end_catch. 
- CGF.PushCleanupBlock(MatchEnd); - - llvm::Value *ExcObject = - CGF.Builder.CreateCall(ObjCTypes.getObjCBeginCatchFn(), Exc); - - // Bind the catch parameter if it exists. - if (CatchParam) { - ExcObject = - CGF.Builder.CreateBitCast(ExcObject, - CGF.ConvertType(CatchParam->getType())); - // CatchParam is a ParmVarDecl because of the grammar - // construction used to handle this, but for codegen purposes - // we treat this as a local decl. - CGF.EmitLocalBlockVarDecl(*CatchParam); - CGF.Builder.CreateStore(ExcObject, CGF.GetAddrOfLocalVar(CatchParam)); - } + CGF.EmitBlock(Handler.Block); + llvm::Value *RawExn = CGF.Builder.CreateLoad(CGF.getExceptionSlot()); - // Exceptions inside the catch block must be rethrown. We set a special - // purpose invoke destination for this which just collects the thrown - // exception and overwrites the object in RethrowPtr, branches through the - // match.end to make sure we call objc_end_catch, before branching to the - // rethrow handler. - llvm::BasicBlock *MatchHandler = CGF.createBasicBlock("match.handler"); - CGF.setInvokeDest(MatchHandler); - CGF.ObjCEHValueStack.push_back(ExcObject); - CGF.EmitStmt(CatchBody); - CGF.ObjCEHValueStack.pop_back(); - CGF.setInvokeDest(0); + // Enter the catch. + llvm::CallInst *Exn = + CGF.Builder.CreateCall(ObjCTypes.getObjCBeginCatchFn(), RawExn, + "exn.adjusted"); + Exn->setDoesNotThrow(); - CGF.EmitBranchThroughCleanup(FinallyEnd); + // Add a cleanup to leave the catch. + bool EndCatchMightThrow = (Handler.Variable == 0); + CGF.EHStack.pushLazyCleanup<CallObjCEndCatch>(NormalAndEHCleanup, + EndCatchMightThrow, + ObjCTypes.getObjCEndCatchFn()); - // Don't emit the extra match handler if there we no unprotected calls in - // the catch block. 
- if (MatchHandler->use_empty()) { - delete MatchHandler; - } else { - CGF.EmitBlock(MatchHandler); - llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc"); - // We are required to emit this call to satisfy LLVM, even - // though we don't use the result. - CGF.Builder.CreateCall3(llvm_eh_selector, - Exc, ObjCTypes.getEHPersonalityPtr(), - llvm::ConstantInt::get( - llvm::Type::getInt32Ty(VMContext), 0), - "unused_eh_selector"); - CGF.Builder.CreateStore(Exc, RethrowPtr); - CGF.EmitBranchThroughCleanup(FinallyRethrow); - } + // Bind the catch parameter if it exists. + if (const VarDecl *CatchParam = Handler.Variable) { + const llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType()); + llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType); - CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock(); - - CGF.EmitBlock(MatchEnd); - - // Unfortunately, we also have to generate another EH frame here - // in case this throws. - llvm::BasicBlock *MatchEndHandler = - CGF.createBasicBlock("match.end.handler"); - llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont"); - CGF.Builder.CreateInvoke(ObjCTypes.getObjCEndCatchFn(), - Cont, MatchEndHandler); - - CGF.EmitBlock(Cont); - if (Info.SwitchBlock) - CGF.EmitBlock(Info.SwitchBlock); - if (Info.EndBlock) - CGF.EmitBlock(Info.EndBlock); - - CGF.EmitBlock(MatchEndHandler); - llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc"); - // We are required to emit this call to satisfy LLVM, even - // though we don't use the result. 
- CGF.Builder.CreateCall3(llvm_eh_selector, - Exc, ObjCTypes.getEHPersonalityPtr(), - llvm::ConstantInt::get( - llvm::Type::getInt32Ty(VMContext), 0), - "unused_eh_selector"); - CGF.Builder.CreateStore(Exc, RethrowPtr); - CGF.EmitBranchThroughCleanup(FinallyRethrow); + CGF.EmitLocalBlockVarDecl(*CatchParam); + CGF.Builder.CreateStore(CastExn, CGF.GetAddrOfLocalVar(CatchParam)); + } - if (Next) - CGF.EmitBlock(Next); - } else { - assert(!Next && "catchup should be last handler."); + CGF.ObjCEHValueStack.push_back(Exn); + CGF.EmitStmt(Handler.Body); + CGF.ObjCEHValueStack.pop_back(); - CGF.Builder.CreateStore(Exc, RethrowPtr); - CGF.EmitBranchThroughCleanup(FinallyRethrow); - } - } + // Leave the earlier cleanup. + CGF.PopCleanupBlock(); - // Pop the cleanup entry, the @finally is outside this cleanup - // scope. - CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock(); - CGF.setInvokeDest(PrevLandingPad); + CGF.EmitBranchThroughCleanup(Cont); + } - CGF.EmitBlock(FinallyBlock); + // Go back to the try-statement fallthrough. + CGF.Builder.restoreIP(SavedIP); - if (isTry) { - if (const ObjCAtFinallyStmt* FinallyStmt = - cast<ObjCAtTryStmt>(S).getFinallyStmt()) - CGF.EmitStmt(FinallyStmt->getFinallyBody()); - } else { - // Emit 'objc_sync_exit(expr)' as finally's sole statement for - // @synchronized. - CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg); - } - - if (Info.SwitchBlock) - CGF.EmitBlock(Info.SwitchBlock); - if (Info.EndBlock) - CGF.EmitBlock(Info.EndBlock); - - // Branch around the rethrow code. - CGF.EmitBranch(FinallyEnd); - - // Generate the rethrow code, taking care to use an invoke if we are in a - // nested exception scope. 
- CGF.EmitBlock(FinallyRethrow); - if (PrevLandingPad) { - llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont"); - CGF.Builder.CreateInvoke(ObjCTypes.getUnwindResumeOrRethrowFn(), - Cont, PrevLandingPad, - CGF.Builder.CreateLoad(RethrowPtr)); - CGF.EmitBlock(Cont); - } else { - CGF.Builder.CreateCall(ObjCTypes.getUnwindResumeOrRethrowFn(), - CGF.Builder.CreateLoad(RethrowPtr)); - } - CGF.Builder.CreateUnreachable(); + // Pop out of the normal cleanup on the finally. + if (S.getFinallyStmt()) + CGF.ExitFinallyBlock(FinallyInfo); - CGF.EmitBlock(FinallyEnd); + if (Cont.Block) + CGF.EmitBlock(Cont.Block); } /// EmitThrowStmt - Generate code for a throw statement. void CGObjCNonFragileABIMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF, const ObjCAtThrowStmt &S) { llvm::Value *Exception; + llvm::Constant *FunctionThrowOrRethrow; if (const Expr *ThrowExpr = S.getThrowExpr()) { Exception = CGF.EmitScalarExpr(ThrowExpr); + FunctionThrowOrRethrow = ObjCTypes.getExceptionThrowFn(); } else { assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) && "Unexpected rethrow outside @catch block."); Exception = CGF.ObjCEHValueStack.back(); + FunctionThrowOrRethrow = ObjCTypes.getExceptionRethrowFn(); } llvm::Value *ExceptionAsObject = CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy, "tmp"); llvm::BasicBlock *InvokeDest = CGF.getInvokeDest(); if (InvokeDest) { - llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont"); - CGF.Builder.CreateInvoke(ObjCTypes.getExceptionThrowFn(), - Cont, InvokeDest, + CGF.Builder.CreateInvoke(FunctionThrowOrRethrow, + CGF.getUnreachableBlock(), InvokeDest, &ExceptionAsObject, &ExceptionAsObject + 1); - CGF.EmitBlock(Cont); - } else - CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), ExceptionAsObject); - CGF.Builder.CreateUnreachable(); + } else { + CGF.Builder.CreateCall(FunctionThrowOrRethrow, ExceptionAsObject) + ->setDoesNotReturn(); + CGF.Builder.CreateUnreachable(); + } // Clear the insertion point to 
indicate we are in unreachable code. CGF.Builder.ClearInsertionPoint(); @@ -5863,7 +5903,8 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID, llvm::GlobalValue::ExternalLinkage, 0, VTableName); - llvm::Value *VTableIdx = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 2); + llvm::Value *VTableIdx = + llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 2); std::vector<llvm::Constant*> Values(3); Values[0] = llvm::ConstantExpr::getGetElementPtr(VTableGV, &VTableIdx, 1); diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h index 8de7f10..eb79f09 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h @@ -97,7 +97,7 @@ public: /// return value should have the LLVM type for pointer-to /// ASTContext::getObjCSelType(). virtual llvm::Value *GetSelector(CGBuilderTy &Builder, - Selector Sel) = 0; + Selector Sel, bool lval=false) = 0; /// Get a typed selector. virtual llvm::Value *GetSelector(CGBuilderTy &Builder, @@ -181,8 +181,10 @@ public: /// compiler when a mutation is detected during foreach iteration. virtual llvm::Constant *EnumerationMutationFunction() = 0; - virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, - const Stmt &S) = 0; + virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF, + const ObjCAtSynchronizedStmt &S) = 0; + virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF, + const ObjCAtTryStmt &S) = 0; virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF, const ObjCAtThrowStmt &S) = 0; virtual llvm::Value *EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF, @@ -208,7 +210,7 @@ public: virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF, llvm::Value *DestPtr, llvm::Value *SrcPtr, - QualType Ty) = 0; + llvm::Value *Size) = 0; }; /// Creates an instance of an Objective-C runtime class. 
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp index aec1c45..1cca977 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp @@ -271,7 +271,7 @@ static bool ShouldUseExternalRTTIDescriptor(ASTContext &Context, // Get the key function. const CXXMethodDecl *KeyFunction = RD->getASTContext().getKeyFunction(RD); - if (KeyFunction && !KeyFunction->getBody()) { + if (KeyFunction && !KeyFunction->hasBody()) { // The class has a key function, but it is not defined in this translation // unit, so we should use the external descriptor for it. return true; @@ -728,15 +728,19 @@ void RTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) { void RTTIBuilder::BuildPointerTypeInfo(const PointerType *Ty) { QualType PointeeTy = Ty->getPointeeType(); + Qualifiers Quals; + QualType UnqualifiedPointeeTy = + CGM.getContext().getUnqualifiedArrayType(PointeeTy, Quals); + // Itanium C++ ABI 2.9.5p7: // __flags is a flag word describing the cv-qualification and other // attributes of the type pointed to - unsigned Flags = ComputeQualifierFlags(PointeeTy.getQualifiers()); + unsigned Flags = ComputeQualifierFlags(Quals); // Itanium C++ ABI 2.9.5p7: // When the abi::__pbase_type_info is for a direct or indirect pointer to an // incomplete class type, the incomplete target type flag is set. - if (ContainsIncompleteClassType(PointeeTy)) + if (ContainsIncompleteClassType(UnqualifiedPointeeTy)) Flags |= PTI_Incomplete; const llvm::Type *UnsignedIntLTy = @@ -747,7 +751,7 @@ void RTTIBuilder::BuildPointerTypeInfo(const PointerType *Ty) { // __pointee is a pointer to the std::type_info derivation for the // unqualified type being pointed to. 
llvm::Constant *PointeeTypeInfo = - RTTIBuilder(CGM).BuildTypeInfo(PointeeTy.getUnqualifiedType()); + RTTIBuilder(CGM).BuildTypeInfo(UnqualifiedPointeeTy); Fields.push_back(PointeeTypeInfo); } @@ -756,17 +760,21 @@ void RTTIBuilder::BuildPointerTypeInfo(const PointerType *Ty) { void RTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) { QualType PointeeTy = Ty->getPointeeType(); + Qualifiers Quals; + QualType UnqualifiedPointeeTy = + CGM.getContext().getUnqualifiedArrayType(PointeeTy, Quals); + // Itanium C++ ABI 2.9.5p7: // __flags is a flag word describing the cv-qualification and other // attributes of the type pointed to. - unsigned Flags = ComputeQualifierFlags(PointeeTy.getQualifiers()); + unsigned Flags = ComputeQualifierFlags(Quals); const RecordType *ClassType = cast<RecordType>(Ty->getClass()); // Itanium C++ ABI 2.9.5p7: // When the abi::__pbase_type_info is for a direct or indirect pointer to an // incomplete class type, the incomplete target type flag is set. - if (ContainsIncompleteClassType(PointeeTy)) + if (ContainsIncompleteClassType(UnqualifiedPointeeTy)) Flags |= PTI_Incomplete; if (IsIncompleteClassType(ClassType)) @@ -780,7 +788,7 @@ void RTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) { // __pointee is a pointer to the std::type_info derivation for the // unqualified type being pointed to. 
llvm::Constant *PointeeTypeInfo = - RTTIBuilder(CGM).BuildTypeInfo(PointeeTy.getUnqualifiedType()); + RTTIBuilder(CGM).BuildTypeInfo(UnqualifiedPointeeTy); Fields.push_back(PointeeTypeInfo); // Itanium C++ ABI 2.9.5p9: diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp index efde380..b72725e 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp @@ -79,11 +79,8 @@ void CodeGenFunction::EmitStmt(const Stmt *S) { // Expression emitters don't handle unreachable blocks yet, so look for one // explicitly here. This handles the common case of a call to a noreturn // function. - // We can't erase blocks with an associated cleanup size here since the - // memory might be reused, leaving the old cleanup info pointing at a new - // block. if (llvm::BasicBlock *CurBB = Builder.GetInsertBlock()) { - if (CurBB->empty() && CurBB->use_empty() && !BlockScopes.count(CurBB)) { + if (CurBB->empty() && CurBB->use_empty()) { CurBB->eraseFromParent(); Builder.ClearInsertionPoint(); } @@ -159,7 +156,7 @@ RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast, } // Keep track of the current cleanup stack depth. - CleanupScope Scope(*this); + RunCleanupsScope Scope(*this); for (CompoundStmt::const_body_iterator I = S.body_begin(), E = S.body_end()-GetLast; I != E; ++I) @@ -198,7 +195,7 @@ void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) { // If there is a cleanup stack, then we it isn't worth trying to // simplify this block (we would need to remove it from the scope map // and cleanup entry). - if (!CleanupEntries.empty()) + if (!EHStack.empty()) return; // Can only simplify direct branches. @@ -221,18 +218,6 @@ void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) { return; } - // If necessary, associate the block with the cleanup stack size. 
- if (!CleanupEntries.empty()) { - // Check if the basic block has already been inserted. - BlockScopeMap::iterator I = BlockScopes.find(BB); - if (I != BlockScopes.end()) { - assert(I->second == CleanupEntries.size() - 1); - } else { - BlockScopes[BB] = CleanupEntries.size() - 1; - CleanupEntries.back().Blocks.push_back(BB); - } - } - // Place the block after the current block, if possible, or else at // the end of the function. if (CurBB && CurBB->getParent()) @@ -259,8 +244,35 @@ void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) { Builder.ClearInsertionPoint(); } +CodeGenFunction::JumpDest +CodeGenFunction::getJumpDestForLabel(const LabelStmt *S) { + JumpDest &Dest = LabelMap[S]; + if (Dest.Block) return Dest; + + // Create, but don't insert, the new block. + Dest.Block = createBasicBlock(S->getName()); + Dest.ScopeDepth = EHScopeStack::stable_iterator::invalid(); + return Dest; +} + void CodeGenFunction::EmitLabel(const LabelStmt &S) { - EmitBlock(getBasicBlockForLabel(&S)); + JumpDest &Dest = LabelMap[&S]; + + // If we didn't needed a forward reference to this label, just go + // ahead and create a destination at the current scope. + if (!Dest.Block) { + Dest = getJumpDestInCurrentScope(S.getName()); + + // Otherwise, we need to give this label a target depth and remove + // it from the branch-fixups list. 
+ } else { + assert(!Dest.ScopeDepth.isValid() && "already emitted label!"); + Dest.ScopeDepth = EHStack.stable_begin(); + + EHStack.resolveBranchFixups(Dest.Block); + } + + EmitBlock(Dest.Block); } @@ -276,7 +288,7 @@ void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) { if (HaveInsertPoint()) EmitStopPoint(&S); - EmitBranchThroughCleanup(getBasicBlockForLabel(S.getLabel())); + EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel())); } @@ -301,7 +313,7 @@ void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) { void CodeGenFunction::EmitIfStmt(const IfStmt &S) { // C99 6.8.4.1: The first substatement is executed if the expression compares // unequal to 0. The condition must be a scalar type. - CleanupScope ConditionScope(*this); + RunCleanupsScope ConditionScope(*this); if (S.getConditionVariable()) EmitLocalBlockVarDecl(*S.getConditionVariable()); @@ -318,7 +330,7 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) { // This avoids emitting dead code and simplifies the CFG substantially. if (!ContainsLabel(Skipped)) { if (Executed) { - CleanupScope ExecutedScope(*this); + RunCleanupsScope ExecutedScope(*this); EmitStmt(Executed); } return; @@ -337,7 +349,7 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) { // Emit the 'then' code. EmitBlock(ThenBlock); { - CleanupScope ThenScope(*this); + RunCleanupsScope ThenScope(*this); EmitStmt(S.getThen()); } EmitBranch(ContBlock); @@ -346,7 +358,7 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) { if (const Stmt *Else = S.getElse()) { EmitBlock(ElseBlock); { - CleanupScope ElseScope(*this); + RunCleanupsScope ElseScope(*this); EmitStmt(Else); } EmitBranch(ContBlock); @@ -357,20 +369,17 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) { } void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) { - // Emit the header for the loop, insert it, which will create an uncond br to - // it. 
- llvm::BasicBlock *LoopHeader = createBasicBlock("while.cond"); - EmitBlock(LoopHeader); - - // Create an exit block for when the condition fails, create a block for the - // body of the loop. - llvm::BasicBlock *ExitBlock = createBasicBlock("while.end"); - llvm::BasicBlock *LoopBody = createBasicBlock("while.body"); - llvm::BasicBlock *CleanupBlock = 0; - llvm::BasicBlock *EffectiveExitBlock = ExitBlock; + // Emit the header for the loop, which will also become + // the continue target. + JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond"); + EmitBlock(LoopHeader.Block); + + // Create an exit block for when the condition fails, which will + // also become the break target. + JumpDest LoopExit = getJumpDestInCurrentScope("while.end"); // Store the blocks to use for break and continue. - BreakContinueStack.push_back(BreakContinue(ExitBlock, LoopHeader)); + BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader)); // C++ [stmt.while]p2: // When the condition of a while statement is a declaration, the @@ -379,18 +388,10 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) { // [...] // The object created in a condition is destroyed and created // with each iteration of the loop. - CleanupScope ConditionScope(*this); + RunCleanupsScope ConditionScope(*this); - if (S.getConditionVariable()) { + if (S.getConditionVariable()) EmitLocalBlockVarDecl(*S.getConditionVariable()); - - // If this condition variable requires cleanups, create a basic - // block to handle those cleanups. - if (ConditionScope.requiresCleanups()) { - CleanupBlock = createBasicBlock("while.cleanup"); - EffectiveExitBlock = CleanupBlock; - } - } // Evaluate the conditional in the while header. C99 6.8.5.1: The // evaluation of the controlling expression takes place before each @@ -405,61 +406,63 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) { EmitBoolCondBranch = false; // As long as the condition is true, go to the loop body. 
- if (EmitBoolCondBranch) - Builder.CreateCondBr(BoolCondVal, LoopBody, EffectiveExitBlock); + llvm::BasicBlock *LoopBody = createBasicBlock("while.body"); + if (EmitBoolCondBranch) { + llvm::BasicBlock *ExitBlock = LoopExit.Block; + if (ConditionScope.requiresCleanups()) + ExitBlock = createBasicBlock("while.exit"); + + Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock); + + if (ExitBlock != LoopExit.Block) { + EmitBlock(ExitBlock); + EmitBranchThroughCleanup(LoopExit); + } + } - // Emit the loop body. + // Emit the loop body. We have to emit this in a cleanup scope + // because it might be a singleton DeclStmt. { - CleanupScope BodyScope(*this); + RunCleanupsScope BodyScope(*this); EmitBlock(LoopBody); EmitStmt(S.getBody()); } BreakContinueStack.pop_back(); - if (CleanupBlock) { - // If we have a cleanup block, jump there to perform cleanups - // before looping. - EmitBranch(CleanupBlock); + // Immediately force cleanup. + ConditionScope.ForceCleanup(); - // Emit the cleanup block, performing cleanups for the condition - // and then jumping to either the loop header or the exit block. - EmitBlock(CleanupBlock); - ConditionScope.ForceCleanup(); - Builder.CreateCondBr(BoolCondVal, LoopHeader, ExitBlock); - } else { - // Cycle to the condition. - EmitBranch(LoopHeader); - } + // Branch to the loop header again. + EmitBranch(LoopHeader.Block); // Emit the exit block. - EmitBlock(ExitBlock, true); - + EmitBlock(LoopExit.Block, true); // The LoopHeader typically is just a branch if we skipped emitting // a branch, try to erase it. - if (!EmitBoolCondBranch && !CleanupBlock) - SimplifyForwardingBlocks(LoopHeader); + if (!EmitBoolCondBranch) + SimplifyForwardingBlocks(LoopHeader.Block); } void CodeGenFunction::EmitDoStmt(const DoStmt &S) { - // Emit the body for the loop, insert it, which will create an uncond br to - // it. 
- llvm::BasicBlock *LoopBody = createBasicBlock("do.body"); - llvm::BasicBlock *AfterDo = createBasicBlock("do.end"); - EmitBlock(LoopBody); - - llvm::BasicBlock *DoCond = createBasicBlock("do.cond"); + JumpDest LoopExit = getJumpDestInCurrentScope("do.end"); + JumpDest LoopCond = getJumpDestInCurrentScope("do.cond"); // Store the blocks to use for break and continue. - BreakContinueStack.push_back(BreakContinue(AfterDo, DoCond)); + BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond)); - // Emit the body of the loop into the block. - EmitStmt(S.getBody()); + // Emit the body of the loop. + llvm::BasicBlock *LoopBody = createBasicBlock("do.body"); + EmitBlock(LoopBody); + { + RunCleanupsScope BodyScope(*this); + EmitStmt(S.getBody()); + } BreakContinueStack.pop_back(); - EmitBlock(DoCond); + EmitBlock(LoopCond.Block); // C99 6.8.5.2: "The evaluation of the controlling expression takes place // after each execution of the loop body." @@ -478,47 +481,49 @@ void CodeGenFunction::EmitDoStmt(const DoStmt &S) { // As long as the condition is true, iterate the loop. if (EmitBoolCondBranch) - Builder.CreateCondBr(BoolCondVal, LoopBody, AfterDo); + Builder.CreateCondBr(BoolCondVal, LoopBody, LoopExit.Block); // Emit the exit block. - EmitBlock(AfterDo); + EmitBlock(LoopExit.Block); // The DoCond block typically is just a branch if we skipped // emitting a branch, try to erase it. if (!EmitBoolCondBranch) - SimplifyForwardingBlocks(DoCond); + SimplifyForwardingBlocks(LoopCond.Block); } void CodeGenFunction::EmitForStmt(const ForStmt &S) { - CleanupScope ForScope(*this); + JumpDest LoopExit = getJumpDestInCurrentScope("for.end"); + + RunCleanupsScope ForScope(*this); // Evaluate the first part before the loop. if (S.getInit()) EmitStmt(S.getInit()); // Start the loop with a block that tests the condition. 
- llvm::BasicBlock *CondBlock = createBasicBlock("for.cond"); - llvm::BasicBlock *AfterFor = createBasicBlock("for.end"); - llvm::BasicBlock *IncBlock = 0; - llvm::BasicBlock *CondCleanup = 0; - llvm::BasicBlock *EffectiveExitBlock = AfterFor; + // If there's an increment, the continue scope will be overwritten + // later. + JumpDest Continue = getJumpDestInCurrentScope("for.cond"); + llvm::BasicBlock *CondBlock = Continue.Block; EmitBlock(CondBlock); // Create a cleanup scope for the condition variable cleanups. - CleanupScope ConditionScope(*this); + RunCleanupsScope ConditionScope(*this); llvm::Value *BoolCondVal = 0; if (S.getCond()) { // If the for statement has a condition scope, emit the local variable // declaration. + llvm::BasicBlock *ExitBlock = LoopExit.Block; if (S.getConditionVariable()) { EmitLocalBlockVarDecl(*S.getConditionVariable()); - - if (ConditionScope.requiresCleanups()) { - CondCleanup = createBasicBlock("for.cond.cleanup"); - EffectiveExitBlock = CondCleanup; - } } + + // If there are any cleanups between here and the loop-exit scope, + // create a block to stage a loop exit along. + if (ForScope.requiresCleanups()) + ExitBlock = createBasicBlock("for.cond.cleanup"); // As long as the condition is true, iterate the loop. llvm::BasicBlock *ForBody = createBasicBlock("for.body"); @@ -526,7 +531,12 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) { // C99 6.8.5p2/p4: The first substatement is executed if the expression // compares unequal to 0. The condition must be a scalar type. 
BoolCondVal = EvaluateExprAsBool(S.getCond()); - Builder.CreateCondBr(BoolCondVal, ForBody, EffectiveExitBlock); + Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock); + + if (ExitBlock != LoopExit.Block) { + EmitBlock(ExitBlock); + EmitBranchThroughCleanup(LoopExit); + } EmitBlock(ForBody); } else { @@ -535,17 +545,15 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) { } // If the for loop doesn't have an increment we can just use the - // condition as the continue block. - llvm::BasicBlock *ContinueBlock; + // condition as the continue block. Otherwise we'll need to create + // a block for it (in the current scope, i.e. in the scope of the + // condition), and that we will become our continue block. if (S.getInc()) - ContinueBlock = IncBlock = createBasicBlock("for.inc"); - else - ContinueBlock = CondBlock; + Continue = getJumpDestInCurrentScope("for.inc"); // Store the blocks to use for break and continue. - BreakContinueStack.push_back(BreakContinue(AfterFor, ContinueBlock)); + BreakContinueStack.push_back(BreakContinue(LoopExit, Continue)); - // If the condition is true, execute the body of the for stmt. CGDebugInfo *DI = getDebugInfo(); if (DI) { DI->setLocation(S.getSourceRange().getBegin()); @@ -555,37 +563,30 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) { { // Create a separate cleanup scope for the body, in case it is not // a compound statement. - CleanupScope BodyScope(*this); + RunCleanupsScope BodyScope(*this); EmitStmt(S.getBody()); } // If there is an increment, emit it next. if (S.getInc()) { - EmitBlock(IncBlock); + EmitBlock(Continue.Block); EmitStmt(S.getInc()); } BreakContinueStack.pop_back(); - - // Finally, branch back up to the condition for the next iteration. - if (CondCleanup) { - // Branch to the cleanup block. - EmitBranch(CondCleanup); - - // Emit the cleanup block, which branches back to the loop body or - // outside of the for statement once it is done. 
- EmitBlock(CondCleanup); - ConditionScope.ForceCleanup(); - Builder.CreateCondBr(BoolCondVal, CondBlock, AfterFor); - } else - EmitBranch(CondBlock); + + ConditionScope.ForceCleanup(); + EmitBranch(CondBlock); + + ForScope.ForceCleanup(); + if (DI) { DI->setLocation(S.getSourceRange().getEnd()); DI->EmitRegionEnd(CurFn, Builder); } // Emit the fall-through block. - EmitBlock(AfterFor, true); + EmitBlock(LoopExit.Block, true); } void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) { @@ -631,7 +632,7 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) { } else if (FnRetTy->isReferenceType()) { // If this function returns a reference, take the address of the expression // rather than the value. - RValue Result = EmitReferenceBindingToExpr(RV, false); + RValue Result = EmitReferenceBindingToExpr(RV, /*InitializedDecl=*/0); Builder.CreateStore(Result.getScalarVal(), ReturnValue); } else if (!hasAggregateLLVMType(RV->getType())) { Builder.CreateStore(EmitScalarExpr(RV), ReturnValue); @@ -666,7 +667,7 @@ void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) { if (HaveInsertPoint()) EmitStopPoint(&S); - llvm::BasicBlock *Block = BreakContinueStack.back().BreakBlock; + JumpDest Block = BreakContinueStack.back().BreakBlock; EmitBranchThroughCleanup(Block); } @@ -679,7 +680,7 @@ void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) { if (HaveInsertPoint()) EmitStopPoint(&S); - llvm::BasicBlock *Block = BreakContinueStack.back().ContinueBlock; + JumpDest Block = BreakContinueStack.back().ContinueBlock; EmitBranchThroughCleanup(Block); } @@ -788,7 +789,9 @@ void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) { } void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) { - CleanupScope ConditionScope(*this); + JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog"); + + RunCleanupsScope ConditionScope(*this); if (S.getConditionVariable()) EmitLocalBlockVarDecl(*S.getConditionVariable()); @@ -803,7 +806,6 @@ void 
CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) { // statement. We also need to create a default block now so that // explicit case ranges tests can have a place to jump to on // failure. - llvm::BasicBlock *NextBlock = createBasicBlock("sw.epilog"); llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default"); SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock); CaseRangeBlock = DefaultBlock; @@ -813,12 +815,11 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) { // All break statements jump to NextBlock. If BreakContinueStack is non empty // then reuse last ContinueBlock. - llvm::BasicBlock *ContinueBlock = 0; + JumpDest OuterContinue; if (!BreakContinueStack.empty()) - ContinueBlock = BreakContinueStack.back().ContinueBlock; + OuterContinue = BreakContinueStack.back().ContinueBlock; - // Ensure any vlas created between there and here, are undone - BreakContinueStack.push_back(BreakContinue(NextBlock, ContinueBlock)); + BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue)); // Emit switch body. EmitStmt(S.getBody()); @@ -829,15 +830,22 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) { // been chained on top. SwitchInsn->setSuccessor(0, CaseRangeBlock); - // If a default was never emitted then reroute any jumps to it and - // discard. + // If a default was never emitted: if (!DefaultBlock->getParent()) { - DefaultBlock->replaceAllUsesWith(NextBlock); - delete DefaultBlock; + // If we have cleanups, emit the default block so that there's a + // place to jump through the cleanups from. + if (ConditionScope.requiresCleanups()) { + EmitBlock(DefaultBlock); + + // Otherwise, just forward the default block to the switch end. + } else { + DefaultBlock->replaceAllUsesWith(SwitchExit.Block); + delete DefaultBlock; + } } // Emit continuation. 
- EmitBlock(NextBlock, true); + EmitBlock(SwitchExit.Block, true); SwitchInsn = SavedSwitchInsn; CaseRangeBlock = SavedCRBlock; @@ -1066,8 +1074,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) { getContext().getTypeSize(InputTy)) { // Use ptrtoint as appropriate so that we can do our extension. if (isa<llvm::PointerType>(Arg->getType())) - Arg = Builder.CreatePtrToInt(Arg, - llvm::IntegerType::get(VMContext, LLVMPointerWidth)); + Arg = Builder.CreatePtrToInt(Arg, IntPtrTy); const llvm::Type *OutputTy = ConvertType(OutputType); if (isa<llvm::IntegerType>(OutputTy)) Arg = Builder.CreateZExt(Arg, OutputTy); @@ -1132,7 +1139,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) { // call. unsigned LocID = S.getAsmString()->getLocStart().getRawEncoding(); llvm::Value *LocIDC = - llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), LocID); + llvm::ConstantInt::get(Int32Ty, LocID); Result->setMetadata("srcloc", llvm::MDNode::get(VMContext, &LocIDC, 1)); // Extract all of the register value results from the asm. 
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp index a8f0467..fd7c616 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGTemporaries.cpp @@ -15,14 +15,38 @@ using namespace clang; using namespace CodeGen; -void CodeGenFunction::PushCXXTemporary(const CXXTemporary *Temporary, - llvm::Value *Ptr) { - assert((LiveTemporaries.empty() || - LiveTemporaries.back().ThisPtr != Ptr || - ConditionalBranchLevel) && - "Pushed the same temporary twice; AST is likely wrong"); - llvm::BasicBlock *DtorBlock = createBasicBlock("temp.dtor"); +static void EmitTemporaryCleanup(CodeGenFunction &CGF, + const CXXTemporary *Temporary, + llvm::Value *Addr, + llvm::Value *CondPtr) { + llvm::BasicBlock *CondEnd = 0; + + // If this is a conditional temporary, we need to check the condition + // boolean and only call the destructor if it's true. + if (CondPtr) { + llvm::BasicBlock *CondBlock = CGF.createBasicBlock("temp.cond-dtor.call"); + CondEnd = CGF.createBasicBlock("temp.cond-dtor.cont"); + + llvm::Value *Cond = CGF.Builder.CreateLoad(CondPtr); + CGF.Builder.CreateCondBr(Cond, CondBlock, CondEnd); + CGF.EmitBlock(CondBlock); + } + + CGF.EmitCXXDestructorCall(Temporary->getDestructor(), + Dtor_Complete, /*ForVirtualBase=*/false, + Addr); + + if (CondPtr) { + // Reset the condition to false. + CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(CGF.getLLVMContext()), + CondPtr); + CGF.EmitBlock(CondEnd); + } +} +/// Emits all the code to cause the given temporary to be cleaned up. +void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary, + llvm::Value *Ptr) { llvm::AllocaInst *CondPtr = 0; // Check if temporaries need to be conditional. 
If so, we'll create a @@ -38,82 +62,13 @@ void CodeGenFunction::PushCXXTemporary(const CXXTemporary *Temporary, Builder.CreateStore(llvm::ConstantInt::getTrue(VMContext), CondPtr); } - LiveTemporaries.push_back(CXXLiveTemporaryInfo(Temporary, Ptr, DtorBlock, - CondPtr)); - - PushCleanupBlock(DtorBlock); + CleanupBlock Cleanup(*this, NormalCleanup); + EmitTemporaryCleanup(*this, Temporary, Ptr, CondPtr); if (Exceptions) { - const CXXLiveTemporaryInfo& Info = LiveTemporaries.back(); - llvm::BasicBlock *CondEnd = 0; - - EHCleanupBlock Cleanup(*this); - - // If this is a conditional temporary, we need to check the condition - // boolean and only call the destructor if it's true. - if (Info.CondPtr) { - llvm::BasicBlock *CondBlock = createBasicBlock("cond.dtor.call"); - CondEnd = createBasicBlock("cond.dtor.end"); - - llvm::Value *Cond = Builder.CreateLoad(Info.CondPtr); - Builder.CreateCondBr(Cond, CondBlock, CondEnd); - EmitBlock(CondBlock); - } - - EmitCXXDestructorCall(Info.Temporary->getDestructor(), - Dtor_Complete, /*ForVirtualBase=*/false, - Info.ThisPtr); - - if (CondEnd) { - // Reset the condition. to false. 
- Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext), Info.CondPtr); - EmitBlock(CondEnd); - } - } -} - -void CodeGenFunction::PopCXXTemporary() { - const CXXLiveTemporaryInfo& Info = LiveTemporaries.back(); - - CleanupBlockInfo CleanupInfo = PopCleanupBlock(); - assert(CleanupInfo.CleanupBlock == Info.DtorBlock && - "Cleanup block mismatch!"); - assert(!CleanupInfo.SwitchBlock && - "Should not have a switch block for temporary cleanup!"); - assert(!CleanupInfo.EndBlock && - "Should not have an end block for temporary cleanup!"); - - llvm::BasicBlock *CurBB = Builder.GetInsertBlock(); - if (CurBB && !CurBB->getTerminator() && - Info.DtorBlock->getNumUses() == 0) { - CurBB->getInstList().splice(CurBB->end(), Info.DtorBlock->getInstList()); - delete Info.DtorBlock; - } else - EmitBlock(Info.DtorBlock); - - llvm::BasicBlock *CondEnd = 0; - - // If this is a conditional temporary, we need to check the condition - // boolean and only call the destructor if it's true. - if (Info.CondPtr) { - llvm::BasicBlock *CondBlock = createBasicBlock("cond.dtor.call"); - CondEnd = createBasicBlock("cond.dtor.end"); - - llvm::Value *Cond = Builder.CreateLoad(Info.CondPtr); - Builder.CreateCondBr(Cond, CondBlock, CondEnd); - EmitBlock(CondBlock); - } - - EmitCXXDestructorCall(Info.Temporary->getDestructor(), - Dtor_Complete, /*ForVirtualBase=*/false, Info.ThisPtr); - - if (CondEnd) { - // Reset the condition. to false. - Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext), Info.CondPtr); - EmitBlock(CondEnd); + Cleanup.beginEHCleanup(); + EmitTemporaryCleanup(*this, Temporary, Ptr, CondPtr); } - - LiveTemporaries.pop_back(); } RValue @@ -121,40 +76,23 @@ CodeGenFunction::EmitCXXExprWithTemporaries(const CXXExprWithTemporaries *E, llvm::Value *AggLoc, bool IsAggLocVolatile, bool IsInitializer) { - // Keep track of the current cleanup stack depth. 
- size_t CleanupStackDepth = CleanupEntries.size(); - (void) CleanupStackDepth; - RValue RV; - { - CXXTemporariesCleanupScope Scope(*this); + RunCleanupsScope Scope(*this); RV = EmitAnyExpr(E->getSubExpr(), AggLoc, IsAggLocVolatile, /*IgnoreResult=*/false, IsInitializer); } - assert(CleanupEntries.size() == CleanupStackDepth && - "Cleanup size mismatch!"); - return RV; } LValue CodeGenFunction::EmitCXXExprWithTemporariesLValue( const CXXExprWithTemporaries *E) { - // Keep track of the current cleanup stack depth. - size_t CleanupStackDepth = CleanupEntries.size(); - (void) CleanupStackDepth; - - unsigned OldNumLiveTemporaries = LiveTemporaries.size(); - - LValue LV = EmitLValue(E->getSubExpr()); - - // Pop temporaries. - while (LiveTemporaries.size() > OldNumLiveTemporaries) - PopCXXTemporary(); - - assert(CleanupEntries.size() == CleanupStackDepth && - "Cleanup size mismatch!"); + LValue LV; + { + RunCleanupsScope Scope(*this); + LV = EmitLValue(E->getSubExpr()); + } return LV; } diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp index 0f023e6..6abac26 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp @@ -87,112 +87,61 @@ private: /// MostDerivedClassLayout - the AST record layout of the most derived class. const ASTRecordLayout &MostDerivedClassLayout; - /// BaseSubobjectMethodPairTy - Uniquely identifies a member function + /// MethodBaseOffsetPairTy - Uniquely identifies a member function /// in a base subobject. 
- typedef std::pair<BaseSubobject, const CXXMethodDecl *> - BaseSubobjectMethodPairTy; - - typedef llvm::DenseMap<BaseSubobjectMethodPairTy, + typedef std::pair<const CXXMethodDecl *, uint64_t> MethodBaseOffsetPairTy; + + typedef llvm::DenseMap<MethodBaseOffsetPairTy, OverriderInfo> OverridersMapTy; /// OverridersMap - The final overriders for all virtual member functions of /// all the base subobjects of the most derived class. OverridersMapTy OverridersMap; - /// VisitedVirtualBases - A set of all the visited virtual bases, used to - /// avoid visiting virtual bases more than once. - llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases; + /// SubobjectsToOffsetsMapTy - A mapping from a base subobject (represented + /// as a record decl and a subobject number) and its offsets in the most + /// derived class as well as the layout class. + typedef llvm::DenseMap<std::pair<const CXXRecordDecl *, unsigned>, + uint64_t> SubobjectOffsetMapTy; - typedef llvm::DenseMap<BaseSubobjectMethodPairTy, BaseOffset> - AdjustmentOffsetsMapTy; - - /// ReturnAdjustments - Holds return adjustments for all the overriders that - /// need to perform return value adjustments. - AdjustmentOffsetsMapTy ReturnAdjustments; - - // FIXME: We might be able to get away with making this a SmallSet. - typedef llvm::SmallSetVector<uint64_t, 2> OffsetSetVectorTy; - - /// SubobjectOffsetsMapTy - This map is used for keeping track of all the - /// base subobject offsets that a single class declaration might refer to. - /// - /// For example, in: - /// - /// struct A { virtual void f(); }; - /// struct B1 : A { }; - /// struct B2 : A { }; - /// struct C : B1, B2 { virtual void f(); }; - /// - /// when we determine that C::f() overrides A::f(), we need to update the - /// overriders map for both A-in-B1 and A-in-B2 and the subobject offsets map - /// will have the subobject offsets for both A copies. 
- typedef llvm::DenseMap<const CXXRecordDecl *, OffsetSetVectorTy> - SubobjectOffsetsMapTy; - - /// ComputeFinalOverriders - Compute the final overriders for a given base - /// subobject (and all its direct and indirect bases). - void ComputeFinalOverriders(BaseSubobject Base, - bool BaseSubobjectIsVisitedVBase, - uint64_t OffsetInLayoutClass, - SubobjectOffsetsMapTy &Offsets); + typedef llvm::DenseMap<const CXXRecordDecl *, unsigned> SubobjectCountMapTy; - /// AddOverriders - Add the final overriders for this base subobject to the - /// map of final overriders. - void AddOverriders(BaseSubobject Base, uint64_t OffsetInLayoutClass, - SubobjectOffsetsMapTy &Offsets); + /// ComputeBaseOffsets - Compute the offsets for all base subobjects of the + /// given base. + void ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual, + uint64_t OffsetInLayoutClass, + SubobjectOffsetMapTy &SubobjectOffsets, + SubobjectOffsetMapTy &SubobjectLayoutClassOffsets, + SubobjectCountMapTy &SubobjectCounts); - /// PropagateOverrider - Propagate the NewMD overrider to all the functions - /// that OldMD overrides. For example, if we have: - /// - /// struct A { virtual void f(); }; - /// struct B : A { virtual void f(); }; - /// struct C : B { virtual void f(); }; - /// - /// and we want to override B::f with C::f, we also need to override A::f with - /// C::f. - void PropagateOverrider(const CXXMethodDecl *OldMD, - BaseSubobject NewBase, - uint64_t OverriderOffsetInLayoutClass, - const CXXMethodDecl *NewMD, - SubobjectOffsetsMapTy &Offsets); + typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy; + + /// dump - dump the final overriders for a base subobject, and all its direct + /// and indirect base subobjects. 
+ void dump(llvm::raw_ostream &Out, BaseSubobject Base, + VisitedVirtualBasesSetTy& VisitedVirtualBases); - static void MergeSubobjectOffsets(const SubobjectOffsetsMapTy &NewOffsets, - SubobjectOffsetsMapTy &Offsets); - public: FinalOverriders(const CXXRecordDecl *MostDerivedClass, uint64_t MostDerivedClassOffset, const CXXRecordDecl *LayoutClass); /// getOverrider - Get the final overrider for the given method declaration in - /// the given base subobject. - OverriderInfo getOverrider(BaseSubobject Base, - const CXXMethodDecl *MD) const { - assert(OverridersMap.count(std::make_pair(Base, MD)) && + /// the subobject with the given base offset. + OverriderInfo getOverrider(const CXXMethodDecl *MD, + uint64_t BaseOffset) const { + assert(OverridersMap.count(std::make_pair(MD, BaseOffset)) && "Did not find overrider!"); - return OverridersMap.lookup(std::make_pair(Base, MD)); + return OverridersMap.lookup(std::make_pair(MD, BaseOffset)); } - /// getReturnAdjustmentOffset - Get the return adjustment offset for the - /// method decl in the given base subobject. Returns an empty base offset if - /// no adjustment is needed. - BaseOffset getReturnAdjustmentOffset(BaseSubobject Base, - const CXXMethodDecl *MD) const { - return ReturnAdjustments.lookup(std::make_pair(Base, MD)); - } - /// dump - dump the final overriders. void dump() { - assert(VisitedVirtualBases.empty() && - "Visited virtual bases aren't empty!"); - dump(llvm::errs(), BaseSubobject(MostDerivedClass, 0)); - VisitedVirtualBases.clear(); + VisitedVirtualBasesSetTy VisitedVirtualBases; + dump(llvm::errs(), BaseSubobject(MostDerivedClass, 0), VisitedVirtualBases); } - /// dump - dump the final overriders for a base subobject, and all its direct - /// and indirect base subobjects. 
- void dump(llvm::raw_ostream &Out, BaseSubobject Base); }; #define DUMP_OVERRIDERS 0 @@ -204,54 +153,57 @@ FinalOverriders::FinalOverriders(const CXXRecordDecl *MostDerivedClass, MostDerivedClassOffset(MostDerivedClassOffset), LayoutClass(LayoutClass), Context(MostDerivedClass->getASTContext()), MostDerivedClassLayout(Context.getASTRecordLayout(MostDerivedClass)) { - - // Compute the final overriders. - SubobjectOffsetsMapTy Offsets; - ComputeFinalOverriders(BaseSubobject(MostDerivedClass, 0), - /*BaseSubobjectIsVisitedVBase=*/false, - MostDerivedClassOffset, Offsets); - VisitedVirtualBases.clear(); -#if DUMP_OVERRIDERS - // And dump them (for now). - dump(); - - // Also dump the base offsets (for now). - for (SubobjectOffsetsMapTy::const_iterator I = Offsets.begin(), - E = Offsets.end(); I != E; ++I) { - const OffsetSetVectorTy& OffsetSetVector = I->second; + // Compute base offsets. + SubobjectOffsetMapTy SubobjectOffsets; + SubobjectOffsetMapTy SubobjectLayoutClassOffsets; + SubobjectCountMapTy SubobjectCounts; + ComputeBaseOffsets(BaseSubobject(MostDerivedClass, 0), /*IsVirtual=*/false, + MostDerivedClassOffset, SubobjectOffsets, + SubobjectLayoutClassOffsets, SubobjectCounts); - llvm::errs() << "Base offsets for "; - llvm::errs() << I->first->getQualifiedNameAsString() << '\n'; + // Get the the final overriders. 
+ CXXFinalOverriderMap FinalOverriders; + MostDerivedClass->getFinalOverriders(FinalOverriders); - for (unsigned I = 0, E = OffsetSetVector.size(); I != E; ++I) - llvm::errs() << " " << I << " - " << OffsetSetVector[I] / 8 << '\n'; + for (CXXFinalOverriderMap::const_iterator I = FinalOverriders.begin(), + E = FinalOverriders.end(); I != E; ++I) { + const CXXMethodDecl *MD = I->first; + const OverridingMethods& Methods = I->second; + + for (OverridingMethods::const_iterator I = Methods.begin(), + E = Methods.end(); I != E; ++I) { + unsigned SubobjectNumber = I->first; + assert(SubobjectOffsets.count(std::make_pair(MD->getParent(), + SubobjectNumber)) && + "Did not find subobject offset!"); + + uint64_t BaseOffset = SubobjectOffsets[std::make_pair(MD->getParent(), + SubobjectNumber)]; + + assert(I->second.size() == 1 && "Final overrider is not unique!"); + const UniqueVirtualMethod &Method = I->second.front(); + + const CXXRecordDecl *OverriderRD = Method.Method->getParent(); + assert(SubobjectLayoutClassOffsets.count( + std::make_pair(OverriderRD, Method.Subobject)) + && "Did not find subobject offset!"); + uint64_t OverriderOffset = + SubobjectLayoutClassOffsets[std::make_pair(OverriderRD, + Method.Subobject)]; + + OverriderInfo& Overrider = OverridersMap[std::make_pair(MD, BaseOffset)]; + assert(!Overrider.Method && "Overrider should not exist yet!"); + + Overrider.Offset = OverriderOffset; + Overrider.Method = Method.Method; + } } -#endif -} - -void FinalOverriders::AddOverriders(BaseSubobject Base, - uint64_t OffsetInLayoutClass, - SubobjectOffsetsMapTy &Offsets) { - const CXXRecordDecl *RD = Base.getBase(); - - for (CXXRecordDecl::method_iterator I = RD->method_begin(), - E = RD->method_end(); I != E; ++I) { - const CXXMethodDecl *MD = *I; - - if (!MD->isVirtual()) - continue; - // First, propagate the overrider. - PropagateOverrider(MD, Base, OffsetInLayoutClass, MD, Offsets); - - // Add the overrider as the final overrider of itself. 
- OverriderInfo& Overrider = OverridersMap[std::make_pair(Base, MD)]; - assert(!Overrider.Method && "Overrider should not exist yet!"); - - Overrider.Offset = OffsetInLayoutClass; - Overrider.Method = MD; - } +#if DUMP_OVERRIDERS + // And dump them (for now). + dump(); +#endif } static BaseOffset ComputeBaseOffset(ASTContext &Context, @@ -365,153 +317,64 @@ ComputeReturnAdjustmentBaseOffset(ASTContext &Context, return ComputeBaseOffset(Context, BaseRD, DerivedRD); } -void FinalOverriders::PropagateOverrider(const CXXMethodDecl *OldMD, - BaseSubobject NewBase, - uint64_t OverriderOffsetInLayoutClass, - const CXXMethodDecl *NewMD, - SubobjectOffsetsMapTy &Offsets) { - for (CXXMethodDecl::method_iterator I = OldMD->begin_overridden_methods(), - E = OldMD->end_overridden_methods(); I != E; ++I) { - const CXXMethodDecl *OverriddenMD = *I; - const CXXRecordDecl *OverriddenRD = OverriddenMD->getParent(); - - // We want to override OverriddenMD in all subobjects, for example: - // - /// struct A { virtual void f(); }; - /// struct B1 : A { }; - /// struct B2 : A { }; - /// struct C : B1, B2 { virtual void f(); }; - /// - /// When overriding A::f with C::f we need to do so in both A subobjects. - const OffsetSetVectorTy &OffsetVector = Offsets[OverriddenRD]; - - // Go through all the subobjects. - for (unsigned I = 0, E = OffsetVector.size(); I != E; ++I) { - uint64_t Offset = OffsetVector[I]; - - BaseSubobject OverriddenSubobject = BaseSubobject(OverriddenRD, Offset); - BaseSubobjectMethodPairTy SubobjectAndMethod = - std::make_pair(OverriddenSubobject, OverriddenMD); - - OverriderInfo &Overrider = OverridersMap[SubobjectAndMethod]; - - assert(Overrider.Method && "Did not find existing overrider!"); - - // Check if we need return adjustments or base adjustments. - // (We don't want to do this for pure virtual member functions). - if (!NewMD->isPure()) { - // Get the return adjustment base offset. 
- BaseOffset ReturnBaseOffset = - ComputeReturnAdjustmentBaseOffset(Context, NewMD, OverriddenMD); - - if (!ReturnBaseOffset.isEmpty()) { - // Store the return adjustment base offset. - ReturnAdjustments[SubobjectAndMethod] = ReturnBaseOffset; - } - } - - // Set the new overrider. - Overrider.Offset = OverriderOffsetInLayoutClass; - Overrider.Method = NewMD; - - // And propagate it further. - PropagateOverrider(OverriddenMD, NewBase, OverriderOffsetInLayoutClass, - NewMD, Offsets); - } - } -} - void -FinalOverriders::MergeSubobjectOffsets(const SubobjectOffsetsMapTy &NewOffsets, - SubobjectOffsetsMapTy &Offsets) { - // Iterate over the new offsets. - for (SubobjectOffsetsMapTy::const_iterator I = NewOffsets.begin(), - E = NewOffsets.end(); I != E; ++I) { - const CXXRecordDecl *NewRD = I->first; - const OffsetSetVectorTy& NewOffsetVector = I->second; - - OffsetSetVectorTy &OffsetVector = Offsets[NewRD]; - - // Merge the new offsets set vector into the old. - OffsetVector.insert(NewOffsetVector.begin(), NewOffsetVector.end()); - } -} - -void FinalOverriders::ComputeFinalOverriders(BaseSubobject Base, - bool BaseSubobjectIsVisitedVBase, - uint64_t OffsetInLayoutClass, - SubobjectOffsetsMapTy &Offsets) { +FinalOverriders::ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual, + uint64_t OffsetInLayoutClass, + SubobjectOffsetMapTy &SubobjectOffsets, + SubobjectOffsetMapTy &SubobjectLayoutClassOffsets, + SubobjectCountMapTy &SubobjectCounts) { const CXXRecordDecl *RD = Base.getBase(); - const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); - SubobjectOffsetsMapTy NewOffsets; + unsigned SubobjectNumber = 0; + if (!IsVirtual) + SubobjectNumber = ++SubobjectCounts[RD]; + + // Set up the subobject to offset mapping. 
+ assert(!SubobjectOffsets.count(std::make_pair(RD, SubobjectNumber)) + && "Subobject offset already exists!"); + assert(!SubobjectLayoutClassOffsets.count(std::make_pair(RD, SubobjectNumber)) + && "Subobject offset already exists!"); + + SubobjectOffsets[std::make_pair(RD, SubobjectNumber)] = + Base.getBaseOffset(); + SubobjectLayoutClassOffsets[std::make_pair(RD, SubobjectNumber)] = + OffsetInLayoutClass; + // Traverse our bases. for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), E = RD->bases_end(); I != E; ++I) { const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl()); - - // Ignore bases that don't have any virtual member functions. - if (!BaseDecl->isPolymorphic()) - continue; - - bool IsVisitedVirtualBase = BaseSubobjectIsVisitedVBase; + uint64_t BaseOffset; uint64_t BaseOffsetInLayoutClass; if (I->isVirtual()) { - if (!VisitedVirtualBases.insert(BaseDecl)) - IsVisitedVirtualBase = true; - BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl); - + // Check if we've visited this virtual base before. + if (SubobjectOffsets.count(std::make_pair(BaseDecl, 0))) + continue; + const ASTRecordLayout &LayoutClassLayout = Context.getASTRecordLayout(LayoutClass); + + BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl); BaseOffsetInLayoutClass = LayoutClassLayout.getVBaseClassOffset(BaseDecl); } else { - BaseOffset = Layout.getBaseClassOffset(BaseDecl) + Base.getBaseOffset(); - BaseOffsetInLayoutClass = Layout.getBaseClassOffset(BaseDecl) + - OffsetInLayoutClass; - } + const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); + uint64_t Offset = Layout.getBaseClassOffset(BaseDecl); - // Compute the final overriders for this base. - // We always want to compute the final overriders, even if the base is a - // visited virtual base. 
Consider: - // - // struct A { - // virtual void f(); - // virtual void g(); - // }; - // - // struct B : virtual A { - // void f(); - // }; - // - // struct C : virtual A { - // void g (); - // }; - // - // struct D : B, C { }; - // - // Here, we still want to compute the overriders for A as a base of C, - // because otherwise we'll miss that C::g overrides A::f. - ComputeFinalOverriders(BaseSubobject(BaseDecl, BaseOffset), - IsVisitedVirtualBase, BaseOffsetInLayoutClass, - NewOffsets); - } - - /// Now add the overriders for this particular subobject. - /// (We don't want to do this more than once for a virtual base). - if (!BaseSubobjectIsVisitedVBase) - AddOverriders(Base, OffsetInLayoutClass, NewOffsets); - - // And merge the newly discovered subobject offsets. - MergeSubobjectOffsets(NewOffsets, Offsets); - - /// Finally, add the offset for our own subobject. - Offsets[RD].insert(Base.getBaseOffset()); + BaseOffset = Base.getBaseOffset() + Offset; + BaseOffsetInLayoutClass = OffsetInLayoutClass + Offset; + } + + ComputeBaseOffsets(BaseSubobject(BaseDecl, BaseOffset), I->isVirtual(), + BaseOffsetInLayoutClass, SubobjectOffsets, + SubobjectLayoutClassOffsets, SubobjectCounts); + } } -void FinalOverriders::dump(llvm::raw_ostream &Out, BaseSubobject Base) { +void FinalOverriders::dump(llvm::raw_ostream &Out, BaseSubobject Base, + VisitedVirtualBasesSetTy &VisitedVirtualBases) { const CXXRecordDecl *RD = Base.getBase(); const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); @@ -537,7 +400,7 @@ void FinalOverriders::dump(llvm::raw_ostream &Out, BaseSubobject Base) { Base.getBaseOffset(); } - dump(Out, BaseSubobject(BaseDecl, BaseOffset)); + dump(Out, BaseSubobject(BaseDecl, BaseOffset), VisitedVirtualBases); } Out << "Final overriders for (" << RD->getQualifiedNameAsString() << ", "; @@ -551,17 +414,17 @@ void FinalOverriders::dump(llvm::raw_ostream &Out, BaseSubobject Base) { if (!MD->isVirtual()) continue; - OverriderInfo Overrider = getOverrider(Base, 
MD); + OverriderInfo Overrider = getOverrider(MD, Base.getBaseOffset()); Out << " " << MD->getQualifiedNameAsString() << " - ("; Out << Overrider.Method->getQualifiedNameAsString(); Out << ", " << ", " << Overrider.Offset / 8 << ')'; - AdjustmentOffsetsMapTy::const_iterator AI = - ReturnAdjustments.find(std::make_pair(Base, MD)); - if (AI != ReturnAdjustments.end()) { - const BaseOffset &Offset = AI->second; + BaseOffset Offset; + if (!Overrider.Method->isPure()) + Offset = ComputeReturnAdjustmentBaseOffset(Context, Overrider.Method, MD); + if (!Offset.isEmpty()) { Out << " [ret-adj: "; if (Offset.VirtualBase) Out << Offset.VirtualBase->getQualifiedNameAsString() << " vbase, "; @@ -1013,7 +876,7 @@ void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base, if (Overriders) { // Get the final overrider. FinalOverriders::OverriderInfo Overrider = - Overriders->getOverrider(Base, MD); + Overriders->getOverrider(MD, Base.getBaseOffset()); /// The vcall offset is the offset from the virtual base to the object /// where the function was overridden. @@ -1390,8 +1253,7 @@ void VTableBuilder::ComputeThisAdjustments() { // Get the final overrider for this method. FinalOverriders::OverriderInfo Overrider = - Overriders.getOverrider(BaseSubobject(MD->getParent(), - MethodInfo.BaseOffset), MD); + Overriders.getOverrider(MD, MethodInfo.BaseOffset); // Check if we need an adjustment at all. if (MethodInfo.BaseOffsetInLayoutClass == Overrider.Offset) { @@ -1763,7 +1625,7 @@ VTableBuilder::AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass, // Get the final overrider. FinalOverriders::OverriderInfo Overrider = - Overriders.getOverrider(Base, MD); + Overriders.getOverrider(MD, Base.getBaseOffset()); // Check if this virtual member function overrides a method in a primary // base. 
If this is the case, and the return type doesn't require adjustment @@ -1828,8 +1690,12 @@ VTableBuilder::AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass, } // Check if this overrider needs a return adjustment. - BaseOffset ReturnAdjustmentOffset = - Overriders.getReturnAdjustmentOffset(Base, MD); + // We don't want to do this for pure virtual member functions. + BaseOffset ReturnAdjustmentOffset; + if (!OverriderMD->isPure()) { + ReturnAdjustmentOffset = + ComputeReturnAdjustmentBaseOffset(Context, OverriderMD, MD); + } ReturnAdjustment ReturnAdjustment = ComputeReturnAdjustment(ReturnAdjustmentOffset); @@ -2775,7 +2641,7 @@ void CodeGenVTables::EmitThunks(GlobalDecl GD) const CXXRecordDecl *RD = MD->getParent(); // Compute VTable related info for this class. - ComputeVTableRelatedInformation(RD); + ComputeVTableRelatedInformation(RD, false); ThunksMapTy::const_iterator I = Thunks.find(MD); if (I == Thunks.end()) { @@ -2788,24 +2654,30 @@ void CodeGenVTables::EmitThunks(GlobalDecl GD) EmitThunk(GD, ThunkInfoVector[I]); } -void CodeGenVTables::ComputeVTableRelatedInformation(const CXXRecordDecl *RD) { - uint64_t *&LayoutData = VTableLayoutMap[RD]; +void CodeGenVTables::ComputeVTableRelatedInformation(const CXXRecordDecl *RD, + bool RequireVTable) { + VTableLayoutData &Entry = VTableLayoutMap[RD]; + + // We may need to generate a definition for this vtable. + if (RequireVTable && !Entry.getInt()) { + if (!isKeyFunctionInAnotherTU(CGM.getContext(), RD) && + RD->getTemplateSpecializationKind() + != TSK_ExplicitInstantiationDeclaration) + CGM.DeferredVTables.push_back(RD); + + Entry.setInt(true); + } // Check if we've computed this information before. - if (LayoutData) + if (Entry.getPointer()) return; - // We may need to generate a definition for this vtable. 
- if (!isKeyFunctionInAnotherTU(CGM.getContext(), RD) && - RD->getTemplateSpecializationKind() - != TSK_ExplicitInstantiationDeclaration) - CGM.DeferredVTables.push_back(RD); - VTableBuilder Builder(*this, RD, 0, /*MostDerivedClassIsVirtual=*/0, RD); // Add the VTable layout. uint64_t NumVTableComponents = Builder.getNumVTableComponents(); - LayoutData = new uint64_t[NumVTableComponents + 1]; + uint64_t *LayoutData = new uint64_t[NumVTableComponents + 1]; + Entry.setPointer(LayoutData); // Store the number of components. LayoutData[0] = NumVTableComponents; @@ -3020,7 +2892,7 @@ llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTable(const CXXRecordDecl *RD) { CGM.getMangleContext().mangleCXXVTable(RD, OutName); llvm::StringRef Name = OutName.str(); - ComputeVTableRelatedInformation(RD); + ComputeVTableRelatedInformation(RD, true); const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext()); llvm::ArrayType *ArrayType = @@ -3054,6 +2926,9 @@ CodeGenVTables::EmitVTableDefinition(llvm::GlobalVariable *VTable, // Set the correct linkage. VTable->setLinkage(Linkage); + + // Set the right visibility. + CGM.setGlobalVisibility(VTable, RD); } llvm::GlobalVariable * diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h index e55377f..abcafd6 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h @@ -207,8 +207,12 @@ class CodeGenVTables { /// Thunks - Contains all thunks that a given method decl will need. ThunksMapTy Thunks; - - typedef llvm::DenseMap<const CXXRecordDecl *, uint64_t *> VTableLayoutMapTy; + + // The layout entry and a bool indicating whether we've actually emitted + // the vtable. + typedef llvm::PointerIntPair<uint64_t *, 1, bool> VTableLayoutData; + typedef llvm::DenseMap<const CXXRecordDecl *, VTableLayoutData> + VTableLayoutMapTy; /// VTableLayoutMap - Stores the vtable layout for all record decls. 
/// The layout is stored as an array of 64-bit integers, where the first @@ -237,13 +241,13 @@ class CodeGenVTables { uint64_t getNumVTableComponents(const CXXRecordDecl *RD) const { assert(VTableLayoutMap.count(RD) && "No vtable layout for this class!"); - return VTableLayoutMap.lookup(RD)[0]; + return VTableLayoutMap.lookup(RD).getPointer()[0]; } const uint64_t *getVTableComponentsData(const CXXRecordDecl *RD) const { assert(VTableLayoutMap.count(RD) && "No vtable layout for this class!"); - uint64_t *Components = VTableLayoutMap.lookup(RD); + uint64_t *Components = VTableLayoutMap.lookup(RD).getPointer(); return &Components[1]; } @@ -275,7 +279,8 @@ class CodeGenVTables { /// ComputeVTableRelatedInformation - Compute and store all vtable related /// information (vtable layout, vbase offset offsets, thunks etc) for the /// given record decl. - void ComputeVTableRelatedInformation(const CXXRecordDecl *RD); + void ComputeVTableRelatedInformation(const CXXRecordDecl *RD, + bool VTableRequired); /// CreateVTableInitializer - Create a vtable initializer for the given record /// decl. 
@@ -296,7 +301,7 @@ public: const CXXRecordDecl *RD) { assert (RD->isDynamicClass() && "Non dynamic classes have no key."); const CXXMethodDecl *KeyFunction = Context.getKeyFunction(RD); - return KeyFunction && !KeyFunction->getBody(); + return KeyFunction && !KeyFunction->hasBody(); } /// needsVTTParameter - Return whether the given global decl needs a VTT diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CMakeLists.txt b/contrib/llvm/tools/clang/lib/CodeGen/CMakeLists.txt index a226400..b5a2329 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CMakeLists.txt +++ b/contrib/llvm/tools/clang/lib/CodeGen/CMakeLists.txt @@ -1,6 +1,7 @@ set(LLVM_NO_RTTI 1) add_clang_library(clangCodeGen + BackendUtil.cpp CGBlocks.cpp CGBuiltin.cpp CGCall.cpp @@ -25,13 +26,16 @@ add_clang_library(clangCodeGen CGTemporaries.cpp CGVTables.cpp CGVTT.cpp + CodeGenAction.cpp CodeGenFunction.cpp CodeGenModule.cpp CodeGenTypes.cpp ItaniumCXXABI.cpp Mangle.cpp + MicrosoftCXXABI.cpp ModuleBuilder.cpp TargetInfo.cpp ) -add_dependencies(clangCodeGen ClangStmtNodes) +add_dependencies(clangCodeGen ClangAttrClasses ClangAttrList ClangDeclNodes + ClangStmtNodes) diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp new file mode 100644 index 0000000..51c55a1 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp @@ -0,0 +1,348 @@ +//===--- CodeGenAction.cpp - LLVM Code Generation Frontend Action ---------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +#include "clang/CodeGen/CodeGenAction.h" +#include "clang/Basic/SourceManager.h" +#include "clang/Basic/TargetInfo.h" +#include "clang/AST/ASTConsumer.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/DeclGroup.h" +#include "clang/CodeGen/BackendUtil.h" +#include "clang/CodeGen/ModuleBuilder.h" +#include "clang/Frontend/CompilerInstance.h" +#include "clang/Frontend/FrontendDiagnostic.h" +#include "llvm/LLVMContext.h" +#include "llvm/Module.h" +#include "llvm/Pass.h" +#include "llvm/ADT/OwningPtr.h" +#include "llvm/Support/IRReader.h" +#include "llvm/Support/MemoryBuffer.h" +#include "llvm/Support/SourceMgr.h" +#include "llvm/Support/Timer.h" +using namespace clang; +using namespace llvm; + +namespace { + class BackendConsumer : public ASTConsumer { + Diagnostic &Diags; + BackendAction Action; + const CodeGenOptions &CodeGenOpts; + const TargetOptions &TargetOpts; + llvm::raw_ostream *AsmOutStream; + ASTContext *Context; + + Timer LLVMIRGeneration; + + llvm::OwningPtr<CodeGenerator> Gen; + + llvm::OwningPtr<llvm::Module> TheModule; + + public: + BackendConsumer(BackendAction action, Diagnostic &_Diags, + const CodeGenOptions &compopts, + const TargetOptions &targetopts, bool TimePasses, + const std::string &infile, llvm::raw_ostream *OS, + LLVMContext &C) : + Diags(_Diags), + Action(action), + CodeGenOpts(compopts), + TargetOpts(targetopts), + AsmOutStream(OS), + LLVMIRGeneration("LLVM IR Generation Time"), + Gen(CreateLLVMCodeGen(Diags, infile, compopts, C)) { + llvm::TimePassesIsEnabled = TimePasses; + } + + llvm::Module *takeModule() { return TheModule.take(); } + + virtual void Initialize(ASTContext &Ctx) { + Context = &Ctx; + + if (llvm::TimePassesIsEnabled) + LLVMIRGeneration.startTimer(); + + Gen->Initialize(Ctx); + + TheModule.reset(Gen->GetModule()); + + if (llvm::TimePassesIsEnabled) + LLVMIRGeneration.stopTimer(); + } + + virtual void 
HandleTopLevelDecl(DeclGroupRef D) { + PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(), + Context->getSourceManager(), + "LLVM IR generation of declaration"); + + if (llvm::TimePassesIsEnabled) + LLVMIRGeneration.startTimer(); + + Gen->HandleTopLevelDecl(D); + + if (llvm::TimePassesIsEnabled) + LLVMIRGeneration.stopTimer(); + } + + virtual void HandleTranslationUnit(ASTContext &C) { + { + PrettyStackTraceString CrashInfo("Per-file LLVM IR generation"); + if (llvm::TimePassesIsEnabled) + LLVMIRGeneration.startTimer(); + + Gen->HandleTranslationUnit(C); + + if (llvm::TimePassesIsEnabled) + LLVMIRGeneration.stopTimer(); + } + + // Silently ignore if we weren't initialized for some reason. + if (!TheModule) + return; + + // Make sure IR generation is happy with the module. This is released by + // the module provider. + Module *M = Gen->ReleaseModule(); + if (!M) { + // The module has been released by IR gen on failures, do not double + // free. + TheModule.take(); + return; + } + + assert(TheModule.get() == M && + "Unexpected module change during IR generation"); + + // Install an inline asm handler so that diagnostics get printed through + // our diagnostics hooks. 
+ LLVMContext &Ctx = TheModule->getContext(); + void *OldHandler = Ctx.getInlineAsmDiagnosticHandler(); + void *OldContext = Ctx.getInlineAsmDiagnosticContext(); + Ctx.setInlineAsmDiagnosticHandler((void*)(intptr_t)InlineAsmDiagHandler, + this); + + EmitBackendOutput(Diags, CodeGenOpts, TargetOpts, + TheModule.get(), Action, AsmOutStream); + + Ctx.setInlineAsmDiagnosticHandler(OldHandler, OldContext); + } + + virtual void HandleTagDeclDefinition(TagDecl *D) { + PrettyStackTraceDecl CrashInfo(D, SourceLocation(), + Context->getSourceManager(), + "LLVM IR generation of declaration"); + Gen->HandleTagDeclDefinition(D); + } + + virtual void CompleteTentativeDefinition(VarDecl *D) { + Gen->CompleteTentativeDefinition(D); + } + + virtual void HandleVTable(CXXRecordDecl *RD, bool DefinitionRequired) { + Gen->HandleVTable(RD, DefinitionRequired); + } + + static void InlineAsmDiagHandler(const llvm::SMDiagnostic &SM,void *Context, + unsigned LocCookie) { + SourceLocation Loc = SourceLocation::getFromRawEncoding(LocCookie); + ((BackendConsumer*)Context)->InlineAsmDiagHandler2(SM, Loc); + } + + void InlineAsmDiagHandler2(const llvm::SMDiagnostic &, + SourceLocation LocCookie); + }; +} + +/// ConvertBackendLocation - Convert a location in a temporary llvm::SourceMgr +/// buffer to be a valid FullSourceLoc. +static FullSourceLoc ConvertBackendLocation(const llvm::SMDiagnostic &D, + SourceManager &CSM) { + // Get both the clang and llvm source managers. The location is relative to + // a memory buffer that the LLVM Source Manager is handling, we need to add + // a copy to the Clang source manager. + const llvm::SourceMgr &LSM = *D.getSourceMgr(); + + // We need to copy the underlying LLVM memory buffer because llvm::SourceMgr + // already owns its one and clang::SourceManager wants to own its one. + const MemoryBuffer *LBuf = + LSM.getMemoryBuffer(LSM.FindBufferContainingLoc(D.getLoc())); + + // Create the copy and transfer ownership to clang::SourceManager. 
+ llvm::MemoryBuffer *CBuf = + llvm::MemoryBuffer::getMemBufferCopy(LBuf->getBuffer(), + LBuf->getBufferIdentifier()); + FileID FID = CSM.createFileIDForMemBuffer(CBuf); + + // Translate the offset into the file. + unsigned Offset = D.getLoc().getPointer() - LBuf->getBufferStart(); + SourceLocation NewLoc = + CSM.getLocForStartOfFile(FID).getFileLocWithOffset(Offset); + return FullSourceLoc(NewLoc, CSM); +} + + +/// InlineAsmDiagHandler2 - This function is invoked when the backend hits an +/// error parsing inline asm. The SMDiagnostic indicates the error relative to +/// the temporary memory buffer that the inline asm parser has set up. +void BackendConsumer::InlineAsmDiagHandler2(const llvm::SMDiagnostic &D, + SourceLocation LocCookie) { + // There are a couple of different kinds of errors we could get here. First, + // we re-format the SMDiagnostic in terms of a clang diagnostic. + + // Strip "error: " off the start of the message string. + llvm::StringRef Message = D.getMessage(); + if (Message.startswith("error: ")) + Message = Message.substr(7); + + // If the SMDiagnostic has an inline asm source location, translate it. + FullSourceLoc Loc; + if (D.getLoc() != SMLoc()) + Loc = ConvertBackendLocation(D, Context->getSourceManager()); + + + // If this problem has clang-level source location information, report the + // issue as being an error in the source with a note showing the instantiated + // code. + if (LocCookie.isValid()) { + Diags.Report(FullSourceLoc(LocCookie, Context->getSourceManager()), + diag::err_fe_inline_asm).AddString(Message); + + if (D.getLoc().isValid()) + Diags.Report(Loc, diag::note_fe_inline_asm_here); + return; + } + + // Otherwise, report the backend error as occuring in the generated .s file. + // If Loc is invalid, we still need to report the error, it just gets no + // location info. 
+ Diags.Report(Loc, diag::err_fe_inline_asm).AddString(Message); +} + +// + +CodeGenAction::CodeGenAction(unsigned _Act) : Act(_Act) {} + +CodeGenAction::~CodeGenAction() {} + +bool CodeGenAction::hasIRSupport() const { return true; } + +void CodeGenAction::EndSourceFileAction() { + // If the consumer creation failed, do nothing. + if (!getCompilerInstance().hasASTConsumer()) + return; + + // Steal the module from the consumer. + BackendConsumer *Consumer = static_cast<BackendConsumer*>( + &getCompilerInstance().getASTConsumer()); + + TheModule.reset(Consumer->takeModule()); +} + +llvm::Module *CodeGenAction::takeModule() { + return TheModule.take(); +} + +static raw_ostream *GetOutputStream(CompilerInstance &CI, + llvm::StringRef InFile, + BackendAction Action) { + switch (Action) { + case Backend_EmitAssembly: + return CI.createDefaultOutputFile(false, InFile, "s"); + case Backend_EmitLL: + return CI.createDefaultOutputFile(false, InFile, "ll"); + case Backend_EmitBC: + return CI.createDefaultOutputFile(true, InFile, "bc"); + case Backend_EmitNothing: + return 0; + case Backend_EmitMCNull: + case Backend_EmitObj: + return CI.createDefaultOutputFile(true, InFile, "o"); + } + + assert(0 && "Invalid action!"); + return 0; +} + +ASTConsumer *CodeGenAction::CreateASTConsumer(CompilerInstance &CI, + llvm::StringRef InFile) { + BackendAction BA = static_cast<BackendAction>(Act); + llvm::OwningPtr<llvm::raw_ostream> OS(GetOutputStream(CI, InFile, BA)); + if (BA != Backend_EmitNothing && !OS) + return 0; + + return new BackendConsumer(BA, CI.getDiagnostics(), + CI.getCodeGenOpts(), CI.getTargetOpts(), + CI.getFrontendOpts().ShowTimers, InFile, OS.take(), + CI.getLLVMContext()); +} + +void CodeGenAction::ExecuteAction() { + // If this is an IR file, we have to treat it specially. 
+ if (getCurrentFileKind() == IK_LLVM_IR) { + BackendAction BA = static_cast<BackendAction>(Act); + CompilerInstance &CI = getCompilerInstance(); + raw_ostream *OS = GetOutputStream(CI, getCurrentFile(), BA); + if (BA != Backend_EmitNothing && !OS) + return; + + bool Invalid; + SourceManager &SM = CI.getSourceManager(); + const llvm::MemoryBuffer *MainFile = SM.getBuffer(SM.getMainFileID(), + &Invalid); + if (Invalid) + return; + + // FIXME: This is stupid, IRReader shouldn't take ownership. + llvm::MemoryBuffer *MainFileCopy = + llvm::MemoryBuffer::getMemBufferCopy(MainFile->getBuffer(), + getCurrentFile().c_str()); + + llvm::SMDiagnostic Err; + TheModule.reset(ParseIR(MainFileCopy, Err, CI.getLLVMContext())); + if (!TheModule) { + // Translate from the diagnostic info to the SourceManager location. + SourceLocation Loc = SM.getLocation( + SM.getFileEntryForID(SM.getMainFileID()), Err.getLineNo(), + Err.getColumnNo() + 1); + + // Get a custom diagnostic for the error. We strip off a leading + // diagnostic code if there is one. + llvm::StringRef Msg = Err.getMessage(); + if (Msg.startswith("error: ")) + Msg = Msg.substr(7); + unsigned DiagID = CI.getDiagnostics().getCustomDiagID(Diagnostic::Error, + Msg); + + CI.getDiagnostics().Report(FullSourceLoc(Loc, SM), DiagID); + return; + } + + EmitBackendOutput(CI.getDiagnostics(), CI.getCodeGenOpts(), + CI.getTargetOpts(), TheModule.get(), + BA, OS); + return; + } + + // Otherwise follow the normal AST path. 
+ this->ASTFrontendAction::ExecuteAction(); +} + +// + +EmitAssemblyAction::EmitAssemblyAction() + : CodeGenAction(Backend_EmitAssembly) {} + +EmitBCAction::EmitBCAction() : CodeGenAction(Backend_EmitBC) {} + +EmitLLVMAction::EmitLLVMAction() : CodeGenAction(Backend_EmitLL) {} + +EmitLLVMOnlyAction::EmitLLVMOnlyAction() : CodeGenAction(Backend_EmitNothing) {} + +EmitCodeGenOnlyAction::EmitCodeGenOnlyAction() : CodeGenAction(Backend_EmitMCNull) {} + +EmitObjAction::EmitObjAction() : CodeGenAction(Backend_EmitObj) {} diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp index 73de0fd..eb6c436 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp @@ -14,13 +14,16 @@ #include "CodeGenFunction.h" #include "CodeGenModule.h" #include "CGDebugInfo.h" +#include "CGException.h" #include "clang/Basic/TargetInfo.h" #include "clang/AST/APValue.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/StmtCXX.h" +#include "clang/Frontend/CodeGenOptions.h" #include "llvm/Target/TargetData.h" +#include "llvm/Intrinsics.h" using namespace clang; using namespace CodeGen; @@ -28,13 +31,20 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm) : BlockFunction(cgm, *this, Builder), CGM(cgm), Target(CGM.getContext().Target), Builder(cgm.getModule().getContext()), - DebugInfo(0), IndirectBranch(0), + ExceptionSlot(0), DebugInfo(0), IndirectBranch(0), SwitchInsn(0), CaseRangeBlock(0), InvokeDest(0), + DidCallStackSave(false), UnreachableBlock(0), CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0), - ConditionalBranchLevel(0), TerminateHandler(0), TrapBB(0), - UniqueAggrDestructorCount(0) { - LLVMIntTy = ConvertType(getContext().IntTy); + ConditionalBranchLevel(0), TerminateLandingPad(0), TerminateHandler(0), + TrapBB(0) { + + // Get some frequently used types. 
LLVMPointerWidth = Target.getPointerWidth(0); + llvm::LLVMContext &LLVMContext = CGM.getLLVMContext(); + IntPtrTy = llvm::IntegerType::get(LLVMContext, LLVMPointerWidth); + Int32Ty = llvm::Type::getInt32Ty(LLVMContext); + Int64Ty = llvm::Type::getInt64Ty(LLVMContext); + Exceptions = getContext().getLangOptions().Exceptions; CatchUndefined = getContext().getLangOptions().CatchUndefined; CGM.getMangleContext().startNewFunction(); @@ -45,14 +55,6 @@ ASTContext &CodeGenFunction::getContext() const { } -llvm::BasicBlock *CodeGenFunction::getBasicBlockForLabel(const LabelStmt *S) { - llvm::BasicBlock *&BB = LabelMap[S]; - if (BB) return BB; - - // Create, but don't insert, the new block. - return BB = createBasicBlock(S->getName()); -} - llvm::Value *CodeGenFunction::GetAddrOfLocalVar(const VarDecl *VD) { llvm::Value *Res = LocalDeclMap[VD]; assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!"); @@ -87,25 +89,26 @@ void CodeGenFunction::EmitReturnBlock() { // We have a valid insert point, reuse it if it is empty or there are no // explicit jumps to the return block. - if (CurBB->empty() || ReturnBlock->use_empty()) { - ReturnBlock->replaceAllUsesWith(CurBB); - delete ReturnBlock; + if (CurBB->empty() || ReturnBlock.Block->use_empty()) { + ReturnBlock.Block->replaceAllUsesWith(CurBB); + delete ReturnBlock.Block; } else - EmitBlock(ReturnBlock); + EmitBlock(ReturnBlock.Block); return; } // Otherwise, if the return block is the target of a single direct // branch then we can just put the code in that block instead. This // cleans up functions which started with a unified return block. 
- if (ReturnBlock->hasOneUse()) { + if (ReturnBlock.Block->hasOneUse()) { llvm::BranchInst *BI = - dyn_cast<llvm::BranchInst>(*ReturnBlock->use_begin()); - if (BI && BI->isUnconditional() && BI->getSuccessor(0) == ReturnBlock) { + dyn_cast<llvm::BranchInst>(*ReturnBlock.Block->use_begin()); + if (BI && BI->isUnconditional() && + BI->getSuccessor(0) == ReturnBlock.Block) { // Reset insertion point and delete the branch. Builder.SetInsertPoint(BI->getParent()); BI->eraseFromParent(); - delete ReturnBlock; + delete ReturnBlock.Block; return; } } @@ -114,29 +117,37 @@ void CodeGenFunction::EmitReturnBlock() { // unless it has uses. However, we still need a place to put the debug // region.end for now. - EmitBlock(ReturnBlock); + EmitBlock(ReturnBlock.Block); +} + +static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) { + if (!BB) return; + if (!BB->use_empty()) + return CGF.CurFn->getBasicBlockList().push_back(BB); + delete BB; } void CodeGenFunction::FinishFunction(SourceLocation EndLoc) { assert(BreakContinueStack.empty() && "mismatched push/pop in break/continue stack!"); - assert(BlockScopes.empty() && - "did not remove all blocks from block scope map!"); - assert(CleanupEntries.empty() && - "mismatched push/pop in cleanup stack!"); // Emit function epilog (to return). EmitReturnBlock(); + EmitFunctionInstrumentation("__cyg_profile_func_exit"); + // Emit debug descriptor for function end. if (CGDebugInfo *DI = getDebugInfo()) { DI->setLocation(EndLoc); DI->EmitRegionEnd(CurFn, Builder); } - EmitFunctionEpilog(*CurFnInfo, ReturnValue); + EmitFunctionEpilog(*CurFnInfo); EmitEndEHSpec(CurCodeDecl); + assert(EHStack.empty() && + "did not remove all scopes from cleanup stack!"); + // If someone did an indirect goto, emit the indirect goto block at the end of // the function. 
if (IndirectBranch) { @@ -158,6 +169,53 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) { PN->eraseFromParent(); } } + + EmitIfUsed(*this, TerminateLandingPad); + EmitIfUsed(*this, TerminateHandler); + EmitIfUsed(*this, UnreachableBlock); + + if (CGM.getCodeGenOpts().EmitDeclMetadata) + EmitDeclMetadata(); +} + +/// ShouldInstrumentFunction - Return true if the current function should be +/// instrumented with __cyg_profile_func_* calls +bool CodeGenFunction::ShouldInstrumentFunction() { + if (!CGM.getCodeGenOpts().InstrumentFunctions) + return false; + if (CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) + return false; + return true; +} + +/// EmitFunctionInstrumentation - Emit LLVM code to call the specified +/// instrumentation function with the current function and the call site, if +/// function instrumentation is enabled. +void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) { + if (!ShouldInstrumentFunction()) + return; + + const llvm::PointerType *PointerTy; + const llvm::FunctionType *FunctionTy; + std::vector<const llvm::Type*> ProfileFuncArgs; + + // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site); + PointerTy = llvm::Type::getInt8PtrTy(VMContext); + ProfileFuncArgs.push_back(PointerTy); + ProfileFuncArgs.push_back(PointerTy); + FunctionTy = llvm::FunctionType::get( + llvm::Type::getVoidTy(VMContext), + ProfileFuncArgs, false); + + llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn); + llvm::CallInst *CallSite = Builder.CreateCall( + CGM.getIntrinsic(llvm::Intrinsic::returnaddress, 0, 0), + llvm::ConstantInt::get(Int32Ty, 0), + "callsite"); + + Builder.CreateCall2(F, + llvm::ConstantExpr::getBitCast(CurFn, PointerTy), + CallSite); } void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, @@ -187,14 +245,12 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, // Create a marker to make it easy to insert allocas into the entryblock // later. 
Don't create this with the builder, because we don't want it // folded. - llvm::Value *Undef = llvm::UndefValue::get(llvm::Type::getInt32Ty(VMContext)); - AllocaInsertPt = new llvm::BitCastInst(Undef, - llvm::Type::getInt32Ty(VMContext), "", - EntryBB); + llvm::Value *Undef = llvm::UndefValue::get(Int32Ty); + AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB); if (Builder.isNamePreserving()) AllocaInsertPt->setName("allocapt"); - ReturnBlock = createBasicBlock("return"); + ReturnBlock = getJumpDestInCurrentScope("return"); Builder.SetInsertPoint(EntryBB); @@ -209,6 +265,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy, DI->EmitFunctionStart(GD, FnType, CurFn, Builder); } + EmitFunctionInstrumentation("__cyg_profile_func_enter"); + // FIXME: Leaked. // CC info is ignored, hopefully? CurFnInfo = &CGM.getTypes().getFunctionInfo(FnRetTy, Args, @@ -513,15 +571,11 @@ CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) { return; // FIXME: Handle variable sized types. - const llvm::Type *IntPtr = llvm::IntegerType::get(VMContext, - LLVMPointerWidth); - - Builder.CreateCall5(CGM.getMemSetFn(BP, IntPtr), DestPtr, + Builder.CreateCall5(CGM.getMemSetFn(BP, IntPtrTy), DestPtr, llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext)), // TypeInfo.first describes size in bits. - llvm::ConstantInt::get(IntPtr, TypeInfo.first/8), - llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), - TypeInfo.second/8), + llvm::ConstantInt::get(IntPtrTy, TypeInfo.first/8), + llvm::ConstantInt::get(Int32Ty, TypeInfo.second/8), llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0)); } @@ -531,7 +585,7 @@ llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelStmt *L) { if (IndirectBranch == 0) GetIndirectGotoBlock(); - llvm::BasicBlock *BB = getBasicBlockForLabel(L); + llvm::BasicBlock *BB = getJumpDestForLabel(L).Block; // Make sure the indirect branch includes all of the address-taken blocks. 
IndirectBranch->addDestination(BB); @@ -603,233 +657,574 @@ llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) { } llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) { - if (CGM.getContext().getBuiltinVaListType()->isArrayType()) { + if (CGM.getContext().getBuiltinVaListType()->isArrayType()) return EmitScalarExpr(E); - } return EmitLValue(E).getAddress(); } -void CodeGenFunction::PushCleanupBlock(llvm::BasicBlock *CleanupEntryBlock, - llvm::BasicBlock *CleanupExitBlock, - llvm::BasicBlock *PreviousInvokeDest, - bool EHOnly) { - CleanupEntries.push_back(CleanupEntry(CleanupEntryBlock, CleanupExitBlock, - PreviousInvokeDest, EHOnly)); +/// Pops cleanup blocks until the given savepoint is reached. +void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) { + assert(Old.isValid()); + + EHScopeStack::iterator E = EHStack.find(Old); + while (EHStack.begin() != E) + PopCleanupBlock(); +} + +/// Destroys a cleanup if it was unused. +static void DestroyCleanup(CodeGenFunction &CGF, + llvm::BasicBlock *Entry, + llvm::BasicBlock *Exit) { + assert(Entry->use_empty() && "destroying cleanup with uses!"); + assert(Exit->getTerminator() == 0 && + "exit has terminator but entry has no predecessors!"); + + // This doesn't always remove the entire cleanup, but it's much + // safer as long as we don't know what blocks belong to the cleanup. + // A *much* better approach if we care about this inefficiency would + // be to lazily emit the cleanup. + + // If the exit block is distinct from the entry, give it a branch to + // an unreachable destination. This preserves the well-formedness + // of the IR. + if (Entry != Exit) + llvm::BranchInst::Create(CGF.getUnreachableBlock(), Exit); + + assert(!Entry->getParent() && "cleanup entry already positioned?"); + // We can't just delete the entry; we have to kill any references to + // its instructions in other blocks. 
+ for (llvm::BasicBlock::iterator I = Entry->begin(), E = Entry->end(); + I != E; ++I) + if (!I->use_empty()) + I->replaceAllUsesWith(llvm::UndefValue::get(I->getType())); + delete Entry; } -void CodeGenFunction::EmitCleanupBlocks(size_t OldCleanupStackSize) { - assert(CleanupEntries.size() >= OldCleanupStackSize && - "Cleanup stack mismatch!"); +/// Creates a switch instruction to thread branches out of the given +/// block (which is the exit block of a cleanup). +static void CreateCleanupSwitch(CodeGenFunction &CGF, + llvm::BasicBlock *Block) { + if (Block->getTerminator()) { + assert(isa<llvm::SwitchInst>(Block->getTerminator()) && + "cleanup block already has a terminator, but it isn't a switch"); + return; + } - while (CleanupEntries.size() > OldCleanupStackSize) - EmitCleanupBlock(); + llvm::Value *DestCodePtr + = CGF.CreateTempAlloca(CGF.Builder.getInt32Ty(), "cleanup.dst"); + CGBuilderTy Builder(Block); + llvm::Value *DestCode = Builder.CreateLoad(DestCodePtr, "tmp"); + + // Create a switch instruction to determine where to jump next. + Builder.CreateSwitch(DestCode, CGF.getUnreachableBlock()); } -CodeGenFunction::CleanupBlockInfo CodeGenFunction::PopCleanupBlock() { - CleanupEntry &CE = CleanupEntries.back(); +/// Attempts to reduce a cleanup's entry block to a fallthrough. This +/// is basically llvm::MergeBlockIntoPredecessor, except +/// simplified/optimized for the tighter constraints on cleanup +/// blocks. 
+static void SimplifyCleanupEntry(CodeGenFunction &CGF, + llvm::BasicBlock *Entry) { + llvm::BasicBlock *Pred = Entry->getSinglePredecessor(); + if (!Pred) return; - llvm::BasicBlock *CleanupEntryBlock = CE.CleanupEntryBlock; + llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator()); + if (!Br || Br->isConditional()) return; + assert(Br->getSuccessor(0) == Entry); - std::vector<llvm::BasicBlock *> Blocks; - std::swap(Blocks, CE.Blocks); + // If we were previously inserting at the end of the cleanup entry + // block, we'll need to continue inserting at the end of the + // predecessor. + bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry; + assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end()); - std::vector<llvm::BranchInst *> BranchFixups; - std::swap(BranchFixups, CE.BranchFixups); + // Kill the branch. + Br->eraseFromParent(); - bool EHOnly = CE.EHOnly; + // Merge the blocks. + Pred->getInstList().splice(Pred->end(), Entry->getInstList()); - setInvokeDest(CE.PreviousInvokeDest); + // Kill the entry block. + Entry->eraseFromParent(); - CleanupEntries.pop_back(); + if (WasInsertBlock) + CGF.Builder.SetInsertPoint(Pred); +} - // Check if any branch fixups pointed to the scope we just popped. If so, - // we can remove them. - for (size_t i = 0, e = BranchFixups.size(); i != e; ++i) { - llvm::BasicBlock *Dest = BranchFixups[i]->getSuccessor(0); - BlockScopeMap::iterator I = BlockScopes.find(Dest); +/// Attempts to reduce an cleanup's exit switch to an unconditional +/// branch. 
+static void SimplifyCleanupExit(llvm::BasicBlock *Exit) { + llvm::TerminatorInst *Terminator = Exit->getTerminator(); + assert(Terminator && "completed cleanup exit has no terminator"); - if (I == BlockScopes.end()) - continue; + llvm::SwitchInst *Switch = dyn_cast<llvm::SwitchInst>(Terminator); + if (!Switch) return; + if (Switch->getNumCases() != 2) return; // default + 1 - assert(I->second <= CleanupEntries.size() && "Invalid branch fixup!"); + llvm::LoadInst *Cond = cast<llvm::LoadInst>(Switch->getCondition()); + llvm::AllocaInst *CondVar = cast<llvm::AllocaInst>(Cond->getPointerOperand()); - if (I->second == CleanupEntries.size()) { - // We don't need to do this branch fixup. - BranchFixups[i] = BranchFixups.back(); - BranchFixups.pop_back(); - i--; - e--; - continue; - } - } + // Replace the switch instruction with an unconditional branch. + llvm::BasicBlock *Dest = Switch->getSuccessor(1); // default is 0 + Switch->eraseFromParent(); + llvm::BranchInst::Create(Dest, Exit); - llvm::BasicBlock *SwitchBlock = CE.CleanupExitBlock; - llvm::BasicBlock *EndBlock = 0; - if (!BranchFixups.empty()) { - if (!SwitchBlock) - SwitchBlock = createBasicBlock("cleanup.switch"); - EndBlock = createBasicBlock("cleanup.end"); + // Delete all uses of the condition variable. + Cond->eraseFromParent(); + while (!CondVar->use_empty()) + cast<llvm::StoreInst>(*CondVar->use_begin())->eraseFromParent(); - llvm::BasicBlock *CurBB = Builder.GetInsertBlock(); + // Delete the condition variable itself. + CondVar->eraseFromParent(); +} - Builder.SetInsertPoint(SwitchBlock); +/// Threads a branch fixup through a cleanup block. 
+static void ThreadFixupThroughCleanup(CodeGenFunction &CGF, + BranchFixup &Fixup, + llvm::BasicBlock *Entry, + llvm::BasicBlock *Exit) { + if (!Exit->getTerminator()) + CreateCleanupSwitch(CGF, Exit); - llvm::Value *DestCodePtr - = CreateTempAlloca(llvm::Type::getInt32Ty(VMContext), - "cleanup.dst"); - llvm::Value *DestCode = Builder.CreateLoad(DestCodePtr, "tmp"); + // Find the switch and its destination index alloca. + llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Exit->getTerminator()); + llvm::Value *DestCodePtr = + cast<llvm::LoadInst>(Switch->getCondition())->getPointerOperand(); - // Create a switch instruction to determine where to jump next. - llvm::SwitchInst *SI = Builder.CreateSwitch(DestCode, EndBlock, - BranchFixups.size()); + // Compute the index of the new case we're adding to the switch. + unsigned Index = Switch->getNumCases(); - // Restore the current basic block (if any) - if (CurBB) { - Builder.SetInsertPoint(CurBB); + const llvm::IntegerType *i32 = llvm::Type::getInt32Ty(CGF.getLLVMContext()); + llvm::ConstantInt *IndexV = llvm::ConstantInt::get(i32, Index); - // If we had a current basic block, we also need to emit an instruction - // to initialize the cleanup destination. - Builder.CreateStore(llvm::Constant::getNullValue(llvm::Type::getInt32Ty(VMContext)), - DestCodePtr); - } else - Builder.ClearInsertionPoint(); - - for (size_t i = 0, e = BranchFixups.size(); i != e; ++i) { - llvm::BranchInst *BI = BranchFixups[i]; - llvm::BasicBlock *Dest = BI->getSuccessor(0); - - // Fixup the branch instruction to point to the cleanup block. - BI->setSuccessor(0, CleanupEntryBlock); - - if (CleanupEntries.empty()) { - llvm::ConstantInt *ID; - - // Check if we already have a destination for this block. - if (Dest == SI->getDefaultDest()) - ID = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0); - else { - ID = SI->findCaseDest(Dest); - if (!ID) { - // No code found, get a new unique one by using the number of - // switch successors. 
- ID = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), - SI->getNumSuccessors()); - SI->addCase(ID, Dest); - } - } + // Set the index in the origin block. + new llvm::StoreInst(IndexV, DestCodePtr, Fixup.Origin); - // Store the jump destination before the branch instruction. - new llvm::StoreInst(ID, DestCodePtr, BI); - } else { - // We need to jump through another cleanup block. Create a pad block - // with a branch instruction that jumps to the final destination and add - // it as a branch fixup to the current cleanup scope. + // Add a case to the switch. + Switch->addCase(IndexV, Fixup.Destination); - // Create the pad block. - llvm::BasicBlock *CleanupPad = createBasicBlock("cleanup.pad", CurFn); + // Change the last branch to point to the cleanup entry block. + Fixup.LatestBranch->setSuccessor(Fixup.LatestBranchIndex, Entry); - // Create a unique case ID. - llvm::ConstantInt *ID - = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), - SI->getNumSuccessors()); + // And finally, update the fixup. + Fixup.LatestBranch = Switch; + Fixup.LatestBranchIndex = Index; +} - // Store the jump destination before the branch instruction. - new llvm::StoreInst(ID, DestCodePtr, BI); +/// Try to simplify both the entry and exit edges of a cleanup. +static void SimplifyCleanupEdges(CodeGenFunction &CGF, + llvm::BasicBlock *Entry, + llvm::BasicBlock *Exit) { - // Add it as the destination. - SI->addCase(ID, CleanupPad); + // Given their current implementations, it's important to run these + // in this order: SimplifyCleanupEntry will delete Entry if it can + // be merged into its predecessor, which will then break + // SimplifyCleanupExit if (as is common) Entry == Exit. - // Create the branch to the final destination. - llvm::BranchInst *BI = llvm::BranchInst::Create(Dest); - CleanupPad->getInstList().push_back(BI); + SimplifyCleanupExit(Exit); + SimplifyCleanupEntry(CGF, Entry); +} - // And add it as a branch fixup. 
- CleanupEntries.back().BranchFixups.push_back(BI); - } - } +static void EmitLazyCleanup(CodeGenFunction &CGF, + EHScopeStack::LazyCleanup *Fn, + bool ForEH) { + if (ForEH) CGF.EHStack.pushTerminate(); + Fn->Emit(CGF, ForEH); + if (ForEH) CGF.EHStack.popTerminate(); + assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?"); +} + +static void SplitAndEmitLazyCleanup(CodeGenFunction &CGF, + EHScopeStack::LazyCleanup *Fn, + bool ForEH, + llvm::BasicBlock *Entry) { + assert(Entry && "no entry block for cleanup"); + + // Remove the switch and load from the end of the entry block. + llvm::Instruction *Switch = &Entry->getInstList().back(); + Entry->getInstList().remove(Switch); + assert(isa<llvm::SwitchInst>(Switch)); + llvm::Instruction *Load = &Entry->getInstList().back(); + Entry->getInstList().remove(Load); + assert(isa<llvm::LoadInst>(Load)); + + assert(Entry->getInstList().empty() && + "lazy cleanup block not empty after removing load/switch pair?"); + + // Emit the actual cleanup at the end of the entry block. + CGF.Builder.SetInsertPoint(Entry); + EmitLazyCleanup(CGF, Fn, ForEH); + + // Put the load and switch at the end of the exit block. + llvm::BasicBlock *Exit = CGF.Builder.GetInsertBlock(); + Exit->getInstList().push_back(Load); + Exit->getInstList().push_back(Switch); + + // Clean up the edges if possible. + SimplifyCleanupEdges(CGF, Entry, Exit); + + CGF.Builder.ClearInsertionPoint(); +} + +static void PopLazyCleanupBlock(CodeGenFunction &CGF) { + assert(isa<EHLazyCleanupScope>(*CGF.EHStack.begin()) && "top not a cleanup!"); + EHLazyCleanupScope &Scope = cast<EHLazyCleanupScope>(*CGF.EHStack.begin()); + assert(Scope.getFixupDepth() <= CGF.EHStack.getNumBranchFixups()); + + // Check whether we need an EH cleanup. This is only true if we've + // generated a lazy EH cleanup block. 
+ llvm::BasicBlock *EHEntry = Scope.getEHBlock(); + bool RequiresEHCleanup = (EHEntry != 0); + + // Check the three conditions which might require a normal cleanup: + + // - whether there are branch fix-ups through this cleanup + unsigned FixupDepth = Scope.getFixupDepth(); + bool HasFixups = CGF.EHStack.getNumBranchFixups() != FixupDepth; + + // - whether control has already been threaded through this cleanup + llvm::BasicBlock *NormalEntry = Scope.getNormalBlock(); + bool HasExistingBranches = (NormalEntry != 0); + + // - whether there's a fallthrough + llvm::BasicBlock *FallthroughSource = CGF.Builder.GetInsertBlock(); + bool HasFallthrough = (FallthroughSource != 0); + + bool RequiresNormalCleanup = false; + if (Scope.isNormalCleanup() && + (HasFixups || HasExistingBranches || HasFallthrough)) { + RequiresNormalCleanup = true; } - // Remove all blocks from the block scope map. - for (size_t i = 0, e = Blocks.size(); i != e; ++i) { - assert(BlockScopes.count(Blocks[i]) && - "Did not find block in scope map!"); + // If we don't need the cleanup at all, we're done. + if (!RequiresNormalCleanup && !RequiresEHCleanup) { + CGF.EHStack.popCleanup(); + assert(CGF.EHStack.getNumBranchFixups() == 0 || + CGF.EHStack.hasNormalCleanups()); + return; + } + + // Copy the cleanup emission data out. Note that SmallVector + // guarantees maximal alignment for its buffer regardless of its + // type parameter. + llvm::SmallVector<char, 8*sizeof(void*)> CleanupBuffer; + CleanupBuffer.reserve(Scope.getCleanupSize()); + memcpy(CleanupBuffer.data(), + Scope.getCleanupBuffer(), Scope.getCleanupSize()); + CleanupBuffer.set_size(Scope.getCleanupSize()); + EHScopeStack::LazyCleanup *Fn = + reinterpret_cast<EHScopeStack::LazyCleanup*>(CleanupBuffer.data()); + + // We're done with the scope; pop it off so we can emit the cleanups. + CGF.EHStack.popCleanup(); + + if (RequiresNormalCleanup) { + // If we have a fallthrough and no other need for the cleanup, + // emit it directly. 
+ if (HasFallthrough && !HasFixups && !HasExistingBranches) { + EmitLazyCleanup(CGF, Fn, /*ForEH*/ false); + + // Otherwise, the best approach is to thread everything through + // the cleanup block and then try to clean up after ourselves. + } else { + // Force the entry block to exist. + if (!HasExistingBranches) { + NormalEntry = CGF.createBasicBlock("cleanup"); + CreateCleanupSwitch(CGF, NormalEntry); + } - BlockScopes.erase(Blocks[i]); + CGF.EmitBlock(NormalEntry); + + // Thread the fallthrough edge through the (momentarily trivial) + // cleanup. + llvm::BasicBlock *FallthroughDestination = 0; + if (HasFallthrough) { + assert(isa<llvm::BranchInst>(FallthroughSource->getTerminator())); + FallthroughDestination = CGF.createBasicBlock("cleanup.cont"); + + BranchFixup Fix; + Fix.Destination = FallthroughDestination; + Fix.LatestBranch = FallthroughSource->getTerminator(); + Fix.LatestBranchIndex = 0; + Fix.Origin = Fix.LatestBranch; + + // Restore fixup invariant. EmitBlock added a branch to the + // cleanup which we need to redirect to the destination. + cast<llvm::BranchInst>(Fix.LatestBranch) + ->setSuccessor(0, Fix.Destination); + + ThreadFixupThroughCleanup(CGF, Fix, NormalEntry, NormalEntry); + } + + // Thread any "real" fixups we need to thread. + for (unsigned I = FixupDepth, E = CGF.EHStack.getNumBranchFixups(); + I != E; ++I) + if (CGF.EHStack.getBranchFixup(I).Destination) + ThreadFixupThroughCleanup(CGF, CGF.EHStack.getBranchFixup(I), + NormalEntry, NormalEntry); + + SplitAndEmitLazyCleanup(CGF, Fn, /*ForEH*/ false, NormalEntry); + + if (HasFallthrough) + CGF.EmitBlock(FallthroughDestination); + } } - return CleanupBlockInfo(CleanupEntryBlock, SwitchBlock, EndBlock, EHOnly); + // Emit the EH cleanup if required. 
+ if (RequiresEHCleanup) { + CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP(); + CGF.EmitBlock(EHEntry); + SplitAndEmitLazyCleanup(CGF, Fn, /*ForEH*/ true, EHEntry); + CGF.Builder.restoreIP(SavedIP); + } } -void CodeGenFunction::EmitCleanupBlock() { - CleanupBlockInfo Info = PopCleanupBlock(); +/// Pops a cleanup block. If the block includes a normal cleanup, the +/// current insertion point is threaded through the cleanup, as are +/// any branch fixups on the cleanup. +void CodeGenFunction::PopCleanupBlock() { + assert(!EHStack.empty() && "cleanup stack is empty!"); + if (isa<EHLazyCleanupScope>(*EHStack.begin())) + return PopLazyCleanupBlock(*this); + + assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!"); + EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin()); + assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups()); + + // Handle the EH cleanup if (1) there is one and (2) it's different + // from the normal cleanup. + if (Scope.isEHCleanup() && + Scope.getEHEntry() != Scope.getNormalEntry()) { + llvm::BasicBlock *EHEntry = Scope.getEHEntry(); + llvm::BasicBlock *EHExit = Scope.getEHExit(); + + if (EHEntry->use_empty()) { + DestroyCleanup(*this, EHEntry, EHExit); + } else { + // TODO: this isn't really the ideal location to put this EH + // cleanup, but lazy emission is a better solution than trying + // to pick a better spot. + CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP(); + EmitBlock(EHEntry); + Builder.restoreIP(SavedIP); + + SimplifyCleanupEdges(*this, EHEntry, EHExit); + } + } + + // If we only have an EH cleanup, we don't really need to do much + // here. Branch fixups just naturally drop down to the enclosing + // cleanup scope. 
+ if (!Scope.isNormalCleanup()) { + EHStack.popCleanup(); + assert(EHStack.getNumBranchFixups() == 0 || EHStack.hasNormalCleanups()); + return; + } - if (Info.EHOnly) { - // FIXME: Add this to the exceptional edge - if (Info.CleanupBlock->getNumUses() == 0) - delete Info.CleanupBlock; + // Check whether the scope has any fixups that need to be threaded. + unsigned FixupDepth = Scope.getFixupDepth(); + bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth; + + // Grab the entry and exit blocks. + llvm::BasicBlock *Entry = Scope.getNormalEntry(); + llvm::BasicBlock *Exit = Scope.getNormalExit(); + + // Check whether anything's been threaded through the cleanup already. + assert((Exit->getTerminator() == 0) == Entry->use_empty() && + "cleanup entry/exit mismatch"); + bool HasExistingBranches = !Entry->use_empty(); + + // Check whether we need to emit a "fallthrough" branch through the + // cleanup for the current insertion point. + llvm::BasicBlock *FallThrough = Builder.GetInsertBlock(); + if (FallThrough && FallThrough->getTerminator()) + FallThrough = 0; + + // If *nothing* is using the cleanup, kill it. + if (!FallThrough && !HasFixups && !HasExistingBranches) { + EHStack.popCleanup(); + DestroyCleanup(*this, Entry, Exit); return; } - // Scrub debug location info. - for (llvm::BasicBlock::iterator LBI = Info.CleanupBlock->begin(), - LBE = Info.CleanupBlock->end(); LBI != LBE; ++LBI) - Builder.SetInstDebugLocation(LBI); + // Otherwise, add the block to the function. 
+ EmitBlock(Entry); - llvm::BasicBlock *CurBB = Builder.GetInsertBlock(); - if (CurBB && !CurBB->getTerminator() && - Info.CleanupBlock->getNumUses() == 0) { - CurBB->getInstList().splice(CurBB->end(), Info.CleanupBlock->getInstList()); - delete Info.CleanupBlock; - } else - EmitBlock(Info.CleanupBlock); + if (FallThrough) + Builder.SetInsertPoint(Exit); + else + Builder.ClearInsertionPoint(); - if (Info.SwitchBlock) - EmitBlock(Info.SwitchBlock); - if (Info.EndBlock) - EmitBlock(Info.EndBlock); -} + // Fast case: if we don't have to add any fixups, and either + // we don't have a fallthrough or the cleanup wasn't previously + // used, then the setup above is sufficient. + if (!HasFixups) { + if (!FallThrough) { + assert(HasExistingBranches && "no reason for cleanup but didn't kill before"); + EHStack.popCleanup(); + SimplifyCleanupEdges(*this, Entry, Exit); + return; + } else if (!HasExistingBranches) { + assert(FallThrough && "no reason for cleanup but didn't kill before"); + // We can't simplify the exit edge in this case because we're + // already inserting at the end of the exit block. + EHStack.popCleanup(); + SimplifyCleanupEntry(*this, Entry); + return; + } + } + + // Otherwise we're going to have to thread things through the cleanup. + llvm::SmallVector<BranchFixup*, 8> Fixups; -void CodeGenFunction::AddBranchFixup(llvm::BranchInst *BI) { - assert(!CleanupEntries.empty() && - "Trying to add branch fixup without cleanup block!"); + // Synthesize a fixup for the current insertion point. + BranchFixup Cur; + if (FallThrough) { + Cur.Destination = createBasicBlock("cleanup.cont"); + Cur.LatestBranch = FallThrough->getTerminator(); + Cur.LatestBranchIndex = 0; + Cur.Origin = Cur.LatestBranch; - // FIXME: We could be more clever here and check if there's already a branch - // fixup for this destination and recycle it. - CleanupEntries.back().BranchFixups.push_back(BI); + // Restore fixup invariant. 
EmitBlock added a branch to the cleanup + // which we need to redirect to the destination. + cast<llvm::BranchInst>(Cur.LatestBranch)->setSuccessor(0, Cur.Destination); + + Fixups.push_back(&Cur); + } else { + Cur.Destination = 0; + } + + // Collect any "real" fixups we need to thread. + for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups(); + I != E; ++I) + if (EHStack.getBranchFixup(I).Destination) + Fixups.push_back(&EHStack.getBranchFixup(I)); + + assert(!Fixups.empty() && "no fixups, invariants broken!"); + + // If there's only a single fixup to thread through, do so with + // unconditional branches. This only happens if there's a single + // branch and no fallthrough. + if (Fixups.size() == 1 && !HasExistingBranches) { + Fixups[0]->LatestBranch->setSuccessor(Fixups[0]->LatestBranchIndex, Entry); + llvm::BranchInst *Br = + llvm::BranchInst::Create(Fixups[0]->Destination, Exit); + Fixups[0]->LatestBranch = Br; + Fixups[0]->LatestBranchIndex = 0; + + // Otherwise, force a switch statement and thread everything through + // the switch. + } else { + CreateCleanupSwitch(*this, Exit); + for (unsigned I = 0, E = Fixups.size(); I != E; ++I) + ThreadFixupThroughCleanup(*this, *Fixups[I], Entry, Exit); + } + + // Emit the fallthrough destination block if necessary. + if (Cur.Destination) + EmitBlock(Cur.Destination); + + // We're finally done with the cleanup. + EHStack.popCleanup(); } -void CodeGenFunction::EmitBranchThroughCleanup(llvm::BasicBlock *Dest) { +void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) { if (!HaveInsertPoint()) return; - llvm::BranchInst* BI = Builder.CreateBr(Dest); - - Builder.ClearInsertionPoint(); + // Create the branch. + llvm::BranchInst *BI = Builder.CreateBr(Dest.Block); - // The stack is empty, no need to do any cleanup. - if (CleanupEntries.empty()) + // If we're not in a cleanup scope, we don't need to worry about + // fixups. 
+ if (!EHStack.hasNormalCleanups()) { + Builder.ClearInsertionPoint(); return; + } - if (!Dest->getParent()) { - // We are trying to branch to a block that hasn't been inserted yet. - AddBranchFixup(BI); + // Initialize a fixup. + BranchFixup Fixup; + Fixup.Destination = Dest.Block; + Fixup.Origin = BI; + Fixup.LatestBranch = BI; + Fixup.LatestBranchIndex = 0; + + // If we can't resolve the destination cleanup scope, just add this + // to the current cleanup scope. + if (!Dest.ScopeDepth.isValid()) { + EHStack.addBranchFixup() = Fixup; + Builder.ClearInsertionPoint(); return; } - BlockScopeMap::iterator I = BlockScopes.find(Dest); - if (I == BlockScopes.end()) { - // We are trying to jump to a block that is outside of any cleanup scope. - AddBranchFixup(BI); - return; + for (EHScopeStack::iterator I = EHStack.begin(), + E = EHStack.find(Dest.ScopeDepth); I != E; ++I) { + if (isa<EHCleanupScope>(*I)) { + EHCleanupScope &Scope = cast<EHCleanupScope>(*I); + if (Scope.isNormalCleanup()) + ThreadFixupThroughCleanup(*this, Fixup, Scope.getNormalEntry(), + Scope.getNormalExit()); + } else if (isa<EHLazyCleanupScope>(*I)) { + EHLazyCleanupScope &Scope = cast<EHLazyCleanupScope>(*I); + if (Scope.isNormalCleanup()) { + llvm::BasicBlock *Block = Scope.getNormalBlock(); + if (!Block) { + Block = createBasicBlock("cleanup"); + Scope.setNormalBlock(Block); + } + ThreadFixupThroughCleanup(*this, Fixup, Block, Block); + } + } } + + Builder.ClearInsertionPoint(); +} - assert(I->second < CleanupEntries.size() && - "Trying to branch into cleanup region"); +void CodeGenFunction::EmitBranchThroughEHCleanup(JumpDest Dest) { + if (!HaveInsertPoint()) + return; + + // Create the branch. + llvm::BranchInst *BI = Builder.CreateBr(Dest.Block); - if (I->second == CleanupEntries.size() - 1) { - // We have a branch to a block in the same scope. + // If we're not in a cleanup scope, we don't need to worry about + // fixups. 
+ if (!EHStack.hasEHCleanups()) { + Builder.ClearInsertionPoint(); return; } - AddBranchFixup(BI); + // Initialize a fixup. + BranchFixup Fixup; + Fixup.Destination = Dest.Block; + Fixup.Origin = BI; + Fixup.LatestBranch = BI; + Fixup.LatestBranchIndex = 0; + + // We should never get invalid scope depths for these: invalid scope + // depths only arise for as-yet-unemitted labels, and we can't do an + // EH-unwind to one of those. + assert(Dest.ScopeDepth.isValid() && "invalid scope depth on EH dest?"); + + for (EHScopeStack::iterator I = EHStack.begin(), + E = EHStack.find(Dest.ScopeDepth); I != E; ++I) { + if (isa<EHCleanupScope>(*I)) { + EHCleanupScope &Scope = cast<EHCleanupScope>(*I); + if (Scope.isEHCleanup()) + ThreadFixupThroughCleanup(*this, Fixup, Scope.getEHEntry(), + Scope.getEHExit()); + } else if (isa<EHLazyCleanupScope>(*I)) { + EHLazyCleanupScope &Scope = cast<EHLazyCleanupScope>(*I); + if (Scope.isEHCleanup()) { + llvm::BasicBlock *Block = Scope.getEHBlock(); + if (!Block) { + Block = createBasicBlock("eh.cleanup"); + Scope.setEHBlock(Block); + } + ThreadFixupThroughCleanup(*this, Fixup, Block, Block); + } + } + } + + Builder.ClearInsertionPoint(); } diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h index ece275e..5ee3db0 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h +++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h @@ -37,6 +37,7 @@ namespace llvm { class SwitchInst; class Twine; class Value; + class CallSite; } namespace clang { @@ -69,12 +70,317 @@ namespace CodeGen { class CGRecordLayout; class CGBlockInfo; +/// A branch fixup. These are required when emitting a goto to a +/// label which hasn't been emitted yet. The goto is optimistically +/// emitted as a branch to the basic block for the label, and (if it +/// occurs in a scope with non-trivial cleanups) a fixup is added to +/// the innermost cleanup. 
When a (normal) cleanup is popped, any +/// unresolved fixups in that scope are threaded through the cleanup. +struct BranchFixup { + /// The origin of the branch. Any switch-index stores required by + /// cleanup threading are added before this instruction. + llvm::Instruction *Origin; + + /// The destination of the branch. + /// + /// This can be set to null to indicate that this fixup was + /// successfully resolved. + llvm::BasicBlock *Destination; + + /// The last branch of the fixup. It is an invariant that + /// LatestBranch->getSuccessor(LatestBranchIndex) == Destination. + /// + /// The branch is always either a BranchInst or a SwitchInst. + llvm::TerminatorInst *LatestBranch; + unsigned LatestBranchIndex; +}; + +enum CleanupKind { NormalAndEHCleanup, EHCleanup, NormalCleanup }; + +/// A stack of scopes which respond to exceptions, including cleanups +/// and catch blocks. +class EHScopeStack { +public: + /// A saved depth on the scope stack. This is necessary because + /// pushing scopes onto the stack invalidates iterators. + class stable_iterator { + friend class EHScopeStack; + + /// Offset from StartOfData to EndOfBuffer. + ptrdiff_t Size; + + stable_iterator(ptrdiff_t Size) : Size(Size) {} + + public: + static stable_iterator invalid() { return stable_iterator(-1); } + stable_iterator() : Size(-1) {} + + bool isValid() const { return Size >= 0; } + + friend bool operator==(stable_iterator A, stable_iterator B) { + return A.Size == B.Size; + } + friend bool operator!=(stable_iterator A, stable_iterator B) { + return A.Size != B.Size; + } + }; + + /// A lazy cleanup. Subclasses must be POD-like: cleanups will + /// not be destructed, and they will be allocated on the cleanup + /// stack and freely copied and moved around. + /// + /// LazyCleanup implementations should generally be declared in an + /// anonymous namespace. + class LazyCleanup { + public: + // Anchor the construction vtable. 
We use the destructor because + // gcc gives an obnoxious warning if there are virtual methods + // with an accessible non-virtual destructor. Unfortunately, + // declaring this destructor makes it non-trivial, but there + // doesn't seem to be any other way around this warning. + // + // This destructor will never be called. + virtual ~LazyCleanup(); + + /// Emit the cleanup. For normal cleanups, this is run in the + /// same EH context as when the cleanup was pushed, i.e. the + /// immediately-enclosing context of the cleanup scope. For + /// EH cleanups, this is run in a terminate context. + /// + // \param IsForEHCleanup true if this is for an EH cleanup, false + /// if for a normal cleanup. + virtual void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) = 0; + }; + +private: + // The implementation for this class is in CGException.h and + // CGException.cpp; the definition is here because it's used as a + // member of CodeGenFunction. + + /// The start of the scope-stack buffer, i.e. the allocated pointer + /// for the buffer. All of these pointers are either simultaneously + /// null or simultaneously valid. + char *StartOfBuffer; + + /// The end of the buffer. + char *EndOfBuffer; + + /// The first valid entry in the buffer. + char *StartOfData; + + /// The innermost normal cleanup on the stack. + stable_iterator InnermostNormalCleanup; + + /// The innermost EH cleanup on the stack. + stable_iterator InnermostEHCleanup; + + /// The number of catches on the stack. + unsigned CatchDepth; + + /// The current set of branch fixups. A branch fixup is a jump to + /// an as-yet unemitted label, i.e. a label for which we don't yet + /// know the EH stack depth. Whenever we pop a cleanup, we have + /// to thread all the current branch fixups through it. + /// + /// Fixups are recorded as the Use of the respective branch or + /// switch statement. The use points to the final destination. 
+ /// When popping out of a cleanup, these uses are threaded through + /// the cleanup and adjusted to point to the new cleanup. + /// + /// Note that branches are allowed to jump into protected scopes + /// in certain situations; e.g. the following code is legal: + /// struct A { ~A(); }; // trivial ctor, non-trivial dtor + /// goto foo; + /// A a; + /// foo: + /// bar(); + llvm::SmallVector<BranchFixup, 8> BranchFixups; + + char *allocate(size_t Size); + + void popNullFixups(); + + void *pushLazyCleanup(CleanupKind K, size_t DataSize); + +public: + EHScopeStack() : StartOfBuffer(0), EndOfBuffer(0), StartOfData(0), + InnermostNormalCleanup(stable_end()), + InnermostEHCleanup(stable_end()), + CatchDepth(0) {} + ~EHScopeStack() { delete[] StartOfBuffer; } + + // Variadic templates would make this not terrible. + + /// Push a lazily-created cleanup on the stack. + template <class T> + void pushLazyCleanup(CleanupKind Kind) { + void *Buffer = pushLazyCleanup(Kind, sizeof(T)); + LazyCleanup *Obj = new(Buffer) T(); + (void) Obj; + } + + /// Push a lazily-created cleanup on the stack. + template <class T, class A0> + void pushLazyCleanup(CleanupKind Kind, A0 a0) { + void *Buffer = pushLazyCleanup(Kind, sizeof(T)); + LazyCleanup *Obj = new(Buffer) T(a0); + (void) Obj; + } + + /// Push a lazily-created cleanup on the stack. + template <class T, class A0, class A1> + void pushLazyCleanup(CleanupKind Kind, A0 a0, A1 a1) { + void *Buffer = pushLazyCleanup(Kind, sizeof(T)); + LazyCleanup *Obj = new(Buffer) T(a0, a1); + (void) Obj; + } + + /// Push a lazily-created cleanup on the stack. + template <class T, class A0, class A1, class A2> + void pushLazyCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2) { + void *Buffer = pushLazyCleanup(Kind, sizeof(T)); + LazyCleanup *Obj = new(Buffer) T(a0, a1, a2); + (void) Obj; + } + + /// Push a lazily-created cleanup on the stack. 
+ template <class T, class A0, class A1, class A2, class A3> + void pushLazyCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3) { + void *Buffer = pushLazyCleanup(Kind, sizeof(T)); + LazyCleanup *Obj = new(Buffer) T(a0, a1, a2, a3); + (void) Obj; + } + + /// Push a cleanup on the stack. + void pushCleanup(llvm::BasicBlock *NormalEntry, + llvm::BasicBlock *NormalExit, + llvm::BasicBlock *EHEntry, + llvm::BasicBlock *EHExit); + + /// Pops a cleanup scope off the stack. This should only be called + /// by CodeGenFunction::PopCleanupBlock. + void popCleanup(); + + /// Push a set of catch handlers on the stack. The catch is + /// uninitialized and will need to have the given number of handlers + /// set on it. + class EHCatchScope *pushCatch(unsigned NumHandlers); + + /// Pops a catch scope off the stack. + void popCatch(); + + /// Push an exceptions filter on the stack. + class EHFilterScope *pushFilter(unsigned NumFilters); + + /// Pops an exceptions filter off the stack. + void popFilter(); + + /// Push a terminate handler on the stack. + void pushTerminate(); + + /// Pops a terminate handler off the stack. + void popTerminate(); + + /// Determines whether the exception-scopes stack is empty. + bool empty() const { return StartOfData == EndOfBuffer; } + + bool requiresLandingPad() const { + return (CatchDepth || hasEHCleanups()); + } + + /// Determines whether there are any normal cleanups on the stack. + bool hasNormalCleanups() const { + return InnermostNormalCleanup != stable_end(); + } + + /// Returns the innermost normal cleanup on the stack, or + /// stable_end() if there are no normal cleanups. + stable_iterator getInnermostNormalCleanup() const { + return InnermostNormalCleanup; + } + + /// Determines whether there are any EH cleanups on the stack. + bool hasEHCleanups() const { + return InnermostEHCleanup != stable_end(); + } + + /// Returns the innermost EH cleanup on the stack, or stable_end() + /// if there are no EH cleanups. 
+ stable_iterator getInnermostEHCleanup() const { + return InnermostEHCleanup; + } + + /// An unstable reference to a scope-stack depth. Invalidated by + /// pushes but not pops. + class iterator; + + /// Returns an iterator pointing to the innermost EH scope. + iterator begin() const; + + /// Returns an iterator pointing to the outermost EH scope. + iterator end() const; + + /// Create a stable reference to the top of the EH stack. The + /// returned reference is valid until that scope is popped off the + /// stack. + stable_iterator stable_begin() const { + return stable_iterator(EndOfBuffer - StartOfData); + } + + /// Create a stable reference to the bottom of the EH stack. + static stable_iterator stable_end() { + return stable_iterator(0); + } + + /// Translates an iterator into a stable_iterator. + stable_iterator stabilize(iterator it) const; + + /// Finds the nearest cleanup enclosing the given iterator. + /// Returns stable_iterator::invalid() if there are no such cleanups. + stable_iterator getEnclosingEHCleanup(iterator it) const; + + /// Turn a stable reference to a scope depth into a unstable pointer + /// to the EH stack. + iterator find(stable_iterator save) const; + + /// Removes the cleanup pointed to by the given stable_iterator. + void removeCleanup(stable_iterator save); + + /// Add a branch fixup to the current cleanup scope. + BranchFixup &addBranchFixup() { + assert(hasNormalCleanups() && "adding fixup in scope without cleanups"); + BranchFixups.push_back(BranchFixup()); + return BranchFixups.back(); + } + + unsigned getNumBranchFixups() const { return BranchFixups.size(); } + BranchFixup &getBranchFixup(unsigned I) { + assert(I < getNumBranchFixups()); + return BranchFixups[I]; + } + + /// Mark any branch fixups leading to the given block as resolved. + void resolveBranchFixups(llvm::BasicBlock *Dest); +}; + /// CodeGenFunction - This class organizes the per-function state that is used /// while generating LLVM code. 
class CodeGenFunction : public BlockFunction { CodeGenFunction(const CodeGenFunction&); // DO NOT IMPLEMENT void operator=(const CodeGenFunction&); // DO NOT IMPLEMENT public: + /// A jump destination is a pair of a basic block and a cleanup + /// depth. They are used to implement direct jumps across cleanup + /// scopes, e.g. goto, break, continue, and return. + struct JumpDest { + JumpDest() : Block(0), ScopeDepth() {} + JumpDest(llvm::BasicBlock *Block, EHScopeStack::stable_iterator Depth) + : Block(Block), ScopeDepth(Depth) {} + + llvm::BasicBlock *Block; + EHScopeStack::stable_iterator ScopeDepth; + }; + CodeGenModule &CGM; // Per-module state. const TargetInfo &Target; @@ -94,7 +400,8 @@ public: GlobalDecl CurGD; /// ReturnBlock - Unified return block. - llvm::BasicBlock *ReturnBlock; + JumpDest ReturnBlock; + /// ReturnValue - The temporary alloca to hold the return value. This is null /// iff the function has no return value. llvm::Value *ReturnValue; @@ -103,7 +410,8 @@ public: /// we prefer to insert allocas. llvm::AssertingVH<llvm::Instruction> AllocaInsertPt; - const llvm::Type *LLVMIntTy; + // intptr_t, i32, i64 + const llvm::IntegerType *IntPtrTy, *Int32Ty, *Int64Ty; uint32_t LLVMPointerWidth; bool Exceptions; @@ -112,141 +420,97 @@ public: /// \brief A mapping from NRVO variables to the flags used to indicate /// when the NRVO has been applied to this variable. llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags; - -public: - /// ObjCEHValueStack - Stack of Objective-C exception values, used for - /// rethrows. - llvm::SmallVector<llvm::Value*, 8> ObjCEHValueStack; - /// PushCleanupBlock - Push a new cleanup entry on the stack and set the - /// passed in block as the cleanup block. 
- void PushCleanupBlock(llvm::BasicBlock *CleanupEntryBlock, - llvm::BasicBlock *CleanupExitBlock, - llvm::BasicBlock *PreviousInvokeDest, - bool EHOnly = false); - void PushCleanupBlock(llvm::BasicBlock *CleanupEntryBlock) { - PushCleanupBlock(CleanupEntryBlock, 0, getInvokeDest(), false); - } - - /// CleanupBlockInfo - A struct representing a popped cleanup block. - struct CleanupBlockInfo { - /// CleanupEntryBlock - the cleanup entry block - llvm::BasicBlock *CleanupBlock; + EHScopeStack EHStack; - /// SwitchBlock - the block (if any) containing the switch instruction used - /// for jumping to the final destination. - llvm::BasicBlock *SwitchBlock; + /// The exception slot. All landing pads write the current + /// exception pointer into this alloca. + llvm::Value *ExceptionSlot; - /// EndBlock - the default destination for the switch instruction. - llvm::BasicBlock *EndBlock; + /// Emits a landing pad for the current EH stack. + llvm::BasicBlock *EmitLandingPad(); - /// EHOnly - True iff this cleanup should only be performed on the - /// exceptional edge. - bool EHOnly; + llvm::BasicBlock *getInvokeDestImpl(); - CleanupBlockInfo(llvm::BasicBlock *cb, llvm::BasicBlock *sb, - llvm::BasicBlock *eb, bool ehonly = false) - : CleanupBlock(cb), SwitchBlock(sb), EndBlock(eb), EHOnly(ehonly) {} - }; +public: + /// ObjCEHValueStack - Stack of Objective-C exception values, used for + /// rethrows. + llvm::SmallVector<llvm::Value*, 8> ObjCEHValueStack; - /// EHCleanupBlock - RAII object that will create a cleanup block for the - /// exceptional edge and set the insert point to that block. When destroyed, - /// it creates the cleanup edge and sets the insert point to the previous - /// block. 
- class EHCleanupBlock { - CodeGenFunction& CGF; - llvm::BasicBlock *PreviousInsertionBlock; - llvm::BasicBlock *CleanupHandler; - llvm::BasicBlock *PreviousInvokeDest; - public: - EHCleanupBlock(CodeGenFunction &cgf) - : CGF(cgf), - PreviousInsertionBlock(CGF.Builder.GetInsertBlock()), - CleanupHandler(CGF.createBasicBlock("ehcleanup", CGF.CurFn)), - PreviousInvokeDest(CGF.getInvokeDest()) { - llvm::BasicBlock *TerminateHandler = CGF.getTerminateHandler(); - CGF.Builder.SetInsertPoint(CleanupHandler); - CGF.setInvokeDest(TerminateHandler); - } - ~EHCleanupBlock(); + // A struct holding information about a finally block's IR + // generation. For now, doesn't actually hold anything. + struct FinallyInfo { }; - /// PopCleanupBlock - Will pop the cleanup entry on the stack, process all - /// branch fixups and return a block info struct with the switch block and end - /// block. This will also reset the invoke handler to the previous value - /// from when the cleanup block was created. - CleanupBlockInfo PopCleanupBlock(); - - /// DelayedCleanupBlock - RAII object that will create a cleanup block and set - /// the insert point to that block. When destructed, it sets the insert point - /// to the previous block and pushes a new cleanup entry on the stack. - class DelayedCleanupBlock { - CodeGenFunction& CGF; - llvm::BasicBlock *CurBB; - llvm::BasicBlock *CleanupEntryBB; - llvm::BasicBlock *CleanupExitBB; - llvm::BasicBlock *CurInvokeDest; - bool EHOnly; + FinallyInfo EnterFinallyBlock(const Stmt *Stmt, + llvm::Constant *BeginCatchFn, + llvm::Constant *EndCatchFn, + llvm::Constant *RethrowFn); + void ExitFinallyBlock(FinallyInfo &FinallyInfo); + + /// PushDestructorCleanup - Push a cleanup to call the + /// complete-object destructor of an object of the given type at the + /// given address. Does nothing if T is not a C++ class type with a + /// non-trivial destructor. 
+ void PushDestructorCleanup(QualType T, llvm::Value *Addr); + + /// PopCleanupBlock - Will pop the cleanup entry on the stack and + /// process all branch fixups. + void PopCleanupBlock(); + + /// CleanupBlock - RAII object that will create a cleanup block and + /// set the insert point to that block. When destructed, it sets the + /// insert point to the previous block and pushes a new cleanup + /// entry on the stack. + class CleanupBlock { + CodeGenFunction &CGF; + CGBuilderTy::InsertPoint SavedIP; + llvm::BasicBlock *NormalCleanupEntryBB; + llvm::BasicBlock *NormalCleanupExitBB; + llvm::BasicBlock *EHCleanupEntryBB; public: - DelayedCleanupBlock(CodeGenFunction &cgf, bool ehonly = false) - : CGF(cgf), CurBB(CGF.Builder.GetInsertBlock()), - CleanupEntryBB(CGF.createBasicBlock("cleanup")), - CleanupExitBB(0), - CurInvokeDest(CGF.getInvokeDest()), - EHOnly(ehonly) { - CGF.Builder.SetInsertPoint(CleanupEntryBB); - } + CleanupBlock(CodeGenFunction &CGF, CleanupKind Kind); - llvm::BasicBlock *getCleanupExitBlock() { - if (!CleanupExitBB) - CleanupExitBB = CGF.createBasicBlock("cleanup.exit"); - return CleanupExitBB; - } + /// If we're currently writing a normal cleanup, tie that off and + /// start writing an EH cleanup. + void beginEHCleanup(); - ~DelayedCleanupBlock() { - CGF.PushCleanupBlock(CleanupEntryBB, CleanupExitBB, CurInvokeDest, - EHOnly); - // FIXME: This is silly, move this into the builder. - if (CurBB) - CGF.Builder.SetInsertPoint(CurBB); - else - CGF.Builder.ClearInsertionPoint(); - } + ~CleanupBlock(); }; - /// \brief Enters a new scope for capturing cleanups, all of which will be - /// executed once the scope is exited. - class CleanupScope { + /// \brief Enters a new scope for capturing cleanups, all of which + /// will be executed once the scope is exited. 
+ class RunCleanupsScope { CodeGenFunction& CGF; - size_t CleanupStackDepth; + EHScopeStack::stable_iterator CleanupStackDepth; bool OldDidCallStackSave; bool PerformCleanup; - CleanupScope(const CleanupScope &); // DO NOT IMPLEMENT - CleanupScope &operator=(const CleanupScope &); // DO NOT IMPLEMENT + RunCleanupsScope(const RunCleanupsScope &); // DO NOT IMPLEMENT + RunCleanupsScope &operator=(const RunCleanupsScope &); // DO NOT IMPLEMENT public: /// \brief Enter a new cleanup scope. - explicit CleanupScope(CodeGenFunction &CGF) + explicit RunCleanupsScope(CodeGenFunction &CGF) : CGF(CGF), PerformCleanup(true) { - CleanupStackDepth = CGF.CleanupEntries.size(); + CleanupStackDepth = CGF.EHStack.stable_begin(); OldDidCallStackSave = CGF.DidCallStackSave; } /// \brief Exit this cleanup scope, emitting any accumulated /// cleanups. - ~CleanupScope() { + ~RunCleanupsScope() { if (PerformCleanup) { CGF.DidCallStackSave = OldDidCallStackSave; - CGF.EmitCleanupBlocks(CleanupStackDepth); + CGF.PopCleanupBlocks(CleanupStackDepth); } } /// \brief Determine whether this scope requires any cleanups. bool requiresCleanups() const { - return CGF.CleanupEntries.size() > CleanupStackDepth; + return CGF.EHStack.stable_begin() != CleanupStackDepth; } /// \brief Force the emission of cleanups now, instead of waiting @@ -254,42 +518,39 @@ public: void ForceCleanup() { assert(PerformCleanup && "Already forced cleanup"); CGF.DidCallStackSave = OldDidCallStackSave; - CGF.EmitCleanupBlocks(CleanupStackDepth); + CGF.PopCleanupBlocks(CleanupStackDepth); PerformCleanup = false; } }; - /// CXXTemporariesCleanupScope - Enters a new scope for catching live - /// temporaries, all of which will be popped once the scope is exited. 
- class CXXTemporariesCleanupScope { - CodeGenFunction &CGF; - size_t NumLiveTemporaries; - - // DO NOT IMPLEMENT - CXXTemporariesCleanupScope(const CXXTemporariesCleanupScope &); - CXXTemporariesCleanupScope &operator=(const CXXTemporariesCleanupScope &); - - public: - explicit CXXTemporariesCleanupScope(CodeGenFunction &CGF) - : CGF(CGF), NumLiveTemporaries(CGF.LiveTemporaries.size()) { } - - ~CXXTemporariesCleanupScope() { - while (CGF.LiveTemporaries.size() > NumLiveTemporaries) - CGF.PopCXXTemporary(); - } - }; + /// PopCleanupBlocks - Takes the old cleanup stack size and emits + /// the cleanup blocks that have been added. + void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize); - /// EmitCleanupBlocks - Takes the old cleanup stack size and emits the cleanup - /// blocks that have been added. - void EmitCleanupBlocks(size_t OldCleanupStackSize); + /// The given basic block lies in the current EH scope, but may be a + /// target of a potentially scope-crossing jump; get a stable handle + /// to which we can perform this jump later. + JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) const { + return JumpDest(Target, EHStack.stable_begin()); + } - /// EmitBranchThroughCleanup - Emit a branch from the current insert block - /// through the cleanup handling code (if any) and then on to \arg Dest. - /// - /// FIXME: Maybe this should really be in EmitBranch? Don't we always want - /// this behavior for branches? - void EmitBranchThroughCleanup(llvm::BasicBlock *Dest); + /// The given basic block lies in the current EH scope, but may be a + /// target of a potentially scope-crossing jump; get a stable handle + /// to which we can perform this jump later. 
+ JumpDest getJumpDestInCurrentScope(const char *Name = 0) { + return JumpDest(createBasicBlock(Name), EHStack.stable_begin()); + } + + /// EmitBranchThroughCleanup - Emit a branch from the current insert + /// block through the normal cleanup handling code (if any) and then + /// on to \arg Dest. + void EmitBranchThroughCleanup(JumpDest Dest); + + /// EmitBranchThroughEHCleanup - Emit a branch from the current + /// insert block through the EH cleanup handling code (if any) and + /// then on to \arg Dest. + void EmitBranchThroughEHCleanup(JumpDest Dest); /// BeginConditionalBranch - Should be called before a conditional part of an /// expression is emitted. For example, before the RHS of the expression below @@ -326,16 +587,16 @@ private: llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap; /// LabelMap - This keeps track of the LLVM basic block for each C label. - llvm::DenseMap<const LabelStmt*, llvm::BasicBlock*> LabelMap; + llvm::DenseMap<const LabelStmt*, JumpDest> LabelMap; // BreakContinueStack - This keeps track of where break and continue // statements should jump to. struct BreakContinue { - BreakContinue(llvm::BasicBlock *bb, llvm::BasicBlock *cb) - : BreakBlock(bb), ContinueBlock(cb) {} + BreakContinue(JumpDest Break, JumpDest Continue) + : BreakBlock(Break), ContinueBlock(Continue) {} - llvm::BasicBlock *BreakBlock; - llvm::BasicBlock *ContinueBlock; + JumpDest BreakBlock; + JumpDest ContinueBlock; }; llvm::SmallVector<BreakContinue, 8> BreakContinueStack; @@ -363,44 +624,9 @@ private: /// calling llvm.stacksave for multiple VLAs in the same scope. bool DidCallStackSave; - struct CleanupEntry { - /// CleanupEntryBlock - The block of code that does the actual cleanup. - llvm::BasicBlock *CleanupEntryBlock; - - /// CleanupExitBlock - The cleanup exit block. - llvm::BasicBlock *CleanupExitBlock; - - /// Blocks - Basic blocks that were emitted in the current cleanup scope. 
- std::vector<llvm::BasicBlock *> Blocks; - - /// BranchFixups - Branch instructions to basic blocks that haven't been - /// inserted into the current function yet. - std::vector<llvm::BranchInst *> BranchFixups; - - /// PreviousInvokeDest - The invoke handler from the start of the cleanup - /// region. - llvm::BasicBlock *PreviousInvokeDest; - - /// EHOnly - Perform this only on the exceptional edge, not the main edge. - bool EHOnly; - - explicit CleanupEntry(llvm::BasicBlock *CleanupEntryBlock, - llvm::BasicBlock *CleanupExitBlock, - llvm::BasicBlock *PreviousInvokeDest, - bool ehonly) - : CleanupEntryBlock(CleanupEntryBlock), - CleanupExitBlock(CleanupExitBlock), - PreviousInvokeDest(PreviousInvokeDest), - EHOnly(ehonly) {} - }; - - /// CleanupEntries - Stack of cleanup entries. - llvm::SmallVector<CleanupEntry, 8> CleanupEntries; - - typedef llvm::DenseMap<llvm::BasicBlock*, size_t> BlockScopeMap; - - /// BlockScopes - Map of which "cleanup scope" scope basic blocks have. - BlockScopeMap BlockScopes; + /// A block containing a single 'unreachable' instruction. Created + /// lazily by getUnreachableBlock(). + llvm::BasicBlock *UnreachableBlock; /// CXXThisDecl - When generating code for a C++ member function, /// this will hold the implicit 'this' declaration. @@ -413,31 +639,6 @@ private: ImplicitParamDecl *CXXVTTDecl; llvm::Value *CXXVTTValue; - /// CXXLiveTemporaryInfo - Holds information about a live C++ temporary. - struct CXXLiveTemporaryInfo { - /// Temporary - The live temporary. - const CXXTemporary *Temporary; - - /// ThisPtr - The pointer to the temporary. - llvm::Value *ThisPtr; - - /// DtorBlock - The destructor block. - llvm::BasicBlock *DtorBlock; - - /// CondPtr - If this is a conditional temporary, this is the pointer to the - /// condition variable that states whether the destructor should be called - /// or not. 
- llvm::Value *CondPtr; - - CXXLiveTemporaryInfo(const CXXTemporary *temporary, - llvm::Value *thisptr, llvm::BasicBlock *dtorblock, - llvm::Value *condptr) - : Temporary(temporary), ThisPtr(thisptr), DtorBlock(dtorblock), - CondPtr(condptr) { } - }; - - llvm::SmallVector<CXXLiveTemporaryInfo, 4> LiveTemporaries; - /// ConditionalBranchLevel - Contains the nesting level of the current /// conditional branch. This is used so that we know if a temporary should be /// destroyed conditionally. @@ -453,18 +654,32 @@ private: /// number that holds the value. unsigned getByRefValueLLVMField(const ValueDecl *VD) const; + llvm::BasicBlock *TerminateLandingPad; llvm::BasicBlock *TerminateHandler; llvm::BasicBlock *TrapBB; - int UniqueAggrDestructorCount; public: CodeGenFunction(CodeGenModule &cgm); ASTContext &getContext() const; CGDebugInfo *getDebugInfo() { return DebugInfo; } - llvm::BasicBlock *getInvokeDest() { return InvokeDest; } - void setInvokeDest(llvm::BasicBlock *B) { InvokeDest = B; } + /// Returns a pointer to the function's exception object slot, which + /// is assigned in every landing pad. 
+ llvm::Value *getExceptionSlot(); + + llvm::BasicBlock *getUnreachableBlock() { + if (!UnreachableBlock) { + UnreachableBlock = createBasicBlock("unreachable"); + new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock); + } + return UnreachableBlock; + } + + llvm::BasicBlock *getInvokeDest() { + if (!EHStack.requiresLandingPad()) return 0; + return getInvokeDestImpl(); + } llvm::LLVMContext &getLLVMContext() { return VMContext; } @@ -501,7 +716,8 @@ public: const llvm::StructType *, std::vector<HelperInfo> *); - llvm::Function *GenerateBlockFunction(const BlockExpr *BExpr, + llvm::Function *GenerateBlockFunction(GlobalDecl GD, + const BlockExpr *BExpr, CGBlockInfo &Info, const Decl *OuterFuncDecl, llvm::DenseMap<const Decl*, llvm::Value*> ldm); @@ -567,6 +783,15 @@ public: void EmitDtorEpilogue(const CXXDestructorDecl *Dtor, CXXDtorType Type); + /// ShouldInstrumentFunction - Return true if the current function should be + /// instrumented with __cyg_profile_func_* calls + bool ShouldInstrumentFunction(); + + /// EmitFunctionInstrumentation - Emit LLVM code to call the specified + /// instrumentation function with the current function and the call site, if + /// function instrumentation is enabled. + void EmitFunctionInstrumentation(const char *Fn); + /// EmitFunctionProlog - Emit the target specific LLVM code to load the /// arguments for the given function. This is also responsible for naming the /// LLVM function arguments. @@ -576,7 +801,7 @@ public: /// EmitFunctionEpilog - Emit the target specific LLVM code to return the /// given temporary. - void EmitFunctionEpilog(const CGFunctionInfo &FI, llvm::Value *ReturnValue); + void EmitFunctionEpilog(const CGFunctionInfo &FI); /// EmitStartEHSpec - Emit the start of the exception spec. void EmitStartEHSpec(const Decl *D); @@ -584,7 +809,12 @@ public: /// EmitEndEHSpec - Emit the end of the exception spec. 
void EmitEndEHSpec(const Decl *D); - /// getTerminateHandler - Return a handler that just calls terminate. + /// getTerminateLandingPad - Return a landing pad that just calls terminate. + llvm::BasicBlock *getTerminateLandingPad(); + + /// getTerminateHandler - Return a handler (not a landing pad, just + /// a catch handler) that just calls terminate. This is used when + /// a terminate scope encloses a try. llvm::BasicBlock *getTerminateHandler(); const llvm::Type *ConvertTypeForMem(QualType T); @@ -617,7 +847,7 @@ public: /// getBasicBlockForLabel - Return the LLVM basicblock that the specified /// label maps to. - llvm::BasicBlock *getBasicBlockForLabel(const LabelStmt *S); + JumpDest getJumpDestForLabel(const LabelStmt *S); /// SimplifyForwardingBlocks - If the given basic block is only a branch to /// another basic block, simplify it. This assumes that no other code could @@ -688,11 +918,11 @@ public: /// value needs to be stored into an alloca (for example, to avoid explicit /// PHI construction), but the type is the IR type, not the type appropriate /// for storing in memory. - llvm::Value *CreateIRTemp(QualType T, const llvm::Twine &Name = "tmp"); + llvm::AllocaInst *CreateIRTemp(QualType T, const llvm::Twine &Name = "tmp"); /// CreateMemTemp - Create a temporary memory object of the given type, with /// appropriate alignment. - llvm::Value *CreateMemTemp(QualType T, const llvm::Twine &Name = "tmp"); + llvm::AllocaInst *CreateMemTemp(QualType T, const llvm::Twine &Name = "tmp"); /// EvaluateExprAsBool - Perform the usual unary conversions on the specified /// expression and compare the result against zero, returning an Int1Ty value. 
@@ -835,15 +1065,17 @@ public: llvm::Value *NumElements, llvm::Value *This); - llvm::Constant *GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D, - const ArrayType *Array, - llvm::Value *This); + llvm::Function *GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D, + const ArrayType *Array, + llvm::Value *This); void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, llvm::Value *This); + + void EmitNewArrayInitializer(const CXXNewExpr *E, llvm::Value *NewPtr, + llvm::Value *NumElements); - void PushCXXTemporary(const CXXTemporary *Temporary, llvm::Value *Ptr); - void PopCXXTemporary(); + void EmitCXXTemporary(const CXXTemporary *Temporary, llvm::Value *Ptr); llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E); void EmitCXXDeleteExpr(const CXXDeleteExpr *E); @@ -874,10 +1106,13 @@ public: /// This function can be called with a null (unreachable) insert point. void EmitBlockVarDecl(const VarDecl &D); + typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D, + llvm::Value *Address); + /// EmitLocalBlockVarDecl - Emit a local block variable declaration. /// /// This function can be called with a null (unreachable) insert point. 
- void EmitLocalBlockVarDecl(const VarDecl &D); + void EmitLocalBlockVarDecl(const VarDecl &D, SpecialInitFn *SpecialInit = 0); void EmitStaticBlockVarDecl(const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage); @@ -938,13 +1173,8 @@ public: void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S); llvm::Constant *getUnwindResumeOrRethrowFn(); - struct CXXTryStmtInfo { - llvm::BasicBlock *SavedLandingPad; - llvm::BasicBlock *HandlerBlock; - llvm::BasicBlock *FinallyBlock; - }; - CXXTryStmtInfo EnterCXXTryStmt(const CXXTryStmt &S); - void ExitCXXTryStmt(const CXXTryStmt &S, CXXTryStmtInfo Info); + void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false); + void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false); void EmitCXXTryStmt(const CXXTryStmt &S); @@ -1050,7 +1280,7 @@ public: LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E); LValue EmitConditionalOperatorLValue(const ConditionalOperator *E); LValue EmitCastLValue(const CastExpr *E); - LValue EmitNullInitializationLValue(const CXXZeroInitValueExpr *E); + LValue EmitNullInitializationLValue(const CXXScalarValueInitExpr *E); llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar); @@ -1088,6 +1318,7 @@ public: LValue EmitObjCSuperExprLValue(const ObjCSuperExpr *E); LValue EmitStmtExprLValue(const StmtExpr *E); LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E); + LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E); //===--------------------------------------------------------------------===// // Scalar Expression Emission @@ -1114,6 +1345,11 @@ public: RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue = ReturnValueSlot()); + llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee, + llvm::Value * const *ArgBegin, + llvm::Value * const *ArgEnd, + const llvm::Twine &Name = ""); + llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This, const llvm::Type *Ty); llvm::Value 
*BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type, @@ -1146,6 +1382,14 @@ public: llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E); llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E); + llvm::Value *EmitNeonCall(llvm::Function *F, + llvm::SmallVectorImpl<llvm::Value*> &O, + const char *name, bool splat = false, + unsigned shift = 0, bool rightshift = false); + llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx); + llvm::Value *EmitNeonShiftVector(llvm::Value *V, const llvm::Type *Ty, + bool negateForRightShift); + llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E); llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E); @@ -1164,7 +1408,8 @@ public: /// EmitReferenceBindingToExpr - Emits a reference binding to the passed in /// expression. Will emit a temporary variable if E is not an LValue. - RValue EmitReferenceBindingToExpr(const Expr* E, bool IsInitializer = false); + RValue EmitReferenceBindingToExpr(const Expr* E, + const NamedDecl *InitializedDecl); //===--------------------------------------------------------------------===// // Expression Emission @@ -1260,7 +1505,7 @@ public: /// GenerateCXXGlobalDtorFunc - Generates code for destroying global /// variables. void GenerateCXXGlobalDtorFunc(llvm::Function *Fn, - const std::vector<std::pair<llvm::Constant*, + const std::vector<std::pair<llvm::WeakVH, llvm::Constant*> > &DtorsAndObjects); void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn, const VarDecl *D); @@ -1308,7 +1553,6 @@ public: RValue EmitDelegateCallArg(const VarDecl *Param); private: - void EmitReturnOfRValue(RValue RV, QualType Ty); /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty @@ -1331,13 +1575,6 @@ private: const TargetInfo::ConstraintInfo &Info, const Expr *InputExpr, std::string &ConstraintStr); - /// EmitCleanupBlock - emits a single cleanup block. 
- void EmitCleanupBlock(); - - /// AddBranchFixup - adds a branch instruction to the list of fixups for the - /// current cleanup scope. - void AddBranchFixup(llvm::BranchInst *BI); - /// EmitCallArgs - Emit call arguments for a function. /// The CallArgTypeInfo parameter is used for iterating over the known /// argument types of the function being called. @@ -1381,6 +1618,8 @@ private: const TargetCodeGenInfo &getTargetHooks() const { return CGM.getTargetCodeGenInfo(); } + + void EmitDeclMetadata(); }; diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp index 103024c..bf606a6 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp @@ -18,11 +18,12 @@ #include "CGObjCRuntime.h" #include "Mangle.h" #include "TargetInfo.h" -#include "clang/CodeGen/CodeGenOptions.h" +#include "clang/Frontend/CodeGenOptions.h" #include "clang/AST/ASTContext.h" #include "clang/AST/CharUnits.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclTemplate.h" #include "clang/AST/RecordLayout.h" #include "clang/Basic/Builtins.h" #include "clang/Basic/Diagnostic.h" @@ -86,8 +87,10 @@ void CodeGenModule::createObjCRuntime() { } void CodeGenModule::createCXXABI() { - // For now, just create an Itanium ABI. 
- ABI = CreateItaniumCXXABI(*this); + if (Context.Target.getCXXABI() == "microsoft") + ABI = CreateMicrosoftCXXABI(*this); + else + ABI = CreateItaniumCXXABI(*this); } void CodeGenModule::Release() { @@ -101,6 +104,9 @@ void CodeGenModule::Release() { EmitCtorList(GlobalDtors, "llvm.global_dtors"); EmitAnnotations(); EmitLLVMUsed(); + + if (getCodeGenOpts().EmitDeclMetadata) + EmitDeclMetadata(); } bool CodeGenModule::isTargetDarwin() const { @@ -149,7 +155,38 @@ CodeGenModule::getDeclVisibilityMode(const Decl *D) const { return LangOptions::Protected; } } + + if (getLangOptions().CPlusPlus) { + // Entities subject to an explicit instantiation declaration get default + // visibility. + if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) { + if (Function->getTemplateSpecializationKind() + == TSK_ExplicitInstantiationDeclaration) + return LangOptions::Default; + } else if (const ClassTemplateSpecializationDecl *ClassSpec + = dyn_cast<ClassTemplateSpecializationDecl>(D)) { + if (ClassSpec->getSpecializationKind() + == TSK_ExplicitInstantiationDeclaration) + return LangOptions::Default; + } else if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D)) { + if (Record->getTemplateSpecializationKind() + == TSK_ExplicitInstantiationDeclaration) + return LangOptions::Default; + } else if (const VarDecl *Var = dyn_cast<VarDecl>(D)) { + if (Var->isStaticDataMember() && + (Var->getTemplateSpecializationKind() + == TSK_ExplicitInstantiationDeclaration)) + return LangOptions::Default; + } + // If -fvisibility-inlines-hidden was provided, then inline C++ member + // functions get "hidden" visibility by default. + if (getLangOptions().InlineVisibilityHidden) + if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) + if (Method->isInlined()) + return LangOptions::Hidden; + } + // This decl should have the same visibility as its parent. 
if (const DeclContext *DC = D->getDeclContext()) return getDeclVisibilityMode(cast<Decl>(DC)); @@ -176,32 +213,44 @@ void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV, } } -void CodeGenModule::getMangledName(MangleBuffer &Buffer, GlobalDecl GD) { +llvm::StringRef CodeGenModule::getMangledName(GlobalDecl GD) { const NamedDecl *ND = cast<NamedDecl>(GD.getDecl()); - if (const CXXConstructorDecl *D = dyn_cast<CXXConstructorDecl>(ND)) - return getMangledCXXCtorName(Buffer, D, GD.getCtorType()); - if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(ND)) - return getMangledCXXDtorName(Buffer, D, GD.getDtorType()); - - return getMangledName(Buffer, ND); -} + llvm::StringRef &Str = MangledDeclNames[GD.getCanonicalDecl()]; + if (!Str.empty()) + return Str; -/// \brief Retrieves the mangled name for the given declaration. -/// -/// If the given declaration requires a mangled name, returns an -/// const char* containing the mangled name. Otherwise, returns -/// the unmangled name. -/// -void CodeGenModule::getMangledName(MangleBuffer &Buffer, - const NamedDecl *ND) { if (!getMangleContext().shouldMangleDeclName(ND)) { - assert(ND->getIdentifier() && "Attempt to mangle unnamed decl."); - Buffer.setString(ND->getNameAsCString()); - return; + IdentifierInfo *II = ND->getIdentifier(); + assert(II && "Attempt to mangle unnamed decl."); + + Str = II->getName(); + return Str; } + + llvm::SmallString<256> Buffer; + if (const CXXConstructorDecl *D = dyn_cast<CXXConstructorDecl>(ND)) + getMangleContext().mangleCXXCtor(D, GD.getCtorType(), Buffer); + else if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(ND)) + getMangleContext().mangleCXXDtor(D, GD.getDtorType(), Buffer); + else if (const BlockDecl *BD = dyn_cast<BlockDecl>(ND)) + getMangleContext().mangleBlock(GD, BD, Buffer); + else + getMangleContext().mangleName(ND, Buffer); + + // Allocate space for the mangled name. 
+ size_t Length = Buffer.size(); + char *Name = MangledNamesAllocator.Allocate<char>(Length); + std::copy(Buffer.begin(), Buffer.end(), Name); + + Str = llvm::StringRef(Name, Length); + + return Str; +} - getMangleContext().mangleName(ND, Buffer.getBuffer()); +void CodeGenModule::getMangledName(GlobalDecl GD, MangleBuffer &Buffer, + const BlockDecl *BD) { + getMangleContext().mangleBlock(GD, BD, Buffer.getBuffer()); } llvm::GlobalValue *CodeGenModule::GetGlobalValue(llvm::StringRef Name) { @@ -333,35 +382,39 @@ llvm::GlobalValue::LinkageTypes CodeGenModule::getFunctionLinkage(const FunctionDecl *D) { GVALinkage Linkage = GetLinkageForFunction(getContext(), D, Features); - if (Linkage == GVA_Internal) { + if (Linkage == GVA_Internal) return llvm::Function::InternalLinkage; - } else if (D->hasAttr<DLLExportAttr>()) { + + if (D->hasAttr<DLLExportAttr>()) return llvm::Function::DLLExportLinkage; - } else if (D->hasAttr<WeakAttr>()) { + + if (D->hasAttr<WeakAttr>()) return llvm::Function::WeakAnyLinkage; - } else if (Linkage == GVA_C99Inline) { - // In C99 mode, 'inline' functions are guaranteed to have a strong - // definition somewhere else, so we can use available_externally linkage. + + // In C99 mode, 'inline' functions are guaranteed to have a strong + // definition somewhere else, so we can use available_externally linkage. + if (Linkage == GVA_C99Inline) return llvm::Function::AvailableExternallyLinkage; - } else if (Linkage == GVA_CXXInline || Linkage == GVA_TemplateInstantiation) { - // In C++, the compiler has to emit a definition in every translation unit - // that references the function. We should use linkonce_odr because - // a) if all references in this translation unit are optimized away, we - // don't need to codegen it. b) if the function persists, it needs to be - // merged with other definitions. c) C++ has the ODR, so we know the - // definition is dependable. 
+ + // In C++, the compiler has to emit a definition in every translation unit + // that references the function. We should use linkonce_odr because + // a) if all references in this translation unit are optimized away, we + // don't need to codegen it. b) if the function persists, it needs to be + // merged with other definitions. c) C++ has the ODR, so we know the + // definition is dependable. + if (Linkage == GVA_CXXInline || Linkage == GVA_TemplateInstantiation) return llvm::Function::LinkOnceODRLinkage; - } else if (Linkage == GVA_ExplicitTemplateInstantiation) { - // An explicit instantiation of a template has weak linkage, since - // explicit instantiations can occur in multiple translation units - // and must all be equivalent. However, we are not allowed to - // throw away these explicit instantiations. + + // An explicit instantiation of a template has weak linkage, since + // explicit instantiations can occur in multiple translation units + // and must all be equivalent. However, we are not allowed to + // throw away these explicit instantiations. + if (Linkage == GVA_ExplicitTemplateInstantiation) return llvm::Function::WeakODRLinkage; - } else { - assert(Linkage == GVA_StrongExternal); - // Otherwise, we have strong external linkage. - return llvm::Function::ExternalLinkage; - } + + // Otherwise, we have strong external linkage. + assert(Linkage == GVA_StrongExternal); + return llvm::Function::ExternalLinkage; } @@ -521,8 +574,7 @@ void CodeGenModule::EmitDeferred() { // ignore these cases. // // TODO: That said, looking this up multiple times is very wasteful. 
- MangleBuffer Name; - getMangledName(Name, D); + llvm::StringRef Name = getMangledName(D); llvm::GlobalValue *CGRef = GetGlobalValue(Name); assert(CGRef && "Deferred decl wasn't referenced?"); @@ -586,6 +638,47 @@ llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV, return llvm::ConstantStruct::get(VMContext, Fields, 4, false); } +static CodeGenModule::GVALinkage +GetLinkageForVariable(ASTContext &Context, const VarDecl *VD) { + // If this is a static data member, compute the kind of template + // specialization. Otherwise, this variable is not part of a + // template. + TemplateSpecializationKind TSK = TSK_Undeclared; + if (VD->isStaticDataMember()) + TSK = VD->getTemplateSpecializationKind(); + + Linkage L = VD->getLinkage(); + if (L == ExternalLinkage && Context.getLangOptions().CPlusPlus && + VD->getType()->getLinkage() == UniqueExternalLinkage) + L = UniqueExternalLinkage; + + switch (L) { + case NoLinkage: + case InternalLinkage: + case UniqueExternalLinkage: + return CodeGenModule::GVA_Internal; + + case ExternalLinkage: + switch (TSK) { + case TSK_Undeclared: + case TSK_ExplicitSpecialization: + return CodeGenModule::GVA_StrongExternal; + + case TSK_ExplicitInstantiationDeclaration: + llvm_unreachable("Variable should not be instantiated"); + // Fall through to treat this like any other instantiation. + + case TSK_ExplicitInstantiationDefinition: + return CodeGenModule::GVA_ExplicitTemplateInstantiation; + + case TSK_ImplicitInstantiation: + return CodeGenModule::GVA_TemplateInstantiation; + } + } + + return CodeGenModule::GVA_StrongExternal; +} + bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) { // Never defer when EmitAllDecls is specified or the decl has // attribute used. @@ -634,24 +727,10 @@ bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) { } } - // Static data may be deferred, but out-of-line static data members - // cannot be. 
- Linkage L = VD->getLinkage(); - if (L == ExternalLinkage && getContext().getLangOptions().CPlusPlus && - VD->getType()->getLinkage() == UniqueExternalLinkage) - L = UniqueExternalLinkage; - - switch (L) { - case NoLinkage: - case InternalLinkage: - case UniqueExternalLinkage: - // Initializer has side effects? - if (VD->getInit() && VD->getInit()->HasSideEffects(Context)) - return false; - return !(VD->isStaticDataMember() && VD->isOutOfLine()); - - case ExternalLinkage: - break; + GVALinkage L = GetLinkageForVariable(getContext(), VD); + if (L == GVA_Internal || L == GVA_TemplateInstantiation) { + if (!(VD->getInit() && VD->getInit()->HasSideEffects(Context))) + return true; } return false; @@ -716,8 +795,7 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) { // If the value has already been used, add it directly to the // DeferredDeclsToEmit list. - MangleBuffer MangledName; - getMangledName(MangledName, GD); + llvm::StringRef MangledName = getMangledName(GD); if (GetGlobalValue(MangledName)) DeferredDeclsToEmit.push_back(GD); else { @@ -735,18 +813,27 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) { Context.getSourceManager(), "Generating code for declaration"); - if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) - if (Method->isVirtual()) - getVTables().EmitThunks(GD); + if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) { + // At -O0, don't generate IR for functions with available_externally + // linkage. 
+ if (CodeGenOpts.OptimizationLevel == 0 && + getFunctionLinkage(Function) + == llvm::Function::AvailableExternallyLinkage) + return; + + if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) { + if (Method->isVirtual()) + getVTables().EmitThunks(GD); - if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D)) - return EmitCXXConstructor(CD, GD.getCtorType()); + if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(Method)) + return EmitCXXConstructor(CD, GD.getCtorType()); - if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D)) - return EmitCXXDestructor(DD, GD.getDtorType()); + if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(Method)) + return EmitCXXDestructor(DD, GD.getDtorType()); + } - if (isa<FunctionDecl>(D)) return EmitGlobalFunctionDefinition(GD); + } if (const VarDecl *VD = dyn_cast<VarDecl>(D)) return EmitGlobalVarDefinition(VD); @@ -797,6 +884,7 @@ CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName, std::vector<const llvm::Type*>(), false); IsIncompleteFunction = true; } + llvm::Function *F = llvm::Function::Create(FTy, llvm::Function::ExternalLinkage, MangledName, &getModule()); @@ -857,8 +945,8 @@ llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD, // If there was no specific requested type, just convert it now. 
if (!Ty) Ty = getTypes().ConvertType(cast<ValueDecl>(GD.getDecl())->getType()); - MangleBuffer MangledName; - getMangledName(MangledName, GD); + + llvm::StringRef MangledName = getMangledName(GD); return GetOrCreateLLVMFunction(MangledName, Ty, GD); } @@ -961,8 +1049,7 @@ llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D, const llvm::PointerType *PTy = llvm::PointerType::get(Ty, ASTTy.getAddressSpace()); - MangleBuffer MangledName; - getMangledName(MangledName, D); + llvm::StringRef MangledName = getMangledName(D); return GetOrCreateLLVMGlobal(MangledName, PTy, D); } @@ -981,8 +1068,7 @@ void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) { // If we have not seen a reference to this variable yet, place it // into the deferred declarations table to be emitted if needed // later. - MangleBuffer MangledName; - getMangledName(MangledName, D); + llvm::StringRef MangledName = getMangledName(D); if (!GetGlobalValue(MangledName)) { DeferredDecls[MangledName] = D; return; @@ -1008,7 +1094,7 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) { // If this class has a key function, use that to determine the linkage of // the vtable. const FunctionDecl *Def = 0; - if (KeyFunction->getBody(Def)) + if (KeyFunction->hasBody(Def)) KeyFunction = cast<CXXMethodDecl>(Def); switch (KeyFunction->getTemplateSpecializationKind()) { @@ -1049,47 +1135,6 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) { return llvm::GlobalVariable::WeakODRLinkage; } -static CodeGenModule::GVALinkage -GetLinkageForVariable(ASTContext &Context, const VarDecl *VD) { - // If this is a static data member, compute the kind of template - // specialization. Otherwise, this variable is not part of a - // template. 
- TemplateSpecializationKind TSK = TSK_Undeclared; - if (VD->isStaticDataMember()) - TSK = VD->getTemplateSpecializationKind(); - - Linkage L = VD->getLinkage(); - if (L == ExternalLinkage && Context.getLangOptions().CPlusPlus && - VD->getType()->getLinkage() == UniqueExternalLinkage) - L = UniqueExternalLinkage; - - switch (L) { - case NoLinkage: - case InternalLinkage: - case UniqueExternalLinkage: - return CodeGenModule::GVA_Internal; - - case ExternalLinkage: - switch (TSK) { - case TSK_Undeclared: - case TSK_ExplicitSpecialization: - return CodeGenModule::GVA_StrongExternal; - - case TSK_ExplicitInstantiationDeclaration: - llvm_unreachable("Variable should not be instantiated"); - // Fall through to treat this like any other instantiation. - - case TSK_ExplicitInstantiationDefinition: - return CodeGenModule::GVA_ExplicitTemplateInstantiation; - - case TSK_ImplicitInstantiation: - return CodeGenModule::GVA_TemplateInstantiation; - } - } - - return CodeGenModule::GVA_StrongExternal; -} - CharUnits CodeGenModule::GetTargetTypeStoreSize(const llvm::Type *Ty) const { return CharUnits::fromQuantity( TheTargetData.getTypeStoreSizeInBits(Ty) / Context.getCharWidth()); @@ -1367,8 +1412,7 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) { const AliasAttr *AA = D->getAttr<AliasAttr>(); assert(AA && "Not an alias?"); - MangleBuffer MangledName; - getMangledName(MangledName, GD); + llvm::StringRef MangledName = getMangledName(GD); // If there is a definition in the module, then it wins over the alias. // This is dubious, but allow it to be safe. Just ignore the alias. 
@@ -1409,7 +1453,7 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) { Entry->getType())); Entry->eraseFromParent(); } else { - GA->setName(MangledName.getString()); + GA->setName(MangledName); } // Set attributes which are particular to an alias; this is a @@ -1418,7 +1462,7 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) { if (D->hasAttr<DLLExportAttr>()) { if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { // The dllexport attribute is ignored for undefined symbols. - if (FD->getBody()) + if (FD->hasBody()) GA->setLinkage(llvm::Function::DLLExportLinkage); } else { GA->setLinkage(llvm::Function::DLLExportLinkage); @@ -2004,3 +2048,73 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) { assert(isa<TypeDecl>(D) && "Unsupported decl kind"); } } + +/// Turns the given pointer into a constant. +static llvm::Constant *GetPointerConstant(llvm::LLVMContext &Context, + const void *Ptr) { + uintptr_t PtrInt = reinterpret_cast<uintptr_t>(Ptr); + const llvm::Type *i64 = llvm::Type::getInt64Ty(Context); + return llvm::ConstantInt::get(i64, PtrInt); +} + +static void EmitGlobalDeclMetadata(CodeGenModule &CGM, + llvm::NamedMDNode *&GlobalMetadata, + GlobalDecl D, + llvm::GlobalValue *Addr) { + if (!GlobalMetadata) + GlobalMetadata = + CGM.getModule().getOrInsertNamedMetadata("clang.global.decl.ptrs"); + + // TODO: should we report variant information for ctors/dtors? + llvm::Value *Ops[] = { + Addr, + GetPointerConstant(CGM.getLLVMContext(), D.getDecl()) + }; + GlobalMetadata->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops, 2)); +} + +/// Emits metadata nodes associating all the global values in the +/// current module with the Decls they came from. This is useful for +/// projects using IR gen as a subroutine. +/// +/// Since there's currently no way to associate an MDNode directly +/// with an llvm::GlobalValue, we create a global named metadata +/// with the name 'clang.global.decl.ptrs'. 
+void CodeGenModule::EmitDeclMetadata() { + llvm::NamedMDNode *GlobalMetadata = 0; + + // StaticLocalDeclMap + for (llvm::DenseMap<GlobalDecl,llvm::StringRef>::iterator + I = MangledDeclNames.begin(), E = MangledDeclNames.end(); + I != E; ++I) { + llvm::GlobalValue *Addr = getModule().getNamedValue(I->second); + EmitGlobalDeclMetadata(*this, GlobalMetadata, I->first, Addr); + } +} + +/// Emits metadata nodes for all the local variables in the current +/// function. +void CodeGenFunction::EmitDeclMetadata() { + if (LocalDeclMap.empty()) return; + + llvm::LLVMContext &Context = getLLVMContext(); + + // Find the unique metadata ID for this name. + unsigned DeclPtrKind = Context.getMDKindID("clang.decl.ptr"); + + llvm::NamedMDNode *GlobalMetadata = 0; + + for (llvm::DenseMap<const Decl*, llvm::Value*>::iterator + I = LocalDeclMap.begin(), E = LocalDeclMap.end(); I != E; ++I) { + const Decl *D = I->first; + llvm::Value *Addr = I->second; + + if (llvm::AllocaInst *Alloca = dyn_cast<llvm::AllocaInst>(Addr)) { + llvm::Value *DAddr = GetPointerConstant(getLLVMContext(), D); + Alloca->setMetadata(DeclPtrKind, llvm::MDNode::get(Context, &DAddr, 1)); + } else if (llvm::GlobalValue *GV = dyn_cast<llvm::GlobalValue>(Addr)) { + GlobalDecl GD = GlobalDecl(cast<VarDecl>(D)); + EmitGlobalDeclMetadata(CGM, GlobalMetadata, GD, GV); + } + } +} diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h index 319744c4..27f15fc 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h +++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h @@ -75,6 +75,25 @@ namespace CodeGen { class CGObjCRuntime; class MangleBuffer; + struct OrderGlobalInits { + unsigned int priority; + unsigned int lex_order; + OrderGlobalInits(unsigned int p, unsigned int l) + : priority(p), lex_order(l) {} + + bool operator==(const OrderGlobalInits &RHS) const { + return priority == RHS.priority && + lex_order == RHS.lex_order; + } + + bool 
operator<(const OrderGlobalInits &RHS) const { + if (priority < RHS.priority) + return true; + + return priority == RHS.priority && lex_order < RHS.lex_order; + } + }; + /// CodeGenModule - This class organizes the cross-function state that is used /// while generating LLVM code. class CodeGenModule : public BlockModule { @@ -130,6 +149,10 @@ class CodeGenModule : public BlockModule { /// priorities to be emitted when the translation unit is complete. CtorList GlobalDtors; + /// MangledDeclNames - A map of canonical GlobalDecls to their mangled names. + llvm::DenseMap<GlobalDecl, llvm::StringRef> MangledDeclNames; + llvm::BumpPtrAllocator MangledNamesAllocator; + std::vector<llvm::Constant*> Annotations; llvm::StringMap<llvm::Constant*> CFConstantStringMap; @@ -139,10 +162,16 @@ class CodeGenModule : public BlockModule { /// CXXGlobalInits - Global variables with initializers that need to run /// before main. std::vector<llvm::Constant*> CXXGlobalInits; + + /// - Global variables with initializers whose order of initialization + /// is set by init_priority attribute. + + llvm::SmallVector<std::pair<OrderGlobalInits, llvm::Function*>, 8> + PrioritizedCXXGlobalInits; /// CXXGlobalDtors - Global destructor functions and arguments that need to /// run on termination. - std::vector<std::pair<llvm::Constant*,llvm::Constant*> > CXXGlobalDtors; + std::vector<std::pair<llvm::WeakVH,llvm::Constant*> > CXXGlobalDtors; /// CFConstantStringClassRef - Cached reference to the class for constant /// strings. This value has type int * but is actually an Obj-C class pointer. @@ -315,6 +344,10 @@ public: llvm::GlobalValue *GetAddrOfCXXDestructor(const CXXDestructorDecl *D, CXXDtorType Type); + // GetCXXMemberFunctionPointerValue - Given a method declaration, return the + // integer used in a member function pointer to refer to that value. 
+ llvm::Constant *GetCXXMemberFunctionPointerValue(const CXXMethodDecl *MD); + /// getBuiltinLibFunction - Given a builtin id for a function like /// "__builtin_fabsf", return a Function* for "fabsf". llvm::Value *getBuiltinLibFunction(const FunctionDecl *FD, @@ -346,7 +379,9 @@ public: /// AddCXXDtorEntry - Add a destructor and object to add to the C++ global /// destructor function. - void AddCXXDtorEntry(llvm::Constant *DtorFn, llvm::Constant *Object); + void AddCXXDtorEntry(llvm::Constant *DtorFn, llvm::Constant *Object) { + CXXGlobalDtors.push_back(std::make_pair(DtorFn, Object)); + } /// CreateRuntimeFunction - Create a new runtime function with the specified /// type and name. @@ -409,9 +444,13 @@ public: /// which only apply to a function definintion. void SetLLVMFunctionAttributesForDefinition(const Decl *D, llvm::Function *F); - /// ReturnTypeUsesSret - Return true iff the given type uses 'sret' when used + /// ReturnTypeUsesSRet - Return true iff the given type uses 'sret' when used /// as a return type. - bool ReturnTypeUsesSret(const CGFunctionInfo &FI); + bool ReturnTypeUsesSRet(const CGFunctionInfo &FI); + + /// ReturnTypeUsesSret - Return true iff the given type uses 'fpret' when used + /// as a return type. + bool ReturnTypeUsesFPRet(QualType ResultType); /// ConstructAttributeList - Get the LLVM attributes and calling convention to /// use for a particular function type. 
@@ -427,15 +466,8 @@ public: AttributeListType &PAL, unsigned &CallingConv); - void getMangledName(MangleBuffer &Buffer, GlobalDecl D); - void getMangledName(MangleBuffer &Buffer, const NamedDecl *ND); - void getMangledName(MangleBuffer &Buffer, const BlockDecl *BD); - void getMangledCXXCtorName(MangleBuffer &Buffer, - const CXXConstructorDecl *D, - CXXCtorType Type); - void getMangledCXXDtorName(MangleBuffer &Buffer, - const CXXDestructorDecl *D, - CXXDtorType Type); + llvm::StringRef getMangledName(GlobalDecl GD); + void getMangledName(GlobalDecl GD, MangleBuffer &Buffer, const BlockDecl *BD); void EmitTentativeDefinition(const VarDecl *D); @@ -566,6 +598,8 @@ private: /// references to global which may otherwise be optimized out. void EmitLLVMUsed(void); + void EmitDeclMetadata(); + /// MayDeferGeneration - Determine if the given decl can be emitted /// lazily; this is only relevant for definitions. The given decl /// must be either a function or var decl. diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp index a46dc72..d469b90 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp @@ -42,11 +42,13 @@ CodeGenTypes::~CodeGenTypes() { delete &*I++; } -/// ConvertType - Convert the specified type to its LLVM form. -const llvm::Type *CodeGenTypes::ConvertType(QualType T) { - llvm::PATypeHolder Result = ConvertTypeRecursive(T); - - // Any pointers that were converted defered evaluation of their pointee type, +/// HandleLateResolvedPointers - For top-level ConvertType calls, this handles +/// pointers that are referenced but have not been converted yet. This is used +/// to handle cyclic structures properly. 
+void CodeGenTypes::HandleLateResolvedPointers() { + assert(!PointersToResolve.empty() && "No pointers to resolve!"); + + // Any pointers that were converted deferred evaluation of their pointee type, // creating an opaque type instead. This is in order to avoid problems with // circular types. Loop through all these defered pointees, if any, and // resolve them now. @@ -59,7 +61,21 @@ const llvm::Type *CodeGenTypes::ConvertType(QualType T) { const llvm::Type *NT = ConvertTypeForMemRecursive(P.first); P.second->refineAbstractTypeTo(NT); } +} + +/// ConvertType - Convert the specified type to its LLVM form. +const llvm::Type *CodeGenTypes::ConvertType(QualType T, bool IsRecursive) { + const llvm::Type *Result = ConvertTypeRecursive(T); + + // If this is a top-level call to ConvertType and sub-conversions caused + // pointers to get lazily built as opaque types, resolve the pointers, which + // might cause Result to be merged away. + if (!IsRecursive && !PointersToResolve.empty()) { + llvm::PATypeHolder ResultHandle = Result; + HandleLateResolvedPointers(); + Result = ResultHandle; + } return Result; } @@ -80,21 +96,12 @@ const llvm::Type *CodeGenTypes::ConvertTypeRecursive(QualType T) { return ResultType; } -const llvm::Type *CodeGenTypes::ConvertTypeForMemRecursive(QualType T) { - const llvm::Type *ResultType = ConvertTypeRecursive(T); - if (ResultType->isIntegerTy(1)) - return llvm::IntegerType::get(getLLVMContext(), - (unsigned)Context.getTypeSize(T)); - // FIXME: Should assert that the llvm type and AST type has the same size. - return ResultType; -} - /// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from /// ConvertType in that it is used to convert to the memory representation for /// a type. For example, the scalar representation for _Bool is i1, but the /// memory representation is usually i8 or i32, depending on the target. 
-const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) { - const llvm::Type *R = ConvertType(T); +const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool IsRecursive){ + const llvm::Type *R = ConvertType(T, IsRecursive); // If this is a non-bool type, don't map it. if (!R->isIntegerTy(1)) @@ -108,7 +115,7 @@ const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) { // Code to verify a given function type is complete, i.e. the return type // and all of the argument types are complete. -static const TagType *VerifyFuncTypeComplete(const Type* T) { +const TagType *CodeGenTypes::VerifyFuncTypeComplete(const Type* T) { const FunctionType *FT = cast<FunctionType>(T); if (const TagType* TT = FT->getResultType()->getAs<TagType>()) if (!TT->getDecl()->isDefinition()) @@ -201,7 +208,7 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) { case BuiltinType::ObjCSel: // LLVM void type can only be used as the result of a function call. Just // map to the same as char. - return llvm::IntegerType::get(getLLVMContext(), 8); + return llvm::Type::getInt8Ty(getLLVMContext()); case BuiltinType::Bool: // Note that we always return bool as i1 for use as a scalar type. 
@@ -233,7 +240,7 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) { case BuiltinType::NullPtr: { // Model std::nullptr_t as i8* - const llvm::Type *Ty = llvm::IntegerType::get(getLLVMContext(), 8); + const llvm::Type *Ty = llvm::Type::getInt8Ty(getLLVMContext()); return llvm::PointerType::getUnqual(Ty); } @@ -284,7 +291,8 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) { assert(A.getIndexTypeCVRQualifiers() == 0 && "FIXME: We only handle trivial array types so far!"); // int X[] -> [0 x int] - return llvm::ArrayType::get(ConvertTypeForMemRecursive(A.getElementType()), 0); + return llvm::ArrayType::get(ConvertTypeForMemRecursive(A.getElementType()), + 0); } case Type::ConstantArray: { const ConstantArrayType &A = cast<ConstantArrayType>(Ty); @@ -299,8 +307,12 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) { } case Type::FunctionNoProto: case Type::FunctionProto: { - // First, check whether we can build the full function type. - if (const TagType* TT = VerifyFuncTypeComplete(&Ty)) { + // First, check whether we can build the full function type. If the + // function type depends on an incomplete type (e.g. a struct or enum), we + // cannot lower the function type. Instead, turn it into an Opaque pointer + // and have UpdateCompletedType revisit the function type when/if the opaque + // argument type is defined. + if (const TagType *TT = VerifyFuncTypeComplete(&Ty)) { // This function's type depends on an incomplete tag type; make sure // we have an opaque type corresponding to the tag type. ConvertTagDeclType(TT->getDecl()); @@ -309,17 +321,25 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) { FunctionTypes.insert(std::make_pair(&Ty, ResultType)); return ResultType; } + // The function type can be built; call the appropriate routines to // build it. 
- if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(&Ty)) - return GetFunctionType(getFunctionInfo( - CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT,0))), - FPT->isVariadic()); - - const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(&Ty); - return GetFunctionType(getFunctionInfo( - CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT,0))), - true); + const CGFunctionInfo *FI; + bool isVariadic; + if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(&Ty)) { + FI = &getFunctionInfo( + CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)), + true /*Recursive*/); + isVariadic = FPT->isVariadic(); + } else { + const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(&Ty); + FI = &getFunctionInfo( + CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)), + true /*Recursive*/); + isVariadic = true; + } + + return GetFunctionType(*FI, isVariadic, true); } case Type::ObjCObject: diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h index fc28c3a..c7f48e6 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h +++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h @@ -94,6 +94,12 @@ private: /// is available only for ConvertType(). CovertType() is preferred /// interface to convert type T into a llvm::Type. const llvm::Type *ConvertNewType(QualType T); + + /// HandleLateResolvedPointers - For top-level ConvertType calls, this handles + /// pointers that are referenced but have not been converted yet. This is + /// used to handle cyclic structures properly. + void HandleLateResolvedPointers(); + public: CodeGenTypes(ASTContext &Ctx, llvm::Module &M, const llvm::TargetData &TD, const ABIInfo &Info); @@ -106,22 +112,29 @@ public: llvm::LLVMContext &getLLVMContext() { return TheModule.getContext(); } /// ConvertType - Convert type T into a llvm::Type. 
- const llvm::Type *ConvertType(QualType T); + const llvm::Type *ConvertType(QualType T, bool IsRecursive = false); const llvm::Type *ConvertTypeRecursive(QualType T); /// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from /// ConvertType in that it is used to convert to the memory representation for /// a type. For example, the scalar representation for _Bool is i1, but the /// memory representation is usually i8 or i32, depending on the target. - const llvm::Type *ConvertTypeForMem(QualType T); - const llvm::Type *ConvertTypeForMemRecursive(QualType T); + const llvm::Type *ConvertTypeForMem(QualType T, bool IsRecursive = false); + const llvm::Type *ConvertTypeForMemRecursive(QualType T) { + return ConvertTypeForMem(T, true); + } /// GetFunctionType - Get the LLVM function type for \arg Info. const llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info, - bool IsVariadic); + bool IsVariadic, + bool IsRecursive = false); const llvm::FunctionType *GetFunctionType(GlobalDecl GD); + /// VerifyFuncTypeComplete - Utility to check whether a function type can + /// be converted to an LLVM type (i.e. doesn't depend on an incomplete tag + /// type). + static const TagType *VerifyFuncTypeComplete(const Type* T); /// GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable, /// given a CXXMethodDecl. If the method to has an incomplete return type, @@ -150,8 +163,11 @@ public: return getFunctionInfo(Ty->getResultType(), Args, Ty->getExtInfo()); } - const CGFunctionInfo &getFunctionInfo(CanQual<FunctionProtoType> Ty); - const CGFunctionInfo &getFunctionInfo(CanQual<FunctionNoProtoType> Ty); + + const CGFunctionInfo &getFunctionInfo(CanQual<FunctionProtoType> Ty, + bool IsRecursive = false); + const CGFunctionInfo &getFunctionInfo(CanQual<FunctionNoProtoType> Ty, + bool IsRecursive = false); // getFunctionInfo - Get the function info for a member function. 
const CGFunctionInfo &getFunctionInfo(const CXXRecordDecl *RD, @@ -172,7 +188,8 @@ public: /// \param ArgTys - must all actually be canonical as params const CGFunctionInfo &getFunctionInfo(CanQualType RetTy, const llvm::SmallVectorImpl<CanQualType> &ArgTys, - const FunctionType::ExtInfo &Info); + const FunctionType::ExtInfo &Info, + bool IsRecursive = false); /// \brief Compute a new LLVM record layout object for the given record. CGRecordLayout *ComputeRecordLayout(const RecordDecl *D); @@ -185,7 +202,8 @@ public: // These are internal details of CGT that shouldn't be used externally. /// GetExpandedTypes - Expand the type \arg Ty into the LLVM /// argument types it would be passed as on the provided vector \arg /// ArgTys. See ABIArgInfo::Expand. - void GetExpandedTypes(QualType Ty, std::vector<const llvm::Type*> &ArgTys); + void GetExpandedTypes(QualType Ty, std::vector<const llvm::Type*> &ArgTys, + bool IsRecursive); /// ContainsPointerToDataMember - Return whether the given type contains a /// pointer to a data member. 
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h b/contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h index b8a98d7..26dea40 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h +++ b/contrib/llvm/tools/clang/lib/CodeGen/GlobalDecl.h @@ -36,7 +36,7 @@ class GlobalDecl { Value.setPointer(D); } - + public: GlobalDecl() {} @@ -50,6 +50,14 @@ public: GlobalDecl(const CXXDestructorDecl *D, CXXDtorType Type) : Value(D, Type) {} + GlobalDecl getCanonicalDecl() const { + GlobalDecl CanonGD; + CanonGD.Value.setPointer(Value.getPointer()->getCanonicalDecl()); + CanonGD.Value.setInt(Value.getInt()); + + return CanonGD; + } + const Decl *getDecl() const { return Value.getPointer(); } CXXCtorType getCtorType() const { diff --git a/contrib/llvm/tools/clang/lib/CodeGen/Makefile b/contrib/llvm/tools/clang/lib/CodeGen/Makefile index 3cea6bb..4b93524 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/Makefile +++ b/contrib/llvm/tools/clang/lib/CodeGen/Makefile @@ -12,14 +12,9 @@ # ##===----------------------------------------------------------------------===## -LEVEL = ../../../.. +CLANG_LEVEL := ../.. LIBRARYNAME := clangCodeGen BUILD_ARCHIVE = 1 -CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include -ifdef CLANG_VENDOR -CPP.Flags += -DCLANG_VENDOR='"$(CLANG_VENDOR) "' -endif - -include $(LEVEL)/Makefile.common +include $(CLANG_LEVEL)/Makefile diff --git a/contrib/llvm/tools/clang/lib/CodeGen/Mangle.cpp b/contrib/llvm/tools/clang/lib/CodeGen/Mangle.cpp index 6c2a648..30ee541 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/Mangle.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/Mangle.cpp @@ -40,7 +40,7 @@ MiscNameMangler::MiscNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res) : Context(C), Out(Res) { } -void MiscNameMangler::mangleBlock(const BlockDecl *BD) { +void MiscNameMangler::mangleBlock(GlobalDecl GD, const BlockDecl *BD) { // Mangle the context of the block. 
// FIXME: We currently mimic GCC's mangling scheme, which leaves much to be // desired. Come up with a better mangling scheme. @@ -55,6 +55,16 @@ void MiscNameMangler::mangleBlock(const BlockDecl *BD) { const NamedDecl *ND = cast<NamedDecl>(DC); if (IdentifierInfo *II = ND->getIdentifier()) Out << II->getName(); + else if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(ND)) { + llvm::SmallString<64> Buffer; + Context.mangleCXXDtor(D, GD.getDtorType(), Buffer); + Out << Buffer; + } + else if (const CXXConstructorDecl *D = dyn_cast<CXXConstructorDecl>(ND)) { + llvm::SmallString<64> Buffer; + Context.mangleCXXCtor(D, GD.getCtorType(), Buffer); + Out << Buffer; + } else { // FIXME: We were doing a mangleUnqualifiedName() before, but that's // a private member of a class that will soon itself be private to the @@ -125,19 +135,24 @@ class CXXNameMangler { const CXXMethodDecl *Structor; unsigned StructorType; + /// SeqID - The next subsitution sequence number. + unsigned SeqID; + llvm::DenseMap<uintptr_t, unsigned> Substitutions; ASTContext &getASTContext() const { return Context.getASTContext(); } public: CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res) - : Context(C), Out(Res), Structor(0), StructorType(0) { } + : Context(C), Out(Res), Structor(0), StructorType(0), SeqID(0) { } CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res, const CXXConstructorDecl *D, CXXCtorType Type) - : Context(C), Out(Res), Structor(getStructor(D)), StructorType(Type) { } + : Context(C), Out(Res), Structor(getStructor(D)), StructorType(Type), + SeqID(0) { } CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res, const CXXDestructorDecl *D, CXXDtorType Type) - : Context(C), Out(Res), Structor(getStructor(D)), StructorType(Type) { } + : Context(C), Out(Res), Structor(getStructor(D)), StructorType(Type), + SeqID(0) { } #if MANGLE_CHECKER ~CXXNameMangler() { @@ -154,7 +169,9 @@ public: void mangle(const NamedDecl *D, llvm::StringRef Prefix = 
"_Z"); void mangleCallOffset(int64_t NonVirtual, int64_t Virtual); + void mangleNumber(const llvm::APSInt &I); void mangleNumber(int64_t Number); + void mangleFloat(const llvm::APFloat &F); void mangleFunctionEncoding(const FunctionDecl *FD); void mangleName(const NamedDecl *ND); void mangleType(QualType T); @@ -215,6 +232,7 @@ private: #include "clang/AST/TypeNodes.def" void mangleType(const TagType*); + void mangleType(TemplateName); void mangleBareFunctionType(const FunctionType *T, bool MangleReturnType); @@ -279,7 +297,7 @@ bool MangleContext::shouldMangleDeclName(const NamedDecl *D) { if (!FD) { const DeclContext *DC = D->getDeclContext(); // Check for extern variable declared locally. - if (isa<FunctionDecl>(DC) && D->hasLinkage()) + if (DC->isFunctionOrMethod() && D->hasLinkage()) while (!DC->isNamespace() && !DC->isTranslationUnit()) DC = DC->getParent(); if (DC->isTranslationUnit() && D->getLinkage() != InternalLinkage) @@ -357,12 +375,6 @@ void CXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) { mangleBareFunctionType(FT, MangleReturnType); } -/// isStd - Return whether a given namespace is the 'std' namespace. -static bool isStd(const NamespaceDecl *NS) { - const IdentifierInfo *II = NS->getOriginalNamespace()->getIdentifier(); - return II && II->isStr("std"); -} - static const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC) { while (isa<LinkageSpecDecl>(DC)) { DC = DC->getParent(); @@ -371,15 +383,21 @@ static const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC) { return DC; } +/// isStd - Return whether a given namespace is the 'std' namespace. +static bool isStd(const NamespaceDecl *NS) { + if (!IgnoreLinkageSpecDecls(NS->getParent())->isTranslationUnit()) + return false; + + const IdentifierInfo *II = NS->getOriginalNamespace()->getIdentifier(); + return II && II->isStr("std"); +} + // isStdNamespace - Return whether a given decl context is a toplevel 'std' // namespace. 
static bool isStdNamespace(const DeclContext *DC) { if (!DC->isNamespace()) return false; - if (!IgnoreLinkageSpecDecls(DC->getParent())->isTranslationUnit()) - return false; - return isStd(cast<NamespaceDecl>(DC)); } @@ -511,6 +529,21 @@ void CXXNameMangler::mangleUnscopedTemplateName(TemplateName Template) { addSubstitution(Template); } +void CXXNameMangler::mangleFloat(const llvm::APFloat &F) { + // TODO: avoid this copy with careful stream management. + llvm::SmallString<20> Buffer; + F.bitcastToAPInt().toString(Buffer, 16, false); + Out.write(Buffer.data(), Buffer.size()); +} + +void CXXNameMangler::mangleNumber(const llvm::APSInt &Value) { + if (Value.isSigned() && Value.isNegative()) { + Out << 'n'; + Value.abs().print(Out, true); + } else + Value.print(Out, Value.isSigned()); +} + void CXXNameMangler::mangleNumber(int64_t Number) { // <number> ::= [n] <non-negative decimal integer> if (Number < 0) { @@ -593,6 +626,28 @@ void CXXNameMangler::mangleUnresolvedName(NestedNameSpecifier *Qualifier, mangleUnqualifiedName(0, Name, KnownArity); } +static const FieldDecl *FindFirstNamedDataMember(const RecordDecl *RD) { + assert(RD->isAnonymousStructOrUnion() && + "Expected anonymous struct or union!"); + + for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end(); + I != E; ++I) { + const FieldDecl *FD = *I; + + if (FD->getIdentifier()) + return FD; + + if (const RecordType *RT = FD->getType()->getAs<RecordType>()) { + if (const FieldDecl *NamedDataMember = + FindFirstNamedDataMember(RT->getDecl())) + return NamedDataMember; + } + } + + // We didn't find a named data member. + return 0; +} + void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name, unsigned KnownArity) { @@ -625,6 +680,28 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND, } } + if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) { + // We must have an anonymous union or struct declaration. 
+ const RecordDecl *RD = + cast<RecordDecl>(VD->getType()->getAs<RecordType>()->getDecl()); + + // Itanium C++ ABI 5.1.2: + // + // For the purposes of mangling, the name of an anonymous union is + // considered to be the name of the first named data member found by a + // pre-order, depth-first, declaration-order walk of the data members of + // the anonymous union. If there is no such data member (i.e., if all of + // the data members in the union are unnamed), then there is no way for + // a program to refer to the anonymous union, and there is therefore no + // need to mangle its name. + const FieldDecl *FD = FindFirstNamedDataMember(RD); + assert(FD && "Didn't find a named data member!"); + assert(FD->getIdentifier() && "Data member name isn't an identifier!"); + + mangleSourceName(FD->getIdentifier()); + break; + } + // We must have an anonymous struct. const TagDecl *TD = cast<TagDecl>(ND); if (const TypedefDecl *D = TD->getTypedefForAnonDecl()) { @@ -808,7 +885,7 @@ void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) { if (const BlockDecl *Block = dyn_cast<BlockDecl>(DC)) { manglePrefix(DC->getParent(), NoFunction); llvm::SmallString<64> Name; - Context.mangleBlock(Block, Name); + Context.mangleBlock(GlobalDecl(), Block, Name); Out << Name.size() << Name; return; } @@ -880,6 +957,53 @@ void CXXNameMangler::mangleTemplatePrefix(const TemplateDecl *ND) { addSubstitution(ND); } +/// Mangles a template name under the production <type>. Required for +/// template template arguments. 
+/// <type> ::= <class-enum-type> +/// ::= <template-param> +/// ::= <substitution> +void CXXNameMangler::mangleType(TemplateName TN) { + if (mangleSubstitution(TN)) + return; + + TemplateDecl *TD = 0; + + switch (TN.getKind()) { + case TemplateName::QualifiedTemplate: + TD = TN.getAsQualifiedTemplateName()->getTemplateDecl(); + goto HaveDecl; + + case TemplateName::Template: + TD = TN.getAsTemplateDecl(); + goto HaveDecl; + + HaveDecl: + if (isa<TemplateTemplateParmDecl>(TD)) + mangleTemplateParameter(cast<TemplateTemplateParmDecl>(TD)->getIndex()); + else + mangleName(TD); + break; + + case TemplateName::OverloadedTemplate: + llvm_unreachable("can't mangle an overloaded template name as a <type>"); + break; + + case TemplateName::DependentTemplate: { + const DependentTemplateName *Dependent = TN.getAsDependentTemplateName(); + assert(Dependent->isIdentifier()); + + // <class-enum-type> ::= <name> + // <name> ::= <nested-name> + mangleUnresolvedScope(Dependent->getQualifier()); + mangleSourceName(Dependent->getIdentifier()); + break; + } + + } + + addSubstitution(TN); +} + void CXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity) { switch (OO) { @@ -1001,6 +1125,18 @@ void CXXNameMangler::mangleQualifiers(Qualifiers Quals) { if (Quals.hasConst()) Out << 'K'; + if (Quals.hasAddressSpace()) { + // Extension: + // + // <type> ::= U <address-space-number> + // + // where <address-space-number> is a source name consisting of 'AS' + // followed by the address space <number>. + llvm::SmallString<64> ASString; + ASString = "AS" + llvm::utostr_32(Quals.getAddressSpace()); + Out << 'U' << ASString.size() << ASString; + } + // FIXME: For now, just drop all extension qualifiers on the floor. 
} @@ -1138,7 +1274,8 @@ void CXXNameMangler::mangleBareFunctionType(const FunctionType *T, if (MangleReturnType) mangleType(Proto->getResultType()); - if (Proto->getNumArgs() == 0) { + if (Proto->getNumArgs() == 0 && !Proto->isVariadic()) { + // <builtin-type> ::= v # void Out << 'v'; return; } @@ -1204,6 +1341,22 @@ void CXXNameMangler::mangleType(const MemberPointerType *T) { if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) { mangleQualifiers(Qualifiers::fromCVRMask(FPT->getTypeQuals())); mangleType(FPT); + + // Itanium C++ ABI 5.1.8: + // + // The type of a non-static member function is considered to be different, + // for the purposes of substitution, from the type of a namespace-scope or + // static member function whose type appears similar. The types of two + // non-static member functions are considered to be different, for the + // purposes of substitution, if the functions are members of different + // classes. In other words, for the purposes of substitution, the class of + // which the function is a member is considered part of the type of + // function. + + // We increment the SeqID here to emulate adding an entry to the + // substitution table. We can't actually add it because we don't want this + // particular function type to be substituted. 
+ ++SeqID; } else mangleType(PointeeType); } @@ -1213,8 +1366,6 @@ void CXXNameMangler::mangleType(const TemplateTypeParmType *T) { mangleTemplateParameter(T->getIndex()); } -// FIXME: <type> ::= <template-template-param> <template-args> - // <type> ::= P <type> # pointer-to void CXXNameMangler::mangleType(const PointerType *T) { Out << 'P'; @@ -1244,12 +1395,20 @@ void CXXNameMangler::mangleType(const ComplexType *T) { } // GNU extension: vector types -// <type> ::= <vector-type> -// <vector-type> ::= Dv <positive dimension number> _ <element type> -// ::= Dv [<dimension expression>] _ <element type> +// <type> ::= <vector-type> +// <vector-type> ::= Dv <positive dimension number> _ +// <extended element type> +// ::= Dv [<dimension expression>] _ <element type> +// <extended element type> ::= <element type> +// ::= p # AltiVec vector pixel void CXXNameMangler::mangleType(const VectorType *T) { Out << "Dv" << T->getNumElements() << '_'; - mangleType(T->getElementType()); + if (T->getAltiVecSpecific() == VectorType::Pixel) + Out << 'p'; + else if (T->getAltiVecSpecific() == VectorType::Bool) + Out << 'b'; + else + mangleType(T->getElementType()); } void CXXNameMangler::mangleType(const ExtVectorType *T) { mangleType(static_cast<const VectorType*>(T)); @@ -1303,23 +1462,25 @@ void CXXNameMangler::mangleType(const TemplateSpecializationType *T) { void CXXNameMangler::mangleType(const DependentNameType *T) { // Typename types are always nested Out << 'N'; - if (T->getIdentifier()) { - mangleUnresolvedScope(T->getQualifier()); - mangleSourceName(T->getIdentifier()); - } else { - const TemplateSpecializationType *TST = T->getTemplateId(); - if (!mangleSubstitution(QualType(TST, 0))) { - mangleTemplatePrefix(TST->getTemplateName()); - - // FIXME: GCC does not appear to mangle the template arguments when - // the template in question is a dependent template name. Should we - // emulate that badness? 
- mangleTemplateArgs(TST->getTemplateName(), TST->getArgs(), - TST->getNumArgs()); - addSubstitution(QualType(TST, 0)); - } - } - + mangleUnresolvedScope(T->getQualifier()); + mangleSourceName(T->getIdentifier()); + Out << 'E'; +} + +void CXXNameMangler::mangleType(const DependentTemplateSpecializationType *T) { + // Dependently-scoped template types are always nested + Out << 'N'; + + // TODO: avoid making this TemplateName. + TemplateName Prefix = + getASTContext().getDependentTemplateName(T->getQualifier(), + T->getIdentifier()); + mangleTemplatePrefix(Prefix); + + // FIXME: GCC does not appear to mangle the template arguments when + // the template in question is a dependent template name. Should we + // emulate that badness? + mangleTemplateArgs(Prefix, T->getArgs(), T->getNumArgs()); Out << 'E'; } @@ -1369,9 +1530,7 @@ void CXXNameMangler::mangleIntegerLiteral(QualType T, // Boolean values are encoded as 0/1. Out << (Value.getBoolValue() ? '1' : '0'); } else { - if (Value.isNegative()) - Out << 'n'; - Value.abs().print(Out, false); + mangleNumber(Value); } Out << 'E'; @@ -1435,10 +1594,44 @@ void CXXNameMangler::mangleExpression(const Expr *E) { #define STMT(Type, Base) \ case Expr::Type##Class: #include "clang/AST/StmtNodes.inc" + // fallthrough + + // These all can only appear in local or variable-initialization + // contexts and so should never appear in a mangling. + case Expr::AddrLabelExprClass: + case Expr::BlockDeclRefExprClass: + case Expr::CXXThisExprClass: + case Expr::DesignatedInitExprClass: + case Expr::ImplicitValueInitExprClass: + case Expr::InitListExprClass: + case Expr::ParenListExprClass: + case Expr::CXXScalarValueInitExprClass: llvm_unreachable("unexpected statement kind"); break; - default: { + // FIXME: invent manglings for all these. 
+ case Expr::BlockExprClass: + case Expr::CXXPseudoDestructorExprClass: + case Expr::ChooseExprClass: + case Expr::CompoundLiteralExprClass: + case Expr::ExtVectorElementExprClass: + case Expr::ObjCEncodeExprClass: + case Expr::ObjCImplicitSetterGetterRefExprClass: + case Expr::ObjCIsaExprClass: + case Expr::ObjCIvarRefExprClass: + case Expr::ObjCMessageExprClass: + case Expr::ObjCPropertyRefExprClass: + case Expr::ObjCProtocolExprClass: + case Expr::ObjCSelectorExprClass: + case Expr::ObjCStringLiteralClass: + case Expr::ObjCSuperExprClass: + case Expr::OffsetOfExprClass: + case Expr::PredefinedExprClass: + case Expr::ShuffleVectorExprClass: + case Expr::StmtExprClass: + case Expr::TypesCompatibleExprClass: + case Expr::UnaryTypeTraitExprClass: + case Expr::VAArgExprClass: { // As bad as this diagnostic is, it's better than crashing. Diagnostic &Diags = Context.getDiags(); unsigned DiagID = Diags.getCustomDiagID(Diagnostic::Error, @@ -1450,6 +1643,11 @@ void CXXNameMangler::mangleExpression(const Expr *E) { break; } + case Expr::CXXDefaultArgExprClass: + mangleExpression(cast<CXXDefaultArgExpr>(E)->getExpr()); + break; + + case Expr::CXXMemberCallExprClass: // fallthrough case Expr::CallExprClass: { const CallExpr *CE = cast<CallExpr>(E); Out << "cl"; @@ -1460,6 +1658,26 @@ void CXXNameMangler::mangleExpression(const Expr *E) { break; } + case Expr::CXXNewExprClass: { + // Proposal from David Vandervoorde, 2010.06.30 + const CXXNewExpr *New = cast<CXXNewExpr>(E); + if (New->isGlobalNew()) Out << "gs"; + Out << (New->isArray() ? 
"na" : "nw"); + for (CXXNewExpr::const_arg_iterator I = New->placement_arg_begin(), + E = New->placement_arg_end(); I != E; ++I) + mangleExpression(*I); + Out << '_'; + mangleType(New->getAllocatedType()); + if (New->hasInitializer()) { + Out << "pi"; + for (CXXNewExpr::const_arg_iterator I = New->constructor_arg_begin(), + E = New->constructor_arg_end(); I != E; ++I) + mangleExpression(*I); + } + Out << 'E'; + break; + } + case Expr::MemberExprClass: { const MemberExpr *ME = cast<MemberExpr>(E); mangleMemberExpr(ME->getBase(), ME->isArrow(), @@ -1533,6 +1751,43 @@ void CXXNameMangler::mangleExpression(const Expr *E) { break; } + case Expr::CXXThrowExprClass: { + const CXXThrowExpr *TE = cast<CXXThrowExpr>(E); + + // Proposal from David Vandervoorde, 2010.06.30 + if (TE->getSubExpr()) { + Out << "tw"; + mangleExpression(TE->getSubExpr()); + } else { + Out << "tr"; + } + break; + } + + case Expr::CXXTypeidExprClass: { + const CXXTypeidExpr *TIE = cast<CXXTypeidExpr>(E); + + // Proposal from David Vandervoorde, 2010.06.30 + if (TIE->isTypeOperand()) { + Out << "ti"; + mangleType(TIE->getTypeOperand()); + } else { + Out << "te"; + mangleExpression(TIE->getExprOperand()); + } + break; + } + + case Expr::CXXDeleteExprClass: { + const CXXDeleteExpr *DE = cast<CXXDeleteExpr>(E); + + // Proposal from David Vandervoorde, 2010.06.30 + if (DE->isGlobalDelete()) Out << "gs"; + Out << (DE->isArrayForm() ? "da" : "dl"); + mangleExpression(DE->getArgument()); + break; + } + case Expr::UnaryOperatorClass: { const UnaryOperator *UO = cast<UnaryOperator>(E); mangleOperatorName(UnaryOperator::getOverloadedOperator(UO->getOpcode()), @@ -1541,6 +1796,18 @@ void CXXNameMangler::mangleExpression(const Expr *E) { break; } + case Expr::ArraySubscriptExprClass: { + const ArraySubscriptExpr *AE = cast<ArraySubscriptExpr>(E); + + // Array subscript is treated as a syntactically wierd form of + // binary operator. 
+ Out << "ix"; + mangleExpression(AE->getLHS()); + mangleExpression(AE->getRHS()); + break; + } + + case Expr::CompoundAssignOperatorClass: // fallthrough case Expr::BinaryOperatorClass: { const BinaryOperator *BO = cast<BinaryOperator>(E); mangleOperatorName(BinaryOperator::getOverloadedOperator(BO->getOpcode()), @@ -1657,12 +1924,7 @@ void CXXNameMangler::mangleExpression(const Expr *E) { const FloatingLiteral *FL = cast<FloatingLiteral>(E); Out << 'L'; mangleType(FL->getType()); - - // TODO: avoid this copy with careful stream management. - llvm::SmallString<20> Buffer; - FL->getValue().bitcastToAPInt().toString(Buffer, 16, false); - Out.write(Buffer.data(), Buffer.size()); - + mangleFloat(FL->getValue()); Out << 'E'; break; } @@ -1680,16 +1942,62 @@ void CXXNameMangler::mangleExpression(const Expr *E) { Out << 'E'; break; - case Expr::IntegerLiteralClass: - mangleIntegerLiteral(E->getType(), - llvm::APSInt(cast<IntegerLiteral>(E)->getValue())); + case Expr::IntegerLiteralClass: { + llvm::APSInt Value(cast<IntegerLiteral>(E)->getValue()); + if (E->getType()->isSignedIntegerType()) + Value.setIsSigned(true); + mangleIntegerLiteral(E->getType(), Value); break; + } + case Expr::ImaginaryLiteralClass: { + const ImaginaryLiteral *IE = cast<ImaginaryLiteral>(E); + // Mangle as if a complex literal. + // Proposal from David Vandervoorde, 2010.06.30. + Out << 'L'; + mangleType(E->getType()); + if (const FloatingLiteral *Imag = + dyn_cast<FloatingLiteral>(IE->getSubExpr())) { + // Mangle a floating-point zero of the appropriate type. 
+ mangleFloat(llvm::APFloat(Imag->getValue().getSemantics())); + Out << '_'; + mangleFloat(Imag->getValue()); + } else { + Out << '0' << '_'; + llvm::APSInt Value(cast<IntegerLiteral>(IE->getSubExpr())->getValue()); + if (IE->getSubExpr()->getType()->isSignedIntegerType()) + Value.setIsSigned(true); + mangleNumber(Value); + } + Out << 'E'; + break; } -} -// FIXME: <type> ::= G <type> # imaginary (C 2000) -// FIXME: <type> ::= U <source-name> <type> # vendor extended type qualifier + case Expr::StringLiteralClass: { + // Proposal from David Vandervoorde, 2010.06.30. + // I've sent a comment off asking whether this needs to also + // represent the length of the string. + Out << 'L'; + const ConstantArrayType *T = cast<ConstantArrayType>(E->getType()); + QualType CharTy = T->getElementType().getUnqualifiedType(); + mangleType(CharTy); + Out << 'E'; + break; + } + + case Expr::GNUNullExprClass: + // FIXME: should this really be mangled the same as nullptr? + // fallthrough + + case Expr::CXXNullPtrLiteralExprClass: { + // Proposal from David Vandervoorde, 2010.06.30, as + // modified by ABI list discussion. + Out << "LDnE"; + break; + } + + } +} void CXXNameMangler::mangleCXXCtorType(CXXCtorType T) { // <ctor-dtor-name> ::= C1 # complete object constructor @@ -1774,9 +2082,8 @@ void CXXNameMangler::mangleTemplateArg(const NamedDecl *P, mangleType(A.getAsType()); break; case TemplateArgument::Template: - assert(A.getAsTemplate().getAsTemplateDecl() && - "Can't get dependent template names here"); - mangleName(A.getAsTemplate().getAsTemplateDecl()); + // This is mangled as <type>. + mangleType(A.getAsTemplate()); break; case TemplateArgument::Expression: Out << 'X'; @@ -1882,7 +2189,7 @@ bool CXXNameMangler::mangleSubstitution(uintptr_t Ptr) { while (SeqID) { assert(BufferPtr > Buffer && "Buffer overflow!"); - unsigned char c = static_cast<unsigned char>(SeqID) % 36; + char c = static_cast<char>(SeqID % 36); *--BufferPtr = (c < 10 ? 
'0' + c : 'A' + c - 10); SeqID /= 36; @@ -2049,10 +2356,8 @@ void CXXNameMangler::addSubstitution(TemplateName Template) { } void CXXNameMangler::addSubstitution(uintptr_t Ptr) { - unsigned SeqID = Substitutions.size(); - assert(!Substitutions.count(Ptr) && "Substitution already exists!"); - Substitutions[Ptr] = SeqID; + Substitutions[Ptr] = SeqID++; } // @@ -2092,10 +2397,10 @@ void MangleContext::mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type, Mangler.mangle(D); } -void MangleContext::mangleBlock(const BlockDecl *BD, +void MangleContext::mangleBlock(GlobalDecl GD, const BlockDecl *BD, llvm::SmallVectorImpl<char> &Res) { MiscNameMangler Mangler(*this, Res); - Mangler.mangleBlock(BD); + Mangler.mangleBlock(GD, BD); } void MangleContext::mangleThunk(const CXXMethodDecl *MD, @@ -2155,6 +2460,15 @@ void MangleContext::mangleGuardVariable(const VarDecl *D, Mangler.mangleName(D); } +void MangleContext::mangleReferenceTemporary(const VarDecl *D, + llvm::SmallVectorImpl<char> &Res) { + // We match the GCC mangling here. 
+ // <special-name> ::= GR <object name> + CXXNameMangler Mangler(*this, Res); + Mangler.getStream() << "_ZGR"; + Mangler.mangleName(D); +} + void MangleContext::mangleCXXVTable(const CXXRecordDecl *RD, llvm::SmallVectorImpl<char> &Res) { // <special-name> ::= TV <type> # virtual table diff --git a/contrib/llvm/tools/clang/lib/CodeGen/Mangle.h b/contrib/llvm/tools/clang/lib/CodeGen/Mangle.h index f1c5358..139f6c0 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/Mangle.h +++ b/contrib/llvm/tools/clang/lib/CodeGen/Mangle.h @@ -19,6 +19,7 @@ #define LLVM_CLANG_CODEGEN_MANGLE_H #include "CGCXX.h" +#include "GlobalDecl.h" #include "clang/AST/Type.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/StringRef.h" @@ -84,6 +85,8 @@ public: Diagnostic &Diags) : Context(Context), Diags(Diags) { } + virtual ~MangleContext() { } + ASTContext &getASTContext() const { return Context; } Diagnostic &getDiags() const { return Diags; } @@ -108,7 +111,7 @@ public: /// @name Mangler Entry Points /// @{ - bool shouldMangleDeclName(const NamedDecl *D); + virtual bool shouldMangleDeclName(const NamedDecl *D); virtual void mangleName(const NamedDecl *D, llvm::SmallVectorImpl<char> &); virtual void mangleThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk, @@ -118,6 +121,8 @@ public: llvm::SmallVectorImpl<char> &); virtual void mangleGuardVariable(const VarDecl *D, llvm::SmallVectorImpl<char> &); + virtual void mangleReferenceTemporary(const VarDecl *D, + llvm::SmallVectorImpl<char> &); virtual void mangleCXXVTable(const CXXRecordDecl *RD, llvm::SmallVectorImpl<char> &); virtual void mangleCXXVTT(const CXXRecordDecl *RD, @@ -131,7 +136,8 @@ public: llvm::SmallVectorImpl<char> &); virtual void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type, llvm::SmallVectorImpl<char> &); - void mangleBlock(const BlockDecl *BD, llvm::SmallVectorImpl<char> &); + void mangleBlock(GlobalDecl GD, + const BlockDecl *BD, llvm::SmallVectorImpl<char> &); void mangleInitDiscriminator() { Discriminator 
= 0; @@ -161,7 +167,7 @@ public: llvm::raw_svector_ostream &getStream() { return Out; } - void mangleBlock(const BlockDecl *BD); + void mangleBlock(GlobalDecl GD, const BlockDecl *BD); void mangleObjCMethodName(const ObjCMethodDecl *MD); }; diff --git a/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp new file mode 100644 index 0000000..da0fdb6 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp @@ -0,0 +1,1191 @@ +//===--- MicrosoftCXXABI.cpp - Emit LLVM Code from ASTs for a Module ------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This provides C++ code generation targetting the Microsoft Visual C++ ABI. +// The class in this file generates structures that follow the Microsoft +// Visual C++ ABI, which is actually not very well documented at all outside +// of Microsoft. +// +//===----------------------------------------------------------------------===// + +#include "CGCXXABI.h" +#include "CodeGenModule.h" +#include "Mangle.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclTemplate.h" +#include "clang/AST/ExprCXX.h" +#include "CGVTables.h" + +using namespace clang; +using namespace CodeGen; + +namespace { + +/// MicrosoftCXXNameMangler - Manage the mangling of a single name for the +/// Microsoft Visual C++ ABI. 
+class MicrosoftCXXNameMangler { + MangleContext &Context; + llvm::raw_svector_ostream Out; + + ASTContext &getASTContext() const { return Context.getASTContext(); } + +public: + MicrosoftCXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res) + : Context(C), Out(Res) { } + + llvm::raw_svector_ostream &getStream() { return Out; } + + void mangle(const NamedDecl *D, llvm::StringRef Prefix = "?"); + void mangleName(const NamedDecl *ND); + void mangleFunctionEncoding(const FunctionDecl *FD); + void mangleVariableEncoding(const VarDecl *VD); + void mangleNumber(int64_t Number); + void mangleType(QualType T); + +private: + void mangleUnqualifiedName(const NamedDecl *ND) { + mangleUnqualifiedName(ND, ND->getDeclName()); + } + void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name); + void mangleSourceName(const IdentifierInfo *II); + void manglePostfix(const DeclContext *DC, bool NoFunction=false); + void mangleOperatorName(OverloadedOperatorKind OO); + void mangleQualifiers(Qualifiers Quals, bool IsMember); + + void mangleObjCMethodName(const ObjCMethodDecl *MD); + + // Declare manglers for every type class. +#define ABSTRACT_TYPE(CLASS, PARENT) +#define NON_CANONICAL_TYPE(CLASS, PARENT) +#define TYPE(CLASS, PARENT) void mangleType(const CLASS##Type *T); +#include "clang/AST/TypeNodes.def" + + void mangleType(const TagType*); + void mangleType(const FunctionType *T, const FunctionDecl *D, + bool IsStructor, bool IsInstMethod); + void mangleType(const ArrayType *T, bool IsGlobal); + void mangleExtraDimensions(QualType T); + void mangleFunctionClass(const FunctionDecl *FD); + void mangleCallingConvention(const FunctionType *T); + void mangleThrowSpecification(const FunctionProtoType *T); + +}; + +/// MicrosoftMangleContext - Overrides the default MangleContext for the +/// Microsoft Visual C++ ABI. 
+class MicrosoftMangleContext : public MangleContext { +public: + MicrosoftMangleContext(ASTContext &Context, + Diagnostic &Diags) : MangleContext(Context, Diags) { } + virtual bool shouldMangleDeclName(const NamedDecl *D); + virtual void mangleName(const NamedDecl *D, llvm::SmallVectorImpl<char> &); + virtual void mangleThunk(const CXXMethodDecl *MD, + const ThunkInfo &Thunk, + llvm::SmallVectorImpl<char> &); + virtual void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type, + const ThisAdjustment &ThisAdjustment, + llvm::SmallVectorImpl<char> &); + virtual void mangleGuardVariable(const VarDecl *D, + llvm::SmallVectorImpl<char> &); + virtual void mangleCXXVTable(const CXXRecordDecl *RD, + llvm::SmallVectorImpl<char> &); + virtual void mangleCXXVTT(const CXXRecordDecl *RD, + llvm::SmallVectorImpl<char> &); + virtual void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset, + const CXXRecordDecl *Type, + llvm::SmallVectorImpl<char> &); + virtual void mangleCXXRTTI(QualType T, llvm::SmallVectorImpl<char> &); + virtual void mangleCXXRTTIName(QualType T, llvm::SmallVectorImpl<char> &); + virtual void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type, + llvm::SmallVectorImpl<char> &); + virtual void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type, + llvm::SmallVectorImpl<char> &); +}; + +class MicrosoftCXXABI : public CXXABI { + MicrosoftMangleContext MangleCtx; +public: + MicrosoftCXXABI(CodeGenModule &CGM) + : MangleCtx(CGM.getContext(), CGM.getDiags()) {} + + MicrosoftMangleContext &getMangleContext() { + return MangleCtx; + } +}; + +} + +static bool isInCLinkageSpecification(const Decl *D) { + D = D->getCanonicalDecl(); + for (const DeclContext *DC = D->getDeclContext(); + !DC->isTranslationUnit(); DC = DC->getParent()) { + if (const LinkageSpecDecl *Linkage = dyn_cast<LinkageSpecDecl>(DC)) + return Linkage->getLanguage() == LinkageSpecDecl::lang_c; + } + + return false; +} + +bool 
MicrosoftMangleContext::shouldMangleDeclName(const NamedDecl *D) { + // In C, functions with no attributes never need to be mangled. Fastpath them. + if (!getASTContext().getLangOptions().CPlusPlus && !D->hasAttrs()) + return false; + + // Any decl can be declared with __asm("foo") on it, and this takes precedence + // over all other naming in the .o file. + if (D->hasAttr<AsmLabelAttr>()) + return true; + + // Clang's "overloadable" attribute extension to C/C++ implies name mangling + // (always) as does passing a C++ member function and a function + // whose name is not a simple identifier. + const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); + if (FD && (FD->hasAttr<OverloadableAttr>() || isa<CXXMethodDecl>(FD) || + !FD->getDeclName().isIdentifier())) + return true; + + // Otherwise, no mangling is done outside C++ mode. + if (!getASTContext().getLangOptions().CPlusPlus) + return false; + + // Variables at global scope with internal linkage are not mangled. + if (!FD) { + const DeclContext *DC = D->getDeclContext(); + if (DC->isTranslationUnit() && D->getLinkage() == InternalLinkage) + return false; + } + + // C functions and "main" are not mangled. + if ((FD && FD->isMain()) || isInCLinkageSpecification(D)) + return false; + + return true; +} + +void MicrosoftCXXNameMangler::mangle(const NamedDecl *D, + llvm::StringRef Prefix) { + // MSVC doesn't mangle C++ names the same way it mangles extern "C" names. + // Therefore it's really important that we don't decorate the + // name with leading underscores or leading/trailing at signs. So, emit a + // asm marker at the start so we get the name right. + Out << '\01'; // LLVM IR Marker for __asm("foo") + + // Any decl can be declared with __asm("foo") on it, and this takes precedence + // over all other naming in the .o file. + if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>()) { + // If we have an asm name, then we use it as the mangling. + Out << ALA->getLabel(); + return; + } + + // <mangled-name> ::= ? 
<name> <type-encoding> + Out << Prefix; + mangleName(D); + if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) + mangleFunctionEncoding(FD); + else if (const VarDecl *VD = dyn_cast<VarDecl>(D)) + mangleVariableEncoding(VD); + // TODO: Fields? Can MSVC even mangle them? +} + +void MicrosoftCXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) { + // <type-encoding> ::= <function-class> <function-type> + + // Don't mangle in the type if this isn't a decl we should typically mangle. + if (!Context.shouldMangleDeclName(FD)) + return; + + // We should never ever see a FunctionNoProtoType at this point. + // We don't even know how to mangle their types anyway :). + const FunctionProtoType *FT = cast<FunctionProtoType>(FD->getType()); + + bool InStructor = false, InInstMethod = false; + const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD); + if (MD) { + if (MD->isInstance()) + InInstMethod = true; + if (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)) + InStructor = true; + } + + // First, the function class. + mangleFunctionClass(FD); + + mangleType(FT, FD, InStructor, InInstMethod); +} + +void MicrosoftCXXNameMangler::mangleVariableEncoding(const VarDecl *VD) { + // <type-encoding> ::= <storage-class> <variable-type> + // <storage-class> ::= 0 # private static member + // ::= 1 # protected static member + // ::= 2 # public static member + // ::= 3 # global + // ::= 4 # static local + + // The first character in the encoding (after the name) is the storage class. + if (VD->isStaticDataMember()) { + // If it's a static member, it also encodes the access level. + switch (VD->getAccess()) { + default: + case AS_private: Out << '0'; break; + case AS_protected: Out << '1'; break; + case AS_public: Out << '2'; break; + } + } + else if (!VD->isStaticLocal()) + Out << '3'; + else + Out << '4'; + // Now mangle the type. + // <variable-type> ::= <type> <cvr-qualifiers> + // ::= <type> A # pointers, references, arrays + // Pointers and references are odd. 
The type of 'int * const foo;' gets + // mangled as 'QAHA' instead of 'PAHB', for example. + QualType Ty = VD->getType(); + if (Ty->isPointerType() || Ty->isReferenceType()) { + mangleType(Ty); + Out << 'A'; + } else if (Ty->isArrayType()) { + // Global arrays are funny, too. + mangleType(static_cast<ArrayType *>(Ty.getTypePtr()), true); + Out << 'A'; + } else { + mangleType(Ty.getLocalUnqualifiedType()); + mangleQualifiers(Ty.getLocalQualifiers(), false); + } +} + +void MicrosoftCXXNameMangler::mangleName(const NamedDecl *ND) { + // <name> ::= <unscoped-name> {[<named-scope>]+ | [<nested-name>]}? @ + const DeclContext *DC = ND->getDeclContext(); + + // Always start with the unqualified name. + mangleUnqualifiedName(ND); + + // If this is an extern variable declared locally, the relevant DeclContext + // is that of the containing namespace, or the translation unit. + if (isa<FunctionDecl>(DC) && ND->hasLinkage()) + while (!DC->isNamespace() && !DC->isTranslationUnit()) + DC = DC->getParent(); + + manglePostfix(DC); + + // Terminate the whole name with an '@'. + Out << '@'; +} + +void MicrosoftCXXNameMangler::mangleNumber(int64_t Number) { + // <number> ::= [?] <decimal digit> # <= 9 + // ::= [?] <hex digit>+ @ # > 9; A = 0, B = 1, etc... + if (Number < 0) { + Out << '?'; + Number = -Number; + } + if (Number >= 1 && Number <= 10) { + Out << Number-1; + } else { + // We have to build up the encoding in reverse order, so it will come + // out right when we write it out. 
+ char Encoding[16]; + char *EndPtr = Encoding+sizeof(Encoding); + char *CurPtr = EndPtr; + while (Number) { + *--CurPtr = 'A' + (Number % 16); + Number /= 16; + } + Out.write(CurPtr, EndPtr-CurPtr); + Out << '@'; + } +} + +void +MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND, + DeclarationName Name) { + // <unqualified-name> ::= <operator-name> + // ::= <ctor-dtor-name> + // ::= <source-name> + switch (Name.getNameKind()) { + case DeclarationName::Identifier: { + if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) { + mangleSourceName(II); + break; + } + + // Otherwise, an anonymous entity. We must have a declaration. + assert(ND && "mangling empty name without declaration"); + + if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) { + if (NS->isAnonymousNamespace()) { + Out << "?A"; + break; + } + } + + // We must have an anonymous struct. + const TagDecl *TD = cast<TagDecl>(ND); + if (const TypedefDecl *D = TD->getTypedefForAnonDecl()) { + assert(TD->getDeclContext() == D->getDeclContext() && + "Typedef should not be in another decl context!"); + assert(D->getDeclName().getAsIdentifierInfo() && + "Typedef was not named!"); + mangleSourceName(D->getDeclName().getAsIdentifierInfo()); + break; + } + + // When VC encounters an anonymous type with no tag and no typedef, + // it literally emits '<unnamed-tag>'. + Out << "<unnamed-tag>"; + break; + } + + case DeclarationName::ObjCZeroArgSelector: + case DeclarationName::ObjCOneArgSelector: + case DeclarationName::ObjCMultiArgSelector: + assert(false && "Can't mangle Objective-C selector names here!"); + break; + + case DeclarationName::CXXConstructorName: + assert(false && "Can't mangle constructors yet!"); + break; + + case DeclarationName::CXXDestructorName: + assert(false && "Can't mangle destructors yet!"); + break; + + case DeclarationName::CXXConversionFunctionName: + // <operator-name> ::= ?B # (cast) + // The target type is encoded as the return type. 
+ Out << "?B"; + break; + + case DeclarationName::CXXOperatorName: + mangleOperatorName(Name.getCXXOverloadedOperator()); + break; + + case DeclarationName::CXXLiteralOperatorName: + // FIXME: Was this added in VS2010? Does MS even know how to mangle this? + assert(false && "Don't know how to mangle literal operators yet!"); + break; + + case DeclarationName::CXXUsingDirective: + assert(false && "Can't mangle a using directive name!"); + break; + } +} + +void MicrosoftCXXNameMangler::manglePostfix(const DeclContext *DC, + bool NoFunction) { + // <postfix> ::= <unqualified-name> [<postfix>] + // ::= <template-postfix> <template-args> [<postfix>] + // ::= <template-param> + // ::= <substitution> [<postfix>] + + if (!DC) return; + + while (isa<LinkageSpecDecl>(DC)) + DC = DC->getParent(); + + if (DC->isTranslationUnit()) + return; + + if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC)) { + llvm::SmallString<64> Name; + Context.mangleBlock(GlobalDecl(), BD, Name); + Out << Name << '@'; + return manglePostfix(DC->getParent(), NoFunction); + } + + if (NoFunction && (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC))) + return; + else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC)) + mangleObjCMethodName(Method); + else { + mangleUnqualifiedName(cast<NamedDecl>(DC)); + manglePostfix(DC->getParent(), NoFunction); + } +} + +void MicrosoftCXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO) { + switch (OO) { + // ?0 # constructor + // ?1 # destructor + // <operator-name> ::= ?2 # new + case OO_New: Out << "?2"; break; + // <operator-name> ::= ?3 # delete + case OO_Delete: Out << "?3"; break; + // <operator-name> ::= ?4 # = + case OO_Equal: Out << "?4"; break; + // <operator-name> ::= ?5 # >> + case OO_GreaterGreater: Out << "?5"; break; + // <operator-name> ::= ?6 # << + case OO_LessLess: Out << "?6"; break; + // <operator-name> ::= ?7 # ! 
+ case OO_Exclaim: Out << "?7"; break; + // <operator-name> ::= ?8 # == + case OO_EqualEqual: Out << "?8"; break; + // <operator-name> ::= ?9 # != + case OO_ExclaimEqual: Out << "?9"; break; + // <operator-name> ::= ?A # [] + case OO_Subscript: Out << "?A"; break; + // ?B # conversion + // <operator-name> ::= ?C # -> + case OO_Arrow: Out << "?C"; break; + // <operator-name> ::= ?D # * + case OO_Star: Out << "?D"; break; + // <operator-name> ::= ?E # ++ + case OO_PlusPlus: Out << "?E"; break; + // <operator-name> ::= ?F # -- + case OO_MinusMinus: Out << "?F"; break; + // <operator-name> ::= ?G # - + case OO_Minus: Out << "?G"; break; + // <operator-name> ::= ?H # + + case OO_Plus: Out << "?H"; break; + // <operator-name> ::= ?I # & + case OO_Amp: Out << "?I"; break; + // <operator-name> ::= ?J # ->* + case OO_ArrowStar: Out << "?J"; break; + // <operator-name> ::= ?K # / + case OO_Slash: Out << "?K"; break; + // <operator-name> ::= ?L # % + case OO_Percent: Out << "?L"; break; + // <operator-name> ::= ?M # < + case OO_Less: Out << "?M"; break; + // <operator-name> ::= ?N # <= + case OO_LessEqual: Out << "?N"; break; + // <operator-name> ::= ?O # > + case OO_Greater: Out << "?O"; break; + // <operator-name> ::= ?P # >= + case OO_GreaterEqual: Out << "?P"; break; + // <operator-name> ::= ?Q # , + case OO_Comma: Out << "?Q"; break; + // <operator-name> ::= ?R # () + case OO_Call: Out << "?R"; break; + // <operator-name> ::= ?S # ~ + case OO_Tilde: Out << "?S"; break; + // <operator-name> ::= ?T # ^ + case OO_Caret: Out << "?T"; break; + // <operator-name> ::= ?U # | + case OO_Pipe: Out << "?U"; break; + // <operator-name> ::= ?V # && + case OO_AmpAmp: Out << "?V"; break; + // <operator-name> ::= ?W # || + case OO_PipePipe: Out << "?W"; break; + // <operator-name> ::= ?X # *= + case OO_StarEqual: Out << "?X"; break; + // <operator-name> ::= ?Y # += + case OO_PlusEqual: Out << "?Y"; break; + // <operator-name> ::= ?Z # -= + case OO_MinusEqual: Out << "?Z"; break; + // 
<operator-name> ::= ?_0 # /= + case OO_SlashEqual: Out << "?_0"; break; + // <operator-name> ::= ?_1 # %= + case OO_PercentEqual: Out << "?_1"; break; + // <operator-name> ::= ?_2 # >>= + case OO_GreaterGreaterEqual: Out << "?_2"; break; + // <operator-name> ::= ?_3 # <<= + case OO_LessLessEqual: Out << "?_3"; break; + // <operator-name> ::= ?_4 # &= + case OO_AmpEqual: Out << "?_4"; break; + // <operator-name> ::= ?_5 # |= + case OO_PipeEqual: Out << "?_5"; break; + // <operator-name> ::= ?_6 # ^= + case OO_CaretEqual: Out << "?_6"; break; + // ?_7 # vftable + // ?_8 # vbtable + // ?_9 # vcall + // ?_A # typeof + // ?_B # local static guard + // ?_C # string + // ?_D # vbase destructor + // ?_E # vector deleting destructor + // ?_F # default constructor closure + // ?_G # scalar deleting destructor + // ?_H # vector constructor iterator + // ?_I # vector destructor iterator + // ?_J # vector vbase constructor iterator + // ?_K # virtual displacement map + // ?_L # eh vector constructor iterator + // ?_M # eh vector destructor iterator + // ?_N # eh vector vbase constructor iterator + // ?_O # copy constructor closure + // ?_P<name> # udt returning <name> + // ?_Q # <unknown> + // ?_R0 # RTTI Type Descriptor + // ?_R1 # RTTI Base Class Descriptor at (a,b,c,d) + // ?_R2 # RTTI Base Class Array + // ?_R3 # RTTI Class Hierarchy Descriptor + // ?_R4 # RTTI Complete Object Locator + // ?_S # local vftable + // ?_T # local vftable constructor closure + // <operator-name> ::= ?_U # new[] + case OO_Array_New: Out << "?_U"; break; + // <operator-name> ::= ?_V # delete[] + case OO_Array_Delete: Out << "?_V"; break; + + case OO_Conditional: + assert(false && "Don't know how to mangle ?:"); + break; + + case OO_None: + case NUM_OVERLOADED_OPERATORS: + assert(false && "Not an overloaded operator"); + break; + } +} + +void MicrosoftCXXNameMangler::mangleSourceName(const IdentifierInfo *II) { + // <source name> ::= <identifier> @ + Out << II->getName() << '@'; +} + +void 
MicrosoftCXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) { + llvm::SmallString<64> Buffer; + MiscNameMangler(Context, Buffer).mangleObjCMethodName(MD); + Out << Buffer; +} + +void MicrosoftCXXNameMangler::mangleQualifiers(Qualifiers Quals, + bool IsMember) { + // <cvr-qualifiers> ::= [E] [F] [I] <base-cvr-qualifiers> + // 'E' means __ptr64 (32-bit only); 'F' means __unaligned (32/64-bit only); + // 'I' means __restrict (32/64-bit). + // Note that the MSVC __restrict keyword isn't the same as the C99 restrict + // keyword! + // <base-cvr-qualifiers> ::= A # near + // ::= B # near const + // ::= C # near volatile + // ::= D # near const volatile + // ::= E # far (16-bit) + // ::= F # far const (16-bit) + // ::= G # far volatile (16-bit) + // ::= H # far const volatile (16-bit) + // ::= I # huge (16-bit) + // ::= J # huge const (16-bit) + // ::= K # huge volatile (16-bit) + // ::= L # huge const volatile (16-bit) + // ::= M <basis> # based + // ::= N <basis> # based const + // ::= O <basis> # based volatile + // ::= P <basis> # based const volatile + // ::= Q # near member + // ::= R # near const member + // ::= S # near volatile member + // ::= T # near const volatile member + // ::= U # far member (16-bit) + // ::= V # far const member (16-bit) + // ::= W # far volatile member (16-bit) + // ::= X # far const volatile member (16-bit) + // ::= Y # huge member (16-bit) + // ::= Z # huge const member (16-bit) + // ::= 0 # huge volatile member (16-bit) + // ::= 1 # huge const volatile member (16-bit) + // ::= 2 <basis> # based member + // ::= 3 <basis> # based const member + // ::= 4 <basis> # based volatile member + // ::= 5 <basis> # based const volatile member + // ::= 6 # near function (pointers only) + // ::= 7 # far function (pointers only) + // ::= 8 # near method (pointers only) + // ::= 9 # far method (pointers only) + // ::= _A <basis> # based function (pointers only) + // ::= _B <basis> # based function (far?) 
(pointers only) + // ::= _C <basis> # based method (pointers only) + // ::= _D <basis> # based method (far?) (pointers only) + // ::= _E # block (Clang) + // <basis> ::= 0 # __based(void) + // ::= 1 # __based(segment)? + // ::= 2 <name> # __based(name) + // ::= 3 # ? + // ::= 4 # ? + // ::= 5 # not really based + if (!IsMember) { + if (!Quals.hasVolatile()) { + if (!Quals.hasConst()) + Out << 'A'; + else + Out << 'B'; + } else { + if (!Quals.hasConst()) + Out << 'C'; + else + Out << 'D'; + } + } else { + if (!Quals.hasVolatile()) { + if (!Quals.hasConst()) + Out << 'Q'; + else + Out << 'R'; + } else { + if (!Quals.hasConst()) + Out << 'S'; + else + Out << 'T'; + } + } + + // FIXME: For now, just drop all extension qualifiers on the floor. +} + +void MicrosoftCXXNameMangler::mangleType(QualType T) { + // Only operate on the canonical type! + T = getASTContext().getCanonicalType(T); + + Qualifiers Quals = T.getLocalQualifiers(); + if (Quals) { + // We have to mangle these now, while we still have enough information. + // <pointer-cvr-qualifiers> ::= P # pointer + // ::= Q # const pointer + // ::= R # volatile pointer + // ::= S # const volatile pointer + if (T->isAnyPointerType() || T->isMemberPointerType() || + T->isBlockPointerType()) { + if (!Quals.hasVolatile()) + Out << 'Q'; + else { + if (!Quals.hasConst()) + Out << 'R'; + else + Out << 'S'; + } + } else + // Just emit qualifiers like normal. + // NB: When we mangle a pointer/reference type, and the pointee + // type has no qualifiers, the lack of qualifier gets mangled + // in there. 
+ mangleQualifiers(Quals, false); + } else if (T->isAnyPointerType() || T->isMemberPointerType() || + T->isBlockPointerType()) { + Out << 'P'; + } + switch (T->getTypeClass()) { +#define ABSTRACT_TYPE(CLASS, PARENT) +#define NON_CANONICAL_TYPE(CLASS, PARENT) \ +case Type::CLASS: \ +llvm_unreachable("can't mangle non-canonical type " #CLASS "Type"); \ +return; +#define TYPE(CLASS, PARENT) \ +case Type::CLASS: \ +mangleType(static_cast<const CLASS##Type*>(T.getTypePtr())); \ +break; +#include "clang/AST/TypeNodes.def" + } +} + +void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T) { + // <type> ::= <builtin-type> + // <builtin-type> ::= X # void + // ::= C # signed char + // ::= D # char + // ::= E # unsigned char + // ::= F # short + // ::= G # unsigned short (or wchar_t if it's not a builtin) + // ::= H # int + // ::= I # unsigned int + // ::= J # long + // ::= K # unsigned long + // L # <none> + // ::= M # float + // ::= N # double + // ::= O # long double (__float80 is mangled differently) + // ::= _D # __int8 (yup, it's a distinct type in MSVC) + // ::= _E # unsigned __int8 + // ::= _F # __int16 + // ::= _G # unsigned __int16 + // ::= _H # __int32 + // ::= _I # unsigned __int32 + // ::= _J # long long, __int64 + // ::= _K # unsigned long long, __int64 + // ::= _L # __int128 + // ::= _M # unsigned __int128 + // ::= _N # bool + // _O # <array in parameter> + // ::= _T # __float80 (Intel) + // ::= _W # wchar_t + // ::= _Z # __float80 (Digital Mars) + switch (T->getKind()) { + case BuiltinType::Void: Out << 'X'; break; + case BuiltinType::SChar: Out << 'C'; break; + case BuiltinType::Char_U: case BuiltinType::Char_S: Out << 'D'; break; + case BuiltinType::UChar: Out << 'E'; break; + case BuiltinType::Short: Out << 'F'; break; + case BuiltinType::UShort: Out << 'G'; break; + case BuiltinType::Int: Out << 'H'; break; + case BuiltinType::UInt: Out << 'I'; break; + case BuiltinType::Long: Out << 'J'; break; + case BuiltinType::ULong: Out << 'K'; break; + case 
BuiltinType::Float: Out << 'M'; break; + case BuiltinType::Double: Out << 'N'; break; + // TODO: Determine size and mangle accordingly + case BuiltinType::LongDouble: Out << 'O'; break; + // TODO: __int8 and friends + case BuiltinType::LongLong: Out << "_J"; break; + case BuiltinType::ULongLong: Out << "_K"; break; + case BuiltinType::Int128: Out << "_L"; break; + case BuiltinType::UInt128: Out << "_M"; break; + case BuiltinType::Bool: Out << "_N"; break; + case BuiltinType::WChar: Out << "_W"; break; + + case BuiltinType::Overload: + case BuiltinType::Dependent: + assert(false && + "Overloaded and dependent types shouldn't get to name mangling"); + break; + case BuiltinType::UndeducedAuto: + assert(0 && "Should not see undeduced auto here"); + break; + case BuiltinType::ObjCId: Out << "PAUobjc_object@@"; break; + case BuiltinType::ObjCClass: Out << "PAUobjc_class@@"; break; + case BuiltinType::ObjCSel: Out << "PAUobjc_selector@@"; break; + + case BuiltinType::Char16: + case BuiltinType::Char32: + case BuiltinType::NullPtr: + assert(false && "Don't know how to mangle this type"); + break; + } +} + +// <type> ::= <function-type> +void MicrosoftCXXNameMangler::mangleType(const FunctionProtoType *T) { + // Structors only appear in decls, so at this point we know it's not a + // structor type. + // I'll probably have mangleType(MemberPointerType) call the mangleType() + // method directly. + mangleType(T, NULL, false, false); +} +void MicrosoftCXXNameMangler::mangleType(const FunctionNoProtoType *T) { + llvm_unreachable("Can't mangle K&R function prototypes"); +} + +void MicrosoftCXXNameMangler::mangleType(const FunctionType *T, + const FunctionDecl *D, + bool IsStructor, + bool IsInstMethod) { + // <function-type> ::= <this-cvr-qualifiers> <calling-convention> + // <return-type> <argument-list> <throw-spec> + const FunctionProtoType *Proto = cast<FunctionProtoType>(T); + + // If this is a C++ instance method, mangle the CVR qualifiers for the + // this pointer. 
+ if (IsInstMethod) + mangleQualifiers(Qualifiers::fromCVRMask(Proto->getTypeQuals()), false); + + mangleCallingConvention(T); + + // <return-type> ::= <type> + // ::= @ # structors (they have no declared return type) + if (IsStructor) + Out << '@'; + else + mangleType(Proto->getResultType()); + + // <argument-list> ::= X # void + // ::= <type>+ @ + // ::= <type>* Z # varargs + if (Proto->getNumArgs() == 0 && !Proto->isVariadic()) { + Out << 'X'; + } else { + if (D) { + // If we got a decl, use the "types-as-written" to make sure arrays + // get mangled right. + for (FunctionDecl::param_const_iterator Parm = D->param_begin(), + ParmEnd = D->param_end(); + Parm != ParmEnd; ++Parm) + mangleType((*Parm)->getTypeSourceInfo()->getType()); + } else { + for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(), + ArgEnd = Proto->arg_type_end(); + Arg != ArgEnd; ++Arg) + mangleType(*Arg); + } + // <builtin-type> ::= Z # ellipsis + if (Proto->isVariadic()) + Out << 'Z'; + else + Out << '@'; + } + + mangleThrowSpecification(Proto); +} + +void MicrosoftCXXNameMangler::mangleFunctionClass(const FunctionDecl *FD) { + // <function-class> ::= A # private: near + // ::= B # private: far + // ::= C # private: static near + // ::= D # private: static far + // ::= E # private: virtual near + // ::= F # private: virtual far + // ::= G # private: thunk near + // ::= H # private: thunk far + // ::= I # protected: near + // ::= J # protected: far + // ::= K # protected: static near + // ::= L # protected: static far + // ::= M # protected: virtual near + // ::= N # protected: virtual far + // ::= O # protected: thunk near + // ::= P # protected: thunk far + // ::= Q # public: near + // ::= R # public: far + // ::= S # public: static near + // ::= T # public: static far + // ::= U # public: virtual near + // ::= V # public: virtual far + // ::= W # public: thunk near + // ::= X # public: thunk far + // ::= Y # global near + // ::= Z # global far + if (const CXXMethodDecl *MD 
= dyn_cast<CXXMethodDecl>(FD)) { + switch (MD->getAccess()) { + default: + case AS_private: + if (MD->isStatic()) + Out << 'C'; + else if (MD->isVirtual()) + Out << 'E'; + else + Out << 'A'; + break; + case AS_protected: + if (MD->isStatic()) + Out << 'K'; + else if (MD->isVirtual()) + Out << 'M'; + else + Out << 'I'; + break; + case AS_public: + if (MD->isStatic()) + Out << 'S'; + else if (MD->isVirtual()) + Out << 'U'; + else + Out << 'Q'; + } + } else + Out << 'Y'; +} +void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T) { + // <calling-convention> ::= A # __cdecl + // ::= B # __export __cdecl + // ::= C # __pascal + // ::= D # __export __pascal + // ::= E # __thiscall + // ::= F # __export __thiscall + // ::= G # __stdcall + // ::= H # __export __stdcall + // ::= I # __fastcall + // ::= J # __export __fastcall + // The 'export' calling conventions are from a bygone era + // (*cough*Win16*cough*) when functions were declared for export with + // that keyword. (It didn't actually export them, it just made them so + // that they could be in a DLL and somebody from another module could call + // them.) + switch (T->getCallConv()) { + case CC_Default: + case CC_C: Out << 'A'; break; + case CC_X86ThisCall: Out << 'E'; break; + case CC_X86StdCall: Out << 'G'; break; + case CC_X86FastCall: Out << 'I'; break; + } +} +void MicrosoftCXXNameMangler::mangleThrowSpecification( + const FunctionProtoType *FT) { + // <throw-spec> ::= Z # throw(...) (default) + // ::= @ # throw() or __declspec/__attribute__((nothrow)) + // ::= <type>+ + // NOTE: Since the Microsoft compiler ignores throw specifications, they are + // all actually mangled as 'Z'. (They're ignored because their associated + // functionality isn't implemented, and probably never will be.) 
+ Out << 'Z'; +} + +void MicrosoftCXXNameMangler::mangleType(const UnresolvedUsingType *T) { + assert(false && "Don't know how to mangle UnresolvedUsingTypes yet!"); +} + +// <type> ::= <union-type> | <struct-type> | <class-type> | <enum-type> +// <union-type> ::= T <name> +// <struct-type> ::= U <name> +// <class-type> ::= V <name> +// <enum-type> ::= W <size> <name> +void MicrosoftCXXNameMangler::mangleType(const EnumType *T) { + mangleType(static_cast<const TagType*>(T)); +} +void MicrosoftCXXNameMangler::mangleType(const RecordType *T) { + mangleType(static_cast<const TagType*>(T)); +} +void MicrosoftCXXNameMangler::mangleType(const TagType *T) { + switch (T->getDecl()->getTagKind()) { + case TTK_Union: + Out << 'T'; + break; + case TTK_Struct: + Out << 'U'; + break; + case TTK_Class: + Out << 'V'; + break; + case TTK_Enum: + Out << 'W'; + Out << getASTContext().getTypeSizeInChars( + cast<EnumDecl>(T->getDecl())->getIntegerType()).getQuantity(); + break; + } + mangleName(T->getDecl()); +} + +// <type> ::= <array-type> +// <array-type> ::= P <cvr-qualifiers> [Y <dimension-count> <dimension>+] +// <element-type> # as global +// ::= Q <cvr-qualifiers> [Y <dimension-count> <dimension>+] +// <element-type> # as param +// It's supposed to be the other way around, but for some strange reason, it +// isn't. Today this behavior is retained for the sole purpose of backwards +// compatibility. +void MicrosoftCXXNameMangler::mangleType(const ArrayType *T, bool IsGlobal) { + // This isn't a recursive mangling, so now we have to do it all in this + // one call. 
+ if (IsGlobal) + Out << 'P'; + else + Out << 'Q'; + mangleExtraDimensions(T->getElementType()); +} +void MicrosoftCXXNameMangler::mangleType(const ConstantArrayType *T) { + mangleType(static_cast<const ArrayType *>(T), false); +} +void MicrosoftCXXNameMangler::mangleType(const VariableArrayType *T) { + mangleType(static_cast<const ArrayType *>(T), false); +} +void MicrosoftCXXNameMangler::mangleType(const DependentSizedArrayType *T) { + mangleType(static_cast<const ArrayType *>(T), false); +} +void MicrosoftCXXNameMangler::mangleType(const IncompleteArrayType *T) { + mangleType(static_cast<const ArrayType *>(T), false); +} +void MicrosoftCXXNameMangler::mangleExtraDimensions(QualType ElementTy) { + llvm::SmallVector<llvm::APInt, 3> Dimensions; + for (;;) { + if (ElementTy->isConstantArrayType()) { + const ConstantArrayType *CAT = + static_cast<const ConstantArrayType *>(ElementTy.getTypePtr()); + Dimensions.push_back(CAT->getSize()); + ElementTy = CAT->getElementType(); + } else if (ElementTy->isVariableArrayType()) { + assert(false && "Don't know how to mangle VLAs!"); + } else if (ElementTy->isDependentSizedArrayType()) { + // The dependent expression has to be folded into a constant (TODO). + assert(false && "Don't know how to mangle dependent-sized arrays!"); + } else if (ElementTy->isIncompleteArrayType()) continue; + else break; + } + mangleQualifiers(ElementTy.getQualifiers(), false); + // If there are any additional dimensions, mangle them now. 
+ if (Dimensions.size() > 0) { + Out << 'Y'; + // <dimension-count> ::= <number> # number of extra dimensions + mangleNumber(Dimensions.size()); + for (unsigned Dim = 0; Dim < Dimensions.size(); ++Dim) { + mangleNumber(Dimensions[Dim].getLimitedValue()); + } + } + mangleType(ElementTy.getLocalUnqualifiedType()); +} + +// <type> ::= <pointer-to-member-type> +// <pointer-to-member-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers> +// <class name> <type> +void MicrosoftCXXNameMangler::mangleType(const MemberPointerType *T) { + QualType PointeeType = T->getPointeeType(); + if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) { + Out << '8'; + mangleName(cast<RecordType>(T->getClass())->getDecl()); + mangleType(FPT, NULL, false, true); + } else { + mangleQualifiers(PointeeType.getQualifiers(), true); + mangleName(cast<RecordType>(T->getClass())->getDecl()); + mangleType(PointeeType.getLocalUnqualifiedType()); + } +} + +void MicrosoftCXXNameMangler::mangleType(const TemplateTypeParmType *T) { + assert(false && "Don't know how to mangle TemplateTypeParmTypes yet!"); +} + +// <type> ::= <pointer-type> +// <pointer-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers> <type> +void MicrosoftCXXNameMangler::mangleType(const PointerType *T) { + QualType PointeeTy = T->getPointeeType(); + if (PointeeTy->isArrayType()) { + // Pointers to arrays are mangled like arrays. + mangleExtraDimensions(T->getPointeeType()); + } else if (PointeeTy->isFunctionType()) { + // Function pointers are special. + Out << '6'; + mangleType(static_cast<const FunctionType *>(PointeeTy.getTypePtr()), + NULL, false, false); + } else { + if (!PointeeTy.hasQualifiers()) + // Lack of qualifiers is mangled as 'A'. + Out << 'A'; + mangleType(PointeeTy); + } +} +void MicrosoftCXXNameMangler::mangleType(const ObjCObjectPointerType *T) { + // Object pointers never have qualifiers. 
+ Out << 'A'; + mangleType(T->getPointeeType()); +} + +// <type> ::= <reference-type> +// <reference-type> ::= A <cvr-qualifiers> <type> +void MicrosoftCXXNameMangler::mangleType(const LValueReferenceType *T) { + Out << 'A'; + QualType PointeeTy = T->getPointeeType(); + if (!PointeeTy.hasQualifiers()) + // Lack of qualifiers is mangled as 'A'. + Out << 'A'; + mangleType(PointeeTy); +} + +void MicrosoftCXXNameMangler::mangleType(const RValueReferenceType *T) { + assert(false && "Don't know how to mangle RValueReferenceTypes yet!"); +} + +void MicrosoftCXXNameMangler::mangleType(const ComplexType *T) { + assert(false && "Don't know how to mangle ComplexTypes yet!"); +} + +void MicrosoftCXXNameMangler::mangleType(const VectorType *T) { + assert(false && "Don't know how to mangle VectorTypes yet!"); +} +void MicrosoftCXXNameMangler::mangleType(const ExtVectorType *T) { + assert(false && "Don't know how to mangle ExtVectorTypes yet!"); +} +void MicrosoftCXXNameMangler::mangleType(const DependentSizedExtVectorType *T) { + assert(false && "Don't know how to mangle DependentSizedExtVectorTypes yet!"); +} + +void MicrosoftCXXNameMangler::mangleType(const ObjCInterfaceType *T) { + // ObjC interfaces have structs underlying them. + Out << 'U'; + mangleName(T->getDecl()); +} + +void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T) { + // We don't allow overloading by different protocol qualification, + // so mangling them isn't necessary. 
+ mangleType(T->getBaseType()); +} + +void MicrosoftCXXNameMangler::mangleType(const BlockPointerType *T) { + Out << "_E"; + mangleType(T->getPointeeType()); +} + +void MicrosoftCXXNameMangler::mangleType(const InjectedClassNameType *T) { + assert(false && "Don't know how to mangle InjectedClassNameTypes yet!"); +} + +void MicrosoftCXXNameMangler::mangleType(const TemplateSpecializationType *T) { + assert(false && "Don't know how to mangle TemplateSpecializationTypes yet!"); +} + +void MicrosoftCXXNameMangler::mangleType(const DependentNameType *T) { + assert(false && "Don't know how to mangle DependentNameTypes yet!"); +} + +void MicrosoftCXXNameMangler::mangleType( + const DependentTemplateSpecializationType *T) { + assert(false && + "Don't know how to mangle DependentTemplateSpecializationTypes yet!"); +} + +void MicrosoftCXXNameMangler::mangleType(const TypeOfType *T) { + assert(false && "Don't know how to mangle TypeOfTypes yet!"); +} + +void MicrosoftCXXNameMangler::mangleType(const TypeOfExprType *T) { + assert(false && "Don't know how to mangle TypeOfExprTypes yet!"); +} + +void MicrosoftCXXNameMangler::mangleType(const DecltypeType *T) { + assert(false && "Don't know how to mangle DecltypeTypes yet!"); +} + +void MicrosoftMangleContext::mangleName(const NamedDecl *D, + llvm::SmallVectorImpl<char> &Name) { + assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) && + "Invalid mangleName() call, argument is not a variable or function!"); + assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) && + "Invalid mangleName() call on 'structor decl!"); + + PrettyStackTraceDecl CrashInfo(D, SourceLocation(), + getASTContext().getSourceManager(), + "Mangling declaration"); + + MicrosoftCXXNameMangler Mangler(*this, Name); + return Mangler.mangle(D); +} +void MicrosoftMangleContext::mangleThunk(const CXXMethodDecl *MD, + const ThunkInfo &Thunk, + llvm::SmallVectorImpl<char> &) { + assert(false && "Can't yet mangle thunks!"); +} +void 
MicrosoftMangleContext::mangleCXXDtorThunk(const CXXDestructorDecl *DD, + CXXDtorType Type, + const ThisAdjustment &, + llvm::SmallVectorImpl<char> &) { + assert(false && "Can't yet mangle destructor thunks!"); +} +void MicrosoftMangleContext::mangleGuardVariable(const VarDecl *D, + llvm::SmallVectorImpl<char> &) { + assert(false && "Can't yet mangle guard variables!"); +} +void MicrosoftMangleContext::mangleCXXVTable(const CXXRecordDecl *RD, + llvm::SmallVectorImpl<char> &) { + assert(false && "Can't yet mangle virtual tables!"); +} +void MicrosoftMangleContext::mangleCXXVTT(const CXXRecordDecl *RD, + llvm::SmallVectorImpl<char> &) { + llvm_unreachable("The MS C++ ABI does not have virtual table tables!"); +} +void MicrosoftMangleContext::mangleCXXCtorVTable(const CXXRecordDecl *RD, + int64_t Offset, + const CXXRecordDecl *Type, + llvm::SmallVectorImpl<char> &) { + llvm_unreachable("The MS C++ ABI does not have constructor vtables!"); +} +void MicrosoftMangleContext::mangleCXXRTTI(QualType T, + llvm::SmallVectorImpl<char> &) { + assert(false && "Can't yet mangle RTTI!"); +} +void MicrosoftMangleContext::mangleCXXRTTIName(QualType T, + llvm::SmallVectorImpl<char> &) { + assert(false && "Can't yet mangle RTTI names!"); +} +void MicrosoftMangleContext::mangleCXXCtor(const CXXConstructorDecl *D, + CXXCtorType Type, + llvm::SmallVectorImpl<char> &) { + assert(false && "Can't yet mangle constructors!"); +} +void MicrosoftMangleContext::mangleCXXDtor(const CXXDestructorDecl *D, + CXXDtorType Type, + llvm::SmallVectorImpl<char> &) { + assert(false && "Can't yet mangle destructors!"); +} + +CXXABI *clang::CodeGen::CreateMicrosoftCXXABI(CodeGenModule &CGM) { + return new MicrosoftCXXABI(CGM); +} + diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp b/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp index 9905ca6..6d9d277 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp @@ 
-13,7 +13,7 @@ #include "clang/CodeGen/ModuleBuilder.h" #include "CodeGenModule.h" -#include "clang/CodeGen/CodeGenOptions.h" +#include "clang/Frontend/CodeGenOptions.h" #include "clang/AST/ASTContext.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/Expr.h" diff --git a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp index b29d3cb..c65f203 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp @@ -17,6 +17,7 @@ #include "CodeGenFunction.h" #include "clang/AST/RecordLayout.h" #include "llvm/Type.h" +#include "llvm/Target/TargetData.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/Triple.h" #include "llvm/Support/raw_ostream.h" @@ -280,7 +281,9 @@ class DefaultABIInfo : public ABIInfo { llvm::LLVMContext &VMContext) const; virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context, - llvm::LLVMContext &VMContext) const { + llvm::LLVMContext &VMContext, + const llvm::Type *const *PrefTypes, + unsigned NumPrefTypes) const { FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context, VMContext); for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); @@ -316,6 +319,10 @@ ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty, ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); } +//===----------------------------------------------------------------------===// +// X86-32 ABI Implementation +//===----------------------------------------------------------------------===// + /// X86_32ABIInfo - The X86-32 ABI information. 
class X86_32ABIInfo : public ABIInfo { ASTContext &Context; @@ -343,7 +350,9 @@ public: llvm::LLVMContext &VMContext) const; virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context, - llvm::LLVMContext &VMContext) const { + llvm::LLVMContext &VMContext, + const llvm::Type *const *PrefTypes, + unsigned NumPrefTypes) const { FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context, VMContext); for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); @@ -599,8 +608,7 @@ llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, uint64_t Offset = llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4); llvm::Value *NextAddr = - Builder.CreateGEP(Addr, llvm::ConstantInt::get( - llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset), + Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next"); Builder.CreateStore(NextAddr, VAListAddrAsBPP); @@ -657,9 +665,17 @@ bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable( return false; } +//===----------------------------------------------------------------------===// +// X86-64 ABI Implementation +//===----------------------------------------------------------------------===// + + namespace { /// X86_64ABIInfo - The X86_64 ABI information. class X86_64ABIInfo : public ABIInfo { + ASTContext &Context; + const llvm::TargetData &TD; + enum Class { Integer = 0, SSE, @@ -680,7 +696,7 @@ class X86_64ABIInfo : public ABIInfo { /// always be either NoClass or the result of a previous merge /// call. In addition, this should never be Memory (the caller /// should just return Memory for the aggregate). - Class merge(Class Accum, Class Field) const; + static Class merge(Class Accum, Class Field); /// classify - Determine the x86_64 register classes in which the /// given type T should be passed. @@ -703,8 +719,7 @@ class X86_64ABIInfo : public ABIInfo { /// /// If the \arg Lo class is ComplexX87, then the \arg Hi class will /// also be ComplexX87. 
- void classify(QualType T, ASTContext &Context, uint64_t OffsetBase, - Class &Lo, Class &Hi) const; + void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const; /// getCoerceResult - Given a source type \arg Ty and an LLVM type /// to coerce to, chose the best way to pass Ty in the same place @@ -716,30 +731,33 @@ class X86_64ABIInfo : public ABIInfo { /// type. This makes this code more explicit, and it makes it clearer that we /// are also doing this for correctness in the case of passing scalar types. ABIArgInfo getCoerceResult(QualType Ty, - const llvm::Type *CoerceTo, - ASTContext &Context) const; + const llvm::Type *CoerceTo) const; /// getIndirectResult - Give a source type \arg Ty, return a suitable result /// such that the argument will be returned in memory. - ABIArgInfo getIndirectReturnResult(QualType Ty, ASTContext &Context) const; + ABIArgInfo getIndirectReturnResult(QualType Ty) const; /// getIndirectResult - Give a source type \arg Ty, return a suitable result /// such that the argument will be passed in memory. 
- ABIArgInfo getIndirectResult(QualType Ty, ASTContext &Context) const; + ABIArgInfo getIndirectResult(QualType Ty) const; ABIArgInfo classifyReturnType(QualType RetTy, - ASTContext &Context, llvm::LLVMContext &VMContext) const; ABIArgInfo classifyArgumentType(QualType Ty, - ASTContext &Context, llvm::LLVMContext &VMContext, unsigned &neededInt, - unsigned &neededSSE) const; + unsigned &neededSSE, + const llvm::Type *PrefType) const; public: + X86_64ABIInfo(ASTContext &Ctx, const llvm::TargetData &td) + : Context(Ctx), TD(td) {} + virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context, - llvm::LLVMContext &VMContext) const; + llvm::LLVMContext &VMContext, + const llvm::Type *const *PrefTypes, + unsigned NumPrefTypes) const; virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const; @@ -747,7 +765,8 @@ public: class X86_64TargetCodeGenInfo : public TargetCodeGenInfo { public: - X86_64TargetCodeGenInfo():TargetCodeGenInfo(new X86_64ABIInfo()) {} + X86_64TargetCodeGenInfo(ASTContext &Ctx, const llvm::TargetData &TD) + : TargetCodeGenInfo(new X86_64ABIInfo(Ctx, TD)) {} int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const { return 7; @@ -771,8 +790,7 @@ public: } -X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, - Class Field) const { +X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) { // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is // classified recursively so that always two fields are // considered. 
The resulting class is calculated according to @@ -800,22 +818,19 @@ X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, "Invalid accumulated classification during merge."); if (Accum == Field || Field == NoClass) return Accum; - else if (Field == Memory) + if (Field == Memory) return Memory; - else if (Accum == NoClass) + if (Accum == NoClass) return Field; - else if (Accum == Integer || Field == Integer) + if (Accum == Integer || Field == Integer) return Integer; - else if (Field == X87 || Field == X87Up || Field == ComplexX87 || - Accum == X87 || Accum == X87Up) + if (Field == X87 || Field == X87Up || Field == ComplexX87 || + Accum == X87 || Accum == X87Up) return Memory; - else - return SSE; + return SSE; } -void X86_64ABIInfo::classify(QualType Ty, - ASTContext &Context, - uint64_t OffsetBase, +void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo, Class &Hi) const { // FIXME: This code can be simplified by introducing a simple value class for // Class pairs with appropriate constructor methods for the various @@ -848,17 +863,29 @@ void X86_64ABIInfo::classify(QualType Ty, } // FIXME: _Decimal32 and _Decimal64 are SSE. // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). - } else if (const EnumType *ET = Ty->getAs<EnumType>()) { + return; + } + + if (const EnumType *ET = Ty->getAs<EnumType>()) { // Classify the underlying integer type. 
- classify(ET->getDecl()->getIntegerType(), Context, OffsetBase, Lo, Hi); - } else if (Ty->hasPointerRepresentation()) { + classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi); + return; + } + + if (Ty->hasPointerRepresentation()) { Current = Integer; - } else if (Ty->isMemberPointerType()) { + return; + } + + if (Ty->isMemberPointerType()) { if (Ty->isMemberFunctionPointerType()) Lo = Hi = Integer; else Current = Integer; - } else if (const VectorType *VT = Ty->getAs<VectorType>()) { + return; + } + + if (const VectorType *VT = Ty->getAs<VectorType>()) { uint64_t Size = Context.getTypeSize(VT); if (Size == 32) { // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x @@ -890,11 +917,14 @@ void X86_64ABIInfo::classify(QualType Ty, Lo = SSE; Hi = SSEUp; } - } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) { + return; + } + + if (const ComplexType *CT = Ty->getAs<ComplexType>()) { QualType ET = Context.getCanonicalType(CT->getElementType()); uint64_t Size = Context.getTypeSize(Ty); - if (ET->isIntegralType()) { + if (ET->isIntegralOrEnumerationType()) { if (Size <= 64) Current = Integer; else if (Size <= 128) @@ -912,7 +942,11 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64; if (Hi == NoClass && EB_Real != EB_Imag) Hi = Lo; - } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { + + return; + } + + if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { // Arrays are treated like structures. 
uint64_t Size = Context.getTypeSize(Ty); @@ -936,7 +970,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t ArraySize = AT->getSize().getZExtValue(); for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) { Class FieldLo, FieldHi; - classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi); + classify(AT->getElementType(), Offset, FieldLo, FieldHi); Lo = merge(Lo, FieldLo); Hi = merge(Hi, FieldHi); if (Lo == Memory || Hi == Memory) @@ -947,7 +981,10 @@ void X86_64ABIInfo::classify(QualType Ty, if (Hi == Memory) Lo = Memory; assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification."); - } else if (const RecordType *RT = Ty->getAs<RecordType>()) { + return; + } + + if (const RecordType *RT = Ty->getAs<RecordType>()) { uint64_t Size = Context.getTypeSize(Ty); // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger @@ -988,7 +1025,7 @@ void X86_64ABIInfo::classify(QualType Ty, // initialized to class NO_CLASS. Class FieldLo, FieldHi; uint64_t Offset = OffsetBase + Layout.getBaseClassOffset(Base); - classify(i->getType(), Context, Offset, FieldLo, FieldHi); + classify(i->getType(), Offset, FieldLo, FieldHi); Lo = merge(Lo, FieldLo); Hi = merge(Hi, FieldHi); if (Lo == Memory || Hi == Memory) @@ -1047,7 +1084,7 @@ void X86_64ABIInfo::classify(QualType Ty, FieldHi = EB_Hi ? 
Integer : NoClass; } } else - classify(i->getType(), Context, Offset, FieldLo, FieldHi); + classify(i->getType(), Offset, FieldLo, FieldHi); Lo = merge(Lo, FieldLo); Hi = merge(Hi, FieldHi); if (Lo == Memory || Hi == Memory) @@ -1074,9 +1111,8 @@ void X86_64ABIInfo::classify(QualType Ty, } ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty, - const llvm::Type *CoerceTo, - ASTContext &Context) const { - if (CoerceTo == llvm::Type::getInt64Ty(CoerceTo->getContext())) { + const llvm::Type *CoerceTo) const { + if (CoerceTo->isIntegerTy(64) || isa<llvm::PointerType>(CoerceTo)) { // Integer and pointer types will end up in a general purpose // register. @@ -1084,10 +1120,21 @@ ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty, if (const EnumType *EnumTy = Ty->getAs<EnumType>()) Ty = EnumTy->getDecl()->getIntegerType(); - if (Ty->isIntegralType() || Ty->hasPointerRepresentation()) + if (Ty->isIntegralOrEnumerationType() || Ty->hasPointerRepresentation()) return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); - } else if (CoerceTo == llvm::Type::getDoubleTy(CoerceTo->getContext())) { + + // If this is a 8/16/32-bit structure that is passed as an int64, then it + // will be passed in the low 8/16/32-bits of a 64-bit GPR, which is the same + // as how an i8/i16/i32 is passed. Coerce to a i8/i16/i32 instead of a i64. 
+ switch (Context.getTypeSizeInChars(Ty).getQuantity()) { + default: break; + case 1: CoerceTo = llvm::Type::getInt8Ty(CoerceTo->getContext()); break; + case 2: CoerceTo = llvm::Type::getInt16Ty(CoerceTo->getContext()); break; + case 4: CoerceTo = llvm::Type::getInt32Ty(CoerceTo->getContext()); break; + } + + } else if (CoerceTo->isDoubleTy()) { assert(Ty.isCanonical() && "should always have a canonical type here"); assert(!Ty.hasQualifiers() && "should never have a qualified type here"); @@ -1095,13 +1142,17 @@ ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty, if (Ty == Context.FloatTy || Ty == Context.DoubleTy) return ABIArgInfo::getDirect(); + // If this is a 32-bit structure that is passed as a double, then it will be + // passed in the low 32-bits of the XMM register, which is the same as how a + // float is passed. Coerce to a float instead of a double. + if (Context.getTypeSizeInChars(Ty).getQuantity() == 4) + CoerceTo = llvm::Type::getFloatTy(CoerceTo->getContext()); } return ABIArgInfo::getCoerce(CoerceTo); } -ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty, - ASTContext &Context) const { +ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const { // If this is a scalar LLVM value then assume LLVM will pass it in the right // place naturally. if (!CodeGenFunction::hasAggregateLLVMType(Ty)) { @@ -1116,8 +1167,7 @@ ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty, return ABIArgInfo::getIndirect(0); } -ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, - ASTContext &Context) const { +ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty) const { // If this is a scalar LLVM value then assume LLVM will pass it in the right // place naturally. 
if (!CodeGenFunction::hasAggregateLLVMType(Ty)) { @@ -1141,13 +1191,12 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, return ABIArgInfo::getIndirect(0); } -ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy, - ASTContext &Context, - llvm::LLVMContext &VMContext) const { +ABIArgInfo X86_64ABIInfo:: +classifyReturnType(QualType RetTy, llvm::LLVMContext &VMContext) const { // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the // classification algorithm. X86_64ABIInfo::Class Lo, Hi; - classify(RetTy, Context, 0, Lo, Hi); + classify(RetTy, 0, Lo, Hi); // Check some invariants. assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); @@ -1166,7 +1215,7 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy, // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via // hidden argument. case Memory: - return getIndirectReturnResult(RetTy, Context); + return getIndirectReturnResult(RetTy); // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next // available register of the sequence %rax, %rdx is used. @@ -1236,15 +1285,40 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy, break; } - return getCoerceResult(RetTy, ResType, Context); + return getCoerceResult(RetTy, ResType); } -ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context, +static const llvm::Type *Get8ByteTypeAtOffset(const llvm::Type *PrefType, + unsigned Offset, + const llvm::TargetData &TD) { + if (PrefType == 0) return 0; + + // Pointers are always 8-bytes at offset 0. + if (Offset == 0 && isa<llvm::PointerType>(PrefType)) + return PrefType; + + // TODO: 1/2/4/8 byte integers are also interesting, but we have to know that + // the "hole" is not used in the containing struct (just undef padding). + const llvm::StructType *STy = dyn_cast<llvm::StructType>(PrefType); + if (STy == 0) return 0; + + // If this is a struct, recurse into the field at the specified offset. 
+ const llvm::StructLayout *SL = TD.getStructLayout(STy); + if (Offset >= SL->getSizeInBytes()) return 0; + + unsigned FieldIdx = SL->getElementContainingOffset(Offset); + Offset -= SL->getElementOffset(FieldIdx); + + return Get8ByteTypeAtOffset(STy->getElementType(FieldIdx), Offset, TD); +} + +ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, llvm::LLVMContext &VMContext, unsigned &neededInt, - unsigned &neededSSE) const { + unsigned &neededSSE, + const llvm::Type *PrefType)const{ X86_64ABIInfo::Class Lo, Hi; - classify(Ty, Context, 0, Lo, Hi); + classify(Ty, 0, Lo, Hi); // Check some invariants. // FIXME: Enforce these by construction. @@ -1267,7 +1341,7 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context, // COMPLEX_X87, it is passed in memory. case X87: case ComplexX87: - return getIndirectResult(Ty, Context); + return getIndirectResult(Ty); case SSEUp: case X87Up: @@ -1277,8 +1351,16 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context, // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 // and %r9 is used. case Integer: - ++neededInt; + // It is always safe to classify this as an i64 argument. ResType = llvm::Type::getInt64Ty(VMContext); + ++neededInt; + + // If we can choose a better 8-byte type based on the preferred type, and if + // that type is still passed in a GPR, use it. + if (const llvm::Type *PrefTypeLo = Get8ByteTypeAtOffset(PrefType, 0, TD)) + if (isa<llvm::IntegerType>(PrefTypeLo) || + isa<llvm::PointerType>(PrefTypeLo)) + ResType = PrefTypeLo; break; // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next @@ -1301,11 +1383,22 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context, break; case NoClass: break; - case Integer: - ResType = llvm::StructType::get(VMContext, ResType, - llvm::Type::getInt64Ty(VMContext), NULL); + + case Integer: { + // It is always safe to classify this as an i64 argument. 
+ const llvm::Type *HiType = llvm::Type::getInt64Ty(VMContext); ++neededInt; + + // If we can choose a better 8-byte type based on the preferred type, and if + // that type is still passed in a GPR, use it. + if (const llvm::Type *PrefTypeHi = Get8ByteTypeAtOffset(PrefType, 8, TD)) + if (isa<llvm::IntegerType>(PrefTypeHi) || + isa<llvm::PointerType>(PrefTypeHi)) + HiType = PrefTypeHi; + + ResType = llvm::StructType::get(VMContext, ResType, HiType, NULL); break; + } // X87Up generally doesn't occur here (long double is passed in // memory), except in situations involving unions. @@ -1325,13 +1418,14 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context, break; } - return getCoerceResult(Ty, ResType, Context); + return getCoerceResult(Ty, ResType); } void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context, - llvm::LLVMContext &VMContext) const { - FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), - Context, VMContext); + llvm::LLVMContext &VMContext, + const llvm::Type *const *PrefTypes, + unsigned NumPrefTypes) const { + FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), VMContext); // Keep track of the number of assigned registers. unsigned freeIntRegs = 6, freeSSERegs = 8; @@ -1345,9 +1439,17 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context, // get assigned (in left-to-right order) for passing as follows... for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); it != ie; ++it) { + // If the client specified a preferred IR type to use, pass it down to + // classifyArgumentType. 
+ const llvm::Type *PrefType = 0; + if (NumPrefTypes) { + PrefType = *PrefTypes++; + --NumPrefTypes; + } + unsigned neededInt, neededSSE; - it->info = classifyArgumentType(it->type, Context, VMContext, - neededInt, neededSSE); + it->info = classifyArgumentType(it->type, VMContext, + neededInt, neededSSE, PrefType); // AMD64-ABI 3.2.3p3: If there are no registers available for any // eightbyte of an argument, the whole argument is passed on the @@ -1357,7 +1459,7 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context, freeIntRegs -= neededInt; freeSSERegs -= neededSSE; } else { - it->info = getIndirectResult(it->type, Context); + it->info = getIndirectResult(it->type); } } } @@ -1380,12 +1482,11 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, // overflow_arg_area = (overflow_arg_area + 15) & ~15; llvm::Value *Offset = - llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()), 15); + llvm::ConstantInt::get(CGF.Int32Ty, 15); overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset); llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area, - llvm::Type::getInt64Ty(CGF.getLLVMContext())); - llvm::Value *Mask = llvm::ConstantInt::get( - llvm::Type::getInt64Ty(CGF.getLLVMContext()), ~15LL); + CGF.Int64Ty); + llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~15LL); overflow_arg_area = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), overflow_arg_area->getType(), @@ -1405,8 +1506,7 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; llvm::Value *Offset = - llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()), - (SizeInBytes + 7) & ~7); + llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7); overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, "overflow_arg_area.next"); CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); @@ -1418,8 +1518,6 @@ static 
llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const { llvm::LLVMContext &VMContext = CGF.getLLVMContext(); - const llvm::Type *i32Ty = llvm::Type::getInt32Ty(VMContext); - const llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext); // Assume that va_list type is correct; should be pointer to LLVM type: // struct { @@ -1431,8 +1529,7 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, unsigned neededInt, neededSSE; Ty = CGF.getContext().getCanonicalType(Ty); - ABIArgInfo AI = classifyArgumentType(Ty, CGF.getContext(), VMContext, - neededInt, neededSSE); + ABIArgInfo AI = classifyArgumentType(Ty, VMContext, neededInt, neededSSE, 0); // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed // in the registers. If not go to step 7. @@ -1456,21 +1553,16 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, if (neededInt) { gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p"); gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset"); - InRegs = - CGF.Builder.CreateICmpULE(gp_offset, - llvm::ConstantInt::get(i32Ty, - 48 - neededInt * 8), - "fits_in_gp"); + InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8); + InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp"); } if (neededSSE) { fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p"); fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset"); llvm::Value *FitsInFP = - CGF.Builder.CreateICmpULE(fp_offset, - llvm::ConstantInt::get(i32Ty, - 176 - neededSSE * 16), - "fits_in_fp"); + llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16); + FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp"); InRegs = InRegs ? 
CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP; } @@ -1525,45 +1617,42 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); RegAddr = CGF.Builder.CreateBitCast(RegAddr, llvm::PointerType::getUnqual(LTy)); + } else if (neededSSE == 1) { + RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); + RegAddr = CGF.Builder.CreateBitCast(RegAddr, + llvm::PointerType::getUnqual(LTy)); } else { - if (neededSSE == 1) { - RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); - RegAddr = CGF.Builder.CreateBitCast(RegAddr, - llvm::PointerType::getUnqual(LTy)); - } else { - assert(neededSSE == 2 && "Invalid number of needed registers!"); - // SSE registers are spaced 16 bytes apart in the register save - // area, we need to collect the two eightbytes together. - llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset); - llvm::Value *RegAddrHi = - CGF.Builder.CreateGEP(RegAddrLo, - llvm::ConstantInt::get(i32Ty, 16)); - const llvm::Type *DblPtrTy = - llvm::PointerType::getUnqual(DoubleTy); - const llvm::StructType *ST = llvm::StructType::get(VMContext, DoubleTy, - DoubleTy, NULL); - llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST); - V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo, - DblPtrTy)); - CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); - V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi, - DblPtrTy)); - CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); - RegAddr = CGF.Builder.CreateBitCast(Tmp, - llvm::PointerType::getUnqual(LTy)); - } + assert(neededSSE == 2 && "Invalid number of needed registers!"); + // SSE registers are spaced 16 bytes apart in the register save + // area, we need to collect the two eightbytes together. 
+ llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset); + llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16); + const llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext); + const llvm::Type *DblPtrTy = + llvm::PointerType::getUnqual(DoubleTy); + const llvm::StructType *ST = llvm::StructType::get(VMContext, DoubleTy, + DoubleTy, NULL); + llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST); + V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo, + DblPtrTy)); + CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); + V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi, + DblPtrTy)); + CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); + RegAddr = CGF.Builder.CreateBitCast(Tmp, + llvm::PointerType::getUnqual(LTy)); } // AMD64-ABI 3.5.7p5: Step 5. Set: // l->gp_offset = l->gp_offset + num_gp * 8 // l->fp_offset = l->fp_offset + num_fp * 16. if (neededInt) { - llvm::Value *Offset = llvm::ConstantInt::get(i32Ty, neededInt * 8); + llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8); CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), gp_offset_p); } if (neededSSE) { - llvm::Value *Offset = llvm::ConstantInt::get(i32Ty, neededSSE * 16); + llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16); CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), fp_offset_p); } @@ -1582,11 +1671,14 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, ResAddr->reserveOperandSpace(2); ResAddr->addIncoming(RegAddr, InRegBlock); ResAddr->addIncoming(MemAddr, InMemBlock); - return ResAddr; } + + +//===----------------------------------------------------------------------===// // PIC16 ABI Implementation +//===----------------------------------------------------------------------===// namespace { @@ -1600,7 +1692,9 @@ class PIC16ABIInfo : public ABIInfo { llvm::LLVMContext &VMContext) const; virtual void 
computeInfo(CGFunctionInfo &FI, ASTContext &Context, - llvm::LLVMContext &VMContext) const { + llvm::LLVMContext &VMContext, + const llvm::Type *const *PrefTypes, + unsigned NumPrefTypes) const { FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context, VMContext); for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); @@ -1636,7 +1730,7 @@ ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty, } llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, - CodeGenFunction &CGF) const { + CodeGenFunction &CGF) const { const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); const llvm::Type *BPP = llvm::PointerType::getUnqual(BP); @@ -1719,7 +1813,9 @@ PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF, } +//===----------------------------------------------------------------------===// // ARM ABI Implementation +//===----------------------------------------------------------------------===// namespace { @@ -1749,7 +1845,9 @@ private: llvm::LLVMContext &VMContext) const; virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context, - llvm::LLVMContext &VMContext) const; + llvm::LLVMContext &VMContext, + const llvm::Type *const *PrefTypes, + unsigned NumPrefTypes) const; virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, CodeGenFunction &CGF) const; @@ -1768,7 +1866,9 @@ public: } void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context, - llvm::LLVMContext &VMContext) const { + llvm::LLVMContext &VMContext, + const llvm::Type *const *PrefTypes, + unsigned NumPrefTypes) const { FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context, VMContext); for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); @@ -1776,14 +1876,23 @@ void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context, it->info = classifyArgumentType(it->type, Context, VMContext); } - // ARM always overrides the calling convention. 
+ const llvm::Triple &Triple(Context.Target.getTriple()); + llvm::CallingConv::ID DefaultCC; + if (Triple.getEnvironmentName() == "gnueabi" || + Triple.getEnvironmentName() == "eabi") + DefaultCC = llvm::CallingConv::ARM_AAPCS; + else + DefaultCC = llvm::CallingConv::ARM_APCS; + switch (getABIKind()) { case APCS: - FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS); + if (DefaultCC != llvm::CallingConv::ARM_APCS) + FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS); break; case AAPCS: - FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS); + if (DefaultCC != llvm::CallingConv::ARM_AAPCS) + FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS); break; case AAPCS_VFP: @@ -1808,6 +1917,11 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, if (isEmptyRecord(Context, Ty, true)) return ABIArgInfo::getIgnore(); + // Structures with either a non-trivial destructor or a non-trivial + // copy constructor are always indirect. + if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) + return ABIArgInfo::getIndirect(0, /*ByVal=*/false); + // FIXME: This is kind of nasty... but there isn't much choice because the ARM // backend doesn't support byval. // FIXME: This doesn't handle alignment > 64 bits. @@ -1927,6 +2041,11 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, ABIArgInfo::getExtend() : ABIArgInfo::getDirect()); } + // Structures with either a non-trivial destructor or a non-trivial + // copy constructor are always indirect. + if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy)) + return ABIArgInfo::getIndirect(0, /*ByVal=*/false); + // Are we following APCS? 
if (getABIKind() == APCS) { if (isEmptyRecord(Context, RetTy, false)) @@ -1976,7 +2095,7 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, } llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, - CodeGenFunction &CGF) const { + CodeGenFunction &CGF) const { // FIXME: Need to handle alignment const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext()); const llvm::Type *BPP = llvm::PointerType::getUnqual(BP); @@ -1992,8 +2111,7 @@ llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, uint64_t Offset = llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4); llvm::Value *NextAddr = - Builder.CreateGEP(Addr, llvm::ConstantInt::get( - llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset), + Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next"); Builder.CreateStore(NextAddr, VAListAddrAsBPP); @@ -2017,7 +2135,9 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy, } } +//===----------------------------------------------------------------------===// // SystemZ ABI Implementation +//===----------------------------------------------------------------------===// namespace { @@ -2031,7 +2151,9 @@ class SystemZABIInfo : public ABIInfo { llvm::LLVMContext &VMContext) const; virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context, - llvm::LLVMContext &VMContext) const { + llvm::LLVMContext &VMContext, + const llvm::Type *const *PrefTypes, + unsigned NumPrefTypes) const { FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context, VMContext); for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); @@ -2101,7 +2223,9 @@ ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty, } } +//===----------------------------------------------------------------------===// // MSP430 ABI Implementation +//===----------------------------------------------------------------------===// namespace { @@ -2138,8 +2262,11 @@ void 
MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D, } } +//===----------------------------------------------------------------------===// // MIPS ABI Implementation. This works for both little-endian and // big-endian variants. +//===----------------------------------------------------------------------===// + namespace { class MIPSTargetCodeGenInfo : public TargetCodeGenInfo { public: @@ -2195,10 +2322,10 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() const { // For now we just cache the TargetCodeGenInfo in CodeGenModule and don't // free it. - const llvm::Triple &Triple(getContext().Target.getTriple()); + const llvm::Triple &Triple = getContext().Target.getTriple(); switch (Triple.getArch()) { default: - return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo); + return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo()); case llvm::Triple::mips: case llvm::Triple::mipsel: @@ -2247,6 +2374,7 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() const { } case llvm::Triple::x86_64: - return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo()); + return *(TheTargetCodeGenInfo = + new X86_64TargetCodeGenInfo(Context, TheTargetData)); } } diff --git a/contrib/llvm/tools/clang/lib/Driver/Action.cpp b/contrib/llvm/tools/clang/lib/Driver/Action.cpp index b9a3306..f34971b 100644 --- a/contrib/llvm/tools/clang/lib/Driver/Action.cpp +++ b/contrib/llvm/tools/clang/lib/Driver/Action.cpp @@ -30,6 +30,7 @@ const char *Action::getClassName(ActionClass AC) { case AssembleJobClass: return "assembler"; case LinkJobClass: return "linker"; case LipoJobClass: return "lipo"; + case DsymutilJobClass: return "dsymutil"; } assert(0 && "invalid class"); @@ -79,3 +80,7 @@ LinkJobAction::LinkJobAction(ActionList &Inputs, types::ID Type) LipoJobAction::LipoJobAction(ActionList &Inputs, types::ID Type) : JobAction(LipoJobClass, Inputs, Type) { } + +DsymutilJobAction::DsymutilJobAction(ActionList &Inputs, types::ID Type) + : 
JobAction(DsymutilJobClass, Inputs, Type) { +} diff --git a/contrib/llvm/tools/clang/lib/Driver/Arg.cpp b/contrib/llvm/tools/clang/lib/Driver/Arg.cpp index 7e61a1d..83d0d26 100644 --- a/contrib/llvm/tools/clang/lib/Driver/Arg.cpp +++ b/contrib/llvm/tools/clang/lib/Driver/Arg.cpp @@ -10,47 +10,59 @@ #include "clang/Driver/Arg.h" #include "clang/Driver/ArgList.h" #include "clang/Driver/Option.h" +#include "llvm/ADT/SmallString.h" #include "llvm/ADT/Twine.h" #include "llvm/Support/raw_ostream.h" using namespace clang::driver; -Arg::Arg(ArgClass _Kind, const Option *_Opt, unsigned _Index, - const Arg *_BaseArg) - : Kind(_Kind), Opt(_Opt), BaseArg(_BaseArg), Index(_Index), Claimed(false) { +Arg::Arg(const Option *_Opt, unsigned _Index, const Arg *_BaseArg) + : Opt(_Opt), BaseArg(_BaseArg), Index(_Index), + Claimed(false), OwnsValues(false) { } -Arg::~Arg() { } +Arg::Arg(const Option *_Opt, unsigned _Index, + const char *Value0, const Arg *_BaseArg) + : Opt(_Opt), BaseArg(_BaseArg), Index(_Index), + Claimed(false), OwnsValues(false) { + Values.push_back(Value0); +} + +Arg::Arg(const Option *_Opt, unsigned _Index, + const char *Value0, const char *Value1, const Arg *_BaseArg) + : Opt(_Opt), BaseArg(_BaseArg), Index(_Index), + Claimed(false), OwnsValues(false) { + Values.push_back(Value0); + Values.push_back(Value1); +} + +Arg::~Arg() { + if (OwnsValues) { + for (unsigned i = 0, e = Values.size(); i != e; ++i) + delete[] Values[i]; + } +} void Arg::dump() const { llvm::errs() << "<"; - switch (Kind) { - default: - assert(0 && "Invalid kind"); -#define P(N) case N: llvm::errs() << #N; break - P(FlagClass); - P(PositionalClass); - P(JoinedClass); - P(SeparateClass); - P(CommaJoinedClass); - P(JoinedAndSeparateClass); -#undef P - } llvm::errs() << " Opt:"; Opt->dump(); llvm::errs() << " Index:" << Index; - if (isa<CommaJoinedArg>(this) || isa<SeparateArg>(this)) - llvm::errs() << " NumValues:" << getNumValues(); + llvm::errs() << " Values: ["; + for (unsigned i = 0, e = 
Values.size(); i != e; ++i) { + if (i) llvm::errs() << ", "; + llvm::errs() << "'" << Values[i] << "'"; + } - llvm::errs() << ">\n"; + llvm::errs() << "]>\n"; } std::string Arg::getAsString(const ArgList &Args) const { - std::string Res; - llvm::raw_string_ostream OS(Res); + llvm::SmallString<256> Res; + llvm::raw_svector_ostream OS(Res); ArgStringList ASL; render(Args, ASL); @@ -74,117 +86,36 @@ void Arg::renderAsInput(const ArgList &Args, ArgStringList &Output) const { Output.push_back(getValue(Args, i)); } -FlagArg::FlagArg(const Option *Opt, unsigned Index, const Arg *BaseArg) - : Arg(FlagClass, Opt, Index, BaseArg) { -} - -void FlagArg::render(const ArgList &Args, ArgStringList &Output) const { - Output.push_back(Args.getArgString(getIndex())); -} - -const char *FlagArg::getValue(const ArgList &Args, unsigned N) const { - assert(0 && "Invalid index."); - return 0; -} - -PositionalArg::PositionalArg(const Option *Opt, unsigned Index, - const Arg *BaseArg) - : Arg(PositionalClass, Opt, Index, BaseArg) { -} - -void PositionalArg::render(const ArgList &Args, ArgStringList &Output) const { - Output.push_back(Args.getArgString(getIndex())); -} - -const char *PositionalArg::getValue(const ArgList &Args, unsigned N) const { - assert(N < getNumValues() && "Invalid index."); - return Args.getArgString(getIndex()); -} - -JoinedArg::JoinedArg(const Option *Opt, unsigned Index, const Arg *BaseArg) - : Arg(JoinedClass, Opt, Index, BaseArg) { -} - -void JoinedArg::render(const ArgList &Args, ArgStringList &Output) const { - if (getOption().hasForceSeparateRender()) { - Output.push_back(getOption().getName()); - Output.push_back(getValue(Args, 0)); - } else { - Output.push_back(Args.getArgString(getIndex())); - } -} - -const char *JoinedArg::getValue(const ArgList &Args, unsigned N) const { - assert(N < getNumValues() && "Invalid index."); - // FIXME: Avoid strlen. 
- return Args.getArgString(getIndex()) + strlen(getOption().getName()); -} - -CommaJoinedArg::CommaJoinedArg(const Option *Opt, unsigned Index, - const char *Str, const Arg *BaseArg) - : Arg(CommaJoinedClass, Opt, Index, BaseArg) { - const char *Prev = Str; - for (;; ++Str) { - char c = *Str; - - if (!c) { - if (Prev != Str) - Values.push_back(std::string(Prev, Str)); - break; - } else if (c == ',') { - if (Prev != Str) - Values.push_back(std::string(Prev, Str)); - Prev = Str + 1; +void Arg::render(const ArgList &Args, ArgStringList &Output) const { + switch (getOption().getRenderStyle()) { + case Option::RenderValuesStyle: + for (unsigned i = 0, e = getNumValues(); i != e; ++i) + Output.push_back(getValue(Args, i)); + break; + + case Option::RenderCommaJoinedStyle: { + llvm::SmallString<256> Res; + llvm::raw_svector_ostream OS(Res); + OS << getOption().getName(); + for (unsigned i = 0, e = getNumValues(); i != e; ++i) { + if (i) OS << ','; + OS << getValue(Args, i); } + Output.push_back(Args.MakeArgString(OS.str())); + break; } -} - -void CommaJoinedArg::render(const ArgList &Args, ArgStringList &Output) const { - Output.push_back(Args.getArgString(getIndex())); -} - -const char *CommaJoinedArg::getValue(const ArgList &Args, unsigned N) const { - assert(N < getNumValues() && "Invalid index."); - return Values[N].c_str(); -} - -SeparateArg::SeparateArg(const Option *Opt, unsigned Index, unsigned _NumValues, - const Arg *BaseArg) - : Arg(SeparateClass, Opt, Index, BaseArg), NumValues(_NumValues) { -} + + case Option::RenderJoinedStyle: + Output.push_back(Args.GetOrMakeJoinedArgString( + getIndex(), getOption().getName(), getValue(Args, 0))); + for (unsigned i = 1, e = getNumValues(); i != e; ++i) + Output.push_back(getValue(Args, i)); + break; -void SeparateArg::render(const ArgList &Args, ArgStringList &Output) const { - if (getOption().hasForceJoinedRender()) { - assert(getNumValues() == 1 && "Cannot force joined render with > 1 args."); - 
Output.push_back(Args.MakeArgString(llvm::StringRef(getOption().getName()) + - getValue(Args, 0))); - } else { - Output.push_back(Args.getArgString(getIndex())); - for (unsigned i = 0; i < NumValues; ++i) + case Option::RenderSeparateStyle: + Output.push_back(getOption().getName()); + for (unsigned i = 0, e = getNumValues(); i != e; ++i) Output.push_back(getValue(Args, i)); + break; } } - -const char *SeparateArg::getValue(const ArgList &Args, unsigned N) const { - assert(N < getNumValues() && "Invalid index."); - return Args.getArgString(getIndex() + 1 + N); -} - -JoinedAndSeparateArg::JoinedAndSeparateArg(const Option *Opt, unsigned Index, - const Arg *BaseArg) - : Arg(JoinedAndSeparateClass, Opt, Index, BaseArg) { -} - -void JoinedAndSeparateArg::render(const ArgList &Args, - ArgStringList &Output) const { - Output.push_back(Args.getArgString(getIndex())); - Output.push_back(Args.getArgString(getIndex() + 1)); -} - -const char *JoinedAndSeparateArg::getValue(const ArgList &Args, - unsigned N) const { - assert(N < getNumValues() && "Invalid index."); - if (N == 0) - return Args.getArgString(getIndex()) + strlen(getOption().getName()); - return Args.getArgString(getIndex() + 1); -} diff --git a/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp b/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp index 3d07431..9101523 100644 --- a/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp +++ b/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp @@ -36,7 +36,7 @@ void arg_iterator::SkipToNextArg() { // -ArgList::ArgList(arglist_type &_Args) : Args(_Args) { +ArgList::ArgList() { } ArgList::~ArgList() { @@ -62,12 +62,14 @@ Arg *ArgList::getLastArg(OptSpecifier Id) const { } Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1) const { - Arg *Res, *A0 = getLastArgNoClaim(Id0), *A1 = getLastArgNoClaim(Id1); - - if (A0 && A1) - Res = A0->getIndex() > A1->getIndex() ? A0 : A1; - else - Res = A0 ? 
A0 : A1; + Arg *Res = 0; + for (const_reverse_iterator it = rbegin(), ie = rend(); it != ie; ++it) { + if ((*it)->getOption().matches(Id0) || + (*it)->getOption().matches(Id1)) { + Res = *it; + break; + } + } if (Res) Res->claim(); @@ -78,24 +80,32 @@ Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1) const { Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1, OptSpecifier Id2) const { Arg *Res = 0; - Arg *A0 = getLastArgNoClaim(Id0); - Arg *A1 = getLastArgNoClaim(Id1); - Arg *A2 = getLastArgNoClaim(Id2); - - int A0Idx = A0 ? (int) A0->getIndex() : -1; - int A1Idx = A1 ? (int) A1->getIndex() : -1; - int A2Idx = A2 ? (int) A2->getIndex() : -1; - - if (A0Idx > A1Idx) { - if (A0Idx > A2Idx) - Res = A0; - else if (A2Idx != -1) - Res = A2; - } else { - if (A1Idx > A2Idx) - Res = A1; - else if (A2Idx != -1) - Res = A2; + for (const_reverse_iterator it = rbegin(), ie = rend(); it != ie; ++it) { + if ((*it)->getOption().matches(Id0) || + (*it)->getOption().matches(Id1) || + (*it)->getOption().matches(Id2)) { + Res = *it; + break; + } + } + + if (Res) + Res->claim(); + + return Res; +} + +Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1, + OptSpecifier Id2, OptSpecifier Id3) const { + Arg *Res = 0; + for (const_reverse_iterator it = rbegin(), ie = rend(); it != ie; ++it) { + if ((*it)->getOption().matches(Id0) || + (*it)->getOption().matches(Id1) || + (*it)->getOption().matches(Id2) || + (*it)->getOption().matches(Id3)) { + Res = *it; + break; + } } if (Res) @@ -147,8 +157,8 @@ void ArgList::AddAllArgs(ArgStringList &Output, OptSpecifier Id0, OptSpecifier Id1, OptSpecifier Id2) const { for (arg_iterator it = filtered_begin(Id0, Id1, Id2), ie = filtered_end(); it != ie; ++it) { - it->claim(); - it->render(*this, Output); + (*it)->claim(); + (*it)->render(*this, Output); } } @@ -156,9 +166,9 @@ void ArgList::AddAllArgValues(ArgStringList &Output, OptSpecifier Id0, OptSpecifier Id1, OptSpecifier Id2) const { for (arg_iterator it = 
filtered_begin(Id0, Id1, Id2), ie = filtered_end(); it != ie; ++it) { - it->claim(); - for (unsigned i = 0, e = it->getNumValues(); i != e; ++i) - Output.push_back(it->getValue(*this, i)); + (*it)->claim(); + for (unsigned i = 0, e = (*it)->getNumValues(); i != e; ++i) + Output.push_back((*it)->getValue(*this, i)); } } @@ -167,14 +177,14 @@ void ArgList::AddAllArgsTranslated(ArgStringList &Output, OptSpecifier Id0, bool Joined) const { for (arg_iterator it = filtered_begin(Id0), ie = filtered_end(); it != ie; ++it) { - it->claim(); + (*it)->claim(); if (Joined) { Output.push_back(MakeArgString(llvm::StringRef(Translation) + - it->getValue(*this, 0))); + (*it)->getValue(*this, 0))); } else { Output.push_back(Translation); - Output.push_back(it->getValue(*this, 0)); + Output.push_back((*it)->getValue(*this, 0)); } } } @@ -182,7 +192,7 @@ void ArgList::AddAllArgsTranslated(ArgStringList &Output, OptSpecifier Id0, void ArgList::ClaimAllArgs(OptSpecifier Id0) const { for (arg_iterator it = filtered_begin(Id0), ie = filtered_end(); it != ie; ++it) - it->claim(); + (*it)->claim(); } const char *ArgList::MakeArgString(const llvm::Twine &T) const { @@ -191,10 +201,21 @@ const char *ArgList::MakeArgString(const llvm::Twine &T) const { return MakeArgString(Str.str()); } +const char *ArgList::GetOrMakeJoinedArgString(unsigned Index, + llvm::StringRef LHS, + llvm::StringRef RHS) const { + llvm::StringRef Cur = getArgString(Index); + if (Cur.size() == LHS.size() + RHS.size() && + Cur.startswith(LHS) && Cur.endswith(RHS)) + return Cur.data(); + + return MakeArgString(LHS + RHS); +} + // InputArgList::InputArgList(const char **ArgBegin, const char **ArgEnd) - : ArgList(ActualArgs), NumInputArgStrings(ArgEnd - ArgBegin) { + : NumInputArgStrings(ArgEnd - ArgBegin) { ArgStrings.append(ArgBegin, ArgEnd); } @@ -229,9 +250,8 @@ const char *InputArgList::MakeArgString(llvm::StringRef Str) const { // -DerivedArgList::DerivedArgList(InputArgList &_BaseArgs, bool _OnlyProxy) - : 
ArgList(_OnlyProxy ? _BaseArgs.getArgs() : ActualArgs), - BaseArgs(_BaseArgs), OnlyProxy(_OnlyProxy) { +DerivedArgList::DerivedArgList(const InputArgList &_BaseArgs) + : BaseArgs(_BaseArgs) { } DerivedArgList::~DerivedArgList() { @@ -246,30 +266,33 @@ const char *DerivedArgList::MakeArgString(llvm::StringRef Str) const { } Arg *DerivedArgList::MakeFlagArg(const Arg *BaseArg, const Option *Opt) const { - Arg *A = new FlagArg(Opt, BaseArgs.MakeIndex(Opt->getName()), BaseArg); + Arg *A = new Arg(Opt, BaseArgs.MakeIndex(Opt->getName()), BaseArg); SynthesizedArgs.push_back(A); return A; } Arg *DerivedArgList::MakePositionalArg(const Arg *BaseArg, const Option *Opt, llvm::StringRef Value) const { - Arg *A = new PositionalArg(Opt, BaseArgs.MakeIndex(Value), BaseArg); + unsigned Index = BaseArgs.MakeIndex(Value); + Arg *A = new Arg(Opt, Index, BaseArgs.getArgString(Index), BaseArg); SynthesizedArgs.push_back(A); return A; } Arg *DerivedArgList::MakeSeparateArg(const Arg *BaseArg, const Option *Opt, llvm::StringRef Value) const { - Arg *A = new SeparateArg(Opt, BaseArgs.MakeIndex(Opt->getName(), Value), 1, - BaseArg); + unsigned Index = BaseArgs.MakeIndex(Opt->getName(), Value); + Arg *A = new Arg(Opt, Index, BaseArgs.getArgString(Index + 1), BaseArg); SynthesizedArgs.push_back(A); return A; } Arg *DerivedArgList::MakeJoinedArg(const Arg *BaseArg, const Option *Opt, llvm::StringRef Value) const { - Arg *A = new JoinedArg(Opt, BaseArgs.MakeIndex(Opt->getName() + Value.str()), - BaseArg); + unsigned Index = BaseArgs.MakeIndex(Opt->getName() + Value.str()); + Arg *A = new Arg(Opt, Index, + BaseArgs.getArgString(Index) + strlen(Opt->getName()), + BaseArg); SynthesizedArgs.push_back(A); return A; } diff --git a/contrib/llvm/tools/clang/lib/Driver/CMakeLists.txt b/contrib/llvm/tools/clang/lib/Driver/CMakeLists.txt index 5af754d..00d076b 100644 --- a/contrib/llvm/tools/clang/lib/Driver/CMakeLists.txt +++ b/contrib/llvm/tools/clang/lib/Driver/CMakeLists.txt @@ -21,5 +21,5 @@ 
add_clang_library(clangDriver Types.cpp ) -add_dependencies(clangDriver ClangDiagnosticDriver ClangDriverOptions - ClangCC1Options ClangCC1AsOptions) +add_dependencies(clangDriver ClangAttrList ClangDiagnosticDriver + ClangDriverOptions ClangCC1Options ClangCC1AsOptions) diff --git a/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp b/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp index 227f79a..282e9fe 100644 --- a/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp +++ b/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp @@ -22,20 +22,22 @@ #include <errno.h> using namespace clang::driver; -Compilation::Compilation(const Driver &D, - const ToolChain &_DefaultToolChain, - InputArgList *_Args) - : TheDriver(D), DefaultToolChain(_DefaultToolChain), Args(_Args) { +Compilation::Compilation(const Driver &D, const ToolChain &_DefaultToolChain, + InputArgList *_Args, DerivedArgList *_TranslatedArgs) + : TheDriver(D), DefaultToolChain(_DefaultToolChain), Args(_Args), + TranslatedArgs(_TranslatedArgs) { } Compilation::~Compilation() { + delete TranslatedArgs; delete Args; // Free any derived arg lists. for (llvm::DenseMap<std::pair<const ToolChain*, const char*>, DerivedArgList*>::iterator it = TCArgs.begin(), ie = TCArgs.end(); it != ie; ++it) - delete it->second; + if (it->second != TranslatedArgs) + delete it->second; // Free the actions, if built. 
for (ActionList::iterator it = Actions.begin(), ie = Actions.end(); @@ -49,8 +51,11 @@ const DerivedArgList &Compilation::getArgsForToolChain(const ToolChain *TC, TC = &DefaultToolChain; DerivedArgList *&Entry = TCArgs[std::make_pair(TC, BoundArch)]; - if (!Entry) - Entry = TC->TranslateArgs(*Args, BoundArch); + if (!Entry) { + Entry = TC->TranslateArgs(*TranslatedArgs, BoundArch); + if (!Entry) + Entry = TranslatedArgs; + } return *Entry; } diff --git a/contrib/llvm/tools/clang/lib/Driver/Driver.cpp b/contrib/llvm/tools/clang/lib/Driver/Driver.cpp index da83803..2fc0a53 100644 --- a/contrib/llvm/tools/clang/lib/Driver/Driver.cpp +++ b/contrib/llvm/tools/clang/lib/Driver/Driver.cpp @@ -39,9 +39,6 @@ using namespace clang::driver; using namespace clang; -// Used to set values for "production" clang, for releases. -// #define USE_PRODUCTION_CLANG - Driver::Driver(llvm::StringRef _Name, llvm::StringRef _Dir, llvm::StringRef _DefaultHostTriple, llvm::StringRef _DefaultImageName, @@ -78,6 +75,11 @@ Driver::Driver(llvm::StringRef _Name, llvm::StringRef _Dir, P.appendComponent("clang"); P.appendComponent(CLANG_VERSION_STRING); ResourceDir = P.str(); + + // Save the original clang executable path. + P = Dir; + P.appendComponent(Name); + ClangExecutable = P.str(); } Driver::~Driver() { @@ -110,6 +112,57 @@ InputArgList *Driver::ParseArgStrings(const char **ArgBegin, return Args; } +DerivedArgList *Driver::TranslateInputArgs(const InputArgList &Args) const { + DerivedArgList *DAL = new DerivedArgList(Args); + + for (ArgList::const_iterator it = Args.begin(), + ie = Args.end(); it != ie; ++it) { + const Arg *A = *it; + + // Unfortunately, we have to parse some forwarding options (-Xassembler, + // -Xlinker, -Xpreprocessor) because we either integrate their functionality + // (assembler and preprocessor), or bypass a previous driver ('collect2'). + + // Rewrite linker options, to replace --no-demangle with a custom internal + // option. 
+ if ((A->getOption().matches(options::OPT_Wl_COMMA) || + A->getOption().matches(options::OPT_Xlinker)) && + A->containsValue("--no-demangle")) { + // Add the rewritten no-demangle argument. + DAL->AddFlagArg(A, Opts->getOption(options::OPT_Z_Xlinker__no_demangle)); + + // Add the remaining values as Xlinker arguments. + for (unsigned i = 0, e = A->getNumValues(); i != e; ++i) + if (llvm::StringRef(A->getValue(Args, i)) != "--no-demangle") + DAL->AddSeparateArg(A, Opts->getOption(options::OPT_Xlinker), + A->getValue(Args, i)); + + continue; + } + + // Rewrite preprocessor options, to replace -Wp,-MD,FOO which is used by + // some build systems. We don't try to be complete here because we don't + // care to encourage this usage model. + if (A->getOption().matches(options::OPT_Wp_COMMA) && + A->getNumValues() == 2 && + (A->getValue(Args, 0) == llvm::StringRef("-MD") || + A->getValue(Args, 0) == llvm::StringRef("-MMD"))) { + // Rewrite to -MD/-MMD along with -MF. + if (A->getValue(Args, 0) == llvm::StringRef("-MD")) + DAL->AddFlagArg(A, Opts->getOption(options::OPT_MD)); + else + DAL->AddFlagArg(A, Opts->getOption(options::OPT_MMD)); + DAL->AddSeparateArg(A, Opts->getOption(options::OPT_MF), + A->getValue(Args, 1)); + continue; + } + + DAL->append(*it); + } + + return DAL; +} + Compilation *Driver::BuildCompilation(int argc, const char **argv) { llvm::PrettyStackTraceString CrashInfo("Compilation construction"); @@ -179,12 +232,16 @@ Compilation *Driver::BuildCompilation(int argc, const char **argv) { Host = GetHostInfo(HostTriple); + // Perform the default argument translations. + DerivedArgList *TranslatedArgs = TranslateInputArgs(*Args); + // The compilation takes ownership of Args. - Compilation *C = new Compilation(*this, *Host->CreateToolChain(*Args), Args); + Compilation *C = new Compilation(*this, *Host->CreateToolChain(*Args), Args, + TranslatedArgs); // FIXME: This behavior shouldn't be here. 
if (CCCPrintOptions) { - PrintOptions(C->getArgs()); + PrintOptions(C->getInputArgs()); return C; } @@ -274,8 +331,6 @@ void Driver::PrintOptions(const ArgList &Args) const { } } -// FIXME: Move -ccc options to real options in the .td file (or eliminate), and -// then move to using OptTable::PrintHelp. void Driver::PrintHelp(bool ShowHidden) const { getOpts().PrintHelp(llvm::outs(), Name.c_str(), DriverTitle.c_str(), ShowHidden); @@ -303,14 +358,14 @@ static void PrintDiagnosticCategories(llvm::raw_ostream &OS) { } bool Driver::HandleImmediateArgs(const Compilation &C) { - // The order these options are handled in in gcc is all over the place, but we + // The order these options are handled in gcc is all over the place, but we // don't expect inconsistencies w.r.t. that to matter in practice. if (C.getArgs().hasArg(options::OPT_dumpversion)) { llvm::outs() << CLANG_VERSION_STRING "\n"; return false; } - + if (C.getArgs().hasArg(options::OPT__print_diagnostic_categories)) { PrintDiagnosticCategories(llvm::outs()); return false; @@ -457,6 +512,19 @@ void Driver::PrintActions(const Compilation &C) const { PrintActions1(C, *it, Ids); } +/// \brief Check whether the given input tree contains any compilation (or +/// assembly) actions. +static bool ContainsCompileAction(const Action *A) { + if (isa<CompileJobAction>(A) || isa<AssembleJobAction>(A)) + return true; + + for (Action::const_iterator it = A->begin(), ie = A->end(); it != ie; ++it) + if (ContainsCompileAction(*it)) + return true; + + return false; +} + void Driver::BuildUniversalActions(const ArgList &Args, ActionList &Actions) const { llvm::PrettyStackTraceString CrashInfo("Building universal build actions"); @@ -504,7 +572,8 @@ void Driver::BuildUniversalActions(const ArgList &Args, ActionList SingleActions; BuildActions(Args, SingleActions); - // Add in arch binding and lipo (if necessary) for every top level action. 
+ // Add in arch bindings for every top level action, as well as lipo and + // dsymutil steps if needed. for (unsigned i = 0, e = SingleActions.size(); i != e; ++i) { Action *Act = SingleActions[i]; @@ -531,6 +600,23 @@ void Driver::BuildUniversalActions(const ArgList &Args, Actions.append(Inputs.begin(), Inputs.end()); else Actions.push_back(new LipoJobAction(Inputs, Act->getType())); + + // Add a 'dsymutil' step if necessary, when debug info is enabled and we + // have a compile input. We need to run 'dsymutil' ourselves in such cases + // because the debug info will refer to a temporary object file which is + // will be removed at the end of the compilation process. + if (Act->getType() == types::TY_Image) { + Arg *A = Args.getLastArg(options::OPT_g_Group); + if (A && !A->getOption().matches(options::OPT_g0) && + !A->getOption().matches(options::OPT_gstabs) && + ContainsCompileAction(Actions.back())) { + ActionList Inputs; + Inputs.push_back(Actions.back()); + Actions.pop_back(); + + Actions.push_back(new DsymutilJobAction(Inputs, types::TY_dSYM)); + } + } } } @@ -783,7 +869,7 @@ Action *Driver::ConstructPhaseAction(const ArgList &Args, phases::ID Phase, } else if (Args.hasArg(options::OPT_emit_llvm) || Args.hasArg(options::OPT_flto) || HasO4) { types::ID Output = - Args.hasArg(options::OPT_S) ? types::TY_LLVMAsm : types::TY_LLVMBC; + Args.hasArg(options::OPT_S) ? types::TY_LTO_IR : types::TY_LTO_BC; return new CompileJobAction(Input, Output); } else { return new CompileJobAction(Input, types::TY_PP_Asm); @@ -962,7 +1048,7 @@ void Driver::BuildJobsForAction(Compilation &C, // just using Args was better? 
const Arg &Input = IA->getInputArg(); Input.claim(); - if (isa<PositionalArg>(Input)) { + if (Input.getOption().matches(options::OPT_INPUT)) { const char *Name = Input.getValue(C.getArgs()); Result = InputInfo(Name, A->getType(), Name); } else @@ -992,9 +1078,17 @@ void Driver::BuildJobsForAction(Compilation &C, InputInfoList InputInfos; for (ActionList::const_iterator it = Inputs->begin(), ie = Inputs->end(); it != ie; ++it) { + // Treat dsymutil sub-jobs as being at the top-level too, they shouldn't get + // temporary output names. + // + // FIXME: Clean this up. + bool SubJobAtTopLevel = false; + if (AtTopLevel && isa<DsymutilJobAction>(A)) + SubJobAtTopLevel = true; + InputInfo II; BuildJobsForAction(C, *it, TC, BoundArch, TryToUsePipeInput, - /*AtTopLevel*/false, LinkingOutput, II); + SubJobAtTopLevel, LinkingOutput, II); InputInfos.push_back(II); } @@ -1023,6 +1117,11 @@ void Driver::BuildJobsForAction(Compilation &C, // Always use the first input as the base input. const char *BaseInput = InputInfos[0].getBaseInput(); + // ... except dsymutil actions, which use their actual input as the base + // input. + if (JA->getType() == types::TY_dSYM) + BaseInput = InputInfos[0].getFilename(); + // Determine the place to write output to (nothing, pipe, or filename) and // where to put the new job. if (JA->getType() == types::TY_Nothing) { @@ -1065,7 +1164,7 @@ const char *Driver::GetNamedOutputPath(Compilation &C, bool AtTopLevel) const { llvm::PrettyStackTraceString CrashInfo("Computing output path"); // Output to a user requested destination? 
- if (AtTopLevel) { + if (AtTopLevel && !isa<DsymutilJobAction>(JA)) { if (Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o)) return C.addResultFile(FinalOutput->getValue(C.getArgs())); } @@ -1191,7 +1290,7 @@ const HostInfo *Driver::GetHostInfo(const char *TripleStr) const { // TCE is an osless target if (Triple.getArchName() == "tce") - return createTCEHostInfo(*this, Triple); + return createTCEHostInfo(*this, Triple); switch (Triple.getOS()) { case llvm::Triple::AuroraUX: @@ -1204,6 +1303,8 @@ const HostInfo *Driver::GetHostInfo(const char *TripleStr) const { return createOpenBSDHostInfo(*this, Triple); case llvm::Triple::FreeBSD: return createFreeBSDHostInfo(*this, Triple); + case llvm::Triple::Minix: + return createMinixHostInfo(*this, Triple); case llvm::Triple::Linux: return createLinuxHostInfo(*this, Triple); default: @@ -1236,8 +1337,8 @@ bool Driver::ShouldUseClangCompiler(const Compilation &C, const JobAction &JA, // Always use clang for precompiling, AST generation, and rewriting, // regardless of archs. - if (isa<PrecompileJobAction>(JA) || JA.getType() == types::TY_AST || - JA.getType() == types::TY_RewrittenObjC) + if (isa<PrecompileJobAction>(JA) || + types::isOnlyAcceptedByClang(JA.getType())) return true; // Finally, don't use clang if this isn't one of the user specified archs to diff --git a/contrib/llvm/tools/clang/lib/Driver/HostInfo.cpp b/contrib/llvm/tools/clang/lib/Driver/HostInfo.cpp index d9e2e37..0636d9e 100644 --- a/contrib/llvm/tools/clang/lib/Driver/HostInfo.cpp +++ b/contrib/llvm/tools/clang/lib/Driver/HostInfo.cpp @@ -425,6 +425,58 @@ ToolChain *FreeBSDHostInfo::CreateToolChain(const ArgList &Args, return TC; } +// Minix Host Info + +/// MinixHostInfo - Minix host information implementation. +class MinixHostInfo : public HostInfo { + /// Cache of tool chains we have created. 
+ mutable llvm::StringMap<ToolChain*> ToolChains; + +public: + MinixHostInfo(const Driver &D, const llvm::Triple& Triple) + : HostInfo(D, Triple) {} + ~MinixHostInfo(); + + virtual bool useDriverDriver() const; + + virtual types::ID lookupTypeForExtension(const char *Ext) const { + return types::lookupTypeForExtension(Ext); + } + + virtual ToolChain *CreateToolChain(const ArgList &Args, + const char *ArchName) const; +}; + +MinixHostInfo::~MinixHostInfo() { + for (llvm::StringMap<ToolChain*>::iterator + it = ToolChains.begin(), ie = ToolChains.end(); it != ie; ++it){ + delete it->second; + } +} + +bool MinixHostInfo::useDriverDriver() const { + return false; +} + +ToolChain *MinixHostInfo::CreateToolChain(const ArgList &Args, + const char *ArchName) const { + assert(!ArchName && + "Unexpected arch name on platform without driver driver support."); + + std::string Arch = getArchName(); + ArchName = Arch.c_str(); + + ToolChain *&TC = ToolChains[ArchName]; + if (!TC) { + llvm::Triple TCTriple(getTriple()); + TCTriple.setArchName(ArchName); + + TC = new toolchains::Minix(*this, TCTriple); + } + + return TC; +} + // DragonFly Host Info /// DragonFlyHostInfo - DragonFly host information implementation. @@ -566,6 +618,12 @@ clang::driver::createFreeBSDHostInfo(const Driver &D, } const HostInfo * +clang::driver::createMinixHostInfo(const Driver &D, + const llvm::Triple& Triple) { + return new MinixHostInfo(D, Triple); +} + +const HostInfo * clang::driver::createDragonFlyHostInfo(const Driver &D, const llvm::Triple& Triple) { return new DragonFlyHostInfo(D, Triple); diff --git a/contrib/llvm/tools/clang/lib/Driver/Makefile b/contrib/llvm/tools/clang/lib/Driver/Makefile index 371bda7..7bc340e 100644 --- a/contrib/llvm/tools/clang/lib/Driver/Makefile +++ b/contrib/llvm/tools/clang/lib/Driver/Makefile @@ -7,10 +7,8 @@ # ##===----------------------------------------------------------------------===## -LEVEL = ../../../.. +CLANG_LEVEL := ../.. 
LIBRARYNAME := clangDriver BUILD_ARCHIVE = 1 -CPPFLAGS += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include - -include $(LEVEL)/Makefile.common +include $(CLANG_LEVEL)/Makefile diff --git a/contrib/llvm/tools/clang/lib/Driver/OptTable.cpp b/contrib/llvm/tools/clang/lib/Driver/OptTable.cpp index de1e459..39530f2 100644 --- a/contrib/llvm/tools/clang/lib/Driver/OptTable.cpp +++ b/contrib/llvm/tools/clang/lib/Driver/OptTable.cpp @@ -169,12 +169,12 @@ Option *OptTable::CreateOption(unsigned id) const { if (info.Flags & RenderJoined) { assert((info.Kind == Option::JoinedOrSeparateClass || info.Kind == Option::SeparateClass) && "Invalid option."); - Opt->setForceJoinedRender(true); + Opt->setRenderStyle(Option::RenderJoinedStyle); } if (info.Flags & RenderSeparate) { assert((info.Kind == Option::JoinedOrSeparateClass || info.Kind == Option::JoinedClass) && "Invalid option."); - Opt->setForceSeparateRender(true); + Opt->setRenderStyle(Option::RenderSeparateStyle); } if (info.Flags & Unsupported) Opt->setUnsupported(true); @@ -182,13 +182,13 @@ Option *OptTable::CreateOption(unsigned id) const { return Opt; } -Arg *OptTable::ParseOneArg(const InputArgList &Args, unsigned &Index) const { +Arg *OptTable::ParseOneArg(const ArgList &Args, unsigned &Index) const { unsigned Prev = Index; const char *Str = Args.getArgString(Index); // Anything that doesn't start with '-' is an input, as is '-' itself. 
if (Str[0] != '-' || Str[1] == '\0') - return new PositionalArg(TheInputOption, Index++); + return new Arg(TheInputOption, Index++, Str); const Info *Start = OptionInfos + FirstSearchableIndex; const Info *End = OptionInfos + getNumOptions(); @@ -221,7 +221,7 @@ Arg *OptTable::ParseOneArg(const InputArgList &Args, unsigned &Index) const { return 0; } - return new PositionalArg(TheUnknownOption, Index++); + return new Arg(TheUnknownOption, Index++, Str); } InputArgList *OptTable::ParseArgs(const char **ArgBegin, const char **ArgEnd, @@ -267,7 +267,7 @@ static std::string getOptionHelpName(const OptTable &Opts, OptSpecifier Id) { case Option::GroupClass: case Option::InputClass: case Option::UnknownClass: assert(0 && "Invalid option with help text."); - case Option::MultiArgClass: case Option::JoinedAndSeparateClass: + case Option::MultiArgClass: assert(0 && "Cannot print metavar for this kind of option."); case Option::FlagClass: @@ -277,6 +277,7 @@ static std::string getOptionHelpName(const OptTable &Opts, OptSpecifier Id) { Name += ' '; // FALLTHROUGH case Option::JoinedClass: case Option::CommaJoinedClass: + case Option::JoinedAndSeparateClass: if (const char *MetaVarName = Opts.getOptionMetaVar(Id)) Name += MetaVarName; else diff --git a/contrib/llvm/tools/clang/lib/Driver/Option.cpp b/contrib/llvm/tools/clang/lib/Driver/Option.cpp index 17d00f5..dd48af8 100644 --- a/contrib/llvm/tools/clang/lib/Driver/Option.cpp +++ b/contrib/llvm/tools/clang/lib/Driver/Option.cpp @@ -20,7 +20,6 @@ Option::Option(OptionClass _Kind, OptSpecifier _ID, const char *_Name, const OptionGroup *_Group, const Option *_Alias) : Kind(_Kind), ID(_ID.getID()), Name(_Name), Group(_Group), Alias(_Alias), Unsupported(false), LinkerInput(false), NoOptAsInput(false), - ForceSeparateRender(false), ForceJoinedRender(false), DriverOption(false), NoArgumentUnused(false) { // Multi-level aliases are not supported, and alias options cannot @@ -28,6 +27,31 @@ Option::Option(OptionClass _Kind, 
OptSpecifier _ID, const char *_Name, // inherent limitation. assert((!Alias || (!Alias->Alias && !Group)) && "Multi-level aliases and aliases with groups are unsupported."); + + // Initialize rendering options based on the class. + switch (Kind) { + case GroupClass: + case InputClass: + case UnknownClass: + RenderStyle = RenderValuesStyle; + break; + + case JoinedClass: + case JoinedAndSeparateClass: + RenderStyle = RenderJoinedStyle; + break; + + case CommaJoinedClass: + RenderStyle = RenderCommaJoinedStyle; + break; + + case FlagClass: + case SeparateClass: + case MultiArgClass: + case JoinedOrSeparateClass: + RenderStyle = RenderSeparateStyle; + break; + } } Option::~Option() { @@ -89,7 +113,7 @@ OptionGroup::OptionGroup(OptSpecifier ID, const char *Name, : Option(Option::GroupClass, ID, Name, Group, 0) { } -Arg *OptionGroup::accept(const InputArgList &Args, unsigned &Index) const { +Arg *OptionGroup::accept(const ArgList &Args, unsigned &Index) const { assert(0 && "accept() should never be called on an OptionGroup"); return 0; } @@ -98,7 +122,7 @@ InputOption::InputOption(OptSpecifier ID) : Option(Option::InputClass, ID, "<input>", 0, 0) { } -Arg *InputOption::accept(const InputArgList &Args, unsigned &Index) const { +Arg *InputOption::accept(const ArgList &Args, unsigned &Index) const { assert(0 && "accept() should never be called on an InputOption"); return 0; } @@ -107,7 +131,7 @@ UnknownOption::UnknownOption(OptSpecifier ID) : Option(Option::UnknownClass, ID, "<unknown>", 0, 0) { } -Arg *UnknownOption::accept(const InputArgList &Args, unsigned &Index) const { +Arg *UnknownOption::accept(const ArgList &Args, unsigned &Index) const { assert(0 && "accept() should never be called on an UnknownOption"); return 0; } @@ -117,13 +141,13 @@ FlagOption::FlagOption(OptSpecifier ID, const char *Name, : Option(Option::FlagClass, ID, Name, Group, Alias) { } -Arg *FlagOption::accept(const InputArgList &Args, unsigned &Index) const { +Arg *FlagOption::accept(const ArgList 
&Args, unsigned &Index) const { // Matches iff this is an exact match. // FIXME: Avoid strlen. if (strlen(getName()) != strlen(Args.getArgString(Index))) return 0; - return new FlagArg(this, Index++); + return new Arg(getUnaliasedOption(), Index++); } JoinedOption::JoinedOption(OptSpecifier ID, const char *Name, @@ -131,9 +155,10 @@ JoinedOption::JoinedOption(OptSpecifier ID, const char *Name, : Option(Option::JoinedClass, ID, Name, Group, Alias) { } -Arg *JoinedOption::accept(const InputArgList &Args, unsigned &Index) const { +Arg *JoinedOption::accept(const ArgList &Args, unsigned &Index) const { // Always matches. - return new JoinedArg(this, Index++); + const char *Value = Args.getArgString(Index) + strlen(getName()); + return new Arg(getUnaliasedOption(), Index++, Value); } CommaJoinedOption::CommaJoinedOption(OptSpecifier ID, const char *Name, @@ -142,15 +167,34 @@ CommaJoinedOption::CommaJoinedOption(OptSpecifier ID, const char *Name, : Option(Option::CommaJoinedClass, ID, Name, Group, Alias) { } -Arg *CommaJoinedOption::accept(const InputArgList &Args, +Arg *CommaJoinedOption::accept(const ArgList &Args, unsigned &Index) const { - // Always matches. We count the commas now so we can answer - // getNumValues easily. + // Always matches. + const char *Str = Args.getArgString(Index) + strlen(getName()); + Arg *A = new Arg(getUnaliasedOption(), Index++); + + // Parse out the comma separated values. + const char *Prev = Str; + for (;; ++Str) { + char c = *Str; + + if (!c || c == ',') { + if (Prev != Str) { + char *Value = new char[Str - Prev + 1]; + memcpy(Value, Prev, Str - Prev); + Value[Str - Prev] = '\0'; + A->getValues().push_back(Value); + } + + if (!c) + break; + + Prev = Str + 1; + } + } + A->setOwnsValues(true); - // Get the suffix string. - // FIXME: Avoid strlen, and move to helper method? 
- const char *Suffix = Args.getArgString(Index) + strlen(getName()); - return new CommaJoinedArg(this, Index++, Suffix); + return A; } SeparateOption::SeparateOption(OptSpecifier ID, const char *Name, @@ -158,7 +202,7 @@ SeparateOption::SeparateOption(OptSpecifier ID, const char *Name, : Option(Option::SeparateClass, ID, Name, Group, Alias) { } -Arg *SeparateOption::accept(const InputArgList &Args, unsigned &Index) const { +Arg *SeparateOption::accept(const ArgList &Args, unsigned &Index) const { // Matches iff this is an exact match. // FIXME: Avoid strlen. if (strlen(getName()) != strlen(Args.getArgString(Index))) @@ -168,7 +212,7 @@ Arg *SeparateOption::accept(const InputArgList &Args, unsigned &Index) const { if (Index > Args.getNumInputArgStrings()) return 0; - return new SeparateArg(this, Index - 2, 1); + return new Arg(getUnaliasedOption(), Index - 2, Args.getArgString(Index - 1)); } MultiArgOption::MultiArgOption(OptSpecifier ID, const char *Name, @@ -178,7 +222,7 @@ MultiArgOption::MultiArgOption(OptSpecifier ID, const char *Name, assert(NumArgs > 1 && "Invalid MultiArgOption!"); } -Arg *MultiArgOption::accept(const InputArgList &Args, unsigned &Index) const { +Arg *MultiArgOption::accept(const ArgList &Args, unsigned &Index) const { // Matches iff this is an exact match. // FIXME: Avoid strlen. 
if (strlen(getName()) != strlen(Args.getArgString(Index))) @@ -188,28 +232,35 @@ Arg *MultiArgOption::accept(const InputArgList &Args, unsigned &Index) const { if (Index > Args.getNumInputArgStrings()) return 0; - return new SeparateArg(this, Index - 1 - NumArgs, NumArgs); + Arg *A = new Arg(getUnaliasedOption(), Index - 1 - NumArgs, + Args.getArgString(Index - NumArgs)); + for (unsigned i = 1; i != NumArgs; ++i) + A->getValues().push_back(Args.getArgString(Index - NumArgs + i)); + return A; } -JoinedOrSeparateOption::JoinedOrSeparateOption(OptSpecifier ID, const char *Name, +JoinedOrSeparateOption::JoinedOrSeparateOption(OptSpecifier ID, + const char *Name, const OptionGroup *Group, const Option *Alias) : Option(Option::JoinedOrSeparateClass, ID, Name, Group, Alias) { } -Arg *JoinedOrSeparateOption::accept(const InputArgList &Args, +Arg *JoinedOrSeparateOption::accept(const ArgList &Args, unsigned &Index) const { // If this is not an exact match, it is a joined arg. // FIXME: Avoid strlen. - if (strlen(getName()) != strlen(Args.getArgString(Index))) - return new JoinedArg(this, Index++); + if (strlen(getName()) != strlen(Args.getArgString(Index))) { + const char *Value = Args.getArgString(Index) + strlen(getName()); + return new Arg(this, Index++, Value); + } // Otherwise it must be separate. Index += 2; if (Index > Args.getNumInputArgStrings()) return 0; - return new SeparateArg(this, Index - 2, 1); + return new Arg(getUnaliasedOption(), Index - 2, Args.getArgString(Index - 1)); } JoinedAndSeparateOption::JoinedAndSeparateOption(OptSpecifier ID, @@ -219,7 +270,7 @@ JoinedAndSeparateOption::JoinedAndSeparateOption(OptSpecifier ID, : Option(Option::JoinedAndSeparateClass, ID, Name, Group, Alias) { } -Arg *JoinedAndSeparateOption::accept(const InputArgList &Args, +Arg *JoinedAndSeparateOption::accept(const ArgList &Args, unsigned &Index) const { // Always matches. 
@@ -227,6 +278,7 @@ Arg *JoinedAndSeparateOption::accept(const InputArgList &Args, if (Index > Args.getNumInputArgStrings()) return 0; - return new JoinedAndSeparateArg(this, Index - 2); + return new Arg(getUnaliasedOption(), Index - 2, + Args.getArgString(Index-2)+strlen(getName()), + Args.getArgString(Index-1)); } - diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp index 9b6264a..9fae67d 100644 --- a/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp +++ b/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp @@ -26,14 +26,11 @@ const Driver &ToolChain::getDriver() const { return Host.getDriver(); } -std::string ToolChain::GetFilePath(const Compilation &C, - const char *Name) const { +std::string ToolChain::GetFilePath(const char *Name) const { return Host.getDriver().GetFilePath(Name, *this); } -std::string ToolChain::GetProgramPath(const Compilation &C, - const char *Name, - bool WantFile) const { +std::string ToolChain::GetProgramPath(const char *Name, bool WantFile) const { return Host.getDriver().GetProgramPath(Name, *this, WantFile); } diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp index abb55b0..a78d153 100644 --- a/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp +++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp @@ -225,6 +225,8 @@ Tool &Darwin::SelectTool(const Compilation &C, const JobAction &JA) const { T = new tools::darwin::Link(*this); break; case Action::LipoJobClass: T = new tools::darwin::Lipo(*this); break; + case Action::DsymutilJobClass: + T = new tools::darwin::Dsymutil(*this); break; } } @@ -323,6 +325,33 @@ DarwinClang::DarwinClang(const HostInfo &Host, const llvm::Triple& Triple, void DarwinClang::AddLinkSearchPathArgs(const ArgList &Args, ArgStringList &CmdArgs) const { // The Clang toolchain uses explicit paths for internal libraries. 
+ + // Unfortunately, we still might depend on a few of the libraries that are + // only available in the gcc library directory (in particular + // libstdc++.dylib). For now, hardcode the path to the known install location. + llvm::sys::Path P(getDriver().Dir); + P.eraseComponent(); // .../usr/bin -> ../usr + P.appendComponent("lib"); + P.appendComponent("gcc"); + switch (getTriple().getArch()) { + default: + assert(0 && "Invalid Darwin arch!"); + case llvm::Triple::x86: + case llvm::Triple::x86_64: + P.appendComponent("i686-apple-darwin10"); + break; + case llvm::Triple::arm: + case llvm::Triple::thumb: + P.appendComponent("arm-apple-darwin10"); + break; + case llvm::Triple::ppc: + case llvm::Triple::ppc64: + P.appendComponent("powerpc-apple-darwin10"); + break; + } + P.appendComponent("4.2.1"); + if (P.exists()) + CmdArgs.push_back(Args.MakeArgString("-L" + P.str())); } void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args, @@ -386,9 +415,9 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args, } } -DerivedArgList *Darwin::TranslateArgs(InputArgList &Args, +DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args, const char *BoundArch) const { - DerivedArgList *DAL = new DerivedArgList(Args, false); + DerivedArgList *DAL = new DerivedArgList(Args.getBaseArgs()); const OptTable &Opts = getDriver().getOpts(); // FIXME: We really want to get out of the tool chain level argument @@ -440,19 +469,10 @@ DerivedArgList *Darwin::TranslateArgs(InputArgList &Args, iPhoneVersion = DAL->MakeJoinedArg(0, O, iPhoneOSTarget); DAL->append(iPhoneVersion); } else { - // Otherwise, choose a default platform based on the tool chain. - // - // FIXME: Don't hardcode default here. 
- if (getTriple().getArch() == llvm::Triple::arm || - getTriple().getArch() == llvm::Triple::thumb) { - const Option *O = Opts.getOption(options::OPT_miphoneos_version_min_EQ); - iPhoneVersion = DAL->MakeJoinedArg(0, O, "3.0"); - DAL->append(iPhoneVersion); - } else { - const Option *O = Opts.getOption(options::OPT_mmacosx_version_min_EQ); - OSXVersion = DAL->MakeJoinedArg(0, O, MacosxVersionMin); - DAL->append(OSXVersion); - } + // Otherwise, assume we are targeting OS X. + const Option *O = Opts.getOption(options::OPT_mmacosx_version_min_EQ); + OSXVersion = DAL->MakeJoinedArg(0, O, MacosxVersionMin); + DAL->append(OSXVersion); } } @@ -476,7 +496,8 @@ DerivedArgList *Darwin::TranslateArgs(InputArgList &Args, } setTarget(iPhoneVersion, Major, Minor, Micro); - for (ArgList::iterator it = Args.begin(), ie = Args.end(); it != ie; ++it) { + for (ArgList::const_iterator it = Args.begin(), + ie = Args.end(); it != ie; ++it) { Arg *A = *it; if (A->getOption().matches(options::OPT_Xarch__)) { @@ -484,9 +505,8 @@ DerivedArgList *Darwin::TranslateArgs(InputArgList &Args, if (getArchName() != A->getValue(Args, 0)) continue; - // FIXME: The arg is leaked here, and we should have a nicer - // interface for this. - unsigned Prev, Index = Prev = A->getIndex() + 1; + unsigned Index = Args.getBaseArgs().MakeIndex(A->getValue(Args, 1)); + unsigned Prev = Index; Arg *XarchArg = Opts.ParseOneArg(Args, Index); // If the argument parsing failed or more than one argument was @@ -506,6 +526,8 @@ DerivedArgList *Darwin::TranslateArgs(InputArgList &Args, XarchArg->setBaseArg(A); A = XarchArg; + + DAL->AddSynthesizedArg(A); } // Sob. These is strictly gcc compatible for the time being. 
Apple @@ -519,66 +541,61 @@ DerivedArgList *Darwin::TranslateArgs(InputArgList &Args, case options::OPT_mkernel: case options::OPT_fapple_kext: DAL->append(A); - DAL->append(DAL->MakeFlagArg(A, Opts.getOption(options::OPT_static))); - DAL->append(DAL->MakeFlagArg(A, Opts.getOption(options::OPT_static))); + DAL->AddFlagArg(A, Opts.getOption(options::OPT_static)); + DAL->AddFlagArg(A, Opts.getOption(options::OPT_static)); break; case options::OPT_dependency_file: - DAL->append(DAL->MakeSeparateArg(A, Opts.getOption(options::OPT_MF), - A->getValue(Args))); + DAL->AddSeparateArg(A, Opts.getOption(options::OPT_MF), + A->getValue(Args)); break; case options::OPT_gfull: - DAL->append(DAL->MakeFlagArg(A, Opts.getOption(options::OPT_g_Flag))); - DAL->append(DAL->MakeFlagArg(A, - Opts.getOption(options::OPT_fno_eliminate_unused_debug_symbols))); + DAL->AddFlagArg(A, Opts.getOption(options::OPT_g_Flag)); + DAL->AddFlagArg(A, + Opts.getOption(options::OPT_fno_eliminate_unused_debug_symbols)); break; case options::OPT_gused: - DAL->append(DAL->MakeFlagArg(A, Opts.getOption(options::OPT_g_Flag))); - DAL->append(DAL->MakeFlagArg(A, - Opts.getOption(options::OPT_feliminate_unused_debug_symbols))); + DAL->AddFlagArg(A, Opts.getOption(options::OPT_g_Flag)); + DAL->AddFlagArg(A, + Opts.getOption(options::OPT_feliminate_unused_debug_symbols)); break; case options::OPT_fterminated_vtables: case options::OPT_findirect_virtual_calls: - DAL->append(DAL->MakeFlagArg(A, - Opts.getOption(options::OPT_fapple_kext))); - DAL->append(DAL->MakeFlagArg(A, Opts.getOption(options::OPT_static))); + DAL->AddFlagArg(A, Opts.getOption(options::OPT_fapple_kext)); + DAL->AddFlagArg(A, Opts.getOption(options::OPT_static)); break; case options::OPT_shared: - DAL->append(DAL->MakeFlagArg(A, Opts.getOption(options::OPT_dynamiclib))); + DAL->AddFlagArg(A, Opts.getOption(options::OPT_dynamiclib)); break; case options::OPT_fconstant_cfstrings: - DAL->append(DAL->MakeFlagArg(A, - 
Opts.getOption(options::OPT_mconstant_cfstrings))); + DAL->AddFlagArg(A, Opts.getOption(options::OPT_mconstant_cfstrings)); break; case options::OPT_fno_constant_cfstrings: - DAL->append(DAL->MakeFlagArg(A, - Opts.getOption(options::OPT_mno_constant_cfstrings))); + DAL->AddFlagArg(A, Opts.getOption(options::OPT_mno_constant_cfstrings)); break; case options::OPT_Wnonportable_cfstrings: - DAL->append(DAL->MakeFlagArg(A, - Opts.getOption(options::OPT_mwarn_nonportable_cfstrings))); + DAL->AddFlagArg(A, + Opts.getOption(options::OPT_mwarn_nonportable_cfstrings)); break; case options::OPT_Wno_nonportable_cfstrings: - DAL->append(DAL->MakeFlagArg(A, - Opts.getOption(options::OPT_mno_warn_nonportable_cfstrings))); + DAL->AddFlagArg(A, + Opts.getOption(options::OPT_mno_warn_nonportable_cfstrings)); break; case options::OPT_fpascal_strings: - DAL->append(DAL->MakeFlagArg(A, - Opts.getOption(options::OPT_mpascal_strings))); + DAL->AddFlagArg(A, Opts.getOption(options::OPT_mpascal_strings)); break; case options::OPT_fno_pascal_strings: - DAL->append(DAL->MakeFlagArg(A, - Opts.getOption(options::OPT_mno_pascal_strings))); + DAL->AddFlagArg(A, Opts.getOption(options::OPT_mno_pascal_strings)); break; } } @@ -586,8 +603,7 @@ DerivedArgList *Darwin::TranslateArgs(InputArgList &Args, if (getTriple().getArch() == llvm::Triple::x86 || getTriple().getArch() == llvm::Triple::x86_64) if (!Args.hasArgNoClaim(options::OPT_mtune_EQ)) - DAL->append(DAL->MakeJoinedArg(0, Opts.getOption(options::OPT_mtune_EQ), - "core2")); + DAL->AddJoinedArg(0, Opts.getOption(options::OPT_mtune_EQ), "core2"); // Add the arch options based on the particular spelling of -arch, to match // how the driver driver works. 
@@ -601,57 +617,57 @@ DerivedArgList *Darwin::TranslateArgs(InputArgList &Args, if (Name == "ppc") ; else if (Name == "ppc601") - DAL->append(DAL->MakeJoinedArg(0, MCpu, "601")); + DAL->AddJoinedArg(0, MCpu, "601"); else if (Name == "ppc603") - DAL->append(DAL->MakeJoinedArg(0, MCpu, "603")); + DAL->AddJoinedArg(0, MCpu, "603"); else if (Name == "ppc604") - DAL->append(DAL->MakeJoinedArg(0, MCpu, "604")); + DAL->AddJoinedArg(0, MCpu, "604"); else if (Name == "ppc604e") - DAL->append(DAL->MakeJoinedArg(0, MCpu, "604e")); + DAL->AddJoinedArg(0, MCpu, "604e"); else if (Name == "ppc750") - DAL->append(DAL->MakeJoinedArg(0, MCpu, "750")); + DAL->AddJoinedArg(0, MCpu, "750"); else if (Name == "ppc7400") - DAL->append(DAL->MakeJoinedArg(0, MCpu, "7400")); + DAL->AddJoinedArg(0, MCpu, "7400"); else if (Name == "ppc7450") - DAL->append(DAL->MakeJoinedArg(0, MCpu, "7450")); + DAL->AddJoinedArg(0, MCpu, "7450"); else if (Name == "ppc970") - DAL->append(DAL->MakeJoinedArg(0, MCpu, "970")); + DAL->AddJoinedArg(0, MCpu, "970"); else if (Name == "ppc64") - DAL->append(DAL->MakeFlagArg(0, Opts.getOption(options::OPT_m64))); + DAL->AddFlagArg(0, Opts.getOption(options::OPT_m64)); else if (Name == "i386") ; else if (Name == "i486") - DAL->append(DAL->MakeJoinedArg(0, MArch, "i486")); + DAL->AddJoinedArg(0, MArch, "i486"); else if (Name == "i586") - DAL->append(DAL->MakeJoinedArg(0, MArch, "i586")); + DAL->AddJoinedArg(0, MArch, "i586"); else if (Name == "i686") - DAL->append(DAL->MakeJoinedArg(0, MArch, "i686")); + DAL->AddJoinedArg(0, MArch, "i686"); else if (Name == "pentium") - DAL->append(DAL->MakeJoinedArg(0, MArch, "pentium")); + DAL->AddJoinedArg(0, MArch, "pentium"); else if (Name == "pentium2") - DAL->append(DAL->MakeJoinedArg(0, MArch, "pentium2")); + DAL->AddJoinedArg(0, MArch, "pentium2"); else if (Name == "pentpro") - DAL->append(DAL->MakeJoinedArg(0, MArch, "pentiumpro")); + DAL->AddJoinedArg(0, MArch, "pentiumpro"); else if (Name == "pentIIm3") - 
DAL->append(DAL->MakeJoinedArg(0, MArch, "pentium2")); + DAL->AddJoinedArg(0, MArch, "pentium2"); else if (Name == "x86_64") - DAL->append(DAL->MakeFlagArg(0, Opts.getOption(options::OPT_m64))); + DAL->AddFlagArg(0, Opts.getOption(options::OPT_m64)); else if (Name == "arm") - DAL->append(DAL->MakeJoinedArg(0, MArch, "armv4t")); + DAL->AddJoinedArg(0, MArch, "armv4t"); else if (Name == "armv4t") - DAL->append(DAL->MakeJoinedArg(0, MArch, "armv4t")); + DAL->AddJoinedArg(0, MArch, "armv4t"); else if (Name == "armv5") - DAL->append(DAL->MakeJoinedArg(0, MArch, "armv5tej")); + DAL->AddJoinedArg(0, MArch, "armv5tej"); else if (Name == "xscale") - DAL->append(DAL->MakeJoinedArg(0, MArch, "xscale")); + DAL->AddJoinedArg(0, MArch, "xscale"); else if (Name == "armv6") - DAL->append(DAL->MakeJoinedArg(0, MArch, "armv6k")); + DAL->AddJoinedArg(0, MArch, "armv6k"); else if (Name == "armv7") - DAL->append(DAL->MakeJoinedArg(0, MArch, "armv7a")); + DAL->AddJoinedArg(0, MArch, "armv7a"); else llvm_unreachable("invalid Darwin arch"); @@ -740,6 +756,8 @@ Tool &Generic_GCC::SelectTool(const Compilation &C, // driver is Darwin. case Action::LipoJobClass: T = new tools::darwin::Lipo(*this); break; + case Action::DsymutilJobClass: + T = new tools::darwin::Dsymutil(*this); break; } } @@ -760,12 +778,6 @@ const char *Generic_GCC::GetForcedPicModel() const { return 0; } -DerivedArgList *Generic_GCC::TranslateArgs(InputArgList &Args, - const char *BoundArch) const { - return new DerivedArgList(Args, true); -} - - /// TCEToolChain - A tool chain using the llvm bitcode tools to perform /// all subcommands. See http://tce.cs.tut.fi for our peculiar target. /// Currently does not support anything else but compilation. 
@@ -820,11 +832,6 @@ Tool &TCEToolChain::SelectTool(const Compilation &C, return *T; } -DerivedArgList *TCEToolChain::TranslateArgs(InputArgList &Args, - const char *BoundArch) const { - return new DerivedArgList(Args, true); -} - /// OpenBSD - OpenBSD tool chain which can call as(1) and ld(1) directly. OpenBSD::OpenBSD(const HostInfo &Host, const llvm::Triple& Triple) @@ -859,6 +866,8 @@ Tool &OpenBSD::SelectTool(const Compilation &C, const JobAction &JA) const { FreeBSD::FreeBSD(const HostInfo &Host, const llvm::Triple& Triple, bool Lib32) : Generic_GCC(Host, Triple) { + getProgramPaths().push_back(getDriver().Dir + "/../libexec"); + getProgramPaths().push_back("/usr/libexec"); if (Lib32) { getFilePaths().push_back(getDriver().Dir + "/../lib32"); getFilePaths().push_back("/usr/lib32"); @@ -890,6 +899,38 @@ Tool &FreeBSD::SelectTool(const Compilation &C, const JobAction &JA) const { return *T; } +/// Minix - Minix tool chain which can call as(1) and ld(1) directly. + +Minix::Minix(const HostInfo &Host, const llvm::Triple& Triple) + : Generic_GCC(Host, Triple) { + getFilePaths().push_back(getDriver().Dir + "/../lib"); + getFilePaths().push_back("/usr/lib"); + getFilePaths().push_back("/usr/gnu/lib"); + getFilePaths().push_back("/usr/gnu/lib/gcc/i686-pc-minix/4.4.3"); +} + +Tool &Minix::SelectTool(const Compilation &C, const JobAction &JA) const { + Action::ActionClass Key; + if (getDriver().ShouldUseClangCompiler(C, JA, getTriple())) + Key = Action::AnalyzeJobClass; + else + Key = JA.getKind(); + + Tool *&T = Tools[Key]; + if (!T) { + switch (Key) { + case Action::AssembleJobClass: + T = new tools::minix::Assemble(*this); break; + case Action::LinkJobClass: + T = new tools::minix::Link(*this); break; + default: + T = &Generic_GCC::SelectTool(C, JA); + } + } + + return *T; +} + /// AuroraUX - AuroraUX tool chain which can call as(1) and ld(1) directly. 
AuroraUX::AuroraUX(const HostInfo &Host, const llvm::Triple& Triple) diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains.h b/contrib/llvm/tools/clang/lib/Driver/ToolChains.h index ad975bf..4bdd00f 100644 --- a/contrib/llvm/tools/clang/lib/Driver/ToolChains.h +++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains.h @@ -33,9 +33,6 @@ public: Generic_GCC(const HostInfo &Host, const llvm::Triple& Triple); ~Generic_GCC(); - virtual DerivedArgList *TranslateArgs(InputArgList &Args, - const char *BoundArch) const; - virtual Tool &SelectTool(const Compilation &C, const JobAction &JA) const; virtual bool IsUnwindTablesDefault() const; @@ -147,7 +144,7 @@ public: /// @name ToolChain Implementation /// { - virtual DerivedArgList *TranslateArgs(InputArgList &Args, + virtual DerivedArgList *TranslateArgs(const DerivedArgList &Args, const char *BoundArch) const; virtual Tool &SelectTool(const Compilation &C, const JobAction &JA) const; @@ -160,9 +157,13 @@ public: return !isMacosxVersionLT(10, 6); } virtual bool IsIntegratedAssemblerDefault() const { +#ifdef DISABLE_DEFAULT_INTEGRATED_ASSEMBLER + return false; +#else // Default integrated assembler to on for x86. return (getTriple().getArch() == llvm::Triple::x86 || getTriple().getArch() == llvm::Triple::x86_64); +#endif } virtual bool IsObjCNonFragileABIDefault() const { // Non-fragile ABI is default for everything but i386. 
@@ -270,6 +271,13 @@ public: virtual Tool &SelectTool(const Compilation &C, const JobAction &JA) const; }; +class LLVM_LIBRARY_VISIBILITY Minix : public Generic_GCC { +public: + Minix(const HostInfo &Host, const llvm::Triple& Triple); + + virtual Tool &SelectTool(const Compilation &C, const JobAction &JA) const; +}; + class LLVM_LIBRARY_VISIBILITY DragonFly : public Generic_GCC { public: DragonFly(const HostInfo &Host, const llvm::Triple& Triple); @@ -290,8 +298,6 @@ public: TCEToolChain(const HostInfo &Host, const llvm::Triple& Triple); ~TCEToolChain(); - virtual DerivedArgList *TranslateArgs(InputArgList &Args, - const char *BoundArch) const; virtual Tool &SelectTool(const Compilation &C, const JobAction &JA) const; bool IsMathErrnoDefault() const; bool IsUnwindTablesDefault() const; diff --git a/contrib/llvm/tools/clang/lib/Driver/Tools.cpp b/contrib/llvm/tools/clang/lib/Driver/Tools.cpp index ce35552..c2cb1fb 100644 --- a/contrib/llvm/tools/clang/lib/Driver/Tools.cpp +++ b/contrib/llvm/tools/clang/lib/Driver/Tools.cpp @@ -157,18 +157,18 @@ void Clang::AddPreprocessingOptions(const Driver &D, for (arg_iterator it = Args.filtered_begin(options::OPT_MT, options::OPT_MQ), ie = Args.filtered_end(); it != ie; ++it) { + const Arg *A = *it; + A->claim(); - it->claim(); - - if (it->getOption().matches(options::OPT_MQ)) { + if (A->getOption().matches(options::OPT_MQ)) { CmdArgs.push_back("-MT"); llvm::SmallString<128> Quoted; - QuoteTarget(it->getValue(Args), Quoted); + QuoteTarget(A->getValue(Args), Quoted); CmdArgs.push_back(Args.MakeArgString(Quoted)); // -MT flag - no change } else { - it->render(Args, CmdArgs); + A->render(Args, CmdArgs); } } @@ -252,54 +252,59 @@ void Clang::AddPreprocessingOptions(const Driver &D, /// getARMTargetCPU - Get the (LLVM) name of the ARM cpu we are targetting. // // FIXME: tblgen this. 
-static const char *getARMTargetCPU(const ArgList &Args) { +static const char *getARMTargetCPU(const ArgList &Args, + const llvm::Triple &Triple) { // FIXME: Warn on inconsistent use of -mcpu and -march. // If we have -mcpu=, use that. if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) return A->getValue(Args); - // Otherwise, if we have -march= choose the base CPU for that arch. + llvm::StringRef MArch; if (Arg *A = Args.getLastArg(options::OPT_march_EQ)) { - llvm::StringRef MArch = A->getValue(Args); - - if (MArch == "armv2" || MArch == "armv2a") - return "arm2"; - if (MArch == "armv3") - return "arm6"; - if (MArch == "armv3m") - return "arm7m"; - if (MArch == "armv4" || MArch == "armv4t") - return "arm7tdmi"; - if (MArch == "armv5" || MArch == "armv5t") - return "arm10tdmi"; - if (MArch == "armv5e" || MArch == "armv5te") - return "arm1026ejs"; - if (MArch == "armv5tej") - return "arm926ej-s"; - if (MArch == "armv6" || MArch == "armv6k") - return "arm1136jf-s"; - if (MArch == "armv6j") - return "arm1136j-s"; - if (MArch == "armv6z" || MArch == "armv6zk") - return "arm1176jzf-s"; - if (MArch == "armv6t2") - return "arm1156t2-s"; - if (MArch == "armv7" || MArch == "armv7a" || MArch == "armv7-a") - return "cortex-a8"; - if (MArch == "armv7r" || MArch == "armv7-r") - return "cortex-r4"; - if (MArch == "armv7m" || MArch == "armv7-m") - return "cortex-m3"; - if (MArch == "ep9312") - return "ep9312"; - if (MArch == "iwmmxt") - return "iwmmxt"; - if (MArch == "xscale") - return "xscale"; - } - - // Otherwise return the most base CPU LLVM supports. + // Otherwise, if we have -march= choose the base CPU for that arch. + MArch = A->getValue(Args); + } else { + // Otherwise, use the Arch from the triple. 
+ MArch = Triple.getArchName(); + } + + if (MArch == "armv2" || MArch == "armv2a") + return "arm2"; + if (MArch == "armv3") + return "arm6"; + if (MArch == "armv3m") + return "arm7m"; + if (MArch == "armv4" || MArch == "armv4t") + return "arm7tdmi"; + if (MArch == "armv5" || MArch == "armv5t") + return "arm10tdmi"; + if (MArch == "armv5e" || MArch == "armv5te") + return "arm1026ejs"; + if (MArch == "armv5tej") + return "arm926ej-s"; + if (MArch == "armv6" || MArch == "armv6k") + return "arm1136jf-s"; + if (MArch == "armv6j") + return "arm1136j-s"; + if (MArch == "armv6z" || MArch == "armv6zk") + return "arm1176jzf-s"; + if (MArch == "armv6t2") + return "arm1156t2-s"; + if (MArch == "armv7" || MArch == "armv7a" || MArch == "armv7-a") + return "cortex-a8"; + if (MArch == "armv7r" || MArch == "armv7-r") + return "cortex-r4"; + if (MArch == "armv7m" || MArch == "armv7-m") + return "cortex-m3"; + if (MArch == "ep9312") + return "ep9312"; + if (MArch == "iwmmxt") + return "iwmmxt"; + if (MArch == "xscale") + return "xscale"; + + // If all else failed, return the most base CPU LLVM supports. return "arm7tdmi"; } @@ -352,7 +357,8 @@ static std::string getLLVMTriple(const ToolChain &TC, const ArgList &Args) { // Thumb2 is the default for V7 on Darwin. // // FIXME: Thumb should just be another -target-feaure, not in the triple. - llvm::StringRef Suffix = getLLVMArchSuffixForARM(getARMTargetCPU(Args)); + llvm::StringRef Suffix = + getLLVMArchSuffixForARM(getARMTargetCPU(Args, Triple)); bool ThumbDefault = (Suffix == "v7" && TC.getTriple().getOS() == llvm::Triple::Darwin); std::string ArchName = "arm"; @@ -385,6 +391,7 @@ static bool isSignedCharDefault(const llvm::Triple &Triple) { void Clang::AddARMTargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const { const Driver &D = getToolChain().getDriver(); + llvm::Triple Triple = getToolChain().getTriple(); // Select the ABI to use. 
// @@ -394,27 +401,20 @@ void Clang::AddARMTargetArgs(const ArgList &Args, ABIName = A->getValue(Args); } else { // Select the default based on the platform. - switch (getToolChain().getTriple().getOS()) { - // FIXME: Is this right for non-Darwin and non-Linux? - default: + llvm::StringRef env = Triple.getEnvironmentName(); + if (env == "gnueabi") + ABIName = "aapcs-linux"; + else if (env == "eabi") ABIName = "aapcs"; - break; - - case llvm::Triple::Darwin: + else ABIName = "apcs-gnu"; - break; - - case llvm::Triple::Linux: - ABIName = "aapcs-linux"; - break; - } } CmdArgs.push_back("-target-abi"); CmdArgs.push_back(ABIName); // Set the CPU based on -march= and -mcpu=. CmdArgs.push_back("-target-cpu"); - CmdArgs.push_back(getARMTargetCPU(Args)); + CmdArgs.push_back(getARMTargetCPU(Args, Triple)); // Select the float ABI as determined by -msoft-float, -mhard-float, and // -mfloat-abi=. @@ -438,14 +438,14 @@ void Clang::AddARMTargetArgs(const ArgList &Args, // If unspecified, choose the default based on the platform. if (FloatABI.empty()) { - // FIXME: This is wrong for non-Darwin, we don't have a mechanism yet for - // distinguishing things like linux-eabi vs linux-elf. - switch (getToolChain().getTriple().getOS()) { + const llvm::Triple &Triple = getToolChain().getTriple(); + switch (Triple.getOS()) { case llvm::Triple::Darwin: { // Darwin defaults to "softfp" for v6 and v7. // // FIXME: Factor out an ARM class so we can cache the arch somewhere. 
- llvm::StringRef ArchName = getLLVMArchSuffixForARM(getARMTargetCPU(Args)); + llvm::StringRef ArchName = + getLLVMArchSuffixForARM(getARMTargetCPU(Args, Triple)); if (ArchName.startswith("v6") || ArchName.startswith("v7")) FloatABI = "softfp"; else @@ -453,6 +453,15 @@ void Clang::AddARMTargetArgs(const ArgList &Args, break; } + case llvm::Triple::Linux: { + llvm::StringRef Env = getToolChain().getTriple().getEnvironmentName(); + if (Env == "gnueabi") { + FloatABI = "softfp"; + break; + } + } + // fall through + default: // Assume "soft", but warn the user we are guessing. FloatABI = "soft"; @@ -639,8 +648,8 @@ void Clang::AddX86TargetArgs(const ArgList &Args, for (arg_iterator it = Args.filtered_begin(options::OPT_m_x86_Features_Group), ie = Args.filtered_end(); it != ie; ++it) { - llvm::StringRef Name = it->getOption().getName(); - it->claim(); + llvm::StringRef Name = (*it)->getOption().getName(); + (*it)->claim(); // Skip over "-m". assert(Name.startswith("-m") && "Invalid feature name."); @@ -792,9 +801,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, if (JA.getType() == types::TY_Nothing) { CmdArgs.push_back("-fsyntax-only"); - } else if (JA.getType() == types::TY_LLVMAsm) { + } else if (JA.getType() == types::TY_LLVM_IR || + JA.getType() == types::TY_LTO_IR) { CmdArgs.push_back("-emit-llvm"); - } else if (JA.getType() == types::TY_LLVMBC) { + } else if (JA.getType() == types::TY_LLVM_BC || + JA.getType() == types::TY_LTO_BC) { CmdArgs.push_back("-emit-llvm-bc"); } else if (JA.getType() == types::TY_PP_Asm) { CmdArgs.push_back("-S"); @@ -988,6 +999,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, break; } + // -mno-omit-leaf-frame-pointer is default. + if (Args.hasFlag(options::OPT_momit_leaf_frame_pointer, + options::OPT_mno_omit_leaf_frame_pointer, false)) + CmdArgs.push_back("-momit-leaf-frame-pointer"); + // -fno-math-errno is default. 
if (Args.hasFlag(options::OPT_fmath_errno, options::OPT_fno_math_errno, @@ -1026,6 +1042,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, Args.AddAllArgs(CmdArgs, options::OPT_ffunction_sections); Args.AddAllArgs(CmdArgs, options::OPT_fdata_sections); + Args.AddAllArgs(CmdArgs, options::OPT_finstrument_functions); + Args.AddLastArg(CmdArgs, options::OPT_nostdinc); Args.AddLastArg(CmdArgs, options::OPT_nostdincxx); Args.AddLastArg(CmdArgs, options::OPT_nobuiltininc); @@ -1072,8 +1090,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, else Std->render(Args, CmdArgs); - if (Arg *A = Args.getLastArg(options::OPT_trigraphs)) - if (A->getIndex() > Std->getIndex()) + if (Arg *A = Args.getLastArg(options::OPT_std_EQ, options::OPT_ansi, + options::OPT_trigraphs)) + if (A != Std) A->render(Args, CmdArgs); } else { // Honor -std-default. @@ -1146,6 +1165,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back(A->getValue(Args)); } + Args.AddLastArg(CmdArgs, options::OPT_fvisibility_inlines_hidden); + // -fhosted is default. 
if (KernelOrKext || Args.hasFlag(options::OPT_ffreestanding, options::OPT_fhosted, @@ -1178,12 +1199,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, } } - Args.AddLastArg(CmdArgs, options::OPT_fno_caret_diagnostics); Args.AddLastArg(CmdArgs, options::OPT_fno_show_column); Args.AddLastArg(CmdArgs, options::OPT_fobjc_sender_dependent_dispatch); Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_print_source_range_info); Args.AddLastArg(CmdArgs, options::OPT_ftime_report); Args.AddLastArg(CmdArgs, options::OPT_ftrapv); + Args.AddLastArg(CmdArgs, options::OPT_fwrapv); Args.AddLastArg(CmdArgs, options::OPT_fwritable_strings); Args.AddLastArg(CmdArgs, options::OPT_pthread); @@ -1347,6 +1368,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, D.Diag(clang::diag::warn_drv_clang_unsupported) << Args.getLastArg(options::OPT_funsigned_bitfields)->getAsString(Args); + // -fcaret-diagnostics is default. + if (!Args.hasFlag(options::OPT_fcaret_diagnostics, + options::OPT_fno_caret_diagnostics, true)) + CmdArgs.push_back("-fno-caret-diagnostics"); + // -fdiagnostics-fixit-info is default, only pass non-default. if (!Args.hasFlag(options::OPT_fdiagnostics_fixit_info, options::OPT_fno_diagnostics_fixit_info)) @@ -1376,6 +1402,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, options::OPT_fno_show_source_location)) CmdArgs.push_back("-fno-show-source-location"); + if (!Args.hasFlag(options::OPT_fspell_checking, + options::OPT_fno_spell_checking)) + CmdArgs.push_back("-fno-spell-checking"); + + if (Arg *A = Args.getLastArg(options::OPT_fshow_overloads_EQ)) + A->render(Args, CmdArgs); + // -fdollars-in-identifiers default varies depending on platform and // language; only pass if specified. 
if (Arg *A = Args.getLastArg(options::OPT_fdollars_in_identifiers, @@ -1420,14 +1453,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, Args.AddAllArgValues(CmdArgs, options::OPT_Xclang); for (arg_iterator it = Args.filtered_begin(options::OPT_mllvm), ie = Args.filtered_end(); it != ie; ++it) { - it->claim(); + (*it)->claim(); // We translate this by hand to the -cc1 argument, since nightly test uses // it and developers have been trained to spell it with -mllvm. - if (llvm::StringRef(it->getValue(Args, 0)) == "-disable-llvm-optzns") + if (llvm::StringRef((*it)->getValue(Args, 0)) == "-disable-llvm-optzns") CmdArgs.push_back("-disable-llvm-optzns"); else - it->render(Args, CmdArgs); + (*it)->render(Args, CmdArgs); } if (Output.getType() == types::TY_Dependencies) { @@ -1457,30 +1490,34 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, Args.AddAllArgs(CmdArgs, options::OPT_undef); - const char *Exec = - Args.MakeArgString(getToolChain().GetProgramPath(C, "clang")); + std::string Exec = getToolChain().getDriver().getClangProgramPath(); // Optionally embed the -cc1 level arguments into the debug info, for build // analysis. if (getToolChain().UseDwarfDebugFlags()) { + ArgStringList OriginalArgs; + for (ArgList::const_iterator it = Args.begin(), + ie = Args.end(); it != ie; ++it) + (*it)->render(Args, OriginalArgs); + llvm::SmallString<256> Flags; Flags += Exec; - for (unsigned i = 0, e = CmdArgs.size(); i != e; ++i) { + for (unsigned i = 0, e = OriginalArgs.size(); i != e; ++i) { Flags += " "; - Flags += CmdArgs[i]; + Flags += OriginalArgs[i]; } CmdArgs.push_back("-dwarf-debug-flags"); CmdArgs.push_back(Args.MakeArgString(Flags.str())); } - Dest.addCommand(new Command(JA, *this, Exec, CmdArgs)); + Dest.addCommand(new Command(JA, *this, Exec.c_str(), CmdArgs)); // Explicitly warn that these options are unsupported, even though // we are allowing compilation to continue. 
for (arg_iterator it = Args.filtered_begin(options::OPT_pg), ie = Args.filtered_end(); it != ie; ++it) { - it->claim(); - D.Diag(clang::diag::warn_drv_clang_unsupported) << it->getAsString(Args); + (*it)->claim(); + D.Diag(clang::diag::warn_drv_clang_unsupported) << (*it)->getAsString(Args); } // Claim some arguments which clang supports automatically. @@ -1530,7 +1567,7 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA, if (Args.hasFlag(options::OPT_mrelax_all, options::OPT_mno_relax_all, !IsOpt)) - CmdArgs.push_back("-mrelax-all"); + CmdArgs.push_back("-relax-all"); // FIXME: Add -force_cpusubtype_ALL support, once we have it. @@ -1552,9 +1589,8 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back(Input.getFilename()); } - const char *Exec = - Args.MakeArgString(getToolChain().GetProgramPath(C, "clang")); - Dest.addCommand(new Command(JA, *this, Exec, CmdArgs)); + std::string Exec = getToolChain().getDriver().getClangProgramPath(); + Dest.addCommand(new Command(JA, *this, Exec.c_str(), CmdArgs)); } void gcc::Common::ConstructJob(Compilation &C, const JobAction &JA, @@ -1630,7 +1666,8 @@ void gcc::Common::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &II = *it; // Don't try to pass LLVM or AST inputs to a generic gcc. 
- if (II.getType() == types::TY_LLVMBC) + if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR || + II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC) D.Diag(clang::diag::err_drv_no_linker_llvm_support) << getToolChain().getTripleString(); else if (II.getType() == types::TY_AST) @@ -1653,7 +1690,7 @@ void gcc::Common::ConstructJob(Compilation &C, const JobAction &JA, const char *GCCName = getToolChain().getDriver().CCCGenericGCCName.c_str(); const char *Exec = - Args.MakeArgString(getToolChain().GetProgramPath(C, GCCName)); + Args.MakeArgString(getToolChain().GetProgramPath(GCCName)); Dest.addCommand(new Command(JA, *this, Exec, CmdArgs)); } @@ -1672,7 +1709,8 @@ void gcc::Compile::RenderExtraToolArgs(const JobAction &JA, const Driver &D = getToolChain().getDriver(); // If -flto, etc. are present then make sure not to force assembly output. - if (JA.getType() == types::TY_LLVMBC) + if (JA.getType() == types::TY_LLVM_IR || JA.getType() == types::TY_LTO_IR || + JA.getType() == types::TY_LLVM_BC || JA.getType() == types::TY_LTO_BC) CmdArgs.push_back("-c"); else { if (JA.getType() != types::TY_PP_Asm) @@ -1845,10 +1883,10 @@ void darwin::CC1::AddCC1OptionsArgs(const ArgList &Args, ArgStringList &CmdArgs, for (arg_iterator it = Args.filtered_begin(options::OPT_f_Group, options::OPT_fsyntax_only), ie = Args.filtered_end(); it != ie; ++it) { - if (!it->getOption().matches(options::OPT_fbuiltin_strcat) && - !it->getOption().matches(options::OPT_fbuiltin_strcpy)) { - it->claim(); - it->render(Args, CmdArgs); + if (!(*it)->getOption().matches(options::OPT_fbuiltin_strcat) && + !(*it)->getOption().matches(options::OPT_fbuiltin_strcpy)) { + (*it)->claim(); + (*it)->render(Args, CmdArgs); } } } else @@ -2059,7 +2097,7 @@ void darwin::Preprocess::ConstructJob(Compilation &C, const JobAction &JA, const char *CC1Name = getCC1Name(Inputs[0].getType()); const char *Exec = - Args.MakeArgString(getToolChain().GetProgramPath(C, CC1Name)); + 
Args.MakeArgString(getToolChain().GetProgramPath(CC1Name)); Dest.addCommand(new Command(JA, *this, Exec, CmdArgs)); } @@ -2079,9 +2117,11 @@ void darwin::Compile::ConstructJob(Compilation &C, const JobAction &JA, D.Diag(clang::diag::err_drv_argument_only_allowed_with) << A->getAsString(Args) << "-E"; - if (Output.getType() == types::TY_LLVMAsm) + if (JA.getType() == types::TY_LLVM_IR || + JA.getType() == types::TY_LTO_IR) CmdArgs.push_back("-emit-llvm"); - else if (Output.getType() == types::TY_LLVMBC) + else if (JA.getType() == types::TY_LLVM_BC || + JA.getType() == types::TY_LTO_BC) CmdArgs.push_back("-emit-llvm-bc"); else if (Output.getType() == types::TY_AST) D.Diag(clang::diag::err_drv_no_ast_support) @@ -2157,7 +2197,7 @@ void darwin::Compile::ConstructJob(Compilation &C, const JobAction &JA, const char *CC1Name = getCC1Name(Inputs[0].getType()); const char *Exec = - Args.MakeArgString(getToolChain().GetProgramPath(C, CC1Name)); + Args.MakeArgString(getToolChain().GetProgramPath(CC1Name)); Dest.addCommand(new Command(JA, *this, Exec, CmdArgs)); } @@ -2212,30 +2252,10 @@ void darwin::Assemble::ConstructJob(Compilation &C, const JobAction &JA, // asm_final spec is empty. const char *Exec = - Args.MakeArgString(getToolChain().GetProgramPath(C, "as")); + Args.MakeArgString(getToolChain().GetProgramPath("as")); Dest.addCommand(new Command(JA, *this, Exec, CmdArgs)); } -/// Helper routine for seeing if we should use dsymutil; this is a -/// gcc compatible hack, we should remove it and use the input -/// type information. -static bool isSourceSuffix(const char *Str) { - // match: 'C', 'CPP', 'c', 'cc', 'cp', 'c++', 'cpp', 'cxx', 'm', - // 'mm'. 
- return llvm::StringSwitch<bool>(Str) - .Case("C", true) - .Case("c", true) - .Case("m", true) - .Case("cc", true) - .Case("cp", true) - .Case("mm", true) - .Case("CPP", true) - .Case("c++", true) - .Case("cpp", true) - .Case("cxx", true) - .Default(false); -} - void darwin::DarwinTool::AddDarwinArch(const ArgList &Args, ArgStringList &CmdArgs) const { llvm::StringRef ArchName = getDarwinToolChain().getDarwinArchName(Args); @@ -2335,8 +2355,15 @@ void darwin::Link::AddLinkArgs(const ArgList &Args, Args.AddAllArgs(CmdArgs, options::OPT_multiply__defined); Args.AddAllArgs(CmdArgs, options::OPT_multiply__defined__unused); - if (Args.hasArg(options::OPT_fpie)) - CmdArgs.push_back("-pie"); + if (const Arg *A = Args.getLastArg(options::OPT_fpie, options::OPT_fPIE, + options::OPT_fno_pie, + options::OPT_fno_PIE)) { + if (A->getOption().matches(options::OPT_fpie) || + A->getOption().matches(options::OPT_fPIE)) + CmdArgs.push_back("-pie"); + else + CmdArgs.push_back("-no_pie"); + } Args.AddLastArg(CmdArgs, options::OPT_prebind); Args.AddLastArg(CmdArgs, options::OPT_noprebind); @@ -2484,7 +2511,7 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA, Args.hasArg(options::OPT_shared_libgcc) && getDarwinToolChain().isMacosxVersionLT(10, 5)) { const char *Str = - Args.MakeArgString(getToolChain().GetFilePath(C, "crt3.o")); + Args.MakeArgString(getToolChain().GetFilePath("crt3.o")); CmdArgs.push_back(Str); } } @@ -2544,40 +2571,8 @@ void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA, Args.AddAllArgs(CmdArgs, options::OPT_F); const char *Exec = - Args.MakeArgString(getToolChain().GetProgramPath(C, "ld")); + Args.MakeArgString(getToolChain().GetProgramPath("ld")); Dest.addCommand(new Command(JA, *this, Exec, CmdArgs)); - - // Find the first non-empty base input (we want to ignore linker - // inputs). 
- const char *BaseInput = ""; - for (unsigned i = 0, e = Inputs.size(); i != e; ++i) { - if (Inputs[i].getBaseInput()[0] != '\0') { - BaseInput = Inputs[i].getBaseInput(); - break; - } - } - - // Run dsymutil if we are making an executable in a single step. - // - // FIXME: Currently we don't want to do this when we are part of a - // universal build step, as this would end up creating stray temp - // files. - if (!LinkingOutput && - Args.getLastArg(options::OPT_g_Group) && - !Args.getLastArg(options::OPT_gstabs) && - !Args.getLastArg(options::OPT_g0)) { - // FIXME: This is gross, but matches gcc. The test only considers - // the suffix (not the -x type), and then only of the first - // source input. Awesome. - const char *Suffix = strrchr(BaseInput, '.'); - if (Suffix && isSourceSuffix(Suffix + 1)) { - const char *Exec = - Args.MakeArgString(getToolChain().GetProgramPath(C, "dsymutil")); - ArgStringList CmdArgs; - CmdArgs.push_back(Output.getFilename()); - C.getJobs().addCommand(new Command(JA, *this, Exec, CmdArgs)); - } - } } void darwin::Lipo::ConstructJob(Compilation &C, const JobAction &JA, @@ -2600,7 +2595,27 @@ void darwin::Lipo::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back(II.getFilename()); } const char *Exec = - Args.MakeArgString(getToolChain().GetProgramPath(C, "lipo")); + Args.MakeArgString(getToolChain().GetProgramPath("lipo")); + Dest.addCommand(new Command(JA, *this, Exec, CmdArgs)); +} + +void darwin::Dsymutil::ConstructJob(Compilation &C, const JobAction &JA, + Job &Dest, const InputInfo &Output, + const InputInfoList &Inputs, + const ArgList &Args, + const char *LinkingOutput) const { + ArgStringList CmdArgs; + + assert(Inputs.size() == 1 && "Unable to handle multiple inputs."); + const InputInfo &Input = Inputs[0]; + assert(Input.isFilename() && "Unexpected dsymutil input."); + CmdArgs.push_back(Input.getFilename()); + + CmdArgs.push_back("-o"); + CmdArgs.push_back(Output.getFilename()); + + const char *Exec = + 
Args.MakeArgString(getToolChain().GetProgramPath("dsymutil")); Dest.addCommand(new Command(JA, *this, Exec, CmdArgs)); } @@ -2630,7 +2645,7 @@ void auroraux::Assemble::ConstructJob(Compilation &C, const JobAction &JA, } const char *Exec = - Args.MakeArgString(getToolChain().GetProgramPath(C, "gas")); + Args.MakeArgString(getToolChain().GetProgramPath("gas")); Dest.addCommand(new Command(JA, *this, Exec, CmdArgs)); } @@ -2675,13 +2690,18 @@ void auroraux::Link::ConstructJob(Compilation &C, const JobAction &JA, if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { if (!Args.hasArg(options::OPT_shared)) { - CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crt1.o"))); - CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crti.o"))); - CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtbegin.o"))); + CmdArgs.push_back(Args.MakeArgString( + getToolChain().GetFilePath("crt1.o"))); + CmdArgs.push_back(Args.MakeArgString( + getToolChain().GetFilePath("crti.o"))); + CmdArgs.push_back(Args.MakeArgString( + getToolChain().GetFilePath("crtbegin.o"))); } else { - CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crti.o"))); + CmdArgs.push_back(Args.MakeArgString( + getToolChain().GetFilePath("crti.o"))); } - CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtn.o"))); + CmdArgs.push_back(Args.MakeArgString( + getToolChain().GetFilePath("crtn.o"))); } CmdArgs.push_back(Args.MakeArgString("-L/opt/gcc4/lib/gcc/" @@ -2697,7 +2717,8 @@ void auroraux::Link::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &II = *it; // Don't try to pass LLVM inputs to a generic gcc. 
- if (II.getType() == types::TY_LLVMBC) + if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR || + II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC) D.Diag(clang::diag::err_drv_no_linker_llvm_support) << getToolChain().getTripleString(); @@ -2725,13 +2746,12 @@ void auroraux::Link::ConstructJob(Compilation &C, const JobAction &JA, if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { if (!Args.hasArg(options::OPT_shared)) - CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtend.o"))); -// else -// CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtendS.o"))); + CmdArgs.push_back(Args.MakeArgString( + getToolChain().GetFilePath("crtend.o"))); } const char *Exec = - Args.MakeArgString(getToolChain().GetProgramPath(C, "ld")); + Args.MakeArgString(getToolChain().GetProgramPath("ld")); Dest.addCommand(new Command(JA, *this, Exec, CmdArgs)); } @@ -2761,7 +2781,7 @@ void openbsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA, } const char *Exec = - Args.MakeArgString(getToolChain().GetProgramPath(C, "as")); + Args.MakeArgString(getToolChain().GetProgramPath("as")); Dest.addCommand(new Command(JA, *this, Exec, CmdArgs)); } @@ -2805,10 +2825,13 @@ void openbsd::Link::ConstructJob(Compilation &C, const JobAction &JA, if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { if (!Args.hasArg(options::OPT_shared)) { - CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crt0.o"))); - CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtbegin.o"))); + CmdArgs.push_back(Args.MakeArgString( + getToolChain().GetFilePath("crt0.o"))); + CmdArgs.push_back(Args.MakeArgString( + getToolChain().GetFilePath("crtbegin.o"))); } else { - CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtbeginS.o"))); + CmdArgs.push_back(Args.MakeArgString( + 
getToolChain().GetFilePath("crtbeginS.o"))); } } @@ -2827,7 +2850,8 @@ void openbsd::Link::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &II = *it; // Don't try to pass LLVM inputs to a generic gcc. - if (II.getType() == types::TY_LLVMBC) + if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR || + II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC) D.Diag(clang::diag::err_drv_no_linker_llvm_support) << getToolChain().getTripleString(); @@ -2855,13 +2879,15 @@ void openbsd::Link::ConstructJob(Compilation &C, const JobAction &JA, if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { if (!Args.hasArg(options::OPT_shared)) - CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtend.o"))); + CmdArgs.push_back(Args.MakeArgString( + getToolChain().GetFilePath("crtend.o"))); else - CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtendS.o"))); + CmdArgs.push_back(Args.MakeArgString( + getToolChain().GetFilePath("crtendS.o"))); } const char *Exec = - Args.MakeArgString(getToolChain().GetProgramPath(C, "ld")); + Args.MakeArgString(getToolChain().GetProgramPath("ld")); Dest.addCommand(new Command(JA, *this, Exec, CmdArgs)); } @@ -2903,7 +2929,7 @@ void freebsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA, } const char *Exec = - Args.MakeArgString(getToolChain().GetProgramPath(C, "as")); + Args.MakeArgString(getToolChain().GetProgramPath("as")); Dest.addCommand(new Command(JA, *this, Exec, CmdArgs)); } @@ -2947,12 +2973,17 @@ void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA, if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { if (!Args.hasArg(options::OPT_shared)) { - CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crt1.o"))); - CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crti.o"))); - 
CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtbegin.o"))); + CmdArgs.push_back(Args.MakeArgString( + getToolChain().GetFilePath("crt1.o"))); + CmdArgs.push_back(Args.MakeArgString( + getToolChain().GetFilePath("crti.o"))); + CmdArgs.push_back(Args.MakeArgString( + getToolChain().GetFilePath("crtbegin.o"))); } else { - CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crti.o"))); - CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtbeginS.o"))); + CmdArgs.push_back(Args.MakeArgString( + getToolChain().GetFilePath("crti.o"))); + CmdArgs.push_back(Args.MakeArgString( + getToolChain().GetFilePath("crtbeginS.o"))); } } @@ -2965,7 +2996,8 @@ void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &II = *it; // Don't try to pass LLVM inputs to a generic gcc. - if (II.getType() == types::TY_LLVMBC) + if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR || + II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC) D.Diag(clang::diag::err_drv_no_linker_llvm_support) << getToolChain().getTripleString(); @@ -3011,14 +3043,120 @@ void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA, if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { if (!Args.hasArg(options::OPT_shared)) - CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtend.o"))); + CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath( + "crtend.o"))); else - CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtendS.o"))); - CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtn.o"))); + CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath( + "crtendS.o"))); + CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath( + "crtn.o"))); + } + + const char *Exec = + Args.MakeArgString(getToolChain().GetProgramPath("ld")); + Dest.addCommand(new Command(JA, *this, Exec, 
CmdArgs)); +} + +void minix::Assemble::ConstructJob(Compilation &C, const JobAction &JA, + Job &Dest, const InputInfo &Output, + const InputInfoList &Inputs, + const ArgList &Args, + const char *LinkingOutput) const { + ArgStringList CmdArgs; + + Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, + options::OPT_Xassembler); + + CmdArgs.push_back("-o"); + if (Output.isPipe()) + CmdArgs.push_back("-"); + else + CmdArgs.push_back(Output.getFilename()); + + for (InputInfoList::const_iterator + it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) { + const InputInfo &II = *it; + if (II.isPipe()) + CmdArgs.push_back("-"); + else + CmdArgs.push_back(II.getFilename()); + } + + const char *Exec = + Args.MakeArgString(getToolChain().GetProgramPath("gas")); + Dest.addCommand(new Command(JA, *this, Exec, CmdArgs)); +} + +void minix::Link::ConstructJob(Compilation &C, const JobAction &JA, + Job &Dest, const InputInfo &Output, + const InputInfoList &Inputs, + const ArgList &Args, + const char *LinkingOutput) const { + const Driver &D = getToolChain().getDriver(); + ArgStringList CmdArgs; + + if (Output.isPipe()) { + CmdArgs.push_back("-o"); + CmdArgs.push_back("-"); + } else if (Output.isFilename()) { + CmdArgs.push_back("-o"); + CmdArgs.push_back(Output.getFilename()); + } else { + assert(Output.isNothing() && "Invalid output."); + } + + if (!Args.hasArg(options::OPT_nostdlib) && + !Args.hasArg(options::OPT_nostartfiles)) + CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath( + "/usr/gnu/lib/crtso.o"))); + + Args.AddAllArgs(CmdArgs, options::OPT_L); + Args.AddAllArgs(CmdArgs, options::OPT_T_Group); + Args.AddAllArgs(CmdArgs, options::OPT_e); + + for (InputInfoList::const_iterator + it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) { + const InputInfo &II = *it; + + // Don't try to pass LLVM inputs to a generic gcc. 
+ if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR || + II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC) + D.Diag(clang::diag::err_drv_no_linker_llvm_support) + << getToolChain().getTripleString(); + + if (II.isPipe()) + CmdArgs.push_back("-"); + else if (II.isFilename()) + CmdArgs.push_back(II.getFilename()); + else + II.getInputArg().renderAsInput(Args, CmdArgs); + } + + if (!Args.hasArg(options::OPT_nostdlib) && + !Args.hasArg(options::OPT_nodefaultlibs)) { + if (D.CCCIsCXX) { + CmdArgs.push_back("-lstdc++"); + CmdArgs.push_back("-lm"); + } + + if (Args.hasArg(options::OPT_pthread)) + CmdArgs.push_back("-lpthread"); + CmdArgs.push_back("-lc"); + CmdArgs.push_back("-lgcc"); + CmdArgs.push_back("-L/usr/gnu/lib"); + // FIXME: fill in the correct search path for the final + // support libraries. + CmdArgs.push_back("-L/usr/gnu/lib/gcc/i686-pc-minix/4.4.3"); + } + + if (!Args.hasArg(options::OPT_nostdlib) && + !Args.hasArg(options::OPT_nostartfiles)) { + CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath( + "/usr/gnu/lib/libend.a"))); } const char *Exec = - Args.MakeArgString(getToolChain().GetProgramPath(C, "ld")); + Args.MakeArgString(getToolChain().GetProgramPath("/usr/gnu/bin/gld")); Dest.addCommand(new Command(JA, *this, Exec, CmdArgs)); } @@ -3057,7 +3195,7 @@ void dragonfly::Assemble::ConstructJob(Compilation &C, const JobAction &JA, } const char *Exec = - Args.MakeArgString(getToolChain().GetProgramPath(C, "as")); + Args.MakeArgString(getToolChain().GetProgramPath("as")); Dest.addCommand(new Command(JA, *this, Exec, CmdArgs)); } @@ -3100,12 +3238,17 @@ void dragonfly::Link::ConstructJob(Compilation &C, const JobAction &JA, if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { if (!Args.hasArg(options::OPT_shared)) { - CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crt1.o"))); - CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, 
"crti.o"))); - CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtbegin.o"))); + CmdArgs.push_back( + Args.MakeArgString(getToolChain().GetFilePath("crt1.o"))); + CmdArgs.push_back( + Args.MakeArgString(getToolChain().GetFilePath("crti.o"))); + CmdArgs.push_back( + Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o"))); } else { - CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crti.o"))); - CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtbeginS.o"))); + CmdArgs.push_back( + Args.MakeArgString(getToolChain().GetFilePath("crti.o"))); + CmdArgs.push_back( + Args.MakeArgString(getToolChain().GetFilePath("crtbeginS.o"))); } } @@ -3118,7 +3261,8 @@ void dragonfly::Link::ConstructJob(Compilation &C, const JobAction &JA, const InputInfo &II = *it; // Don't try to pass LLVM inputs to a generic gcc. - if (II.getType() == types::TY_LLVMBC) + if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR || + II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC) D.Diag(clang::diag::err_drv_no_linker_llvm_support) << getToolChain().getTripleString(); @@ -3174,13 +3318,16 @@ void dragonfly::Link::ConstructJob(Compilation &C, const JobAction &JA, if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_nostartfiles)) { if (!Args.hasArg(options::OPT_shared)) - CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtend.o"))); + CmdArgs.push_back(Args.MakeArgString( + getToolChain().GetFilePath("crtend.o"))); else - CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtendS.o"))); - CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(C, "crtn.o"))); + CmdArgs.push_back(Args.MakeArgString( + getToolChain().GetFilePath("crtendS.o"))); + CmdArgs.push_back(Args.MakeArgString( + getToolChain().GetFilePath("crtn.o"))); } const char *Exec = - Args.MakeArgString(getToolChain().GetProgramPath(C, "ld")); + 
Args.MakeArgString(getToolChain().GetProgramPath("ld")); Dest.addCommand(new Command(JA, *this, Exec, CmdArgs)); } diff --git a/contrib/llvm/tools/clang/lib/Driver/Tools.h b/contrib/llvm/tools/clang/lib/Driver/Tools.h index d5e98dd..2a18103 100644 --- a/contrib/llvm/tools/clang/lib/Driver/Tools.h +++ b/contrib/llvm/tools/clang/lib/Driver/Tools.h @@ -288,6 +288,23 @@ namespace darwin { const ArgList &TCArgs, const char *LinkingOutput) const; }; + + class LLVM_LIBRARY_VISIBILITY Dsymutil : public DarwinTool { + public: + Dsymutil(const ToolChain &TC) : DarwinTool("darwin::Dsymutil", + "dsymutil", TC) {} + + virtual bool acceptsPipedInput() const { return false; } + virtual bool canPipeOutput() const { return false; } + virtual bool hasIntegratedCPP() const { return false; } + + virtual void ConstructJob(Compilation &C, const JobAction &JA, + Job &Dest, + const InputInfo &Output, + const InputInfoList &Inputs, + const ArgList &TCArgs, + const char *LinkingOutput) const; + }; } /// openbsd -- Directly call GNU Binutils assembler and linker @@ -360,6 +377,41 @@ namespace freebsd { }; } // end namespace freebsd + /// minix -- Directly call GNU Binutils assembler and linker +namespace minix { + class LLVM_LIBRARY_VISIBILITY Assemble : public Tool { + public: + Assemble(const ToolChain &TC) : Tool("minix::Assemble", "assembler", + TC) {} + + virtual bool acceptsPipedInput() const { return true; } + virtual bool canPipeOutput() const { return true; } + virtual bool hasIntegratedCPP() const { return false; } + + virtual void ConstructJob(Compilation &C, const JobAction &JA, + Job &Dest, + const InputInfo &Output, + const InputInfoList &Inputs, + const ArgList &TCArgs, + const char *LinkingOutput) const; + }; + class LLVM_LIBRARY_VISIBILITY Link : public Tool { + public: + Link(const ToolChain &TC) : Tool("minix::Link", "linker", TC) {} + + virtual bool acceptsPipedInput() const { return true; } + virtual bool canPipeOutput() const { return true; } + virtual bool 
hasIntegratedCPP() const { return false; } + + virtual void ConstructJob(Compilation &C, const JobAction &JA, + Job &Dest, + const InputInfo &Output, + const InputInfoList &Inputs, + const ArgList &TCArgs, + const char *LinkingOutput) const; + }; +} // end namespace minix + /// auroraux -- Directly call GNU Binutils assembler and linker namespace auroraux { class LLVM_LIBRARY_VISIBILITY Assemble : public Tool { diff --git a/contrib/llvm/tools/clang/lib/Driver/Types.cpp b/contrib/llvm/tools/clang/lib/Driver/Types.cpp index 8857fb1..3c07cf2 100644 --- a/contrib/llvm/tools/clang/lib/Driver/Types.cpp +++ b/contrib/llvm/tools/clang/lib/Driver/Types.cpp @@ -86,6 +86,20 @@ bool types::isAcceptedByClang(ID Id) { case TY_CXXHeader: case TY_PP_CXXHeader: case TY_ObjCXXHeader: case TY_PP_ObjCXXHeader: case TY_AST: + case TY_LLVM_IR: case TY_LLVM_BC: + return true; + } +} + +bool types::isOnlyAcceptedByClang(ID Id) { + switch (Id) { + default: + return false; + + case TY_AST: + case TY_LLVM_IR: + case TY_LLVM_BC: + case TY_RewrittenObjC: return true; } } @@ -132,15 +146,19 @@ types::ID types::lookupTypeForExtension(const char *Ext) { .Case("ii", TY_PP_CXX) .Case("mi", TY_PP_ObjC) .Case("mm", TY_ObjCXX) + .Case("bc", TY_LLVM_BC) .Case("cc", TY_CXX) .Case("CC", TY_CXX) .Case("cl", TY_CL) .Case("cp", TY_CXX) .Case("hh", TY_CXXHeader) + .Case("ll", TY_LLVM_IR) .Case("hpp", TY_CXXHeader) .Case("ads", TY_Ada) .Case("adb", TY_Ada) .Case("ast", TY_AST) + .Case("c++", TY_CXX) + .Case("C++", TY_CXX) .Case("cxx", TY_CXX) .Case("cpp", TY_CXX) .Case("CPP", TY_CXX) diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp index 7b8ebf9..87b01d4 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp @@ -13,7 +13,6 @@ #include "clang/Frontend/ASTConsumers.h" #include "clang/Frontend/DocumentXML.h" -#include "clang/Frontend/PathDiagnosticClients.h" 
#include "clang/Basic/Diagnostic.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/FileManager.h" @@ -22,7 +21,6 @@ #include "clang/AST/ASTContext.h" #include "clang/AST/RecordLayout.h" #include "clang/AST/PrettyPrinter.h" -#include "clang/CodeGen/ModuleBuilder.h" #include "llvm/Module.h" #include "llvm/Support/Timer.h" #include "llvm/Support/raw_ostream.h" @@ -111,25 +109,14 @@ namespace { } void ASTViewer::HandleTopLevelSingleDecl(Decl *D) { - if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { - FD->print(llvm::errs()); - - if (Stmt *Body = FD->getBody()) { + if (isa<FunctionDecl>(D) || isa<ObjCMethodDecl>(D)) { + D->print(llvm::errs()); + + if (Stmt *Body = D->getBody()) { llvm::errs() << '\n'; Body->viewAST(); llvm::errs() << '\n'; } - return; - } - - if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) { - MD->print(llvm::errs()); - - if (MD->getBody()) { - llvm::errs() << '\n'; - MD->getBody()->viewAST(); - llvm::errs() << '\n'; - } } } diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp index b0faf0a..e916e20 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp @@ -26,7 +26,8 @@ bool ASTMergeAction::BeginSourceFileAction(CompilerInstance &CI, // FIXME: This is a hack. We need a better way to communicate the // AST file, compiler instance, and file name than member variables // of FrontendAction. 
- AdaptedAction->setCurrentFile(getCurrentFile(), takeCurrentASTUnit()); + AdaptedAction->setCurrentFile(getCurrentFile(), getCurrentFileKind(), + takeCurrentASTUnit()); AdaptedAction->setCompilerInstance(&CI); return AdaptedAction->BeginSourceFileAction(CI, Filename); } @@ -95,8 +96,8 @@ bool ASTMergeAction::hasPCHSupport() const { return AdaptedAction->hasPCHSupport(); } -bool ASTMergeAction::hasASTSupport() const { - return AdaptedAction->hasASTSupport(); +bool ASTMergeAction::hasASTFileSupport() const { + return AdaptedAction->hasASTFileSupport(); } bool ASTMergeAction::hasCodeCompletionSupport() const { diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp index 4730bdc..88f0037 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp @@ -74,11 +74,13 @@ public: return false; } - virtual bool ReadPredefinesBuffer(llvm::StringRef PCHPredef, - FileID PCHBufferID, + virtual bool ReadPredefinesBuffer(const PCHPredefinesBlocks &Buffers, llvm::StringRef OriginalFileName, std::string &SuggestedPredefines) { - Predefines = PCHPredef; + Predefines = Buffers[0].Data; + for (unsigned I = 1, N = Buffers.size(); I != N; ++I) { + Predefines += Buffers[I].Data; + } return false; } @@ -219,6 +221,7 @@ ASTUnit *ASTUnit::LoadFromPCHFile(const std::string &Filename, // FIXME: This is broken, we should store the TargetOptions in the PCH. 
TargetOptions TargetOpts; TargetOpts.ABI = ""; + TargetOpts.CXXABI = "itanium"; TargetOpts.CPU = ""; TargetOpts.Features.clear(); TargetOpts.Triple = TargetTriple; @@ -332,8 +335,10 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocation(CompilerInvocation *CI, assert(Clang.getFrontendOpts().Inputs.size() == 1 && "Invocation must have exactly one source file!"); - assert(Clang.getFrontendOpts().Inputs[0].first != FrontendOptions::IK_AST && + assert(Clang.getFrontendOpts().Inputs[0].first != IK_AST && "FIXME: AST inputs not yet supported here!"); + assert(Clang.getFrontendOpts().Inputs[0].first != IK_LLVM_IR && + "IR inputs not support here!"); // Create the AST unit. AST.reset(new ASTUnit(false)); @@ -354,12 +359,9 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocation(CompilerInvocation *CI, // Create the source manager. Clang.setSourceManager(&AST->getSourceManager()); - // Create the preprocessor. - Clang.createPreprocessor(); - Act.reset(new TopLevelDeclTrackerAction(*AST)); if (!Act->BeginSourceFile(Clang, Clang.getFrontendOpts().Inputs[0].second, - /*IsAST=*/false)) + Clang.getFrontendOpts().Inputs[0].first)) goto error; Act->Execute(); diff --git a/contrib/llvm/tools/clang/lib/Frontend/BoostConAction.cpp b/contrib/llvm/tools/clang/lib/Frontend/BoostConAction.cpp index ae150c6..4a12ff2 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/BoostConAction.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/BoostConAction.cpp @@ -14,17 +14,17 @@ using namespace clang; namespace { - class BoostConASTConsumer : public ASTConsumer, + class BoostConASTConsumer : public ASTConsumer, public RecursiveASTVisitor<BoostConASTConsumer> { public: /// HandleTranslationUnit - This method is called when the ASTs for entire /// translation unit have been parsed. 
virtual void HandleTranslationUnit(ASTContext &Ctx); - + bool VisitCXXRecordDecl(CXXRecordDecl *D) { std::cout << D->getNameAsString() << std::endl; - return false; - } + return true; + } }; } @@ -35,5 +35,5 @@ ASTConsumer *BoostConAction::CreateASTConsumer(CompilerInstance &CI, void BoostConASTConsumer::HandleTranslationUnit(ASTContext &Ctx) { fprintf(stderr, "Welcome to BoostCon!\n"); - Visit(Ctx.getTranslationUnitDecl()); + TraverseDecl(Ctx.getTranslationUnitDecl()); } diff --git a/contrib/llvm/tools/clang/lib/Frontend/CMakeLists.txt b/contrib/llvm/tools/clang/lib/Frontend/CMakeLists.txt index 01592d1..8757e2c 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/CMakeLists.txt +++ b/contrib/llvm/tools/clang/lib/Frontend/CMakeLists.txt @@ -4,23 +4,18 @@ add_clang_library(clangFrontend ASTConsumers.cpp ASTMerge.cpp ASTUnit.cpp - AnalysisConsumer.cpp BoostConAction.cpp CacheTokens.cpp - CodeGenAction.cpp CompilerInstance.cpp CompilerInvocation.cpp DeclXML.cpp DependencyFile.cpp DiagChecker.cpp DocumentXML.cpp - FixItRewriter.cpp FrontendAction.cpp FrontendActions.cpp FrontendOptions.cpp GeneratePCH.cpp - HTMLDiagnostics.cpp - HTMLPrint.cpp InitHeaderSearch.cpp InitPreprocessor.cpp LangStandards.cpp @@ -30,12 +25,8 @@ add_clang_library(clangFrontend PCHWriter.cpp PCHWriterDecl.cpp PCHWriterStmt.cpp - PlistDiagnostics.cpp PrintParserCallbacks.cpp PrintPreprocessedOutput.cpp - RewriteMacros.cpp - RewriteObjC.cpp - RewriteTest.cpp StmtXML.cpp TextDiagnosticBuffer.cpp TextDiagnosticPrinter.cpp @@ -53,7 +44,10 @@ IF(MSVC) ENDIF(MSVC) add_dependencies(clangFrontend + ClangAttrClasses + ClangAttrList ClangDiagnosticFrontend ClangDiagnosticLex ClangDiagnosticSema + ClangDeclNodes ClangStmtNodes) diff --git a/contrib/llvm/tools/clang/lib/Frontend/CodeGenAction.cpp b/contrib/llvm/tools/clang/lib/Frontend/CodeGenAction.cpp deleted file mode 100644 index 3416aa8..0000000 --- a/contrib/llvm/tools/clang/lib/Frontend/CodeGenAction.cpp +++ /dev/null @@ -1,593 +0,0 @@ -//===--- 
CodeGenAction.cpp - LLVM Code Generation Frontend Action ---------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// - -#include "clang/Frontend/CodeGenAction.h" -#include "clang/Basic/SourceManager.h" -#include "clang/Basic/TargetInfo.h" -#include "clang/Basic/TargetOptions.h" -#include "clang/AST/ASTConsumer.h" -#include "clang/AST/ASTContext.h" -#include "clang/AST/DeclGroup.h" -#include "clang/CodeGen/CodeGenOptions.h" -#include "clang/CodeGen/ModuleBuilder.h" -#include "clang/Frontend/ASTConsumers.h" -#include "clang/Frontend/CompilerInstance.h" -#include "clang/Frontend/FrontendDiagnostic.h" -#include "llvm/LLVMContext.h" -#include "llvm/Module.h" -#include "llvm/PassManager.h" -#include "llvm/ADT/OwningPtr.h" -#include "llvm/Assembly/PrintModulePass.h" -#include "llvm/Analysis/CallGraph.h" -#include "llvm/Analysis/Verifier.h" -#include "llvm/Bitcode/ReaderWriter.h" -#include "llvm/CodeGen/RegAllocRegistry.h" -#include "llvm/CodeGen/SchedulerRegistry.h" -#include "llvm/Support/FormattedStream.h" -#include "llvm/Support/MemoryBuffer.h" -#include "llvm/Support/SourceMgr.h" -#include "llvm/Support/StandardPasses.h" -#include "llvm/Support/Timer.h" -#include "llvm/Target/SubtargetFeature.h" -#include "llvm/Target/TargetData.h" -#include "llvm/Target/TargetMachine.h" -#include "llvm/Target/TargetOptions.h" -#include "llvm/Target/TargetRegistry.h" -using namespace clang; -using namespace llvm; - -namespace { - enum BackendAction { - Backend_EmitAssembly, ///< Emit native assembly files - Backend_EmitBC, ///< Emit LLVM bitcode files - Backend_EmitLL, ///< Emit human-readable LLVM assembly - Backend_EmitNothing, ///< Don't emit anything (benchmarking mode) - Backend_EmitMCNull, ///< Run CodeGen, but don't emit anything - Backend_EmitObj ///< Emit native object files - 
}; - - class BackendConsumer : public ASTConsumer { - Diagnostic &Diags; - BackendAction Action; - const CodeGenOptions &CodeGenOpts; - const LangOptions &LangOpts; - const TargetOptions &TargetOpts; - llvm::raw_ostream *AsmOutStream; - llvm::formatted_raw_ostream FormattedOutStream; - ASTContext *Context; - - Timer LLVMIRGeneration; - Timer CodeGenerationTime; - - llvm::OwningPtr<CodeGenerator> Gen; - - llvm::OwningPtr<llvm::Module> TheModule; - llvm::TargetData *TheTargetData; - - mutable FunctionPassManager *CodeGenPasses; - mutable PassManager *PerModulePasses; - mutable FunctionPassManager *PerFunctionPasses; - - FunctionPassManager *getCodeGenPasses() const; - PassManager *getPerModulePasses() const; - FunctionPassManager *getPerFunctionPasses() const; - - void CreatePasses(); - - /// AddEmitPasses - Add passes necessary to emit assembly or LLVM IR. - /// - /// \return True on success. - bool AddEmitPasses(); - - void EmitAssembly(); - - public: - BackendConsumer(BackendAction action, Diagnostic &_Diags, - const LangOptions &langopts, const CodeGenOptions &compopts, - const TargetOptions &targetopts, bool TimePasses, - const std::string &infile, llvm::raw_ostream *OS, - LLVMContext &C) : - Diags(_Diags), - Action(action), - CodeGenOpts(compopts), - LangOpts(langopts), - TargetOpts(targetopts), - AsmOutStream(OS), - LLVMIRGeneration("LLVM IR Generation Time"), - CodeGenerationTime("Code Generation Time"), - Gen(CreateLLVMCodeGen(Diags, infile, compopts, C)), - TheTargetData(0), - CodeGenPasses(0), PerModulePasses(0), PerFunctionPasses(0) { - - if (AsmOutStream) - FormattedOutStream.setStream(*AsmOutStream, - formatted_raw_ostream::PRESERVE_STREAM); - - llvm::TimePassesIsEnabled = TimePasses; - } - - ~BackendConsumer() { - delete TheTargetData; - delete CodeGenPasses; - delete PerModulePasses; - delete PerFunctionPasses; - } - - llvm::Module *takeModule() { return TheModule.take(); } - - virtual void Initialize(ASTContext &Ctx) { - Context = &Ctx; - - if 
(llvm::TimePassesIsEnabled) - LLVMIRGeneration.startTimer(); - - Gen->Initialize(Ctx); - - TheModule.reset(Gen->GetModule()); - TheTargetData = new llvm::TargetData(Ctx.Target.getTargetDescription()); - - if (llvm::TimePassesIsEnabled) - LLVMIRGeneration.stopTimer(); - } - - virtual void HandleTopLevelDecl(DeclGroupRef D) { - PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(), - Context->getSourceManager(), - "LLVM IR generation of declaration"); - - if (llvm::TimePassesIsEnabled) - LLVMIRGeneration.startTimer(); - - Gen->HandleTopLevelDecl(D); - - if (llvm::TimePassesIsEnabled) - LLVMIRGeneration.stopTimer(); - } - - virtual void HandleTranslationUnit(ASTContext &C) { - { - PrettyStackTraceString CrashInfo("Per-file LLVM IR generation"); - if (llvm::TimePassesIsEnabled) - LLVMIRGeneration.startTimer(); - - Gen->HandleTranslationUnit(C); - - if (llvm::TimePassesIsEnabled) - LLVMIRGeneration.stopTimer(); - } - - // EmitAssembly times and registers crash info itself. - EmitAssembly(); - - // Force a flush here in case we never get released. 
- if (AsmOutStream) - FormattedOutStream.flush(); - } - - virtual void HandleTagDeclDefinition(TagDecl *D) { - PrettyStackTraceDecl CrashInfo(D, SourceLocation(), - Context->getSourceManager(), - "LLVM IR generation of declaration"); - Gen->HandleTagDeclDefinition(D); - } - - virtual void CompleteTentativeDefinition(VarDecl *D) { - Gen->CompleteTentativeDefinition(D); - } - - virtual void HandleVTable(CXXRecordDecl *RD, bool DefinitionRequired) { - Gen->HandleVTable(RD, DefinitionRequired); - } - - static void InlineAsmDiagHandler(const llvm::SMDiagnostic &SM,void *Context, - unsigned LocCookie) { - SourceLocation Loc = SourceLocation::getFromRawEncoding(LocCookie); - ((BackendConsumer*)Context)->InlineAsmDiagHandler2(SM, Loc); - } - - void InlineAsmDiagHandler2(const llvm::SMDiagnostic &, - SourceLocation LocCookie); - }; -} - -FunctionPassManager *BackendConsumer::getCodeGenPasses() const { - if (!CodeGenPasses) { - CodeGenPasses = new FunctionPassManager(&*TheModule); - CodeGenPasses->add(new TargetData(*TheTargetData)); - } - - return CodeGenPasses; -} - -PassManager *BackendConsumer::getPerModulePasses() const { - if (!PerModulePasses) { - PerModulePasses = new PassManager(); - PerModulePasses->add(new TargetData(*TheTargetData)); - } - - return PerModulePasses; -} - -FunctionPassManager *BackendConsumer::getPerFunctionPasses() const { - if (!PerFunctionPasses) { - PerFunctionPasses = new FunctionPassManager(&*TheModule); - PerFunctionPasses->add(new TargetData(*TheTargetData)); - } - - return PerFunctionPasses; -} - -bool BackendConsumer::AddEmitPasses() { - if (Action == Backend_EmitNothing) - return true; - - if (Action == Backend_EmitBC) { - getPerModulePasses()->add(createBitcodeWriterPass(FormattedOutStream)); - return true; - } - - if (Action == Backend_EmitLL) { - getPerModulePasses()->add(createPrintModulePass(&FormattedOutStream)); - return true; - } - - bool Fast = CodeGenOpts.OptimizationLevel == 0; - - // Create the TargetMachine for generating 
code. - std::string Error; - std::string Triple = TheModule->getTargetTriple(); - const llvm::Target *TheTarget = TargetRegistry::lookupTarget(Triple, Error); - if (!TheTarget) { - Diags.Report(diag::err_fe_unable_to_create_target) << Error; - return false; - } - - // FIXME: Expose these capabilities via actual APIs!!!! Aside from just - // being gross, this is also totally broken if we ever care about - // concurrency. - llvm::NoFramePointerElim = CodeGenOpts.DisableFPElim; - if (CodeGenOpts.FloatABI == "soft") - llvm::FloatABIType = llvm::FloatABI::Soft; - else if (CodeGenOpts.FloatABI == "hard") - llvm::FloatABIType = llvm::FloatABI::Hard; - else { - assert(CodeGenOpts.FloatABI.empty() && "Invalid float abi!"); - llvm::FloatABIType = llvm::FloatABI::Default; - } - NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS; - llvm::UseSoftFloat = CodeGenOpts.SoftFloat; - UnwindTablesMandatory = CodeGenOpts.UnwindTables; - - TargetMachine::setAsmVerbosityDefault(CodeGenOpts.AsmVerbose); - - TargetMachine::setFunctionSections(CodeGenOpts.FunctionSections); - TargetMachine::setDataSections (CodeGenOpts.DataSections); - - // FIXME: Parse this earlier. - if (CodeGenOpts.RelocationModel == "static") { - TargetMachine::setRelocationModel(llvm::Reloc::Static); - } else if (CodeGenOpts.RelocationModel == "pic") { - TargetMachine::setRelocationModel(llvm::Reloc::PIC_); - } else { - assert(CodeGenOpts.RelocationModel == "dynamic-no-pic" && - "Invalid PIC model!"); - TargetMachine::setRelocationModel(llvm::Reloc::DynamicNoPIC); - } - // FIXME: Parse this earlier. 
- if (CodeGenOpts.CodeModel == "small") { - TargetMachine::setCodeModel(llvm::CodeModel::Small); - } else if (CodeGenOpts.CodeModel == "kernel") { - TargetMachine::setCodeModel(llvm::CodeModel::Kernel); - } else if (CodeGenOpts.CodeModel == "medium") { - TargetMachine::setCodeModel(llvm::CodeModel::Medium); - } else if (CodeGenOpts.CodeModel == "large") { - TargetMachine::setCodeModel(llvm::CodeModel::Large); - } else { - assert(CodeGenOpts.CodeModel.empty() && "Invalid code model!"); - TargetMachine::setCodeModel(llvm::CodeModel::Default); - } - - std::vector<const char *> BackendArgs; - BackendArgs.push_back("clang"); // Fake program name. - if (!CodeGenOpts.DebugPass.empty()) { - BackendArgs.push_back("-debug-pass"); - BackendArgs.push_back(CodeGenOpts.DebugPass.c_str()); - } - if (!CodeGenOpts.LimitFloatPrecision.empty()) { - BackendArgs.push_back("-limit-float-precision"); - BackendArgs.push_back(CodeGenOpts.LimitFloatPrecision.c_str()); - } - if (llvm::TimePassesIsEnabled) - BackendArgs.push_back("-time-passes"); - BackendArgs.push_back(0); - llvm::cl::ParseCommandLineOptions(BackendArgs.size() - 1, - const_cast<char **>(&BackendArgs[0])); - - std::string FeaturesStr; - if (TargetOpts.CPU.size() || TargetOpts.Features.size()) { - SubtargetFeatures Features; - Features.setCPU(TargetOpts.CPU); - for (std::vector<std::string>::const_iterator - it = TargetOpts.Features.begin(), - ie = TargetOpts.Features.end(); it != ie; ++it) - Features.AddFeature(*it); - FeaturesStr = Features.getString(); - } - TargetMachine *TM = TheTarget->createTargetMachine(Triple, FeaturesStr); - - if (CodeGenOpts.RelaxAll) - TM->setMCRelaxAll(true); - - // Set register scheduler & allocation policy. - RegisterScheduler::setDefault(createDefaultScheduler); - RegisterRegAlloc::setDefault(Fast ? createLocalRegisterAllocator : - createLinearScanRegisterAllocator); - - // Create the code generator passes. 
- FunctionPassManager *PM = getCodeGenPasses(); - CodeGenOpt::Level OptLevel = CodeGenOpt::Default; - - switch (CodeGenOpts.OptimizationLevel) { - default: break; - case 0: OptLevel = CodeGenOpt::None; break; - case 3: OptLevel = CodeGenOpt::Aggressive; break; - } - - // Normal mode, emit a .s or .o file by running the code generator. Note, - // this also adds codegenerator level optimization passes. - TargetMachine::CodeGenFileType CGFT = TargetMachine::CGFT_AssemblyFile; - if (Action == Backend_EmitObj) - CGFT = TargetMachine::CGFT_ObjectFile; - else if (Action == Backend_EmitMCNull) - CGFT = TargetMachine::CGFT_Null; - else - assert(Action == Backend_EmitAssembly && "Invalid action!"); - if (TM->addPassesToEmitFile(*PM, FormattedOutStream, CGFT, OptLevel, - /*DisableVerify=*/!CodeGenOpts.VerifyModule)) { - Diags.Report(diag::err_fe_unable_to_interface_with_target); - return false; - } - - return true; -} - -void BackendConsumer::CreatePasses() { - unsigned OptLevel = CodeGenOpts.OptimizationLevel; - CodeGenOptions::InliningMethod Inlining = CodeGenOpts.Inlining; - - // Handle disabling of LLVM optimization, where we want to preserve the - // internal module before any optimization. - if (CodeGenOpts.DisableLLVMOpts) { - OptLevel = 0; - Inlining = CodeGenOpts.NoInlining; - } - - // In -O0 if checking is disabled, we don't even have per-function passes. - if (CodeGenOpts.VerifyModule) - getPerFunctionPasses()->add(createVerifierPass()); - - // Assume that standard function passes aren't run for -O0. - if (OptLevel > 0) - llvm::createStandardFunctionPasses(getPerFunctionPasses(), OptLevel); - - llvm::Pass *InliningPass = 0; - switch (Inlining) { - case CodeGenOptions::NoInlining: break; - case CodeGenOptions::NormalInlining: { - // Set the inline threshold following llvm-gcc. - // - // FIXME: Derive these constants in a principled fashion. 
- unsigned Threshold = 225; - if (CodeGenOpts.OptimizeSize) - Threshold = 75; - else if (OptLevel > 2) - Threshold = 275; - InliningPass = createFunctionInliningPass(Threshold); - break; - } - case CodeGenOptions::OnlyAlwaysInlining: - InliningPass = createAlwaysInlinerPass(); // Respect always_inline - break; - } - - // For now we always create per module passes. - PassManager *PM = getPerModulePasses(); - llvm::createStandardModulePasses(PM, OptLevel, CodeGenOpts.OptimizeSize, - CodeGenOpts.UnitAtATime, - CodeGenOpts.UnrollLoops, - /*SimplifyLibCalls=*/!LangOpts.NoBuiltin, - /*HaveExceptions=*/true, - InliningPass); -} - -/// EmitAssembly - Handle interaction with LLVM backend to generate -/// actual machine code. -void BackendConsumer::EmitAssembly() { - // Silently ignore if we weren't initialized for some reason. - if (!TheModule || !TheTargetData) - return; - - TimeRegion Region(llvm::TimePassesIsEnabled ? &CodeGenerationTime : 0); - - // Make sure IR generation is happy with the module. This is - // released by the module provider. - Module *M = Gen->ReleaseModule(); - if (!M) { - // The module has been released by IR gen on failures, do not - // double free. - TheModule.take(); - return; - } - - assert(TheModule.get() == M && - "Unexpected module change during IR generation"); - - CreatePasses(); - if (!AddEmitPasses()) - return; - - // Run passes. For now we do all passes at once, but eventually we - // would like to have the option of streaming code generation. 
- - if (PerFunctionPasses) { - PrettyStackTraceString CrashInfo("Per-function optimization"); - - PerFunctionPasses->doInitialization(); - for (Module::iterator I = M->begin(), E = M->end(); I != E; ++I) - if (!I->isDeclaration()) - PerFunctionPasses->run(*I); - PerFunctionPasses->doFinalization(); - } - - if (PerModulePasses) { - PrettyStackTraceString CrashInfo("Per-module optimization passes"); - PerModulePasses->run(*M); - } - - if (CodeGenPasses) { - PrettyStackTraceString CrashInfo("Code generation"); - - // Install an inline asm handler so that diagnostics get printed through our - // diagnostics hooks. - LLVMContext &Ctx = TheModule->getContext(); - void *OldHandler = Ctx.getInlineAsmDiagnosticHandler(); - void *OldContext = Ctx.getInlineAsmDiagnosticContext(); - Ctx.setInlineAsmDiagnosticHandler((void*)(intptr_t)InlineAsmDiagHandler, - this); - - CodeGenPasses->doInitialization(); - for (Module::iterator I = M->begin(), E = M->end(); I != E; ++I) - if (!I->isDeclaration()) - CodeGenPasses->run(*I); - CodeGenPasses->doFinalization(); - - Ctx.setInlineAsmDiagnosticHandler(OldHandler, OldContext); - } -} - -/// ConvertBackendLocation - Convert a location in a temporary llvm::SourceMgr -/// buffer to be a valid FullSourceLoc. -static FullSourceLoc ConvertBackendLocation(const llvm::SMDiagnostic &D, - SourceManager &CSM) { - // Get both the clang and llvm source managers. The location is relative to - // a memory buffer that the LLVM Source Manager is handling, we need to add - // a copy to the Clang source manager. - const llvm::SourceMgr &LSM = *D.getSourceMgr(); - - // We need to copy the underlying LLVM memory buffer because llvm::SourceMgr - // already owns its one and clang::SourceManager wants to own its one. - const MemoryBuffer *LBuf = - LSM.getMemoryBuffer(LSM.FindBufferContainingLoc(D.getLoc())); - - // Create the copy and transfer ownership to clang::SourceManager. 
- llvm::MemoryBuffer *CBuf = - llvm::MemoryBuffer::getMemBufferCopy(LBuf->getBuffer(), - LBuf->getBufferIdentifier()); - FileID FID = CSM.createFileIDForMemBuffer(CBuf); - - // Translate the offset into the file. - unsigned Offset = D.getLoc().getPointer() - LBuf->getBufferStart(); - SourceLocation NewLoc = - CSM.getLocForStartOfFile(FID).getFileLocWithOffset(Offset); - return FullSourceLoc(NewLoc, CSM); -} - - -/// InlineAsmDiagHandler2 - This function is invoked when the backend hits an -/// error parsing inline asm. The SMDiagnostic indicates the error relative to -/// the temporary memory buffer that the inline asm parser has set up. -void BackendConsumer::InlineAsmDiagHandler2(const llvm::SMDiagnostic &D, - SourceLocation LocCookie) { - // There are a couple of different kinds of errors we could get here. First, - // we re-format the SMDiagnostic in terms of a clang diagnostic. - - // Strip "error: " off the start of the message string. - llvm::StringRef Message = D.getMessage(); - if (Message.startswith("error: ")) - Message = Message.substr(7); - - // There are two cases: the SMDiagnostic could have a inline asm source - // location or it might not. If it does, translate the location. - FullSourceLoc Loc; - if (D.getLoc() != SMLoc()) - Loc = ConvertBackendLocation(D, Context->getSourceManager()); - Diags.Report(Loc, diag::err_fe_inline_asm).AddString(Message); - - // This could be a problem with no clang-level source location information. - // In this case, LocCookie is invalid. If there is source level information, - // print an "generated from" note. - if (LocCookie.isValid()) - Diags.Report(FullSourceLoc(LocCookie, Context->getSourceManager()), - diag::note_fe_inline_asm_here); -} - -// - -CodeGenAction::CodeGenAction(unsigned _Act) : Act(_Act) {} - -CodeGenAction::~CodeGenAction() {} - -void CodeGenAction::EndSourceFileAction() { - // If the consumer creation failed, do nothing. 
- if (!getCompilerInstance().hasASTConsumer()) - return; - - // Steal the module from the consumer. - BackendConsumer *Consumer = static_cast<BackendConsumer*>( - &getCompilerInstance().getASTConsumer()); - - TheModule.reset(Consumer->takeModule()); -} - -llvm::Module *CodeGenAction::takeModule() { - return TheModule.take(); -} - -ASTConsumer *CodeGenAction::CreateASTConsumer(CompilerInstance &CI, - llvm::StringRef InFile) { - BackendAction BA = static_cast<BackendAction>(Act); - llvm::OwningPtr<llvm::raw_ostream> OS; - switch (BA) { - case Backend_EmitAssembly: - OS.reset(CI.createDefaultOutputFile(false, InFile, "s")); - break; - case Backend_EmitLL: - OS.reset(CI.createDefaultOutputFile(false, InFile, "ll")); - break; - case Backend_EmitBC: - OS.reset(CI.createDefaultOutputFile(true, InFile, "bc")); - break; - case Backend_EmitNothing: - break; - case Backend_EmitMCNull: - case Backend_EmitObj: - OS.reset(CI.createDefaultOutputFile(true, InFile, "o")); - break; - } - if (BA != Backend_EmitNothing && !OS) - return 0; - - return new BackendConsumer(BA, CI.getDiagnostics(), CI.getLangOpts(), - CI.getCodeGenOpts(), CI.getTargetOpts(), - CI.getFrontendOpts().ShowTimers, InFile, OS.take(), - CI.getLLVMContext()); -} - -EmitAssemblyAction::EmitAssemblyAction() - : CodeGenAction(Backend_EmitAssembly) {} - -EmitBCAction::EmitBCAction() : CodeGenAction(Backend_EmitBC) {} - -EmitLLVMAction::EmitLLVMAction() : CodeGenAction(Backend_EmitLL) {} - -EmitLLVMOnlyAction::EmitLLVMOnlyAction() : CodeGenAction(Backend_EmitNothing) {} - -EmitCodeGenOnlyAction::EmitCodeGenOnlyAction() : CodeGenAction(Backend_EmitMCNull) {} - -EmitObjAction::EmitObjAction() : CodeGenAction(Backend_EmitObj) {} diff --git a/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp b/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp index 2b25168..5037c83 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp @@ 
-37,7 +37,7 @@ using namespace clang; CompilerInstance::CompilerInstance() - : Invocation(new CompilerInvocation()) { + : Invocation(new CompilerInvocation()), Reader(0) { } CompilerInstance::~CompilerInstance() { @@ -255,6 +255,8 @@ void CompilerInstance::createPCHExternalASTSource(llvm::StringRef Path) { llvm::OwningPtr<ExternalASTSource> Source; Source.reset(createPCHExternalASTSource(Path, getHeaderSearchOpts().Sysroot, getPreprocessor(), getASTContext())); + // Remember the PCHReader, but in a non-owning way. + Reader = static_cast<PCHReader*>(Source.get()); getASTContext().setExternalSource(Source); } @@ -442,7 +444,7 @@ bool CompilerInstance::InitializeSourceManager(llvm::StringRef InputFile, } } else { llvm::MemoryBuffer *SB = llvm::MemoryBuffer::getSTDIN(); - SourceMgr.createMainFileIDForMemBuffer(SB); + if (SB) SourceMgr.createMainFileIDForMemBuffer(SB); if (SourceMgr.getMainFileID().isInvalid()) { Diags.Report(diag::err_fe_error_reading_stdin); return false; @@ -489,27 +491,11 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) { for (unsigned i = 0, e = getFrontendOpts().Inputs.size(); i != e; ++i) { const std::string &InFile = getFrontendOpts().Inputs[i].second; - // If we aren't using an AST file, setup the file and source managers and - // the preprocessor. - bool IsAST = getFrontendOpts().Inputs[i].first == FrontendOptions::IK_AST; - if (!IsAST) { - if (!i) { - // Create a file manager object to provide access to and cache the - // filesystem. - createFileManager(); - - // Create the source manager. - createSourceManager(); - } else { - // Reset the ID tables if we are reusing the SourceManager. - getSourceManager().clearIDTables(); - } - - // Create the preprocessor. - createPreprocessor(); - } + // Reset the ID tables if we are reusing the SourceManager. 
+ if (hasSourceManager()) + getSourceManager().clearIDTables(); - if (Act.BeginSourceFile(*this, InFile, IsAST)) { + if (Act.BeginSourceFile(*this, InFile, getFrontendOpts().Inputs[i].first)) { Act.Execute(); Act.EndSourceFile(); } @@ -530,7 +516,7 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) { OS << " generated.\n"; } - if (getFrontendOpts().ShowStats) { + if (getFrontendOpts().ShowStats && hasFileManager()) { getFileManager().PrintStats(); OS << "\n"; } diff --git a/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp b/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp index ff372e1..53debdb 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp @@ -112,6 +112,8 @@ static void AnalyzerOptsToArgs(const AnalyzerOptions &Opts, Res.push_back("-analyzer-experimental-checks"); if (Opts.EnableExperimentalInternalChecks) Res.push_back("-analyzer-experimental-internal-checks"); + if (Opts.EnableIdempotentOperationChecker) + Res.push_back("-analyzer-idempotent-operation"); } static void CodeGenOptsToArgs(const CodeGenOptions &Opts, @@ -132,6 +134,8 @@ static void CodeGenOptsToArgs(const CodeGenOptions &Opts, Res.push_back("-fno-common"); if (Opts.NoImplicitFloat) Res.push_back("-no-implicit-float"); + if (Opts.OmitLeafFramePointer) + Res.push_back("-momit-leaf-frame-pointer"); if (Opts.OptimizeSize) { assert(Opts.OptimizationLevel == 2 && "Invalid options!"); Res.push_back("-Os"); @@ -280,20 +284,21 @@ static void DiagnosticOptsToArgs(const DiagnosticOptions &Opts, Res.push_back("-W" + Opts.Warnings[i]); } -static const char *getInputKindName(FrontendOptions::InputKind Kind) { +static const char *getInputKindName(InputKind Kind) { switch (Kind) { - case FrontendOptions::IK_None: break; - case FrontendOptions::IK_AST: return "ast"; - case FrontendOptions::IK_Asm: return "assembler-with-cpp"; - case FrontendOptions::IK_C: return "c"; - case 
FrontendOptions::IK_CXX: return "c++"; - case FrontendOptions::IK_ObjC: return "objective-c"; - case FrontendOptions::IK_ObjCXX: return "objective-c++"; - case FrontendOptions::IK_OpenCL: return "cl"; - case FrontendOptions::IK_PreprocessedC: return "cpp-output"; - case FrontendOptions::IK_PreprocessedCXX: return "c++-cpp-output"; - case FrontendOptions::IK_PreprocessedObjC: return "objective-c-cpp-output"; - case FrontendOptions::IK_PreprocessedObjCXX:return "objective-c++-cpp-output"; + case IK_None: break; + case IK_AST: return "ast"; + case IK_Asm: return "assembler-with-cpp"; + case IK_C: return "c"; + case IK_CXX: return "c++"; + case IK_LLVM_IR: return "ir"; + case IK_ObjC: return "objective-c"; + case IK_ObjCXX: return "objective-c++"; + case IK_OpenCL: return "cl"; + case IK_PreprocessedC: return "cpp-output"; + case IK_PreprocessedCXX: return "c++-cpp-output"; + case IK_PreprocessedObjC: return "objective-c-cpp-output"; + case IK_PreprocessedObjCXX:return "objective-c++-cpp-output"; } llvm_unreachable("Unexpected language kind!"); @@ -348,6 +353,8 @@ static void FrontendOptsToArgs(const FrontendOptions &Opts, Res.push_back("-disable-free"); if (Opts.RelocatablePCH) Res.push_back("-relocatable-pch"); + if (Opts.ChainedPCH) + Res.push_back("-chained-pch"); if (Opts.ShowHelp) Res.push_back("-help"); if (Opts.ShowMacrosInCodeCompletion) @@ -396,6 +403,10 @@ static void FrontendOptsToArgs(const FrontendOptions &Opts, if (!Opts.ActionName.empty()) { Res.push_back("-plugin"); Res.push_back(Opts.ActionName); + for(unsigned i = 0, e = Opts.PluginArgs.size(); i != e; ++i) { + Res.push_back("-plugin-arg-" + Opts.ActionName); + Res.push_back(Opts.PluginArgs[i]); + } } for (unsigned i = 0, e = Opts.Plugins.size(); i != e; ++i) { Res.push_back("-load"); @@ -546,8 +557,11 @@ static void LangOptsToArgs(const LangOptions &Opts, Res.push_back("-femit-all-decls"); if (Opts.MathErrno) Res.push_back("-fmath-errno"); - if (Opts.OverflowChecking) - Res.push_back("-ftrapv"); + 
switch (Opts.getSignedOverflowBehavior()) { + case LangOptions::SOB_Undefined: break; + case LangOptions::SOB_Defined: Res.push_back("-fwrapv"); break; + case LangOptions::SOB_Trapping: Res.push_back("-ftrapv"); break; + } if (Opts.HeinousExtensions) Res.push_back("-fheinous-gnu-extensions"); // Optimize is implicit. @@ -596,6 +610,9 @@ static void LangOptsToArgs(const LangOptions &Opts, Res.push_back("protected"); } } + if (Opts.InlineVisibilityHidden) + Res.push_back("-fvisibility-inlines-hidden"); + if (Opts.getStackProtectorMode() != 0) { Res.push_back("-stack-protector"); Res.push_back(llvm::utostr(Opts.getStackProtectorMode())); @@ -681,6 +698,8 @@ static void TargetOptsToArgs(const TargetOptions &Opts, Res.push_back("-target-abi"); Res.push_back(Opts.ABI); } + Res.push_back("-cxx-abi"); + Res.push_back(Opts.CXXABI); for (unsigned i = 0, e = Opts.Features.size(); i != e; ++i) { Res.push_back("-target-feature"); Res.push_back(Opts.Features[i]); @@ -728,7 +747,7 @@ static void ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args, // FIXME: Error handling. if (Value == NumStores) Diags.Report(diag::err_drv_invalid_value) - << Args.getLastArg(OPT_O)->getAsString(Args) << Name; + << A->getAsString(Args) << Name; else Opts.AnalysisStoreOpt = Value; } @@ -743,7 +762,7 @@ static void ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args, // FIXME: Error handling. if (Value == NumConstraints) Diags.Report(diag::err_drv_invalid_value) - << Args.getLastArg(OPT_O)->getAsString(Args) << Name; + << A->getAsString(Args) << Name; else Opts.AnalysisConstraintsOpt = Value; } @@ -758,7 +777,7 @@ static void ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args, // FIXME: Error handling. 
if (Value == NUM_ANALYSIS_DIAG_CLIENTS) Diags.Report(diag::err_drv_invalid_value) - << Args.getLastArg(OPT_O)->getAsString(Args) << Name; + << A->getAsString(Args) << Name; else Opts.AnalysisDiagOpt = Value; } @@ -775,6 +794,8 @@ static void ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args, Opts.EnableExperimentalChecks = Args.hasArg(OPT_analyzer_experimental_checks); Opts.EnableExperimentalInternalChecks = Args.hasArg(OPT_analyzer_experimental_internal_checks); + Opts.EnableIdempotentOperationChecker = + Args.hasArg(OPT_analyzer_idempotent_operation); Opts.TrimGraph = Args.hasArg(OPT_trim_egraph); Opts.MaxNodes = Args.getLastArgIntValue(OPT_analyzer_max_nodes, 150000,Diags); Opts.MaxLoop = Args.getLastArgIntValue(OPT_analyzer_max_loop, 3, Diags); @@ -808,6 +829,8 @@ static void ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, Opts.NoCommon = Args.hasArg(OPT_fno_common); Opts.NoImplicitFloat = Args.hasArg(OPT_no_implicit_float); Opts.OptimizeSize = Args.hasArg(OPT_Os); + Opts.SimplifyLibCalls = !(Args.hasArg(OPT_fno_builtin) || + Args.hasArg(OPT_ffreestanding)); Opts.UnrollLoops = (Opts.OptimizationLevel > 1 && !Opts.OptimizeSize); Opts.AsmVerbose = Args.hasArg(OPT_masm_verbose); @@ -820,6 +843,7 @@ static void ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, Opts.LimitFloatPrecision = Args.getLastArgValue(OPT_mlimit_float_precision); Opts.NoZeroInitializedInBSS = Args.hasArg(OPT_mno_zero_initialized_in_bss); Opts.RelaxAll = Args.hasArg(OPT_mrelax_all); + Opts.OmitLeafFramePointer = Args.hasArg(OPT_momit_leaf_frame_pointer); Opts.SoftFloat = Args.hasArg(OPT_msoft_float); Opts.UnwindTables = Args.hasArg(OPT_munwind_tables); Opts.RelocationModel = Args.getLastArgValue(OPT_mrelocation_model, "pic"); @@ -830,6 +854,8 @@ static void ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, Opts.MainFileName = Args.getLastArgValue(OPT_main_file_name); Opts.VerifyModule = !Args.hasArg(OPT_disable_llvm_verifier); + Opts.InstrumentFunctions = 
Args.hasArg(OPT_finstrument_functions); + if (Arg *A = Args.getLastArg(OPT_fobjc_dispatch_method_EQ)) { llvm::StringRef Name = A->getValue(Args); unsigned Method = llvm::StringSwitch<unsigned>(Name) @@ -866,7 +892,18 @@ static void ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args, Opts.ShowFixits = !Args.hasArg(OPT_fno_diagnostics_fixit_info); Opts.ShowLocation = !Args.hasArg(OPT_fno_show_source_location); Opts.ShowOptionNames = Args.hasArg(OPT_fdiagnostics_show_option); - + + llvm::StringRef ShowOverloads = + Args.getLastArgValue(OPT_fshow_overloads_EQ, "all"); + if (ShowOverloads == "best") + Opts.ShowOverloads = Diagnostic::Ovl_Best; + else if (ShowOverloads == "all") + Opts.ShowOverloads = Diagnostic::Ovl_All; + else + Diags.Report(diag::err_drv_invalid_value) + << Args.getLastArg(OPT_fshow_overloads_EQ)->getAsString(Args) + << ShowOverloads; + llvm::StringRef ShowCategory = Args.getLastArgValue(OPT_fdiagnostics_show_category, "none"); if (ShowCategory == "none") @@ -903,8 +940,8 @@ static void ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args, Opts.Warnings = Args.getAllArgValues(OPT_W); } -static FrontendOptions::InputKind -ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, Diagnostic &Diags) { +static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, + Diagnostic &Diags) { using namespace cc1options; Opts.ProgramAction = frontend::ParseSyntaxOnly; if (const Arg *A = Args.getLastArg(OPT_Action_Group)) { @@ -972,9 +1009,17 @@ ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, Diagnostic &Diags) { Opts.ProgramAction = frontend::RunPreprocessorOnly; break; } } - if (const Arg *A = Args.getLastArg(OPT_plugin)) { + + if (const Arg* A = Args.getLastArg(OPT_plugin)) { + Opts.Plugins.push_back(A->getValue(Args,0)); Opts.ProgramAction = frontend::PluginAction; Opts.ActionName = A->getValue(Args); + + for (arg_iterator it = Args.filtered_begin(OPT_plugin_arg), + end = Args.filtered_end(); it != end; ++it) { + if 
((*it)->getValue(Args, 0) == Opts.ActionName) + Opts.PluginArgs.push_back((*it)->getValue(Args, 1)); + } } if (const Arg *A = Args.getLastArg(OPT_code_completion_at)) { @@ -991,6 +1036,7 @@ ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, Diagnostic &Diags) { Opts.OutputFile = Args.getLastArgValue(OPT_o); Opts.Plugins = Args.getAllArgValues(OPT_load); Opts.RelocatablePCH = Args.hasArg(OPT_relocatable_pch); + Opts.ChainedPCH = Args.hasArg(OPT_chained_pch); Opts.ShowHelp = Args.hasArg(OPT_help); Opts.ShowMacrosInCodeCompletion = Args.hasArg(OPT_code_completion_macros); Opts.ShowCodePatternsInCodeCompletion @@ -1002,28 +1048,29 @@ ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, Diagnostic &Diags) { Opts.ASTMergeFiles = Args.getAllArgValues(OPT_ast_merge); Opts.LLVMArgs = Args.getAllArgValues(OPT_mllvm); - FrontendOptions::InputKind DashX = FrontendOptions::IK_None; + InputKind DashX = IK_None; if (const Arg *A = Args.getLastArg(OPT_x)) { - DashX = llvm::StringSwitch<FrontendOptions::InputKind>(A->getValue(Args)) - .Case("c", FrontendOptions::IK_C) - .Case("cl", FrontendOptions::IK_OpenCL) - .Case("c", FrontendOptions::IK_C) - .Case("cl", FrontendOptions::IK_OpenCL) - .Case("c++", FrontendOptions::IK_CXX) - .Case("objective-c", FrontendOptions::IK_ObjC) - .Case("objective-c++", FrontendOptions::IK_ObjCXX) - .Case("cpp-output", FrontendOptions::IK_PreprocessedC) - .Case("assembler-with-cpp", FrontendOptions::IK_Asm) - .Case("c++-cpp-output", FrontendOptions::IK_PreprocessedCXX) - .Case("objective-c-cpp-output", FrontendOptions::IK_PreprocessedObjC) - .Case("objective-c++-cpp-output", FrontendOptions::IK_PreprocessedObjCXX) - .Case("c-header", FrontendOptions::IK_C) - .Case("objective-c-header", FrontendOptions::IK_ObjC) - .Case("c++-header", FrontendOptions::IK_CXX) - .Case("objective-c++-header", FrontendOptions::IK_ObjCXX) - .Case("ast", FrontendOptions::IK_AST) - .Default(FrontendOptions::IK_None); - if (DashX == FrontendOptions::IK_None) + DashX = 
llvm::StringSwitch<InputKind>(A->getValue(Args)) + .Case("c", IK_C) + .Case("cl", IK_OpenCL) + .Case("c", IK_C) + .Case("cl", IK_OpenCL) + .Case("c++", IK_CXX) + .Case("objective-c", IK_ObjC) + .Case("objective-c++", IK_ObjCXX) + .Case("cpp-output", IK_PreprocessedC) + .Case("assembler-with-cpp", IK_Asm) + .Case("c++-cpp-output", IK_PreprocessedCXX) + .Case("objective-c-cpp-output", IK_PreprocessedObjC) + .Case("objective-c++-cpp-output", IK_PreprocessedObjCXX) + .Case("c-header", IK_C) + .Case("objective-c-header", IK_ObjC) + .Case("c++-header", IK_CXX) + .Case("objective-c++-header", IK_ObjCXX) + .Case("ast", IK_AST) + .Case("ir", IK_LLVM_IR) + .Default(IK_None); + if (DashX == IK_None) Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << A->getValue(Args); } @@ -1034,8 +1081,8 @@ ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args, Diagnostic &Diags) { if (Inputs.empty()) Inputs.push_back("-"); for (unsigned i = 0, e = Inputs.size(); i != e; ++i) { - FrontendOptions::InputKind IK = DashX; - if (IK == FrontendOptions::IK_None) { + InputKind IK = DashX; + if (IK == IK_None) { IK = FrontendOptions::getInputKindForExtension( llvm::StringRef(Inputs[i]).rsplit('.').second); // FIXME: Remove this hack. @@ -1077,51 +1124,51 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args) { // Add -I... and -F... options in order. for (arg_iterator it = Args.filtered_begin(OPT_I, OPT_F), ie = Args.filtered_end(); it != ie; ++it) - Opts.AddPath(it->getValue(Args), frontend::Angled, true, - /*IsFramework=*/ it->getOption().matches(OPT_F)); + Opts.AddPath((*it)->getValue(Args), frontend::Angled, true, + /*IsFramework=*/ (*it)->getOption().matches(OPT_F)); // Add -iprefix/-iwith-prefix/-iwithprefixbefore options. llvm::StringRef Prefix = ""; // FIXME: This isn't the correct default prefix. 
for (arg_iterator it = Args.filtered_begin(OPT_iprefix, OPT_iwithprefix, OPT_iwithprefixbefore), ie = Args.filtered_end(); it != ie; ++it) { - if (it->getOption().matches(OPT_iprefix)) - Prefix = it->getValue(Args); - else if (it->getOption().matches(OPT_iwithprefix)) - Opts.AddPath(Prefix.str() + it->getValue(Args), + const Arg *A = *it; + if (A->getOption().matches(OPT_iprefix)) + Prefix = A->getValue(Args); + else if (A->getOption().matches(OPT_iwithprefix)) + Opts.AddPath(Prefix.str() + A->getValue(Args), frontend::System, false, false); else - Opts.AddPath(Prefix.str() + it->getValue(Args), + Opts.AddPath(Prefix.str() + A->getValue(Args), frontend::Angled, false, false); } for (arg_iterator it = Args.filtered_begin(OPT_idirafter), ie = Args.filtered_end(); it != ie; ++it) - Opts.AddPath(it->getValue(Args), frontend::After, true, false); + Opts.AddPath((*it)->getValue(Args), frontend::After, true, false); for (arg_iterator it = Args.filtered_begin(OPT_iquote), ie = Args.filtered_end(); it != ie; ++it) - Opts.AddPath(it->getValue(Args), frontend::Quoted, true, false); + Opts.AddPath((*it)->getValue(Args), frontend::Quoted, true, false); for (arg_iterator it = Args.filtered_begin(OPT_isystem), ie = Args.filtered_end(); it != ie; ++it) - Opts.AddPath(it->getValue(Args), frontend::System, true, false); + Opts.AddPath((*it)->getValue(Args), frontend::System, true, false); // FIXME: Need options for the various environment variables! } -static void ParseLangArgs(LangOptions &Opts, ArgList &Args, - FrontendOptions::InputKind IK, +static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK, Diagnostic &Diags) { // FIXME: Cleanup per-file based stuff. // Set some properties which depend soley on the input kind; it would be nice // to move these to the language standard, and have the driver resolve the // input kind + language standard. 
- if (IK == FrontendOptions::IK_Asm) { + if (IK == IK_Asm) { Opts.AsmPreprocessor = 1; - } else if (IK == FrontendOptions::IK_ObjC || - IK == FrontendOptions::IK_ObjCXX || - IK == FrontendOptions::IK_PreprocessedObjC || - IK == FrontendOptions::IK_PreprocessedObjCXX) { + } else if (IK == IK_ObjC || + IK == IK_ObjCXX || + IK == IK_PreprocessedObjC || + IK == IK_PreprocessedObjCXX) { Opts.ObjC1 = Opts.ObjC2 = 1; } @@ -1140,23 +1187,24 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, if (LangStd == LangStandard::lang_unspecified) { // Based on the base language, pick one. switch (IK) { - case FrontendOptions::IK_None: - case FrontendOptions::IK_AST: + case IK_None: + case IK_AST: + case IK_LLVM_IR: assert(0 && "Invalid input kind!"); - case FrontendOptions::IK_OpenCL: + case IK_OpenCL: LangStd = LangStandard::lang_opencl; break; - case FrontendOptions::IK_Asm: - case FrontendOptions::IK_C: - case FrontendOptions::IK_PreprocessedC: - case FrontendOptions::IK_ObjC: - case FrontendOptions::IK_PreprocessedObjC: + case IK_Asm: + case IK_C: + case IK_PreprocessedC: + case IK_ObjC: + case IK_PreprocessedObjC: LangStd = LangStandard::lang_gnu99; break; - case FrontendOptions::IK_CXX: - case FrontendOptions::IK_PreprocessedCXX: - case FrontendOptions::IK_ObjCXX: - case FrontendOptions::IK_PreprocessedObjCXX: + case IK_CXX: + case IK_PreprocessedCXX: + case IK_ObjCXX: + case IK_PreprocessedObjCXX: LangStd = LangStandard::lang_gnucxx98; break; } @@ -1222,7 +1270,13 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, Diags.Report(diag::err_drv_invalid_value) << Args.getLastArg(OPT_fvisibility)->getAsString(Args) << Vis; - Opts.OverflowChecking = Args.hasArg(OPT_ftrapv); + if (Args.hasArg(OPT_fvisibility_inlines_hidden)) + Opts.InlineVisibilityHidden = 1; + + if (Args.hasArg(OPT_ftrapv)) + Opts.setSignedOverflowBehavior(LangOptions::SOB_Trapping); + else if (Args.hasArg(OPT_fwrapv)) + Opts.setSignedOverflowBehavior(LangOptions::SOB_Defined); // Mimicing 
gcc's behavior, trigraphs are only enabled if -trigraphs // is specified, or -std is set to a conforming mode. @@ -1270,6 +1324,7 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, Opts.Static = Args.hasArg(OPT_static_define); Opts.DumpRecordLayouts = Args.hasArg(OPT_fdump_record_layouts); Opts.DumpVTableLayouts = Args.hasArg(OPT_fdump_vtable_layouts); + Opts.SpellChecking = !Args.hasArg(OPT_fno_spell_checking); Opts.NoBitFieldTypeAlign = Args.hasArg(OPT_fno_bitfield_type_align); Opts.OptimizeSize = 0; @@ -1311,10 +1366,10 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args, // Add macros from the command line. for (arg_iterator it = Args.filtered_begin(OPT_D, OPT_U), ie = Args.filtered_end(); it != ie; ++it) { - if (it->getOption().matches(OPT_D)) - Opts.addMacroDef(it->getValue(Args)); + if ((*it)->getOption().matches(OPT_D)) + Opts.addMacroDef((*it)->getValue(Args)); else - Opts.addMacroUndef(it->getValue(Args)); + Opts.addMacroUndef((*it)->getValue(Args)); } Opts.MacroIncludes = Args.getAllArgValues(OPT_imacros); @@ -1323,16 +1378,17 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args, for (arg_iterator it = Args.filtered_begin(OPT_include, OPT_include_pch, OPT_include_pth), ie = Args.filtered_end(); it != ie; ++it) { + const Arg *A = *it; // PCH is handled specially, we need to extra the original include path. 
- if (it->getOption().matches(OPT_include_pch)) { + if (A->getOption().matches(OPT_include_pch)) { std::string OriginalFile = - PCHReader::getOriginalSourceFile(it->getValue(Args), Diags); + PCHReader::getOriginalSourceFile(A->getValue(Args), Diags); if (OriginalFile.empty()) continue; Opts.Includes.push_back(OriginalFile); } else - Opts.Includes.push_back(it->getValue(Args)); + Opts.Includes.push_back(A->getValue(Args)); } // Include 'altivec.h' if -faltivec option present @@ -1341,11 +1397,12 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args, for (arg_iterator it = Args.filtered_begin(OPT_remap_file), ie = Args.filtered_end(); it != ie; ++it) { + const Arg *A = *it; std::pair<llvm::StringRef,llvm::StringRef> Split = - llvm::StringRef(it->getValue(Args)).split(';'); + llvm::StringRef(A->getValue(Args)).split(';'); if (Split.second.empty()) { - Diags.Report(diag::err_drv_invalid_remap_file) << it->getAsString(Args); + Diags.Report(diag::err_drv_invalid_remap_file) << A->getAsString(Args); continue; } @@ -1366,6 +1423,7 @@ static void ParsePreprocessorOutputArgs(PreprocessorOutputOptions &Opts, static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args) { using namespace cc1options; Opts.ABI = Args.getLastArgValue(OPT_target_abi); + Opts.CXXABI = Args.getLastArgValue(OPT_cxx_abi); Opts.CPU = Args.getLastArgValue(OPT_target_cpu); Opts.Triple = Args.getLastArgValue(OPT_triple); Opts.Features = Args.getAllArgValues(OPT_target_feature); @@ -1373,6 +1431,10 @@ static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args) { // Use the host triple if unspecified. if (Opts.Triple.empty()) Opts.Triple = llvm::sys::getHostTriple(); + + // Use the Itanium C++ ABI if unspecified. + if (Opts.CXXABI.empty()) + Opts.CXXABI = "itanium"; } // @@ -1395,16 +1457,15 @@ void CompilerInvocation::CreateFromArgs(CompilerInvocation &Res, // Issue errors on unknown arguments. 
for (arg_iterator it = Args->filtered_begin(OPT_UNKNOWN), ie = Args->filtered_end(); it != ie; ++it) - Diags.Report(diag::err_drv_unknown_argument) << it->getAsString(*Args); + Diags.Report(diag::err_drv_unknown_argument) << (*it)->getAsString(*Args); ParseAnalyzerArgs(Res.getAnalyzerOpts(), *Args, Diags); ParseCodeGenArgs(Res.getCodeGenOpts(), *Args, Diags); ParseDependencyOutputArgs(Res.getDependencyOutputOpts(), *Args); ParseDiagnosticArgs(Res.getDiagnosticOpts(), *Args, Diags); - FrontendOptions::InputKind DashX = - ParseFrontendArgs(Res.getFrontendOpts(), *Args, Diags); + InputKind DashX = ParseFrontendArgs(Res.getFrontendOpts(), *Args, Diags); ParseHeaderSearchArgs(Res.getHeaderSearchOpts(), *Args); - if (DashX != FrontendOptions::IK_AST) + if (DashX != IK_AST && DashX != IK_LLVM_IR) ParseLangArgs(Res.getLangOpts(), *Args, DashX, Diags); ParsePreprocessorArgs(Res.getPreprocessorOpts(), *Args, Diags); ParsePreprocessorOutputArgs(Res.getPreprocessorOutputOpts(), *Args); diff --git a/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp b/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp index 87fc122..dbbf69c 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp @@ -25,25 +25,28 @@ FrontendAction::FrontendAction() : Instance(0) {} FrontendAction::~FrontendAction() {} -void FrontendAction::setCurrentFile(llvm::StringRef Value, ASTUnit *AST) { +void FrontendAction::setCurrentFile(llvm::StringRef Value, InputKind Kind, + ASTUnit *AST) { CurrentFile = Value; + CurrentFileKind = Kind; CurrentASTUnit.reset(AST); } bool FrontendAction::BeginSourceFile(CompilerInstance &CI, llvm::StringRef Filename, - bool IsAST) { + InputKind InputKind) { assert(!Instance && "Already processing a source file!"); assert(!Filename.empty() && "Unexpected empty filename!"); - setCurrentFile(Filename); + setCurrentFile(Filename, InputKind); setCompilerInstance(&CI); // AST files follow a very different 
path, since they share objects via the // AST unit. - if (IsAST) { + if (InputKind == IK_AST) { assert(!usesPreprocessorOnly() && "Attempt to pass AST file to preprocessor only action!"); - assert(hasASTSupport() && "This action does not have AST support!"); + assert(hasASTFileSupport() && + "This action does not have AST file support!"); llvm::IntrusiveRefCntPtr<Diagnostic> Diags(&CI.getDiagnostics()); std::string Error; @@ -51,7 +54,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI, if (!AST) goto failure; - setCurrentFile(Filename, AST); + setCurrentFile(Filename, InputKind, AST); // Set the shared objects, these are reset when we finish processing the // file, otherwise the CompilerInstance will happily destroy them. @@ -72,6 +75,30 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI, return true; } + // Set up the file and source managers, if needed. + if (!CI.hasFileManager()) + CI.createFileManager(); + if (!CI.hasSourceManager()) + CI.createSourceManager(); + + // IR files bypass the rest of initialization. + if (InputKind == IK_LLVM_IR) { + assert(hasIRSupport() && + "This action does not have IR file support!"); + + // Inform the diagnostic client we are processing a source file. + CI.getDiagnosticClient().BeginSourceFile(CI.getLangOpts(), 0); + + // Initialize the action. + if (!BeginSourceFileAction(CI, Filename)) + goto failure; + + return true; + } + + // Set up the preprocessor. + CI.createPreprocessor(); + // Inform the diagnostic client we are processing a source file. CI.getDiagnosticClient().BeginSourceFile(CI.getLangOpts(), &CI.getPreprocessor()); @@ -84,11 +111,10 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI, /// action. if (!usesPreprocessorOnly()) { CI.createASTContext(); - CI.setASTConsumer(CreateASTConsumer(CI, Filename)); - if (!CI.hasASTConsumer()) - goto failure; - /// Use PCH? + /// Use PCH? 
If so, we want the PCHReader active before the consumer + /// is created, because the consumer might be interested in the reader + /// (e.g. the PCH writer for chaining). if (!CI.getPreprocessorOpts().ImplicitPCHInclude.empty()) { assert(hasPCHSupport() && "This action does not have PCH support!"); CI.createPCHExternalASTSource( @@ -96,6 +122,10 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI, if (!CI.getASTContext().getExternalSource()) goto failure; } + + CI.setASTConsumer(CreateASTConsumer(CI, Filename)); + if (!CI.hasASTConsumer()) + goto failure; } // Initialize builtin info as long as we aren't using an external AST @@ -119,7 +149,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI, } CI.getDiagnosticClient().EndSourceFile(); - setCurrentFile(""); + setCurrentFile("", IK_None); setCompilerInstance(0); return false; } @@ -198,7 +228,7 @@ void FrontendAction::EndSourceFile() { } setCompilerInstance(0); - setCurrentFile(""); + setCurrentFile("", IK_None); } //===----------------------------------------------------------------------===// diff --git a/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp b/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp index 6cd960b..3a53dee 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp @@ -9,14 +9,13 @@ #include "clang/Frontend/FrontendActions.h" #include "clang/AST/ASTConsumer.h" +#include "clang/Lex/Pragma.h" #include "clang/Lex/Preprocessor.h" #include "clang/Parse/Parser.h" #include "clang/Basic/FileManager.h" -#include "clang/Frontend/AnalysisConsumer.h" #include "clang/Frontend/ASTConsumers.h" #include "clang/Frontend/ASTUnit.h" #include "clang/Frontend/CompilerInstance.h" -#include "clang/Frontend/FixItRewriter.h" #include "clang/Frontend/FrontendDiagnostic.h" #include "clang/Frontend/Utils.h" #include "llvm/ADT/OwningPtr.h" @@ -39,13 +38,6 @@ void InitOnlyAction::ExecuteAction() { // AST Consumer 
Actions //===----------------------------------------------------------------------===// -ASTConsumer *AnalysisAction::CreateASTConsumer(CompilerInstance &CI, - llvm::StringRef InFile) { - return CreateAnalysisConsumer(CI.getPreprocessor(), - CI.getFrontendOpts().OutputFile, - CI.getAnalyzerOpts()); -} - ASTConsumer *ASTPrintAction::CreateASTConsumer(CompilerInstance &CI, llvm::StringRef InFile) { if (llvm::raw_ostream *OS = CI.createDefaultOutputFile(false, InFile)) @@ -88,17 +80,11 @@ ASTConsumer *GeneratePCHAction::CreateASTConsumer(CompilerInstance &CI, if (!OS) return 0; - if (CI.getFrontendOpts().RelocatablePCH) - return CreatePCHGenerator(CI.getPreprocessor(), OS, Sysroot.c_str()); - - return CreatePCHGenerator(CI.getPreprocessor(), OS); -} - -ASTConsumer *HTMLPrintAction::CreateASTConsumer(CompilerInstance &CI, - llvm::StringRef InFile) { - if (llvm::raw_ostream *OS = CI.createDefaultOutputFile(false, InFile)) - return CreateHTMLPrinter(OS, CI.getPreprocessor()); - return 0; + PCHReader *Chain = CI.getInvocation().getFrontendOpts().ChainedPCH ? + CI.getPCHReader() : 0; + const char *isysroot = CI.getFrontendOpts().RelocatablePCH ? 
+ Sysroot.c_str() : 0; + return CreatePCHGenerator(CI.getPreprocessor(), OS, Chain, isysroot); } ASTConsumer *InheritanceViewAction::CreateASTConsumer(CompilerInstance &CI, @@ -106,57 +92,6 @@ ASTConsumer *InheritanceViewAction::CreateASTConsumer(CompilerInstance &CI, return CreateInheritanceViewer(CI.getFrontendOpts().ViewClassInheritance); } -FixItAction::FixItAction() {} -FixItAction::~FixItAction() {} - -ASTConsumer *FixItAction::CreateASTConsumer(CompilerInstance &CI, - llvm::StringRef InFile) { - return new ASTConsumer(); -} - -class FixItActionSuffixInserter : public FixItPathRewriter { - std::string NewSuffix; - -public: - explicit FixItActionSuffixInserter(std::string NewSuffix) - : NewSuffix(NewSuffix) {} - - std::string RewriteFilename(const std::string &Filename) { - llvm::sys::Path Path(Filename); - std::string Suffix = Path.getSuffix(); - Path.eraseSuffix(); - Path.appendSuffix(NewSuffix + "." + Suffix); - return Path.c_str(); - } -}; - -bool FixItAction::BeginSourceFileAction(CompilerInstance &CI, - llvm::StringRef Filename) { - const FrontendOptions &FEOpts = getCompilerInstance().getFrontendOpts(); - if (!FEOpts.FixItSuffix.empty()) { - PathRewriter.reset(new FixItActionSuffixInserter(FEOpts.FixItSuffix)); - } else { - PathRewriter.reset(); - } - Rewriter.reset(new FixItRewriter(CI.getDiagnostics(), CI.getSourceManager(), - CI.getLangOpts(), PathRewriter.get())); - return true; -} - -void FixItAction::EndSourceFileAction() { - // Otherwise rewrite all files. 
- Rewriter->WriteFixedFiles(); -} - -ASTConsumer *RewriteObjCAction::CreateASTConsumer(CompilerInstance &CI, - llvm::StringRef InFile) { - if (llvm::raw_ostream *OS = CI.createDefaultOutputFile(false, InFile, "cpp")) - return CreateObjCRewriter(InFile, OS, - CI.getDiagnostics(), CI.getLangOpts(), - CI.getDiagnosticOpts().NoRewriteMacros); - return 0; -} - ASTConsumer *SyntaxOnlyAction::CreateASTConsumer(CompilerInstance &CI, llvm::StringRef InFile) { return new ASTConsumer(); @@ -223,6 +158,9 @@ void ParseOnlyAction::ExecuteAction() { void PreprocessOnlyAction::ExecuteAction() { Preprocessor &PP = getCompilerInstance().getPreprocessor(); + // Ignore unknown pragmas. + PP.AddPragmaHandler(new EmptyPragmaHandler()); + Token Tok; // Start parsing the specified input file. PP.EnterMainSourceFile(); @@ -254,19 +192,3 @@ void PrintPreprocessedAction::ExecuteAction() { DoPrintPreprocessedInput(CI.getPreprocessor(), OS, CI.getPreprocessorOutputOpts()); } - -void RewriteMacrosAction::ExecuteAction() { - CompilerInstance &CI = getCompilerInstance(); - llvm::raw_ostream *OS = CI.createDefaultOutputFile(true, getCurrentFile()); - if (!OS) return; - - RewriteMacrosInInput(CI.getPreprocessor(), OS); -} - -void RewriteTestAction::ExecuteAction() { - CompilerInstance &CI = getCompilerInstance(); - llvm::raw_ostream *OS = CI.createDefaultOutputFile(false, getCurrentFile()); - if (!OS) return; - - DoRewriteTest(CI.getPreprocessor(), OS); -} diff --git a/contrib/llvm/tools/clang/lib/Frontend/FrontendOptions.cpp b/contrib/llvm/tools/clang/lib/Frontend/FrontendOptions.cpp index bd91638..9dfee24 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/FrontendOptions.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/FrontendOptions.cpp @@ -11,8 +11,7 @@ #include "llvm/ADT/StringSwitch.h" using namespace clang; -FrontendOptions::InputKind -FrontendOptions::getInputKindForExtension(llvm::StringRef Extension) { +InputKind FrontendOptions::getInputKindForExtension(llvm::StringRef Extension) { 
return llvm::StringSwitch<InputKind>(Extension) .Case("ast", IK_AST) .Case("c", IK_C) @@ -27,5 +26,6 @@ FrontendOptions::getInputKindForExtension(llvm::StringRef Extension) { .Cases("C", "cc", "cp", IK_CXX) .Cases("cpp", "CPP", "c++", "cxx", "hpp", IK_CXX) .Case("cl", IK_OpenCL) + .Cases("ll", "bc", IK_LLVM_IR) .Default(IK_C); } diff --git a/contrib/llvm/tools/clang/lib/Frontend/GeneratePCH.cpp b/contrib/llvm/tools/clang/lib/Frontend/GeneratePCH.cpp index 6251bac..2f3df94 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/GeneratePCH.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/GeneratePCH.cpp @@ -32,20 +32,24 @@ namespace { llvm::raw_ostream *Out; Sema *SemaPtr; MemorizeStatCalls *StatCalls; // owned by the FileManager + std::vector<unsigned char> Buffer; + llvm::BitstreamWriter Stream; + PCHWriter Writer; public: - explicit PCHGenerator(const Preprocessor &PP, - const char *isysroot, - llvm::raw_ostream *Out); + PCHGenerator(const Preprocessor &PP, PCHReader *Chain, + const char *isysroot, llvm::raw_ostream *Out); virtual void InitializeSema(Sema &S) { SemaPtr = &S; } virtual void HandleTranslationUnit(ASTContext &Ctx); }; } PCHGenerator::PCHGenerator(const Preprocessor &PP, + PCHReader *Chain, const char *isysroot, llvm::raw_ostream *OS) - : PP(PP), isysroot(isysroot), Out(OS), SemaPtr(0), StatCalls(0) { + : PP(PP), isysroot(isysroot), Out(OS), SemaPtr(0), StatCalls(0), + Stream(Buffer), Writer(Stream, Chain) { // Install a stat() listener to keep track of all of the stat() // calls. 
@@ -57,11 +61,6 @@ void PCHGenerator::HandleTranslationUnit(ASTContext &Ctx) { if (PP.getDiagnostics().hasErrorOccurred()) return; - // Write the PCH contents into a buffer - std::vector<unsigned char> Buffer; - llvm::BitstreamWriter Stream(Buffer); - PCHWriter Writer(Stream); - // Emit the PCH file assert(SemaPtr && "No Sema?"); Writer.WritePCH(*SemaPtr, StatCalls, isysroot); @@ -71,10 +70,14 @@ void PCHGenerator::HandleTranslationUnit(ASTContext &Ctx) { // Make sure it hits disk now. Out->flush(); + + // Free up some memory, in case the process is kept alive. + Buffer.clear(); } ASTConsumer *clang::CreatePCHGenerator(const Preprocessor &PP, llvm::raw_ostream *OS, + PCHReader *Chain, const char *isysroot) { - return new PCHGenerator(PP, isysroot, OS); + return new PCHGenerator(PP, Chain, isysroot, OS); } diff --git a/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp b/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp index 9490705..fcfee712 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp @@ -550,6 +550,8 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple) { System, true, false, false); AddPath("/lib/gcc/i686-pc-cygwin/3.4.4/include/c++", System, true, false, false); + AddPath("/lib/gcc/i686-pc-cygwin/3.4.4/include/c++/i686-pc-cygwin", + System, true, false, false); break; case llvm::Triple::MinGW64: // Try gcc 4.4.0 @@ -564,10 +566,35 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple) { AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.3.0"); break; case llvm::Triple::Darwin: - AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1", - "i686-apple-darwin10", "", "x86_64", triple); - AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.0.0", - "i686-apple-darwin8", "", "", triple); + switch (triple.getArch()) { + default: break; + + case llvm::Triple::ppc: + case llvm::Triple::ppc64: + 
AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1", + "powerpc-apple-darwin10", "", "ppc64", + triple); + AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.0.0", + "powerpc-apple-darwin10", "", "ppc64", + triple); + break; + + case llvm::Triple::x86: + case llvm::Triple::x86_64: + AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1", + "i686-apple-darwin10", "", "x86_64", triple); + AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.0.0", + "i686-apple-darwin8", "", "", triple); + break; + + case llvm::Triple::arm: + case llvm::Triple::thumb: + AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1", + "arm-apple-darwin10", "v7", "", triple); + AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1", + "arm-apple-darwin10", "v6", "", triple); + break; + } break; case llvm::Triple::DragonFly: AddPath("/usr/include/c++/4.1", System, true, false, false); @@ -591,6 +618,8 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple) { "x86_64-linux-gnu", "32", "", triple); AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.3", "i486-linux-gnu", "", "64", triple); + AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.3", + "arm-linux-gnueabi", "", "", triple); // Ubuntu 8.04.4 LTS "Hardy Heron" -- gcc-4.2.4 // Ubuntu 8.04.[0-3] LTS "Hardy Heron" -- gcc-4.2.3 AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2", @@ -607,6 +636,10 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple) { // Redhat based distros. 
//===------------------------------------------------------------------===// // Fedora 13 + AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.4.4", + "x86_64-redhat-linux", "32", "", triple); + AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.4.4", + "i686-redhat-linux","", "", triple); // Fedora 12 AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.4.3", "x86_64-redhat-linux", "32", "", triple); @@ -694,6 +727,11 @@ AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple) { // FreeBSD 8.0 // FreeBSD 7.3 AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2", "", "", "", triple); + AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2/backward", "", "", "", triple); + break; + case llvm::Triple::Minix: + AddGnuCPlusPlusIncludePaths("/usr/gnu/include/c++/4.4.3", + "", "", "", triple); break; case llvm::Triple::Solaris: // Solaris - Fall though.. diff --git a/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp b/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp index 2b35c8e..889b6e5 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp @@ -83,8 +83,8 @@ static void AddImplicitIncludeMacros(MacroBuilder &Builder, static void AddImplicitIncludePTH(MacroBuilder &Builder, Preprocessor &PP, llvm::StringRef ImplicitIncludePTH) { PTHManager *P = PP.getPTHManager(); - assert(P && "No PTHManager."); - const char *OriginalFile = P->getOriginalSourceFile(); + // Null check 'P' in the corner case where it couldn't be created. + const char *OriginalFile = P ? 
P->getOriginalSourceFile() : 0; if (!OriginalFile) { PP.getDiagnostics().Report(diag::err_fe_pth_file_has_no_source_header) @@ -195,9 +195,21 @@ static void DefineTypeWidth(llvm::StringRef MacroName, TargetInfo::IntType Ty, Builder.defineMacro(MacroName, llvm::Twine(TI.getTypeWidth(Ty))); } +static void DefineTypeSizeof(llvm::StringRef MacroName, unsigned BitWidth, + const TargetInfo &TI, MacroBuilder &Builder) { + Builder.defineMacro(MacroName, + llvm::Twine(BitWidth / TI.getCharWidth())); +} + static void DefineExactWidthIntType(TargetInfo::IntType Ty, const TargetInfo &TI, MacroBuilder &Builder) { int TypeWidth = TI.getTypeWidth(Ty); + + // Use the target specified int64 type, when appropriate, so that [u]int64_t + // ends up being defined in terms of the correct type. + if (TypeWidth == 64) + Ty = TI.getInt64Type(); + DefineType("__INT" + llvm::Twine(TypeWidth) + "_TYPE__", Ty, Builder); llvm::StringRef ConstSuffix(TargetInfo::getTypeConstantSuffix(Ty)); @@ -293,6 +305,8 @@ static void InitializePredefinedMacros(const TargetInfo &TI, if (LangOpts.Exceptions) Builder.defineMacro("__EXCEPTIONS"); + if (LangOpts.RTTI) + Builder.defineMacro("__GXX_RTTI"); if (LangOpts.SjLjExceptions) Builder.defineMacro("__USING_SJLJ_EXCEPTIONS__"); @@ -350,6 +364,23 @@ static void InitializePredefinedMacros(const TargetInfo &TI, DefineTypeSize("__WCHAR_MAX__", TI.getWCharType(), TI, Builder); DefineTypeSize("__INTMAX_MAX__", TI.getIntMaxType(), TI, Builder); + DefineTypeSizeof("__SIZEOF_DOUBLE__", TI.getDoubleWidth(), TI, Builder); + DefineTypeSizeof("__SIZEOF_FLOAT__", TI.getFloatWidth(), TI, Builder); + DefineTypeSizeof("__SIZEOF_INT__", TI.getIntWidth(), TI, Builder); + DefineTypeSizeof("__SIZEOF_LONG__", TI.getLongWidth(), TI, Builder); + DefineTypeSizeof("__SIZEOF_LONG_DOUBLE__",TI.getLongDoubleWidth(),TI,Builder); + DefineTypeSizeof("__SIZEOF_LONG_LONG__", TI.getLongLongWidth(), TI, Builder); + DefineTypeSizeof("__SIZEOF_POINTER__", TI.getPointerWidth(0), TI, Builder); + 
DefineTypeSizeof("__SIZEOF_SHORT__", TI.getShortWidth(), TI, Builder); + DefineTypeSizeof("__SIZEOF_PTRDIFF_T__", + TI.getTypeWidth(TI.getPtrDiffType(0)), TI, Builder); + DefineTypeSizeof("__SIZEOF_SIZE_T__", + TI.getTypeWidth(TI.getSizeType()), TI, Builder); + DefineTypeSizeof("__SIZEOF_WCHAR_T__", + TI.getTypeWidth(TI.getWCharType()), TI, Builder); + DefineTypeSizeof("__SIZEOF_WINT_T__", + TI.getTypeWidth(TI.getWIntType()), TI, Builder); + DefineType("__INTMAX_TYPE__", TI.getIntMaxType(), Builder); DefineType("__UINTMAX_TYPE__", TI.getUIntMaxType(), Builder); DefineTypeWidth("__INTMAX_WIDTH__", TI.getIntMaxType(), TI, Builder); @@ -364,6 +395,8 @@ static void InitializePredefinedMacros(const TargetInfo &TI, DefineType("__WINT_TYPE__", TI.getWIntType(), Builder); DefineTypeWidth("__WINT_WIDTH__", TI.getWIntType(), TI, Builder); DefineTypeWidth("__SIG_ATOMIC_WIDTH__", TI.getSigAtomicType(), TI, Builder); + DefineType("__CHAR16_TYPE__", TI.getChar16Type(), Builder); + DefineType("__CHAR32_TYPE__", TI.getChar32Type(), Builder); DefineFloatMacros(Builder, "FLT", &TI.getFloatFormat()); DefineFloatMacros(Builder, "DBL", &TI.getDoubleFormat()); diff --git a/contrib/llvm/tools/clang/lib/Frontend/Makefile b/contrib/llvm/tools/clang/lib/Frontend/Makefile index 9e1a864..3eb4bc9 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/Makefile +++ b/contrib/llvm/tools/clang/lib/Frontend/Makefile @@ -7,11 +7,9 @@ # ##===----------------------------------------------------------------------===## -LEVEL = ../../../.. +CLANG_LEVEL := ../.. 
LIBRARYNAME := clangFrontend BUILD_ARCHIVE = 1 -CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include - -include $(LEVEL)/Makefile.common +include $(CLANG_LEVEL)/Makefile diff --git a/contrib/llvm/tools/clang/lib/Frontend/PCHReader.cpp b/contrib/llvm/tools/clang/lib/Frontend/PCHReader.cpp index 88e9b9d..00aee49 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/PCHReader.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/PCHReader.cpp @@ -13,6 +13,7 @@ #include "clang/Frontend/PCHReader.h" #include "clang/Frontend/FrontendDiagnostic.h" +#include "clang/Frontend/PCHDeserializationListener.h" #include "clang/Frontend/Utils.h" #include "../Sema/Sema.h" // FIXME: move Sema headers elsewhere #include "clang/AST/ASTConsumer.h" @@ -93,7 +94,7 @@ PCHValidator::ReadLanguageOptions(const LangOptions &LangOpts) { PARSE_LANGOPT_IMPORTANT(Blocks, diag::warn_pch_blocks); PARSE_LANGOPT_BENIGN(EmitAllDecls); PARSE_LANGOPT_IMPORTANT(MathErrno, diag::warn_pch_math_errno); - PARSE_LANGOPT_IMPORTANT(OverflowChecking, diag::warn_pch_overflow_checking); + PARSE_LANGOPT_BENIGN(getSignedOverflowBehavior()); PARSE_LANGOPT_IMPORTANT(HeinousExtensions, diag::warn_pch_heinous_extensions); // FIXME: Most of the options below are benign if the macro wasn't @@ -124,6 +125,7 @@ PCHValidator::ReadLanguageOptions(const LangOptions &LangOpts) { PARSE_LANGOPT_IMPORTANT(OpenCL, diag::warn_pch_opencl); PARSE_LANGOPT_BENIGN(CatchUndefined); PARSE_LANGOPT_IMPORTANT(ElideConstructors, diag::warn_pch_elide_constructors); + PARSE_LANGOPT_BENIGN(SpellChecking); #undef PARSE_LANGOPT_IMPORTANT #undef PARSE_LANGOPT_BENIGN @@ -139,8 +141,86 @@ bool PCHValidator::ReadTargetTriple(llvm::StringRef Triple) { return true; } -bool PCHValidator::ReadPredefinesBuffer(llvm::StringRef PCHPredef, - FileID PCHBufferID, +struct EmptyStringRef { + bool operator ()(llvm::StringRef r) const { return r.empty(); } +}; +struct EmptyBlock { + bool operator ()(const PCHPredefinesBlock &r) const { return 
r.Data.empty(); } +}; + +static bool EqualConcatenations(llvm::SmallVector<llvm::StringRef, 2> L, + PCHPredefinesBlocks R) { + // First, sum up the lengths. + unsigned LL = 0, RL = 0; + for (unsigned I = 0, N = L.size(); I != N; ++I) { + LL += L[I].size(); + } + for (unsigned I = 0, N = R.size(); I != N; ++I) { + RL += R[I].Data.size(); + } + if (LL != RL) + return false; + if (LL == 0 && RL == 0) + return true; + + // Kick out empty parts, they confuse the algorithm below. + L.erase(std::remove_if(L.begin(), L.end(), EmptyStringRef()), L.end()); + R.erase(std::remove_if(R.begin(), R.end(), EmptyBlock()), R.end()); + + // Do it the hard way. At this point, both vectors must be non-empty. + llvm::StringRef LR = L[0], RR = R[0].Data; + unsigned LI = 0, RI = 0, LN = L.size(), RN = R.size(); + for (;;) { + // Compare the current pieces. + if (LR.size() == RR.size()) { + // If they're the same length, it's pretty easy. + if (LR != RR) + return false; + // Both pieces are done, advance. + ++LI; + ++RI; + // If either string is done, they're both done, since they're the same + // length. + if (LI == LN) { + assert(RI == RN && "Strings not the same length after all?"); + return true; + } + LR = L[LI]; + RR = R[RI].Data; + } else if (LR.size() < RR.size()) { + // Right piece is longer. + if (!RR.startswith(LR)) + return false; + ++LI; + assert(LI != LN && "Strings not the same length after all?"); + RR = RR.substr(LR.size()); + LR = L[LI]; + } else { + // Left piece is longer. 
+ if (!LR.startswith(RR)) + return false; + ++RI; + assert(RI != RN && "Strings not the same length after all?"); + LR = LR.substr(RR.size()); + RR = R[RI].Data; + } + } +} + +static std::pair<FileID, llvm::StringRef::size_type> +FindMacro(const PCHPredefinesBlocks &Buffers, llvm::StringRef MacroDef) { + std::pair<FileID, llvm::StringRef::size_type> Res; + for (unsigned I = 0, N = Buffers.size(); I != N; ++I) { + Res.second = Buffers[I].Data.find(MacroDef); + if (Res.second != llvm::StringRef::npos) { + Res.first = Buffers[I].BufferID; + break; + } + } + return Res; +} + +bool PCHValidator::ReadPredefinesBuffer(const PCHPredefinesBlocks &Buffers, llvm::StringRef OriginalFileName, std::string &SuggestedPredefines) { // We are in the context of an implicit include, so the predefines buffer will @@ -159,9 +239,15 @@ bool PCHValidator::ReadPredefinesBuffer(llvm::StringRef PCHPredef, return true; } - // If the predefines is equal to the joined left and right halves, we're done! - if (Left.size() + Right.size() == PCHPredef.size() && - PCHPredef.startswith(Left) && PCHPredef.endswith(Right)) + // If the concatenation of all the PCH buffers is equal to the adjusted + // command line, we're done. + // We build a SmallVector of the command line here, because we'll eventually + // need to support an arbitrary amount of pieces anyway (when we have chained + // PCH reading). + llvm::SmallVector<llvm::StringRef, 2> CommandLine; + CommandLine.push_back(Left); + CommandLine.push_back(Right); + if (EqualConcatenations(CommandLine, Buffers)) return false; SourceManager &SourceMgr = PP.getSourceManager(); @@ -169,7 +255,8 @@ bool PCHValidator::ReadPredefinesBuffer(llvm::StringRef PCHPredef, // The predefines buffers are different. Determine what the differences are, // and whether they require us to reject the PCH file. 
llvm::SmallVector<llvm::StringRef, 8> PCHLines; - PCHPredef.split(PCHLines, "\n", /*MaxSplit=*/-1, /*KeepEmpty=*/false); + for (unsigned I = 0, N = Buffers.size(); I != N; ++I) + Buffers[I].Data.split(PCHLines, "\n", /*MaxSplit=*/-1, /*KeepEmpty=*/false); llvm::SmallVector<llvm::StringRef, 8> CmdLineLines; Left.split(CmdLineLines, "\n", /*MaxSplit=*/-1, /*KeepEmpty=*/false); @@ -234,10 +321,12 @@ bool PCHValidator::ReadPredefinesBuffer(llvm::StringRef PCHPredef, << MacroName; // Show the definition of this macro within the PCH file. - llvm::StringRef::size_type Offset = PCHPredef.find(Missing); - assert(Offset != llvm::StringRef::npos && "Unable to find macro!"); - SourceLocation PCHMissingLoc = SourceMgr.getLocForStartOfFile(PCHBufferID) - .getFileLocWithOffset(Offset); + std::pair<FileID, llvm::StringRef::size_type> MacroLoc = + FindMacro(Buffers, Missing); + assert(MacroLoc.second!=llvm::StringRef::npos && "Unable to find macro!"); + SourceLocation PCHMissingLoc = + SourceMgr.getLocForStartOfFile(MacroLoc.first) + .getFileLocWithOffset(MacroLoc.second); Reader.Diag(PCHMissingLoc, diag::note_pch_macro_defined_as) << MacroName; ConflictingDefines = true; @@ -255,10 +344,12 @@ bool PCHValidator::ReadPredefinesBuffer(llvm::StringRef PCHPredef, } // Show the definition of this macro within the PCH file. 
- llvm::StringRef::size_type Offset = PCHPredef.find(Missing); - assert(Offset != llvm::StringRef::npos && "Unable to find macro!"); - SourceLocation PCHMissingLoc = SourceMgr.getLocForStartOfFile(PCHBufferID) - .getFileLocWithOffset(Offset); + std::pair<FileID, llvm::StringRef::size_type> MacroLoc = + FindMacro(Buffers, Missing); + assert(MacroLoc.second!=llvm::StringRef::npos && "Unable to find macro!"); + SourceLocation PCHMissingLoc = + SourceMgr.getLocForStartOfFile(MacroLoc.first) + .getFileLocWithOffset(MacroLoc.second); Reader.Diag(PCHMissingLoc, diag::note_using_macro_def_from_pch); } @@ -323,10 +414,10 @@ void PCHValidator::ReadCounter(unsigned Value) { PCHReader::PCHReader(Preprocessor &PP, ASTContext *Context, const char *isysroot) - : Listener(new PCHValidator(PP, *this)), SourceMgr(PP.getSourceManager()), - FileMgr(PP.getFileManager()), Diags(PP.getDiagnostics()), - SemaObj(0), PP(&PP), Context(Context), StatCache(0), Consumer(0), - IdentifierTableData(0), IdentifierLookupTable(0), + : Listener(new PCHValidator(PP, *this)), DeserializationListener(0), + SourceMgr(PP.getSourceManager()), FileMgr(PP.getFileManager()), + Diags(PP.getDiagnostics()), SemaObj(0), PP(&PP), Context(Context), + StatCache(0), Consumer(0), IdentifierTableData(0), IdentifierLookupTable(0), IdentifierOffsets(0), MethodPoolLookupTable(0), MethodPoolLookupTableData(0), TotalSelectorsInMethodPool(0), SelectorOffsets(0), @@ -342,8 +433,8 @@ PCHReader::PCHReader(Preprocessor &PP, ASTContext *Context, PCHReader::PCHReader(SourceManager &SourceMgr, FileManager &FileMgr, Diagnostic &Diags, const char *isysroot) - : SourceMgr(SourceMgr), FileMgr(FileMgr), Diags(Diags), - SemaObj(0), PP(0), Context(0), StatCache(0), Consumer(0), + : DeserializationListener(0), SourceMgr(SourceMgr), FileMgr(FileMgr), + Diags(Diags), SemaObj(0), PP(0), Context(0), StatCache(0), Consumer(0), IdentifierTableData(0), IdentifierLookupTable(0), IdentifierOffsets(0), MethodPoolLookupTable(0), 
MethodPoolLookupTableData(0), @@ -360,14 +451,6 @@ PCHReader::PCHReader(SourceManager &SourceMgr, FileManager &FileMgr, PCHReader::~PCHReader() {} -Expr *PCHReader::ReadDeclExpr() { - return dyn_cast_or_null<Expr>(ReadStmt(DeclsCursor)); -} - -Expr *PCHReader::ReadTypeExpr() { - return dyn_cast_or_null<Expr>(ReadStmt(DeclsCursor)); -} - namespace { class PCHMethodPoolLookupTrait { @@ -616,27 +699,18 @@ void PCHReader::Error(const char *Msg) { Diag(diag::err_fe_pch_malformed) << Msg; } -/// \brief Check the contents of the predefines buffer against the -/// contents of the predefines buffer used to build the PCH file. -/// -/// The contents of the two predefines buffers should be the same. If -/// not, then some command-line option changed the preprocessor state -/// and we must reject the PCH file. -/// -/// \param PCHPredef The start of the predefines buffer in the PCH -/// file. -/// -/// \param PCHPredefLen The length of the predefines buffer in the PCH -/// file. +/// \brief Check the contents of the concatenation of all predefines buffers in +/// the PCH chain against the contents of the predefines buffer of the current +/// compiler invocation. /// -/// \param PCHBufferID The FileID for the PCH predefines buffer. +/// The contents should be the same. If not, then some command-line option +/// changed the preprocessor state and we must probably reject the PCH file. /// /// \returns true if there was a mismatch (in which case the PCH file /// should be ignored), or false otherwise. 
-bool PCHReader::CheckPredefinesBuffer(llvm::StringRef PCHPredef, - FileID PCHBufferID) { +bool PCHReader::CheckPredefinesBuffers() { if (Listener) - return Listener->ReadPredefinesBuffer(PCHPredef, PCHBufferID, + return Listener->ReadPredefinesBuffer(PCHPredefinesBuffers, ActualOriginalFileName, SuggestedPredefines); return false; @@ -667,16 +741,17 @@ bool PCHReader::ParseLineTable(llvm::SmallVectorImpl<uint64_t> &Record) { // Parse the line entries std::vector<LineEntry> Entries; while (Idx < Record.size()) { - int FID = FileIDs[Record[Idx++]]; + int FID = Record[Idx++]; // Extract the line entries unsigned NumEntries = Record[Idx++]; + assert(NumEntries && "Numentries is 00000"); Entries.clear(); Entries.reserve(NumEntries); for (unsigned I = 0; I != NumEntries; ++I) { unsigned FileOffset = Record[Idx++]; unsigned LineNo = Record[Idx++]; - int FilenameID = Record[Idx++]; + int FilenameID = FileIDs[Record[Idx++]]; SrcMgr::CharacteristicKind FileKind = (SrcMgr::CharacteristicKind)Record[Idx++]; unsigned IncludeOffset = Record[Idx++]; @@ -964,9 +1039,11 @@ PCHReader::PCHReadResult PCHReader::ReadSLocEntryRecord(unsigned ID) { FileID BufferID = SourceMgr.createFileIDForMemBuffer(Buffer, ID, Offset); if (strcmp(Name, "<built-in>") == 0) { - PCHPredefinesBufferID = BufferID; - PCHPredefines = BlobStart; - PCHPredefinesLen = BlobLen - 1; + PCHPredefinesBlock Block = { + BufferID, + llvm::StringRef(BlobStart, BlobLen - 1) + }; + PCHPredefinesBuffers.push_back(Block); } break; @@ -1512,6 +1589,22 @@ PCHReader::ReadPCHBlock() { ExtVectorDecls.swap(Record); break; + case pch::VTABLE_USES: + if (!VTableUses.empty()) { + Error("duplicate VTABLE_USES record in PCH file"); + return Failure; + } + VTableUses.swap(Record); + break; + + case pch::DYNAMIC_CLASSES: + if (!DynamicClasses.empty()) { + Error("duplicate DYNAMIC_CLASSES record in PCH file"); + return Failure; + } + DynamicClasses.swap(Record); + break; + case pch::ORIGINAL_FILE_NAME: 
ActualOriginalFileName.assign(BlobStart, BlobLen); OriginalFileName = ActualOriginalFileName; @@ -1626,8 +1719,7 @@ PCHReader::PCHReadResult PCHReader::ReadPCH(const std::string &FileName) { } // Check the predefines buffer. - if (CheckPredefinesBuffer(llvm::StringRef(PCHPredefines, PCHPredefinesLen), - PCHPredefinesBufferID)) + if (CheckPredefinesBuffers()) return IgnorePCH; if (PP) { @@ -1693,7 +1785,7 @@ void PCHReader::InitializeContext(ASTContext &Ctx) { PP->setExternalSource(this); // Load the translation unit declaration - ReadDeclRecord(DeclOffsets[0], 0); + GetTranslationUnitDecl(); // Load the special types. Context->setBuiltinVaListType( @@ -1776,6 +1868,9 @@ void PCHReader::InitializeContext(ASTContext &Ctx) { Context->ObjCSelRedefinitionType = GetType(ObjCSelRedef); if (unsigned String = SpecialTypes[pch::SPECIAL_TYPE_NS_CONSTANT_STRING]) Context->setNSConstantStringType(GetType(String)); + + if (SpecialTypes[pch::SPECIAL_TYPE_INT128_INSTALLED]) + Context->setInt128Installed(); } /// \brief Retrieve the name of the original source file name @@ -1915,7 +2010,8 @@ bool PCHReader::ParseLanguageOptions( PARSE_LANGOPT(Blocks); PARSE_LANGOPT(EmitAllDecls); PARSE_LANGOPT(MathErrno); - PARSE_LANGOPT(OverflowChecking); + LangOpts.setSignedOverflowBehavior((LangOptions::SignedOverflowBehaviorTy) + Record[Idx++]); PARSE_LANGOPT(HeinousExtensions); PARSE_LANGOPT(Optimize); PARSE_LANGOPT(OptimizeSize); @@ -1926,13 +2022,10 @@ bool PCHReader::ParseLanguageOptions( PARSE_LANGOPT(AccessControl); PARSE_LANGOPT(CharIsSigned); PARSE_LANGOPT(ShortWChar); - LangOpts.setGCMode((LangOptions::GCMode)Record[Idx]); - ++Idx; - LangOpts.setVisibilityMode((LangOptions::VisibilityMode)Record[Idx]); - ++Idx; + LangOpts.setGCMode((LangOptions::GCMode)Record[Idx++]); + LangOpts.setVisibilityMode((LangOptions::VisibilityMode)Record[Idx++]); LangOpts.setStackProtectorMode((LangOptions::StackProtectorMode) - Record[Idx]); - ++Idx; + Record[Idx++]); PARSE_LANGOPT(InstantiationDepth); 
PARSE_LANGOPT(OpenCL); PARSE_LANGOPT(CatchUndefined); @@ -1959,6 +2052,8 @@ QualType PCHReader::ReadTypeRecord(uint64_t Offset) { // after reading this type. SavedStreamPosition SavedPosition(DeclsCursor); + ReadingKindTracker ReadingKind(Read_Type, *this); + // Note that we are loading a type record. LoadingTypeOrDecl Loading(*this); @@ -2022,7 +2117,7 @@ QualType PCHReader::ReadTypeRecord(uint64_t Offset) { } case pch::TYPE_MEMBER_POINTER: { - if (Record.size() != 1) { + if (Record.size() != 2) { Error("Incorrect encoding of member pointer type"); return QualType(); } @@ -2054,26 +2149,26 @@ QualType PCHReader::ReadTypeRecord(uint64_t Offset) { unsigned IndexTypeQuals = Record[2]; SourceLocation LBLoc = SourceLocation::getFromRawEncoding(Record[3]); SourceLocation RBLoc = SourceLocation::getFromRawEncoding(Record[4]); - return Context->getVariableArrayType(ElementType, ReadTypeExpr(), + return Context->getVariableArrayType(ElementType, ReadExpr(), ASM, IndexTypeQuals, SourceRange(LBLoc, RBLoc)); } case pch::TYPE_VECTOR: { - if (Record.size() != 4) { + if (Record.size() != 3) { Error("incorrect encoding of vector type in PCH file"); return QualType(); } QualType ElementType = GetType(Record[0]); unsigned NumElements = Record[1]; - bool AltiVec = Record[2]; - bool Pixel = Record[3]; - return Context->getVectorType(ElementType, NumElements, AltiVec, Pixel); + unsigned AltiVecSpec = Record[2]; + return Context->getVectorType(ElementType, NumElements, + (VectorType::AltiVecSpecific)AltiVecSpec); } case pch::TYPE_EXT_VECTOR: { - if (Record.size() != 4) { + if (Record.size() != 3) { Error("incorrect encoding of extended vector type in PCH file"); return QualType(); } @@ -2123,15 +2218,18 @@ QualType PCHReader::ReadTypeRecord(uint64_t Offset) { return Context->getTypeDeclType( cast<UnresolvedUsingTypenameDecl>(GetDecl(Record[0]))); - case pch::TYPE_TYPEDEF: - if (Record.size() != 1) { + case pch::TYPE_TYPEDEF: { + if (Record.size() != 2) { Error("incorrect encoding of 
typedef type"); return QualType(); } - return Context->getTypeDeclType(cast<TypedefDecl>(GetDecl(Record[0]))); + TypedefDecl *Decl = cast<TypedefDecl>(GetDecl(Record[0])); + QualType Canonical = GetType(Record[1]); + return Context->getTypedefType(Decl, Canonical); + } case pch::TYPE_TYPEOF_EXPR: - return Context->getTypeOfExprType(ReadTypeExpr()); + return Context->getTypeOfExprType(ReadExpr()); case pch::TYPE_TYPEOF: { if (Record.size() != 1) { @@ -2143,32 +2241,36 @@ QualType PCHReader::ReadTypeRecord(uint64_t Offset) { } case pch::TYPE_DECLTYPE: - return Context->getDecltypeType(ReadTypeExpr()); + return Context->getDecltypeType(ReadExpr()); - case pch::TYPE_RECORD: - if (Record.size() != 1) { + case pch::TYPE_RECORD: { + if (Record.size() != 2) { Error("incorrect encoding of record type"); return QualType(); } - return Context->getTypeDeclType(cast<RecordDecl>(GetDecl(Record[0]))); + bool IsDependent = Record[0]; + QualType T = Context->getRecordType(cast<RecordDecl>(GetDecl(Record[1]))); + T->Dependent = IsDependent; + return T; + } - case pch::TYPE_ENUM: - if (Record.size() != 1) { + case pch::TYPE_ENUM: { + if (Record.size() != 2) { Error("incorrect encoding of enum type"); return QualType(); } - return Context->getTypeDeclType(cast<EnumDecl>(GetDecl(Record[0]))); + bool IsDependent = Record[0]; + QualType T = Context->getEnumType(cast<EnumDecl>(GetDecl(Record[1]))); + T->Dependent = IsDependent; + return T; + } case pch::TYPE_ELABORATED: { - if (Record.size() != 2) { - Error("incorrect encoding of elaborated type"); - return QualType(); - } - unsigned Tag = Record[1]; - // FIXME: Deserialize the qualifier (C++ only) - return Context->getElaboratedType((ElaboratedTypeKeyword) Tag, - /* NNS */ 0, - GetType(Record[0])); + unsigned Idx = 0; + ElaboratedTypeKeyword Keyword = (ElaboratedTypeKeyword)Record[Idx++]; + NestedNameSpecifier *NNS = ReadNestedNameSpecifier(Record, Idx); + QualType NamedType = GetType(Record[Idx++]); + return 
Context->getElaboratedType(Keyword, NNS, NamedType); } case pch::TYPE_OBJC_INTERFACE: { @@ -2205,7 +2307,77 @@ QualType PCHReader::ReadTypeRecord(uint64_t Offset) { case pch::TYPE_INJECTED_CLASS_NAME: { CXXRecordDecl *D = cast<CXXRecordDecl>(GetDecl(Record[0])); QualType TST = GetType(Record[1]); // probably derivable - return Context->getInjectedClassNameType(D, TST); + // FIXME: ASTContext::getInjectedClassNameType is not currently suitable + // for PCH reading, too much interdependencies. + return + QualType(new (*Context, TypeAlignment) InjectedClassNameType(D, TST), 0); + } + + case pch::TYPE_TEMPLATE_TYPE_PARM: { + unsigned Idx = 0; + unsigned Depth = Record[Idx++]; + unsigned Index = Record[Idx++]; + bool Pack = Record[Idx++]; + IdentifierInfo *Name = GetIdentifierInfo(Record, Idx); + return Context->getTemplateTypeParmType(Depth, Index, Pack, Name); + } + + case pch::TYPE_DEPENDENT_NAME: { + unsigned Idx = 0; + ElaboratedTypeKeyword Keyword = (ElaboratedTypeKeyword)Record[Idx++]; + NestedNameSpecifier *NNS = ReadNestedNameSpecifier(Record, Idx); + const IdentifierInfo *Name = this->GetIdentifierInfo(Record, Idx); + QualType Canon = GetType(Record[Idx++]); + return Context->getDependentNameType(Keyword, NNS, Name, Canon); + } + + case pch::TYPE_DEPENDENT_TEMPLATE_SPECIALIZATION: { + unsigned Idx = 0; + ElaboratedTypeKeyword Keyword = (ElaboratedTypeKeyword)Record[Idx++]; + NestedNameSpecifier *NNS = ReadNestedNameSpecifier(Record, Idx); + const IdentifierInfo *Name = this->GetIdentifierInfo(Record, Idx); + unsigned NumArgs = Record[Idx++]; + llvm::SmallVector<TemplateArgument, 8> Args; + Args.reserve(NumArgs); + while (NumArgs--) + Args.push_back(ReadTemplateArgument(Record, Idx)); + return Context->getDependentTemplateSpecializationType(Keyword, NNS, Name, + Args.size(), Args.data()); + } + + case pch::TYPE_DEPENDENT_SIZED_ARRAY: { + unsigned Idx = 0; + + // ArrayType + QualType ElementType = GetType(Record[Idx++]); + ArrayType::ArraySizeModifier ASM + = 
(ArrayType::ArraySizeModifier)Record[Idx++]; + unsigned IndexTypeQuals = Record[Idx++]; + + // DependentSizedArrayType + Expr *NumElts = ReadExpr(); + SourceRange Brackets = ReadSourceRange(Record, Idx); + + return Context->getDependentSizedArrayType(ElementType, NumElts, ASM, + IndexTypeQuals, Brackets); + } + + case pch::TYPE_TEMPLATE_SPECIALIZATION: { + unsigned Idx = 0; + bool IsDependent = Record[Idx++]; + TemplateName Name = ReadTemplateName(Record, Idx); + llvm::SmallVector<TemplateArgument, 8> Args; + ReadTemplateArgumentList(Args, Record, Idx); + QualType Canon = GetType(Record[Idx++]); + QualType T; + if (Canon.isNull()) + T = Context->getCanonicalTemplateSpecializationType(Name, Args.data(), + Args.size()); + else + T = Context->getTemplateSpecializationType(Name, Args.data(), + Args.size(), Canon); + T->Dependent = IsDependent; + return T; } } // Suppress a GCC warning @@ -2272,7 +2444,7 @@ void TypeLocReader::VisitArrayTypeLoc(ArrayTypeLoc TL) { TL.setLBracketLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); TL.setRBracketLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); if (Record[Idx++]) - TL.setSizeExpr(Reader.ReadDeclExpr()); + TL.setSizeExpr(Reader.ReadExpr()); else TL.setSizeExpr(0); } @@ -2367,6 +2539,18 @@ void TypeLocReader::VisitDependentNameTypeLoc(DependentNameTypeLoc TL) { TL.setQualifierRange(Reader.ReadSourceRange(Record, Idx)); TL.setNameLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); } +void TypeLocReader::VisitDependentTemplateSpecializationTypeLoc( + DependentTemplateSpecializationTypeLoc TL) { + TL.setKeywordLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); + TL.setQualifierRange(Reader.ReadSourceRange(Record, Idx)); + TL.setNameLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); + TL.setLAngleLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); + TL.setRAngleLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); + for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I) + TL.setArgLocInfo(I, + 
Reader.GetTemplateArgumentLocInfo(TL.getTypePtr()->getArg(I).getKind(), + Record, Idx)); +} void TypeLocReader::VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) { TL.setNameLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); } @@ -2443,8 +2627,12 @@ QualType PCHReader::GetType(pch::TypeID ID) { Index -= pch::NUM_PREDEF_TYPE_IDS; //assert(Index < TypesLoaded.size() && "Type index out-of-range"); - if (TypesLoaded[Index].isNull()) + if (TypesLoaded[Index].isNull()) { TypesLoaded[Index] = ReadTypeRecord(TypeOffsets[Index]); + TypesLoaded[Index]->setFromPCH(); + if (DeserializationListener) + DeserializationListener->TypeRead(ID, TypesLoaded[Index]); + } return TypesLoaded[Index].withFastQualifiers(FastQuals); } @@ -2455,16 +2643,13 @@ PCHReader::GetTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind, unsigned &Index) { switch (Kind) { case TemplateArgument::Expression: - return ReadDeclExpr(); + return ReadExpr(); case TemplateArgument::Type: return GetTypeSourceInfo(Record, Index); case TemplateArgument::Template: { - SourceLocation - QualStart = SourceLocation::getFromRawEncoding(Record[Index++]), - QualEnd = SourceLocation::getFromRawEncoding(Record[Index++]), - TemplateNameLoc = SourceLocation::getFromRawEncoding(Record[Index++]); - return TemplateArgumentLocInfo(SourceRange(QualStart, QualEnd), - TemplateNameLoc); + SourceRange QualifierRange = ReadSourceRange(Record, Index); + SourceLocation TemplateNameLoc = ReadSourceLocation(Record, Index); + return TemplateArgumentLocInfo(QualifierRange, TemplateNameLoc); } case TemplateArgument::Null: case TemplateArgument::Integral: @@ -2476,6 +2661,32 @@ PCHReader::GetTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind, return TemplateArgumentLocInfo(); } +TemplateArgumentLoc +PCHReader::ReadTemplateArgumentLoc(const RecordData &Record, unsigned &Index) { + TemplateArgument Arg = ReadTemplateArgument(Record, Index); + + if (Arg.getKind() == TemplateArgument::Expression) { + if (Record[Index++]) // bool 
InfoHasSameExpr. + return TemplateArgumentLoc(Arg, TemplateArgumentLocInfo(Arg.getAsExpr())); + } + return TemplateArgumentLoc(Arg, GetTemplateArgumentLocInfo(Arg.getKind(), + Record, Index)); +} + +Decl *PCHReader::GetExternalDecl(uint32_t ID) { + return GetDecl(ID); +} + +TranslationUnitDecl *PCHReader::GetTranslationUnitDecl() { + if (!DeclsLoaded[0]) { + ReadDeclRecord(DeclOffsets[0], 0); + if (DeserializationListener) + DeserializationListener->DeclRead(0, DeclsLoaded[0]); + } + + return cast<TranslationUnitDecl>(DeclsLoaded[0]); +} + Decl *PCHReader::GetDecl(pch::DeclID ID) { if (ID == 0) return 0; @@ -2486,8 +2697,11 @@ Decl *PCHReader::GetDecl(pch::DeclID ID) { } unsigned Index = ID - 1; - if (!DeclsLoaded[Index]) + if (!DeclsLoaded[Index]) { ReadDeclRecord(DeclOffsets[Index], Index); + if (DeserializationListener) + DeserializationListener->DeclRead(ID, DeclsLoaded[Index]); + } return DeclsLoaded[Index]; } @@ -2497,15 +2711,15 @@ Decl *PCHReader::GetDecl(pch::DeclID ID) { /// This operation will read a new statement from the external /// source each time it is called, and is meant to be used via a /// LazyOffsetPtr (which is used by Decls for the body of functions, etc). -Stmt *PCHReader::GetDeclStmt(uint64_t Offset) { +Stmt *PCHReader::GetExternalDeclStmt(uint64_t Offset) { // Since we know tha this statement is part of a decl, make sure to use the // decl cursor to read it. 
DeclsCursor.JumpToBit(Offset); - return ReadStmt(DeclsCursor); + return ReadStmtFromStream(DeclsCursor); } -bool PCHReader::ReadDeclsLexicallyInContext(DeclContext *DC, - llvm::SmallVectorImpl<pch::DeclID> &Decls) { +bool PCHReader::FindExternalLexicalDecls(const DeclContext *DC, + llvm::SmallVectorImpl<Decl*> &Decls) { assert(DC->hasExternalLexicalStorage() && "DeclContext has no lexical decls in storage"); @@ -2531,20 +2745,22 @@ bool PCHReader::ReadDeclsLexicallyInContext(DeclContext *DC, } // Load all of the declaration IDs - Decls.clear(); - Decls.insert(Decls.end(), Record.begin(), Record.end()); + for (RecordData::iterator I = Record.begin(), E = Record.end(); I != E; ++I) + Decls.push_back(GetDecl(*I)); ++NumLexicalDeclContextsRead; return false; } -bool PCHReader::ReadDeclsVisibleInContext(DeclContext *DC, - llvm::SmallVectorImpl<VisibleDeclaration> &Decls) { +DeclContext::lookup_result +PCHReader::FindExternalVisibleDeclsByName(const DeclContext *DC, + DeclarationName Name) { assert(DC->hasExternalVisibleStorage() && "DeclContext has no visible decls in storage"); uint64_t Offset = DeclContextOffsets[DC].second; if (Offset == 0) { Error("DeclContext has no visible decls in storage"); - return true; + return DeclContext::lookup_result(DeclContext::lookup_iterator(), + DeclContext::lookup_iterator()); } // Keep track of where we are in the stream, then jump back there @@ -2559,13 +2775,16 @@ bool PCHReader::ReadDeclsVisibleInContext(DeclContext *DC, unsigned RecCode = DeclsCursor.ReadRecord(Code, Record); if (RecCode != pch::DECL_CONTEXT_VISIBLE) { Error("Expected visible block"); - return true; + return DeclContext::lookup_result(DeclContext::lookup_iterator(), + DeclContext::lookup_iterator()); } - if (Record.size() == 0) - return false; - - Decls.clear(); + llvm::SmallVector<VisibleDeclaration, 64> Decls; + if (Record.empty()) { + SetExternalVisibleDecls(DC, Decls); + return DeclContext::lookup_result(DeclContext::lookup_iterator(), + 
DeclContext::lookup_iterator()); + } unsigned Idx = 0; while (Idx < Record.size()) { @@ -2580,7 +2799,18 @@ bool PCHReader::ReadDeclsVisibleInContext(DeclContext *DC, } ++NumVisibleDeclContextsRead; - return false; + + SetExternalVisibleDecls(DC, Decls); + return const_cast<DeclContext*>(DC)->lookup(Name); +} + +void PCHReader::PassInterestingDeclsToConsumer() { + assert(Consumer); + while (!InterestingDecls.empty()) { + DeclGroupRef DG(InterestingDecls.front()); + InterestingDecls.pop_front(); + Consumer->HandleTopLevelDecl(DG); + } } void PCHReader::StartTranslationUnit(ASTConsumer *Consumer) { @@ -2590,15 +2820,12 @@ void PCHReader::StartTranslationUnit(ASTConsumer *Consumer) { return; for (unsigned I = 0, N = ExternalDefinitions.size(); I != N; ++I) { - // Force deserialization of this decl, which will cause it to be passed to - // the consumer (or queued). + // Force deserialization of this decl, which will cause it to be queued for + // passing to the consumer. GetDecl(ExternalDefinitions[I]); } - for (unsigned I = 0, N = InterestingDecls.size(); I != N; ++I) { - DeclGroupRef DG(InterestingDecls[I]); - Consumer->HandleTopLevelDecl(DG); - } + PassInterestingDeclsToConsumer(); } void PCHReader::PrintStats() { @@ -2708,6 +2935,26 @@ void PCHReader::InitializeSema(Sema &S) { for (unsigned I = 0, N = ExtVectorDecls.size(); I != N; ++I) SemaObj->ExtVectorDecls.push_back( cast<TypedefDecl>(GetDecl(ExtVectorDecls[I]))); + + // FIXME: Do VTable uses and dynamic classes deserialize too much ? + // Can we cut them down before writing them ? + + // If there were any VTable uses, deserialize the information and add it + // to Sema's vector and map of VTable uses. 
+ unsigned Idx = 0; + for (unsigned I = 0, N = VTableUses[Idx++]; I != N; ++I) { + CXXRecordDecl *Class = cast<CXXRecordDecl>(GetDecl(VTableUses[Idx++])); + SourceLocation Loc = ReadSourceLocation(VTableUses, Idx); + bool DefinitionRequired = VTableUses[Idx++]; + SemaObj->VTableUses.push_back(std::make_pair(Class, Loc)); + SemaObj->VTablesUsed[Class] = DefinitionRequired; + } + + // If there were any dynamic classes declarations, deserialize them + // and add them to Sema's vector of such declarations. + for (unsigned I = 0, N = DynamicClasses.size(); I != N; ++I) + SemaObj->DynamicClasses.push_back( + cast<CXXRecordDecl>(GetDecl(DynamicClasses[I]))); } IdentifierInfo* PCHReader::get(const char *NameStart, const char *NameEnd) { @@ -2853,11 +3100,11 @@ Selector PCHReader::DecodeSelector(unsigned ID) { return SelectorsLoaded[Index]; } -Selector PCHReader::GetSelector(uint32_t ID) { +Selector PCHReader::GetExternalSelector(uint32_t ID) { return DecodeSelector(ID); } -uint32_t PCHReader::GetNumKnownSelectors() { +uint32_t PCHReader::GetNumExternalSelectors() { return TotalNumSelectors + 1; } @@ -2901,6 +3148,126 @@ PCHReader::ReadDeclarationName(const RecordData &Record, unsigned &Idx) { return DeclarationName(); } +TemplateName +PCHReader::ReadTemplateName(const RecordData &Record, unsigned &Idx) { + TemplateName::NameKind Kind = (TemplateName::NameKind)Record[Idx++]; + switch (Kind) { + case TemplateName::Template: + return TemplateName(cast_or_null<TemplateDecl>(GetDecl(Record[Idx++]))); + + case TemplateName::OverloadedTemplate: { + unsigned size = Record[Idx++]; + UnresolvedSet<8> Decls; + while (size--) + Decls.addDecl(cast<NamedDecl>(GetDecl(Record[Idx++]))); + + return Context->getOverloadedTemplateName(Decls.begin(), Decls.end()); + } + + case TemplateName::QualifiedTemplate: { + NestedNameSpecifier *NNS = ReadNestedNameSpecifier(Record, Idx); + bool hasTemplKeyword = Record[Idx++]; + TemplateDecl *Template = cast<TemplateDecl>(GetDecl(Record[Idx++])); + 
return Context->getQualifiedTemplateName(NNS, hasTemplKeyword, Template); + } + + case TemplateName::DependentTemplate: { + NestedNameSpecifier *NNS = ReadNestedNameSpecifier(Record, Idx); + if (Record[Idx++]) // isIdentifier + return Context->getDependentTemplateName(NNS, + GetIdentifierInfo(Record, Idx)); + return Context->getDependentTemplateName(NNS, + (OverloadedOperatorKind)Record[Idx++]); + } + } + + assert(0 && "Unhandled template name kind!"); + return TemplateName(); +} + +TemplateArgument +PCHReader::ReadTemplateArgument(const RecordData &Record, unsigned &Idx) { + switch ((TemplateArgument::ArgKind)Record[Idx++]) { + case TemplateArgument::Null: + return TemplateArgument(); + case TemplateArgument::Type: + return TemplateArgument(GetType(Record[Idx++])); + case TemplateArgument::Declaration: + return TemplateArgument(GetDecl(Record[Idx++])); + case TemplateArgument::Integral: { + llvm::APSInt Value = ReadAPSInt(Record, Idx); + QualType T = GetType(Record[Idx++]); + return TemplateArgument(Value, T); + } + case TemplateArgument::Template: + return TemplateArgument(ReadTemplateName(Record, Idx)); + case TemplateArgument::Expression: + return TemplateArgument(ReadExpr()); + case TemplateArgument::Pack: { + unsigned NumArgs = Record[Idx++]; + llvm::SmallVector<TemplateArgument, 8> Args; + Args.reserve(NumArgs); + while (NumArgs--) + Args.push_back(ReadTemplateArgument(Record, Idx)); + TemplateArgument TemplArg; + TemplArg.setArgumentPack(Args.data(), Args.size(), /*CopyArgs=*/true); + return TemplArg; + } + } + + assert(0 && "Unhandled template argument kind!"); + return TemplateArgument(); +} + +TemplateParameterList * +PCHReader::ReadTemplateParameterList(const RecordData &Record, unsigned &Idx) { + SourceLocation TemplateLoc = ReadSourceLocation(Record, Idx); + SourceLocation LAngleLoc = ReadSourceLocation(Record, Idx); + SourceLocation RAngleLoc = ReadSourceLocation(Record, Idx); + + unsigned NumParams = Record[Idx++]; + llvm::SmallVector<NamedDecl *, 
16> Params; + Params.reserve(NumParams); + while (NumParams--) + Params.push_back(cast<NamedDecl>(GetDecl(Record[Idx++]))); + + TemplateParameterList* TemplateParams = + TemplateParameterList::Create(*Context, TemplateLoc, LAngleLoc, + Params.data(), Params.size(), RAngleLoc); + return TemplateParams; +} + +void +PCHReader:: +ReadTemplateArgumentList(llvm::SmallVector<TemplateArgument, 8> &TemplArgs, + const RecordData &Record, unsigned &Idx) { + unsigned NumTemplateArgs = Record[Idx++]; + TemplArgs.reserve(NumTemplateArgs); + while (NumTemplateArgs--) + TemplArgs.push_back(ReadTemplateArgument(Record, Idx)); +} + +/// \brief Read a UnresolvedSet structure. +void PCHReader::ReadUnresolvedSet(UnresolvedSetImpl &Set, + const RecordData &Record, unsigned &Idx) { + unsigned NumDecls = Record[Idx++]; + while (NumDecls--) { + NamedDecl *D = cast<NamedDecl>(GetDecl(Record[Idx++])); + AccessSpecifier AS = (AccessSpecifier)Record[Idx++]; + Set.addDecl(D, AS); + } +} + +CXXBaseSpecifier +PCHReader::ReadCXXBaseSpecifier(const RecordData &Record, unsigned &Idx) { + bool isVirtual = static_cast<bool>(Record[Idx++]); + bool isBaseOfClass = static_cast<bool>(Record[Idx++]); + AccessSpecifier AS = static_cast<AccessSpecifier>(Record[Idx++]); + QualType T = GetType(Record[Idx++]); + SourceRange Range = ReadSourceRange(Record, Idx); + return CXXBaseSpecifier(Range, isVirtual, isBaseOfClass, AS, T); +} + NestedNameSpecifier * PCHReader::ReadNestedNameSpecifier(const RecordData &Record, unsigned &Idx) { unsigned N = Record[Idx++]; @@ -2934,16 +3301,17 @@ PCHReader::ReadNestedNameSpecifier(const RecordData &Record, unsigned &Idx) { // No associated value, and there can't be a prefix. 
break; } - Prev = NNS; } + Prev = NNS; } return NNS; } SourceRange PCHReader::ReadSourceRange(const RecordData &Record, unsigned &Idx) { - return SourceRange(SourceLocation::getFromRawEncoding(Record[Idx++]), - SourceLocation::getFromRawEncoding(Record[Idx++])); + SourceLocation beg = SourceLocation::getFromRawEncoding(Record[Idx++]); + SourceLocation end = SourceLocation::getFromRawEncoding(Record[Idx++]); + return SourceRange(beg, end); } /// \brief Read an integral value @@ -3090,6 +3458,11 @@ PCHReader::LoadingTypeOrDecl::~LoadingTypeOrDecl() { true); Reader.PendingIdentifierInfos.pop_front(); } + + // We are not in recursive loading, so it's safe to pass the "interesting" + // decls to the consumer. + if (Reader.Consumer) + Reader.PassInterestingDeclsToConsumer(); } Reader.CurrentlyLoadingTypeOrDecl = Parent; diff --git a/contrib/llvm/tools/clang/lib/Frontend/PCHReaderDecl.cpp b/contrib/llvm/tools/clang/lib/Frontend/PCHReaderDecl.cpp index 1ef0441..742f0e4 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/PCHReaderDecl.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/PCHReaderDecl.cpp @@ -27,16 +27,19 @@ using namespace clang; // Declaration deserialization //===----------------------------------------------------------------------===// -namespace { +namespace clang { class PCHDeclReader : public DeclVisitor<PCHDeclReader, void> { PCHReader &Reader; const PCHReader::RecordData &Record; unsigned &Idx; + pch::TypeID TypeIDForTypeDecl; public: PCHDeclReader(PCHReader &Reader, const PCHReader::RecordData &Record, unsigned &Idx) - : Reader(Reader), Record(Record), Idx(Idx) { } + : Reader(Reader), Record(Record), Idx(Idx), TypeIDForTypeDecl(0) { } + + void Visit(Decl *D); void VisitDecl(Decl *D); void VisitTranslationUnitDecl(TranslationUnitDecl *TU); @@ -46,7 +49,7 @@ namespace { void VisitNamespaceAliasDecl(NamespaceAliasDecl *D); void VisitTypeDecl(TypeDecl *TD); void VisitTypedefDecl(TypedefDecl *TD); - void 
VisitUnresolvedUsingTypename(UnresolvedUsingTypenameDecl *D); + void VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D); void VisitTagDecl(TagDecl *TD); void VisitEnumDecl(EnumDecl *ED); void VisitRecordDecl(RecordDecl *RD); @@ -58,7 +61,7 @@ namespace { void VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D); void VisitValueDecl(ValueDecl *VD); void VisitEnumConstantDecl(EnumConstantDecl *ECD); - void VisitUnresolvedUsingValue(UnresolvedUsingValueDecl *D); + void VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D); void VisitDeclaratorDecl(DeclaratorDecl *DD); void VisitFunctionDecl(FunctionDecl *FD); void VisitCXXMethodDecl(CXXMethodDecl *D); @@ -72,19 +75,21 @@ namespace { void VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D); void VisitTemplateDecl(TemplateDecl *D); void VisitClassTemplateDecl(ClassTemplateDecl *D); - void visitFunctionTemplateDecl(FunctionTemplateDecl *D); + void VisitFunctionTemplateDecl(FunctionTemplateDecl *D); void VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D); - void VisitUsing(UsingDecl *D); - void VisitUsingShadow(UsingShadowDecl *D); + void VisitUsingDecl(UsingDecl *D); + void VisitUsingShadowDecl(UsingShadowDecl *D); void VisitLinkageSpecDecl(LinkageSpecDecl *D); void VisitFileScopeAsmDecl(FileScopeAsmDecl *AD); + void VisitAccessSpecDecl(AccessSpecDecl *D); + void VisitFriendDecl(FriendDecl *D); void VisitFriendTemplateDecl(FriendTemplateDecl *D); void VisitStaticAssertDecl(StaticAssertDecl *D); void VisitBlockDecl(BlockDecl *BD); std::pair<uint64_t, uint64_t> VisitDeclContext(DeclContext *DC); - // FIXME: Reorder according to DeclNodes.def? + // FIXME: Reorder according to DeclNodes.td? 
void VisitObjCMethodDecl(ObjCMethodDecl *D); void VisitObjCContainerDecl(ObjCContainerDecl *D); void VisitObjCInterfaceDecl(ObjCInterfaceDecl *D); @@ -103,6 +108,19 @@ namespace { }; } +void PCHDeclReader::Visit(Decl *D) { + DeclVisitor<PCHDeclReader, void>::Visit(D); + + if (TypeDecl *TD = dyn_cast<TypeDecl>(D)) { + // if we have a fully initialized TypeDecl, we can safely read its type now. + TD->setTypeForDecl(Reader.GetType(TypeIDForTypeDecl).getTypePtr()); + } else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { + // FunctionDecl's body was written last after all other Stmts/Exprs. + if (Record[Idx++]) + FD->setLazyBody(Reader.getDeclsCursor().GetCurrentBitNo()); + } +} + void PCHDeclReader::VisitDecl(Decl *D) { D->setDeclContext(cast_or_null<DeclContext>(Reader.GetDecl(Record[Idx++]))); D->setLexicalDeclContext( @@ -110,7 +128,7 @@ void PCHDeclReader::VisitDecl(Decl *D) { D->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++])); D->setInvalidDecl(Record[Idx++]); if (Record[Idx++]) - D->addAttr(Reader.ReadAttributes()); + D->initAttrs(Reader.ReadAttributes()); D->setImplicit(Record[Idx++]); D->setUsed(Record[Idx++]); D->setAccess((AccessSpecifier)Record[Idx++]); @@ -130,21 +148,18 @@ void PCHDeclReader::VisitNamedDecl(NamedDecl *ND) { void PCHDeclReader::VisitTypeDecl(TypeDecl *TD) { VisitNamedDecl(TD); - TD->setTypeForDecl(Reader.GetType(Record[Idx++]).getTypePtr()); + // Delay type reading until after we have fully initialized the decl. + TypeIDForTypeDecl = Record[Idx++]; } void PCHDeclReader::VisitTypedefDecl(TypedefDecl *TD) { - // Note that we cannot use VisitTypeDecl here, because we need to - // set the underlying type of the typedef *before* we try to read - // the type associated with the TypedefDecl. 
- VisitNamedDecl(TD); - uint64_t TypeData = Record[Idx++]; + VisitTypeDecl(TD); TD->setTypeSourceInfo(Reader.GetTypeSourceInfo(Record, Idx)); - TD->setTypeForDecl(Reader.GetType(TypeData).getTypePtr()); } void PCHDeclReader::VisitTagDecl(TagDecl *TD) { VisitTypeDecl(TD); + TD->IdentifierNamespace = Record[Idx++]; TD->setPreviousDeclaration( cast_or_null<TagDecl>(Reader.GetDecl(Record[Idx++]))); TD->setTagKind((TagDecl::TagKind)Record[Idx++]); @@ -163,7 +178,8 @@ void PCHDeclReader::VisitEnumDecl(EnumDecl *ED) { ED->setPromotionType(Reader.GetType(Record[Idx++])); ED->setNumPositiveBits(Record[Idx++]); ED->setNumNegativeBits(Record[Idx++]); - // FIXME: C++ InstantiatedFrom + ED->setInstantiationOfMemberEnum( + cast_or_null<EnumDecl>(Reader.GetDecl(Record[Idx++]))); } void PCHDeclReader::VisitRecordDecl(RecordDecl *RD) { @@ -181,7 +197,7 @@ void PCHDeclReader::VisitValueDecl(ValueDecl *VD) { void PCHDeclReader::VisitEnumConstantDecl(EnumConstantDecl *ECD) { VisitValueDecl(ECD); if (Record[Idx++]) - ECD->setInitExpr(Reader.ReadDeclExpr()); + ECD->setInitExpr(Reader.ReadExpr()); ECD->setInitVal(Reader.ReadAPSInt(Record, Idx)); } @@ -195,9 +211,83 @@ void PCHDeclReader::VisitDeclaratorDecl(DeclaratorDecl *DD) { void PCHDeclReader::VisitFunctionDecl(FunctionDecl *FD) { VisitDeclaratorDecl(FD); - if (Record[Idx++]) - FD->setLazyBody(Reader.getDeclsCursor().GetCurrentBitNo()); - FD->setPreviousDeclaration( + + FD->IdentifierNamespace = Record[Idx++]; + switch ((FunctionDecl::TemplatedKind)Record[Idx++]) { + default: assert(false && "Unhandled TemplatedKind!"); + break; + case FunctionDecl::TK_NonTemplate: + break; + case FunctionDecl::TK_FunctionTemplate: + FD->setDescribedFunctionTemplate( + cast<FunctionTemplateDecl>(Reader.GetDecl(Record[Idx++]))); + break; + case FunctionDecl::TK_MemberSpecialization: { + FunctionDecl *InstFD = cast<FunctionDecl>(Reader.GetDecl(Record[Idx++])); + TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++]; + 
SourceLocation POI = Reader.ReadSourceLocation(Record, Idx); + FD->setInstantiationOfMemberFunction(InstFD, TSK); + FD->getMemberSpecializationInfo()->setPointOfInstantiation(POI); + break; + } + case FunctionDecl::TK_FunctionTemplateSpecialization: { + FunctionTemplateDecl *Template + = cast<FunctionTemplateDecl>(Reader.GetDecl(Record[Idx++])); + TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++]; + + // Template arguments. + llvm::SmallVector<TemplateArgument, 8> TemplArgs; + Reader.ReadTemplateArgumentList(TemplArgs, Record, Idx); + + // Template args as written. + llvm::SmallVector<TemplateArgumentLoc, 8> TemplArgLocs; + SourceLocation LAngleLoc, RAngleLoc; + if (Record[Idx++]) { // TemplateArgumentsAsWritten != 0 + unsigned NumTemplateArgLocs = Record[Idx++]; + TemplArgLocs.reserve(NumTemplateArgLocs); + for (unsigned i=0; i != NumTemplateArgLocs; ++i) + TemplArgLocs.push_back(Reader.ReadTemplateArgumentLoc(Record, Idx)); + + LAngleLoc = Reader.ReadSourceLocation(Record, Idx); + RAngleLoc = Reader.ReadSourceLocation(Record, Idx); + } + + SourceLocation POI = Reader.ReadSourceLocation(Record, Idx); + + FD->setFunctionTemplateSpecialization(Template, TemplArgs.size(), + TemplArgs.data(), TSK, + TemplArgLocs.size(), + TemplArgLocs.data(), + LAngleLoc, RAngleLoc, POI); + break; + } + case FunctionDecl::TK_DependentFunctionTemplateSpecialization: { + // Templates. + UnresolvedSet<8> TemplDecls; + unsigned NumTemplates = Record[Idx++]; + while (NumTemplates--) + TemplDecls.addDecl(cast<NamedDecl>(Reader.GetDecl(Record[Idx++]))); + + // Templates args. 
+ TemplateArgumentListInfo TemplArgs; + unsigned NumArgs = Record[Idx++]; + while (NumArgs--) + TemplArgs.addArgument(Reader.ReadTemplateArgumentLoc(Record, Idx)); + TemplArgs.setLAngleLoc(Reader.ReadSourceLocation(Record, Idx)); + TemplArgs.setRAngleLoc(Reader.ReadSourceLocation(Record, Idx)); + + FD->setDependentTemplateSpecialization(*Reader.getContext(), + TemplDecls, TemplArgs); + break; + } + } + + // FunctionDecl's body is handled last at PCHReaderDecl::Visit, + // after everything else is read. + + // Avoid side effects and invariant checking of FunctionDecl's + // setPreviousDeclaration. + FD->redeclarable_base::setPreviousDeclaration( cast_or_null<FunctionDecl>(Reader.GetDecl(Record[Idx++]))); FD->setStorageClass((FunctionDecl::StorageClass)Record[Idx++]); FD->setStorageClassAsWritten((FunctionDecl::StorageClass)Record[Idx++]); @@ -211,7 +301,6 @@ void PCHDeclReader::VisitFunctionDecl(FunctionDecl *FD) { FD->setCopyAssignment(Record[Idx++]); FD->setHasImplicitReturnZero(Record[Idx++]); FD->setLocEnd(SourceLocation::getFromRawEncoding(Record[Idx++])); - // FIXME: C++ TemplateOrInstantiation // Read in the parameters. unsigned NumParams = Record[Idx++]; @@ -220,11 +309,6 @@ void PCHDeclReader::VisitFunctionDecl(FunctionDecl *FD) { for (unsigned I = 0; I != NumParams; ++I) Params.push_back(cast<ParmVarDecl>(Reader.GetDecl(Record[Idx++]))); FD->setParams(Params.data(), NumParams); - - // FIXME: order this properly w.r.t. friendness - // FIXME: this same thing needs to happen for function templates - if (FD->isOverloadedOperator() && !FD->getDeclContext()->isRecord()) - FD->setNonMemberOperator(); } void PCHDeclReader::VisitObjCMethodDecl(ObjCMethodDecl *MD) { @@ -232,7 +316,7 @@ void PCHDeclReader::VisitObjCMethodDecl(ObjCMethodDecl *MD) { if (Record[Idx++]) { // In practice, this won't be executed (since method definitions // don't occur in header files). 
- MD->setBody(Reader.ReadDeclStmt()); + MD->setBody(Reader.ReadStmt()); MD->setSelfDecl(cast<ImplicitParamDecl>(Reader.GetDecl(Record[Idx++]))); MD->setCmdDecl(cast<ImplicitParamDecl>(Reader.GetDecl(Record[Idx++]))); } @@ -374,10 +458,12 @@ void PCHDeclReader::VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *CAD) { void PCHDeclReader::VisitObjCPropertyDecl(ObjCPropertyDecl *D) { VisitNamedDecl(D); D->setAtLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - D->setType(Reader.GetType(Record[Idx++])); + D->setType(Reader.GetTypeSourceInfo(Record, Idx)); // FIXME: stable encoding D->setPropertyAttributes( (ObjCPropertyDecl::PropertyAttributeKind)Record[Idx++]); + D->setPropertyAttributesAsWritten( + (ObjCPropertyDecl::PropertyAttributeKind)Record[Idx++]); // FIXME: stable encoding D->setPropertyImplementation( (ObjCPropertyDecl::PropertyControl)Record[Idx++]); @@ -424,7 +510,12 @@ void PCHDeclReader::VisitFieldDecl(FieldDecl *FD) { VisitDeclaratorDecl(FD); FD->setMutable(Record[Idx++]); if (Record[Idx++]) - FD->setBitWidth(Reader.ReadDeclExpr()); + FD->setBitWidth(Reader.ReadExpr()); + if (!FD->getDeclName()) { + FieldDecl *Tmpl = cast_or_null<FieldDecl>(Reader.GetDecl(Record[Idx++])); + if (Tmpl) + Reader.getContext()->setInstantiatedFromUnnamedFieldDecl(FD, Tmpl); + } } void PCHDeclReader::VisitVarDecl(VarDecl *VD) { @@ -439,7 +530,14 @@ void PCHDeclReader::VisitVarDecl(VarDecl *VD) { VD->setPreviousDeclaration( cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++]))); if (Record[Idx++]) - VD->setInit(Reader.ReadDeclExpr()); + VD->setInit(Reader.ReadExpr()); + + if (Record[Idx++]) { // HasMemberSpecializationInfo. 
+ VarDecl *Tmpl = cast<VarDecl>(Reader.GetDecl(Record[Idx++])); + TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++]; + SourceLocation POI = Reader.ReadSourceLocation(Record, Idx); + Reader.getContext()->setInstantiatedFromStaticDataMember(VD, Tmpl, TSK,POI); + } } void PCHDeclReader::VisitImplicitParamDecl(ImplicitParamDecl *PD) { @@ -450,16 +548,19 @@ void PCHDeclReader::VisitParmVarDecl(ParmVarDecl *PD) { VisitVarDecl(PD); PD->setObjCDeclQualifier((Decl::ObjCDeclQualifier)Record[Idx++]); PD->setHasInheritedDefaultArg(Record[Idx++]); + if (Record[Idx++]) // hasUninstantiatedDefaultArg. + PD->setUninstantiatedDefaultArg(Reader.ReadExpr()); } void PCHDeclReader::VisitFileScopeAsmDecl(FileScopeAsmDecl *AD) { VisitDecl(AD); - AD->setAsmString(cast<StringLiteral>(Reader.ReadDeclExpr())); + AD->setAsmString(cast<StringLiteral>(Reader.ReadExpr())); } void PCHDeclReader::VisitBlockDecl(BlockDecl *BD) { VisitDecl(BD); - BD->setBody(cast_or_null<CompoundStmt>(Reader.ReadDeclStmt())); + BD->setBody(cast_or_null<CompoundStmt>(Reader.ReadStmt())); + BD->setSignatureAsWritten(Reader.GetTypeSourceInfo(Record, Idx)); unsigned NumParams = Record[Idx++]; llvm::SmallVector<ParmVarDecl *, 16> Params; Params.reserve(NumParams); @@ -481,13 +582,9 @@ void PCHDeclReader::VisitNamespaceDecl(NamespaceDecl *D) { D->setNextNamespace( cast_or_null<NamespaceDecl>(Reader.GetDecl(Record[Idx++]))); - // Only read one reference--the original or anonymous namespace. 
bool IsOriginal = Record[Idx++]; - if (IsOriginal) - D->setAnonymousNamespace( - cast_or_null<NamespaceDecl>(Reader.GetDecl(Record[Idx++]))); - else - D->setOriginalNamespace( + D->OrigOrAnonNamespace.setInt(IsOriginal); + D->OrigOrAnonNamespace.setPointer( cast_or_null<NamespaceDecl>(Reader.GetDecl(Record[Idx++]))); } @@ -501,7 +598,7 @@ void PCHDeclReader::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) { D->setAliasedNamespace(cast<NamedDecl>(Reader.GetDecl(Record[Idx++]))); } -void PCHDeclReader::VisitUsing(UsingDecl *D) { +void PCHDeclReader::VisitUsingDecl(UsingDecl *D) { VisitNamedDecl(D); D->setUsingLocation(Reader.ReadSourceLocation(Record, Idx)); D->setNestedNameRange(Reader.ReadSourceRange(Record, Idx)); @@ -512,15 +609,24 @@ void PCHDeclReader::VisitUsing(UsingDecl *D) { // would avoid existence checks. unsigned NumShadows = Record[Idx++]; for(unsigned I = 0; I != NumShadows; ++I) { - D->addShadowDecl(cast<UsingShadowDecl>(Reader.GetDecl(Record[Idx++]))); + // Avoid invariant checking of UsingDecl::addShadowDecl, the decl may still + // be initializing. 
+ D->Shadows.insert(cast<UsingShadowDecl>(Reader.GetDecl(Record[Idx++]))); } D->setTypeName(Record[Idx++]); + NamedDecl *Pattern = cast_or_null<NamedDecl>(Reader.GetDecl(Record[Idx++])); + if (Pattern) + Reader.getContext()->setInstantiatedFromUsingDecl(D, Pattern); } -void PCHDeclReader::VisitUsingShadow(UsingShadowDecl *D) { +void PCHDeclReader::VisitUsingShadowDecl(UsingShadowDecl *D) { VisitNamedDecl(D); D->setTargetDecl(cast<NamedDecl>(Reader.GetDecl(Record[Idx++]))); D->setUsingDecl(cast<UsingDecl>(Reader.GetDecl(Record[Idx++]))); + UsingShadowDecl *Pattern + = cast_or_null<UsingShadowDecl>(Reader.GetDecl(Record[Idx++])); + if (Pattern) + Reader.getContext()->setInstantiatedFromUsingShadowDecl(D, Pattern); } void PCHDeclReader::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) { @@ -534,14 +640,14 @@ void PCHDeclReader::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) { Reader.GetDecl(Record[Idx++]))); } -void PCHDeclReader::VisitUnresolvedUsingValue(UnresolvedUsingValueDecl *D) { +void PCHDeclReader::VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) { VisitValueDecl(D); D->setTargetNestedNameRange(Reader.ReadSourceRange(Record, Idx)); D->setUsingLoc(Reader.ReadSourceLocation(Record, Idx)); D->setTargetNestedNameSpecifier(Reader.ReadNestedNameSpecifier(Record, Idx)); } -void PCHDeclReader::VisitUnresolvedUsingTypename( +void PCHDeclReader::VisitUnresolvedUsingTypenameDecl( UnresolvedUsingTypenameDecl *D) { VisitTypeDecl(D); D->setTargetNestedNameRange(Reader.ReadSourceRange(Record, Idx)); @@ -551,28 +657,196 @@ void PCHDeclReader::VisitUnresolvedUsingTypename( } void PCHDeclReader::VisitCXXRecordDecl(CXXRecordDecl *D) { - // assert(false && "cannot read CXXRecordDecl"); + ASTContext &C = *Reader.getContext(); + + // We need to allocate the DefinitionData struct ahead of VisitRecordDecl + // so that the other CXXRecordDecls can get a pointer even when the owner + // is still initializing. 
+ bool OwnsDefinitionData = false; + enum DataOwnership { Data_NoDefData, Data_Owner, Data_NotOwner }; + switch ((DataOwnership)Record[Idx++]) { + default: + assert(0 && "Out of sync with PCHDeclWriter or messed up reading"); + case Data_NoDefData: + break; + case Data_Owner: + OwnsDefinitionData = true; + D->DefinitionData = new (C) struct CXXRecordDecl::DefinitionData(D); + break; + case Data_NotOwner: + D->DefinitionData + = cast<CXXRecordDecl>(Reader.GetDecl(Record[Idx++]))->DefinitionData; + break; + } + VisitRecordDecl(D); + + if (OwnsDefinitionData) { + assert(D->DefinitionData); + struct CXXRecordDecl::DefinitionData &Data = *D->DefinitionData; + + Data.UserDeclaredConstructor = Record[Idx++]; + Data.UserDeclaredCopyConstructor = Record[Idx++]; + Data.UserDeclaredCopyAssignment = Record[Idx++]; + Data.UserDeclaredDestructor = Record[Idx++]; + Data.Aggregate = Record[Idx++]; + Data.PlainOldData = Record[Idx++]; + Data.Empty = Record[Idx++]; + Data.Polymorphic = Record[Idx++]; + Data.Abstract = Record[Idx++]; + Data.HasTrivialConstructor = Record[Idx++]; + Data.HasTrivialCopyConstructor = Record[Idx++]; + Data.HasTrivialCopyAssignment = Record[Idx++]; + Data.HasTrivialDestructor = Record[Idx++]; + Data.ComputedVisibleConversions = Record[Idx++]; + Data.DeclaredDefaultConstructor = Record[Idx++]; + Data.DeclaredCopyConstructor = Record[Idx++]; + Data.DeclaredCopyAssignment = Record[Idx++]; + Data.DeclaredDestructor = Record[Idx++]; + + // setBases() is unsuitable since it may try to iterate the bases of an + // unitialized base. + Data.NumBases = Record[Idx++]; + Data.Bases = new(C) CXXBaseSpecifier [Data.NumBases]; + for (unsigned i = 0; i != Data.NumBases; ++i) + Data.Bases[i] = Reader.ReadCXXBaseSpecifier(Record, Idx); + + // FIXME: Make VBases lazily computed when needed to avoid storing them. 
+ Data.NumVBases = Record[Idx++]; + Data.VBases = new(C) CXXBaseSpecifier [Data.NumVBases]; + for (unsigned i = 0; i != Data.NumVBases; ++i) + Data.VBases[i] = Reader.ReadCXXBaseSpecifier(Record, Idx); + + Reader.ReadUnresolvedSet(Data.Conversions, Record, Idx); + Reader.ReadUnresolvedSet(Data.VisibleConversions, Record, Idx); + assert(Data.Definition && "Data.Definition should be already set!"); + Data.FirstFriend + = cast_or_null<FriendDecl>(Reader.GetDecl(Record[Idx++])); + } + + enum CXXRecKind { + CXXRecNotTemplate = 0, CXXRecTemplate, CXXRecMemberSpecialization + }; + switch ((CXXRecKind)Record[Idx++]) { + default: + assert(false && "Out of sync with PCHDeclWriter::VisitCXXRecordDecl?"); + case CXXRecNotTemplate: + break; + case CXXRecTemplate: + D->setDescribedClassTemplate( + cast<ClassTemplateDecl>(Reader.GetDecl(Record[Idx++]))); + break; + case CXXRecMemberSpecialization: { + CXXRecordDecl *RD = cast<CXXRecordDecl>(Reader.GetDecl(Record[Idx++])); + TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++]; + SourceLocation POI = Reader.ReadSourceLocation(Record, Idx); + D->setInstantiationOfMemberClass(RD, TSK); + D->getMemberSpecializationInfo()->setPointOfInstantiation(POI); + break; + } + } } void PCHDeclReader::VisitCXXMethodDecl(CXXMethodDecl *D) { - // assert(false && "cannot read CXXMethodDecl"); VisitFunctionDecl(D); + unsigned NumOverridenMethods = Record[Idx++]; + while (NumOverridenMethods--) { + CXXMethodDecl *MD = cast<CXXMethodDecl>(Reader.GetDecl(Record[Idx++])); + // Avoid invariant checking of CXXMethodDecl::addOverriddenMethod, + // MD may be initializing. 
+ Reader.getContext()->addOverriddenMethod(D, MD); + } } void PCHDeclReader::VisitCXXConstructorDecl(CXXConstructorDecl *D) { - // assert(false && "cannot read CXXConstructorDecl"); VisitCXXMethodDecl(D); + + D->IsExplicitSpecified = Record[Idx++]; + D->ImplicitlyDefined = Record[Idx++]; + + unsigned NumInitializers = Record[Idx++]; + D->NumBaseOrMemberInitializers = NumInitializers; + if (NumInitializers) { + ASTContext &C = *Reader.getContext(); + + D->BaseOrMemberInitializers + = new (C) CXXBaseOrMemberInitializer*[NumInitializers]; + for (unsigned i=0; i != NumInitializers; ++i) { + TypeSourceInfo *BaseClassInfo = 0; + bool IsBaseVirtual = false; + FieldDecl *Member = 0; + + bool IsBaseInitializer = Record[Idx++]; + if (IsBaseInitializer) { + BaseClassInfo = Reader.GetTypeSourceInfo(Record, Idx); + IsBaseVirtual = Record[Idx++]; + } else { + Member = cast<FieldDecl>(Reader.GetDecl(Record[Idx++])); + } + SourceLocation MemberLoc = Reader.ReadSourceLocation(Record, Idx); + Expr *Init = Reader.ReadExpr(); + FieldDecl *AnonUnionMember + = cast_or_null<FieldDecl>(Reader.GetDecl(Record[Idx++])); + SourceLocation LParenLoc = Reader.ReadSourceLocation(Record, Idx); + SourceLocation RParenLoc = Reader.ReadSourceLocation(Record, Idx); + bool IsWritten = Record[Idx++]; + unsigned SourceOrderOrNumArrayIndices; + llvm::SmallVector<VarDecl *, 8> Indices; + if (IsWritten) { + SourceOrderOrNumArrayIndices = Record[Idx++]; + } else { + SourceOrderOrNumArrayIndices = Record[Idx++]; + Indices.reserve(SourceOrderOrNumArrayIndices); + for (unsigned i=0; i != SourceOrderOrNumArrayIndices; ++i) + Indices.push_back(cast<VarDecl>(Reader.GetDecl(Record[Idx++]))); + } + + CXXBaseOrMemberInitializer *BOMInit; + if (IsBaseInitializer) { + BOMInit = new (C) CXXBaseOrMemberInitializer(C, BaseClassInfo, + IsBaseVirtual, LParenLoc, + Init, RParenLoc); + } else if (IsWritten) { + BOMInit = new (C) CXXBaseOrMemberInitializer(C, Member, MemberLoc, + LParenLoc, Init, RParenLoc); + } else { + 
BOMInit = CXXBaseOrMemberInitializer::Create(C, Member, MemberLoc, + LParenLoc, Init, RParenLoc, + Indices.data(), + Indices.size()); + } + + BOMInit->setAnonUnionMember(AnonUnionMember); + D->BaseOrMemberInitializers[i] = BOMInit; + } + } } void PCHDeclReader::VisitCXXDestructorDecl(CXXDestructorDecl *D) { - // assert(false && "cannot read CXXDestructorDecl"); VisitCXXMethodDecl(D); + + D->ImplicitlyDefined = Record[Idx++]; + D->OperatorDelete = cast_or_null<FunctionDecl>(Reader.GetDecl(Record[Idx++])); } void PCHDeclReader::VisitCXXConversionDecl(CXXConversionDecl *D) { - // assert(false && "cannot read CXXConversionDecl"); VisitCXXMethodDecl(D); + D->IsExplicitSpecified = Record[Idx++]; +} + +void PCHDeclReader::VisitAccessSpecDecl(AccessSpecDecl *D) { + VisitDecl(D); + D->setColonLoc(Reader.ReadSourceLocation(Record, Idx)); +} + +void PCHDeclReader::VisitFriendDecl(FriendDecl *D) { + VisitDecl(D); + if (Record[Idx++]) + D->Friend = Reader.GetTypeSourceInfo(Record, Idx); + else + D->Friend = cast<NamedDecl>(Reader.GetDecl(Record[Idx++])); + D->NextFriend = cast_or_null<FriendDecl>(Reader.GetDecl(Record[Idx++])); + D->FriendLoc = Reader.ReadSourceLocation(Record, Idx); } void PCHDeclReader::VisitFriendTemplateDecl(FriendTemplateDecl *D) { @@ -580,37 +854,171 @@ void PCHDeclReader::VisitFriendTemplateDecl(FriendTemplateDecl *D) { } void PCHDeclReader::VisitTemplateDecl(TemplateDecl *D) { - assert(false && "cannot read TemplateDecl"); + VisitNamedDecl(D); + + NamedDecl *TemplatedDecl + = cast_or_null<NamedDecl>(Reader.GetDecl(Record[Idx++])); + TemplateParameterList* TemplateParams + = Reader.ReadTemplateParameterList(Record, Idx); + D->init(TemplatedDecl, TemplateParams); } void PCHDeclReader::VisitClassTemplateDecl(ClassTemplateDecl *D) { - assert(false && "cannot read ClassTemplateDecl"); + VisitTemplateDecl(D); + + D->IdentifierNamespace = Record[Idx++]; + ClassTemplateDecl *PrevDecl = + cast_or_null<ClassTemplateDecl>(Reader.GetDecl(Record[Idx++])); + 
D->setPreviousDeclaration(PrevDecl); + if (PrevDecl == 0) { + // This ClassTemplateDecl owns a CommonPtr; read it. + + // FoldingSets are filled in VisitClassTemplateSpecializationDecl. + unsigned size = Record[Idx++]; + while (size--) + cast<ClassTemplateSpecializationDecl>(Reader.GetDecl(Record[Idx++])); + + size = Record[Idx++]; + while (size--) + cast<ClassTemplatePartialSpecializationDecl>( + Reader.GetDecl(Record[Idx++])); + + // InjectedClassNameType is computed. + + if (ClassTemplateDecl *CTD + = cast_or_null<ClassTemplateDecl>(Reader.GetDecl(Record[Idx++]))) { + D->setInstantiatedFromMemberTemplate(CTD); + if (Record[Idx++]) + D->setMemberSpecialization(); + } + } } void PCHDeclReader::VisitClassTemplateSpecializationDecl( ClassTemplateSpecializationDecl *D) { - assert(false && "cannot read ClassTemplateSpecializationDecl"); + VisitCXXRecordDecl(D); + + if (Decl *InstD = Reader.GetDecl(Record[Idx++])) { + if (ClassTemplateDecl *CTD = dyn_cast<ClassTemplateDecl>(InstD)) { + D->setInstantiationOf(CTD); + } else { + llvm::SmallVector<TemplateArgument, 8> TemplArgs; + Reader.ReadTemplateArgumentList(TemplArgs, Record, Idx); + D->setInstantiationOf(cast<ClassTemplatePartialSpecializationDecl>(InstD), + TemplArgs.data(), TemplArgs.size()); + } + } + + // Explicit info. + if (TypeSourceInfo *TyInfo = Reader.GetTypeSourceInfo(Record, Idx)) { + D->setTypeAsWritten(TyInfo); + D->setExternLoc(Reader.ReadSourceLocation(Record, Idx)); + D->setTemplateKeywordLoc(Reader.ReadSourceLocation(Record, Idx)); + } + + llvm::SmallVector<TemplateArgument, 8> TemplArgs; + Reader.ReadTemplateArgumentList(TemplArgs, Record, Idx); + D->initTemplateArgs(TemplArgs.data(), TemplArgs.size()); + SourceLocation POI = Reader.ReadSourceLocation(Record, Idx); + if (POI.isValid()) + D->setPointOfInstantiation(POI); + D->setSpecializationKind((TemplateSpecializationKind)Record[Idx++]); + + if (Record[Idx++]) { // IsKeptInFoldingSet. 
+ ClassTemplateDecl *CanonPattern + = cast<ClassTemplateDecl>(Reader.GetDecl(Record[Idx++])); + if (ClassTemplatePartialSpecializationDecl *Partial + = dyn_cast<ClassTemplatePartialSpecializationDecl>(D)) { + CanonPattern->getPartialSpecializations().InsertNode(Partial); + } else { + CanonPattern->getSpecializations().InsertNode(D); + } + } } void PCHDeclReader::VisitClassTemplatePartialSpecializationDecl( ClassTemplatePartialSpecializationDecl *D) { - assert(false && "cannot read ClassTemplatePartialSpecializationDecl"); + VisitClassTemplateSpecializationDecl(D); + + D->initTemplateParameters(Reader.ReadTemplateParameterList(Record, Idx)); + + TemplateArgumentListInfo ArgInfos; + unsigned NumArgs = Record[Idx++]; + while (NumArgs--) + ArgInfos.addArgument(Reader.ReadTemplateArgumentLoc(Record, Idx)); + D->initTemplateArgsAsWritten(ArgInfos); + + D->setSequenceNumber(Record[Idx++]); + + // These are read/set from/to the first declaration. + if (D->getPreviousDeclaration() == 0) { + D->setInstantiatedFromMember( + cast_or_null<ClassTemplatePartialSpecializationDecl>( + Reader.GetDecl(Record[Idx++]))); + if (Record[Idx++]) + D->setMemberSpecialization(); + } } -void PCHDeclReader::visitFunctionTemplateDecl(FunctionTemplateDecl *D) { - assert(false && "cannot read FunctionTemplateDecl"); +void PCHDeclReader::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) { + VisitTemplateDecl(D); + + D->IdentifierNamespace = Record[Idx++]; + FunctionTemplateDecl *PrevDecl = + cast_or_null<FunctionTemplateDecl>(Reader.GetDecl(Record[Idx++])); + D->setPreviousDeclaration(PrevDecl); + if (PrevDecl == 0) { + // This FunctionTemplateDecl owns a CommonPtr; read it. + + // Read the function specialization declarations. + // FunctionTemplateDecl's FunctionTemplateSpecializationInfos are filled + // through the specialized FunctionDecl's setFunctionTemplateSpecialization. 
+ unsigned NumSpecs = Record[Idx++]; + while (NumSpecs--) + Reader.GetDecl(Record[Idx++]); + + if (FunctionTemplateDecl *CTD + = cast_or_null<FunctionTemplateDecl>(Reader.GetDecl(Record[Idx++]))) { + D->setInstantiatedFromMemberTemplate(CTD); + if (Record[Idx++]) + D->setMemberSpecialization(); + } + } } void PCHDeclReader::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) { - assert(false && "cannot read TemplateTypeParmDecl"); + VisitTypeDecl(D); + + D->setDeclaredWithTypename(Record[Idx++]); + D->setParameterPack(Record[Idx++]); + + bool Inherited = Record[Idx++]; + TypeSourceInfo *DefArg = Reader.GetTypeSourceInfo(Record, Idx); + D->setDefaultArgument(DefArg, Inherited); } void PCHDeclReader::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) { - assert(false && "cannot read NonTypeTemplateParmDecl"); + VisitVarDecl(D); + // TemplateParmPosition. + D->setDepth(Record[Idx++]); + D->setPosition(Record[Idx++]); + // Rest of NonTypeTemplateParmDecl. + if (Record[Idx++]) { + Expr *DefArg = Reader.ReadExpr(); + bool Inherited = Record[Idx++]; + D->setDefaultArgument(DefArg, Inherited); + } } void PCHDeclReader::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) { - assert(false && "cannot read TemplateTemplateParmDecl"); + VisitTemplateDecl(D); + // TemplateParmPosition. + D->setDepth(Record[Idx++]); + D->setPosition(Record[Idx++]); + // Rest of TemplateTemplateParmDecl. 
+ TemplateArgumentLoc Arg = Reader.ReadTemplateArgumentLoc(Record, Idx); + bool IsInherited = Record[Idx++]; + D->setDefaultArgument(Arg, IsInherited); } void PCHDeclReader::VisitStaticAssertDecl(StaticAssertDecl *D) { @@ -641,24 +1049,24 @@ Attr *PCHReader::ReadAttributes() { (void)RecCode; #define SIMPLE_ATTR(Name) \ - case Attr::Name: \ + case attr::Name: \ New = ::new (*Context) Name##Attr(); \ break #define STRING_ATTR(Name) \ - case Attr::Name: \ + case attr::Name: \ New = ::new (*Context) Name##Attr(*Context, ReadString(Record, Idx)); \ break #define UNSIGNED_ATTR(Name) \ - case Attr::Name: \ + case attr::Name: \ New = ::new (*Context) Name##Attr(Record[Idx++]); \ break Attr *Attrs = 0; while (Idx < Record.size()) { Attr *New = 0; - Attr::Kind Kind = (Attr::Kind)Record[Idx++]; + attr::Kind Kind = (attr::Kind)Record[Idx++]; bool IsInherited = Record[Idx++]; switch (Kind) { @@ -674,14 +1082,14 @@ Attr *PCHReader::ReadAttributes() { STRING_ATTR(AsmLabel); SIMPLE_ATTR(BaseCheck); - case Attr::Blocks: + case attr::Blocks: New = ::new (*Context) BlocksAttr( (BlocksAttr::BlocksAttrTypes)Record[Idx++]); break; SIMPLE_ATTR(CDecl); - case Attr::Cleanup: + case attr::Cleanup: New = ::new (*Context) CleanupAttr( cast<FunctionDecl>(GetDecl(Record[Idx++]))); break; @@ -695,7 +1103,7 @@ Attr *PCHReader::ReadAttributes() { SIMPLE_ATTR(FastCall); SIMPLE_ATTR(Final); - case Attr::Format: { + case attr::Format: { std::string Type = ReadString(Record, Idx); unsigned FormatIdx = Record[Idx++]; unsigned FirstArg = Record[Idx++]; @@ -703,13 +1111,13 @@ Attr *PCHReader::ReadAttributes() { break; } - case Attr::FormatArg: { + case attr::FormatArg: { unsigned FormatIdx = Record[Idx++]; New = ::new (*Context) FormatArgAttr(FormatIdx); break; } - case Attr::Sentinel: { + case attr::Sentinel: { int sentinel = Record[Idx++]; int nullPos = Record[Idx++]; New = ::new (*Context) SentinelAttr(sentinel, nullPos); @@ -719,15 +1127,15 @@ Attr *PCHReader::ReadAttributes() { 
SIMPLE_ATTR(GNUInline); SIMPLE_ATTR(Hiding); - case Attr::IBActionKind: + case attr::IBAction: New = ::new (*Context) IBActionAttr(); break; - case Attr::IBOutletKind: + case attr::IBOutlet: New = ::new (*Context) IBOutletAttr(); break; - case Attr::IBOutletCollectionKind: { + case attr::IBOutletCollection: { ObjCInterfaceDecl *D = cast_or_null<ObjCInterfaceDecl>(GetDecl(Record[Idx++])); New = ::new (*Context) IBOutletCollectionAttr(D); @@ -740,7 +1148,7 @@ Attr *PCHReader::ReadAttributes() { SIMPLE_ATTR(NoReturn); SIMPLE_ATTR(NoThrow); - case Attr::NonNull: { + case attr::NonNull: { unsigned Size = Record[Idx++]; llvm::SmallVector<unsigned, 16> ArgNums; ArgNums.insert(ArgNums.end(), &Record[Idx], &Record[Idx] + Size); @@ -749,7 +1157,7 @@ Attr *PCHReader::ReadAttributes() { break; } - case Attr::ReqdWorkGroupSize: { + case attr::ReqdWorkGroupSize: { unsigned X = Record[Idx++]; unsigned Y = Record[Idx++]; unsigned Z = Record[Idx++]; @@ -777,7 +1185,7 @@ Attr *PCHReader::ReadAttributes() { SIMPLE_ATTR(Unused); SIMPLE_ATTR(Used); - case Attr::Visibility: + case attr::Visibility: New = ::new (*Context) VisibilityAttr( (VisibilityAttr::VisibilityTypes)Record[Idx++]); break; @@ -848,6 +1256,8 @@ Decl *PCHReader::ReadDeclRecord(uint64_t Offset, unsigned Index) { // after reading this declaration. SavedStreamPosition SavedPosition(DeclsCursor); + ReadingKindTracker ReadingKind(Read_Decl, *this); + // Note that we are loading a declaration record. 
LoadingTypeOrDecl Loading(*this); @@ -872,11 +1282,10 @@ Decl *PCHReader::ReadDeclRecord(uint64_t Offset, unsigned Index) { D = TypedefDecl::Create(*Context, 0, SourceLocation(), 0, 0); break; case pch::DECL_ENUM: - D = EnumDecl::Create(*Context, 0, SourceLocation(), 0, SourceLocation(), 0); + D = EnumDecl::Create(*Context, Decl::EmptyShell()); break; case pch::DECL_RECORD: - D = RecordDecl::Create(*Context, TTK_Struct, 0, SourceLocation(), - 0, SourceLocation(), 0); + D = RecordDecl::Create(*Context, Decl::EmptyShell()); break; case pch::DECL_ENUM_CONSTANT: D = EnumConstantDecl::Create(*Context, 0, SourceLocation(), 0, QualType(), @@ -923,8 +1332,7 @@ Decl *PCHReader::ReadDeclRecord(uint64_t Offset, unsigned Index) { DeclarationName()); break; case pch::DECL_CXX_RECORD: - D = CXXRecordDecl::Create(*Context, TTK_Struct, 0, - SourceLocation(), 0, SourceLocation(), 0); + D = CXXRecordDecl::Create(*Context, Decl::EmptyShell()); break; case pch::DECL_CXX_METHOD: D = CXXMethodDecl::Create(*Context, 0, SourceLocation(), DeclarationName(), @@ -939,36 +1347,40 @@ Decl *PCHReader::ReadDeclRecord(uint64_t Offset, unsigned Index) { case pch::DECL_CXX_CONVERSION: D = CXXConversionDecl::Create(*Context, Decl::EmptyShell()); break; + case pch::DECL_ACCESS_SPEC: + D = AccessSpecDecl::Create(*Context, AS_none, 0, SourceLocation(), + SourceLocation()); + break; case pch::DECL_FRIEND: - assert(false && "cannot read FriendDecl"); + D = FriendDecl::Create(*Context, Decl::EmptyShell()); break; case pch::DECL_FRIEND_TEMPLATE: assert(false && "cannot read FriendTemplateDecl"); break; - case pch::DECL_TEMPLATE: - // FIXME: Should TemplateDecl be ABSTRACT_DECL??? 
- assert(false && "TemplateDecl should be abstract!"); - break; case pch::DECL_CLASS_TEMPLATE: - assert(false && "cannot read ClassTemplateDecl"); + D = ClassTemplateDecl::Create(*Context, 0, SourceLocation(), + DeclarationName(), 0, 0, 0); break; case pch::DECL_CLASS_TEMPLATE_SPECIALIZATION: - assert(false && "cannot read ClasstemplateSpecializationDecl"); + D = ClassTemplateSpecializationDecl::Create(*Context, Decl::EmptyShell()); break; case pch::DECL_CLASS_TEMPLATE_PARTIAL_SPECIALIZATION: - assert(false && "cannot read ClassTemplatePartialSpecializationDecl"); + D = ClassTemplatePartialSpecializationDecl::Create(*Context, + Decl::EmptyShell()); break; case pch::DECL_FUNCTION_TEMPLATE: - assert(false && "cannot read FunctionTemplateDecl"); + D = FunctionTemplateDecl::Create(*Context, 0, SourceLocation(), + DeclarationName(), 0, 0); break; case pch::DECL_TEMPLATE_TYPE_PARM: - assert(false && "cannot read TemplateTypeParmDecl"); + D = TemplateTypeParmDecl::Create(*Context, Decl::EmptyShell()); break; case pch::DECL_NON_TYPE_TEMPLATE_PARM: - assert(false && "cannot read NonTypeTemplateParmDecl"); + D = NonTypeTemplateParmDecl::Create(*Context, 0, SourceLocation(), 0,0,0, + QualType(),0); break; case pch::DECL_TEMPLATE_TEMPLATE_PARM: - assert(false && "cannot read TemplateTemplateParmDecl"); + D = TemplateTemplateParmDecl::Create(*Context, 0, SourceLocation(),0,0,0,0); break; case pch::DECL_STATIC_ASSERT: assert(false && "cannot read StaticAssertDecl"); @@ -1013,7 +1425,7 @@ Decl *PCHReader::ReadDeclRecord(uint64_t Offset, unsigned Index) { break; case pch::DECL_OBJC_PROPERTY: D = ObjCPropertyDecl::Create(*Context, 0, SourceLocation(), 0, SourceLocation(), - QualType()); + 0); break; case pch::DECL_OBJC_PROPERTY_IMPL: D = ObjCPropertyImplDecl::Create(*Context, 0, SourceLocation(), @@ -1062,16 +1474,11 @@ Decl *PCHReader::ReadDeclRecord(uint64_t Offset, unsigned Index) { assert(Idx == Record.size()); // If we have deserialized a declaration that has a definition the 
- // AST consumer might need to know about, notify the consumer - // about that definition now or queue it for later. - if (isConsumerInterestedIn(D)) { - if (Consumer) { - DeclGroupRef DG(D); - Consumer->HandleTopLevelDecl(DG); - } else { - InterestingDecls.push_back(D); - } - } + // AST consumer might need to know about, queue it. + // We don't pass it to the consumer immediately because we may be in recursive + // loading, and some declarations may still be initializing. + if (isConsumerInterestedIn(D)) + InterestingDecls.push_back(D); return D; } diff --git a/contrib/llvm/tools/clang/lib/Frontend/PCHReaderStmt.cpp b/contrib/llvm/tools/clang/lib/Frontend/PCHReaderStmt.cpp index 3931adb..ace62d7 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/PCHReaderStmt.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/PCHReaderStmt.cpp @@ -17,17 +17,17 @@ #include "clang/AST/StmtVisitor.h" using namespace clang; -namespace { - class PCHStmtReader : public StmtVisitor<PCHStmtReader, unsigned> { +namespace clang { + + class PCHStmtReader : public StmtVisitor<PCHStmtReader> { PCHReader &Reader; const PCHReader::RecordData &Record; unsigned &Idx; - llvm::SmallVectorImpl<Stmt *> &StmtStack; public: PCHStmtReader(PCHReader &Reader, const PCHReader::RecordData &Record, - unsigned &Idx, llvm::SmallVectorImpl<Stmt *> &StmtStack) - : Reader(Reader), Record(Record), Idx(Idx), StmtStack(StmtStack) { } + unsigned &Idx) + : Reader(Reader), Record(Record), Idx(Idx) { } /// \brief The number of record fields required for the Stmt class /// itself. @@ -36,180 +36,201 @@ namespace { /// \brief The number of record fields required for the Expr class /// itself. static const unsigned NumExprFields = NumStmtFields + 3; - - // Each of the Visit* functions reads in part of the expression - // from the given record and the current expression stack, then - // return the total number of operands that it read from the - // expression stack. 
- - unsigned VisitStmt(Stmt *S); - unsigned VisitNullStmt(NullStmt *S); - unsigned VisitCompoundStmt(CompoundStmt *S); - unsigned VisitSwitchCase(SwitchCase *S); - unsigned VisitCaseStmt(CaseStmt *S); - unsigned VisitDefaultStmt(DefaultStmt *S); - unsigned VisitLabelStmt(LabelStmt *S); - unsigned VisitIfStmt(IfStmt *S); - unsigned VisitSwitchStmt(SwitchStmt *S); - unsigned VisitWhileStmt(WhileStmt *S); - unsigned VisitDoStmt(DoStmt *S); - unsigned VisitForStmt(ForStmt *S); - unsigned VisitGotoStmt(GotoStmt *S); - unsigned VisitIndirectGotoStmt(IndirectGotoStmt *S); - unsigned VisitContinueStmt(ContinueStmt *S); - unsigned VisitBreakStmt(BreakStmt *S); - unsigned VisitReturnStmt(ReturnStmt *S); - unsigned VisitDeclStmt(DeclStmt *S); - unsigned VisitAsmStmt(AsmStmt *S); - unsigned VisitExpr(Expr *E); - unsigned VisitPredefinedExpr(PredefinedExpr *E); - unsigned VisitDeclRefExpr(DeclRefExpr *E); - unsigned VisitIntegerLiteral(IntegerLiteral *E); - unsigned VisitFloatingLiteral(FloatingLiteral *E); - unsigned VisitImaginaryLiteral(ImaginaryLiteral *E); - unsigned VisitStringLiteral(StringLiteral *E); - unsigned VisitCharacterLiteral(CharacterLiteral *E); - unsigned VisitParenExpr(ParenExpr *E); - unsigned VisitUnaryOperator(UnaryOperator *E); - unsigned VisitOffsetOfExpr(OffsetOfExpr *E); - unsigned VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E); - unsigned VisitArraySubscriptExpr(ArraySubscriptExpr *E); - unsigned VisitCallExpr(CallExpr *E); - unsigned VisitMemberExpr(MemberExpr *E); - unsigned VisitCastExpr(CastExpr *E); - unsigned VisitBinaryOperator(BinaryOperator *E); - unsigned VisitCompoundAssignOperator(CompoundAssignOperator *E); - unsigned VisitConditionalOperator(ConditionalOperator *E); - unsigned VisitImplicitCastExpr(ImplicitCastExpr *E); - unsigned VisitExplicitCastExpr(ExplicitCastExpr *E); - unsigned VisitCStyleCastExpr(CStyleCastExpr *E); - unsigned VisitCompoundLiteralExpr(CompoundLiteralExpr *E); - unsigned 
VisitExtVectorElementExpr(ExtVectorElementExpr *E); - unsigned VisitInitListExpr(InitListExpr *E); - unsigned VisitDesignatedInitExpr(DesignatedInitExpr *E); - unsigned VisitImplicitValueInitExpr(ImplicitValueInitExpr *E); - unsigned VisitVAArgExpr(VAArgExpr *E); - unsigned VisitAddrLabelExpr(AddrLabelExpr *E); - unsigned VisitStmtExpr(StmtExpr *E); - unsigned VisitTypesCompatibleExpr(TypesCompatibleExpr *E); - unsigned VisitChooseExpr(ChooseExpr *E); - unsigned VisitGNUNullExpr(GNUNullExpr *E); - unsigned VisitShuffleVectorExpr(ShuffleVectorExpr *E); - unsigned VisitBlockExpr(BlockExpr *E); - unsigned VisitBlockDeclRefExpr(BlockDeclRefExpr *E); - unsigned VisitObjCStringLiteral(ObjCStringLiteral *E); - unsigned VisitObjCEncodeExpr(ObjCEncodeExpr *E); - unsigned VisitObjCSelectorExpr(ObjCSelectorExpr *E); - unsigned VisitObjCProtocolExpr(ObjCProtocolExpr *E); - unsigned VisitObjCIvarRefExpr(ObjCIvarRefExpr *E); - unsigned VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E); - unsigned VisitObjCImplicitSetterGetterRefExpr( + + /// \brief Read and initialize a ExplicitTemplateArgumentList structure. 
+ void ReadExplicitTemplateArgumentList(ExplicitTemplateArgumentList &ArgList, + unsigned NumTemplateArgs); + + void VisitStmt(Stmt *S); + void VisitNullStmt(NullStmt *S); + void VisitCompoundStmt(CompoundStmt *S); + void VisitSwitchCase(SwitchCase *S); + void VisitCaseStmt(CaseStmt *S); + void VisitDefaultStmt(DefaultStmt *S); + void VisitLabelStmt(LabelStmt *S); + void VisitIfStmt(IfStmt *S); + void VisitSwitchStmt(SwitchStmt *S); + void VisitWhileStmt(WhileStmt *S); + void VisitDoStmt(DoStmt *S); + void VisitForStmt(ForStmt *S); + void VisitGotoStmt(GotoStmt *S); + void VisitIndirectGotoStmt(IndirectGotoStmt *S); + void VisitContinueStmt(ContinueStmt *S); + void VisitBreakStmt(BreakStmt *S); + void VisitReturnStmt(ReturnStmt *S); + void VisitDeclStmt(DeclStmt *S); + void VisitAsmStmt(AsmStmt *S); + void VisitExpr(Expr *E); + void VisitPredefinedExpr(PredefinedExpr *E); + void VisitDeclRefExpr(DeclRefExpr *E); + void VisitIntegerLiteral(IntegerLiteral *E); + void VisitFloatingLiteral(FloatingLiteral *E); + void VisitImaginaryLiteral(ImaginaryLiteral *E); + void VisitStringLiteral(StringLiteral *E); + void VisitCharacterLiteral(CharacterLiteral *E); + void VisitParenExpr(ParenExpr *E); + void VisitParenListExpr(ParenListExpr *E); + void VisitUnaryOperator(UnaryOperator *E); + void VisitOffsetOfExpr(OffsetOfExpr *E); + void VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E); + void VisitArraySubscriptExpr(ArraySubscriptExpr *E); + void VisitCallExpr(CallExpr *E); + void VisitMemberExpr(MemberExpr *E); + void VisitCastExpr(CastExpr *E); + void VisitBinaryOperator(BinaryOperator *E); + void VisitCompoundAssignOperator(CompoundAssignOperator *E); + void VisitConditionalOperator(ConditionalOperator *E); + void VisitImplicitCastExpr(ImplicitCastExpr *E); + void VisitExplicitCastExpr(ExplicitCastExpr *E); + void VisitCStyleCastExpr(CStyleCastExpr *E); + void VisitCompoundLiteralExpr(CompoundLiteralExpr *E); + void VisitExtVectorElementExpr(ExtVectorElementExpr *E); + void 
VisitInitListExpr(InitListExpr *E); + void VisitDesignatedInitExpr(DesignatedInitExpr *E); + void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E); + void VisitVAArgExpr(VAArgExpr *E); + void VisitAddrLabelExpr(AddrLabelExpr *E); + void VisitStmtExpr(StmtExpr *E); + void VisitTypesCompatibleExpr(TypesCompatibleExpr *E); + void VisitChooseExpr(ChooseExpr *E); + void VisitGNUNullExpr(GNUNullExpr *E); + void VisitShuffleVectorExpr(ShuffleVectorExpr *E); + void VisitBlockExpr(BlockExpr *E); + void VisitBlockDeclRefExpr(BlockDeclRefExpr *E); + void VisitObjCStringLiteral(ObjCStringLiteral *E); + void VisitObjCEncodeExpr(ObjCEncodeExpr *E); + void VisitObjCSelectorExpr(ObjCSelectorExpr *E); + void VisitObjCProtocolExpr(ObjCProtocolExpr *E); + void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E); + void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E); + void VisitObjCImplicitSetterGetterRefExpr( ObjCImplicitSetterGetterRefExpr *E); - unsigned VisitObjCMessageExpr(ObjCMessageExpr *E); - unsigned VisitObjCSuperExpr(ObjCSuperExpr *E); - unsigned VisitObjCIsaExpr(ObjCIsaExpr *E); - - unsigned VisitObjCForCollectionStmt(ObjCForCollectionStmt *); - unsigned VisitObjCAtCatchStmt(ObjCAtCatchStmt *); - unsigned VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *); - unsigned VisitObjCAtTryStmt(ObjCAtTryStmt *); - unsigned VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *); - unsigned VisitObjCAtThrowStmt(ObjCAtThrowStmt *); - - unsigned VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E); - unsigned VisitCXXConstructExpr(CXXConstructExpr *E); - unsigned VisitCXXNamedCastExpr(CXXNamedCastExpr *E); - unsigned VisitCXXStaticCastExpr(CXXStaticCastExpr *E); - unsigned VisitCXXDynamicCastExpr(CXXDynamicCastExpr *E); - unsigned VisitCXXReinterpretCastExpr(CXXReinterpretCastExpr *E); - unsigned VisitCXXConstCastExpr(CXXConstCastExpr *E); - unsigned VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E); - unsigned VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E); - unsigned 
VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E); - unsigned VisitCXXTypeidExpr(CXXTypeidExpr *E); - unsigned VisitCXXThisExpr(CXXThisExpr *E); - unsigned VisitCXXThrowExpr(CXXThrowExpr *E); - unsigned VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E); - unsigned VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E); + void VisitObjCMessageExpr(ObjCMessageExpr *E); + void VisitObjCSuperExpr(ObjCSuperExpr *E); + void VisitObjCIsaExpr(ObjCIsaExpr *E); + + void VisitObjCForCollectionStmt(ObjCForCollectionStmt *); + void VisitObjCAtCatchStmt(ObjCAtCatchStmt *); + void VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *); + void VisitObjCAtTryStmt(ObjCAtTryStmt *); + void VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *); + void VisitObjCAtThrowStmt(ObjCAtThrowStmt *); + + void VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E); + void VisitCXXConstructExpr(CXXConstructExpr *E); + void VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E); + void VisitCXXNamedCastExpr(CXXNamedCastExpr *E); + void VisitCXXStaticCastExpr(CXXStaticCastExpr *E); + void VisitCXXDynamicCastExpr(CXXDynamicCastExpr *E); + void VisitCXXReinterpretCastExpr(CXXReinterpretCastExpr *E); + void VisitCXXConstCastExpr(CXXConstCastExpr *E); + void VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E); + void VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E); + void VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E); + void VisitCXXTypeidExpr(CXXTypeidExpr *E); + void VisitCXXThisExpr(CXXThisExpr *E); + void VisitCXXThrowExpr(CXXThrowExpr *E); + void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E); + void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E); + void VisitCXXBindReferenceExpr(CXXBindReferenceExpr *E); - unsigned VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E); - unsigned VisitCXXNewExpr(CXXNewExpr *E); + void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E); + void VisitCXXNewExpr(CXXNewExpr *E); + void VisitCXXDeleteExpr(CXXDeleteExpr *E); + void 
VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E); - unsigned VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E); + void VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E); + + void VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E); + void VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E); + void VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E); + + void VisitOverloadExpr(OverloadExpr *E); + void VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E); + void VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E); + + void VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E); }; } -unsigned PCHStmtReader::VisitStmt(Stmt *S) { +void PCHStmtReader:: +ReadExplicitTemplateArgumentList(ExplicitTemplateArgumentList &ArgList, + unsigned NumTemplateArgs) { + TemplateArgumentListInfo ArgInfo; + ArgInfo.setLAngleLoc(Reader.ReadSourceLocation(Record, Idx)); + ArgInfo.setRAngleLoc(Reader.ReadSourceLocation(Record, Idx)); + for (unsigned i = 0; i != NumTemplateArgs; ++i) + ArgInfo.addArgument(Reader.ReadTemplateArgumentLoc(Record, Idx)); + ArgList.initializeFrom(ArgInfo); +} + +void PCHStmtReader::VisitStmt(Stmt *S) { assert(Idx == NumStmtFields && "Incorrect statement field count"); - return 0; } -unsigned PCHStmtReader::VisitNullStmt(NullStmt *S) { +void PCHStmtReader::VisitNullStmt(NullStmt *S) { VisitStmt(S); S->setSemiLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 0; } -unsigned PCHStmtReader::VisitCompoundStmt(CompoundStmt *S) { +void PCHStmtReader::VisitCompoundStmt(CompoundStmt *S) { VisitStmt(S); + llvm::SmallVector<Stmt *, 16> Stmts; unsigned NumStmts = Record[Idx++]; - S->setStmts(*Reader.getContext(), - StmtStack.data() + StmtStack.size() - NumStmts, NumStmts); + while (NumStmts--) + Stmts.push_back(Reader.ReadSubStmt()); + S->setStmts(*Reader.getContext(), Stmts.data(), Stmts.size()); S->setLBracLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); 
S->setRBracLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return NumStmts; } -unsigned PCHStmtReader::VisitSwitchCase(SwitchCase *S) { +void PCHStmtReader::VisitSwitchCase(SwitchCase *S) { VisitStmt(S); Reader.RecordSwitchCaseID(S, Record[Idx++]); - return 0; } -unsigned PCHStmtReader::VisitCaseStmt(CaseStmt *S) { +void PCHStmtReader::VisitCaseStmt(CaseStmt *S) { VisitSwitchCase(S); - S->setLHS(cast<Expr>(StmtStack[StmtStack.size() - 3])); - S->setRHS(cast_or_null<Expr>(StmtStack[StmtStack.size() - 2])); - S->setSubStmt(StmtStack.back()); + S->setLHS(Reader.ReadSubExpr()); + S->setRHS(Reader.ReadSubExpr()); + S->setSubStmt(Reader.ReadSubStmt()); S->setCaseLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); S->setEllipsisLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); S->setColonLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 3; } -unsigned PCHStmtReader::VisitDefaultStmt(DefaultStmt *S) { +void PCHStmtReader::VisitDefaultStmt(DefaultStmt *S) { VisitSwitchCase(S); - S->setSubStmt(StmtStack.back()); + S->setSubStmt(Reader.ReadSubStmt()); S->setDefaultLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); S->setColonLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 1; } -unsigned PCHStmtReader::VisitLabelStmt(LabelStmt *S) { +void PCHStmtReader::VisitLabelStmt(LabelStmt *S) { VisitStmt(S); S->setID(Reader.GetIdentifierInfo(Record, Idx)); - S->setSubStmt(StmtStack.back()); + S->setSubStmt(Reader.ReadSubStmt()); S->setIdentLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); Reader.RecordLabelStmt(S, Record[Idx++]); - return 1; } -unsigned PCHStmtReader::VisitIfStmt(IfStmt *S) { +void PCHStmtReader::VisitIfStmt(IfStmt *S) { VisitStmt(S); - S->setConditionVariable(cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++]))); - S->setCond(cast<Expr>(StmtStack[StmtStack.size() - 3])); - S->setThen(StmtStack[StmtStack.size() - 2]); - S->setElse(StmtStack[StmtStack.size() - 1]); + 
S->setConditionVariable(*Reader.getContext(), + cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++]))); + S->setCond(Reader.ReadSubExpr()); + S->setThen(Reader.ReadSubStmt()); + S->setElse(Reader.ReadSubStmt()); S->setIfLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); S->setElseLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 3; } -unsigned PCHStmtReader::VisitSwitchStmt(SwitchStmt *S) { +void PCHStmtReader::VisitSwitchStmt(SwitchStmt *S) { VisitStmt(S); - S->setConditionVariable(cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++]))); - S->setCond(cast<Expr>(StmtStack[StmtStack.size() - 2])); - S->setBody(StmtStack.back()); + S->setConditionVariable(*Reader.getContext(), + cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++]))); + S->setCond(Reader.ReadSubExpr()); + S->setBody(Reader.ReadSubStmt()); S->setSwitchLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); SwitchCase *PrevSC = 0; for (unsigned N = Record.size(); Idx != N; ++Idx) { @@ -224,78 +245,71 @@ unsigned PCHStmtReader::VisitSwitchStmt(SwitchStmt *S) { SC->Retain(); PrevSC = SC; } - return 2; } -unsigned PCHStmtReader::VisitWhileStmt(WhileStmt *S) { +void PCHStmtReader::VisitWhileStmt(WhileStmt *S) { VisitStmt(S); - S->setConditionVariable(cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++]))); - S->setCond(cast_or_null<Expr>(StmtStack[StmtStack.size() - 2])); - S->setBody(StmtStack.back()); + S->setConditionVariable(*Reader.getContext(), + cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++]))); + S->setCond(Reader.ReadSubExpr()); + S->setBody(Reader.ReadSubStmt()); S->setWhileLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 2; } -unsigned PCHStmtReader::VisitDoStmt(DoStmt *S) { +void PCHStmtReader::VisitDoStmt(DoStmt *S) { VisitStmt(S); - S->setCond(cast_or_null<Expr>(StmtStack[StmtStack.size() - 2])); - S->setBody(StmtStack.back()); + S->setCond(Reader.ReadSubExpr()); + S->setBody(Reader.ReadSubStmt()); 
S->setDoLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); S->setWhileLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); S->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 2; } -unsigned PCHStmtReader::VisitForStmt(ForStmt *S) { +void PCHStmtReader::VisitForStmt(ForStmt *S) { VisitStmt(S); - S->setInit(StmtStack[StmtStack.size() - 4]); - S->setCond(cast_or_null<Expr>(StmtStack[StmtStack.size() - 3])); - S->setConditionVariable(cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++]))); - S->setInc(cast_or_null<Expr>(StmtStack[StmtStack.size() - 2])); - S->setBody(StmtStack.back()); + S->setInit(Reader.ReadSubStmt()); + S->setCond(Reader.ReadSubExpr()); + S->setConditionVariable(*Reader.getContext(), + cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++]))); + S->setInc(Reader.ReadSubExpr()); + S->setBody(Reader.ReadSubStmt()); S->setForLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); S->setLParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); S->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 4; } -unsigned PCHStmtReader::VisitGotoStmt(GotoStmt *S) { +void PCHStmtReader::VisitGotoStmt(GotoStmt *S) { VisitStmt(S); Reader.SetLabelOf(S, Record[Idx++]); S->setGotoLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); S->setLabelLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 0; } -unsigned PCHStmtReader::VisitIndirectGotoStmt(IndirectGotoStmt *S) { +void PCHStmtReader::VisitIndirectGotoStmt(IndirectGotoStmt *S) { VisitStmt(S); S->setGotoLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); S->setStarLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - S->setTarget(cast_or_null<Expr>(StmtStack.back())); - return 1; + S->setTarget(Reader.ReadSubExpr()); } -unsigned PCHStmtReader::VisitContinueStmt(ContinueStmt *S) { +void PCHStmtReader::VisitContinueStmt(ContinueStmt *S) { VisitStmt(S); S->setContinueLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 0; } 
-unsigned PCHStmtReader::VisitBreakStmt(BreakStmt *S) { +void PCHStmtReader::VisitBreakStmt(BreakStmt *S) { VisitStmt(S); S->setBreakLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 0; } -unsigned PCHStmtReader::VisitReturnStmt(ReturnStmt *S) { +void PCHStmtReader::VisitReturnStmt(ReturnStmt *S) { VisitStmt(S); - S->setRetValue(cast_or_null<Expr>(StmtStack.back())); + S->setRetValue(Reader.ReadSubExpr()); S->setReturnLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); S->setNRVOCandidate(cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++]))); - return 1; } -unsigned PCHStmtReader::VisitDeclStmt(DeclStmt *S) { +void PCHStmtReader::VisitDeclStmt(DeclStmt *S) { VisitStmt(S); S->setStartLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); S->setEndLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); @@ -312,10 +326,9 @@ unsigned PCHStmtReader::VisitDeclStmt(DeclStmt *S) { Decls.data(), Decls.size()))); } - return 0; } -unsigned PCHStmtReader::VisitAsmStmt(AsmStmt *S) { +void PCHStmtReader::VisitAsmStmt(AsmStmt *S) { VisitStmt(S); unsigned NumOutputs = Record[Idx++]; unsigned NumInputs = Record[Idx++]; @@ -326,9 +339,7 @@ unsigned PCHStmtReader::VisitAsmStmt(AsmStmt *S) { S->setSimple(Record[Idx++]); S->setMSAsm(Record[Idx++]); - unsigned StackIdx - = StmtStack.size() - (NumOutputs*2 + NumInputs*2 + NumClobbers + 1); - S->setAsmString(cast_or_null<StringLiteral>(StmtStack[StackIdx++])); + S->setAsmString(cast_or_null<StringLiteral>(Reader.ReadSubStmt())); // Outputs and inputs llvm::SmallVector<IdentifierInfo *, 16> Names; @@ -336,71 +347,76 @@ unsigned PCHStmtReader::VisitAsmStmt(AsmStmt *S) { llvm::SmallVector<Stmt*, 16> Exprs; for (unsigned I = 0, N = NumOutputs + NumInputs; I != N; ++I) { Names.push_back(Reader.GetIdentifierInfo(Record, Idx)); - Constraints.push_back(cast_or_null<StringLiteral>(StmtStack[StackIdx++])); - Exprs.push_back(StmtStack[StackIdx++]); + Constraints.push_back(cast_or_null<StringLiteral>(Reader.ReadSubStmt())); + 
Exprs.push_back(Reader.ReadSubStmt()); } // Constraints llvm::SmallVector<StringLiteral*, 16> Clobbers; for (unsigned I = 0; I != NumClobbers; ++I) - Clobbers.push_back(cast_or_null<StringLiteral>(StmtStack[StackIdx++])); + Clobbers.push_back(cast_or_null<StringLiteral>(Reader.ReadSubStmt())); S->setOutputsAndInputsAndClobbers(*Reader.getContext(), Names.data(), Constraints.data(), Exprs.data(), NumOutputs, NumInputs, Clobbers.data(), NumClobbers); - - assert(StackIdx == StmtStack.size() && "Error deserializing AsmStmt"); - return NumOutputs*2 + NumInputs*2 + NumClobbers + 1; } -unsigned PCHStmtReader::VisitExpr(Expr *E) { +void PCHStmtReader::VisitExpr(Expr *E) { VisitStmt(E); E->setType(Reader.GetType(Record[Idx++])); E->setTypeDependent(Record[Idx++]); E->setValueDependent(Record[Idx++]); assert(Idx == NumExprFields && "Incorrect expression field count"); - return 0; } -unsigned PCHStmtReader::VisitPredefinedExpr(PredefinedExpr *E) { +void PCHStmtReader::VisitPredefinedExpr(PredefinedExpr *E) { VisitExpr(E); E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setIdentType((PredefinedExpr::IdentType)Record[Idx++]); - return 0; } -unsigned PCHStmtReader::VisitDeclRefExpr(DeclRefExpr *E) { +void PCHStmtReader::VisitDeclRefExpr(DeclRefExpr *E) { VisitExpr(E); + + bool HasQualifier = Record[Idx++]; + unsigned NumTemplateArgs = Record[Idx++]; + + E->DecoratedD.setInt((HasQualifier? DeclRefExpr::HasQualifierFlag : 0) | + (NumTemplateArgs ? 
DeclRefExpr::HasExplicitTemplateArgumentListFlag : 0)); + + if (HasQualifier) { + E->getNameQualifier()->NNS = Reader.ReadNestedNameSpecifier(Record, Idx); + E->getNameQualifier()->Range = Reader.ReadSourceRange(Record, Idx); + } + + if (NumTemplateArgs) + ReadExplicitTemplateArgumentList(*E->getExplicitTemplateArgumentList(), + NumTemplateArgs); + E->setDecl(cast<ValueDecl>(Reader.GetDecl(Record[Idx++]))); - E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++])); - // FIXME: read qualifier - // FIXME: read explicit template arguments - return 0; + E->setLocation(Reader.ReadSourceLocation(Record, Idx)); } -unsigned PCHStmtReader::VisitIntegerLiteral(IntegerLiteral *E) { +void PCHStmtReader::VisitIntegerLiteral(IntegerLiteral *E) { VisitExpr(E); E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setValue(Reader.ReadAPInt(Record, Idx)); - return 0; } -unsigned PCHStmtReader::VisitFloatingLiteral(FloatingLiteral *E) { +void PCHStmtReader::VisitFloatingLiteral(FloatingLiteral *E) { VisitExpr(E); E->setValue(Reader.ReadAPFloat(Record, Idx)); E->setExact(Record[Idx++]); E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 0; } -unsigned PCHStmtReader::VisitImaginaryLiteral(ImaginaryLiteral *E) { +void PCHStmtReader::VisitImaginaryLiteral(ImaginaryLiteral *E) { VisitExpr(E); - E->setSubExpr(cast<Expr>(StmtStack.back())); - return 1; + E->setSubExpr(Reader.ReadSubExpr()); } -unsigned PCHStmtReader::VisitStringLiteral(StringLiteral *E) { +void PCHStmtReader::VisitStringLiteral(StringLiteral *E) { VisitExpr(E); unsigned Len = Record[Idx++]; assert(Record[Idx] == E->getNumConcatenated() && @@ -416,35 +432,41 @@ unsigned PCHStmtReader::VisitStringLiteral(StringLiteral *E) { // Read source locations for (unsigned I = 0, N = E->getNumConcatenated(); I != N; ++I) E->setStrTokenLoc(I, SourceLocation::getFromRawEncoding(Record[Idx++])); - - return 0; } -unsigned PCHStmtReader::VisitCharacterLiteral(CharacterLiteral *E) { +void 
PCHStmtReader::VisitCharacterLiteral(CharacterLiteral *E) { VisitExpr(E); E->setValue(Record[Idx++]); E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setWide(Record[Idx++]); - return 0; } -unsigned PCHStmtReader::VisitParenExpr(ParenExpr *E) { +void PCHStmtReader::VisitParenExpr(ParenExpr *E) { VisitExpr(E); E->setLParen(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setRParen(SourceLocation::getFromRawEncoding(Record[Idx++])); - E->setSubExpr(cast<Expr>(StmtStack.back())); - return 1; + E->setSubExpr(Reader.ReadSubExpr()); +} + +void PCHStmtReader::VisitParenListExpr(ParenListExpr *E) { + VisitExpr(E); + unsigned NumExprs = Record[Idx++]; + E->Exprs = new (*Reader.getContext()) Stmt*[NumExprs]; + for (unsigned i = 0; i != NumExprs; ++i) + E->Exprs[i] = Reader.ReadSubStmt(); + E->NumExprs = NumExprs; + E->LParenLoc = Reader.ReadSourceLocation(Record, Idx); + E->RParenLoc = Reader.ReadSourceLocation(Record, Idx); } -unsigned PCHStmtReader::VisitUnaryOperator(UnaryOperator *E) { +void PCHStmtReader::VisitUnaryOperator(UnaryOperator *E) { VisitExpr(E); - E->setSubExpr(cast<Expr>(StmtStack.back())); + E->setSubExpr(Reader.ReadSubExpr()); E->setOpcode((UnaryOperator::Opcode)Record[Idx++]); E->setOperatorLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 1; } -unsigned PCHStmtReader::VisitOffsetOfExpr(OffsetOfExpr *E) { +void PCHStmtReader::VisitOffsetOfExpr(OffsetOfExpr *E) { typedef OffsetOfExpr::OffsetOfNode Node; VisitExpr(E); assert(E->getNumComponents() == Record[Idx]); @@ -482,153 +504,141 @@ unsigned PCHStmtReader::VisitOffsetOfExpr(OffsetOfExpr *E) { } for (unsigned I = 0, N = E->getNumExpressions(); I != N; ++I) - E->setIndexExpr(I, cast_or_null<Expr>(StmtStack[StmtStack.size() - N + I])); - - return E->getNumExpressions(); + E->setIndexExpr(I, Reader.ReadSubExpr()); } -unsigned PCHStmtReader::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E) { +void PCHStmtReader::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E) { 
VisitExpr(E); E->setSizeof(Record[Idx++]); if (Record[Idx] == 0) { - E->setArgument(cast<Expr>(StmtStack.back())); + E->setArgument(Reader.ReadSubExpr()); ++Idx; } else { E->setArgument(Reader.GetTypeSourceInfo(Record, Idx)); } E->setOperatorLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return E->isArgumentType()? 0 : 1; } -unsigned PCHStmtReader::VisitArraySubscriptExpr(ArraySubscriptExpr *E) { +void PCHStmtReader::VisitArraySubscriptExpr(ArraySubscriptExpr *E) { VisitExpr(E); - E->setLHS(cast<Expr>(StmtStack[StmtStack.size() - 2])); - E->setRHS(cast<Expr>(StmtStack[StmtStack.size() - 1])); + E->setLHS(Reader.ReadSubExpr()); + E->setRHS(Reader.ReadSubExpr()); E->setRBracketLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 2; } -unsigned PCHStmtReader::VisitCallExpr(CallExpr *E) { +void PCHStmtReader::VisitCallExpr(CallExpr *E) { VisitExpr(E); E->setNumArgs(*Reader.getContext(), Record[Idx++]); E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - E->setCallee(cast<Expr>(StmtStack[StmtStack.size() - E->getNumArgs() - 1])); + E->setCallee(Reader.ReadSubExpr()); for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I) - E->setArg(I, cast<Expr>(StmtStack[StmtStack.size() - N + I])); - return E->getNumArgs() + 1; + E->setArg(I, Reader.ReadSubExpr()); } -unsigned PCHStmtReader::VisitMemberExpr(MemberExpr *E) { - VisitExpr(E); - E->setBase(cast<Expr>(StmtStack.back())); - E->setMemberDecl(cast<ValueDecl>(Reader.GetDecl(Record[Idx++]))); - E->setMemberLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - E->setArrow(Record[Idx++]); - return 1; +void PCHStmtReader::VisitMemberExpr(MemberExpr *E) { + // Don't call VisitExpr, this is fully initialized at creation. 
+ assert(E->getStmtClass() == Stmt::MemberExprClass && + "It's a subclass, we must advance Idx!"); } -unsigned PCHStmtReader::VisitObjCIsaExpr(ObjCIsaExpr *E) { +void PCHStmtReader::VisitObjCIsaExpr(ObjCIsaExpr *E) { VisitExpr(E); - E->setBase(cast<Expr>(StmtStack.back())); + E->setBase(Reader.ReadSubExpr()); E->setIsaMemberLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setArrow(Record[Idx++]); - return 1; } -unsigned PCHStmtReader::VisitCastExpr(CastExpr *E) { +void PCHStmtReader::VisitCastExpr(CastExpr *E) { VisitExpr(E); - E->setSubExpr(cast<Expr>(StmtStack.back())); + E->setSubExpr(Reader.ReadSubExpr()); E->setCastKind((CastExpr::CastKind)Record[Idx++]); - return 1; + CXXBaseSpecifierArray &BasePath = E->getBasePath(); + unsigned NumBaseSpecs = Record[Idx++]; + while (NumBaseSpecs--) { + // FIXME: These gets leaked. + CXXBaseSpecifier *BaseSpec = new (*Reader.getContext()) CXXBaseSpecifier; + *BaseSpec = Reader.ReadCXXBaseSpecifier(Record, Idx); + BasePath.push_back(BaseSpec); + } } -unsigned PCHStmtReader::VisitBinaryOperator(BinaryOperator *E) { +void PCHStmtReader::VisitBinaryOperator(BinaryOperator *E) { VisitExpr(E); - E->setLHS(cast<Expr>(StmtStack.end()[-2])); - E->setRHS(cast<Expr>(StmtStack.end()[-1])); + E->setLHS(Reader.ReadSubExpr()); + E->setRHS(Reader.ReadSubExpr()); E->setOpcode((BinaryOperator::Opcode)Record[Idx++]); E->setOperatorLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 2; } -unsigned PCHStmtReader::VisitCompoundAssignOperator(CompoundAssignOperator *E) { +void PCHStmtReader::VisitCompoundAssignOperator(CompoundAssignOperator *E) { VisitBinaryOperator(E); E->setComputationLHSType(Reader.GetType(Record[Idx++])); E->setComputationResultType(Reader.GetType(Record[Idx++])); - return 2; } -unsigned PCHStmtReader::VisitConditionalOperator(ConditionalOperator *E) { +void PCHStmtReader::VisitConditionalOperator(ConditionalOperator *E) { VisitExpr(E); - E->setCond(cast<Expr>(StmtStack[StmtStack.size() - 3])); - 
E->setLHS(cast_or_null<Expr>(StmtStack[StmtStack.size() - 2])); - E->setRHS(cast_or_null<Expr>(StmtStack[StmtStack.size() - 1])); + E->setCond(Reader.ReadSubExpr()); + E->setLHS(Reader.ReadSubExpr()); + E->setRHS(Reader.ReadSubExpr()); E->setQuestionLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setColonLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 3; } -unsigned PCHStmtReader::VisitImplicitCastExpr(ImplicitCastExpr *E) { +void PCHStmtReader::VisitImplicitCastExpr(ImplicitCastExpr *E) { VisitCastExpr(E); E->setLvalueCast(Record[Idx++]); - return 1; } -unsigned PCHStmtReader::VisitExplicitCastExpr(ExplicitCastExpr *E) { +void PCHStmtReader::VisitExplicitCastExpr(ExplicitCastExpr *E) { VisitCastExpr(E); E->setTypeInfoAsWritten(Reader.GetTypeSourceInfo(Record, Idx)); - return 1; } -unsigned PCHStmtReader::VisitCStyleCastExpr(CStyleCastExpr *E) { +void PCHStmtReader::VisitCStyleCastExpr(CStyleCastExpr *E) { VisitExplicitCastExpr(E); E->setLParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 1; } -unsigned PCHStmtReader::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { +void PCHStmtReader::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { VisitExpr(E); E->setLParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setTypeSourceInfo(Reader.GetTypeSourceInfo(Record, Idx)); - E->setInitializer(cast<Expr>(StmtStack.back())); + E->setInitializer(Reader.ReadSubExpr()); E->setFileScope(Record[Idx++]); - return 1; } -unsigned PCHStmtReader::VisitExtVectorElementExpr(ExtVectorElementExpr *E) { +void PCHStmtReader::VisitExtVectorElementExpr(ExtVectorElementExpr *E) { VisitExpr(E); - E->setBase(cast<Expr>(StmtStack.back())); + E->setBase(Reader.ReadSubExpr()); E->setAccessor(Reader.GetIdentifierInfo(Record, Idx)); E->setAccessorLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 1; } -unsigned PCHStmtReader::VisitInitListExpr(InitListExpr *E) 
{ +void PCHStmtReader::VisitInitListExpr(InitListExpr *E) { VisitExpr(E); unsigned NumInits = Record[Idx++]; E->reserveInits(*Reader.getContext(), NumInits); for (unsigned I = 0; I != NumInits; ++I) - E->updateInit(*Reader.getContext(), I, - cast<Expr>(StmtStack[StmtStack.size() - NumInits - 1 + I])); - E->setSyntacticForm(cast_or_null<InitListExpr>(StmtStack.back())); + E->updateInit(*Reader.getContext(), I, Reader.ReadSubExpr()); + E->setSyntacticForm(cast_or_null<InitListExpr>(Reader.ReadSubStmt())); E->setLBraceLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setRBraceLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setInitializedFieldInUnion( cast_or_null<FieldDecl>(Reader.GetDecl(Record[Idx++]))); E->sawArrayRangeDesignator(Record[Idx++]); - return NumInits + 1; } -unsigned PCHStmtReader::VisitDesignatedInitExpr(DesignatedInitExpr *E) { +void PCHStmtReader::VisitDesignatedInitExpr(DesignatedInitExpr *E) { typedef DesignatedInitExpr::Designator Designator; VisitExpr(E); unsigned NumSubExprs = Record[Idx++]; assert(NumSubExprs == E->getNumSubExprs() && "Wrong number of subexprs"); for (unsigned I = 0; I != NumSubExprs; ++I) - E->setSubExpr(I, cast<Expr>(StmtStack[StmtStack.size() - NumSubExprs + I])); + E->setSubExpr(I, Reader.ReadSubExpr()); E->setEqualOrColonLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setGNUSyntax(Record[Idx++]); @@ -683,143 +693,128 @@ unsigned PCHStmtReader::VisitDesignatedInitExpr(DesignatedInitExpr *E) { } E->setDesignators(*Reader.getContext(), Designators.data(), Designators.size()); - - return NumSubExprs; } -unsigned PCHStmtReader::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) { +void PCHStmtReader::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) { VisitExpr(E); - return 0; } -unsigned PCHStmtReader::VisitVAArgExpr(VAArgExpr *E) { +void PCHStmtReader::VisitVAArgExpr(VAArgExpr *E) { VisitExpr(E); - E->setSubExpr(cast<Expr>(StmtStack.back())); + E->setSubExpr(Reader.ReadSubExpr()); 
E->setBuiltinLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 1; } -unsigned PCHStmtReader::VisitAddrLabelExpr(AddrLabelExpr *E) { +void PCHStmtReader::VisitAddrLabelExpr(AddrLabelExpr *E) { VisitExpr(E); E->setAmpAmpLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setLabelLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); Reader.SetLabelOf(E, Record[Idx++]); - return 0; } -unsigned PCHStmtReader::VisitStmtExpr(StmtExpr *E) { +void PCHStmtReader::VisitStmtExpr(StmtExpr *E) { VisitExpr(E); E->setLParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - E->setSubStmt(cast_or_null<CompoundStmt>(StmtStack.back())); - return 1; + E->setSubStmt(cast_or_null<CompoundStmt>(Reader.ReadSubStmt())); } -unsigned PCHStmtReader::VisitTypesCompatibleExpr(TypesCompatibleExpr *E) { +void PCHStmtReader::VisitTypesCompatibleExpr(TypesCompatibleExpr *E) { VisitExpr(E); E->setArgType1(Reader.GetType(Record[Idx++])); E->setArgType2(Reader.GetType(Record[Idx++])); E->setBuiltinLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 0; } -unsigned PCHStmtReader::VisitChooseExpr(ChooseExpr *E) { +void PCHStmtReader::VisitChooseExpr(ChooseExpr *E) { VisitExpr(E); - E->setCond(cast<Expr>(StmtStack[StmtStack.size() - 3])); - E->setLHS(cast_or_null<Expr>(StmtStack[StmtStack.size() - 2])); - E->setRHS(cast_or_null<Expr>(StmtStack[StmtStack.size() - 1])); + E->setCond(Reader.ReadSubExpr()); + E->setLHS(Reader.ReadSubExpr()); + E->setRHS(Reader.ReadSubExpr()); E->setBuiltinLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 3; } -unsigned PCHStmtReader::VisitGNUNullExpr(GNUNullExpr *E) { +void PCHStmtReader::VisitGNUNullExpr(GNUNullExpr *E) { VisitExpr(E); 
E->setTokenLocation(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 0; } -unsigned PCHStmtReader::VisitShuffleVectorExpr(ShuffleVectorExpr *E) { +void PCHStmtReader::VisitShuffleVectorExpr(ShuffleVectorExpr *E) { VisitExpr(E); + llvm::SmallVector<Expr *, 16> Exprs; unsigned NumExprs = Record[Idx++]; - E->setExprs(*Reader.getContext(), - (Expr **)&StmtStack[StmtStack.size() - NumExprs], NumExprs); + while (NumExprs--) + Exprs.push_back(Reader.ReadSubExpr()); + E->setExprs(*Reader.getContext(), Exprs.data(), Exprs.size()); E->setBuiltinLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return NumExprs; } -unsigned PCHStmtReader::VisitBlockExpr(BlockExpr *E) { +void PCHStmtReader::VisitBlockExpr(BlockExpr *E) { VisitExpr(E); E->setBlockDecl(cast_or_null<BlockDecl>(Reader.GetDecl(Record[Idx++]))); E->setHasBlockDeclRefExprs(Record[Idx++]); - return 0; } -unsigned PCHStmtReader::VisitBlockDeclRefExpr(BlockDeclRefExpr *E) { +void PCHStmtReader::VisitBlockDeclRefExpr(BlockDeclRefExpr *E) { VisitExpr(E); E->setDecl(cast<ValueDecl>(Reader.GetDecl(Record[Idx++]))); E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setByRef(Record[Idx++]); E->setConstQualAdded(Record[Idx++]); - return 0; + E->setCopyConstructorExpr(Reader.ReadSubExpr()); } //===----------------------------------------------------------------------===// // Objective-C Expressions and Statements -unsigned PCHStmtReader::VisitObjCStringLiteral(ObjCStringLiteral *E) { +void PCHStmtReader::VisitObjCStringLiteral(ObjCStringLiteral *E) { VisitExpr(E); - E->setString(cast<StringLiteral>(StmtStack.back())); + E->setString(cast<StringLiteral>(Reader.ReadSubStmt())); E->setAtLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 1; } -unsigned PCHStmtReader::VisitObjCEncodeExpr(ObjCEncodeExpr *E) { +void PCHStmtReader::VisitObjCEncodeExpr(ObjCEncodeExpr *E) { VisitExpr(E); 
E->setEncodedTypeSourceInfo(Reader.GetTypeSourceInfo(Record, Idx)); E->setAtLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 0; } -unsigned PCHStmtReader::VisitObjCSelectorExpr(ObjCSelectorExpr *E) { +void PCHStmtReader::VisitObjCSelectorExpr(ObjCSelectorExpr *E) { VisitExpr(E); E->setSelector(Reader.GetSelector(Record, Idx)); E->setAtLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 0; } -unsigned PCHStmtReader::VisitObjCProtocolExpr(ObjCProtocolExpr *E) { +void PCHStmtReader::VisitObjCProtocolExpr(ObjCProtocolExpr *E) { VisitExpr(E); E->setProtocol(cast<ObjCProtocolDecl>(Reader.GetDecl(Record[Idx++]))); E->setAtLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 0; } -unsigned PCHStmtReader::VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) { +void PCHStmtReader::VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) { VisitExpr(E); E->setDecl(cast<ObjCIvarDecl>(Reader.GetDecl(Record[Idx++]))); E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++])); - E->setBase(cast<Expr>(StmtStack.back())); + E->setBase(Reader.ReadSubExpr()); E->setIsArrow(Record[Idx++]); E->setIsFreeIvar(Record[Idx++]); - return 1; } -unsigned PCHStmtReader::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) { +void PCHStmtReader::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) { VisitExpr(E); E->setProperty(cast<ObjCPropertyDecl>(Reader.GetDecl(Record[Idx++]))); E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++])); - E->setBase(cast<Expr>(StmtStack.back())); - return 1; + E->setBase(Reader.ReadSubExpr()); } -unsigned PCHStmtReader::VisitObjCImplicitSetterGetterRefExpr( +void PCHStmtReader::VisitObjCImplicitSetterGetterRefExpr( ObjCImplicitSetterGetterRefExpr *E) { VisitExpr(E); E->setGetterMethod( @@ -828,13 +823,12 @@ unsigned 
PCHStmtReader::VisitObjCImplicitSetterGetterRefExpr( cast_or_null<ObjCMethodDecl>(Reader.GetDecl(Record[Idx++]))); E->setInterfaceDecl( cast_or_null<ObjCInterfaceDecl>(Reader.GetDecl(Record[Idx++]))); - E->setBase(cast_or_null<Expr>(StmtStack.back())); + E->setBase(Reader.ReadSubExpr()); E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setClassLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 1; } -unsigned PCHStmtReader::VisitObjCMessageExpr(ObjCMessageExpr *E) { +void PCHStmtReader::VisitObjCMessageExpr(ObjCMessageExpr *E) { VisitExpr(E); assert(Record[Idx] == E->getNumArgs()); ++Idx; @@ -842,8 +836,7 @@ unsigned PCHStmtReader::VisitObjCMessageExpr(ObjCMessageExpr *E) { = static_cast<ObjCMessageExpr::ReceiverKind>(Record[Idx++]); switch (Kind) { case ObjCMessageExpr::Instance: - E->setInstanceReceiver( - cast_or_null<Expr>(StmtStack[StmtStack.size() - E->getNumArgs() - 1])); + E->setInstanceReceiver(Reader.ReadSubExpr()); break; case ObjCMessageExpr::Class: @@ -870,193 +863,185 @@ unsigned PCHStmtReader::VisitObjCMessageExpr(ObjCMessageExpr *E) { E->setRightLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I) - E->setArg(I, cast<Expr>(StmtStack[StmtStack.size() - N + I])); - return E->getNumArgs() + (Kind == ObjCMessageExpr::Instance); + E->setArg(I, Reader.ReadSubExpr()); } -unsigned PCHStmtReader::VisitObjCSuperExpr(ObjCSuperExpr *E) { +void PCHStmtReader::VisitObjCSuperExpr(ObjCSuperExpr *E) { VisitExpr(E); E->setLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 0; } -unsigned PCHStmtReader::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) { +void PCHStmtReader::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) { VisitStmt(S); - S->setElement(cast_or_null<Stmt>(StmtStack[StmtStack.size() - 3])); - S->setCollection(cast_or_null<Expr>(StmtStack[StmtStack.size() - 2])); - S->setBody(cast_or_null<Stmt>(StmtStack[StmtStack.size() - 1])); + 
S->setElement(Reader.ReadSubStmt()); + S->setCollection(Reader.ReadSubExpr()); + S->setBody(Reader.ReadSubStmt()); S->setForLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); S->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 3; } -unsigned PCHStmtReader::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) { +void PCHStmtReader::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) { VisitStmt(S); - S->setCatchBody(cast_or_null<Stmt>(StmtStack.back())); + S->setCatchBody(Reader.ReadSubStmt()); S->setCatchParamDecl(cast_or_null<VarDecl>(Reader.GetDecl(Record[Idx++]))); S->setAtCatchLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); S->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 1; } -unsigned PCHStmtReader::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) { +void PCHStmtReader::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) { VisitStmt(S); - S->setFinallyBody(StmtStack.back()); + S->setFinallyBody(Reader.ReadSubStmt()); S->setAtFinallyLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 1; } -unsigned PCHStmtReader::VisitObjCAtTryStmt(ObjCAtTryStmt *S) { +void PCHStmtReader::VisitObjCAtTryStmt(ObjCAtTryStmt *S) { VisitStmt(S); assert(Record[Idx] == S->getNumCatchStmts()); ++Idx; bool HasFinally = Record[Idx++]; - for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I) { - unsigned Offset = StmtStack.size() - N - HasFinally + I; - S->setCatchStmt(I, cast_or_null<ObjCAtCatchStmt>(StmtStack[Offset])); - } + S->setTryBody(Reader.ReadSubStmt()); + for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I) + S->setCatchStmt(I, cast_or_null<ObjCAtCatchStmt>(Reader.ReadSubStmt())); - unsigned TryOffset - = StmtStack.size() - S->getNumCatchStmts() - HasFinally - 1; - S->setTryBody(cast_or_null<Stmt>(StmtStack[TryOffset])); if (HasFinally) - S->setFinallyStmt(cast_or_null<Stmt>(StmtStack[StmtStack.size() - 1])); + S->setFinallyStmt(Reader.ReadSubStmt()); S->setAtTryLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - 
return 1 + S->getNumCatchStmts() + HasFinally; } -unsigned PCHStmtReader::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) { +void PCHStmtReader::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) { VisitStmt(S); - S->setSynchExpr(cast_or_null<Stmt>(StmtStack[StmtStack.size() - 2])); - S->setSynchBody(cast_or_null<Stmt>(StmtStack[StmtStack.size() - 1])); + S->setSynchExpr(Reader.ReadSubStmt()); + S->setSynchBody(Reader.ReadSubStmt()); S->setAtSynchronizedLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 2; } -unsigned PCHStmtReader::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) { +void PCHStmtReader::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) { VisitStmt(S); - S->setThrowExpr(StmtStack.back()); + S->setThrowExpr(Reader.ReadSubStmt()); S->setThrowLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 1; } //===----------------------------------------------------------------------===// // C++ Expressions and Statements -unsigned PCHStmtReader::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) { - unsigned num = VisitCallExpr(E); +void PCHStmtReader::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) { + VisitCallExpr(E); E->setOperator((OverloadedOperatorKind)Record[Idx++]); - return num; } -unsigned PCHStmtReader::VisitCXXConstructExpr(CXXConstructExpr *E) { +void PCHStmtReader::VisitCXXConstructExpr(CXXConstructExpr *E) { VisitExpr(E); + E->NumArgs = Record[Idx++]; + if (E->NumArgs) + E->Args = new (*Reader.getContext()) Stmt*[E->NumArgs]; + for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I) + E->setArg(I, Reader.ReadSubExpr()); E->setConstructor(cast<CXXConstructorDecl>(Reader.GetDecl(Record[Idx++]))); E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setElidable(Record[Idx++]); E->setRequiresZeroInitialization(Record[Idx++]); - for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I) - E->setArg(I, cast<Expr>(StmtStack[StmtStack.size() - N + I])); 
E->setConstructionKind((CXXConstructExpr::ConstructionKind)Record[Idx++]); - return E->getNumArgs(); } -unsigned PCHStmtReader::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) { - unsigned num = VisitExplicitCastExpr(E); +void PCHStmtReader::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E) { + VisitCXXConstructExpr(E); + E->TyBeginLoc = Reader.ReadSourceLocation(Record, Idx); + E->RParenLoc = Reader.ReadSourceLocation(Record, Idx); +} + +void PCHStmtReader::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) { + VisitExplicitCastExpr(E); E->setOperatorLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return num; } -unsigned PCHStmtReader::VisitCXXStaticCastExpr(CXXStaticCastExpr *E) { +void PCHStmtReader::VisitCXXStaticCastExpr(CXXStaticCastExpr *E) { return VisitCXXNamedCastExpr(E); } -unsigned PCHStmtReader::VisitCXXDynamicCastExpr(CXXDynamicCastExpr *E) { +void PCHStmtReader::VisitCXXDynamicCastExpr(CXXDynamicCastExpr *E) { return VisitCXXNamedCastExpr(E); } -unsigned PCHStmtReader::VisitCXXReinterpretCastExpr(CXXReinterpretCastExpr *E) { +void PCHStmtReader::VisitCXXReinterpretCastExpr(CXXReinterpretCastExpr *E) { return VisitCXXNamedCastExpr(E); } -unsigned PCHStmtReader::VisitCXXConstCastExpr(CXXConstCastExpr *E) { +void PCHStmtReader::VisitCXXConstCastExpr(CXXConstCastExpr *E) { return VisitCXXNamedCastExpr(E); } -unsigned PCHStmtReader::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E) { - unsigned num = VisitExplicitCastExpr(E); +void PCHStmtReader::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E) { + VisitExplicitCastExpr(E); E->setTypeBeginLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return num; } -unsigned PCHStmtReader::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) { +void PCHStmtReader::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) { VisitExpr(E); E->setValue(Record[Idx++]); E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 0; } 
-unsigned PCHStmtReader::VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E) { +void PCHStmtReader::VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E) { VisitExpr(E); E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 0; } -unsigned PCHStmtReader::VisitCXXTypeidExpr(CXXTypeidExpr *E) { +void PCHStmtReader::VisitCXXTypeidExpr(CXXTypeidExpr *E) { VisitExpr(E); E->setSourceRange(Reader.ReadSourceRange(Record, Idx)); if (E->isTypeOperand()) { // typeid(int) E->setTypeOperandSourceInfo(Reader.GetTypeSourceInfo(Record, Idx)); - return 0; + return; } // typeid(42+2) - E->setExprOperand(cast<Expr>(StmtStack.back())); - return 1; + E->setExprOperand(Reader.ReadSubExpr()); } -unsigned PCHStmtReader::VisitCXXThisExpr(CXXThisExpr *E) { +void PCHStmtReader::VisitCXXThisExpr(CXXThisExpr *E) { VisitExpr(E); E->setLocation(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setImplicit(Record[Idx++]); - return 0; } -unsigned PCHStmtReader::VisitCXXThrowExpr(CXXThrowExpr *E) { +void PCHStmtReader::VisitCXXThrowExpr(CXXThrowExpr *E) { VisitExpr(E); E->setThrowLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - E->setSubExpr(cast<Expr>(StmtStack.back())); - return 1; + E->setSubExpr(Reader.ReadSubExpr()); } -unsigned PCHStmtReader::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) { +void PCHStmtReader::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) { VisitExpr(E); - E->setUsedLocation(SourceLocation::getFromRawEncoding(Record[Idx++])); - bool HasStoredExpr = Record[Idx++]; - if (!HasStoredExpr) return 0; - E->setExpr(cast<Expr>(StmtStack.back())); - return 1; + + assert(Record[Idx] == E->Param.getInt() && "We messed up at creation ?"); + ++Idx; // HasOtherExprStored and SubExpr was handled during creation. 
+ E->Param.setPointer(cast<ParmVarDecl>(Reader.GetDecl(Record[Idx++]))); + E->Loc = Reader.ReadSourceLocation(Record, Idx); } -unsigned PCHStmtReader::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) { +void PCHStmtReader::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) { VisitExpr(E); E->setTemporary(Reader.ReadCXXTemporary(Record, Idx)); - E->setSubExpr(cast<Expr>(StmtStack.back())); - return 1; + E->setSubExpr(Reader.ReadSubExpr()); } -unsigned PCHStmtReader::VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) { +void PCHStmtReader::VisitCXXBindReferenceExpr(CXXBindReferenceExpr *E) { + VisitExpr(E); + E->SubExpr = Reader.ReadSubExpr(); + E->ExtendsLifetime = Record[Idx++]; + E->RequiresTemporaryCopy = Record[Idx++]; +} + +void PCHStmtReader::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) { VisitExpr(E); E->setTypeBeginLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setRParenLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); - return 0; } -unsigned PCHStmtReader::VisitCXXNewExpr(CXXNewExpr *E) { +void PCHStmtReader::VisitCXXNewExpr(CXXNewExpr *E) { VisitExpr(E); E->setGlobalNew(Record[Idx++]); - E->setParenTypeId(Record[Idx++]); E->setHasInitializer(Record[Idx++]); bool isArray = Record[Idx++]; unsigned NumPlacementArgs = Record[Idx++]; @@ -1066,6 +1051,10 @@ unsigned PCHStmtReader::VisitCXXNewExpr(CXXNewExpr *E) { cast_or_null<FunctionDecl>(Reader.GetDecl(Record[Idx++]))); E->setConstructor( cast_or_null<CXXConstructorDecl>(Reader.GetDecl(Record[Idx++]))); + SourceRange TypeIdParens; + TypeIdParens.setBegin(SourceLocation::getFromRawEncoding(Record[Idx++])); + TypeIdParens.setEnd(SourceLocation::getFromRawEncoding(Record[Idx++])); + E->TypeIdParens = TypeIdParens; E->setStartLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); E->setEndLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); @@ -1073,17 +1062,41 @@ unsigned PCHStmtReader::VisitCXXNewExpr(CXXNewExpr *E) { NumCtorArgs); // Install all the subexpressions. 
- unsigned TotalSubExprs = E->raw_arg_end()-E->raw_arg_begin(); - unsigned SSIdx = StmtStack.size()-TotalSubExprs; for (CXXNewExpr::raw_arg_iterator I = E->raw_arg_begin(),e = E->raw_arg_end(); I != e; ++I) - *I = StmtStack[SSIdx++]; - - return TotalSubExprs; + *I = Reader.ReadSubStmt(); +} + +void PCHStmtReader::VisitCXXDeleteExpr(CXXDeleteExpr *E) { + VisitExpr(E); + E->setGlobalDelete(Record[Idx++]); + E->setArrayForm(Record[Idx++]); + E->setOperatorDelete( + cast_or_null<FunctionDecl>(Reader.GetDecl(Record[Idx++]))); + E->setArgument(Reader.ReadSubExpr()); + E->setStartLoc(SourceLocation::getFromRawEncoding(Record[Idx++])); } +void PCHStmtReader::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) { + VisitExpr(E); -unsigned PCHStmtReader::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) { + E->setBase(Reader.ReadSubExpr()); + E->setArrow(Record[Idx++]); + E->setOperatorLoc(Reader.ReadSourceLocation(Record, Idx)); + E->setQualifier(Reader.ReadNestedNameSpecifier(Record, Idx)); + E->setQualifierRange(Reader.ReadSourceRange(Record, Idx)); + E->setScopeTypeInfo(Reader.GetTypeSourceInfo(Record, Idx)); + E->setColonColonLoc(Reader.ReadSourceLocation(Record, Idx)); + E->setTildeLoc(Reader.ReadSourceLocation(Record, Idx)); + + IdentifierInfo *II = Reader.GetIdentifierInfo(Record, Idx); + if (II) + E->setDestroyedType(II, Reader.ReadSourceLocation(Record, Idx)); + else + E->setDestroyedType(Reader.GetTypeSourceInfo(Record, Idx)); +} + +void PCHStmtReader::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) { VisitExpr(E); unsigned NumTemps = Record[Idx++]; if (NumTemps) { @@ -1091,23 +1104,152 @@ unsigned PCHStmtReader::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) { for (unsigned i = 0; i != NumTemps; ++i) E->setTemporary(i, Reader.ReadCXXTemporary(Record, Idx)); } - E->setSubExpr(cast<Expr>(StmtStack.back())); - return 1; + E->setSubExpr(Reader.ReadSubExpr()); +} + +void 
+PCHStmtReader::VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E){ + VisitExpr(E); + + unsigned NumTemplateArgs = Record[Idx++]; + assert((NumTemplateArgs != 0) == E->hasExplicitTemplateArgs() && + "Read wrong record during creation ?"); + if (E->hasExplicitTemplateArgs()) + ReadExplicitTemplateArgumentList(*E->getExplicitTemplateArgumentList(), + NumTemplateArgs); + + E->setBase(Reader.ReadSubExpr()); + E->setBaseType(Reader.GetType(Record[Idx++])); + E->setArrow(Record[Idx++]); + E->setOperatorLoc(Reader.ReadSourceLocation(Record, Idx)); + E->setQualifier(Reader.ReadNestedNameSpecifier(Record, Idx)); + E->setQualifierRange(Reader.ReadSourceRange(Record, Idx)); + E->setFirstQualifierFoundInScope( + cast_or_null<NamedDecl>(Reader.GetDecl(Record[Idx++]))); + E->setMember(Reader.ReadDeclarationName(Record, Idx)); + E->setMemberLoc(Reader.ReadSourceLocation(Record, Idx)); +} + +void +PCHStmtReader::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) { + VisitExpr(E); + + unsigned NumTemplateArgs = Record[Idx++]; + assert((NumTemplateArgs != 0) == E->hasExplicitTemplateArgs() && + "Read wrong record during creation ?"); + if (E->hasExplicitTemplateArgs()) + ReadExplicitTemplateArgumentList(E->getExplicitTemplateArgs(), + NumTemplateArgs); + + E->setDeclName(Reader.ReadDeclarationName(Record, Idx)); + E->setLocation(Reader.ReadSourceLocation(Record, Idx)); + E->setQualifierRange(Reader.ReadSourceRange(Record, Idx)); + E->setQualifier(Reader.ReadNestedNameSpecifier(Record, Idx)); +} + +void +PCHStmtReader::VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E) { + VisitExpr(E); + assert(Record[Idx] == E->arg_size() && "Read wrong record during creation ?"); + ++Idx; // NumArgs; + for (unsigned I = 0, N = E->arg_size(); I != N; ++I) + E->setArg(I, Reader.ReadSubExpr()); + E->setTypeBeginLoc(Reader.ReadSourceLocation(Record, Idx)); + E->setTypeAsWritten(Reader.GetType(Record[Idx++])); + E->setLParenLoc(Reader.ReadSourceLocation(Record, 
Idx)); + E->setRParenLoc(Reader.ReadSourceLocation(Record, Idx)); +} + +void PCHStmtReader::VisitOverloadExpr(OverloadExpr *E) { + VisitExpr(E); + + unsigned NumTemplateArgs = Record[Idx++]; + assert((NumTemplateArgs != 0) == E->hasExplicitTemplateArgs() && + "Read wrong record during creation ?"); + if (E->hasExplicitTemplateArgs()) + ReadExplicitTemplateArgumentList(E->getExplicitTemplateArgs(), + NumTemplateArgs); + + unsigned NumDecls = Record[Idx++]; + UnresolvedSet<8> Decls; + for (unsigned i = 0; i != NumDecls; ++i) { + NamedDecl *D = cast<NamedDecl>(Reader.GetDecl(Record[Idx++])); + AccessSpecifier AS = (AccessSpecifier)Record[Idx++]; + Decls.addDecl(D, AS); + } + E->initializeResults(*Reader.getContext(), Decls.begin(), Decls.end()); + + E->setName(Reader.ReadDeclarationName(Record, Idx)); + E->setQualifier(Reader.ReadNestedNameSpecifier(Record, Idx)); + E->setQualifierRange(Reader.ReadSourceRange(Record, Idx)); + E->setNameLoc(Reader.ReadSourceLocation(Record, Idx)); +} + +void PCHStmtReader::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) { + VisitOverloadExpr(E); + E->setArrow(Record[Idx++]); + E->setHasUnresolvedUsing(Record[Idx++]); + E->setBase(Reader.ReadSubExpr()); + E->setBaseType(Reader.GetType(Record[Idx++])); + E->setOperatorLoc(Reader.ReadSourceLocation(Record, Idx)); +} + +void PCHStmtReader::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) { + VisitOverloadExpr(E); + E->setRequiresADL(Record[Idx++]); + E->setOverloaded(Record[Idx++]); + E->setNamingClass(cast_or_null<CXXRecordDecl>(Reader.GetDecl(Record[Idx++]))); +} + +void PCHStmtReader::VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E) { + VisitExpr(E); + E->UTT = (UnaryTypeTrait)Record[Idx++]; + SourceRange Range = Reader.ReadSourceRange(Record, Idx); + E->Loc = Range.getBegin(); + E->RParen = Range.getEnd(); + E->QueriedType = Reader.GetType(Record[Idx++]); +} + +Stmt *PCHReader::ReadStmt() { + switch (ReadingKind) { + case Read_Decl: + case Read_Type: + // Read a statement from the 
current DeclCursor. + return ReadStmtFromStream(DeclsCursor); + case Read_Stmt: + return ReadSubStmt(); + } + + llvm_unreachable("ReadingKind not set ?"); + return 0; } +Expr *PCHReader::ReadExpr() { + return cast_or_null<Expr>(ReadStmt()); +} + +Expr *PCHReader::ReadSubExpr() { + return cast_or_null<Expr>(ReadSubStmt()); +} // Within the bitstream, expressions are stored in Reverse Polish // Notation, with each of the subexpressions preceding the -// expression they are stored in. To evaluate expressions, we -// continue reading expressions and placing them on the stack, with -// expressions having operands removing those operands from the +// expression they are stored in. Subexpressions are stored from last to first. +// To evaluate expressions, we continue reading expressions and placing them on +// the stack, with expressions having operands removing those operands from the // stack. Evaluation terminates when we see a STMT_STOP record, and // the single remaining expression on the stack is our result. 
-Stmt *PCHReader::ReadStmt(llvm::BitstreamCursor &Cursor) { +Stmt *PCHReader::ReadStmtFromStream(llvm::BitstreamCursor &Cursor) { + + ReadingKindTracker ReadingKind(Read_Stmt, *this); + +#ifndef NDEBUG + unsigned PrevNumStmts = StmtStack.size(); +#endif + RecordData Record; unsigned Idx; - llvm::SmallVector<Stmt *, 16> StmtStack; - PCHStmtReader Reader(*this, Record, Idx, StmtStack); + PCHStmtReader Reader(*this, Record, Idx); Stmt::EmptyShell Empty; while (true) { @@ -1221,7 +1363,9 @@ Stmt *PCHReader::ReadStmt(llvm::BitstreamCursor &Cursor) { break; case pch::EXPR_DECL_REF: - S = new (Context) DeclRefExpr(Empty); + S = DeclRefExpr::CreateEmpty(*Context, + /*HasQualifier=*/Record[PCHStmtReader::NumExprFields], + /*NumTemplateArgs=*/Record[PCHStmtReader::NumExprFields + 1]); break; case pch::EXPR_INTEGER_LITERAL: @@ -1249,6 +1393,10 @@ Stmt *PCHReader::ReadStmt(llvm::BitstreamCursor &Cursor) { S = new (Context) ParenExpr(Empty); break; + case pch::EXPR_PAREN_LIST: + S = new (Context) ParenListExpr(Empty); + break; + case pch::EXPR_UNARY_OPERATOR: S = new (Context) UnaryOperator(Empty); break; @@ -1271,9 +1419,43 @@ Stmt *PCHReader::ReadStmt(llvm::BitstreamCursor &Cursor) { S = new (Context) CallExpr(*Context, Stmt::CallExprClass, Empty); break; - case pch::EXPR_MEMBER: - S = new (Context) MemberExpr(Empty); + case pch::EXPR_MEMBER: { + // We load everything here and fully initialize it at creation. + // That way we can use MemberExpr::Create and don't have to duplicate its + // logic with a MemberExpr::CreateEmpty. + + assert(Idx == 0); + NestedNameSpecifier *NNS = 0; + SourceRange QualifierRange; + if (Record[Idx++]) { // HasQualifier. 
+ NNS = ReadNestedNameSpecifier(Record, Idx); + QualifierRange = ReadSourceRange(Record, Idx); + } + + TemplateArgumentListInfo ArgInfo; + unsigned NumTemplateArgs = Record[Idx++]; + if (NumTemplateArgs) { + ArgInfo.setLAngleLoc(ReadSourceLocation(Record, Idx)); + ArgInfo.setRAngleLoc(ReadSourceLocation(Record, Idx)); + for (unsigned i = 0; i != NumTemplateArgs; ++i) + ArgInfo.addArgument(ReadTemplateArgumentLoc(Record, Idx)); + } + + NamedDecl *FoundD = cast_or_null<NamedDecl>(GetDecl(Record[Idx++])); + AccessSpecifier AS = (AccessSpecifier)Record[Idx++]; + DeclAccessPair FoundDecl = DeclAccessPair::make(FoundD, AS); + + QualType T = GetType(Record[Idx++]); + Expr *Base = ReadSubExpr(); + ValueDecl *MemberD = cast<ValueDecl>(GetDecl(Record[Idx++])); + SourceLocation MemberLoc = ReadSourceLocation(Record, Idx); + bool IsArrow = Record[Idx++]; + + S = MemberExpr::Create(*Context, Base, IsArrow, NNS, QualifierRange, + MemberD, FoundDecl, MemberLoc, + NumTemplateArgs ? &ArgInfo : 0, T); break; + } case pch::EXPR_BINARY_OPERATOR: S = new (Context) BinaryOperator(Empty); @@ -1414,8 +1596,11 @@ Stmt *PCHReader::ReadStmt(llvm::BitstreamCursor &Cursor) { break; case pch::EXPR_CXX_CONSTRUCT: - S = new (Context) CXXConstructExpr(Empty, *Context, - Record[PCHStmtReader::NumExprFields + 2]); + S = new (Context) CXXConstructExpr(Empty); + break; + + case pch::EXPR_CXX_TEMPORARY_OBJECT: + S = new (Context) CXXTemporaryObjectExpr(Empty); break; case pch::EXPR_CXX_STATIC_CAST: @@ -1457,43 +1642,86 @@ Stmt *PCHReader::ReadStmt(llvm::BitstreamCursor &Cursor) { case pch::EXPR_CXX_THROW: S = new (Context) CXXThrowExpr(Empty); break; - case pch::EXPR_CXX_DEFAULT_ARG: - S = new (Context) CXXDefaultArgExpr(Empty); + case pch::EXPR_CXX_DEFAULT_ARG: { + bool HasOtherExprStored = Record[PCHStmtReader::NumExprFields]; + if (HasOtherExprStored) { + Expr *SubExpr = ReadSubExpr(); + S = CXXDefaultArgExpr::Create(*Context, SourceLocation(), 0, SubExpr); + } else + S = new (Context) 
CXXDefaultArgExpr(Empty); break; + } case pch::EXPR_CXX_BIND_TEMPORARY: S = new (Context) CXXBindTemporaryExpr(Empty); break; + case pch::EXPR_CXX_BIND_REFERENCE: + S = new (Context) CXXBindReferenceExpr(Empty); + break; - case pch::EXPR_CXX_ZERO_INIT_VALUE: - S = new (Context) CXXZeroInitValueExpr(Empty); + case pch::EXPR_CXX_SCALAR_VALUE_INIT: + S = new (Context) CXXScalarValueInitExpr(Empty); break; case pch::EXPR_CXX_NEW: S = new (Context) CXXNewExpr(Empty); break; - + case pch::EXPR_CXX_DELETE: + S = new (Context) CXXDeleteExpr(Empty); + break; + case pch::EXPR_CXX_PSEUDO_DESTRUCTOR: + S = new (Context) CXXPseudoDestructorExpr(Empty); + break; case pch::EXPR_CXX_EXPR_WITH_TEMPORARIES: S = new (Context) CXXExprWithTemporaries(Empty); break; + + case pch::EXPR_CXX_DEPENDENT_SCOPE_MEMBER: + S = CXXDependentScopeMemberExpr::CreateEmpty(*Context, + /*NumTemplateArgs=*/Record[PCHStmtReader::NumExprFields]); + break; + + case pch::EXPR_CXX_DEPENDENT_SCOPE_DECL_REF: + S = DependentScopeDeclRefExpr::CreateEmpty(*Context, + /*NumTemplateArgs=*/Record[PCHStmtReader::NumExprFields]); + break; + + case pch::EXPR_CXX_UNRESOLVED_CONSTRUCT: + S = CXXUnresolvedConstructExpr::CreateEmpty(*Context, + /*NumArgs=*/Record[PCHStmtReader::NumExprFields]); + break; + + case pch::EXPR_CXX_UNRESOLVED_MEMBER: + S = UnresolvedMemberExpr::CreateEmpty(*Context, + /*NumTemplateArgs=*/Record[PCHStmtReader::NumExprFields]); + break; + + case pch::EXPR_CXX_UNRESOLVED_LOOKUP: + S = UnresolvedLookupExpr::CreateEmpty(*Context, + /*NumTemplateArgs=*/Record[PCHStmtReader::NumExprFields]); + break; + + case pch::EXPR_CXX_UNARY_TYPE_TRAIT: + S = new (Context) UnaryTypeTraitExpr(Empty); + break; } - + // We hit a STMT_STOP, so we're done with this expression. 
if (Finished) break; ++NumStatementsRead; - if (S) { - unsigned NumSubStmts = Reader.Visit(S); - while (NumSubStmts > 0) { - StmtStack.pop_back(); - --NumSubStmts; - } - } + if (S) + Reader.Visit(S); assert(Idx == Record.size() && "Invalid deserialization of statement"); StmtStack.push_back(S); } - assert(StmtStack.size() == 1 && "Extra expressions on stack!"); - return StmtStack.back(); + +#ifndef NDEBUG + assert(StmtStack.size() > PrevNumStmts && "Read too many sub stmts!"); + assert(StmtStack.size() == PrevNumStmts + 1 && "Extra expressions on stack!"); +#endif + + return StmtStack.pop_back_val(); } diff --git a/contrib/llvm/tools/clang/lib/Frontend/PCHWriter.cpp b/contrib/llvm/tools/clang/lib/Frontend/PCHWriter.cpp index 3d5b7d8..093c1e3 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/PCHWriter.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/PCHWriter.cpp @@ -20,6 +20,7 @@ #include "clang/AST/Expr.h" #include "clang/AST/Type.h" #include "clang/AST/TypeLocVisitor.h" +#include "clang/Frontend/PCHReader.h" #include "clang/Lex/MacroInfo.h" #include "clang/Lex/PreprocessingRecord.h" #include "clang/Lex/Preprocessor.h" @@ -61,9 +62,7 @@ namespace { #define TYPE(Class, Base) void Visit##Class##Type(const Class##Type *T); #define ABSTRACT_TYPE(Class, Base) -#define DEPENDENT_TYPE(Class, Base) #include "clang/AST/TypeNodes.def" - void VisitInjectedClassNameType(const InjectedClassNameType *T); }; } @@ -130,8 +129,7 @@ void PCHTypeWriter::VisitVariableArrayType(const VariableArrayType *T) { void PCHTypeWriter::VisitVectorType(const VectorType *T) { Writer.AddTypeRef(T->getElementType(), Record); Record.push_back(T->getNumElements()); - Record.push_back(T->isAltiVec()); - Record.push_back(T->isPixel()); + Record.push_back(T->getAltiVecSpecific()); Code = pch::TYPE_VECTOR; } @@ -169,16 +167,15 @@ void PCHTypeWriter::VisitFunctionProtoType(const FunctionProtoType *T) { Code = pch::TYPE_FUNCTION_PROTO; } -#if 0 -// For when we want it.... 
void PCHTypeWriter::VisitUnresolvedUsingType(const UnresolvedUsingType *T) { Writer.AddDeclRef(T->getDecl(), Record); Code = pch::TYPE_UNRESOLVED_USING; } -#endif void PCHTypeWriter::VisitTypedefType(const TypedefType *T) { Writer.AddDeclRef(T->getDecl(), Record); + assert(!T->isCanonicalUnqualified() && "Invalid typedef ?"); + Writer.AddTypeRef(T->getCanonicalTypeInternal(), Record); Code = pch::TYPE_TYPEDEF; } @@ -198,6 +195,7 @@ void PCHTypeWriter::VisitDecltypeType(const DecltypeType *T) { } void PCHTypeWriter::VisitTagType(const TagType *T) { + Record.push_back(T->isDependentType()); Writer.AddDeclRef(T->getDecl(), Record); assert(!T->isBeingDefined() && "Cannot serialize in the middle of a type definition"); @@ -224,15 +222,70 @@ PCHTypeWriter::VisitSubstTemplateTypeParmType( void PCHTypeWriter::VisitTemplateSpecializationType( const TemplateSpecializationType *T) { + Record.push_back(T->isDependentType()); + Writer.AddTemplateName(T->getTemplateName(), Record); + Record.push_back(T->getNumArgs()); + for (TemplateSpecializationType::iterator ArgI = T->begin(), ArgE = T->end(); + ArgI != ArgE; ++ArgI) + Writer.AddTemplateArgument(*ArgI, Record); + Writer.AddTypeRef(T->isCanonicalUnqualified() ? 
QualType() + : T->getCanonicalTypeInternal(), + Record); + Code = pch::TYPE_TEMPLATE_SPECIALIZATION; +} + +void +PCHTypeWriter::VisitDependentSizedArrayType(const DependentSizedArrayType *T) { + VisitArrayType(T); + Writer.AddStmt(T->getSizeExpr()); + Writer.AddSourceRange(T->getBracketsRange(), Record); + Code = pch::TYPE_DEPENDENT_SIZED_ARRAY; +} + +void +PCHTypeWriter::VisitDependentSizedExtVectorType( + const DependentSizedExtVectorType *T) { // FIXME: Serialize this type (C++ only) - assert(false && "Cannot serialize template specialization types"); + assert(false && "Cannot serialize dependent sized extended vector types"); +} + +void +PCHTypeWriter::VisitTemplateTypeParmType(const TemplateTypeParmType *T) { + Record.push_back(T->getDepth()); + Record.push_back(T->getIndex()); + Record.push_back(T->isParameterPack()); + Writer.AddIdentifierRef(T->getName(), Record); + Code = pch::TYPE_TEMPLATE_TYPE_PARM; +} + +void +PCHTypeWriter::VisitDependentNameType(const DependentNameType *T) { + Record.push_back(T->getKeyword()); + Writer.AddNestedNameSpecifier(T->getQualifier(), Record); + Writer.AddIdentifierRef(T->getIdentifier(), Record); + Writer.AddTypeRef(T->isCanonicalUnqualified() ? 
QualType() + : T->getCanonicalTypeInternal(), + Record); + Code = pch::TYPE_DEPENDENT_NAME; +} + +void +PCHTypeWriter::VisitDependentTemplateSpecializationType( + const DependentTemplateSpecializationType *T) { + Record.push_back(T->getKeyword()); + Writer.AddNestedNameSpecifier(T->getQualifier(), Record); + Writer.AddIdentifierRef(T->getIdentifier(), Record); + Record.push_back(T->getNumArgs()); + for (DependentTemplateSpecializationType::iterator + I = T->begin(), E = T->end(); I != E; ++I) + Writer.AddTemplateArgument(*I, Record); + Code = pch::TYPE_DEPENDENT_TEMPLATE_SPECIALIZATION; } void PCHTypeWriter::VisitElaboratedType(const ElaboratedType *T) { - Writer.AddTypeRef(T->getNamedType(), Record); Record.push_back(T->getKeyword()); - // FIXME: Serialize the qualifier (C++ only) - assert(T->getQualifier() == 0 && "Cannot serialize qualified name types"); + Writer.AddNestedNameSpecifier(T->getQualifier(), Record); + Writer.AddTypeRef(T->getNamedType(), Record); Code = pch::TYPE_ELABORATED; } @@ -394,7 +447,8 @@ void TypeLocWriter::VisitTemplateSpecializationTypeLoc( Writer.AddSourceLocation(TL.getLAngleLoc(), Record); Writer.AddSourceLocation(TL.getRAngleLoc(), Record); for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i) - Writer.AddTemplateArgumentLoc(TL.getArgLoc(i), Record); + Writer.AddTemplateArgumentLocInfo(TL.getArgLoc(i).getArgument().getKind(), + TL.getArgLoc(i).getLocInfo(), Record); } void TypeLocWriter::VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) { Writer.AddSourceLocation(TL.getKeywordLoc(), Record); @@ -408,6 +462,17 @@ void TypeLocWriter::VisitDependentNameTypeLoc(DependentNameTypeLoc TL) { Writer.AddSourceRange(TL.getQualifierRange(), Record); Writer.AddSourceLocation(TL.getNameLoc(), Record); } +void TypeLocWriter::VisitDependentTemplateSpecializationTypeLoc( + DependentTemplateSpecializationTypeLoc TL) { + Writer.AddSourceLocation(TL.getKeywordLoc(), Record); + Writer.AddSourceRange(TL.getQualifierRange(), Record); + 
Writer.AddSourceLocation(TL.getNameLoc(), Record); + Writer.AddSourceLocation(TL.getLAngleLoc(), Record); + Writer.AddSourceLocation(TL.getRAngleLoc(), Record); + for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I) + Writer.AddTemplateArgumentLocInfo(TL.getArgLoc(I).getArgument().getKind(), + TL.getArgLoc(I).getLocInfo(), Record); +} void TypeLocWriter::VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) { Writer.AddSourceLocation(TL.getNameLoc(), Record); } @@ -564,6 +629,7 @@ void PCHWriter::WriteBlockInfoBlock() { RECORD(VERSION_CONTROL_BRANCH_REVISION); RECORD(UNUSED_STATIC_FUNCS); RECORD(MACRO_DEFINITION_OFFSETS); + RECORD(CHAINED_METADATA); // SourceManager Block. BLOCK(SOURCE_MANAGER_BLOCK); @@ -683,24 +749,27 @@ void PCHWriter::WriteMetadata(ASTContext &Context, const char *isysroot) { // Metadata const TargetInfo &Target = Context.Target; BitCodeAbbrev *MetaAbbrev = new BitCodeAbbrev(); - MetaAbbrev->Add(BitCodeAbbrevOp(pch::METADATA)); + MetaAbbrev->Add(BitCodeAbbrevOp( + Chain ? pch::CHAINED_METADATA : pch::METADATA)); MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // PCH major MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // PCH minor MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang major MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang minor MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Relocatable - MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Target triple + // Target triple or chained PCH name + MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); unsigned MetaAbbrevCode = Stream.EmitAbbrev(MetaAbbrev); RecordData Record; - Record.push_back(pch::METADATA); + Record.push_back(Chain ? 
pch::CHAINED_METADATA : pch::METADATA); Record.push_back(pch::VERSION_MAJOR); Record.push_back(pch::VERSION_MINOR); Record.push_back(CLANG_VERSION_MAJOR); Record.push_back(CLANG_VERSION_MINOR); Record.push_back(isysroot != 0); - const std::string &TripleStr = Target.getTriple().getTriple(); - Stream.EmitRecordWithBlob(MetaAbbrevCode, Record, TripleStr); + // FIXME: This writes the absolute path for chained headers. + const std::string &BlobStr = Chain ? Chain->getFileName() : Target.getTriple().getTriple(); + Stream.EmitRecordWithBlob(MetaAbbrevCode, Record, BlobStr); // Original file name SourceManager &SM = Context.getSourceManager(); @@ -779,11 +848,8 @@ void PCHWriter::WriteLanguageOptions(const LangOptions &LangOpts) { Record.push_back(LangOpts.MathErrno); // Math functions must respect errno // (modulo the platform support). - Record.push_back(LangOpts.OverflowChecking); // Extension to call a handler function when - // signed integer arithmetic overflows. - - Record.push_back(LangOpts.HeinousExtensions); // Extensions that we really don't like and - // may be ripped out at any time. + Record.push_back(LangOpts.getSignedOverflowBehavior()); + Record.push_back(LangOpts.HeinousExtensions); Record.push_back(LangOpts.Optimize); // Whether __OPTIMIZE__ should be defined. Record.push_back(LangOpts.OptimizeSize); // Whether __OPTIMIZE_SIZE__ should be @@ -807,6 +873,7 @@ void PCHWriter::WriteLanguageOptions(const LangOptions &LangOpts) { Record.push_back(LangOpts.OpenCL); Record.push_back(LangOpts.CatchUndefined); Record.push_back(LangOpts.ElideConstructors); + Record.push_back(LangOpts.SpellChecking); Stream.EmitRecord(pch::LANGUAGE_OPTIONS, Record); } @@ -866,8 +933,7 @@ public: } // end anonymous namespace /// \brief Write the stat() system call cache to the PCH file. 
-void PCHWriter::WriteStatCache(MemorizeStatCalls &StatCalls, - const char *isysroot) { +void PCHWriter::WriteStatCache(MemorizeStatCalls &StatCalls) { // Build the on-disk hash table containing information about every // stat() call. OnDiskChainedHashTableGenerator<PCHStatCacheTrait> Generator; @@ -876,7 +942,6 @@ void PCHWriter::WriteStatCache(MemorizeStatCalls &StatCalls, StatEnd = StatCalls.end(); Stat != StatEnd; ++Stat, ++NumStatEntries) { const char *Filename = Stat->first(); - Filename = adjustFilenameForRelocatablePCH(Filename, isysroot); Generator.insert(Filename, Stat->second); } @@ -1347,16 +1412,7 @@ void PCHWriter::WriteType(QualType T) { #define TYPE(Class, Base) \ case Type::Class: W.Visit##Class##Type(cast<Class##Type>(T)); break; #define ABSTRACT_TYPE(Class, Base) -#define DEPENDENT_TYPE(Class, Base) #include "clang/AST/TypeNodes.def" - - // For all of the dependent type nodes (which only occur in C++ - // templates), produce an error. -#define TYPE(Class, Base) -#define DEPENDENT_TYPE(Class, Base) case Type::Class: -#include "clang/AST/TypeNodes.def" - assert(false && "Cannot serialize dependent type nodes"); - break; } } @@ -1402,11 +1458,16 @@ uint64_t PCHWriter::WriteDeclContextVisibleBlock(ASTContext &Context, if (DC->getPrimaryContext() != DC) return 0; - // Since there is no name lookup into functions or methods, and we - // perform name lookup for the translation unit via the - // IdentifierInfo chains, don't bother to build a - // visible-declarations table for these entities. - if (DC->isFunctionOrMethod() || DC->isTranslationUnit()) + // Since there is no name lookup into functions or methods, don't bother to + // build a visible-declarations table for these entities. + if (DC->isFunctionOrMethod()) + return 0; + + // If not in C++, we perform name lookup for the translation unit via the + // IdentifierInfo chains, don't bother to build a visible-declarations table. 
+ // FIXME: In C++ we need the visible declarations in order to "see" the + // friend declarations, is there a way to do this without writing the table ? + if (DC->isTranslationUnit() && !Context.getLangOptions().CPlusPlus) return 0; // Force the DeclContext to build a its name-lookup table. @@ -1832,66 +1893,66 @@ void PCHWriter::WriteAttributeRecord(const Attr *Attr) { default: assert(0 && "Does not support PCH writing for this attribute yet!"); break; - case Attr::Alias: + case attr::Alias: AddString(cast<AliasAttr>(Attr)->getAliasee(), Record); break; - case Attr::AlignMac68k: + case attr::AlignMac68k: break; - case Attr::Aligned: + case attr::Aligned: Record.push_back(cast<AlignedAttr>(Attr)->getAlignment()); break; - case Attr::AlwaysInline: + case attr::AlwaysInline: break; - case Attr::AnalyzerNoReturn: + case attr::AnalyzerNoReturn: break; - case Attr::Annotate: + case attr::Annotate: AddString(cast<AnnotateAttr>(Attr)->getAnnotation(), Record); break; - case Attr::AsmLabel: + case attr::AsmLabel: AddString(cast<AsmLabelAttr>(Attr)->getLabel(), Record); break; - case Attr::BaseCheck: + case attr::BaseCheck: break; - case Attr::Blocks: + case attr::Blocks: Record.push_back(cast<BlocksAttr>(Attr)->getType()); // FIXME: stable break; - case Attr::CDecl: + case attr::CDecl: break; - case Attr::Cleanup: + case attr::Cleanup: AddDeclRef(cast<CleanupAttr>(Attr)->getFunctionDecl(), Record); break; - case Attr::Const: + case attr::Const: break; - case Attr::Constructor: + case attr::Constructor: Record.push_back(cast<ConstructorAttr>(Attr)->getPriority()); break; - case Attr::DLLExport: - case Attr::DLLImport: - case Attr::Deprecated: + case attr::DLLExport: + case attr::DLLImport: + case attr::Deprecated: break; - case Attr::Destructor: + case attr::Destructor: Record.push_back(cast<DestructorAttr>(Attr)->getPriority()); break; - case Attr::FastCall: - case Attr::Final: + case attr::FastCall: + case attr::Final: break; - case Attr::Format: { + case attr::Format: { 
const FormatAttr *Format = cast<FormatAttr>(Attr); AddString(Format->getType(), Record); Record.push_back(Format->getFormatIdx()); @@ -1899,93 +1960,93 @@ void PCHWriter::WriteAttributeRecord(const Attr *Attr) { break; } - case Attr::FormatArg: { + case attr::FormatArg: { const FormatArgAttr *Format = cast<FormatArgAttr>(Attr); Record.push_back(Format->getFormatIdx()); break; } - case Attr::Sentinel : { + case attr::Sentinel : { const SentinelAttr *Sentinel = cast<SentinelAttr>(Attr); Record.push_back(Sentinel->getSentinel()); Record.push_back(Sentinel->getNullPos()); break; } - case Attr::GNUInline: - case Attr::Hiding: - case Attr::IBActionKind: - case Attr::IBOutletKind: - case Attr::Malloc: - case Attr::NoDebug: - case Attr::NoInline: - case Attr::NoReturn: - case Attr::NoThrow: + case attr::GNUInline: + case attr::Hiding: + case attr::IBAction: + case attr::IBOutlet: + case attr::Malloc: + case attr::NoDebug: + case attr::NoInline: + case attr::NoReturn: + case attr::NoThrow: break; - case Attr::IBOutletCollectionKind: { + case attr::IBOutletCollection: { const IBOutletCollectionAttr *ICA = cast<IBOutletCollectionAttr>(Attr); AddDeclRef(ICA->getClass(), Record); break; } - case Attr::NonNull: { + case attr::NonNull: { const NonNullAttr *NonNull = cast<NonNullAttr>(Attr); Record.push_back(NonNull->size()); Record.insert(Record.end(), NonNull->begin(), NonNull->end()); break; } - case Attr::CFReturnsNotRetained: - case Attr::CFReturnsRetained: - case Attr::NSReturnsNotRetained: - case Attr::NSReturnsRetained: - case Attr::ObjCException: - case Attr::ObjCNSObject: - case Attr::Overloadable: - case Attr::Override: + case attr::CFReturnsNotRetained: + case attr::CFReturnsRetained: + case attr::NSReturnsNotRetained: + case attr::NSReturnsRetained: + case attr::ObjCException: + case attr::ObjCNSObject: + case attr::Overloadable: + case attr::Override: break; - case Attr::MaxFieldAlignment: + case attr::MaxFieldAlignment: 
Record.push_back(cast<MaxFieldAlignmentAttr>(Attr)->getAlignment()); break; - case Attr::Packed: + case attr::Packed: break; - case Attr::Pure: + case attr::Pure: break; - case Attr::Regparm: + case attr::Regparm: Record.push_back(cast<RegparmAttr>(Attr)->getNumParams()); break; - case Attr::ReqdWorkGroupSize: + case attr::ReqdWorkGroupSize: Record.push_back(cast<ReqdWorkGroupSizeAttr>(Attr)->getXDim()); Record.push_back(cast<ReqdWorkGroupSizeAttr>(Attr)->getYDim()); Record.push_back(cast<ReqdWorkGroupSizeAttr>(Attr)->getZDim()); break; - case Attr::Section: + case attr::Section: AddString(cast<SectionAttr>(Attr)->getName(), Record); break; - case Attr::StdCall: - case Attr::TransparentUnion: - case Attr::Unavailable: - case Attr::Unused: - case Attr::Used: + case attr::StdCall: + case attr::TransparentUnion: + case attr::Unavailable: + case attr::Unused: + case attr::Used: break; - case Attr::Visibility: + case attr::Visibility: // FIXME: stable encoding Record.push_back(cast<VisibilityAttr>(Attr)->getVisibility()); break; - case Attr::WarnUnusedResult: - case Attr::Weak: - case Attr::WeakRef: - case Attr::WeakImport: + case attr::WarnUnusedResult: + case attr::Weak: + case attr::WeakRef: + case attr::WeakImport: break; } } @@ -2012,18 +2073,16 @@ void PCHWriter::SetSelectorOffset(Selector Sel, uint32_t Offset) { SelectorOffsets[ID - 1] = Offset; } -PCHWriter::PCHWriter(llvm::BitstreamWriter &Stream) - : Stream(Stream), NextTypeID(pch::NUM_PREDEF_TYPE_IDS), - NumStatements(0), NumMacros(0), NumLexicalDeclContexts(0), - NumVisibleDeclContexts(0) { } +PCHWriter::PCHWriter(llvm::BitstreamWriter &Stream, PCHReader *Chain) + : Stream(Stream), Chain(Chain), NextTypeID(pch::NUM_PREDEF_TYPE_IDS), + CollectedStmts(&StmtsToEmit), NumStatements(0), NumMacros(0), + NumLexicalDeclContexts(0), NumVisibleDeclContexts(0) { + if (Chain) + Chain->setDeserializationListener(this); +} void PCHWriter::WritePCH(Sema &SemaRef, MemorizeStatCalls *StatCalls, const char *isysroot) { - 
using namespace llvm; - - ASTContext &Context = SemaRef.Context; - Preprocessor &PP = SemaRef.PP; - // Emit the file header. Stream.Emit((unsigned)'C', 8); Stream.Emit((unsigned)'P', 8); @@ -2032,6 +2091,19 @@ void PCHWriter::WritePCH(Sema &SemaRef, MemorizeStatCalls *StatCalls, WriteBlockInfoBlock(); + if (Chain) + WritePCHChain(SemaRef, StatCalls, isysroot); + else + WritePCHCore(SemaRef, StatCalls, isysroot); +} + +void PCHWriter::WritePCHCore(Sema &SemaRef, MemorizeStatCalls *StatCalls, + const char *isysroot) { + using namespace llvm; + + ASTContext &Context = SemaRef.Context; + Preprocessor &PP = SemaRef.PP; + // The translation unit is the first declaration we'll emit. DeclIDs[Context.getTranslationUnitDecl()] = 1; DeclTypesToEmit.push(Context.getTranslationUnitDecl()); @@ -2077,13 +2149,27 @@ void PCHWriter::WritePCH(Sema &SemaRef, MemorizeStatCalls *StatCalls, for (unsigned I = 0, N = SemaRef.ExtVectorDecls.size(); I != N; ++I) AddDeclRef(SemaRef.ExtVectorDecls[I], ExtVectorDecls); + // Build a record containing all of the VTable uses information. + RecordData VTableUses; + VTableUses.push_back(SemaRef.VTableUses.size()); + for (unsigned I = 0, N = SemaRef.VTableUses.size(); I != N; ++I) { + AddDeclRef(SemaRef.VTableUses[I].first, VTableUses); + AddSourceLocation(SemaRef.VTableUses[I].second, VTableUses); + VTableUses.push_back(SemaRef.VTablesUsed[SemaRef.VTableUses[I].first]); + } + + // Build a record containing all of dynamic classes declarations. + RecordData DynamicClasses; + for (unsigned I = 0, N = SemaRef.DynamicClasses.size(); I != N; ++I) + AddDeclRef(SemaRef.DynamicClasses[I], DynamicClasses); + // Write the remaining PCH contents. 
RecordData Record; Stream.EnterSubblock(pch::PCH_BLOCK_ID, 5); WriteMetadata(Context, isysroot); WriteLanguageOptions(Context.getLangOptions()); if (StatCalls && !isysroot) - WriteStatCache(*StatCalls, isysroot); + WriteStatCache(*StatCalls); WriteSourceManagerBlock(Context.getSourceManager(), PP, isysroot); // Write the record of special types. Record.clear(); @@ -2104,6 +2190,7 @@ void PCHWriter::WritePCH(Sema &SemaRef, MemorizeStatCalls *StatCalls, AddTypeRef(Context.getRawBlockdescriptorExtendedType(), Record); AddTypeRef(Context.ObjCSelRedefinitionType, Record); AddTypeRef(Context.getRawNSConstantStringType(), Record); + Record.push_back(Context.isInt128Installed()); Stream.EmitRecord(pch::SPECIAL_TYPES, Record); // Keep writing types and declarations until all types and @@ -2171,6 +2258,14 @@ void PCHWriter::WritePCH(Sema &SemaRef, MemorizeStatCalls *StatCalls, if (!ExtVectorDecls.empty()) Stream.EmitRecord(pch::EXT_VECTOR_DECLS, ExtVectorDecls); + // Write the record containing VTable uses information. + if (!VTableUses.empty()) + Stream.EmitRecord(pch::VTABLE_USES, VTableUses); + + // Write the record containing dynamic classes declarations. + if (!DynamicClasses.empty()) + Stream.EmitRecord(pch::DYNAMIC_CLASSES, DynamicClasses); + // Some simple statistics Record.clear(); Record.push_back(NumStatements); @@ -2181,6 +2276,64 @@ void PCHWriter::WritePCH(Sema &SemaRef, MemorizeStatCalls *StatCalls, Stream.ExitBlock(); } +void PCHWriter::WritePCHChain(Sema &SemaRef, MemorizeStatCalls *StatCalls, + const char *isysroot) { + using namespace llvm; + + ASTContext &Context = SemaRef.Context; + Preprocessor &PP = SemaRef.PP; + (void)PP; + + RecordData Record; + Stream.EnterSubblock(pch::PCH_BLOCK_ID, 5); + WriteMetadata(Context, isysroot); + // FIXME: StatCache + // FIXME: Source manager block + + // The special types are in the chained PCH. + + // We don't start with the translation unit, but with its decls that + // don't come from the other PCH. 
+ const TranslationUnitDecl *TU = Context.getTranslationUnitDecl(); + // FIXME: We don't want to iterate over everything here, because it needlessly + // deserializes the entire original PCH. Instead we only want to iterate over + // the stuff that's already there. + // All in good time, though. + for (DeclContext::decl_iterator I = TU->decls_begin(), E = TU->decls_end(); + I != E; ++I) { + if ((*I)->getPCHLevel() == 0) { + (*I)->dump(); + DeclTypesToEmit.push(*I); + } + } + + Stream.EnterSubblock(pch::DECLTYPES_BLOCK_ID, 3); + WriteDeclsBlockAbbrevs(); + while (!DeclTypesToEmit.empty()) { + DeclOrType DOT = DeclTypesToEmit.front(); + DeclTypesToEmit.pop(); + if (DOT.isType()) + WriteType(DOT.getType()); + else + WriteDecl(Context, DOT.getDecl()); + } + Stream.ExitBlock(); + + // FIXME: Preprocessor + // FIXME: Method pool + // FIXME: Identifier table + // FIXME: Type offsets + // FIXME: Declaration offsets + // FIXME: External unnamed definitions + // FIXME: Tentative definitions + // FIXME: Unused static functions + // FIXME: Locally-scoped external definitions + // FIXME: ext_vector type names + // FIXME: Dynamic classes declarations + // FIXME: Statistics + Stream.ExitBlock(); +} + void PCHWriter::AddSourceLocation(SourceLocation Loc, RecordData &Record) { Record.push_back(Loc.getRawEncoding()); } @@ -2249,20 +2402,19 @@ void PCHWriter::AddCXXTemporary(const CXXTemporary *Temp, RecordData &Record) { AddDeclRef(Temp->getDestructor(), Record); } -void PCHWriter::AddTemplateArgumentLoc(const TemplateArgumentLoc &Arg, - RecordData &Record) { - switch (Arg.getArgument().getKind()) { +void PCHWriter::AddTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind, + const TemplateArgumentLocInfo &Arg, + RecordData &Record) { + switch (Kind) { case TemplateArgument::Expression: - AddStmt(Arg.getLocInfo().getAsExpr()); + AddStmt(Arg.getAsExpr()); break; case TemplateArgument::Type: - AddTypeSourceInfo(Arg.getLocInfo().getAsTypeSourceInfo(), Record); + 
AddTypeSourceInfo(Arg.getAsTypeSourceInfo(), Record); break; case TemplateArgument::Template: - Record.push_back( - Arg.getTemplateQualifierRange().getBegin().getRawEncoding()); - Record.push_back(Arg.getTemplateQualifierRange().getEnd().getRawEncoding()); - Record.push_back(Arg.getTemplateNameLoc().getRawEncoding()); + AddSourceRange(Arg.getTemplateQualifierRange(), Record); + AddSourceLocation(Arg.getTemplateNameLoc(), Record); break; case TemplateArgument::Null: case TemplateArgument::Integral: @@ -2272,6 +2424,21 @@ void PCHWriter::AddTemplateArgumentLoc(const TemplateArgumentLoc &Arg, } } +void PCHWriter::AddTemplateArgumentLoc(const TemplateArgumentLoc &Arg, + RecordData &Record) { + AddTemplateArgument(Arg.getArgument(), Record); + + if (Arg.getArgument().getKind() == TemplateArgument::Expression) { + bool InfoHasSameExpr + = Arg.getArgument().getAsExpr() == Arg.getLocInfo().getAsExpr(); + Record.push_back(InfoHasSameExpr); + if (InfoHasSameExpr) + return; // Avoid storing the same expr twice. 
+ } + AddTemplateArgumentLocInfo(Arg.getArgument().getKind(), Arg.getLocInfo(), + Record); +} + void PCHWriter::AddTypeSourceInfo(TypeSourceInfo *TInfo, RecordData &Record) { if (TInfo == 0) { AddTypeRef(QualType(), Record); @@ -2459,3 +2626,123 @@ void PCHWriter::AddNestedNameSpecifier(NestedNameSpecifier *NNS, } } } + +void PCHWriter::AddTemplateName(TemplateName Name, RecordData &Record) { + TemplateName::NameKind Kind = Name.getKind(); + Record.push_back(Kind); + switch (Kind) { + case TemplateName::Template: + AddDeclRef(Name.getAsTemplateDecl(), Record); + break; + + case TemplateName::OverloadedTemplate: { + OverloadedTemplateStorage *OvT = Name.getAsOverloadedTemplate(); + Record.push_back(OvT->size()); + for (OverloadedTemplateStorage::iterator I = OvT->begin(), E = OvT->end(); + I != E; ++I) + AddDeclRef(*I, Record); + break; + } + + case TemplateName::QualifiedTemplate: { + QualifiedTemplateName *QualT = Name.getAsQualifiedTemplateName(); + AddNestedNameSpecifier(QualT->getQualifier(), Record); + Record.push_back(QualT->hasTemplateKeyword()); + AddDeclRef(QualT->getTemplateDecl(), Record); + break; + } + + case TemplateName::DependentTemplate: { + DependentTemplateName *DepT = Name.getAsDependentTemplateName(); + AddNestedNameSpecifier(DepT->getQualifier(), Record); + Record.push_back(DepT->isIdentifier()); + if (DepT->isIdentifier()) + AddIdentifierRef(DepT->getIdentifier(), Record); + else + Record.push_back(DepT->getOperator()); + break; + } + } +} + +void PCHWriter::AddTemplateArgument(const TemplateArgument &Arg, + RecordData &Record) { + Record.push_back(Arg.getKind()); + switch (Arg.getKind()) { + case TemplateArgument::Null: + break; + case TemplateArgument::Type: + AddTypeRef(Arg.getAsType(), Record); + break; + case TemplateArgument::Declaration: + AddDeclRef(Arg.getAsDecl(), Record); + break; + case TemplateArgument::Integral: + AddAPSInt(*Arg.getAsIntegral(), Record); + AddTypeRef(Arg.getIntegralType(), Record); + break; + case 
TemplateArgument::Template: + AddTemplateName(Arg.getAsTemplate(), Record); + break; + case TemplateArgument::Expression: + AddStmt(Arg.getAsExpr()); + break; + case TemplateArgument::Pack: + Record.push_back(Arg.pack_size()); + for (TemplateArgument::pack_iterator I=Arg.pack_begin(), E=Arg.pack_end(); + I != E; ++I) + AddTemplateArgument(*I, Record); + break; + } +} + +void +PCHWriter::AddTemplateParameterList(const TemplateParameterList *TemplateParams, + RecordData &Record) { + assert(TemplateParams && "No TemplateParams!"); + AddSourceLocation(TemplateParams->getTemplateLoc(), Record); + AddSourceLocation(TemplateParams->getLAngleLoc(), Record); + AddSourceLocation(TemplateParams->getRAngleLoc(), Record); + Record.push_back(TemplateParams->size()); + for (TemplateParameterList::const_iterator + P = TemplateParams->begin(), PEnd = TemplateParams->end(); + P != PEnd; ++P) + AddDeclRef(*P, Record); +} + +/// \brief Emit a template argument list. +void +PCHWriter::AddTemplateArgumentList(const TemplateArgumentList *TemplateArgs, + RecordData &Record) { + assert(TemplateArgs && "No TemplateArgs!"); + Record.push_back(TemplateArgs->flat_size()); + for (int i=0, e = TemplateArgs->flat_size(); i != e; ++i) + AddTemplateArgument(TemplateArgs->get(i), Record); +} + + +void +PCHWriter::AddUnresolvedSet(const UnresolvedSetImpl &Set, RecordData &Record) { + Record.push_back(Set.size()); + for (UnresolvedSetImpl::const_iterator + I = Set.begin(), E = Set.end(); I != E; ++I) { + AddDeclRef(I.getDecl(), Record); + Record.push_back(I.getAccess()); + } +} + +void PCHWriter::AddCXXBaseSpecifier(const CXXBaseSpecifier &Base, + RecordData &Record) { + Record.push_back(Base.isVirtual()); + Record.push_back(Base.isBaseOfClass()); + Record.push_back(Base.getAccessSpecifierAsWritten()); + AddTypeRef(Base.getType(), Record); + AddSourceRange(Base.getSourceRange(), Record); +} + +void PCHWriter::TypeRead(pch::TypeID ID, QualType T) { +} + +void PCHWriter::DeclRead(pch::DeclID ID, const 
Decl *D) { +} + diff --git a/contrib/llvm/tools/clang/lib/Frontend/PCHWriterDecl.cpp b/contrib/llvm/tools/clang/lib/Frontend/PCHWriterDecl.cpp index cc58e8e..bc4452e 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/PCHWriterDecl.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/PCHWriterDecl.cpp @@ -25,7 +25,7 @@ using namespace clang; // Declaration serialization //===----------------------------------------------------------------------===// -namespace { +namespace clang { class PCHDeclWriter : public DeclVisitor<PCHDeclWriter, void> { PCHWriter &Writer; @@ -40,6 +40,8 @@ namespace { PCHWriter::RecordData &Record) : Writer(Writer), Context(Context), Record(Record) { } + + void Visit(Decl *D); void VisitDecl(Decl *D); void VisitTranslationUnitDecl(TranslationUnitDecl *D); @@ -49,7 +51,7 @@ namespace { void VisitNamespaceAliasDecl(NamespaceAliasDecl *D); void VisitTypeDecl(TypeDecl *D); void VisitTypedefDecl(TypedefDecl *D); - void VisitUnresolvedUsingTypename(UnresolvedUsingTypenameDecl *D); + void VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D); void VisitTagDecl(TagDecl *D); void VisitEnumDecl(EnumDecl *D); void VisitRecordDecl(RecordDecl *D); @@ -61,7 +63,7 @@ namespace { void VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D); void VisitValueDecl(ValueDecl *D); void VisitEnumConstantDecl(EnumConstantDecl *D); - void VisitUnresolvedUsingValue(UnresolvedUsingValueDecl *D); + void VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D); void VisitDeclaratorDecl(DeclaratorDecl *D); void VisitFunctionDecl(FunctionDecl *D); void VisitCXXMethodDecl(CXXMethodDecl *D); @@ -75,12 +77,14 @@ namespace { void VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D); void VisitTemplateDecl(TemplateDecl *D); void VisitClassTemplateDecl(ClassTemplateDecl *D); - void visitFunctionTemplateDecl(FunctionTemplateDecl *D); + void VisitFunctionTemplateDecl(FunctionTemplateDecl *D); void VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D); - void 
VisitUsing(UsingDecl *D); - void VisitUsingShadow(UsingShadowDecl *D); + void VisitUsingDecl(UsingDecl *D); + void VisitUsingShadowDecl(UsingShadowDecl *D); void VisitLinkageSpecDecl(LinkageSpecDecl *D); void VisitFileScopeAsmDecl(FileScopeAsmDecl *D); + void VisitAccessSpecDecl(AccessSpecDecl *D); + void VisitFriendDecl(FriendDecl *D); void VisitFriendTemplateDecl(FriendTemplateDecl *D); void VisitStaticAssertDecl(StaticAssertDecl *D); void VisitBlockDecl(BlockDecl *D); @@ -89,7 +93,7 @@ namespace { uint64_t VisibleOffset); - // FIXME: Put in the same order is DeclNodes.def? + // FIXME: Put in the same order is DeclNodes.td? void VisitObjCMethodDecl(ObjCMethodDecl *D); void VisitObjCContainerDecl(ObjCContainerDecl *D); void VisitObjCInterfaceDecl(ObjCInterfaceDecl *D); @@ -108,6 +112,19 @@ namespace { }; } +void PCHDeclWriter::Visit(Decl *D) { + DeclVisitor<PCHDeclWriter>::Visit(D); + + // Handle FunctionDecl's body here and write it after all other Stmts/Exprs + // have been written. We want it last because we will not read it back when + // retrieving it from the PCH, we'll just lazily set the offset. 
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { + Record.push_back(FD->isThisDeclarationADefinition()); + if (FD->isThisDeclarationADefinition()) + Writer.AddStmt(FD->getBody()); + } +} + void PCHDeclWriter::VisitDecl(Decl *D) { Writer.AddDeclRef(cast_or_null<Decl>(D->getDeclContext()), Record); Writer.AddDeclRef(cast_or_null<Decl>(D->getLexicalDeclContext()), Record); @@ -115,7 +132,7 @@ void PCHDeclWriter::VisitDecl(Decl *D) { Record.push_back(D->isInvalidDecl()); Record.push_back(D->hasAttrs()); Record.push_back(D->isImplicit()); - Record.push_back(D->isUsed()); + Record.push_back(D->isUsed(false)); Record.push_back(D->getAccess()); Record.push_back(D->getPCHLevel()); } @@ -144,6 +161,7 @@ void PCHDeclWriter::VisitTypedefDecl(TypedefDecl *D) { void PCHDeclWriter::VisitTagDecl(TagDecl *D) { VisitTypeDecl(D); + Record.push_back(D->getIdentifierNamespace()); Writer.AddDeclRef(D->getPreviousDeclaration(), Record); Record.push_back((unsigned)D->getTagKind()); // FIXME: stable encoding Record.push_back(D->isDefinition()); @@ -160,7 +178,7 @@ void PCHDeclWriter::VisitEnumDecl(EnumDecl *D) { Writer.AddTypeRef(D->getPromotionType(), Record); Record.push_back(D->getNumPositiveBits()); Record.push_back(D->getNumNegativeBits()); - // FIXME: C++ InstantiatedFrom + Writer.AddDeclRef(D->getInstantiatedFromMemberEnum(), Record); Code = pch::DECL_ENUM; } @@ -195,9 +213,70 @@ void PCHDeclWriter::VisitDeclaratorDecl(DeclaratorDecl *D) { void PCHDeclWriter::VisitFunctionDecl(FunctionDecl *D) { VisitDeclaratorDecl(D); - Record.push_back(D->isThisDeclarationADefinition()); - if (D->isThisDeclarationADefinition()) - Writer.AddStmt(D->getBody()); + Record.push_back(D->getIdentifierNamespace()); + Record.push_back(D->getTemplatedKind()); + switch (D->getTemplatedKind()) { + default: assert(false && "Unhandled TemplatedKind!"); + break; + case FunctionDecl::TK_NonTemplate: + break; + case FunctionDecl::TK_FunctionTemplate: + Writer.AddDeclRef(D->getDescribedFunctionTemplate(), 
Record); + break; + case FunctionDecl::TK_MemberSpecialization: { + MemberSpecializationInfo *MemberInfo = D->getMemberSpecializationInfo(); + Writer.AddDeclRef(MemberInfo->getInstantiatedFrom(), Record); + Record.push_back(MemberInfo->getTemplateSpecializationKind()); + Writer.AddSourceLocation(MemberInfo->getPointOfInstantiation(), Record); + break; + } + case FunctionDecl::TK_FunctionTemplateSpecialization: { + FunctionTemplateSpecializationInfo * + FTSInfo = D->getTemplateSpecializationInfo(); + // We want it canonical to guarantee that it has a Common*. + Writer.AddDeclRef(FTSInfo->getTemplate()->getCanonicalDecl(), Record); + Record.push_back(FTSInfo->getTemplateSpecializationKind()); + + // Template arguments. + Writer.AddTemplateArgumentList(FTSInfo->TemplateArguments, Record); + + // Template args as written. + Record.push_back(FTSInfo->TemplateArgumentsAsWritten != 0); + if (FTSInfo->TemplateArgumentsAsWritten) { + Record.push_back(FTSInfo->TemplateArgumentsAsWritten->size()); + for (int i=0, e = FTSInfo->TemplateArgumentsAsWritten->size(); i!=e; ++i) + Writer.AddTemplateArgumentLoc((*FTSInfo->TemplateArgumentsAsWritten)[i], + Record); + Writer.AddSourceLocation(FTSInfo->TemplateArgumentsAsWritten->getLAngleLoc(), + Record); + Writer.AddSourceLocation(FTSInfo->TemplateArgumentsAsWritten->getRAngleLoc(), + Record); + } + + Writer.AddSourceLocation(FTSInfo->getPointOfInstantiation(), Record); + break; + } + case FunctionDecl::TK_DependentFunctionTemplateSpecialization: { + DependentFunctionTemplateSpecializationInfo * + DFTSInfo = D->getDependentSpecializationInfo(); + + // Templates. + Record.push_back(DFTSInfo->getNumTemplates()); + for (int i=0, e = DFTSInfo->getNumTemplates(); i != e; ++i) + Writer.AddDeclRef(DFTSInfo->getTemplate(i), Record); + + // Templates args. 
+ Record.push_back(DFTSInfo->getNumTemplateArgs()); + for (int i=0, e = DFTSInfo->getNumTemplateArgs(); i != e; ++i) + Writer.AddTemplateArgumentLoc(DFTSInfo->getTemplateArg(i), Record); + Writer.AddSourceLocation(DFTSInfo->getLAngleLoc(), Record); + Writer.AddSourceLocation(DFTSInfo->getRAngleLoc(), Record); + break; + } + } + + // FunctionDecl's body is handled last at PCHWriterDecl::Visit, + // after everything else is written. Writer.AddDeclRef(D->getPreviousDeclaration(), Record); Record.push_back(D->getStorageClass()); // FIXME: stable encoding @@ -211,7 +290,6 @@ void PCHDeclWriter::VisitFunctionDecl(FunctionDecl *D) { Record.push_back(D->isTrivial()); Record.push_back(D->isCopyAssignment()); Record.push_back(D->hasImplicitReturnZero()); - // FIXME: C++ TemplateOrInstantiation??? Writer.AddSourceLocation(D->getLocEnd(), Record); Record.push_back(D->param_size()); @@ -357,9 +435,10 @@ void PCHDeclWriter::VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *D) { void PCHDeclWriter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) { VisitNamedDecl(D); Writer.AddSourceLocation(D->getAtLoc(), Record); - Writer.AddTypeRef(D->getType(), Record); + Writer.AddTypeSourceInfo(D->getTypeSourceInfo(), Record); // FIXME: stable encoding Record.push_back((unsigned)D->getPropertyAttributes()); + Record.push_back((unsigned)D->getPropertyAttributesAsWritten()); // FIXME: stable encoding Record.push_back((unsigned)D->getPropertyImplementation()); Writer.AddDeclarationName(D->getGetterName(), Record); @@ -404,6 +483,8 @@ void PCHDeclWriter::VisitFieldDecl(FieldDecl *D) { Record.push_back(D->getBitWidth()? 1 : 0); if (D->getBitWidth()) Writer.AddStmt(D->getBitWidth()); + if (!D->getDeclName()) + Writer.AddDeclRef(Context.getInstantiatedFromUnnamedFieldDecl(D), Record); Code = pch::DECL_FIELD; } @@ -420,6 +501,16 @@ void PCHDeclWriter::VisitVarDecl(VarDecl *D) { Record.push_back(D->getInit() ? 
1 : 0); if (D->getInit()) Writer.AddStmt(D->getInit()); + + MemberSpecializationInfo *SpecInfo + = D->isStaticDataMember() ? D->getMemberSpecializationInfo() : 0; + Record.push_back(SpecInfo != 0); + if (SpecInfo) { + Writer.AddDeclRef(SpecInfo->getInstantiatedFrom(), Record); + Record.push_back(SpecInfo->getTemplateSpecializationKind()); + Writer.AddSourceLocation(SpecInfo->getPointOfInstantiation(), Record); + } + Code = pch::DECL_VAR; } @@ -432,6 +523,9 @@ void PCHDeclWriter::VisitParmVarDecl(ParmVarDecl *D) { VisitVarDecl(D); Record.push_back(D->getObjCDeclQualifier()); // FIXME: stable encoding Record.push_back(D->hasInheritedDefaultArg()); + Record.push_back(D->hasUninstantiatedDefaultArg()); + if (D->hasUninstantiatedDefaultArg()) + Writer.AddStmt(D->getUninstantiatedDefaultArg()); Code = pch::DECL_PARM_VAR; // If the assumptions about the DECL_PARM_VAR abbrev are true, use it. Here @@ -440,14 +534,15 @@ void PCHDeclWriter::VisitParmVarDecl(ParmVarDecl *D) { if (!D->getTypeSourceInfo() && !D->hasAttrs() && !D->isImplicit() && - !D->isUsed() && + !D->isUsed(false) && D->getAccess() == AS_none && D->getPCHLevel() == 0 && D->getStorageClass() == 0 && !D->hasCXXDirectInitializer() && // Can params have this ever? D->getObjCDeclQualifier() == 0 && !D->hasInheritedDefaultArg() && - D->getInit() == 0) // No default expr. + D->getInit() == 0 && + !D->hasUninstantiatedDefaultArg()) // No default expr. 
AbbrevToUse = Writer.getParmVarDeclAbbrev(); // Check things we know are true of *every* PARM_VAR_DECL, which is more than @@ -458,6 +553,8 @@ void PCHDeclWriter::VisitParmVarDecl(ParmVarDecl *D) { assert(!D->isDeclaredInCondition() && "PARM_VAR_DECL can't be in condition"); assert(!D->isExceptionVariable() && "PARM_VAR_DECL can't be exception var"); assert(D->getPreviousDeclaration() == 0 && "PARM_VAR_DECL can't be redecl"); + assert(!D->isStaticDataMember() && + "PARM_VAR_DECL can't be static data member"); } void PCHDeclWriter::VisitFileScopeAsmDecl(FileScopeAsmDecl *D) { @@ -469,6 +566,7 @@ void PCHDeclWriter::VisitFileScopeAsmDecl(FileScopeAsmDecl *D) { void PCHDeclWriter::VisitBlockDecl(BlockDecl *D) { VisitDecl(D); Writer.AddStmt(D->getBody()); + Writer.AddTypeSourceInfo(D->getSignatureAsWritten(), Record); Record.push_back(D->param_size()); for (FunctionDecl::param_iterator P = D->param_begin(), PEnd = D->param_end(); P != PEnd; ++P) @@ -510,7 +608,7 @@ void PCHDeclWriter::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) { Code = pch::DECL_NAMESPACE_ALIAS; } -void PCHDeclWriter::VisitUsing(UsingDecl *D) { +void PCHDeclWriter::VisitUsingDecl(UsingDecl *D) { VisitNamedDecl(D); Writer.AddSourceRange(D->getNestedNameRange(), Record); Writer.AddSourceLocation(D->getUsingLocation(), Record); @@ -520,13 +618,15 @@ void PCHDeclWriter::VisitUsing(UsingDecl *D) { PEnd = D->shadow_end(); P != PEnd; ++P) Writer.AddDeclRef(*P, Record); Record.push_back(D->isTypeName()); + Writer.AddDeclRef(Context.getInstantiatedFromUsingDecl(D), Record); Code = pch::DECL_USING; } -void PCHDeclWriter::VisitUsingShadow(UsingShadowDecl *D) { +void PCHDeclWriter::VisitUsingShadowDecl(UsingShadowDecl *D) { VisitNamedDecl(D); Writer.AddDeclRef(D->getTargetDecl(), Record); Writer.AddDeclRef(D->getUsingDecl(), Record); + Writer.AddDeclRef(Context.getInstantiatedFromUsingShadowDecl(D), Record); Code = pch::DECL_USING_SHADOW; } @@ -541,7 +641,7 @@ void 
PCHDeclWriter::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) { Code = pch::DECL_USING_DIRECTIVE; } -void PCHDeclWriter::VisitUnresolvedUsingValue(UnresolvedUsingValueDecl *D) { +void PCHDeclWriter::VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) { VisitValueDecl(D); Writer.AddSourceRange(D->getTargetNestedNameRange(), Record); Writer.AddSourceLocation(D->getUsingLoc(), Record); @@ -549,7 +649,7 @@ void PCHDeclWriter::VisitUnresolvedUsingValue(UnresolvedUsingValueDecl *D) { Code = pch::DECL_UNRESOLVED_USING_VALUE; } -void PCHDeclWriter::VisitUnresolvedUsingTypename( +void PCHDeclWriter::VisitUnresolvedUsingTypenameDecl( UnresolvedUsingTypenameDecl *D) { VisitTypeDecl(D); Writer.AddSourceRange(D->getTargetNestedNameRange(), Record); @@ -560,71 +660,324 @@ void PCHDeclWriter::VisitUnresolvedUsingTypename( } void PCHDeclWriter::VisitCXXRecordDecl(CXXRecordDecl *D) { - // assert(false && "cannot write CXXRecordDecl"); + // See comments at PCHDeclReader::VisitCXXRecordDecl about why this happens + // before VisitRecordDecl. 
+ enum { Data_NoDefData, Data_Owner, Data_NotOwner }; + bool OwnsDefinitionData = false; + if (D->DefinitionData) { + assert(D->DefinitionData->Definition && + "DefinitionData don't point to a definition decl!"); + OwnsDefinitionData = D->DefinitionData->Definition == D; + if (OwnsDefinitionData) { + Record.push_back(Data_Owner); + } else { + Record.push_back(Data_NotOwner); + Writer.AddDeclRef(D->DefinitionData->Definition, Record); + } + } else + Record.push_back(Data_NoDefData); + VisitRecordDecl(D); + + if (OwnsDefinitionData) { + assert(D->DefinitionData); + struct CXXRecordDecl::DefinitionData &Data = *D->DefinitionData; + + Record.push_back(Data.UserDeclaredConstructor); + Record.push_back(Data.UserDeclaredCopyConstructor); + Record.push_back(Data.UserDeclaredCopyAssignment); + Record.push_back(Data.UserDeclaredDestructor); + Record.push_back(Data.Aggregate); + Record.push_back(Data.PlainOldData); + Record.push_back(Data.Empty); + Record.push_back(Data.Polymorphic); + Record.push_back(Data.Abstract); + Record.push_back(Data.HasTrivialConstructor); + Record.push_back(Data.HasTrivialCopyConstructor); + Record.push_back(Data.HasTrivialCopyAssignment); + Record.push_back(Data.HasTrivialDestructor); + Record.push_back(Data.ComputedVisibleConversions); + Record.push_back(Data.DeclaredDefaultConstructor); + Record.push_back(Data.DeclaredCopyConstructor); + Record.push_back(Data.DeclaredCopyAssignment); + Record.push_back(Data.DeclaredDestructor); + + Record.push_back(D->getNumBases()); + for (CXXRecordDecl::base_class_iterator I = D->bases_begin(), + E = D->bases_end(); I != E; ++I) + Writer.AddCXXBaseSpecifier(*I, Record); + + // FIXME: Make VBases lazily computed when needed to avoid storing them. 
+ Record.push_back(D->getNumVBases()); + for (CXXRecordDecl::base_class_iterator I = D->vbases_begin(), + E = D->vbases_end(); I != E; ++I) + Writer.AddCXXBaseSpecifier(*I, Record); + + Writer.AddUnresolvedSet(Data.Conversions, Record); + Writer.AddUnresolvedSet(Data.VisibleConversions, Record); + // Data.Definition is written at the top. + Writer.AddDeclRef(Data.FirstFriend, Record); + } + + enum { + CXXRecNotTemplate = 0, CXXRecTemplate, CXXRecMemberSpecialization + }; + if (ClassTemplateDecl *TemplD = D->getDescribedClassTemplate()) { + Record.push_back(CXXRecTemplate); + Writer.AddDeclRef(TemplD, Record); + } else if (MemberSpecializationInfo *MSInfo + = D->getMemberSpecializationInfo()) { + Record.push_back(CXXRecMemberSpecialization); + Writer.AddDeclRef(MSInfo->getInstantiatedFrom(), Record); + Record.push_back(MSInfo->getTemplateSpecializationKind()); + Writer.AddSourceLocation(MSInfo->getPointOfInstantiation(), Record); + } else { + Record.push_back(CXXRecNotTemplate); + } + Code = pch::DECL_CXX_RECORD; } void PCHDeclWriter::VisitCXXMethodDecl(CXXMethodDecl *D) { - // assert(false && "cannot write CXXMethodDecl"); VisitFunctionDecl(D); + Record.push_back(D->size_overridden_methods()); + for (CXXMethodDecl::method_iterator + I = D->begin_overridden_methods(), E = D->end_overridden_methods(); + I != E; ++I) + Writer.AddDeclRef(*I, Record); Code = pch::DECL_CXX_METHOD; } void PCHDeclWriter::VisitCXXConstructorDecl(CXXConstructorDecl *D) { - // assert(false && "cannot write CXXConstructorDecl"); VisitCXXMethodDecl(D); + + Record.push_back(D->IsExplicitSpecified); + Record.push_back(D->ImplicitlyDefined); + + Record.push_back(D->NumBaseOrMemberInitializers); + for (unsigned i=0; i != D->NumBaseOrMemberInitializers; ++i) { + CXXBaseOrMemberInitializer *Init = D->BaseOrMemberInitializers[i]; + + Record.push_back(Init->isBaseInitializer()); + if (Init->isBaseInitializer()) { + Writer.AddTypeSourceInfo(Init->getBaseClassInfo(), Record); + 
Record.push_back(Init->isBaseVirtual()); + } else { + Writer.AddDeclRef(Init->getMember(), Record); + } + Writer.AddSourceLocation(Init->getMemberLocation(), Record); + Writer.AddStmt(Init->getInit()); + Writer.AddDeclRef(Init->getAnonUnionMember(), Record); + Writer.AddSourceLocation(Init->getLParenLoc(), Record); + Writer.AddSourceLocation(Init->getRParenLoc(), Record); + Record.push_back(Init->isWritten()); + if (Init->isWritten()) { + Record.push_back(Init->getSourceOrder()); + } else { + Record.push_back(Init->getNumArrayIndices()); + for (unsigned i=0, e=Init->getNumArrayIndices(); i != e; ++i) + Writer.AddDeclRef(Init->getArrayIndex(i), Record); + } + } + Code = pch::DECL_CXX_CONSTRUCTOR; } void PCHDeclWriter::VisitCXXDestructorDecl(CXXDestructorDecl *D) { - // assert(false && "cannot write CXXDestructorDecl"); VisitCXXMethodDecl(D); + + Record.push_back(D->ImplicitlyDefined); + Writer.AddDeclRef(D->OperatorDelete, Record); + Code = pch::DECL_CXX_DESTRUCTOR; } void PCHDeclWriter::VisitCXXConversionDecl(CXXConversionDecl *D) { - // assert(false && "cannot write CXXConversionDecl"); VisitCXXMethodDecl(D); + Record.push_back(D->IsExplicitSpecified); Code = pch::DECL_CXX_CONVERSION; } +void PCHDeclWriter::VisitAccessSpecDecl(AccessSpecDecl *D) { + VisitDecl(D); + Writer.AddSourceLocation(D->getColonLoc(), Record); + Code = pch::DECL_ACCESS_SPEC; +} + +void PCHDeclWriter::VisitFriendDecl(FriendDecl *D) { + VisitDecl(D); + Record.push_back(D->Friend.is<TypeSourceInfo*>()); + if (D->Friend.is<TypeSourceInfo*>()) + Writer.AddTypeSourceInfo(D->Friend.get<TypeSourceInfo*>(), Record); + else + Writer.AddDeclRef(D->Friend.get<NamedDecl*>(), Record); + Writer.AddDeclRef(D->NextFriend, Record); + Writer.AddSourceLocation(D->FriendLoc, Record); + Code = pch::DECL_FRIEND; +} + void PCHDeclWriter::VisitFriendTemplateDecl(FriendTemplateDecl *D) { assert(false && "cannot write FriendTemplateDecl"); } void PCHDeclWriter::VisitTemplateDecl(TemplateDecl *D) { - assert(false && 
"cannot write TemplateDecl"); + VisitNamedDecl(D); + + Writer.AddDeclRef(D->getTemplatedDecl(), Record); + Writer.AddTemplateParameterList(D->getTemplateParameters(), Record); +} + +static bool IsKeptInFoldingSet(ClassTemplateSpecializationDecl *D) { + return D->getTypeForDecl()->getAsCXXRecordDecl() == D; } void PCHDeclWriter::VisitClassTemplateDecl(ClassTemplateDecl *D) { - assert(false && "cannot write ClassTemplateDecl"); + VisitTemplateDecl(D); + + Record.push_back(D->getIdentifierNamespace()); + Writer.AddDeclRef(D->getPreviousDeclaration(), Record); + if (D->getPreviousDeclaration() == 0) { + // This ClassTemplateDecl owns the CommonPtr; write it. + assert(D->isCanonicalDecl()); + + typedef llvm::FoldingSet<ClassTemplateSpecializationDecl> CTSDSetTy; + CTSDSetTy &CTSDSet = D->getSpecializations(); + Record.push_back(CTSDSet.size()); + for (CTSDSetTy::iterator I=CTSDSet.begin(), E = CTSDSet.end(); I!=E; ++I) { + assert(IsKeptInFoldingSet(&*I)); + Writer.AddDeclRef(&*I, Record); + } + + typedef llvm::FoldingSet<ClassTemplatePartialSpecializationDecl> CTPSDSetTy; + CTPSDSetTy &CTPSDSet = D->getPartialSpecializations(); + Record.push_back(CTPSDSet.size()); + for (CTPSDSetTy::iterator I=CTPSDSet.begin(), E=CTPSDSet.end(); I!=E; ++I) { + assert(IsKeptInFoldingSet(&*I)); + Writer.AddDeclRef(&*I, Record); + } + + // InjectedClassNameType is computed, no need to write it. 
+ + Writer.AddDeclRef(D->getInstantiatedFromMemberTemplate(), Record); + if (D->getInstantiatedFromMemberTemplate()) + Record.push_back(D->isMemberSpecialization()); + } + Code = pch::DECL_CLASS_TEMPLATE; } void PCHDeclWriter::VisitClassTemplateSpecializationDecl( ClassTemplateSpecializationDecl *D) { - assert(false && "cannot write ClassTemplateSpecializationDecl"); + VisitCXXRecordDecl(D); + + llvm::PointerUnion<ClassTemplateDecl *, + ClassTemplatePartialSpecializationDecl *> InstFrom + = D->getSpecializedTemplateOrPartial(); + if (InstFrom.is<ClassTemplateDecl *>()) { + Writer.AddDeclRef(InstFrom.get<ClassTemplateDecl *>(), Record); + } else { + Writer.AddDeclRef(InstFrom.get<ClassTemplatePartialSpecializationDecl *>(), + Record); + Writer.AddTemplateArgumentList(&D->getTemplateInstantiationArgs(), Record); + } + + // Explicit info. + Writer.AddTypeSourceInfo(D->getTypeAsWritten(), Record); + if (D->getTypeAsWritten()) { + Writer.AddSourceLocation(D->getExternLoc(), Record); + Writer.AddSourceLocation(D->getTemplateKeywordLoc(), Record); + } + + Writer.AddTemplateArgumentList(&D->getTemplateArgs(), Record); + Writer.AddSourceLocation(D->getPointOfInstantiation(), Record); + Record.push_back(D->getSpecializationKind()); + + bool IsInInFoldingSet = IsKeptInFoldingSet(D); + Record.push_back(IsInInFoldingSet); + if (IsInInFoldingSet) { + // When reading, we'll add it to the folding set of this one. 
+ Writer.AddDeclRef(D->getSpecializedTemplate()->getCanonicalDecl(), Record); + } + + Code = pch::DECL_CLASS_TEMPLATE_SPECIALIZATION; } void PCHDeclWriter::VisitClassTemplatePartialSpecializationDecl( ClassTemplatePartialSpecializationDecl *D) { - assert(false && "cannot write ClassTemplatePartialSpecializationDecl"); + VisitClassTemplateSpecializationDecl(D); + + Writer.AddTemplateParameterList(D->getTemplateParameters(), Record); + + Record.push_back(D->getNumTemplateArgsAsWritten()); + for (int i = 0, e = D->getNumTemplateArgsAsWritten(); i != e; ++i) + Writer.AddTemplateArgumentLoc(D->getTemplateArgsAsWritten()[i], Record); + + Record.push_back(D->getSequenceNumber()); + + // These are read/set from/to the first declaration. + if (D->getPreviousDeclaration() == 0) { + Writer.AddDeclRef(D->getInstantiatedFromMember(), Record); + Record.push_back(D->isMemberSpecialization()); + } + + Code = pch::DECL_CLASS_TEMPLATE_PARTIAL_SPECIALIZATION; } -void PCHDeclWriter::visitFunctionTemplateDecl(FunctionTemplateDecl *D) { - assert(false && "cannot write FunctionTemplateDecl"); +void PCHDeclWriter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) { + VisitTemplateDecl(D); + + Record.push_back(D->getIdentifierNamespace()); + Writer.AddDeclRef(D->getPreviousDeclaration(), Record); + if (D->getPreviousDeclaration() == 0) { + // This FunctionTemplateDecl owns the CommonPtr; write it. + + // Write the function specialization declarations. 
+ Record.push_back(D->getSpecializations().size()); + for (llvm::FoldingSet<FunctionTemplateSpecializationInfo>::iterator + I = D->getSpecializations().begin(), + E = D->getSpecializations().end() ; I != E; ++I) + Writer.AddDeclRef(I->Function, Record); + + Writer.AddDeclRef(D->getInstantiatedFromMemberTemplate(), Record); + if (D->getInstantiatedFromMemberTemplate()) + Record.push_back(D->isMemberSpecialization()); + } + Code = pch::DECL_FUNCTION_TEMPLATE; } void PCHDeclWriter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) { - assert(false && "cannot write TemplateTypeParmDecl"); + VisitTypeDecl(D); + + Record.push_back(D->wasDeclaredWithTypename()); + Record.push_back(D->isParameterPack()); + Record.push_back(D->defaultArgumentWasInherited()); + Writer.AddTypeSourceInfo(D->getDefaultArgumentInfo(), Record); + + Code = pch::DECL_TEMPLATE_TYPE_PARM; } void PCHDeclWriter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) { - assert(false && "cannot write NonTypeTemplateParmDecl"); + VisitVarDecl(D); + // TemplateParmPosition. + Record.push_back(D->getDepth()); + Record.push_back(D->getPosition()); + // Rest of NonTypeTemplateParmDecl. + Record.push_back(D->getDefaultArgument() != 0); + if (D->getDefaultArgument()) { + Writer.AddStmt(D->getDefaultArgument()); + Record.push_back(D->defaultArgumentWasInherited()); + } + Code = pch::DECL_NON_TYPE_TEMPLATE_PARM; } void PCHDeclWriter::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) { - assert(false && "cannot write TemplateTemplateParmDecl"); + VisitTemplateDecl(D); + // TemplateParmPosition. + Record.push_back(D->getDepth()); + Record.push_back(D->getPosition()); + // Rest of TemplateTemplateParmDecl. 
+ Writer.AddTemplateArgumentLoc(D->getDefaultArgument(), Record); + Record.push_back(D->defaultArgumentWasInherited()); + Code = pch::DECL_TEMPLATE_TEMPLATE_PARM; } void PCHDeclWriter::VisitStaticAssertDecl(StaticAssertDecl *D) { @@ -687,9 +1040,11 @@ void PCHWriter::WriteDeclsBlockAbbrevs() { Abv->Add(BitCodeAbbrevOp(0)); // isNRVOVariable Abv->Add(BitCodeAbbrevOp(0)); // PrevDecl Abv->Add(BitCodeAbbrevOp(0)); // HasInit + Abv->Add(BitCodeAbbrevOp(0)); // HasMemberSpecializationInfo // ParmVarDecl Abv->Add(BitCodeAbbrevOp(0)); // ObjCDeclQualifier Abv->Add(BitCodeAbbrevOp(0)); // HasInheritedDefaultArg + Abv->Add(BitCodeAbbrevOp(0)); // HasUninstantiatedDefaultArg ParmVarDeclAbbrev = Stream.EmitAbbrev(Abv); } diff --git a/contrib/llvm/tools/clang/lib/Frontend/PCHWriterStmt.cpp b/contrib/llvm/tools/clang/lib/Frontend/PCHWriterStmt.cpp index a9ee435..7537728 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/PCHWriterStmt.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/PCHWriterStmt.cpp @@ -22,7 +22,7 @@ using namespace clang; // Statement/expression serialization //===----------------------------------------------------------------------===// -namespace { +namespace clang { class PCHStmtWriter : public StmtVisitor<PCHStmtWriter, void> { PCHWriter &Writer; PCHWriter::RecordData &Record; @@ -32,6 +32,9 @@ namespace { PCHStmtWriter(PCHWriter &Writer, PCHWriter::RecordData &Record) : Writer(Writer), Record(Record) { } + + void + AddExplicitTemplateArgumentList(const ExplicitTemplateArgumentList &Args); void VisitStmt(Stmt *S); void VisitNullStmt(NullStmt *S); @@ -61,6 +64,7 @@ namespace { void VisitStringLiteral(StringLiteral *E); void VisitCharacterLiteral(CharacterLiteral *E); void VisitParenExpr(ParenExpr *E); + void VisitParenListExpr(ParenListExpr *E); void VisitUnaryOperator(UnaryOperator *E); void VisitOffsetOfExpr(OffsetOfExpr *E); void VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E); @@ -114,6 +118,7 @@ namespace { void 
VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E); void VisitCXXMemberCallExpr(CXXMemberCallExpr *E); void VisitCXXConstructExpr(CXXConstructExpr *E); + void VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E); void VisitCXXNamedCastExpr(CXXNamedCastExpr *E); void VisitCXXStaticCastExpr(CXXStaticCastExpr *E); void VisitCXXDynamicCastExpr(CXXDynamicCastExpr *E); @@ -127,14 +132,34 @@ namespace { void VisitCXXThrowExpr(CXXThrowExpr *E); void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E); void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E); - - void VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E); + void VisitCXXBindReferenceExpr(CXXBindReferenceExpr *E); + + void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E); void VisitCXXNewExpr(CXXNewExpr *E); - + void VisitCXXDeleteExpr(CXXDeleteExpr *E); + void VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E); + void VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E); + void VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E); + void VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E); + void VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E); + + void VisitOverloadExpr(OverloadExpr *E); + void VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E); + void VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E); + + void VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E); }; } +void PCHStmtWriter:: +AddExplicitTemplateArgumentList(const ExplicitTemplateArgumentList &Args) { + Writer.AddSourceLocation(Args.LAngleLoc, Record); + Writer.AddSourceLocation(Args.RAngleLoc, Record); + for (unsigned i=0; i != Args.NumTemplateArgs; ++i) + Writer.AddTemplateArgumentLoc(Args.getTemplateArgs()[i], Record); +} + void PCHStmtWriter::VisitStmt(Stmt *S) { } @@ -149,7 +174,7 @@ void PCHStmtWriter::VisitCompoundStmt(CompoundStmt *S) { Record.push_back(S->size()); for (CompoundStmt::body_iterator CS = S->body_begin(), CSEnd = S->body_end(); CS != CSEnd; ++CS) - Writer.WriteSubStmt(*CS); + 
Writer.AddStmt(*CS); Writer.AddSourceLocation(S->getLBracLoc(), Record); Writer.AddSourceLocation(S->getRBracLoc(), Record); Code = pch::STMT_COMPOUND; @@ -157,14 +182,14 @@ void PCHStmtWriter::VisitCompoundStmt(CompoundStmt *S) { void PCHStmtWriter::VisitSwitchCase(SwitchCase *S) { VisitStmt(S); - Record.push_back(Writer.RecordSwitchCaseID(S)); + Record.push_back(Writer.getSwitchCaseID(S)); } void PCHStmtWriter::VisitCaseStmt(CaseStmt *S) { VisitSwitchCase(S); - Writer.WriteSubStmt(S->getLHS()); - Writer.WriteSubStmt(S->getRHS()); - Writer.WriteSubStmt(S->getSubStmt()); + Writer.AddStmt(S->getLHS()); + Writer.AddStmt(S->getRHS()); + Writer.AddStmt(S->getSubStmt()); Writer.AddSourceLocation(S->getCaseLoc(), Record); Writer.AddSourceLocation(S->getEllipsisLoc(), Record); Writer.AddSourceLocation(S->getColonLoc(), Record); @@ -173,7 +198,7 @@ void PCHStmtWriter::VisitCaseStmt(CaseStmt *S) { void PCHStmtWriter::VisitDefaultStmt(DefaultStmt *S) { VisitSwitchCase(S); - Writer.WriteSubStmt(S->getSubStmt()); + Writer.AddStmt(S->getSubStmt()); Writer.AddSourceLocation(S->getDefaultLoc(), Record); Writer.AddSourceLocation(S->getColonLoc(), Record); Code = pch::STMT_DEFAULT; @@ -182,7 +207,7 @@ void PCHStmtWriter::VisitDefaultStmt(DefaultStmt *S) { void PCHStmtWriter::VisitLabelStmt(LabelStmt *S) { VisitStmt(S); Writer.AddIdentifierRef(S->getID(), Record); - Writer.WriteSubStmt(S->getSubStmt()); + Writer.AddStmt(S->getSubStmt()); Writer.AddSourceLocation(S->getIdentLoc(), Record); Record.push_back(Writer.GetLabelID(S)); Code = pch::STMT_LABEL; @@ -191,9 +216,9 @@ void PCHStmtWriter::VisitLabelStmt(LabelStmt *S) { void PCHStmtWriter::VisitIfStmt(IfStmt *S) { VisitStmt(S); Writer.AddDeclRef(S->getConditionVariable(), Record); - Writer.WriteSubStmt(S->getCond()); - Writer.WriteSubStmt(S->getThen()); - Writer.WriteSubStmt(S->getElse()); + Writer.AddStmt(S->getCond()); + Writer.AddStmt(S->getThen()); + Writer.AddStmt(S->getElse()); Writer.AddSourceLocation(S->getIfLoc(), Record); 
Writer.AddSourceLocation(S->getElseLoc(), Record); Code = pch::STMT_IF; @@ -202,28 +227,28 @@ void PCHStmtWriter::VisitIfStmt(IfStmt *S) { void PCHStmtWriter::VisitSwitchStmt(SwitchStmt *S) { VisitStmt(S); Writer.AddDeclRef(S->getConditionVariable(), Record); - Writer.WriteSubStmt(S->getCond()); - Writer.WriteSubStmt(S->getBody()); + Writer.AddStmt(S->getCond()); + Writer.AddStmt(S->getBody()); Writer.AddSourceLocation(S->getSwitchLoc(), Record); for (SwitchCase *SC = S->getSwitchCaseList(); SC; SC = SC->getNextSwitchCase()) - Record.push_back(Writer.getSwitchCaseID(SC)); + Record.push_back(Writer.RecordSwitchCaseID(SC)); Code = pch::STMT_SWITCH; } void PCHStmtWriter::VisitWhileStmt(WhileStmt *S) { VisitStmt(S); Writer.AddDeclRef(S->getConditionVariable(), Record); - Writer.WriteSubStmt(S->getCond()); - Writer.WriteSubStmt(S->getBody()); + Writer.AddStmt(S->getCond()); + Writer.AddStmt(S->getBody()); Writer.AddSourceLocation(S->getWhileLoc(), Record); Code = pch::STMT_WHILE; } void PCHStmtWriter::VisitDoStmt(DoStmt *S) { VisitStmt(S); - Writer.WriteSubStmt(S->getCond()); - Writer.WriteSubStmt(S->getBody()); + Writer.AddStmt(S->getCond()); + Writer.AddStmt(S->getBody()); Writer.AddSourceLocation(S->getDoLoc(), Record); Writer.AddSourceLocation(S->getWhileLoc(), Record); Writer.AddSourceLocation(S->getRParenLoc(), Record); @@ -232,11 +257,11 @@ void PCHStmtWriter::VisitDoStmt(DoStmt *S) { void PCHStmtWriter::VisitForStmt(ForStmt *S) { VisitStmt(S); - Writer.WriteSubStmt(S->getInit()); - Writer.WriteSubStmt(S->getCond()); + Writer.AddStmt(S->getInit()); + Writer.AddStmt(S->getCond()); Writer.AddDeclRef(S->getConditionVariable(), Record); - Writer.WriteSubStmt(S->getInc()); - Writer.WriteSubStmt(S->getBody()); + Writer.AddStmt(S->getInc()); + Writer.AddStmt(S->getBody()); Writer.AddSourceLocation(S->getForLoc(), Record); Writer.AddSourceLocation(S->getLParenLoc(), Record); Writer.AddSourceLocation(S->getRParenLoc(), Record); @@ -255,7 +280,7 @@ void 
PCHStmtWriter::VisitIndirectGotoStmt(IndirectGotoStmt *S) { VisitStmt(S); Writer.AddSourceLocation(S->getGotoLoc(), Record); Writer.AddSourceLocation(S->getStarLoc(), Record); - Writer.WriteSubStmt(S->getTarget()); + Writer.AddStmt(S->getTarget()); Code = pch::STMT_INDIRECT_GOTO; } @@ -273,7 +298,7 @@ void PCHStmtWriter::VisitBreakStmt(BreakStmt *S) { void PCHStmtWriter::VisitReturnStmt(ReturnStmt *S) { VisitStmt(S); - Writer.WriteSubStmt(S->getRetValue()); + Writer.AddStmt(S->getRetValue()); Writer.AddSourceLocation(S->getReturnLoc(), Record); Writer.AddDeclRef(S->getNRVOCandidate(), Record); Code = pch::STMT_RETURN; @@ -299,25 +324,25 @@ void PCHStmtWriter::VisitAsmStmt(AsmStmt *S) { Record.push_back(S->isVolatile()); Record.push_back(S->isSimple()); Record.push_back(S->isMSAsm()); - Writer.WriteSubStmt(S->getAsmString()); + Writer.AddStmt(S->getAsmString()); // Outputs for (unsigned I = 0, N = S->getNumOutputs(); I != N; ++I) { Writer.AddIdentifierRef(S->getOutputIdentifier(I), Record); - Writer.WriteSubStmt(S->getOutputConstraintLiteral(I)); - Writer.WriteSubStmt(S->getOutputExpr(I)); + Writer.AddStmt(S->getOutputConstraintLiteral(I)); + Writer.AddStmt(S->getOutputExpr(I)); } // Inputs for (unsigned I = 0, N = S->getNumInputs(); I != N; ++I) { Writer.AddIdentifierRef(S->getInputIdentifier(I), Record); - Writer.WriteSubStmt(S->getInputConstraintLiteral(I)); - Writer.WriteSubStmt(S->getInputExpr(I)); + Writer.AddStmt(S->getInputConstraintLiteral(I)); + Writer.AddStmt(S->getInputExpr(I)); } // Clobbers for (unsigned I = 0, N = S->getNumClobbers(); I != N; ++I) - Writer.WriteSubStmt(S->getClobber(I)); + Writer.AddStmt(S->getClobber(I)); Code = pch::STMT_ASM; } @@ -338,10 +363,23 @@ void PCHStmtWriter::VisitPredefinedExpr(PredefinedExpr *E) { void PCHStmtWriter::VisitDeclRefExpr(DeclRefExpr *E) { VisitExpr(E); + + Record.push_back(E->hasQualifier()); + unsigned NumTemplateArgs = E->getNumTemplateArgs(); + assert((NumTemplateArgs != 0) == 
E->hasExplicitTemplateArgumentList() && + "Template args list with no args ?"); + Record.push_back(NumTemplateArgs); + + if (E->hasQualifier()) { + Writer.AddNestedNameSpecifier(E->getQualifier(), Record); + Writer.AddSourceRange(E->getQualifierRange(), Record); + } + + if (NumTemplateArgs) + AddExplicitTemplateArgumentList(*E->getExplicitTemplateArgumentList()); + Writer.AddDeclRef(E->getDecl(), Record); Writer.AddSourceLocation(E->getLocation(), Record); - // FIXME: write qualifier - // FIXME: write explicit template arguments Code = pch::EXPR_DECL_REF; } @@ -362,7 +400,7 @@ void PCHStmtWriter::VisitFloatingLiteral(FloatingLiteral *E) { void PCHStmtWriter::VisitImaginaryLiteral(ImaginaryLiteral *E) { VisitExpr(E); - Writer.WriteSubStmt(E->getSubExpr()); + Writer.AddStmt(E->getSubExpr()); Code = pch::EXPR_IMAGINARY_LITERAL; } @@ -394,13 +432,23 @@ void PCHStmtWriter::VisitParenExpr(ParenExpr *E) { VisitExpr(E); Writer.AddSourceLocation(E->getLParen(), Record); Writer.AddSourceLocation(E->getRParen(), Record); - Writer.WriteSubStmt(E->getSubExpr()); + Writer.AddStmt(E->getSubExpr()); Code = pch::EXPR_PAREN; } +void PCHStmtWriter::VisitParenListExpr(ParenListExpr *E) { + VisitExpr(E); + Record.push_back(E->NumExprs); + for (unsigned i=0; i != E->NumExprs; ++i) + Writer.AddStmt(E->Exprs[i]); + Writer.AddSourceLocation(E->LParenLoc, Record); + Writer.AddSourceLocation(E->RParenLoc, Record); + Code = pch::EXPR_PAREN_LIST; +} + void PCHStmtWriter::VisitUnaryOperator(UnaryOperator *E) { VisitExpr(E); - Writer.WriteSubStmt(E->getSubExpr()); + Writer.AddStmt(E->getSubExpr()); Record.push_back(E->getOpcode()); // FIXME: stable encoding Writer.AddSourceLocation(E->getOperatorLoc(), Record); Code = pch::EXPR_UNARY_OPERATOR; @@ -438,7 +486,7 @@ void PCHStmtWriter::VisitOffsetOfExpr(OffsetOfExpr *E) { } } for (unsigned I = 0, N = E->getNumExpressions(); I != N; ++I) - Writer.WriteSubStmt(E->getIndexExpr(I)); + Writer.AddStmt(E->getIndexExpr(I)); Code = pch::EXPR_OFFSETOF; } @@ 
-449,7 +497,7 @@ void PCHStmtWriter::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E) { Writer.AddTypeSourceInfo(E->getArgumentTypeInfo(), Record); else { Record.push_back(0); - Writer.WriteSubStmt(E->getArgumentExpr()); + Writer.AddStmt(E->getArgumentExpr()); } Writer.AddSourceLocation(E->getOperatorLoc(), Record); Writer.AddSourceLocation(E->getRParenLoc(), Record); @@ -458,8 +506,8 @@ void PCHStmtWriter::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E) { void PCHStmtWriter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) { VisitExpr(E); - Writer.WriteSubStmt(E->getLHS()); - Writer.WriteSubStmt(E->getRHS()); + Writer.AddStmt(E->getLHS()); + Writer.AddStmt(E->getRHS()); Writer.AddSourceLocation(E->getRBracketLoc(), Record); Code = pch::EXPR_ARRAY_SUBSCRIPT; } @@ -468,27 +516,48 @@ void PCHStmtWriter::VisitCallExpr(CallExpr *E) { VisitExpr(E); Record.push_back(E->getNumArgs()); Writer.AddSourceLocation(E->getRParenLoc(), Record); - Writer.WriteSubStmt(E->getCallee()); + Writer.AddStmt(E->getCallee()); for (CallExpr::arg_iterator Arg = E->arg_begin(), ArgEnd = E->arg_end(); Arg != ArgEnd; ++Arg) - Writer.WriteSubStmt(*Arg); + Writer.AddStmt(*Arg); Code = pch::EXPR_CALL; } void PCHStmtWriter::VisitMemberExpr(MemberExpr *E) { - VisitExpr(E); - Writer.WriteSubStmt(E->getBase()); + // Don't call VisitExpr, we'll write everything here. 
+ + Record.push_back(E->hasQualifier()); + if (E->hasQualifier()) { + Writer.AddNestedNameSpecifier(E->getQualifier(), Record); + Writer.AddSourceRange(E->getQualifierRange(), Record); + } + + unsigned NumTemplateArgs = E->getNumTemplateArgs(); + assert((NumTemplateArgs != 0) == E->hasExplicitTemplateArgumentList() && + "Template args list with no args ?"); + Record.push_back(NumTemplateArgs); + if (NumTemplateArgs) { + Writer.AddSourceLocation(E->getLAngleLoc(), Record); + Writer.AddSourceLocation(E->getRAngleLoc(), Record); + for (unsigned i=0; i != NumTemplateArgs; ++i) + Writer.AddTemplateArgumentLoc(E->getTemplateArgs()[i], Record); + } + + DeclAccessPair FoundDecl = E->getFoundDecl(); + Writer.AddDeclRef(FoundDecl.getDecl(), Record); + Record.push_back(FoundDecl.getAccess()); + + Writer.AddTypeRef(E->getType(), Record); + Writer.AddStmt(E->getBase()); Writer.AddDeclRef(E->getMemberDecl(), Record); Writer.AddSourceLocation(E->getMemberLoc(), Record); Record.push_back(E->isArrow()); - // FIXME: C++ nested-name-specifier - // FIXME: C++ template argument list Code = pch::EXPR_MEMBER; } void PCHStmtWriter::VisitObjCIsaExpr(ObjCIsaExpr *E) { VisitExpr(E); - Writer.WriteSubStmt(E->getBase()); + Writer.AddStmt(E->getBase()); Writer.AddSourceLocation(E->getIsaMemberLoc(), Record); Record.push_back(E->isArrow()); Code = pch::EXPR_OBJC_ISA; @@ -496,14 +565,19 @@ void PCHStmtWriter::VisitObjCIsaExpr(ObjCIsaExpr *E) { void PCHStmtWriter::VisitCastExpr(CastExpr *E) { VisitExpr(E); - Writer.WriteSubStmt(E->getSubExpr()); + Writer.AddStmt(E->getSubExpr()); Record.push_back(E->getCastKind()); // FIXME: stable encoding + CXXBaseSpecifierArray &BasePath = E->getBasePath(); + Record.push_back(BasePath.size()); + for (CXXBaseSpecifierArray::iterator I = BasePath.begin(), E = BasePath.end(); + I != E; ++I) + Writer.AddCXXBaseSpecifier(**I, Record); } void PCHStmtWriter::VisitBinaryOperator(BinaryOperator *E) { VisitExpr(E); - Writer.WriteSubStmt(E->getLHS()); - 
Writer.WriteSubStmt(E->getRHS()); + Writer.AddStmt(E->getLHS()); + Writer.AddStmt(E->getRHS()); Record.push_back(E->getOpcode()); // FIXME: stable encoding Writer.AddSourceLocation(E->getOperatorLoc(), Record); Code = pch::EXPR_BINARY_OPERATOR; @@ -518,9 +592,9 @@ void PCHStmtWriter::VisitCompoundAssignOperator(CompoundAssignOperator *E) { void PCHStmtWriter::VisitConditionalOperator(ConditionalOperator *E) { VisitExpr(E); - Writer.WriteSubStmt(E->getCond()); - Writer.WriteSubStmt(E->getLHS()); - Writer.WriteSubStmt(E->getRHS()); + Writer.AddStmt(E->getCond()); + Writer.AddStmt(E->getLHS()); + Writer.AddStmt(E->getRHS()); Writer.AddSourceLocation(E->getQuestionLoc(), Record); Writer.AddSourceLocation(E->getColonLoc(), Record); Code = pch::EXPR_CONDITIONAL_OPERATOR; @@ -548,14 +622,14 @@ void PCHStmtWriter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { VisitExpr(E); Writer.AddSourceLocation(E->getLParenLoc(), Record); Writer.AddTypeSourceInfo(E->getTypeSourceInfo(), Record); - Writer.WriteSubStmt(E->getInitializer()); + Writer.AddStmt(E->getInitializer()); Record.push_back(E->isFileScope()); Code = pch::EXPR_COMPOUND_LITERAL; } void PCHStmtWriter::VisitExtVectorElementExpr(ExtVectorElementExpr *E) { VisitExpr(E); - Writer.WriteSubStmt(E->getBase()); + Writer.AddStmt(E->getBase()); Writer.AddIdentifierRef(&E->getAccessor(), Record); Writer.AddSourceLocation(E->getAccessorLoc(), Record); Code = pch::EXPR_EXT_VECTOR_ELEMENT; @@ -565,8 +639,8 @@ void PCHStmtWriter::VisitInitListExpr(InitListExpr *E) { VisitExpr(E); Record.push_back(E->getNumInits()); for (unsigned I = 0, N = E->getNumInits(); I != N; ++I) - Writer.WriteSubStmt(E->getInit(I)); - Writer.WriteSubStmt(E->getSyntacticForm()); + Writer.AddStmt(E->getInit(I)); + Writer.AddStmt(E->getSyntacticForm()); Writer.AddSourceLocation(E->getLBraceLoc(), Record); Writer.AddSourceLocation(E->getRBraceLoc(), Record); Writer.AddDeclRef(E->getInitializedFieldInUnion(), Record); @@ -578,7 +652,7 @@ void 
PCHStmtWriter::VisitDesignatedInitExpr(DesignatedInitExpr *E) { VisitExpr(E); Record.push_back(E->getNumSubExprs()); for (unsigned I = 0, N = E->getNumSubExprs(); I != N; ++I) - Writer.WriteSubStmt(E->getSubExpr(I)); + Writer.AddStmt(E->getSubExpr(I)); Writer.AddSourceLocation(E->getEqualOrColonLoc(), Record); Record.push_back(E->usesGNUSyntax()); for (DesignatedInitExpr::designators_iterator D = E->designators_begin(), @@ -618,7 +692,7 @@ void PCHStmtWriter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) { void PCHStmtWriter::VisitVAArgExpr(VAArgExpr *E) { VisitExpr(E); - Writer.WriteSubStmt(E->getSubExpr()); + Writer.AddStmt(E->getSubExpr()); Writer.AddSourceLocation(E->getBuiltinLoc(), Record); Writer.AddSourceLocation(E->getRParenLoc(), Record); Code = pch::EXPR_VA_ARG; @@ -634,7 +708,7 @@ void PCHStmtWriter::VisitAddrLabelExpr(AddrLabelExpr *E) { void PCHStmtWriter::VisitStmtExpr(StmtExpr *E) { VisitExpr(E); - Writer.WriteSubStmt(E->getSubStmt()); + Writer.AddStmt(E->getSubStmt()); Writer.AddSourceLocation(E->getLParenLoc(), Record); Writer.AddSourceLocation(E->getRParenLoc(), Record); Code = pch::EXPR_STMT; @@ -651,9 +725,9 @@ void PCHStmtWriter::VisitTypesCompatibleExpr(TypesCompatibleExpr *E) { void PCHStmtWriter::VisitChooseExpr(ChooseExpr *E) { VisitExpr(E); - Writer.WriteSubStmt(E->getCond()); - Writer.WriteSubStmt(E->getLHS()); - Writer.WriteSubStmt(E->getRHS()); + Writer.AddStmt(E->getCond()); + Writer.AddStmt(E->getLHS()); + Writer.AddStmt(E->getRHS()); Writer.AddSourceLocation(E->getBuiltinLoc(), Record); Writer.AddSourceLocation(E->getRParenLoc(), Record); Code = pch::EXPR_CHOOSE; @@ -669,7 +743,7 @@ void PCHStmtWriter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) { VisitExpr(E); Record.push_back(E->getNumSubExprs()); for (unsigned I = 0, N = E->getNumSubExprs(); I != N; ++I) - Writer.WriteSubStmt(E->getExpr(I)); + Writer.AddStmt(E->getExpr(I)); Writer.AddSourceLocation(E->getBuiltinLoc(), Record); Writer.AddSourceLocation(E->getRParenLoc(), 
Record); Code = pch::EXPR_SHUFFLE_VECTOR; @@ -688,6 +762,7 @@ void PCHStmtWriter::VisitBlockDeclRefExpr(BlockDeclRefExpr *E) { Writer.AddSourceLocation(E->getLocation(), Record); Record.push_back(E->isByRef()); Record.push_back(E->isConstQualAdded()); + Writer.AddStmt(E->getCopyConstructorExpr()); Code = pch::EXPR_BLOCK_DECL_REF; } @@ -697,7 +772,7 @@ void PCHStmtWriter::VisitBlockDeclRefExpr(BlockDeclRefExpr *E) { void PCHStmtWriter::VisitObjCStringLiteral(ObjCStringLiteral *E) { VisitExpr(E); - Writer.WriteSubStmt(E->getString()); + Writer.AddStmt(E->getString()); Writer.AddSourceLocation(E->getAtLoc(), Record); Code = pch::EXPR_OBJC_STRING_LITERAL; } @@ -730,7 +805,7 @@ void PCHStmtWriter::VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) { VisitExpr(E); Writer.AddDeclRef(E->getDecl(), Record); Writer.AddSourceLocation(E->getLocation(), Record); - Writer.WriteSubStmt(E->getBase()); + Writer.AddStmt(E->getBase()); Record.push_back(E->isArrow()); Record.push_back(E->isFreeIvar()); Code = pch::EXPR_OBJC_IVAR_REF_EXPR; @@ -740,7 +815,7 @@ void PCHStmtWriter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) { VisitExpr(E); Writer.AddDeclRef(E->getProperty(), Record); Writer.AddSourceLocation(E->getLocation(), Record); - Writer.WriteSubStmt(E->getBase()); + Writer.AddStmt(E->getBase()); Code = pch::EXPR_OBJC_PROPERTY_REF_EXPR; } @@ -752,7 +827,7 @@ void PCHStmtWriter::VisitObjCImplicitSetterGetterRefExpr( // NOTE: InterfaceDecl and Base are mutually exclusive. 
Writer.AddDeclRef(E->getInterfaceDecl(), Record); - Writer.WriteSubStmt(E->getBase()); + Writer.AddStmt(E->getBase()); Writer.AddSourceLocation(E->getLocation(), Record); Writer.AddSourceLocation(E->getClassLoc(), Record); Code = pch::EXPR_OBJC_KVC_REF_EXPR; @@ -764,7 +839,7 @@ void PCHStmtWriter::VisitObjCMessageExpr(ObjCMessageExpr *E) { Record.push_back((unsigned)E->getReceiverKind()); // FIXME: stable encoding switch (E->getReceiverKind()) { case ObjCMessageExpr::Instance: - Writer.WriteSubStmt(E->getInstanceReceiver()); + Writer.AddStmt(E->getInstanceReceiver()); break; case ObjCMessageExpr::Class: @@ -791,7 +866,7 @@ void PCHStmtWriter::VisitObjCMessageExpr(ObjCMessageExpr *E) { for (CallExpr::arg_iterator Arg = E->arg_begin(), ArgEnd = E->arg_end(); Arg != ArgEnd; ++Arg) - Writer.WriteSubStmt(*Arg); + Writer.AddStmt(*Arg); Code = pch::EXPR_OBJC_MESSAGE_EXPR; } @@ -803,16 +878,16 @@ void PCHStmtWriter::VisitObjCSuperExpr(ObjCSuperExpr *E) { void PCHStmtWriter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) { VisitStmt(S); - Writer.WriteSubStmt(S->getElement()); - Writer.WriteSubStmt(S->getCollection()); - Writer.WriteSubStmt(S->getBody()); + Writer.AddStmt(S->getElement()); + Writer.AddStmt(S->getCollection()); + Writer.AddStmt(S->getBody()); Writer.AddSourceLocation(S->getForLoc(), Record); Writer.AddSourceLocation(S->getRParenLoc(), Record); Code = pch::STMT_OBJC_FOR_COLLECTION; } void PCHStmtWriter::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) { - Writer.WriteSubStmt(S->getCatchBody()); + Writer.AddStmt(S->getCatchBody()); Writer.AddDeclRef(S->getCatchParamDecl(), Record); Writer.AddSourceLocation(S->getAtCatchLoc(), Record); Writer.AddSourceLocation(S->getRParenLoc(), Record); @@ -820,7 +895,7 @@ void PCHStmtWriter::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) { } void PCHStmtWriter::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) { - Writer.WriteSubStmt(S->getFinallyBody()); + Writer.AddStmt(S->getFinallyBody()); Writer.AddSourceLocation(S->getAtFinallyLoc(), 
Record); Code = pch::STMT_OBJC_FINALLY; } @@ -828,24 +903,24 @@ void PCHStmtWriter::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) { void PCHStmtWriter::VisitObjCAtTryStmt(ObjCAtTryStmt *S) { Record.push_back(S->getNumCatchStmts()); Record.push_back(S->getFinallyStmt() != 0); - Writer.WriteSubStmt(S->getTryBody()); + Writer.AddStmt(S->getTryBody()); for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I) - Writer.WriteSubStmt(S->getCatchStmt(I)); + Writer.AddStmt(S->getCatchStmt(I)); if (S->getFinallyStmt()) - Writer.WriteSubStmt(S->getFinallyStmt()); + Writer.AddStmt(S->getFinallyStmt()); Writer.AddSourceLocation(S->getAtTryLoc(), Record); Code = pch::STMT_OBJC_AT_TRY; } void PCHStmtWriter::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) { - Writer.WriteSubStmt(S->getSynchExpr()); - Writer.WriteSubStmt(S->getSynchBody()); + Writer.AddStmt(S->getSynchExpr()); + Writer.AddStmt(S->getSynchBody()); Writer.AddSourceLocation(S->getAtSynchronizedLoc(), Record); Code = pch::STMT_OBJC_AT_SYNCHRONIZED; } void PCHStmtWriter::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) { - Writer.WriteSubStmt(S->getThrowExpr()); + Writer.AddStmt(S->getThrowExpr()); Writer.AddSourceLocation(S->getThrowLoc(), Record); Code = pch::STMT_OBJC_AT_THROW; } @@ -867,17 +942,24 @@ void PCHStmtWriter::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) { void PCHStmtWriter::VisitCXXConstructExpr(CXXConstructExpr *E) { VisitExpr(E); + Record.push_back(E->getNumArgs()); + for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I) + Writer.AddStmt(E->getArg(I)); Writer.AddDeclRef(E->getConstructor(), Record); Writer.AddSourceLocation(E->getLocation(), Record); Record.push_back(E->isElidable()); Record.push_back(E->requiresZeroInitialization()); - Record.push_back(E->getNumArgs()); - for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I) - Writer.WriteSubStmt(E->getArg(I)); Record.push_back(E->getConstructionKind()); // FIXME: stable encoding Code = pch::EXPR_CXX_CONSTRUCT; } +void 
PCHStmtWriter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E) { + VisitCXXConstructExpr(E); + Writer.AddSourceLocation(E->getTypeBeginLoc(), Record); + Writer.AddSourceLocation(E->getRParenLoc(), Record); + Code = pch::EXPR_CXX_TEMPORARY_OBJECT; +} + void PCHStmtWriter::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) { VisitExplicitCastExpr(E); Writer.AddSourceLocation(E->getOperatorLoc(), Record); @@ -930,7 +1012,7 @@ void PCHStmtWriter::VisitCXXTypeidExpr(CXXTypeidExpr *E) { Writer.AddTypeSourceInfo(E->getTypeOperandSourceInfo(), Record); Code = pch::EXPR_CXX_TYPEID_TYPE; } else { - Writer.WriteSubStmt(E->getExprOperand()); + Writer.AddStmt(E->getExprOperand()); Code = pch::EXPR_CXX_TYPEID_EXPR; } } @@ -945,19 +1027,20 @@ void PCHStmtWriter::VisitCXXThisExpr(CXXThisExpr *E) { void PCHStmtWriter::VisitCXXThrowExpr(CXXThrowExpr *E) { VisitExpr(E); Writer.AddSourceLocation(E->getThrowLoc(), Record); - Writer.WriteSubStmt(E->getSubExpr()); + Writer.AddStmt(E->getSubExpr()); Code = pch::EXPR_CXX_THROW; } void PCHStmtWriter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) { VisitExpr(E); + + bool HasOtherExprStored = E->Param.getInt(); + // Store these first, the reader reads them before creation. 
+ Record.push_back(HasOtherExprStored); + if (HasOtherExprStored) + Writer.AddStmt(E->getExpr()); + Writer.AddDeclRef(E->getParam(), Record); Writer.AddSourceLocation(E->getUsedLocation(), Record); - if (E->isExprStored()) { - Record.push_back(1); - Writer.WriteSubStmt(E->getExpr()); - } else { - Record.push_back(0); - } Code = pch::EXPR_CXX_DEFAULT_ARG; } @@ -965,21 +1048,28 @@ void PCHStmtWriter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) { void PCHStmtWriter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) { VisitExpr(E); Writer.AddCXXTemporary(E->getTemporary(), Record); - Writer.WriteSubStmt(E->getSubExpr()); + Writer.AddStmt(E->getSubExpr()); Code = pch::EXPR_CXX_BIND_TEMPORARY; } -void PCHStmtWriter::VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) { +void PCHStmtWriter::VisitCXXBindReferenceExpr(CXXBindReferenceExpr *E) { + VisitExpr(E); + Writer.AddStmt(E->getSubExpr()); + Record.push_back(E->extendsLifetime()); + Record.push_back(E->requiresTemporaryCopy()); + Code = pch::EXPR_CXX_BIND_REFERENCE; +} + +void PCHStmtWriter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) { VisitExpr(E); Writer.AddSourceLocation(E->getTypeBeginLoc(), Record); Writer.AddSourceLocation(E->getRParenLoc(), Record); - Code = pch::EXPR_CXX_ZERO_INIT_VALUE; + Code = pch::EXPR_CXX_SCALAR_VALUE_INIT; } void PCHStmtWriter::VisitCXXNewExpr(CXXNewExpr *E) { VisitExpr(E); Record.push_back(E->isGlobalNew()); - Record.push_back(E->isParenTypeId()); Record.push_back(E->hasInitializer()); Record.push_back(E->isArray()); Record.push_back(E->getNumPlacementArgs()); @@ -987,15 +1077,48 @@ void PCHStmtWriter::VisitCXXNewExpr(CXXNewExpr *E) { Writer.AddDeclRef(E->getOperatorNew(), Record); Writer.AddDeclRef(E->getOperatorDelete(), Record); Writer.AddDeclRef(E->getConstructor(), Record); + Writer.AddSourceRange(E->getTypeIdParens(), Record); Writer.AddSourceLocation(E->getStartLoc(), Record); Writer.AddSourceLocation(E->getEndLoc(), Record); for (CXXNewExpr::arg_iterator I = 
E->raw_arg_begin(), e = E->raw_arg_end(); I != e; ++I) - Writer.WriteSubStmt(*I); + Writer.AddStmt(*I); Code = pch::EXPR_CXX_NEW; } +void PCHStmtWriter::VisitCXXDeleteExpr(CXXDeleteExpr *E) { + VisitExpr(E); + Record.push_back(E->isGlobalDelete()); + Record.push_back(E->isArrayForm()); + Writer.AddDeclRef(E->getOperatorDelete(), Record); + Writer.AddStmt(E->getArgument()); + Writer.AddSourceLocation(E->getSourceRange().getBegin(), Record); + + Code = pch::EXPR_CXX_DELETE; +} + +void PCHStmtWriter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) { + VisitExpr(E); + + Writer.AddStmt(E->getBase()); + Record.push_back(E->isArrow()); + Writer.AddSourceLocation(E->getOperatorLoc(), Record); + Writer.AddNestedNameSpecifier(E->getQualifier(), Record); + Writer.AddSourceRange(E->getQualifierRange(), Record); + Writer.AddTypeSourceInfo(E->getScopeTypeInfo(), Record); + Writer.AddSourceLocation(E->getColonColonLoc(), Record); + Writer.AddSourceLocation(E->getTildeLoc(), Record); + + // PseudoDestructorTypeStorage. + Writer.AddIdentifierRef(E->getDestroyedTypeIdentifier(), Record); + if (E->getDestroyedTypeIdentifier()) + Writer.AddSourceLocation(E->getDestroyedTypeLoc(), Record); + else + Writer.AddTypeSourceInfo(E->getDestroyedTypeInfo(), Record); + + Code = pch::EXPR_CXX_PSEUDO_DESTRUCTOR; +} void PCHStmtWriter::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) { VisitExpr(E); @@ -1003,10 +1126,132 @@ void PCHStmtWriter::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) { for (unsigned i = 0, e = E->getNumTemporaries(); i != e; ++i) Writer.AddCXXTemporary(E->getTemporary(i), Record); - Writer.WriteSubStmt(E->getSubExpr()); + Writer.AddStmt(E->getSubExpr()); Code = pch::EXPR_CXX_EXPR_WITH_TEMPORARIES; } +void +PCHStmtWriter::VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E){ + VisitExpr(E); + + // Don't emit anything here, NumTemplateArgs must be emitted first. 
+ + if (E->hasExplicitTemplateArgs()) { + const ExplicitTemplateArgumentList &Args + = *E->getExplicitTemplateArgumentList(); + assert(Args.NumTemplateArgs && + "Num of template args was zero! PCH reading will mess up!"); + Record.push_back(Args.NumTemplateArgs); + AddExplicitTemplateArgumentList(Args); + } else { + Record.push_back(0); + } + + if (!E->isImplicitAccess()) + Writer.AddStmt(E->getBase()); + else + Writer.AddStmt(0); + Writer.AddTypeRef(E->getBaseType(), Record); + Record.push_back(E->isArrow()); + Writer.AddSourceLocation(E->getOperatorLoc(), Record); + Writer.AddNestedNameSpecifier(E->getQualifier(), Record); + Writer.AddSourceRange(E->getQualifierRange(), Record); + Writer.AddDeclRef(E->getFirstQualifierFoundInScope(), Record); + Writer.AddDeclarationName(E->getMember(), Record); + Writer.AddSourceLocation(E->getMemberLoc(), Record); + Code = pch::EXPR_CXX_DEPENDENT_SCOPE_MEMBER; +} + +void +PCHStmtWriter::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) { + VisitExpr(E); + + // Don't emit anything here, NumTemplateArgs must be emitted first. + + if (E->hasExplicitTemplateArgs()) { + const ExplicitTemplateArgumentList &Args = E->getExplicitTemplateArgs(); + assert(Args.NumTemplateArgs && + "Num of template args was zero! 
PCH reading will mess up!"); + Record.push_back(Args.NumTemplateArgs); + AddExplicitTemplateArgumentList(Args); + } else { + Record.push_back(0); + } + + Writer.AddDeclarationName(E->getDeclName(), Record); + Writer.AddSourceLocation(E->getLocation(), Record); + Writer.AddSourceRange(E->getQualifierRange(), Record); + Writer.AddNestedNameSpecifier(E->getQualifier(), Record); + Code = pch::EXPR_CXX_DEPENDENT_SCOPE_DECL_REF; +} + +void +PCHStmtWriter::VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E) { + VisitExpr(E); + Record.push_back(E->arg_size()); + for (CXXUnresolvedConstructExpr::arg_iterator + ArgI = E->arg_begin(), ArgE = E->arg_end(); ArgI != ArgE; ++ArgI) + Writer.AddStmt(*ArgI); + Writer.AddSourceLocation(E->getTypeBeginLoc(), Record); + Writer.AddTypeRef(E->getTypeAsWritten(), Record); + Writer.AddSourceLocation(E->getLParenLoc(), Record); + Writer.AddSourceLocation(E->getRParenLoc(), Record); + Code = pch::EXPR_CXX_UNRESOLVED_CONSTRUCT; +} + +void PCHStmtWriter::VisitOverloadExpr(OverloadExpr *E) { + VisitExpr(E); + + // Don't emit anything here, NumTemplateArgs must be emitted first. + + if (E->hasExplicitTemplateArgs()) { + const ExplicitTemplateArgumentList &Args = E->getExplicitTemplateArgs(); + assert(Args.NumTemplateArgs && + "Num of template args was zero! 
PCH reading will mess up!"); + Record.push_back(Args.NumTemplateArgs); + AddExplicitTemplateArgumentList(Args); + } else { + Record.push_back(0); + } + + Record.push_back(E->getNumDecls()); + for (OverloadExpr::decls_iterator + OvI = E->decls_begin(), OvE = E->decls_end(); OvI != OvE; ++OvI) { + Writer.AddDeclRef(OvI.getDecl(), Record); + Record.push_back(OvI.getAccess()); + } + + Writer.AddDeclarationName(E->getName(), Record); + Writer.AddNestedNameSpecifier(E->getQualifier(), Record); + Writer.AddSourceRange(E->getQualifierRange(), Record); + Writer.AddSourceLocation(E->getNameLoc(), Record); +} + +void PCHStmtWriter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) { + VisitOverloadExpr(E); + Record.push_back(E->isArrow()); + Record.push_back(E->hasUnresolvedUsing()); + Writer.AddStmt(!E->isImplicitAccess() ? E->getBase() : 0); + Writer.AddTypeRef(E->getBaseType(), Record); + Writer.AddSourceLocation(E->getOperatorLoc(), Record); + Code = pch::EXPR_CXX_UNRESOLVED_MEMBER; +} + +void PCHStmtWriter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) { + VisitOverloadExpr(E); + Record.push_back(E->requiresADL()); + Record.push_back(E->isOverloaded()); + Writer.AddDeclRef(E->getNamingClass(), Record); + Code = pch::EXPR_CXX_UNRESOLVED_LOOKUP; +} + +void PCHStmtWriter::VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E) { + VisitExpr(E); + Record.push_back(E->getTrait()); + Writer.AddSourceRange(E->getSourceRange(), Record); + Writer.AddTypeRef(E->getQueriedType(), Record); + Code = pch::EXPR_CXX_UNARY_TYPE_TRAIT; +} //===----------------------------------------------------------------------===// // PCHWriter Implementation @@ -1044,21 +1289,38 @@ void PCHWriter::WriteSubStmt(Stmt *S) { RecordData Record; PCHStmtWriter Writer(*this, Record); ++NumStatements; - + if (!S) { Stream.EmitRecord(pch::STMT_NULL_PTR, Record); return; } + // Redirect PCHWriter::AddStmt to collect sub stmts. 
+ llvm::SmallVector<Stmt *, 16> SubStmts; + CollectedStmts = &SubStmts; + Writer.Code = pch::STMT_NULL_PTR; Writer.Visit(S); #ifndef NDEBUG if (Writer.Code == pch::STMT_NULL_PTR) { - S->dump(); + SourceManager &SrcMgr + = DeclIDs.begin()->first->getASTContext().getSourceManager(); + S->dump(SrcMgr); assert(0 && "Unhandled sub statement writing PCH file"); } #endif + + // Revert PCHWriter::AddStmt. + CollectedStmts = &StmtsToEmit; + + // Write the sub stmts in reverse order, last to first. When reading them back + // we will read them in correct order by "pop"ing them from the Stmts stack. + // This simplifies reading and allows to store a variable number of sub stmts + // without knowing it in advance. + while (!SubStmts.empty()) + WriteSubStmt(SubStmts.pop_back_val()); + Stream.EmitRecord(Writer.Code, Record); } @@ -1066,34 +1328,16 @@ void PCHWriter::WriteSubStmt(Stmt *S) { /// queue via AddStmt(). void PCHWriter::FlushStmts() { RecordData Record; - PCHStmtWriter Writer(*this, Record); for (unsigned I = 0, N = StmtsToEmit.size(); I != N; ++I) { - ++NumStatements; - Stmt *S = StmtsToEmit[I]; - - if (!S) { - Stream.EmitRecord(pch::STMT_NULL_PTR, Record); - continue; - } - - Writer.Code = pch::STMT_NULL_PTR; - Writer.Visit(S); -#ifndef NDEBUG - if (Writer.Code == pch::STMT_NULL_PTR) { - S->dump(); - assert(0 && "Unhandled expression writing PCH file"); - } -#endif - Stream.EmitRecord(Writer.Code, Record); - + WriteSubStmt(StmtsToEmit[I]); + assert(N == StmtsToEmit.size() && "Substatement writen via AddStmt rather than WriteSubStmt!"); // Note that we are at the end of a full expression. Any // expression records that follow this one are part of a different // expression. 
- Record.clear(); Stream.EmitRecord(pch::STMT_STOP, Record); } diff --git a/contrib/llvm/tools/clang/lib/Frontend/PrintParserCallbacks.cpp b/contrib/llvm/tools/clang/lib/Frontend/PrintParserCallbacks.cpp index b032233..9220677 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/PrintParserCallbacks.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/PrintParserCallbacks.cpp @@ -819,7 +819,8 @@ namespace { SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, - bool ParenTypeId, Declarator &D, + SourceRange TypeIdParens, + Declarator &D, SourceLocation ConstructorLParen, MultiExprArg ConstructorArgs, SourceLocation ConstructorRParen) { diff --git a/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp b/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp index b6c18b7..73bca9a 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp @@ -23,6 +23,7 @@ #include "clang/Lex/TokenConcatenation.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/StringRef.h" #include "llvm/Config/config.h" #include "llvm/Support/raw_ostream.h" #include <cstdio> @@ -117,7 +118,7 @@ public: virtual void Ident(SourceLocation Loc, const std::string &str); virtual void PragmaComment(SourceLocation Loc, const IdentifierInfo *Kind, const std::string &Str); - + virtual void PragmaMessage(SourceLocation Loc, llvm::StringRef Str); bool HandleFirstTokOnLine(Token &Tok); bool MoveToLine(SourceLocation Loc) { @@ -174,20 +175,6 @@ void PrintPPOutputPPCallbacks::WriteLineInfo(unsigned LineNo, /// #line directive. This returns false if already at the specified line, true /// if some newlines were emitted. 
bool PrintPPOutputPPCallbacks::MoveToLine(unsigned LineNo) { - if (DisableLineMarkers) { - if (LineNo == CurLine) return false; - - CurLine = LineNo; - - if (!EmittedTokensOnThisLine && !EmittedMacroOnThisLine) - return true; - - OS << '\n'; - EmittedTokensOnThisLine = false; - EmittedMacroOnThisLine = false; - return true; - } - // If this line is "close enough" to the original line, just print newlines, // otherwise print a #line directive. if (LineNo-CurLine <= 8) { @@ -199,8 +186,17 @@ bool PrintPPOutputPPCallbacks::MoveToLine(unsigned LineNo) { const char *NewLines = "\n\n\n\n\n\n\n\n"; OS.write(NewLines, LineNo-CurLine); } - } else { + } else if (!DisableLineMarkers) { + // Emit a #line or line marker. WriteLineInfo(LineNo, 0, 0); + } else { + // Okay, we're in -P mode, which turns off line markers. However, we still + // need to emit a newline between tokens on different lines. + if (EmittedTokensOnThisLine || EmittedMacroOnThisLine) { + OS << '\n'; + EmittedTokensOnThisLine = false; + EmittedMacroOnThisLine = false; + } } CurLine = LineNo; @@ -311,6 +307,29 @@ void PrintPPOutputPPCallbacks::PragmaComment(SourceLocation Loc, EmittedTokensOnThisLine = true; } +void PrintPPOutputPPCallbacks::PragmaMessage(SourceLocation Loc, + llvm::StringRef Str) { + MoveToLine(Loc); + OS << "#pragma message("; + + OS << '"'; + + for (unsigned i = 0, e = Str.size(); i != e; ++i) { + unsigned char Char = Str[i]; + if (isprint(Char) && Char != '\\' && Char != '"') + OS << (char)Char; + else // Output anything hard as an octal escape. + OS << '\\' + << (char)('0'+ ((Char >> 6) & 7)) + << (char)('0'+ ((Char >> 3) & 7)) + << (char)('0'+ ((Char >> 0) & 7)); + } + OS << '"'; + + OS << ')'; + EmittedTokensOnThisLine = true; +} + /// HandleFirstTokOnLine - When emitting a preprocessed file in -E mode, this /// is called for the first token on each new line. 
If this really is the start @@ -372,7 +391,7 @@ struct UnknownPragmaHandler : public PragmaHandler { PrintPPOutputPPCallbacks *Callbacks; UnknownPragmaHandler(const char *prefix, PrintPPOutputPPCallbacks *callbacks) - : PragmaHandler(0), Prefix(prefix), Callbacks(callbacks) {} + : Prefix(prefix), Callbacks(callbacks) {} virtual void HandlePragma(Preprocessor &PP, Token &PragmaTok) { // Figure out what line we went to and insert the appropriate number of // newline characters. @@ -397,8 +416,9 @@ static void PrintPreprocessedTokens(Preprocessor &PP, Token &Tok, PrintPPOutputPPCallbacks *Callbacks, llvm::raw_ostream &OS) { char Buffer[256]; - Token PrevPrevTok; - Token PrevTok; + Token PrevPrevTok, PrevTok; + PrevPrevTok.startToken(); + PrevTok.startToken(); while (1) { // If this token is at the start of a line, emit newlines if needed. @@ -454,6 +474,9 @@ static int MacroIDCompare(const void* a, const void* b) { } static void DoPrintMacros(Preprocessor &PP, llvm::raw_ostream *OS) { + // Ignore unknown pragmas. + PP.AddPragmaHandler(new EmptyPragmaHandler()); + // -dM mode just scans and ignores all tokens in the files, then dumps out // the macro table at the end. 
PP.EnterMainSourceFile(); @@ -494,7 +517,7 @@ void clang::DoPrintPreprocessedInput(Preprocessor &PP, llvm::raw_ostream *OS, PrintPPOutputPPCallbacks *Callbacks = new PrintPPOutputPPCallbacks(PP, *OS, !Opts.ShowLineMarkers, Opts.ShowMacros); - PP.AddPragmaHandler(0, new UnknownPragmaHandler("#pragma", Callbacks)); + PP.AddPragmaHandler(new UnknownPragmaHandler("#pragma", Callbacks)); PP.AddPragmaHandler("GCC", new UnknownPragmaHandler("#pragma GCC", Callbacks)); diff --git a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp index 6ccf4f1..1b5b7e2 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp @@ -70,7 +70,7 @@ PrintIncludeStack(SourceLocation Loc, const SourceManager &SM) { /// HighlightRange - Given a SourceRange and a line number, highlight (with ~'s) /// any characters in LineNo that intersect the SourceRange. -void TextDiagnosticPrinter::HighlightRange(const SourceRange &R, +void TextDiagnosticPrinter::HighlightRange(const CharSourceRange &R, const SourceManager &SM, unsigned LineNo, FileID FID, std::string &CaretLine, @@ -112,8 +112,10 @@ void TextDiagnosticPrinter::HighlightRange(const SourceRange &R, if (EndColNo) { --EndColNo; // Zero base the col #. - // Add in the length of the token, so that we cover multi-char tokens. - EndColNo += Lexer::MeasureTokenLength(End, SM, *LangOpts); + // Add in the length of the token, so that we cover multi-char tokens if + // this is a token range. + if (R.isTokenRange()) + EndColNo += Lexer::MeasureTokenLength(End, SM, *LangOpts); } else { EndColNo = CaretLine.size(); } @@ -121,21 +123,24 @@ void TextDiagnosticPrinter::HighlightRange(const SourceRange &R, assert(StartColNo <= EndColNo && "Invalid range!"); - // Pick the first non-whitespace column. 
- while (StartColNo < SourceLine.size() && - (SourceLine[StartColNo] == ' ' || SourceLine[StartColNo] == '\t')) - ++StartColNo; - - // Pick the last non-whitespace column. - if (EndColNo > SourceLine.size()) - EndColNo = SourceLine.size(); - while (EndColNo-1 && - (SourceLine[EndColNo-1] == ' ' || SourceLine[EndColNo-1] == '\t')) - --EndColNo; - - // If the start/end passed each other, then we are trying to highlight a range - // that just exists in whitespace, which must be some sort of other bug. - assert(StartColNo <= EndColNo && "Trying to highlight whitespace??"); + // Check that a token range does not highlight only whitespace. + if (R.isTokenRange()) { + // Pick the first non-whitespace column. + while (StartColNo < SourceLine.size() && + (SourceLine[StartColNo] == ' ' || SourceLine[StartColNo] == '\t')) + ++StartColNo; + + // Pick the last non-whitespace column. + if (EndColNo > SourceLine.size()) + EndColNo = SourceLine.size(); + while (EndColNo-1 && + (SourceLine[EndColNo-1] == ' ' || SourceLine[EndColNo-1] == '\t')) + --EndColNo; + + // If the start/end passed each other, then we are trying to highlight a range + // that just exists in whitespace, which must be some sort of other bug. + assert(StartColNo <= EndColNo && "Trying to highlight whitespace??"); + } // Fill the range with ~'s. for (unsigned i = StartColNo; i < EndColNo; ++i) @@ -281,7 +286,7 @@ static void SelectInterestingSourceRegion(std::string &SourceLine, } void TextDiagnosticPrinter::EmitCaretDiagnostic(SourceLocation Loc, - SourceRange *Ranges, + CharSourceRange *Ranges, unsigned NumRanges, const SourceManager &SM, const FixItHint *Hints, @@ -312,10 +317,12 @@ void TextDiagnosticPrinter::EmitCaretDiagnostic(SourceLocation Loc, // Map the ranges. 
for (unsigned i = 0; i != NumRanges; ++i) { - SourceLocation S = Ranges[i].getBegin(), E = Ranges[i].getEnd(); - if (S.isMacroID()) S = SM.getImmediateSpellingLoc(S); - if (E.isMacroID()) E = SM.getImmediateSpellingLoc(E); - Ranges[i] = SourceRange(S, E); + CharSourceRange &R = Ranges[i]; + SourceLocation S = R.getBegin(), E = R.getEnd(); + if (S.isMacroID()) + R.setBegin(SM.getImmediateSpellingLoc(S)); + if (E.isMacroID()) + R.setEnd(SM.getImmediateSpellingLoc(E)); } if (!Suppressed) { @@ -777,7 +784,9 @@ void TextDiagnosticPrinter::HandleDiagnostic(Diagnostic::Level Level, continue; // Add in the length of the token, so that we cover multi-char tokens. - unsigned TokSize = Lexer::MeasureTokenLength(E, SM, *LangOpts); + unsigned TokSize = 0; + if (Info.getRange(i).isTokenRange()) + TokSize = Lexer::MeasureTokenLength(E, SM, *LangOpts); OS << '{' << SM.getLineNumber(BInfo.first, BInfo.second) << ':' << SM.getColumnNumber(BInfo.first, BInfo.second) << '-' @@ -904,15 +913,15 @@ void TextDiagnosticPrinter::HandleDiagnostic(Diagnostic::Level Level, LastCaretDiagnosticWasNote = (Level == Diagnostic::Note); // Get the ranges into a local array we can hack on. 
- SourceRange Ranges[20]; + CharSourceRange Ranges[20]; unsigned NumRanges = Info.getNumRanges(); assert(NumRanges < 20 && "Out of space"); for (unsigned i = 0; i != NumRanges; ++i) Ranges[i] = Info.getRange(i); unsigned NumHints = Info.getNumFixItHints(); - for (unsigned idx = 0; idx < NumHints; ++idx) { - const FixItHint &Hint = Info.getFixItHint(idx); + for (unsigned i = 0; i != NumHints; ++i) { + const FixItHint &Hint = Info.getFixItHint(i); if (Hint.RemoveRange.isValid()) { assert(NumRanges < 20 && "Out of space"); Ranges[NumRanges++] = Hint.RemoveRange; diff --git a/contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp b/contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp index 84c4f5d..8cc5616 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp @@ -35,6 +35,8 @@ void clang::ProcessWarningOptions(Diagnostic &Diags, const DiagnosticOptions &Opts) { Diags.setSuppressSystemWarnings(true); // Default to -Wno-system-headers Diags.setIgnoreAllWarnings(Opts.IgnoreWarnings); + Diags.setShowOverloads( + static_cast<Diagnostic::OverloadsShown>(Opts.ShowOverloads)); // Handle -ferror-limit if (Opts.ErrorLimit) diff --git a/contrib/llvm/tools/clang/lib/Headers/CMakeLists.txt b/contrib/llvm/tools/clang/lib/Headers/CMakeLists.txt index 047fdb3..97a99d6 100644 --- a/contrib/llvm/tools/clang/lib/Headers/CMakeLists.txt +++ b/contrib/llvm/tools/clang/lib/Headers/CMakeLists.txt @@ -1,6 +1,5 @@ set(files altivec.h - arm_neon.h emmintrin.h float.h iso646.h @@ -22,6 +21,14 @@ else () set(output_dir ${LLVM_BINARY_DIR}/lib/clang/${CLANG_VERSION}/include) endif () +# Generate arm_neon.h +set(LLVM_TARGET_DEFINITIONS ${CLANG_SOURCE_DIR}/include/clang/Basic/arm_neon.td) +tablegen(arm_neon.h.inc -gen-arm-neon) + +add_custom_command(OUTPUT ${output_dir}/arm_neon.h + DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/arm_neon.h.inc + COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_BINARY_DIR}/arm_neon.h.inc 
${output_dir}/arm_neon.h + COMMENT "Copying clang's arm_neon.h...") foreach( f ${files} ) set( src ${CMAKE_CURRENT_SOURCE_DIR}/${f} ) @@ -33,8 +40,8 @@ foreach( f ${files} ) endforeach( f ) add_custom_target(clang-headers ALL - DEPENDS ${files}) + DEPENDS ${files} ${output_dir}/arm_neon.h) -install(FILES ${files} +install(FILES ${files} ${output_dir}/arm_neon.h PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ DESTINATION lib${LLVM_LIBDIR_SUFFIX}/clang/${CLANG_VERSION}/include) diff --git a/contrib/llvm/tools/clang/lib/Headers/Makefile b/contrib/llvm/tools/clang/lib/Headers/Makefile index cb36e84..ebb8384 100644 --- a/contrib/llvm/tools/clang/lib/Headers/Makefile +++ b/contrib/llvm/tools/clang/lib/Headers/Makefile @@ -7,10 +7,15 @@ # ##===----------------------------------------------------------------------===## -LEVEL = ../../../.. -include $(LEVEL)/Makefile.common +CLANG_LEVEL := ../.. -CLANG_VERSION := $(shell cat $(PROJ_SRC_DIR)/../../VER) +BUILT_SOURCES = arm_neon.h.inc +TABLEGEN_INC_FILES_COMMON = 1 + +include $(CLANG_LEVEL)/Makefile + +CLANG_VERSION := $(word 3,$(shell grep "CLANG_VERSION " \ + $(PROJ_OBJ_DIR)/$(CLANG_LEVEL)/include/clang/Basic/Version.inc)) HeaderDir := $(PROJ_OBJ_ROOT)/$(BuildMode)/lib/clang/$(CLANG_VERSION)/include @@ -19,7 +24,11 @@ HEADERS := $(notdir $(wildcard $(PROJ_SRC_DIR)/*.h)) OBJHEADERS := $(addprefix $(HeaderDir)/, $(HEADERS)) -$(OBJHEADERS): $(HeaderDir)/%.h: $(PROJ_SRC_DIR)/%.h $(HeaderDir)/.dir +$(OBJHEADERS): $(HeaderDir)/%.h: $(PROJ_SRC_DIR)/%.h $(HeaderDir)/.dir $(HeaderDir)/arm_neon.h + $(Verb) cp $< $@ + $(Echo) Copying $(notdir $<) to build dir + +$(HeaderDir)/arm_neon.h: $(BUILT_SOURCES) $(HeaderDir)/.dir $(Verb) cp $< $@ $(Echo) Copying $(notdir $<) to build dir @@ -38,3 +47,7 @@ $(INSTHEADERS): $(PROJ_headers)/%.h: $(HeaderDir)/%.h | $(PROJ_headers) $(Echo) Installing compiler include file: $(notdir $<) install-local:: $(INSTHEADERS) + +$(ObjDir)/arm_neon.h.inc.tmp : 
$(CLANG_LEVEL)/include/clang/Basic/arm_neon.td $(TBLGEN) $(ObjDir)/.dir + $(Echo) "Building Clang arm_neon.h.inc with tblgen" + $(Verb) $(TableGen) -gen-arm-neon -o $(call SYSPATH, $@) $< diff --git a/contrib/llvm/tools/clang/lib/Headers/altivec.h b/contrib/llvm/tools/clang/lib/Headers/altivec.h index 1cd0db8..d3d5ad9 100644 --- a/contrib/llvm/tools/clang/lib/Headers/altivec.h +++ b/contrib/llvm/tools/clang/lib/Headers/altivec.h @@ -20,6 +20,9 @@ * \*===----------------------------------------------------------------------===*/ +// TODO: add functions for 'vector bool ..' and 'vector pixel' argument types according to +// the 'AltiVec Technology Programming Interface Manual' + #ifndef __ALTIVEC_H #define __ALTIVEC_H @@ -34,534 +37,629 @@ #define __CR6_LT 2 #define __CR6_LT_REV 3 -#define _ATTRS_o_ai __attribute__((__overloadable__, __always_inline__)) +#define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__)) + +static vector signed char __ATTRS_o_ai +vec_perm(vector signed char a, vector signed char b, vector unsigned char c); + +static vector unsigned char __ATTRS_o_ai +vec_perm(vector unsigned char a, vector unsigned char b, vector unsigned char c); + +static vector short __ATTRS_o_ai +vec_perm(vector short a, vector short b, vector unsigned char c); + +static vector unsigned short __ATTRS_o_ai +vec_perm(vector unsigned short a, vector unsigned short b, vector unsigned char c); + +static vector int __ATTRS_o_ai +vec_perm(vector int a, vector int b, vector unsigned char c); + +static vector unsigned int __ATTRS_o_ai +vec_perm(vector unsigned int a, vector unsigned int b, vector unsigned char c); + +static vector float __ATTRS_o_ai +vec_perm(vector float a, vector float b, vector unsigned char c); /* vec_abs */ -#define __builtin_vec_abs vec_abs #define __builtin_altivec_abs_v16qi vec_abs #define __builtin_altivec_abs_v8hi vec_abs #define __builtin_altivec_abs_v4si vec_abs -static vector signed char _ATTRS_o_ai +static vector signed char 
__ATTRS_o_ai vec_abs(vector signed char a) { return __builtin_altivec_vmaxsb(a, -a); } -static vector signed short _ATTRS_o_ai +static vector signed short __ATTRS_o_ai vec_abs(vector signed short a) { return __builtin_altivec_vmaxsh(a, -a); } -static vector signed int _ATTRS_o_ai +static vector signed int __ATTRS_o_ai vec_abs(vector signed int a) { return __builtin_altivec_vmaxsw(a, -a); } -static vector float _ATTRS_o_ai +static vector float __ATTRS_o_ai vec_abs(vector float a) { - vector unsigned int res = (vector unsigned int)a & - (vector unsigned int)(0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF); + vector unsigned int res = (vector unsigned int)a & (vector unsigned int)(0x7FFFFFFF); return (vector float)res; } /* vec_abss */ -#define __builtin_vec_abss vec_abss #define __builtin_altivec_abss_v16qi vec_abss #define __builtin_altivec_abss_v8hi vec_abss #define __builtin_altivec_abss_v4si vec_abss -static vector signed char _ATTRS_o_ai +static vector signed char __ATTRS_o_ai vec_abss(vector signed char a) { - return __builtin_altivec_vmaxsb(a, __builtin_altivec_vsubsbs( - (vector signed char)(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), a)); + return __builtin_altivec_vmaxsb(a, __builtin_altivec_vsubsbs((vector signed char)(0), a)); } -static vector signed short _ATTRS_o_ai +static vector signed short __ATTRS_o_ai vec_abss(vector signed short a) { - return __builtin_altivec_vmaxsh(a, __builtin_altivec_vsubshs( - (vector signed short)(0, 0, 0, 0, 0, 0, 0, 0), a)); + return __builtin_altivec_vmaxsh(a, __builtin_altivec_vsubshs((vector signed short)(0), a)); } -static vector signed int _ATTRS_o_ai +static vector signed int __ATTRS_o_ai vec_abss(vector signed int a) { - return __builtin_altivec_vmaxsw(a, __builtin_altivec_vsubsws( - (vector signed int)(0, 0, 0, 0), a)); + return __builtin_altivec_vmaxsw(a, __builtin_altivec_vsubsws((vector signed int)(0), a)); } /* vec_add */ -#define __builtin_altivec_vaddubm vec_add -#define __builtin_altivec_vadduhm vec_add 
-#define __builtin_altivec_vadduwm vec_add -#define __builtin_altivec_vaddfp vec_add -#define __builtin_vec_vaddubm vec_add -#define __builtin_vec_vadduhm vec_add -#define __builtin_vec_vadduwm vec_add -#define __builtin_vec_vaddfp vec_add -#define vec_vaddubm vec_add -#define vec_vadduhm vec_add -#define vec_vadduwm vec_add -#define vec_vaddfp vec_add - -static vector signed char _ATTRS_o_ai +static vector signed char __ATTRS_o_ai vec_add(vector signed char a, vector signed char b) { return a + b; } -static vector unsigned char _ATTRS_o_ai +static vector unsigned char __ATTRS_o_ai vec_add(vector unsigned char a, vector unsigned char b) { return a + b; } -static vector short _ATTRS_o_ai +static vector short __ATTRS_o_ai vec_add(vector short a, vector short b) { return a + b; } -static vector unsigned short _ATTRS_o_ai +static vector unsigned short __ATTRS_o_ai vec_add(vector unsigned short a, vector unsigned short b) { return a + b; } -static vector int _ATTRS_o_ai +static vector int __ATTRS_o_ai vec_add(vector int a, vector int b) { return a + b; } -static vector unsigned int _ATTRS_o_ai +static vector unsigned int __ATTRS_o_ai vec_add(vector unsigned int a, vector unsigned int b) { return a + b; } -static vector float _ATTRS_o_ai +static vector float __ATTRS_o_ai vec_add(vector float a, vector float b) { return a + b; } +/* vec_vaddubm */ + +#define __builtin_altivec_vaddubm vec_vaddubm + +static vector signed char __ATTRS_o_ai +vec_vaddubm(vector signed char a, vector signed char b) +{ + return a + b; +} + +static vector unsigned char __ATTRS_o_ai +vec_vaddubm(vector unsigned char a, vector unsigned char b) +{ + return a + b; +} + +/* vec_vadduhm */ + +#define __builtin_altivec_vadduhm vec_vadduhm + +static vector short __ATTRS_o_ai +vec_vadduhm(vector short a, vector short b) +{ + return a + b; +} + +static vector unsigned short __ATTRS_o_ai +vec_vadduhm(vector unsigned short a, vector unsigned short b) +{ + return a + b; +} + +/* vec_vadduwm */ + +#define 
__builtin_altivec_vadduwm vec_vadduwm + +static vector int __ATTRS_o_ai +vec_vadduwm(vector int a, vector int b) +{ + return a + b; +} + +static vector unsigned int __ATTRS_o_ai +vec_vadduwm(vector unsigned int a, vector unsigned int b) +{ + return a + b; +} + +/* vec_vaddfp */ + +#define __builtin_altivec_vaddfp vec_vaddfp + +static vector float __attribute__((__always_inline__)) +vec_vaddfp(vector float a, vector float b) +{ + return a + b; +} + /* vec_addc */ -#define __builtin_vec_addc __builtin_altivec_vaddcuw -#define vec_vaddcuw __builtin_altivec_vaddcuw -#define vec_addc __builtin_altivec_vaddcuw +static vector unsigned int __attribute__((__always_inline__)) +vec_addc(vector unsigned int a, vector unsigned int b) +{ + return __builtin_altivec_vaddcuw(a, b); +} + +/* vec_vaddcuw */ + +static vector unsigned int __attribute__((__always_inline__)) +vec_vaddcuw(vector unsigned int a, vector unsigned int b) +{ + return __builtin_altivec_vaddcuw(a, b); +} /* vec_adds */ -#define __builtin_vec_vaddsbs __builtin_altivec_vaddsbs -#define __builtin_vec_vaddubs __builtin_altivec_vaddubs -#define __builtin_vec_vaddshs __builtin_altivec_vaddshs -#define __builtin_vec_vadduhs __builtin_altivec_vadduhs -#define __builtin_vec_vaddsws __builtin_altivec_vaddsws -#define __builtin_vec_vadduws __builtin_altivec_vadduws -#define vec_vaddsbs __builtin_altivec_vaddsbs -#define vec_vaddubs __builtin_altivec_vaddubs -#define vec_vaddshs __builtin_altivec_vaddshs -#define vec_vadduhs __builtin_altivec_vadduhs -#define vec_vaddsws __builtin_altivec_vaddsws -#define vec_vadduws __builtin_altivec_vadduws - -static vector signed char _ATTRS_o_ai +static vector signed char __ATTRS_o_ai vec_adds(vector signed char a, vector signed char b) { return __builtin_altivec_vaddsbs(a, b); } -static vector unsigned char _ATTRS_o_ai +static vector unsigned char __ATTRS_o_ai vec_adds(vector unsigned char a, vector unsigned char b) { return __builtin_altivec_vaddubs(a, b); } -static vector short 
_ATTRS_o_ai +static vector short __ATTRS_o_ai vec_adds(vector short a, vector short b) { return __builtin_altivec_vaddshs(a, b); } -static vector unsigned short _ATTRS_o_ai +static vector unsigned short __ATTRS_o_ai vec_adds(vector unsigned short a, vector unsigned short b) { return __builtin_altivec_vadduhs(a, b); } -static vector int _ATTRS_o_ai +static vector int __ATTRS_o_ai vec_adds(vector int a, vector int b) { return __builtin_altivec_vaddsws(a, b); } -static vector unsigned int _ATTRS_o_ai +static vector unsigned int __ATTRS_o_ai vec_adds(vector unsigned int a, vector unsigned int b) { return __builtin_altivec_vadduws(a, b); } -/* vec_sub */ +/* vec_vaddsbs */ -#define __builtin_altivec_vsububm vec_sub -#define __builtin_altivec_vsubuhm vec_sub -#define __builtin_altivec_vsubuwm vec_sub -#define __builtin_altivec_vsubfp vec_sub -#define __builtin_vec_vsububm vec_sub -#define __builtin_vec_vsubuhm vec_sub -#define __builtin_vec_vsubuwm vec_sub -#define __builtin_vec_vsubfp vec_sub -#define vec_vsububm vec_sub -#define vec_vsubuhm vec_sub -#define vec_vsubuwm vec_sub -#define vec_vsubfp vec_sub - -static vector signed char _ATTRS_o_ai -vec_sub(vector signed char a, vector signed char b) +static vector signed char __attribute__((__always_inline__)) +vec_vaddsbs(vector signed char a, vector signed char b) { - return a - b; + return __builtin_altivec_vaddsbs(a, b); } -static vector unsigned char _ATTRS_o_ai -vec_sub(vector unsigned char a, vector unsigned char b) +/* vec_vaddubs */ + +static vector unsigned char __attribute__((__always_inline__)) +vec_vaddubs(vector unsigned char a, vector unsigned char b) { - return a - b; + return __builtin_altivec_vaddubs(a, b); } -static vector short _ATTRS_o_ai -vec_sub(vector short a, vector short b) +/* vec_vaddshs */ + +static vector short __attribute__((__always_inline__)) +vec_vaddshs(vector short a, vector short b) { - return a - b; + return __builtin_altivec_vaddshs(a, b); } -static vector unsigned short _ATTRS_o_ai 
-vec_sub(vector unsigned short a, vector unsigned short b) +/* vec_vadduhs */ + +static vector unsigned short __attribute__((__always_inline__)) +vec_vadduhs(vector unsigned short a, vector unsigned short b) { - return a - b; + return __builtin_altivec_vadduhs(a, b); } -static vector int _ATTRS_o_ai -vec_sub(vector int a, vector int b) +/* vec_vaddsws */ + +static vector int __attribute__((__always_inline__)) +vec_vaddsws(vector int a, vector int b) { - return a - b; + return __builtin_altivec_vaddsws(a, b); } -static vector unsigned int _ATTRS_o_ai -vec_sub(vector unsigned int a, vector unsigned int b) +/* vec_vadduws */ + +static vector unsigned int __attribute__((__always_inline__)) +vec_vadduws(vector unsigned int a, vector unsigned int b) { - return a - b; + return __builtin_altivec_vadduws(a, b); } -static vector float _ATTRS_o_ai -vec_sub(vector float a, vector float b) +/* vec_and */ + +#define __builtin_altivec_vand vec_and + +static vector signed char __ATTRS_o_ai +vec_and(vector signed char a, vector signed char b) { - return a - b; + return a & b; } -/* vec_subs */ +static vector unsigned char __ATTRS_o_ai +vec_and(vector unsigned char a, vector unsigned char b) +{ + return a & b; +} -#define __builtin_vec_vsubsbs __builtin_altivec_vsubsbs -#define __builtin_vec_vsububs __builtin_altivec_vsububs -#define __builtin_vec_vsubshs __builtin_altivec_vsubshs -#define __builtin_vec_vsubuhs __builtin_altivec_vsubuhs -#define __builtin_vec_vsubsws __builtin_altivec_vsubsws -#define __builtin_vec_vsubuws __builtin_altivec_vsubuws -#define vec_vsubsbs __builtin_altivec_vsubsbs -#define vec_vsububs __builtin_altivec_vsububs -#define vec_vsubshs __builtin_altivec_vsubshs -#define vec_vsubuhs __builtin_altivec_vsubuhs -#define vec_vsubsws __builtin_altivec_vsubsws -#define vec_vsubuws __builtin_altivec_vsubuws - -static vector signed char _ATTRS_o_ai -vec_subs(vector signed char a, vector signed char b) +static vector short __ATTRS_o_ai +vec_and(vector short a, vector 
short b) { - return __builtin_altivec_vsubsbs(a, b); + return a & b; } -static vector unsigned char _ATTRS_o_ai -vec_subs(vector unsigned char a, vector unsigned char b) +static vector unsigned short __ATTRS_o_ai +vec_and(vector unsigned short a, vector unsigned short b) { - return __builtin_altivec_vsububs(a, b); + return a & b; } -static vector short _ATTRS_o_ai -vec_subs(vector short a, vector short b) +static vector int __ATTRS_o_ai +vec_and(vector int a, vector int b) { - return __builtin_altivec_vsubshs(a, b); + return a & b; } -static vector unsigned short _ATTRS_o_ai -vec_subs(vector unsigned short a, vector unsigned short b) +static vector unsigned int __ATTRS_o_ai +vec_and(vector unsigned int a, vector unsigned int b) { - return __builtin_altivec_vsubuhs(a, b); + return a & b; } -static vector int _ATTRS_o_ai -vec_subs(vector int a, vector int b) +static vector float __ATTRS_o_ai +vec_and(vector float a, vector float b) { - return __builtin_altivec_vsubsws(a, b); + vector unsigned int res = (vector unsigned int)a & (vector unsigned int)b; + return (vector float)res; } -static vector unsigned int _ATTRS_o_ai -vec_subs(vector unsigned int a, vector unsigned int b) +/* vec_vand */ + +static vector signed char __ATTRS_o_ai +vec_vand(vector signed char a, vector signed char b) { - return __builtin_altivec_vsubuws(a, b); + return a & b; } -/* vec_avg */ +static vector unsigned char __ATTRS_o_ai +vec_vand(vector unsigned char a, vector unsigned char b) +{ + return a & b; +} -#define __builtin_vec_vavgsb __builtin_altivec_vavgsb -#define __builtin_vec_vavgub __builtin_altivec_vavgub -#define __builtin_vec_vavgsh __builtin_altivec_vavgsh -#define __builtin_vec_vavguh __builtin_altivec_vavguh -#define __builtin_vec_vavgsw __builtin_altivec_vavgsw -#define __builtin_vec_vavguw __builtin_altivec_vavguw -#define vec_vavgsb __builtin_altivec_vavgsb -#define vec_vavgub __builtin_altivec_vavgub -#define vec_vavgsh __builtin_altivec_vavgsh -#define vec_vavguh 
__builtin_altivec_vavguh -#define vec_vavgsw __builtin_altivec_vavgsw -#define vec_vavguw __builtin_altivec_vavguw - -static vector signed char _ATTRS_o_ai -vec_avg(vector signed char a, vector signed char b) +static vector short __ATTRS_o_ai +vec_vand(vector short a, vector short b) { - return __builtin_altivec_vavgsb(a, b); + return a & b; } -static vector unsigned char _ATTRS_o_ai -vec_avg(vector unsigned char a, vector unsigned char b) +static vector unsigned short __ATTRS_o_ai +vec_vand(vector unsigned short a, vector unsigned short b) { - return __builtin_altivec_vavgub(a, b); + return a & b; } -static vector short _ATTRS_o_ai -vec_avg(vector short a, vector short b) +static vector int __ATTRS_o_ai +vec_vand(vector int a, vector int b) { - return __builtin_altivec_vavgsh(a, b); + return a & b; } -static vector unsigned short _ATTRS_o_ai -vec_avg(vector unsigned short a, vector unsigned short b) +static vector unsigned int __ATTRS_o_ai +vec_vand(vector unsigned int a, vector unsigned int b) { - return __builtin_altivec_vavguh(a, b); + return a & b; } -static vector int _ATTRS_o_ai -vec_avg(vector int a, vector int b) +static vector float __ATTRS_o_ai +vec_vand(vector float a, vector float b) { - return __builtin_altivec_vavgsw(a, b); + vector unsigned int res = (vector unsigned int)a & (vector unsigned int)b; + return (vector float)res; } -static vector unsigned int _ATTRS_o_ai -vec_avg(vector unsigned int a, vector unsigned int b) +/* vec_andc */ + +#define __builtin_altivec_vandc vec_andc + +static vector signed char __ATTRS_o_ai +vec_andc(vector signed char a, vector signed char b) { - return __builtin_altivec_vavguw(a, b); + return a & ~b; } -/* vec_st */ +static vector unsigned char __ATTRS_o_ai +vec_andc(vector unsigned char a, vector unsigned char b) +{ + return a & ~b; +} -#define __builtin_vec_st vec_st -#define vec_stvx vec_st +static vector short __ATTRS_o_ai +vec_andc(vector short a, vector short b) +{ + return a & ~b; +} -static void _ATTRS_o_ai 
-vec_st(vector signed char a, int b, vector signed char *c) +static vector unsigned short __ATTRS_o_ai +vec_andc(vector unsigned short a, vector unsigned short b) { - __builtin_altivec_stvx((vector int)a, b, (void *)c); + return a & ~b; } -static void _ATTRS_o_ai -vec_st(vector unsigned char a, int b, vector unsigned char *c) +static vector int __ATTRS_o_ai +vec_andc(vector int a, vector int b) { - __builtin_altivec_stvx((vector int)a, b, (void *)c); + return a & ~b; } -static void _ATTRS_o_ai -vec_st(vector short a, int b, vector short *c) +static vector unsigned int __ATTRS_o_ai +vec_andc(vector unsigned int a, vector unsigned int b) { - __builtin_altivec_stvx((vector int)a, b, (void *)c); + return a & ~b; } -static void _ATTRS_o_ai -vec_st(vector unsigned short a, int b, vector unsigned short *c) +static vector float __ATTRS_o_ai +vec_andc(vector float a, vector float b) { - __builtin_altivec_stvx((vector int)a, b, (void *)c); + vector unsigned int res = (vector unsigned int)a & ~(vector unsigned int)b; + return (vector float)res; } -static void _ATTRS_o_ai -vec_st(vector int a, int b, vector int *c) +/* vec_vandc */ + +static vector signed char __ATTRS_o_ai +vec_vandc(vector signed char a, vector signed char b) { - __builtin_altivec_stvx(a, b, (void *)c); + return a & ~b; } -static void _ATTRS_o_ai -vec_st(vector unsigned int a, int b, vector unsigned int *c) +static vector unsigned char __ATTRS_o_ai +vec_vandc(vector unsigned char a, vector unsigned char b) { - __builtin_altivec_stvx((vector int)a, b, (void *)c); + return a & ~b; } -static void _ATTRS_o_ai -vec_st(vector float a, int b, vector float *c) +static vector short __ATTRS_o_ai +vec_vandc(vector short a, vector short b) { - __builtin_altivec_stvx((vector int)a, b, (void *)c); + return a & ~b; } -/* vec_stl */ +static vector unsigned short __ATTRS_o_ai +vec_vandc(vector unsigned short a, vector unsigned short b) +{ + return a & ~b; +} -#define __builtin_vec_stl vec_stl -#define vec_stvxl vec_stl 
+static vector int __ATTRS_o_ai +vec_vandc(vector int a, vector int b) +{ + return a & ~b; +} -static void _ATTRS_o_ai -vec_stl(vector signed char a, int b, vector signed char *c) +static vector unsigned int __ATTRS_o_ai +vec_vandc(vector unsigned int a, vector unsigned int b) { - __builtin_altivec_stvxl((vector int)a, b, (void *)c); + return a & ~b; } -static void _ATTRS_o_ai -vec_stl(vector unsigned char a, int b, vector unsigned char *c) +static vector float __ATTRS_o_ai +vec_vandc(vector float a, vector float b) { - __builtin_altivec_stvxl((vector int)a, b, (void *)c); + vector unsigned int res = (vector unsigned int)a & ~(vector unsigned int)b; + return (vector float)res; } -static void _ATTRS_o_ai -vec_stl(vector short a, int b, vector short *c) +/* vec_avg */ + +static vector signed char __ATTRS_o_ai +vec_avg(vector signed char a, vector signed char b) { - __builtin_altivec_stvxl((vector int)a, b, (void *)c); + return __builtin_altivec_vavgsb(a, b); } -static void _ATTRS_o_ai -vec_stl(vector unsigned short a, int b, vector unsigned short *c) +static vector unsigned char __ATTRS_o_ai +vec_avg(vector unsigned char a, vector unsigned char b) { - __builtin_altivec_stvxl((vector int)a, b, (void *)c); + return __builtin_altivec_vavgub(a, b); } -static void _ATTRS_o_ai -vec_stl(vector int a, int b, vector int *c) +static vector short __ATTRS_o_ai +vec_avg(vector short a, vector short b) { - __builtin_altivec_stvxl(a, b, (void *)c); + return __builtin_altivec_vavgsh(a, b); } -static void _ATTRS_o_ai -vec_stl(vector unsigned int a, int b, vector unsigned int *c) +static vector unsigned short __ATTRS_o_ai +vec_avg(vector unsigned short a, vector unsigned short b) { - __builtin_altivec_stvxl((vector int)a, b, (void *)c); + return __builtin_altivec_vavguh(a, b); } -static void _ATTRS_o_ai -vec_stl(vector float a, int b, vector float *c) +static vector int __ATTRS_o_ai +vec_avg(vector int a, vector int b) { - __builtin_altivec_stvxl((vector int)a, b, (void *)c); + return 
__builtin_altivec_vavgsw(a, b); } -/* vec_ste */ +static vector unsigned int __ATTRS_o_ai +vec_avg(vector unsigned int a, vector unsigned int b) +{ + return __builtin_altivec_vavguw(a, b); +} -#define __builtin_vec_stvebx __builtin_altivec_stvebx -#define __builtin_vec_stvehx __builtin_altivec_stvehx -#define __builtin_vec_stvewx __builtin_altivec_stvewx -#define vec_stvebx __builtin_altivec_stvebx -#define vec_stvehx __builtin_altivec_stvehx -#define vec_stvewx __builtin_altivec_stvewx +/* vec_vavgsb */ -static void _ATTRS_o_ai -vec_ste(vector signed char a, int b, vector signed char *c) +static vector signed char __attribute__((__always_inline__)) +vec_vavgsb(vector signed char a, vector signed char b) { - __builtin_altivec_stvebx((vector char)a, b, (void *)c); + return __builtin_altivec_vavgsb(a, b); } -static void _ATTRS_o_ai -vec_ste(vector unsigned char a, int b, vector unsigned char *c) +/* vec_vavgub */ + +static vector unsigned char __attribute__((__always_inline__)) +vec_vavgub(vector unsigned char a, vector unsigned char b) { - __builtin_altivec_stvebx((vector char)a, b, (void *)c); + return __builtin_altivec_vavgub(a, b); } -static void _ATTRS_o_ai -vec_ste(vector short a, int b, vector short *c) +/* vec_vavgsh */ + +static vector short __attribute__((__always_inline__)) +vec_vavgsh(vector short a, vector short b) { - __builtin_altivec_stvehx(a, b, (void *)c); + return __builtin_altivec_vavgsh(a, b); } -static void _ATTRS_o_ai -vec_ste(vector unsigned short a, int b, vector unsigned short *c) +/* vec_vavguh */ + +static vector unsigned short __attribute__((__always_inline__)) +vec_vavguh(vector unsigned short a, vector unsigned short b) { - __builtin_altivec_stvehx((vector short)a, b, (void *)c); + return __builtin_altivec_vavguh(a, b); } -static void _ATTRS_o_ai -vec_ste(vector int a, int b, vector int *c) +/* vec_vavgsw */ + +static vector int __attribute__((__always_inline__)) +vec_vavgsw(vector int a, vector int b) +{ + return 
__builtin_altivec_vavgsw(a, b); +} + +/* vec_vavguw */ + +static vector unsigned int __attribute__((__always_inline__)) +vec_vavguw(vector unsigned int a, vector unsigned int b) { - __builtin_altivec_stvewx(a, b, (void *)c); + return __builtin_altivec_vavguw(a, b); } -static void _ATTRS_o_ai -vec_ste(vector unsigned int a, int b, vector unsigned int *c) +/* vec_ceil */ + +static vector float __attribute__((__always_inline__)) +vec_ceil(vector float a) { - __builtin_altivec_stvewx((vector int)a, b, (void *)c); + return __builtin_altivec_vrfip(a); } -static void _ATTRS_o_ai -vec_ste(vector float a, int b, vector float *c) +/* vec_vrfip */ + +static vector float __attribute__((__always_inline__)) +vec_vrfip(vector float a) { - __builtin_altivec_stvewx((vector int)a, b, (void *)c); + return __builtin_altivec_vrfip(a); } /* vec_cmpb */ -#define vec_cmpb __builtin_altivec_vcmpbfp -#define vec_vcmpbfp __builtin_altivec_vcmpbfp -#define __builtin_vec_cmpb __builtin_altivec_vcmpbfp +static vector int __attribute__((__always_inline__)) +vec_cmpb(vector float a, vector float b) +{ + return __builtin_altivec_vcmpbfp(a, b); +} + +/* vec_vcmpbfp */ -/* vec_cmpeq */ +static vector int __attribute__((__always_inline__)) +vec_vcmpbfp(vector float a, vector float b) +{ + return __builtin_altivec_vcmpbfp(a, b); +} -#define __builtin_vec_cmpeq vec_cmpeq +/* vec_cmpeq */ -static vector /*bool*/ char _ATTRS_o_ai +static vector /*bool*/ char __ATTRS_o_ai vec_cmpeq(vector signed char a, vector signed char b) { return __builtin_altivec_vcmpequb((vector char)a, (vector char)b); } -static vector /*bool*/ char _ATTRS_o_ai +static vector /*bool*/ char __ATTRS_o_ai vec_cmpeq(vector unsigned char a, vector unsigned char b) { return __builtin_altivec_vcmpequb((vector char)a, (vector char)b); } -static vector /*bool*/ short _ATTRS_o_ai +static vector /*bool*/ short __ATTRS_o_ai vec_cmpeq(vector short a, vector short b) { return __builtin_altivec_vcmpequh(a, b); } -static vector /*bool*/ short 
_ATTRS_o_ai +static vector /*bool*/ short __ATTRS_o_ai vec_cmpeq(vector unsigned short a, vector unsigned short b) { return __builtin_altivec_vcmpequh((vector short)a, (vector short)b); } -static vector /*bool*/ int _ATTRS_o_ai +static vector /*bool*/ int __ATTRS_o_ai vec_cmpeq(vector int a, vector int b) { return __builtin_altivec_vcmpequw(a, b); } -static vector /*bool*/ int _ATTRS_o_ai +static vector /*bool*/ int __ATTRS_o_ai vec_cmpeq(vector unsigned int a, vector unsigned int b) { return __builtin_altivec_vcmpequw((vector int)a, (vector int)b); } -static vector /*bool*/ int _ATTRS_o_ai +static vector /*bool*/ int __ATTRS_o_ai vec_cmpeq(vector float a, vector float b) { return __builtin_altivec_vcmpeqfp(a, b); @@ -569,72 +667,121 @@ vec_cmpeq(vector float a, vector float b) /* vec_cmpge */ -#define vec_cmpge __builtin_altivec_vcmpgefp -#define vec_vcmpgefp __builtin_altivec_vcmpgefp -#define __builtin_vec_cmpge __builtin_altivec_vcmpgefp +static vector /*bool*/ int __attribute__((__always_inline__)) +vec_cmpge(vector float a, vector float b) +{ + return __builtin_altivec_vcmpgefp(a, b); +} + +/* vec_vcmpgefp */ + +static vector /*bool*/ int __attribute__((__always_inline__)) +vec_vcmpgefp(vector float a, vector float b) +{ + return __builtin_altivec_vcmpgefp(a, b); +} /* vec_cmpgt */ -#define vec_vcmpgtsb __builtin_altivec_vcmpgtsb -#define vec_vcmpgtub __builtin_altivec_vcmpgtub -#define vec_vcmpgtsh __builtin_altivec_vcmpgtsh -#define vec_vcmpgtuh __builtin_altivec_vcmpgtuh -#define vec_vcmpgtsw __builtin_altivec_vcmpgtsw -#define vec_vcmpgtuw __builtin_altivec_vcmpgtuw -#define vec_vcmpgtfp __builtin_altivec_vcmpgtfp -#define __builtin_vec_vcmpgtsb __builtin_altivec_vcmpgtsb -#define __builtin_vec_vcmpgtub __builtin_altivec_vcmpgtub -#define __builtin_vec_vcmpgtsh __builtin_altivec_vcmpgtsh -#define __builtin_vec_vcmpgtuh __builtin_altivec_vcmpgtuh -#define __builtin_vec_vcmpgtsw __builtin_altivec_vcmpgtsw -#define __builtin_vec_vcmpgtuw 
__builtin_altivec_vcmpgtuw -#define __builtin_vec_vcmpgtfp __builtin_altivec_vcmpgtfp - -static vector /*bool*/ char _ATTRS_o_ai +static vector /*bool*/ char __ATTRS_o_ai vec_cmpgt(vector signed char a, vector signed char b) { return __builtin_altivec_vcmpgtsb(a, b); } -static vector /*bool*/ char _ATTRS_o_ai +static vector /*bool*/ char __ATTRS_o_ai vec_cmpgt(vector unsigned char a, vector unsigned char b) { return __builtin_altivec_vcmpgtub(a, b); } -static vector /*bool*/ short _ATTRS_o_ai +static vector /*bool*/ short __ATTRS_o_ai vec_cmpgt(vector short a, vector short b) { return __builtin_altivec_vcmpgtsh(a, b); } -static vector /*bool*/ short _ATTRS_o_ai +static vector /*bool*/ short __ATTRS_o_ai vec_cmpgt(vector unsigned short a, vector unsigned short b) { return __builtin_altivec_vcmpgtuh(a, b); } -static vector /*bool*/ int _ATTRS_o_ai +static vector /*bool*/ int __ATTRS_o_ai vec_cmpgt(vector int a, vector int b) { return __builtin_altivec_vcmpgtsw(a, b); } -static vector /*bool*/ int _ATTRS_o_ai +static vector /*bool*/ int __ATTRS_o_ai vec_cmpgt(vector unsigned int a, vector unsigned int b) { return __builtin_altivec_vcmpgtuw(a, b); } -static vector /*bool*/ int _ATTRS_o_ai +static vector /*bool*/ int __ATTRS_o_ai vec_cmpgt(vector float a, vector float b) { return __builtin_altivec_vcmpgtfp(a, b); } -/* vec_cmple */ +/* vec_vcmpgtsb */ -#define __builtin_vec_cmple vec_cmple +static vector /*bool*/ char __attribute__((__always_inline__)) +vec_vcmpgtsb(vector signed char a, vector signed char b) +{ + return __builtin_altivec_vcmpgtsb(a, b); +} + +/* vec_vcmpgtub */ + +static vector /*bool*/ char __attribute__((__always_inline__)) +vec_vcmpgtub(vector unsigned char a, vector unsigned char b) +{ + return __builtin_altivec_vcmpgtub(a, b); +} + +/* vec_vcmpgtsh */ + +static vector /*bool*/ short __attribute__((__always_inline__)) +vec_vcmpgtsh(vector short a, vector short b) +{ + return __builtin_altivec_vcmpgtsh(a, b); +} + +/* vec_vcmpgtuh */ + +static 
vector /*bool*/ short __attribute__((__always_inline__)) +vec_vcmpgtuh(vector unsigned short a, vector unsigned short b) +{ + return __builtin_altivec_vcmpgtuh(a, b); +} + +/* vec_vcmpgtsw */ + +static vector /*bool*/ int __attribute__((__always_inline__)) +vec_vcmpgtsw(vector int a, vector int b) +{ + return __builtin_altivec_vcmpgtsw(a, b); +} + +/* vec_vcmpgtuw */ + +static vector /*bool*/ int __attribute__((__always_inline__)) +vec_vcmpgtuw(vector unsigned int a, vector unsigned int b) +{ + return __builtin_altivec_vcmpgtuw(a, b); +} + +/* vec_vcmpgtfp */ + +static vector /*bool*/ int __attribute__((__always_inline__)) +vec_vcmpgtfp(vector float a, vector float b) +{ + return __builtin_altivec_vcmpgtfp(a, b); +} + +/* vec_cmple */ static vector /*bool*/ int __attribute__((__always_inline__)) vec_cmple(vector float a, vector float b) @@ -644,239 +791,4533 @@ vec_cmple(vector float a, vector float b) /* vec_cmplt */ -#define __builtin_vec_cmplt vec_cmplt - -static vector /*bool*/ char _ATTRS_o_ai +static vector /*bool*/ char __ATTRS_o_ai vec_cmplt(vector signed char a, vector signed char b) { return __builtin_altivec_vcmpgtsb(b, a); } -static vector /*bool*/ char _ATTRS_o_ai +static vector /*bool*/ char __ATTRS_o_ai vec_cmplt(vector unsigned char a, vector unsigned char b) { return __builtin_altivec_vcmpgtub(b, a); } -static vector /*bool*/ short _ATTRS_o_ai +static vector /*bool*/ short __ATTRS_o_ai vec_cmplt(vector short a, vector short b) { return __builtin_altivec_vcmpgtsh(b, a); } -static vector /*bool*/ short _ATTRS_o_ai +static vector /*bool*/ short __ATTRS_o_ai vec_cmplt(vector unsigned short a, vector unsigned short b) { return __builtin_altivec_vcmpgtuh(b, a); } -static vector /*bool*/ int _ATTRS_o_ai +static vector /*bool*/ int __ATTRS_o_ai vec_cmplt(vector int a, vector int b) { return __builtin_altivec_vcmpgtsw(b, a); } -static vector /*bool*/ int _ATTRS_o_ai +static vector /*bool*/ int __ATTRS_o_ai vec_cmplt(vector unsigned int a, vector unsigned 
int b) { return __builtin_altivec_vcmpgtuw(b, a); } -static vector /*bool*/ int _ATTRS_o_ai +static vector /*bool*/ int __ATTRS_o_ai vec_cmplt(vector float a, vector float b) { return __builtin_altivec_vcmpgtfp(b, a); } +/* vec_ctf */ + +static vector float __ATTRS_o_ai +vec_ctf(vector int a, int b) +{ + return __builtin_altivec_vcfsx(a, b); +} + +static vector float __ATTRS_o_ai +vec_ctf(vector unsigned int a, int b) +{ + return __builtin_altivec_vcfux((vector int)a, b); +} + +/* vec_vcfsx */ + +static vector float __attribute__((__always_inline__)) +vec_vcfsx(vector int a, int b) +{ + return __builtin_altivec_vcfsx(a, b); +} + +/* vec_vcfux */ + +static vector float __attribute__((__always_inline__)) +vec_vcfux(vector unsigned int a, int b) +{ + return __builtin_altivec_vcfux((vector int)a, b); +} + +/* vec_cts */ + +static vector int __attribute__((__always_inline__)) +vec_cts(vector float a, int b) +{ + return __builtin_altivec_vctsxs(a, b); +} + +/* vec_vctsxs */ + +static vector int __attribute__((__always_inline__)) +vec_vctsxs(vector float a, int b) +{ + return __builtin_altivec_vctsxs(a, b); +} + +/* vec_ctu */ + +static vector unsigned int __attribute__((__always_inline__)) +vec_ctu(vector float a, int b) +{ + return __builtin_altivec_vctuxs(a, b); +} + +/* vec_vctuxs */ + +static vector unsigned int __attribute__((__always_inline__)) +vec_vctuxs(vector float a, int b) +{ + return __builtin_altivec_vctuxs(a, b); +} + +/* vec_dss */ + +static void __attribute__((__always_inline__)) +vec_dss(int a) +{ + __builtin_altivec_dss(a); +} + +/* vec_dssall */ + +static void __attribute__((__always_inline__)) +vec_dssall(void) +{ + __builtin_altivec_dssall(); +} + +/* vec_dst */ + +static void __attribute__((__always_inline__)) +vec_dst(void *a, int b, int c) +{ + __builtin_altivec_dst(a, b, c); +} + +/* vec_dstst */ + +static void __attribute__((__always_inline__)) +vec_dstst(void *a, int b, int c) +{ + __builtin_altivec_dstst(a, b, c); +} + +/* vec_dststt */ + 
+static void __attribute__((__always_inline__)) +vec_dststt(void *a, int b, int c) +{ + __builtin_altivec_dststt(a, b, c); +} + +/* vec_dstt */ + +static void __attribute__((__always_inline__)) +vec_dstt(void *a, int b, int c) +{ + __builtin_altivec_dstt(a, b, c); +} + +/* vec_expte */ + +static vector float __attribute__((__always_inline__)) +vec_expte(vector float a) +{ + return __builtin_altivec_vexptefp(a); +} + +/* vec_vexptefp */ + +static vector float __attribute__((__always_inline__)) +vec_vexptefp(vector float a) +{ + return __builtin_altivec_vexptefp(a); +} + +/* vec_floor */ + +static vector float __attribute__((__always_inline__)) +vec_floor(vector float a) +{ + return __builtin_altivec_vrfim(a); +} + +/* vec_vrfim */ + +static vector float __attribute__((__always_inline__)) +vec_vrfim(vector float a) +{ + return __builtin_altivec_vrfim(a); +} + +/* vec_ld */ + +static vector signed char __ATTRS_o_ai +vec_ld(int a, vector signed char *b) +{ + return (vector signed char)__builtin_altivec_lvx(a, b); +} + +static vector signed char __ATTRS_o_ai +vec_ld(int a, signed char *b) +{ + return (vector signed char)__builtin_altivec_lvx(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_ld(int a, vector unsigned char *b) +{ + return (vector unsigned char)__builtin_altivec_lvx(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_ld(int a, unsigned char *b) +{ + return (vector unsigned char)__builtin_altivec_lvx(a, b); +} + +static vector short __ATTRS_o_ai +vec_ld(int a, vector short *b) +{ + return (vector short)__builtin_altivec_lvx(a, b); +} + +static vector short __ATTRS_o_ai +vec_ld(int a, short *b) +{ + return (vector short)__builtin_altivec_lvx(a, b); +} + +static vector unsigned short __ATTRS_o_ai +vec_ld(int a, vector unsigned short *b) +{ + return (vector unsigned short)__builtin_altivec_lvx(a, b); +} + +static vector unsigned short __ATTRS_o_ai +vec_ld(int a, unsigned short *b) +{ + return (vector unsigned short)__builtin_altivec_lvx(a, b); 
+} + +static vector int __ATTRS_o_ai +vec_ld(int a, vector int *b) +{ + return (vector int)__builtin_altivec_lvx(a, b); +} + +static vector int __ATTRS_o_ai +vec_ld(int a, int *b) +{ + return (vector int)__builtin_altivec_lvx(a, b); +} + +static vector unsigned int __ATTRS_o_ai +vec_ld(int a, vector unsigned int *b) +{ + return (vector unsigned int)__builtin_altivec_lvx(a, b); +} + +static vector unsigned int __ATTRS_o_ai +vec_ld(int a, unsigned int *b) +{ + return (vector unsigned int)__builtin_altivec_lvx(a, b); +} + +static vector float __ATTRS_o_ai +vec_ld(int a, vector float *b) +{ + return (vector float)__builtin_altivec_lvx(a, b); +} + +static vector float __ATTRS_o_ai +vec_ld(int a, float *b) +{ + return (vector float)__builtin_altivec_lvx(a, b); +} + +/* vec_lvx */ + +static vector signed char __ATTRS_o_ai +vec_lvx(int a, vector signed char *b) +{ + return (vector signed char)__builtin_altivec_lvx(a, b); +} + +static vector signed char __ATTRS_o_ai +vec_lvx(int a, signed char *b) +{ + return (vector signed char)__builtin_altivec_lvx(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_lvx(int a, vector unsigned char *b) +{ + return (vector unsigned char)__builtin_altivec_lvx(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_lvx(int a, unsigned char *b) +{ + return (vector unsigned char)__builtin_altivec_lvx(a, b); +} + +static vector short __ATTRS_o_ai +vec_lvx(int a, vector short *b) +{ + return (vector short)__builtin_altivec_lvx(a, b); +} + +static vector short __ATTRS_o_ai +vec_lvx(int a, short *b) +{ + return (vector short)__builtin_altivec_lvx(a, b); +} + +static vector unsigned short __ATTRS_o_ai +vec_lvx(int a, vector unsigned short *b) +{ + return (vector unsigned short)__builtin_altivec_lvx(a, b); +} + +static vector unsigned short __ATTRS_o_ai +vec_lvx(int a, unsigned short *b) +{ + return (vector unsigned short)__builtin_altivec_lvx(a, b); +} + +static vector int __ATTRS_o_ai +vec_lvx(int a, vector int *b) +{ + return (vector 
int)__builtin_altivec_lvx(a, b); +} + +static vector int __ATTRS_o_ai +vec_lvx(int a, int *b) +{ + return (vector int)__builtin_altivec_lvx(a, b); +} + +static vector unsigned int __ATTRS_o_ai +vec_lvx(int a, vector unsigned int *b) +{ + return (vector unsigned int)__builtin_altivec_lvx(a, b); +} + +static vector unsigned int __ATTRS_o_ai +vec_lvx(int a, unsigned int *b) +{ + return (vector unsigned int)__builtin_altivec_lvx(a, b); +} + +static vector float __ATTRS_o_ai +vec_lvx(int a, vector float *b) +{ + return (vector float)__builtin_altivec_lvx(a, b); +} + +static vector float __ATTRS_o_ai +vec_lvx(int a, float *b) +{ + return (vector float)__builtin_altivec_lvx(a, b); +} + +/* vec_lde */ + +static vector signed char __ATTRS_o_ai +vec_lde(int a, vector signed char *b) +{ + return (vector signed char)__builtin_altivec_lvebx(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_lde(int a, vector unsigned char *b) +{ + return (vector unsigned char)__builtin_altivec_lvebx(a, b); +} + +static vector short __ATTRS_o_ai +vec_lde(int a, vector short *b) +{ + return (vector short)__builtin_altivec_lvehx(a, b); +} + +static vector unsigned short __ATTRS_o_ai +vec_lde(int a, vector unsigned short *b) +{ + return (vector unsigned short)__builtin_altivec_lvehx(a, b); +} + +static vector int __ATTRS_o_ai +vec_lde(int a, vector int *b) +{ + return (vector int)__builtin_altivec_lvewx(a, b); +} + +static vector unsigned int __ATTRS_o_ai +vec_lde(int a, vector unsigned int *b) +{ + return (vector unsigned int)__builtin_altivec_lvewx(a, b); +} + +static vector float __ATTRS_o_ai +vec_lde(int a, vector float *b) +{ + return (vector float)__builtin_altivec_lvewx(a, b); +} + +/* vec_lvebx */ + +static vector signed char __ATTRS_o_ai +vec_lvebx(int a, vector signed char *b) +{ + return (vector signed char)__builtin_altivec_lvebx(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_lvebx(int a, vector unsigned char *b) +{ + return (vector unsigned 
char)__builtin_altivec_lvebx(a, b); +} + +/* vec_lvehx */ + +static vector short __ATTRS_o_ai +vec_lvehx(int a, vector short *b) +{ + return (vector short)__builtin_altivec_lvehx(a, b); +} + +static vector unsigned short __ATTRS_o_ai +vec_lvehx(int a, vector unsigned short *b) +{ + return (vector unsigned short)__builtin_altivec_lvehx(a, b); +} + +/* vec_lvewx */ + +static vector int __ATTRS_o_ai +vec_lvewx(int a, vector int *b) +{ + return (vector int)__builtin_altivec_lvewx(a, b); +} + +static vector unsigned int __ATTRS_o_ai +vec_lvewx(int a, vector unsigned int *b) +{ + return (vector unsigned int)__builtin_altivec_lvewx(a, b); +} + +static vector float __ATTRS_o_ai +vec_lvewx(int a, vector float *b) +{ + return (vector float)__builtin_altivec_lvewx(a, b); +} + +/* vec_ldl */ + +static vector signed char __ATTRS_o_ai +vec_ldl(int a, vector signed char *b) +{ + return (vector signed char)__builtin_altivec_lvxl(a, b); +} + +static vector signed char __ATTRS_o_ai +vec_ldl(int a, signed char *b) +{ + return (vector signed char)__builtin_altivec_lvxl(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_ldl(int a, vector unsigned char *b) +{ + return (vector unsigned char)__builtin_altivec_lvxl(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_ldl(int a, unsigned char *b) +{ + return (vector unsigned char)__builtin_altivec_lvxl(a, b); +} + +static vector short __ATTRS_o_ai +vec_ldl(int a, vector short *b) +{ + return (vector short)__builtin_altivec_lvxl(a, b); +} + +static vector short __ATTRS_o_ai +vec_ldl(int a, short *b) +{ + return (vector short)__builtin_altivec_lvxl(a, b); +} + +static vector unsigned short __ATTRS_o_ai +vec_ldl(int a, vector unsigned short *b) +{ + return (vector unsigned short)__builtin_altivec_lvxl(a, b); +} + +static vector unsigned short __ATTRS_o_ai +vec_ldl(int a, unsigned short *b) +{ + return (vector unsigned short)__builtin_altivec_lvxl(a, b); +} + +static vector int __ATTRS_o_ai +vec_ldl(int a, vector int *b) +{ + 
return (vector int)__builtin_altivec_lvxl(a, b); +} + +static vector int __ATTRS_o_ai +vec_ldl(int a, int *b) +{ + return (vector int)__builtin_altivec_lvxl(a, b); +} + +static vector unsigned int __ATTRS_o_ai +vec_ldl(int a, vector unsigned int *b) +{ + return (vector unsigned int)__builtin_altivec_lvxl(a, b); +} + +static vector unsigned int __ATTRS_o_ai +vec_ldl(int a, unsigned int *b) +{ + return (vector unsigned int)__builtin_altivec_lvxl(a, b); +} + +static vector float __ATTRS_o_ai +vec_ldl(int a, vector float *b) +{ + return (vector float)__builtin_altivec_lvxl(a, b); +} + +static vector float __ATTRS_o_ai +vec_ldl(int a, float *b) +{ + return (vector float)__builtin_altivec_lvxl(a, b); +} + +/* vec_lvxl */ + +static vector signed char __ATTRS_o_ai +vec_lvxl(int a, vector signed char *b) +{ + return (vector signed char)__builtin_altivec_lvxl(a, b); +} + +static vector signed char __ATTRS_o_ai +vec_lvxl(int a, signed char *b) +{ + return (vector signed char)__builtin_altivec_lvxl(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_lvxl(int a, vector unsigned char *b) +{ + return (vector unsigned char)__builtin_altivec_lvxl(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_lvxl(int a, unsigned char *b) +{ + return (vector unsigned char)__builtin_altivec_lvxl(a, b); +} + +static vector short __ATTRS_o_ai +vec_lvxl(int a, vector short *b) +{ + return (vector short)__builtin_altivec_lvxl(a, b); +} + +static vector short __ATTRS_o_ai +vec_lvxl(int a, short *b) +{ + return (vector short)__builtin_altivec_lvxl(a, b); +} + +static vector unsigned short __ATTRS_o_ai +vec_lvxl(int a, vector unsigned short *b) +{ + return (vector unsigned short)__builtin_altivec_lvxl(a, b); +} + +static vector unsigned short __ATTRS_o_ai +vec_lvxl(int a, unsigned short *b) +{ + return (vector unsigned short)__builtin_altivec_lvxl(a, b); +} + +static vector int __ATTRS_o_ai +vec_lvxl(int a, vector int *b) +{ + return (vector int)__builtin_altivec_lvxl(a, b); +} + +static 
vector int __ATTRS_o_ai +vec_lvxl(int a, int *b) +{ + return (vector int)__builtin_altivec_lvxl(a, b); +} + +static vector unsigned int __ATTRS_o_ai +vec_lvxl(int a, vector unsigned int *b) +{ + return (vector unsigned int)__builtin_altivec_lvxl(a, b); +} + +static vector unsigned int __ATTRS_o_ai +vec_lvxl(int a, unsigned int *b) +{ + return (vector unsigned int)__builtin_altivec_lvxl(a, b); +} + +static vector float __ATTRS_o_ai +vec_lvxl(int a, vector float *b) +{ + return (vector float)__builtin_altivec_lvxl(a, b); +} + +static vector float __ATTRS_o_ai +vec_lvxl(int a, float *b) +{ + return (vector float)__builtin_altivec_lvxl(a, b); +} + +/* vec_loge */ + +static vector float __attribute__((__always_inline__)) +vec_loge(vector float a) +{ + return __builtin_altivec_vlogefp(a); +} + +/* vec_vlogefp */ + +static vector float __attribute__((__always_inline__)) +vec_vlogefp(vector float a) +{ + return __builtin_altivec_vlogefp(a); +} + +/* vec_lvsl */ + +static vector unsigned char __ATTRS_o_ai +vec_lvsl(int a, signed char *b) +{ + return (vector unsigned char)__builtin_altivec_lvsl(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_lvsl(int a, unsigned char *b) +{ + return (vector unsigned char)__builtin_altivec_lvsl(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_lvsl(int a, short *b) +{ + return (vector unsigned char)__builtin_altivec_lvsl(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_lvsl(int a, unsigned short *b) +{ + return (vector unsigned char)__builtin_altivec_lvsl(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_lvsl(int a, int *b) +{ + return (vector unsigned char)__builtin_altivec_lvsl(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_lvsl(int a, unsigned int *b) +{ + return (vector unsigned char)__builtin_altivec_lvsl(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_lvsl(int a, float *b) +{ + return (vector unsigned char)__builtin_altivec_lvsl(a, b); +} + +/* vec_lvsr */ + +static vector 
unsigned char __ATTRS_o_ai +vec_lvsr(int a, signed char *b) +{ + return (vector unsigned char)__builtin_altivec_lvsr(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_lvsr(int a, unsigned char *b) +{ + return (vector unsigned char)__builtin_altivec_lvsr(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_lvsr(int a, short *b) +{ + return (vector unsigned char)__builtin_altivec_lvsr(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_lvsr(int a, unsigned short *b) +{ + return (vector unsigned char)__builtin_altivec_lvsr(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_lvsr(int a, int *b) +{ + return (vector unsigned char)__builtin_altivec_lvsr(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_lvsr(int a, unsigned int *b) +{ + return (vector unsigned char)__builtin_altivec_lvsr(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_lvsr(int a, float *b) +{ + return (vector unsigned char)__builtin_altivec_lvsr(a, b); +} + +/* vec_madd */ + +static vector float __attribute__((__always_inline__)) +vec_madd(vector float a, vector float b, vector float c) +{ + return __builtin_altivec_vmaddfp(a, b, c); +} + +/* vec_vmaddfp */ + +static vector float __attribute__((__always_inline__)) +vec_vmaddfp(vector float a, vector float b, vector float c) +{ + return __builtin_altivec_vmaddfp(a, b, c); +} + +/* vec_madds */ + +static vector signed short __attribute__((__always_inline__)) +vec_madds(vector signed short a, vector signed short b, vector signed short c) +{ + return __builtin_altivec_vmhaddshs(a, b, c); +} + +/* vec_vmhaddshs */ +static vector signed short __attribute__((__always_inline__)) +vec_vmhaddshs(vector signed short a, vector signed short b, vector signed short c) +{ + return __builtin_altivec_vmhaddshs(a, b, c); +} + /* vec_max */ -#define __builtin_vec_vmaxsb __builtin_altivec_vmaxsb -#define __builtin_vec_vmaxub __builtin_altivec_vmaxub -#define __builtin_vec_vmaxsh __builtin_altivec_vmaxsh -#define 
__builtin_vec_vmaxuh __builtin_altivec_vmaxuh -#define __builtin_vec_vmaxsw __builtin_altivec_vmaxsw -#define __builtin_vec_vmaxuw __builtin_altivec_vmaxuw -#define __builtin_vec_vmaxfp __builtin_altivec_vmaxfp -#define vec_vmaxsb __builtin_altivec_vmaxsb -#define vec_vmaxub __builtin_altivec_vmaxub -#define vec_vmaxsh __builtin_altivec_vmaxsh -#define vec_vmaxuh __builtin_altivec_vmaxuh -#define vec_vmaxsw __builtin_altivec_vmaxsw -#define vec_vmaxuw __builtin_altivec_vmaxuw -#define vec_vmaxfp __builtin_altivec_vmaxfp -#define __builtin_vec_max vec_max - -static vector signed char _ATTRS_o_ai +static vector signed char __ATTRS_o_ai vec_max(vector signed char a, vector signed char b) { return __builtin_altivec_vmaxsb(a, b); } -static vector unsigned char _ATTRS_o_ai +static vector unsigned char __ATTRS_o_ai vec_max(vector unsigned char a, vector unsigned char b) { return __builtin_altivec_vmaxub(a, b); } -static vector short _ATTRS_o_ai +static vector short __ATTRS_o_ai vec_max(vector short a, vector short b) { return __builtin_altivec_vmaxsh(a, b); } -static vector unsigned short _ATTRS_o_ai +static vector unsigned short __ATTRS_o_ai vec_max(vector unsigned short a, vector unsigned short b) { return __builtin_altivec_vmaxuh(a, b); } -static vector int _ATTRS_o_ai +static vector int __ATTRS_o_ai vec_max(vector int a, vector int b) { return __builtin_altivec_vmaxsw(a, b); } -static vector unsigned int _ATTRS_o_ai +static vector unsigned int __ATTRS_o_ai vec_max(vector unsigned int a, vector unsigned int b) { return __builtin_altivec_vmaxuw(a, b); } -static vector float _ATTRS_o_ai +static vector float __ATTRS_o_ai vec_max(vector float a, vector float b) { return __builtin_altivec_vmaxfp(a, b); } +/* vec_vmaxsb */ + +static vector signed char __attribute__((__always_inline__)) +vec_vmaxsb(vector signed char a, vector signed char b) +{ + return __builtin_altivec_vmaxsb(a, b); +} + +/* vec_vmaxub */ + +static vector unsigned char __attribute__((__always_inline__)) 
+vec_vmaxub(vector unsigned char a, vector unsigned char b) +{ + return __builtin_altivec_vmaxub(a, b); +} + +/* vec_vmaxsh */ + +static vector short __attribute__((__always_inline__)) +vec_vmaxsh(vector short a, vector short b) +{ + return __builtin_altivec_vmaxsh(a, b); +} + +/* vec_vmaxuh */ + +static vector unsigned short __attribute__((__always_inline__)) +vec_vmaxuh(vector unsigned short a, vector unsigned short b) +{ + return __builtin_altivec_vmaxuh(a, b); +} + +/* vec_vmaxsw */ + +static vector int __attribute__((__always_inline__)) +vec_vmaxsw(vector int a, vector int b) +{ + return __builtin_altivec_vmaxsw(a, b); +} + +/* vec_vmaxuw */ + +static vector unsigned int __attribute__((__always_inline__)) +vec_vmaxuw(vector unsigned int a, vector unsigned int b) +{ + return __builtin_altivec_vmaxuw(a, b); +} + +/* vec_vmaxfp */ + +static vector float __attribute__((__always_inline__)) +vec_vmaxfp(vector float a, vector float b) +{ + return __builtin_altivec_vmaxfp(a, b); +} + +/* vec_mergeh */ + +static vector signed char __ATTRS_o_ai +vec_mergeh(vector signed char a, vector signed char b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x00, 0x10, 0x01, 0x11, 0x02, 0x12, 0x03, 0x13, + 0x04, 0x14, 0x05, 0x15, 0x06, 0x16, 0x07, 0x17)); +} + +static vector unsigned char __ATTRS_o_ai +vec_mergeh(vector unsigned char a, vector unsigned char b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x00, 0x10, 0x01, 0x11, 0x02, 0x12, 0x03, 0x13, + 0x04, 0x14, 0x05, 0x15, 0x06, 0x16, 0x07, 0x17)); +} + +static vector short __ATTRS_o_ai +vec_mergeh(vector short a, vector short b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x00, 0x01, 0x10, 0x11, 0x02, 0x03, 0x12, 0x13, + 0x04, 0x05, 0x14, 0x15, 0x06, 0x07, 0x16, 0x17)); +} + +static vector unsigned short __ATTRS_o_ai +vec_mergeh(vector unsigned short a, vector unsigned short b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x00, 0x01, 0x10, 0x11, 0x02, 0x03, 0x12, 0x13, + 0x04, 0x05, 0x14, 0x15, 
0x06, 0x07, 0x16, 0x17)); +} + +static vector int __ATTRS_o_ai +vec_mergeh(vector int a, vector int b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13, + 0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17)); +} + +static vector unsigned int __ATTRS_o_ai +vec_mergeh(vector unsigned int a, vector unsigned int b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13, + 0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17)); +} + +static vector float __ATTRS_o_ai +vec_mergeh(vector float a, vector float b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13, + 0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17)); +} + +/* vec_vmrghb */ + +#define __builtin_altivec_vmrghb vec_vmrghb + +static vector signed char __ATTRS_o_ai +vec_vmrghb(vector signed char a, vector signed char b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x00, 0x10, 0x01, 0x11, 0x02, 0x12, 0x03, 0x13, + 0x04, 0x14, 0x05, 0x15, 0x06, 0x16, 0x07, 0x17)); +} + +static vector unsigned char __ATTRS_o_ai +vec_vmrghb(vector unsigned char a, vector unsigned char b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x00, 0x10, 0x01, 0x11, 0x02, 0x12, 0x03, 0x13, + 0x04, 0x14, 0x05, 0x15, 0x06, 0x16, 0x07, 0x17)); +} + +/* vec_vmrghh */ + +#define __builtin_altivec_vmrghh vec_vmrghh + +static vector short __ATTRS_o_ai +vec_vmrghh(vector short a, vector short b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x00, 0x01, 0x10, 0x11, 0x02, 0x03, 0x12, 0x13, + 0x04, 0x05, 0x14, 0x15, 0x06, 0x07, 0x16, 0x17)); +} + +static vector unsigned short __ATTRS_o_ai +vec_vmrghh(vector unsigned short a, vector unsigned short b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x00, 0x01, 0x10, 0x11, 0x02, 0x03, 0x12, 0x13, + 0x04, 0x05, 0x14, 0x15, 0x06, 0x07, 0x16, 0x17)); +} + +/* vec_vmrghw */ + +#define __builtin_altivec_vmrghw vec_vmrghw + +static vector int __ATTRS_o_ai 
+vec_vmrghw(vector int a, vector int b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13, + 0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17)); +} + +static vector unsigned int __ATTRS_o_ai +vec_vmrghw(vector unsigned int a, vector unsigned int b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13, + 0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17)); +} + +static vector float __ATTRS_o_ai +vec_vmrghw(vector float a, vector float b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13, + 0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17)); +} + +/* vec_mergel */ + +static vector signed char __ATTRS_o_ai +vec_mergel(vector signed char a, vector signed char b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A, 0x0B, 0x1B, + 0x0C, 0x1C, 0x0D, 0x1D, 0x0E, 0x1E, 0x0F, 0x1F)); +} + +static vector unsigned char __ATTRS_o_ai +vec_mergel(vector unsigned char a, vector unsigned char b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A, 0x0B, 0x1B, + 0x0C, 0x1C, 0x0D, 0x1D, 0x0E, 0x1E, 0x0F, 0x1F)); +} + +static vector short __ATTRS_o_ai +vec_mergel(vector short a, vector short b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, 0x1A, 0x1B, + 0x0C, 0x0D, 0x1C, 0x1D, 0x0E, 0x0F, 0x1E, 0x1F)); +} + +static vector unsigned short __ATTRS_o_ai +vec_mergel(vector unsigned short a, vector unsigned short b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, 0x1A, 0x1B, + 0x0C, 0x0D, 0x1C, 0x1D, 0x0E, 0x0F, 0x1E, 0x1F)); +} + +static vector int __ATTRS_o_ai +vec_mergel(vector int a, vector int b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B, + 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F)); +} + +static vector unsigned int __ATTRS_o_ai 
+vec_mergel(vector unsigned int a, vector unsigned int b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B, + 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F)); +} + +static vector float __ATTRS_o_ai +vec_mergel(vector float a, vector float b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B, + 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F)); +} + +/* vec_vmrglb */ + +#define __builtin_altivec_vmrglb vec_vmrglb + +static vector signed char __ATTRS_o_ai +vec_vmrglb(vector signed char a, vector signed char b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A, 0x0B, 0x1B, + 0x0C, 0x1C, 0x0D, 0x1D, 0x0E, 0x1E, 0x0F, 0x1F)); +} + +static vector unsigned char __ATTRS_o_ai +vec_vmrglb(vector unsigned char a, vector unsigned char b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A, 0x0B, 0x1B, + 0x0C, 0x1C, 0x0D, 0x1D, 0x0E, 0x1E, 0x0F, 0x1F)); +} + +/* vec_vmrglh */ + +#define __builtin_altivec_vmrglh vec_vmrglh + +static vector short __ATTRS_o_ai +vec_vmrglh(vector short a, vector short b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, 0x1A, 0x1B, + 0x0C, 0x0D, 0x1C, 0x1D, 0x0E, 0x0F, 0x1E, 0x1F)); +} + +static vector unsigned short __ATTRS_o_ai +vec_vmrglh(vector unsigned short a, vector unsigned short b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, 0x1A, 0x1B, + 0x0C, 0x0D, 0x1C, 0x1D, 0x0E, 0x0F, 0x1E, 0x1F)); +} + +/* vec_vmrglw */ + +#define __builtin_altivec_vmrglw vec_vmrglw + +static vector int __ATTRS_o_ai +vec_vmrglw(vector int a, vector int b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B, + 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F)); +} + +static vector unsigned int __ATTRS_o_ai +vec_vmrglw(vector unsigned int a, vector unsigned int b) +{ + 
return vec_perm(a, b, (vector unsigned char) + (0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B, + 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F)); +} + +static vector float __ATTRS_o_ai +vec_vmrglw(vector float a, vector float b) +{ + return vec_perm(a, b, (vector unsigned char) + (0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B, + 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F)); +} + /* vec_mfvscr */ -#define __builtin_vec_mfvscr __builtin_altivec_mfvscr -#define vec_mfvscr __builtin_altivec_mfvscr +static vector unsigned short __attribute__((__always_inline__)) +vec_mfvscr(void) +{ + return __builtin_altivec_mfvscr(); +} /* vec_min */ -#define __builtin_vec_vminsb __builtin_altivec_vminsb -#define __builtin_vec_vminub __builtin_altivec_vminub -#define __builtin_vec_vminsh __builtin_altivec_vminsh -#define __builtin_vec_vminuh __builtin_altivec_vminuh -#define __builtin_vec_vminsw __builtin_altivec_vminsw -#define __builtin_vec_vminuw __builtin_altivec_vminuw -#define __builtin_vec_vminfp __builtin_altivec_vminfp -#define vec_vminsb __builtin_altivec_vminsb -#define vec_vminub __builtin_altivec_vminub -#define vec_vminsh __builtin_altivec_vminsh -#define vec_vminuh __builtin_altivec_vminuh -#define vec_vminsw __builtin_altivec_vminsw -#define vec_vminuw __builtin_altivec_vminuw -#define vec_vminfp __builtin_altivec_vminfp -#define __builtin_vec_min vec_min - -static vector signed char _ATTRS_o_ai +static vector signed char __ATTRS_o_ai vec_min(vector signed char a, vector signed char b) { return __builtin_altivec_vminsb(a, b); } -static vector unsigned char _ATTRS_o_ai +static vector unsigned char __ATTRS_o_ai vec_min(vector unsigned char a, vector unsigned char b) { return __builtin_altivec_vminub(a, b); } -static vector short _ATTRS_o_ai +static vector short __ATTRS_o_ai vec_min(vector short a, vector short b) { return __builtin_altivec_vminsh(a, b); } -static vector unsigned short _ATTRS_o_ai +static vector unsigned short __ATTRS_o_ai vec_min(vector unsigned 
short a, vector unsigned short b) { return __builtin_altivec_vminuh(a, b); } -static vector int _ATTRS_o_ai +static vector int __ATTRS_o_ai vec_min(vector int a, vector int b) { return __builtin_altivec_vminsw(a, b); } -static vector unsigned int _ATTRS_o_ai +static vector unsigned int __ATTRS_o_ai vec_min(vector unsigned int a, vector unsigned int b) { return __builtin_altivec_vminuw(a, b); } -static vector float _ATTRS_o_ai +static vector float __ATTRS_o_ai vec_min(vector float a, vector float b) { return __builtin_altivec_vminfp(a, b); } +/* vec_vminsb */ + +static vector signed char __attribute__((__always_inline__)) +vec_vminsb(vector signed char a, vector signed char b) +{ + return __builtin_altivec_vminsb(a, b); +} + +/* vec_vminub */ + +static vector unsigned char __attribute__((__always_inline__)) +vec_vminub(vector unsigned char a, vector unsigned char b) +{ + return __builtin_altivec_vminub(a, b); +} + +/* vec_vminsh */ + +static vector short __attribute__((__always_inline__)) +vec_vminsh(vector short a, vector short b) +{ + return __builtin_altivec_vminsh(a, b); +} + +/* vec_vminuh */ + +static vector unsigned short __attribute__((__always_inline__)) +vec_vminuh(vector unsigned short a, vector unsigned short b) +{ + return __builtin_altivec_vminuh(a, b); +} + +/* vec_vminsw */ + +static vector int __attribute__((__always_inline__)) +vec_vminsw(vector int a, vector int b) +{ + return __builtin_altivec_vminsw(a, b); +} + +/* vec_vminuw */ + +static vector unsigned int __attribute__((__always_inline__)) +vec_vminuw(vector unsigned int a, vector unsigned int b) +{ + return __builtin_altivec_vminuw(a, b); +} + +/* vec_vminfp */ + +static vector float __attribute__((__always_inline__)) +vec_vminfp(vector float a, vector float b) +{ + return __builtin_altivec_vminfp(a, b); +} + +/* vec_mladd */ + +#define __builtin_altivec_vmladduhm vec_mladd + +static vector short __ATTRS_o_ai +vec_mladd(vector short a, vector short b, vector short c) +{ + return a * b + c; 
+} + +static vector short __ATTRS_o_ai +vec_mladd(vector short a, vector unsigned short b, vector unsigned short c) +{ + return a * (vector short)b + (vector short)c; +} + +static vector short __ATTRS_o_ai +vec_mladd(vector unsigned short a, vector short b, vector short c) +{ + return (vector short)a * b + c; +} + +static vector unsigned short __ATTRS_o_ai +vec_mladd(vector unsigned short a, vector unsigned short b, vector unsigned short c) +{ + return a * b + c; +} + +/* vec_vmladduhm */ + +static vector short __ATTRS_o_ai +vec_vmladduhm(vector short a, vector short b, vector short c) +{ + return a * b + c; +} + +static vector short __ATTRS_o_ai +vec_vmladduhm(vector short a, vector unsigned short b, vector unsigned short c) +{ + return a * (vector short)b + (vector short)c; +} + +static vector short __ATTRS_o_ai +vec_vmladduhm(vector unsigned short a, vector short b, vector short c) +{ + return (vector short)a * b + c; +} + +static vector unsigned short __ATTRS_o_ai +vec_vmladduhm(vector unsigned short a, vector unsigned short b, vector unsigned short c) +{ + return a * b + c; +} + +/* vec_mradds */ + +static vector short __attribute__((__always_inline__)) +vec_mradds(vector short a, vector short b, vector short c) +{ + return __builtin_altivec_vmhraddshs(a, b, c); +} + +/* vec_vmhraddshs */ + +static vector short __attribute__((__always_inline__)) +vec_vmhraddshs(vector short a, vector short b, vector short c) +{ + return __builtin_altivec_vmhraddshs(a, b, c); +} + +/* vec_msum */ + +static vector int __ATTRS_o_ai +vec_msum(vector signed char a, vector unsigned char b, vector int c) +{ + return __builtin_altivec_vmsummbm(a, b, c); +} + +static vector unsigned int __ATTRS_o_ai +vec_msum(vector unsigned char a, vector unsigned char b, vector unsigned int c) +{ + return __builtin_altivec_vmsumubm(a, b, c); +} + +static vector int __ATTRS_o_ai +vec_msum(vector short a, vector short b, vector int c) +{ + return __builtin_altivec_vmsumshm(a, b, c); +} + +static vector 
unsigned int __ATTRS_o_ai +vec_msum(vector unsigned short a, vector unsigned short b, vector unsigned int c) +{ + return __builtin_altivec_vmsumuhm(a, b, c); +} + +/* vec_vmsummbm */ + +static vector int __attribute__((__always_inline__)) +vec_vmsummbm(vector signed char a, vector unsigned char b, vector int c) +{ + return __builtin_altivec_vmsummbm(a, b, c); +} + +/* vec_vmsumubm */ + +static vector unsigned int __attribute__((__always_inline__)) +vec_vmsumubm(vector unsigned char a, vector unsigned char b, vector unsigned int c) +{ + return __builtin_altivec_vmsumubm(a, b, c); +} + +/* vec_vmsumshm */ + +static vector int __attribute__((__always_inline__)) +vec_vmsumshm(vector short a, vector short b, vector int c) +{ + return __builtin_altivec_vmsumshm(a, b, c); +} + +/* vec_vmsumuhm */ + +static vector unsigned int __attribute__((__always_inline__)) +vec_vmsumuhm(vector unsigned short a, vector unsigned short b, vector unsigned int c) +{ + return __builtin_altivec_vmsumuhm(a, b, c); +} + +/* vec_msums */ + +static vector int __ATTRS_o_ai +vec_msums(vector short a, vector short b, vector int c) +{ + return __builtin_altivec_vmsumshs(a, b, c); +} + +static vector unsigned int __ATTRS_o_ai +vec_msums(vector unsigned short a, vector unsigned short b, vector unsigned int c) +{ + return __builtin_altivec_vmsumuhs(a, b, c); +} + +/* vec_vmsumshs */ + +static vector int __attribute__((__always_inline__)) +vec_vmsumshs(vector short a, vector short b, vector int c) +{ + return __builtin_altivec_vmsumshs(a, b, c); +} + +/* vec_vmsumuhs */ + +static vector unsigned int __attribute__((__always_inline__)) +vec_vmsumuhs(vector unsigned short a, vector unsigned short b, vector unsigned int c) +{ + return __builtin_altivec_vmsumuhs(a, b, c); +} + /* vec_mtvscr */ -#define __builtin_vec_mtvscr __builtin_altivec_mtvscr -#define vec_mtvscr __builtin_altivec_mtvscr +static void __ATTRS_o_ai +vec_mtvscr(vector signed char a) +{ + __builtin_altivec_mtvscr((vector int)a); +} -/* 
------------------------------ predicates ------------------------------------ */ +static void __ATTRS_o_ai +vec_mtvscr(vector unsigned char a) +{ + __builtin_altivec_mtvscr((vector int)a); +} -static int __attribute__((__always_inline__)) -__builtin_vec_vcmpeq_p(char CR6_param, vector float a, vector float b) +static void __ATTRS_o_ai +vec_mtvscr(vector short a) { - return __builtin_altivec_vcmpeqfp_p(CR6_param, a, b); + __builtin_altivec_mtvscr((vector int)a); } -static int __attribute__((__always_inline__)) -__builtin_vec_vcmpge_p(char CR6_param, vector float a, vector float b) +static void __ATTRS_o_ai +vec_mtvscr(vector unsigned short a) { - return __builtin_altivec_vcmpgefp_p(CR6_param, a, b); + __builtin_altivec_mtvscr((vector int)a); } -static int __attribute__((__always_inline__)) -__builtin_vec_vcmpgt_p(char CR6_param, vector float a, vector float b) +static void __ATTRS_o_ai +vec_mtvscr(vector int a) +{ + __builtin_altivec_mtvscr((vector int)a); +} + +static void __ATTRS_o_ai +vec_mtvscr(vector unsigned int a) +{ + __builtin_altivec_mtvscr((vector int)a); +} + +static void __ATTRS_o_ai +vec_mtvscr(vector float a) +{ + __builtin_altivec_mtvscr((vector int)a); +} + +/* vec_mule */ + +static vector short __ATTRS_o_ai +vec_mule(vector signed char a, vector signed char b) +{ + return __builtin_altivec_vmulesb(a, b); +} + +static vector unsigned short __ATTRS_o_ai +vec_mule(vector unsigned char a, vector unsigned char b) +{ + return __builtin_altivec_vmuleub(a, b); +} + +static vector int __ATTRS_o_ai +vec_mule(vector short a, vector short b) +{ + return __builtin_altivec_vmulesh(a, b); +} + +static vector unsigned int __ATTRS_o_ai +vec_mule(vector unsigned short a, vector unsigned short b) +{ + return __builtin_altivec_vmuleuh(a, b); +} + +/* vec_vmulesb */ + +static vector short __attribute__((__always_inline__)) +vec_vmulesb(vector signed char a, vector signed char b) +{ + return __builtin_altivec_vmulesb(a, b); +} + +/* vec_vmuleub */ + +static vector 
unsigned short __attribute__((__always_inline__)) +vec_vmuleub(vector unsigned char a, vector unsigned char b) +{ + return __builtin_altivec_vmuleub(a, b); +} + +/* vec_vmulesh */ + +static vector int __attribute__((__always_inline__)) +vec_vmulesh(vector short a, vector short b) +{ + return __builtin_altivec_vmulesh(a, b); +} + +/* vec_vmuleuh */ + +static vector unsigned int __attribute__((__always_inline__)) +vec_vmuleuh(vector unsigned short a, vector unsigned short b) +{ + return __builtin_altivec_vmuleuh(a, b); +} + +/* vec_mulo */ + +static vector short __ATTRS_o_ai +vec_mulo(vector signed char a, vector signed char b) +{ + return __builtin_altivec_vmulosb(a, b); +} + +static vector unsigned short __ATTRS_o_ai +vec_mulo(vector unsigned char a, vector unsigned char b) +{ + return __builtin_altivec_vmuloub(a, b); +} + +static vector int __ATTRS_o_ai +vec_mulo(vector short a, vector short b) +{ + return __builtin_altivec_vmulosh(a, b); +} + +static vector unsigned int __ATTRS_o_ai +vec_mulo(vector unsigned short a, vector unsigned short b) +{ + return __builtin_altivec_vmulouh(a, b); +} + +/* vec_vmulosb */ + +static vector short __attribute__((__always_inline__)) +vec_vmulosb(vector signed char a, vector signed char b) +{ + return __builtin_altivec_vmulosb(a, b); +} + +/* vec_vmuloub */ + +static vector unsigned short __attribute__((__always_inline__)) +vec_vmuloub(vector unsigned char a, vector unsigned char b) +{ + return __builtin_altivec_vmuloub(a, b); +} + +/* vec_vmulosh */ + +static vector int __attribute__((__always_inline__)) +vec_vmulosh(vector short a, vector short b) +{ + return __builtin_altivec_vmulosh(a, b); +} + +/* vec_vmulouh */ + +static vector unsigned int __attribute__((__always_inline__)) +vec_vmulouh(vector unsigned short a, vector unsigned short b) +{ + return __builtin_altivec_vmulouh(a, b); +} + +/* vec_nmsub */ + +static vector float __attribute__((__always_inline__)) +vec_nmsub(vector float a, vector float b, vector float c) +{ + 
return __builtin_altivec_vnmsubfp(a, b, c); +} + +/* vec_vnmsubfp */ + +static vector float __attribute__((__always_inline__)) +vec_vnmsubfp(vector float a, vector float b, vector float c) +{ + return __builtin_altivec_vnmsubfp(a, b, c); +} + +/* vec_nor */ + +#define __builtin_altivec_vnor vec_nor + +static vector signed char __ATTRS_o_ai +vec_nor(vector signed char a, vector signed char b) +{ + return ~(a | b); +} + +static vector unsigned char __ATTRS_o_ai +vec_nor(vector unsigned char a, vector unsigned char b) +{ + return ~(a | b); +} + +static vector short __ATTRS_o_ai +vec_nor(vector short a, vector short b) +{ + return ~(a | b); +} + +static vector unsigned short __ATTRS_o_ai +vec_nor(vector unsigned short a, vector unsigned short b) +{ + return ~(a | b); +} + +static vector int __ATTRS_o_ai +vec_nor(vector int a, vector int b) +{ + return ~(a | b); +} + +static vector unsigned int __ATTRS_o_ai +vec_nor(vector unsigned int a, vector unsigned int b) +{ + return ~(a | b); +} + +static vector float __ATTRS_o_ai +vec_nor(vector float a, vector float b) +{ + vector unsigned int res = ~((vector unsigned int)a | (vector unsigned int)b); + return (vector float)res; +} + +/* vec_vnor */ + +static vector signed char __ATTRS_o_ai +vec_vnor(vector signed char a, vector signed char b) +{ + return ~(a | b); +} + +static vector unsigned char __ATTRS_o_ai +vec_vnor(vector unsigned char a, vector unsigned char b) +{ + return ~(a | b); +} + +static vector short __ATTRS_o_ai +vec_vnor(vector short a, vector short b) +{ + return ~(a | b); +} + +static vector unsigned short __ATTRS_o_ai +vec_vnor(vector unsigned short a, vector unsigned short b) +{ + return ~(a | b); +} + +static vector int __ATTRS_o_ai +vec_vnor(vector int a, vector int b) +{ + return ~(a | b); +} + +static vector unsigned int __ATTRS_o_ai +vec_vnor(vector unsigned int a, vector unsigned int b) +{ + return ~(a | b); +} + +static vector float __ATTRS_o_ai +vec_vnor(vector float a, vector float b) +{ + vector 
unsigned int res = ~((vector unsigned int)a | (vector unsigned int)b); + return (vector float)res; +} + +/* vec_or */ + +#define __builtin_altivec_vor vec_or + +static vector signed char __ATTRS_o_ai +vec_or(vector signed char a, vector signed char b) +{ + return a | b; +} + +static vector unsigned char __ATTRS_o_ai +vec_or(vector unsigned char a, vector unsigned char b) +{ + return a | b; +} + +static vector short __ATTRS_o_ai +vec_or(vector short a, vector short b) +{ + return a | b; +} + +static vector unsigned short __ATTRS_o_ai +vec_or(vector unsigned short a, vector unsigned short b) +{ + return a | b; +} + +static vector int __ATTRS_o_ai +vec_or(vector int a, vector int b) +{ + return a | b; +} + +static vector unsigned int __ATTRS_o_ai +vec_or(vector unsigned int a, vector unsigned int b) +{ + return a | b; +} + +static vector float __ATTRS_o_ai +vec_or(vector float a, vector float b) +{ + vector unsigned int res = (vector unsigned int)a | (vector unsigned int)b; + return (vector float)res; +} + +/* vec_vor */ + +static vector signed char __ATTRS_o_ai +vec_vor(vector signed char a, vector signed char b) +{ + return a | b; +} + +static vector unsigned char __ATTRS_o_ai +vec_vor(vector unsigned char a, vector unsigned char b) +{ + return a | b; +} + +static vector short __ATTRS_o_ai +vec_vor(vector short a, vector short b) +{ + return a | b; +} + +static vector unsigned short __ATTRS_o_ai +vec_vor(vector unsigned short a, vector unsigned short b) +{ + return a | b; +} + +static vector int __ATTRS_o_ai +vec_vor(vector int a, vector int b) +{ + return a | b; +} + +static vector unsigned int __ATTRS_o_ai +vec_vor(vector unsigned int a, vector unsigned int b) +{ + return a | b; +} + +static vector float __ATTRS_o_ai +vec_vor(vector float a, vector float b) +{ + vector unsigned int res = (vector unsigned int)a | (vector unsigned int)b; + return (vector float)res; +} + +/* vec_pack */ + +static vector signed char __ATTRS_o_ai +vec_pack(vector signed short a, vector 
signed short b) +{ + return (vector signed char)vec_perm(a, b, (vector unsigned char) + (0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F, + 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F)); +} + +static vector unsigned char __ATTRS_o_ai +vec_pack(vector unsigned short a, vector unsigned short b) +{ + return (vector unsigned char)vec_perm(a, b, (vector unsigned char) + (0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F, + 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F)); +} + +static vector short __ATTRS_o_ai +vec_pack(vector int a, vector int b) +{ + return (vector short)vec_perm(a, b, (vector unsigned char) + (0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F, + 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F)); +} + +static vector unsigned short __ATTRS_o_ai +vec_pack(vector unsigned int a, vector unsigned int b) +{ + return (vector unsigned short)vec_perm(a, b, (vector unsigned char) + (0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F, + 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F)); +} + +/* vec_vpkuhum */ + +#define __builtin_altivec_vpkuhum vec_vpkuhum + +static vector signed char __ATTRS_o_ai +vec_vpkuhum(vector signed short a, vector signed short b) +{ + return (vector signed char)vec_perm(a, b, (vector unsigned char) + (0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F, + 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F)); +} + +static vector unsigned char __ATTRS_o_ai +vec_vpkuhum(vector unsigned short a, vector unsigned short b) +{ + return (vector unsigned char)vec_perm(a, b, (vector unsigned char) + (0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F, + 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F)); +} + +/* vec_vpkuwum */ + +#define __builtin_altivec_vpkuwum vec_vpkuwum + +static vector short __ATTRS_o_ai +vec_vpkuwum(vector int a, vector int b) +{ + return (vector short)vec_perm(a, b, (vector unsigned char) + (0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F, + 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F)); +} + +static vector unsigned short __ATTRS_o_ai 
+vec_vpkuwum(vector unsigned int a, vector unsigned int b) +{ + return (vector unsigned short)vec_perm(a, b, (vector unsigned char) + (0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F, + 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F)); +} + +/* vec_packpx */ + +static vector pixel __attribute__((__always_inline__)) +vec_packpx(vector unsigned int a, vector unsigned int b) +{ + return (vector pixel)__builtin_altivec_vpkpx(a, b); +} + +/* vec_vpkpx */ + +static vector pixel __attribute__((__always_inline__)) +vec_vpkpx(vector unsigned int a, vector unsigned int b) +{ + return (vector pixel)__builtin_altivec_vpkpx(a, b); +} + +/* vec_packs */ + +static vector signed char __ATTRS_o_ai +vec_packs(vector short a, vector short b) +{ + return __builtin_altivec_vpkshss(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_packs(vector unsigned short a, vector unsigned short b) +{ + return __builtin_altivec_vpkuhus(a, b); +} + +static vector signed short __ATTRS_o_ai +vec_packs(vector int a, vector int b) +{ + return __builtin_altivec_vpkswss(a, b); +} + +static vector unsigned short __ATTRS_o_ai +vec_packs(vector unsigned int a, vector unsigned int b) +{ + return __builtin_altivec_vpkuwus(a, b); +} + +/* vec_vpkshss */ + +static vector signed char __attribute__((__always_inline__)) +vec_vpkshss(vector short a, vector short b) +{ + return __builtin_altivec_vpkshss(a, b); +} + +/* vec_vpkuhus */ + +static vector unsigned char __attribute__((__always_inline__)) +vec_vpkuhus(vector unsigned short a, vector unsigned short b) +{ + return __builtin_altivec_vpkuhus(a, b); +} + +/* vec_vpkswss */ + +static vector signed short __attribute__((__always_inline__)) +vec_vpkswss(vector int a, vector int b) +{ + return __builtin_altivec_vpkswss(a, b); +} + +/* vec_vpkuwus */ + +static vector unsigned short __attribute__((__always_inline__)) +vec_vpkuwus(vector unsigned int a, vector unsigned int b) +{ + return __builtin_altivec_vpkuwus(a, b); +} + +/* vec_packsu */ + +static vector 
unsigned char __ATTRS_o_ai +vec_packsu(vector short a, vector short b) +{ + return __builtin_altivec_vpkshus(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_packsu(vector unsigned short a, vector unsigned short b) +{ + return __builtin_altivec_vpkuhus(a, b); +} + +static vector unsigned short __ATTRS_o_ai +vec_packsu(vector int a, vector int b) +{ + return __builtin_altivec_vpkswus(a, b); +} + +static vector unsigned short __ATTRS_o_ai +vec_packsu(vector unsigned int a, vector unsigned int b) +{ + return __builtin_altivec_vpkuwus(a, b); +} + +/* vec_vpkshus */ + +static vector unsigned char __ATTRS_o_ai +vec_vpkshus(vector short a, vector short b) +{ + return __builtin_altivec_vpkshus(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_vpkshus(vector unsigned short a, vector unsigned short b) +{ + return __builtin_altivec_vpkuhus(a, b); +} + +/* vec_vpkswus */ + +static vector unsigned short __ATTRS_o_ai +vec_vpkswus(vector int a, vector int b) +{ + return __builtin_altivec_vpkswus(a, b); +} + +static vector unsigned short __ATTRS_o_ai +vec_vpkswus(vector unsigned int a, vector unsigned int b) +{ + return __builtin_altivec_vpkuwus(a, b); +} + +/* vec_perm */ + +vector signed char __ATTRS_o_ai +vec_perm(vector signed char a, vector signed char b, vector unsigned char c) +{ + return (vector signed char)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c); +} + +vector unsigned char __ATTRS_o_ai +vec_perm(vector unsigned char a, vector unsigned char b, vector unsigned char c) +{ + return (vector unsigned char)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c); +} + +vector short __ATTRS_o_ai +vec_perm(vector short a, vector short b, vector unsigned char c) +{ + return (vector short)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c); +} + +vector unsigned short __ATTRS_o_ai +vec_perm(vector unsigned short a, vector unsigned short b, vector unsigned char c) +{ + return (vector unsigned 
short)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c); +} + +vector int __ATTRS_o_ai +vec_perm(vector int a, vector int b, vector unsigned char c) +{ + return (vector int)__builtin_altivec_vperm_4si(a, b, c); +} + +vector unsigned int __ATTRS_o_ai +vec_perm(vector unsigned int a, vector unsigned int b, vector unsigned char c) +{ + return (vector unsigned int)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c); +} + +vector float __ATTRS_o_ai +vec_perm(vector float a, vector float b, vector unsigned char c) +{ + return (vector float)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c); +} + +/* vec_vperm */ + +vector signed char __ATTRS_o_ai +vec_vperm(vector signed char a, vector signed char b, vector unsigned char c) +{ + return (vector signed char)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c); +} + +vector unsigned char __ATTRS_o_ai +vec_vperm(vector unsigned char a, vector unsigned char b, vector unsigned char c) +{ + return (vector unsigned char)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c); +} + +vector short __ATTRS_o_ai +vec_vperm(vector short a, vector short b, vector unsigned char c) +{ + return (vector short)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c); +} + +vector unsigned short __ATTRS_o_ai +vec_vperm(vector unsigned short a, vector unsigned short b, vector unsigned char c) +{ + return (vector unsigned short)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c); +} + +vector int __ATTRS_o_ai +vec_vperm(vector int a, vector int b, vector unsigned char c) +{ + return (vector int)__builtin_altivec_vperm_4si(a, b, c); +} + +vector unsigned int __ATTRS_o_ai +vec_vperm(vector unsigned int a, vector unsigned int b, vector unsigned char c) +{ + return (vector unsigned int)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c); +} + +vector float __ATTRS_o_ai +vec_vperm(vector float a, vector float b, vector unsigned char c) +{ + return (vector 
float)__builtin_altivec_vperm_4si((vector int)a, (vector int)b, c); +} + +/* vec_re */ + +vector float __attribute__((__always_inline__)) +vec_re(vector float a) +{ + return __builtin_altivec_vrefp(a); +} + +/* vec_vrefp */ + +vector float __attribute__((__always_inline__)) +vec_vrefp(vector float a) +{ + return __builtin_altivec_vrefp(a); +} + +/* vec_rl */ + +static vector signed char __ATTRS_o_ai +vec_rl(vector signed char a, vector unsigned char b) +{ + return (vector signed char)__builtin_altivec_vrlb((vector char)a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_rl(vector unsigned char a, vector unsigned char b) +{ + return (vector unsigned char)__builtin_altivec_vrlb((vector char)a, b); +} + +static vector short __ATTRS_o_ai +vec_rl(vector short a, vector unsigned short b) +{ + return __builtin_altivec_vrlh(a, b); +} + +static vector unsigned short __ATTRS_o_ai +vec_rl(vector unsigned short a, vector unsigned short b) +{ + return (vector unsigned short)__builtin_altivec_vrlh((vector short)a, b); +} + +static vector int __ATTRS_o_ai +vec_rl(vector int a, vector unsigned int b) +{ + return __builtin_altivec_vrlw(a, b); +} + +static vector unsigned int __ATTRS_o_ai +vec_rl(vector unsigned int a, vector unsigned int b) +{ + return (vector unsigned int)__builtin_altivec_vrlw((vector int)a, b); +} + +/* vec_vrlb */ + +static vector signed char __ATTRS_o_ai +vec_vrlb(vector signed char a, vector unsigned char b) +{ + return (vector signed char)__builtin_altivec_vrlb((vector char)a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_vrlb(vector unsigned char a, vector unsigned char b) +{ + return (vector unsigned char)__builtin_altivec_vrlb((vector char)a, b); +} + +/* vec_vrlh */ + +static vector short __ATTRS_o_ai +vec_vrlh(vector short a, vector unsigned short b) +{ + return __builtin_altivec_vrlh(a, b); +} + +static vector unsigned short __ATTRS_o_ai +vec_vrlh(vector unsigned short a, vector unsigned short b) +{ + return (vector unsigned 
short)__builtin_altivec_vrlh((vector short)a, b); +} + +/* vec_vrlw */ + +static vector int __ATTRS_o_ai +vec_vrlw(vector int a, vector unsigned int b) +{ + return __builtin_altivec_vrlw(a, b); +} + +static vector unsigned int __ATTRS_o_ai +vec_vrlw(vector unsigned int a, vector unsigned int b) +{ + return (vector unsigned int)__builtin_altivec_vrlw((vector int)a, b); +} + +/* vec_round */ + +static vector float __attribute__((__always_inline__)) +vec_round(vector float a) +{ + return __builtin_altivec_vrfin(a); +} + +/* vec_vrfin */ + +static vector float __attribute__((__always_inline__)) +vec_vrfin(vector float a) +{ + return __builtin_altivec_vrfin(a); +} + +/* vec_rsqrte */ + +static __vector float __attribute__((__always_inline__)) +vec_rsqrte(vector float a) +{ + return __builtin_altivec_vrsqrtefp(a); +} + +/* vec_vrsqrtefp */ + +static __vector float __attribute__((__always_inline__)) +vec_vrsqrtefp(vector float a) +{ + return __builtin_altivec_vrsqrtefp(a); +} + +/* vec_sel */ + +#define __builtin_altivec_vsel_4si vec_sel + +static vector signed char __ATTRS_o_ai +vec_sel(vector signed char a, vector signed char b, vector unsigned char c) +{ + return (a & ~(vector signed char)c) | (b & (vector signed char)c); +} + +static vector unsigned char __ATTRS_o_ai +vec_sel(vector unsigned char a, vector unsigned char b, vector unsigned char c) +{ + return (a & ~c) | (b & c); +} + +static vector short __ATTRS_o_ai +vec_sel(vector short a, vector short b, vector unsigned short c) +{ + return (a & ~(vector short)c) | (b & (vector short)c); +} + +static vector unsigned short __ATTRS_o_ai +vec_sel(vector unsigned short a, vector unsigned short b, vector unsigned short c) +{ + return (a & ~c) | (b & c); +} + +static vector int __ATTRS_o_ai +vec_sel(vector int a, vector int b, vector unsigned int c) +{ + return (a & ~(vector int)c) | (b & (vector int)c); +} + +static vector unsigned int __ATTRS_o_ai +vec_sel(vector unsigned int a, vector unsigned int b, vector unsigned 
int c) +{ + return (a & ~c) | (b & c); +} + +static vector float __ATTRS_o_ai +vec_sel(vector float a, vector float b, vector unsigned int c) +{ + vector int res = ((vector int)a & ~(vector int)c) | ((vector int)b & (vector int)c); + return (vector float)res; +} + +/* vec_vsel */ + +static vector signed char __ATTRS_o_ai +vec_vsel(vector signed char a, vector signed char b, vector unsigned char c) +{ + return (a & ~(vector signed char)c) | (b & (vector signed char)c); +} + +static vector unsigned char __ATTRS_o_ai +vec_vsel(vector unsigned char a, vector unsigned char b, vector unsigned char c) +{ + return (a & ~c) | (b & c); +} + +static vector short __ATTRS_o_ai +vec_vsel(vector short a, vector short b, vector unsigned short c) +{ + return (a & ~(vector short)c) | (b & (vector short)c); +} + +static vector unsigned short __ATTRS_o_ai +vec_vsel(vector unsigned short a, vector unsigned short b, vector unsigned short c) +{ + return (a & ~c) | (b & c); +} + +static vector int __ATTRS_o_ai +vec_vsel(vector int a, vector int b, vector unsigned int c) +{ + return (a & ~(vector int)c) | (b & (vector int)c); +} + +static vector unsigned int __ATTRS_o_ai +vec_vsel(vector unsigned int a, vector unsigned int b, vector unsigned int c) +{ + return (a & ~c) | (b & c); +} + +static vector float __ATTRS_o_ai +vec_vsel(vector float a, vector float b, vector unsigned int c) +{ + vector int res = ((vector int)a & ~(vector int)c) | ((vector int)b & (vector int)c); + return (vector float)res; +} + +/* vec_sl */ + +static vector signed char __ATTRS_o_ai +vec_sl(vector signed char a, vector unsigned char b) +{ + return a << (vector signed char)b; +} + +static vector unsigned char __ATTRS_o_ai +vec_sl(vector unsigned char a, vector unsigned char b) +{ + return a << b; +} + +static vector short __ATTRS_o_ai +vec_sl(vector short a, vector unsigned short b) +{ + return a << (vector short)b; +} + +static vector unsigned short __ATTRS_o_ai +vec_sl(vector unsigned short a, vector unsigned 
short b) +{ + return a << b; +} + +static vector int __ATTRS_o_ai +vec_sl(vector int a, vector unsigned int b) +{ + return a << (vector int)b; +} + +static vector unsigned int __ATTRS_o_ai +vec_sl(vector unsigned int a, vector unsigned int b) +{ + return a << b; +} + +/* vec_vslb */ + +#define __builtin_altivec_vslb vec_vslb + +static vector signed char __ATTRS_o_ai +vec_vslb(vector signed char a, vector unsigned char b) +{ + return vec_sl(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_vslb(vector unsigned char a, vector unsigned char b) +{ + return vec_sl(a, b); +} + +/* vec_vslh */ + +#define __builtin_altivec_vslh vec_vslh + +static vector short __ATTRS_o_ai +vec_vslh(vector short a, vector unsigned short b) +{ + return vec_sl(a, b); +} + +static vector unsigned short __ATTRS_o_ai +vec_vslh(vector unsigned short a, vector unsigned short b) +{ + return vec_sl(a, b); +} + +/* vec_vslw */ + +#define __builtin_altivec_vslw vec_vslw + +static vector int __ATTRS_o_ai +vec_vslw(vector int a, vector unsigned int b) +{ + return vec_sl(a, b); +} + +static vector unsigned int __ATTRS_o_ai +vec_vslw(vector unsigned int a, vector unsigned int b) +{ + return vec_sl(a, b); +} + +/* vec_sld */ + +#define __builtin_altivec_vsldoi_4si vec_sld + +static vector signed char __ATTRS_o_ai +vec_sld(vector signed char a, vector signed char b, unsigned char c) +{ + return (vector signed char)vec_perm(a, b, (vector unsigned char) + (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7, + c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15)); +} + +static vector unsigned char __ATTRS_o_ai +vec_sld(vector unsigned char a, vector unsigned char b, unsigned char c) +{ + return (vector unsigned char)vec_perm(a, b, (vector unsigned char) + (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7, + c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15)); +} + +static vector short __ATTRS_o_ai +vec_sld(vector short a, vector short b, unsigned char c) +{ + return (vector short)vec_perm(a, b, (vector unsigned char) + (c, c+1, c+2, c+3, 
c+4, c+5, c+6, c+7, + c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15)); +} + +static vector unsigned short __ATTRS_o_ai +vec_sld(vector unsigned short a, vector unsigned short b, unsigned char c) +{ + return (vector unsigned short)vec_perm(a, b, (vector unsigned char) + (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7, + c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15)); +} + +static vector int __ATTRS_o_ai +vec_sld(vector int a, vector int b, unsigned char c) +{ + return vec_perm(a, b, (vector unsigned char) + (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7, + c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15)); +} + +static vector unsigned int __ATTRS_o_ai +vec_sld(vector unsigned int a, vector unsigned int b, unsigned char c) +{ + return (vector unsigned int)vec_perm(a, b, (vector unsigned char) + (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7, + c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15)); +} + +static vector float __ATTRS_o_ai +vec_sld(vector float a, vector float b, unsigned char c) +{ + return (vector float)vec_perm(a, b, (vector unsigned char) + (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7, + c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15)); +} + +/* vec_vsldoi */ + +static vector signed char __ATTRS_o_ai +vec_vsldoi(vector signed char a, vector signed char b, unsigned char c) +{ + return (vector signed char)vec_perm(a, b, (vector unsigned char) + (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7, + c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15)); +} + +static vector unsigned char __ATTRS_o_ai +vec_vsldoi(vector unsigned char a, vector unsigned char b, unsigned char c) +{ + return (vector unsigned char)vec_perm(a, b, (vector unsigned char) + (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7, + c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15)); +} + +static vector short __ATTRS_o_ai +vec_vsldoi(vector short a, vector short b, unsigned char c) +{ + return (vector short)vec_perm(a, b, (vector unsigned char) + (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7, + c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15)); +} + +static vector unsigned short 
__ATTRS_o_ai +vec_vsldoi(vector unsigned short a, vector unsigned short b, unsigned char c) +{ + return (vector unsigned short)vec_perm(a, b, (vector unsigned char) + (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7, + c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15)); +} + +static vector int __ATTRS_o_ai +vec_vsldoi(vector int a, vector int b, unsigned char c) +{ + return vec_perm(a, b, (vector unsigned char) + (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7, + c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15)); +} + +static vector unsigned int __ATTRS_o_ai +vec_vsldoi(vector unsigned int a, vector unsigned int b, unsigned char c) +{ + return (vector unsigned int)vec_perm(a, b, (vector unsigned char) + (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7, + c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15)); +} + +static vector float __ATTRS_o_ai +vec_vsldoi(vector float a, vector float b, unsigned char c) +{ + return (vector float)vec_perm(a, b, (vector unsigned char) + (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7, + c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15)); +} + +/* vec_sll */ + +static vector signed char __ATTRS_o_ai +vec_sll(vector signed char a, vector unsigned char b) +{ + return (vector signed char)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector signed char __ATTRS_o_ai +vec_sll(vector signed char a, vector unsigned short b) +{ + return (vector signed char)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector signed char __ATTRS_o_ai +vec_sll(vector signed char a, vector unsigned int b) +{ + return (vector signed char)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector unsigned char __ATTRS_o_ai +vec_sll(vector unsigned char a, vector unsigned char b) +{ + return (vector unsigned char)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector unsigned char __ATTRS_o_ai +vec_sll(vector unsigned char a, vector unsigned short b) +{ + return (vector unsigned char)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static 
vector unsigned char __ATTRS_o_ai +vec_sll(vector unsigned char a, vector unsigned int b) +{ + return (vector unsigned char)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector short __ATTRS_o_ai +vec_sll(vector short a, vector unsigned char b) +{ + return (vector short)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector short __ATTRS_o_ai +vec_sll(vector short a, vector unsigned short b) +{ + return (vector short)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector short __ATTRS_o_ai +vec_sll(vector short a, vector unsigned int b) +{ + return (vector short)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector unsigned short __ATTRS_o_ai +vec_sll(vector unsigned short a, vector unsigned char b) +{ + return (vector unsigned short)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector unsigned short __ATTRS_o_ai +vec_sll(vector unsigned short a, vector unsigned short b) +{ + return (vector unsigned short)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector unsigned short __ATTRS_o_ai +vec_sll(vector unsigned short a, vector unsigned int b) +{ + return (vector unsigned short)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector int __ATTRS_o_ai +vec_sll(vector int a, vector unsigned char b) +{ + return (vector int)__builtin_altivec_vsl(a, (vector int)b); +} + +static vector int __ATTRS_o_ai +vec_sll(vector int a, vector unsigned short b) +{ + return (vector int)__builtin_altivec_vsl(a, (vector int)b); +} + +static vector int __ATTRS_o_ai +vec_sll(vector int a, vector unsigned int b) +{ + return (vector int)__builtin_altivec_vsl(a, (vector int)b); +} + +static vector unsigned int __ATTRS_o_ai +vec_sll(vector unsigned int a, vector unsigned char b) +{ + return (vector unsigned int)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector unsigned int __ATTRS_o_ai +vec_sll(vector unsigned int a, vector unsigned short b) 
+{ + return (vector unsigned int)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector unsigned int __ATTRS_o_ai +vec_sll(vector unsigned int a, vector unsigned int b) +{ + return (vector unsigned int)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +/* vec_vsl */ + +static vector signed char __ATTRS_o_ai +vec_vsl(vector signed char a, vector unsigned char b) +{ + return (vector signed char)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector signed char __ATTRS_o_ai +vec_vsl(vector signed char a, vector unsigned short b) +{ + return (vector signed char)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector signed char __ATTRS_o_ai +vec_vsl(vector signed char a, vector unsigned int b) +{ + return (vector signed char)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector unsigned char __ATTRS_o_ai +vec_vsl(vector unsigned char a, vector unsigned char b) +{ + return (vector unsigned char)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector unsigned char __ATTRS_o_ai +vec_vsl(vector unsigned char a, vector unsigned short b) +{ + return (vector unsigned char)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector unsigned char __ATTRS_o_ai +vec_vsl(vector unsigned char a, vector unsigned int b) +{ + return (vector unsigned char)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector short __ATTRS_o_ai +vec_vsl(vector short a, vector unsigned char b) +{ + return (vector short)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector short __ATTRS_o_ai +vec_vsl(vector short a, vector unsigned short b) +{ + return (vector short)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector short __ATTRS_o_ai +vec_vsl(vector short a, vector unsigned int b) +{ + return (vector short)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector unsigned short __ATTRS_o_ai +vec_vsl(vector unsigned short 
a, vector unsigned char b) +{ + return (vector unsigned short)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector unsigned short __ATTRS_o_ai +vec_vsl(vector unsigned short a, vector unsigned short b) +{ + return (vector unsigned short)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector unsigned short __ATTRS_o_ai +vec_vsl(vector unsigned short a, vector unsigned int b) +{ + return (vector unsigned short)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector int __ATTRS_o_ai +vec_vsl(vector int a, vector unsigned char b) +{ + return (vector int)__builtin_altivec_vsl(a, (vector int)b); +} + +static vector int __ATTRS_o_ai +vec_vsl(vector int a, vector unsigned short b) +{ + return (vector int)__builtin_altivec_vsl(a, (vector int)b); +} + +static vector int __ATTRS_o_ai +vec_vsl(vector int a, vector unsigned int b) +{ + return (vector int)__builtin_altivec_vsl(a, (vector int)b); +} + +static vector unsigned int __ATTRS_o_ai +vec_vsl(vector unsigned int a, vector unsigned char b) +{ + return (vector unsigned int)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector unsigned int __ATTRS_o_ai +vec_vsl(vector unsigned int a, vector unsigned short b) +{ + return (vector unsigned int)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +static vector unsigned int __ATTRS_o_ai +vec_vsl(vector unsigned int a, vector unsigned int b) +{ + return (vector unsigned int)__builtin_altivec_vsl((vector int)a, (vector int)b); +} + +/* vec_slo */ + +static vector signed char __ATTRS_o_ai +vec_slo(vector signed char a, vector signed char b) +{ + return (vector signed char)__builtin_altivec_vslo((vector int)a, (vector int)b); +} + +static vector signed char __ATTRS_o_ai +vec_slo(vector signed char a, vector unsigned char b) +{ + return (vector signed char)__builtin_altivec_vslo((vector int)a, (vector int)b); +} + +static vector unsigned char __ATTRS_o_ai +vec_slo(vector unsigned char a, vector signed char 
b) +{ + return (vector unsigned char)__builtin_altivec_vslo((vector int)a, (vector int)b); +} + +static vector unsigned char __ATTRS_o_ai +vec_slo(vector unsigned char a, vector unsigned char b) +{ + return (vector unsigned char)__builtin_altivec_vslo((vector int)a, (vector int)b); +} + +static vector short __ATTRS_o_ai +vec_slo(vector short a, vector signed char b) +{ + return (vector short)__builtin_altivec_vslo((vector int)a, (vector int)b); +} + +static vector short __ATTRS_o_ai +vec_slo(vector short a, vector unsigned char b) +{ + return (vector short)__builtin_altivec_vslo((vector int)a, (vector int)b); +} + +static vector unsigned short __ATTRS_o_ai +vec_slo(vector unsigned short a, vector signed char b) +{ + return (vector unsigned short)__builtin_altivec_vslo((vector int)a, (vector int)b); +} + +static vector unsigned short __ATTRS_o_ai +vec_slo(vector unsigned short a, vector unsigned char b) +{ + return (vector unsigned short)__builtin_altivec_vslo((vector int)a, (vector int)b); +} + +static vector int __ATTRS_o_ai +vec_slo(vector int a, vector signed char b) +{ + return (vector int)__builtin_altivec_vslo(a, (vector int)b); +} + +static vector int __ATTRS_o_ai +vec_slo(vector int a, vector unsigned char b) +{ + return (vector int)__builtin_altivec_vslo(a, (vector int)b); +} + +static vector unsigned int __ATTRS_o_ai +vec_slo(vector unsigned int a, vector signed char b) +{ + return (vector unsigned int)__builtin_altivec_vslo((vector int)a, (vector int)b); +} + +static vector unsigned int __ATTRS_o_ai +vec_slo(vector unsigned int a, vector unsigned char b) +{ + return (vector unsigned int)__builtin_altivec_vslo((vector int)a, (vector int)b); +} + +static vector float __ATTRS_o_ai +vec_slo(vector float a, vector signed char b) +{ + return (vector float)__builtin_altivec_vslo((vector int)a, (vector int)b); +} + +static vector float __ATTRS_o_ai +vec_slo(vector float a, vector unsigned char b) +{ + return (vector float)__builtin_altivec_vslo((vector int)a, 
(vector int)b); +} + +/* vec_vslo */ + +static vector signed char __ATTRS_o_ai +vec_vslo(vector signed char a, vector signed char b) +{ + return (vector signed char)__builtin_altivec_vslo((vector int)a, (vector int)b); +} + +static vector signed char __ATTRS_o_ai +vec_vslo(vector signed char a, vector unsigned char b) +{ + return (vector signed char)__builtin_altivec_vslo((vector int)a, (vector int)b); +} + +static vector unsigned char __ATTRS_o_ai +vec_vslo(vector unsigned char a, vector signed char b) +{ + return (vector unsigned char)__builtin_altivec_vslo((vector int)a, (vector int)b); +} + +static vector unsigned char __ATTRS_o_ai +vec_vslo(vector unsigned char a, vector unsigned char b) { - return __builtin_altivec_vcmpgtfp_p(CR6_param, a, b); + return (vector unsigned char)__builtin_altivec_vslo((vector int)a, (vector int)b); } +static vector short __ATTRS_o_ai +vec_vslo(vector short a, vector signed char b) +{ + return (vector short)__builtin_altivec_vslo((vector int)a, (vector int)b); +} + +static vector short __ATTRS_o_ai +vec_vslo(vector short a, vector unsigned char b) +{ + return (vector short)__builtin_altivec_vslo((vector int)a, (vector int)b); +} + +static vector unsigned short __ATTRS_o_ai +vec_vslo(vector unsigned short a, vector signed char b) +{ + return (vector unsigned short)__builtin_altivec_vslo((vector int)a, (vector int)b); +} + +static vector unsigned short __ATTRS_o_ai +vec_vslo(vector unsigned short a, vector unsigned char b) +{ + return (vector unsigned short)__builtin_altivec_vslo((vector int)a, (vector int)b); +} + +static vector int __ATTRS_o_ai +vec_vslo(vector int a, vector signed char b) +{ + return (vector int)__builtin_altivec_vslo(a, (vector int)b); +} + +static vector int __ATTRS_o_ai +vec_vslo(vector int a, vector unsigned char b) +{ + return (vector int)__builtin_altivec_vslo(a, (vector int)b); +} + +static vector unsigned int __ATTRS_o_ai +vec_vslo(vector unsigned int a, vector signed char b) +{ + return (vector unsigned 
int)__builtin_altivec_vslo((vector int)a, (vector int)b); +} + +static vector unsigned int __ATTRS_o_ai +vec_vslo(vector unsigned int a, vector unsigned char b) +{ + return (vector unsigned int)__builtin_altivec_vslo((vector int)a, (vector int)b); +} + +static vector float __ATTRS_o_ai +vec_vslo(vector float a, vector signed char b) +{ + return (vector float)__builtin_altivec_vslo((vector int)a, (vector int)b); +} + +static vector float __ATTRS_o_ai +vec_vslo(vector float a, vector unsigned char b) +{ + return (vector float)__builtin_altivec_vslo((vector int)a, (vector int)b); +} + +/* vec_splat */ + +static vector signed char __ATTRS_o_ai +vec_splat(vector signed char a, unsigned char b) +{ + return (vector signed char)vec_perm(a, a, (vector unsigned char)(b)); +} + +static vector unsigned char __ATTRS_o_ai +vec_splat(vector unsigned char a, unsigned char b) +{ + return (vector unsigned char)vec_perm(a, a, (vector unsigned char)(b)); +} + +static vector short __ATTRS_o_ai +vec_splat(vector short a, unsigned char b) +{ + b *= 2; + return (vector short)vec_perm(a, a, (vector unsigned char) + (b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1)); +} + +static vector unsigned short __ATTRS_o_ai +vec_splat(vector unsigned short a, unsigned char b) +{ + b *= 2; + return (vector unsigned short)vec_perm(a, a, (vector unsigned char) + (b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1)); +} + +static vector int __ATTRS_o_ai +vec_splat(vector int a, unsigned char b) +{ + b *= 4; + return vec_perm(a, a, (vector unsigned char) + (b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3)); +} + +static vector unsigned int __ATTRS_o_ai +vec_splat(vector unsigned int a, unsigned char b) +{ + b *= 4; + return (vector unsigned int)vec_perm(a, a, (vector unsigned char) + (b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3)); +} + +static vector float __ATTRS_o_ai +vec_splat(vector float a, unsigned char b) +{ + b *= 4; + 
return (vector float)vec_perm(a, a, (vector unsigned char) + (b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3)); +} + +/* vec_vspltb */ + +#define __builtin_altivec_vspltb vec_vspltb + +static vector signed char __ATTRS_o_ai +vec_vspltb(vector signed char a, unsigned char b) +{ + return (vector signed char)vec_perm(a, a, (vector unsigned char)(b)); +} + +static vector unsigned char __ATTRS_o_ai +vec_vspltb(vector unsigned char a, unsigned char b) +{ + return (vector unsigned char)vec_perm(a, a, (vector unsigned char)(b)); +} + +/* vec_vsplth */ + +#define __builtin_altivec_vsplth vec_vsplth + +static vector short __ATTRS_o_ai +vec_vsplth(vector short a, unsigned char b) +{ + b *= 2; + return (vector short)vec_perm(a, a, (vector unsigned char) + (b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1)); +} + +static vector unsigned short __ATTRS_o_ai +vec_vsplth(vector unsigned short a, unsigned char b) +{ + b *= 2; + return (vector unsigned short)vec_perm(a, a, (vector unsigned char) + (b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1, b, b+1)); +} + +/* vec_vspltw */ + +#define __builtin_altivec_vspltw vec_vspltw + +static vector int __ATTRS_o_ai +vec_vspltw(vector int a, unsigned char b) +{ + b *= 4; + return (vector int)vec_perm(a, a, (vector unsigned char) + (b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3)); +} + +static vector unsigned int __ATTRS_o_ai +vec_vspltw(vector unsigned int a, unsigned char b) +{ + b *= 4; + return (vector unsigned int)vec_perm(a, a, (vector unsigned char) + (b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3)); +} + +static vector float __ATTRS_o_ai +vec_vspltw(vector float a, unsigned char b) +{ + b *= 4; + return (vector float)vec_perm(a, a, (vector unsigned char) + (b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3, b, b+1, b+2, b+3)); +} + +/* vec_splat_s8 */ + +#define __builtin_altivec_vspltisb vec_splat_s8 + +// FIXME: parameter should be 
treated as 5-bit signed literal +static vector signed char __ATTRS_o_ai +vec_splat_s8(signed char a) +{ + return (vector signed char)(a); +} + +/* vec_vspltisb */ + +// FIXME: parameter should be treated as 5-bit signed literal +static vector signed char __ATTRS_o_ai +vec_vspltisb(signed char a) +{ + return (vector signed char)(a); +} + +/* vec_splat_s16 */ + +#define __builtin_altivec_vspltish vec_splat_s16 + +// FIXME: parameter should be treated as 5-bit signed literal +static vector short __ATTRS_o_ai +vec_splat_s16(signed char a) +{ + return (vector short)(a); +} + +/* vec_vspltish */ + +// FIXME: parameter should be treated as 5-bit signed literal +static vector short __ATTRS_o_ai +vec_vspltish(signed char a) +{ + return (vector short)(a); +} + +/* vec_splat_s32 */ + +#define __builtin_altivec_vspltisw vec_splat_s32 + +// FIXME: parameter should be treated as 5-bit signed literal +static vector int __ATTRS_o_ai +vec_splat_s32(signed char a) +{ + return (vector int)(a); +} + +/* vec_vspltisw */ + +// FIXME: parameter should be treated as 5-bit signed literal +static vector int __ATTRS_o_ai +vec_vspltisw(signed char a) +{ + return (vector int)(a); +} + +/* vec_splat_u8 */ + +// FIXME: parameter should be treated as 5-bit signed literal +static vector unsigned char __ATTRS_o_ai +vec_splat_u8(unsigned char a) +{ + return (vector unsigned char)(a); +} + +/* vec_splat_u16 */ + +// FIXME: parameter should be treated as 5-bit signed literal +static vector unsigned short __ATTRS_o_ai +vec_splat_u16(signed char a) +{ + return (vector unsigned short)(a); +} + +/* vec_splat_u32 */ + +// FIXME: parameter should be treated as 5-bit signed literal +static vector unsigned int __ATTRS_o_ai +vec_splat_u32(signed char a) +{ + return (vector unsigned int)(a); +} + +/* vec_sr */ + +static vector signed char __ATTRS_o_ai +vec_sr(vector signed char a, vector unsigned char b) +{ + return a >> (vector signed char)b; +} + +static vector unsigned char __ATTRS_o_ai +vec_sr(vector 
unsigned char a, vector unsigned char b) +{ + return a >> b; +} + +static vector short __ATTRS_o_ai +vec_sr(vector short a, vector unsigned short b) +{ + return a >> (vector short)b; +} + +static vector unsigned short __ATTRS_o_ai +vec_sr(vector unsigned short a, vector unsigned short b) +{ + return a >> b; +} + +static vector int __ATTRS_o_ai +vec_sr(vector int a, vector unsigned int b) +{ + return a >> (vector int)b; +} + +static vector unsigned int __ATTRS_o_ai +vec_sr(vector unsigned int a, vector unsigned int b) +{ + return a >> b; +} + +/* vec_vsrb */ + +#define __builtin_altivec_vsrb vec_vsrb + +static vector signed char __ATTRS_o_ai +vec_vsrb(vector signed char a, vector unsigned char b) +{ + return a >> (vector signed char)b; +} + +static vector unsigned char __ATTRS_o_ai +vec_vsrb(vector unsigned char a, vector unsigned char b) +{ + return a >> b; +} + +/* vec_vsrh */ + +#define __builtin_altivec_vsrh vec_vsrh + +static vector short __ATTRS_o_ai +vec_vsrh(vector short a, vector unsigned short b) +{ + return a >> (vector short)b; +} + +static vector unsigned short __ATTRS_o_ai +vec_vsrh(vector unsigned short a, vector unsigned short b) +{ + return a >> b; +} + +/* vec_vsrw */ + +#define __builtin_altivec_vsrw vec_vsrw + +static vector int __ATTRS_o_ai +vec_vsrw(vector int a, vector unsigned int b) +{ + return a >> (vector int)b; +} + +static vector unsigned int __ATTRS_o_ai +vec_vsrw(vector unsigned int a, vector unsigned int b) +{ + return a >> b; +} + +/* vec_sra */ + +static vector signed char __ATTRS_o_ai +vec_sra(vector signed char a, vector unsigned char b) +{ + return (vector signed char)__builtin_altivec_vsrab((vector char)a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_sra(vector unsigned char a, vector unsigned char b) +{ + return (vector unsigned char)__builtin_altivec_vsrab((vector char)a, b); +} + +static vector short __ATTRS_o_ai +vec_sra(vector short a, vector unsigned short b) +{ + return __builtin_altivec_vsrah(a, (vector 
unsigned short)b); +} + +static vector unsigned short __ATTRS_o_ai +vec_sra(vector unsigned short a, vector unsigned short b) +{ + return (vector unsigned short)__builtin_altivec_vsrah((vector short)a, b); +} + +static vector int __ATTRS_o_ai +vec_sra(vector int a, vector unsigned int b) +{ + return __builtin_altivec_vsraw(a, b); +} + +static vector unsigned int __ATTRS_o_ai +vec_sra(vector unsigned int a, vector unsigned int b) +{ + return (vector unsigned int)__builtin_altivec_vsraw((vector int)a, b); +} + +/* vec_vsrab */ + +static vector signed char __ATTRS_o_ai +vec_vsrab(vector signed char a, vector unsigned char b) +{ + return (vector signed char)__builtin_altivec_vsrab((vector char)a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_vsrab(vector unsigned char a, vector unsigned char b) +{ + return (vector unsigned char)__builtin_altivec_vsrab((vector char)a, b); +} + +/* vec_vsrah */ + +static vector short __ATTRS_o_ai +vec_vsrah(vector short a, vector unsigned short b) +{ + return __builtin_altivec_vsrah(a, (vector unsigned short)b); +} + +static vector unsigned short __ATTRS_o_ai +vec_vsrah(vector unsigned short a, vector unsigned short b) +{ + return (vector unsigned short)__builtin_altivec_vsrah((vector short)a, b); +} + +/* vec_vsraw */ + +static vector int __ATTRS_o_ai +vec_vsraw(vector int a, vector unsigned int b) +{ + return __builtin_altivec_vsraw(a, b); +} + +static vector unsigned int __ATTRS_o_ai +vec_vsraw(vector unsigned int a, vector unsigned int b) +{ + return (vector unsigned int)__builtin_altivec_vsraw((vector int)a, b); +} + +/* vec_srl */ + +static vector signed char __ATTRS_o_ai +vec_srl(vector signed char a, vector unsigned char b) +{ + return (vector signed char)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector signed char __ATTRS_o_ai +vec_srl(vector signed char a, vector unsigned short b) +{ + return (vector signed char)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector signed 
char __ATTRS_o_ai +vec_srl(vector signed char a, vector unsigned int b) +{ + return (vector signed char)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector unsigned char __ATTRS_o_ai +vec_srl(vector unsigned char a, vector unsigned char b) +{ + return (vector unsigned char)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector unsigned char __ATTRS_o_ai +vec_srl(vector unsigned char a, vector unsigned short b) +{ + return (vector unsigned char)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector unsigned char __ATTRS_o_ai +vec_srl(vector unsigned char a, vector unsigned int b) +{ + return (vector unsigned char)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector short __ATTRS_o_ai +vec_srl(vector short a, vector unsigned char b) +{ + return (vector short)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector short __ATTRS_o_ai +vec_srl(vector short a, vector unsigned short b) +{ + return (vector short)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector short __ATTRS_o_ai +vec_srl(vector short a, vector unsigned int b) +{ + return (vector short)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector unsigned short __ATTRS_o_ai +vec_srl(vector unsigned short a, vector unsigned char b) +{ + return (vector unsigned short)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector unsigned short __ATTRS_o_ai +vec_srl(vector unsigned short a, vector unsigned short b) +{ + return (vector unsigned short)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector unsigned short __ATTRS_o_ai +vec_srl(vector unsigned short a, vector unsigned int b) +{ + return (vector unsigned short)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector int __ATTRS_o_ai +vec_srl(vector int a, vector unsigned char b) +{ + return (vector int)__builtin_altivec_vsr(a, (vector int)b); +} + +static vector int __ATTRS_o_ai 
+vec_srl(vector int a, vector unsigned short b) +{ + return (vector int)__builtin_altivec_vsr(a, (vector int)b); +} + +static vector int __ATTRS_o_ai +vec_srl(vector int a, vector unsigned int b) +{ + return (vector int)__builtin_altivec_vsr(a, (vector int)b); +} + +static vector unsigned int __ATTRS_o_ai +vec_srl(vector unsigned int a, vector unsigned char b) +{ + return (vector unsigned int)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector unsigned int __ATTRS_o_ai +vec_srl(vector unsigned int a, vector unsigned short b) +{ + return (vector unsigned int)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector unsigned int __ATTRS_o_ai +vec_srl(vector unsigned int a, vector unsigned int b) +{ + return (vector unsigned int)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +/* vec_vsr */ + +static vector signed char __ATTRS_o_ai +vec_vsr(vector signed char a, vector unsigned char b) +{ + return (vector signed char)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector signed char __ATTRS_o_ai +vec_vsr(vector signed char a, vector unsigned short b) +{ + return (vector signed char)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector signed char __ATTRS_o_ai +vec_vsr(vector signed char a, vector unsigned int b) +{ + return (vector signed char)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector unsigned char __ATTRS_o_ai +vec_vsr(vector unsigned char a, vector unsigned char b) +{ + return (vector unsigned char)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector unsigned char __ATTRS_o_ai +vec_vsr(vector unsigned char a, vector unsigned short b) +{ + return (vector unsigned char)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector unsigned char __ATTRS_o_ai +vec_vsr(vector unsigned char a, vector unsigned int b) +{ + return (vector unsigned char)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector short 
__ATTRS_o_ai +vec_vsr(vector short a, vector unsigned char b) +{ + return (vector short)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector short __ATTRS_o_ai +vec_vsr(vector short a, vector unsigned short b) +{ + return (vector short)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector short __ATTRS_o_ai +vec_vsr(vector short a, vector unsigned int b) +{ + return (vector short)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector unsigned short __ATTRS_o_ai +vec_vsr(vector unsigned short a, vector unsigned char b) +{ + return (vector unsigned short)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector unsigned short __ATTRS_o_ai +vec_vsr(vector unsigned short a, vector unsigned short b) +{ + return (vector unsigned short)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector unsigned short __ATTRS_o_ai +vec_vsr(vector unsigned short a, vector unsigned int b) +{ + return (vector unsigned short)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector int __ATTRS_o_ai +vec_vsr(vector int a, vector unsigned char b) +{ + return (vector int)__builtin_altivec_vsr(a, (vector int)b); +} + +static vector int __ATTRS_o_ai +vec_vsr(vector int a, vector unsigned short b) +{ + return (vector int)__builtin_altivec_vsr(a, (vector int)b); +} + +static vector int __ATTRS_o_ai +vec_vsr(vector int a, vector unsigned int b) +{ + return (vector int)__builtin_altivec_vsr(a, (vector int)b); +} + +static vector unsigned int __ATTRS_o_ai +vec_vsr(vector unsigned int a, vector unsigned char b) +{ + return (vector unsigned int)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector unsigned int __ATTRS_o_ai +vec_vsr(vector unsigned int a, vector unsigned short b) +{ + return (vector unsigned int)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +static vector unsigned int __ATTRS_o_ai +vec_vsr(vector unsigned int a, vector unsigned int b) +{ + return 
(vector unsigned int)__builtin_altivec_vsr((vector int)a, (vector int)b); +} + +/* vec_sro */ + +static vector signed char __ATTRS_o_ai +vec_sro(vector signed char a, vector signed char b) +{ + return (vector signed char)__builtin_altivec_vsro((vector int)a, (vector int)b); +} + +static vector signed char __ATTRS_o_ai +vec_sro(vector signed char a, vector unsigned char b) +{ + return (vector signed char)__builtin_altivec_vsro((vector int)a, (vector int)b); +} + +static vector unsigned char __ATTRS_o_ai +vec_sro(vector unsigned char a, vector signed char b) +{ + return (vector unsigned char)__builtin_altivec_vsro((vector int)a, (vector int)b); +} + +static vector unsigned char __ATTRS_o_ai +vec_sro(vector unsigned char a, vector unsigned char b) +{ + return (vector unsigned char)__builtin_altivec_vsro((vector int)a, (vector int)b); +} + +static vector short __ATTRS_o_ai +vec_sro(vector short a, vector signed char b) +{ + return (vector short)__builtin_altivec_vsro((vector int)a, (vector int)b); +} + +static vector short __ATTRS_o_ai +vec_sro(vector short a, vector unsigned char b) +{ + return (vector short)__builtin_altivec_vsro((vector int)a, (vector int)b); +} + +static vector unsigned short __ATTRS_o_ai +vec_sro(vector unsigned short a, vector signed char b) +{ + return (vector unsigned short)__builtin_altivec_vsro((vector int)a, (vector int)b); +} + +static vector unsigned short __ATTRS_o_ai +vec_sro(vector unsigned short a, vector unsigned char b) +{ + return (vector unsigned short)__builtin_altivec_vsro((vector int)a, (vector int)b); +} + +static vector int __ATTRS_o_ai +vec_sro(vector int a, vector signed char b) +{ + return (vector int)__builtin_altivec_vsro(a, (vector int)b); +} + +static vector int __ATTRS_o_ai +vec_sro(vector int a, vector unsigned char b) +{ + return (vector int)__builtin_altivec_vsro(a, (vector int)b); +} + +static vector unsigned int __ATTRS_o_ai +vec_sro(vector unsigned int a, vector signed char b) +{ + return (vector unsigned 
int)__builtin_altivec_vsro((vector int)a, (vector int)b); +} + +static vector unsigned int __ATTRS_o_ai +vec_sro(vector unsigned int a, vector unsigned char b) +{ + return (vector unsigned int)__builtin_altivec_vsro((vector int)a, (vector int)b); +} + +static vector float __ATTRS_o_ai +vec_sro(vector float a, vector signed char b) +{ + return (vector float)__builtin_altivec_vsro((vector int)a, (vector int)b); +} + +static vector float __ATTRS_o_ai +vec_sro(vector float a, vector unsigned char b) +{ + return (vector float)__builtin_altivec_vsro((vector int)a, (vector int)b); +} + +/* vec_vsro */ + +static vector signed char __ATTRS_o_ai +vec_vsro(vector signed char a, vector signed char b) +{ + return (vector signed char)__builtin_altivec_vsro((vector int)a, (vector int)b); +} + +static vector signed char __ATTRS_o_ai +vec_vsro(vector signed char a, vector unsigned char b) +{ + return (vector signed char)__builtin_altivec_vsro((vector int)a, (vector int)b); +} + +static vector unsigned char __ATTRS_o_ai +vec_vsro(vector unsigned char a, vector signed char b) +{ + return (vector unsigned char)__builtin_altivec_vsro((vector int)a, (vector int)b); +} + +static vector unsigned char __ATTRS_o_ai +vec_vsro(vector unsigned char a, vector unsigned char b) +{ + return (vector unsigned char)__builtin_altivec_vsro((vector int)a, (vector int)b); +} + +static vector short __ATTRS_o_ai +vec_vsro(vector short a, vector signed char b) +{ + return (vector short)__builtin_altivec_vsro((vector int)a, (vector int)b); +} + +static vector short __ATTRS_o_ai +vec_vsro(vector short a, vector unsigned char b) +{ + return (vector short)__builtin_altivec_vsro((vector int)a, (vector int)b); +} + +static vector unsigned short __ATTRS_o_ai +vec_vsro(vector unsigned short a, vector signed char b) +{ + return (vector unsigned short)__builtin_altivec_vsro((vector int)a, (vector int)b); +} + +static vector unsigned short __ATTRS_o_ai +vec_vsro(vector unsigned short a, vector unsigned char b) +{ + 
return (vector unsigned short)__builtin_altivec_vsro((vector int)a, (vector int)b); +} + +static vector int __ATTRS_o_ai +vec_vsro(vector int a, vector signed char b) +{ + return (vector int)__builtin_altivec_vsro(a, (vector int)b); +} + +static vector int __ATTRS_o_ai +vec_vsro(vector int a, vector unsigned char b) +{ + return (vector int)__builtin_altivec_vsro(a, (vector int)b); +} + +static vector unsigned int __ATTRS_o_ai +vec_vsro(vector unsigned int a, vector signed char b) +{ + return (vector unsigned int)__builtin_altivec_vsro((vector int)a, (vector int)b); +} + +static vector unsigned int __ATTRS_o_ai +vec_vsro(vector unsigned int a, vector unsigned char b) +{ + return (vector unsigned int)__builtin_altivec_vsro((vector int)a, (vector int)b); +} + +static vector float __ATTRS_o_ai +vec_vsro(vector float a, vector signed char b) +{ + return (vector float)__builtin_altivec_vsro((vector int)a, (vector int)b); +} + +static vector float __ATTRS_o_ai +vec_vsro(vector float a, vector unsigned char b) +{ + return (vector float)__builtin_altivec_vsro((vector int)a, (vector int)b); +} + +/* vec_st */ + +static void __ATTRS_o_ai +vec_st(vector signed char a, int b, vector signed char *c) +{ + __builtin_altivec_stvx((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_st(vector signed char a, int b, signed char *c) +{ + __builtin_altivec_stvx((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_st(vector unsigned char a, int b, vector unsigned char *c) +{ + __builtin_altivec_stvx((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_st(vector unsigned char a, int b, unsigned char *c) +{ + __builtin_altivec_stvx((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_st(vector short a, int b, vector short *c) +{ + __builtin_altivec_stvx((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_st(vector short a, int b, short *c) +{ + __builtin_altivec_stvx((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_st(vector unsigned short a, int b, 
vector unsigned short *c) +{ + __builtin_altivec_stvx((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_st(vector unsigned short a, int b, unsigned short *c) +{ + __builtin_altivec_stvx((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_st(vector int a, int b, vector int *c) +{ + __builtin_altivec_stvx(a, b, c); +} + +static void __ATTRS_o_ai +vec_st(vector int a, int b, int *c) +{ + __builtin_altivec_stvx(a, b, c); +} + +static void __ATTRS_o_ai +vec_st(vector unsigned int a, int b, vector unsigned int *c) +{ + __builtin_altivec_stvx((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_st(vector unsigned int a, int b, unsigned int *c) +{ + __builtin_altivec_stvx((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_st(vector float a, int b, vector float *c) +{ + __builtin_altivec_stvx((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_st(vector float a, int b, float *c) +{ + __builtin_altivec_stvx((vector int)a, b, c); +} + +/* vec_stvx */ + +static void __ATTRS_o_ai +vec_stvx(vector signed char a, int b, vector signed char *c) +{ + __builtin_altivec_stvx((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stvx(vector signed char a, int b, signed char *c) +{ + __builtin_altivec_stvx((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stvx(vector unsigned char a, int b, vector unsigned char *c) +{ + __builtin_altivec_stvx((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stvx(vector unsigned char a, int b, unsigned char *c) +{ + __builtin_altivec_stvx((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stvx(vector short a, int b, vector short *c) +{ + __builtin_altivec_stvx((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stvx(vector short a, int b, short *c) +{ + __builtin_altivec_stvx((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stvx(vector unsigned short a, int b, vector unsigned short *c) +{ + __builtin_altivec_stvx((vector int)a, b, c); +} + +static void __ATTRS_o_ai 
+vec_stvx(vector unsigned short a, int b, unsigned short *c) +{ + __builtin_altivec_stvx((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stvx(vector int a, int b, vector int *c) +{ + __builtin_altivec_stvx(a, b, c); +} + +static void __ATTRS_o_ai +vec_stvx(vector int a, int b, int *c) +{ + __builtin_altivec_stvx(a, b, c); +} + +static void __ATTRS_o_ai +vec_stvx(vector unsigned int a, int b, vector unsigned int *c) +{ + __builtin_altivec_stvx((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stvx(vector unsigned int a, int b, unsigned int *c) +{ + __builtin_altivec_stvx((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stvx(vector float a, int b, vector float *c) +{ + __builtin_altivec_stvx((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stvx(vector float a, int b, float *c) +{ + __builtin_altivec_stvx((vector int)a, b, c); +} + +/* vec_ste */ + +static void __ATTRS_o_ai +vec_ste(vector signed char a, int b, signed char *c) +{ + __builtin_altivec_stvebx((vector char)a, b, c); +} + +static void __ATTRS_o_ai +vec_ste(vector unsigned char a, int b, unsigned char *c) +{ + __builtin_altivec_stvebx((vector char)a, b, c); +} + +static void __ATTRS_o_ai +vec_ste(vector short a, int b, short *c) +{ + __builtin_altivec_stvehx(a, b, c); +} + +static void __ATTRS_o_ai +vec_ste(vector unsigned short a, int b, unsigned short *c) +{ + __builtin_altivec_stvehx((vector short)a, b, c); +} + +static void __ATTRS_o_ai +vec_ste(vector int a, int b, int *c) +{ + __builtin_altivec_stvewx(a, b, c); +} + +static void __ATTRS_o_ai +vec_ste(vector unsigned int a, int b, unsigned int *c) +{ + __builtin_altivec_stvewx((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_ste(vector float a, int b, float *c) +{ + __builtin_altivec_stvewx((vector int)a, b, c); +} + +/* vec_stvebx */ + +static void __ATTRS_o_ai +vec_stvebx(vector signed char a, int b, signed char *c) +{ + __builtin_altivec_stvebx((vector char)a, b, c); +} + +static void __ATTRS_o_ai 
+vec_stvebx(vector unsigned char a, int b, unsigned char *c) +{ + __builtin_altivec_stvebx((vector char)a, b, c); +} + +/* vec_stvehx */ + +static void __ATTRS_o_ai +vec_stvehx(vector short a, int b, short *c) +{ + __builtin_altivec_stvehx(a, b, c); +} + +static void __ATTRS_o_ai +vec_stvehx(vector unsigned short a, int b, unsigned short *c) +{ + __builtin_altivec_stvehx((vector short)a, b, c); +} + +/* vec_stvewx */ + +static void __ATTRS_o_ai +vec_stvewx(vector int a, int b, int *c) +{ + __builtin_altivec_stvewx(a, b, c); +} + +static void __ATTRS_o_ai +vec_stvewx(vector unsigned int a, int b, unsigned int *c) +{ + __builtin_altivec_stvewx((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stvewx(vector float a, int b, float *c) +{ + __builtin_altivec_stvewx((vector int)a, b, c); +} + +/* vec_stl */ + +static void __ATTRS_o_ai +vec_stl(vector signed char a, int b, vector signed char *c) +{ + __builtin_altivec_stvxl((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stl(vector signed char a, int b, signed char *c) +{ + __builtin_altivec_stvxl((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stl(vector unsigned char a, int b, vector unsigned char *c) +{ + __builtin_altivec_stvxl((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stl(vector unsigned char a, int b, unsigned char *c) +{ + __builtin_altivec_stvxl((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stl(vector short a, int b, vector short *c) +{ + __builtin_altivec_stvxl((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stl(vector short a, int b, short *c) +{ + __builtin_altivec_stvxl((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stl(vector unsigned short a, int b, vector unsigned short *c) +{ + __builtin_altivec_stvxl((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stl(vector unsigned short a, int b, unsigned short *c) +{ + __builtin_altivec_stvxl((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stl(vector int a, int b, vector 
int *c) +{ + __builtin_altivec_stvxl(a, b, c); +} + +static void __ATTRS_o_ai +vec_stl(vector int a, int b, int *c) +{ + __builtin_altivec_stvxl(a, b, c); +} + +static void __ATTRS_o_ai +vec_stl(vector unsigned int a, int b, vector unsigned int *c) +{ + __builtin_altivec_stvxl((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stl(vector unsigned int a, int b, unsigned int *c) +{ + __builtin_altivec_stvxl((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stl(vector float a, int b, vector float *c) +{ + __builtin_altivec_stvxl((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stl(vector float a, int b, float *c) +{ + __builtin_altivec_stvxl((vector int)a, b, c); +} + +/* vec_stvxl */ + +static void __ATTRS_o_ai +vec_stvxl(vector signed char a, int b, vector signed char *c) +{ + __builtin_altivec_stvxl((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stvxl(vector signed char a, int b, signed char *c) +{ + __builtin_altivec_stvxl((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stvxl(vector unsigned char a, int b, vector unsigned char *c) +{ + __builtin_altivec_stvxl((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stvxl(vector unsigned char a, int b, unsigned char *c) +{ + __builtin_altivec_stvxl((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stvxl(vector short a, int b, vector short *c) +{ + __builtin_altivec_stvxl((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stvxl(vector short a, int b, short *c) +{ + __builtin_altivec_stvxl((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stvxl(vector unsigned short a, int b, vector unsigned short *c) +{ + __builtin_altivec_stvxl((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stvxl(vector unsigned short a, int b, unsigned short *c) +{ + __builtin_altivec_stvxl((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stvxl(vector int a, int b, vector int *c) +{ + __builtin_altivec_stvxl(a, b, c); +} + +static void __ATTRS_o_ai 
+vec_stvxl(vector int a, int b, int *c) +{ + __builtin_altivec_stvxl(a, b, c); +} + +static void __ATTRS_o_ai +vec_stvxl(vector unsigned int a, int b, vector unsigned int *c) +{ + __builtin_altivec_stvxl((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stvxl(vector unsigned int a, int b, unsigned int *c) +{ + __builtin_altivec_stvxl((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stvxl(vector float a, int b, vector float *c) +{ + __builtin_altivec_stvxl((vector int)a, b, c); +} + +static void __ATTRS_o_ai +vec_stvxl(vector float a, int b, float *c) +{ + __builtin_altivec_stvxl((vector int)a, b, c); +} + +/* vec_sub */ + +static vector signed char __ATTRS_o_ai +vec_sub(vector signed char a, vector signed char b) +{ + return a - b; +} + +static vector unsigned char __ATTRS_o_ai +vec_sub(vector unsigned char a, vector unsigned char b) +{ + return a - b; +} + +static vector short __ATTRS_o_ai +vec_sub(vector short a, vector short b) +{ + return a - b; +} + +static vector unsigned short __ATTRS_o_ai +vec_sub(vector unsigned short a, vector unsigned short b) +{ + return a - b; +} + +static vector int __ATTRS_o_ai +vec_sub(vector int a, vector int b) +{ + return a - b; +} + +static vector unsigned int __ATTRS_o_ai +vec_sub(vector unsigned int a, vector unsigned int b) +{ + return a - b; +} + +static vector float __ATTRS_o_ai +vec_sub(vector float a, vector float b) +{ + return a - b; +} + +/* vec_vsububm */ + +#define __builtin_altivec_vsububm vec_vsububm + +static vector signed char __ATTRS_o_ai +vec_vsububm(vector signed char a, vector signed char b) +{ + return a - b; +} + +static vector unsigned char __ATTRS_o_ai +vec_vsububm(vector unsigned char a, vector unsigned char b) +{ + return a - b; +} + +/* vec_vsubuhm */ + +#define __builtin_altivec_vsubuhm vec_vsubuhm + +static vector short __ATTRS_o_ai +vec_vsubuhm(vector short a, vector short b) +{ + return a - b; +} + +static vector unsigned short __ATTRS_o_ai +vec_vsubuhm(vector unsigned short a, 
vector unsigned short b) +{ + return a - b; +} + +/* vec_vsubuwm */ + +#define __builtin_altivec_vsubuwm vec_vsubuwm + +static vector int __ATTRS_o_ai +vec_vsubuwm(vector int a, vector int b) +{ + return a - b; +} + +static vector unsigned int __ATTRS_o_ai +vec_vsubuwm(vector unsigned int a, vector unsigned int b) +{ + return a - b; +} + +/* vec_vsubfp */ + +#define __builtin_altivec_vsubfp vec_vsubfp + +static vector float __attribute__((__always_inline__)) +vec_vsubfp(vector float a, vector float b) +{ + return a - b; +} + +/* vec_subc */ + +static vector unsigned int __attribute__((__always_inline__)) +vec_subc(vector unsigned int a, vector unsigned int b) +{ + return __builtin_altivec_vsubcuw(a, b); +} + +/* vec_vsubcuw */ + +static vector unsigned int __attribute__((__always_inline__)) +vec_vsubcuw(vector unsigned int a, vector unsigned int b) +{ + return __builtin_altivec_vsubcuw(a, b); +} + +/* vec_subs */ + +static vector signed char __ATTRS_o_ai +vec_subs(vector signed char a, vector signed char b) +{ + return __builtin_altivec_vsubsbs(a, b); +} + +static vector unsigned char __ATTRS_o_ai +vec_subs(vector unsigned char a, vector unsigned char b) +{ + return __builtin_altivec_vsububs(a, b); +} + +static vector short __ATTRS_o_ai +vec_subs(vector short a, vector short b) +{ + return __builtin_altivec_vsubshs(a, b); +} + +static vector unsigned short __ATTRS_o_ai +vec_subs(vector unsigned short a, vector unsigned short b) +{ + return __builtin_altivec_vsubuhs(a, b); +} + +static vector int __ATTRS_o_ai +vec_subs(vector int a, vector int b) +{ + return __builtin_altivec_vsubsws(a, b); +} + +static vector unsigned int __ATTRS_o_ai +vec_subs(vector unsigned int a, vector unsigned int b) +{ + return __builtin_altivec_vsubuws(a, b); +} + +/* vec_vsubsbs */ + +static vector signed char __attribute__((__always_inline__)) +vec_vsubsbs(vector signed char a, vector signed char b) +{ + return __builtin_altivec_vsubsbs(a, b); +} + +/* vec_vsububs */ + +static vector 
unsigned char __attribute__((__always_inline__)) +vec_vsububs(vector unsigned char a, vector unsigned char b) +{ + return __builtin_altivec_vsububs(a, b); +} + +/* vec_vsubshs */ + +static vector short __attribute__((__always_inline__)) +vec_vsubshs(vector short a, vector short b) +{ + return __builtin_altivec_vsubshs(a, b); +} + +/* vec_vsubuhs */ + +static vector unsigned short __attribute__((__always_inline__)) +vec_vsubuhs(vector unsigned short a, vector unsigned short b) +{ + return __builtin_altivec_vsubuhs(a, b); +} + +/* vec_vsubsws */ + +static vector int __attribute__((__always_inline__)) +vec_vsubsws(vector int a, vector int b) +{ + return __builtin_altivec_vsubsws(a, b); +} + +/* vec_vsubuws */ + +static vector unsigned int __attribute__((__always_inline__)) +vec_vsubuws(vector unsigned int a, vector unsigned int b) +{ + return __builtin_altivec_vsubuws(a, b); +} + +/* vec_sum4s */ + +static vector int __ATTRS_o_ai +vec_sum4s(vector signed char a, vector int b) +{ + return __builtin_altivec_vsum4sbs(a, b); +} + +static vector unsigned int __ATTRS_o_ai +vec_sum4s(vector unsigned char a, vector unsigned int b) +{ + return __builtin_altivec_vsum4ubs(a, b); +} + +static vector int __ATTRS_o_ai +vec_sum4s(vector signed short a, vector int b) +{ + return __builtin_altivec_vsum4shs(a, b); +} + +/* vec_vsum4sbs */ + +static vector int __attribute__((__always_inline__)) +vec_vsum4sbs(vector signed char a, vector int b) +{ + return __builtin_altivec_vsum4sbs(a, b); +} + +/* vec_vsum4ubs */ + +static vector unsigned int __attribute__((__always_inline__)) +vec_vsum4ubs(vector unsigned char a, vector unsigned int b) +{ + return __builtin_altivec_vsum4ubs(a, b); +} + +/* vec_vsum4shs */ + +static vector int __attribute__((__always_inline__)) +vec_vsum4shs(vector signed short a, vector int b) +{ + return __builtin_altivec_vsum4shs(a, b); +} + +/* vec_sum2s */ + +static vector signed int __attribute__((__always_inline__)) +vec_sum2s(vector int a, vector int b) +{ + 
return __builtin_altivec_vsum2sws(a, b); +} + +/* vec_vsum2sws */ + +static vector signed int __attribute__((__always_inline__)) +vec_vsum2sws(vector int a, vector int b) +{ + return __builtin_altivec_vsum2sws(a, b); +} + +/* vec_sums */ + +static vector signed int __attribute__((__always_inline__)) +vec_sums(vector signed int a, vector signed int b) +{ + return __builtin_altivec_vsumsws(a, b); +} + +/* vec_vsumsws */ + +static vector signed int __attribute__((__always_inline__)) +vec_vsumsws(vector signed int a, vector signed int b) +{ + return __builtin_altivec_vsumsws(a, b); +} + +/* vec_trunc */ + +static vector float __attribute__((__always_inline__)) +vec_trunc(vector float a) +{ + return __builtin_altivec_vrfiz(a); +} + +/* vec_vrfiz */ + +static vector float __attribute__((__always_inline__)) +vec_vrfiz(vector float a) +{ + return __builtin_altivec_vrfiz(a); +} + +/* vec_unpackh */ + +static vector short __ATTRS_o_ai +vec_unpackh(vector signed char a) +{ + return __builtin_altivec_vupkhsb((vector char)a); +} + +static vector int __ATTRS_o_ai +vec_unpackh(vector short a) +{ + return __builtin_altivec_vupkhsh(a); +} + +/* vec_vupkhsb */ + +static vector short __attribute__((__always_inline__)) +vec_vupkhsb(vector signed char a) +{ + return __builtin_altivec_vupkhsb((vector char)a); +} + +/* vec_vupkhsh */ + +static vector int __attribute__((__always_inline__)) +vec_vupkhsh(vector short a) +{ + return __builtin_altivec_vupkhsh(a); +} + +/* vec_unpackl */ + +static vector short __ATTRS_o_ai +vec_unpackl(vector signed char a) +{ + return __builtin_altivec_vupklsb((vector char)a); +} + +static vector int __ATTRS_o_ai +vec_unpackl(vector short a) +{ + return __builtin_altivec_vupklsh(a); +} + +/* vec_vupklsb */ + +static vector short __attribute__((__always_inline__)) +vec_vupklsb(vector signed char a) +{ + return __builtin_altivec_vupklsb((vector char)a); +} + +/* vec_vupklsh */ + +static vector int __attribute__((__always_inline__)) +vec_vupklsh(vector short a) 
+{ + return __builtin_altivec_vupklsh(a); +} + +/* vec_xor */ + +#define __builtin_altivec_vxor vec_xor + +static vector signed char __ATTRS_o_ai +vec_xor(vector signed char a, vector signed char b) +{ + return a ^ b; +} + +static vector unsigned char __ATTRS_o_ai +vec_xor(vector unsigned char a, vector unsigned char b) +{ + return a ^ b; +} + +static vector short __ATTRS_o_ai +vec_xor(vector short a, vector short b) +{ + return a ^ b; +} + +static vector unsigned short __ATTRS_o_ai +vec_xor(vector unsigned short a, vector unsigned short b) +{ + return a ^ b; +} + +static vector int __ATTRS_o_ai +vec_xor(vector int a, vector int b) +{ + return a ^ b; +} + +static vector unsigned int __ATTRS_o_ai +vec_xor(vector unsigned int a, vector unsigned int b) +{ + return a ^ b; +} + +static vector float __ATTRS_o_ai +vec_xor(vector float a, vector float b) +{ + vector unsigned int res = (vector unsigned int)a ^ (vector unsigned int)b; + return (vector float)res; +} + +/* vec_vxor */ + +static vector signed char __ATTRS_o_ai +vec_vxor(vector signed char a, vector signed char b) +{ + return a ^ b; +} + +static vector unsigned char __ATTRS_o_ai +vec_vxor(vector unsigned char a, vector unsigned char b) +{ + return a ^ b; +} + +static vector short __ATTRS_o_ai +vec_vxor(vector short a, vector short b) +{ + return a ^ b; +} + +static vector unsigned short __ATTRS_o_ai +vec_vxor(vector unsigned short a, vector unsigned short b) +{ + return a ^ b; +} + +static vector int __ATTRS_o_ai +vec_vxor(vector int a, vector int b) +{ + return a ^ b; +} + +static vector unsigned int __ATTRS_o_ai +vec_vxor(vector unsigned int a, vector unsigned int b) +{ + return a ^ b; +} + +static vector float __ATTRS_o_ai +vec_vxor(vector float a, vector float b) +{ + vector unsigned int res = (vector unsigned int)a ^ (vector unsigned int)b; + return (vector float)res; +} + +/* ------------------------------ predicates ------------------------------------ */ + /* vec_all_eq */ -static int _ATTRS_o_ai +static 
int __ATTRS_o_ai vec_all_eq(vector signed char a, vector signed char b) { return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)a, (vector char)b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_eq(vector unsigned char a, vector unsigned char b) { return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)a, (vector char)b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_eq(vector short a, vector short b) { return __builtin_altivec_vcmpequh_p(__CR6_LT, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_eq(vector unsigned short a, vector unsigned short b) { return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)a, (vector short)b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_eq(vector int a, vector int b) { return __builtin_altivec_vcmpequw_p(__CR6_LT, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_eq(vector unsigned int a, vector unsigned int b) { return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)a, (vector int)b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_eq(vector float a, vector float b) { return __builtin_altivec_vcmpeqfp_p(__CR6_LT, a, b); @@ -884,87 +5325,87 @@ vec_all_eq(vector float a, vector float b) /* vec_all_ge */ -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_ge(vector signed char a, vector signed char b) { - return __builtin_altivec_vcmpgtsb_p(__CR6_LT, b, a); + return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, b, a); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_ge(vector unsigned char a, vector unsigned char b) { - return __builtin_altivec_vcmpgtub_p(__CR6_LT, b, a); + return __builtin_altivec_vcmpgtub_p(__CR6_EQ, b, a); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_ge(vector short a, vector short b) { - return __builtin_altivec_vcmpgtsh_p(__CR6_LT, b, a); + return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, b, a); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_ge(vector unsigned short a, vector unsigned 
short b) { - return __builtin_altivec_vcmpgtuh_p(__CR6_LT, b, a); + return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, b, a); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_ge(vector int a, vector int b) { - return __builtin_altivec_vcmpgtsw_p(__CR6_LT, b, a); + return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, b, a); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_ge(vector unsigned int a, vector unsigned int b) { - return __builtin_altivec_vcmpgtuw_p(__CR6_LT, b, a); + return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, b, a); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_ge(vector float a, vector float b) { - return __builtin_altivec_vcmpgtfp_p(__CR6_LT, b, a); + return __builtin_altivec_vcmpgefp_p(__CR6_LT, a, b); } /* vec_all_gt */ -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_gt(vector signed char a, vector signed char b) { return __builtin_altivec_vcmpgtsb_p(__CR6_LT, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_gt(vector unsigned char a, vector unsigned char b) { return __builtin_altivec_vcmpgtub_p(__CR6_LT, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_gt(vector short a, vector short b) { return __builtin_altivec_vcmpgtsh_p(__CR6_LT, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_gt(vector unsigned short a, vector unsigned short b) { return __builtin_altivec_vcmpgtuh_p(__CR6_LT, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_gt(vector int a, vector int b) { return __builtin_altivec_vcmpgtsw_p(__CR6_LT, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_gt(vector unsigned int a, vector unsigned int b) { return __builtin_altivec_vcmpgtuw_p(__CR6_LT, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_gt(vector float a, vector float b) { return __builtin_altivec_vcmpgtfp_p(__CR6_LT, a, b); @@ -980,87 +5421,87 @@ vec_all_in(vector float a, vector float b) /* vec_all_le */ -static int _ATTRS_o_ai +static int __ATTRS_o_ai 
vec_all_le(vector signed char a, vector signed char b) { return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_le(vector unsigned char a, vector unsigned char b) { return __builtin_altivec_vcmpgtub_p(__CR6_EQ, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_le(vector short a, vector short b) { return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_le(vector unsigned short a, vector unsigned short b) { return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_le(vector int a, vector int b) { return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_le(vector unsigned int a, vector unsigned int b) { return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_le(vector float a, vector float b) { - return __builtin_altivec_vcmpgtfp_p(__CR6_EQ, a, b); + return __builtin_altivec_vcmpgefp_p(__CR6_LT, b, a); } /* vec_all_lt */ -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_lt(vector signed char a, vector signed char b) { return __builtin_altivec_vcmpgtsb_p(__CR6_LT, b, a); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_lt(vector unsigned char a, vector unsigned char b) { return __builtin_altivec_vcmpgtub_p(__CR6_LT, b, a); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_lt(vector short a, vector short b) { return __builtin_altivec_vcmpgtsh_p(__CR6_LT, b, a); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_lt(vector unsigned short a, vector unsigned short b) { return __builtin_altivec_vcmpgtuh_p(__CR6_LT, b, a); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_lt(vector int a, vector int b) { return __builtin_altivec_vcmpgtsw_p(__CR6_LT, b, a); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_lt(vector unsigned int a, 
vector unsigned int b) { return __builtin_altivec_vcmpgtuw_p(__CR6_LT, b, a); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_lt(vector float a, vector float b) { return __builtin_altivec_vcmpgtfp_p(__CR6_LT, b, a); @@ -1076,43 +5517,43 @@ vec_all_nan(vector float a) /* vec_all_ne */ -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_ne(vector signed char a, vector signed char b) { return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)a, (vector char)b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_ne(vector unsigned char a, vector unsigned char b) { return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)a, (vector char)b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_ne(vector short a, vector short b) { return __builtin_altivec_vcmpequh_p(__CR6_EQ, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_ne(vector unsigned short a, vector unsigned short b) { return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)a, (vector short)b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_ne(vector int a, vector int b) { return __builtin_altivec_vcmpequw_p(__CR6_EQ, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_ne(vector unsigned int a, vector unsigned int b) { return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)a, (vector int)b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_all_ne(vector float a, vector float b) { return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, a, b); @@ -1160,43 +5601,43 @@ vec_all_numeric(vector float a) /* vec_any_eq */ -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_eq(vector signed char a, vector signed char b) { return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)a, (vector char)b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_eq(vector unsigned char a, vector unsigned char b) { return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)a, (vector char)b); } -static int _ATTRS_o_ai +static int 
__ATTRS_o_ai vec_any_eq(vector short a, vector short b) { return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_eq(vector unsigned short a, vector unsigned short b) { return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)a, (vector short)b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_eq(vector int a, vector int b) { return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_eq(vector unsigned int a, vector unsigned int b) { return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)a, (vector int)b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_eq(vector float a, vector float b) { return __builtin_altivec_vcmpeqfp_p(__CR6_EQ_REV, a, b); @@ -1204,87 +5645,87 @@ vec_any_eq(vector float a, vector float b) /* vec_any_ge */ -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_ge(vector signed char a, vector signed char b) { return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, b, a); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_ge(vector unsigned char a, vector unsigned char b) { return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, b, a); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_ge(vector short a, vector short b) { return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, b, a); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_ge(vector unsigned short a, vector unsigned short b) { return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, b, a); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_ge(vector int a, vector int b) { return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, b, a); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_ge(vector unsigned int a, vector unsigned int b) { return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, b, a); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_ge(vector float a, vector float b) { - return 
__builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, b, a); + return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, a, b); } /* vec_any_gt */ -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_gt(vector signed char a, vector signed char b) { return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_gt(vector unsigned char a, vector unsigned char b) { return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_gt(vector short a, vector short b) { return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_gt(vector unsigned short a, vector unsigned short b) { return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_gt(vector int a, vector int b) { return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_gt(vector unsigned int a, vector unsigned int b) { return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_gt(vector float a, vector float b) { return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, a, b); @@ -1292,87 +5733,87 @@ vec_any_gt(vector float a, vector float b) /* vec_any_le */ -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_le(vector signed char a, vector signed char b) { return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_le(vector unsigned char a, vector unsigned char b) { return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_le(vector short a, vector short b) { return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_le(vector unsigned short a, vector unsigned short b) { return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, a, b); } 
-static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_le(vector int a, vector int b) { return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_le(vector unsigned int a, vector unsigned int b) { return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_le(vector float a, vector float b) { - return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, a, b); + return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, b, a); } /* vec_any_lt */ -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_lt(vector signed char a, vector signed char b) { return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, b, a); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_lt(vector unsigned char a, vector unsigned char b) { return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, b, a); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_lt(vector short a, vector short b) { return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, b, a); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_lt(vector unsigned short a, vector unsigned short b) { return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, b, a); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_lt(vector int a, vector int b) { return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, b, a); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_lt(vector unsigned int a, vector unsigned int b) { return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, b, a); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_lt(vector float a, vector float b) { return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, b, a); @@ -1388,43 +5829,43 @@ vec_any_nan(vector float a) /* vec_any_ne */ -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_ne(vector signed char a, vector signed char b) { return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)a, (vector char)b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_ne(vector 
unsigned char a, vector unsigned char b) { return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)a, (vector char)b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_ne(vector short a, vector short b) { return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_ne(vector unsigned short a, vector unsigned short b) { return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)a, (vector short)b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_ne(vector int a, vector int b) { return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, a, b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_ne(vector unsigned int a, vector unsigned int b) { return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)a, (vector int)b); } -static int _ATTRS_o_ai +static int __ATTRS_o_ai vec_any_ne(vector float a, vector float b) { return __builtin_altivec_vcmpeqfp_p(__CR6_LT_REV, a, b); @@ -1478,6 +5919,6 @@ vec_any_out(vector float a, vector float b) return __builtin_altivec_vcmpbfp_p(__CR6_EQ_REV, a, b); } -#undef _ATTRS_o_ai +#undef __ATTRS_o_ai #endif /* __ALTIVEC_H */ diff --git a/contrib/llvm/tools/clang/lib/Headers/arm_neon.td b/contrib/llvm/tools/clang/lib/Headers/arm_neon.td new file mode 100644 index 0000000..7ffbfb4 --- /dev/null +++ b/contrib/llvm/tools/clang/lib/Headers/arm_neon.td @@ -0,0 +1,341 @@ +//===--- arm_neon.td - ARM NEON compiler interface ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the TableGen definitions from which the ARM NEON header +// file will be generated. See ARM document DUI0348B. 
+// +//===----------------------------------------------------------------------===// + +class Op; + +def OP_NONE : Op; +def OP_ADD : Op; +def OP_SUB : Op; +def OP_MUL : Op; +def OP_MLA : Op; +def OP_MLS : Op; +def OP_MUL_N : Op; +def OP_MLA_N : Op; +def OP_MLS_N : Op; +def OP_EQ : Op; +def OP_GE : Op; +def OP_LE : Op; +def OP_GT : Op; +def OP_LT : Op; +def OP_NEG : Op; +def OP_NOT : Op; +def OP_AND : Op; +def OP_OR : Op; +def OP_XOR : Op; +def OP_ANDN : Op; +def OP_ORN : Op; +def OP_CAST : Op; +def OP_HI : Op; +def OP_LO : Op; +def OP_CONC : Op; +def OP_DUP : Op; +def OP_SEL : Op; +def OP_REV64 : Op; +def OP_REV32 : Op; +def OP_REV16 : Op; + +class Inst <string p, string t, Op o> { + string Prototype = p; + string Types = t; + Op Operand = o; + bit isShift = 0; +} + +// Used to generate Builtins.def +class SInst<string p, string t> : Inst<p, t, OP_NONE> {} +class IInst<string p, string t> : Inst<p, t, OP_NONE> {} +class WInst<string p, string t> : Inst<p, t, OP_NONE> {} + +// prototype: return (arg, arg, ...) 
+// v: void +// t: best-fit integer (int/poly args) +// x: signed integer (int/float args) +// u: unsigned integer (int/float args) +// f: float (int args) +// d: default +// w: double width elements, same num elts +// n: double width elements, half num elts +// h: half width elements, double num elts +// e: half width elements, double num elts, unsigned +// i: constant int +// l: constant uint64 +// s: scalar of element type +// a: scalar of element type (splat to vector type) +// k: default elt width, double num elts +// #: array of default vectors +// p: pointer type +// c: const pointer type + +// sizes: +// c: char +// s: short +// i: int +// l: long +// f: float +// h: half-float + +// size modifiers: +// U: unsigned +// Q: 128b +// P: polynomial + +//////////////////////////////////////////////////////////////////////////////// +// E.3.1 Addition +def VADD : Inst<"ddd", "csilfUcUsUiUlQcQsQiQlQfQUcQUsQUiQUl", OP_ADD>; +def VADDL : SInst<"wdd", "csiUcUsUi">; +def VADDW : SInst<"wwd", "csiUcUsUi">; +def VHADD : SInst<"ddd", "csiUcUsUiQcQsQiQUcQUsQUi">; +def VRHADD : SInst<"ddd", "csiUcUsUiQcQsQiQUcQUsQUi">; +def VQADD : SInst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">; +def VADDHN : IInst<"dww", "csiUcUsUi">; +def VRADDHN : IInst<"dww", "csiUcUsUi">; + +//////////////////////////////////////////////////////////////////////////////// +// E.3.2 Multiplication +def VMUL : Inst<"ddd", "csifUcUsUiPcQcQsQiQfQUcQUsQUiQPc", OP_MUL>; +def VMLA : Inst<"dddd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MLA>; +def VMLAL : SInst<"wwdd", "csiUcUsUi">; +def VMLS : Inst<"dddd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MLS>; +def VMLSL : SInst<"wwdd", "csiUcUsUi">; +def VQDMULH : SInst<"ddd", "siQsQi">; +def VQRDMULH : SInst<"ddd", "siQsQi">; +def VQDMLAL : SInst<"wwdd", "si">; +def VQDMLSL : SInst<"wwdd", "si">; +def VMULL : SInst<"wdd", "csiUcUsUiPc">; +def VQDMULL : SInst<"wdd", "si">; + +//////////////////////////////////////////////////////////////////////////////// +// E.3.3 Subtraction 
+def VSUB : Inst<"ddd", "csilfUcUsUiUlQcQsQiQlQfQUcQUsQUiQUl", OP_SUB>; +def VSUBL : SInst<"wdd", "csiUcUsUi">; +def VSUBW : SInst<"wwd", "csiUcUsUi">; +def VQSUB : SInst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">; +def VHSUB : SInst<"ddd", "csiUcUsUiQcQsQiQUcQUsQUi">; +def VSUBHN : IInst<"dww", "csiUcUsUi">; +def VRSUBHN : IInst<"dww", "csiUcUsUi">; + +//////////////////////////////////////////////////////////////////////////////// +// E.3.4 Comparison +def VCEQ : Inst<"udd", "csifUcUsUiPcQcQsQiQfQUcQUsQUiQPc", OP_EQ>; +def VCGE : Inst<"udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_GE>; +def VCLE : Inst<"udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_LE>; +def VCGT : Inst<"udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_GT>; +def VCLT : Inst<"udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_LT>; +def VCAGE : IInst<"udd", "fQf">; +def VCALE : IInst<"udd", "fQf">; +def VCAGT : IInst<"udd", "fQf">; +def VCALT : IInst<"udd", "fQf">; +def VTST : WInst<"udd", "csiUcUsUiPcQcQsQiQUcQUsQUiQPc">; + +//////////////////////////////////////////////////////////////////////////////// +// E.3.5 Absolute Difference +def VABD : SInst<"ddd", "csiUcUsUifQcQsQiQUcQUsQUiQf">; +def VABDL : SInst<"wdd", "csiUcUsUi">; +def VABA : SInst<"dddd", "csiUcUsUiQcQsQiQUcQUsQUi">; +def VABAL : SInst<"wwdd", "csiUcUsUi">; + +//////////////////////////////////////////////////////////////////////////////// +// E.3.6 Max/Min +def VMAX : SInst<"ddd", "csiUcUsUifQcQsQiQUcQUsQUiQf">; +def VMIN : SInst<"ddd", "csiUcUsUifQcQsQiQUcQUsQUiQf">; + +//////////////////////////////////////////////////////////////////////////////// +// E.3.7 Pairdise Addition +def VPADD : IInst<"ddd", "csiUcUsUif">; +def VPADDL : SInst<"nd", "csiUcUsUiQcQsQiQUcQUsQUi">; +def VPADAL : SInst<"nnd", "csiUcUsUiQcQsQiQUcQUsQUi">; + +//////////////////////////////////////////////////////////////////////////////// +// E.3.8-9 Folding Max/Min +def VPMAX : SInst<"ddd", "csiUcUsUif">; +def VPMIN : SInst<"ddd", "csiUcUsUif">; + 
+//////////////////////////////////////////////////////////////////////////////// +// E.3.10 Reciprocal/Sqrt +def VRECPS : IInst<"ddd", "fQf">; +def VRSQRTS : IInst<"ddd", "fQf">; + +//////////////////////////////////////////////////////////////////////////////// +// E.3.11 Shifts by signed variable +def VSHL : SInst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">; +def VQSHL : SInst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">; +def VRSHL : SInst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">; +def VQRSHL : SInst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">; + +//////////////////////////////////////////////////////////////////////////////// +// E.3.12 Shifts by constant +let isShift = 1 in { +def VSHR_N : SInst<"ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">; +def VSHL_N : IInst<"ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">; +def VRSHR_N : SInst<"ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">; +def VSRA_N : SInst<"dddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">; +def VRSRA_N : SInst<"dddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">; +def VQSHL_N : SInst<"ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">; +def VQSHLU_N : SInst<"udi", "csilQcQsQiQl">; +def VSHRN_N : IInst<"hki", "silUsUiUl">; +def VQSHRUN_N : SInst<"eki", "sil">; +def VQRSHRUN_N : SInst<"eki", "sil">; +def VQSHRN_N : SInst<"hki", "silUsUiUl">; +def VRSHRN_N : IInst<"hki", "silUsUiUl">; +def VQRSHRN_N : SInst<"hki", "silUsUiUl">; +def VSHLL_N : SInst<"wdi", "csiUcUsUi">; + +//////////////////////////////////////////////////////////////////////////////// +// E.3.13 Shifts with insert +def VSRI_N : WInst<"dddi", "csilUcUsUiUlPcPsQcQsQiQlQUcQUsQUiQUlQPcQPs">; +def VSLI_N : WInst<"dddi", "csilUcUsUiUlPcPsQcQsQiQlQUcQUsQUiQUlQPcQPs">; +} + +//////////////////////////////////////////////////////////////////////////////// +// E.3.14 Loads and stores of a single vector +def VLD1 : WInst<"dc", "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">; +def VLD1_LANE : WInst<"dci", "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">; +def VLD1_DUP : 
WInst<"dc", "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">; +def VST1 : WInst<"vpd", "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">; +def VST1_LANE : WInst<"vpdi", "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">; + +//////////////////////////////////////////////////////////////////////////////// +// E.3.15 Loads and stores of an N-element structure +def VLD2 : WInst<"2c", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">; +def VLD3 : WInst<"3c", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">; +def VLD4 : WInst<"4c", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">; +def VLD2_DUP : WInst<"2c", "UcUsUiUlcsilhfPcPs">; +def VLD3_DUP : WInst<"3c", "UcUsUiUlcsilhfPcPs">; +def VLD4_DUP : WInst<"4c", "UcUsUiUlcsilhfPcPs">; +def VLD2_LANE : WInst<"2ci", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">; +def VLD3_LANE : WInst<"3ci", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">; +def VLD4_LANE : WInst<"4ci", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">; +def VST2 : WInst<"vp2", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">; +def VST3 : WInst<"vp3", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">; +def VST4 : WInst<"vp4", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">; +def VST2_LANE : WInst<"vp2i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">; +def VST3_LANE : WInst<"vp3i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">; +def VST4_LANE : WInst<"vp4i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">; + +//////////////////////////////////////////////////////////////////////////////// +// E.3.16 Extract lanes from a vector +def VGET_LANE : IInst<"sdi", "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl">; + +//////////////////////////////////////////////////////////////////////////////// +// E.3.17 Set lanes within a vector +def VSET_LANE : IInst<"dsdi", "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl">; + +//////////////////////////////////////////////////////////////////////////////// +// E.3.18 Initialize a vector from bit pattern +def VCREATE: Inst<"dl", "csihfUcUsUiUlPcPsl", OP_CAST>; + 
+//////////////////////////////////////////////////////////////////////////////// +// E.3.19 Set all lanes to same value +def VDUP_N : Inst<"ds", "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl", OP_DUP>; +def VMOV_N : Inst<"ds", "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl", OP_DUP>; + +//////////////////////////////////////////////////////////////////////////////// +// E.3.20 Combining vectors +def VCOMBINE : Inst<"kdd", "csilhfUcUsUiUlPcPs", OP_CONC>; + +//////////////////////////////////////////////////////////////////////////////// +// E.3.21 Splitting vectors +def VGET_HIGH : Inst<"dk", "csilhfUcUsUiUlPcPs", OP_HI>; +def VGET_LOW : Inst<"dk", "csilhfUcUsUiUlPcPs", OP_LO>; + +//////////////////////////////////////////////////////////////////////////////// +// E.3.22 Converting vectors +def VCVT_S32 : SInst<"xd", "fQf">; +def VCVT_U32 : SInst<"ud", "fQf">; +def VCVT_F16 : SInst<"hk", "f">; +def VCVT_N_S32 : SInst<"xdi", "fQf">; +def VCVT_N_U32 : SInst<"udi", "fQf">; +def VCVT_F32 : SInst<"fd", "iUiQiQUi">; +def VCVT_F32_F16 : SInst<"kh", "f">; +def VCVT_N_F32 : SInst<"fdi", "iUiQiQUi">; +def VMOVN : IInst<"hk", "silUsUiUl">; +def VMOVL : SInst<"wd", "csiUcUsUi">; +def VQMOVN : SInst<"hk", "silUsUiUl">; +def VQMOVUN : SInst<"ek", "sil">; + +//////////////////////////////////////////////////////////////////////////////// +// E.3.23-24 Table lookup, Extended table lookup +def VTBL1 : WInst<"ddt", "UccPc">; +def VTBL2 : WInst<"d2t", "UccPc">; +def VTBL3 : WInst<"d3t", "UccPc">; +def VTBL4 : WInst<"d4t", "UccPc">; +def VTBX1 : WInst<"dddt", "UccPc">; +def VTBX2 : WInst<"dd2t", "UccPc">; +def VTBX3 : WInst<"dd3t", "UccPc">; +def VTBX4 : WInst<"dd4t", "UccPc">; + +//////////////////////////////////////////////////////////////////////////////// +// E.3.25 Operations with a scalar value +def VMLA_LANE : IInst<"ddddi", "siUsUifQsQiQUsQUiQf">; +def VMLAL_LANE : SInst<"wwddi", "siUsUi">; +def VQDMLAL_LANE : SInst<"wwddi", "si">; +def VMLS_LANE : IInst<"ddddi", 
"siUsUifQsQiQUsQUiQf">; +def VMLSL_LANE : SInst<"wwddi", "siUsUi">; +def VQDMLSL_LANE : SInst<"wwddi", "si">; +def VMUL_N : Inst<"dds", "sifUsUiQsQiQfQUsQUi", OP_MUL_N>; +def VMULL_N : SInst<"wda", "siUsUi">; +def VMULL_LANE : SInst<"wddi", "siUsUi">; +def VQDMULL_N : SInst<"wda", "si">; +def VQDMULL_LANE : SInst<"wddi", "si">; +def VQDMULH_N : SInst<"dda", "siQsQi">; +def VQDMULH_LANE : SInst<"dddi", "siQsQi">; +def VQRDMULH_N : SInst<"dda", "siQsQi">; +def VQRDMULH_LANE : SInst<"dddi", "siQsQi">; +def VMLA_N : Inst<"ddda", "siUsUifQsQiQUsQUiQf", OP_MLA_N>; +def VMLAL_N : SInst<"wwda", "siUsUi">; +def VQDMLAL_N : SInst<"wwda", "si">; +def VMLS_N : Inst<"ddds", "siUsUifQsQiQUsQUiQf", OP_MLS_N>; +def VMLSL_N : SInst<"wwda", "siUsUi">; +def VQDMLSL_N : SInst<"wwda", "si">; + +//////////////////////////////////////////////////////////////////////////////// +// E.3.26 Vector Extract +def VEXT : WInst<"dddi", "cUcPcsUsPsiUilUlQcQUcQPcQsQUsQPsQiQUiQlQUl">; + +//////////////////////////////////////////////////////////////////////////////// +// E.3.27 Reverse vector elements (sdap endianness) +def VREV64 : Inst<"dd", "csiUcUsUiPcPsfQcQsQiQUcQUsQUiQPcQPsQf", OP_REV64>; +def VREV32 : Inst<"dd", "csUcUsPcQcQsQUcQUsQPc", OP_REV32>; +def VREV16 : Inst<"dd", "cUcPcQcQUcQPc", OP_REV16>; + +//////////////////////////////////////////////////////////////////////////////// +// E.3.28 Other single operand arithmetic +def VABS : SInst<"dd", "csifQcQsQiQf">; +def VQABS : SInst<"dd", "csiQcQsQi">; +def VNEG : Inst<"dd", "csifQcQsQiQf", OP_NEG>; +def VQNEG : SInst<"dd", "csiQcQsQi">; +def VCLS : SInst<"dd", "csiQcQsQi">; +def VCLZ : IInst<"dd", "csiUcUsUiQcQsQiQUcQUsQUi">; +def VCNT : WInst<"dd", "UccPcQUcQcQPc">; +def VRECPE : SInst<"dd", "fUiQfQUi">; +def VRSQRTE : SInst<"dd", "fUiQfQUi">; + +//////////////////////////////////////////////////////////////////////////////// +// E.3.29 Logical operations +def VMVN : Inst<"dd", "csiUcUsUiPcQcQsQiQUcQUsQUiQPc", OP_NOT>; +def VAND : 
Inst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_AND>; +def VORR : Inst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_OR>; +def VEOR : Inst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_XOR>; +def VBIC : Inst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_ANDN>; +def VORN : Inst<"ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_ORN>; +def VBSL : Inst<"dudd", "csilUcUsUiUlfPcPsQcQsQiQlQUcQUsQUiQUlQfQPcQPs", OP_SEL>; + +//////////////////////////////////////////////////////////////////////////////// +// E.3.30 Transposition operations +def VTRN: WInst<"2dd", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">; +def VZIP: WInst<"2dd", "csUcUsfPcPsQcQsQiQUcQUsQUiQfQPcQPs">; +def VUZP: WInst<"2dd", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">; + +//////////////////////////////////////////////////////////////////////////////// +// E.3.31 Vector reinterpret cast operations diff --git a/contrib/llvm/tools/clang/lib/Headers/emmintrin.h b/contrib/llvm/tools/clang/lib/Headers/emmintrin.h index 8afbe76..f297f36 100644 --- a/contrib/llvm/tools/clang/lib/Headers/emmintrin.h +++ b/contrib/llvm/tools/clang/lib/Headers/emmintrin.h @@ -1222,9 +1222,10 @@ _mm_movemask_epi8(__m128i a) 4, 5, 6, 7)) #define _mm_shufflehi_epi16(a, imm) \ ((__m128i)__builtin_shufflevector((__v8hi)(a), (__v8hi) {0}, 0, 1, 2, 3, \ - 4 + ((imm) & 0x3), 4 + ((imm) & 0xc) >> 2, \ - 4 + ((imm) & 0x30) >> 4, \ - 4 + ((imm) & 0xc0) >> 6)) + 4 + (((imm) & 0x03) >> 0), \ + 4 + (((imm) & 0x0c) >> 2), \ + 4 + (((imm) & 0x30) >> 4), \ + 4 + (((imm) & 0xc0) >> 6))) static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) _mm_unpackhi_epi8(__m128i a, __m128i b) diff --git a/contrib/llvm/tools/clang/lib/Headers/smmintrin.h b/contrib/llvm/tools/clang/lib/Headers/smmintrin.h index e271f99..4b0d9e7 100644 --- a/contrib/llvm/tools/clang/lib/Headers/smmintrin.h +++ b/contrib/llvm/tools/clang/lib/Headers/smmintrin.h @@ -183,13 +183,13 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2) #define _mm_insert_ps(X, Y, N) 
__builtin_ia32_insertps128((X), (Y), (N)) #define _mm_extract_ps(X, N) (__extension__ \ ({ union { int i; float f; } __t; \ - __v4sf __a = (__v4sf)X; \ + __v4sf __a = (__v4sf)(X); \ __t.f = __a[N]; \ __t.i;})) /* Miscellaneous insert and extract macros. */ /* Extract a single-precision float from X at index N into D. */ -#define _MM_EXTRACT_FLOAT(D, X, N) (__extension__ ({ __v4sf __a = (__v4sf)X; \ +#define _MM_EXTRACT_FLOAT(D, X, N) (__extension__ ({ __v4sf __a = (__v4sf)(X); \ (D) = __a[N]; })) /* Or together 2 sets of indexes (X and Y) with the zeroing bits (Z) to create @@ -201,25 +201,25 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2) _MM_MK_INSERTPS_NDX((N), 0, 0x0e)) /* Insert int into packed integer array at index. */ -#define _mm_insert_epi8(X, I, N) (__extension__ ({ __v16qi __a = (__v16qi)X; \ +#define _mm_insert_epi8(X, I, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \ __a[N] = I; \ __a;})) -#define _mm_insert_epi32(X, I, N) (__extension__ ({ __v4si __a = (__v4si)X; \ +#define _mm_insert_epi32(X, I, N) (__extension__ ({ __v4si __a = (__v4si)(X); \ __a[N] = I; \ __a;})) #ifdef __x86_64__ -#define _mm_insert_epi64(X, I, N) (__extension__ ({ __v2di __a = (__v2di)X; \ +#define _mm_insert_epi64(X, I, N) (__extension__ ({ __v2di __a = (__v2di)(X); \ __a[N] = I; \ __a;})) #endif /* __x86_64__ */ /* Extract int from packed integer array at index. 
*/ -#define _mm_extract_epi8(X, N) (__extension__ ({ __v16qi __a = (__v16qi)X; \ +#define _mm_extract_epi8(X, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \ __a[N];})) -#define _mm_extract_epi32(X, N) (__extension__ ({ __v4si __a = (__v4si)X; \ +#define _mm_extract_epi32(X, N) (__extension__ ({ __v4si __a = (__v4si)(X); \ __a[N];})) #ifdef __x86_64__ -#define _mm_extract_epi64(X, N) (__extension__ ({ __v2di __a = (__v2di)X; \ +#define _mm_extract_epi64(X, N) (__extension__ ({ __v2di __a = (__v2di)(X); \ __a[N];})) #endif /* __x86_64 */ diff --git a/contrib/llvm/tools/clang/lib/Headers/stddef.h b/contrib/llvm/tools/clang/lib/Headers/stddef.h index 6868ad3..b1d0d52 100644 --- a/contrib/llvm/tools/clang/lib/Headers/stddef.h +++ b/contrib/llvm/tools/clang/lib/Headers/stddef.h @@ -45,6 +45,13 @@ typedef __typeof__(*L"") wchar_t; #define NULL ((void*)0) #endif +// Some C libraries expect to see a wint_t here. Others (notably MinGW) will use +// __WINT_TYPE__ directly; accomodate both by requiring __need_wint_t +#if defined(__need_wint_t) && !defined(_WINT_T) +#define _WINT_T +typedef __WINT_TYPE__ wint_t; +#endif + #define offsetof(t, d) __builtin_offsetof(t, d) #endif /* __STDDEF_H */ diff --git a/contrib/llvm/tools/clang/lib/Headers/stdint.h b/contrib/llvm/tools/clang/lib/Headers/stdint.h index 1785f31..9498ed5 100644 --- a/contrib/llvm/tools/clang/lib/Headers/stdint.h +++ b/contrib/llvm/tools/clang/lib/Headers/stdint.h @@ -233,8 +233,8 @@ typedef __uintn_t(__INTPTR_WIDTH__) uintptr_t; /* C99 7.18.1.5 Greatest-width integer types. */ -typedef __intn_t(__INTMAX_WIDTH__) intmax_t; -typedef __uintn_t(__INTMAX_WIDTH__) uintmax_t; +typedef __INTMAX_TYPE__ intmax_t; +typedef __UINTMAX_TYPE__ uintmax_t; /* C99 7.18.4 Macros for minimum-width integer constants. 
* diff --git a/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h b/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h index 3e82e28..75e06b5 100644 --- a/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h +++ b/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h @@ -607,10 +607,10 @@ _mm_storer_ps(float *p, __m128 a) #define _MM_HINT_T2 3 #define _MM_HINT_NTA 0 -/* FIXME: We have to #define this because "sel" must be a constant integer, and +/* FIXME: We have to #define this because "sel" must be a constant integer, and Sema doesn't do any form of constant propagation yet. */ -#define _mm_prefetch(a, sel) (__builtin_prefetch((void *)a, 0, sel)) +#define _mm_prefetch(a, sel) (__builtin_prefetch((void *)(a), 0, sel)) static __inline__ void __attribute__((__always_inline__, __nodebug__)) _mm_stream_pi(__m64 *p, __m64 a) @@ -723,7 +723,7 @@ _mm_setcsr(unsigned int i) } #define _mm_shuffle_ps(a, b, mask) \ - (__builtin_shufflevector((__v4sf)a, (__v4sf)b, \ + (__builtin_shufflevector((__v4sf)(a), (__v4sf)(b), \ (mask) & 0x3, ((mask) & 0xc) >> 2, \ (((mask) & 0x30) >> 4) + 4, \ (((mask) & 0xc0) >> 6) + 4)) diff --git a/contrib/llvm/tools/clang/lib/Index/CallGraph.cpp b/contrib/llvm/tools/clang/lib/Index/CallGraph.cpp index 6403319..dedcc0e 100644 --- a/contrib/llvm/tools/clang/lib/Index/CallGraph.cpp +++ b/contrib/llvm/tools/clang/lib/Index/CallGraph.cpp @@ -55,7 +55,7 @@ void CGBuilder::VisitCallExpr(CallExpr *CE) { } } -CallGraph::CallGraph() : Root(0) { +CallGraph::CallGraph(Program &P) : Prog(P), Root(0) { ExternalCallingNode = getOrInsertFunction(Entity()); } diff --git a/contrib/llvm/tools/clang/lib/Index/Entity.cpp b/contrib/llvm/tools/clang/lib/Index/Entity.cpp index cd9d277..7a24719 100644 --- a/contrib/llvm/tools/clang/lib/Index/Entity.cpp +++ b/contrib/llvm/tools/clang/lib/Index/Entity.cpp @@ -42,14 +42,48 @@ public: EntityGetter(Program &prog, ProgramImpl &progImpl) : Prog(prog), ProgImpl(progImpl) { } + // Get an Entity. 
+ Entity getEntity(Entity Parent, DeclarationName Name, + unsigned IdNS, bool isObjCInstanceMethod); + + // Get an Entity associated with the name in the global namespace. + Entity getGlobalEntity(llvm::StringRef Name); + Entity VisitNamedDecl(NamedDecl *D); Entity VisitVarDecl(VarDecl *D); + Entity VisitFieldDecl(FieldDecl *D); Entity VisitFunctionDecl(FunctionDecl *D); + Entity VisitTypeDecl(TypeDecl *D); }; } } +Entity EntityGetter::getEntity(Entity Parent, DeclarationName Name, + unsigned IdNS, bool isObjCInstanceMethod) { + llvm::FoldingSetNodeID ID; + EntityImpl::Profile(ID, Parent, Name, IdNS, isObjCInstanceMethod); + + ProgramImpl::EntitySetTy &Entities = ProgImpl.getEntities(); + void *InsertPos = 0; + if (EntityImpl *Ent = Entities.FindNodeOrInsertPos(ID, InsertPos)) + return Entity(Ent); + + void *Buf = ProgImpl.Allocate(sizeof(EntityImpl)); + EntityImpl *New = + new (Buf) EntityImpl(Parent, Name, IdNS, isObjCInstanceMethod); + Entities.InsertNode(New, InsertPos); + + return Entity(New); +} + +Entity EntityGetter::getGlobalEntity(llvm::StringRef Name) { + IdentifierInfo *II = &ProgImpl.getIdents().get(Name); + DeclarationName GlobName(II); + unsigned IdNS = Decl::IDNS_Ordinary; + return getEntity(Entity(), GlobName, IdNS, false); +} + Entity EntityGetter::VisitNamedDecl(NamedDecl *D) { Entity Parent; if (!D->getDeclContext()->isTranslationUnit()) { @@ -91,24 +125,14 @@ Entity EntityGetter::VisitNamedDecl(NamedDecl *D) { ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D); bool isObjCInstanceMethod = MD && MD->isInstanceMethod(); - - llvm::FoldingSetNodeID ID; - EntityImpl::Profile(ID, Parent, GlobName, IdNS, isObjCInstanceMethod); - - ProgramImpl::EntitySetTy &Entities = ProgImpl.getEntities(); - void *InsertPos = 0; - if (EntityImpl *Ent = Entities.FindNodeOrInsertPos(ID, InsertPos)) - return Entity(Ent); - - void *Buf = ProgImpl.Allocate(sizeof(EntityImpl)); - EntityImpl *New = - new (Buf) EntityImpl(Parent, GlobName, IdNS, isObjCInstanceMethod); - 
Entities.InsertNode(New, InsertPos); - - return Entity(New); + return getEntity(Parent, GlobName, IdNS, isObjCInstanceMethod); } Entity EntityGetter::VisitVarDecl(VarDecl *D) { + // Local variables have no linkage, make invalid Entities. + if (D->hasLocalStorage()) + return Entity(); + // If it's static it cannot be referred to by another translation unit. if (D->getStorageClass() == VarDecl::Static) return Entity(D); @@ -124,6 +148,18 @@ Entity EntityGetter::VisitFunctionDecl(FunctionDecl *D) { return VisitNamedDecl(D); } +Entity EntityGetter::VisitFieldDecl(FieldDecl *D) { + // Make FieldDecl an invalid Entity since it has no linkage. + return Entity(); +} + +Entity EntityGetter::VisitTypeDecl(TypeDecl *D) { + // Although in C++ class name has external linkage, usually the definition of + // the class is available in the same translation unit when it's needed. So we + // make all of them invalid Entity. + return Entity(); +} + //===----------------------------------------------------------------------===// // EntityImpl Implementation //===----------------------------------------------------------------------===// @@ -172,6 +208,12 @@ Entity EntityImpl::get(Decl *D, Program &Prog, ProgramImpl &ProgImpl) { return EntityGetter(Prog, ProgImpl).Visit(D); } +/// \brief Get an Entity associated with a global name. 
+Entity EntityImpl::get(llvm::StringRef Name, Program &Prog, + ProgramImpl &ProgImpl) { + return EntityGetter(Prog, ProgImpl).getGlobalEntity(Name); +} + std::string EntityImpl::getPrintableName() { return Name.getAsString(); } @@ -217,6 +259,11 @@ Entity Entity::get(Decl *D, Program &Prog) { return EntityImpl::get(D, Prog, ProgImpl); } +Entity Entity::get(llvm::StringRef Name, Program &Prog) { + ProgramImpl &ProgImpl = *static_cast<ProgramImpl*>(Prog.Impl); + return EntityImpl::get(Name, Prog, ProgImpl); +} + unsigned llvm::DenseMapInfo<Entity>::getHashValue(Entity E) { return DenseMapInfo<void*>::getHashValue(E.getAsOpaquePtr()); diff --git a/contrib/llvm/tools/clang/lib/Index/EntityImpl.h b/contrib/llvm/tools/clang/lib/Index/EntityImpl.h index cbce934..da52ccf 100644 --- a/contrib/llvm/tools/clang/lib/Index/EntityImpl.h +++ b/contrib/llvm/tools/clang/lib/Index/EntityImpl.h @@ -47,6 +47,7 @@ public: /// \brief Get an Entity associated with the given Decl. /// \returns Null if an Entity cannot refer to this Decl. 
static Entity get(Decl *D, Program &Prog, ProgramImpl &ProgImpl); + static Entity get(llvm::StringRef Name, Program &Prog, ProgramImpl &ProgImpl); std::string getPrintableName(); diff --git a/contrib/llvm/tools/clang/lib/Index/Indexer.cpp b/contrib/llvm/tools/clang/lib/Index/Indexer.cpp index 57bfc5b..7f21c4f 100644 --- a/contrib/llvm/tools/clang/lib/Index/Indexer.cpp +++ b/contrib/llvm/tools/clang/lib/Index/Indexer.cpp @@ -25,14 +25,22 @@ namespace { class EntityIndexer : public EntityHandler { TranslationUnit *TU; Indexer::MapTy ⤅ + Indexer::DefMapTy &DefMap; public: - EntityIndexer(TranslationUnit *tu, Indexer::MapTy &map) : TU(tu), Map(map) { } + EntityIndexer(TranslationUnit *tu, Indexer::MapTy &map, + Indexer::DefMapTy &defmap) + : TU(tu), Map(map), DefMap(defmap) { } virtual void Handle(Entity Ent) { if (Ent.isInternalToTU()) return; Map[Ent].insert(TU); + + Decl *D = Ent.getDecl(TU->getASTContext()); + if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) + if (FD->isThisDeclarationADefinition()) + DefMap[Ent] = std::make_pair(FD, TU); } }; @@ -62,7 +70,7 @@ void Indexer::IndexAST(TranslationUnit *TU) { assert(TU && "Passed null TranslationUnit"); ASTContext &Ctx = TU->getASTContext(); CtxTUMap[&Ctx] = TU; - EntityIndexer Idx(TU, Map); + EntityIndexer Idx(TU, Map, DefMap); Prog.FindEntities(Ctx, Idx); SelectorIndexer SelIdx(Prog, TU, SelMap); @@ -102,3 +110,12 @@ void Indexer::GetTranslationUnitsFor(GlobalSelector Sel, for (TUSetTy::iterator I = Set.begin(), E = Set.end(); I != E; ++I) Handler.Handle(*I); } + +std::pair<FunctionDecl *, TranslationUnit *> +Indexer::getDefinitionFor(Entity Ent) { + DefMapTy::iterator I = DefMap.find(Ent); + if (I == DefMap.end()) + return std::make_pair((FunctionDecl *)0, (TranslationUnit *)0); + else + return I->second; +} diff --git a/contrib/llvm/tools/clang/lib/Index/Makefile b/contrib/llvm/tools/clang/lib/Index/Makefile index 4d86713..e87e638 100644 --- a/contrib/llvm/tools/clang/lib/Index/Makefile +++ 
b/contrib/llvm/tools/clang/lib/Index/Makefile @@ -11,17 +11,9 @@ # ##===----------------------------------------------------------------------===## -LEVEL = ../../../.. -include $(LEVEL)/Makefile.config - +CLANG_LEVEL := ../.. LIBRARYNAME := clangIndex BUILD_ARCHIVE = 1 -ifeq ($(ARCH),PowerPC) -CXX.Flags += -maltivec -endif - -CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include - -include $(LEVEL)/Makefile.common +include $(CLANG_LEVEL)/Makefile diff --git a/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp b/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp index cd153e1..91b14f6 100644 --- a/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp +++ b/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp @@ -752,19 +752,21 @@ void Lexer::LexStringLiteral(Token &Result, const char *CurPtr, bool Wide) { char C = getAndAdvanceChar(CurPtr, Result); while (C != '"') { - // Skip escaped characters. - if (C == '\\') { - // Skip the escaped character. + // Skip escaped characters. Escaped newlines will already be processed by + // getAndAdvanceChar. + if (C == '\\') C = getAndAdvanceChar(CurPtr, Result); - } else if (C == '\n' || C == '\r' || // Newline. - (C == 0 && CurPtr-1 == BufferEnd)) { // End of file. + + if (C == '\n' || C == '\r' || // Newline. + (C == 0 && CurPtr-1 == BufferEnd)) { // End of file. if (!isLexingRawMode() && !Features.AsmPreprocessor) Diag(BufferPtr, diag::err_unterminated_string); FormTokenWithChars(Result, CurPtr-1, tok::unknown); return; - } else if (C == 0) { - NulCharacter = CurPtr-1; } + + if (C == 0) + NulCharacter = CurPtr-1; C = getAndAdvanceChar(CurPtr, Result); } @@ -818,41 +820,33 @@ void Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) { void Lexer::LexCharConstant(Token &Result, const char *CurPtr) { const char *NulCharacter = 0; // Does this character contain the \0 character? - // Handle the common case of 'x' and '\y' efficiently. 
char C = getAndAdvanceChar(CurPtr, Result); if (C == '\'') { if (!isLexingRawMode() && !Features.AsmPreprocessor) Diag(BufferPtr, diag::err_empty_character); FormTokenWithChars(Result, CurPtr, tok::unknown); return; - } else if (C == '\\') { - // Skip the escaped character. - // FIXME: UCN's. - C = getAndAdvanceChar(CurPtr, Result); } - if (C && C != '\n' && C != '\r' && CurPtr[0] == '\'') { - ++CurPtr; - } else { - // Fall back on generic code for embedded nulls, newlines, wide chars. - do { - // Skip escaped characters. - if (C == '\\') { - // Skip the escaped character. - C = getAndAdvanceChar(CurPtr, Result); - } else if (C == '\n' || C == '\r' || // Newline. - (C == 0 && CurPtr-1 == BufferEnd)) { // End of file. - if (!isLexingRawMode() && !Features.AsmPreprocessor) - Diag(BufferPtr, diag::err_unterminated_char); - FormTokenWithChars(Result, CurPtr-1, tok::unknown); - return; - } else if (C == 0) { - NulCharacter = CurPtr-1; - } + while (C != '\'') { + // Skip escaped characters. + if (C == '\\') { + // Skip the escaped character. + // FIXME: UCN's C = getAndAdvanceChar(CurPtr, Result); - } while (C != '\''); + } else if (C == '\n' || C == '\r' || // Newline. + (C == 0 && CurPtr-1 == BufferEnd)) { // End of file. + if (!isLexingRawMode() && !Features.AsmPreprocessor) + Diag(BufferPtr, diag::err_unterminated_char); + FormTokenWithChars(Result, CurPtr-1, tok::unknown); + return; + } else if (C == 0) { + NulCharacter = CurPtr-1; + } + C = getAndAdvanceChar(CurPtr, Result); } + // If a nul character existed in the character, warn about it. 
if (NulCharacter && !isLexingRawMode()) Diag(NulCharacter, diag::null_in_char); diff --git a/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp b/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp index b73f236..b8fd3ce 100644 --- a/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp +++ b/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp @@ -169,9 +169,8 @@ static unsigned ProcessCharEscape(const char *&ThisTokBuf, /// we will likely rework our support for UCN's. static void ProcessUCNEscape(const char *&ThisTokBuf, const char *ThisTokEnd, char *&ResultBuf, bool &HadError, - SourceLocation Loc, bool IsWide, Preprocessor &PP, - bool Complain) -{ + SourceLocation Loc, Preprocessor &PP, + bool Complain) { // FIXME: Add a warning - UCN's are only valid in C++ & C99. // FIXME: Handle wide strings. @@ -835,11 +834,8 @@ StringLiteralParser(const Token *StringToks, unsigned NumStringToks, // TODO: Input character set mapping support. // Skip L marker for wide strings. - bool ThisIsWide = false; - if (ThisTokBuf[0] == 'L') { + if (ThisTokBuf[0] == 'L') ++ThisTokBuf; - ThisIsWide = true; - } assert(ThisTokBuf[0] == '"' && "Expected quote, lexer broken?"); ++ThisTokBuf; @@ -884,14 +880,13 @@ StringLiteralParser(const Token *StringToks, unsigned NumStringToks, // Is this a Universal Character Name escape? if (ThisTokBuf[1] == 'u' || ThisTokBuf[1] == 'U') { ProcessUCNEscape(ThisTokBuf, ThisTokEnd, ResultPtr, - hadError, StringToks[i].getLocation(), ThisIsWide, PP, - Complain); + hadError, StringToks[i].getLocation(), PP, Complain); continue; } // Otherwise, this is a non-UCN escape character. Process it. unsigned ResultChar = ProcessCharEscape(ThisTokBuf, ThisTokEnd, hadError, StringToks[i].getLocation(), - ThisIsWide, PP, Complain); + AnyWide, PP, Complain); // Note: our internal rep of wide char tokens is always little-endian. 
*ResultPtr++ = ResultChar & 0xFF; @@ -905,6 +900,8 @@ StringLiteralParser(const Token *StringToks, unsigned NumStringToks, if (Pascal) { ResultBuf[0] = ResultPtr-&ResultBuf[0]-1; + if (AnyWide) + ResultBuf[0] /= wchar_tByteWidth; // Verify that pascal strings aren't too large. if (GetStringLength() > 256 && Complain) { diff --git a/contrib/llvm/tools/clang/lib/Lex/Makefile b/contrib/llvm/tools/clang/lib/Lex/Makefile index bd3c7a8..938b8d5 100644 --- a/contrib/llvm/tools/clang/lib/Lex/Makefile +++ b/contrib/llvm/tools/clang/lib/Lex/Makefile @@ -11,8 +11,8 @@ # ##===----------------------------------------------------------------------===## -LEVEL = ../../../.. -include $(LEVEL)/Makefile.config +CLANG_LEVEL := ../.. +include $(CLANG_LEVEL)/../../Makefile.config LIBRARYNAME := clangLex BUILD_ARCHIVE = 1 @@ -21,7 +21,5 @@ ifeq ($(ARCH),PowerPC) CXX.Flags += -maltivec endif -CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include - -include $(LEVEL)/Makefile.common +include $(CLANG_LEVEL)/Makefile diff --git a/contrib/llvm/tools/clang/lib/Lex/PPCaching.cpp b/contrib/llvm/tools/clang/lib/Lex/PPCaching.cpp index 6aeb6fa..3310659 100644 --- a/contrib/llvm/tools/clang/lib/Lex/PPCaching.cpp +++ b/contrib/llvm/tools/clang/lib/Lex/PPCaching.cpp @@ -45,6 +45,9 @@ void Preprocessor::Backtrack() { } void Preprocessor::CachingLex(Token &Result) { + if (!InCachingLexMode()) + return; + if (CachedLexPos < CachedTokens.size()) { Result = CachedTokens[CachedLexPos++]; return; @@ -60,13 +63,10 @@ void Preprocessor::CachingLex(Token &Result) { return; } - // We should cache the lexed token. - + // Cache the lexed token. 
EnterCachingLexMode(); - if (Result.isNot(tok::eof)) { - CachedTokens.push_back(Result); - ++CachedLexPos; - } + CachedTokens.push_back(Result); + ++CachedLexPos; } void Preprocessor::EnterCachingLexMode() { diff --git a/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp b/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp index 71bb4fc..ebf606e 100644 --- a/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp +++ b/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp @@ -17,6 +17,7 @@ #include "clang/Lex/MacroInfo.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/FileManager.h" +#include "clang/Basic/TargetInfo.h" #include "clang/Lex/LexDiagnostic.h" #include "llvm/ADT/StringSwitch.h" #include "llvm/Support/raw_ostream.h" @@ -510,6 +511,7 @@ static bool HasFeature(const Preprocessor &PP, const IdentifierInfo *II) { //.Case("cxx_nullptr", false) //.Case("cxx_rvalue_references", false) //.Case("cxx_variadic_templates", false) + .Case("tls", PP.getTargetInfo().isTLSSupported()) .Default(false); } diff --git a/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp b/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp index 92332a0..7bf4094 100644 --- a/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp +++ b/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp @@ -27,41 +27,47 @@ PragmaHandler::~PragmaHandler() { } //===----------------------------------------------------------------------===// +// EmptyPragmaHandler Implementation. +//===----------------------------------------------------------------------===// + +EmptyPragmaHandler::EmptyPragmaHandler() {} + +void EmptyPragmaHandler::HandlePragma(Preprocessor &PP, Token &FirstToken) {} + +//===----------------------------------------------------------------------===// // PragmaNamespace Implementation. 
//===----------------------------------------------------------------------===// PragmaNamespace::~PragmaNamespace() { - for (unsigned i = 0, e = Handlers.size(); i != e; ++i) - delete Handlers[i]; + for (llvm::StringMap<PragmaHandler*>::iterator + I = Handlers.begin(), E = Handlers.end(); I != E; ++I) + delete I->second; } /// FindHandler - Check to see if there is already a handler for the /// specified name. If not, return the handler for the null identifier if it /// exists, otherwise return null. If IgnoreNull is true (the default) then /// the null handler isn't returned on failure to match. -PragmaHandler *PragmaNamespace::FindHandler(const IdentifierInfo *Name, +PragmaHandler *PragmaNamespace::FindHandler(llvm::StringRef Name, bool IgnoreNull) const { - PragmaHandler *NullHandler = 0; - for (unsigned i = 0, e = Handlers.size(); i != e; ++i) { - if (Handlers[i]->getName() == Name) - return Handlers[i]; + if (PragmaHandler *Handler = Handlers.lookup(Name)) + return Handler; + return IgnoreNull ? 0 : Handlers.lookup(llvm::StringRef()); +} - if (Handlers[i]->getName() == 0) - NullHandler = Handlers[i]; - } - return IgnoreNull ? 
0 : NullHandler; +void PragmaNamespace::AddPragma(PragmaHandler *Handler) { + assert(!Handlers.lookup(Handler->getName()) && + "A handler with this name is already registered in this namespace"); + llvm::StringMapEntry<PragmaHandler *> &Entry = + Handlers.GetOrCreateValue(Handler->getName()); + Entry.setValue(Handler); } void PragmaNamespace::RemovePragmaHandler(PragmaHandler *Handler) { - for (unsigned i = 0, e = Handlers.size(); i != e; ++i) { - if (Handlers[i] == Handler) { - Handlers[i] = Handlers.back(); - Handlers.pop_back(); - return; - } - } - assert(0 && "Handler not registered in this namespace"); + assert(Handlers.lookup(Handler->getName()) && + "Handler not registered in this namespace"); + Handlers.erase(Handler->getName()); } void PragmaNamespace::HandlePragma(Preprocessor &PP, Token &Tok) { @@ -70,7 +76,10 @@ void PragmaNamespace::HandlePragma(Preprocessor &PP, Token &Tok) { PP.LexUnexpandedToken(Tok); // Get the handler for this token. If there is no handler, ignore the pragma. - PragmaHandler *Handler = FindHandler(Tok.getIdentifierInfo(), false); + PragmaHandler *Handler + = FindHandler(Tok.getIdentifierInfo() ? Tok.getIdentifierInfo()->getName() + : llvm::StringRef(), + /*IgnoreNull=*/false); if (Handler == 0) { PP.Diag(Tok, diag::warn_pragma_ignored); return; @@ -411,31 +420,90 @@ void Preprocessor::HandlePragmaComment(Token &Tok) { Callbacks->PragmaComment(CommentLoc, II, ArgumentString); } +/// HandlePragmaMessage - Handle the microsoft #pragma message extension. The +/// syntax is: +/// #pragma message(messagestring) +/// messagestring is a string, which is fully macro expanded, and permits string +/// concatenation, embedded escape characters etc. See MSDN for more details. +void Preprocessor::HandlePragmaMessage(Token &Tok) { + SourceLocation MessageLoc = Tok.getLocation(); + Lex(Tok); + if (Tok.isNot(tok::l_paren)) { + Diag(MessageLoc, diag::err_pragma_message_malformed); + return; + } + + // Read the string. 
+ Lex(Tok); + + + // We need at least one string. + if (Tok.isNot(tok::string_literal)) { + Diag(Tok.getLocation(), diag::err_pragma_message_malformed); + return; + } + + // String concatenation allows multiple strings, which can even come from + // macro expansion. + // "foo " "bar" "Baz" + llvm::SmallVector<Token, 4> StrToks; + while (Tok.is(tok::string_literal)) { + StrToks.push_back(Tok); + Lex(Tok); + } + + // Concatenate and parse the strings. + StringLiteralParser Literal(&StrToks[0], StrToks.size(), *this); + assert(!Literal.AnyWide && "Didn't allow wide strings in"); + if (Literal.hadError) + return; + if (Literal.Pascal) { + Diag(StrToks[0].getLocation(), diag::err_pragma_message_malformed); + return; + } + + llvm::StringRef MessageString(Literal.GetString(), Literal.GetStringLength()); + + if (Tok.isNot(tok::r_paren)) { + Diag(Tok.getLocation(), diag::err_pragma_message_malformed); + return; + } + Lex(Tok); // eat the r_paren. + + if (Tok.isNot(tok::eom)) { + Diag(Tok.getLocation(), diag::err_pragma_message_malformed); + return; + } + + // Output the message. + Diag(MessageLoc, diag::warn_pragma_message) << MessageString; + // If the pragma is lexically sound, notify any interested PPCallbacks. + if (Callbacks) + Callbacks->PragmaMessage(MessageLoc, MessageString); +} /// AddPragmaHandler - Add the specified pragma handler to the preprocessor. /// If 'Namespace' is non-null, then it is a token required to exist on the /// pragma line before the pragma string starts, e.g. "STDC" or "GCC". -void Preprocessor::AddPragmaHandler(const char *Namespace, +void Preprocessor::AddPragmaHandler(llvm::StringRef Namespace, PragmaHandler *Handler) { PragmaNamespace *InsertNS = PragmaHandlers; // If this is specified to be in a namespace, step down into it. 
- if (Namespace) { - IdentifierInfo *NSID = getIdentifierInfo(Namespace); - + if (!Namespace.empty()) { // If there is already a pragma handler with the name of this namespace, // we either have an error (directive with the same name as a namespace) or // we already have the namespace to insert into. - if (PragmaHandler *Existing = PragmaHandlers->FindHandler(NSID)) { + if (PragmaHandler *Existing = PragmaHandlers->FindHandler(Namespace)) { InsertNS = Existing->getIfNamespace(); assert(InsertNS != 0 && "Cannot have a pragma namespace and pragma" " handler with the same name!"); } else { // Otherwise, this namespace doesn't exist yet, create and insert the // handler for it. - InsertNS = new PragmaNamespace(NSID); + InsertNS = new PragmaNamespace(Namespace); PragmaHandlers->AddPragma(InsertNS); } } @@ -450,14 +518,13 @@ void Preprocessor::AddPragmaHandler(const char *Namespace, /// preprocessor. If \arg Namespace is non-null, then it should be the /// namespace that \arg Handler was added to. It is an error to remove /// a handler that has not been registered. -void Preprocessor::RemovePragmaHandler(const char *Namespace, +void Preprocessor::RemovePragmaHandler(llvm::StringRef Namespace, PragmaHandler *Handler) { PragmaNamespace *NS = PragmaHandlers; // If this is specified to be in a namespace, step down into it. - if (Namespace) { - IdentifierInfo *NSID = getIdentifierInfo(Namespace); - PragmaHandler *Existing = PragmaHandlers->FindHandler(NSID); + if (!Namespace.empty()) { + PragmaHandler *Existing = PragmaHandlers->FindHandler(Namespace); assert(Existing && "Namespace containing handler does not exist!"); NS = Existing->getIfNamespace(); @@ -475,7 +542,7 @@ void Preprocessor::RemovePragmaHandler(const char *Namespace, namespace { /// PragmaOnceHandler - "#pragma once" marks the file as atomically included. 
struct PragmaOnceHandler : public PragmaHandler { - PragmaOnceHandler(const IdentifierInfo *OnceID) : PragmaHandler(OnceID) {} + PragmaOnceHandler() : PragmaHandler("once") {} virtual void HandlePragma(Preprocessor &PP, Token &OnceTok) { PP.CheckEndOfDirective("pragma once"); PP.HandlePragmaOnce(OnceTok); @@ -485,7 +552,7 @@ struct PragmaOnceHandler : public PragmaHandler { /// PragmaMarkHandler - "#pragma mark ..." is ignored by the compiler, and the /// rest of the line is not lexed. struct PragmaMarkHandler : public PragmaHandler { - PragmaMarkHandler(const IdentifierInfo *MarkID) : PragmaHandler(MarkID) {} + PragmaMarkHandler() : PragmaHandler("mark") {} virtual void HandlePragma(Preprocessor &PP, Token &MarkTok) { PP.HandlePragmaMark(); } @@ -493,7 +560,7 @@ struct PragmaMarkHandler : public PragmaHandler { /// PragmaPoisonHandler - "#pragma poison x" marks x as not usable. struct PragmaPoisonHandler : public PragmaHandler { - PragmaPoisonHandler(const IdentifierInfo *ID) : PragmaHandler(ID) {} + PragmaPoisonHandler() : PragmaHandler("poison") {} virtual void HandlePragma(Preprocessor &PP, Token &PoisonTok) { PP.HandlePragmaPoison(PoisonTok); } @@ -502,14 +569,14 @@ struct PragmaPoisonHandler : public PragmaHandler { /// PragmaSystemHeaderHandler - "#pragma system_header" marks the current file /// as a system header, which silences warnings in it. 
struct PragmaSystemHeaderHandler : public PragmaHandler { - PragmaSystemHeaderHandler(const IdentifierInfo *ID) : PragmaHandler(ID) {} + PragmaSystemHeaderHandler() : PragmaHandler("system_header") {} virtual void HandlePragma(Preprocessor &PP, Token &SHToken) { PP.HandlePragmaSystemHeader(SHToken); PP.CheckEndOfDirective("pragma"); } }; struct PragmaDependencyHandler : public PragmaHandler { - PragmaDependencyHandler(const IdentifierInfo *ID) : PragmaHandler(ID) {} + PragmaDependencyHandler() : PragmaHandler("dependency") {} virtual void HandlePragma(Preprocessor &PP, Token &DepToken) { PP.HandlePragmaDependency(DepToken); } @@ -523,9 +590,9 @@ struct PragmaDiagnosticHandler : public PragmaHandler { private: const bool ClangMode; public: - PragmaDiagnosticHandler(const IdentifierInfo *ID, - const bool clangMode) : PragmaHandler(ID), - ClangMode(clangMode) {} + explicit PragmaDiagnosticHandler(const bool clangMode) + : PragmaHandler("diagnostic"), ClangMode(clangMode) {} + virtual void HandlePragma(Preprocessor &PP, Token &DiagToken) { Token Tok; PP.LexUnexpandedToken(Tok); @@ -618,12 +685,20 @@ public: /// PragmaCommentHandler - "#pragma comment ...". struct PragmaCommentHandler : public PragmaHandler { - PragmaCommentHandler(const IdentifierInfo *ID) : PragmaHandler(ID) {} + PragmaCommentHandler() : PragmaHandler("comment") {} virtual void HandlePragma(Preprocessor &PP, Token &CommentTok) { PP.HandlePragmaComment(CommentTok); } }; +/// PragmaMessageHandler - "#pragma message("...")". +struct PragmaMessageHandler : public PragmaHandler { + PragmaMessageHandler() : PragmaHandler("message") {} + virtual void HandlePragma(Preprocessor &PP, Token &CommentTok) { + PP.HandlePragmaMessage(CommentTok); + } +}; + // Pragma STDC implementations. enum STDCSetting { @@ -660,7 +735,7 @@ static STDCSetting LexOnOffSwitch(Preprocessor &PP) { /// PragmaSTDC_FP_CONTRACTHandler - "#pragma STDC FP_CONTRACT ...". 
struct PragmaSTDC_FP_CONTRACTHandler : public PragmaHandler { - PragmaSTDC_FP_CONTRACTHandler(const IdentifierInfo *ID) : PragmaHandler(ID) {} + PragmaSTDC_FP_CONTRACTHandler() : PragmaHandler("FP_CONTRACT") {} virtual void HandlePragma(Preprocessor &PP, Token &Tok) { // We just ignore the setting of FP_CONTRACT. Since we don't do contractions // at all, our default is OFF and setting it to ON is an optimization hint @@ -672,7 +747,7 @@ struct PragmaSTDC_FP_CONTRACTHandler : public PragmaHandler { /// PragmaSTDC_FENV_ACCESSHandler - "#pragma STDC FENV_ACCESS ...". struct PragmaSTDC_FENV_ACCESSHandler : public PragmaHandler { - PragmaSTDC_FENV_ACCESSHandler(const IdentifierInfo *ID) : PragmaHandler(ID) {} + PragmaSTDC_FENV_ACCESSHandler() : PragmaHandler("FENV_ACCESS") {} virtual void HandlePragma(Preprocessor &PP, Token &Tok) { if (LexOnOffSwitch(PP) == STDC_ON) PP.Diag(Tok, diag::warn_stdc_fenv_access_not_supported); @@ -681,8 +756,8 @@ struct PragmaSTDC_FENV_ACCESSHandler : public PragmaHandler { /// PragmaSTDC_CX_LIMITED_RANGEHandler - "#pragma STDC CX_LIMITED_RANGE ...". struct PragmaSTDC_CX_LIMITED_RANGEHandler : public PragmaHandler { - PragmaSTDC_CX_LIMITED_RANGEHandler(const IdentifierInfo *ID) - : PragmaHandler(ID) {} + PragmaSTDC_CX_LIMITED_RANGEHandler() + : PragmaHandler("CX_LIMITED_RANGE") {} virtual void HandlePragma(Preprocessor &PP, Token &Tok) { LexOnOffSwitch(PP); } @@ -690,7 +765,7 @@ struct PragmaSTDC_CX_LIMITED_RANGEHandler : public PragmaHandler { /// PragmaSTDC_UnknownHandler - "#pragma STDC ...". struct PragmaSTDC_UnknownHandler : public PragmaHandler { - PragmaSTDC_UnknownHandler() : PragmaHandler(0) {} + PragmaSTDC_UnknownHandler() {} virtual void HandlePragma(Preprocessor &PP, Token &UnknownTok) { // C99 6.10.6p2, unknown forms are not allowed. 
PP.Diag(UnknownTok, diag::ext_stdc_pragma_ignored); @@ -703,38 +778,28 @@ struct PragmaSTDC_UnknownHandler : public PragmaHandler { /// RegisterBuiltinPragmas - Install the standard preprocessor pragmas: /// #pragma GCC poison/system_header/dependency and #pragma once. void Preprocessor::RegisterBuiltinPragmas() { - AddPragmaHandler(0, new PragmaOnceHandler(getIdentifierInfo("once"))); - AddPragmaHandler(0, new PragmaMarkHandler(getIdentifierInfo("mark"))); + AddPragmaHandler(new PragmaOnceHandler()); + AddPragmaHandler(new PragmaMarkHandler()); // #pragma GCC ... - AddPragmaHandler("GCC", new PragmaPoisonHandler(getIdentifierInfo("poison"))); - AddPragmaHandler("GCC", new PragmaSystemHeaderHandler( - getIdentifierInfo("system_header"))); - AddPragmaHandler("GCC", new PragmaDependencyHandler( - getIdentifierInfo("dependency"))); - AddPragmaHandler("GCC", new PragmaDiagnosticHandler( - getIdentifierInfo("diagnostic"), - false)); + AddPragmaHandler("GCC", new PragmaPoisonHandler()); + AddPragmaHandler("GCC", new PragmaSystemHeaderHandler()); + AddPragmaHandler("GCC", new PragmaDependencyHandler()); + AddPragmaHandler("GCC", new PragmaDiagnosticHandler(false)); // #pragma clang ... 
- AddPragmaHandler("clang", new PragmaPoisonHandler( - getIdentifierInfo("poison"))); - AddPragmaHandler("clang", new PragmaSystemHeaderHandler( - getIdentifierInfo("system_header"))); - AddPragmaHandler("clang", new PragmaDependencyHandler( - getIdentifierInfo("dependency"))); - AddPragmaHandler("clang", new PragmaDiagnosticHandler( - getIdentifierInfo("diagnostic"), - true)); - - AddPragmaHandler("STDC", new PragmaSTDC_FP_CONTRACTHandler( - getIdentifierInfo("FP_CONTRACT"))); - AddPragmaHandler("STDC", new PragmaSTDC_FENV_ACCESSHandler( - getIdentifierInfo("FENV_ACCESS"))); - AddPragmaHandler("STDC", new PragmaSTDC_CX_LIMITED_RANGEHandler( - getIdentifierInfo("CX_LIMITED_RANGE"))); + AddPragmaHandler("clang", new PragmaPoisonHandler()); + AddPragmaHandler("clang", new PragmaSystemHeaderHandler()); + AddPragmaHandler("clang", new PragmaDependencyHandler()); + AddPragmaHandler("clang", new PragmaDiagnosticHandler(true)); + + AddPragmaHandler("STDC", new PragmaSTDC_FP_CONTRACTHandler()); + AddPragmaHandler("STDC", new PragmaSTDC_FENV_ACCESSHandler()); + AddPragmaHandler("STDC", new PragmaSTDC_CX_LIMITED_RANGEHandler()); AddPragmaHandler("STDC", new PragmaSTDC_UnknownHandler()); // MS extensions. - if (Features.Microsoft) - AddPragmaHandler(0, new PragmaCommentHandler(getIdentifierInfo("comment"))); + if (Features.Microsoft) { + AddPragmaHandler(new PragmaCommentHandler()); + AddPragmaHandler(new PragmaMessageHandler()); + } } diff --git a/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp b/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp index ce6d9ab..51f7293 100644 --- a/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp +++ b/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp @@ -87,7 +87,7 @@ Preprocessor::Preprocessor(Diagnostic &diags, const LangOptions &opts, (Ident__VA_ARGS__ = getIdentifierInfo("__VA_ARGS__"))->setIsPoisoned(); // Initialize the pragma handlers. 
- PragmaHandlers = new PragmaNamespace(0); + PragmaHandlers = new PragmaNamespace(llvm::StringRef()); RegisterBuiltinPragmas(); // Initialize builtin macros like __LINE__ and friends. @@ -113,6 +113,14 @@ Preprocessor::~Preprocessor() { I->second->Destroy(BP); I->first->setHasMacroDefinition(false); } + for (std::vector<MacroInfo*>::iterator I = MICache.begin(), + E = MICache.end(); I != E; ++I) { + // We don't need to free the MacroInfo objects directly. These + // will be released when the BumpPtrAllocator 'BP' object gets + // destroyed. We still need to run the dtor, however, to free + // memory alocated by MacroInfo. + (*I)->Destroy(BP); + } // Free any cached macro expanders. for (unsigned i = 0, e = NumCachedTokenLexers; i != e; ++i) diff --git a/contrib/llvm/tools/clang/lib/Makefile b/contrib/llvm/tools/clang/lib/Makefile index 538bf43..4fca624 100755 --- a/contrib/llvm/tools/clang/lib/Makefile +++ b/contrib/llvm/tools/clang/lib/Makefile @@ -6,10 +6,10 @@ # License. See LICENSE.TXT for details. # ##===----------------------------------------------------------------------===## -LEVEL = ../../.. +CLANG_LEVEL := .. 
-PARALLEL_DIRS = Headers Runtime Basic Lex Parse AST Sema CodeGen Analysis \ +PARALLEL_DIRS = Headers Basic Lex Parse AST Sema CodeGen Analysis \ Checker Rewrite Frontend Index Driver -include $(LEVEL)/Makefile.common +include $(CLANG_LEVEL)/Makefile diff --git a/contrib/llvm/tools/clang/lib/Parse/AttributeList.cpp b/contrib/llvm/tools/clang/lib/Parse/AttributeList.cpp index 1ebff22..98d5d07 100644 --- a/contrib/llvm/tools/clang/lib/Parse/AttributeList.cpp +++ b/contrib/llvm/tools/clang/lib/Parse/AttributeList.cpp @@ -119,6 +119,7 @@ AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name) { .Case("cf_returns_not_retained", AT_cf_returns_not_retained) .Case("cf_returns_retained", AT_cf_returns_retained) .Case("reqd_work_group_size", AT_reqd_wg_size) + .Case("init_priority", AT_init_priority) .Case("no_instrument_function", AT_no_instrument_function) .Case("thiscall", AT_thiscall) .Case("__cdecl", AT_cdecl) diff --git a/contrib/llvm/tools/clang/lib/Parse/CMakeLists.txt b/contrib/llvm/tools/clang/lib/Parse/CMakeLists.txt index bec1c6e..fafcf77 100644 --- a/contrib/llvm/tools/clang/lib/Parse/CMakeLists.txt +++ b/contrib/llvm/tools/clang/lib/Parse/CMakeLists.txt @@ -18,4 +18,4 @@ add_clang_library(clangParse Parser.cpp ) -add_dependencies(clangParse ClangDiagnosticParse) +add_dependencies(clangParse ClangAttrList ClangDiagnosticParse) diff --git a/contrib/llvm/tools/clang/lib/Parse/DeclSpec.cpp b/contrib/llvm/tools/clang/lib/Parse/DeclSpec.cpp index 5dc08b3..d2cd744 100644 --- a/contrib/llvm/tools/clang/lib/Parse/DeclSpec.cpp +++ b/contrib/llvm/tools/clang/lib/Parse/DeclSpec.cpp @@ -253,7 +253,8 @@ bool DeclSpec::SetTypeSpecWidth(TSW W, SourceLocation Loc, return BadSpecifier(W, (TSW)TypeSpecWidth, PrevSpec, DiagID); TypeSpecWidth = W; TSWLoc = Loc; - if (TypeAltiVecVector && ((TypeSpecWidth == TSW_long) || (TypeSpecWidth == TSW_longlong))) { + if (TypeAltiVecVector && !TypeAltiVecBool && + ((TypeSpecWidth == TSW_long) || (TypeSpecWidth == TSW_longlong))) 
{ PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType); DiagID = diag::warn_vector_long_decl_spec_combination; return true; @@ -290,13 +291,18 @@ bool DeclSpec::SetTypeSpecType(TST T, SourceLocation Loc, DiagID = diag::err_invalid_decl_spec_combination; return true; } + if (TypeAltiVecVector && (T == TST_bool) && !TypeAltiVecBool) { + TypeAltiVecBool = true; + TSTLoc = Loc; + return false; + } TypeSpecType = T; TypeRep = Rep; TSTLoc = Loc; TypeSpecOwned = Owned; - if (TypeAltiVecVector && (TypeSpecType == TST_double)) { + if (TypeAltiVecVector && !TypeAltiVecBool && (TypeSpecType == TST_double)) { PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType); - DiagID = diag::err_invalid_vector_double_decl_spec_combination; + DiagID = diag::err_invalid_vector_decl_spec; return true; } return false; @@ -316,14 +322,12 @@ bool DeclSpec::SetTypeAltiVecVector(bool isAltiVecVector, SourceLocation Loc, bool DeclSpec::SetTypeAltiVecPixel(bool isAltiVecPixel, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID) { - if (!TypeAltiVecVector || (TypeSpecType != TST_unspecified)) { + if (!TypeAltiVecVector || TypeAltiVecPixel || + (TypeSpecType != TST_unspecified)) { PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType); DiagID = diag::err_invalid_pixel_decl_spec_combination; return true; } - TypeSpecType = TST_int; - TypeSpecSign = TSS_unsigned; - TypeSpecWidth = TSW_short; TypeAltiVecPixel = isAltiVecPixel; TSTLoc = Loc; return false; @@ -438,6 +442,42 @@ void DeclSpec::Finish(Diagnostic &D, Preprocessor &PP) { // Check the type specifier components first. SourceManager &SrcMgr = PP.getSourceManager(); + // Validate and finalize AltiVec vector declspec. + if (TypeAltiVecVector) { + if (TypeAltiVecBool) { + // Sign specifiers are not allowed with vector bool. 
(PIM 2.1) + if (TypeSpecSign != TSS_unspecified) { + Diag(D, TSSLoc, SrcMgr, diag::err_invalid_vector_bool_decl_spec) + << getSpecifierName((TSS)TypeSpecSign); + } + + // Only char/int are valid with vector bool. (PIM 2.1) + if (((TypeSpecType != TST_unspecified) && (TypeSpecType != TST_char) && + (TypeSpecType != TST_int)) || TypeAltiVecPixel) { + Diag(D, TSTLoc, SrcMgr, diag::err_invalid_vector_bool_decl_spec) + << (TypeAltiVecPixel ? "__pixel" : + getSpecifierName((TST)TypeSpecType)); + } + + // Only 'short' is valid with vector bool. (PIM 2.1) + if ((TypeSpecWidth != TSW_unspecified) && (TypeSpecWidth != TSW_short)) + Diag(D, TSWLoc, SrcMgr, diag::err_invalid_vector_bool_decl_spec) + << getSpecifierName((TSW)TypeSpecWidth); + + // Elements of vector bool are interpreted as unsigned. (PIM 2.1) + if ((TypeSpecType == TST_char) || (TypeSpecType == TST_int) || + (TypeSpecWidth != TSW_unspecified)) + TypeSpecSign = TSS_unsigned; + } + + if (TypeAltiVecPixel) { + //TODO: perform validation + TypeSpecType = TST_int; + TypeSpecSign = TSS_unsigned; + TypeSpecWidth = TSW_short; + } + } + // signed/unsigned are only valid with int/char/wchar_t. if (TypeSpecSign != TSS_unspecified) { if (TypeSpecType == TST_unspecified) @@ -513,7 +553,6 @@ void DeclSpec::Finish(Diagnostic &D, Preprocessor &PP) { ClearStorageClassSpecs(); } - // Okay, now we can infer the real type. // TODO: return "auto function" and other bad things based on the real type. diff --git a/contrib/llvm/tools/clang/lib/Parse/Makefile b/contrib/llvm/tools/clang/lib/Parse/Makefile index 6a5540f..238e02d 100644 --- a/contrib/llvm/tools/clang/lib/Parse/Makefile +++ b/contrib/llvm/tools/clang/lib/Parse/Makefile @@ -11,11 +11,9 @@ # ##===----------------------------------------------------------------------===## -LEVEL = ../../../.. +CLANG_LEVEL := ../.. 
LIBRARYNAME := clangParse BUILD_ARCHIVE = 1 -CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include - -include $(LEVEL)/Makefile.common +include $(CLANG_LEVEL)/Makefile diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp index 5405c0c..62a7ecd 100644 --- a/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp +++ b/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp @@ -35,10 +35,10 @@ Parser::ParseCXXInlineMethodDef(AccessSpecifier AS, Declarator &D, DeclPtrTy FnD; if (D.getDeclSpec().isFriendSpecified()) // FIXME: Friend templates - FnD = Actions.ActOnFriendFunctionDecl(CurScope, D, true, + FnD = Actions.ActOnFriendFunctionDecl(getCurScope(), D, true, move(TemplateParams)); else // FIXME: pass template information through - FnD = Actions.ActOnCXXMemberDeclarator(CurScope, AS, D, + FnD = Actions.ActOnCXXMemberDeclarator(getCurScope(), AS, D, move(TemplateParams), 0, 0, /*IsDefinition*/true); @@ -48,7 +48,7 @@ Parser::ParseCXXInlineMethodDef(AccessSpecifier AS, Declarator &D, getCurrentClass().MethodDefs.push_back(LexedMethod(FnD)); getCurrentClass().MethodDefs.back().TemplateScope - = CurScope->isTemplateParamScope(); + = getCurScope()->isTemplateParamScope(); CachedTokens &Toks = getCurrentClass().MethodDefs.back().Toks; tok::TokenKind kind = Tok.getKind(); @@ -95,7 +95,7 @@ void Parser::ParseLexedMethodDeclarations(ParsingClass &Class) { bool HasTemplateScope = !Class.TopLevelClass && Class.TemplateScope; ParseScope TemplateScope(this, Scope::TemplateParamScope, HasTemplateScope); if (HasTemplateScope) - Actions.ActOnReenterTemplateScope(CurScope, Class.TagOrTemplate); + Actions.ActOnReenterTemplateScope(getCurScope(), Class.TagOrTemplate); // The current scope is still active if we're the top-level class. // Otherwise we'll need to push and enter a new scope. 
@@ -103,7 +103,7 @@ void Parser::ParseLexedMethodDeclarations(ParsingClass &Class) { ParseScope ClassScope(this, Scope::ClassScope|Scope::DeclScope, HasClassScope); if (HasClassScope) - Actions.ActOnStartDelayedMemberDeclarations(CurScope, Class.TagOrTemplate); + Actions.ActOnStartDelayedMemberDeclarations(getCurScope(), Class.TagOrTemplate); for (; !Class.MethodDecls.empty(); Class.MethodDecls.pop_front()) { LateParsedMethodDeclaration &LM = Class.MethodDecls.front(); @@ -111,10 +111,10 @@ void Parser::ParseLexedMethodDeclarations(ParsingClass &Class) { // If this is a member template, introduce the template parameter scope. ParseScope TemplateScope(this, Scope::TemplateParamScope, LM.TemplateScope); if (LM.TemplateScope) - Actions.ActOnReenterTemplateScope(CurScope, LM.Method); + Actions.ActOnReenterTemplateScope(getCurScope(), LM.Method); // Start the delayed C++ method declaration - Actions.ActOnStartDelayedCXXMethodDeclaration(CurScope, LM.Method); + Actions.ActOnStartDelayedCXXMethodDeclaration(getCurScope(), LM.Method); // Introduce the parameters into scope and parse their default // arguments. @@ -122,7 +122,7 @@ void Parser::ParseLexedMethodDeclarations(ParsingClass &Class) { Scope::FunctionPrototypeScope|Scope::DeclScope); for (unsigned I = 0, N = LM.DefaultArgs.size(); I != N; ++I) { // Introduce the parameter into scope. - Actions.ActOnDelayedCXXMethodParameter(CurScope, LM.DefaultArgs[I].Param); + Actions.ActOnDelayedCXXMethodParameter(getCurScope(), LM.DefaultArgs[I].Param); if (CachedTokens *Toks = LM.DefaultArgs[I].Toks) { // Save the current token position. @@ -151,7 +151,7 @@ void Parser::ParseLexedMethodDeclarations(ParsingClass &Class) { "ParseAssignmentExpression went over the default arg tokens!"); // There could be leftover tokens (e.g. because of an error). // Skip through until we reach the original token position. 
- while (Tok.getLocation() != origLoc) + while (Tok.getLocation() != origLoc && Tok.isNot(tok::eof)) ConsumeAnyToken(); delete Toks; @@ -161,14 +161,14 @@ void Parser::ParseLexedMethodDeclarations(ParsingClass &Class) { PrototypeScope.Exit(); // Finish the delayed C++ method declaration. - Actions.ActOnFinishDelayedCXXMethodDeclaration(CurScope, LM.Method); + Actions.ActOnFinishDelayedCXXMethodDeclaration(getCurScope(), LM.Method); } for (unsigned I = 0, N = Class.NestedClasses.size(); I != N; ++I) ParseLexedMethodDeclarations(*Class.NestedClasses[I]); if (HasClassScope) - Actions.ActOnFinishDelayedMemberDeclarations(CurScope, Class.TagOrTemplate); + Actions.ActOnFinishDelayedMemberDeclarations(getCurScope(), Class.TagOrTemplate); } /// ParseLexedMethodDefs - We finished parsing the member specification of a top @@ -178,7 +178,7 @@ void Parser::ParseLexedMethodDefs(ParsingClass &Class) { bool HasTemplateScope = !Class.TopLevelClass && Class.TemplateScope; ParseScope TemplateScope(this, Scope::TemplateParamScope, HasTemplateScope); if (HasTemplateScope) - Actions.ActOnReenterTemplateScope(CurScope, Class.TagOrTemplate); + Actions.ActOnReenterTemplateScope(getCurScope(), Class.TagOrTemplate); bool HasClassScope = !Class.TopLevelClass; ParseScope ClassScope(this, Scope::ClassScope|Scope::DeclScope, @@ -190,7 +190,7 @@ void Parser::ParseLexedMethodDefs(ParsingClass &Class) { // If this is a member template, introduce the template parameter scope. ParseScope TemplateScope(this, Scope::TemplateParamScope, LM.TemplateScope); if (LM.TemplateScope) - Actions.ActOnReenterTemplateScope(CurScope, LM.D); + Actions.ActOnReenterTemplateScope(getCurScope(), LM.D); // Save the current token position. SourceLocation origLoc = Tok.getLocation(); @@ -209,15 +209,17 @@ void Parser::ParseLexedMethodDefs(ParsingClass &Class) { // Parse the method body. Function body parsing code is similar enough // to be re-used for method bodies as well. 
ParseScope FnScope(this, Scope::FnScope|Scope::DeclScope); - Actions.ActOnStartOfFunctionDef(CurScope, LM.D); + Actions.ActOnStartOfFunctionDef(getCurScope(), LM.D); if (Tok.is(tok::kw_try)) { ParseFunctionTryBlock(LM.D); assert(!PP.getSourceManager().isBeforeInTranslationUnit(origLoc, Tok.getLocation()) && "ParseFunctionTryBlock went over the cached tokens!"); - assert(Tok.getLocation() == origLoc && - "ParseFunctionTryBlock left tokens in the token stream!"); + // There could be leftover tokens (e.g. because of an error). + // Skip through until we reach the original token position. + while (Tok.getLocation() != origLoc && Tok.isNot(tok::eof)) + ConsumeAnyToken(); continue; } if (Tok.is(tok::colon)) { @@ -232,11 +234,19 @@ void Parser::ParseLexedMethodDefs(ParsingClass &Class) { Actions.ActOnDefaultCtorInitializers(LM.D); ParseFunctionStatementBody(LM.D); - assert(!PP.getSourceManager().isBeforeInTranslationUnit(origLoc, - Tok.getLocation()) && - "We consumed more than the cached tokens!"); - assert(Tok.getLocation() == origLoc && - "Tokens were left in the token stream!"); + + if (Tok.getLocation() != origLoc) { + // Due to parsing error, we either went over the cached tokens or + // there are still cached tokens left. If it's the latter case skip the + // leftover tokens. + // Since this is an uncommon situation that should be avoided, use the + // expensive isBeforeInTranslationUnit call. 
+ if (PP.getSourceManager().isBeforeInTranslationUnit(Tok.getLocation(), + origLoc)) + while (Tok.getLocation() != origLoc && Tok.isNot(tok::eof)) + ConsumeAnyToken(); + + } } for (unsigned I = 0, N = Class.NestedClasses.size(); I != N; ++I) diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp index 3e7d4a1..62ef3ec 100644 --- a/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp +++ b/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp @@ -42,7 +42,7 @@ Action::TypeResult Parser::ParseTypeName(SourceRange *Range) { if (DeclaratorInfo.isInvalidType()) return true; - return Actions.ActOnTypeName(CurScope, DeclaratorInfo); + return Actions.ActOnTypeName(getCurScope(), DeclaratorInfo); } /// ParseGNUAttributes - Parse a non-empty attributes list. @@ -309,6 +309,8 @@ AttributeList* Parser::ParseMicrosoftTypeAttributes(AttributeList *CurrAttr) { Parser::DeclGroupPtrTy Parser::ParseDeclaration(unsigned Context, SourceLocation &DeclEnd, CXX0XAttributeList Attr) { + ParenBraceBracketBalancer BalancerRAIIObj(*this); + DeclPtrTy SingleDecl; switch (Tok.getKind()) { case tok::kw_template: @@ -364,7 +366,7 @@ Parser::DeclGroupPtrTy Parser::ParseSimpleDeclaration(unsigned Context, // declaration-specifiers init-declarator-list[opt] ';' if (Tok.is(tok::semi)) { if (RequireSemi) ConsumeToken(); - DeclPtrTy TheDecl = Actions.ParsedFreeStandingDeclSpec(CurScope, AS_none, + DeclPtrTy TheDecl = Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS_none, DS); DS.complete(TheDecl); return Actions.ConvertDeclToDeclGroup(TheDecl); @@ -393,12 +395,14 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS, return DeclGroupPtrTy(); } - if (AllowFunctionDefinitions && D.isFunctionDeclarator()) { - if (isDeclarationAfterDeclarator()) { - // Fall though. We have to check this first, though, because - // __attribute__ might be the start of a function definition in - // (extended) K&R C. 
- } else if (isStartOfFunctionDefinition()) { + // Check to see if we have a function *definition* which must have a body. + if (AllowFunctionDefinitions && D.isFunctionDeclarator() && + // Look at the next token to make sure that this isn't a function + // declaration. We have to check this because __attribute__ might be the + // start of a function definition in GCC-extended K&R C. + !isDeclarationAfterDeclarator()) { + + if (isStartOfFunctionDefinition(D)) { if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) { Diag(Tok, diag::err_function_declared_typedef); @@ -408,6 +412,14 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS, DeclPtrTy TheDecl = ParseFunctionDefinition(D); return Actions.ConvertDeclToDeclGroup(TheDecl); + } + + if (isDeclarationSpecifier()) { + // If there is an invalid declaration specifier right after the function + // prototype, then we must be in a missing semicolon case where this isn't + // actually a body. Just fall through into the code that handles it as a + // prototype, and let the top-level code handle the erroneous declspec + // where it would otherwise expect a comma or semicolon. } else { Diag(Tok, diag::err_expected_fn_body); SkipUntil(tok::semi); @@ -459,12 +471,17 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS, Context == Declarator::FileContext ? diag::err_invalid_token_after_toplevel_declarator : diag::err_expected_semi_declaration)) { - SkipUntil(tok::r_brace, true, true); - if (Tok.is(tok::semi)) - ConsumeToken(); + // Okay, there was no semicolon and one was expected. If we see a + // declaration specifier, just assume it was missing and continue parsing. + // Otherwise things are very confused and we skip to recover. 
+ if (!isDeclarationSpecifier()) { + SkipUntil(tok::r_brace, true, true); + if (Tok.is(tok::semi)) + ConsumeToken(); + } } - return Actions.FinalizeDeclaratorGroup(CurScope, DS, + return Actions.FinalizeDeclaratorGroup(getCurScope(), DS, DeclsInGroup.data(), DeclsInGroup.size()); } @@ -516,12 +533,12 @@ Parser::DeclPtrTy Parser::ParseDeclarationAfterDeclarator(Declarator &D, DeclPtrTy ThisDecl; switch (TemplateInfo.Kind) { case ParsedTemplateInfo::NonTemplate: - ThisDecl = Actions.ActOnDeclarator(CurScope, D); + ThisDecl = Actions.ActOnDeclarator(getCurScope(), D); break; case ParsedTemplateInfo::Template: case ParsedTemplateInfo::ExplicitSpecialization: - ThisDecl = Actions.ActOnTemplateDeclarator(CurScope, + ThisDecl = Actions.ActOnTemplateDeclarator(getCurScope(), Action::MultiTemplateParamsArg(Actions, TemplateInfo.TemplateParams->data(), TemplateInfo.TemplateParams->size()), @@ -530,7 +547,7 @@ Parser::DeclPtrTy Parser::ParseDeclarationAfterDeclarator(Declarator &D, case ParsedTemplateInfo::ExplicitInstantiation: { Action::DeclResult ThisRes - = Actions.ActOnExplicitInstantiation(CurScope, + = Actions.ActOnExplicitInstantiation(getCurScope(), TemplateInfo.ExternLoc, TemplateInfo.TemplateLoc, D); @@ -553,13 +570,20 @@ Parser::DeclPtrTy Parser::ParseDeclarationAfterDeclarator(Declarator &D, } else { if (getLang().CPlusPlus && D.getCXXScopeSpec().isSet()) { EnterScope(0); - Actions.ActOnCXXEnterDeclInitializer(CurScope, ThisDecl); + Actions.ActOnCXXEnterDeclInitializer(getCurScope(), ThisDecl); } + if (Tok.is(tok::code_completion)) { + Actions.CodeCompleteInitializer(getCurScope(), ThisDecl); + ConsumeCodeCompletionToken(); + SkipUntil(tok::comma, true, true); + return ThisDecl; + } + OwningExprResult Init(ParseInitializer()); if (getLang().CPlusPlus && D.getCXXScopeSpec().isSet()) { - Actions.ActOnCXXExitDeclInitializer(CurScope, ThisDecl); + Actions.ActOnCXXExitDeclInitializer(getCurScope(), ThisDecl); ExitScope(); } @@ -577,14 +601,14 @@ Parser::DeclPtrTy 
Parser::ParseDeclarationAfterDeclarator(Declarator &D, if (getLang().CPlusPlus && D.getCXXScopeSpec().isSet()) { EnterScope(0); - Actions.ActOnCXXEnterDeclInitializer(CurScope, ThisDecl); + Actions.ActOnCXXEnterDeclInitializer(getCurScope(), ThisDecl); } if (ParseExpressionList(Exprs, CommaLocs)) { SkipUntil(tok::r_paren); if (getLang().CPlusPlus && D.getCXXScopeSpec().isSet()) { - Actions.ActOnCXXExitDeclInitializer(CurScope, ThisDecl); + Actions.ActOnCXXExitDeclInitializer(getCurScope(), ThisDecl); ExitScope(); } } else { @@ -595,7 +619,7 @@ Parser::DeclPtrTy Parser::ParseDeclarationAfterDeclarator(Declarator &D, "Unexpected number of commas!"); if (getLang().CPlusPlus && D.getCXXScopeSpec().isSet()) { - Actions.ActOnCXXExitDeclInitializer(CurScope, ThisDecl); + Actions.ActOnCXXExitDeclInitializer(getCurScope(), ThisDecl); ExitScope(); } @@ -723,7 +747,7 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS, const char *TagName = 0; tok::TokenKind TagKind = tok::unknown; - switch (Actions.isTagName(*Tok.getIdentifierInfo(), CurScope)) { + switch (Actions.isTagName(*Tok.getIdentifierInfo(), getCurScope())) { default: break; case DeclSpec::TST_enum: TagName="enum" ;TagKind=tok::kw_enum ;break; case DeclSpec::TST_union: TagName="union" ;TagKind=tok::kw_union ;break; @@ -749,7 +773,7 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS, // diagnostic and attempt to recover. Action::TypeTy *T = 0; if (Actions.DiagnoseUnknownTypeName(*Tok.getIdentifierInfo(), Loc, - CurScope, SS, T)) { + getCurScope(), SS, T)) { // The action emitted a diagnostic, so we don't have to. if (T) { // The action has suggested that the type T could be used. 
Set that as @@ -838,7 +862,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS, else if (ObjCImpDecl) CCC = Action::CCC_ObjCImplementation; - Actions.CodeCompleteOrdinaryName(CurScope, CCC); + Actions.CodeCompleteOrdinaryName(getCurScope(), CCC); ConsumeCodeCompletionToken(); } @@ -908,7 +932,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS, if ((DSContext == DSC_top_level || (DSContext == DSC_class && DS.isFriendSpecified())) && TemplateId->Name && - Actions.isCurrentClassName(*TemplateId->Name, CurScope, &SS)) { + Actions.isCurrentClassName(*TemplateId->Name, getCurScope(), &SS)) { if (isConstructorDeclarator()) { // The user meant this to be an out-of-line constructor // definition, but template arguments are not allowed @@ -954,7 +978,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS, // check whether this is a constructor declaration. if ((DSContext == DSC_top_level || (DSContext == DSC_class && DS.isFriendSpecified())) && - Actions.isCurrentClassName(*Next.getIdentifierInfo(), CurScope, + Actions.isCurrentClassName(*Next.getIdentifierInfo(), getCurScope(), &SS)) { if (isConstructorDeclarator()) goto DoneWithDeclSpec; @@ -970,7 +994,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS, } TypeTy *TypeRep = Actions.getTypeName(*Next.getIdentifierInfo(), - Next.getLocation(), CurScope, &SS); + Next.getLocation(), getCurScope(), &SS); // If the referenced identifier is not a type, then this declspec is // erroneous: We already checked about that it has no type specifier, and @@ -1054,7 +1078,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS, // It has to be available as a typedef too! TypeTy *TypeRep = Actions.getTypeName(*Tok.getIdentifierInfo(), - Tok.getLocation(), CurScope); + Tok.getLocation(), getCurScope()); // If this is not a typedef name, don't parse it as part of the declspec, // it must be an implicit int or an error. 
@@ -1066,7 +1090,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS, // If we're in a context where the identifier could be a class name, // check whether this is a constructor declaration. if (getLang().CPlusPlus && DSContext == DSC_class && - Actions.isCurrentClassName(*Tok.getIdentifierInfo(), CurScope) && + Actions.isCurrentClassName(*Tok.getIdentifierInfo(), getCurScope()) && isConstructorDeclarator()) goto DoneWithDeclSpec; @@ -1114,7 +1138,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS, // constructor name or specialization, check whether this is a // constructor declaration. if (getLang().CPlusPlus && DSContext == DSC_class && - Actions.isCurrentClassName(*TemplateId->Name, CurScope) && + Actions.isCurrentClassName(*TemplateId->Name, getCurScope()) && isConstructorDeclarator()) goto DoneWithDeclSpec; @@ -1677,7 +1701,7 @@ ParseStructDeclaration(DeclSpec &DS, FieldCallback &Fields) { // If there are no declarators, this is a free-standing declaration // specifier. Let the actions module cope with it. if (Tok.is(tok::semi)) { - Actions.ParsedFreeStandingDeclSpec(CurScope, AS_none, DS); + Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS_none, DS); return; } @@ -1753,7 +1777,7 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc, SourceLocation LBraceLoc = ConsumeBrace(); ParseScope StructScope(this, Scope::ClassScope|Scope::DeclScope); - Actions.ActOnTagStartDefinition(CurScope, TagDecl); + Actions.ActOnTagStartDefinition(getCurScope(), TagDecl); // Empty structs are an extension in C (C99 6.7.2.1p7), but are allowed in // C++. @@ -1770,6 +1794,7 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc, // Check for extraneous top-level semicolon. 
if (Tok.is(tok::semi)) { Diag(Tok, diag::ext_extra_struct_semi) + << DeclSpec::getSpecifierName((DeclSpec::TST)TagType) << FixItHint::CreateRemoval(Tok.getLocation()); ConsumeToken(); continue; @@ -1790,7 +1815,7 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc, virtual DeclPtrTy invoke(FieldDeclarator &FD) { // Install the declarator into the current TagDecl. - DeclPtrTy Field = P.Actions.ActOnField(P.CurScope, TagDecl, + DeclPtrTy Field = P.Actions.ActOnField(P.getCurScope(), TagDecl, FD.D.getDeclSpec().getSourceRange().getBegin(), FD.D, FD.BitfieldSize); FieldDecls.push_back(Field); @@ -1814,7 +1839,7 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc, continue; } llvm::SmallVector<DeclPtrTy, 16> Fields; - Actions.ActOnDefs(CurScope, TagDecl, Tok.getLocation(), + Actions.ActOnDefs(getCurScope(), TagDecl, Tok.getLocation(), Tok.getIdentifierInfo(), Fields); FieldDecls.insert(FieldDecls.end(), Fields.begin(), Fields.end()); ConsumeToken(); @@ -1842,12 +1867,12 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc, if (Tok.is(tok::kw___attribute)) AttrList.reset(ParseGNUAttributes()); - Actions.ActOnFields(CurScope, + Actions.ActOnFields(getCurScope(), RecordLoc, TagDecl, FieldDecls.data(), FieldDecls.size(), LBraceLoc, RBraceLoc, AttrList.get()); StructScope.Exit(); - Actions.ActOnTagFinishDefinition(CurScope, TagDecl, RBraceLoc); + Actions.ActOnTagFinishDefinition(getCurScope(), TagDecl, RBraceLoc); } @@ -1869,7 +1894,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS, // Parse the tag portion of this. if (Tok.is(tok::code_completion)) { // Code completion for an enum name. - Actions.CodeCompleteTag(CurScope, DeclSpec::TST_enum); + Actions.CodeCompleteTag(getCurScope(), DeclSpec::TST_enum); ConsumeCodeCompletionToken(); } @@ -1943,7 +1968,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS, SourceLocation TSTLoc = NameLoc.isValid()? 
NameLoc : StartLoc; const char *PrevSpec = 0; unsigned DiagID; - DeclPtrTy TagDecl = Actions.ActOnTag(CurScope, DeclSpec::TST_enum, TUK, + DeclPtrTy TagDecl = Actions.ActOnTag(getCurScope(), DeclSpec::TST_enum, TUK, StartLoc, SS, Name, NameLoc, Attr.get(), AS, Action::MultiTemplateParamsArg(Actions), @@ -1957,7 +1982,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS, return; } - TypeResult Type = Actions.ActOnDependentTag(CurScope, DeclSpec::TST_enum, + TypeResult Type = Actions.ActOnDependentTag(getCurScope(), DeclSpec::TST_enum, TUK, SS, Name, StartLoc, NameLoc); if (Type.isInvalid()) { @@ -2007,13 +2032,13 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS, void Parser::ParseEnumBody(SourceLocation StartLoc, DeclPtrTy EnumDecl) { // Enter the scope of the enum body and start the definition. ParseScope EnumScope(this, Scope::DeclScope); - Actions.ActOnTagStartDefinition(CurScope, EnumDecl); + Actions.ActOnTagStartDefinition(getCurScope(), EnumDecl); SourceLocation LBraceLoc = ConsumeBrace(); // C does not allow an empty enumerator-list, C++ does [dcl.enum]. if (Tok.is(tok::r_brace) && !getLang().CPlusPlus) - Diag(Tok, diag::ext_empty_struct_union_enum) << "enum"; + Diag(Tok, diag::error_empty_enum); llvm::SmallVector<DeclPtrTy, 32> EnumConstantDecls; @@ -2034,7 +2059,7 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, DeclPtrTy EnumDecl) { } // Install the enumerator constant into EnumDecl. 
- DeclPtrTy EnumConstDecl = Actions.ActOnEnumConstant(CurScope, EnumDecl, + DeclPtrTy EnumConstDecl = Actions.ActOnEnumConstant(getCurScope(), EnumDecl, LastEnumConstDecl, IdentLoc, Ident, EqualLoc, @@ -2063,10 +2088,10 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, DeclPtrTy EnumDecl) { Actions.ActOnEnumBody(StartLoc, LBraceLoc, RBraceLoc, EnumDecl, EnumConstantDecls.data(), EnumConstantDecls.size(), - CurScope, Attr.get()); + getCurScope(), Attr.get()); EnumScope.Exit(); - Actions.ActOnTagFinishDefinition(CurScope, EnumDecl, RBraceLoc); + Actions.ActOnTagFinishDefinition(getCurScope(), EnumDecl, RBraceLoc); } /// isTypeSpecifierQualifier - Return true if the current token could be the @@ -2351,7 +2376,7 @@ bool Parser::isConstructorDeclarator() { // If we need to, enter the specified scope. DeclaratorScopeObj DeclScopeObj(*this, SS); - if (SS.isSet() && Actions.ShouldEnterDeclaratorScope(CurScope, SS)) + if (SS.isSet() && Actions.ShouldEnterDeclaratorScope(getCurScope(), SS)) DeclScopeObj.EnterDeclaratorScope(); // Check whether the next token(s) are part of a declaration @@ -2640,7 +2665,7 @@ void Parser::ParseDirectDeclarator(Declarator &D) { } if (D.getCXXScopeSpec().isValid()) { - if (Actions.ShouldEnterDeclaratorScope(CurScope, D.getCXXScopeSpec())) + if (Actions.ShouldEnterDeclaratorScope(getCurScope(), D.getCXXScopeSpec())) // Change the declaration context for name lookup, until this function // is exited (and the declarator has been parsed). DeclScopeObj.EnterDeclaratorScope(); @@ -2699,7 +2724,7 @@ void Parser::ParseDirectDeclarator(Declarator &D) { // scope when parsing the parenthesized declarator, then exited // the scope already. Re-enter the scope, if we need to. 
if (D.getCXXScopeSpec().isSet()) { - if (Actions.ShouldEnterDeclaratorScope(CurScope, D.getCXXScopeSpec())) + if (Actions.ShouldEnterDeclaratorScope(getCurScope(), D.getCXXScopeSpec())) // Change the declaration context for name lookup, until this function // is exited (and the declarator has been parsed). DeclScopeObj.EnterDeclaratorScope(); @@ -3036,7 +3061,7 @@ void Parser::ParseFunctionDeclarator(SourceLocation LParenLoc, Declarator &D, // Inform the actions module about the parameter declarator, so it gets // added to the current scope. - DeclPtrTy Param = Actions.ActOnParamDeclarator(CurScope, ParmDecl); + DeclPtrTy Param = Actions.ActOnParamDeclarator(getCurScope(), ParmDecl); // Parse the default argument, if any. We parse the default // arguments in all dialects; the semantic analysis in @@ -3194,7 +3219,7 @@ void Parser::ParseFunctionDeclaratorIdentifierList(SourceLocation LParenLoc, IdentifierInfo *ParmII = Tok.getIdentifierInfo(); // Reject 'typedef int y; int test(x, y)', but continue parsing. - if (Actions.getTypeName(*ParmII, Tok.getLocation(), CurScope)) + if (Actions.getTypeName(*ParmII, Tok.getLocation(), getCurScope())) Diag(Tok, diag::err_unexpected_typedef_ident) << ParmII; // Verify that the argument identifier has not already been mentioned. 
@@ -3458,7 +3483,7 @@ bool Parser::TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc, default: break; } - } else if (Tok.getIdentifierInfo() == Ident_pixel && + } else if ((Tok.getIdentifierInfo() == Ident_pixel) && DS.isTypeAltiVecVector()) { isInvalid = DS.SetTypeAltiVecPixel(true, Loc, PrevSpec, DiagID); return true; diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp index 479c04c..590ba6c 100644 --- a/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp +++ b/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp @@ -49,7 +49,7 @@ Parser::DeclPtrTy Parser::ParseNamespace(unsigned Context, SourceLocation NamespaceLoc = ConsumeToken(); // eat the 'namespace'. if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteNamespaceDecl(CurScope); + Actions.CodeCompleteNamespaceDecl(getCurScope()); ConsumeCodeCompletionToken(); } @@ -87,9 +87,9 @@ Parser::DeclPtrTy Parser::ParseNamespace(unsigned Context, SourceLocation LBrace = ConsumeBrace(); - if (CurScope->isClassScope() || CurScope->isTemplateParamScope() || - CurScope->isInObjcMethodScope() || CurScope->getBlockParent() || - CurScope->getFnParent()) { + if (getCurScope()->isClassScope() || getCurScope()->isTemplateParamScope() || + getCurScope()->isInObjcMethodScope() || getCurScope()->getBlockParent() || + getCurScope()->getFnParent()) { Diag(LBrace, diag::err_namespace_nonnamespace_scope); SkipUntil(tok::r_brace, false); return DeclPtrTy(); @@ -99,7 +99,7 @@ Parser::DeclPtrTy Parser::ParseNamespace(unsigned Context, ParseScope NamespaceScope(this, Scope::DeclScope); DeclPtrTy NamespcDecl = - Actions.ActOnStartNamespaceDef(CurScope, IdentLoc, Ident, LBrace, + Actions.ActOnStartNamespaceDef(getCurScope(), IdentLoc, Ident, LBrace, AttrList.get()); PrettyStackTraceActionsDecl CrashInfo(NamespcDecl, NamespaceLoc, Actions, @@ -135,7 +135,7 @@ Parser::DeclPtrTy Parser::ParseNamespaceAlias(SourceLocation NamespaceLoc, ConsumeToken(); // eat the '='. 
if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteNamespaceAliasDecl(CurScope); + Actions.CodeCompleteNamespaceAliasDecl(getCurScope()); ConsumeCodeCompletionToken(); } @@ -159,7 +159,7 @@ Parser::DeclPtrTy Parser::ParseNamespaceAlias(SourceLocation NamespaceLoc, ExpectAndConsume(tok::semi, diag::err_expected_semi_after_namespace_name, "", tok::semi); - return Actions.ActOnNamespaceAliasDef(CurScope, NamespaceLoc, AliasLoc, Alias, + return Actions.ActOnNamespaceAliasDef(getCurScope(), NamespaceLoc, AliasLoc, Alias, SS, IdentLoc, Ident); } @@ -184,7 +184,7 @@ Parser::DeclPtrTy Parser::ParseLinkage(ParsingDeclSpec &DS, ParseScope LinkageScope(this, Scope::DeclScope); DeclPtrTy LinkageSpec - = Actions.ActOnStartLinkageSpecification(CurScope, + = Actions.ActOnStartLinkageSpecification(getCurScope(), /*FIXME: */SourceLocation(), Loc, Lang, Tok.is(tok::l_brace)? Tok.getLocation() @@ -197,7 +197,7 @@ Parser::DeclPtrTy Parser::ParseLinkage(ParsingDeclSpec &DS, if (Tok.isNot(tok::l_brace)) { ParseDeclarationOrFunctionDefinition(DS, Attr.AttrList); - return Actions.ActOnFinishLinkageSpecification(CurScope, LinkageSpec, + return Actions.ActOnFinishLinkageSpecification(getCurScope(), LinkageSpec, SourceLocation()); } @@ -216,7 +216,7 @@ Parser::DeclPtrTy Parser::ParseLinkage(ParsingDeclSpec &DS, } SourceLocation RBrace = MatchRHSPunctuation(tok::r_brace, LBrace); - return Actions.ActOnFinishLinkageSpecification(CurScope, LinkageSpec, RBrace); + return Actions.ActOnFinishLinkageSpecification(getCurScope(), LinkageSpec, RBrace); } /// ParseUsingDirectiveOrDeclaration - Parse C++ using using-declaration or @@ -230,7 +230,7 @@ Parser::DeclPtrTy Parser::ParseUsingDirectiveOrDeclaration(unsigned Context, SourceLocation UsingLoc = ConsumeToken(); if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteUsing(CurScope); + Actions.CodeCompleteUsing(getCurScope()); ConsumeCodeCompletionToken(); } @@ -267,7 +267,7 @@ Parser::DeclPtrTy Parser::ParseUsingDirective(unsigned 
Context, SourceLocation NamespcLoc = ConsumeToken(); if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteUsingDirective(CurScope); + Actions.CodeCompleteUsingDirective(getCurScope()); ConsumeCodeCompletionToken(); } @@ -304,7 +304,7 @@ Parser::DeclPtrTy Parser::ParseUsingDirective(unsigned Context, GNUAttr ? diag::err_expected_semi_after_attribute_list : diag::err_expected_semi_after_namespace_name, "", tok::semi); - return Actions.ActOnUsingDirective(CurScope, UsingLoc, NamespcLoc, SS, + return Actions.ActOnUsingDirective(getCurScope(), UsingLoc, NamespcLoc, SS, IdentLoc, NamespcName, Attr); } @@ -368,7 +368,7 @@ Parser::DeclPtrTy Parser::ParseUsingDeclaration(unsigned Context, AttrList ? "attributes list" : "using declaration", tok::semi); - return Actions.ActOnUsingDeclaration(CurScope, AS, true, UsingLoc, SS, Name, + return Actions.ActOnUsingDeclaration(getCurScope(), AS, true, UsingLoc, SS, Name, AttrList.get(), IsTypeName, TypenameLoc); } @@ -508,7 +508,7 @@ Parser::TypeResult Parser::ParseClassName(SourceLocation &EndLocation, // template-name was wrong. Try to fix that. TemplateNameKind TNK = TNK_Type_template; TemplateTy Template; - if (!Actions.DiagnoseUnknownTemplateName(*Id, IdLoc, CurScope, + if (!Actions.DiagnoseUnknownTemplateName(*Id, IdLoc, getCurScope(), SS, Template, TNK)) { Diag(IdLoc, diag::err_unknown_template_name) << Id; @@ -542,7 +542,7 @@ Parser::TypeResult Parser::ParseClassName(SourceLocation &EndLocation, } // We have an identifier; check whether it is actually a type. - TypeTy *Type = Actions.getTypeName(*Id, IdLoc, CurScope, SS, true); + TypeTy *Type = Actions.getTypeName(*Id, IdLoc, getCurScope(), SS, true); if (!Type) { Diag(IdLoc, diag::err_expected_class_name); return true; @@ -609,10 +609,24 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind, if (Tok.is(tok::code_completion)) { // Code completion for a struct, class, or union name. 
- Actions.CodeCompleteTag(CurScope, TagType); + Actions.CodeCompleteTag(getCurScope(), TagType); ConsumeCodeCompletionToken(); } + // C++03 [temp.explicit] 14.7.2/8: + // The usual access checking rules do not apply to names used to specify + // explicit instantiations. + // + // As an extension we do not perform access checking on the names used to + // specify explicit specializations either. This is important to allow + // specializing traits classes for private types. + bool SuppressingAccessChecks = false; + if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation || + TemplateInfo.Kind == ParsedTemplateInfo::ExplicitSpecialization) { + Actions.ActOnStartSuppressingAccessChecks(); + SuppressingAccessChecks = true; + } + AttributeList *AttrList = 0; // If attributes exist after tag, parse them. if (Tok.is(tok::kw___attribute)) @@ -670,7 +684,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind, Name = Tok.getIdentifierInfo(); NameLoc = ConsumeToken(); - if (Tok.is(tok::less)) { + if (Tok.is(tok::less) && getLang().CPlusPlus) { // The name was supposed to refer to a template, but didn't. // Eat the template argument list and try to continue parsing this as // a class (or template thereof). @@ -713,8 +727,6 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind, const_cast<ParsedTemplateInfo&>(TemplateInfo).ExternLoc = SourceLocation(); } - - } } else if (Tok.is(tok::annot_template_id)) { TemplateId = static_cast<TemplateIdAnnotation *>(Tok.getAnnotationValue()); @@ -734,10 +746,18 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind, DS.SetTypeSpecError(); SkipUntil(tok::semi, false, true); TemplateId->Destroy(); + if (SuppressingAccessChecks) + Actions.ActOnStopSuppressingAccessChecks(); + return; } } + // As soon as we're finished parsing the class's template-id, turn access + // checking back on. + if (SuppressingAccessChecks) + Actions.ActOnStopSuppressingAccessChecks(); + // There are four options here. 
If we have 'struct foo;', then this // is either a forward declaration or a friend declaration, which // have to be treated differently. If we have 'struct foo {...' or @@ -799,7 +819,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind, TUK == Action::TUK_Declaration) { // This is an explicit instantiation of a class template. TagOrTempResult - = Actions.ActOnExplicitInstantiation(CurScope, + = Actions.ActOnExplicitInstantiation(getCurScope(), TemplateInfo.ExternLoc, TemplateInfo.TemplateLoc, TagType, @@ -865,7 +885,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind, // Build the class template specialization. TagOrTempResult - = Actions.ActOnClassTemplateSpecialization(CurScope, TagType, TUK, + = Actions.ActOnClassTemplateSpecialization(getCurScope(), TagType, TUK, StartLoc, SS, TemplateTy::make(TemplateId->Template), TemplateId->TemplateNameLoc, @@ -886,7 +906,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind, // template struct Outer<int>::Inner; // TagOrTempResult - = Actions.ActOnExplicitInstantiation(CurScope, + = Actions.ActOnExplicitInstantiation(getCurScope(), TemplateInfo.ExternLoc, TemplateInfo.TemplateLoc, TagType, StartLoc, SS, Name, @@ -900,7 +920,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind, bool IsDependent = false; // Declaration or definition of a class type - TagOrTempResult = Actions.ActOnTag(CurScope, TagType, TUK, StartLoc, SS, + TagOrTempResult = Actions.ActOnTag(getCurScope(), TagType, TUK, StartLoc, SS, Name, NameLoc, AttrList, AS, Action::MultiTemplateParamsArg(Actions, TemplateParams? &(*TemplateParams)[0] : 0, @@ -910,7 +930,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind, // If ActOnTag said the type was dependent, try again with the // less common call. 
if (IsDependent) - TypeResult = Actions.ActOnDependentTag(CurScope, TagType, TUK, + TypeResult = Actions.ActOnDependentTag(getCurScope(), TagType, TUK, SS, Name, StartLoc, NameLoc); } @@ -1152,7 +1172,7 @@ void Parser::HandleMemberFunctionDefaultArgs(Declarator& DeclaratorInfo, getCurrentClass().MethodDecls.push_back( LateParsedMethodDeclaration(ThisDecl)); LateMethod = &getCurrentClass().MethodDecls.back(); - LateMethod->TemplateScope = CurScope->isTemplateParamScope(); + LateMethod->TemplateScope = getCurScope()->isTemplateParamScope(); // Add all of the parameters prior to this one (they don't // have default arguments). @@ -1229,7 +1249,7 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS, tok::semi)) return; - Actions.ActOnUsingDeclaration(CurScope, AS, + Actions.ActOnUsingDeclaration(getCurScope(), AS, false, SourceLocation(), SS, Name, /* AttrList */ 0, @@ -1307,7 +1327,7 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS, if (Tok.is(tok::semi)) { ConsumeToken(); - Actions.ParsedFreeStandingDeclSpec(CurScope, AS, DS); + Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS, DS); return; } @@ -1375,7 +1395,6 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS, // declarator pure-specifier[opt] // declarator constant-initializer[opt] // identifier[opt] ':' constant-expression - if (Tok.is(tok::colon)) { ConsumeToken(); BitfieldSize = ParseConstantExpression(); @@ -1392,7 +1411,6 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS, // defaulted/deleted function-definition: // '=' 'default' [TODO] // '=' 'delete' - if (Tok.is(tok::equal)) { ConsumeToken(); if (getLang().CPlusPlus0x && Tok.is(tok::kw_delete)) { @@ -1405,6 +1423,17 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS, } } + // If a simple-asm-expr is present, parse it. 
+ if (Tok.is(tok::kw_asm)) { + SourceLocation Loc; + OwningExprResult AsmLabel(ParseSimpleAsm(&Loc)); + if (AsmLabel.isInvalid()) + SkipUntil(tok::comma, true, true); + + DeclaratorInfo.setAsmLabel(AsmLabel.release()); + DeclaratorInfo.SetRangeEnd(Loc); + } + // If attributes exist after the declarator, parse them. if (Tok.is(tok::kw___attribute)) { SourceLocation Loc; @@ -1419,11 +1448,11 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS, DeclPtrTy ThisDecl; if (DS.isFriendSpecified()) { // TODO: handle initializers, bitfields, 'delete' - ThisDecl = Actions.ActOnFriendFunctionDecl(CurScope, DeclaratorInfo, + ThisDecl = Actions.ActOnFriendFunctionDecl(getCurScope(), DeclaratorInfo, /*IsDefinition*/ false, move(TemplateParams)); } else { - ThisDecl = Actions.ActOnCXXMemberDeclarator(CurScope, AS, + ThisDecl = Actions.ActOnCXXMemberDeclarator(getCurScope(), AS, DeclaratorInfo, move(TemplateParams), BitfieldSize.release(), @@ -1475,7 +1504,7 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS, return; } - Actions.FinalizeDeclaratorGroup(CurScope, DS, DeclsInGroup.data(), + Actions.FinalizeDeclaratorGroup(getCurScope(), DS, DeclsInGroup.data(), DeclsInGroup.size()); } @@ -1499,7 +1528,7 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc, // classes are *not* considered to be nested classes. bool NonNestedClass = true; if (!ClassStack.empty()) { - for (const Scope *S = CurScope; S; S = S->getParent()) { + for (const Scope *S = getCurScope(); S; S = S->getParent()) { if (S->isClassScope()) { // We're inside a class scope, so this is a nested class. 
NonNestedClass = false; @@ -1526,7 +1555,7 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc, ParsingClassDefinition ParsingDef(*this, TagDecl, NonNestedClass); if (TagDecl) - Actions.ActOnTagStartDefinition(CurScope, TagDecl); + Actions.ActOnTagStartDefinition(getCurScope(), TagDecl); if (Tok.is(tok::colon)) { ParseBaseClause(TagDecl); @@ -1535,7 +1564,7 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc, Diag(Tok, diag::err_expected_lbrace_after_base_specifiers); if (TagDecl) - Actions.ActOnTagDefinitionError(CurScope, TagDecl); + Actions.ActOnTagDefinitionError(getCurScope(), TagDecl); return; } } @@ -1544,12 +1573,8 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc, SourceLocation LBraceLoc = ConsumeBrace(); - if (!TagDecl) { - SkipUntil(tok::r_brace, false, false); - return; - } - - Actions.ActOnStartCXXMemberDeclarations(CurScope, TagDecl, LBraceLoc); + if (TagDecl) + Actions.ActOnStartCXXMemberDeclarations(getCurScope(), TagDecl, LBraceLoc); // C++ 11p3: Members of a class defined with the keyword class are private // by default. Members of a class defined with the keywords struct or union @@ -1560,43 +1585,55 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc, else CurAS = AS_public; - // While we still have something to read, read the member-declarations. - while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) { - // Each iteration of this loop reads one member-declaration. + SourceLocation RBraceLoc; + if (TagDecl) { + // While we still have something to read, read the member-declarations. + while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) { + // Each iteration of this loop reads one member-declaration. + + // Check for extraneous top-level semicolon. 
+ if (Tok.is(tok::semi)) { + Diag(Tok, diag::ext_extra_struct_semi) + << DeclSpec::getSpecifierName((DeclSpec::TST)TagType) + << FixItHint::CreateRemoval(Tok.getLocation()); + ConsumeToken(); + continue; + } - // Check for extraneous top-level semicolon. - if (Tok.is(tok::semi)) { - Diag(Tok, diag::ext_extra_struct_semi) - << FixItHint::CreateRemoval(Tok.getLocation()); - ConsumeToken(); - continue; - } + AccessSpecifier AS = getAccessSpecifierIfPresent(); + if (AS != AS_none) { + // Current token is a C++ access specifier. + CurAS = AS; + SourceLocation ASLoc = Tok.getLocation(); + ConsumeToken(); + if (Tok.is(tok::colon)) + Actions.ActOnAccessSpecifier(AS, ASLoc, Tok.getLocation()); + else + Diag(Tok, diag::err_expected_colon); + ConsumeToken(); + continue; + } - AccessSpecifier AS = getAccessSpecifierIfPresent(); - if (AS != AS_none) { - // Current token is a C++ access specifier. - CurAS = AS; - ConsumeToken(); - ExpectAndConsume(tok::colon, diag::err_expected_colon); - continue; - } + // FIXME: Make sure we don't have a template here. - // FIXME: Make sure we don't have a template here. + // Parse all the comma separated declarators. + ParseCXXClassMemberDeclaration(CurAS); + } - // Parse all the comma separated declarators. - ParseCXXClassMemberDeclaration(CurAS); + RBraceLoc = MatchRHSPunctuation(tok::r_brace, LBraceLoc); + } else { + SkipUntil(tok::r_brace, false, false); } - SourceLocation RBraceLoc = MatchRHSPunctuation(tok::r_brace, LBraceLoc); - // If attributes exist after class contents, parse them. 
llvm::OwningPtr<AttributeList> AttrList; if (Tok.is(tok::kw___attribute)) AttrList.reset(ParseGNUAttributes()); - Actions.ActOnFinishCXXMemberSpecification(CurScope, RecordLoc, TagDecl, - LBraceLoc, RBraceLoc, - AttrList.get()); + if (TagDecl) + Actions.ActOnFinishCXXMemberSpecification(getCurScope(), RecordLoc, TagDecl, + LBraceLoc, RBraceLoc, + AttrList.get()); // C++ 9.2p2: Within the class member-specification, the class is regarded as // complete within function bodies, default arguments, @@ -1605,15 +1642,18 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc, // // FIXME: Only function bodies and constructor ctor-initializers are // parsed correctly, fix the rest. - if (NonNestedClass) { + if (TagDecl && NonNestedClass) { // We are not inside a nested class. This class and its nested classes // are complete and we can parse the delayed portions of method // declarations and the lexed inline method definitions. + SourceLocation SavedPrevTokLocation = PrevTokLocation; ParseLexedMethodDeclarations(getCurrentClass()); ParseLexedMethodDefs(getCurrentClass()); + PrevTokLocation = SavedPrevTokLocation; } - Actions.ActOnTagFinishDefinition(CurScope, TagDecl, RBraceLoc); + if (TagDecl) + Actions.ActOnTagFinishDefinition(getCurScope(), TagDecl, RBraceLoc); // Leave the class scope. ParsingDef.Pop(); @@ -1726,7 +1766,7 @@ Parser::MemInitResult Parser::ParseMemInitializer(DeclPtrTy ConstructorDecl) { SourceLocation RParenLoc = MatchRHSPunctuation(tok::r_paren, LParenLoc); - return Actions.ActOnMemInitializer(ConstructorDecl, CurScope, SS, II, + return Actions.ActOnMemInitializer(ConstructorDecl, getCurScope(), SS, II, TemplateTypeTy, IdLoc, LParenLoc, ArgExprs.take(), ArgExprs.size(), CommaLocs.data(), @@ -1840,9 +1880,9 @@ void Parser::PopParsingClass() { // This nested class has some members that will need to be processed // after the top-level class is completely defined. Therefore, add // it to the list of nested classes within its parent. 
- assert(CurScope->isClassScope() && "Nested class outside of class scope?"); + assert(getCurScope()->isClassScope() && "Nested class outside of class scope?"); ClassStack.top()->NestedClasses.push_back(Victim); - Victim->TemplateScope = CurScope->getParent()->isTemplateParamScope(); + Victim->TemplateScope = getCurScope()->getParent()->isTemplateParamScope(); } /// ParseCXX0XAttributes - Parse a C++0x attribute-specifier. Currently only diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp index b036e56..e7973f7 100644 --- a/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp +++ b/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp @@ -210,7 +210,7 @@ Parser::ParseExpressionWithLeadingExtension(SourceLocation ExtLoc) { if (LHS.isInvalid()) return move(LHS); } - LHS = Actions.ActOnUnaryOp(CurScope, ExtLoc, tok::kw___extension__, + LHS = Actions.ActOnUnaryOp(getCurScope(), ExtLoc, tok::kw___extension__, move(LHS)); if (LHS.isInvalid()) return move(LHS); @@ -221,7 +221,7 @@ Parser::ParseExpressionWithLeadingExtension(SourceLocation ExtLoc) { /// Parser::OwningExprResult Parser::ParseAssignmentExpression() { if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteOrdinaryName(CurScope, Action::CCC_Expression); + Actions.CodeCompleteOrdinaryName(getCurScope(), Action::CCC_Expression); ConsumeCodeCompletionToken(); } @@ -343,6 +343,14 @@ Parser::ParseRHSOfBinaryExpression(OwningExprResult LHS, prec::Level MinPrec) { } } + // Code completion for the right-hand side of an assignment expression + // goes through a special hook that takes the left-hand side into account. + if (Tok.is(tok::code_completion) && NextTokPrec == prec::Assignment) { + Actions.CodeCompleteAssignmentRHS(getCurScope(), LHS.get()); + ConsumeCodeCompletionToken(); + return ExprError(); + } + // Parse another leaf here for the RHS of the operator. // ParseCastExpression works here because all RHS expressions in C have it // as a prefix, at least. 
However, in C++, an assignment-expression could @@ -399,7 +407,7 @@ Parser::ParseRHSOfBinaryExpression(OwningExprResult LHS, prec::Level MinPrec) { SourceRange(Actions.getExprRange(LHS.get()).getBegin(), Actions.getExprRange(RHS.get()).getEnd())); - LHS = Actions.ActOnBinOp(CurScope, OpToken.getLocation(), + LHS = Actions.ActOnBinOp(getCurScope(), OpToken.getLocation(), OpToken.getKind(), move(LHS), move(RHS)); } else LHS = Actions.ActOnConditionalOp(OpToken.getLocation(), ColonLoc, @@ -572,7 +580,8 @@ Parser::OwningExprResult Parser::ParseCastExpression(bool isUnaryExpression, Res = ParseParenExpression(ParenExprType, false/*stopIfCastExr*/, TypeOfCast, CastTy, RParenLoc); - if (Res.isInvalid()) return move(Res); + if (Res.isInvalid()) + return move(Res); } switch (ParenExprType) { @@ -638,9 +647,9 @@ Parser::OwningExprResult Parser::ParseCastExpression(bool isUnaryExpression, // Support 'Class.property' and 'super.property' notation. if (getLang().ObjC1 && Tok.is(tok::period) && - (Actions.getTypeName(II, ILoc, CurScope) || + (Actions.getTypeName(II, ILoc, getCurScope()) || // Allow the base to be 'super' if in an objc-method. - (&II == Ident_super && CurScope->isInObjcMethodScope()))) { + (&II == Ident_super && getCurScope()->isInObjcMethodScope()))) { SourceLocation DotLoc = ConsumeToken(); if (Tok.isNot(tok::identifier)) { @@ -662,8 +671,9 @@ Parser::OwningExprResult Parser::ParseCastExpression(bool isUnaryExpression, UnqualifiedId Name; CXXScopeSpec ScopeSpec; Name.setIdentifier(&II, ILoc); - Res = Actions.ActOnIdExpression(CurScope, ScopeSpec, Name, + Res = Actions.ActOnIdExpression(getCurScope(), ScopeSpec, Name, Tok.is(tok::l_paren), false); + // These can be followed by postfix-expr pieces. 
return ParsePostfixExpressionSuffix(move(Res)); } @@ -698,7 +708,7 @@ Parser::OwningExprResult Parser::ParseCastExpression(bool isUnaryExpression, SourceLocation SavedLoc = ConsumeToken(); Res = ParseCastExpression(true); if (!Res.isInvalid()) - Res = Actions.ActOnUnaryOp(CurScope, SavedLoc, SavedKind, move(Res)); + Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, move(Res)); return move(Res); } case tok::amp: { // unary-expression: '&' cast-expression @@ -706,7 +716,7 @@ Parser::OwningExprResult Parser::ParseCastExpression(bool isUnaryExpression, SourceLocation SavedLoc = ConsumeToken(); Res = ParseCastExpression(false, true); if (!Res.isInvalid()) - Res = Actions.ActOnUnaryOp(CurScope, SavedLoc, SavedKind, move(Res)); + Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, move(Res)); return move(Res); } @@ -720,7 +730,7 @@ Parser::OwningExprResult Parser::ParseCastExpression(bool isUnaryExpression, SourceLocation SavedLoc = ConsumeToken(); Res = ParseCastExpression(false); if (!Res.isInvalid()) - Res = Actions.ActOnUnaryOp(CurScope, SavedLoc, SavedKind, move(Res)); + Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, move(Res)); return move(Res); } @@ -730,7 +740,7 @@ Parser::OwningExprResult Parser::ParseCastExpression(bool isUnaryExpression, SourceLocation SavedLoc = ConsumeToken(); Res = ParseCastExpression(false); if (!Res.isInvalid()) - Res = Actions.ActOnUnaryOp(CurScope, SavedLoc, SavedKind, move(Res)); + Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, move(Res)); return move(Res); } case tok::kw_sizeof: // unary-expression: 'sizeof' unary-expression @@ -905,7 +915,7 @@ Parser::OwningExprResult Parser::ParseCastExpression(bool isUnaryExpression, case tok::caret: return ParsePostfixExpressionSuffix(ParseBlockLiteralExpression()); case tok::code_completion: - Actions.CodeCompleteOrdinaryName(CurScope, Action::CCC_Expression); + Actions.CodeCompleteOrdinaryName(getCurScope(), Action::CCC_Expression); 
ConsumeCodeCompletionToken(); return ParseCastExpression(isUnaryExpression, isAddressOfOperand, NotCastExpr, TypeOfCast); @@ -951,13 +961,23 @@ Parser::ParsePostfixExpressionSuffix(OwningExprResult LHS) { default: // Not a postfix-expression suffix. return move(LHS); case tok::l_square: { // postfix-expression: p-e '[' expression ']' + // If we have a array postfix expression that starts on a new line and + // Objective-C is enabled, it is highly likely that the user forgot a + // semicolon after the base expression and that the array postfix-expr is + // actually another message send. In this case, do some look-ahead to see + // if the contents of the square brackets are obviously not a valid + // expression and recover by pretending there is no suffix. + if (getLang().ObjC1 && Tok.isAtStartOfLine() && + isSimpleObjCMessageExpression()) + return move(LHS); + Loc = ConsumeBracket(); OwningExprResult Idx(ParseExpression()); SourceLocation RLoc = Tok.getLocation(); if (!LHS.isInvalid() && !Idx.isInvalid() && Tok.is(tok::r_square)) { - LHS = Actions.ActOnArraySubscriptExpr(CurScope, move(LHS), Loc, + LHS = Actions.ActOnArraySubscriptExpr(getCurScope(), move(LHS), Loc, move(Idx), RLoc); } else LHS = ExprError(); @@ -973,8 +993,13 @@ Parser::ParsePostfixExpressionSuffix(OwningExprResult LHS) { Loc = ConsumeParen(); + if (LHS.isInvalid()) { + SkipUntil(tok::r_paren); + return ExprError(); + } + if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteCall(CurScope, LHS.get(), 0, 0); + Actions.CodeCompleteCall(getCurScope(), LHS.get(), 0, 0); ConsumeCodeCompletionToken(); } @@ -995,7 +1020,7 @@ Parser::ParsePostfixExpressionSuffix(OwningExprResult LHS) { if (!LHS.isInvalid()) { assert((ArgExprs.size() == 0 || ArgExprs.size()-1 == CommaLocs.size())&& "Unexpected number of commas!"); - LHS = Actions.ActOnCallExpr(CurScope, move(LHS), Loc, + LHS = Actions.ActOnCallExpr(getCurScope(), move(LHS), Loc, move_arg(ArgExprs), CommaLocs.data(), Tok.getLocation()); } @@ -1014,7 
+1039,7 @@ Parser::ParsePostfixExpressionSuffix(OwningExprResult LHS) { Action::TypeTy *ObjectType = 0; bool MayBePseudoDestructor = false; if (getLang().CPlusPlus && !LHS.isInvalid()) { - LHS = Actions.ActOnStartCXXMemberReference(CurScope, move(LHS), + LHS = Actions.ActOnStartCXXMemberReference(getCurScope(), move(LHS), OpLoc, OpKind, ObjectType, MayBePseudoDestructor); if (LHS.isInvalid()) @@ -1022,11 +1047,13 @@ Parser::ParsePostfixExpressionSuffix(OwningExprResult LHS) { ParseOptionalCXXScopeSpecifier(SS, ObjectType, false, &MayBePseudoDestructor); + if (SS.isNotEmpty()) + ObjectType = 0; } if (Tok.is(tok::code_completion)) { // Code completion for a member access expression. - Actions.CodeCompleteMemberReferenceExpr(CurScope, LHS.get(), + Actions.CodeCompleteMemberReferenceExpr(getCurScope(), LHS.get(), OpLoc, OpKind == tok::arrow); ConsumeCodeCompletionToken(); @@ -1053,7 +1080,7 @@ Parser::ParsePostfixExpressionSuffix(OwningExprResult LHS) { return ExprError(); if (!LHS.isInvalid()) - LHS = Actions.ActOnMemberAccessExpr(CurScope, move(LHS), OpLoc, + LHS = Actions.ActOnMemberAccessExpr(getCurScope(), move(LHS), OpLoc, OpKind, SS, Name, ObjCImpDecl, Tok.is(tok::l_paren)); break; @@ -1061,7 +1088,7 @@ Parser::ParsePostfixExpressionSuffix(OwningExprResult LHS) { case tok::plusplus: // postfix-expression: postfix-expression '++' case tok::minusminus: // postfix-expression: postfix-expression '--' if (!LHS.isInvalid()) { - LHS = Actions.ActOnPostfixUnaryOp(CurScope, Tok.getLocation(), + LHS = Actions.ActOnPostfixUnaryOp(getCurScope(), Tok.getLocation(), Tok.getKind(), move(LHS)); } ConsumeToken(); @@ -1309,7 +1336,7 @@ Parser::OwningExprResult Parser::ParseBuiltinPrimaryExpression() { } else if (Ty.isInvalid()) { Res = ExprError(); } else { - Res = Actions.ActOnBuiltinOffsetOf(CurScope, StartLoc, TypeLoc, + Res = Actions.ActOnBuiltinOffsetOf(getCurScope(), StartLoc, TypeLoc, Ty.get(), &Comps[0], Comps.size(), ConsumeParen()); } @@ -1451,7 +1478,7 @@ 
Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr, // Reject the cast of super idiom in ObjC. if (Tok.is(tok::identifier) && getLang().ObjC1 && Tok.getIdentifierInfo() == Ident_super && - CurScope->isInObjcMethodScope() && + getCurScope()->isInObjcMethodScope() && GetLookAheadToken(1).isNot(tok::period)) { Diag(Tok.getLocation(), diag::err_illegal_super_cast) << SourceRange(OpenLoc, RParenLoc); @@ -1462,7 +1489,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr, // TODO: For cast expression with CastTy. Result = ParseCastExpression(false, false, CastTy); if (!Result.isInvalid()) - Result = Actions.ActOnCastExpr(CurScope, OpenLoc, CastTy, RParenLoc, + Result = Actions.ActOnCastExpr(getCurScope(), OpenLoc, CastTy, RParenLoc, move(Result)); return move(Result); } @@ -1561,7 +1588,7 @@ bool Parser::ParseExpressionList(ExprListTy &Exprs, CommaLocsTy &CommaLocs, while (1) { if (Tok.is(tok::code_completion)) { if (Completer) - (Actions.*Completer)(CurScope, Data, Exprs.data(), Exprs.size()); + (Actions.*Completer)(getCurScope(), Data, Exprs.data(), Exprs.size()); ConsumeCodeCompletionToken(); } @@ -1603,7 +1630,7 @@ void Parser::ParseBlockId() { } // Inform sema that we are starting a block. - Actions.ActOnBlockArguments(DeclaratorInfo, CurScope); + Actions.ActOnBlockArguments(DeclaratorInfo, getCurScope()); } /// ParseBlockLiteralExpression - Parse a block literal, which roughly looks @@ -1631,7 +1658,7 @@ Parser::OwningExprResult Parser::ParseBlockLiteralExpression() { Scope::DeclScope); // Inform sema that we are starting a block. - Actions.ActOnBlockStart(CaretLoc, CurScope); + Actions.ActOnBlockStart(CaretLoc, getCurScope()); // Parse the return type if present. DeclSpec DS; @@ -1654,7 +1681,7 @@ Parser::OwningExprResult Parser::ParseBlockLiteralExpression() { // If there was an error parsing the arguments, they may have // tried to use ^(x+y) which requires an argument list. 
Just // skip the whole block literal. - Actions.ActOnBlockError(CaretLoc, CurScope); + Actions.ActOnBlockError(CaretLoc, getCurScope()); return ExprError(); } @@ -1665,7 +1692,7 @@ Parser::OwningExprResult Parser::ParseBlockLiteralExpression() { } // Inform sema that we are starting a block. - Actions.ActOnBlockArguments(ParamInfo, CurScope); + Actions.ActOnBlockArguments(ParamInfo, getCurScope()); } else if (!Tok.is(tok::l_brace)) { ParseBlockId(); } else { @@ -1686,7 +1713,7 @@ Parser::OwningExprResult Parser::ParseBlockLiteralExpression() { } // Inform sema that we are starting a block. - Actions.ActOnBlockArguments(ParamInfo, CurScope); + Actions.ActOnBlockArguments(ParamInfo, getCurScope()); } @@ -1694,14 +1721,14 @@ Parser::OwningExprResult Parser::ParseBlockLiteralExpression() { if (!Tok.is(tok::l_brace)) { // Saw something like: ^expr Diag(Tok, diag::err_expected_expression); - Actions.ActOnBlockError(CaretLoc, CurScope); + Actions.ActOnBlockError(CaretLoc, getCurScope()); return ExprError(); } OwningStmtResult Stmt(ParseCompoundStatementBody()); if (!Stmt.isInvalid()) - Result = Actions.ActOnBlockStmtExpr(CaretLoc, move(Stmt), CurScope); + Result = Actions.ActOnBlockStmtExpr(CaretLoc, move(Stmt), getCurScope()); else - Actions.ActOnBlockError(CaretLoc, CurScope); + Actions.ActOnBlockError(CaretLoc, getCurScope()); return move(Result); } diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp index 46f1d94..579d3bd 100644 --- a/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp +++ b/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp @@ -81,7 +81,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, // '::' - Global scope qualifier. 
SourceLocation CCLoc = ConsumeToken(); SS.setBeginLoc(CCLoc); - SS.setScopeRep(Actions.ActOnCXXGlobalScopeSpecifier(CurScope, CCLoc)); + SS.setScopeRep(Actions.ActOnCXXGlobalScopeSpecifier(getCurScope(), CCLoc)); SS.setEndLoc(CCLoc); HasScopeSpecifier = true; } @@ -109,7 +109,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, if (Tok.is(tok::code_completion)) { // Code completion for a nested-name-specifier, where the code // code completion token follows the '::'. - Actions.CodeCompleteQualifiedId(CurScope, SS, EnteringContext); + Actions.CodeCompleteQualifiedId(getCurScope(), SS, EnteringContext); ConsumeCodeCompletionToken(); } } @@ -164,13 +164,18 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, // Commit to parsing the template-id. TPA.Commit(); - TemplateTy Template - = Actions.ActOnDependentTemplateName(TemplateKWLoc, SS, TemplateName, - ObjectType, EnteringContext); - if (!Template) - return true; - if (AnnotateTemplateIdToken(Template, TNK_Dependent_template_name, - &SS, TemplateName, TemplateKWLoc, false)) + TemplateTy Template; + if (TemplateNameKind TNK = Actions.ActOnDependentTemplateName(getCurScope(), + TemplateKWLoc, + SS, + TemplateName, + ObjectType, + EnteringContext, + Template)) { + if (AnnotateTemplateIdToken(Template, TNK, &SS, TemplateName, + TemplateKWLoc, false)) + return true; + } else return true; continue; @@ -209,7 +214,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, if (TypeToken.getAnnotationValue()) SS.setScopeRep( - Actions.ActOnCXXNestedNameSpecifier(CurScope, SS, + Actions.ActOnCXXNestedNameSpecifier(getCurScope(), SS, TypeToken.getAnnotationValue(), TypeToken.getAnnotationRange(), CCLoc)); @@ -239,7 +244,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, // If we get foo:bar, this is almost certainly a typo for foo::bar. Recover // and emit a fixit hint for it. 
if (Next.is(tok::colon) && !ColonIsSacred) { - if (Actions.IsInvalidUnlessNestedName(CurScope, SS, II, ObjectType, + if (Actions.IsInvalidUnlessNestedName(getCurScope(), SS, II, ObjectType, EnteringContext) && // If the token after the colon isn't an identifier, it's still an // error, but they probably meant something else strange so don't @@ -255,7 +260,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, if (Next.is(tok::coloncolon)) { if (CheckForDestructor && GetLookAheadToken(2).is(tok::tilde) && - !Actions.isNonTypeNestedNameSpecifier(CurScope, SS, Tok.getLocation(), + !Actions.isNonTypeNestedNameSpecifier(getCurScope(), SS, Tok.getLocation(), II, ObjectType)) { *MayBePseudoDestructor = true; return false; @@ -273,12 +278,10 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, HasScopeSpecifier = true; } - if (SS.isInvalid()) - continue; - - SS.setScopeRep( - Actions.ActOnCXXNestedNameSpecifier(CurScope, SS, IdLoc, CCLoc, II, - ObjectType, EnteringContext)); + if (!SS.isInvalid()) + SS.setScopeRep( + Actions.ActOnCXXNestedNameSpecifier(getCurScope(), SS, IdLoc, CCLoc, II, + ObjectType, EnteringContext)); SS.setEndLoc(CCLoc); continue; } @@ -290,7 +293,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, UnqualifiedId TemplateName; TemplateName.setIdentifier(&II, Tok.getLocation()); bool MemberOfUnknownSpecialization; - if (TemplateNameKind TNK = Actions.isTemplateName(CurScope, SS, + if (TemplateNameKind TNK = Actions.isTemplateName(getCurScope(), SS, TemplateName, ObjectType, EnteringContext, @@ -319,18 +322,20 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, << II.getName() << FixItHint::CreateInsertion(Tok.getLocation(), "template "); - Template = Actions.ActOnDependentTemplateName(Tok.getLocation(), SS, - TemplateName, ObjectType, - EnteringContext); - if (!Template.get()) + if (TemplateNameKind TNK + = Actions.ActOnDependentTemplateName(getCurScope(), + Tok.getLocation(), SS, + TemplateName, 
ObjectType, + EnteringContext, Template)) { + // Consume the identifier. + ConsumeToken(); + if (AnnotateTemplateIdToken(Template, TNK, &SS, TemplateName, + SourceLocation(), false)) + return true; + } + else return true; - - // Consume the identifier. - ConsumeToken(); - if (AnnotateTemplateIdToken(Template, TNK_Dependent_template_name, &SS, - TemplateName, SourceLocation(), false)) - return true; - + continue; } } @@ -426,7 +431,7 @@ Parser::OwningExprResult Parser::ParseCXXIdExpression(bool isAddressOfOperand) { } } - return Actions.ActOnIdExpression(CurScope, SS, Name, Tok.is(tok::l_paren), + return Actions.ActOnIdExpression(getCurScope(), SS, Name, Tok.is(tok::l_paren), isAddressOfOperand); } @@ -607,7 +612,7 @@ Parser::ParseCXXPseudoDestructor(ExprArg Base, SourceLocation OpLoc, /*TemplateKWLoc*/SourceLocation())) return ExprError(); - return Actions.ActOnPseudoDestructorExpr(CurScope, move(Base), OpLoc, OpKind, + return Actions.ActOnPseudoDestructorExpr(getCurScope(), move(Base), OpLoc, OpKind, SS, FirstTypeName, CCLoc, TildeLoc, SecondTypeName, Tok.is(tok::l_paren)); @@ -673,7 +678,7 @@ Parser::OwningExprResult Parser::ParseCXXThis() { Parser::OwningExprResult Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) { Declarator DeclaratorInfo(DS, Declarator::TypeNameContext); - TypeTy *TypeRep = Actions.ActOnTypeName(CurScope, DeclaratorInfo).get(); + TypeTy *TypeRep = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo).get(); assert(Tok.is(tok::l_paren) && "Expected '('!"); SourceLocation LParenLoc = ConsumeParen(); @@ -728,7 +733,7 @@ bool Parser::ParseCXXCondition(OwningExprResult &ExprResult, SourceLocation Loc, bool ConvertToBoolean) { if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteOrdinaryName(CurScope, Action::CCC_Condition); + Actions.CodeCompleteOrdinaryName(getCurScope(), Action::CCC_Condition); ConsumeCodeCompletionToken(); } @@ -742,7 +747,7 @@ bool Parser::ParseCXXCondition(OwningExprResult &ExprResult, // If required, convert 
to a boolean value. if (ConvertToBoolean) ExprResult - = Actions.ActOnBooleanCondition(CurScope, Loc, move(ExprResult)); + = Actions.ActOnBooleanCondition(getCurScope(), Loc, move(ExprResult)); return ExprResult.isInvalid(); } @@ -774,7 +779,7 @@ bool Parser::ParseCXXCondition(OwningExprResult &ExprResult, } // Type-check the declaration itself. - Action::DeclResult Dcl = Actions.ActOnCXXConditionDeclaration(CurScope, + Action::DeclResult Dcl = Actions.ActOnCXXConditionDeclaration(getCurScope(), DeclaratorInfo); DeclResult = Dcl.get(); ExprResult = ExprError(); @@ -1011,15 +1016,14 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, case UnqualifiedId::IK_OperatorFunctionId: case UnqualifiedId::IK_LiteralOperatorId: if (AssumeTemplateId) { - Template = Actions.ActOnDependentTemplateName(TemplateKWLoc, SS, - Id, ObjectType, - EnteringContext); - TNK = TNK_Dependent_template_name; - if (!Template.get()) - return true; + TNK = Actions.ActOnDependentTemplateName(getCurScope(), TemplateKWLoc, SS, + Id, ObjectType, EnteringContext, + Template); + if (TNK == TNK_Non_template) + return true; } else { bool MemberOfUnknownSpecialization; - TNK = Actions.isTemplateName(CurScope, SS, Id, ObjectType, + TNK = Actions.isTemplateName(getCurScope(), SS, Id, ObjectType, EnteringContext, Template, MemberOfUnknownSpecialization); @@ -1042,11 +1046,10 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, Diag(Id.StartLocation, diag::err_missing_dependent_template_keyword) << Name << FixItHint::CreateInsertion(Id.StartLocation, "template "); - Template = Actions.ActOnDependentTemplateName(TemplateKWLoc, SS, - Id, ObjectType, - EnteringContext); - TNK = TNK_Dependent_template_name; - if (!Template.get()) + TNK = Actions.ActOnDependentTemplateName(getCurScope(), TemplateKWLoc, + SS, Id, ObjectType, + EnteringContext, Template); + if (TNK == TNK_Non_template) return true; } } @@ -1056,7 +1059,7 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, UnqualifiedId 
TemplateName; bool MemberOfUnknownSpecialization; TemplateName.setIdentifier(Name, NameLoc); - TNK = Actions.isTemplateName(CurScope, SS, TemplateName, ObjectType, + TNK = Actions.isTemplateName(getCurScope(), SS, TemplateName, ObjectType, EnteringContext, Template, MemberOfUnknownSpecialization); break; @@ -1067,14 +1070,13 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, bool MemberOfUnknownSpecialization; TemplateName.setIdentifier(Name, NameLoc); if (ObjectType) { - Template = Actions.ActOnDependentTemplateName(TemplateKWLoc, SS, - TemplateName, ObjectType, - EnteringContext); - TNK = TNK_Dependent_template_name; - if (!Template.get()) + TNK = Actions.ActOnDependentTemplateName(getCurScope(), TemplateKWLoc, SS, + TemplateName, ObjectType, + EnteringContext, Template); + if (TNK == TNK_Non_template) return true; } else { - TNK = Actions.isTemplateName(CurScope, SS, TemplateName, ObjectType, + TNK = Actions.isTemplateName(getCurScope(), SS, TemplateName, ObjectType, EnteringContext, Template, MemberOfUnknownSpecialization); @@ -1271,7 +1273,7 @@ bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext, case tok::code_completion: { // Code completion for the operator name. - Actions.CodeCompleteOperatorName(CurScope); + Actions.CodeCompleteOperatorName(getCurScope()); // Consume the operator token. ConsumeCodeCompletionToken(); @@ -1332,7 +1334,7 @@ bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext, ParseDeclaratorInternal(D, /*DirectDeclParser=*/0); // Finish up the type. - Action::TypeResult Ty = Actions.ActOnTypeName(CurScope, D); + Action::TypeResult Ty = Actions.ActOnTypeName(getCurScope(), D); if (Ty.isInvalid()) return true; @@ -1404,9 +1406,9 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext, } if (AllowConstructorName && - Actions.isCurrentClassName(*Id, CurScope, &SS)) { + Actions.isCurrentClassName(*Id, getCurScope(), &SS)) { // We have parsed a constructor name. 
- Result.setConstructorName(Actions.getTypeName(*Id, IdLoc, CurScope, + Result.setConstructorName(Actions.getTypeName(*Id, IdLoc, getCurScope(), &SS, false), IdLoc, IdLoc); } else { @@ -1431,7 +1433,7 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext, // If the template-name names the current class, then this is a constructor if (AllowConstructorName && TemplateId->Name && - Actions.isCurrentClassName(*TemplateId->Name, CurScope, &SS)) { + Actions.isCurrentClassName(*TemplateId->Name, getCurScope(), &SS)) { if (SS.isSet()) { // C++ [class.qual]p2 specifies that a qualified template-name // is taken as the constructor name where a constructor can be @@ -1444,7 +1446,7 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext, SourceRange(TemplateId->LAngleLoc, TemplateId->RAngleLoc)); Result.setConstructorName(Actions.getTypeName(*TemplateId->Name, TemplateId->TemplateNameLoc, - CurScope, + getCurScope(), &SS, false), TemplateId->TemplateNameLoc, TemplateId->RAngleLoc); @@ -1517,7 +1519,7 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext, // Note that this is a destructor name. Action::TypeTy *Ty = Actions.getDestructorName(TildeLoc, *ClassName, - ClassNameLoc, CurScope, + ClassNameLoc, getCurScope(), SS, ObjectType, EnteringContext); if (!Ty) @@ -1570,7 +1572,7 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) { ExprVector PlacementArgs(Actions); SourceLocation PlacementLParen, PlacementRParen; - bool ParenTypeId; + SourceRange TypeIdParens; DeclSpec DS; Declarator DeclaratorInfo(DS, Declarator::TypeNameContext); if (Tok.is(tok::l_paren)) { @@ -1589,17 +1591,17 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) { if (PlacementArgs.empty()) { // Reset the placement locations. There was no placement. 
+ TypeIdParens = SourceRange(PlacementLParen, PlacementRParen); PlacementLParen = PlacementRParen = SourceLocation(); - ParenTypeId = true; } else { // We still need the type. if (Tok.is(tok::l_paren)) { - SourceLocation LParen = ConsumeParen(); + TypeIdParens.setBegin(ConsumeParen()); ParseSpecifierQualifierList(DS); DeclaratorInfo.SetSourceRange(DS.getSourceRange()); ParseDeclarator(DeclaratorInfo); - MatchRHSPunctuation(tok::r_paren, LParen); - ParenTypeId = true; + TypeIdParens.setEnd(MatchRHSPunctuation(tok::r_paren, + TypeIdParens.getBegin())); } else { if (ParseCXXTypeSpecifierSeq(DS)) DeclaratorInfo.setInvalidType(true); @@ -1608,7 +1610,6 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) { ParseDeclaratorInternal(DeclaratorInfo, &Parser::ParseDirectNewDeclarator); } - ParenTypeId = false; } } } else { @@ -1621,7 +1622,6 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) { ParseDeclaratorInternal(DeclaratorInfo, &Parser::ParseDirectNewDeclarator); } - ParenTypeId = false; } if (DeclaratorInfo.isInvalidType()) { SkipUntil(tok::semi, /*StopAtSemi=*/true, /*DontConsume=*/true); @@ -1649,7 +1649,7 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) { return Actions.ActOnCXXNew(Start, UseGlobal, PlacementLParen, move_arg(PlacementArgs), PlacementRParen, - ParenTypeId, DeclaratorInfo, ConstructorLParen, + TypeIdParens, DeclaratorInfo, ConstructorLParen, move_arg(ConstructorArgs), ConstructorRParen); } @@ -1851,7 +1851,7 @@ Parser::ParseCXXAmbiguousParenExpression(ParenParseOption &ExprType, // will be consumed. Result = ParseCastExpression(false/*isUnaryExpression*/, false/*isAddressofOperand*/, - NotCastExpr, false); + NotCastExpr, 0/*TypeOfCast*/); } // If we parsed a cast-expression, it's really a type-id, otherwise it's @@ -1893,7 +1893,7 @@ Parser::ParseCXXAmbiguousParenExpression(ParenParseOption &ExprType, // Result is what ParseCastExpression returned earlier. 
if (!Result.isInvalid()) - Result = Actions.ActOnCastExpr(CurScope, LParenLoc, CastTy, RParenLoc, + Result = Actions.ActOnCastExpr(getCurScope(), LParenLoc, CastTy, RParenLoc, move(Result)); return move(Result); } diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp index a382a9a..8451aeb 100644 --- a/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp +++ b/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp @@ -146,7 +146,7 @@ Parser::OwningExprResult Parser::ParseInitializerWithPotentialDesignator() { if (getLang().ObjC1 && getLang().CPlusPlus) { // Send to 'super'. if (Tok.is(tok::identifier) && Tok.getIdentifierInfo() == Ident_super && - NextToken().isNot(tok::period) && CurScope->isInObjcMethodScope()) { + NextToken().isNot(tok::period) && getCurScope()->isInObjcMethodScope()) { CheckArrayDesignatorSyntax(*this, StartLoc, Desig); return ParseAssignmentExprWithObjCMessageExprStart(StartLoc, ConsumeToken(), 0, @@ -184,7 +184,7 @@ Parser::OwningExprResult Parser::ParseInitializerWithPotentialDesignator() { // This is a message send to super: [super foo] // This is a message sent to an expr: [super.bar foo] switch (Action::ObjCMessageKind Kind - = Actions.getObjCMessageKind(CurScope, II, IILoc, + = Actions.getObjCMessageKind(getCurScope(), II, IILoc, II == Ident_super, NextToken().is(tok::period), ReceiverType)) { diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp index 9cfe734..68473a5 100644 --- a/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp +++ b/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp @@ -31,7 +31,7 @@ Parser::DeclPtrTy Parser::ParseObjCAtDirectives() { SourceLocation AtLoc = ConsumeToken(); // the "@" if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteObjCAtDirective(CurScope, ObjCImpDecl, false); + Actions.CodeCompleteObjCAtDirective(getCurScope(), ObjCImpDecl, false); ConsumeCodeCompletionToken(); } @@ -130,7 +130,7 @@ 
Parser::DeclPtrTy Parser::ParseObjCAtInterfaceDeclaration( // Code completion after '@interface'. if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteObjCInterfaceDecl(CurScope); + Actions.CodeCompleteObjCInterfaceDecl(getCurScope()); ConsumeCodeCompletionToken(); } @@ -148,7 +148,7 @@ Parser::DeclPtrTy Parser::ParseObjCAtInterfaceDeclaration( SourceLocation categoryLoc, rparenLoc; IdentifierInfo *categoryId = 0; if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteObjCInterfaceCategory(CurScope, nameId, nameLoc); + Actions.CodeCompleteObjCInterfaceCategory(getCurScope(), nameId, nameLoc); ConsumeCodeCompletionToken(); } @@ -203,7 +203,7 @@ Parser::DeclPtrTy Parser::ParseObjCAtInterfaceDeclaration( // Code completion of superclass names. if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteObjCSuperclass(CurScope, nameId, nameLoc); + Actions.CodeCompleteObjCSuperclass(getCurScope(), nameId, nameLoc); ConsumeCodeCompletionToken(); } @@ -283,7 +283,7 @@ struct Parser::ObjCPropertyCallback : FieldCallback { FD.D.getIdentifier()); bool isOverridingProperty = false; DeclPtrTy Property = - P.Actions.ActOnProperty(P.CurScope, AtLoc, FD, OCDS, + P.Actions.ActOnProperty(P.getCurScope(), AtLoc, FD, OCDS, GetterSel, SetterSel, IDecl, &isOverridingProperty, MethodImplKind); @@ -347,7 +347,7 @@ void Parser::ParseObjCInterfaceDeclList(DeclPtrTy interfaceDecl, // Code completion within an Objective-C interface. if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteOrdinaryName(CurScope, + Actions.CodeCompleteOrdinaryName(getCurScope(), ObjCImpDecl? Action::CCC_ObjCImplementation : Action::CCC_ObjCInterface); ConsumeCodeCompletionToken(); @@ -370,7 +370,7 @@ void Parser::ParseObjCInterfaceDeclList(DeclPtrTy interfaceDecl, // Otherwise, we have an @ directive, eat the @. 
SourceLocation AtLoc = ConsumeToken(); // the "@" if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteObjCAtDirective(CurScope, ObjCImpDecl, true); + Actions.CodeCompleteObjCAtDirective(getCurScope(), ObjCImpDecl, true); ConsumeCodeCompletionToken(); break; } @@ -437,7 +437,7 @@ void Parser::ParseObjCInterfaceDeclList(DeclPtrTy interfaceDecl, // We break out of the big loop in two cases: when we see @end or when we see // EOF. In the former case, eat the @end. In the later case, emit an error. if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteObjCAtDirective(CurScope, ObjCImpDecl, true); + Actions.CodeCompleteObjCAtDirective(getCurScope(), ObjCImpDecl, true); ConsumeCodeCompletionToken(); } else if (Tok.isObjCAtKeyword(tok::objc_end)) ConsumeToken(); // the "end" identifier @@ -446,7 +446,7 @@ void Parser::ParseObjCInterfaceDeclList(DeclPtrTy interfaceDecl, // Insert collected methods declarations into the @interface object. // This passes in an invalid SourceLocation for AtEndLoc when EOF is hit. 
- Actions.ActOnAtEnd(CurScope, AtEnd, interfaceDecl, + Actions.ActOnAtEnd(getCurScope(), AtEnd, interfaceDecl, allMethods.data(), allMethods.size(), allProperties.data(), allProperties.size(), allTUVariables.data(), allTUVariables.size()); @@ -476,7 +476,7 @@ void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS, DeclPtrTy ClassDecl, while (1) { if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteObjCPropertyFlags(CurScope, DS); + Actions.CodeCompleteObjCPropertyFlags(getCurScope(), DS); ConsumeCodeCompletionToken(); } const IdentifierInfo *II = Tok.getIdentifierInfo(); @@ -509,10 +509,10 @@ void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS, DeclPtrTy ClassDecl, if (Tok.is(tok::code_completion)) { if (II->getNameStart()[0] == 's') - Actions.CodeCompleteObjCPropertySetter(CurScope, ClassDecl, + Actions.CodeCompleteObjCPropertySetter(getCurScope(), ClassDecl, Methods, NumMethods); else - Actions.CodeCompleteObjCPropertyGetter(CurScope, ClassDecl, + Actions.CodeCompleteObjCPropertyGetter(getCurScope(), ClassDecl, Methods, NumMethods); ConsumeCodeCompletionToken(); } @@ -780,7 +780,7 @@ Parser::DeclPtrTy Parser::ParseObjCMethodDecl(SourceLocation mLoc, ParsingDeclRAIIObject PD(*this); if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteObjCMethodDecl(CurScope, mType == tok::minus, + Actions.CodeCompleteObjCMethodDecl(getCurScope(), mType == tok::minus, /*ReturnType=*/0, IDecl); ConsumeCodeCompletionToken(); } @@ -797,7 +797,7 @@ Parser::DeclPtrTy Parser::ParseObjCMethodDecl(SourceLocation mLoc, MethodAttrs.reset(ParseGNUAttributes()); if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteObjCMethodDecl(CurScope, mType == tok::minus, + Actions.CodeCompleteObjCMethodDecl(getCurScope(), mType == tok::minus, ReturnType, IDecl); ConsumeCodeCompletionToken(); } @@ -856,6 +856,20 @@ Parser::DeclPtrTy Parser::ParseObjCMethodDecl(SourceLocation mLoc, if (getLang().ObjC2 && Tok.is(tok::kw___attribute)) ArgInfo.ArgAttrs = ParseGNUAttributes(); + // Code 
completion for the next piece of the selector. + if (Tok.is(tok::code_completion)) { + ConsumeCodeCompletionToken(); + KeyIdents.push_back(SelIdent); + Actions.CodeCompleteObjCMethodDeclSelector(getCurScope(), + mType == tok::minus, + /*AtParameterName=*/true, + ReturnType, + KeyIdents.data(), + KeyIdents.size()); + KeyIdents.pop_back(); + break; + } + if (Tok.isNot(tok::identifier)) { Diag(Tok, diag::err_expected_ident); // missing argument name. break; @@ -868,6 +882,18 @@ Parser::DeclPtrTy Parser::ParseObjCMethodDecl(SourceLocation mLoc, ArgInfos.push_back(ArgInfo); KeyIdents.push_back(SelIdent); + // Code completion for the next piece of the selector. + if (Tok.is(tok::code_completion)) { + ConsumeCodeCompletionToken(); + Actions.CodeCompleteObjCMethodDeclSelector(getCurScope(), + mType == tok::minus, + /*AtParameterName=*/false, + ReturnType, + KeyIdents.data(), + KeyIdents.size()); + break; + } + // Check for another keyword selector. SourceLocation Loc; SelIdent = ParseObjCSelectorPiece(Loc); @@ -892,7 +918,7 @@ Parser::DeclPtrTy Parser::ParseObjCMethodDecl(SourceLocation mLoc, Declarator ParmDecl(DS, Declarator::PrototypeContext); ParseDeclarator(ParmDecl); IdentifierInfo *ParmII = ParmDecl.getIdentifier(); - DeclPtrTy Param = Actions.ActOnParamDeclarator(CurScope, ParmDecl); + DeclPtrTy Param = Actions.ActOnParamDeclarator(getCurScope(), ParmDecl); CParamInfo.push_back(DeclaratorChunk::ParamInfo(ParmII, ParmDecl.getIdentifierLoc(), Param, @@ -1014,7 +1040,7 @@ void Parser::ParseObjCClassInstanceVariables(DeclPtrTy interfaceDecl, // Check for extraneous top-level semicolon. 
if (Tok.is(tok::semi)) { - Diag(Tok, diag::ext_extra_struct_semi) + Diag(Tok, diag::ext_extra_ivar_semi) << FixItHint::CreateRemoval(Tok.getLocation()); ConsumeToken(); continue; @@ -1025,7 +1051,7 @@ void Parser::ParseObjCClassInstanceVariables(DeclPtrTy interfaceDecl, ConsumeToken(); // eat the @ sign if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteObjCAtVisibility(CurScope); + Actions.CodeCompleteObjCAtVisibility(getCurScope()); ConsumeCodeCompletionToken(); } @@ -1044,7 +1070,7 @@ void Parser::ParseObjCClassInstanceVariables(DeclPtrTy interfaceDecl, } if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteOrdinaryName(CurScope, + Actions.CodeCompleteOrdinaryName(getCurScope(), Action::CCC_ObjCInstanceVariableList); ConsumeCodeCompletionToken(); } @@ -1063,7 +1089,7 @@ void Parser::ParseObjCClassInstanceVariables(DeclPtrTy interfaceDecl, DeclPtrTy invoke(FieldDeclarator &FD) { // Install the declarator into the interface decl. DeclPtrTy Field - = P.Actions.ActOnIvar(P.CurScope, + = P.Actions.ActOnIvar(P.getCurScope(), FD.D.getDeclSpec().getSourceRange().getBegin(), IDecl, FD.D, FD.BitfieldSize, visibility); if (Field) @@ -1087,7 +1113,7 @@ void Parser::ParseObjCClassInstanceVariables(DeclPtrTy interfaceDecl, SourceLocation RBraceLoc = MatchRHSPunctuation(tok::r_brace, LBraceLoc); // Call ActOnFields() even if we don't have any decls. This is useful // for code rewriting tools that need to be aware of the empty list. 
- Actions.ActOnFields(CurScope, atLoc, interfaceDecl, + Actions.ActOnFields(getCurScope(), atLoc, interfaceDecl, AllIvarDecls.data(), AllIvarDecls.size(), LBraceLoc, RBraceLoc, 0); return; @@ -1116,7 +1142,7 @@ Parser::DeclPtrTy Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc, ConsumeToken(); // the "protocol" identifier if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteObjCProtocolDecl(CurScope); + Actions.CodeCompleteObjCProtocolDecl(getCurScope()); ConsumeCodeCompletionToken(); } @@ -1202,7 +1228,7 @@ Parser::DeclPtrTy Parser::ParseObjCAtImplementationDeclaration( // Code completion after '@implementation'. if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteObjCImplementationDecl(CurScope); + Actions.CodeCompleteObjCImplementationDecl(getCurScope()); ConsumeCodeCompletionToken(); } @@ -1221,7 +1247,7 @@ Parser::DeclPtrTy Parser::ParseObjCAtImplementationDeclaration( IdentifierInfo *categoryId = 0; if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteObjCImplementationCategory(CurScope, nameId, nameLoc); + Actions.CodeCompleteObjCImplementationCategory(getCurScope(), nameId, nameLoc); ConsumeCodeCompletionToken(); } @@ -1277,7 +1303,7 @@ Parser::DeclPtrTy Parser::ParseObjCAtEndDeclaration(SourceRange atEnd) { DeclPtrTy Result = ObjCImpDecl; ConsumeToken(); // the "end" identifier if (ObjCImpDecl) { - Actions.ActOnAtEnd(CurScope, atEnd, ObjCImpDecl); + Actions.ActOnAtEnd(getCurScope(), atEnd, ObjCImpDecl); ObjCImpDecl = DeclPtrTy(); PendingObjCImpDecl.pop_back(); } @@ -1292,7 +1318,7 @@ Parser::DeclGroupPtrTy Parser::RetrievePendingObjCImpDecl() { if (PendingObjCImpDecl.empty()) return Actions.ConvertDeclToDeclGroup(DeclPtrTy()); DeclPtrTy ImpDecl = PendingObjCImpDecl.pop_back_val(); - Actions.ActOnAtEnd(CurScope, SourceRange(), ImpDecl); + Actions.ActOnAtEnd(getCurScope(), SourceRange(), ImpDecl); return Actions.ConvertDeclToDeclGroup(ImpDecl); } @@ -1341,7 +1367,7 @@ Parser::DeclPtrTy 
Parser::ParseObjCPropertySynthesize(SourceLocation atLoc) { while (true) { if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteObjCPropertyDefinition(CurScope, ObjCImpDecl); + Actions.CodeCompleteObjCPropertyDefinition(getCurScope(), ObjCImpDecl); ConsumeCodeCompletionToken(); } @@ -1359,7 +1385,7 @@ Parser::DeclPtrTy Parser::ParseObjCPropertySynthesize(SourceLocation atLoc) { ConsumeToken(); // consume '=' if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteObjCPropertySynthesizeIvar(CurScope, propertyId, + Actions.CodeCompleteObjCPropertySynthesizeIvar(getCurScope(), propertyId, ObjCImpDecl); ConsumeCodeCompletionToken(); } @@ -1371,7 +1397,7 @@ Parser::DeclPtrTy Parser::ParseObjCPropertySynthesize(SourceLocation atLoc) { propertyIvar = Tok.getIdentifierInfo(); ConsumeToken(); // consume ivar-name } - Actions.ActOnPropertyImplDecl(CurScope, atLoc, propertyLoc, true, ObjCImpDecl, + Actions.ActOnPropertyImplDecl(getCurScope(), atLoc, propertyLoc, true, ObjCImpDecl, propertyId, propertyIvar); if (Tok.isNot(tok::comma)) break; @@ -1399,7 +1425,7 @@ Parser::DeclPtrTy Parser::ParseObjCPropertyDynamic(SourceLocation atLoc) { SourceLocation loc = ConsumeToken(); // consume dynamic while (true) { if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteObjCPropertyDefinition(CurScope, ObjCImpDecl); + Actions.CodeCompleteObjCPropertyDefinition(getCurScope(), ObjCImpDecl); ConsumeCodeCompletionToken(); } @@ -1411,7 +1437,7 @@ Parser::DeclPtrTy Parser::ParseObjCPropertyDynamic(SourceLocation atLoc) { IdentifierInfo *propertyId = Tok.getIdentifierInfo(); SourceLocation propertyLoc = ConsumeToken(); // consume property name - Actions.ActOnPropertyImplDecl(CurScope, atLoc, propertyLoc, false, ObjCImpDecl, + Actions.ActOnPropertyImplDecl(getCurScope(), atLoc, propertyLoc, false, ObjCImpDecl, propertyId, 0); if (Tok.isNot(tok::comma)) @@ -1442,7 +1468,7 @@ Parser::OwningStmtResult Parser::ParseObjCThrowStmt(SourceLocation atLoc) { } // consume ';' 
ExpectAndConsume(tok::semi, diag::err_expected_semi_after, "@throw"); - return Actions.ActOnObjCAtThrowStmt(atLoc, move(Res), CurScope); + return Actions.ActOnObjCAtThrowStmt(atLoc, move(Res), getCurScope()); } /// objc-synchronized-statement: @@ -1536,7 +1562,7 @@ Parser::OwningStmtResult Parser::ParseObjCTryStmt(SourceLocation atLoc) { // Inform the actions module about the declarator, so it // gets added to the current scope. - FirstPart = Actions.ActOnObjCExceptionDecl(CurScope, ParmDecl); + FirstPart = Actions.ActOnObjCExceptionDecl(getCurScope(), ParmDecl); } else ConsumeToken(); // consume '...' @@ -1633,7 +1659,7 @@ Parser::DeclPtrTy Parser::ParseObjCMethodDefinition() { // Tell the actions module that we have entered a method definition with the // specified Declarator for the method. - Actions.ActOnStartOfObjCMethodDef(CurScope, MDecl); + Actions.ActOnStartOfObjCMethodDef(getCurScope(), MDecl); OwningStmtResult FnBody(ParseCompoundStatementBody()); @@ -1653,7 +1679,7 @@ Parser::DeclPtrTy Parser::ParseObjCMethodDefinition() { Parser::OwningStmtResult Parser::ParseObjCAtStatement(SourceLocation AtLoc) { if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteObjCAtStatement(CurScope); + Actions.CodeCompleteObjCAtStatement(getCurScope()); ConsumeCodeCompletionToken(); return StmtError(); } @@ -1684,7 +1710,7 @@ Parser::OwningStmtResult Parser::ParseObjCAtStatement(SourceLocation AtLoc) { Parser::OwningExprResult Parser::ParseObjCAtExpression(SourceLocation AtLoc) { switch (Tok.getKind()) { case tok::code_completion: - Actions.CodeCompleteObjCAtExpression(CurScope); + Actions.CodeCompleteObjCAtExpression(getCurScope()); ConsumeCodeCompletionToken(); return ExprError(); @@ -1730,7 +1756,6 @@ Parser::OwningExprResult Parser::ParseObjCAtExpression(SourceLocation AtLoc) { /// expression /// simple-type-specifier /// typename-specifier - bool Parser::ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr) { if (Tok.is(tok::identifier) || 
Tok.is(tok::coloncolon) || Tok.is(tok::kw_typename) || Tok.is(tok::annot_cxxscope)) @@ -1785,7 +1810,7 @@ bool Parser::ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr) { // typename-specifier we parsed into a type and parse the // remainder of the class message. Declarator DeclaratorInfo(DS, Declarator::TypeNameContext); - TypeResult Type = Actions.ActOnTypeName(CurScope, DeclaratorInfo); + TypeResult Type = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo); if (Type.isInvalid()) return true; @@ -1794,6 +1819,18 @@ bool Parser::ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr) { return false; } +/// \brief Determine whether the parser is currently referring to a an +/// Objective-C message send, using a simplified heuristic to avoid overhead. +/// +/// This routine will only return true for a subset of valid message-send +/// expressions. +bool Parser::isSimpleObjCMessageExpression() { + assert(Tok.is(tok::l_square) && getLang().ObjC1 && + "Incorrect start for isSimpleObjCMessageExpression"); + return GetLookAheadToken(1).is(tok::identifier) && + GetLookAheadToken(2).is(tok::identifier); +} + /// objc-message-expr: /// '[' objc-receiver objc-message-args ']' /// @@ -1807,6 +1844,13 @@ Parser::OwningExprResult Parser::ParseObjCMessageExpression() { assert(Tok.is(tok::l_square) && "'[' expected"); SourceLocation LBracLoc = ConsumeBracket(); // consume '[' + if (Tok.is(tok::code_completion)) { + Actions.CodeCompleteObjCMessageReceiver(getCurScope()); + ConsumeCodeCompletionToken(); + SkipUntil(tok::r_square); + return ExprError(); + } + if (getLang().CPlusPlus) { // We completely separate the C and C++ cases because C++ requires // more complicated (read: slower) parsing. @@ -1815,7 +1859,7 @@ Parser::OwningExprResult Parser::ParseObjCMessageExpression() { // FIXME: This doesn't benefit from the same typo-correction we // get in Objective-C. 
if (Tok.is(tok::identifier) && Tok.getIdentifierInfo() == Ident_super && - NextToken().isNot(tok::period) && CurScope->isInObjcMethodScope()) + NextToken().isNot(tok::period) && getCurScope()->isInObjcMethodScope()) return ParseObjCMessageExpressionBody(LBracLoc, ConsumeToken(), 0, ExprArg(Actions)); @@ -1833,11 +1877,13 @@ Parser::OwningExprResult Parser::ParseObjCMessageExpression() { return ParseObjCMessageExpressionBody(LBracLoc, SourceLocation(), TypeOrExpr, ExprArg(Actions)); - } else if (Tok.is(tok::identifier)) { + } + + if (Tok.is(tok::identifier)) { IdentifierInfo *Name = Tok.getIdentifierInfo(); SourceLocation NameLoc = Tok.getLocation(); TypeTy *ReceiverType; - switch (Actions.getObjCMessageKind(CurScope, Name, NameLoc, + switch (Actions.getObjCMessageKind(getCurScope(), Name, NameLoc, Name == Ident_super, NextToken().is(tok::period), ReceiverType)) { @@ -1919,11 +1965,11 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc, ExprArg ReceiverExpr) { if (Tok.is(tok::code_completion)) { if (SuperLoc.isValid()) - Actions.CodeCompleteObjCSuperMessage(CurScope, SuperLoc, 0, 0); + Actions.CodeCompleteObjCSuperMessage(getCurScope(), SuperLoc, 0, 0); else if (ReceiverType) - Actions.CodeCompleteObjCClassMessage(CurScope, ReceiverType, 0, 0); + Actions.CodeCompleteObjCClassMessage(getCurScope(), ReceiverType, 0, 0); else - Actions.CodeCompleteObjCInstanceMessage(CurScope, ReceiverExpr.get(), + Actions.CodeCompleteObjCInstanceMessage(getCurScope(), ReceiverExpr.get(), 0, 0); ConsumeCodeCompletionToken(); } @@ -1968,15 +2014,15 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc, // Code completion after each argument. 
if (Tok.is(tok::code_completion)) { if (SuperLoc.isValid()) - Actions.CodeCompleteObjCSuperMessage(CurScope, SuperLoc, + Actions.CodeCompleteObjCSuperMessage(getCurScope(), SuperLoc, KeyIdents.data(), KeyIdents.size()); else if (ReceiverType) - Actions.CodeCompleteObjCClassMessage(CurScope, ReceiverType, + Actions.CodeCompleteObjCClassMessage(getCurScope(), ReceiverType, KeyIdents.data(), KeyIdents.size()); else - Actions.CodeCompleteObjCInstanceMessage(CurScope, ReceiverExpr.get(), + Actions.CodeCompleteObjCInstanceMessage(getCurScope(), ReceiverExpr.get(), KeyIdents.data(), KeyIdents.size()); ConsumeCodeCompletionToken(); @@ -2034,18 +2080,18 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc, Selector Sel = PP.getSelectorTable().getSelector(nKeys, &KeyIdents[0]); if (SuperLoc.isValid()) - return Actions.ActOnSuperMessage(CurScope, SuperLoc, Sel, + return Actions.ActOnSuperMessage(getCurScope(), SuperLoc, Sel, LBracLoc, SelectorLoc, RBracLoc, Action::MultiExprArg(Actions, KeyExprs.take(), KeyExprs.size())); else if (ReceiverType) - return Actions.ActOnClassMessage(CurScope, ReceiverType, Sel, + return Actions.ActOnClassMessage(getCurScope(), ReceiverType, Sel, LBracLoc, SelectorLoc, RBracLoc, Action::MultiExprArg(Actions, KeyExprs.take(), KeyExprs.size())); - return Actions.ActOnInstanceMessage(CurScope, move(ReceiverExpr), Sel, + return Actions.ActOnInstanceMessage(getCurScope(), move(ReceiverExpr), Sel, LBracLoc, SelectorLoc, RBracLoc, Action::MultiExprArg(Actions, KeyExprs.take(), diff --git a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp index c4e4a52..64a4c16 100644 --- a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp +++ b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp @@ -110,7 +110,7 @@ void PragmaPackHandler::HandlePragma(Preprocessor &PP, Token &PackTok) { LParenLoc, RParenLoc); } -// #pragma 'options' 'align' '=' {'natural', 'mac68k', 'power', 'reset'} +// #pragma 
'options' 'align' '=' {'native','natural','mac68k','power','reset'} void PragmaOptionsHandler::HandlePragma(Preprocessor &PP, Token &OptionsTok) { SourceLocation OptionsLoc = OptionsTok.getLocation(); @@ -120,7 +120,7 @@ void PragmaOptionsHandler::HandlePragma(Preprocessor &PP, Token &OptionsTok) { PP.Diag(Tok.getLocation(), diag::warn_pragma_options_expected_align); return; } - + PP.Lex(Tok); if (Tok.isNot(tok::equal)) { PP.Diag(Tok.getLocation(), diag::warn_pragma_options_expected_equal); @@ -136,8 +136,12 @@ void PragmaOptionsHandler::HandlePragma(Preprocessor &PP, Token &OptionsTok) { Action::PragmaOptionsAlignKind Kind = Action::POAK_Natural; const IdentifierInfo *II = Tok.getIdentifierInfo(); - if (II->isStr("natural")) + if (II->isStr("native")) + Kind = Action::POAK_Native; + else if (II->isStr("natural")) Kind = Action::POAK_Natural; + else if (II->isStr("packed")) + Kind = Action::POAK_Packed; else if (II->isStr("power")) Kind = Action::POAK_Power; else if (II->isStr("mac68k")) @@ -223,7 +227,7 @@ void PragmaUnusedHandler::HandlePragma(Preprocessor &PP, Token &UnusedTok) { // Perform the action to handle the pragma. 
Actions.ActOnPragmaUnused(Identifiers.data(), Identifiers.size(), - parser.CurScope, UnusedLoc, LParenLoc, RParenLoc); + parser.getCurScope(), UnusedLoc, LParenLoc, RParenLoc); } // #pragma weak identifier diff --git a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h index d9d06a1..929ec46 100644 --- a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h +++ b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h @@ -23,8 +23,8 @@ namespace clang { class PragmaOptionsHandler : public PragmaHandler { Action &Actions; public: - PragmaOptionsHandler(const IdentifierInfo *N, Action &A) : PragmaHandler(N), - Actions(A) {} + explicit PragmaOptionsHandler(Action &A) : PragmaHandler("options"), + Actions(A) {} virtual void HandlePragma(Preprocessor &PP, Token &FirstToken); }; @@ -32,8 +32,8 @@ public: class PragmaPackHandler : public PragmaHandler { Action &Actions; public: - PragmaPackHandler(const IdentifierInfo *N, Action &A) : PragmaHandler(N), - Actions(A) {} + explicit PragmaPackHandler(Action &A) : PragmaHandler("pack"), + Actions(A) {} virtual void HandlePragma(Preprocessor &PP, Token &FirstToken); }; @@ -42,8 +42,8 @@ class PragmaUnusedHandler : public PragmaHandler { Action &Actions; Parser &parser; public: - PragmaUnusedHandler(const IdentifierInfo *N, Action &A, Parser& p) - : PragmaHandler(N), Actions(A), parser(p) {} + PragmaUnusedHandler(Action &A, Parser& p) + : PragmaHandler("unused"), Actions(A), parser(p) {} virtual void HandlePragma(Preprocessor &PP, Token &FirstToken); }; @@ -51,8 +51,8 @@ public: class PragmaWeakHandler : public PragmaHandler { Action &Actions; public: - PragmaWeakHandler(const IdentifierInfo *N, Action &A) - : PragmaHandler(N), Actions(A) {} + explicit PragmaWeakHandler(Action &A) + : PragmaHandler("weak"), Actions(A) {} virtual void HandlePragma(Preprocessor &PP, Token &FirstToken); }; diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp 
b/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp index 98c0058..c908ed9 100644 --- a/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp +++ b/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp @@ -77,6 +77,8 @@ Parser::OwningStmtResult Parser::ParseStatementOrDeclaration(bool OnlyStatement) { const char *SemiError = 0; OwningStmtResult Res(Actions); + + ParenBraceBracketBalancer BalancerRAIIObj(*this); CXX0XAttributeList Attr; if (getLang().CPlusPlus0x && isCXX0XAttributeSpecifier()) @@ -96,8 +98,8 @@ Parser::ParseStatementOrDeclaration(bool OnlyStatement) { } case tok::code_completion: - Actions.CodeCompleteOrdinaryName(CurScope, Action::CCC_Statement); - ConsumeToken(); + Actions.CodeCompleteOrdinaryName(getCurScope(), Action::CCC_Statement); + ConsumeCodeCompletionToken(); return ParseStatementOrDeclaration(OnlyStatement); case tok::identifier: @@ -282,7 +284,7 @@ Parser::OwningStmtResult Parser::ParseCaseStatement(AttributeList *Attr) { SourceLocation CaseLoc = ConsumeToken(); // eat the 'case'. if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteCase(CurScope); + Actions.CodeCompleteCase(getCurScope()); ConsumeCodeCompletionToken(); } @@ -402,7 +404,7 @@ Parser::OwningStmtResult Parser::ParseDefaultStatement(AttributeList *Attr) { return StmtError(); return Actions.ActOnDefaultStmt(DefaultLoc, ColonLoc, - move(SubStmt), CurScope); + move(SubStmt), getCurScope()); } @@ -552,7 +554,7 @@ bool Parser::ParseParenExprOrCondition(OwningExprResult &ExprResult, // If required, convert to a boolean value. 
if (!ExprResult.isInvalid() && ConvertToBoolean) ExprResult - = Actions.ActOnBooleanCondition(CurScope, Loc, move(ExprResult)); + = Actions.ActOnBooleanCondition(getCurScope(), Loc, move(ExprResult)); } // If the parser was confused by the condition and we don't have a ')', try to @@ -668,10 +670,10 @@ Parser::OwningStmtResult Parser::ParseIfStatement(AttributeList *Attr) { // Regardless of whether or not InnerScope actually pushed a scope, set the // ElseScope flag for the innermost scope so we can diagnose use of the if // condition variable in C++. - unsigned OldFlags = CurScope->getFlags(); - CurScope->setFlags(OldFlags | Scope::ElseScope); + unsigned OldFlags = getCurScope()->getFlags(); + getCurScope()->setFlags(OldFlags | Scope::ElseScope); ElseStmt = ParseStatement(); - CurScope->setFlags(OldFlags); + getCurScope()->setFlags(OldFlags); // Pop the 'else' scope if needed. InnerScope.Exit(); @@ -997,7 +999,7 @@ Parser::OwningStmtResult Parser::ParseForStatement(AttributeList *Attr) { DeclPtrTy SecondVar; if (Tok.is(tok::code_completion)) { - Actions.CodeCompleteOrdinaryName(CurScope, + Actions.CodeCompleteOrdinaryName(getCurScope(), C99orCXXorObjC? Action::CCC_ForInit : Action::CCC_Expression); ConsumeCodeCompletionToken(); @@ -1061,7 +1063,7 @@ Parser::OwningStmtResult Parser::ParseForStatement(AttributeList *Attr) { else { Second = ParseExpression(); if (!Second.isInvalid()) - Second = Actions.ActOnBooleanCondition(CurScope, ForLoc, + Second = Actions.ActOnBooleanCondition(getCurScope(), ForLoc, move(Second)); } SecondPartIsInvalid = Second.isInvalid(); @@ -1170,7 +1172,7 @@ Parser::OwningStmtResult Parser::ParseContinueStatement(AttributeList *Attr) { delete Attr; SourceLocation ContinueLoc = ConsumeToken(); // eat the 'continue'. 
- return Actions.ActOnContinueStmt(ContinueLoc, CurScope); + return Actions.ActOnContinueStmt(ContinueLoc, getCurScope()); } /// ParseBreakStatement @@ -1184,7 +1186,7 @@ Parser::OwningStmtResult Parser::ParseBreakStatement(AttributeList *Attr) { delete Attr; SourceLocation BreakLoc = ConsumeToken(); // eat the 'break'. - return Actions.ActOnBreakStmt(BreakLoc, CurScope); + return Actions.ActOnBreakStmt(BreakLoc, getCurScope()); } /// ParseReturnStatement @@ -1199,6 +1201,13 @@ Parser::OwningStmtResult Parser::ParseReturnStatement(AttributeList *Attr) { OwningExprResult R(Actions); if (Tok.isNot(tok::semi)) { + if (Tok.is(tok::code_completion)) { + Actions.CodeCompleteReturn(getCurScope()); + ConsumeCodeCompletionToken(); + SkipUntil(tok::semi, false, true); + return StmtError(); + } + R = ParseExpression(); if (R.isInvalid()) { // Skip to the semicolon, but don't consume it. SkipUntil(tok::semi, false, true); @@ -1588,7 +1597,7 @@ Parser::OwningStmtResult Parser::ParseCXXCatchBlock() { return StmtError(); Declarator ExDecl(DS, Declarator::CXXCatchContext); ParseDeclarator(ExDecl); - ExceptionDecl = Actions.ActOnExceptionDeclarator(CurScope, ExDecl); + ExceptionDecl = Actions.ActOnExceptionDeclarator(getCurScope(), ExDecl); } else ConsumeToken(); diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp index c87ddad..e1aaf91 100644 --- a/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp +++ b/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp @@ -201,7 +201,7 @@ Parser::ParseSingleDeclarationAfterTemplate( if (Tok.is(tok::semi)) { DeclEnd = ConsumeToken(); - DeclPtrTy Decl = Actions.ParsedFreeStandingDeclSpec(CurScope, AS, DS); + DeclPtrTy Decl = Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS, DS); DS.complete(Decl); return Decl; } @@ -238,7 +238,7 @@ Parser::ParseSingleDeclarationAfterTemplate( } if (DeclaratorInfo.isFunctionDeclarator() && - isStartOfFunctionDefinition()) { + 
isStartOfFunctionDefinition(DeclaratorInfo)) { if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) { Diag(Tok, diag::err_function_declared_typedef); @@ -341,8 +341,37 @@ Parser::ParseTemplateParameterList(unsigned Depth, /// \brief Determine whether the parser is at the start of a template /// type parameter. bool Parser::isStartOfTemplateTypeParameter() { - if (Tok.is(tok::kw_class)) - return true; + if (Tok.is(tok::kw_class)) { + // "class" may be the start of an elaborated-type-specifier or a + // type-parameter. Per C++ [temp.param]p3, we prefer the type-parameter. + switch (NextToken().getKind()) { + case tok::equal: + case tok::comma: + case tok::greater: + case tok::greatergreater: + case tok::ellipsis: + return true; + + case tok::identifier: + // This may be either a type-parameter or an elaborated-type-specifier. + // We have to look further. + break; + + default: + return false; + } + + switch (GetLookAheadToken(2).getKind()) { + case tok::equal: + case tok::comma: + case tok::greater: + case tok::greatergreater: + return true; + + default: + return false; + } + } if (Tok.isNot(tok::kw_typename)) return false; @@ -442,22 +471,19 @@ Parser::DeclPtrTy Parser::ParseTypeParameter(unsigned Depth, unsigned Position){ return DeclPtrTy(); } - DeclPtrTy TypeParam = Actions.ActOnTypeParameter(CurScope, TypenameKeyword, - Ellipsis, EllipsisLoc, - KeyLoc, ParamName, NameLoc, - Depth, Position); - - // Grab a default type id (if given). + // Grab a default argument (if available). + // Per C++0x [basic.scope.pdecl]p9, we parse the default argument before + // we introduce the type parameter into the local scope. 
+ SourceLocation EqualLoc; + TypeTy *DefaultArg = 0; if (Tok.is(tok::equal)) { - SourceLocation EqualLoc = ConsumeToken(); - SourceLocation DefaultLoc = Tok.getLocation(); - TypeResult DefaultType = ParseTypeName(); - if (!DefaultType.isInvalid()) - Actions.ActOnTypeParameterDefault(TypeParam, EqualLoc, DefaultLoc, - DefaultType.get()); + EqualLoc = ConsumeToken(); + DefaultArg = ParseTypeName().get(); } - - return TypeParam; + + return Actions.ActOnTypeParameter(getCurScope(), TypenameKeyword, Ellipsis, + EllipsisLoc, KeyLoc, ParamName, NameLoc, + Depth, Position, EqualLoc, DefaultArg); } /// ParseTemplateTemplateParameter - Handle the parsing of template @@ -512,28 +538,28 @@ Parser::ParseTemplateTemplateParameter(unsigned Depth, unsigned Position) { TemplateParams.size(), RAngleLoc); - Parser::DeclPtrTy Param - = Actions.ActOnTemplateTemplateParameter(CurScope, TemplateLoc, - ParamList, ParamName, - NameLoc, Depth, Position); - - // Get the a default value, if given. + // Grab a default argument (if available). + // Per C++0x [basic.scope.pdecl]p9, we parse the default argument before + // we introduce the template parameter into the local scope. 
+ SourceLocation EqualLoc; + ParsedTemplateArgument DefaultArg; if (Tok.is(tok::equal)) { - SourceLocation EqualLoc = ConsumeToken(); - ParsedTemplateArgument Default = ParseTemplateTemplateArgument(); - if (Default.isInvalid()) { + EqualLoc = ConsumeToken(); + DefaultArg = ParseTemplateTemplateArgument(); + if (DefaultArg.isInvalid()) { Diag(Tok.getLocation(), diag::err_default_template_template_parameter_not_template); static const tok::TokenKind EndToks[] = { tok::comma, tok::greater, tok::greatergreater }; SkipUntil(EndToks, 3, true, true); - return Param; - } else if (Param) - Actions.ActOnTemplateTemplateParameterDefault(Param, EqualLoc, Default); + } } - - return Param; + + return Actions.ActOnTemplateTemplateParameter(getCurScope(), TemplateLoc, + ParamList, ParamName, + NameLoc, Depth, Position, + EqualLoc, DefaultArg); } /// ParseNonTypeTemplateParameter - Handle the parsing of non-type @@ -542,13 +568,6 @@ Parser::ParseTemplateTemplateParameter(unsigned Depth, unsigned Position) { /// template-parameter: /// ... /// parameter-declaration -/// -/// NOTE: It would be ideal to simply call out to ParseParameterDeclaration(), -/// but that didn't work out to well. Instead, this tries to recrate the basic -/// parsing of parameter declarations, but tries to constrain it for template -/// parameters. -/// FIXME: We need to make a ParseParameterDeclaration that works for -/// non-type template parameters and normal function parameters. Parser::DeclPtrTy Parser::ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position) { SourceLocation StartLoc = Tok.getLocation(); @@ -572,13 +591,13 @@ Parser::ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position) { return DeclPtrTy(); } - // Create the parameter. - DeclPtrTy Param = Actions.ActOnNonTypeTemplateParameter(CurScope, ParamDecl, - Depth, Position); - // If there is a default value, parse it. 
+ // Per C++0x [basic.scope.pdecl]p9, we parse the default argument before + // we introduce the template parameter into the local scope. + SourceLocation EqualLoc; + OwningExprResult DefaultArg(Actions); if (Tok.is(tok::equal)) { - SourceLocation EqualLoc = ConsumeToken(); + EqualLoc = ConsumeToken(); // C++ [temp.param]p15: // When parsing a default template-argument for a non-type @@ -587,15 +606,15 @@ Parser::ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position) { // operator. GreaterThanIsOperatorScope G(GreaterThanIsOperator, false); - OwningExprResult DefaultArg = ParseAssignmentExpression(); + DefaultArg = ParseAssignmentExpression(); if (DefaultArg.isInvalid()) SkipUntil(tok::comma, tok::greater, true, true); - else if (Param) - Actions.ActOnNonTypeTemplateParameterDefault(Param, EqualLoc, - move(DefaultArg)); } - return Param; + // Create the parameter. + return Actions.ActOnNonTypeTemplateParameter(getCurScope(), ParamDecl, + Depth, Position, EqualLoc, + move(DefaultArg)); } /// \brief Parses a template-id that after the template name has @@ -885,15 +904,14 @@ ParsedTemplateArgument Parser::ParseTemplateTemplateArgument() { // If the next token signals the end of a template argument, // then we have a dependent template name that could be a template // template argument. - if (isEndOfTemplateArgument(Tok)) { - TemplateTy Template - = Actions.ActOnDependentTemplateName(TemplateLoc, SS, Name, + TemplateTy Template; + if (isEndOfTemplateArgument(Tok) && + Actions.ActOnDependentTemplateName(getCurScope(), TemplateLoc, SS, Name, /*ObjectType=*/0, - /*EnteringContext=*/false); - if (Template.get()) - return ParsedTemplateArgument(SS, Template, Name.StartLocation); - } - } + /*EnteringContext=*/false, + Template)) + return ParsedTemplateArgument(SS, Template, Name.StartLocation); + } } else if (Tok.is(tok::identifier)) { // We may have a (non-dependent) template name. 
TemplateTy Template; @@ -903,7 +921,7 @@ ParsedTemplateArgument Parser::ParseTemplateTemplateArgument() { if (isEndOfTemplateArgument(Tok)) { bool MemberOfUnknownSpecialization; - TemplateNameKind TNK = Actions.isTemplateName(CurScope, SS, Name, + TemplateNameKind TNK = Actions.isTemplateName(getCurScope(), SS, Name, /*ObjectType=*/0, /*EnteringContext=*/false, Template, diff --git a/contrib/llvm/tools/clang/lib/Parse/Parser.cpp b/contrib/llvm/tools/clang/lib/Parse/Parser.cpp index 2968970..ac78f11 100644 --- a/contrib/llvm/tools/clang/lib/Parse/Parser.cpp +++ b/contrib/llvm/tools/clang/lib/Parse/Parser.cpp @@ -26,30 +26,24 @@ Parser::Parser(Preprocessor &pp, Action &actions) GreaterThanIsOperator(true), ColonIsSacred(false), TemplateParameterDepth(0) { Tok.setKind(tok::eof); - CurScope = 0; + Actions.CurScope = 0; NumCachedScopes = 0; ParenCount = BracketCount = BraceCount = 0; ObjCImpDecl = DeclPtrTy(); // Add #pragma handlers. These are removed and destroyed in the // destructor. - OptionsHandler.reset(new - PragmaOptionsHandler(&PP.getIdentifierTable().get("options"), - actions)); - PP.AddPragmaHandler(0, OptionsHandler.get()); - - PackHandler.reset(new - PragmaPackHandler(&PP.getIdentifierTable().get("pack"), actions)); - PP.AddPragmaHandler(0, PackHandler.get()); - - UnusedHandler.reset(new - PragmaUnusedHandler(&PP.getIdentifierTable().get("unused"), actions, - *this)); - PP.AddPragmaHandler(0, UnusedHandler.get()); - - WeakHandler.reset(new - PragmaWeakHandler(&PP.getIdentifierTable().get("weak"), actions)); - PP.AddPragmaHandler(0, WeakHandler.get()); + OptionsHandler.reset(new PragmaOptionsHandler(actions)); + PP.AddPragmaHandler(OptionsHandler.get()); + + PackHandler.reset(new PragmaPackHandler(actions)); + PP.AddPragmaHandler(PackHandler.get()); + + UnusedHandler.reset(new PragmaUnusedHandler(actions, *this)); + PP.AddPragmaHandler(UnusedHandler.get()); + + WeakHandler.reset(new PragmaWeakHandler(actions)); + PP.AddPragmaHandler(WeakHandler.get()); } 
/// If a crash happens while the parser is active, print out a line indicating @@ -261,25 +255,25 @@ bool Parser::SkipUntil(const tok::TokenKind *Toks, unsigned NumToks, void Parser::EnterScope(unsigned ScopeFlags) { if (NumCachedScopes) { Scope *N = ScopeCache[--NumCachedScopes]; - N->Init(CurScope, ScopeFlags); - CurScope = N; + N->Init(getCurScope(), ScopeFlags); + Actions.CurScope = N; } else { - CurScope = new Scope(CurScope, ScopeFlags); + Actions.CurScope = new Scope(getCurScope(), ScopeFlags); } - CurScope->setNumErrorsAtStart(Diags.getNumErrors()); + getCurScope()->setNumErrorsAtStart(Diags.getNumErrors()); } /// ExitScope - Pop a scope off the scope stack. void Parser::ExitScope() { - assert(CurScope && "Scope imbalance!"); + assert(getCurScope() && "Scope imbalance!"); // Inform the actions module that this scope is going away if there are any // decls in it. - if (!CurScope->decl_empty()) - Actions.ActOnPopScope(Tok.getLocation(), CurScope); + if (!getCurScope()->decl_empty()) + Actions.ActOnPopScope(Tok.getLocation(), getCurScope()); - Scope *OldScope = CurScope; - CurScope = OldScope->getParent(); + Scope *OldScope = getCurScope(); + Actions.CurScope = OldScope->getParent(); if (NumCachedScopes == ScopeCacheSize) delete OldScope; @@ -296,20 +290,21 @@ void Parser::ExitScope() { Parser::~Parser() { // If we still have scopes active, delete the scope tree. - delete CurScope; - + delete getCurScope(); + Actions.CurScope = 0; + // Free the scope cache. for (unsigned i = 0, e = NumCachedScopes; i != e; ++i) delete ScopeCache[i]; // Remove the pragma handlers we installed. 
- PP.RemovePragmaHandler(0, OptionsHandler.get()); + PP.RemovePragmaHandler(OptionsHandler.get()); OptionsHandler.reset(); - PP.RemovePragmaHandler(0, PackHandler.get()); + PP.RemovePragmaHandler(PackHandler.get()); PackHandler.reset(); - PP.RemovePragmaHandler(0, UnusedHandler.get()); + PP.RemovePragmaHandler(UnusedHandler.get()); UnusedHandler.reset(); - PP.RemovePragmaHandler(0, WeakHandler.get()); + PP.RemovePragmaHandler(WeakHandler.get()); WeakHandler.reset(); } @@ -320,9 +315,9 @@ void Parser::Initialize() { ConsumeToken(); // Create the translation unit scope. Install it as the current scope. - assert(CurScope == 0 && "A scope is already active?"); + assert(getCurScope() == 0 && "A scope is already active?"); EnterScope(Scope::DeclScope); - Actions.ActOnTranslationUnitScope(Tok.getLocation(), CurScope); + Actions.ActOnTranslationUnitScope(Tok.getLocation(), getCurScope()); if (Tok.is(tok::eof) && !getLang().CPlusPlus) // Empty source file is an extension in C @@ -375,7 +370,7 @@ void Parser::ParseTranslationUnit() { /*parse them all*/; ExitScope(); - assert(CurScope == 0 && "Scope imbalance!"); + assert(getCurScope() == 0 && "Scope imbalance!"); } /// ParseExternalDeclaration: @@ -401,6 +396,8 @@ void Parser::ParseTranslationUnit() { /// /// [C++0x/GNU] 'extern' 'template' declaration Parser::DeclGroupPtrTy Parser::ParseExternalDeclaration(CXX0XAttributeList Attr) { + ParenBraceBracketBalancer BalancerRAIIObj(*this); + DeclPtrTy SingleDecl; switch (Tok.getKind()) { case tok::semi: @@ -455,7 +452,7 @@ Parser::DeclGroupPtrTy Parser::ParseExternalDeclaration(CXX0XAttributeList Attr) SingleDecl = ParseObjCMethodDefinition(); break; case tok::code_completion: - Actions.CodeCompleteOrdinaryName(CurScope, + Actions.CodeCompleteOrdinaryName(getCurScope(), ObjCImpDecl? 
Action::CCC_ObjCImplementation : Action::CCC_Namespace); ConsumeCodeCompletionToken(); @@ -497,7 +494,7 @@ Parser::DeclGroupPtrTy Parser::ParseExternalDeclaration(CXX0XAttributeList Attr) /// \brief Determine whether the current token, if it occurs after a /// declarator, continues a declaration or declaration list. -bool Parser::isDeclarationAfterDeclarator() { +bool Parser::isDeclarationAfterDeclarator() const { return Tok.is(tok::equal) || // int X()= -> not a function def Tok.is(tok::comma) || // int X(), -> not a function def Tok.is(tok::semi) || // int X(); -> not a function def @@ -509,12 +506,17 @@ bool Parser::isDeclarationAfterDeclarator() { /// \brief Determine whether the current token, if it occurs after a /// declarator, indicates the start of a function definition. -bool Parser::isStartOfFunctionDefinition() { +bool Parser::isStartOfFunctionDefinition(const ParsingDeclarator &Declarator) { + assert(Declarator.getTypeObject(0).Kind == DeclaratorChunk::Function && + "Isn't a function declarator"); if (Tok.is(tok::l_brace)) // int X() {} return true; - if (!getLang().CPlusPlus) - return isDeclarationSpecifier(); // int X(f) int f; {} + // Handle K&R C argument lists: int X(f) int f; {} + if (!getLang().CPlusPlus && + Declarator.getTypeObject(0).Fun.isKNRPrototype()) + return isDeclarationSpecifier(); + return Tok.is(tok::colon) || // X() : Base() {} (used for ctors) Tok.is(tok::kw_try); // X() try { ... 
} } @@ -549,7 +551,7 @@ Parser::ParseDeclarationOrFunctionDefinition(ParsingDeclSpec &DS, // declaration-specifiers init-declarator-list[opt] ';' if (Tok.is(tok::semi)) { ConsumeToken(); - DeclPtrTy TheDecl = Actions.ParsedFreeStandingDeclSpec(CurScope, AS, DS); + DeclPtrTy TheDecl = Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS, DS); DS.complete(TheDecl); return Actions.ConvertDeclToDeclGroup(TheDecl); } @@ -637,7 +639,7 @@ Parser::DeclPtrTy Parser::ParseFunctionDefinition(ParsingDeclarator &D, // If this declaration was formed with a K&R-style identifier list for the // arguments, parse declarations for all of the args next. // int foo(a,b) int a; float b; {} - if (!FTI.hasPrototype && FTI.NumArgs != 0) + if (FTI.isKNRPrototype()) ParseKNRParamDeclarations(D); // We should have either an opening brace or, in a C++ constructor, @@ -660,12 +662,12 @@ Parser::DeclPtrTy Parser::ParseFunctionDefinition(ParsingDeclarator &D, // Tell the actions module that we have entered a function definition with the // specified Declarator for the function. DeclPtrTy Res = TemplateInfo.TemplateParams? - Actions.ActOnStartOfFunctionTemplateDef(CurScope, + Actions.ActOnStartOfFunctionTemplateDef(getCurScope(), Action::MultiTemplateParamsArg(Actions, TemplateInfo.TemplateParams->data(), TemplateInfo.TemplateParams->size()), D) - : Actions.ActOnStartOfFunctionDef(CurScope, D); + : Actions.ActOnStartOfFunctionDef(getCurScope(), D); // Break out of the ParsingDeclarator context before we parse the body. D.complete(Res); @@ -751,7 +753,7 @@ void Parser::ParseKNRParamDeclarations(Declarator &D) { // Ask the actions module to compute the type for this declarator. Action::DeclPtrTy Param = - Actions.ActOnParamDeclarator(CurScope, ParmDeclarator); + Actions.ActOnParamDeclarator(getCurScope(), ParmDeclarator); if (Param && // A missing identifier has already been diagnosed. 
@@ -807,7 +809,7 @@ void Parser::ParseKNRParamDeclarations(Declarator &D) { } // The actions module must verify that all arguments were declared. - Actions.ActOnFinishKNRParamDeclarations(CurScope, D, Tok.getLocation()); + Actions.ActOnFinishKNRParamDeclarations(getCurScope(), D, Tok.getLocation()); } @@ -919,7 +921,8 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext) { TypeResult Ty; if (Tok.is(tok::identifier)) { // FIXME: check whether the next token is '<', first! - Ty = Actions.ActOnTypenameType(TypenameLoc, SS, *Tok.getIdentifierInfo(), + Ty = Actions.ActOnTypenameType(getCurScope(), TypenameLoc, SS, + *Tok.getIdentifierInfo(), Tok.getLocation()); } else if (Tok.is(tok::annot_template_id)) { TemplateIdAnnotation *TemplateId @@ -934,7 +937,8 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext) { assert(Tok.is(tok::annot_typename) && "AnnotateTemplateIdTokenAsType isn't working properly"); if (Tok.getAnnotationValue()) - Ty = Actions.ActOnTypenameType(TypenameLoc, SS, SourceLocation(), + Ty = Actions.ActOnTypenameType(getCurScope(), TypenameLoc, SS, + SourceLocation(), Tok.getAnnotationValue()); else Ty = true; @@ -964,7 +968,7 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext) { if (Tok.is(tok::identifier)) { // Determine whether the identifier is a type name. if (TypeTy *Ty = Actions.getTypeName(*Tok.getIdentifierInfo(), - Tok.getLocation(), CurScope, &SS)) { + Tok.getLocation(), getCurScope(), &SS)) { // This is a typename. Replace the current token in-place with an // annotation type token. 
Tok.setKind(tok::annot_typename); @@ -993,7 +997,7 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext) { TemplateName.setIdentifier(Tok.getIdentifierInfo(), Tok.getLocation()); bool MemberOfUnknownSpecialization; if (TemplateNameKind TNK - = Actions.isTemplateName(CurScope, SS, TemplateName, + = Actions.isTemplateName(getCurScope(), SS, TemplateName, /*ObjectType=*/0, EnteringContext, Template, MemberOfUnknownSpecialization)) { // Consume the identifier. @@ -1084,19 +1088,19 @@ bool Parser::TryAnnotateCXXScopeToken(bool EnteringContext) { } void Parser::CodeCompletionRecovery() { - for (Scope *S = CurScope; S; S = S->getParent()) { + for (Scope *S = getCurScope(); S; S = S->getParent()) { if (S->getFlags() & Scope::FnScope) { - Actions.CodeCompleteOrdinaryName(CurScope, Action::CCC_RecoveryInFunction); + Actions.CodeCompleteOrdinaryName(getCurScope(), Action::CCC_RecoveryInFunction); return; } if (S->getFlags() & Scope::ClassScope) { - Actions.CodeCompleteOrdinaryName(CurScope, Action::CCC_Class); + Actions.CodeCompleteOrdinaryName(getCurScope(), Action::CCC_Class); return; } } - Actions.CodeCompleteOrdinaryName(CurScope, Action::CCC_Namespace); + Actions.CodeCompleteOrdinaryName(getCurScope(), Action::CCC_Namespace); } // Anchor the Parser::FieldCallback vtable to this translation unit. diff --git a/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h b/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h index 06bbbc2..addc795 100644 --- a/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h +++ b/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h @@ -80,6 +80,23 @@ namespace clang { } }; + /// \brief RAII object that makes sure paren/bracket/brace count is correct + /// after declaration/statement parsing, even when there's a parsing error. 
+ class ParenBraceBracketBalancer { + Parser &P; + unsigned short ParenCount, BracketCount, BraceCount; + public: + ParenBraceBracketBalancer(Parser &p) + : P(p), ParenCount(p.ParenCount), BracketCount(p.BracketCount), + BraceCount(p.BraceCount) { } + + ~ParenBraceBracketBalancer() { + P.ParenCount = ParenCount; + P.BracketCount = BracketCount; + P.BraceCount = BraceCount; + } + }; + } // end namespace clang #endif diff --git a/contrib/llvm/tools/clang/lib/Rewrite/CMakeLists.txt b/contrib/llvm/tools/clang/lib/Rewrite/CMakeLists.txt index ce9e1ed..ce728af 100644 --- a/contrib/llvm/tools/clang/lib/Rewrite/CMakeLists.txt +++ b/contrib/llvm/tools/clang/lib/Rewrite/CMakeLists.txt @@ -2,8 +2,14 @@ set(LLVM_NO_RTTI 1) add_clang_library(clangRewrite DeltaTree.cpp + FixItRewriter.cpp + FrontendActions.cpp + HTMLPrint.cpp HTMLRewrite.cpp + RewriteMacros.cpp + RewriteObjC.cpp RewriteRope.cpp + RewriteTest.cpp Rewriter.cpp TokenRewriter.cpp ) diff --git a/contrib/llvm/tools/clang/lib/Frontend/FixItRewriter.cpp b/contrib/llvm/tools/clang/lib/Rewrite/FixItRewriter.cpp index 7c9a566..29ac7e3 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/FixItRewriter.cpp +++ b/contrib/llvm/tools/clang/lib/Rewrite/FixItRewriter.cpp @@ -13,7 +13,7 @@ // //===----------------------------------------------------------------------===// -#include "clang/Frontend/FixItRewriter.h" +#include "clang/Rewrite/FixItRewriter.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/SourceLocation.h" #include "clang/Basic/SourceManager.h" diff --git a/contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp b/contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp new file mode 100644 index 0000000..6da3b4b --- /dev/null +++ b/contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp @@ -0,0 +1,106 @@ +//===--- FrontendActions.cpp ----------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// 
License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "clang/Rewrite/FrontendActions.h" +#include "clang/AST/ASTConsumer.h" +#include "clang/Lex/Preprocessor.h" +#include "clang/Parse/Parser.h" +#include "clang/Basic/FileManager.h" +#include "clang/Frontend/CompilerInstance.h" +#include "clang/Frontend/FrontendDiagnostic.h" +#include "clang/Frontend/Utils.h" +#include "clang/Rewrite/ASTConsumers.h" +#include "clang/Rewrite/FixItRewriter.h" +#include "clang/Rewrite/Rewriters.h" +#include "llvm/ADT/OwningPtr.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/System/Path.h" +using namespace clang; + +//===----------------------------------------------------------------------===// +// AST Consumer Actions +//===----------------------------------------------------------------------===// + +ASTConsumer *HTMLPrintAction::CreateASTConsumer(CompilerInstance &CI, + llvm::StringRef InFile) { + if (llvm::raw_ostream *OS = CI.createDefaultOutputFile(false, InFile)) + return CreateHTMLPrinter(OS, CI.getPreprocessor()); + return 0; +} + +FixItAction::FixItAction() {} +FixItAction::~FixItAction() {} + +ASTConsumer *FixItAction::CreateASTConsumer(CompilerInstance &CI, + llvm::StringRef InFile) { + return new ASTConsumer(); +} + +class FixItActionSuffixInserter : public FixItPathRewriter { + std::string NewSuffix; + +public: + explicit FixItActionSuffixInserter(std::string NewSuffix) + : NewSuffix(NewSuffix) {} + + std::string RewriteFilename(const std::string &Filename) { + llvm::sys::Path Path(Filename); + std::string Suffix = Path.getSuffix(); + Path.eraseSuffix(); + Path.appendSuffix(NewSuffix + "." 
+ Suffix); + return Path.c_str(); + } +}; + +bool FixItAction::BeginSourceFileAction(CompilerInstance &CI, + llvm::StringRef Filename) { + const FrontendOptions &FEOpts = getCompilerInstance().getFrontendOpts(); + if (!FEOpts.FixItSuffix.empty()) { + PathRewriter.reset(new FixItActionSuffixInserter(FEOpts.FixItSuffix)); + } else { + PathRewriter.reset(); + } + Rewriter.reset(new FixItRewriter(CI.getDiagnostics(), CI.getSourceManager(), + CI.getLangOpts(), PathRewriter.get())); + return true; +} + +void FixItAction::EndSourceFileAction() { + // Otherwise rewrite all files. + Rewriter->WriteFixedFiles(); +} + +//===----------------------------------------------------------------------===// +// Preprocessor Actions +//===----------------------------------------------------------------------===// + +ASTConsumer *RewriteObjCAction::CreateASTConsumer(CompilerInstance &CI, + llvm::StringRef InFile) { + if (llvm::raw_ostream *OS = CI.createDefaultOutputFile(false, InFile, "cpp")) + return CreateObjCRewriter(InFile, OS, + CI.getDiagnostics(), CI.getLangOpts(), + CI.getDiagnosticOpts().NoRewriteMacros); + return 0; +} + +void RewriteMacrosAction::ExecuteAction() { + CompilerInstance &CI = getCompilerInstance(); + llvm::raw_ostream *OS = CI.createDefaultOutputFile(true, getCurrentFile()); + if (!OS) return; + + RewriteMacrosInInput(CI.getPreprocessor(), OS); +} + +void RewriteTestAction::ExecuteAction() { + CompilerInstance &CI = getCompilerInstance(); + llvm::raw_ostream *OS = CI.createDefaultOutputFile(false, getCurrentFile()); + if (!OS) return; + + DoRewriteTest(CI.getPreprocessor(), OS); +} diff --git a/contrib/llvm/tools/clang/lib/Frontend/HTMLPrint.cpp b/contrib/llvm/tools/clang/lib/Rewrite/HTMLPrint.cpp index 9ea8cb3..f66bfcb 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/HTMLPrint.cpp +++ b/contrib/llvm/tools/clang/lib/Rewrite/HTMLPrint.cpp @@ -11,7 +11,7 @@ // //===----------------------------------------------------------------------===// -#include 
"clang/Frontend/ASTConsumers.h" +#include "clang/Rewrite/ASTConsumers.h" #include "clang/AST/ASTConsumer.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Decl.h" diff --git a/contrib/llvm/tools/clang/lib/Rewrite/Makefile b/contrib/llvm/tools/clang/lib/Rewrite/Makefile index 04c3530..1c5b8a8 100644 --- a/contrib/llvm/tools/clang/lib/Rewrite/Makefile +++ b/contrib/llvm/tools/clang/lib/Rewrite/Makefile @@ -11,11 +11,9 @@ # ##===----------------------------------------------------------------------===## -LEVEL = ../../../.. +CLANG_LEVEL := ../.. LIBRARYNAME := clangRewrite BUILD_ARCHIVE = 1 -CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include - -include $(LEVEL)/Makefile.common +include $(CLANG_LEVEL)/Makefile diff --git a/contrib/llvm/tools/clang/lib/Frontend/RewriteMacros.cpp b/contrib/llvm/tools/clang/lib/Rewrite/RewriteMacros.cpp index 954e8e2..910fa6b 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/RewriteMacros.cpp +++ b/contrib/llvm/tools/clang/lib/Rewrite/RewriteMacros.cpp @@ -12,7 +12,7 @@ // //===----------------------------------------------------------------------===// -#include "clang/Frontend/Utils.h" +#include "clang/Rewrite/Rewriters.h" #include "clang/Rewrite/Rewriter.h" #include "clang/Lex/Preprocessor.h" #include "clang/Basic/SourceManager.h" diff --git a/contrib/llvm/tools/clang/lib/Frontend/RewriteObjC.cpp b/contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp index 5dd7bdf..489fec9 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/RewriteObjC.cpp +++ b/contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp @@ -11,7 +11,7 @@ // //===----------------------------------------------------------------------===// -#include "clang/Frontend/ASTConsumers.h" +#include "clang/Rewrite/ASTConsumers.h" #include "clang/Rewrite/Rewriter.h" #include "clang/AST/AST.h" #include "clang/AST/ASTConsumer.h" @@ -268,6 +268,8 @@ namespace { void RewriteMethodDeclaration(ObjCMethodDecl *Method); void RewriteProperty(ObjCPropertyDecl 
*prop); void RewriteFunctionDecl(FunctionDecl *FD); + void RewriteBlockPointerType(std::string& Str, QualType Type); + void RewriteBlockPointerTypeVariable(std::string& Str, ValueDecl *VD); void RewriteBlockLiteralFunctionDecl(FunctionDecl *FD); void RewriteObjCQualifiedInterfaceTypes(Decl *Dcl); void RewriteTypeOfDecl(VarDecl *VD); @@ -835,11 +837,12 @@ void RewriteObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID, Getr += ")"; // close the precedence "scope" for "*". // Now, emit the argument types (if any). - if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FPRetType)) { + if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FPRetType)){ Getr += "("; for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) { if (i) Getr += ", "; - std::string ParamStr = FT->getArgType(i).getAsString(); + std::string ParamStr = FT->getArgType(i).getAsString( + Context->PrintingPolicy); Getr += ParamStr; } if (FT->isVariadic()) { @@ -1047,11 +1050,12 @@ void RewriteObjC::RewriteTypeIntoString(QualType T, std::string &ResultStr, else if (const BlockPointerType *BPT = retType->getAs<BlockPointerType>()) PointeeTy = BPT->getPointeeType(); if ((FPRetType = PointeeTy->getAs<FunctionType>())) { - ResultStr += FPRetType->getResultType().getAsString(); + ResultStr += FPRetType->getResultType().getAsString( + Context->PrintingPolicy); ResultStr += "(*"; } } else - ResultStr += T.getAsString(); + ResultStr += T.getAsString(Context->PrintingPolicy); } void RewriteObjC::RewriteObjCMethodDecl(ObjCMethodDecl *OMD, @@ -1107,10 +1111,11 @@ void RewriteObjC::RewriteObjCMethodDecl(ObjCMethodDecl *OMD, ResultStr += " *"; } else - ResultStr += Context->getObjCClassType().getAsString(); + ResultStr += Context->getObjCClassType().getAsString( + Context->PrintingPolicy); ResultStr += " self, "; - ResultStr += Context->getObjCSelType().getAsString(); + ResultStr += Context->getObjCSelType().getAsString(Context->PrintingPolicy); ResultStr += " _cmd"; // Method arguments. 
@@ -1144,7 +1149,8 @@ void RewriteObjC::RewriteObjCMethodDecl(ObjCMethodDecl *OMD, ResultStr += "("; for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) { if (i) ResultStr += ", "; - std::string ParamStr = FT->getArgType(i).getAsString(); + std::string ParamStr = FT->getArgType(i).getAsString( + Context->PrintingPolicy); ResultStr += ParamStr; } if (FT->isVariadic()) { @@ -1560,7 +1566,7 @@ Stmt *RewriteObjC::RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S, // Simply use 'id' for all qualified types. elementTypeAsString = "id"; else - elementTypeAsString = ElementType.getAsString(); + elementTypeAsString = ElementType.getAsString(Context->PrintingPolicy); buf += elementTypeAsString; buf += " "; elementName = D->getNameAsCString(); @@ -1576,7 +1582,7 @@ Stmt *RewriteObjC::RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S, // Simply use 'id' for all qualified types. elementTypeAsString = "id"; else - elementTypeAsString = VD->getType().getAsString(); + elementTypeAsString = VD->getType().getAsString(Context->PrintingPolicy); } // struct __objcFastEnumerationState enumState = { 0 }; @@ -2107,8 +2113,8 @@ CallExpr *RewriteObjC::SynthesizeCallToFunctionDecl( const FunctionType *FT = msgSendType->getAs<FunctionType>(); CallExpr *Exp = - new (Context) CallExpr(*Context, ICE, args, nargs, FT->getResultType(), - EndLoc); + new (Context) CallExpr(*Context, ICE, args, nargs, + FT->getCallResultType(*Context), EndLoc); return Exp; } @@ -2275,7 +2281,7 @@ void RewriteObjC::RewriteTypeOfDecl(VarDecl *ND) { } // FIXME. 
This will not work for multiple declarators; as in: // __typeof__(a) b,c,d; - std::string TypeAsString(QT.getAsString()); + std::string TypeAsString(QT.getAsString(Context->PrintingPolicy)); SourceLocation DeclLoc = ND->getTypeSpecStartLoc(); const char *startBuf = SM->getCharacterData(DeclLoc); if (ND->getInit()) { @@ -2326,8 +2332,8 @@ void RewriteObjC::RewriteFunctionDecl(FunctionDecl *FD) { RewriteObjCQualifiedInterfaceTypes(FD); } -static void RewriteBlockPointerType(std::string& Str, QualType Type) { - std::string TypeString(Type.getAsString()); +void RewriteObjC::RewriteBlockPointerType(std::string& Str, QualType Type) { + std::string TypeString(Type.getAsString(Context->PrintingPolicy)); const char *argPtr = TypeString.c_str(); if (!strchr(argPtr, '^')) { Str += TypeString; @@ -2340,9 +2346,10 @@ static void RewriteBlockPointerType(std::string& Str, QualType Type) { } // FIXME. Consolidate this routine with RewriteBlockPointerType. -static void RewriteBlockPointerTypeVariable(std::string& Str, ValueDecl *VD) { +void RewriteObjC::RewriteBlockPointerTypeVariable(std::string& Str, + ValueDecl *VD) { QualType Type = VD->getType(); - std::string TypeString(Type.getAsString()); + std::string TypeString(Type.getAsString(Context->PrintingPolicy)); const char *argPtr = TypeString.c_str(); int paren = 0; while (*argPtr) { @@ -2376,7 +2383,7 @@ void RewriteObjC::RewriteBlockLiteralFunctionDecl(FunctionDecl *FD) { if (!proto) return; QualType Type = proto->getResultType(); - std::string FdStr = Type.getAsString(); + std::string FdStr = Type.getAsString(Context->PrintingPolicy); FdStr += " "; FdStr += FD->getNameAsCString(); FdStr += "("; @@ -4099,7 +4106,7 @@ std::string RewriteObjC::SynthesizeBlockFunc(BlockExpr *CE, int i, const FunctionType *AFT = CE->getFunctionType(); QualType RT = AFT->getResultType(); std::string StructRef = "struct " + Tag; - std::string S = "static " + RT.getAsString() + " __" + + std::string S = "static " + 
RT.getAsString(Context->PrintingPolicy) + " __" + funcName + "_" + "block_func_" + utostr(i); BlockDecl *BD = CE->getBlockDecl(); @@ -5644,7 +5651,7 @@ void RewriteObjC::HandleDeclInMainFile(Decl *D) { RewriteBlocksInFunctionProtoType(FD->getType(), FD); // FIXME: If this should support Obj-C++, support CXXTryStmt - if (CompoundStmt *Body = FD->getCompoundBody()) { + if (CompoundStmt *Body = dyn_cast_or_null<CompoundStmt>(FD->getBody())) { CurFunctionDef = FD; CurFunctionDeclToDeclareForBlock = FD; CollectPropertySetters(Body); diff --git a/contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp b/contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp index fdb6fc3..e290921 100644 --- a/contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp +++ b/contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp @@ -532,7 +532,7 @@ RopePieceBTreeInterior::HandleChildPiece(unsigned i, RopePieceBTreeNode *RHS) { (getNumChildren()-i-1)*sizeof(Children[0])); Children[i+1] = RHS; ++NumChildren; - return false; + return 0; } // Okay, this node is full. 
Split it in half, moving WidthFactor children to diff --git a/contrib/llvm/tools/clang/lib/Frontend/RewriteTest.cpp b/contrib/llvm/tools/clang/lib/Rewrite/RewriteTest.cpp index 0414678..3620700 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/RewriteTest.cpp +++ b/contrib/llvm/tools/clang/lib/Rewrite/RewriteTest.cpp @@ -11,7 +11,7 @@ // //===----------------------------------------------------------------------===// -#include "clang/Frontend/Utils.h" +#include "clang/Rewrite/Rewriters.h" #include "clang/Lex/Preprocessor.h" #include "clang/Rewrite/TokenRewriter.h" #include "llvm/Support/raw_ostream.h" diff --git a/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp b/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp index 376678a..92e2b03 100644 --- a/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp +++ b/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp @@ -40,7 +40,7 @@ void RewriteBuffer::RemoveText(unsigned OrigOffset, unsigned Size) { AddReplaceDelta(OrigOffset, -Size); } -void RewriteBuffer::InsertText(unsigned OrigOffset, const llvm::StringRef &Str, +void RewriteBuffer::InsertText(unsigned OrigOffset, llvm::StringRef Str, bool InsertAfter) { // Nothing to insert, exit early. @@ -57,7 +57,7 @@ void RewriteBuffer::InsertText(unsigned OrigOffset, const llvm::StringRef &Str, /// buffer with a new string. This is effectively a combined "remove+insert" /// operation. void RewriteBuffer::ReplaceText(unsigned OrigOffset, unsigned OrigLength, - const llvm::StringRef &NewStr) { + llvm::StringRef NewStr) { unsigned RealOffset = getMappedOffset(OrigOffset, true); Buffer.erase(RealOffset, OrigLength); Buffer.insert(RealOffset, NewStr.begin(), NewStr.end()); @@ -72,7 +72,7 @@ void RewriteBuffer::ReplaceText(unsigned OrigOffset, unsigned OrigLength, /// getRangeSize - Return the size in bytes of the specified range if they /// are in the same file. If not, this returns -1. 
-int Rewriter::getRangeSize(SourceRange Range) const { +int Rewriter::getRangeSize(const CharSourceRange &Range) const { if (!isRewritable(Range.getBegin()) || !isRewritable(Range.getEnd())) return -1; @@ -97,12 +97,18 @@ int Rewriter::getRangeSize(SourceRange Range) const { // Adjust the end offset to the end of the last token, instead of being the - // start of the last token. - EndOff += Lexer::MeasureTokenLength(Range.getEnd(), *SourceMgr, *LangOpts); + // start of the last token if this is a token range. + if (Range.isTokenRange()) + EndOff += Lexer::MeasureTokenLength(Range.getEnd(), *SourceMgr, *LangOpts); return EndOff-StartOff; } +int Rewriter::getRangeSize(SourceRange Range) const { + return getRangeSize(CharSourceRange::getTokenRange(Range)); +} + + /// getRewrittenText - Return the rewritten form of the text in the specified /// range. If the start or end of the range was unrewritable or if they are /// in different buffers, this returns an empty string. @@ -179,7 +185,7 @@ RewriteBuffer &Rewriter::getEditBuffer(FileID FID) { /// InsertText - Insert the specified string at the specified location in the /// original buffer. -bool Rewriter::InsertText(SourceLocation Loc, const llvm::StringRef &Str, +bool Rewriter::InsertText(SourceLocation Loc, llvm::StringRef Str, bool InsertAfter) { if (!isRewritable(Loc)) return true; FileID FID; @@ -201,7 +207,7 @@ bool Rewriter::RemoveText(SourceLocation Start, unsigned Length) { /// buffer with a new string. This is effectively a combined "remove/insert" /// operation. 
bool Rewriter::ReplaceText(SourceLocation Start, unsigned OrigLength, - const llvm::StringRef &NewStr) { + llvm::StringRef NewStr) { if (!isRewritable(Start)) return true; FileID StartFileID; unsigned StartOffs = getLocationOffsetAndFileID(Start, StartFileID); diff --git a/contrib/llvm/tools/clang/lib/Runtime/Makefile b/contrib/llvm/tools/clang/lib/Runtime/Makefile deleted file mode 100644 index 580215a..0000000 --- a/contrib/llvm/tools/clang/lib/Runtime/Makefile +++ /dev/null @@ -1,101 +0,0 @@ -##===- clang/lib/Runtime/Makefile --------------------------*- Makefile -*-===## -# -# The LLVM Compiler Infrastructure -# -# This file is distributed under the University of Illinois Open Source -# License. See LICENSE.TXT for details. -# -##===----------------------------------------------------------------------===## -# -# This file defines support for building the Clang runtime libraries (which are -# implemented by compiler-rt) and placing them in the proper locations in the -# Clang resources directory (i.e., where the driver expects them). -# -##===----------------------------------------------------------------------===## - -LEVEL = ../../../.. -include $(LEVEL)/Makefile.common - -CLANG_VERSION := $(shell cat $(PROJ_SRC_DIR)/../../VER) -ResourceDir := $(PROJ_OBJ_ROOT)/$(BuildMode)/lib/clang/$(CLANG_VERSION) -PROJ_resources := $(DESTDIR)$(PROJ_prefix)/lib/clang/$(CLANG_VERSION) - -ResourceLibDir := $(ResourceDir)/lib -PROJ_resources_lib := $(PROJ_resources)/lib - -# Expect compiler-rt to be in llvm/projects/compiler-rt -COMPILERRT_SRC_ROOT := $(LLVM_SRC_ROOT)/projects/compiler-rt - -ifndef CLANG_NO_RUNTIME -ifeq ($(shell test -d $(COMPILERRT_SRC_ROOT) && echo OK),OK) - -# Select the compiler-rt configuration to use, and install directory. -# -# FIXME: Eventually, we want some kind of configure support for this. We want to -# build/install runtime libraries for as many targets as clang was configured to -# support. 
-RuntimeDirs := -ifeq ($(OS),Darwin) -RuntimeDirs += darwin -RuntimeLibrary.darwin.Configs = 10.4 armv6 cc_kext -endif - -# Rule to build the compiler-rt libraries we need. -# -# We build all the libraries in a single shot to avoid recursive make as much as -# possible. -BuildRuntimeLibraries: - $(Verb) $(MAKE) -C $(COMPILERRT_SRC_ROOT) \ - ProjSrcRoot=$(COMPILERRT_SRC_ROOT) \ - ProjObjRoot=$(PROJ_OBJ_DIR) \ - $(RuntimeDirs:%=clang_%) -.PHONY: BuildRuntimeLibraries -CleanRuntimeLibraries: - $(Verb) $(MAKE) -C $(COMPILERRT_SRC_ROOT) \ - ProjSrcRoot=$(COMPILERRT_SRC_ROOT) \ - ProjObjRoot=$(PROJ_OBJ_DIR) \ - clean -.PHONY: CleanRuntimeLibraries - -$(PROJ_resources_lib): - $(Verb) $(MKDIR) $@ - -# Expand rules for copying/installing each individual library. We can't use -# implicit rules here because we need to match against multiple things. -define RuntimeLibraryTemplate -$(PROJ_OBJ_DIR)/clang_$1/%/libcompiler_rt.a: BuildRuntimeLibraries - @true -.PRECIOUS: $(PROJ_OBJ_DIR)/clang_$1/%/libcompiler_rt.a - -# Rule to copy the libraries to their resource directory location. -$(ResourceLibDir)/$1/libclang_rt.%.a: \ - $(PROJ_OBJ_DIR)/clang_$1/%/libcompiler_rt.a \ - $(ResourceLibDir)/$1/.dir - $(Echo) Copying runtime library $1/$$* to build dir - $(Verb) cp $(PROJ_OBJ_DIR)/clang_$1/$$*/libcompiler_rt.a $$@ -RuntimeLibrary.$1: \ - $(RuntimeLibrary.$1.Configs:%=$(ResourceLibDir)/$1/libclang_rt.%.a) -.PHONY: RuntimeLibrary.$1 - -$(PROJ_resources_lib)/$1: $(PROJ_resources_lib) - $(Verb) $(MKDIR) $$@ - -$(PROJ_resources_lib)/$1/libclang_rt.%.a: \ - $(ResourceLibDir)/$1/libclang_rt.%.a | $(PROJ_resources_lib)/$1 - $(Echo) Installing compiler runtime library: $1/$$* - $(Verb) $(DataInstall) $$< $(PROJ_resources_lib)/$1 - -# Rule to install runtime libraries. 
-RuntimeLibraryInstall.$1: \ - $(RuntimeLibrary.$1.Configs:%=$(PROJ_resources_lib)/$1/libclang_rt.%.a) -.PHONY: RuntimeLibraryInstall.$1 -endef -$(foreach lib,$(RuntimeDirs), $(eval $(call RuntimeLibraryTemplate,$(lib)))) - -# Hook into the standard Makefile rules. -all-local:: $(RuntimeDirs:%=RuntimeLibrary.%) -install-local:: $(RuntimeDirs:%=RuntimeLibraryInstall.%) -clean-local:: CleanRuntimeLibraries - -endif -endif diff --git a/contrib/llvm/tools/clang/lib/Sema/CMakeLists.txt b/contrib/llvm/tools/clang/lib/Sema/CMakeLists.txt index b54e8eb..70b4792 100644 --- a/contrib/llvm/tools/clang/lib/Sema/CMakeLists.txt +++ b/contrib/llvm/tools/clang/lib/Sema/CMakeLists.txt @@ -34,4 +34,5 @@ add_clang_library(clangSema TargetAttributesSema.cpp ) -add_dependencies(clangSema ClangDiagnosticSema ClangStmtNodes) +add_dependencies(clangSema ClangARMNeon ClangAttrClasses ClangAttrList + ClangDiagnosticSema ClangDeclNodes ClangStmtNodes) diff --git a/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp b/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp index 543c1b6..3431ac6 100644 --- a/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp @@ -65,6 +65,7 @@ class JumpScopeChecker { public: JumpScopeChecker(Stmt *Body, Sema &S); private: + void BuildScopeInformation(Decl *D, unsigned &ParentScope); void BuildScopeInformation(Stmt *S, unsigned ParentScope); void VerifyJumps(); void VerifyIndirectJumps(); @@ -130,11 +131,13 @@ static std::pair<unsigned,unsigned> InDiag = diag::note_protected_by_variable_init; CanQualType T = VD->getType()->getCanonicalTypeUnqualified(); - while (CanQual<ArrayType> AT = T->getAs<ArrayType>()) - T = AT->getElementType(); - if (CanQual<RecordType> RT = T->getAs<RecordType>()) - if (!cast<CXXRecordDecl>(RT->getDecl())->hasTrivialDestructor()) - OutDiag = diag::note_exits_dtor; + if (!T->isDependentType()) { + while (CanQual<ArrayType> AT = T->getAs<ArrayType>()) + T = 
AT->getElementType(); + if (CanQual<RecordType> RT = T->getAs<RecordType>()) + if (!cast<CXXRecordDecl>(RT->getDecl())->hasTrivialDestructor()) + OutDiag = diag::note_exits_dtor; + } } return std::make_pair(InDiag, OutDiag); @@ -148,13 +151,33 @@ static std::pair<unsigned,unsigned> return std::make_pair(0U, 0U); } +/// \brief Build scope information for a declaration that is part of a DeclStmt. +void JumpScopeChecker::BuildScopeInformation(Decl *D, unsigned &ParentScope) { + bool isCPlusPlus = this->S.getLangOptions().CPlusPlus; + + // If this decl causes a new scope, push and switch to it. + std::pair<unsigned,unsigned> Diags + = GetDiagForGotoScopeDecl(D, isCPlusPlus); + if (Diags.first || Diags.second) { + Scopes.push_back(GotoScope(ParentScope, Diags.first, Diags.second, + D->getLocation())); + ParentScope = Scopes.size()-1; + } + + // If the decl has an initializer, walk it with the potentially new + // scope we just installed. + if (VarDecl *VD = dyn_cast<VarDecl>(D)) + if (Expr *Init = VD->getInit()) + BuildScopeInformation(Init, ParentScope); +} /// BuildScopeInformation - The statements from CI to CE are known to form a /// coherent VLA scope with a specified parent node. Walk through the /// statements, adding any labels or gotos to LabelAndGotoScopes and recursively /// walking the AST as needed. void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned ParentScope) { - + bool SkipFirstSubStmt = false; + // If we found a label, remember that it is in ParentScope scope. switch (S->getStmtClass()) { case Stmt::LabelStmtClass: @@ -172,8 +195,16 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned ParentScope) { IndirectJumps.push_back(cast<IndirectGotoStmt>(S)); break; - case Stmt::GotoStmtClass: case Stmt::SwitchStmtClass: + // Evaluate the condition variable before entering the scope of the switch + // statement. 
+ if (VarDecl *Var = cast<SwitchStmt>(S)->getConditionVariable()) { + BuildScopeInformation(Var, ParentScope); + SkipFirstSubStmt = true; + } + // Fall through + + case Stmt::GotoStmtClass: // Remember both what scope a goto is in as well as the fact that we have // it. This makes the second scan not have to walk the AST again. LabelAndGotoScopes[S] = ParentScope; @@ -186,33 +217,22 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned ParentScope) { for (Stmt::child_iterator CI = S->child_begin(), E = S->child_end(); CI != E; ++CI) { + if (SkipFirstSubStmt) { + SkipFirstSubStmt = false; + continue; + } + Stmt *SubStmt = *CI; if (SubStmt == 0) continue; - bool isCPlusPlus = this->S.getLangOptions().CPlusPlus; - // If this is a declstmt with a VLA definition, it defines a scope from here // to the end of the containing context. if (DeclStmt *DS = dyn_cast<DeclStmt>(SubStmt)) { // The decl statement creates a scope if any of the decls in it are VLAs // or have the cleanup attribute. for (DeclStmt::decl_iterator I = DS->decl_begin(), E = DS->decl_end(); - I != E; ++I) { - // If this decl causes a new scope, push and switch to it. - std::pair<unsigned,unsigned> Diags - = GetDiagForGotoScopeDecl(*I, isCPlusPlus); - if (Diags.first || Diags.second) { - Scopes.push_back(GotoScope(ParentScope, Diags.first, Diags.second, - (*I)->getLocation())); - ParentScope = Scopes.size()-1; - } - - // If the decl has an initializer, walk it with the potentially new - // scope we just installed. 
- if (VarDecl *VD = dyn_cast<VarDecl>(*I)) - if (Expr *Init = VD->getInit()) - BuildScopeInformation(Init, ParentScope); - } + I != E; ++I) + BuildScopeInformation(*I, ParentScope); continue; } diff --git a/contrib/llvm/tools/clang/lib/Sema/Lookup.h b/contrib/llvm/tools/clang/lib/Sema/Lookup.h index 0961299..271bb5b 100644 --- a/contrib/llvm/tools/clang/lib/Sema/Lookup.h +++ b/contrib/llvm/tools/clang/lib/Sema/Lookup.h @@ -424,6 +424,11 @@ public: Diagnose = false; } + /// Determines whether this lookup is suppressing diagnostics. + bool isSuppressingDiagnostics() const { + return Diagnose; + } + /// Sets a 'context' source range. void setContextRange(SourceRange SR) { NameContextRange = SR; diff --git a/contrib/llvm/tools/clang/lib/Sema/Makefile b/contrib/llvm/tools/clang/lib/Sema/Makefile index 3a5a99a..90f2dff 100644 --- a/contrib/llvm/tools/clang/lib/Sema/Makefile +++ b/contrib/llvm/tools/clang/lib/Sema/Makefile @@ -12,11 +12,9 @@ # ##===----------------------------------------------------------------------===## -LEVEL = ../../../.. +CLANG_LEVEL := ../.. 
LIBRARYNAME := clangSema BUILD_ARCHIVE = 1 -CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include - -include $(LEVEL)/Makefile.common +include $(CLANG_LEVEL)/Makefile diff --git a/contrib/llvm/tools/clang/lib/Sema/Sema.cpp b/contrib/llvm/tools/clang/lib/Sema/Sema.cpp index 523b196..cddc84e 100644 --- a/contrib/llvm/tools/clang/lib/Sema/Sema.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/Sema.cpp @@ -17,6 +17,7 @@ #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/APFloat.h" +#include "clang/Sema/ExternalSemaSource.h" #include "clang/AST/ASTConsumer.h" #include "clang/AST/ASTContext.h" #include "clang/AST/ASTDiagnostic.h" @@ -43,7 +44,10 @@ void Sema::ActOnTranslationUnitScope(SourceLocation Loc, Scope *S) { TUScope = S; PushDeclContext(S, Context.getTranslationUnitDecl()); - if (PP.getTargetInfo().getPointerWidth(0) >= 64) { + VAListTagName = PP.getIdentifierInfo("__va_list_tag"); + + if (!Context.isInt128Installed() && // May be set by PCHReader. + PP.getTargetInfo().getPointerWidth(0) >= 64) { TypeSourceInfo *TInfo; // Install [u]int128_t for 64-bit targets. @@ -58,6 +62,7 @@ void Sema::ActOnTranslationUnitScope(SourceLocation Loc, Scope *S) { SourceLocation(), &Context.Idents.get("__uint128_t"), TInfo), TUScope); + Context.setInt128Installed(); } @@ -122,8 +127,8 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, IdResolver(pp.getLangOptions()), StdNamespace(0), StdBadAlloc(0), GlobalNewDeleteDeclared(false), CompleteTranslationUnit(CompleteTranslationUnit), - NumSFINAEErrors(0), NonInstantiationEntries(0), - CurrentInstantiationScope(0), TyposCorrected(0), + NumSFINAEErrors(0), SuppressAccessChecking(false), + NonInstantiationEntries(0), CurrentInstantiationScope(0), TyposCorrected(0), AnalysisWarnings(*this) { TUScope = 0; @@ -223,7 +228,8 @@ void Sema::ActOnEndOfTranslationUnit() { // Remove functions that turned out to be used. 
UnusedStaticFuncs.erase(std::remove_if(UnusedStaticFuncs.begin(), UnusedStaticFuncs.end(), - std::mem_fun(&FunctionDecl::isUsed)), + std::bind2nd(std::mem_fun(&FunctionDecl::isUsed), + true)), UnusedStaticFuncs.end()); // Check for #pragma weak identifiers that were never declared @@ -381,6 +387,34 @@ Sema::Diag(SourceLocation Loc, const PartialDiagnostic& PD) { return Builder; } +/// \brief Determines the active Scope associated with the given declaration +/// context. +/// +/// This routine maps a declaration context to the active Scope object that +/// represents that declaration context in the parser. It is typically used +/// from "scope-less" code (e.g., template instantiation, lazy creation of +/// declarations) that injects a name for name-lookup purposes and, therefore, +/// must update the Scope. +/// +/// \returns The scope corresponding to the given declaraion context, or NULL +/// if no such scope is open. +Scope *Sema::getScopeForContext(DeclContext *Ctx) { + + if (!Ctx) + return 0; + + Ctx = Ctx->getPrimaryContext(); + for (Scope *S = getCurScope(); S; S = S->getParent()) { + // Ignore scopes that cannot have declarations. This is important for + // out-of-line definitions of static class members. + if (S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope)) + if (DeclContext *Entity = static_cast<DeclContext *> (S->getEntity())) + if (Ctx == Entity->getPrimaryContext()) + return S; + } + + return 0; +} /// \brief Enter a new function scope void Sema::PushFunctionScope() { @@ -425,3 +459,6 @@ BlockScopeInfo *Sema::getCurBlock() { return dyn_cast<BlockScopeInfo>(FunctionScopes.back()); } + +// Pin this vtable to this file. 
+ExternalSemaSource::~ExternalSemaSource() {} diff --git a/contrib/llvm/tools/clang/lib/Sema/Sema.h b/contrib/llvm/tools/clang/lib/Sema/Sema.h index dfc45ac..8336918 100644 --- a/contrib/llvm/tools/clang/lib/Sema/Sema.h +++ b/contrib/llvm/tools/clang/lib/Sema/Sema.h @@ -118,7 +118,8 @@ struct FunctionScopeInfo { /// \brief Set true when a function, method contains a VLA or ObjC try block, /// which introduce scopes that need to be checked for goto conditions. If a - /// function does not contain this, then it need not have the jump checker run on it. + /// function does not contain this, then it need not have the jump checker run + /// on it. bool NeedsScopeChecking; /// \brief The number of errors that had occurred before starting this @@ -155,24 +156,25 @@ struct FunctionScopeInfo { /// \brief Retains information about a block that is currently being parsed. struct BlockScopeInfo : FunctionScopeInfo { - llvm::SmallVector<ParmVarDecl*, 8> Params; - bool hasPrototype; - bool isVariadic; bool hasBlockDeclRefExprs; BlockDecl *TheDecl; - + /// TheScope - This is the scope for the block itself, which contains /// arguments etc. Scope *TheScope; - /// ReturnType - This will get set to block result type, by looking at - /// return types, if any, in the block body. + /// ReturnType - The return type of the block, or null if the block + /// signature didn't provide an explicit return type. QualType ReturnType; + /// BlockType - The function type of the block, if one was given. + /// Its return type may be BuiltinType::Dependent. 
+ QualType FunctionType; + BlockScopeInfo(unsigned NumErrors, Scope *BlockScope, BlockDecl *Block) - : FunctionScopeInfo(NumErrors), hasPrototype(false), isVariadic(false), - hasBlockDeclRefExprs(false), TheDecl(Block), TheScope(BlockScope) + : FunctionScopeInfo(NumErrors), hasBlockDeclRefExprs(false), + TheDecl(Block), TheScope(BlockScope) { IsBlockInfo = true; } @@ -239,6 +241,10 @@ public: /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; + /// VAListTagName - The declaration name corresponding to __va_list_tag. + /// This is used as part of a hack to omit that class from ADL results. + DeclarationName VAListTagName; + /// A RAII object to temporarily push a declaration context. class ContextRAII { private: @@ -669,6 +675,8 @@ public: virtual void ActOnEndOfTranslationUnit(); + Scope *getScopeForContext(DeclContext *Ctx); + void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); void PopFunctionOrBlockScope(); @@ -713,9 +721,13 @@ public: // QualType adjustParameterType(QualType T); - QualType BuildPointerType(QualType T, unsigned Quals, + QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs); + QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVR) { + return BuildQualifiedType(T, Loc, Qualifiers::fromCVRMask(CVR)); + } + QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); - QualType BuildReferenceType(QualType T, bool LValueRef, unsigned Quals, + QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, @@ -727,13 +739,12 @@ public: bool Variadic, unsigned Quals, SourceLocation Loc, DeclarationName Entity); QualType BuildMemberPointerType(QualType T, QualType Class, - unsigned Quals, SourceLocation Loc, + SourceLocation Loc, DeclarationName Entity); - QualType 
BuildBlockPointerType(QualType T, unsigned Quals, + QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); - QualType GetTypeForDeclarator(Declarator &D, Scope *S, - TypeSourceInfo **TInfo = 0, - TagDecl **OwnedDecl = 0); + TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S, + TagDecl **OwnedDecl = 0); TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T, TypeSourceInfo *ReturnTypeInfo); /// \brief Create a LocInfoType to hold the given QualType and TypeSourceInfo. @@ -761,8 +772,6 @@ public: const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); - bool UnwrapSimilarPointerTypes(QualType& T1, QualType& T2); - virtual TypeResult ActOnTypeName(Scope *S, Declarator &D); bool RequireCompleteType(SourceLocation Loc, QualType T, @@ -837,6 +846,9 @@ public: bool &OverloadableAttrRequired); void CheckMain(FunctionDecl *FD); virtual DeclPtrTy ActOnParamDeclarator(Scope *S, Declarator &D); + ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, + SourceLocation Loc, + QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, TypeSourceInfo *TSInfo, QualType T, IdentifierInfo *Name, @@ -1094,10 +1106,19 @@ public: /// non-function. 
Ovl_NonFunction }; - OverloadKind CheckOverload(FunctionDecl *New, + OverloadKind CheckOverload(Scope *S, + FunctionDecl *New, const LookupResult &OldDecls, - NamedDecl *&OldDecl); - bool IsOverload(FunctionDecl *New, FunctionDecl *Old); + NamedDecl *&OldDecl, + bool IsForUsingDecl); + bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl); + + bool TryImplicitConversion(InitializationSequence &Sequence, + const InitializedEntity &Entity, + Expr *From, + bool SuppressUserConversions, + bool AllowExplicit, + bool InOverloadResolution); ImplicitConversionSequence TryImplicitConversion(Expr* From, QualType ToType, @@ -1170,6 +1191,16 @@ public: ImplicitConversionSequence TryContextuallyConvertToObjCId(Expr *From); bool PerformContextuallyConvertToObjCId(Expr *&From); + OwningExprResult + ConvertToIntegralOrEnumerationType(SourceLocation Loc, ExprArg FromE, + const PartialDiagnostic &NotIntDiag, + const PartialDiagnostic &IncompleteDiag, + const PartialDiagnostic &ExplicitConvDiag, + const PartialDiagnostic &ExplicitConvNote, + const PartialDiagnostic &AmbigDiag, + const PartialDiagnostic &AmbigNote, + const PartialDiagnostic &ConvDiag); + bool PerformObjectMemberConversion(Expr *&From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, @@ -1448,6 +1479,8 @@ public: void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); + DeclContext::lookup_result LookupConstructors(CXXRecordDecl *Class); + CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); void ArgumentDependentLookup(DeclarationName Name, bool Operator, Expr **Args, unsigned NumArgs, @@ -1457,7 +1490,7 @@ public: VisibleDeclConsumer &Consumer); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer); - + /// \brief The context in which typo-correction occurs. 
/// /// The typo-correction context affects which keywords (if any) are @@ -1508,7 +1541,7 @@ public: // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); - void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AttrList); + void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL); void WarnUndefinedMethod(SourceLocation ImpLoc, ObjCMethodDecl *method, bool &IncompleteImpl, unsigned DiagID); @@ -1555,16 +1588,9 @@ public: /// CollectImmediateProperties - This routine collects all properties in /// the class and its conforming protocols; but not those it its super class. void CollectImmediateProperties(ObjCContainerDecl *CDecl, - llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& PropMap); + llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& PropMap, + llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& SuperPropMap); - /// ProtocolConformsToSuperClass - Returns true if class has a super class - /// and it, or its nested super class conforms to the protocol. - bool ProtocolConformsToSuperClass(const ObjCInterfaceDecl *IDecl, - const ObjCProtocolDecl *PDecl); - /// ProtocolConformsToProtocol - Returns true if 2nd Protocol (PDecl) is - /// qualified by the 1st. - bool ProtocolConformsToProtocol(const ObjCProtocolDecl *NestedProtocol, - const ObjCProtocolDecl *PDecl); /// LookupPropertyDecl - Looks up a property in the current class and all /// its protocols. 
@@ -1583,7 +1609,7 @@ public: const bool isReadWrite, const unsigned Attributes, bool *isOverridingProperty, - QualType T, + TypeSourceInfo *T, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to @@ -1596,7 +1622,8 @@ public: Selector SetterSel, const bool isAssign, const bool isReadWrite, - const unsigned Attributes, QualType T, + const unsigned Attributes, + TypeSourceInfo *T, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = 0); @@ -1935,7 +1962,8 @@ public: OwningExprResult LookupMemberExpr(LookupResult &R, Expr *&Base, bool &IsArrow, SourceLocation OpLoc, CXXScopeSpec &SS, - DeclPtrTy ObjCImpDecl); + DeclPtrTy ObjCImpDecl, + bool HasTemplateArgs); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, @@ -2100,6 +2128,7 @@ public: AttributeList *AttrList); virtual void ActOnFinishNamespaceDef(DeclPtrTy Dcl, SourceLocation RBrace); + NamespaceDecl *getStdNamespace(); virtual DeclPtrTy ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, @@ -2196,26 +2225,69 @@ public: /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); + /// \brief Declare the implicit default constructor for the given class. + /// + /// \param ClassDecl The class declaration into which the implicit + /// default constructor will be added. + /// + /// \returns The implicitly-declared default constructor. + CXXConstructorDecl *DeclareImplicitDefaultConstructor( + CXXRecordDecl *ClassDecl); + /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); + /// \brief Declare the implicit destructor for the given class. + /// + /// \param ClassDecl The class declaration into which the implicit + /// destructor will be added. 
+ /// + /// \returns The implicitly-declared destructor. + CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); + /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, - CXXDestructorDecl *Destructor); + CXXDestructorDecl *Destructor); + /// \brief Declare the implicit copy constructor for the given class. + /// + /// \param S The scope of the class, which may be NULL if this is a + /// template instantiation. + /// + /// \param ClassDecl The class declaration into which the implicit + /// copy constructor will be added. + /// + /// \returns The implicitly-declared copy constructor. + CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); + /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor, unsigned TypeQuals); - /// \brief Defined and implicitly-declared copy assignment operator. + /// \brief Declare the implicit copy assignment operator for the given class. + /// + /// \param S The scope of the class, which may be NULL if this is a + /// template instantiation. + /// + /// \param ClassDecl The class declaration into which the implicit + /// copy-assignment operator will be added. + /// + /// \returns The implicitly-declared copy assignment operator. + CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); + + /// \brief Defined an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); + /// \brief Force the declaration of any implicitly-declared members of this + /// class. 
+ void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); + /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. @@ -2295,7 +2367,7 @@ public: SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, - bool ParenTypeId, Declarator &D, + SourceRange TypeIdParens, Declarator &D, SourceLocation ConstructorLParen, MultiExprArg ConstructorArgs, SourceLocation ConstructorRParen); @@ -2303,7 +2375,7 @@ public: SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, - bool ParenTypeId, + SourceRange TypeIdParens, QualType AllocType, SourceLocation TypeLoc, SourceRange TypeRange, @@ -2529,6 +2601,10 @@ public: virtual bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS); + virtual DeclPtrTy ActOnAccessSpecifier(AccessSpecifier Access, + SourceLocation ASLoc, + SourceLocation ColonLoc); + virtual DeclPtrTy ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, @@ -2605,14 +2681,14 @@ public: /// \returns true if any work was done, false otherwise. 
bool DefineUsedVTables(); - void AddImplicitlyDeclaredMembersToClass(Scope *S, CXXRecordDecl *ClassDecl); + void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); virtual void ActOnMemInitializers(DeclPtrTy ConstructorDecl, SourceLocation ColonLoc, MemInitTy **MemInits, unsigned NumMemInits, bool AnyErrors); - void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record); + void CheckCompletedCXXClass(CXXRecordDecl *Record); virtual void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc, DeclPtrTy TagDecl, SourceLocation LBrac, @@ -2644,7 +2720,7 @@ public: QualType CheckConstructorDeclarator(Declarator &D, QualType R, FunctionDecl::StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); - QualType CheckDestructorDeclarator(Declarator &D, + QualType CheckDestructorDeclarator(Declarator &D, QualType R, FunctionDecl::StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, @@ -2718,6 +2794,7 @@ public: const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); + //===--------------------------------------------------------------------===// // C++ Access Control // @@ -2744,7 +2821,8 @@ public: AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, - AccessSpecifier Access); + AccessSpecifier Access, + bool IsCopyBindingRefToTemp = false); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag); @@ -2772,6 +2850,12 @@ public: void HandleDelayedAccessCheck(DelayedDiagnostic &DD, Decl *Ctx); + /// A flag to suppress access checking. 
+ bool SuppressAccessChecking; + + void ActOnStartSuppressingAccessChecks(); + void ActOnStopSuppressingAccessChecks(); + enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, @@ -2826,29 +2910,25 @@ public: SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, - unsigned Depth, unsigned Position); - virtual void ActOnTypeParameterDefault(DeclPtrTy TypeParam, - SourceLocation EqualLoc, - SourceLocation DefaultLoc, - TypeTy *Default); + unsigned Depth, unsigned Position, + SourceLocation EqualLoc, + TypeTy *DefaultArg); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); virtual DeclPtrTy ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, - unsigned Position); - virtual void ActOnNonTypeTemplateParameterDefault(DeclPtrTy TemplateParam, - SourceLocation EqualLoc, - ExprArg Default); + unsigned Position, + SourceLocation EqualLoc, + ExprArg DefaultArg); virtual DeclPtrTy ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParamsTy *Params, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, - unsigned Position); - virtual void ActOnTemplateTemplateParameterDefault(DeclPtrTy TemplateParam, - SourceLocation EqualLoc, - const ParsedTemplateArgument &Default); + unsigned Position, + SourceLocation EqualLoc, + const ParsedTemplateArgument &DefaultArg); virtual TemplateParamsTy * ActOnTemplateParameterList(unsigned Depth, @@ -2876,7 +2956,8 @@ public: TemplateParameterList **ParamLists, unsigned NumParamLists, bool IsFriend, - bool &IsExplicitSpecialization); + bool &IsExplicitSpecialization, + bool &Invalid); DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, @@ -2912,11 +2993,13 @@ public: SourceLocation NameLoc, const TemplateArgumentListInfo &TemplateArgs); - virtual TemplateTy ActOnDependentTemplateName(SourceLocation TemplateKWLoc, - CXXScopeSpec &SS, - UnqualifiedId &Name, - TypeTy 
*ObjectType, - bool EnteringContext); + virtual TemplateNameKind ActOnDependentTemplateName(Scope *S, + SourceLocation TemplateKWLoc, + CXXScopeSpec &SS, + UnqualifiedId &Name, + TypeTy *ObjectType, + bool EnteringContext, + TemplateTy &Template); bool CheckClassTemplatePartialSpecializationArgs( TemplateParameterList *TemplateParams, @@ -2940,7 +3023,7 @@ public: Declarator &D); virtual DeclPtrTy ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope, - MultiTemplateParamsArg TemplateParameterLists, + MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool @@ -2948,7 +3031,7 @@ public: TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, - SourceLocation PrevPointOfInstantiation, + SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, @@ -3096,25 +3179,29 @@ public: /// \brief Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// + /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. virtual TypeResult - ActOnTypenameType(SourceLocation TypenameLoc, const CXXScopeSpec &SS, - const IdentifierInfo &II, SourceLocation IdLoc); + ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, + const CXXScopeSpec &SS, const IdentifierInfo &II, + SourceLocation IdLoc); /// \brief Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// + /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). 
/// \param TemplateLoc the location of the 'template' keyword, if any. /// \param Ty the type that the typename specifier refers to. virtual TypeResult - ActOnTypenameType(SourceLocation TypenameLoc, const CXXScopeSpec &SS, - SourceLocation TemplateLoc, TypeTy *Ty); + ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, + const CXXScopeSpec &SS, SourceLocation TemplateLoc, + TypeTy *Ty); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS, @@ -3478,6 +3565,12 @@ public: /// to implement it anywhere else. ActiveTemplateInstantiation LastTemplateInstantiationErrorContext; + /// \brief The stack of calls expression undergoing template instantiation. + /// + /// The top of this stack is used by a fixit instantiating unresolved + /// function calls to fix the AST to match the textual change it prints. + llvm::SmallVector<CallExpr *, 8> CallsUndergoingInstantiation; + /// \brief A stack object to be created when performing template /// instantiation. /// @@ -3878,7 +3971,7 @@ public: SourceLocation *IdentLocs, unsigned NumElts); - virtual DeclPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc, + virtual DeclPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, const IdentifierLocPair *IdentList, unsigned NumElts, AttributeList *attrList); @@ -4057,6 +4150,9 @@ public: /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); + /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. + void AddAlignedAttr(SourceLocation AttrLoc, Decl *D, Expr *E); + /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. 
@@ -4232,7 +4328,7 @@ public: bool IgnoreBaseAccess = false); bool PerformImplicitConversion(Expr *&From, QualType ToType, const StandardConversionSequence& SCS, - AssignmentAction Action, bool IgnoreBaseAccess); + AssignmentAction Action,bool IgnoreBaseAccess); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). @@ -4253,11 +4349,12 @@ public: QualType CheckShiftOperands( // C99 6.5.7 Expr *&lex, Expr *&rex, SourceLocation OpLoc, bool isCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 - Expr *&lex, Expr *&rex, SourceLocation OpLoc, unsigned Opc, bool isRelational); + Expr *&lex, Expr *&rex, SourceLocation OpLoc, unsigned Opc, + bool isRelational); QualType CheckBitwiseOperands( // C99 6.5.[10...12] Expr *&lex, Expr *&rex, SourceLocation OpLoc, bool isCompAssign = false); QualType CheckLogicalOperands( // C99 6.5.[13,14] - Expr *&lex, Expr *&rex, SourceLocation OpLoc); + Expr *&lex, Expr *&rex, SourceLocation OpLoc, unsigned Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
@@ -4413,6 +4510,7 @@ public: //@{ virtual void CodeCompleteOrdinaryName(Scope *S, CodeCompletionContext CompletionContext); + virtual void CodeCompleteExpression(Scope *S, QualType T); virtual void CodeCompleteMemberReferenceExpr(Scope *S, ExprTy *Base, SourceLocation OpLoc, bool IsArrow); @@ -4420,6 +4518,10 @@ public: virtual void CodeCompleteCase(Scope *S); virtual void CodeCompleteCall(Scope *S, ExprTy *Fn, ExprTy **Args, unsigned NumArgs); + virtual void CodeCompleteInitializer(Scope *S, DeclPtrTy D); + virtual void CodeCompleteReturn(Scope *S); + virtual void CodeCompleteAssignmentRHS(Scope *S, ExprTy *LHS); + virtual void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext); virtual void CodeCompleteUsing(Scope *S); @@ -4440,7 +4542,7 @@ public: virtual void CodeCompleteObjCPropertySetter(Scope *S, DeclPtrTy ClassDecl, DeclPtrTy *Methods, unsigned NumMethods); - + virtual void CodeCompleteObjCMessageReceiver(Scope *S); virtual void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, IdentifierInfo **SelIdents, unsigned NumSelIdents); @@ -4473,6 +4575,13 @@ public: bool IsInstanceMethod, TypeTy *ReturnType, DeclPtrTy IDecl); + virtual void CodeCompleteObjCMethodDeclSelector(Scope *S, + bool IsInstanceMethod, + bool AtParameterName, + TypeTy *ReturnType, + IdentifierInfo **SelIdents, + unsigned NumSelIdents); + //@} //===--------------------------------------------------------------------===// @@ -4491,6 +4600,9 @@ private: Action::OwningExprResult CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); + bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); + bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); + bool SemaBuiltinVAStart(CallExpr *TheCall); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); @@ -4503,7 +4615,7 @@ private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool 
SemaBuiltinObjectSize(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); - bool SemaBuiltinAtomicOverloaded(CallExpr *TheCall); + OwningExprResult SemaBuiltinAtomicOverloaded(OwningExprResult TheCallResult); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaCheckStringLiteral(const Expr *E, const CallExpr *TheCall, diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp index 444ee79..e110e3d 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp @@ -870,6 +870,10 @@ static void DiagnoseAccessPath(Sema &S, << BS->getSourceRange() << (BaseAccess == AS_protected) << (BS->getAccessSpecifierAsWritten() == AS_none); + + if (D) + S.Diag(D->getLocation(), diag::note_field_decl); + return; } } @@ -1020,6 +1024,9 @@ static Sema::AccessResult CheckAccess(Sema &S, SourceLocation Loc, if (Entity.getAccess() == AS_public) return Sema::AR_accessible; + if (S.SuppressAccessChecking) + return Sema::AR_accessible; + // If we're currently parsing a top-level declaration, delay // diagnostics. This is the only case where parsing a declaration // can actually change our effective context for the purposes of @@ -1153,9 +1160,10 @@ Sema::AccessResult Sema::CheckDestructorAccess(SourceLocation Loc, /// Checks access to a constructor. 
Sema::AccessResult Sema::CheckConstructorAccess(SourceLocation UseLoc, - CXXConstructorDecl *Constructor, - const InitializedEntity &Entity, - AccessSpecifier Access) { + CXXConstructorDecl *Constructor, + const InitializedEntity &Entity, + AccessSpecifier Access, + bool IsCopyBindingRefToTemp) { if (!getLangOptions().AccessControl || Access == AS_public) return AR_accessible; @@ -1166,7 +1174,9 @@ Sema::AccessResult Sema::CheckConstructorAccess(SourceLocation UseLoc, QualType()); switch (Entity.getKind()) { default: - AccessEntity.setDiag(diag::err_access_ctor); + AccessEntity.setDiag(IsCopyBindingRefToTemp + ? diag::ext_rvalue_to_reference_access_ctor + : diag::err_access_ctor); break; case InitializedEntity::EK_Base: @@ -1327,3 +1337,15 @@ void Sema::CheckLookupAccess(const LookupResult &R) { } } } + +void Sema::ActOnStartSuppressingAccessChecks() { + assert(!SuppressAccessChecking && + "Tried to start access check suppression when already started."); + SuppressAccessChecking = true; +} + +void Sema::ActOnStopSuppressingAccessChecks() { + assert(SuppressAccessChecking && + "Tried to stop access check suprression when already stopped."); + SuppressAccessChecking = false; +} diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp index 82978c9..69f27b0 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp @@ -135,13 +135,24 @@ void Sema::ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, return; } - // We don't support #pragma options align=power. switch (Kind) { + // For all targets we support native and natural are the same. + // + // FIXME: This is not true on Darwin/PPC. + case POAK_Native: + case POAK_Power: case POAK_Natural: Context->push(0); Context->setAlignment(0); break; + // Note that '#pragma options align=packed' is not equivalent to attribute + // packed, it has a different precedence relative to attribute aligned. 
+ case POAK_Packed: + Context->push(0); + Context->setAlignment(1); + break; + case POAK_Mac68k: // Check if the target supports this. if (!PP.getTargetInfo().hasAlignMac68kSupport()) { diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCXXCast.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCXXCast.cpp index 9b95552..b8e27e7 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaCXXCast.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaCXXCast.cpp @@ -153,7 +153,8 @@ Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, case tok::kw_const_cast: if (!TypeDependent) CheckConstCast(*this, Ex, DestType, OpRange, DestRange); - return Owned(new (Context) CXXConstCastExpr(DestType.getNonReferenceType(), + return Owned(new (Context) CXXConstCastExpr( + DestType.getNonLValueExprType(Context), Ex, DestTInfo, OpLoc)); case tok::kw_dynamic_cast: { @@ -161,7 +162,8 @@ Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, CXXBaseSpecifierArray BasePath; if (!TypeDependent) CheckDynamicCast(*this, Ex, DestType, OpRange, DestRange, Kind, BasePath); - return Owned(new (Context)CXXDynamicCastExpr(DestType.getNonReferenceType(), + return Owned(new (Context)CXXDynamicCastExpr( + DestType.getNonLValueExprType(Context), Kind, Ex, BasePath, DestTInfo, OpLoc)); } @@ -170,7 +172,7 @@ Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, if (!TypeDependent) CheckReinterpretCast(*this, Ex, DestType, OpRange, DestRange, Kind); return Owned(new (Context) CXXReinterpretCastExpr( - DestType.getNonReferenceType(), + DestType.getNonLValueExprType(Context), Kind, Ex, CXXBaseSpecifierArray(), DestTInfo, OpLoc)); } @@ -180,7 +182,8 @@ Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, if (!TypeDependent) CheckStaticCast(*this, Ex, DestType, OpRange, Kind, BasePath); - return Owned(new (Context) CXXStaticCastExpr(DestType.getNonReferenceType(), + return Owned(new (Context) CXXStaticCastExpr( + DestType.getNonLValueExprType(Context), Kind, Ex, BasePath, 
DestTInfo, OpLoc)); } @@ -233,6 +236,15 @@ bool UnwrapDissimilarPointerTypes(QualType& T1, QualType& T2) { T2 = T2MPType->getPointeeType(); return true; } + + const BlockPointerType *T1BPType = T1->getAs<BlockPointerType>(), + *T2BPType = T2->getAs<BlockPointerType>(); + if (T1BPType && T2BPType) { + T1 = T1BPType->getPointeeType(); + T2 = T2BPType->getPointeeType(); + return true; + } + return false; } @@ -246,9 +258,11 @@ CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType) { // C++ 4.4. We piggyback on Sema::IsQualificationConversion for this, since // the rules are non-trivial. So first we construct Tcv *...cv* as described // in C++ 5.2.11p8. - assert((SrcType->isAnyPointerType() || SrcType->isMemberPointerType()) && + assert((SrcType->isAnyPointerType() || SrcType->isMemberPointerType() || + SrcType->isBlockPointerType()) && "Source type is not pointer or pointer to member."); - assert((DestType->isAnyPointerType() || DestType->isMemberPointerType()) && + assert((DestType->isAnyPointerType() || DestType->isMemberPointerType() || + DestType->isBlockPointerType()) && "Destination type is not pointer or pointer to member."); QualType UnwrappedSrcType = Self.Context.getCanonicalType(SrcType), @@ -257,10 +271,16 @@ CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType) { // Find the qualifications. while (UnwrapDissimilarPointerTypes(UnwrappedSrcType, UnwrappedDestType)) { - cv1.push_back(UnwrappedSrcType.getQualifiers()); - cv2.push_back(UnwrappedDestType.getQualifiers()); + Qualifiers SrcQuals; + Self.Context.getUnqualifiedArrayType(UnwrappedSrcType, SrcQuals); + cv1.push_back(SrcQuals); + + Qualifiers DestQuals; + Self.Context.getUnqualifiedArrayType(UnwrappedDestType, DestQuals); + cv2.push_back(DestQuals); } - assert(cv1.size() > 0 && "Must have at least one pointer level."); + if (cv1.empty()) + return false; // Construct void pointers with those qualifiers (in reverse order of // unwrapping, of course). 
@@ -1014,7 +1034,7 @@ static TryCastResult TryConstCast(Sema &Self, Expr *SrcExpr, QualType DestType, // in multi-level pointers may change, but the level count must be the same, // as must be the final pointee type. while (SrcType != DestType && - Self.UnwrapSimilarPointerTypes(SrcType, DestType)) { + Self.Context.UnwrapSimilarPointerTypes(SrcType, DestType)) { Qualifiers Quals; SrcType = Self.Context.getUnqualifiedArrayType(SrcType, Quals); DestType = Self.Context.getUnqualifiedArrayType(DestType, Quals); @@ -1032,6 +1052,8 @@ static TryCastResult TryReinterpretCast(Sema &Self, Expr *SrcExpr, const SourceRange &OpRange, unsigned &msg, CastExpr::CastKind &Kind) { + bool IsLValueCast = false; + DestType = Self.Context.getCanonicalType(DestType); QualType SrcType = SrcExpr->getType(); if (const ReferenceType *DestTypeTmp = DestType->getAs<ReferenceType>()) { @@ -1049,6 +1071,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, Expr *SrcExpr, // This code does this transformation for the checked types. DestType = Self.Context.getPointerType(DestTypeTmp->getPointeeType()); SrcType = Self.Context.getPointerType(SrcType); + IsLValueCast = true; } // Canonicalize source for comparison. @@ -1075,13 +1098,12 @@ static TryCastResult TryReinterpretCast(Sema &Self, Expr *SrcExpr, } // A valid member pointer cast. - Kind = CastExpr::CK_BitCast; + Kind = IsLValueCast? CastExpr::CK_LValueBitCast : CastExpr::CK_BitCast; return TC_Success; } // See below for the enumeral issue. - if (SrcType->isNullPtrType() && DestType->isIntegralType() && - !DestType->isEnumeralType()) { + if (SrcType->isNullPtrType() && DestType->isIntegralType(Self.Context)) { // C++0x 5.2.10p4: A pointer can be explicitly converted to any integral // type large enough to hold it. 
A value of std::nullptr_t can be // converted to an integral type; the conversion has the same meaning @@ -1098,9 +1120,9 @@ static TryCastResult TryReinterpretCast(Sema &Self, Expr *SrcExpr, bool destIsVector = DestType->isVectorType(); bool srcIsVector = SrcType->isVectorType(); if (srcIsVector || destIsVector) { - bool srcIsScalar = SrcType->isIntegralType() && !SrcType->isEnumeralType(); - bool destIsScalar = - DestType->isIntegralType() && !DestType->isEnumeralType(); + // FIXME: Should this also apply to floating point types? + bool srcIsScalar = SrcType->isIntegralType(Self.Context); + bool destIsScalar = DestType->isIntegralType(Self.Context); // Check if this is a cast between a vector and something else. if (!(srcIsScalar && destIsVector) && !(srcIsVector && destIsScalar) && @@ -1124,8 +1146,10 @@ static TryCastResult TryReinterpretCast(Sema &Self, Expr *SrcExpr, return TC_Failed; } - bool destIsPtr = DestType->isAnyPointerType(); - bool srcIsPtr = SrcType->isAnyPointerType(); + bool destIsPtr = DestType->isAnyPointerType() || + DestType->isBlockPointerType(); + bool srcIsPtr = SrcType->isAnyPointerType() || + SrcType->isBlockPointerType(); if (!destIsPtr && !srcIsPtr) { // Except for std::nullptr_t->integer and lvalue->reference, which are // handled above, at least one of the two arguments must be a pointer. @@ -1143,9 +1167,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, Expr *SrcExpr, return TC_Success; } - // Note: Clang treats enumeration types as integral types. If this is ever - // changed for C++, the additional check here will be redundant. - if (DestType->isIntegralType() && !DestType->isEnumeralType()) { + if (DestType->isIntegralType(Self.Context)) { assert(srcIsPtr && "One type must be a pointer"); // C++ 5.2.10p4: A pointer can be explicitly converted to any integral // type large enough to hold it. 
@@ -1158,7 +1180,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, Expr *SrcExpr, return TC_Success; } - if (SrcType->isIntegralType() || SrcType->isEnumeralType()) { + if (SrcType->isIntegralOrEnumerationType()) { assert(destIsPtr && "One type must be a pointer"); // C++ 5.2.10p5: A value of integral or enumeration type can be explicitly // converted to a pointer. @@ -1178,14 +1200,22 @@ static TryCastResult TryReinterpretCast(Sema &Self, Expr *SrcExpr, msg = diag::err_bad_cxx_cast_const_away; return TC_Failed; } + + // Cannot convert between block pointers and Objective-C object pointers. + if ((SrcType->isBlockPointerType() && DestType->isObjCObjectPointerType()) || + (DestType->isBlockPointerType() && SrcType->isObjCObjectPointerType())) + return TC_NotApplicable; + + // Any pointer can be cast to an Objective-C pointer type with a C-style + // cast. if (CStyle && DestType->isObjCObjectPointerType()) { Kind = CastExpr::CK_AnyPointerToObjCPointerCast; return TC_Success; } - + // Not casting away constness, so the only remaining check is for compatible // pointer categories. - Kind = CastExpr::CK_BitCast; + Kind = IsLValueCast? CastExpr::CK_LValueBitCast : CastExpr::CK_BitCast; if (SrcType->isFunctionPointerType()) { if (DestType->isFunctionPointerType()) { @@ -1211,7 +1241,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, Expr *SrcExpr, Self.Diag(OpRange.getBegin(), diag::ext_cast_fn_obj) << OpRange; return TC_Success; } - + // C++ 5.2.10p7: A pointer to an object can be explicitly converted to // a pointer to an object of different type. // Void pointers are not specified, but supported by every compiler out there. 
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp index c0ec9e9..f56573a 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp @@ -96,7 +96,7 @@ DeclContext *Sema::computeDeclContext(const CXXScopeSpec &SS, // injected class name of the named class template, we're entering // into that class template definition. QualType Injected - = ClassTemplate->getInjectedClassNameSpecialization(Context); + = ClassTemplate->getInjectedClassNameSpecialization(); if (Context.hasSameType(Injected, ContextType)) return ClassTemplate->getTemplatedDecl(); @@ -458,8 +458,10 @@ Sema::CXXScopeTy *Sema::BuildCXXNestedNameSpecifier(Scope *S, if (NamedDecl *ND = Found.getAsSingle<NamedDecl>()) Diag(ND->getLocation(), diag::note_previous_decl) << ND->getDeclName(); - } else + } else { Found.clear(); + Found.setLookupName(&II); + } } NamedDecl *SD = Found.getAsSingle<NamedDecl>(); diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp index 6fc36c2..7ccd0c5 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp @@ -26,7 +26,10 @@ #include "clang/Lex/Preprocessor.h" #include "llvm/ADT/BitVector.h" #include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/Support/raw_ostream.h" #include "clang/Basic/TargetBuiltins.h" +#include "clang/Basic/TargetInfo.h" #include <limits> using namespace clang; @@ -199,21 +202,119 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { case Builtin::BI__sync_bool_compare_and_swap: case Builtin::BI__sync_lock_test_and_set: case Builtin::BI__sync_lock_release: - if (SemaBuiltinAtomicOverloaded(TheCall)) - return ExprError(); - break; - - // Target specific builtins start here. 
+ return SemaBuiltinAtomicOverloaded(move(TheCallResult)); + } + + // Since the target specific builtins for each arch overlap, only check those + // of the arch we are compiling for. + if (BuiltinID >= Builtin::FirstTSBuiltin) { + switch (Context.Target.getTriple().getArch()) { + case llvm::Triple::arm: + case llvm::Triple::thumb: + if (CheckARMBuiltinFunctionCall(BuiltinID, TheCall)) + return ExprError(); + break; + case llvm::Triple::x86: + case llvm::Triple::x86_64: + if (CheckX86BuiltinFunctionCall(BuiltinID, TheCall)) + return ExprError(); + break; + default: + break; + } + } + + return move(TheCallResult); +} + +bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { + switch (BuiltinID) { case X86::BI__builtin_ia32_palignr128: case X86::BI__builtin_ia32_palignr: { llvm::APSInt Result; if (SemaBuiltinConstantArg(TheCall, 2, Result)) - return ExprError(); + return true; break; } } + return false; +} - return move(TheCallResult); +// Get the valid immediate range for the specified NEON type code. +static unsigned RFT(unsigned t, bool shift = false) { + bool quad = t & 0x10; + + switch (t & 0x7) { + case 0: // i8 + return shift ? 7 : (8 << (int)quad) - 1; + case 1: // i16 + return shift ? 15 : (4 << (int)quad) - 1; + case 2: // i32 + return shift ? 31 : (2 << (int)quad) - 1; + case 3: // i64 + return shift ? 
63 : (1 << (int)quad) - 1; + case 4: // f32 + assert(!shift && "cannot shift float types!"); + return (2 << (int)quad) - 1; + case 5: // poly8 + assert(!shift && "cannot shift polynomial types!"); + return (8 << (int)quad) - 1; + case 6: // poly16 + assert(!shift && "cannot shift polynomial types!"); + return (4 << (int)quad) - 1; + case 7: // float16 + assert(!shift && "cannot shift float types!"); + return (4 << (int)quad) - 1; + } + return 0; +} + +bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { + llvm::APSInt Result; + + unsigned mask = 0; + unsigned TV = 0; + switch (BuiltinID) { +#define GET_NEON_OVERLOAD_CHECK +#include "clang/Basic/arm_neon.inc" +#undef GET_NEON_OVERLOAD_CHECK + } + + // For NEON intrinsics which are overloaded on vector element type, validate + // the immediate which specifies which variant to emit. + if (mask) { + unsigned ArgNo = TheCall->getNumArgs()-1; + if (SemaBuiltinConstantArg(TheCall, ArgNo, Result)) + return true; + + TV = Result.getLimitedValue(32); + if ((TV > 31) || (mask & (1 << TV)) == 0) + return Diag(TheCall->getLocStart(), diag::err_invalid_neon_type_code) + << TheCall->getArg(ArgNo)->getSourceRange(); + } + + // For NEON intrinsics which take an immediate value as part of the + // instruction, range check them here. + unsigned i = 0, l = 0, u = 0; + switch (BuiltinID) { + default: return false; +#define GET_NEON_IMMEDIATE_CHECK +#include "clang/Basic/arm_neon.inc" +#undef GET_NEON_IMMEDIATE_CHECK + }; + + // Check that the immediate argument is actually a constant. + if (SemaBuiltinConstantArg(TheCall, i, Result)) + return true; + + // Range check against the upper/lower values for this isntruction. 
+ unsigned Val = Result.getZExtValue(); + if (Val < l || Val > (u + l)) + return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range) + << llvm::utostr(l) << llvm::utostr(u+l) + << TheCall->getArg(i)->getSourceRange(); + + return false; } /// CheckFunctionCall - Check a direct function call for various correctness @@ -279,32 +380,40 @@ bool Sema::CheckBlockCall(NamedDecl *NDecl, CallExpr *TheCall) { /// /// This function goes through and does final semantic checking for these /// builtins, -bool Sema::SemaBuiltinAtomicOverloaded(CallExpr *TheCall) { +Sema::OwningExprResult +Sema::SemaBuiltinAtomicOverloaded(OwningExprResult TheCallResult) { + CallExpr *TheCall = (CallExpr *)TheCallResult.get(); DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); // Ensure that we have at least one argument to do type inference from. - if (TheCall->getNumArgs() < 1) - return Diag(TheCall->getLocEnd(), - diag::err_typecheck_call_too_few_args_at_least) - << 0 << 1 << TheCall->getNumArgs() - << TheCall->getCallee()->getSourceRange(); + if (TheCall->getNumArgs() < 1) { + Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least) + << 0 << 1 << TheCall->getNumArgs() + << TheCall->getCallee()->getSourceRange(); + return ExprError(); + } // Inspect the first argument of the atomic builtin. This should always be // a pointer type, whose element is an integral scalar or pointer type. // Because it is a pointer type, we don't have to worry about any implicit // casts here. + // FIXME: We don't allow floating point scalars as input. 
Expr *FirstArg = TheCall->getArg(0); - if (!FirstArg->getType()->isPointerType()) - return Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer) - << FirstArg->getType() << FirstArg->getSourceRange(); + if (!FirstArg->getType()->isPointerType()) { + Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer) + << FirstArg->getType() << FirstArg->getSourceRange(); + return ExprError(); + } - QualType ValType = FirstArg->getType()->getAs<PointerType>()->getPointeeType(); + QualType ValType = + FirstArg->getType()->getAs<PointerType>()->getPointeeType(); if (!ValType->isIntegerType() && !ValType->isPointerType() && - !ValType->isBlockPointerType()) - return Diag(DRE->getLocStart(), - diag::err_atomic_builtin_must_be_pointer_intptr) - << FirstArg->getType() << FirstArg->getSourceRange(); + !ValType->isBlockPointerType()) { + Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer_intptr) + << FirstArg->getType() << FirstArg->getSourceRange(); + return ExprError(); + } // We need to figure out which concrete builtin this maps onto. For example, // __sync_fetch_and_add with a 2 byte object turns into @@ -342,8 +451,9 @@ bool Sema::SemaBuiltinAtomicOverloaded(CallExpr *TheCall) { case 8: SizeIndex = 3; break; case 16: SizeIndex = 4; break; default: - return Diag(DRE->getLocStart(), diag::err_atomic_builtin_pointer_size) - << FirstArg->getType() << FirstArg->getSourceRange(); + Diag(DRE->getLocStart(), diag::err_atomic_builtin_pointer_size) + << FirstArg->getType() << FirstArg->getSourceRange(); + return ExprError(); } // Each of these builtins has one pointer argument, followed by some number of @@ -383,12 +493,12 @@ bool Sema::SemaBuiltinAtomicOverloaded(CallExpr *TheCall) { // Now that we know how many fixed arguments we expect, first check that we // have at least that many. 
- if (TheCall->getNumArgs() < 1+NumFixed) - return Diag(TheCall->getLocEnd(), - diag::err_typecheck_call_too_few_args_at_least) - << 0 << 1+NumFixed << TheCall->getNumArgs() - << TheCall->getCallee()->getSourceRange(); - + if (TheCall->getNumArgs() < 1+NumFixed) { + Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least) + << 0 << 1+NumFixed << TheCall->getNumArgs() + << TheCall->getCallee()->getSourceRange(); + return ExprError(); + } // Get the decl for the concrete builtin from this, we can tell what the // concrete integer type we should convert to is. @@ -400,6 +510,8 @@ bool Sema::SemaBuiltinAtomicOverloaded(CallExpr *TheCall) { TUScope, false, DRE->getLocStart())); const FunctionProtoType *BuiltinFT = NewBuiltinDecl->getType()->getAs<FunctionProtoType>(); + + QualType OrigValType = ValType; ValType = BuiltinFT->getArgType(0)->getAs<PointerType>()->getPointeeType(); // If the first type needs to be converted (e.g. void** -> int*), do it now. @@ -426,7 +538,7 @@ bool Sema::SemaBuiltinAtomicOverloaded(CallExpr *TheCall) { CastExpr::CastKind Kind = CastExpr::CK_Unknown; CXXBaseSpecifierArray BasePath; if (CheckCastTypes(Arg->getSourceRange(), ValType, Arg, Kind, BasePath)) - return true; + return ExprError(); // Okay, we have something that *can* be converted to the right type. Check // to see if there is a potentially weird extension going on here. This can @@ -448,10 +560,30 @@ bool Sema::SemaBuiltinAtomicOverloaded(CallExpr *TheCall) { UsualUnaryConversions(PromotedCall); TheCall->setCallee(PromotedCall); - // Change the result type of the call to match the result type of the decl. - TheCall->setType(NewBuiltinDecl->getResultType()); - return false; + TheCall->setType(NewBuiltinDecl->getCallResultType()); + + // If the value type was converted to an integer when processing the + // arguments (e.g. void* -> int), we need to convert the result back. 
+ if (!Context.hasSameUnqualifiedType(ValType, OrigValType)) { + Expr *E = TheCallResult.takeAs<Expr>(); + + assert(ValType->isIntegerType() && + "We always convert atomic operation values to integers."); + // FIXME: Handle floating point value type here too. + CastExpr::CastKind Kind; + if (OrigValType->isIntegerType()) + Kind = CastExpr::CK_IntegralCast; + else if (OrigValType->hasPointerRepresentation()) + Kind = CastExpr::CK_IntegralToPointer; + else + llvm_unreachable("Unhandled original value type!"); + + ImpCastExprToType(E, OrigValType, Kind); + return Owned(E); + } + + return move(TheCallResult); } @@ -511,7 +643,7 @@ bool Sema::SemaBuiltinVAStart(CallExpr *TheCall) { BlockScopeInfo *CurBlock = getCurBlock(); bool isVariadic; if (CurBlock) - isVariadic = CurBlock->isVariadic; + isVariadic = CurBlock->TheDecl->isVariadic(); else if (FunctionDecl *FD = getCurFunctionDecl()) isVariadic = FD->isVariadic(); else @@ -633,45 +765,54 @@ bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) { /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. // This is declared to take (...), so we have to check everything. 
Action::OwningExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { - if (TheCall->getNumArgs() < 3) + if (TheCall->getNumArgs() < 2) return ExprError(Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least) - << 0 /*function call*/ << 3 << TheCall->getNumArgs() + << 0 /*function call*/ << 2 << TheCall->getNumArgs() << TheCall->getSourceRange()); - unsigned numElements = std::numeric_limits<unsigned>::max(); + // Determine which of the following types of shufflevector we're checking: + // 1) unary, vector mask: (lhs, mask) + // 2) binary, vector mask: (lhs, rhs, mask) + // 3) binary, scalar mask: (lhs, rhs, index, ..., index) + QualType resType = TheCall->getArg(0)->getType(); + unsigned numElements = 0; + if (!TheCall->getArg(0)->isTypeDependent() && !TheCall->getArg(1)->isTypeDependent()) { - QualType FAType = TheCall->getArg(0)->getType(); - QualType SAType = TheCall->getArg(1)->getType(); - - if (!FAType->isVectorType() || !SAType->isVectorType()) { + QualType LHSType = TheCall->getArg(0)->getType(); + QualType RHSType = TheCall->getArg(1)->getType(); + + if (!LHSType->isVectorType() || !RHSType->isVectorType()) { Diag(TheCall->getLocStart(), diag::err_shufflevector_non_vector) << SourceRange(TheCall->getArg(0)->getLocStart(), TheCall->getArg(1)->getLocEnd()); return ExprError(); } - - if (!Context.hasSameUnqualifiedType(FAType, SAType)) { + + numElements = LHSType->getAs<VectorType>()->getNumElements(); + unsigned numResElements = TheCall->getNumArgs() - 2; + + // Check to see if we have a call with 2 vector arguments, the unary shuffle + // with mask. If so, verify that RHS is an integer vector type with the + // same number of elts as lhs. 
+ if (TheCall->getNumArgs() == 2) { + if (!RHSType->isIntegerType() || + RHSType->getAs<VectorType>()->getNumElements() != numElements) + Diag(TheCall->getLocStart(), diag::err_shufflevector_incompatible_vector) + << SourceRange(TheCall->getArg(1)->getLocStart(), + TheCall->getArg(1)->getLocEnd()); + numResElements = numElements; + } + else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) { Diag(TheCall->getLocStart(), diag::err_shufflevector_incompatible_vector) << SourceRange(TheCall->getArg(0)->getLocStart(), TheCall->getArg(1)->getLocEnd()); return ExprError(); - } - - numElements = FAType->getAs<VectorType>()->getNumElements(); - if (TheCall->getNumArgs() != numElements+2) { - if (TheCall->getNumArgs() < numElements+2) - return ExprError(Diag(TheCall->getLocEnd(), - diag::err_typecheck_call_too_few_args) - << 0 /*function call*/ - << numElements+2 << TheCall->getNumArgs() - << TheCall->getSourceRange()); - return ExprError(Diag(TheCall->getLocEnd(), - diag::err_typecheck_call_too_many_args) - << 0 /*function call*/ - << numElements+2 << TheCall->getNumArgs() - << TheCall->getSourceRange()); + } else if (numElements != numResElements) { + QualType eltType = LHSType->getAs<VectorType>()->getElementType(); + resType = Context.getVectorType(eltType, numResElements, + VectorType::NotAltiVec); } } @@ -680,9 +821,11 @@ Action::OwningExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { TheCall->getArg(i)->isValueDependent()) continue; - llvm::APSInt Result; - if (SemaBuiltinConstantArg(TheCall, i, Result)) - return ExprError(); + llvm::APSInt Result(32); + if (!TheCall->getArg(i)->isIntegerConstantExpr(Result, Context)) + return ExprError(Diag(TheCall->getLocStart(), + diag::err_shufflevector_nonconstant_argument) + << TheCall->getArg(i)->getSourceRange()); if (Result.getActiveBits() > 64 || Result.getZExtValue() >= numElements*2) return ExprError(Diag(TheCall->getLocStart(), @@ -698,7 +841,7 @@ Action::OwningExprResult 
Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { } return Owned(new (Context) ShuffleVectorExpr(Context, exprs.begin(), - exprs.size(), exprs[0]->getType(), + exprs.size(), resType, TheCall->getCallee()->getLocStart(), TheCall->getRParenLoc())); } @@ -1081,15 +1224,23 @@ public: unsigned specifierLen); private: SourceRange getFormatStringRange(); - SourceRange getFormatSpecifierRange(const char *startSpecifier, - unsigned specifierLen); + CharSourceRange getFormatSpecifierRange(const char *startSpecifier, + unsigned specifierLen); SourceLocation getLocationOfByte(const char *x); bool HandleAmount(const analyze_printf::OptionalAmount &Amt, unsigned k, const char *startSpecifier, unsigned specifierLen); - void HandleFlags(const analyze_printf::FormatSpecifier &FS, - llvm::StringRef flag, llvm::StringRef cspec, - const char *startSpecifier, unsigned specifierLen); + void HandleInvalidAmount(const analyze_printf::FormatSpecifier &FS, + const analyze_printf::OptionalAmount &Amt, + unsigned type, + const char *startSpecifier, unsigned specifierLen); + void HandleFlag(const analyze_printf::FormatSpecifier &FS, + const analyze_printf::OptionalFlag &flag, + const char *startSpecifier, unsigned specifierLen); + void HandleIgnoredFlag(const analyze_printf::FormatSpecifier &FS, + const analyze_printf::OptionalFlag &ignoredFlag, + const analyze_printf::OptionalFlag &flag, + const char *startSpecifier, unsigned specifierLen); const Expr *getDataArg(unsigned i) const; }; @@ -1099,10 +1250,15 @@ SourceRange CheckPrintfHandler::getFormatStringRange() { return OrigFormatExpr->getSourceRange(); } -SourceRange CheckPrintfHandler:: +CharSourceRange CheckPrintfHandler:: getFormatSpecifierRange(const char *startSpecifier, unsigned specifierLen) { - return SourceRange(getLocationOfByte(startSpecifier), - getLocationOfByte(startSpecifier+specifierLen-1)); + SourceLocation Start = getLocationOfByte(startSpecifier); + SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 
1); + + // Advance the end SourceLocation by one due to half-open ranges. + End = End.getFileLocWithOffset(1); + + return CharSourceRange::getCharRange(Start, End); } SourceLocation CheckPrintfHandler::getLocationOfByte(const char *x) { @@ -1174,16 +1330,6 @@ const Expr *CheckPrintfHandler::getDataArg(unsigned i) const { return TheCall->getArg(FirstDataArg + i); } -void CheckPrintfHandler::HandleFlags(const analyze_printf::FormatSpecifier &FS, - llvm::StringRef flag, - llvm::StringRef cspec, - const char *startSpecifier, - unsigned specifierLen) { - const analyze_printf::ConversionSpecifier &CS = FS.getConversionSpecifier(); - S.Diag(getLocationOfByte(CS.getStart()), diag::warn_printf_nonsensical_flag) - << flag << cspec << getFormatSpecifierRange(startSpecifier, specifierLen); -} - bool CheckPrintfHandler::HandleAmount(const analyze_printf::OptionalAmount &Amt, unsigned k, const char *startSpecifier, @@ -1228,6 +1374,62 @@ CheckPrintfHandler::HandleAmount(const analyze_printf::OptionalAmount &Amt, return true; } +void CheckPrintfHandler::HandleInvalidAmount( + const analyze_printf::FormatSpecifier &FS, + const analyze_printf::OptionalAmount &Amt, + unsigned type, + const char *startSpecifier, + unsigned specifierLen) { + const analyze_printf::ConversionSpecifier &CS = FS.getConversionSpecifier(); + switch (Amt.getHowSpecified()) { + case analyze_printf::OptionalAmount::Constant: + S.Diag(getLocationOfByte(Amt.getStart()), + diag::warn_printf_nonsensical_optional_amount) + << type + << CS.toString() + << getFormatSpecifierRange(startSpecifier, specifierLen) + << FixItHint::CreateRemoval(getFormatSpecifierRange(Amt.getStart(), + Amt.getConstantLength())); + break; + + default: + S.Diag(getLocationOfByte(Amt.getStart()), + diag::warn_printf_nonsensical_optional_amount) + << type + << CS.toString() + << getFormatSpecifierRange(startSpecifier, specifierLen); + break; + } +} + +void CheckPrintfHandler::HandleFlag(const analyze_printf::FormatSpecifier &FS, + const 
analyze_printf::OptionalFlag &flag, + const char *startSpecifier, + unsigned specifierLen) { + // Warn about pointless flag with a fixit removal. + const analyze_printf::ConversionSpecifier &CS = FS.getConversionSpecifier(); + S.Diag(getLocationOfByte(flag.getPosition()), + diag::warn_printf_nonsensical_flag) + << flag.toString() << CS.toString() + << getFormatSpecifierRange(startSpecifier, specifierLen) + << FixItHint::CreateRemoval(getFormatSpecifierRange(flag.getPosition(), 1)); +} + +void CheckPrintfHandler::HandleIgnoredFlag( + const analyze_printf::FormatSpecifier &FS, + const analyze_printf::OptionalFlag &ignoredFlag, + const analyze_printf::OptionalFlag &flag, + const char *startSpecifier, + unsigned specifierLen) { + // Warn about ignored flag with a fixit removal. + S.Diag(getLocationOfByte(ignoredFlag.getPosition()), + diag::warn_printf_ignored_flag) + << ignoredFlag.toString() << flag.toString() + << getFormatSpecifierRange(startSpecifier, specifierLen) + << FixItHint::CreateRemoval(getFormatSpecifierRange( + ignoredFlag.getPosition(), 1)); +} + bool CheckPrintfHandler::HandleFormatSpecifier(const analyze_printf::FormatSpecifier &FS, @@ -1315,34 +1517,57 @@ CheckPrintfHandler::HandleFormatSpecifier(const analyze_printf::FormatSpecifier return HandleInvalidConversionSpecifier(FS, startSpecifier, specifierLen); } - // Are we using '%n'? Issue a warning about this being - // a possible security issue. + // Check for invalid use of field width + if (!FS.hasValidFieldWidth()) { + HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0, + startSpecifier, specifierLen); + } + + // Check for invalid use of precision + if (!FS.hasValidPrecision()) { + HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1, + startSpecifier, specifierLen); + } + + // Check each flag does not conflict with any other component. 
+ if (!FS.hasValidLeadingZeros()) + HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen); + if (!FS.hasValidPlusPrefix()) + HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen); + if (!FS.hasValidSpacePrefix()) + HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen); + if (!FS.hasValidAlternativeForm()) + HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen); + if (!FS.hasValidLeftJustified()) + HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen); + + // Check that flags are not ignored by another flag + if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+' + HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(), + startSpecifier, specifierLen); + if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-' + HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(), + startSpecifier, specifierLen); + + // Check the length modifier is valid with the given conversion specifier. + const LengthModifier &LM = FS.getLengthModifier(); + if (!FS.hasValidLengthModifier()) + S.Diag(getLocationOfByte(LM.getStart()), + diag::warn_printf_nonsensical_length) + << LM.toString() << CS.toString() + << getFormatSpecifierRange(startSpecifier, specifierLen) + << FixItHint::CreateRemoval(getFormatSpecifierRange(LM.getStart(), + LM.getLength())); + + // Are we using '%n'? if (CS.getKind() == ConversionSpecifier::OutIntPtrArg) { + // Issue a warning about this being a possible security issue. S.Diag(getLocationOfByte(CS.getStart()), diag::warn_printf_write_back) << getFormatSpecifierRange(startSpecifier, specifierLen); // Continue checking the other format specifiers. 
return true; } - if (CS.getKind() == ConversionSpecifier::VoidPtrArg) { - if (FS.getPrecision().getHowSpecified() != OptionalAmount::NotSpecified) - S.Diag(getLocationOfByte(CS.getStart()), - diag::warn_printf_nonsensical_precision) - << CS.getCharacters() - << getFormatSpecifierRange(startSpecifier, specifierLen); - } - if (CS.getKind() == ConversionSpecifier::VoidPtrArg || - CS.getKind() == ConversionSpecifier::CStrArg) { - // FIXME: Instead of using "0", "+", etc., eventually get them from - // the FormatSpecifier. - if (FS.hasLeadingZeros()) - HandleFlags(FS, "0", CS.getCharacters(), startSpecifier, specifierLen); - if (FS.hasPlusPrefix()) - HandleFlags(FS, "+", CS.getCharacters(), startSpecifier, specifierLen); - if (FS.hasSpacePrefix()) - HandleFlags(FS, " ", CS.getCharacters(), startSpecifier, specifierLen); - } - // The remaining checks depend on the data arguments. if (HasVAListArg) return true; @@ -1377,11 +1602,32 @@ CheckPrintfHandler::HandleFormatSpecifier(const analyze_printf::FormatSpecifier if (ATR.matchesType(S.Context, ICE->getSubExpr()->getType())) return true; - S.Diag(getLocationOfByte(CS.getStart()), - diag::warn_printf_conversion_argument_type_mismatch) - << ATR.getRepresentativeType(S.Context) << Ex->getType() - << getFormatSpecifierRange(startSpecifier, specifierLen) - << Ex->getSourceRange(); + // We may be able to offer a FixItHint if it is a supported type. 
+ FormatSpecifier fixedFS = FS; + bool success = fixedFS.fixType(Ex->getType()); + + if (success) { + // Get the fix string from the fixed format specifier + llvm::SmallString<128> buf; + llvm::raw_svector_ostream os(buf); + fixedFS.toString(os); + + S.Diag(getLocationOfByte(CS.getStart()), + diag::warn_printf_conversion_argument_type_mismatch) + << ATR.getRepresentativeType(S.Context) << Ex->getType() + << getFormatSpecifierRange(startSpecifier, specifierLen) + << Ex->getSourceRange() + << FixItHint::CreateReplacement( + getFormatSpecifierRange(startSpecifier, specifierLen), + os.str()); + } + else { + S.Diag(getLocationOfByte(CS.getStart()), + diag::warn_printf_conversion_argument_type_mismatch) + << ATR.getRepresentativeType(S.Context) << Ex->getType() + << getFormatSpecifierRange(startSpecifier, specifierLen) + << Ex->getSourceRange(); + } } return true; diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp index d8c1a5c..5528875 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp @@ -119,10 +119,19 @@ namespace { /// nested-name-specifiers that would otherwise be filtered out. bool AllowNestedNameSpecifiers; + /// \brief If set, the type that we would prefer our resulting value + /// declarations to have. + /// + /// Closely matching the preferred type gives a boost to a result's + /// priority. + CanQualType PreferredType; + /// \brief A list of shadow maps, which is used to model name hiding at /// different levels of, e.g., the inheritance hierarchy. 
std::list<ShadowMap> ShadowMaps; + void AdjustResultPriorityForPreferredType(Result &R); + public: explicit ResultBuilder(Sema &SemaRef, LookupFilter Filter = 0) : SemaRef(SemaRef), Filter(Filter), AllowNestedNameSpecifiers(false) { } @@ -147,6 +156,11 @@ namespace { unsigned size() const { return Results.size(); } bool empty() const { return Results.empty(); } + /// \brief Specify the preferred type. + void setPreferredType(QualType T) { + PreferredType = SemaRef.Context.getCanonicalType(T); + } + /// \brief Specify whether nested-name-specifiers are allowed. void allowNestedNameSpecifiers(bool Allow = true) { AllowNestedNameSpecifiers = Allow; @@ -212,6 +226,7 @@ namespace { /// //@{ bool IsOrdinaryName(NamedDecl *ND) const; + bool IsOrdinaryNonTypeName(NamedDecl *ND) const; bool IsOrdinaryNonValueName(NamedDecl *ND) const; bool IsNestedNameSpecifier(NamedDecl *ND) const; bool IsEnum(NamedDecl *ND) const; @@ -222,6 +237,7 @@ namespace { bool IsType(NamedDecl *ND) const; bool IsMember(NamedDecl *ND) const; bool IsObjCIvar(NamedDecl *ND) const; + bool IsObjCMessageReceiver(NamedDecl *ND) const; //@} }; } @@ -355,8 +371,6 @@ getRequiredQualification(ASTContext &Context, Result = NestedNameSpecifier::Create(Context, Result, false, Context.getTypeDeclType(TD).getTypePtr()); - else - assert(Parent->isTranslationUnit()); } return Result; } @@ -393,13 +407,16 @@ bool ResultBuilder::isInterestingDecl(NamedDecl *ND, return false; // Filter out names reserved for the implementation (C99 7.1.3, - // C++ [lib.global.names]). Users don't need to see those. + // C++ [lib.global.names]) if they come from a system header. // // FIXME: Add predicate for this. 
if (Id->getLength() >= 2) { const char *Name = Id->getNameStart(); if (Name[0] == '_' && - (Name[1] == '_' || (Name[1] >= 'A' && Name[1] <= 'Z'))) + (Name[1] == '_' || (Name[1] >= 'A' && Name[1] <= 'Z')) && + (ND->getLocation().isInvalid() || + SemaRef.SourceMgr.isInSystemHeader( + SemaRef.SourceMgr.getSpellingLoc(ND->getLocation())))) return false; } } @@ -458,6 +475,134 @@ bool ResultBuilder::CheckHiddenResult(Result &R, DeclContext *CurContext, return false; } +enum SimplifiedTypeClass { + STC_Arithmetic, + STC_Array, + STC_Block, + STC_Function, + STC_ObjectiveC, + STC_Other, + STC_Pointer, + STC_Record, + STC_Void +}; + +/// \brief A simplified classification of types used to determine whether two +/// types are "similar enough" when adjusting priorities. +static SimplifiedTypeClass getSimplifiedTypeClass(CanQualType T) { + switch (T->getTypeClass()) { + case Type::Builtin: + switch (cast<BuiltinType>(T)->getKind()) { + case BuiltinType::Void: + return STC_Void; + + case BuiltinType::NullPtr: + return STC_Pointer; + + case BuiltinType::Overload: + case BuiltinType::Dependent: + case BuiltinType::UndeducedAuto: + return STC_Other; + + case BuiltinType::ObjCId: + case BuiltinType::ObjCClass: + case BuiltinType::ObjCSel: + return STC_ObjectiveC; + + default: + return STC_Arithmetic; + } + return STC_Other; + + case Type::Complex: + return STC_Arithmetic; + + case Type::Pointer: + return STC_Pointer; + + case Type::BlockPointer: + return STC_Block; + + case Type::LValueReference: + case Type::RValueReference: + return getSimplifiedTypeClass(T->getAs<ReferenceType>()->getPointeeType()); + + case Type::ConstantArray: + case Type::IncompleteArray: + case Type::VariableArray: + case Type::DependentSizedArray: + return STC_Array; + + case Type::DependentSizedExtVector: + case Type::Vector: + case Type::ExtVector: + return STC_Arithmetic; + + case Type::FunctionProto: + case Type::FunctionNoProto: + return STC_Function; + + case Type::Record: + return STC_Record; + + 
case Type::Enum: + return STC_Arithmetic; + + case Type::ObjCObject: + case Type::ObjCInterface: + case Type::ObjCObjectPointer: + return STC_ObjectiveC; + + default: + return STC_Other; + } +} + +/// \brief Get the type that a given expression will have if this declaration +/// is used as an expression in its "typical" code-completion form. +static QualType getDeclUsageType(ASTContext &C, NamedDecl *ND) { + ND = cast<NamedDecl>(ND->getUnderlyingDecl()); + + if (TypeDecl *Type = dyn_cast<TypeDecl>(ND)) + return C.getTypeDeclType(Type); + if (ObjCInterfaceDecl *Iface = dyn_cast<ObjCInterfaceDecl>(ND)) + return C.getObjCInterfaceType(Iface); + + QualType T; + if (FunctionDecl *Function = dyn_cast<FunctionDecl>(ND)) + T = Function->getCallResultType(); + else if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(ND)) + T = Method->getSendResultType(); + else if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(ND)) + T = FunTmpl->getTemplatedDecl()->getCallResultType(); + else if (EnumConstantDecl *Enumerator = dyn_cast<EnumConstantDecl>(ND)) + T = C.getTypeDeclType(cast<EnumDecl>(Enumerator->getDeclContext())); + else if (ObjCPropertyDecl *Property = dyn_cast<ObjCPropertyDecl>(ND)) + T = Property->getType(); + else if (ValueDecl *Value = dyn_cast<ValueDecl>(ND)) + T = Value->getType(); + else + return QualType(); + + return T.getNonReferenceType(); +} + +void ResultBuilder::AdjustResultPriorityForPreferredType(Result &R) { + QualType T = getDeclUsageType(SemaRef.Context, R.Declaration); + if (T.isNull()) + return; + + CanQualType TC = SemaRef.Context.getCanonicalType(T); + // Check for exactly-matching types (modulo qualifiers). + if (SemaRef.Context.hasSameUnqualifiedType(PreferredType, TC)) + R.Priority /= CCF_ExactTypeMatch; + // Check for nearly-matching types, based on classification of each. 
+ else if ((getSimplifiedTypeClass(PreferredType) + == getSimplifiedTypeClass(TC)) && + !(PreferredType->isEnumeralType() && TC->isEnumeralType())) + R.Priority /= CCF_SimilarTypeMatch; +} + void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) { assert(!ShadowMaps.empty() && "Must enter into a results scope"); @@ -542,8 +687,9 @@ void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) { if (AsNestedNameSpecifier) { R.StartsNestedNameSpecifier = true; R.Priority = CCP_NestedNameSpecifier; - } - + } else if (!PreferredType.isNull()) + AdjustResultPriorityForPreferredType(R); + // If this result is supposed to have an informative qualifier, add one. if (R.QualifierIsInformative && !R.Qualifier && !R.StartsNestedNameSpecifier) { @@ -616,6 +762,9 @@ void ResultBuilder::AddResult(Result R, DeclContext *CurContext, if (InBaseClass) R.Priority += CCD_InBaseClass; + if (!PreferredType.isNull()) + AdjustResultPriorityForPreferredType(R); + // Insert this result into the set of results. Results.push_back(R); } @@ -645,9 +794,11 @@ void ResultBuilder::ExitScope() { /// \brief Determines whether this given declaration will be found by /// ordinary name lookup. bool ResultBuilder::IsOrdinaryName(NamedDecl *ND) const { + ND = cast<NamedDecl>(ND->getUnderlyingDecl()); + unsigned IDNS = Decl::IDNS_Ordinary; if (SemaRef.getLangOptions().CPlusPlus) - IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace; + IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace | Decl::IDNS_Member; else if (SemaRef.getLangOptions().ObjC1 && isa<ObjCIvarDecl>(ND)) return true; @@ -655,14 +806,33 @@ bool ResultBuilder::IsOrdinaryName(NamedDecl *ND) const { } /// \brief Determines whether this given declaration will be found by +/// ordinary name lookup but is not a type name. 
+bool ResultBuilder::IsOrdinaryNonTypeName(NamedDecl *ND) const { + ND = cast<NamedDecl>(ND->getUnderlyingDecl()); + if (isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND)) + return false; + + unsigned IDNS = Decl::IDNS_Ordinary; + if (SemaRef.getLangOptions().CPlusPlus) + IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace | Decl::IDNS_Member; + else if (SemaRef.getLangOptions().ObjC1 && isa<ObjCIvarDecl>(ND)) + return true; + + return ND->getIdentifierNamespace() & IDNS; +} + +/// \brief Determines whether this given declaration will be found by /// ordinary name lookup. bool ResultBuilder::IsOrdinaryNonValueName(NamedDecl *ND) const { + ND = cast<NamedDecl>(ND->getUnderlyingDecl()); + unsigned IDNS = Decl::IDNS_Ordinary; if (SemaRef.getLangOptions().CPlusPlus) IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace; return (ND->getIdentifierNamespace() & IDNS) && - !isa<ValueDecl>(ND) && !isa<FunctionTemplateDecl>(ND); + !isa<ValueDecl>(ND) && !isa<FunctionTemplateDecl>(ND) && + !isa<ObjCPropertyDecl>(ND); } /// \brief Determines whether the given declaration is suitable as the @@ -732,6 +902,49 @@ bool ResultBuilder::IsMember(NamedDecl *ND) const { isa<ObjCPropertyDecl>(ND); } +static bool isObjCReceiverType(ASTContext &C, QualType T) { + T = C.getCanonicalType(T); + switch (T->getTypeClass()) { + case Type::ObjCObject: + case Type::ObjCInterface: + case Type::ObjCObjectPointer: + return true; + + case Type::Builtin: + switch (cast<BuiltinType>(T)->getKind()) { + case BuiltinType::ObjCId: + case BuiltinType::ObjCClass: + case BuiltinType::ObjCSel: + return true; + + default: + break; + } + return false; + + default: + break; + } + + if (!C.getLangOptions().CPlusPlus) + return false; + + // FIXME: We could perform more analysis here to determine whether a + // particular class type has any conversions to Objective-C types. For now, + // just accept all class types. 
+ return T->isDependentType() || T->isRecordType(); +} + +bool ResultBuilder::IsObjCMessageReceiver(NamedDecl *ND) const { + QualType T = getDeclUsageType(SemaRef.Context, ND); + if (T.isNull()) + return false; + + T = SemaRef.Context.getBaseElementType(T); + return isObjCReceiverType(SemaRef.Context, T); +} + + /// \rief Determines whether the given declaration is an Objective-C /// instance variable. bool ResultBuilder::IsObjCIvar(NamedDecl *ND) const { @@ -788,27 +1001,26 @@ static void AddTypeSpecifierResults(const LangOptions &LangOpts, Results.AddResult(Result("class", CCP_Type)); Results.AddResult(Result("wchar_t", CCP_Type)); - if (Results.includeCodePatterns()) { - // typename qualified-id - CodeCompletionString *Pattern = new CodeCompletionString; - Pattern->AddTypedTextChunk("typename"); - Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); - Pattern->AddPlaceholderChunk("qualified-id"); - Results.AddResult(Result(Pattern)); - } + // typename qualified-id + CodeCompletionString *Pattern = new CodeCompletionString; + Pattern->AddTypedTextChunk("typename"); + Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); + Pattern->AddPlaceholderChunk("qualifier"); + Pattern->AddTextChunk("::"); + Pattern->AddPlaceholderChunk("name"); + Results.AddResult(Result(Pattern)); if (LangOpts.CPlusPlus0x) { Results.AddResult(Result("auto", CCP_Type)); Results.AddResult(Result("char16_t", CCP_Type)); Results.AddResult(Result("char32_t", CCP_Type)); - if (Results.includeCodePatterns()) { - CodeCompletionString *Pattern = new CodeCompletionString; - Pattern->AddTypedTextChunk("decltype"); - Pattern->AddChunk(CodeCompletionString::CK_LeftParen); - Pattern->AddPlaceholderChunk("expression-or-type"); - Pattern->AddChunk(CodeCompletionString::CK_RightParen); - Results.AddResult(Result(Pattern)); - } + + CodeCompletionString *Pattern = new CodeCompletionString; + Pattern->AddTypedTextChunk("decltype"); + Pattern->AddChunk(CodeCompletionString::CK_LeftParen); + 
Pattern->AddPlaceholderChunk("expression"); + Pattern->AddChunk(CodeCompletionString::CK_RightParen); + Results.AddResult(Result(Pattern)); } } @@ -819,14 +1031,18 @@ static void AddTypeSpecifierResults(const LangOptions &LangOpts, // Results.AddResult(Result("_Decimal64")); // Results.AddResult(Result("_Decimal128")); - if (Results.includeCodePatterns()) { - CodeCompletionString *Pattern = new CodeCompletionString; - Pattern->AddTypedTextChunk("typeof"); - Pattern->AddChunk(CodeCompletionString::CK_LeftParen); - Pattern->AddPlaceholderChunk("expression-or-type"); - Pattern->AddChunk(CodeCompletionString::CK_RightParen); - Results.AddResult(Result(Pattern)); - } + CodeCompletionString *Pattern = new CodeCompletionString; + Pattern->AddTypedTextChunk("typeof"); + Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); + Pattern->AddPlaceholderChunk("expression"); + Results.AddResult(Result(Pattern)); + + Pattern = new CodeCompletionString; + Pattern->AddTypedTextChunk("typeof"); + Pattern->AddChunk(CodeCompletionString::CK_LeftParen); + Pattern->AddPlaceholderChunk("type"); + Pattern->AddChunk(CodeCompletionString::CK_RightParen); + Results.AddResult(Result(Pattern)); } } @@ -887,6 +1103,44 @@ static void AddObjCInterfaceResults(const LangOptions &LangOpts, bool NeedAt); static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt); +static void AddTypedefResult(ResultBuilder &Results) { + CodeCompletionString *Pattern = new CodeCompletionString; + Pattern->AddTypedTextChunk("typedef"); + Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); + Pattern->AddPlaceholderChunk("type"); + Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); + Pattern->AddPlaceholderChunk("name"); + Results.AddResult(CodeCompleteConsumer::Result(Pattern)); +} + +static bool WantTypesInContext(Action::CodeCompletionContext CCC, + const LangOptions &LangOpts) { + if (LangOpts.CPlusPlus) + return true; + + switch (CCC) { + case Action::CCC_Namespace: + case 
Action::CCC_Class: + case Action::CCC_ObjCInstanceVariableList: + case Action::CCC_Template: + case Action::CCC_MemberTemplate: + case Action::CCC_Statement: + case Action::CCC_RecoveryInFunction: + return true; + + case Action::CCC_ObjCInterface: + case Action::CCC_ObjCImplementation: + case Action::CCC_Expression: + case Action::CCC_Condition: + return false; + + case Action::CCC_ForInit: + return LangOpts.ObjC1 || LangOpts.C99; + } + + return false; +} + /// \brief Add language constructs that show up for "ordinary" names. static void AddOrdinaryNameResults(Action::CodeCompletionContext CCC, Scope *S, @@ -895,25 +1149,29 @@ static void AddOrdinaryNameResults(Action::CodeCompletionContext CCC, typedef CodeCompleteConsumer::Result Result; switch (CCC) { case Action::CCC_Namespace: - if (SemaRef.getLangOptions().CPlusPlus && Results.includeCodePatterns()) { - // namespace <identifier> { } - CodeCompletionString *Pattern = new CodeCompletionString; - Pattern->AddTypedTextChunk("namespace"); - Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); - Pattern->AddPlaceholderChunk("identifier"); - Pattern->AddChunk(CodeCompletionString::CK_LeftBrace); - Pattern->AddPlaceholderChunk("declarations"); - Pattern->AddChunk(CodeCompletionString::CK_VerticalSpace); - Pattern->AddChunk(CodeCompletionString::CK_RightBrace); - Results.AddResult(Result(Pattern)); - + if (SemaRef.getLangOptions().CPlusPlus) { + CodeCompletionString *Pattern = 0; + + if (Results.includeCodePatterns()) { + // namespace <identifier> { declarations } + CodeCompletionString *Pattern = new CodeCompletionString; + Pattern->AddTypedTextChunk("namespace"); + Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); + Pattern->AddPlaceholderChunk("identifier"); + Pattern->AddChunk(CodeCompletionString::CK_LeftBrace); + Pattern->AddPlaceholderChunk("declarations"); + Pattern->AddChunk(CodeCompletionString::CK_VerticalSpace); + Pattern->AddChunk(CodeCompletionString::CK_RightBrace); + 
Results.AddResult(Result(Pattern)); + } + // namespace identifier = identifier ; Pattern = new CodeCompletionString; Pattern->AddTypedTextChunk("namespace"); Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); - Pattern->AddPlaceholderChunk("identifier"); + Pattern->AddPlaceholderChunk("name"); Pattern->AddChunk(CodeCompletionString::CK_Equal); - Pattern->AddPlaceholderChunk("identifier"); + Pattern->AddPlaceholderChunk("namespace"); Results.AddResult(Result(Pattern)); // Using directives @@ -933,43 +1191,49 @@ static void AddOrdinaryNameResults(Action::CodeCompletionContext CCC, Pattern->AddChunk(CodeCompletionString::CK_RightParen); Results.AddResult(Result(Pattern)); - // Explicit template instantiation - Pattern = new CodeCompletionString; - Pattern->AddTypedTextChunk("template"); - Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); - Pattern->AddPlaceholderChunk("declaration"); - Results.AddResult(Result(Pattern)); + if (Results.includeCodePatterns()) { + // Explicit template instantiation + Pattern = new CodeCompletionString; + Pattern->AddTypedTextChunk("template"); + Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); + Pattern->AddPlaceholderChunk("declaration"); + Results.AddResult(Result(Pattern)); + } } if (SemaRef.getLangOptions().ObjC1) AddObjCTopLevelResults(Results, true); + AddTypedefResult(Results); // Fall through case Action::CCC_Class: - if (Results.includeCodePatterns()) - Results.AddResult(Result("typedef")); - - if (SemaRef.getLangOptions().CPlusPlus && Results.includeCodePatterns()) { + if (SemaRef.getLangOptions().CPlusPlus) { // Using declaration CodeCompletionString *Pattern = new CodeCompletionString; Pattern->AddTypedTextChunk("using"); Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); - Pattern->AddPlaceholderChunk("qualified-id"); + Pattern->AddPlaceholderChunk("qualifier"); + Pattern->AddTextChunk("::"); + Pattern->AddPlaceholderChunk("name"); Results.AddResult(Result(Pattern)); - // using 
typename qualified-id; (only in a dependent context) + // using typename qualifier::name (only in a dependent context) if (SemaRef.CurContext->isDependentContext()) { Pattern = new CodeCompletionString; Pattern->AddTypedTextChunk("using"); Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); Pattern->AddTextChunk("typename"); Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); - Pattern->AddPlaceholderChunk("qualified-id"); + Pattern->AddPlaceholderChunk("qualifier"); + Pattern->AddTextChunk("::"); + Pattern->AddPlaceholderChunk("name"); Results.AddResult(Result(Pattern)); } if (CCC == Action::CCC_Class) { + AddTypedefResult(Results); + // public: Pattern = new CodeCompletionString; Pattern->AddTypedTextChunk("public"); @@ -1025,8 +1289,7 @@ static void AddOrdinaryNameResults(Action::CodeCompletionContext CCC, case Action::CCC_RecoveryInFunction: case Action::CCC_Statement: { - if (Results.includeCodePatterns()) - Results.AddResult(Result("typedef")); + AddTypedefResult(Results); CodeCompletionString *Pattern = 0; if (SemaRef.getLangOptions().CPlusPlus && Results.includeCodePatterns()) { @@ -1081,10 +1344,11 @@ static void AddOrdinaryNameResults(Action::CodeCompletionContext CCC, } // Switch-specific statements. 
- if (!SemaRef.getSwitchStack().empty() && Results.includeCodePatterns()) { + if (!SemaRef.getSwitchStack().empty()) { // case expression: Pattern = new CodeCompletionString; Pattern->AddTypedTextChunk("case"); + Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); Pattern->AddPlaceholderChunk("expression"); Pattern->AddChunk(CodeCompletionString::CK_Colon); Results.AddResult(Result(Pattern)); @@ -1178,23 +1442,21 @@ static void AddOrdinaryNameResults(Action::CodeCompletionContext CCC, } Results.AddResult(Result(Pattern)); - if (Results.includeCodePatterns()) { - // goto identifier ; - Pattern = new CodeCompletionString; - Pattern->AddTypedTextChunk("goto"); - Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); - Pattern->AddPlaceholderChunk("identifier"); - Results.AddResult(Result(Pattern)); + // goto identifier ; + Pattern = new CodeCompletionString; + Pattern->AddTypedTextChunk("goto"); + Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); + Pattern->AddPlaceholderChunk("label"); + Results.AddResult(Result(Pattern)); - // Using directives - Pattern = new CodeCompletionString; - Pattern->AddTypedTextChunk("using"); - Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); - Pattern->AddTextChunk("namespace"); - Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); - Pattern->AddPlaceholderChunk("identifier"); - Results.AddResult(Result(Pattern)); - } + // Using directives + Pattern = new CodeCompletionString; + Pattern->AddTypedTextChunk("using"); + Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); + Pattern->AddTextChunk("namespace"); + Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); + Pattern->AddPlaceholderChunk("identifier"); + Results.AddResult(Result(Pattern)); } // Fall through (for statement expressions). 
@@ -1215,132 +1477,133 @@ static void AddOrdinaryNameResults(Action::CodeCompletionContext CCC, Results.AddResult(Result("true")); Results.AddResult(Result("false")); - if (Results.includeCodePatterns()) { - // dynamic_cast < type-id > ( expression ) - Pattern = new CodeCompletionString; - Pattern->AddTypedTextChunk("dynamic_cast"); - Pattern->AddChunk(CodeCompletionString::CK_LeftAngle); - Pattern->AddPlaceholderChunk("type-id"); - Pattern->AddChunk(CodeCompletionString::CK_RightAngle); - Pattern->AddChunk(CodeCompletionString::CK_LeftParen); - Pattern->AddPlaceholderChunk("expression"); - Pattern->AddChunk(CodeCompletionString::CK_RightParen); - Results.AddResult(Result(Pattern)); - - // static_cast < type-id > ( expression ) - Pattern = new CodeCompletionString; - Pattern->AddTypedTextChunk("static_cast"); - Pattern->AddChunk(CodeCompletionString::CK_LeftAngle); - Pattern->AddPlaceholderChunk("type-id"); - Pattern->AddChunk(CodeCompletionString::CK_RightAngle); - Pattern->AddChunk(CodeCompletionString::CK_LeftParen); - Pattern->AddPlaceholderChunk("expression"); - Pattern->AddChunk(CodeCompletionString::CK_RightParen); - Results.AddResult(Result(Pattern)); + // dynamic_cast < type-id > ( expression ) + Pattern = new CodeCompletionString; + Pattern->AddTypedTextChunk("dynamic_cast"); + Pattern->AddChunk(CodeCompletionString::CK_LeftAngle); + Pattern->AddPlaceholderChunk("type"); + Pattern->AddChunk(CodeCompletionString::CK_RightAngle); + Pattern->AddChunk(CodeCompletionString::CK_LeftParen); + Pattern->AddPlaceholderChunk("expression"); + Pattern->AddChunk(CodeCompletionString::CK_RightParen); + Results.AddResult(Result(Pattern)); + + // static_cast < type-id > ( expression ) + Pattern = new CodeCompletionString; + Pattern->AddTypedTextChunk("static_cast"); + Pattern->AddChunk(CodeCompletionString::CK_LeftAngle); + Pattern->AddPlaceholderChunk("type"); + Pattern->AddChunk(CodeCompletionString::CK_RightAngle); + 
Pattern->AddChunk(CodeCompletionString::CK_LeftParen); + Pattern->AddPlaceholderChunk("expression"); + Pattern->AddChunk(CodeCompletionString::CK_RightParen); + Results.AddResult(Result(Pattern)); - // reinterpret_cast < type-id > ( expression ) - Pattern = new CodeCompletionString; - Pattern->AddTypedTextChunk("reinterpret_cast"); - Pattern->AddChunk(CodeCompletionString::CK_LeftAngle); - Pattern->AddPlaceholderChunk("type-id"); - Pattern->AddChunk(CodeCompletionString::CK_RightAngle); - Pattern->AddChunk(CodeCompletionString::CK_LeftParen); - Pattern->AddPlaceholderChunk("expression"); - Pattern->AddChunk(CodeCompletionString::CK_RightParen); - Results.AddResult(Result(Pattern)); + // reinterpret_cast < type-id > ( expression ) + Pattern = new CodeCompletionString; + Pattern->AddTypedTextChunk("reinterpret_cast"); + Pattern->AddChunk(CodeCompletionString::CK_LeftAngle); + Pattern->AddPlaceholderChunk("type"); + Pattern->AddChunk(CodeCompletionString::CK_RightAngle); + Pattern->AddChunk(CodeCompletionString::CK_LeftParen); + Pattern->AddPlaceholderChunk("expression"); + Pattern->AddChunk(CodeCompletionString::CK_RightParen); + Results.AddResult(Result(Pattern)); - // const_cast < type-id > ( expression ) - Pattern = new CodeCompletionString; - Pattern->AddTypedTextChunk("const_cast"); - Pattern->AddChunk(CodeCompletionString::CK_LeftAngle); - Pattern->AddPlaceholderChunk("type-id"); - Pattern->AddChunk(CodeCompletionString::CK_RightAngle); - Pattern->AddChunk(CodeCompletionString::CK_LeftParen); - Pattern->AddPlaceholderChunk("expression"); - Pattern->AddChunk(CodeCompletionString::CK_RightParen); - Results.AddResult(Result(Pattern)); + // const_cast < type-id > ( expression ) + Pattern = new CodeCompletionString; + Pattern->AddTypedTextChunk("const_cast"); + Pattern->AddChunk(CodeCompletionString::CK_LeftAngle); + Pattern->AddPlaceholderChunk("type"); + Pattern->AddChunk(CodeCompletionString::CK_RightAngle); + 
Pattern->AddChunk(CodeCompletionString::CK_LeftParen); + Pattern->AddPlaceholderChunk("expression"); + Pattern->AddChunk(CodeCompletionString::CK_RightParen); + Results.AddResult(Result(Pattern)); - // typeid ( expression-or-type ) - Pattern = new CodeCompletionString; - Pattern->AddTypedTextChunk("typeid"); - Pattern->AddChunk(CodeCompletionString::CK_LeftParen); - Pattern->AddPlaceholderChunk("expression-or-type"); - Pattern->AddChunk(CodeCompletionString::CK_RightParen); - Results.AddResult(Result(Pattern)); + // typeid ( expression-or-type ) + Pattern = new CodeCompletionString; + Pattern->AddTypedTextChunk("typeid"); + Pattern->AddChunk(CodeCompletionString::CK_LeftParen); + Pattern->AddPlaceholderChunk("expression-or-type"); + Pattern->AddChunk(CodeCompletionString::CK_RightParen); + Results.AddResult(Result(Pattern)); - // new T ( ... ) - Pattern = new CodeCompletionString; - Pattern->AddTypedTextChunk("new"); - Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); - Pattern->AddPlaceholderChunk("type-id"); - Pattern->AddChunk(CodeCompletionString::CK_LeftParen); - Pattern->AddPlaceholderChunk("expressions"); - Pattern->AddChunk(CodeCompletionString::CK_RightParen); - Results.AddResult(Result(Pattern)); + // new T ( ... ) + Pattern = new CodeCompletionString; + Pattern->AddTypedTextChunk("new"); + Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); + Pattern->AddPlaceholderChunk("type"); + Pattern->AddChunk(CodeCompletionString::CK_LeftParen); + Pattern->AddPlaceholderChunk("expressions"); + Pattern->AddChunk(CodeCompletionString::CK_RightParen); + Results.AddResult(Result(Pattern)); - // new T [ ] ( ... 
) - Pattern = new CodeCompletionString; - Pattern->AddTypedTextChunk("new"); - Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); - Pattern->AddPlaceholderChunk("type-id"); - Pattern->AddChunk(CodeCompletionString::CK_LeftBracket); - Pattern->AddPlaceholderChunk("size"); - Pattern->AddChunk(CodeCompletionString::CK_RightBracket); - Pattern->AddChunk(CodeCompletionString::CK_LeftParen); - Pattern->AddPlaceholderChunk("expressions"); - Pattern->AddChunk(CodeCompletionString::CK_RightParen); - Results.AddResult(Result(Pattern)); - - // delete expression - Pattern = new CodeCompletionString; - Pattern->AddTypedTextChunk("delete"); - Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); - Pattern->AddPlaceholderChunk("expression"); - Results.AddResult(Result(Pattern)); + // new T [ ] ( ... ) + Pattern = new CodeCompletionString; + Pattern->AddTypedTextChunk("new"); + Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); + Pattern->AddPlaceholderChunk("type"); + Pattern->AddChunk(CodeCompletionString::CK_LeftBracket); + Pattern->AddPlaceholderChunk("size"); + Pattern->AddChunk(CodeCompletionString::CK_RightBracket); + Pattern->AddChunk(CodeCompletionString::CK_LeftParen); + Pattern->AddPlaceholderChunk("expressions"); + Pattern->AddChunk(CodeCompletionString::CK_RightParen); + Results.AddResult(Result(Pattern)); - // delete [] expression - Pattern = new CodeCompletionString; - Pattern->AddTypedTextChunk("delete"); - Pattern->AddChunk(CodeCompletionString::CK_LeftBracket); - Pattern->AddChunk(CodeCompletionString::CK_RightBracket); - Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); - Pattern->AddPlaceholderChunk("expression"); - Results.AddResult(Result(Pattern)); + // delete expression + Pattern = new CodeCompletionString; + Pattern->AddTypedTextChunk("delete"); + Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); + Pattern->AddPlaceholderChunk("expression"); + Results.AddResult(Result(Pattern)); - // throw expression - 
Pattern = new CodeCompletionString; - Pattern->AddTypedTextChunk("throw"); - Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); - Pattern->AddPlaceholderChunk("expression"); - Results.AddResult(Result(Pattern)); - } + // delete [] expression + Pattern = new CodeCompletionString; + Pattern->AddTypedTextChunk("delete"); + Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); + Pattern->AddChunk(CodeCompletionString::CK_LeftBracket); + Pattern->AddChunk(CodeCompletionString::CK_RightBracket); + Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); + Pattern->AddPlaceholderChunk("expression"); + Results.AddResult(Result(Pattern)); + + // throw expression + Pattern = new CodeCompletionString; + Pattern->AddTypedTextChunk("throw"); + Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); + Pattern->AddPlaceholderChunk("expression"); + Results.AddResult(Result(Pattern)); // FIXME: Rethrow? } if (SemaRef.getLangOptions().ObjC1) { // Add "super", if we're in an Objective-C class with a superclass. - if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl()) - if (Method->getClassInterface()->getSuperClass()) - Results.AddResult(Result("super")); - + if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl()) { + // The interface can be NULL. 
+ if (ObjCInterfaceDecl *ID = Method->getClassInterface()) + if (ID->getSuperClass()) + Results.AddResult(Result("super")); + } + AddObjCExpressionResults(Results, true); } - if (Results.includeCodePatterns()) { - // sizeof expression - Pattern = new CodeCompletionString; - Pattern->AddTypedTextChunk("sizeof"); - Pattern->AddChunk(CodeCompletionString::CK_LeftParen); - Pattern->AddPlaceholderChunk("expression-or-type"); - Pattern->AddChunk(CodeCompletionString::CK_RightParen); - Results.AddResult(Result(Pattern)); - } + // sizeof expression + Pattern = new CodeCompletionString; + Pattern->AddTypedTextChunk("sizeof"); + Pattern->AddChunk(CodeCompletionString::CK_LeftParen); + Pattern->AddPlaceholderChunk("expression-or-type"); + Pattern->AddChunk(CodeCompletionString::CK_RightParen); + Results.AddResult(Result(Pattern)); break; } } - AddTypeSpecifierResults(SemaRef.getLangOptions(), Results); + if (WantTypesInContext(CCC, SemaRef.getLangOptions())) + AddTypeSpecifierResults(SemaRef.getLangOptions(), Results); if (SemaRef.getLangOptions().CPlusPlus) Results.AddResult(Result("operator")); @@ -1702,9 +1965,9 @@ CodeCompleteConsumer::Result::CreateCodeCompletionString(Sema &S) { if (IdentifierInfo *II = Sel.getIdentifierInfoForSlot(Idx)) Keyword += II->getName().str(); Keyword += ":"; - if (Idx < StartParameter || AllParametersAreInformative) { + if (Idx < StartParameter || AllParametersAreInformative) Result->AddInformativeChunk(Keyword); - } else if (Idx == StartParameter) + else if (Idx == StartParameter) Result->AddTypedTextChunk(Keyword); else Result->AddTextChunk(Keyword); @@ -1719,14 +1982,18 @@ CodeCompleteConsumer::Result::CreateCodeCompletionString(Sema &S) { Arg = "(" + Arg + ")"; if (IdentifierInfo *II = (*P)->getIdentifier()) Arg += II->getName().str(); - if (AllParametersAreInformative) + if (DeclaringEntity) + Result->AddTextChunk(Arg); + else if (AllParametersAreInformative) Result->AddInformativeChunk(Arg); else Result->AddPlaceholderChunk(Arg); } if 
(Method->isVariadic()) { - if (AllParametersAreInformative) + if (DeclaringEntity) + Result->AddTextChunk(", ..."); + else if (AllParametersAreInformative) Result->AddInformativeChunk(", ..."); else Result->AddPlaceholderChunk(", ..."); @@ -1921,12 +2188,25 @@ namespace { }; } -static void AddMacroResults(Preprocessor &PP, ResultBuilder &Results) { +static void AddMacroResults(Preprocessor &PP, ResultBuilder &Results, + bool TargetTypeIsPointer = false) { + typedef CodeCompleteConsumer::Result Result; + Results.EnterNewScope(); for (Preprocessor::macro_iterator M = PP.macro_begin(), MEnd = PP.macro_end(); - M != MEnd; ++M) - Results.AddResult(M->first); + M != MEnd; ++M) { + unsigned Priority = CCP_Macro; + + // Treat the "nil" and "NULL" macros as null pointer constants. + if (M->first->isStr("nil") || M->first->isStr("NULL")) { + Priority = CCP_Constant; + if (TargetTypeIsPointer) + Priority = Priority / CCF_SimilarTypeMatch; + } + + Results.AddResult(Result(M->first, Priority)); + } Results.ExitScope(); } @@ -1966,7 +2246,10 @@ void Sema::CodeCompleteOrdinaryName(Scope *S, case CCC_Statement: case CCC_ForInit: case CCC_Condition: - Results.setFilter(&ResultBuilder::IsOrdinaryName); + if (WantTypesInContext(CompletionContext, getLangOptions())) + Results.setFilter(&ResultBuilder::IsOrdinaryName); + else + Results.setFilter(&ResultBuilder::IsOrdinaryNonTypeName); break; case CCC_RecoveryInFunction: @@ -1986,6 +2269,36 @@ void Sema::CodeCompleteOrdinaryName(Scope *S, HandleCodeCompleteResults(this, CodeCompleter, Results.data(),Results.size()); } +/// \brief Perform code-completion in an expression context when we know what +/// type we're looking for. 
+void Sema::CodeCompleteExpression(Scope *S, QualType T) { + typedef CodeCompleteConsumer::Result Result; + ResultBuilder Results(*this); + + if (WantTypesInContext(CCC_Expression, getLangOptions())) + Results.setFilter(&ResultBuilder::IsOrdinaryName); + else + Results.setFilter(&ResultBuilder::IsOrdinaryNonTypeName); + Results.setPreferredType(T.getNonReferenceType()); + + CodeCompletionDeclConsumer Consumer(Results, CurContext); + LookupVisibleDecls(S, LookupOrdinaryName, Consumer); + + Results.EnterNewScope(); + AddOrdinaryNameResults(CCC_Expression, S, *this, Results); + Results.ExitScope(); + + bool PreferredTypeIsPointer = false; + if (!T.isNull()) + PreferredTypeIsPointer = T->isAnyPointerType() || + T->isMemberPointerType() || T->isBlockPointerType(); + + if (CodeCompleter->includeMacros()) + AddMacroResults(PP, Results, PreferredTypeIsPointer); + HandleCodeCompleteResults(this, CodeCompleter, Results.data(),Results.size()); +} + + static void AddObjCProperties(ObjCContainerDecl *Container, bool AllowCategories, DeclContext *CurContext, @@ -2254,6 +2567,17 @@ namespace { }; } +static bool anyNullArguments(Expr **Args, unsigned NumArgs) { + if (NumArgs && !Args) + return true; + + for (unsigned I = 0; I != NumArgs; ++I) + if (!Args[I]) + return true; + + return false; +} + void Sema::CodeCompleteCall(Scope *S, ExprTy *FnIn, ExprTy **ArgsIn, unsigned NumArgs) { if (!CodeCompleter) @@ -2268,7 +2592,7 @@ void Sema::CodeCompleteCall(Scope *S, ExprTy *FnIn, Expr **Args = (Expr **)ArgsIn; // Ignore type-dependent call expressions entirely. 
- if (Fn->isTypeDependent() || + if (!Fn || Fn->isTypeDependent() || anyNullArguments(Args, NumArgs) || Expr::hasAnyTypeDependentArguments(Args, NumArgs)) { CodeCompleteOrdinaryName(S, CCC_Expression); return; @@ -2292,7 +2616,8 @@ void Sema::CodeCompleteCall(Scope *S, ExprTy *FnIn, else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(NakedFn)) { FunctionDecl *FDecl = dyn_cast<FunctionDecl>(DRE->getDecl()); if (FDecl) { - if (!FDecl->getType()->getAs<FunctionProtoType>()) + if (!getLangOptions().CPlusPlus || + !FDecl->getType()->getAs<FunctionProtoType>()) Results.push_back(ResultCandidate(FDecl)); else // FIXME: access? @@ -2302,6 +2627,8 @@ void Sema::CodeCompleteCall(Scope *S, ExprTy *FnIn, } } + QualType ParamType; + if (!CandidateSet.empty()) { // Sort the overload candidate set by placing the best overloads first. std::stable_sort(CandidateSet.begin(), CandidateSet.end(), @@ -2314,14 +2641,85 @@ void Sema::CodeCompleteCall(Scope *S, ExprTy *FnIn, if (Cand->Viable) Results.push_back(ResultCandidate(Cand->Function)); } + + // From the viable candidates, try to determine the type of this parameter. + for (unsigned I = 0, N = Results.size(); I != N; ++I) { + if (const FunctionType *FType = Results[I].getFunctionType()) + if (const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FType)) + if (NumArgs < Proto->getNumArgs()) { + if (ParamType.isNull()) + ParamType = Proto->getArgType(NumArgs); + else if (!Context.hasSameUnqualifiedType( + ParamType.getNonReferenceType(), + Proto->getArgType(NumArgs).getNonReferenceType())) { + ParamType = QualType(); + break; + } + } + } + } else { + // Try to determine the parameter type from the type of the expression + // being called. 
+ QualType FunctionType = Fn->getType(); + if (const PointerType *Ptr = FunctionType->getAs<PointerType>()) + FunctionType = Ptr->getPointeeType(); + else if (const BlockPointerType *BlockPtr + = FunctionType->getAs<BlockPointerType>()) + FunctionType = BlockPtr->getPointeeType(); + else if (const MemberPointerType *MemPtr + = FunctionType->getAs<MemberPointerType>()) + FunctionType = MemPtr->getPointeeType(); + + if (const FunctionProtoType *Proto + = FunctionType->getAs<FunctionProtoType>()) { + if (NumArgs < Proto->getNumArgs()) + ParamType = Proto->getArgType(NumArgs); + } } - CodeCompleteOrdinaryName(S, CCC_Expression); + if (ParamType.isNull()) + CodeCompleteOrdinaryName(S, CCC_Expression); + else + CodeCompleteExpression(S, ParamType); + if (!Results.empty()) CodeCompleter->ProcessOverloadCandidates(*this, NumArgs, Results.data(), Results.size()); } +void Sema::CodeCompleteInitializer(Scope *S, DeclPtrTy D) { + ValueDecl *VD = dyn_cast_or_null<ValueDecl>(D.getAs<Decl>()); + if (!VD) { + CodeCompleteOrdinaryName(S, CCC_Expression); + return; + } + + CodeCompleteExpression(S, VD->getType()); +} + +void Sema::CodeCompleteReturn(Scope *S) { + QualType ResultType; + if (isa<BlockDecl>(CurContext)) { + if (BlockScopeInfo *BSI = getCurBlock()) + ResultType = BSI->ReturnType; + } else if (FunctionDecl *Function = dyn_cast<FunctionDecl>(CurContext)) + ResultType = Function->getResultType(); + else if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(CurContext)) + ResultType = Method->getResultType(); + + if (ResultType.isNull()) + CodeCompleteOrdinaryName(S, CCC_Expression); + else + CodeCompleteExpression(S, ResultType); +} + +void Sema::CodeCompleteAssignmentRHS(Scope *S, ExprTy *LHS) { + if (LHS) + CodeCompleteExpression(S, static_cast<Expr *>(LHS)->getType()); + else + CodeCompleteOrdinaryName(S, CCC_Expression); +} + void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext) { if (!SS.getScopeRep() || !CodeCompleter) @@ -2460,9 
+2858,6 @@ void Sema::CodeCompleteOperatorName(Scope *S) { static void AddObjCImplementationResults(const LangOptions &LangOpts, ResultBuilder &Results, bool NeedAt) { - if (!Results.includeCodePatterns()) - return; - typedef CodeCompleteConsumer::Result Result; // Since we have an implementation, we can end it. Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,end))); @@ -2488,9 +2883,6 @@ static void AddObjCImplementationResults(const LangOptions &LangOpts, static void AddObjCInterfaceResults(const LangOptions &LangOpts, ResultBuilder &Results, bool NeedAt) { - if (!Results.includeCodePatterns()) - return; - typedef CodeCompleteConsumer::Result Result; // Since we have an interface or protocol, we can end it. @@ -2509,9 +2901,6 @@ static void AddObjCInterfaceResults(const LangOptions &LangOpts, } static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt) { - if (!Results.includeCodePatterns()) - return; - typedef CodeCompleteConsumer::Result Result; CodeCompletionString *Pattern = 0; @@ -2519,31 +2908,33 @@ static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt) { Pattern = new CodeCompletionString; Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,class)); Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); - Pattern->AddPlaceholderChunk("identifier"); - Results.AddResult(Result(Pattern)); - - // @interface name - // FIXME: Could introduce the whole pattern, including superclasses and - // such. 
- Pattern = new CodeCompletionString; - Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,interface)); - Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); - Pattern->AddPlaceholderChunk("class"); + Pattern->AddPlaceholderChunk("name"); Results.AddResult(Result(Pattern)); - // @protocol name - Pattern = new CodeCompletionString; - Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,protocol)); - Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); - Pattern->AddPlaceholderChunk("protocol"); - Results.AddResult(Result(Pattern)); + if (Results.includeCodePatterns()) { + // @interface name + // FIXME: Could introduce the whole pattern, including superclasses and + // such. + Pattern = new CodeCompletionString; + Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,interface)); + Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); + Pattern->AddPlaceholderChunk("class"); + Results.AddResult(Result(Pattern)); - // @implementation name - Pattern = new CodeCompletionString; - Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,implementation)); - Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); - Pattern->AddPlaceholderChunk("class"); - Results.AddResult(Result(Pattern)); + // @protocol name + Pattern = new CodeCompletionString; + Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,protocol)); + Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); + Pattern->AddPlaceholderChunk("protocol"); + Results.AddResult(Result(Pattern)); + + // @implementation name + Pattern = new CodeCompletionString; + Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,implementation)); + Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); + Pattern->AddPlaceholderChunk("class"); + Results.AddResult(Result(Pattern)); + } // @compatibility_alias name Pattern = new CodeCompletionString; @@ -2571,9 +2962,6 @@ void Sema::CodeCompleteObjCAtDirective(Scope *S, DeclPtrTy ObjCImpDecl, } static void 
AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt) { - if (!Results.includeCodePatterns()) - return; - typedef CodeCompleteConsumer::Result Result; CodeCompletionString *Pattern = 0; @@ -2603,31 +2991,30 @@ static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt) { } static void AddObjCStatementResults(ResultBuilder &Results, bool NeedAt) { - if (!Results.includeCodePatterns()) - return; - typedef CodeCompleteConsumer::Result Result; CodeCompletionString *Pattern = 0; - // @try { statements } @catch ( declaration ) { statements } @finally - // { statements } - Pattern = new CodeCompletionString; - Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,try)); - Pattern->AddChunk(CodeCompletionString::CK_LeftBrace); - Pattern->AddPlaceholderChunk("statements"); - Pattern->AddChunk(CodeCompletionString::CK_RightBrace); - Pattern->AddTextChunk("@catch"); - Pattern->AddChunk(CodeCompletionString::CK_LeftParen); - Pattern->AddPlaceholderChunk("parameter"); - Pattern->AddChunk(CodeCompletionString::CK_RightParen); - Pattern->AddChunk(CodeCompletionString::CK_LeftBrace); - Pattern->AddPlaceholderChunk("statements"); - Pattern->AddChunk(CodeCompletionString::CK_RightBrace); - Pattern->AddTextChunk("@finally"); - Pattern->AddChunk(CodeCompletionString::CK_LeftBrace); - Pattern->AddPlaceholderChunk("statements"); - Pattern->AddChunk(CodeCompletionString::CK_RightBrace); - Results.AddResult(Result(Pattern)); + if (Results.includeCodePatterns()) { + // @try { statements } @catch ( declaration ) { statements } @finally + // { statements } + Pattern = new CodeCompletionString; + Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,try)); + Pattern->AddChunk(CodeCompletionString::CK_LeftBrace); + Pattern->AddPlaceholderChunk("statements"); + Pattern->AddChunk(CodeCompletionString::CK_RightBrace); + Pattern->AddTextChunk("@catch"); + Pattern->AddChunk(CodeCompletionString::CK_LeftParen); + Pattern->AddPlaceholderChunk("parameter"); + 
Pattern->AddChunk(CodeCompletionString::CK_RightParen); + Pattern->AddChunk(CodeCompletionString::CK_LeftBrace); + Pattern->AddPlaceholderChunk("statements"); + Pattern->AddChunk(CodeCompletionString::CK_RightBrace); + Pattern->AddTextChunk("@finally"); + Pattern->AddChunk(CodeCompletionString::CK_LeftBrace); + Pattern->AddPlaceholderChunk("statements"); + Pattern->AddChunk(CodeCompletionString::CK_RightBrace); + Results.AddResult(Result(Pattern)); + } // @throw Pattern = new CodeCompletionString; @@ -2636,25 +3023,24 @@ static void AddObjCStatementResults(ResultBuilder &Results, bool NeedAt) { Pattern->AddPlaceholderChunk("expression"); Results.AddResult(Result(Pattern)); - // @synchronized ( expression ) { statements } - Pattern = new CodeCompletionString; - Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,synchronized)); - Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); - Pattern->AddChunk(CodeCompletionString::CK_LeftParen); - Pattern->AddPlaceholderChunk("expression"); - Pattern->AddChunk(CodeCompletionString::CK_RightParen); - Pattern->AddChunk(CodeCompletionString::CK_LeftBrace); - Pattern->AddPlaceholderChunk("statements"); - Pattern->AddChunk(CodeCompletionString::CK_RightBrace); - Results.AddResult(Result(Pattern)); + if (Results.includeCodePatterns()) { + // @synchronized ( expression ) { statements } + Pattern = new CodeCompletionString; + Pattern->AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,synchronized)); + Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); + Pattern->AddChunk(CodeCompletionString::CK_LeftParen); + Pattern->AddPlaceholderChunk("expression"); + Pattern->AddChunk(CodeCompletionString::CK_RightParen); + Pattern->AddChunk(CodeCompletionString::CK_LeftBrace); + Pattern->AddPlaceholderChunk("statements"); + Pattern->AddChunk(CodeCompletionString::CK_RightBrace); + Results.AddResult(Result(Pattern)); + } } static void AddObjCVisibilityResults(const LangOptions &LangOpts, ResultBuilder &Results, bool NeedAt) { - 
if (!Results.includeCodePatterns()) - return; - typedef CodeCompleteConsumer::Result Result; Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,private))); Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,protected))); @@ -3021,6 +3407,31 @@ static ObjCInterfaceDecl *GetAssumedMessageSendExprType(Expr *E) { .Default(0); } +void Sema::CodeCompleteObjCMessageReceiver(Scope *S) { + typedef CodeCompleteConsumer::Result Result; + ResultBuilder Results(*this); + + // Find anything that looks like it could be a message receiver. + Results.setFilter(&ResultBuilder::IsObjCMessageReceiver); + CodeCompletionDeclConsumer Consumer(Results, CurContext); + Results.EnterNewScope(); + LookupVisibleDecls(S, LookupOrdinaryName, Consumer); + + // If we are in an Objective-C method inside a class that has a superclass, + // add "super" as an option. + if (ObjCMethodDecl *Method = getCurMethodDecl()) + if (ObjCInterfaceDecl *Iface = Method->getClassInterface()) + if (Iface->getSuperClass()) + Results.AddResult(Result("super")); + + Results.ExitScope(); + + if (CodeCompleter->includeMacros()) + AddMacroResults(PP, Results); + HandleCodeCompleteResults(this, CodeCompleter, Results.data(),Results.size()); + +} + void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, IdentifierInfo **SelIdents, unsigned NumSelIdents) { @@ -3113,9 +3524,9 @@ void Sema::CodeCompleteObjCClassMessage(Scope *S, TypeTy *Receiver, // If we have an external source, load the entire class method // pool from the PCH file. 
if (ExternalSource) { - for (uint32_t I = 0, N = ExternalSource->GetNumKnownSelectors(); I != N; - ++I) { - Selector Sel = ExternalSource->GetSelector(I); + for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors(); + I != N; ++I) { + Selector Sel = ExternalSource->GetExternalSelector(I); if (Sel.isNull() || FactoryMethodPool.count(Sel) || InstanceMethodPool.count(Sel)) continue; @@ -3214,9 +3625,9 @@ void Sema::CodeCompleteObjCInstanceMessage(Scope *S, ExprTy *Receiver, // If we have an external source, load the entire class method // pool from the PCH file. if (ExternalSource) { - for (uint32_t I = 0, N = ExternalSource->GetNumKnownSelectors(); I != N; - ++I) { - Selector Sel = ExternalSource->GetSelector(I); + for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors(); + I != N; ++I) { + Selector Sel = ExternalSource->GetExternalSelector(I); if (Sel.isNull() || InstanceMethodPool.count(Sel) || FactoryMethodPool.count(Sel)) continue; @@ -3550,14 +3961,11 @@ static void FindImplementableMethods(ASTContext &Context, // Add methods from any class extensions (but not from categories; // those should go into category implementations). - for (ObjCCategoryDecl *Cat = IFace->getCategoryList(); Cat; - Cat = Cat->getNextClassCategory()) { - if (!Cat->IsClassExtension()) - continue; - - FindImplementableMethods(Context, Cat, WantInstanceMethods, ReturnType, + for (const ObjCCategoryDecl *Cat = IFace->getFirstClassExtension(); Cat; + Cat = Cat->getNextClassExtension()) + FindImplementableMethods(Context, const_cast<ObjCCategoryDecl*>(Cat), + WantInstanceMethods, ReturnType, IsInImplementation, KnownMethods); - } } if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(Container)) { @@ -3714,7 +4122,7 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Pattern->AddTextChunk("..."); } - if (IsInImplementation) { + if (IsInImplementation && Results.includeCodePatterns()) { // We will be defining the method here, so add a compound statement. 
Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace); Pattern->AddChunk(CodeCompletionString::CK_LeftBrace); @@ -3739,3 +4147,70 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, HandleCodeCompleteResults(this, CodeCompleter, Results.data(),Results.size()); } + +void Sema::CodeCompleteObjCMethodDeclSelector(Scope *S, + bool IsInstanceMethod, + bool AtParameterName, + TypeTy *ReturnTy, + IdentifierInfo **SelIdents, + unsigned NumSelIdents) { + llvm::DenseMap<Selector, ObjCMethodList> &Pool + = IsInstanceMethod? InstanceMethodPool : FactoryMethodPool; + + // If we have an external source, load the entire class method + // pool from the PCH file. + if (ExternalSource) { + for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors(); + I != N; ++I) { + Selector Sel = ExternalSource->GetExternalSelector(I); + if (Sel.isNull() || InstanceMethodPool.count(Sel) || + FactoryMethodPool.count(Sel)) + continue; + + ReadMethodPool(Sel, IsInstanceMethod); + } + } + + // Build the set of methods we can see. + typedef CodeCompleteConsumer::Result Result; + ResultBuilder Results(*this); + + if (ReturnTy) + Results.setPreferredType(GetTypeFromParser(ReturnTy).getNonReferenceType()); + + Results.EnterNewScope(); + for (llvm::DenseMap<Selector, ObjCMethodList>::iterator M = Pool.begin(), + MEnd = Pool.end(); + M != MEnd; + ++M) { + for (ObjCMethodList *MethList = &M->second; MethList && MethList->Method; + MethList = MethList->Next) { + if (!isAcceptableObjCMethod(MethList->Method, MK_Any, SelIdents, + NumSelIdents)) + continue; + + if (AtParameterName) { + // Suggest parameter names we've seen before. 
+ if (NumSelIdents && NumSelIdents <= MethList->Method->param_size()) { + ParmVarDecl *Param = MethList->Method->param_begin()[NumSelIdents-1]; + if (Param->getIdentifier()) { + CodeCompletionString *Pattern = new CodeCompletionString; + Pattern->AddTypedTextChunk(Param->getIdentifier()->getName()); + Results.AddResult(Pattern); + } + } + + continue; + } + + Result R(MethList->Method, 0); + R.StartParameter = NumSelIdents; + R.AllParametersAreInformative = false; + R.DeclaringEntity = true; + Results.MaybeAddResult(R, CurContext); + } + } + + Results.ExitScope(); + HandleCodeCompleteResults(this, CodeCompleter, Results.data(),Results.size()); +} diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp index af02099..c1c898f 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp @@ -87,8 +87,8 @@ Sema::TypeTy *Sema::getTypeName(IdentifierInfo &II, SourceLocation NameLoc, if (!isClassName) return 0; - // We know from the grammar that this name refers to a type, so build a - // DependentNameType node to describe the type. + // We know from the grammar that this name refers to a type, + // so build a dependent node to describe the type. return CheckTypenameType(ETK_None, (NestedNameSpecifier *)SS->getScopeRep(), II, SourceLocation(), SS->getRange(), NameLoc @@ -196,12 +196,6 @@ Sema::TypeTy *Sema::getTypeName(IdentifierInfo &II, SourceLocation NameLoc, } else if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(IIDecl)) { T = Context.getObjCInterfaceType(IDecl); - } else if (UnresolvedUsingTypenameDecl *UUDecl = - dyn_cast<UnresolvedUsingTypenameDecl>(IIDecl)) { - // FIXME: preserve source structure information. - T = Context.getDependentNameType(ETK_None, - UUDecl->getTargetNestedNameSpecifier(), - &II); } else { // If it's not plausibly a type, suppress diagnostics. 
Result.suppressDiagnostics(); @@ -313,7 +307,7 @@ bool Sema::DiagnoseUnknownTypeName(const IdentifierInfo &II, << (NestedNameSpecifier *)SS->getScopeRep() << II.getName() << SourceRange(SS->getRange().getBegin(), IILoc) << FixItHint::CreateInsertion(SS->getRange().getBegin(), "typename "); - SuggestedType = ActOnTypenameType(SourceLocation(), *SS, II, IILoc).get(); + SuggestedType = ActOnTypenameType(S, SourceLocation(), *SS, II, IILoc).get(); } else { assert(SS && SS->isInvalid() && "Invalid scope specifier has already been diagnosed"); @@ -959,7 +953,7 @@ Sema::CXXSpecialMember Sema::getSpecialMember(const CXXMethodDecl *MD) { return Sema::CXXCopyAssignment; } -/// canREdefineFunction - checks if a function can be redefined. Currently, +/// canRedefineFunction - checks if a function can be redefined. Currently, /// only extern inline functions can be redefined, and even then only in /// GNU89 mode. static bool canRedefineFunction(const FunctionDecl *FD, @@ -1063,13 +1057,27 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD) { } // FIXME: diagnose the other way around? - if (OldType->getNoReturnAttr() && - !NewType->getNoReturnAttr()) { + if (OldType->getNoReturnAttr() && !NewType->getNoReturnAttr()) { NewQType = Context.getNoReturnType(NewQType); New->setType(NewQType); assert(NewQType.isCanonical()); } + // Merge regparm attribute. 
+ if (OldType->getRegParmType() != NewType->getRegParmType()) { + if (NewType->getRegParmType()) { + Diag(New->getLocation(), diag::err_regparm_mismatch) + << NewType->getRegParmType() + << OldType->getRegParmType(); + Diag(Old->getLocation(), diag::note_previous_declaration); + return true; + } + + NewQType = Context.getRegParmType(NewQType, OldType->getRegParmType()); + New->setType(NewQType); + assert(NewQType.isCanonical()); + } + if (getLangOptions().CPlusPlus) { // (C++98 13.1p2): // Certain function declarations cannot be overloaded: @@ -1446,6 +1454,17 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) { New->setInvalidDecl(); return; } + // c99 6.2.2 P4. + // For an identifier declared with the storage-class specifier extern in a + // scope in which a prior declaration of that identifier is visible, if + // the prior declaration specifies internal or external linkage, the linkage + // of the identifier at the later declaration is the same as the linkage + // specified at the prior declaration. + // FIXME. revisit this code. + if (New->hasExternalStorage() && + Old->getLinkage() == InternalLinkage && + New->getDeclContext() == Old->getDeclContext()) + New->setStorageClass(Old->getStorageClass()); // Keep a chain of previous declarations. 
New->setPreviousDeclaration(Old); @@ -1520,6 +1539,14 @@ Sema::DeclPtrTy Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, return DeclPtrTy::make(Tag); } + if (getLangOptions().CPlusPlus && + DS.getStorageClassSpec() != DeclSpec::SCS_typedef) + if (EnumDecl *Enum = dyn_cast_or_null<EnumDecl>(Tag)) + if (Enum->enumerator_begin() == Enum->enumerator_end() && + !Enum->getIdentifier() && !Enum->isInvalidDecl()) + Diag(Enum->getLocation(), diag::ext_no_declarators) + << DS.getSourceRange(); + if (!DS.isMissingDeclaratorOk() && DS.getTypeSpecType() != DeclSpec::TST_error) { // Warn about typedefs of enums without names, since this is an @@ -1770,6 +1797,8 @@ Sema::DeclPtrTy Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, << (int)Record->isUnion(); Invalid = true; } + } else if (isa<AccessSpecDecl>(*Mem)) { + // Any access specifier is fine. } else { // We have something that isn't a non-static data // member. Complain about it. @@ -1795,8 +1824,7 @@ Sema::DeclPtrTy Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, // Mock up a declarator. Declarator Dc(DS, Declarator::TypeNameContext); - TypeSourceInfo *TInfo = 0; - GetTypeForDeclarator(Dc, S, &TInfo); + TypeSourceInfo *TInfo = GetTypeForDeclarator(Dc, S); assert(TInfo && "couldn't build declarator info for anonymous struct/union"); // Create a declaration for this anonymous struct/union. 
@@ -2091,8 +2119,8 @@ Sema::HandleDeclarator(Scope *S, Declarator &D, NamedDecl *New; - TypeSourceInfo *TInfo = 0; - QualType R = GetTypeForDeclarator(D, S, &TInfo); + TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S); + QualType R = TInfo->getType(); LookupResult Previous(*this, Name, D.getIdentifierLoc(), LookupOrdinaryName, ForRedeclaration); @@ -2342,6 +2370,12 @@ Sema::ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, if (D.getDeclSpec().isThreadSpecified()) Diag(D.getDeclSpec().getThreadSpecLoc(), diag::err_invalid_thread); + if (D.getName().Kind != UnqualifiedId::IK_Identifier) { + Diag(D.getName().StartLocation, diag::err_typedef_not_identifier) + << D.getName().getSourceRange(); + return 0; + } + TypedefDecl *NewTD = ParseTypedefDecl(S, D, R, TInfo); if (!NewTD) return 0; @@ -2537,6 +2571,8 @@ Sema::ActOnVariableDeclarator(Scope* S, Declarator& D, DeclContext* DC, // Match up the template parameter lists with the scope specifier, then // determine whether we have a template or a template specialization. bool isExplicitSpecialization = false; + unsigned NumMatchedTemplateParamLists = TemplateParamLists.size(); + bool Invalid = false; if (TemplateParameterList *TemplateParams = MatchTemplateParametersToScopeSpecifier( D.getDeclSpec().getSourceRange().getBegin(), @@ -2544,7 +2580,11 @@ Sema::ActOnVariableDeclarator(Scope* S, Declarator& D, DeclContext* DC, (TemplateParameterList**)TemplateParamLists.get(), TemplateParamLists.size(), /*never a friend*/ false, - isExplicitSpecialization)) { + isExplicitSpecialization, + Invalid)) { + // All but one template parameter lists have been matching. + --NumMatchedTemplateParamLists; + if (TemplateParams->size() > 0) { // There is no such thing as a variable template. 
Diag(D.getIdentifierLoc(), diag::err_template_variable) @@ -2568,11 +2608,17 @@ Sema::ActOnVariableDeclarator(Scope* S, Declarator& D, DeclContext* DC, VarDecl *NewVD = VarDecl::Create(Context, DC, D.getIdentifierLoc(), II, R, TInfo, SC, SCAsWritten); - if (D.isInvalidType()) + if (D.isInvalidType() || Invalid) NewVD->setInvalidDecl(); SetNestedNameSpecifier(NewVD, D); + if (NumMatchedTemplateParamLists > 0) { + NewVD->setTemplateParameterListsInfo(Context, + NumMatchedTemplateParamLists, + (TemplateParameterList**)TemplateParamLists.release()); + } + if (D.getDeclSpec().isThreadSpecified()) { if (NewVD->hasLocalStorage()) Diag(D.getDeclSpec().getThreadSpecLoc(), diag::err_thread_non_global); @@ -2831,6 +2877,23 @@ void Sema::CheckVariableDeclaration(VarDecl *NewVD, return NewVD->setInvalidDecl(); } + // Function pointers and references cannot have qualified function type, only + // function pointer-to-members can do that. + QualType Pointee; + unsigned PtrOrRef = 0; + if (const PointerType *Ptr = T->getAs<PointerType>()) + Pointee = Ptr->getPointeeType(); + else if (const ReferenceType *Ref = T->getAs<ReferenceType>()) { + Pointee = Ref->getPointeeType(); + PtrOrRef = 1; + } + if (!Pointee.isNull() && Pointee->isFunctionProtoType() && + Pointee->getAs<FunctionProtoType>()->getTypeQuals() != 0) { + Diag(NewVD->getLocation(), diag::err_invalid_qualified_function_pointer) + << PtrOrRef; + return NewVD->setInvalidDecl(); + } + if (!Previous.empty()) { Redeclaration = true; MergeVarDecl(NewVD, Previous); @@ -2858,7 +2921,7 @@ static bool FindOverriddenMethod(const CXXBaseSpecifier *Specifier, // FIXME: Do we care about other names here too? if (Name.getNameKind() == DeclarationName::CXXDestructorName) { - // We really want to find the base class constructor here. + // We really want to find the base class destructor here. 
QualType T = Data->S->Context.getTypeDeclType(BaseRecord); CanQualType CT = Data->S->Context.getCanonicalType(T); @@ -2868,8 +2931,9 @@ static bool FindOverriddenMethod(const CXXBaseSpecifier *Specifier, for (Path.Decls = BaseRecord->lookup(Name); Path.Decls.first != Path.Decls.second; ++Path.Decls.first) { - if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(*Path.Decls.first)) { - if (MD->isVirtual() && !Data->S->IsOverload(Data->Method, MD)) + NamedDecl *D = *Path.Decls.first; + if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) { + if (MD->isVirtual() && !Data->S->IsOverload(Data->Method, MD, false)) return true; } } @@ -2992,7 +3056,7 @@ Sema::ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, } else if (Name.getNameKind() == DeclarationName::CXXDestructorName) { // This is a C++ destructor declaration. if (DC->isRecord()) { - R = CheckDestructorDeclarator(D, SC); + R = CheckDestructorDeclarator(D, R, SC); NewFD = CXXDestructorDecl::Create(Context, cast<CXXRecordDecl>(DC), @@ -3093,6 +3157,8 @@ Sema::ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, FunctionTemplateDecl *FunctionTemplate = 0; bool isExplicitSpecialization = false; bool isFunctionTemplateSpecialization = false; + unsigned NumMatchedTemplateParamLists = TemplateParamLists.size(); + bool Invalid = false; if (TemplateParameterList *TemplateParams = MatchTemplateParametersToScopeSpecifier( D.getDeclSpec().getSourceRange().getBegin(), @@ -3100,7 +3166,11 @@ Sema::ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, (TemplateParameterList**)TemplateParamLists.get(), TemplateParamLists.size(), isFriend, - isExplicitSpecialization)) { + isExplicitSpecialization, + Invalid)) { + // All but one template parameter lists have been matching. 
+ --NumMatchedTemplateParamLists; + if (TemplateParams->size() > 0) { // This is a function template @@ -3140,9 +3210,18 @@ Sema::ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, << FixItHint::CreateInsertion(InsertLoc, "<>"); } } + } - // FIXME: Free this memory properly. - TemplateParamLists.release(); + if (NumMatchedTemplateParamLists > 0) { + NewFD->setTemplateParameterListsInfo(Context, + NumMatchedTemplateParamLists, + (TemplateParameterList**)TemplateParamLists.release()); + } + + if (Invalid) { + NewFD->setInvalidDecl(); + if (FunctionTemplate) + FunctionTemplate->setInvalidDecl(); } // C++ [dcl.fct.spec]p5: @@ -3272,14 +3351,8 @@ Sema::ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, // Synthesize a parameter for each argument type. for (FunctionProtoType::arg_type_iterator AI = FT->arg_type_begin(), AE = FT->arg_type_end(); AI != AE; ++AI) { - ParmVarDecl *Param = ParmVarDecl::Create(Context, NewFD, - D.getIdentifierLoc(), 0, - *AI, - Context.getTrivialTypeSourceInfo(*AI, - D.getIdentifierLoc()), - VarDecl::None, - VarDecl::None, 0); - Param->setImplicit(); + ParmVarDecl *Param = + BuildParmVarDeclForTypedef(NewFD, D.getIdentifierLoc(), *AI); Params.push_back(Param); } } else { @@ -3456,7 +3529,7 @@ Sema::ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, if (Redeclaration && Previous.isSingleResult()) { const FunctionDecl *Def; FunctionDecl *PrevFD = dyn_cast<FunctionDecl>(Previous.getFoundDecl()); - if (PrevFD && PrevFD->getBody(Def) && D.hasAttributes()) { + if (PrevFD && PrevFD->hasBody(Def) && D.hasAttributes()) { Diag(NewFD->getLocation(), diag::warn_attribute_precede_definition); Diag(Def->getLocation(), diag::note_previous_definition); } @@ -3582,13 +3655,10 @@ void Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, } } - switch (CheckOverload(NewFD, Previous, OldDecl)) { + switch (CheckOverload(S, NewFD, Previous, OldDecl, + /*NewIsUsingDecl*/ false)) { case Ovl_Match: Redeclaration = 
true; - if (isa<UsingShadowDecl>(OldDecl) && CurContext->isRecord()) { - HideUsingShadowDecl(S, cast<UsingShadowDecl>(OldDecl)); - Redeclaration = false; - } break; case Ovl_NonFunction: @@ -3647,7 +3717,7 @@ void Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, CXXRecordDecl *Record = Destructor->getParent(); QualType ClassType = Context.getTypeDeclType(Record); - // FIXME: Shouldn't we be able to perform thisc heck even when the class + // FIXME: Shouldn't we be able to perform this check even when the class // type is dependent? Both gcc and edg can handle that. if (!ClassType->isDependentType()) { DeclarationName Name @@ -3943,7 +4013,7 @@ void Sema::AddInitializerToDecl(DeclPtrTy dcl, ExprArg init, bool DirectInit) { QualType T = VDecl->getType(); if (!T->isDependentType() && (!Context.getCanonicalType(T).isConstQualified() || - !T->isIntegralType())) { + !T->isIntegralOrEnumerationType())) { Diag(VDecl->getLocation(), diag::err_member_initialization) << VDecl->getDeclName() << Init->getSourceRange(); VDecl->setInvalidDecl(); @@ -3954,7 +4024,7 @@ void Sema::AddInitializerToDecl(DeclPtrTy dcl, ExprArg init, bool DirectInit) { // can specify a constant-initializer which shall be an // integral constant expression (5.19). if (!Init->isTypeDependent() && - !Init->getType()->isIntegralType()) { + !Init->getType()->isIntegralOrEnumerationType()) { // We have a non-dependent, non-integral or enumeration type. 
Diag(Init->getSourceRange().getBegin(), diag::err_in_class_initializer_non_integral_type) @@ -4264,9 +4334,9 @@ Sema::ActOnParamDeclarator(Scope *S, Declarator &D) { if (getLangOptions().CPlusPlus) CheckExtraCXXDefaultArguments(D); - TypeSourceInfo *TInfo = 0; TagDecl *OwnedDecl = 0; - QualType parmDeclType = GetTypeForDeclarator(D, S, &TInfo, &OwnedDecl); + TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S, &OwnedDecl); + QualType parmDeclType = TInfo->getType(); if (getLangOptions().CPlusPlus && OwnedDecl && OwnedDecl->isDefinition()) { // C++ [dcl.fct]p6: @@ -4331,6 +4401,18 @@ Sema::ActOnParamDeclarator(Scope *S, Declarator &D) { return DeclPtrTy::make(New); } +/// \brief Synthesizes a variable for a parameter arising from a +/// typedef. +ParmVarDecl *Sema::BuildParmVarDeclForTypedef(DeclContext *DC, + SourceLocation Loc, + QualType T) { + ParmVarDecl *Param = ParmVarDecl::Create(Context, DC, Loc, 0, + T, Context.getTrivialTypeSourceInfo(T, Loc), + VarDecl::None, VarDecl::None, 0); + Param->setImplicit(); + return Param; +} + ParmVarDecl *Sema::CheckParameter(DeclContext *DC, TypeSourceInfo *TSInfo, QualType T, IdentifierInfo *Name, @@ -4489,7 +4571,7 @@ Sema::DeclPtrTy Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, DeclPtrTy D) { // But don't complain if we're in GNU89 mode and the previous definition // was an extern inline function. const FunctionDecl *Definition; - if (FD->getBody(Definition) && + if (FD->hasBody(Definition) && !canRedefineFunction(Definition, getLangOptions())) { Diag(FD->getLocation(), diag::err_redefinition) << FD->getDeclName(); Diag(Definition->getLocation(), diag::note_previous_definition); @@ -4964,16 +5046,25 @@ Sema::DeclPtrTy Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, // FIXME: Check explicit specializations more carefully. 
bool isExplicitSpecialization = false; + unsigned NumMatchedTemplateParamLists = TemplateParameterLists.size(); + bool Invalid = false; if (TUK != TUK_Reference) { if (TemplateParameterList *TemplateParams = MatchTemplateParametersToScopeSpecifier(KWLoc, SS, (TemplateParameterList**)TemplateParameterLists.get(), TemplateParameterLists.size(), TUK == TUK_Friend, - isExplicitSpecialization)) { + isExplicitSpecialization, + Invalid)) { + // All but one template parameter lists have been matching. + --NumMatchedTemplateParamLists; + if (TemplateParams->size() > 0) { // This is a declaration or definition of a class template (which may // be a member of another template). + if (Invalid) + return DeclPtrTy(); + OwnedDecl = false; DeclResult Result = CheckClassTemplate(S, TagSpec, TUK, KWLoc, SS, Name, NameLoc, Attr, @@ -4988,14 +5079,11 @@ Sema::DeclPtrTy Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, isExplicitSpecialization = true; } } - - TemplateParameterLists.release(); } DeclContext *SearchDC = CurContext; DeclContext *DC = CurContext; bool isStdBadAlloc = false; - bool Invalid = false; RedeclarationKind Redecl = ForRedeclaration; if (TUK == TUK_Friend || TUK == TUK_Reference) @@ -5231,7 +5319,8 @@ Sema::DeclPtrTy Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, // for the consumer of this Decl to know it doesn't own it. // For our current ASTs this shouldn't be a problem, but will // need to be changed with DeclGroups. - if (TUK == TUK_Reference || TUK == TUK_Friend) + if ((TUK == TUK_Reference && !PrevTagDecl->getFriendObjectKind()) || + TUK == TUK_Friend) return DeclPtrTy::make(PrevTagDecl); // Diagnose attempts to redefine a tag. @@ -5364,10 +5453,17 @@ CreateNewDecl: New = EnumDecl::Create(Context, SearchDC, Loc, Name, KWLoc, cast_or_null<EnumDecl>(PrevDecl)); // If this is an undefined enum, warn. - if (TUK != TUK_Definition && !Invalid) { - unsigned DK = getLangOptions().CPlusPlus? 
diag::err_forward_ref_enum - : diag::ext_forward_ref_enum; - Diag(Loc, DK); + if (TUK != TUK_Definition && !Invalid) { + TagDecl *Def; + if (PrevDecl && (Def = cast<EnumDecl>(PrevDecl)->getDefinition())) { + Diag(Loc, diag::ext_forward_ref_enum_def) + << New; + Diag(Def->getLocation(), diag::note_previous_definition); + } else { + Diag(Loc, + getLangOptions().CPlusPlus? diag::err_forward_ref_enum + : diag::ext_forward_ref_enum); + } } } else { // struct/union/class @@ -5392,6 +5488,11 @@ CreateNewDecl: NestedNameSpecifier *NNS = static_cast<NestedNameSpecifier*>(SS.getScopeRep()); New->setQualifierInfo(NNS, SS.getRange()); + if (NumMatchedTemplateParamLists > 0) { + New->setTemplateParameterListsInfo(Context, + NumMatchedTemplateParamLists, + (TemplateParameterList**) TemplateParameterLists.release()); + } } else Invalid = true; @@ -5547,7 +5648,7 @@ bool Sema::VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, // C99 6.7.2.1p4 - verify the field type. // C++ 9.6p3: A bit-field shall have integral or enumeration type. - if (!FieldTy->isDependentType() && !FieldTy->isIntegralType()) { + if (!FieldTy->isDependentType() && !FieldTy->isIntegralOrEnumerationType()) { // Handle incomplete types with specific error. if (RequireCompleteType(FieldLoc, FieldTy, diag::err_field_incomplete)) return true; @@ -5629,8 +5730,8 @@ FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record, SourceLocation Loc = DeclStart; if (II) Loc = D.getIdentifierLoc(); - TypeSourceInfo *TInfo = 0; - QualType T = GetTypeForDeclarator(D, S, &TInfo); + TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S); + QualType T = TInfo->getType(); if (getLangOptions().CPlusPlus) CheckExtraCXXDefaultArguments(D); @@ -5740,6 +5841,24 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T, ZeroWidth = false; } + // Check that 'mutable' is consistent with the type of the declaration. 
+ if (!InvalidDecl && Mutable) { + unsigned DiagID = 0; + if (T->isReferenceType()) + DiagID = diag::err_mutable_reference; + else if (T.isConstQualified()) + DiagID = diag::err_mutable_const; + + if (DiagID) { + SourceLocation ErrLoc = Loc; + if (D && D->getDeclSpec().getStorageClassSpecLoc().isValid()) + ErrLoc = D->getDeclSpec().getStorageClassSpecLoc(); + Diag(ErrLoc, DiagID); + Mutable = false; + InvalidDecl = true; + } + } + FieldDecl *NewFD = FieldDecl::Create(Context, Record, Loc, II, T, TInfo, BitWidth, Mutable); if (InvalidDecl) @@ -5761,41 +5880,42 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T, if (const RecordType *RT = EltTy->getAs<RecordType>()) { CXXRecordDecl* RDecl = cast<CXXRecordDecl>(RT->getDecl()); - - if (!RDecl->hasTrivialConstructor()) - CXXRecord->setHasTrivialConstructor(false); - if (!RDecl->hasTrivialCopyConstructor()) - CXXRecord->setHasTrivialCopyConstructor(false); - if (!RDecl->hasTrivialCopyAssignment()) - CXXRecord->setHasTrivialCopyAssignment(false); - if (!RDecl->hasTrivialDestructor()) - CXXRecord->setHasTrivialDestructor(false); - - // C++ 9.5p1: An object of a class with a non-trivial - // constructor, a non-trivial copy constructor, a non-trivial - // destructor, or a non-trivial copy assignment operator - // cannot be a member of a union, nor can an array of such - // objects. - // TODO: C++0x alters this restriction significantly. - if (Record->isUnion()) { - // We check for copy constructors before constructors - // because otherwise we'll never get complaints about - // copy constructors. 
- - CXXSpecialMember member = CXXInvalid; + if (RDecl->getDefinition()) { + if (!RDecl->hasTrivialConstructor()) + CXXRecord->setHasTrivialConstructor(false); if (!RDecl->hasTrivialCopyConstructor()) - member = CXXCopyConstructor; - else if (!RDecl->hasTrivialConstructor()) - member = CXXConstructor; - else if (!RDecl->hasTrivialCopyAssignment()) - member = CXXCopyAssignment; - else if (!RDecl->hasTrivialDestructor()) - member = CXXDestructor; - - if (member != CXXInvalid) { - Diag(Loc, diag::err_illegal_union_member) << Name << member; - DiagnoseNontrivial(RT, member); - NewFD->setInvalidDecl(); + CXXRecord->setHasTrivialCopyConstructor(false); + if (!RDecl->hasTrivialCopyAssignment()) + CXXRecord->setHasTrivialCopyAssignment(false); + if (!RDecl->hasTrivialDestructor()) + CXXRecord->setHasTrivialDestructor(false); + + // C++ 9.5p1: An object of a class with a non-trivial + // constructor, a non-trivial copy constructor, a non-trivial + // destructor, or a non-trivial copy assignment operator + // cannot be a member of a union, nor can an array of such + // objects. + // TODO: C++0x alters this restriction significantly. + if (Record->isUnion()) { + // We check for copy constructors before constructors + // because otherwise we'll never get complaints about + // copy constructors. 
+ + CXXSpecialMember member = CXXInvalid; + if (!RDecl->hasTrivialCopyConstructor()) + member = CXXCopyConstructor; + else if (!RDecl->hasTrivialConstructor()) + member = CXXConstructor; + else if (!RDecl->hasTrivialCopyAssignment()) + member = CXXCopyAssignment; + else if (!RDecl->hasTrivialDestructor()) + member = CXXDestructor; + + if (member != CXXInvalid) { + Diag(Loc, diag::err_illegal_union_member) << Name << member; + DiagnoseNontrivial(RT, member); + NewFD->setInvalidDecl(); + } } } } @@ -5842,7 +5962,7 @@ void Sema::DiagnoseNontrivial(const RecordType* T, CXXSpecialMember member) { typedef CXXRecordDecl::ctor_iterator ctor_iter; for (ctor_iter ci = RD->ctor_begin(), ce = RD->ctor_end(); ci != ce;++ci){ const FunctionDecl *body = 0; - ci->getBody(body); + ci->hasBody(body); if (!body || !cast<CXXConstructorDecl>(body)->isImplicitlyDefined()) { SourceLocation CtorLoc = ci->getLocation(); Diag(CtorLoc, diag::note_nontrivial_user_defined) << QT << member; @@ -5876,7 +5996,7 @@ void Sema::DiagnoseNontrivial(const RecordType* T, CXXSpecialMember member) { case CXXDestructor: if (RD->hasUserDeclaredDestructor()) { - SourceLocation DtorLoc = RD->getDestructor(Context)->getLocation(); + SourceLocation DtorLoc = LookupDestructor(RD)->getLocation(); Diag(DtorLoc, diag::note_nontrivial_user_defined) << QT << member; return; } @@ -5985,8 +6105,8 @@ Sema::DeclPtrTy Sema::ActOnIvar(Scope *S, // FIXME: Unnamed fields can be handled in various different ways, for // example, unnamed unions inject all members into the struct namespace! 
- TypeSourceInfo *TInfo = 0; - QualType T = GetTypeForDeclarator(D, S, &TInfo); + TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S); + QualType T = TInfo->getType(); if (BitWidth) { // 6.7.2.1p3, 6.7.2.1p4 @@ -6188,6 +6308,12 @@ void Sema::ActOnFields(Scope* S, (FD->getType()->isObjCObjectPointerType() || FD->getType().isObjCGCStrong())) Record->setHasObjectMember(true); + else if (Context.getAsArrayType(FD->getType())) { + QualType BaseType = Context.getBaseElementType(FD->getType()); + if (Record && BaseType->isRecordType() && + BaseType->getAs<RecordType>()->getDecl()->hasObjectMember()) + Record->setHasObjectMember(true); + } // Keep track of the number of named members. if (FD->getIdentifier()) ++NumNamedMembers; @@ -6241,7 +6367,7 @@ void Sema::ActOnFields(Scope* S, static bool isRepresentableIntegerValue(ASTContext &Context, llvm::APSInt &Value, QualType T) { - assert(T->isIntegralType() && "Integral type required!"); + assert(T->isIntegralType(Context) && "Integral type required!"); unsigned BitWidth = Context.getIntWidth(T); if (Value.isUnsigned() || Value.isNonNegative()) @@ -6255,7 +6381,7 @@ static bool isRepresentableIntegerValue(ASTContext &Context, static QualType getNextLargerIntegralType(ASTContext &Context, QualType T) { // FIXME: Int128/UInt128 support, which also needs to be introduced into // enum checking below. 
- assert(T->isIntegralType() && "Integral type required!"); + assert(T->isIntegralType(Context) && "Integral type required!"); const unsigned NumTypes = 4; QualType SignedIntegralTypes[NumTypes] = { Context.ShortTy, Context.IntTy, Context.LongTy, Context.LongLongTy diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp index c6dcc3b..3b82f58 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp @@ -913,7 +913,7 @@ static void HandleWeakImportAttr(Decl *D, const AttributeList &Attr, Sema &S) { if (VarDecl *VD = dyn_cast<VarDecl>(D)) { isDef = (!VD->hasExternalStorage() || VD->getInit()); } else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { - isDef = FD->getBody(); + isDef = FD->hasBody(); } else if (isa<ObjCPropertyDecl>(D) || isa<ObjCMethodDecl>(D)) { // We ignore weak import on properties and methods return; @@ -1180,6 +1180,54 @@ static FormatAttrKind getFormatAttrKind(llvm::StringRef Format) { return InvalidFormat; } +/// Handle __attribute__((init_priority(priority))) attributes based on +/// http://gcc.gnu.org/onlinedocs/gcc/C_002b_002b-Attributes.html +static void HandleInitPriorityAttr(Decl *d, const AttributeList &Attr, + Sema &S) { + if (!S.getLangOptions().CPlusPlus) { + S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName(); + return; + } + + if (!isa<VarDecl>(d) || S.getCurFunctionOrMethodDecl()) { + S.Diag(Attr.getLoc(), diag::err_init_priority_object_attr); + Attr.setInvalid(); + return; + } + QualType T = dyn_cast<VarDecl>(d)->getType(); + if (S.Context.getAsArrayType(T)) + T = S.Context.getBaseElementType(T); + if (!T->getAs<RecordType>()) { + S.Diag(Attr.getLoc(), diag::err_init_priority_object_attr); + Attr.setInvalid(); + return; + } + + if (Attr.getNumArgs() != 1) { + S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1; + Attr.setInvalid(); + return; + } + Expr *priorityExpr = 
static_cast<Expr *>(Attr.getArg(0)); + + llvm::APSInt priority(32); + if (priorityExpr->isTypeDependent() || priorityExpr->isValueDependent() || + !priorityExpr->isIntegerConstantExpr(priority, S.Context)) { + S.Diag(Attr.getLoc(), diag::err_attribute_argument_not_int) + << "init_priority" << priorityExpr->getSourceRange(); + Attr.setInvalid(); + return; + } + unsigned prioritynum = priority.getZExtValue(); + if (prioritynum < 101 || prioritynum > 65535) { + S.Diag(Attr.getLoc(), diag::err_attribute_argument_outof_range) + << priorityExpr->getSourceRange(); + Attr.setInvalid(); + return; + } + d->addAttr(::new (S.Context) InitPriorityAttr(prioritynum)); +} + /// Handle __attribute__((format(type,idx,firstarg))) attributes based on /// http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html static void HandleFormatAttr(Decl *d, const AttributeList &Attr, Sema &S) { @@ -1362,9 +1410,10 @@ static void HandleTransparentUnionAttr(Decl *d, const AttributeList &Attr, FieldDecl *FirstField = *Field; QualType FirstType = FirstField->getType(); - if (FirstType->isFloatingType() || FirstType->isVectorType()) { + if (FirstType->hasFloatingRepresentation() || FirstType->isVectorType()) { S.Diag(FirstField->getLocation(), - diag::warn_transparent_union_attribute_floating); + diag::warn_transparent_union_attribute_floating) + << FirstType->isVectorType() << FirstType; return; } @@ -1410,7 +1459,7 @@ static void HandleAnnotateAttr(Decl *d, const AttributeList &Attr, Sema &S) { d->addAttr(::new (S.Context) AnnotateAttr(S.Context, SE->getString())); } -static void HandleAlignedAttr(Decl *d, const AttributeList &Attr, Sema &S) { +static void HandleAlignedAttr(Decl *D, const AttributeList &Attr, Sema &S) { // check the attribute arguments. 
if (Attr.getNumArgs() > 1) { S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1; @@ -1421,30 +1470,36 @@ static void HandleAlignedAttr(Decl *d, const AttributeList &Attr, Sema &S) { // than GNU's, and should error out when it is used to specify a // weaker alignment, rather than being silently ignored. - unsigned Align = 0; if (Attr.getNumArgs() == 0) { // FIXME: This should be the target specific maximum alignment. // (For now we just use 128 bits which is the maximum on X86). - Align = 128; - d->addAttr(::new (S.Context) AlignedAttr(Align)); + D->addAttr(::new (S.Context) AlignedAttr(128)); + return; + } + + S.AddAlignedAttr(Attr.getLoc(), D, static_cast<Expr *>(Attr.getArg(0))); +} + +void Sema::AddAlignedAttr(SourceLocation AttrLoc, Decl *D, Expr *E) { + if (E->isTypeDependent() || E->isValueDependent()) { + // Save dependent expressions in the AST to be instantiated. + D->addAttr(::new (Context) AlignedAttr(E)); return; } - Expr *alignmentExpr = static_cast<Expr *>(Attr.getArg(0)); llvm::APSInt Alignment(32); - if (alignmentExpr->isTypeDependent() || alignmentExpr->isValueDependent() || - !alignmentExpr->isIntegerConstantExpr(Alignment, S.Context)) { - S.Diag(Attr.getLoc(), diag::err_attribute_argument_not_int) - << "aligned" << alignmentExpr->getSourceRange(); + if (!E->isIntegerConstantExpr(Alignment, Context)) { + Diag(AttrLoc, diag::err_attribute_argument_not_int) + << "aligned" << E->getSourceRange(); return; } if (!llvm::isPowerOf2_64(Alignment.getZExtValue())) { - S.Diag(Attr.getLoc(), diag::err_attribute_aligned_not_power_of_two) - << alignmentExpr->getSourceRange(); + Diag(AttrLoc, diag::err_attribute_aligned_not_power_of_two) + << E->getSourceRange(); return; } - d->addAttr(::new (S.Context) AlignedAttr(Alignment.getZExtValue() * 8)); + D->addAttr(::new (Context) AlignedAttr(Alignment.getZExtValue() * 8)); } /// HandleModeAttr - This attribute modifies the width of a decl with primitive @@ -1525,7 +1580,7 @@ static void 
HandleModeAttr(Decl *D, const AttributeList &Attr, Sema &S) { if (!OldTy->getAs<BuiltinType>() && !OldTy->isComplexType()) S.Diag(Attr.getLoc(), diag::err_mode_not_primitive); else if (IntegerMode) { - if (!OldTy->isIntegralType()) + if (!OldTy->isIntegralOrEnumerationType()) S.Diag(Attr.getLoc(), diag::err_mode_wrong_type); } else if (ComplexMode) { if (!OldTy->isComplexType()) @@ -1650,6 +1705,23 @@ static void HandleNoInlineAttr(Decl *d, const AttributeList &Attr, Sema &S) { d->addAttr(::new (S.Context) NoInlineAttr()); } +static void HandleNoInstrumentFunctionAttr(Decl *d, const AttributeList &Attr, + Sema &S) { + // check the attribute arguments. + if (Attr.getNumArgs() != 0) { + S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0; + return; + } + + if (!isa<FunctionDecl>(d)) { + S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type) + << Attr.getName() << 0 /*function*/; + return; + } + + d->addAttr(::new (S.Context) NoInstrumentFunctionAttr()); +} + static void HandleGNUInlineAttr(Decl *d, const AttributeList &Attr, Sema &S) { // check the attribute arguments. if (Attr.getNumArgs() != 0) { @@ -1951,6 +2023,9 @@ static void ProcessDeclAttribute(Scope *scope, Decl *D, case AttributeList::AT_reqd_wg_size: HandleReqdWorkGroupSize(D, Attr, S); break; + case AttributeList::AT_init_priority: + HandleInitPriorityAttr(D, Attr, S); break; + case AttributeList::AT_packed: HandlePackedAttr (D, Attr, S); break; case AttributeList::AT_section: HandleSectionAttr (D, Attr, S); break; case AttributeList::AT_unavailable: HandleUnavailableAttr (D, Attr, S); break; @@ -1979,9 +2054,11 @@ static void ProcessDeclAttribute(Scope *scope, Decl *D, case AttributeList::AT_noinline: HandleNoInlineAttr (D, Attr, S); break; case AttributeList::AT_regparm: HandleRegparmAttr (D, Attr, S); break; case AttributeList::IgnoredAttribute: - case AttributeList::AT_no_instrument_function: // Interacts with -pg. 
// Just ignore break; + case AttributeList::AT_no_instrument_function: // Interacts with -pg. + HandleNoInstrumentFunctionAttr(D, Attr, S); + break; case AttributeList::AT_stdcall: case AttributeList::AT_cdecl: case AttributeList::AT_fastcall: @@ -1992,7 +2069,8 @@ static void ProcessDeclAttribute(Scope *scope, Decl *D, // Ask target about the attribute. const TargetAttributesSema &TargetAttrs = S.getTargetAttributesSema(); if (!TargetAttrs.ProcessDeclAttribute(scope, D, Attr, S)) - S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName(); + S.Diag(Attr.getLoc(), diag::warn_unknown_attribute_ignored) + << Attr.getName(); break; } } diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp index 148d146..bd97df2 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp @@ -871,6 +871,17 @@ std::string Sema::getAmbiguousPathsDisplayString(CXXBasePaths &Paths) { // C++ class member Handling //===----------------------------------------------------------------------===// +/// ActOnAccessSpecifier - Parsed an access specifier followed by a colon. +Sema::DeclPtrTy +Sema::ActOnAccessSpecifier(AccessSpecifier Access, + SourceLocation ASLoc, SourceLocation ColonLoc) { + assert(Access != AS_none && "Invalid kind for syntactic access specifier!"); + AccessSpecDecl* ASDecl = AccessSpecDecl::Create(Context, Access, CurContext, + ASLoc, ColonLoc); + CurContext->addHiddenDecl(ASDecl); + return DeclPtrTy::make(ASDecl); +} + /// ActOnCXXMemberDeclarator - This is invoked when a C++ class member /// declarator is parsed. 
'AS' is the access specifier, 'BW' specifies the /// bitfield width if there is one and 'InitExpr' specifies the initializer if @@ -886,10 +897,18 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, Expr *Init = static_cast<Expr*>(InitExpr); SourceLocation Loc = D.getIdentifierLoc(); - bool isFunc = D.isFunctionDeclarator(); - + assert(isa<CXXRecordDecl>(CurContext)); assert(!DS.isFriendSpecified()); + bool isFunc = false; + if (D.isFunctionDeclarator()) + isFunc = true; + else if (D.getNumTypeObjects() == 0 && + D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_typename) { + QualType TDType = GetTypeFromParser(DS.getTypeRep()); + isFunc = TDType->isFunctionType(); + } + // C++ 9.2p6: A member shall not be declared to have automatic storage // duration (auto, register) or with the extern storage-class-specifier. // C++ 7.1.1p8: The mutable specifier can be applied only to names of class @@ -911,22 +930,6 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, // FIXME: It would be nicer if the keyword was ignored only for this // declarator. Otherwise we could get follow-up errors. D.getMutableDeclSpec().ClearStorageClassSpecs(); - } else { - QualType T = GetTypeForDeclarator(D, S); - diag::kind err = static_cast<diag::kind>(0); - if (T->isReferenceType()) - err = diag::err_mutable_reference; - else if (T.isConstQualified()) - err = diag::err_mutable_const; - if (err != 0) { - if (DS.getStorageClassSpecLoc().isValid()) - Diag(DS.getStorageClassSpecLoc(), err); - else - Diag(DS.getThreadSpecLoc(), err); - // FIXME: It would be nicer if the keyword was ignored only for this - // declarator. Otherwise we could get follow-up errors. 
- D.getMutableDeclSpec().ClearStorageClassSpecs(); - } } break; default: @@ -938,18 +941,6 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, D.getMutableDeclSpec().ClearStorageClassSpecs(); } - if (!isFunc && - D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_typename && - D.getNumTypeObjects() == 0) { - // Check also for this case: - // - // typedef int f(); - // f a; - // - QualType TDType = GetTypeFromParser(DS.getTypeRep()); - isFunc = TDType->isFunctionType(); - } - bool isInstField = ((DS.getStorageClassSpec() == DeclSpec::SCS_unspecified || DS.getStorageClassSpec() == DeclSpec::SCS_mutable) && !isFunc); @@ -1148,6 +1139,7 @@ Sema::ActOnMemInitializer(DeclPtrTy ConstructorD, return true; R.clear(); + R.setLookupName(MemberOrBase); } } @@ -1226,18 +1218,25 @@ Sema::ActOnMemInitializer(DeclPtrTy ConstructorD, /// containing the field that is being initialized. Returns true if there is an /// uninitialized field was used an updates the SourceLocation parameter; false /// otherwise. -static bool InitExprContainsUninitializedFields(const Stmt* S, - const FieldDecl* LhsField, - SourceLocation* L) { - const MemberExpr* ME = dyn_cast<MemberExpr>(S); - if (ME) { - const NamedDecl* RhsField = ME->getMemberDecl(); +static bool InitExprContainsUninitializedFields(const Stmt *S, + const FieldDecl *LhsField, + SourceLocation *L) { + if (isa<CallExpr>(S)) { + // Do not descend into function calls or constructors, as the use + // of an uninitialized field may be valid. One would have to inspect + // the contents of the function/ctor to determine if it is safe or not. + // i.e. Pass-by-value is never safe, but pass-by-reference and pointers + // may be safe, depending on what the function/ctor does. + return false; + } + if (const MemberExpr *ME = dyn_cast<MemberExpr>(S)) { + const NamedDecl *RhsField = ME->getMemberDecl(); if (RhsField == LhsField) { // Initializing a field with itself. Throw a warning. // But wait; there are exceptions! 
// Exception #1: The field may not belong to this record. // e.g. Foo(const Foo& rhs) : A(rhs.A) {} - const Expr* base = ME->getBase(); + const Expr *base = ME->getBase(); if (base != NULL && !isa<CXXThisExpr>(base->IgnoreParenCasts())) { // Even though the field matches, it does not belong to this record. return false; @@ -1248,21 +1247,16 @@ static bool InitExprContainsUninitializedFields(const Stmt* S, return true; } } - bool found = false; - for (Stmt::const_child_iterator it = S->child_begin(); - it != S->child_end() && found == false; - ++it) { - if (isa<CallExpr>(S)) { - // Do not descend into function calls or constructors, as the use - // of an uninitialized field may be valid. One would have to inspect - // the contents of the function/ctor to determine if it is safe or not. - // i.e. Pass-by-value is never safe, but pass-by-reference and pointers - // may be safe, depending on what the function/ctor does. + for (Stmt::const_child_iterator it = S->child_begin(), e = S->child_end(); + it != e; ++it) { + if (!*it) { + // An expression such as 'member(arg ?: "")' may trigger this. continue; } - found = InitExprContainsUninitializedFields(*it, LhsField, L); + if (InitExprContainsUninitializedFields(*it, LhsField, L)) + return true; } - return found; + return false; } Sema::MemInitResult @@ -1375,8 +1369,48 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, for (unsigned i = 0; i < NumArgs; i++) HasDependentArg |= Args[i]->isTypeDependent(); - SourceLocation BaseLoc = BaseTInfo->getTypeLoc().getLocalSourceRange().getBegin(); - if (BaseType->isDependentType() || HasDependentArg) { + SourceLocation BaseLoc + = BaseTInfo->getTypeLoc().getLocalSourceRange().getBegin(); + + if (!BaseType->isDependentType() && !BaseType->isRecordType()) + return Diag(BaseLoc, diag::err_base_init_does_not_name_class) + << BaseType << BaseTInfo->getTypeLoc().getLocalSourceRange(); + + // C++ [class.base.init]p2: + // [...] 
Unless the mem-initializer-id names a nonstatic data + // member of the constructor’s class or a direct or virtual base + // of that class, the mem-initializer is ill-formed. A + // mem-initializer-list can initialize a base class using any + // name that denotes that base class type. + bool Dependent = BaseType->isDependentType() || HasDependentArg; + + // Check for direct and virtual base classes. + const CXXBaseSpecifier *DirectBaseSpec = 0; + const CXXBaseSpecifier *VirtualBaseSpec = 0; + if (!Dependent) { + FindBaseInitializer(*this, ClassDecl, BaseType, DirectBaseSpec, + VirtualBaseSpec); + + // C++ [base.class.init]p2: + // Unless the mem-initializer-id names a nonstatic data member of the + // constructor's class or a direct or virtual base of that class, the + // mem-initializer is ill-formed. + if (!DirectBaseSpec && !VirtualBaseSpec) { + // If the class has any dependent bases, then it's possible that + // one of those types will resolve to the same type as + // BaseType. Therefore, just treat this as a dependent base + // class initialization. FIXME: Should we try to check the + // initialization anyway? It seems odd. + if (ClassDecl->hasAnyDependentBases()) + Dependent = true; + else + return Diag(BaseLoc, diag::err_not_direct_base_or_virtual) + << BaseType << Context.getTypeDeclType(ClassDecl) + << BaseTInfo->getTypeLoc().getLocalSourceRange(); + } + } + + if (Dependent) { // Can't check initialization for a base of dependent type or when // any of the arguments are type-dependent expressions. OwningExprResult BaseInit @@ -1396,23 +1430,6 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, BaseInit.takeAs<Expr>(), RParenLoc); } - - if (!BaseType->isRecordType()) - return Diag(BaseLoc, diag::err_base_init_does_not_name_class) - << BaseType << BaseTInfo->getTypeLoc().getLocalSourceRange(); - - // C++ [class.base.init]p2: - // [...] 
Unless the mem-initializer-id names a nonstatic data - // member of the constructor’s class or a direct or virtual base - // of that class, the mem-initializer is ill-formed. A - // mem-initializer-list can initialize a base class using any - // name that denotes that base class type. - - // Check for direct and virtual base classes. - const CXXBaseSpecifier *DirectBaseSpec = 0; - const CXXBaseSpecifier *VirtualBaseSpec = 0; - FindBaseInitializer(*this, ClassDecl, BaseType, DirectBaseSpec, - VirtualBaseSpec); // C++ [base.class.init]p2: // If a mem-initializer-id is ambiguous because it designates both @@ -1421,14 +1438,6 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, if (DirectBaseSpec && VirtualBaseSpec) return Diag(BaseLoc, diag::err_base_init_direct_and_virtual) << BaseType << BaseTInfo->getTypeLoc().getLocalSourceRange(); - // C++ [base.class.init]p2: - // Unless the mem-initializer-id names a nonstatic data membeer of the - // constructor's class ot a direst or virtual base of that class, the - // mem-initializer is ill-formed. 
- if (!DirectBaseSpec && !VirtualBaseSpec) - return Diag(BaseLoc, diag::err_not_direct_base_or_virtual) - << BaseType << Context.getTypeDeclType(ClassDecl) - << BaseTInfo->getTypeLoc().getLocalSourceRange(); CXXBaseSpecifier *BaseSpec = const_cast<CXXBaseSpecifier *>(DirectBaseSpec); @@ -1571,8 +1580,9 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor, if (Field->isInvalidDecl()) return true; + SourceLocation Loc = Constructor->getLocation(); + if (ImplicitInitKind == IIK_Copy) { - SourceLocation Loc = Constructor->getLocation(); ParmVarDecl *Param = Constructor->getParamDecl(0); QualType ParamType = Param->getType().getNonReferenceType(); @@ -1680,7 +1690,7 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor, if (FieldBaseElementType->isRecordType()) { InitializedEntity InitEntity = InitializedEntity::InitializeMember(Field); InitializationKind InitKind = - InitializationKind::CreateDefault(Constructor->getLocation()); + InitializationKind::CreateDefault(Loc); InitializationSequence InitSeq(SemaRef, InitEntity, InitKind, 0, 0); Sema::OwningExprResult MemberInit = @@ -1692,10 +1702,9 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor, CXXMemberInit = new (SemaRef.Context) CXXBaseOrMemberInitializer(SemaRef.Context, - Field, SourceLocation(), - SourceLocation(), + Field, Loc, Loc, MemberInit.takeAs<Expr>(), - SourceLocation()); + Loc); return false; } @@ -1744,38 +1753,67 @@ struct BaseAndFieldInfo { }; } +static void RecordFieldInitializer(BaseAndFieldInfo &Info, + FieldDecl *Top, FieldDecl *Field, + CXXBaseOrMemberInitializer *Init) { + // If the member doesn't need to be initialized, Init will still be null. 
+ if (!Init) + return; + + Info.AllToInit.push_back(Init); + if (Field != Top) { + Init->setMember(Top); + Init->setAnonUnionMember(Field); + } +} + static bool CollectFieldInitializer(BaseAndFieldInfo &Info, FieldDecl *Top, FieldDecl *Field) { - // Overwhelmingly common case: we have a direct initializer for this field. + // Overwhelmingly common case: we have a direct initializer for this field. if (CXXBaseOrMemberInitializer *Init = Info.AllBaseFields.lookup(Field)) { - Info.AllToInit.push_back(Init); - - if (Field != Top) { - Init->setMember(Top); - Init->setAnonUnionMember(Field); - } + RecordFieldInitializer(Info, Top, Field, Init); return false; } if (Info.IIK == IIK_Default && Field->isAnonymousStructOrUnion()) { const RecordType *FieldClassType = Field->getType()->getAs<RecordType>(); assert(FieldClassType && "anonymous struct/union without record type"); - - // Walk through the members, tying in any initializers for fields - // we find. The earlier semantic checks should prevent redundant - // initialization of union members, given the requirement that - // union members never have non-trivial default constructors. - - // TODO: in C++0x, it might be legal to have union members with - // non-trivial default constructors in unions. Revise this - // implementation then with the appropriate semantics. CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(FieldClassType->getDecl()); - for (RecordDecl::field_iterator FA = FieldClassDecl->field_begin(), - EA = FieldClassDecl->field_end(); FA != EA; FA++) - if (CollectFieldInitializer(Info, Top, *FA)) - return true; + + // Even though union members never have non-trivial default + // constructions in C++03, we still build member initializers for aggregate + // record types which can be union members, and C++0x allows non-trivial + // default constructors for union members, so we ensure that only one + // member is initialized for these. 
+ if (FieldClassDecl->isUnion()) { + // First check for an explicit initializer for one field. + for (RecordDecl::field_iterator FA = FieldClassDecl->field_begin(), + EA = FieldClassDecl->field_end(); FA != EA; FA++) { + if (CXXBaseOrMemberInitializer *Init = Info.AllBaseFields.lookup(*FA)) { + RecordFieldInitializer(Info, Top, *FA, Init); + + // Once we've initialized a field of an anonymous union, the union + // field in the class is also initialized, so exit immediately. + return false; + } + } + + // Fallthrough and construct a default initializer for the union as + // a whole, which can call its default constructor if such a thing exists + // (C++0x perhaps). FIXME: It's not clear that this is the correct + // behavior going forward with C++0x, when anonymous unions there are + // finalized, we should revisit this. + } else { + // For structs, we simply descend through to initialize all members where + // necessary. + for (RecordDecl::field_iterator FA = FieldClassDecl->field_begin(), + EA = FieldClassDecl->field_end(); FA != EA; FA++) { + if (CollectFieldInitializer(Info, Top, *FA)) + return true; + } + } } // Don't try to build an implicit initializer if there were semantic @@ -1787,15 +1825,8 @@ static bool CollectFieldInitializer(BaseAndFieldInfo &Info, CXXBaseOrMemberInitializer *Init = 0; if (BuildImplicitMemberInitializer(Info.S, Info.Ctor, Info.IIK, Field, Init)) return true; - - // If the member doesn't need to be initialized, Init will still be null. 
- if (!Init) return false; - Info.AllToInit.push_back(Init); - if (Top != Field) { - Init->setMember(Top); - Init->setAnonUnionMember(Field); - } + RecordFieldInitializer(Info, Top, Field, Init); return false; } @@ -2199,7 +2230,7 @@ Sema::MarkBaseAndMemberDestructorsReferenced(SourceLocation Location, if (FieldClassDecl->hasTrivialDestructor()) continue; - CXXDestructorDecl *Dtor = FieldClassDecl->getDestructor(Context); + CXXDestructorDecl *Dtor = LookupDestructor(FieldClassDecl); CheckDestructorAccess(Field->getLocation(), Dtor, PDiag(diag::err_access_dtor_field) << Field->getDeclName() @@ -2225,7 +2256,7 @@ Sema::MarkBaseAndMemberDestructorsReferenced(SourceLocation Location, if (BaseClassDecl->hasTrivialDestructor()) continue; - CXXDestructorDecl *Dtor = BaseClassDecl->getDestructor(Context); + CXXDestructorDecl *Dtor = LookupDestructor(BaseClassDecl); // FIXME: caret should be on the start of the class name CheckDestructorAccess(Base->getSourceRange().getBegin(), Dtor, @@ -2252,7 +2283,7 @@ Sema::MarkBaseAndMemberDestructorsReferenced(SourceLocation Location, if (BaseClassDecl->hasTrivialDestructor()) continue; - CXXDestructorDecl *Dtor = BaseClassDecl->getDestructor(Context); + CXXDestructorDecl *Dtor = LookupDestructor(BaseClassDecl); CheckDestructorAccess(ClassDecl->getLocation(), Dtor, PDiag(diag::err_access_dtor_vbase) << VBase->getType()); @@ -2326,6 +2357,10 @@ bool Sema::RequireNonAbstractType(SourceLocation Loc, QualType T, CXXFinalOverriderMap FinalOverriders; RD->getFinalOverriders(FinalOverriders); + // Keep a set of seen pure methods so we won't diagnose the same method + // more than once. 
+ llvm::SmallPtrSet<const CXXMethodDecl *, 8> SeenPureMethods; + for (CXXFinalOverriderMap::iterator M = FinalOverriders.begin(), MEnd = FinalOverriders.end(); M != MEnd; @@ -2345,6 +2380,9 @@ bool Sema::RequireNonAbstractType(SourceLocation Loc, QualType T, if (!SO->second.front().Method->isPure()) continue; + if (!SeenPureMethods.insert(SO->second.front().Method)) + continue; + Diag(SO->second.front().Method->getLocation(), diag::note_pure_virtual_function) << SO->second.front().Method->getDeclName(); @@ -2422,12 +2460,12 @@ namespace { /// \brief Perform semantic checks on a class definition that has been /// completing, introducing implicitly-declared members, checking for /// abstract types, etc. -void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) { +void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) { if (!Record || Record->isInvalidDecl()) return; if (!Record->isDependentType()) - AddImplicitlyDeclaredMembersToClass(S, Record); + AddImplicitlyDeclaredMembersToClass(Record); if (Record->isInvalidDecl()) return; @@ -2546,268 +2584,101 @@ void Sema::ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc, (DeclPtrTy*)FieldCollector->getCurFields(), FieldCollector->getCurNumFields(), LBrac, RBrac, AttrList); - CheckCompletedCXXClass(S, - dyn_cast_or_null<CXXRecordDecl>(TagDecl.getAs<Decl>())); + CheckCompletedCXXClass( + dyn_cast_or_null<CXXRecordDecl>(TagDecl.getAs<Decl>())); +} + +namespace { + /// \brief Helper class that collects exception specifications for + /// implicitly-declared special member functions. + class ImplicitExceptionSpecification { + ASTContext &Context; + bool AllowsAllExceptions; + llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; + llvm::SmallVector<QualType, 4> Exceptions; + + public: + explicit ImplicitExceptionSpecification(ASTContext &Context) + : Context(Context), AllowsAllExceptions(false) { } + + /// \brief Whether the special member function should have any + /// exception specification at all. 
+ bool hasExceptionSpecification() const { + return !AllowsAllExceptions; + } + + /// \brief Whether the special member function should have a + /// throw(...) exception specification (a Microsoft extension). + bool hasAnyExceptionSpecification() const { + return false; + } + + /// \brief The number of exceptions in the exception specification. + unsigned size() const { return Exceptions.size(); } + + /// \brief The set of exceptions in the exception specification. + const QualType *data() const { return Exceptions.data(); } + + /// \brief Note that + void CalledDecl(CXXMethodDecl *Method) { + // If we already know that we allow all exceptions, do nothing. + if (AllowsAllExceptions || !Method) + return; + + const FunctionProtoType *Proto + = Method->getType()->getAs<FunctionProtoType>(); + + // If this function can throw any exceptions, make a note of that. + if (!Proto->hasExceptionSpec() || Proto->hasAnyExceptionSpec()) { + AllowsAllExceptions = true; + ExceptionsSeen.clear(); + Exceptions.clear(); + return; + } + + // Record the exceptions in this function's exception specification. + for (FunctionProtoType::exception_iterator E = Proto->exception_begin(), + EEnd = Proto->exception_end(); + E != EEnd; ++E) + if (ExceptionsSeen.insert(Context.getCanonicalType(*E))) + Exceptions.push_back(*E); + } + }; } + /// AddImplicitlyDeclaredMembersToClass - Adds any implicitly-declared /// special functions, such as the default constructor, copy /// constructor, or destructor, to the given C++ class (C++ /// [special]p1). This routine can only be executed just before the /// definition of the class is complete. -/// -/// The scope, if provided, is the class scope. 
-void Sema::AddImplicitlyDeclaredMembersToClass(Scope *S, - CXXRecordDecl *ClassDecl) { - CanQualType ClassType - = Context.getCanonicalType(Context.getTypeDeclType(ClassDecl)); - - // FIXME: Implicit declarations have exception specifications, which are - // the union of the specifications of the implicitly called functions. - - if (!ClassDecl->hasUserDeclaredConstructor()) { - // C++ [class.ctor]p5: - // A default constructor for a class X is a constructor of class X - // that can be called without an argument. If there is no - // user-declared constructor for class X, a default constructor is - // implicitly declared. An implicitly-declared default constructor - // is an inline public member of its class. - DeclarationName Name - = Context.DeclarationNames.getCXXConstructorName(ClassType); - CXXConstructorDecl *DefaultCon = - CXXConstructorDecl::Create(Context, ClassDecl, - ClassDecl->getLocation(), Name, - Context.getFunctionType(Context.VoidTy, - 0, 0, false, 0, - /*FIXME*/false, false, - 0, 0, - FunctionType::ExtInfo()), - /*TInfo=*/0, - /*isExplicit=*/false, - /*isInline=*/true, - /*isImplicitlyDeclared=*/true); - DefaultCon->setAccess(AS_public); - DefaultCon->setImplicit(); - DefaultCon->setTrivial(ClassDecl->hasTrivialConstructor()); - if (S) - PushOnScopeChains(DefaultCon, S, true); - else - ClassDecl->addDecl(DefaultCon); - } +void Sema::AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl) { + if (!ClassDecl->hasUserDeclaredConstructor()) + ++ASTContext::NumImplicitDefaultConstructors; - if (!ClassDecl->hasUserDeclaredCopyConstructor()) { - // C++ [class.copy]p4: - // If the class definition does not explicitly declare a copy - // constructor, one is declared implicitly. 
- - // C++ [class.copy]p5: - // The implicitly-declared copy constructor for a class X will - // have the form - // - // X::X(const X&) - // - // if - bool HasConstCopyConstructor = true; - - // -- each direct or virtual base class B of X has a copy - // constructor whose first parameter is of type const B& or - // const volatile B&, and - for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(); - HasConstCopyConstructor && Base != ClassDecl->bases_end(); ++Base) { - const CXXRecordDecl *BaseClassDecl - = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl()); - HasConstCopyConstructor - = BaseClassDecl->hasConstCopyConstructor(Context); - } - - // -- for all the nonstatic data members of X that are of a - // class type M (or array thereof), each such class type - // has a copy constructor whose first parameter is of type - // const M& or const volatile M&. - for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(); - HasConstCopyConstructor && Field != ClassDecl->field_end(); - ++Field) { - QualType FieldType = (*Field)->getType(); - if (const ArrayType *Array = Context.getAsArrayType(FieldType)) - FieldType = Array->getElementType(); - if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) { - const CXXRecordDecl *FieldClassDecl - = cast<CXXRecordDecl>(FieldClassType->getDecl()); - HasConstCopyConstructor - = FieldClassDecl->hasConstCopyConstructor(Context); - } - } - - // Otherwise, the implicitly declared copy constructor will have - // the form - // - // X::X(X&) - QualType ArgType = ClassType; - if (HasConstCopyConstructor) - ArgType = ArgType.withConst(); - ArgType = Context.getLValueReferenceType(ArgType); - - // An implicitly-declared copy constructor is an inline public - // member of its class. 
- DeclarationName Name - = Context.DeclarationNames.getCXXConstructorName(ClassType); - CXXConstructorDecl *CopyConstructor - = CXXConstructorDecl::Create(Context, ClassDecl, - ClassDecl->getLocation(), Name, - Context.getFunctionType(Context.VoidTy, - &ArgType, 1, - false, 0, - /*FIXME: hasExceptionSpec*/false, - false, 0, 0, - FunctionType::ExtInfo()), - /*TInfo=*/0, - /*isExplicit=*/false, - /*isInline=*/true, - /*isImplicitlyDeclared=*/true); - CopyConstructor->setAccess(AS_public); - CopyConstructor->setImplicit(); - CopyConstructor->setTrivial(ClassDecl->hasTrivialCopyConstructor()); - - // Add the parameter to the constructor. - ParmVarDecl *FromParam = ParmVarDecl::Create(Context, CopyConstructor, - ClassDecl->getLocation(), - /*IdentifierInfo=*/0, - ArgType, /*TInfo=*/0, - VarDecl::None, - VarDecl::None, 0); - CopyConstructor->setParams(&FromParam, 1); - if (S) - PushOnScopeChains(CopyConstructor, S, true); - else - ClassDecl->addDecl(CopyConstructor); - } + if (!ClassDecl->hasUserDeclaredCopyConstructor()) + ++ASTContext::NumImplicitCopyConstructors; if (!ClassDecl->hasUserDeclaredCopyAssignment()) { - // Note: The following rules are largely analoguous to the copy - // constructor rules. Note that virtual bases are not taken into account - // for determining the argument type of the operator. Note also that - // operators taking an object instead of a reference are allowed. - // - // C++ [class.copy]p10: - // If the class definition does not explicitly declare a copy - // assignment operator, one is declared implicitly. 
- // The implicitly-defined copy assignment operator for a class X - // will have the form - // - // X& X::operator=(const X&) - // - // if - bool HasConstCopyAssignment = true; - - // -- each direct base class B of X has a copy assignment operator - // whose parameter is of type const B&, const volatile B& or B, - // and - for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(); - HasConstCopyAssignment && Base != ClassDecl->bases_end(); ++Base) { - assert(!Base->getType()->isDependentType() && - "Cannot generate implicit members for class with dependent bases."); - const CXXRecordDecl *BaseClassDecl - = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl()); - const CXXMethodDecl *MD = 0; - HasConstCopyAssignment = BaseClassDecl->hasConstCopyAssignment(Context, - MD); - } - - // -- for all the nonstatic data members of X that are of a class - // type M (or array thereof), each such class type has a copy - // assignment operator whose parameter is of type const M&, - // const volatile M& or M. 
- for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(); - HasConstCopyAssignment && Field != ClassDecl->field_end(); - ++Field) { - QualType FieldType = (*Field)->getType(); - if (const ArrayType *Array = Context.getAsArrayType(FieldType)) - FieldType = Array->getElementType(); - if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) { - const CXXRecordDecl *FieldClassDecl - = cast<CXXRecordDecl>(FieldClassType->getDecl()); - const CXXMethodDecl *MD = 0; - HasConstCopyAssignment - = FieldClassDecl->hasConstCopyAssignment(Context, MD); - } - } - - // Otherwise, the implicitly declared copy assignment operator will - // have the form - // - // X& X::operator=(X&) - QualType ArgType = ClassType; - QualType RetType = Context.getLValueReferenceType(ArgType); - if (HasConstCopyAssignment) - ArgType = ArgType.withConst(); - ArgType = Context.getLValueReferenceType(ArgType); - - // An implicitly-declared copy assignment operator is an inline public - // member of its class. - DeclarationName Name = - Context.DeclarationNames.getCXXOperatorName(OO_Equal); - CXXMethodDecl *CopyAssignment = - CXXMethodDecl::Create(Context, ClassDecl, ClassDecl->getLocation(), Name, - Context.getFunctionType(RetType, &ArgType, 1, - false, 0, - /*FIXME: hasExceptionSpec*/false, - false, 0, 0, - FunctionType::ExtInfo()), - /*TInfo=*/0, /*isStatic=*/false, - /*StorageClassAsWritten=*/FunctionDecl::None, - /*isInline=*/true); - CopyAssignment->setAccess(AS_public); - CopyAssignment->setImplicit(); - CopyAssignment->setTrivial(ClassDecl->hasTrivialCopyAssignment()); - CopyAssignment->setCopyAssignment(true); - - // Add the parameter to the operator. - ParmVarDecl *FromParam = ParmVarDecl::Create(Context, CopyAssignment, - ClassDecl->getLocation(), - /*Id=*/0, - ArgType, /*TInfo=*/0, - VarDecl::None, - VarDecl::None, 0); - CopyAssignment->setParams(&FromParam, 1); - - // Don't call addedAssignmentOperator. 
There is no way to distinguish an - // implicit from an explicit assignment operator. - if (S) - PushOnScopeChains(CopyAssignment, S, true); - else - ClassDecl->addDecl(CopyAssignment); - AddOverriddenMethods(ClassDecl, CopyAssignment); + ++ASTContext::NumImplicitCopyAssignmentOperators; + + // If we have a dynamic class, then the copy assignment operator may be + // virtual, so we have to declare it immediately. This ensures that, e.g., + // it shows up in the right place in the vtable and that we diagnose + // problems with the implicit exception specification. + if (ClassDecl->isDynamicClass()) + DeclareImplicitCopyAssignment(ClassDecl); } if (!ClassDecl->hasUserDeclaredDestructor()) { - // C++ [class.dtor]p2: - // If a class has no user-declared destructor, a destructor is - // declared implicitly. An implicitly-declared destructor is an - // inline public member of its class. - QualType Ty = Context.getFunctionType(Context.VoidTy, - 0, 0, false, 0, - /*FIXME: hasExceptionSpec*/false, - false, 0, 0, FunctionType::ExtInfo()); - - DeclarationName Name - = Context.DeclarationNames.getCXXDestructorName(ClassType); - CXXDestructorDecl *Destructor - = CXXDestructorDecl::Create(Context, ClassDecl, - ClassDecl->getLocation(), Name, Ty, - /*isInline=*/true, - /*isImplicitlyDeclared=*/true); - Destructor->setAccess(AS_public); - Destructor->setImplicit(); - Destructor->setTrivial(ClassDecl->hasTrivialDestructor()); - if (S) - PushOnScopeChains(Destructor, S, true); - else - ClassDecl->addDecl(Destructor); - - // This could be uniqued if it ever proves significant. - Destructor->setTypeSourceInfo(Context.getTrivialTypeSourceInfo(Ty)); + ++ASTContext::NumImplicitDestructors; - AddOverriddenMethods(ClassDecl, Destructor); + // If we have a dynamic class, then the destructor may be virtual, so we + // have to declare the destructor immediately. 
This ensures that, e.g., it + // shows up in the right place in the vtable and that we diagnose problems + // with the implicit exception specification. + if (ClassDecl->isDynamicClass()) + DeclareImplicitDestructor(ClassDecl); } } @@ -2952,9 +2823,7 @@ QualType Sema::CheckConstructorDeclarator(Declarator &D, QualType R, // Rebuild the function type "R" without any type qualifiers (in // case any of the errors above fired) and with "void" as the - // return type, since constructors don't have return types. We - // *always* have to do this, because GetTypeForDeclarator will - // put in a result type of "int" when none was specified. + // return type, since constructors don't have return types. const FunctionProtoType *Proto = R->getAs<FunctionProtoType>(); return Context.getFunctionType(Context.VoidTy, Proto->arg_type_begin(), Proto->getNumArgs(), @@ -2990,8 +2859,11 @@ void Sema::CheckConstructor(CXXConstructorDecl *Constructor) { QualType ClassTy = Context.getTagDeclType(ClassDecl); if (Context.getCanonicalType(ParamType).getUnqualifiedType() == ClassTy) { SourceLocation ParamLoc = Constructor->getParamDecl(0)->getLocation(); + const char *ConstRef + = Constructor->getParamDecl(0)->getIdentifier() ? "const &" + : " const &"; Diag(ParamLoc, diag::err_constructor_byvalue_arg) - << FixItHint::CreateInsertion(ParamLoc, " const &"); + << FixItHint::CreateInsertion(ParamLoc, ConstRef); // FIXME: Rather that making the constructor invalid, we should endeavor // to fix the type. @@ -3026,6 +2898,8 @@ bool Sema::CheckDestructor(CXXDestructorDecl *Destructor) { Context.DeclarationNames.getCXXOperatorName(OO_Delete); if (FindDeallocationFunction(Loc, RD, Name, OperatorDelete)) return true; + + MarkDeclarationReferenced(Loc, OperatorDelete); Destructor->setOperatorDelete(OperatorDelete); } @@ -3046,7 +2920,7 @@ FTIHasSingleVoidArgument(DeclaratorChunk::FunctionTypeInfo &FTI) { /// emit diagnostics and set the declarator to invalid. 
Even if this happens, /// will be updated to reflect a well-formed type for the destructor and /// returned. -QualType Sema::CheckDestructorDeclarator(Declarator &D, +QualType Sema::CheckDestructorDeclarator(Declarator &D, QualType R, FunctionDecl::StorageClass& SC) { // C++ [class.dtor]p1: // [...] A typedef-name that names a class is a class-name @@ -3054,11 +2928,9 @@ QualType Sema::CheckDestructorDeclarator(Declarator &D, // be used as the identifier in the declarator for a destructor // declaration. QualType DeclaratorType = GetTypeFromParser(D.getName().DestructorName); - if (isa<TypedefType>(DeclaratorType)) { + if (isa<TypedefType>(DeclaratorType)) Diag(D.getIdentifierLoc(), diag::err_destructor_typedef_name) << DeclaratorType; - D.setInvalidType(); - } // C++ [class.dtor]p2: // A destructor is used to destroy objects of its class type. A @@ -3072,9 +2944,10 @@ QualType Sema::CheckDestructorDeclarator(Declarator &D, if (!D.isInvalidType()) Diag(D.getIdentifierLoc(), diag::err_destructor_cannot_be) << "static" << SourceRange(D.getDeclSpec().getStorageClassSpecLoc()) - << SourceRange(D.getIdentifierLoc()); + << SourceRange(D.getIdentifierLoc()) + << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc()); + SC = FunctionDecl::None; - D.setInvalidType(); } if (D.getDeclSpec().hasTypeSpecifier() && !D.isInvalidType()) { // Destructors don't have return types, but the parser will @@ -3122,11 +2995,17 @@ QualType Sema::CheckDestructorDeclarator(Declarator &D, // Rebuild the function type "R" without any type qualifiers or // parameters (in case any of the errors above fired) and with // "void" as the return type, since destructors don't have return - // types. We *always* have to do this, because GetTypeForDeclarator - // will put in a result type of "int" when none was specified. - // FIXME: Exceptions! + // types. 
+ const FunctionProtoType *Proto = R->getAs<FunctionProtoType>(); + if (!Proto) + return QualType(); + return Context.getFunctionType(Context.VoidTy, 0, 0, false, 0, - false, false, 0, 0, FunctionType::ExtInfo()); + Proto->hasExceptionSpec(), + Proto->hasAnyExceptionSpec(), + Proto->getNumExceptions(), + Proto->exception_begin(), + Proto->getExtInfo()); } /// CheckConversionDeclarator - Called by ActOnDeclarator to check the @@ -3434,6 +3313,21 @@ void Sema::ActOnFinishNamespaceDef(DeclPtrTy D, SourceLocation RBrace) { PopDeclContext(); } +/// \brief Retrieve the special "std" namespace, which may require us to +/// implicitly define the namespace. +NamespaceDecl *Sema::getStdNamespace() { + if (!StdNamespace) { + // The "std" namespace has not yet been defined, so build one implicitly. + StdNamespace = NamespaceDecl::Create(Context, + Context.getTranslationUnitDecl(), + SourceLocation(), + &PP.getIdentifierTable().get("std")); + StdNamespace->setImplicit(true); + } + + return StdNamespace; +} + Sema::DeclPtrTy Sema::ActOnUsingDirective(Scope *S, SourceLocation UsingLoc, SourceLocation NamespcLoc, @@ -3447,13 +3341,49 @@ Sema::DeclPtrTy Sema::ActOnUsingDirective(Scope *S, assert(S->getFlags() & Scope::DeclScope && "Invalid Scope."); UsingDirectiveDecl *UDir = 0; - + NestedNameSpecifier *Qualifier = 0; + if (SS.isSet()) + Qualifier = static_cast<NestedNameSpecifier *>(SS.getScopeRep()); + // Lookup namespace name. LookupResult R(*this, NamespcName, IdentLoc, LookupNamespaceName); LookupParsedName(R, S, &SS); if (R.isAmbiguous()) return DeclPtrTy(); + if (R.empty()) { + // Allow "using namespace std;" or "using namespace ::std;" even if + // "std" hasn't been defined yet, for GCC compatibility. + if ((!Qualifier || Qualifier->getKind() == NestedNameSpecifier::Global) && + NamespcName->isStr("std")) { + Diag(IdentLoc, diag::ext_using_undefined_std); + R.addDecl(getStdNamespace()); + R.resolveKind(); + } + // Otherwise, attempt typo correction. 
+ else if (DeclarationName Corrected = CorrectTypo(R, S, &SS, 0, false, + CTC_NoKeywords, 0)) { + if (R.getAsSingle<NamespaceDecl>() || + R.getAsSingle<NamespaceAliasDecl>()) { + if (DeclContext *DC = computeDeclContext(SS, false)) + Diag(IdentLoc, diag::err_using_directive_member_suggest) + << NamespcName << DC << Corrected << SS.getRange() + << FixItHint::CreateReplacement(IdentLoc, Corrected.getAsString()); + else + Diag(IdentLoc, diag::err_using_directive_suggest) + << NamespcName << Corrected + << FixItHint::CreateReplacement(IdentLoc, Corrected.getAsString()); + Diag(R.getFoundDecl()->getLocation(), diag::note_namespace_defined_here) + << Corrected; + + NamespcName = Corrected.getAsIdentifierInfo(); + } else { + R.clear(); + R.setLookupName(NamespcName); + } + } + } + if (!R.empty()) { NamedDecl *Named = R.getFoundDecl(); assert((isa<NamespaceDecl>(Named) || isa<NamespaceAliasDecl>(Named)) @@ -3566,6 +3496,28 @@ Sema::DeclPtrTy Sema::ActOnUsingDeclaration(Scope *S, return DeclPtrTy::make(UD); } +/// \brief Determine whether a using declaration considers the given +/// declarations as "equivalent", e.g., if they are redeclarations of +/// the same entity or are both typedefs of the same type. +static bool +IsEquivalentForUsingDecl(ASTContext &Context, NamedDecl *D1, NamedDecl *D2, + bool &SuppressRedeclaration) { + if (D1->getCanonicalDecl() == D2->getCanonicalDecl()) { + SuppressRedeclaration = false; + return true; + } + + if (TypedefDecl *TD1 = dyn_cast<TypedefDecl>(D1)) + if (TypedefDecl *TD2 = dyn_cast<TypedefDecl>(D2)) { + SuppressRedeclaration = true; + return Context.hasSameType(TD1->getUnderlyingType(), + TD2->getUnderlyingType()); + } + + return false; +} + + /// Determines whether to create a using shadow decl for a particular /// decl, given the set of decls existing prior to this using lookup. 
bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig, @@ -3632,8 +3584,9 @@ bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig, for (LookupResult::iterator I = Previous.begin(), E = Previous.end(); I != E; ++I) { NamedDecl *D = (*I)->getUnderlyingDecl(); - if (D->getCanonicalDecl() == Target->getCanonicalDecl()) - return false; + bool Result; + if (IsEquivalentForUsingDecl(Context, D, Target, Result)) + return Result; (isa<TagDecl>(D) ? Tag : NonTag) = D; } @@ -3646,7 +3599,7 @@ bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig, FD = cast<FunctionDecl>(Target); NamedDecl *OldDecl = 0; - switch (CheckOverload(FD, Previous, OldDecl)) { + switch (CheckOverload(0, FD, Previous, OldDecl, /*IsForUsingDecl*/ true)) { case Ovl_Overload: return false; @@ -3656,11 +3609,6 @@ bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig, // We found a decl with the exact signature. case Ovl_Match: - if (isa<UsingShadowDecl>(OldDecl)) { - // Silently ignore the possible conflict. - return false; - } - // If we're in a record, we want to hide the target, so we // return true (without a diagnostic) to tell the caller not to // build a shadow decl. 
@@ -4162,8 +4110,33 @@ Sema::DeclPtrTy Sema::ActOnNamespaceAliasDef(Scope *S, return DeclPtrTy(); if (R.empty()) { - Diag(NamespaceLoc, diag::err_expected_namespace_name) << SS.getRange(); - return DeclPtrTy(); + if (DeclarationName Corrected = CorrectTypo(R, S, &SS, 0, false, + CTC_NoKeywords, 0)) { + if (R.getAsSingle<NamespaceDecl>() || + R.getAsSingle<NamespaceAliasDecl>()) { + if (DeclContext *DC = computeDeclContext(SS, false)) + Diag(IdentLoc, diag::err_using_directive_member_suggest) + << Ident << DC << Corrected << SS.getRange() + << FixItHint::CreateReplacement(IdentLoc, Corrected.getAsString()); + else + Diag(IdentLoc, diag::err_using_directive_suggest) + << Ident << Corrected + << FixItHint::CreateReplacement(IdentLoc, Corrected.getAsString()); + + Diag(R.getFoundDecl()->getLocation(), diag::note_namespace_defined_here) + << Corrected; + + Ident = Corrected.getAsIdentifierInfo(); + } else { + R.clear(); + R.setLookupName(Ident); + } + } + + if (R.empty()) { + Diag(NamespaceLoc, diag::err_expected_namespace_name) << SS.getRange(); + return DeclPtrTy(); + } } NamespaceAliasDecl *AliasDecl = @@ -4200,10 +4173,108 @@ namespace { }; } +CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor( + CXXRecordDecl *ClassDecl) { + // C++ [class.ctor]p5: + // A default constructor for a class X is a constructor of class X + // that can be called without an argument. If there is no + // user-declared constructor for class X, a default constructor is + // implicitly declared. An implicitly-declared default constructor + // is an inline public member of its class. + assert(!ClassDecl->hasUserDeclaredConstructor() && + "Should not build implicit default constructor!"); + + // C++ [except.spec]p14: + // An implicitly declared special member function (Clause 12) shall have an + // exception-specification. [...] + ImplicitExceptionSpecification ExceptSpec(Context); + + // Direct base-class destructors. 
+ for (CXXRecordDecl::base_class_iterator B = ClassDecl->bases_begin(), + BEnd = ClassDecl->bases_end(); + B != BEnd; ++B) { + if (B->isVirtual()) // Handled below. + continue; + + if (const RecordType *BaseType = B->getType()->getAs<RecordType>()) { + CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl()); + if (!BaseClassDecl->hasDeclaredDefaultConstructor()) + ExceptSpec.CalledDecl(DeclareImplicitDefaultConstructor(BaseClassDecl)); + else if (CXXConstructorDecl *Constructor + = BaseClassDecl->getDefaultConstructor()) + ExceptSpec.CalledDecl(Constructor); + } + } + + // Virtual base-class destructors. + for (CXXRecordDecl::base_class_iterator B = ClassDecl->vbases_begin(), + BEnd = ClassDecl->vbases_end(); + B != BEnd; ++B) { + if (const RecordType *BaseType = B->getType()->getAs<RecordType>()) { + CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl()); + if (!BaseClassDecl->hasDeclaredDefaultConstructor()) + ExceptSpec.CalledDecl(DeclareImplicitDefaultConstructor(BaseClassDecl)); + else if (CXXConstructorDecl *Constructor + = BaseClassDecl->getDefaultConstructor()) + ExceptSpec.CalledDecl(Constructor); + } + } + + // Field destructors. + for (RecordDecl::field_iterator F = ClassDecl->field_begin(), + FEnd = ClassDecl->field_end(); + F != FEnd; ++F) { + if (const RecordType *RecordTy + = Context.getBaseElementType(F->getType())->getAs<RecordType>()) { + CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RecordTy->getDecl()); + if (!FieldClassDecl->hasDeclaredDefaultConstructor()) + ExceptSpec.CalledDecl( + DeclareImplicitDefaultConstructor(FieldClassDecl)); + else if (CXXConstructorDecl *Constructor + = FieldClassDecl->getDefaultConstructor()) + ExceptSpec.CalledDecl(Constructor); + } + } + + + // Create the actual constructor declaration. 
+ CanQualType ClassType + = Context.getCanonicalType(Context.getTypeDeclType(ClassDecl)); + DeclarationName Name + = Context.DeclarationNames.getCXXConstructorName(ClassType); + CXXConstructorDecl *DefaultCon + = CXXConstructorDecl::Create(Context, ClassDecl, + ClassDecl->getLocation(), Name, + Context.getFunctionType(Context.VoidTy, + 0, 0, false, 0, + ExceptSpec.hasExceptionSpecification(), + ExceptSpec.hasAnyExceptionSpecification(), + ExceptSpec.size(), + ExceptSpec.data(), + FunctionType::ExtInfo()), + /*TInfo=*/0, + /*isExplicit=*/false, + /*isInline=*/true, + /*isImplicitlyDeclared=*/true); + DefaultCon->setAccess(AS_public); + DefaultCon->setImplicit(); + DefaultCon->setTrivial(ClassDecl->hasTrivialConstructor()); + + // Note that we have declared this constructor. + ClassDecl->setDeclaredDefaultConstructor(true); + ++ASTContext::NumImplicitDefaultConstructorsDeclared; + + if (Scope *S = getScopeForContext(ClassDecl)) + PushOnScopeChains(DefaultCon, S, false); + ClassDecl->addDecl(DefaultCon); + + return DefaultCon; +} + void Sema::DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor) { assert((Constructor->isImplicit() && Constructor->isDefaultConstructor() && - !Constructor->isUsed()) && + !Constructor->isUsed(false)) && "DefineImplicitDefaultConstructor - call it for implicit default ctor"); CXXRecordDecl *ClassDecl = Constructor->getParent(); @@ -4222,9 +4293,90 @@ void Sema::DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, } } +CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) { + // C++ [class.dtor]p2: + // If a class has no user-declared destructor, a destructor is + // declared implicitly. An implicitly-declared destructor is an + // inline public member of its class. + + // C++ [except.spec]p14: + // An implicitly declared special member function (Clause 12) shall have + // an exception-specification. 
+ ImplicitExceptionSpecification ExceptSpec(Context); + + // Direct base-class destructors. + for (CXXRecordDecl::base_class_iterator B = ClassDecl->bases_begin(), + BEnd = ClassDecl->bases_end(); + B != BEnd; ++B) { + if (B->isVirtual()) // Handled below. + continue; + + if (const RecordType *BaseType = B->getType()->getAs<RecordType>()) + ExceptSpec.CalledDecl( + LookupDestructor(cast<CXXRecordDecl>(BaseType->getDecl()))); + } + + // Virtual base-class destructors. + for (CXXRecordDecl::base_class_iterator B = ClassDecl->vbases_begin(), + BEnd = ClassDecl->vbases_end(); + B != BEnd; ++B) { + if (const RecordType *BaseType = B->getType()->getAs<RecordType>()) + ExceptSpec.CalledDecl( + LookupDestructor(cast<CXXRecordDecl>(BaseType->getDecl()))); + } + + // Field destructors. + for (RecordDecl::field_iterator F = ClassDecl->field_begin(), + FEnd = ClassDecl->field_end(); + F != FEnd; ++F) { + if (const RecordType *RecordTy + = Context.getBaseElementType(F->getType())->getAs<RecordType>()) + ExceptSpec.CalledDecl( + LookupDestructor(cast<CXXRecordDecl>(RecordTy->getDecl()))); + } + + // Create the actual destructor declaration. + QualType Ty = Context.getFunctionType(Context.VoidTy, + 0, 0, false, 0, + ExceptSpec.hasExceptionSpecification(), + ExceptSpec.hasAnyExceptionSpecification(), + ExceptSpec.size(), + ExceptSpec.data(), + FunctionType::ExtInfo()); + + CanQualType ClassType + = Context.getCanonicalType(Context.getTypeDeclType(ClassDecl)); + DeclarationName Name + = Context.DeclarationNames.getCXXDestructorName(ClassType); + CXXDestructorDecl *Destructor + = CXXDestructorDecl::Create(Context, ClassDecl, + ClassDecl->getLocation(), Name, Ty, + /*isInline=*/true, + /*isImplicitlyDeclared=*/true); + Destructor->setAccess(AS_public); + Destructor->setImplicit(); + Destructor->setTrivial(ClassDecl->hasTrivialDestructor()); + + // Note that we have declared this destructor. 
+ ClassDecl->setDeclaredDestructor(true); + ++ASTContext::NumImplicitDestructorsDeclared; + + // Introduce this destructor into its scope. + if (Scope *S = getScopeForContext(ClassDecl)) + PushOnScopeChains(Destructor, S, false); + ClassDecl->addDecl(Destructor); + + // This could be uniqued if it ever proves significant. + Destructor->setTypeSourceInfo(Context.getTrivialTypeSourceInfo(Ty)); + + AddOverriddenMethods(ClassDecl, Destructor); + + return Destructor; +} + void Sema::DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor) { - assert((Destructor->isImplicit() && !Destructor->isUsed()) && + assert((Destructor->isImplicit() && !Destructor->isUsed(false)) && "DefineImplicitDestructor - call it for implicit default dtor"); CXXRecordDecl *ClassDecl = Destructor->getParent(); assert(ClassDecl && "DefineImplicitDestructor - invalid destructor"); @@ -4448,12 +4600,197 @@ BuildSingleCopyAssign(Sema &S, SourceLocation Loc, QualType T, Loc, move(Copy)); } +/// \brief Determine whether the given class has a copy assignment operator +/// that accepts a const-qualified argument. +static bool hasConstCopyAssignment(Sema &S, const CXXRecordDecl *CClass) { + CXXRecordDecl *Class = const_cast<CXXRecordDecl *>(CClass); + + if (!Class->hasDeclaredCopyAssignment()) + S.DeclareImplicitCopyAssignment(Class); + + QualType ClassType = S.Context.getCanonicalType(S.Context.getTypeDeclType(Class)); + DeclarationName OpName + = S.Context.DeclarationNames.getCXXOperatorName(OO_Equal); + + DeclContext::lookup_const_iterator Op, OpEnd; + for (llvm::tie(Op, OpEnd) = Class->lookup(OpName); Op != OpEnd; ++Op) { + // C++ [class.copy]p9: + // A user-declared copy assignment operator is a non-static non-template + // member function of class X with exactly one parameter of type X, X&, + // const X&, volatile X& or const volatile X&. 
+ const CXXMethodDecl* Method = dyn_cast<CXXMethodDecl>(*Op); + if (!Method) + continue; + + if (Method->isStatic()) + continue; + if (Method->getPrimaryTemplate()) + continue; + const FunctionProtoType *FnType = + Method->getType()->getAs<FunctionProtoType>(); + assert(FnType && "Overloaded operator has no prototype."); + // Don't assert on this; an invalid decl might have been left in the AST. + if (FnType->getNumArgs() != 1 || FnType->isVariadic()) + continue; + bool AcceptsConst = true; + QualType ArgType = FnType->getArgType(0); + if (const LValueReferenceType *Ref = ArgType->getAs<LValueReferenceType>()){ + ArgType = Ref->getPointeeType(); + // Is it a non-const lvalue reference? + if (!ArgType.isConstQualified()) + AcceptsConst = false; + } + if (!S.Context.hasSameUnqualifiedType(ArgType, ClassType)) + continue; + + // We have a single argument of type cv X or cv X&, i.e. we've found the + // copy assignment operator. Return whether it accepts const arguments. + return AcceptsConst; + } + assert(Class->isInvalidDecl() && + "No copy assignment operator declared in valid code."); + return false; +} + +CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) { + // Note: The following rules are largely analoguous to the copy + // constructor rules. Note that virtual bases are not taken into account + // for determining the argument type of the operator. Note also that + // operators taking an object instead of a reference are allowed. + + + // C++ [class.copy]p10: + // If the class definition does not explicitly declare a copy + // assignment operator, one is declared implicitly. 
+ // The implicitly-defined copy assignment operator for a class X + // will have the form + // + // X& X::operator=(const X&) + // + // if + bool HasConstCopyAssignment = true; + + // -- each direct base class B of X has a copy assignment operator + // whose parameter is of type const B&, const volatile B& or B, + // and + for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(), + BaseEnd = ClassDecl->bases_end(); + HasConstCopyAssignment && Base != BaseEnd; ++Base) { + assert(!Base->getType()->isDependentType() && + "Cannot generate implicit members for class with dependent bases."); + const CXXRecordDecl *BaseClassDecl + = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl()); + HasConstCopyAssignment = hasConstCopyAssignment(*this, BaseClassDecl); + } + + // -- for all the nonstatic data members of X that are of a class + // type M (or array thereof), each such class type has a copy + // assignment operator whose parameter is of type const M&, + // const volatile M& or M. 
+ for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(), + FieldEnd = ClassDecl->field_end(); + HasConstCopyAssignment && Field != FieldEnd; + ++Field) { + QualType FieldType = Context.getBaseElementType((*Field)->getType()); + if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) { + const CXXRecordDecl *FieldClassDecl + = cast<CXXRecordDecl>(FieldClassType->getDecl()); + HasConstCopyAssignment = hasConstCopyAssignment(*this, FieldClassDecl); + } + } + + // Otherwise, the implicitly declared copy assignment operator will + // have the form + // + // X& X::operator=(X&) + QualType ArgType = Context.getTypeDeclType(ClassDecl); + QualType RetType = Context.getLValueReferenceType(ArgType); + if (HasConstCopyAssignment) + ArgType = ArgType.withConst(); + ArgType = Context.getLValueReferenceType(ArgType); + + // C++ [except.spec]p14: + // An implicitly declared special member function (Clause 12) shall have an + // exception-specification. [...] + ImplicitExceptionSpecification ExceptSpec(Context); + for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(), + BaseEnd = ClassDecl->bases_end(); + Base != BaseEnd; ++Base) { + CXXRecordDecl *BaseClassDecl + = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl()); + + if (!BaseClassDecl->hasDeclaredCopyAssignment()) + DeclareImplicitCopyAssignment(BaseClassDecl); + + if (CXXMethodDecl *CopyAssign + = BaseClassDecl->getCopyAssignmentOperator(HasConstCopyAssignment)) + ExceptSpec.CalledDecl(CopyAssign); + } + for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(), + FieldEnd = ClassDecl->field_end(); + Field != FieldEnd; + ++Field) { + QualType FieldType = Context.getBaseElementType((*Field)->getType()); + if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) { + CXXRecordDecl *FieldClassDecl + = cast<CXXRecordDecl>(FieldClassType->getDecl()); + + if (!FieldClassDecl->hasDeclaredCopyAssignment()) + 
DeclareImplicitCopyAssignment(FieldClassDecl); + + if (CXXMethodDecl *CopyAssign + = FieldClassDecl->getCopyAssignmentOperator(HasConstCopyAssignment)) + ExceptSpec.CalledDecl(CopyAssign); + } + } + + // An implicitly-declared copy assignment operator is an inline public + // member of its class. + DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(OO_Equal); + CXXMethodDecl *CopyAssignment + = CXXMethodDecl::Create(Context, ClassDecl, ClassDecl->getLocation(), Name, + Context.getFunctionType(RetType, &ArgType, 1, + false, 0, + ExceptSpec.hasExceptionSpecification(), + ExceptSpec.hasAnyExceptionSpecification(), + ExceptSpec.size(), + ExceptSpec.data(), + FunctionType::ExtInfo()), + /*TInfo=*/0, /*isStatic=*/false, + /*StorageClassAsWritten=*/FunctionDecl::None, + /*isInline=*/true); + CopyAssignment->setAccess(AS_public); + CopyAssignment->setImplicit(); + CopyAssignment->setTrivial(ClassDecl->hasTrivialCopyAssignment()); + CopyAssignment->setCopyAssignment(true); + + // Add the parameter to the operator. + ParmVarDecl *FromParam = ParmVarDecl::Create(Context, CopyAssignment, + ClassDecl->getLocation(), + /*Id=*/0, + ArgType, /*TInfo=*/0, + VarDecl::None, + VarDecl::None, 0); + CopyAssignment->setParams(&FromParam, 1); + + // Note that we have added this copy-assignment operator. 
+ ClassDecl->setDeclaredCopyAssignment(true); + ++ASTContext::NumImplicitCopyAssignmentOperatorsDeclared; + + if (Scope *S = getScopeForContext(ClassDecl)) + PushOnScopeChains(CopyAssignment, S, false); + ClassDecl->addDecl(CopyAssignment); + + AddOverriddenMethods(ClassDecl, CopyAssignment); + return CopyAssignment; +} + void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *CopyAssignOperator) { assert((CopyAssignOperator->isImplicit() && CopyAssignOperator->isOverloadedOperator() && CopyAssignOperator->getOverloadedOperator() == OO_Equal && - !CopyAssignOperator->isUsed()) && + !CopyAssignOperator->isUsed(false)) && "DefineImplicitCopyAssignment called for wrong function"); CXXRecordDecl *ClassDecl = CopyAssignOperator->getParent(); @@ -4554,6 +4891,8 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation, // \brief Reference to the __builtin_memcpy function. Expr *BuiltinMemCpyRef = 0; + // \brief Reference to the __builtin_objc_memmove_collectable function. + Expr *CollectableMemCpyRef = 0; // Assign non-static members. for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(), @@ -4630,9 +4969,35 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation, // Take the address of the field references for "from" and "to". From = CreateBuiltinUnaryOp(Loc, UnaryOperator::AddrOf, move(From)); To = CreateBuiltinUnaryOp(Loc, UnaryOperator::AddrOf, move(To)); - + + bool NeedsCollectableMemCpy = + (BaseType->isRecordType() && + BaseType->getAs<RecordType>()->getDecl()->hasObjectMember()); + + if (NeedsCollectableMemCpy) { + if (!CollectableMemCpyRef) { + // Create a reference to the __builtin_objc_memmove_collectable function. 
+ LookupResult R(*this, + &Context.Idents.get("__builtin_objc_memmove_collectable"), + Loc, LookupOrdinaryName); + LookupName(R, TUScope, true); + + FunctionDecl *CollectableMemCpy = R.getAsSingle<FunctionDecl>(); + if (!CollectableMemCpy) { + // Something went horribly wrong earlier, and we will have + // complained about it. + Invalid = true; + continue; + } + + CollectableMemCpyRef = BuildDeclRefExpr(CollectableMemCpy, + CollectableMemCpy->getType(), + Loc, 0).takeAs<Expr>(); + assert(CollectableMemCpyRef && "Builtin reference cannot fail"); + } + } // Create a reference to the __builtin_memcpy builtin function. - if (!BuiltinMemCpyRef) { + else if (!BuiltinMemCpyRef) { LookupResult R(*this, &Context.Idents.get("__builtin_memcpy"), Loc, LookupOrdinaryName); LookupName(R, TUScope, true); @@ -4658,10 +5023,18 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation, llvm::SmallVector<SourceLocation, 4> Commas; // FIXME: Silly Commas.push_back(Loc); Commas.push_back(Loc); - OwningExprResult Call = ActOnCallExpr(/*Scope=*/0, - Owned(BuiltinMemCpyRef->Retain()), - Loc, move_arg(CallArgs), - Commas.data(), Loc); + OwningExprResult Call = ExprError(); + if (NeedsCollectableMemCpy) + Call = ActOnCallExpr(/*Scope=*/0, + Owned(CollectableMemCpyRef->Retain()), + Loc, move_arg(CallArgs), + Commas.data(), Loc); + else + Call = ActOnCallExpr(/*Scope=*/0, + Owned(BuiltinMemCpyRef->Retain()), + Loc, move_arg(CallArgs), + Commas.data(), Loc); + assert(!Call.isInvalid() && "Call to __builtin_memcpy cannot fail!"); Statements.push_back(Call.takeAs<Expr>()); continue; @@ -4712,12 +5085,185 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CopyAssignOperator->setBody(Body.takeAs<Stmt>()); } +CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor( + CXXRecordDecl *ClassDecl) { + // C++ [class.copy]p4: + // If the class definition does not explicitly declare a copy + // constructor, one is declared implicitly. 
+ + // C++ [class.copy]p5: + // The implicitly-declared copy constructor for a class X will + // have the form + // + // X::X(const X&) + // + // if + bool HasConstCopyConstructor = true; + + // -- each direct or virtual base class B of X has a copy + // constructor whose first parameter is of type const B& or + // const volatile B&, and + for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(), + BaseEnd = ClassDecl->bases_end(); + HasConstCopyConstructor && Base != BaseEnd; + ++Base) { + // Virtual bases are handled below. + if (Base->isVirtual()) + continue; + + CXXRecordDecl *BaseClassDecl + = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl()); + if (!BaseClassDecl->hasDeclaredCopyConstructor()) + DeclareImplicitCopyConstructor(BaseClassDecl); + + HasConstCopyConstructor + = BaseClassDecl->hasConstCopyConstructor(Context); + } + + for (CXXRecordDecl::base_class_iterator Base = ClassDecl->vbases_begin(), + BaseEnd = ClassDecl->vbases_end(); + HasConstCopyConstructor && Base != BaseEnd; + ++Base) { + CXXRecordDecl *BaseClassDecl + = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl()); + if (!BaseClassDecl->hasDeclaredCopyConstructor()) + DeclareImplicitCopyConstructor(BaseClassDecl); + + HasConstCopyConstructor + = BaseClassDecl->hasConstCopyConstructor(Context); + } + + // -- for all the nonstatic data members of X that are of a + // class type M (or array thereof), each such class type + // has a copy constructor whose first parameter is of type + // const M& or const volatile M&. 
+ for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(), + FieldEnd = ClassDecl->field_end(); + HasConstCopyConstructor && Field != FieldEnd; + ++Field) { + QualType FieldType = Context.getBaseElementType((*Field)->getType()); + if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) { + CXXRecordDecl *FieldClassDecl + = cast<CXXRecordDecl>(FieldClassType->getDecl()); + if (!FieldClassDecl->hasDeclaredCopyConstructor()) + DeclareImplicitCopyConstructor(FieldClassDecl); + + HasConstCopyConstructor + = FieldClassDecl->hasConstCopyConstructor(Context); + } + } + + // Otherwise, the implicitly declared copy constructor will have + // the form + // + // X::X(X&) + QualType ClassType = Context.getTypeDeclType(ClassDecl); + QualType ArgType = ClassType; + if (HasConstCopyConstructor) + ArgType = ArgType.withConst(); + ArgType = Context.getLValueReferenceType(ArgType); + + // C++ [except.spec]p14: + // An implicitly declared special member function (Clause 12) shall have an + // exception-specification. [...] + ImplicitExceptionSpecification ExceptSpec(Context); + unsigned Quals = HasConstCopyConstructor? Qualifiers::Const : 0; + for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(), + BaseEnd = ClassDecl->bases_end(); + Base != BaseEnd; + ++Base) { + // Virtual bases are handled below. 
+ if (Base->isVirtual()) + continue; + + CXXRecordDecl *BaseClassDecl + = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl()); + if (!BaseClassDecl->hasDeclaredCopyConstructor()) + DeclareImplicitCopyConstructor(BaseClassDecl); + + if (CXXConstructorDecl *CopyConstructor + = BaseClassDecl->getCopyConstructor(Context, Quals)) + ExceptSpec.CalledDecl(CopyConstructor); + } + for (CXXRecordDecl::base_class_iterator Base = ClassDecl->vbases_begin(), + BaseEnd = ClassDecl->vbases_end(); + Base != BaseEnd; + ++Base) { + CXXRecordDecl *BaseClassDecl + = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl()); + if (!BaseClassDecl->hasDeclaredCopyConstructor()) + DeclareImplicitCopyConstructor(BaseClassDecl); + + if (CXXConstructorDecl *CopyConstructor + = BaseClassDecl->getCopyConstructor(Context, Quals)) + ExceptSpec.CalledDecl(CopyConstructor); + } + for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(), + FieldEnd = ClassDecl->field_end(); + Field != FieldEnd; + ++Field) { + QualType FieldType = Context.getBaseElementType((*Field)->getType()); + if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) { + CXXRecordDecl *FieldClassDecl + = cast<CXXRecordDecl>(FieldClassType->getDecl()); + if (!FieldClassDecl->hasDeclaredCopyConstructor()) + DeclareImplicitCopyConstructor(FieldClassDecl); + + if (CXXConstructorDecl *CopyConstructor + = FieldClassDecl->getCopyConstructor(Context, Quals)) + ExceptSpec.CalledDecl(CopyConstructor); + } + } + + // An implicitly-declared copy constructor is an inline public + // member of its class. 
+ DeclarationName Name + = Context.DeclarationNames.getCXXConstructorName( + Context.getCanonicalType(ClassType)); + CXXConstructorDecl *CopyConstructor + = CXXConstructorDecl::Create(Context, ClassDecl, + ClassDecl->getLocation(), Name, + Context.getFunctionType(Context.VoidTy, + &ArgType, 1, + false, 0, + ExceptSpec.hasExceptionSpecification(), + ExceptSpec.hasAnyExceptionSpecification(), + ExceptSpec.size(), + ExceptSpec.data(), + FunctionType::ExtInfo()), + /*TInfo=*/0, + /*isExplicit=*/false, + /*isInline=*/true, + /*isImplicitlyDeclared=*/true); + CopyConstructor->setAccess(AS_public); + CopyConstructor->setImplicit(); + CopyConstructor->setTrivial(ClassDecl->hasTrivialCopyConstructor()); + + // Note that we have declared this constructor. + ClassDecl->setDeclaredCopyConstructor(true); + ++ASTContext::NumImplicitCopyConstructorsDeclared; + + // Add the parameter to the constructor. + ParmVarDecl *FromParam = ParmVarDecl::Create(Context, CopyConstructor, + ClassDecl->getLocation(), + /*IdentifierInfo=*/0, + ArgType, /*TInfo=*/0, + VarDecl::None, + VarDecl::None, 0); + CopyConstructor->setParams(&FromParam, 1); + if (Scope *S = getScopeForContext(ClassDecl)) + PushOnScopeChains(CopyConstructor, S, false); + ClassDecl->addDecl(CopyConstructor); + + return CopyConstructor; +} + void Sema::DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *CopyConstructor, unsigned TypeQuals) { assert((CopyConstructor->isImplicit() && CopyConstructor->isCopyConstructor(TypeQuals) && - !CopyConstructor->isUsed()) && + !CopyConstructor->isUsed(false)) && "DefineImplicitCopyConstructor - call it for implicit copy ctor"); CXXRecordDecl *ClassDecl = CopyConstructor->getParent(); @@ -4810,7 +5356,7 @@ void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) { CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(Record->getDecl()); if (!ClassDecl->isInvalidDecl() && !VD->isInvalidDecl() && !ClassDecl->hasTrivialDestructor() && 
!ClassDecl->isDependentContext()) { - CXXDestructorDecl *Destructor = ClassDecl->getDestructor(Context); + CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl); MarkDeclarationReferenced(VD->getLocation(), Destructor); CheckDestructorAccess(VD->getLocation(), Destructor, PDiag(diag::err_access_dtor_var) @@ -5477,8 +6023,8 @@ VarDecl *Sema::BuildExceptionDeclaration(Scope *S, QualType ExDeclType, /// ActOnExceptionDeclarator - Parsed the exception-declarator in a C++ catch /// handler. Sema::DeclPtrTy Sema::ActOnExceptionDeclarator(Scope *S, Declarator &D) { - TypeSourceInfo *TInfo = 0; - QualType ExDeclType = GetTypeForDeclarator(D, S, &TInfo); + TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S); + QualType ExDeclType = TInfo->getType(); bool Invalid = D.isInvalidType(); IdentifierInfo *II = D.getIdentifier(); @@ -5632,14 +6178,11 @@ Sema::DeclPtrTy Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, // friend templates because ActOnTag never produces a ClassTemplateDecl // for a TUK_Friend. Declarator TheDeclarator(DS, Declarator::MemberContext); - TypeSourceInfo *TSI; - QualType T = GetTypeForDeclarator(TheDeclarator, S, &TSI); + TypeSourceInfo *TSI = GetTypeForDeclarator(TheDeclarator, S); + QualType T = TSI->getType(); if (TheDeclarator.isInvalidType()) return DeclPtrTy(); - if (!TSI) - TSI = Context.getTrivialTypeSourceInfo(T, DS.getSourceRange().getBegin()); - // This is definitely an error in C++98. It's probably meant to // be forbidden in C++0x, too, but the specification is just // poorly written. @@ -5701,8 +6244,8 @@ Sema::ActOnFriendFunctionDecl(Scope *S, assert(DS.getStorageClassSpec() == DeclSpec::SCS_unspecified); SourceLocation Loc = D.getIdentifierLoc(); - TypeSourceInfo *TInfo = 0; - QualType T = GetTypeForDeclarator(D, S, &TInfo); + TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S); + QualType T = TInfo->getType(); // C++ [class.friend]p1 // A friend of a class is a function or class.... 
@@ -5759,13 +6302,18 @@ Sema::ActOnFriendFunctionDecl(Scope *S, LookupQualifiedName(Previous, DC); - // If searching in that context implicitly found a declaration in - // a different context, treat it like it wasn't found at all. + // Ignore things found implicitly in the wrong scope. // TODO: better diagnostics for this case. Suggesting the right // qualified scope would be nice... - // FIXME: getRepresentativeDecl() is not right here at all - if (Previous.empty() || - !Previous.getRepresentativeDecl()->getDeclContext()->Equals(DC)) { + LookupResult::Filter F = Previous.makeFilter(); + while (F.hasNext()) { + NamedDecl *D = F.next(); + if (!D->getDeclContext()->getLookupContext()->Equals(DC)) + F.erase(); + } + F.done(); + + if (Previous.empty()) { D.setInvalidType(); Diag(Loc, diag::err_qualified_friend_not_found) << Name << T; return DeclPtrTy(); @@ -6061,9 +6609,9 @@ Sema::ActOnCXXConditionDeclaration(Scope *S, Declarator &D) { assert(D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef && "Parser allowed 'typedef' as storage class of condition decl."); - TypeSourceInfo *TInfo = 0; TagDecl *OwnedTag = 0; - QualType Ty = GetTypeForDeclarator(D, S, &TInfo, &OwnedTag); + TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S, &OwnedTag); + QualType Ty = TInfo->getType(); if (Ty->isFunctionType()) { // The declarator shall not specify a function... // We exit without creating a CXXConditionDeclExpr because a FunctionDecl @@ -6127,7 +6675,7 @@ bool Sema::DefineUsedVTables() { if (const CXXMethodDecl *KeyFunction = Context.getKeyFunction(DynamicClasses[I])) { const FunctionDecl *Definition = 0; - if (KeyFunction->getBody(Definition)) + if (KeyFunction->hasBody(Definition)) MarkVTableUsed(Definition->getLocation(), DynamicClasses[I], true); } } @@ -6150,7 +6698,7 @@ bool Sema::DefineUsedVTables() { // defined in another translation unit, we don't need to emit the // vtable even though we're using it. 
const CXXMethodDecl *KeyFunction = Context.getKeyFunction(Class); - if (KeyFunction && !KeyFunction->getBody()) { + if (KeyFunction && !KeyFunction->hasBody()) { switch (KeyFunction->getTemplateSpecializationKind()) { case TSK_Undeclared: case TSK_ExplicitSpecialization: @@ -6198,7 +6746,7 @@ bool Sema::DefineUsedVTables() { // Optionally warn if we're emitting a weak vtable. if (Class->getLinkage() == ExternalLinkage && Class->getTemplateSpecializationKind() != TSK_ImplicitInstantiation) { - if (!KeyFunction || (KeyFunction->getBody() && KeyFunction->isInlined())) + if (!KeyFunction || (KeyFunction->hasBody() && KeyFunction->isInlined())) Diag(Class->getLocation(), diag::warn_weak_vtable) << Class; } } @@ -6279,8 +6827,7 @@ void Sema::SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation) { = Context.getBaseElementType(Field->getType()) ->getAs<RecordType>()) { CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl()); - if (CXXDestructorDecl *Destructor - = const_cast<CXXDestructorDecl*>(RD->getDestructor(Context))) { + if (CXXDestructorDecl *Destructor = LookupDestructor(RD)) { MarkDeclarationReferenced(Field->getLocation(), Destructor); CheckDestructorAccess(Field->getLocation(), Destructor, PDiag(diag::err_access_dtor_ivar) diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp index 3b05f5a..21aeb59 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp @@ -414,7 +414,7 @@ ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc) { - ObjCCategoryDecl *CDecl = 0; + ObjCCategoryDecl *CDecl; ObjCInterfaceDecl *IDecl = getObjCInterfaceDecl(ClassName, ClassLoc, true); /// Check that class of this category is already completely declared. 
@@ -429,28 +429,21 @@ ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc, return DeclPtrTy::make(CDecl); } - if (!CategoryName) { - // Class extensions require a special treatment. Use an existing one. - // Note that 'getClassExtension()' can return NULL. - CDecl = IDecl->getClassExtension(); - if (IDecl->getImplementation()) { - Diag(ClassLoc, diag::err_class_extension_after_impl) << ClassName; - Diag(IDecl->getImplementation()->getLocation(), - diag::note_implementation_declared); - } + if (!CategoryName && IDecl->getImplementation()) { + Diag(ClassLoc, diag::err_class_extension_after_impl) << ClassName; + Diag(IDecl->getImplementation()->getLocation(), + diag::note_implementation_declared); } - if (!CDecl) { - CDecl = ObjCCategoryDecl::Create(Context, CurContext, AtInterfaceLoc, - ClassLoc, CategoryLoc, CategoryName); - // FIXME: PushOnScopeChains? - CurContext->addDecl(CDecl); + CDecl = ObjCCategoryDecl::Create(Context, CurContext, AtInterfaceLoc, + ClassLoc, CategoryLoc, CategoryName); + // FIXME: PushOnScopeChains? + CurContext->addDecl(CDecl); - CDecl->setClassInterface(IDecl); - // Insert first use of class extension to the list of class's categories. - if (!CategoryName) - CDecl->insertNextClassCategory(); - } + CDecl->setClassInterface(IDecl); + // Insert class extension to the list of class's categories. + if (!CategoryName) + CDecl->insertNextClassCategory(); // If the interface is deprecated, warn about it. 
(void)DiagnoseUseOfDecl(IDecl, ClassLoc); @@ -969,13 +962,11 @@ void Sema::ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, CheckProtocolMethodDefs(IMPDecl->getLocation(), *PI, IncompleteImpl, InsMap, ClsMap, I); // Check class extensions (unnamed categories) - for (ObjCCategoryDecl *Categories = I->getCategoryList(); - Categories; Categories = Categories->getNextClassCategory()) { - if (Categories->IsClassExtension()) { - ImplMethodsVsClassMethods(S, IMPDecl, Categories, IncompleteImpl); - break; - } - } + for (const ObjCCategoryDecl *Categories = I->getFirstClassExtension(); + Categories; Categories = Categories->getNextClassExtension()) + ImplMethodsVsClassMethods(S, IMPDecl, + const_cast<ObjCCategoryDecl*>(Categories), + IncompleteImpl); } else if (ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl)) { // For extended class, unimplemented methods in its protocols will // be reported in the primary class. @@ -1775,9 +1766,9 @@ Sema::DeclPtrTy Sema::ActOnObjCExceptionDecl(Scope *S, Declarator &D) { if (getLangOptions().CPlusPlus) CheckExtraCXXDefaultArguments(D); - TypeSourceInfo *TInfo = 0; TagDecl *OwnedDecl = 0; - QualType ExceptionType = GetTypeForDeclarator(D, S, &TInfo, &OwnedDecl); + TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S, &OwnedDecl); + QualType ExceptionType = TInfo->getType(); if (getLangOptions().CPlusPlus && OwnedDecl && OwnedDecl->isDefinition()) { // Objective-C++: Types shall not be defined in exception types. @@ -1821,7 +1812,8 @@ void Sema::CollectIvarsToConstructOrDestruct(const ObjCInterfaceDecl *OI, } // Find ivars to construct/destruct in class extension. 
- if (const ObjCCategoryDecl *CDecl = OI->getClassExtension()) { + for (const ObjCCategoryDecl *CDecl = OI->getFirstClassExtension(); CDecl; + CDecl = CDecl->getNextClassExtension()) { for (ObjCCategoryDecl::ivar_iterator I = CDecl->ivar_begin(), E = CDecl->ivar_end(); I != E; ++I) { ObjCIvarDecl *Iv = (*I); diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp index 7d73fe4..34a479a 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp @@ -249,6 +249,10 @@ bool Sema::CheckEquivalentExceptionSpec(const PartialDiagnostic &DiagID, SourceLocation NewLoc, bool *MissingExceptionSpecification, bool *MissingEmptyExceptionSpecification) { + // Just completely ignore this under -fno-exceptions. + if (!getLangOptions().Exceptions) + return false; + if (MissingExceptionSpecification) *MissingExceptionSpecification = false; @@ -318,6 +322,11 @@ bool Sema::CheckExceptionSpecSubset( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc) { + + // Just auto-succeed under -fno-exceptions. + if (!getLangOptions().Exceptions) + return false; + // FIXME: As usual, we could be more specific in our error messages, but // that better waits until we've got types with source locations. 
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp index f745352..5f46a97 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp @@ -164,7 +164,7 @@ void Sema::DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, if (!sentinelExpr) return; if (sentinelExpr->isTypeDependent()) return; if (sentinelExpr->isValueDependent()) return; - if (sentinelExpr->getType()->isPointerType() && + if (sentinelExpr->getType()->isAnyPointerType() && sentinelExpr->IgnoreParenCasts()->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) return; @@ -388,7 +388,7 @@ Sema::ActOnStringLiteral(const Token *StringToks, unsigned NumStringToks) { if (Literal.Pascal) StrTy = Context.UnsignedCharTy; // A C++ string literal has a const-qualified element type (C++ 2.13.4p1). - if (getLangOptions().CPlusPlus || getLangOptions().ConstStrings ) + if (getLangOptions().CPlusPlus || getLangOptions().ConstStrings) StrTy.addConst(); // Get an array type for the string, according to C99 6.4.5. This includes @@ -475,6 +475,7 @@ Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, SourceLocation Loc, if (isa<NonTypeTemplateParmDecl>(VD)) { // Non-type template parameters can be referenced anywhere they are // visible. + Ty = Ty.getNonLValueExprType(Context); } else if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(CurContext)) { if (const FunctionDecl *FD = MD->getParent()->isLocalClass()) { if (VD->hasLocalStorage() && VD->getDeclContext() != CurContext) { @@ -677,26 +678,6 @@ static void DecomposeUnqualifiedId(Sema &SemaRef, } } -/// Decompose the given template name into a list of lookup results. -/// -/// The unqualified ID must name a non-dependent template, which can -/// be more easily tested by checking whether DecomposeUnqualifiedId -/// found template arguments. 
-static void DecomposeTemplateName(LookupResult &R, const UnqualifiedId &Id) { - assert(Id.getKind() == UnqualifiedId::IK_TemplateId); - TemplateName TName = - Sema::TemplateTy::make(Id.TemplateId->Template).getAsVal<TemplateName>(); - - if (TemplateDecl *TD = TName.getAsTemplateDecl()) - R.addDecl(TD); - else if (OverloadedTemplateStorage *OT = TName.getAsOverloadedTemplate()) - for (OverloadedTemplateStorage::iterator I = OT->begin(), E = OT->end(); - I != E; ++I) - R.addDecl(*I); - - R.resolveKind(); -} - /// Determines whether the given record is "fully-formed" at the given /// location, i.e. whether a qualified lookup into it is assured of /// getting consistent results already. @@ -889,8 +870,8 @@ static void DiagnoseInstanceReference(Sema &SemaRef, /// Diagnose an empty lookup. /// /// \return false if new lookup candidates were found -bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, - LookupResult &R, CorrectTypoContext CTC) { +bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, + CorrectTypoContext CTC) { DeclarationName Name = R.getLookupName(); unsigned diagnostic = diag::err_undeclared_var_use; @@ -906,7 +887,7 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, // unqualified lookup. This is useful when (for example) the // original lookup would not have found something because it was a // dependent name. - for (DeclContext *DC = SS.isEmpty()? CurContext : 0; + for (DeclContext *DC = SS.isEmpty() ? CurContext : 0; DC; DC = DC->getParent()) { if (isa<CXXRecordDecl>(DC)) { LookupQualifiedName(R, DC); @@ -923,11 +904,29 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, // Give a code modification hint to insert 'this->'. // TODO: fixit for inserting 'Base<T>::' in the other cases. // Actually quite difficult! 
- if (isInstance) + if (isInstance) { Diag(R.getNameLoc(), diagnostic) << Name << FixItHint::CreateInsertion(R.getNameLoc(), "this->"); - else + + UnresolvedLookupExpr *ULE = cast<UnresolvedLookupExpr>( + CallsUndergoingInstantiation.back()->getCallee()); + CXXMethodDecl *DepMethod = cast<CXXMethodDecl>( + CurMethod->getInstantiatedFromMemberFunction()); + QualType DepThisType = DepMethod->getThisType(Context); + CXXThisExpr *DepThis = new (Context) CXXThisExpr(R.getNameLoc(), + DepThisType, false); + TemplateArgumentListInfo TList; + if (ULE->hasExplicitTemplateArgs()) + ULE->copyTemplateArgumentsInto(TList); + CXXDependentScopeMemberExpr *DepExpr = + CXXDependentScopeMemberExpr::Create( + Context, DepThis, DepThisType, true, SourceLocation(), + ULE->getQualifier(), ULE->getQualifierRange(), NULL, Name, + R.getNameLoc(), &TList); + CallsUndergoingInstantiation.back()->setCallee(DepExpr); + } else { Diag(R.getNameLoc(), diagnostic) << Name; + } // Do we really want to note all of these? for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) @@ -941,7 +940,7 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, // We didn't find anything, so try to correct for a typo. DeclarationName Corrected; - if (S && (Corrected = CorrectTypo(R, S, &SS, false, CTC))) { + if (S && (Corrected = CorrectTypo(R, S, &SS, 0, false, CTC))) { if (!R.empty()) { if (isa<ValueDecl>(*R.begin()) || isa<FunctionTemplateDecl>(*R.begin())) { if (SS.isEmpty()) @@ -1746,8 +1745,29 @@ Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS, // Variable will be bound by-copy, make it const within the closure. 
ExprTy.addConst(); - return Owned(new (Context) BlockDeclRefExpr(VD, ExprTy, Loc, false, - constAdded)); + QualType T = VD->getType(); + BlockDeclRefExpr *BDRE = new (Context) BlockDeclRefExpr(VD, + ExprTy, Loc, false, + constAdded); + if (getLangOptions().CPlusPlus) { + if (!T->isDependentType() && !T->isReferenceType()) { + Expr *E = new (Context) + DeclRefExpr(const_cast<ValueDecl*>(BDRE->getDecl()), T, + SourceLocation()); + + OwningExprResult Res = PerformCopyInitialization( + InitializedEntity::InitializeBlock(VD->getLocation(), + T, false), + SourceLocation(), + Owned(E)); + if (!Res.isInvalid()) { + Res = MaybeCreateCXXExprWithTemporaries(move(Res)); + Expr *Init = Res.takeAs<Expr>(); + BDRE->setCopyConstructorExpr(Init); + } + } + } + return Owned(BDRE); } // If this reference is not in a block or if the referenced variable is // within the block, create a normal DeclRefExpr. @@ -2560,13 +2580,23 @@ bool Sema::CheckQualifiedMemberReference(Expr *BaseExpr, static bool LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R, SourceRange BaseRange, const RecordType *RTy, - SourceLocation OpLoc, CXXScopeSpec &SS) { + SourceLocation OpLoc, CXXScopeSpec &SS, + bool HasTemplateArgs) { RecordDecl *RDecl = RTy->getDecl(); if (SemaRef.RequireCompleteType(OpLoc, QualType(RTy, 0), SemaRef.PDiag(diag::err_typecheck_incomplete_tag) << BaseRange)) return true; + if (HasTemplateArgs) { + // LookupTemplateName doesn't expect these both to exist simultaneously. + QualType ObjectType = SS.isSet() ? 
QualType() : QualType(RTy, 0); + + bool MOUS; + SemaRef.LookupTemplateName(R, 0, SS, ObjectType, false, MOUS); + return false; + } + DeclContext *DC = RDecl; if (SS.isSet()) { // If the member name was a qualified-id, look into the @@ -2610,6 +2640,7 @@ LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R, return false; } else { R.clear(); + R.setLookupName(Name); } return false; @@ -2640,14 +2671,14 @@ Sema::BuildMemberReferenceExpr(ExprArg BaseArg, QualType BaseType, if (IsArrow) RecordTy = RecordTy->getAs<PointerType>()->getPointeeType(); if (LookupMemberExprInRecord(*this, R, SourceRange(), RecordTy->getAs<RecordType>(), - OpLoc, SS)) + OpLoc, SS, TemplateArgs != 0)) return ExprError(); // Explicit member accesses. } else { OwningExprResult Result = LookupMemberExpr(R, Base, IsArrow, OpLoc, - SS, /*ObjCImpDecl*/ DeclPtrTy()); + SS, /*ObjCImpDecl*/ DeclPtrTy(), TemplateArgs != 0); if (Result.isInvalid()) { Owned(Base); @@ -2860,7 +2891,7 @@ Sema::OwningExprResult Sema::LookupMemberExpr(LookupResult &R, Expr *&BaseExpr, bool &IsArrow, SourceLocation OpLoc, CXXScopeSpec &SS, - DeclPtrTy ObjCImpDecl) { + DeclPtrTy ObjCImpDecl, bool HasTemplateArgs) { assert(BaseExpr && "no base expression"); // Perform default conversions. @@ -2893,6 +2924,7 @@ Sema::LookupMemberExpr(LookupResult &R, Expr *&BaseExpr, OwningExprResult NewBase = ActOnCallExpr(0, ExprArg(*this, BaseExpr), Loc, MultiExprArg(*this, 0, 0), 0, Loc); + BaseExpr = 0; if (NewBase.isInvalid()) return ExprError(); @@ -2973,7 +3005,7 @@ Sema::LookupMemberExpr(LookupResult &R, Expr *&BaseExpr, QualType PType; if (Getter) - PType = Getter->getResultType(); + PType = Getter->getSendResultType(); else // Get the expression type from Setter's incoming parameter. PType = (*(Setter->param_end() -1))->getType(); @@ -3037,7 +3069,7 @@ Sema::LookupMemberExpr(LookupResult &R, Expr *&BaseExpr, // Handle field access to simple records. 
if (const RecordType *RTy = BaseType->getAs<RecordType>()) { if (LookupMemberExprInRecord(*this, R, BaseExpr->getSourceRange(), - RTy, OpLoc, SS)) + RTy, OpLoc, SS, HasTemplateArgs)) return ExprError(); return Owned((Expr*) 0); } @@ -3069,6 +3101,9 @@ Sema::LookupMemberExpr(LookupResult &R, Expr *&BaseExpr, IV->getNameAsString()); Diag(IV->getLocation(), diag::note_previous_decl) << IV->getDeclName(); + } else { + Res.clear(); + Res.setLookupName(Member); } } @@ -3146,7 +3181,7 @@ Sema::LookupMemberExpr(LookupResult &R, Expr *&BaseExpr, return ExprError(); return Owned(ObjCMessageExpr::Create(Context, - OMD->getResultType().getNonReferenceType(), + OMD->getSendResultType(), OpLoc, BaseExpr, Sel, OMD, NULL, 0, MemberLoc)); } @@ -3239,44 +3274,24 @@ Sema::OwningExprResult Sema::ActOnMemberAccessExpr(Scope *S, ExprArg BaseArg, TemplateArgs); } else { LookupResult R(*this, Name, NameLoc, LookupMemberName); - if (TemplateArgs) { - // Re-use the lookup done for the template name. - DecomposeTemplateName(R, Id); - - // Re-derive the naming class. 
- if (SS.isSet()) { - NestedNameSpecifier *Qualifier - = static_cast<NestedNameSpecifier *>(SS.getScopeRep()); - if (const Type *Ty = Qualifier->getAsType()) - if (CXXRecordDecl *NamingClass = Ty->getAsCXXRecordDecl()) - R.setNamingClass(NamingClass); - } else { - QualType BaseType = Base->getType(); - if (const PointerType *Ptr = BaseType->getAs<PointerType>()) - BaseType = Ptr->getPointeeType(); - if (CXXRecordDecl *NamingClass = BaseType->getAsCXXRecordDecl()) - R.setNamingClass(NamingClass); - } - } else { - Result = LookupMemberExpr(R, Base, IsArrow, OpLoc, - SS, ObjCImpDecl); + Result = LookupMemberExpr(R, Base, IsArrow, OpLoc, + SS, ObjCImpDecl, TemplateArgs != 0); - if (Result.isInvalid()) { - Owned(Base); - return ExprError(); - } + if (Result.isInvalid()) { + Owned(Base); + return ExprError(); + } - if (Result.get()) { - // The only way a reference to a destructor can be used is to - // immediately call it, which falls into this case. If the - // next token is not a '(', produce a diagnostic and build the - // call now. - if (!HasTrailingLParen && - Id.getKind() == UnqualifiedId::IK_DestructorName) - return DiagnoseDtorReference(NameLoc, move(Result)); + if (Result.get()) { + // The only way a reference to a destructor can be used is to + // immediately call it, which falls into this case. If the + // next token is not a '(', produce a diagnostic and build the + // call now. 
+ if (!HasTrailingLParen && + Id.getKind() == UnqualifiedId::IK_DestructorName) + return DiagnoseDtorReference(NameLoc, move(Result)); - return move(Result); - } + return move(Result); } Result = BuildMemberReferenceExpr(ExprArg(*this, Base), Base->getType(), @@ -3304,9 +3319,10 @@ Sema::OwningExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc, MultiLevelTemplateArgumentList ArgList = getTemplateInstantiationArgs(FD, 0, /*RelativeToPrimary=*/true); - InstantiatingTemplate Inst(*this, CallLoc, Param, - ArgList.getInnermost().getFlatArgumentList(), - ArgList.getInnermost().flat_size()); + std::pair<const TemplateArgument *, unsigned> Innermost + = ArgList.getInnermost(); + InstantiatingTemplate Inst(*this, CallLoc, Param, Innermost.first, + Innermost.second); OwningExprResult Result = SubstExpr(UninstExpr, ArgList); if (Result.isInvalid()) @@ -3560,7 +3576,7 @@ Sema::ActOnCallExpr(Scope *S, ExprArg fn, SourceLocation LParenLoc, BO->getOpcode() == BinaryOperator::PtrMemI) { if (const FunctionProtoType *FPT = BO->getType()->getAs<FunctionProtoType>()) { - QualType ResultTy = FPT->getResultType().getNonReferenceType(); + QualType ResultTy = FPT->getCallResultType(Context); ExprOwningPtr<CXXMemberCallExpr> TheCall(this, new (Context) CXXMemberCallExpr(Context, BO, Args, @@ -3650,7 +3666,7 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, return ExprError(); // We know the result type of the call, set it. - TheCall->setType(FuncT->getResultType().getNonReferenceType()); + TheCall->setType(FuncT->getCallResultType(Context)); if (const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FuncT)) { if (ConvertArgumentsForCall(&*TheCall, Fn, FDecl, Proto, Args, NumArgs, @@ -3663,7 +3679,7 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, // Check if we have too few/too many template arguments, based // on our knowledge of the function definition. 
const FunctionDecl *Def = 0; - if (FDecl->getBody(Def) && NumArgs != Def->param_size()) { + if (FDecl->hasBody(Def) && NumArgs != Def->param_size()) { const FunctionProtoType *Proto = Def->getType()->getAs<FunctionProtoType>(); if (!Proto || !(Proto->isVariadic() && NumArgs >= Def->param_size())) { @@ -3893,12 +3909,13 @@ bool Sema::CheckCastTypes(SourceRange TyR, QualType castType, Expr *&castExpr, if (!castType->isArithmeticType()) { QualType castExprType = castExpr->getType(); - if (!castExprType->isIntegralType() && castExprType->isArithmeticType()) + if (!castExprType->isIntegralType(Context) && + castExprType->isArithmeticType()) return Diag(castExpr->getLocStart(), diag::err_cast_pointer_from_non_pointer_int) << castExprType << castExpr->getSourceRange(); } else if (!castExpr->getType()->isArithmeticType()) { - if (!castType->isIntegralType() && castType->isArithmeticType()) + if (!castType->isIntegralType(Context) && castType->isArithmeticType()) return Diag(castExpr->getLocStart(), diag::err_cast_pointer_to_non_pointer_int) << castType << castExpr->getSourceRange(); @@ -3992,7 +4009,8 @@ Sema::BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, return ExprError(); Op.release(); - return Owned(new (Context) CStyleCastExpr(Ty->getType().getNonReferenceType(), + return Owned(new (Context) CStyleCastExpr( + Ty->getType().getNonLValueExprType(Context), Kind, castExpr, BasePath, Ty, LParenLoc, RParenLoc)); } @@ -4021,15 +4039,26 @@ Sema::ActOnCastOfParenListExpr(Scope *S, SourceLocation LParenLoc, TypeSourceInfo *TInfo) { ParenListExpr *PE = (ParenListExpr *)Op.get(); QualType Ty = TInfo->getType(); + bool isAltiVecLiteral = false; - // If this is an altivec initializer, '(' type ')' '(' init, ..., init ')' - // then handle it as such. + // Check for an altivec literal, + // i.e. all the elements are integer constants. 
if (getLangOptions().AltiVec && Ty->isVectorType()) { if (PE->getNumExprs() == 0) { Diag(PE->getExprLoc(), diag::err_altivec_empty_initializer); return ExprError(); } + if (PE->getNumExprs() == 1) { + if (!PE->getExpr(0)->getType()->isVectorType()) + isAltiVecLiteral = true; + } + else + isAltiVecLiteral = true; + } + // If this is an altivec initializer, '(' type ')' '(' init, ..., init ')' + // then handle it as such. + if (isAltiVecLiteral) { llvm::SmallVector<Expr *, 8> initExprs; for (unsigned i = 0, e = PE->getNumExprs(); i != e; ++i) initExprs.push_back(PE->getExpr(i)); @@ -4634,7 +4663,7 @@ Sema::CheckAssignmentConstraints(QualType lhsType, QualType rhsType) { if (lhsType->isExtVectorType()) { if (rhsType->isExtVectorType()) return lhsType == rhsType ? Compatible : Incompatible; - if (!rhsType->isVectorType() && rhsType->isArithmeticType()) + if (rhsType->isArithmeticType()) return Compatible; } @@ -4877,7 +4906,7 @@ Sema::CheckSingleAssignmentConstraints(QualType lhsType, Expr *&rExpr) { // The getNonReferenceType() call makes sure that the resulting expression // does not have reference type. if (result != Incompatible && rExpr->getType() != lhsType) - ImpCastExprToType(rExpr, lhsType.getNonReferenceType(), + ImpCastExprToType(rExpr, lhsType.getNonLValueExprType(Context), CastExpr::CK_Unknown); return result; } @@ -4932,7 +4961,7 @@ QualType Sema::CheckVectorOperands(SourceLocation Loc, Expr *&lex, Expr *&rex) { // Handle the case of an ext vector and scalar. 
if (const ExtVectorType *LV = lhsType->getAs<ExtVectorType>()) { QualType EltTy = LV->getElementType(); - if (EltTy->isIntegralType() && rhsType->isIntegralType()) { + if (EltTy->isIntegralType(Context) && rhsType->isIntegralType(Context)) { if (Context.getIntegerTypeOrder(EltTy, rhsType) >= 0) { ImpCastExprToType(rex, lhsType, CastExpr::CK_IntegralCast); if (swapped) std::swap(rex, lex); @@ -5263,6 +5292,16 @@ QualType Sema::CheckShiftOperands(Expr *&lex, Expr *&rex, SourceLocation Loc, return LHSTy; } +static bool IsWithinTemplateSpecialization(Decl *D) { + if (DeclContext *DC = D->getDeclContext()) { + if (isa<ClassTemplateSpecializationDecl>(DC)) + return true; + if (FunctionDecl *FD = dyn_cast<FunctionDecl>(DC)) + return FD->isFunctionTemplateSpecialization(); + } + return false; +} + // C99 6.5.8, C++ [expr.rel] QualType Sema::CheckCompareOperands(Expr *&lex, Expr *&rex, SourceLocation Loc, unsigned OpaqueOpc, bool isRelational) { @@ -5272,30 +5311,55 @@ QualType Sema::CheckCompareOperands(Expr *&lex, Expr *&rex, SourceLocation Loc, if (lex->getType()->isVectorType() || rex->getType()->isVectorType()) return CheckVectorCompareOperands(lex, rex, Loc, isRelational); - // C99 6.5.8p3 / C99 6.5.9p4 - if (lex->getType()->isArithmeticType() && rex->getType()->isArithmeticType()) - UsualArithmeticConversions(lex, rex); - else { - UsualUnaryConversions(lex); - UsualUnaryConversions(rex); - } QualType lType = lex->getType(); QualType rType = rex->getType(); - if (!lType->isFloatingType() - && !(lType->isBlockPointerType() && isRelational)) { + if (!lType->hasFloatingRepresentation() && + !(lType->isBlockPointerType() && isRelational)) { // For non-floating point types, check for self-comparisons of the form // x == x, x != x, x < x, etc. These always evaluate to a constant, and // often indicate logic errors in the program. - // NOTE: Don't warn about comparisons of enum constants. These can arise - // from macro expansions, and are usually quite deliberate. 
+ // + // NOTE: Don't warn about comparison expressions resulting from macro + // expansion. Also don't warn about comparisons which are only self + // comparisons within a template specialization. The warnings should catch + // obvious cases in the definition of the template anyways. The idea is to + // warn when the typed comparison operator will always evaluate to the same + // result. Expr *LHSStripped = lex->IgnoreParens(); Expr *RHSStripped = rex->IgnoreParens(); - if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LHSStripped)) - if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RHSStripped)) - if (DRL->getDecl() == DRR->getDecl() && - !isa<EnumConstantDecl>(DRL->getDecl())) - DiagRuntimeBehavior(Loc, PDiag(diag::warn_selfcomparison)); + if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LHSStripped)) { + if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RHSStripped)) { + if (DRL->getDecl() == DRR->getDecl() && !Loc.isMacroID() && + !IsWithinTemplateSpecialization(DRL->getDecl())) { + DiagRuntimeBehavior(Loc, PDiag(diag::warn_comparison_always) + << 0 // self- + << (Opc == BinaryOperator::EQ + || Opc == BinaryOperator::LE + || Opc == BinaryOperator::GE)); + } else if (lType->isArrayType() && rType->isArrayType() && + !DRL->getDecl()->getType()->isReferenceType() && + !DRR->getDecl()->getType()->isReferenceType()) { + // what is it always going to eval to? + char always_evals_to; + switch(Opc) { + case BinaryOperator::EQ: // e.g. array1 == array2 + always_evals_to = 0; // false + break; + case BinaryOperator::NE: // e.g. array1 != array2 + always_evals_to = 1; // true + break; + default: + // best we can say is 'a constant' + always_evals_to = 2; // e.g. 
array1 <= array2 + break; + } + DiagRuntimeBehavior(Loc, PDiag(diag::warn_comparison_always) + << 1 // array + << always_evals_to); + } + } + } if (isa<CastExpr>(LHSStripped)) LHSStripped = LHSStripped->IgnoreParenCasts(); @@ -5338,6 +5402,17 @@ QualType Sema::CheckCompareOperands(Expr *&lex, Expr *&rex, SourceLocation Loc, } } + // C99 6.5.8p3 / C99 6.5.9p4 + if (lex->getType()->isArithmeticType() && rex->getType()->isArithmeticType()) + UsualArithmeticConversions(lex, rex); + else { + UsualUnaryConversions(lex); + UsualUnaryConversions(rex); + } + + lType = lex->getType(); + rType = rex->getType(); + // The result of comparisons is 'bool' in C++, 'int' in C. QualType ResultTy = getLangOptions().CPlusPlus ? Context.BoolTy:Context.IntTy; @@ -5346,7 +5421,7 @@ QualType Sema::CheckCompareOperands(Expr *&lex, Expr *&rex, SourceLocation Loc, return ResultTy; } else { // Check for comparisons of floating point operands using != and ==. - if (lType->isFloatingType() && rType->isFloatingType()) + if (lType->hasFloatingRepresentation()) CheckFloatComparison(Loc,lex,rex); if (lType->isArithmeticType() && rType->isArithmeticType()) @@ -5358,9 +5433,8 @@ QualType Sema::CheckCompareOperands(Expr *&lex, Expr *&rex, SourceLocation Loc, bool RHSIsNull = rex->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull); - // All of the following pointer related warnings are GCC extensions, except - // when handling null pointer constants. One day, we can consider making them - // errors (when -pedantic-errors is enabled). + // All of the following pointer-related warnings are GCC extensions, except + // when handling null pointer constants. 
if (lType->isPointerType() && rType->isPointerType()) { // C99 6.5.8p2 QualType LCanPointeeTy = Context.getCanonicalType(lType->getAs<PointerType>()->getPointeeType()); @@ -5374,10 +5448,19 @@ QualType Sema::CheckCompareOperands(Expr *&lex, Expr *&rex, SourceLocation Loc, (LCanPointeeTy->isVoidType() || RCanPointeeTy->isVoidType())) { // Valid unless comparison between non-null pointer and function pointer // This is a gcc extension compatibility comparison. + // In a SFINAE context, we treat this as a hard error to maintain + // conformance with the C++ standard. if ((LCanPointeeTy->isFunctionType() || RCanPointeeTy->isFunctionType()) && !LHSIsNull && !RHSIsNull) { - Diag(Loc, diag::ext_typecheck_comparison_of_fptr_to_void) + Diag(Loc, + isSFINAEContext()? + diag::err_typecheck_comparison_of_fptr_to_void + : diag::ext_typecheck_comparison_of_fptr_to_void) << lType << rType << lex->getSourceRange() << rex->getSourceRange(); + + if (isSFINAEContext()) + return QualType(); + ImpCastExprToType(rex, lType, CastExpr::CK_BitCast); return ResultTy; } @@ -5541,40 +5624,36 @@ QualType Sema::CheckCompareOperands(Expr *&lex, Expr *&rex, SourceLocation Loc, return ResultTy; } } - if (lType->isAnyPointerType() && rType->isIntegerType()) { + if ((lType->isAnyPointerType() && rType->isIntegerType()) || + (lType->isIntegerType() && rType->isAnyPointerType())) { unsigned DiagID = 0; - if (RHSIsNull) { - if (isRelational) + bool isError = false; + if ((LHSIsNull && lType->isIntegerType()) || + (RHSIsNull && rType->isIntegerType())) { + if (isRelational && !getLangOptions().CPlusPlus) DiagID = diag::ext_typecheck_ordered_comparison_of_pointer_and_zero; - } else if (isRelational) + } else if (isRelational && !getLangOptions().CPlusPlus) DiagID = diag::ext_typecheck_ordered_comparison_of_pointer_integer; - else + else if (getLangOptions().CPlusPlus) { + DiagID = diag::err_typecheck_comparison_of_pointer_integer; + isError = true; + } else DiagID = 
diag::ext_typecheck_comparison_of_pointer_integer; if (DiagID) { Diag(Loc, DiagID) << lType << rType << lex->getSourceRange() << rex->getSourceRange(); + if (isError) + return QualType(); } - ImpCastExprToType(rex, lType, CastExpr::CK_IntegralToPointer); - return ResultTy; - } - if (lType->isIntegerType() && rType->isAnyPointerType()) { - unsigned DiagID = 0; - if (LHSIsNull) { - if (isRelational) - DiagID = diag::ext_typecheck_ordered_comparison_of_pointer_and_zero; - } else if (isRelational) - DiagID = diag::ext_typecheck_ordered_comparison_of_pointer_integer; + + if (lType->isIntegerType()) + ImpCastExprToType(lex, rType, CastExpr::CK_IntegralToPointer); else - DiagID = diag::ext_typecheck_comparison_of_pointer_integer; - - if (DiagID) { - Diag(Loc, DiagID) - << lType << rType << lex->getSourceRange() << rex->getSourceRange(); - } - ImpCastExprToType(lex, rType, CastExpr::CK_IntegralToPointer); + ImpCastExprToType(rex, lType, CastExpr::CK_IntegralToPointer); return ResultTy; } + // Handle block pointers. if (!isRelational && RHSIsNull && lType->isBlockPointerType() && rType->isIntegerType()) { @@ -5608,16 +5687,20 @@ QualType Sema::CheckVectorCompareOperands(Expr *&lex, Expr *&rex, // For non-floating point types, check for self-comparisons of the form // x == x, x != x, x < x, etc. These always evaluate to a constant, and // often indicate logic errors in the program. - if (!lType->isFloatingType()) { + if (!lType->hasFloatingRepresentation()) { if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(lex->IgnoreParens())) if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(rex->IgnoreParens())) if (DRL->getDecl() == DRR->getDecl()) - DiagRuntimeBehavior(Loc, PDiag(diag::warn_selfcomparison)); + DiagRuntimeBehavior(Loc, + PDiag(diag::warn_comparison_always) + << 0 // self- + << 2 // "a constant" + ); } // Check for comparisons of floating point operands using != and ==. 
- if (!isRelational && lType->isFloatingType()) { - assert (rType->isFloatingType()); + if (!isRelational && lType->hasFloatingRepresentation()) { + assert (rType->hasFloatingRepresentation()); CheckFloatComparison(Loc,lex,rex); } @@ -5652,7 +5735,25 @@ inline QualType Sema::CheckBitwiseOperands( } inline QualType Sema::CheckLogicalOperands( // C99 6.5.[13,14] - Expr *&lex, Expr *&rex, SourceLocation Loc) { + Expr *&lex, Expr *&rex, SourceLocation Loc, unsigned Opc) { + + // Diagnose cases where the user write a logical and/or but probably meant a + // bitwise one. We do this when the LHS is a non-bool integer and the RHS + // is a constant. + if (lex->getType()->isIntegerType() && !lex->getType()->isBooleanType() && + rex->getType()->isIntegerType() && rex->isEvaluatable(Context) && + // Don't warn if the RHS is a (constant folded) boolean expression like + // "sizeof(int) == 4". + !rex->isKnownToHaveBooleanValue() && + // Don't warn in macros. + !Loc.isMacroID()) + Diag(Loc, diag::warn_logical_instead_of_bitwise) + << rex->getSourceRange() + << (Opc == BinaryOperator::LAnd ? "&&" : "||") + << (Opc == BinaryOperator::LAnd ? "&" : "|"); + + + if (!Context.getLangOptions().CPlusPlus) { UsualUnaryConversions(lex); UsualUnaryConversions(rex); @@ -5663,25 +5764,14 @@ inline QualType Sema::CheckLogicalOperands( // C99 6.5.[13,14] return Context.IntTy; } + // The following is safe because we only use this method for + // non-overloadable operands. + // C++ [expr.log.and]p1 // C++ [expr.log.or]p1 - // The operands are both implicitly converted to type bool (clause 4). 
- StandardConversionSequence LHS; - if (!IsStandardConversion(lex, Context.BoolTy, - /*InOverloadResolution=*/false, LHS)) - return InvalidOperands(Loc, lex, rex); - - if (PerformImplicitConversion(lex, Context.BoolTy, LHS, - AA_Passing, /*IgnoreBaseAccess=*/false)) - return InvalidOperands(Loc, lex, rex); - - StandardConversionSequence RHS; - if (!IsStandardConversion(rex, Context.BoolTy, - /*InOverloadResolution=*/false, RHS)) - return InvalidOperands(Loc, lex, rex); - - if (PerformImplicitConversion(rex, Context.BoolTy, RHS, - AA_Passing, /*IgnoreBaseAccess=*/false)) + // The operands are both contextually converted to type bool. + if (PerformContextuallyConvertToBool(lex) || + PerformContextuallyConvertToBool(rex)) return InvalidOperands(Loc, lex, rex); // C++ [expr.log.and]p2 @@ -5786,11 +5876,22 @@ QualType Sema::CheckAssignmentOperands(Expr *LHS, Expr *&RHS, QualType LHSType = LHS->getType(); QualType RHSType = CompoundType.isNull() ? RHS->getType() : CompoundType; - AssignConvertType ConvTy; if (CompoundType.isNull()) { + QualType LHSTy(LHSType); // Simple assignment "x = y". - ConvTy = CheckSingleAssignmentConstraints(LHSType, RHS); + if (const ObjCImplicitSetterGetterRefExpr *OISGE = + dyn_cast<ObjCImplicitSetterGetterRefExpr>(LHS)) { + // If using property-dot syntax notation for assignment, and there is a + // setter, RHS expression is being passed to the setter argument. So, + // type conversion (and comparison) is RHS to setter's argument type. + if (const ObjCMethodDecl *SetterMD = OISGE->getSetterMethod()) { + ObjCMethodDecl::param_iterator P = SetterMD->param_begin(); + LHSTy = (*P)->getType(); + } + } + + ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); // Special case of NSObject attributes on c-style pointer types. 
if (ConvTy == IncompatiblePointer && ((Context.isObjCNSObjectType(LHSType) && @@ -5829,6 +5930,23 @@ QualType Sema::CheckAssignmentOperands(Expr *LHS, Expr *&RHS, RHS, AA_Assigning)) return QualType(); + + // Check to see if the destination operand is a dereferenced null pointer. If + // so, and if not volatile-qualified, this is undefined behavior that the + // optimizer will delete, so warn about it. People sometimes try to use this + // to get a deterministic trap and are surprised by clang's behavior. This + // only handles the pattern "*null = whatever", which is a very syntactic + // check. + if (UnaryOperator *UO = dyn_cast<UnaryOperator>(LHS->IgnoreParenCasts())) + if (UO->getOpcode() == UnaryOperator::Deref && + UO->getSubExpr()->IgnoreParenCasts()-> + isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNotNull) && + !UO->getType().isVolatileQualified()) { + Diag(UO->getOperatorLoc(), diag::warn_indirection_through_null) + << UO->getSubExpr()->getSourceRange(); + Diag(UO->getOperatorLoc(), diag::note_indirection_through_null); + } + // C99 6.5.16p3: The type of an assignment expression is the type of the // left operand unless the left operand has qualified type, in which case // it is the unqualified version of the type of the left operand. @@ -5841,6 +5959,8 @@ QualType Sema::CheckAssignmentOperands(Expr *LHS, Expr *&RHS, // C99 6.5.17 QualType Sema::CheckCommaOperands(Expr *LHS, Expr *&RHS, SourceLocation Loc) { + DiagnoseUnusedExprResult(LHS); + // Comma performs lvalue conversion (C99 6.3.2.1), but not unary conversions. // C++ does not perform this conversion (C++ [expr.comma]p1). 
if (!getLangOptions().CPlusPlus) @@ -6025,13 +6145,17 @@ QualType Sema::CheckAddressOfOperand(Expr *op, SourceLocation OpLoc) { return Context.getMemberPointerType(op->getType(), Context.getTypeDeclType(cast<RecordDecl>(dcl->getDeclContext())) .getTypePtr()); - } else if (lval == Expr::LV_ClassTemporary) { + } + + if (lval == Expr::LV_ClassTemporary) { Diag(OpLoc, isSFINAEContext()? diag::err_typecheck_addrof_class_temporary : diag::ext_typecheck_addrof_class_temporary) << op->getType() << op->getSourceRange(); if (isSFINAEContext()) return QualType(); - } else if (lval != Expr::LV_Valid && lval != Expr::LV_IncompleteVoidType) { + } else if (isa<ObjCSelectorExpr>(op)) + return Context.getPointerType(op->getType()); + else if (lval != Expr::LV_Valid && lval != Expr::LV_IncompleteVoidType) { // C99 6.5.3.2p1 // The operand must be either an l-value or a function designator if (!op->getType()->isFunctionType()) { @@ -6112,26 +6236,32 @@ QualType Sema::CheckAddressOfOperand(Expr *op, SourceLocation OpLoc) { return Context.getPointerType(op->getType()); } +/// CheckIndirectionOperand - Type check unary indirection (prefix '*'). QualType Sema::CheckIndirectionOperand(Expr *Op, SourceLocation OpLoc) { if (Op->isTypeDependent()) return Context.DependentTy; UsualUnaryConversions(Op); - QualType Ty = Op->getType(); - - // Note that per both C89 and C99, this is always legal, even if ptype is an - // incomplete type or void. It would be possible to warn about dereferencing - // a void pointer, but it's completely well-defined, and such a warning is - // unlikely to catch any mistakes. 
- if (const PointerType *PT = Ty->getAs<PointerType>()) - return PT->getPointeeType(); - - if (const ObjCObjectPointerType *OPT = Ty->getAs<ObjCObjectPointerType>()) - return OPT->getPointeeType(); - - Diag(OpLoc, diag::err_typecheck_indirection_requires_pointer) - << Ty << Op->getSourceRange(); - return QualType(); + QualType OpTy = Op->getType(); + QualType Result; + + // Note that per both C89 and C99, indirection is always legal, even if OpTy + // is an incomplete type or void. It would be possible to warn about + // dereferencing a void pointer, but it's completely well-defined, and such a + // warning is unlikely to catch any mistakes. + if (const PointerType *PT = OpTy->getAs<PointerType>()) + Result = PT->getPointeeType(); + else if (const ObjCObjectPointerType *OPT = + OpTy->getAs<ObjCObjectPointerType>()) + Result = OPT->getPointeeType(); + + if (Result.isNull()) { + Diag(OpLoc, diag::err_typecheck_indirection_requires_pointer) + << OpTy << Op->getSourceRange(); + return QualType(); + } + + return Result; } static inline BinaryOperator::Opcode ConvertTokenKindToBinaryOpcode( @@ -6251,7 +6381,7 @@ Action::OwningExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc, break; case BinaryOperator::LAnd: case BinaryOperator::LOr: - ResultTy = CheckLogicalOperands(lhs, rhs, OpLoc); + ResultTy = CheckLogicalOperands(lhs, rhs, OpLoc, Opc); break; case BinaryOperator::MulAssign: case BinaryOperator::DivAssign: @@ -6479,7 +6609,8 @@ Action::OwningExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc, resultType = Input->getType(); if (resultType->isDependentType()) break; - if (resultType->isArithmeticType()) // C99 6.5.3.3p1 + if (resultType->isArithmeticType() || // C99 6.5.3.3p1 + resultType->isVectorType()) break; else if (getLangOptions().CPlusPlus && // C++ [expr.unary.op]p6-7 resultType->isEnumeralType()) @@ -6980,83 +7111,99 @@ void Sema::ActOnBlockStart(SourceLocation CaretLoc, Scope *BlockScope) { BlockDecl *Block = BlockDecl::Create(Context, 
CurContext, CaretLoc); PushBlockScope(BlockScope, Block); CurContext->addDecl(Block); - PushDeclContext(BlockScope, Block); + if (BlockScope) + PushDeclContext(BlockScope, Block); + else + CurContext = Block; } void Sema::ActOnBlockArguments(Declarator &ParamInfo, Scope *CurScope) { assert(ParamInfo.getIdentifier()==0 && "block-id should have no identifier!"); BlockScopeInfo *CurBlock = getCurBlock(); - if (ParamInfo.getNumTypeObjects() == 0 - || ParamInfo.getTypeObject(0).Kind != DeclaratorChunk::Function) { - ProcessDeclAttributes(CurScope, CurBlock->TheDecl, ParamInfo); - QualType T = GetTypeForDeclarator(ParamInfo, CurScope); - - if (T->isArrayType()) { - Diag(ParamInfo.getSourceRange().getBegin(), - diag::err_block_returns_array); - return; - } - - // The parameter list is optional, if there was none, assume (). - if (!T->isFunctionType()) - T = Context.getFunctionType(T, 0, 0, false, 0, false, false, 0, 0, - FunctionType::ExtInfo()); + TypeSourceInfo *Sig = GetTypeForDeclarator(ParamInfo, CurScope); + CurBlock->TheDecl->setSignatureAsWritten(Sig); + QualType T = Sig->getType(); + + bool isVariadic; + QualType RetTy; + if (const FunctionType *Fn = T->getAs<FunctionType>()) { + CurBlock->FunctionType = T; + RetTy = Fn->getResultType(); + isVariadic = + !isa<FunctionProtoType>(Fn) || cast<FunctionProtoType>(Fn)->isVariadic(); + } else { + RetTy = T; + isVariadic = false; + } - CurBlock->hasPrototype = true; - CurBlock->isVariadic = false; - // Check for a valid sentinel attribute on this block. - if (CurBlock->TheDecl->getAttr<SentinelAttr>()) { - Diag(ParamInfo.getAttributes()->getLoc(), - diag::warn_attribute_sentinel_not_variadic) << 1; - // FIXME: remove the attribute. - } - QualType RetTy = T.getTypePtr()->getAs<FunctionType>()->getResultType(); + CurBlock->TheDecl->setIsVariadic(isVariadic); - // Do not allow returning a objc interface by-value. 
- if (RetTy->isObjCObjectType()) { - Diag(ParamInfo.getSourceRange().getBegin(), - diag::err_object_cannot_be_passed_returned_by_value) << 0 << RetTy; - return; - } + // Don't allow returning an array by value. + if (RetTy->isArrayType()) { + Diag(ParamInfo.getSourceRange().getBegin(), diag::err_block_returns_array); + return; + } - CurBlock->ReturnType = RetTy; + // Don't allow returning a objc interface by value. + if (RetTy->isObjCObjectType()) { + Diag(ParamInfo.getSourceRange().getBegin(), + diag::err_object_cannot_be_passed_returned_by_value) << 0 << RetTy; return; } - // Analyze arguments to block. - assert(ParamInfo.getTypeObject(0).Kind == DeclaratorChunk::Function && - "Not a function declarator!"); - DeclaratorChunk::FunctionTypeInfo &FTI = ParamInfo.getTypeObject(0).Fun; - - CurBlock->hasPrototype = FTI.hasPrototype; - CurBlock->isVariadic = true; - - // Check for C99 6.7.5.3p10 - foo(void) is a non-varargs function that takes - // no arguments, not a function that takes a single void argument. - if (FTI.hasPrototype && - FTI.NumArgs == 1 && !FTI.isVariadic && FTI.ArgInfo[0].Ident == 0 && - (!FTI.ArgInfo[0].Param.getAs<ParmVarDecl>()->getType().getCVRQualifiers()&& - FTI.ArgInfo[0].Param.getAs<ParmVarDecl>()->getType()->isVoidType())) { - // empty arg list, don't push any params. - CurBlock->isVariadic = false; - } else if (FTI.hasPrototype) { - for (unsigned i = 0, e = FTI.NumArgs; i != e; ++i) { - ParmVarDecl *Param = FTI.ArgInfo[i].Param.getAs<ParmVarDecl>(); + // Context.DependentTy is used as a placeholder for a missing block + // return type. TODO: what should we do with declarators like: + // ^ * { ... } + // If the answer is "apply template argument deduction".... + if (RetTy != Context.DependentTy) + CurBlock->ReturnType = RetTy; + + // Push block parameters from the declarator if we had them. 
+ llvm::SmallVector<ParmVarDecl*, 8> Params; + if (isa<FunctionProtoType>(T)) { + FunctionProtoTypeLoc TL = cast<FunctionProtoTypeLoc>(Sig->getTypeLoc()); + for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I) { + ParmVarDecl *Param = TL.getArg(I); if (Param->getIdentifier() == 0 && !Param->isImplicit() && !Param->isInvalidDecl() && !getLangOptions().CPlusPlus) Diag(Param->getLocation(), diag::err_parameter_name_omitted); - CurBlock->Params.push_back(Param); + Params.push_back(Param); + } + + // Fake up parameter variables if we have a typedef, like + // ^ fntype { ... } + } else if (const FunctionProtoType *Fn = T->getAs<FunctionProtoType>()) { + for (FunctionProtoType::arg_type_iterator + I = Fn->arg_type_begin(), E = Fn->arg_type_end(); I != E; ++I) { + ParmVarDecl *Param = + BuildParmVarDeclForTypedef(CurBlock->TheDecl, + ParamInfo.getSourceRange().getBegin(), + *I); + Params.push_back(Param); } - CurBlock->isVariadic = FTI.isVariadic; } - CurBlock->TheDecl->setParams(CurBlock->Params.data(), - CurBlock->Params.size()); - CurBlock->TheDecl->setIsVariadic(CurBlock->isVariadic); + + // Set the parameters on the block decl. + if (!Params.empty()) + CurBlock->TheDecl->setParams(Params.data(), Params.size()); + + // Finally we can process decl attributes. ProcessDeclAttributes(CurScope, CurBlock->TheDecl, ParamInfo); + if (!isVariadic && CurBlock->TheDecl->getAttr<SentinelAttr>()) { + Diag(ParamInfo.getAttributes()->getLoc(), + diag::warn_attribute_sentinel_not_variadic) << 1; + // FIXME: remove the attribute. + } + + // Put the parameter variables in scope. We can bail out immediately + // if we don't have any. + if (Params.empty()) + return; + bool ShouldCheckShadow = Diags.getDiagnosticLevel(diag::warn_decl_shadow) != Diagnostic::Ignored; @@ -7072,25 +7219,6 @@ void Sema::ActOnBlockArguments(Declarator &ParamInfo, Scope *CurScope) { PushOnScopeChains(*AI, CurBlock->TheScope); } } - - // Check for a valid sentinel attribute on this block. 
- if (!CurBlock->isVariadic && - CurBlock->TheDecl->getAttr<SentinelAttr>()) { - Diag(ParamInfo.getAttributes()->getLoc(), - diag::warn_attribute_sentinel_not_variadic) << 1; - // FIXME: remove the attribute. - } - - // Analyze the return type. - QualType T = GetTypeForDeclarator(ParamInfo, CurScope); - QualType RetTy = T->getAs<FunctionType>()->getResultType(); - - // Do not allow returning a objc interface by-value. - if (RetTy->isObjCObjectType()) { - Diag(ParamInfo.getSourceRange().getBegin(), - diag::err_object_cannot_be_passed_returned_by_value) << 0 << RetTy; - } else if (!RetTy->isDependentType()) - CurBlock->ReturnType = RetTy; } /// ActOnBlockError - If there is an error parsing a block, this callback @@ -7111,29 +7239,59 @@ Sema::OwningExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc, Diag(CaretLoc, diag::err_blocks_disable); BlockScopeInfo *BSI = cast<BlockScopeInfo>(FunctionScopes.back()); - + PopDeclContext(); QualType RetTy = Context.VoidTy; if (!BSI->ReturnType.isNull()) RetTy = BSI->ReturnType; - llvm::SmallVector<QualType, 8> ArgTypes; - for (unsigned i = 0, e = BSI->Params.size(); i != e; ++i) - ArgTypes.push_back(BSI->Params[i]->getType()); - bool NoReturn = BSI->TheDecl->getAttr<NoReturnAttr>(); QualType BlockTy; - if (!BSI->hasPrototype) - BlockTy = Context.getFunctionType(RetTy, 0, 0, false, 0, false, false, 0, 0, - FunctionType::ExtInfo(NoReturn, 0, CC_Default)); - else - BlockTy = Context.getFunctionType(RetTy, ArgTypes.data(), ArgTypes.size(), - BSI->isVariadic, 0, false, false, 0, 0, - FunctionType::ExtInfo(NoReturn, 0, CC_Default)); + + // If the user wrote a function type in some form, try to use that. + if (!BSI->FunctionType.isNull()) { + const FunctionType *FTy = BSI->FunctionType->getAs<FunctionType>(); + + FunctionType::ExtInfo Ext = FTy->getExtInfo(); + if (NoReturn && !Ext.getNoReturn()) Ext = Ext.withNoReturn(true); + + // Turn protoless block types into nullary block types. 
+ if (isa<FunctionNoProtoType>(FTy)) { + BlockTy = Context.getFunctionType(RetTy, 0, 0, false, 0, + false, false, 0, 0, Ext); + + // Otherwise, if we don't need to change anything about the function type, + // preserve its sugar structure. + } else if (FTy->getResultType() == RetTy && + (!NoReturn || FTy->getNoReturnAttr())) { + BlockTy = BSI->FunctionType; + + // Otherwise, make the minimal modifications to the function type. + } else { + const FunctionProtoType *FPT = cast<FunctionProtoType>(FTy); + BlockTy = Context.getFunctionType(RetTy, + FPT->arg_type_begin(), + FPT->getNumArgs(), + FPT->isVariadic(), + /*quals*/ 0, + FPT->hasExceptionSpec(), + FPT->hasAnyExceptionSpec(), + FPT->getNumExceptions(), + FPT->exception_begin(), + Ext); + } + + // If we don't have a function type, just build one from nothing. + } else { + BlockTy = Context.getFunctionType(RetTy, 0, 0, false, 0, + false, false, 0, 0, + FunctionType::ExtInfo(NoReturn, 0, CC_Default)); + } // FIXME: Check that return/parameter types are complete/non-abstract - DiagnoseUnusedParameters(BSI->Params.begin(), BSI->Params.end()); + DiagnoseUnusedParameters(BSI->TheDecl->param_begin(), + BSI->TheDecl->param_end()); BlockTy = Context.getBlockPointerType(BlockTy); // If needed, diagnose invalid gotos and switches in the block. @@ -7210,7 +7368,8 @@ Sema::OwningExprResult Sema::ActOnVAArg(SourceLocation BuiltinLoc, // FIXME: Warn if a non-POD type is passed in. 
expr.release(); - return Owned(new (Context) VAArgExpr(BuiltinLoc, E, T.getNonReferenceType(), + return Owned(new (Context) VAArgExpr(BuiltinLoc, E, + T.getNonLValueExprType(Context), RPLoc)); } @@ -7445,7 +7604,7 @@ Sema::PopExpressionEvaluationContext() { void Sema::MarkDeclarationReferenced(SourceLocation Loc, Decl *D) { assert(D && "No declaration?"); - if (D->isUsed()) + if (D->isUsed(false)) return; // Mark a parameter or variable declaration "used", regardless of whether we're in a @@ -7488,24 +7647,24 @@ void Sema::MarkDeclarationReferenced(SourceLocation Loc, Decl *D) { if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(D)) { unsigned TypeQuals; if (Constructor->isImplicit() && Constructor->isDefaultConstructor()) { - if (!Constructor->isUsed()) + if (!Constructor->isUsed(false)) DefineImplicitDefaultConstructor(Loc, Constructor); } else if (Constructor->isImplicit() && Constructor->isCopyConstructor(TypeQuals)) { - if (!Constructor->isUsed()) + if (!Constructor->isUsed(false)) DefineImplicitCopyConstructor(Loc, Constructor, TypeQuals); } MarkVTableUsed(Loc, Constructor->getParent()); } else if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(D)) { - if (Destructor->isImplicit() && !Destructor->isUsed()) + if (Destructor->isImplicit() && !Destructor->isUsed(false)) DefineImplicitDestructor(Loc, Destructor); if (Destructor->isVirtual()) MarkVTableUsed(Loc, Destructor->getParent()); } else if (CXXMethodDecl *MethodDecl = dyn_cast<CXXMethodDecl>(D)) { if (MethodDecl->isImplicit() && MethodDecl->isOverloadedOperator() && MethodDecl->getOverloadedOperator() == OO_Equal) { - if (!MethodDecl->isUsed()) + if (!MethodDecl->isUsed(false)) DefineImplicitCopyAssignment(Loc, MethodDecl); } else if (MethodDecl->isVirtual()) MarkVTableUsed(Loc, MethodDecl->getParent()); @@ -7569,45 +7728,46 @@ void Sema::MarkDeclarationReferenced(SourceLocation Loc, Decl *D) { } namespace { - // Mark all of the declarations referenced + // Mark all of the 
declarations referenced // FIXME: Not fully implemented yet! We need to have a better understanding - // of when we're entering + // of when we're entering class MarkReferencedDecls : public RecursiveASTVisitor<MarkReferencedDecls> { Sema &S; SourceLocation Loc; - + public: typedef RecursiveASTVisitor<MarkReferencedDecls> Inherited; - + MarkReferencedDecls(Sema &S, SourceLocation Loc) : S(S), Loc(Loc) { } - - bool VisitTemplateArgument(const TemplateArgument &Arg); - bool VisitRecordType(RecordType *T); + + bool TraverseTemplateArgument(const TemplateArgument &Arg); + bool TraverseRecordType(RecordType *T); }; } -bool MarkReferencedDecls::VisitTemplateArgument(const TemplateArgument &Arg) { +bool MarkReferencedDecls::TraverseTemplateArgument( + const TemplateArgument &Arg) { if (Arg.getKind() == TemplateArgument::Declaration) { S.MarkDeclarationReferenced(Loc, Arg.getAsDecl()); } - - return Inherited::VisitTemplateArgument(Arg); + + return Inherited::TraverseTemplateArgument(Arg); } -bool MarkReferencedDecls::VisitRecordType(RecordType *T) { +bool MarkReferencedDecls::TraverseRecordType(RecordType *T) { if (ClassTemplateSpecializationDecl *Spec = dyn_cast<ClassTemplateSpecializationDecl>(T->getDecl())) { const TemplateArgumentList &Args = Spec->getTemplateArgs(); - return VisitTemplateArguments(Args.getFlatArgumentList(), - Args.flat_size()); + return TraverseTemplateArguments(Args.getFlatArgumentList(), + Args.flat_size()); } - return false; + return true; } void Sema::MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T) { MarkReferencedDecls Marker(*this, Loc); - Marker.Visit(Context.getCanonicalType(T)); + Marker.TraverseType(Context.getCanonicalType(T)); } /// \brief Emit a diagnostic that describes an effect on the run-time behavior diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp index 97de96a..090400f 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp +++ 
b/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp @@ -17,6 +17,7 @@ #include "clang/AST/ASTContext.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/ExprCXX.h" +#include "clang/AST/ExprObjC.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/PartialDiagnostic.h" #include "clang/Basic/TargetInfo.h" @@ -52,6 +53,8 @@ Action::TypeTy *Sema::getDestructorName(SourceLocation TildeLoc, // } // // See also PR6358 and PR6359. + // For this reason, we're currently only doing the C++03 version of this + // code; the C++0x version has to wait until we get a proper spec. QualType SearchType; DeclContext *LookupCtx = 0; bool isDependent = false; @@ -68,50 +71,33 @@ Action::TypeTy *Sema::getDestructorName(SourceLocation TildeLoc, bool AlreadySearched = false; bool LookAtPrefix = true; - if (!getLangOptions().CPlusPlus0x) { - // C++ [basic.lookup.qual]p6: - // If a pseudo-destructor-name (5.2.4) contains a nested-name-specifier, - // the type-names are looked up as types in the scope designated by the - // nested-name-specifier. In a qualified-id of the form: - // - // ::[opt] nested-name-specifier ̃ class-name - // - // where the nested-name-specifier designates a namespace scope, and in - // a qualified-id of the form: - // - // ::opt nested-name-specifier class-name :: ̃ class-name - // - // the class-names are looked up as types in the scope designated by - // the nested-name-specifier. - // - // Here, we check the first case (completely) and determine whether the - // code below is permitted to look at the prefix of the - // nested-name-specifier (as we do in C++0x). 
- DeclContext *DC = computeDeclContext(SS, EnteringContext); - if (DC && DC->isFileContext()) { - AlreadySearched = true; - LookupCtx = DC; - isDependent = false; - } else if (DC && isa<CXXRecordDecl>(DC)) - LookAtPrefix = false; - } - - // C++0x [basic.lookup.qual]p6: - // If a pseudo-destructor-name (5.2.4) contains a - // nested-name-specifier, the type-names are looked up as types - // in the scope designated by the nested-name-specifier. Similarly, in - // a qualified-id of the form: + // C++ [basic.lookup.qual]p6: + // If a pseudo-destructor-name (5.2.4) contains a nested-name-specifier, + // the type-names are looked up as types in the scope designated by the + // nested-name-specifier. In a qualified-id of the form: + // + // ::[opt] nested-name-specifier ̃ class-name // - // :: [opt] nested-name-specifier[opt] class-name :: ~class-name + // where the nested-name-specifier designates a namespace scope, and in + // a qualified-id of the form: // - // the second class-name is looked up in the same scope as the first. + // ::opt nested-name-specifier class-name :: ̃ class-name // - // To implement this, we look at the prefix of the - // nested-name-specifier we were given, and determine the lookup - // context from that. + // the class-names are looked up as types in the scope designated by + // the nested-name-specifier. // - // We also fold in the second case from the C++03 rules quoted further - // above. + // Here, we check the first case (completely) and determine whether the + // code below is permitted to look at the prefix of the + // nested-name-specifier. + DeclContext *DC = computeDeclContext(SS, EnteringContext); + if (DC && DC->isFileContext()) { + AlreadySearched = true; + LookupCtx = DC; + isDependent = false; + } else if (DC && isa<CXXRecordDecl>(DC)) + LookAtPrefix = false; + + // The second case from the C++03 rules quoted further above. NestedNameSpecifier *Prefix = 0; if (AlreadySearched) { // Nothing left to do. 
@@ -120,11 +106,6 @@ Action::TypeTy *Sema::getDestructorName(SourceLocation TildeLoc, PrefixSS.setScopeRep(Prefix); LookupCtx = computeDeclContext(PrefixSS, EnteringContext); isDependent = isDependentScopeSpecifier(PrefixSS); - } else if (getLangOptions().CPlusPlus0x && - (LookupCtx = computeDeclContext(SS, EnteringContext))) { - if (!LookupCtx->isTranslationUnit()) - LookupCtx = LookupCtx->getParent(); - isDependent = LookupCtx && LookupCtx->isDependentContext(); } else if (ObjectTypePtr) { LookupCtx = computeDeclContext(SearchType); isDependent = SearchType->isDependentType(); @@ -284,7 +265,10 @@ Sema::OwningExprResult Sema::BuildCXXTypeId(QualType TypeInfoType, // that is the operand of typeid are always ignored. // If the type of the type-id is a class type or a reference to a class // type, the class shall be completely-defined. - QualType T = Operand->getType().getNonReferenceType(); + Qualifiers Quals; + QualType T + = Context.getUnqualifiedArrayType(Operand->getType().getNonReferenceType(), + Quals); if (T->getAs<RecordType>() && RequireCompleteType(TypeidLoc, T, diag::err_incomplete_typeid)) return ExprError(); @@ -328,9 +312,11 @@ Sema::OwningExprResult Sema::BuildCXXTypeId(QualType TypeInfoType, // cv-qualified type, the result of the typeid expression refers to a // std::type_info object representing the cv-unqualified referenced // type. - if (T.hasQualifiers()) { - ImpCastExprToType(E, T.getUnqualifiedType(), CastExpr::CK_NoOp, - E->isLvalue(Context)); + Qualifiers Quals; + QualType UnqualT = Context.getUnqualifiedArrayType(T, Quals); + if (!Context.hasSameType(T, UnqualT)) { + T = UnqualT; + ImpCastExprToType(E, UnqualT, CastExpr::CK_NoOp, E->isLvalue(Context)); Operand.release(); Operand = Owned(E); } @@ -453,11 +439,28 @@ bool Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *&E) { return true; E = Res.takeAs<Expr>(); + // If the exception has class type, we need additional handling. 
+ const RecordType *RecordTy = Ty->getAs<RecordType>(); + if (!RecordTy) + return false; + CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl()); + // If we are throwing a polymorphic class type or pointer thereof, // exception handling will make use of the vtable. - if (const RecordType *RecordTy = Ty->getAs<RecordType>()) - MarkVTableUsed(ThrowLoc, cast<CXXRecordDecl>(RecordTy->getDecl())); - + MarkVTableUsed(ThrowLoc, RD); + + // If the class has a non-trivial destructor, we must be able to call it. + if (RD->hasTrivialDestructor()) + return false; + + CXXDestructorDecl *Destructor + = const_cast<CXXDestructorDecl*>(LookupDestructor(RD)); + if (!Destructor) + return false; + + MarkDeclarationReferenced(E->getExprLoc(), Destructor); + CheckDestructorAccess(E->getExprLoc(), Destructor, + PDiag(diag::err_access_dtor_exception) << Ty); return false; } @@ -537,33 +540,26 @@ Sema::ActOnCXXTypeConstructExpr(SourceRange TypeRange, TypeTy *TypeRep, exprs.release(); - return Owned(new (Context) CXXFunctionalCastExpr(Ty.getNonReferenceType(), + return Owned(new (Context) CXXFunctionalCastExpr( + Ty.getNonLValueExprType(Context), TInfo, TyBeginLoc, Kind, Exprs[0], BasePath, RParenLoc)); } - if (const RecordType *RT = Ty->getAs<RecordType>()) { - CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl()); - - if (NumExprs > 1 || !Record->hasTrivialConstructor() || - !Record->hasTrivialDestructor()) { - InitializedEntity Entity = InitializedEntity::InitializeTemporary(Ty); - InitializationKind Kind - = NumExprs ? InitializationKind::CreateDirect(TypeRange.getBegin(), - LParenLoc, RParenLoc) - : InitializationKind::CreateValue(TypeRange.getBegin(), - LParenLoc, RParenLoc); - InitializationSequence InitSeq(*this, Entity, Kind, Exprs, NumExprs); - OwningExprResult Result = InitSeq.Perform(*this, Entity, Kind, - move(exprs)); - - // FIXME: Improve AST representation? 
- return move(Result); - } - - // Fall through to value-initialize an object of class type that - // doesn't have a user-declared default constructor. + if (Ty->isRecordType()) { + InitializedEntity Entity = InitializedEntity::InitializeTemporary(Ty); + InitializationKind Kind + = NumExprs ? InitializationKind::CreateDirect(TypeRange.getBegin(), + LParenLoc, RParenLoc) + : InitializationKind::CreateValue(TypeRange.getBegin(), + LParenLoc, RParenLoc); + InitializationSequence InitSeq(*this, Entity, Kind, Exprs, NumExprs); + OwningExprResult Result = InitSeq.Perform(*this, Entity, Kind, + move(exprs)); + + // FIXME: Improve AST representation? + return move(Result); } // C++ [expr.type.conv]p1: @@ -582,7 +578,7 @@ Sema::ActOnCXXTypeConstructExpr(SourceRange TypeRange, TypeTy *TypeRep, // rvalue of the specified type, which is value-initialized. // exprs.release(); - return Owned(new (Context) CXXZeroInitValueExpr(Ty, TyBeginLoc, RParenLoc)); + return Owned(new (Context) CXXScalarValueInitExpr(Ty, TyBeginLoc, RParenLoc)); } @@ -594,7 +590,7 @@ Sema::ActOnCXXTypeConstructExpr(SourceRange TypeRange, TypeTy *TypeRep, Action::OwningExprResult Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, - SourceLocation PlacementRParen, bool ParenTypeId, + SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, SourceLocation ConstructorLParen, MultiExprArg ConstructorArgs, SourceLocation ConstructorRParen) { @@ -610,17 +606,6 @@ Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, return ExprError(Diag(Chunk.Loc, diag::err_array_new_needs_size) << D.getSourceRange()); - if (ParenTypeId) { - // Can't have dynamic array size when the type-id is in parentheses. 
- Expr *NumElts = (Expr *)Chunk.Arr.NumElts; - if (!NumElts->isTypeDependent() && !NumElts->isValueDependent() && - !NumElts->isIntegerConstantExpr(Context)) { - Diag(D.getTypeObject(0).Loc, diag::err_new_paren_array_nonconst) - << NumElts->getSourceRange(); - return ExprError(); - } - } - ArraySize = static_cast<Expr*>(Chunk.Arr.NumElts); D.DropFirstTypeObject(); } @@ -644,19 +629,20 @@ Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, } //FIXME: Store TypeSourceInfo in CXXNew expression. - TypeSourceInfo *TInfo = 0; - QualType AllocType = GetTypeForDeclarator(D, /*Scope=*/0, &TInfo); + TypeSourceInfo *TInfo = GetTypeForDeclarator(D, /*Scope=*/0); + QualType AllocType = TInfo->getType(); if (D.isInvalidType()) return ExprError(); - + + SourceRange R = TInfo->getTypeLoc().getSourceRange(); return BuildCXXNew(StartLoc, UseGlobal, PlacementLParen, move(PlacementArgs), PlacementRParen, - ParenTypeId, + TypeIdParens, AllocType, D.getSourceRange().getBegin(), - D.getSourceRange(), + R, Owned(ArraySize), ConstructorLParen, move(ConstructorArgs), @@ -668,7 +654,7 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, - bool ParenTypeId, + SourceRange TypeIdParens, QualType AllocType, SourceLocation TypeLoc, SourceRange TypeRange, @@ -697,11 +683,29 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal, // or enumeration type with a non-negative value." 
Expr *ArraySize = (Expr *)ArraySizeE.get(); if (ArraySize && !ArraySize->isTypeDependent()) { + QualType SizeType = ArraySize->getType(); - if (!SizeType->isIntegralType() && !SizeType->isEnumeralType()) - return ExprError(Diag(ArraySize->getSourceRange().getBegin(), - diag::err_array_size_not_integral) - << SizeType << ArraySize->getSourceRange()); + + OwningExprResult ConvertedSize + = ConvertToIntegralOrEnumerationType(StartLoc, move(ArraySizeE), + PDiag(diag::err_array_size_not_integral), + PDiag(diag::err_array_size_incomplete_type) + << ArraySize->getSourceRange(), + PDiag(diag::err_array_size_explicit_conversion), + PDiag(diag::note_array_size_conversion), + PDiag(diag::err_array_size_ambiguous_conversion), + PDiag(diag::note_array_size_conversion), + PDiag(getLangOptions().CPlusPlus0x? 0 + : diag::ext_array_size_conversion)); + if (ConvertedSize.isInvalid()) + return ExprError(); + + ArraySize = ConvertedSize.takeAs<Expr>(); + ArraySizeE = Owned(ArraySize); + SizeType = ArraySize->getType(); + if (!SizeType->isIntegralOrEnumerationType()) + return ExprError(); + // Let's see if this is a constant < 0. If so, we reject it out of hand. // We don't care about special rules, so we tell the machinery it's not // evaluated - it gives us a result in more cases. @@ -714,6 +718,14 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal, return ExprError(Diag(ArraySize->getSourceRange().getBegin(), diag::err_typecheck_negative_array_size) << ArraySize->getSourceRange()); + } else if (TypeIdParens.isValid()) { + // Can't have dynamic array size when the type-id is in parentheses. 
+ Diag(ArraySize->getLocStart(), diag::ext_new_paren_array_nonconst) + << ArraySize->getSourceRange() + << FixItHint::CreateRemoval(TypeIdParens.getBegin()) + << FixItHint::CreateRemoval(TypeIdParens.getEnd()); + + TypeIdParens = SourceRange(); } } @@ -828,13 +840,15 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal, PlacementArgs.release(); ConstructorArgs.release(); ArraySizeE.release(); + + // FIXME: The TypeSourceInfo should also be included in CXXNewExpr. return Owned(new (Context) CXXNewExpr(Context, UseGlobal, OperatorNew, - PlaceArgs, NumPlaceArgs, ParenTypeId, + PlaceArgs, NumPlaceArgs, TypeIdParens, ArraySize, Constructor, Init, ConsArgs, NumConsArgs, OperatorDelete, ResultType, StartLoc, Init ? ConstructorRParen : - SourceLocation())); + TypeRange.getEnd())); } /// CheckAllocatedType - Checks that a type is suitable as the allocated type @@ -1181,20 +1195,11 @@ void Sema::DeclareGlobalNewDelete() { // "std" or "bad_alloc" as necessary to form the exception specification. // However, we do not make these implicit declarations visible to name // lookup. - if (!StdNamespace) { - // The "std" namespace has not yet been defined, so build one implicitly. - StdNamespace = NamespaceDecl::Create(Context, - Context.getTranslationUnitDecl(), - SourceLocation(), - &PP.getIdentifierTable().get("std")); - StdNamespace->setImplicit(true); - } - if (!StdBadAlloc) { // The "std::bad_alloc" class has not yet been declared, so build it // implicitly. 
StdBadAlloc = CXXRecordDecl::Create(Context, TTK_Class, - StdNamespace, + getStdNamespace(), SourceLocation(), &PP.getIdentifierTable().get("bad_alloc"), SourceLocation(), 0); @@ -1291,11 +1296,15 @@ bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, if (Found.isAmbiguous()) return true; + Found.suppressDiagnostics(); + for (LookupResult::iterator F = Found.begin(), FEnd = Found.end(); F != FEnd; ++F) { if (CXXMethodDecl *Delete = dyn_cast<CXXMethodDecl>(*F)) if (Delete->isUsualDeallocationFunction()) { Operator = Delete; + CheckAllocationAccess(StartLoc, SourceRange(), Found.getNamingClass(), + F.getPair()); return false; } } @@ -1436,7 +1445,7 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, return ExprError(); if (!RD->hasTrivialDestructor()) - if (const CXXDestructorDecl *Dtor = RD->getDestructor(Context)) + if (const CXXDestructorDecl *Dtor = LookupDestructor(RD)) MarkDeclarationReferenced(StartLoc, const_cast<CXXDestructorDecl*>(Dtor)); } @@ -1517,7 +1526,7 @@ Sema::IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType) { // be converted to an rvalue of type "pointer to char"; a wide // string literal can be converted to an rvalue of type "pointer // to wchar_t" (C++ 4.2p2). 
- if (StringLiteral *StrLit = dyn_cast<StringLiteral>(From)) + if (StringLiteral *StrLit = dyn_cast<StringLiteral>(From->IgnoreParens())) if (const PointerType *ToPtrType = ToType->getAs<PointerType>()) if (const BuiltinType *ToPointeeType = ToPtrType->getPointeeType()->getAs<BuiltinType>()) { @@ -1776,7 +1785,7 @@ Sema::PerformImplicitConversion(Expr *&From, QualType ToType, break; case ICK_Floating_Integral: - if (ToType->isFloatingType()) + if (ToType->isRealFloatingType()) ImpCastExprToType(From, ToType, CastExpr::CK_IntegralToFloating); else ImpCastExprToType(From, ToType, CastExpr::CK_FloatingToIntegral); @@ -1871,7 +1880,7 @@ Sema::PerformImplicitConversion(Expr *&From, QualType ToType, case ICK_Qualification: // FIXME: Not sure about lvalue vs rvalue here in the presence of rvalue // references. - ImpCastExprToType(From, ToType.getNonReferenceType(), + ImpCastExprToType(From, ToType.getNonLValueExprType(Context), CastExpr::CK_NoOp, ToType->isLValueReferenceType()); if (SCS.DeprecatedStringLiteralToCharPtr) @@ -1973,7 +1982,7 @@ QualType Sema::CheckPointerToMemberOperands( BasePath); } - if (isa<CXXZeroInitValueExpr>(rex->IgnoreParens())) { + if (isa<CXXScalarValueInitExpr>(rex->IgnoreParens())) { // Diagnose use of pointer-to-member type which when used as // the functional cast in a pointer-to-member expression. Diag(Loc, diag::err_pointer_to_member_type) << isIndirect; @@ -2583,6 +2592,16 @@ Sema::OwningExprResult Sema::MaybeBindToTemporary(Expr *E) { if (FTy->getResultType()->isReferenceType()) return Owned(E); } + else if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E)) { + QualType Ty = ME->getType(); + if (const PointerType *PT = Ty->getAs<PointerType>()) + Ty = PT->getPointeeType(); + else if (const BlockPointerType *BPT = Ty->getAs<BlockPointerType>()) + Ty = BPT->getPointeeType(); + if (Ty->isReferenceType()) + return Owned(E); + } + // That should be enough to guarantee that this type is complete. 
// If it has a trivial destructor, we can avoid the extra copy. @@ -2590,11 +2609,9 @@ Sema::OwningExprResult Sema::MaybeBindToTemporary(Expr *E) { if (RD->hasTrivialDestructor()) return Owned(E); - CXXTemporary *Temp = CXXTemporary::Create(Context, - RD->getDestructor(Context)); + CXXTemporary *Temp = CXXTemporary::Create(Context, LookupDestructor(RD)); ExprTemporaries.push_back(Temp); - if (CXXDestructorDecl *Destructor = - const_cast<CXXDestructorDecl*>(RD->getDestructor(Context))) { + if (CXXDestructorDecl *Destructor = LookupDestructor(RD)) { MarkDeclarationReferenced(E->getExprLoc(), Destructor); CheckDestructorAccess(E->getExprLoc(), Destructor, PDiag(diag::err_access_dtor_temp) @@ -2819,7 +2836,7 @@ Sema::OwningExprResult Sema::BuildPseudoDestructorExpr(ExprArg Base, if (ScopeTypeInfo) { QualType ScopeType = ScopeTypeInfo->getType(); if (!ScopeType->isDependentType() && !ObjectType->isDependentType() && - !Context.hasSameType(ScopeType, ObjectType)) { + !Context.hasSameUnqualifiedType(ScopeType, ObjectType)) { Diag(ScopeTypeInfo->getTypeLoc().getLocalSourceRange().getBegin(), diag::err_pseudo_dtor_type_mismatch) @@ -2891,7 +2908,8 @@ Sema::OwningExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, ExprArg Base, // record types and dependent types matter. 
void *ObjectTypePtrForLookup = 0; if (!SS.isSet()) { - ObjectTypePtrForLookup = (void *)ObjectType->getAs<RecordType>(); + ObjectTypePtrForLookup = const_cast<RecordType*>( + ObjectType->getAs<RecordType>()); if (!ObjectTypePtrForLookup && ObjectType->isDependentType()) ObjectTypePtrForLookup = Context.DependentTy.getAsOpaquePtr(); } @@ -3012,7 +3030,7 @@ CXXMemberCallExpr *Sema::BuildCXXMemberCallExpr(Expr *Exp, MemberExpr *ME = new (Context) MemberExpr(Exp, /*IsArrow=*/false, Method, SourceLocation(), Method->getType()); - QualType ResultType = Method->getResultType().getNonReferenceType(); + QualType ResultType = Method->getCallResultType(); MarkDeclarationReferenced(Exp->getLocStart(), Method); CXXMemberCallExpr *CE = new (Context) CXXMemberCallExpr(Context, ME, 0, 0, ResultType, diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp index 695a1be..9f43471 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp @@ -207,7 +207,7 @@ bool Sema::CheckMessageArgumentTypes(Expr **Args, unsigned NumArgs, return false; } - ReturnType = Method->getResultType().getNonReferenceType(); + ReturnType = Method->getSendResultType(); unsigned NumNamedArgs = Sel.getNumArgs(); // Method might have more arguments than selector indicates. 
This is due @@ -346,7 +346,7 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Selector Sel = PP.getSelectorTable().getNullarySelector(Member); ObjCMethodDecl *Getter = IFace->lookupInstanceMethod(Sel); if (DiagnosePropertyAccessorMismatch(PD, Getter, MemberLoc)) - ResTy = Getter->getResultType(); + ResTy = Getter->getSendResultType(); return Owned(new (Context) ObjCPropertyRefExpr(PD, ResTy, MemberLoc, BaseExpr)); } @@ -402,7 +402,7 @@ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, if (Getter) { QualType PType; - PType = Getter->getResultType(); + PType = Getter->getSendResultType(); return Owned(new (Context) ObjCImplicitSetterGetterRefExpr(Getter, PType, Setter, MemberLoc, BaseExpr)); } @@ -510,7 +510,7 @@ ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, QualType PType; if (Getter) - PType = Getter->getResultType(); + PType = Getter->getSendResultType(); else { for (ObjCMethodDecl::param_iterator PI = Setter->param_begin(), E = Setter->param_end(); PI != E; ++PI) @@ -1007,6 +1007,12 @@ Sema::OwningExprResult Sema::BuildInstanceMessage(ExprArg ReceiverE, if (CheckMessageArgumentTypes(Args, NumArgs, Sel, Method, false, LBracLoc, RBracLoc, ReturnType)) return ExprError(); + + if (!ReturnType->isVoidType()) { + if (RequireCompleteType(LBracLoc, ReturnType, + diag::err_illegal_message_expr_incomplete_type)) + return ExprError(); + } // Construct the appropriate ObjCMessageExpr instance. 
Expr *Result; diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp index 20f0c79..7ad1775 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp @@ -523,8 +523,9 @@ void InitListChecker::CheckExplicitInitList(const InitializedEntity &Entity, StructuredList->setSyntacticForm(IList); CheckListElementTypes(Entity, IList, T, /*SubobjectIsDesignatorContext=*/true, Index, StructuredList, StructuredIndex, TopLevelObject); - IList->setType(T.getNonReferenceType()); - StructuredList->setType(T.getNonReferenceType()); + QualType ExprTy = T.getNonLValueExprType(SemaRef.Context); + IList->setType(ExprTy); + StructuredList->setType(ExprTy); if (hadError) return; @@ -877,10 +878,15 @@ void InitListChecker::CheckVectorType(const InitializedEntity &Entity, StructuredList, StructuredIndex); ++numEltsInit; } else { + QualType VecType; const VectorType *IVT = IType->getAs<VectorType>(); unsigned numIElts = IVT->getNumElements(); - QualType VecType = SemaRef.Context.getExtVectorType(elementType, - numIElts); + + if (IType->isExtVectorType()) + VecType = SemaRef.Context.getExtVectorType(elementType, numIElts); + else + VecType = SemaRef.Context.getVectorType(elementType, numIElts, + IVT->getAltiVecSpecific()); CheckSubElementType(ElementEntity, IList, VecType, Index, StructuredList, StructuredIndex); numEltsInit += numIElts; @@ -1114,7 +1120,7 @@ void InitListChecker::CheckStructUnionTypes(const InitializedEntity &Entity, } // Emit warnings for missing struct field initializers. - if (CheckForMissingFields && Field != FieldEnd && + if (InitializedSomething && CheckForMissingFields && Field != FieldEnd && !Field->getType()->isIncompleteArrayType() && !DeclType->isUnionType()) { // It is possible we have one or more unnamed bitfields remaining. // Find first (if any) named field and emit warning. 
@@ -1711,7 +1717,7 @@ InitListChecker::getStructuredSubobjectInit(InitListExpr *IList, unsigned Index, InitRange.getBegin(), 0, 0, InitRange.getEnd()); - Result->setType(CurrentObjectType.getNonReferenceType()); + Result->setType(CurrentObjectType.getNonLValueExprType(SemaRef.Context)); // Pre-allocate storage for the structured initializer list. unsigned NumElements = 0; @@ -1956,6 +1962,7 @@ DeclarationName InitializedEntity::getName() const { case EK_Base: case EK_ArrayElement: case EK_VectorElement: + case EK_BlockElement: return DeclarationName(); } @@ -1977,6 +1984,7 @@ DeclaratorDecl *InitializedEntity::getDecl() const { case EK_Base: case EK_ArrayElement: case EK_VectorElement: + case EK_BlockElement: return 0; } @@ -1998,6 +2006,7 @@ bool InitializedEntity::allowsNRVO() const { case EK_Base: case EK_ArrayElement: case EK_VectorElement: + case EK_BlockElement: break; } @@ -2195,7 +2204,7 @@ static void TryListInitialization(Sema &S, // FIXME: We only perform rudimentary checking of list // initializations at this point, then assume that any list // initialization of an array, aggregate, or scalar will be - // well-formed. We we actually "perform" list initialization, we'll + // well-formed. When we actually "perform" list initialization, we'll // do all of the necessary checking. C++0x initializer lists will // force us to perform more checking here. Sequence.setSequenceKind(InitializationSequence::ListInitialization); @@ -2236,8 +2245,6 @@ static void TryListInitialization(Sema &S, /// \brief Try a reference initialization that involves calling a conversion /// function. -/// -/// FIXME: look intos DRs 656, 896 static OverloadingResult TryRefInitWithConversionFunction(Sema &S, const InitializedEntity &Entity, const InitializationKind &Kind, @@ -2271,11 +2278,8 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S, // The type we're converting to is a class type. Enumerate its constructors // to see if there is a suitable conversion. 
CXXRecordDecl *T1RecordDecl = cast<CXXRecordDecl>(T1RecordType->getDecl()); - DeclarationName ConstructorName - = S.Context.DeclarationNames.getCXXConstructorName( - S.Context.getCanonicalType(T1).getUnqualifiedType()); DeclContext::lookup_iterator Con, ConEnd; - for (llvm::tie(Con, ConEnd) = T1RecordDecl->lookup(ConstructorName); + for (llvm::tie(Con, ConEnd) = S.LookupConstructors(T1RecordDecl); Con != ConEnd; ++Con) { NamedDecl *D = *Con; DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess()); @@ -2328,7 +2332,7 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S, if (ConvTemplate) Conv = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl()); else - Conv = cast<CXXConversionDecl>(*I); + Conv = cast<CXXConversionDecl>(D); // If the conversion function doesn't return a reference type, // it can't be considered for this conversion unless we're allowed to @@ -2367,13 +2371,14 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S, // Add the user-defined conversion step. Sequence.AddUserConversionStep(Function, Best->FoundDecl, - T2.getNonReferenceType()); + T2.getNonLValueExprType(S.Context)); // Determine whether we need to perform derived-to-base or // cv-qualification adjustments. 
bool NewDerivedToBase = false; Sema::ReferenceCompareResult NewRefRelationship - = S.CompareReferenceRelationship(DeclLoc, T1, T2.getNonReferenceType(), + = S.CompareReferenceRelationship(DeclLoc, T1, + T2.getNonLValueExprType(S.Context), NewDerivedToBase); if (NewRefRelationship == Sema::Ref_Incompatible) { // If the type we've converted to is not reference-related to the @@ -2398,14 +2403,14 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S, return OR_Success; } -/// \brief Attempt reference initialization (C++0x [dcl.init.list]) +/// \brief Attempt reference initialization (C++0x [dcl.init.ref]) static void TryReferenceInitialization(Sema &S, const InitializedEntity &Entity, const InitializationKind &Kind, Expr *Initializer, InitializationSequence &Sequence) { Sequence.setSequenceKind(InitializationSequence::ReferenceBinding); - + QualType DestType = Entity.getType(); QualType cv1T1 = DestType->getAs<ReferenceType>()->getPointeeType(); Qualifiers T1Quals; @@ -2414,7 +2419,7 @@ static void TryReferenceInitialization(Sema &S, Qualifiers T2Quals; QualType T2 = S.Context.getUnqualifiedArrayType(cv2T2, T2Quals); SourceLocation DeclLoc = Initializer->getLocStart(); - + // If the initializer is the address of an overloaded function, try // to resolve the overloaded function. If all goes well, T2 is the // type of the resulting function. @@ -2428,29 +2433,33 @@ static void TryReferenceInitialization(Sema &S, Sequence.SetFailed(InitializationSequence::FK_AddressOfOverloadFailed); return; } - + Sequence.AddAddressOverloadResolutionStep(Fn, Found); cv2T2 = Fn->getType(); T2 = cv2T2.getUnqualifiedType(); } - + // Compute some basic properties of the types and the initializer. 
bool isLValueRef = DestType->isLValueReferenceType(); bool isRValueRef = !isLValueRef; bool DerivedToBase = false; - Expr::isLvalueResult InitLvalue = Initializer->isLvalue(S.Context); + Expr::Classification InitCategory = Initializer->Classify(S.Context); Sema::ReferenceCompareResult RefRelationship = S.CompareReferenceRelationship(DeclLoc, cv1T1, cv2T2, DerivedToBase); - + // C++0x [dcl.init.ref]p5: // A reference to type "cv1 T1" is initialized by an expression of type // "cv2 T2" as follows: // // - If the reference is an lvalue reference and the initializer // expression + // Note the analogous bullet points for rvlaue refs to functions. Because + // there are no function rvalues in C++, rvalue refs to functions are treated + // like lvalue refs. OverloadingResult ConvOvlResult = OR_Success; - if (isLValueRef) { - if (InitLvalue == Expr::LV_Valid && + bool T1Function = T1->isFunctionType(); + if (isLValueRef || T1Function) { + if (InitCategory.isLValue() && RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification) { // - is an lvalue (but is not a bit-field), and "cv1 T1" is // reference-compatible with "cv2 T2," or @@ -2478,10 +2487,13 @@ static void TryReferenceInitialization(Sema &S, // with "cv3 T3" (this conversion is selected by enumerating the // applicable conversion functions (13.3.1.6) and choosing the best // one through overload resolution (13.3)), - if (RefRelationship == Sema::Ref_Incompatible && T2->isRecordType()) { + // If we have an rvalue ref to function type here, the rhs must be + // an rvalue. 
+ if (RefRelationship == Sema::Ref_Incompatible && T2->isRecordType() && + (isLValueRef || InitCategory.isRValue())) { ConvOvlResult = TryRefInitWithConversionFunction(S, Entity, Kind, Initializer, - /*AllowRValues=*/false, + /*AllowRValues=*/isRValueRef, Sequence); if (ConvOvlResult == OR_Success) return; @@ -2492,19 +2504,20 @@ static void TryReferenceInitialization(Sema &S, } } } - + // - Otherwise, the reference shall be an lvalue reference to a // non-volatile const type (i.e., cv1 shall be const), or the reference // shall be an rvalue reference and the initializer expression shall - // be an rvalue. + // be an rvalue or have a function type. + // We handled the function type stuff above. if (!((isLValueRef && T1Quals.hasConst() && !T1Quals.hasVolatile()) || - (isRValueRef && InitLvalue != Expr::LV_Valid))) { + (isRValueRef && InitCategory.isRValue()))) { if (ConvOvlResult && !Sequence.getFailedCandidateSet().empty()) Sequence.SetOverloadFailure( InitializationSequence::FK_ReferenceInitOverloadFailed, ConvOvlResult); else if (isLValueRef) - Sequence.SetFailed(InitLvalue == Expr::LV_Valid + Sequence.SetFailed(InitCategory.isLValue() ? (RefRelationship == Sema::Ref_Related ? 
InitializationSequence::FK_ReferenceInitDropsQualifiers : InitializationSequence::FK_NonConstLValueReferenceBindingToUnrelated) @@ -2512,15 +2525,15 @@ static void TryReferenceInitialization(Sema &S, else Sequence.SetFailed( InitializationSequence::FK_RValueReferenceBindingToLValue); - + return; } - - // - If T1 and T2 are class types and - if (T1->isRecordType() && T2->isRecordType()) { + + // - [If T1 is not a function type], if T2 is a class type and + if (!T1Function && T2->isRecordType()) { // - the initializer expression is an rvalue and "cv1 T1" is // reference-compatible with "cv2 T2", or - if (InitLvalue != Expr::LV_Valid && + if (InitCategory.isRValue() && RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification) { // The corresponding bullet in C++03 [dcl.init.ref]p5 gives the // compiler the freedom to perform a copy here or bind to the @@ -2543,7 +2556,7 @@ static void TryReferenceInitialization(Sema &S, Sequence.AddReferenceBindingStep(cv1T1, /*bindingTemporary=*/true); return; } - + // - T1 is not reference-related to T2 and the initializer expression // can be implicitly converted to an rvalue of type "cv3 T3" (this // conversion is selected by enumerating the applicable conversion @@ -2576,15 +2589,17 @@ static void TryReferenceInitialization(Sema &S, // from the initializer expression using the rules for a non-reference // copy initialization (8.5). The reference is then bound to the // temporary. [...] + // Determine whether we are allowed to call explicit constructors or // explicit conversion operators. 
bool AllowExplicit = (Kind.getKind() == InitializationKind::IK_Direct); - ImplicitConversionSequence ICS - = S.TryImplicitConversion(Initializer, cv1T1, - /*SuppressUserConversions=*/false, AllowExplicit, - /*FIXME:InOverloadResolution=*/false); - - if (ICS.isBad()) { + + InitializedEntity TempEntity = InitializedEntity::InitializeTemporary(cv1T1); + + if (S.TryImplicitConversion(Sequence, TempEntity, Initializer, + /*SuppressUserConversions*/ false, + AllowExplicit, + /*FIXME:InOverloadResolution=*/false)) { // FIXME: Use the conversion function set stored in ICS to turn // this into an overloading ambiguity diagnostic. However, we need // to keep that set as an OverloadCandidateSet rather than as some @@ -2609,8 +2624,6 @@ static void TryReferenceInitialization(Sema &S, return; } - // Perform the actual conversion. - Sequence.AddConversionSequenceStep(ICS, cv1T1); Sequence.AddReferenceBindingStep(cv1T1, /*bindingTemporary=*/true); return; } @@ -2661,11 +2674,8 @@ static void TryConstructorInitialization(Sema &S, CXXRecordDecl *DestRecordDecl = cast<CXXRecordDecl>(DestRecordType->getDecl()); - DeclarationName ConstructorName - = S.Context.DeclarationNames.getCXXConstructorName( - S.Context.getCanonicalType(DestType).getUnqualifiedType()); DeclContext::lookup_iterator Con, ConEnd; - for (llvm::tie(Con, ConEnd) = DestRecordDecl->lookup(ConstructorName); + for (llvm::tie(Con, ConEnd) = S.LookupConstructors(DestRecordDecl); Con != ConEnd; ++Con) { NamedDecl *D = *Con; DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess()); @@ -2764,8 +2774,7 @@ static void TryValueInitialization(Sema &S, // zero-initialized and, if T’s implicitly-declared default // constructor is non-trivial, that constructor is called. 
if ((ClassDecl->getTagKind() == TTK_Class || - ClassDecl->getTagKind() == TTK_Struct) && - !ClassDecl->hasTrivialConstructor()) { + ClassDecl->getTagKind() == TTK_Struct)) { Sequence.AddZeroInitializationStep(Entity.getType()); return TryConstructorInitialization(S, Entity, Kind, 0, 0, T, Sequence); } @@ -2841,15 +2850,11 @@ static void TryUserDefinedConversion(Sema &S, // Try to complete the type we're converting to. if (!S.RequireCompleteType(Kind.getLocation(), DestType, 0)) { - DeclarationName ConstructorName - = S.Context.DeclarationNames.getCXXConstructorName( - S.Context.getCanonicalType(DestType).getUnqualifiedType()); DeclContext::lookup_iterator Con, ConEnd; - for (llvm::tie(Con, ConEnd) = DestRecordDecl->lookup(ConstructorName); + for (llvm::tie(Con, ConEnd) = S.LookupConstructors(DestRecordDecl); Con != ConEnd; ++Con) { NamedDecl *D = *Con; DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess()); - bool SuppressUserConversions = false; // Find the constructor (which may be a template). CXXConstructorDecl *Constructor = 0; @@ -2858,17 +2863,8 @@ static void TryUserDefinedConversion(Sema &S, if (ConstructorTmpl) Constructor = cast<CXXConstructorDecl>( ConstructorTmpl->getTemplatedDecl()); - else { + else Constructor = cast<CXXConstructorDecl>(D); - - // If we're performing copy initialization using a copy constructor, - // we suppress user-defined conversions on the arguments. - // FIXME: Move constructors? 
- if (Kind.getKind() == InitializationKind::IK_Copy && - Constructor->isCopyConstructor()) - SuppressUserConversions = true; - - } if (!Constructor->isInvalidDecl() && Constructor->isConvertingConstructor(AllowExplicit)) { @@ -2876,11 +2872,11 @@ static void TryUserDefinedConversion(Sema &S, S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl, /*ExplicitArgs*/ 0, &Initializer, 1, CandidateSet, - SuppressUserConversions); + /*SuppressUserConversions=*/true); else S.AddOverloadCandidate(Constructor, FoundDecl, &Initializer, 1, CandidateSet, - SuppressUserConversions); + /*SuppressUserConversions=*/true); } } } @@ -2948,7 +2944,7 @@ static void TryUserDefinedConversion(Sema &S, } // Add the user-defined conversion step that calls the conversion function. - QualType ConvType = Function->getResultType().getNonReferenceType(); + QualType ConvType = Function->getCallResultType(); if (ConvType->getAs<RecordType>()) { // If we're converting to a class type, there may be an copy if // the resulting temporary object (possible to create an object of @@ -2973,25 +2969,22 @@ static void TryUserDefinedConversion(Sema &S, } } -/// \brief Attempt an implicit conversion (C++ [conv]) converting from one -/// non-class type to another. 
-static void TryImplicitConversion(Sema &S, - const InitializedEntity &Entity, - const InitializationKind &Kind, - Expr *Initializer, - InitializationSequence &Sequence) { +bool Sema::TryImplicitConversion(InitializationSequence &Sequence, + const InitializedEntity &Entity, + Expr *Initializer, + bool SuppressUserConversions, + bool AllowExplicitConversions, + bool InOverloadResolution) { ImplicitConversionSequence ICS - = S.TryImplicitConversion(Initializer, Entity.getType(), - /*SuppressUserConversions=*/true, - /*AllowExplicit=*/false, - /*InOverloadResolution=*/false); - - if (ICS.isBad()) { - Sequence.SetFailed(InitializationSequence::FK_ConversionFailed); - return; - } - + = TryImplicitConversion(Initializer, Entity.getType(), + SuppressUserConversions, + AllowExplicitConversions, + InOverloadResolution); + if (ICS.isBad()) return true; + + // Perform the actual conversion. Sequence.AddConversionSequenceStep(ICS, Entity.getType()); + return false; } InitializationSequence::InitializationSequence(Sema &S, @@ -3125,8 +3118,13 @@ InitializationSequence::InitializationSequence(Sema &S, // conversions (Clause 4) will be used, if necessary, to convert the // initializer expression to the cv-unqualified version of the // destination type; no user-defined conversions are considered. 
- setSequenceKind(StandardConversion); - TryImplicitConversion(S, Entity, Kind, Initializer, *this); + if (S.TryImplicitConversion(*this, Entity, Initializer, + /*SuppressUserConversions*/ true, + /*AllowExplicitConversions*/ false, + /*InOverloadResolution*/ false)) + SetFailed(InitializationSequence::FK_ConversionFailed); + else + setSequenceKind(StandardConversion); } InitializationSequence::~InitializationSequence() { @@ -3168,6 +3166,7 @@ getAssignmentAction(const InitializedEntity &Entity) { case InitializedEntity::EK_Member: case InitializedEntity::EK_ArrayElement: case InitializedEntity::EK_VectorElement: + case InitializedEntity::EK_BlockElement: return Sema::AA_Initializing; } @@ -3186,6 +3185,7 @@ static bool shouldBindAsTemporary(const InitializedEntity &Entity) { case InitializedEntity::EK_Base: case InitializedEntity::EK_VectorElement: case InitializedEntity::EK_Exception: + case InitializedEntity::EK_BlockElement: return false; case InitializedEntity::EK_Parameter: @@ -3205,6 +3205,7 @@ static bool shouldDestroyTemporary(const InitializedEntity &Entity) { case InitializedEntity::EK_New: case InitializedEntity::EK_Base: case InitializedEntity::EK_VectorElement: + case InitializedEntity::EK_BlockElement: return false; case InitializedEntity::EK_Variable: @@ -3289,6 +3290,7 @@ static Sema::OwningExprResult CopyObject(Sema &S, case InitializedEntity::EK_New: case InitializedEntity::EK_Base: case InitializedEntity::EK_VectorElement: + case InitializedEntity::EK_BlockElement: Loc = CurInitExpr->getLocStart(); break; } @@ -3298,12 +3300,9 @@ static Sema::OwningExprResult CopyObject(Sema &S, return move(CurInit); // Perform overload resolution using the class's copy constructors. 
- DeclarationName ConstructorName - = S.Context.DeclarationNames.getCXXConstructorName( - S.Context.getCanonicalType(S.Context.getTypeDeclType(Class))); DeclContext::lookup_iterator Con, ConEnd; OverloadCandidateSet CandidateSet(Loc); - for (llvm::tie(Con, ConEnd) = Class->lookup(ConstructorName); + for (llvm::tie(Con, ConEnd) = S.LookupConstructors(Class); Con != ConEnd; ++Con) { // Only consider copy constructors. CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(*Con); @@ -3324,12 +3323,16 @@ static Sema::OwningExprResult CopyObject(Sema &S, break; case OR_No_Viable_Function: - S.Diag(Loc, diag::err_temp_copy_no_viable) + S.Diag(Loc, IsExtraneousCopy && !S.isSFINAEContext() + ? diag::ext_rvalue_to_reference_temp_copy_no_viable + : diag::err_temp_copy_no_viable) << (int)Entity.getKind() << CurInitExpr->getType() << CurInitExpr->getSourceRange(); S.PrintOverloadCandidates(CandidateSet, Sema::OCD_AllCandidates, &CurInitExpr, 1); - return S.ExprError(); + if (!IsExtraneousCopy || S.isSFINAEContext()) + return S.ExprError(); + return move(CurInit); case OR_Ambiguous: S.Diag(Loc, diag::err_temp_copy_ambiguous) @@ -3353,7 +3356,7 @@ static Sema::OwningExprResult CopyObject(Sema &S, CurInit.release(); // Ownership transferred into MultiExprArg, below. 
S.CheckConstructorAccess(Loc, Constructor, Entity, - Best->FoundDecl.getAccess()); + Best->FoundDecl.getAccess(), IsExtraneousCopy); if (IsExtraneousCopy) { // If this is a totally extraneous copy for C++03 reference @@ -3699,8 +3702,8 @@ InitializationSequence::Perform(Sema &S, CurInitExpr = static_cast<Expr *>(CurInit.get()); QualType T = CurInitExpr->getType(); if (const RecordType *Record = T->getAs<RecordType>()) { - CXXDestructorDecl *Destructor - = cast<CXXRecordDecl>(Record->getDecl())->getDestructor(S.Context); + CXXDestructorDecl *Destructor + = S.LookupDestructor(cast<CXXRecordDecl>(Record->getDecl())); S.CheckDestructorAccess(CurInitExpr->getLocStart(), Destructor, S.PDiag(diag::err_access_dtor_temp) << T); S.MarkDeclarationReferenced(CurInitExpr->getLocStart(), Destructor); @@ -3836,7 +3839,7 @@ InitializationSequence::Perform(Sema &S, } else if (Kind.getKind() == InitializationKind::IK_Value && S.getLangOptions().CPlusPlus && !Kind.isImplicitValueInit()) { - CurInit = S.Owned(new (S.Context) CXXZeroInitValueExpr(Step->Type, + CurInit = S.Owned(new (S.Context) CXXScalarValueInitExpr(Step->Type, Kind.getRange().getBegin(), Kind.getRange().getEnd())); } else { diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaInit.h b/contrib/llvm/tools/clang/lib/Sema/SemaInit.h index a9064ed..44c36a7 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaInit.h +++ b/contrib/llvm/tools/clang/lib/Sema/SemaInit.h @@ -66,7 +66,10 @@ public: EK_Base, /// \brief The entity being initialized is an element of a vector. /// or vector. - EK_VectorElement + EK_VectorElement, + /// \brief The entity being initialized is a field of block descriptor for + /// the copied-in c++ object. 
+ EK_BlockElement }; private: @@ -166,6 +169,11 @@ public: return InitializedEntity(EK_Result, ReturnLoc, Type, NRVO); } + static InitializedEntity InitializeBlock(SourceLocation BlockVarLoc, + QualType Type, bool NRVO) { + return InitializedEntity(EK_BlockElement, BlockVarLoc, Type, NRVO); + } + /// \brief Create the initialization entity for an exception object. static InitializedEntity InitializeException(SourceLocation ThrowLoc, QualType Type, bool NRVO) { diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp index 4555a86..2e65183 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp @@ -447,11 +447,114 @@ static bool LookupBuiltin(Sema &S, LookupResult &R) { return false; } +/// \brief Determine whether we can declare a special member function within +/// the class at this point. +static bool CanDeclareSpecialMemberFunction(ASTContext &Context, + const CXXRecordDecl *Class) { + // We need to have a definition for the class. + if (!Class->getDefinition() || Class->isDependentContext()) + return false; + + // We can't be in the middle of defining the class. + if (const RecordType *RecordTy + = Context.getTypeDeclType(Class)->getAs<RecordType>()) + return !RecordTy->isBeingDefined(); + + return false; +} + +void Sema::ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class) { + if (!CanDeclareSpecialMemberFunction(Context, Class)) + return; + + // If the default constructor has not yet been declared, do so now. + if (!Class->hasDeclaredDefaultConstructor()) + DeclareImplicitDefaultConstructor(Class); + + // If the copy constructor has not yet been declared, do so now. + if (!Class->hasDeclaredCopyConstructor()) + DeclareImplicitCopyConstructor(Class); + + // If the copy assignment operator has not yet been declared, do so now. 
+ if (!Class->hasDeclaredCopyAssignment()) + DeclareImplicitCopyAssignment(Class); + + // If the destructor has not yet been declared, do so now. + if (!Class->hasDeclaredDestructor()) + DeclareImplicitDestructor(Class); +} + +/// \brief Determine whether this is the name of an implicitly-declared +/// special member function. +static bool isImplicitlyDeclaredMemberFunctionName(DeclarationName Name) { + switch (Name.getNameKind()) { + case DeclarationName::CXXConstructorName: + case DeclarationName::CXXDestructorName: + return true; + + case DeclarationName::CXXOperatorName: + return Name.getCXXOverloadedOperator() == OO_Equal; + + default: + break; + } + + return false; +} + +/// \brief If there are any implicit member functions with the given name +/// that need to be declared in the given declaration context, do so. +static void DeclareImplicitMemberFunctionsWithName(Sema &S, + DeclarationName Name, + const DeclContext *DC) { + if (!DC) + return; + + switch (Name.getNameKind()) { + case DeclarationName::CXXConstructorName: + if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(DC)) + if (Record->getDefinition() && + CanDeclareSpecialMemberFunction(S.Context, Record)) { + if (!Record->hasDeclaredDefaultConstructor()) + S.DeclareImplicitDefaultConstructor( + const_cast<CXXRecordDecl *>(Record)); + if (!Record->hasDeclaredCopyConstructor()) + S.DeclareImplicitCopyConstructor(const_cast<CXXRecordDecl *>(Record)); + } + break; + + case DeclarationName::CXXDestructorName: + if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(DC)) + if (Record->getDefinition() && !Record->hasDeclaredDestructor() && + CanDeclareSpecialMemberFunction(S.Context, Record)) + S.DeclareImplicitDestructor(const_cast<CXXRecordDecl *>(Record)); + break; + + case DeclarationName::CXXOperatorName: + if (Name.getCXXOverloadedOperator() != OO_Equal) + break; + + if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(DC)) + if (Record->getDefinition() && 
!Record->hasDeclaredCopyAssignment() && + CanDeclareSpecialMemberFunction(S.Context, Record)) + S.DeclareImplicitCopyAssignment(const_cast<CXXRecordDecl *>(Record)); + break; + + default: + break; + } +} + // Adds all qualifying matches for a name within a decl context to the // given lookup result. Returns true if any matches were found. static bool LookupDirect(Sema &S, LookupResult &R, const DeclContext *DC) { bool Found = false; + // Lazily declare C++ special member functions. + if (S.getLangOptions().CPlusPlus) + DeclareImplicitMemberFunctionsWithName(S, R.getLookupName(), DC); + + // Perform lookup into this declaration context. DeclContext::lookup_const_iterator I, E; for (llvm::tie(I, E) = DC->lookup(R.getLookupName()); I != E; ++I) { NamedDecl *D = *I; @@ -640,6 +743,17 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) { DeclarationName Name = R.getLookupName(); + // If this is the name of an implicitly-declared special member function, + // go through the scope stack to implicitly declare + if (isImplicitlyDeclaredMemberFunctionName(Name)) { + for (Scope *PreS = S; PreS; PreS = PreS->getParent()) + if (DeclContext *DC = static_cast<DeclContext *>(PreS->getEntity())) + DeclareImplicitMemberFunctionsWithName(*this, Name, DC); + } + + // Implicitly declare member functions with the name we're looking for, if in + // fact we are in a scope where it matters. + Scope *Initial = S; IdentifierResolver::iterator I = IdResolver.begin(Name), @@ -1127,7 +1241,7 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, // If this isn't a C++ class, we aren't allowed to look into base // classes, we're done. 
CXXRecordDecl *LookupRec = dyn_cast<CXXRecordDecl>(LookupCtx); - if (!LookupRec) + if (!LookupRec || !LookupRec->getDefinition()) return false; // If we're performing qualified name lookup into a dependent class, @@ -1416,11 +1530,22 @@ bool Sema::DiagnoseAmbiguousLookup(LookupResult &Result) { return true; } +namespace { + struct AssociatedLookup { + AssociatedLookup(Sema &S, + Sema::AssociatedNamespaceSet &Namespaces, + Sema::AssociatedClassSet &Classes) + : S(S), Namespaces(Namespaces), Classes(Classes) { + } + + Sema &S; + Sema::AssociatedNamespaceSet &Namespaces; + Sema::AssociatedClassSet &Classes; + }; +} + static void -addAssociatedClassesAndNamespaces(QualType T, - ASTContext &Context, - Sema::AssociatedNamespaceSet &AssociatedNamespaces, - Sema::AssociatedClassSet &AssociatedClasses); +addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType T); static void CollectEnclosingNamespace(Sema::AssociatedNamespaceSet &Namespaces, DeclContext *Ctx) { @@ -1439,10 +1564,8 @@ static void CollectEnclosingNamespace(Sema::AssociatedNamespaceSet &Namespaces, // \brief Add the associated classes and namespaces for argument-dependent // lookup that involves a template argument (C++ [basic.lookup.koenig]p2). static void -addAssociatedClassesAndNamespaces(const TemplateArgument &Arg, - ASTContext &Context, - Sema::AssociatedNamespaceSet &AssociatedNamespaces, - Sema::AssociatedClassSet &AssociatedClasses) { +addAssociatedClassesAndNamespaces(AssociatedLookup &Result, + const TemplateArgument &Arg) { // C++ [basic.lookup.koenig]p2, last bullet: // -- [...] ; switch (Arg.getKind()) { @@ -1453,9 +1576,7 @@ addAssociatedClassesAndNamespaces(const TemplateArgument &Arg, // [...] 
the namespaces and classes associated with the types of the // template arguments provided for template type parameters (excluding // template template parameters) - addAssociatedClassesAndNamespaces(Arg.getAsType(), Context, - AssociatedNamespaces, - AssociatedClasses); + addAssociatedClassesAndNamespaces(Result, Arg.getAsType()); break; case TemplateArgument::Template: { @@ -1467,9 +1588,9 @@ addAssociatedClassesAndNamespaces(const TemplateArgument &Arg, = dyn_cast<ClassTemplateDecl>(Template.getAsTemplateDecl())) { DeclContext *Ctx = ClassTemplate->getDeclContext(); if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx)) - AssociatedClasses.insert(EnclosingClass); + Result.Classes.insert(EnclosingClass); // Add the associated namespace for this class. - CollectEnclosingNamespace(AssociatedNamespaces, Ctx); + CollectEnclosingNamespace(Result.Namespaces, Ctx); } break; } @@ -1485,9 +1606,7 @@ addAssociatedClassesAndNamespaces(const TemplateArgument &Arg, for (TemplateArgument::pack_iterator P = Arg.pack_begin(), PEnd = Arg.pack_end(); P != PEnd; ++P) - addAssociatedClassesAndNamespaces(*P, Context, - AssociatedNamespaces, - AssociatedClasses); + addAssociatedClassesAndNamespaces(Result, *P); break; } } @@ -1496,10 +1615,13 @@ addAssociatedClassesAndNamespaces(const TemplateArgument &Arg, // argument-dependent lookup with an argument of class type // (C++ [basic.lookup.koenig]p2). static void -addAssociatedClassesAndNamespaces(CXXRecordDecl *Class, - ASTContext &Context, - Sema::AssociatedNamespaceSet &AssociatedNamespaces, - Sema::AssociatedClassSet &AssociatedClasses) { +addAssociatedClassesAndNamespaces(AssociatedLookup &Result, + CXXRecordDecl *Class) { + + // Just silently ignore anything whose name is __va_list_tag. + if (Class->getDeclName() == Result.S.VAListTagName) + return; + // C++ [basic.lookup.koenig]p2: // [...] 
// -- If T is a class type (including unions), its associated @@ -1511,13 +1633,13 @@ addAssociatedClassesAndNamespaces(CXXRecordDecl *Class, // Add the class of which it is a member, if any. DeclContext *Ctx = Class->getDeclContext(); if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx)) - AssociatedClasses.insert(EnclosingClass); + Result.Classes.insert(EnclosingClass); // Add the associated namespace for this class. - CollectEnclosingNamespace(AssociatedNamespaces, Ctx); + CollectEnclosingNamespace(Result.Namespaces, Ctx); // Add the class itself. If we've already seen this class, we don't // need to visit base classes. - if (!AssociatedClasses.insert(Class)) + if (!Result.Classes.insert(Class)) return; // -- If T is a template-id, its associated namespaces and classes are @@ -1533,15 +1655,13 @@ addAssociatedClassesAndNamespaces(CXXRecordDecl *Class, = dyn_cast<ClassTemplateSpecializationDecl>(Class)) { DeclContext *Ctx = Spec->getSpecializedTemplate()->getDeclContext(); if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx)) - AssociatedClasses.insert(EnclosingClass); + Result.Classes.insert(EnclosingClass); // Add the associated namespace for this class. - CollectEnclosingNamespace(AssociatedNamespaces, Ctx); + CollectEnclosingNamespace(Result.Namespaces, Ctx); const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs(); for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I) - addAssociatedClassesAndNamespaces(TemplateArgs[I], Context, - AssociatedNamespaces, - AssociatedClasses); + addAssociatedClassesAndNamespaces(Result, TemplateArgs[I]); } // Only recurse into base classes for complete types. @@ -1573,10 +1693,10 @@ addAssociatedClassesAndNamespaces(CXXRecordDecl *Class, if (!BaseType) continue; CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(BaseType->getDecl()); - if (AssociatedClasses.insert(BaseDecl)) { + if (Result.Classes.insert(BaseDecl)) { // Find the associated namespace for this base class. 
DeclContext *BaseCtx = BaseDecl->getDeclContext(); - CollectEnclosingNamespace(AssociatedNamespaces, BaseCtx); + CollectEnclosingNamespace(Result.Namespaces, BaseCtx); // Make sure we visit the bases of this base class. if (BaseDecl->bases_begin() != BaseDecl->bases_end()) @@ -1590,10 +1710,7 @@ addAssociatedClassesAndNamespaces(CXXRecordDecl *Class, // argument-dependent lookup with an argument of type T // (C++ [basic.lookup.koenig]p2). static void -addAssociatedClassesAndNamespaces(QualType T, - ASTContext &Context, - Sema::AssociatedNamespaceSet &AssociatedNamespaces, - Sema::AssociatedClassSet &AssociatedClasses) { +addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType Ty) { // C++ [basic.lookup.koenig]p2: // // For each argument type T in the function call, there is a set @@ -1604,109 +1721,137 @@ addAssociatedClassesAndNamespaces(QualType T, // argument). Typedef names and using-declarations used to specify // the types do not contribute to this set. The sets of namespaces // and classes are determined in the following way: - T = Context.getCanonicalType(T).getUnqualifiedType(); - // -- If T is a pointer to U or an array of U, its associated - // namespaces and classes are those associated with U. - // - // We handle this by unwrapping pointer and array types immediately, - // to avoid unnecessary recursion. 
+ llvm::SmallVector<const Type *, 16> Queue; + const Type *T = Ty->getCanonicalTypeInternal().getTypePtr(); + while (true) { - if (const PointerType *Ptr = T->getAs<PointerType>()) - T = Ptr->getPointeeType(); - else if (const ArrayType *Ptr = Context.getAsArrayType(T)) - T = Ptr->getElementType(); - else + switch (T->getTypeClass()) { + +#define TYPE(Class, Base) +#define DEPENDENT_TYPE(Class, Base) case Type::Class: +#define NON_CANONICAL_TYPE(Class, Base) case Type::Class: +#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: +#define ABSTRACT_TYPE(Class, Base) +#include "clang/AST/TypeNodes.def" + // T is canonical. We can also ignore dependent types because + // we don't need to do ADL at the definition point, but if we + // wanted to implement template export (or if we find some other + // use for associated classes and namespaces...) this would be + // wrong. break; - } - // -- If T is a fundamental type, its associated sets of - // namespaces and classes are both empty. - if (T->getAs<BuiltinType>()) - return; + // -- If T is a pointer to U or an array of U, its associated + // namespaces and classes are those associated with U. + case Type::Pointer: + T = cast<PointerType>(T)->getPointeeType().getTypePtr(); + continue; + case Type::ConstantArray: + case Type::IncompleteArray: + case Type::VariableArray: + T = cast<ArrayType>(T)->getElementType().getTypePtr(); + continue; - // -- If T is a class type (including unions), its associated - // classes are: the class itself; the class of which it is a - // member, if any; and its direct and indirect base - // classes. Its associated namespaces are the namespaces in - // which its associated classes are defined. 
- if (const RecordType *ClassType = T->getAs<RecordType>()) - if (CXXRecordDecl *ClassDecl - = dyn_cast<CXXRecordDecl>(ClassType->getDecl())) { - addAssociatedClassesAndNamespaces(ClassDecl, Context, - AssociatedNamespaces, - AssociatedClasses); - return; + // -- If T is a fundamental type, its associated sets of + // namespaces and classes are both empty. + case Type::Builtin: + break; + + // -- If T is a class type (including unions), its associated + // classes are: the class itself; the class of which it is a + // member, if any; and its direct and indirect base + // classes. Its associated namespaces are the namespaces in + // which its associated classes are defined. + case Type::Record: { + CXXRecordDecl *Class + = cast<CXXRecordDecl>(cast<RecordType>(T)->getDecl()); + addAssociatedClassesAndNamespaces(Result, Class); + break; } - // -- If T is an enumeration type, its associated namespace is - // the namespace in which it is defined. If it is class - // member, its associated class is the member’s class; else - // it has no associated class. - if (const EnumType *EnumT = T->getAs<EnumType>()) { - EnumDecl *Enum = EnumT->getDecl(); + // -- If T is an enumeration type, its associated namespace is + // the namespace in which it is defined. If it is class + // member, its associated class is the member’s class; else + // it has no associated class. + case Type::Enum: { + EnumDecl *Enum = cast<EnumType>(T)->getDecl(); - DeclContext *Ctx = Enum->getDeclContext(); - if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx)) - AssociatedClasses.insert(EnclosingClass); + DeclContext *Ctx = Enum->getDeclContext(); + if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx)) + Result.Classes.insert(EnclosingClass); - // Add the associated namespace for this class. - CollectEnclosingNamespace(AssociatedNamespaces, Ctx); + // Add the associated namespace for this class. 
+ CollectEnclosingNamespace(Result.Namespaces, Ctx); - return; - } + break; + } - // -- If T is a function type, its associated namespaces and - // classes are those associated with the function parameter - // types and those associated with the return type. - if (const FunctionType *FnType = T->getAs<FunctionType>()) { - // Return type - addAssociatedClassesAndNamespaces(FnType->getResultType(), - Context, - AssociatedNamespaces, AssociatedClasses); + // -- If T is a function type, its associated namespaces and + // classes are those associated with the function parameter + // types and those associated with the return type. + case Type::FunctionProto: { + const FunctionProtoType *Proto = cast<FunctionProtoType>(T); + for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(), + ArgEnd = Proto->arg_type_end(); + Arg != ArgEnd; ++Arg) + Queue.push_back(Arg->getTypePtr()); + // fallthrough + } + case Type::FunctionNoProto: { + const FunctionType *FnType = cast<FunctionType>(T); + T = FnType->getResultType().getTypePtr(); + continue; + } - const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FnType); - if (!Proto) - return; + // -- If T is a pointer to a member function of a class X, its + // associated namespaces and classes are those associated + // with the function parameter types and return type, + // together with those associated with X. + // + // -- If T is a pointer to a data member of class X, its + // associated namespaces and classes are those associated + // with the member type together with those associated with + // X. + case Type::MemberPointer: { + const MemberPointerType *MemberPtr = cast<MemberPointerType>(T); + + // Queue up the class type into which this points. + Queue.push_back(MemberPtr->getClass()); + + // And directly continue with the pointee type. 
+ T = MemberPtr->getPointeeType().getTypePtr(); + continue; + } - // Argument types - for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(), - ArgEnd = Proto->arg_type_end(); - Arg != ArgEnd; ++Arg) - addAssociatedClassesAndNamespaces(*Arg, Context, - AssociatedNamespaces, AssociatedClasses); + // As an extension, treat this like a normal pointer. + case Type::BlockPointer: + T = cast<BlockPointerType>(T)->getPointeeType().getTypePtr(); + continue; - return; - } + // References aren't covered by the standard, but that's such an + // obvious defect that we cover them anyway. + case Type::LValueReference: + case Type::RValueReference: + T = cast<ReferenceType>(T)->getPointeeType().getTypePtr(); + continue; - // -- If T is a pointer to a member function of a class X, its - // associated namespaces and classes are those associated - // with the function parameter types and return type, - // together with those associated with X. - // - // -- If T is a pointer to a data member of class X, its - // associated namespaces and classes are those associated - // with the member type together with those associated with - // X. - if (const MemberPointerType *MemberPtr = T->getAs<MemberPointerType>()) { - // Handle the type that the pointer to member points to. - addAssociatedClassesAndNamespaces(MemberPtr->getPointeeType(), - Context, - AssociatedNamespaces, - AssociatedClasses); - - // Handle the class type into which this points. - if (const RecordType *Class = MemberPtr->getClass()->getAs<RecordType>()) - addAssociatedClassesAndNamespaces(cast<CXXRecordDecl>(Class->getDecl()), - Context, - AssociatedNamespaces, - AssociatedClasses); + // These are fundamental types. + case Type::Vector: + case Type::ExtVector: + case Type::Complex: + break; - return; - } + // These are ignored by ADL. + case Type::ObjCObject: + case Type::ObjCInterface: + case Type::ObjCObjectPointer: + break; + } - // FIXME: What about block pointers? 
- // FIXME: What about Objective-C message sends? + if (Queue.empty()) break; + T = Queue.back(); + Queue.pop_back(); + } } /// \brief Find the associated classes and namespaces for @@ -1723,6 +1868,8 @@ Sema::FindAssociatedClassesAndNamespaces(Expr **Args, unsigned NumArgs, AssociatedNamespaces.clear(); AssociatedClasses.clear(); + AssociatedLookup Result(*this, AssociatedNamespaces, AssociatedClasses); + // C++ [basic.lookup.koenig]p2: // For each argument type T in the function call, there is a set // of zero or more associated namespaces and a set of zero or more @@ -1734,9 +1881,7 @@ Sema::FindAssociatedClassesAndNamespaces(Expr **Args, unsigned NumArgs, Expr *Arg = Args[ArgIdx]; if (Arg->getType() != Context.OverloadTy) { - addAssociatedClassesAndNamespaces(Arg->getType(), Context, - AssociatedNamespaces, - AssociatedClasses); + addAssociatedClassesAndNamespaces(Result, Arg->getType()); continue; } @@ -1752,17 +1897,11 @@ Sema::FindAssociatedClassesAndNamespaces(Expr **Args, unsigned NumArgs, if (unaryOp->getOpcode() == UnaryOperator::AddrOf) Arg = unaryOp->getSubExpr(); - // TODO: avoid the copies. This should be easy when the cases - // share a storage implementation. - llvm::SmallVector<NamedDecl*, 8> Functions; + UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(Arg); + if (!ULE) continue; - if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(Arg)) - Functions.append(ULE->decls_begin(), ULE->decls_end()); - else - continue; - - for (llvm::SmallVectorImpl<NamedDecl*>::iterator I = Functions.begin(), - E = Functions.end(); I != E; ++I) { + for (UnresolvedSetIterator I = ULE->decls_begin(), E = ULE->decls_end(); + I != E; ++I) { // Look through any using declarations to find the underlying function. NamedDecl *Fn = (*I)->getUnderlyingDecl(); @@ -1772,9 +1911,7 @@ Sema::FindAssociatedClassesAndNamespaces(Expr **Args, unsigned NumArgs, // Add the classes and namespaces associated with the parameter // types and return type of this function. 
- addAssociatedClassesAndNamespaces(FDecl->getType(), Context, - AssociatedNamespaces, - AssociatedClasses); + addAssociatedClassesAndNamespaces(Result, FDecl->getType()); } } } @@ -1874,6 +2011,36 @@ void Sema::LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, } } +/// \brief Look up the constructors for the given class. +DeclContext::lookup_result Sema::LookupConstructors(CXXRecordDecl *Class) { + // If the copy constructor has not yet been declared, do so now. + if (CanDeclareSpecialMemberFunction(Context, Class)) { + if (!Class->hasDeclaredDefaultConstructor()) + DeclareImplicitDefaultConstructor(Class); + if (!Class->hasDeclaredCopyConstructor()) + DeclareImplicitCopyConstructor(Class); + } + + CanQualType T = Context.getCanonicalType(Context.getTypeDeclType(Class)); + DeclarationName Name = Context.DeclarationNames.getCXXConstructorName(T); + return Class->lookup(Name); +} + +/// \brief Look for the destructor of the given class. +/// +/// During semantic analysis, this routine should be used in lieu of +/// CXXRecordDecl::getDestructor(). +/// +/// \returns The destructor for this class. +CXXDestructorDecl *Sema::LookupDestructor(CXXRecordDecl *Class) { + // If the destructor has not yet been declared, do so now. + if (CanDeclareSpecialMemberFunction(Context, Class) && + !Class->hasDeclaredDestructor()) + DeclareImplicitDestructor(Class); + + return Class->getDestructor(); +} + void ADLResult::insert(NamedDecl *New) { NamedDecl *&Old = Decls[cast<NamedDecl>(New->getCanonicalDecl())]; @@ -2172,6 +2339,9 @@ static void LookupVisibleDecls(DeclContext *Ctx, LookupResult &Result, if (Visited.visitedContext(Ctx->getPrimaryContext())) return; + if (CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(Ctx)) + Result.getSema().ForceDeclarationOfImplicitMembers(Class); + // Enumerate all of the results in this context. 
for (DeclContext *CurCtx = Ctx->getPrimaryContext(); CurCtx; CurCtx = CurCtx->getNextContext()) { @@ -2556,7 +2726,7 @@ DeclarationName Sema::CorrectTypo(LookupResult &Res, Scope *S, CXXScopeSpec *SS, bool EnteringContext, CorrectTypoContext CTC, const ObjCObjectPointerType *OPT) { - if (Diags.hasFatalErrorOccurred()) + if (Diags.hasFatalErrorOccurred() || !getLangOptions().SpellChecking) return DeclarationName(); // Provide a stop gap for files that are just seriously broken. Trying diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp index 4c89a11..ff60599 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp @@ -41,7 +41,8 @@ Sema::DeclPtrTy Sema::ActOnProperty(Scope *S, SourceLocation AtLoc, !(Attributes & ObjCDeclSpec::DQ_PR_retain) && !(Attributes & ObjCDeclSpec::DQ_PR_copy))); - QualType T = GetTypeForDeclarator(FD.D, S); + TypeSourceInfo *TSI = GetTypeForDeclarator(FD.D, S); + QualType T = TSI->getType(); if (T->isReferenceType()) { Diag(AtLoc, diag::error_reference_property); return DeclPtrTy(); @@ -51,18 +52,22 @@ Sema::DeclPtrTy Sema::ActOnProperty(Scope *S, SourceLocation AtLoc, cast<ObjCContainerDecl>(ClassCategory.getAs<Decl>()); if (ObjCCategoryDecl *CDecl = dyn_cast<ObjCCategoryDecl>(ClassDecl)) - if (CDecl->IsClassExtension()) - return HandlePropertyInClassExtension(S, CDecl, AtLoc, - FD, GetterSel, SetterSel, - isAssign, isReadWrite, - Attributes, - isOverridingProperty, T, - MethodImplKind); - + if (CDecl->IsClassExtension()) { + DeclPtrTy Res = HandlePropertyInClassExtension(S, CDecl, AtLoc, + FD, GetterSel, SetterSel, + isAssign, isReadWrite, + Attributes, + isOverridingProperty, TSI, + MethodImplKind); + if (Res) + CheckObjCPropertyAttributes(Res, AtLoc, Attributes); + return Res; + } + DeclPtrTy Res = DeclPtrTy::make(CreatePropertyDecl(S, ClassDecl, AtLoc, FD, - GetterSel, SetterSel, - isAssign, 
isReadWrite, - Attributes, T, MethodImplKind)); + GetterSel, SetterSel, + isAssign, isReadWrite, + Attributes, TSI, MethodImplKind)); // Validate the attributes on the @property. CheckObjCPropertyAttributes(Res, AtLoc, Attributes); return Res; @@ -76,7 +81,7 @@ Sema::HandlePropertyInClassExtension(Scope *S, ObjCCategoryDecl *CDecl, const bool isReadWrite, const unsigned Attributes, bool *isOverridingProperty, - QualType T, + TypeSourceInfo *T, tok::ObjCKeywordKind MethodImplKind) { // Diagnose if this property is already in continuation class. @@ -122,6 +127,10 @@ Sema::HandlePropertyInClassExtension(Scope *S, ObjCCategoryDecl *CDecl, CreatePropertyDecl(S, CCPrimary, AtLoc, FD, GetterSel, SetterSel, isAssign, isReadWrite, Attributes, T, MethodImplKind, DC); + // Mark written attribute as having no attribute because + // this is not a user-written property declaration in primary + // class. + PDecl->setPropertyAttributesAsWritten(ObjCPropertyDecl::OBJC_PR_noattr); // A case of continuation class adding a new property in the class. This // is not what it was meant for. However, gcc supports it and so should we. @@ -133,7 +142,7 @@ Sema::HandlePropertyInClassExtension(Scope *S, ObjCCategoryDecl *CDecl, // The property 'PIDecl's readonly attribute will be over-ridden // with continuation class's readwrite property attribute! 
- unsigned PIkind = PIDecl->getPropertyAttributes(); + unsigned PIkind = PIDecl->getPropertyAttributesAsWritten(); if (isReadWrite && (PIkind & ObjCPropertyDecl::OBJC_PR_readonly)) { unsigned retainCopyNonatomic = (ObjCPropertyDecl::OBJC_PR_retain | @@ -190,11 +199,11 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S, const bool isAssign, const bool isReadWrite, const unsigned Attributes, - QualType T, + TypeSourceInfo *TInfo, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC){ - IdentifierInfo *PropertyId = FD.D.getIdentifier(); + QualType T = TInfo->getType(); // Issue a warning if property is 'assign' as default and its object, which is // gc'able conforms to NSCopying protocol @@ -215,7 +224,7 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S, DeclContext *DC = cast<DeclContext>(CDecl); ObjCPropertyDecl *PDecl = ObjCPropertyDecl::Create(Context, DC, FD.D.getIdentifierLoc(), - PropertyId, AtLoc, T); + PropertyId, AtLoc, TInfo); if (ObjCPropertyDecl *prevDecl = ObjCPropertyDecl::findPropertyDecl(DC, PropertyId)) { @@ -265,6 +274,8 @@ ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S, if (Attributes & ObjCDeclSpec::DQ_PR_nonatomic) PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_nonatomic); + PDecl->setPropertyAttributesAsWritten(PDecl->getPropertyAttributes()); + if (MethodImplKind == tok::objc_required) PDecl->setPropertyImplementation(ObjCPropertyDecl::Required); else if (MethodImplKind == tok::objc_optional) @@ -771,7 +782,8 @@ bool Sema::isPropertyReadonly(ObjCPropertyDecl *PDecl, /// CollectImmediateProperties - This routine collects all properties in /// the class and its conforming protocols; but not those it its super class. 
void Sema::CollectImmediateProperties(ObjCContainerDecl *CDecl, - llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& PropMap) { + llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& PropMap, + llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& SuperPropMap) { if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl)) { for (ObjCContainerDecl::prop_iterator P = IDecl->prop_begin(), E = IDecl->prop_end(); P != E; ++P) { @@ -781,10 +793,7 @@ void Sema::CollectImmediateProperties(ObjCContainerDecl *CDecl, // scan through class's protocols. for (ObjCInterfaceDecl::protocol_iterator PI = IDecl->protocol_begin(), E = IDecl->protocol_end(); PI != E; ++PI) - // Exclude property for protocols which conform to class's super-class, - // as super-class has to implement the property. - if (!ProtocolConformsToSuperClass(IDecl, (*PI))) - CollectImmediateProperties((*PI), PropMap); + CollectImmediateProperties((*PI), PropMap, SuperPropMap); } if (ObjCCategoryDecl *CATDecl = dyn_cast<ObjCCategoryDecl>(CDecl)) { if (!CATDecl->IsClassExtension()) @@ -796,20 +805,25 @@ void Sema::CollectImmediateProperties(ObjCContainerDecl *CDecl, // scan through class's protocols. for (ObjCInterfaceDecl::protocol_iterator PI = CATDecl->protocol_begin(), E = CATDecl->protocol_end(); PI != E; ++PI) - CollectImmediateProperties((*PI), PropMap); + CollectImmediateProperties((*PI), PropMap, SuperPropMap); } else if (ObjCProtocolDecl *PDecl = dyn_cast<ObjCProtocolDecl>(CDecl)) { for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(), E = PDecl->prop_end(); P != E; ++P) { ObjCPropertyDecl *Prop = (*P); - ObjCPropertyDecl *&PropEntry = PropMap[Prop->getIdentifier()]; - if (!PropEntry) - PropEntry = Prop; + ObjCPropertyDecl *PropertyFromSuper = SuperPropMap[Prop->getIdentifier()]; + // Exclude property for protocols which conform to class's super-class, + // as super-class has to implement the property. 
+ if (!PropertyFromSuper || PropertyFromSuper != Prop) { + ObjCPropertyDecl *&PropEntry = PropMap[Prop->getIdentifier()]; + if (!PropEntry) + PropEntry = Prop; + } } // scan through protocol's protocols. for (ObjCProtocolDecl::protocol_iterator PI = PDecl->protocol_begin(), E = PDecl->protocol_end(); PI != E; ++PI) - CollectImmediateProperties((*PI), PropMap); + CollectImmediateProperties((*PI), PropMap, SuperPropMap); } } @@ -854,33 +868,6 @@ static void CollectSuperClassPropertyImplementations(ObjCInterfaceDecl *CDecl, } } -/// ProtocolConformsToSuperClass - Returns true if class's given protocol -/// conforms to one of its super class's protocols. -bool Sema::ProtocolConformsToSuperClass(const ObjCInterfaceDecl *IDecl, - const ObjCProtocolDecl *PDecl) { - if (const ObjCInterfaceDecl *CDecl = IDecl->getSuperClass()) { - for (ObjCInterfaceDecl::protocol_iterator PI = CDecl->protocol_begin(), - E = CDecl->protocol_end(); PI != E; ++PI) { - if (ProtocolConformsToProtocol((*PI), PDecl)) - return true; - return ProtocolConformsToSuperClass(CDecl, PDecl); - } - } - return false; -} - -bool Sema::ProtocolConformsToProtocol(const ObjCProtocolDecl *NestedProtocol, - const ObjCProtocolDecl *PDecl) { - if (PDecl->getIdentifier() == NestedProtocol->getIdentifier()) - return true; - // scan through protocol's protocols. - for (ObjCProtocolDecl::protocol_iterator PI = PDecl->protocol_begin(), - E = PDecl->protocol_end(); PI != E; ++PI) - if (ProtocolConformsToProtocol(NestedProtocol, (*PI))) - return true; - return false; -} - /// LookupPropertyDecl - Looks up a property in the current class and all /// its protocols. ObjCPropertyDecl *Sema::LookupPropertyDecl(const ObjCContainerDecl *CDecl, @@ -943,6 +930,10 @@ void Sema::DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl, Prop->getPropertyImplementation() == ObjCPropertyDecl::Optional || IMPDecl->FindPropertyImplIvarDecl(Prop->getIdentifier())) continue; + // Property may have been synthesized by user. 
+ if (IMPDecl->FindPropertyImplDecl(Prop->getIdentifier())) + continue; + ActOnPropertyImplDecl(S, IMPDecl->getLocation(), IMPDecl->getLocation(), true, DeclPtrTy::make(IMPDecl), Prop->getIdentifier(), Prop->getIdentifier()); @@ -952,8 +943,12 @@ void Sema::DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl, void Sema::DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, const llvm::DenseSet<Selector>& InsMap) { + llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*> SuperPropMap; + if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl)) + CollectSuperClassPropertyImplementations(IDecl, SuperPropMap); + llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*> PropMap; - CollectImmediateProperties(CDecl, PropMap); + CollectImmediateProperties(CDecl, PropMap, SuperPropMap); if (PropMap.empty()) return; diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp index 2754d44..c4ab906 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp @@ -155,7 +155,9 @@ bool StandardConversionSequence::isPointerConversionToBool() const { // check for their presence as well as checking whether FromType is // a pointer. if (getToType(1)->isBooleanType() && - (getFromType()->isPointerType() || getFromType()->isBlockPointerType() || + (getFromType()->isPointerType() || + getFromType()->isObjCObjectPointerType() || + getFromType()->isBlockPointerType() || First == ICK_Array_To_Pointer || First == ICK_Function_To_Pointer)) return true; @@ -498,19 +500,54 @@ void OverloadCandidateSet::clear() { // identical (return types of functions are not part of the // signature), IsOverload returns false and MatchedDecl will be set to // point to the FunctionDecl for #2. +// +// 'NewIsUsingShadowDecl' indicates that 'New' is being introduced +// into a class by a using declaration. 
The rules for whether to hide +// shadow declarations ignore some properties which otherwise figure +// into a function template's signature. Sema::OverloadKind -Sema::CheckOverload(FunctionDecl *New, const LookupResult &Old, - NamedDecl *&Match) { +Sema::CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &Old, + NamedDecl *&Match, bool NewIsUsingDecl) { for (LookupResult::iterator I = Old.begin(), E = Old.end(); I != E; ++I) { - NamedDecl *OldD = (*I)->getUnderlyingDecl(); + NamedDecl *OldD = *I; + + bool OldIsUsingDecl = false; + if (isa<UsingShadowDecl>(OldD)) { + OldIsUsingDecl = true; + + // We can always introduce two using declarations into the same + // context, even if they have identical signatures. + if (NewIsUsingDecl) continue; + + OldD = cast<UsingShadowDecl>(OldD)->getTargetDecl(); + } + + // If either declaration was introduced by a using declaration, + // we'll need to use slightly different rules for matching. + // Essentially, these rules are the normal rules, except that + // function templates hide function templates with different + // return types or template parameter lists. 
+ bool UseMemberUsingDeclRules = + (OldIsUsingDecl || NewIsUsingDecl) && CurContext->isRecord(); + if (FunctionTemplateDecl *OldT = dyn_cast<FunctionTemplateDecl>(OldD)) { - if (!IsOverload(New, OldT->getTemplatedDecl())) { + if (!IsOverload(New, OldT->getTemplatedDecl(), UseMemberUsingDeclRules)) { + if (UseMemberUsingDeclRules && OldIsUsingDecl) { + HideUsingShadowDecl(S, cast<UsingShadowDecl>(*I)); + continue; + } + Match = *I; return Ovl_Match; } } else if (FunctionDecl *OldF = dyn_cast<FunctionDecl>(OldD)) { - if (!IsOverload(New, OldF)) { + if (!IsOverload(New, OldF, UseMemberUsingDeclRules)) { + if (UseMemberUsingDeclRules && OldIsUsingDecl) { + HideUsingShadowDecl(S, cast<UsingShadowDecl>(*I)); + continue; + } + Match = *I; return Ovl_Match; } @@ -534,7 +571,8 @@ Sema::CheckOverload(FunctionDecl *New, const LookupResult &Old, return Ovl_Overload; } -bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old) { +bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old, + bool UseUsingDeclRules) { FunctionTemplateDecl *OldTemplate = Old->getDescribedFunctionTemplate(); FunctionTemplateDecl *NewTemplate = New->getDescribedFunctionTemplate(); @@ -579,7 +617,10 @@ bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old) { // // We check the return type and template parameter lists for function // templates first; the remaining checks follow. - if (NewTemplate && + // + // However, we don't consider either of these when deciding whether + // a member introduced by a shadow declaration is hidden. + if (!UseUsingDeclRules && NewTemplate && (!TemplateParameterListsAreEqual(NewTemplate->getTemplateParameters(), OldTemplate->getTemplateParameters(), false, TPL_TemplateMatch) || @@ -804,7 +845,7 @@ static bool IsVectorConversion(ASTContext &Context, QualType FromType, return false; // Vector splat from any arithmetic type to a vector. 
- if (!FromType->isVectorType() && FromType->isArithmeticType()) { + if (FromType->isArithmeticType()) { ICK = ICK_Vector_Splat; return true; } @@ -960,8 +1001,8 @@ Sema::IsStandardConversion(Expr* From, QualType ToType, // Complex promotion (Clang extension) SCS.Second = ICK_Complex_Promotion; FromType = ToType.getUnqualifiedType(); - } else if ((FromType->isIntegralType() || FromType->isEnumeralType()) && - (ToType->isIntegralType() && !ToType->isEnumeralType())) { + } else if (FromType->isIntegralOrEnumerationType() && + ToType->isIntegralType(Context)) { // Integral conversions (C++ 4.7). SCS.Second = ICK_Integral_Conversion; FromType = ToType.getUnqualifiedType(); @@ -974,15 +1015,14 @@ Sema::IsStandardConversion(Expr* From, QualType ToType, // Complex-real conversions (C99 6.3.1.7) SCS.Second = ICK_Complex_Real; FromType = ToType.getUnqualifiedType(); - } else if (FromType->isFloatingType() && ToType->isFloatingType()) { + } else if (FromType->isRealFloatingType() && ToType->isRealFloatingType()) { // Floating point conversions (C++ 4.8). SCS.Second = ICK_Floating_Conversion; FromType = ToType.getUnqualifiedType(); - } else if ((FromType->isFloatingType() && - ToType->isIntegralType() && (!ToType->isBooleanType() && - !ToType->isEnumeralType())) || - ((FromType->isIntegralType() || FromType->isEnumeralType()) && - ToType->isFloatingType())) { + } else if ((FromType->isRealFloatingType() && + ToType->isIntegralType(Context) && !ToType->isBooleanType()) || + (FromType->isIntegralOrEnumerationType() && + ToType->isRealFloatingType())) { // Floating-integral conversions (C++ 4.9). 
SCS.Second = ICK_Floating_Integral; FromType = ToType.getUnqualifiedType(); @@ -1141,7 +1181,7 @@ bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) { if (From) if (FieldDecl *MemberDecl = From->getBitField()) { APSInt BitWidth; - if (FromType->isIntegralType() && !FromType->isEnumeralType() && + if (FromType->isIntegralType(Context) && MemberDecl->getBitWidth()->isIntegerConstantExpr(BitWidth, Context)) { APSInt ToSize(BitWidth.getBitWidth(), BitWidth.isUnsigned()); ToSize = Context.getTypeSize(ToType); @@ -1271,7 +1311,7 @@ static bool isNullPointerConstantForConversion(Expr *Expr, // Handle value-dependent integral null pointer constants correctly. // http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#903 if (Expr->isValueDependent() && !Expr->isTypeDependent() && - Expr->getType()->isIntegralType()) + Expr->getType()->isIntegerType() && !Expr->getType()->isEnumeralType()) return !InOverloadResolution; return Expr->isNullPointerConstant(Context, @@ -1622,6 +1662,12 @@ bool Sema::CheckPointerConversion(Expr *From, QualType ToType, bool IgnoreBaseAccess) { QualType FromType = From->getType(); + if (CXXBoolLiteralExpr* LitBool + = dyn_cast<CXXBoolLiteralExpr>(From->IgnoreParens())) + if (LitBool->getValue() == false) + Diag(LitBool->getExprLoc(), diag::warn_init_pointer_from_false) + << ToType; + if (const PointerType *FromPtrType = FromType->getAs<PointerType>()) if (const PointerType *ToPtrType = ToType->getAs<PointerType>()) { QualType FromPointeeType = FromPtrType->getPointeeType(), @@ -1779,7 +1825,7 @@ Sema::IsQualificationConversion(QualType FromType, QualType ToType) { // in multi-level pointers, subject to the following rules: [...] 
bool PreviousToQualsIncludeConst = true; bool UnwrappedAnyPointer = false; - while (UnwrapSimilarPointerTypes(FromType, ToType)) { + while (Context.UnwrapSimilarPointerTypes(FromType, ToType)) { // Within each iteration of the loop, we check the qualifiers to // determine if this still looks like a qualification // conversion. Then, if all is well, we unwrap one more level of @@ -1850,12 +1896,8 @@ OverloadingResult Sema::IsUserDefinedConversion(Expr *From, QualType ToType, // We're not going to find any constructors. } else if (CXXRecordDecl *ToRecordDecl = dyn_cast<CXXRecordDecl>(ToRecordType->getDecl())) { - DeclarationName ConstructorName - = Context.DeclarationNames.getCXXConstructorName( - Context.getCanonicalType(ToType).getUnqualifiedType()); DeclContext::lookup_iterator Con, ConEnd; - for (llvm::tie(Con, ConEnd) - = ToRecordDecl->lookup(ConstructorName); + for (llvm::tie(Con, ConEnd) = LookupConstructors(ToRecordDecl); Con != ConEnd; ++Con) { NamedDecl *D = *Con; DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess()); @@ -2067,6 +2109,16 @@ Sema::CompareImplicitConversionSequences(const ImplicitConversionSequence& ICS1, return ImplicitConversionSequence::Indistinguishable; } +static bool hasSimilarType(ASTContext &Context, QualType T1, QualType T2) { + while (Context.UnwrapSimilarPointerTypes(T1, T2)) { + Qualifiers Quals; + T1 = Context.getUnqualifiedArrayType(T1, Quals); + T2 = Context.getUnqualifiedArrayType(T2, Quals); + } + + return Context.hasSameUnqualifiedType(T1, T2); +} + // Per 13.3.3.2p3, compare the given standard conversion sequences to // determine if one is a proper subset of the other. 
static ImplicitConversionSequence::CompareKind @@ -2092,7 +2144,7 @@ compareStandardConversionSubsets(ASTContext &Context, Result = ImplicitConversionSequence::Worse; else return ImplicitConversionSequence::Indistinguishable; - } else if (!Context.hasSameType(SCS1.getToType(1), SCS2.getToType(1))) + } else if (!hasSimilarType(Context, SCS1.getToType(1), SCS2.getToType(1))) return ImplicitConversionSequence::Indistinguishable; if (SCS1.Third == SCS2.Third) { @@ -2299,7 +2351,7 @@ Sema::CompareQualificationConversions(const StandardConversionSequence& SCS1, ImplicitConversionSequence::CompareKind Result = ImplicitConversionSequence::Indistinguishable; - while (UnwrapSimilarPointerTypes(T1, T2)) { + while (Context.UnwrapSimilarPointerTypes(T1, T2)) { // Within each iteration of the loop, we check the qualifiers to // determine if this still looks like a qualification // conversion. Then, if all is well, we unwrap one more level of @@ -2566,6 +2618,95 @@ Sema::CompareReferenceRelationship(SourceLocation Loc, return Ref_Related; } +/// \brief Look for a user-defined conversion to an lvalue reference-compatible +/// with DeclType. Return true if something definite is found. 
+static bool +FindConversionToLValue(Sema &S, ImplicitConversionSequence &ICS, + QualType DeclType, SourceLocation DeclLoc, + Expr *Init, QualType T2, bool AllowExplicit) { + assert(T2->isRecordType() && "Can only find conversions of record types."); + CXXRecordDecl *T2RecordDecl + = dyn_cast<CXXRecordDecl>(T2->getAs<RecordType>()->getDecl()); + + OverloadCandidateSet CandidateSet(DeclLoc); + const UnresolvedSetImpl *Conversions + = T2RecordDecl->getVisibleConversionFunctions(); + for (UnresolvedSetImpl::iterator I = Conversions->begin(), + E = Conversions->end(); I != E; ++I) { + NamedDecl *D = *I; + CXXRecordDecl *ActingDC = cast<CXXRecordDecl>(D->getDeclContext()); + if (isa<UsingShadowDecl>(D)) + D = cast<UsingShadowDecl>(D)->getTargetDecl(); + + FunctionTemplateDecl *ConvTemplate + = dyn_cast<FunctionTemplateDecl>(D); + CXXConversionDecl *Conv; + if (ConvTemplate) + Conv = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl()); + else + Conv = cast<CXXConversionDecl>(D); + + // If the conversion function doesn't return a reference type, + // it can't be considered for this conversion. An rvalue reference + // is only acceptable if its referencee is a function type. + const ReferenceType *RefType = + Conv->getConversionType()->getAs<ReferenceType>(); + if (RefType && (RefType->isLValueReferenceType() || + RefType->getPointeeType()->isFunctionType()) && + (AllowExplicit || !Conv->isExplicit())) { + if (ConvTemplate) + S.AddTemplateConversionCandidate(ConvTemplate, I.getPair(), ActingDC, + Init, DeclType, CandidateSet); + else + S.AddConversionCandidate(Conv, I.getPair(), ActingDC, Init, + DeclType, CandidateSet); + } + } + + OverloadCandidateSet::iterator Best; + switch (S.BestViableFunction(CandidateSet, DeclLoc, Best)) { + case OR_Success: + // C++ [over.ics.ref]p1: + // + // [...] 
If the parameter binds directly to the result of + // applying a conversion function to the argument + // expression, the implicit conversion sequence is a + // user-defined conversion sequence (13.3.3.1.2), with the + // second standard conversion sequence either an identity + // conversion or, if the conversion function returns an + // entity of a type that is a derived class of the parameter + // type, a derived-to-base Conversion. + if (!Best->FinalConversion.DirectBinding) + return false; + + ICS.setUserDefined(); + ICS.UserDefined.Before = Best->Conversions[0].Standard; + ICS.UserDefined.After = Best->FinalConversion; + ICS.UserDefined.ConversionFunction = Best->Function; + ICS.UserDefined.EllipsisConversion = false; + assert(ICS.UserDefined.After.ReferenceBinding && + ICS.UserDefined.After.DirectBinding && + "Expected a direct reference binding!"); + return true; + + case OR_Ambiguous: + ICS.setAmbiguous(); + for (OverloadCandidateSet::iterator Cand = CandidateSet.begin(); + Cand != CandidateSet.end(); ++Cand) + if (Cand->Viable) + ICS.Ambiguous.addConversion(Cand->Function); + return true; + + case OR_No_Viable_Function: + case OR_Deleted: + // There was no suitable conversion, or we found a deleted + // conversion; continue with other checks. + return false; + } + + return false; +} + /// \brief Compute an implicit conversion sequence for reference /// initialization. static ImplicitConversionSequence @@ -2595,149 +2736,72 @@ TryReferenceInit(Sema &S, Expr *&Init, QualType DeclType, // Compute some basic properties of the types and the initializer. 
bool isRValRef = DeclType->isRValueReferenceType(); bool DerivedToBase = false; - Expr::isLvalueResult InitLvalue = Init->isLvalue(S.Context); + Expr::Classification InitCategory = Init->Classify(S.Context); Sema::ReferenceCompareResult RefRelationship = S.CompareReferenceRelationship(DeclLoc, T1, T2, DerivedToBase); - // C++ [over.ics.ref]p3: - // Except for an implicit object parameter, for which see 13.3.1, - // a standard conversion sequence cannot be formed if it requires - // binding an lvalue reference to non-const to an rvalue or - // binding an rvalue reference to an lvalue. - // - // FIXME: DPG doesn't trust this code. It seems far too early to - // abort because of a binding of an rvalue reference to an lvalue. - if (isRValRef && InitLvalue == Expr::LV_Valid) - return ICS; - - // C++0x [dcl.init.ref]p16: + // C++0x [dcl.init.ref]p5: // A reference to type "cv1 T1" is initialized by an expression // of type "cv2 T2" as follows: - // -- If the initializer expression - // -- is an lvalue (but is not a bit-field), and "cv1 T1" is - // reference-compatible with "cv2 T2," or - // - // Per C++ [over.ics.ref]p4, we don't check the bit-field property here. - if (InitLvalue == Expr::LV_Valid && - RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification) { - // C++ [over.ics.ref]p1: - // When a parameter of reference type binds directly (8.5.3) - // to an argument expression, the implicit conversion sequence - // is the identity conversion, unless the argument expression - // has a type that is a derived class of the parameter type, - // in which case the implicit conversion sequence is a - // derived-to-base Conversion (13.3.3.1). - ICS.setStandard(); - ICS.Standard.First = ICK_Identity; - ICS.Standard.Second = DerivedToBase? 
ICK_Derived_To_Base : ICK_Identity; - ICS.Standard.Third = ICK_Identity; - ICS.Standard.FromTypePtr = T2.getAsOpaquePtr(); - ICS.Standard.setToType(0, T2); - ICS.Standard.setToType(1, T1); - ICS.Standard.setToType(2, T1); - ICS.Standard.ReferenceBinding = true; - ICS.Standard.DirectBinding = true; - ICS.Standard.RRefBinding = false; - ICS.Standard.CopyConstructor = 0; - - // Nothing more to do: the inaccessibility/ambiguity check for - // derived-to-base conversions is suppressed when we're - // computing the implicit conversion sequence (C++ - // [over.best.ics]p2). - return ICS; - } - - // -- has a class type (i.e., T2 is a class type), where T1 is - // not reference-related to T2, and can be implicitly - // converted to an lvalue of type "cv3 T3," where "cv1 T1" - // is reference-compatible with "cv3 T3" 92) (this - // conversion is selected by enumerating the applicable - // conversion functions (13.3.1.6) and choosing the best - // one through overload resolution (13.3)), - if (!isRValRef && !SuppressUserConversions && T2->isRecordType() && - !S.RequireCompleteType(DeclLoc, T2, 0) && - RefRelationship == Sema::Ref_Incompatible) { - CXXRecordDecl *T2RecordDecl - = dyn_cast<CXXRecordDecl>(T2->getAs<RecordType>()->getDecl()); - - OverloadCandidateSet CandidateSet(DeclLoc); - const UnresolvedSetImpl *Conversions - = T2RecordDecl->getVisibleConversionFunctions(); - for (UnresolvedSetImpl::iterator I = Conversions->begin(), - E = Conversions->end(); I != E; ++I) { - NamedDecl *D = *I; - CXXRecordDecl *ActingDC = cast<CXXRecordDecl>(D->getDeclContext()); - if (isa<UsingShadowDecl>(D)) - D = cast<UsingShadowDecl>(D)->getTargetDecl(); - - FunctionTemplateDecl *ConvTemplate - = dyn_cast<FunctionTemplateDecl>(D); - CXXConversionDecl *Conv; - if (ConvTemplate) - Conv = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl()); - else - Conv = cast<CXXConversionDecl>(D); - - // If the conversion function doesn't return a reference type, - // it can't be considered for this 
conversion. - if (Conv->getConversionType()->isLValueReferenceType() && - (AllowExplicit || !Conv->isExplicit())) { - if (ConvTemplate) - S.AddTemplateConversionCandidate(ConvTemplate, I.getPair(), ActingDC, - Init, DeclType, CandidateSet); - else - S.AddConversionCandidate(Conv, I.getPair(), ActingDC, Init, - DeclType, CandidateSet); - } - } - - OverloadCandidateSet::iterator Best; - switch (S.BestViableFunction(CandidateSet, DeclLoc, Best)) { - case OR_Success: + // -- If reference is an lvalue reference and the initializer expression + // The next bullet point (T1 is a function) is pretty much equivalent to this + // one, so it's handled here. + if (!isRValRef || T1->isFunctionType()) { + // -- is an lvalue (but is not a bit-field), and "cv1 T1" is + // reference-compatible with "cv2 T2," or + // + // Per C++ [over.ics.ref]p4, we don't check the bit-field property here. + if (InitCategory.isLValue() && + RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification) { // C++ [over.ics.ref]p1: - // - // [...] If the parameter binds directly to the result of - // applying a conversion function to the argument - // expression, the implicit conversion sequence is a - // user-defined conversion sequence (13.3.3.1.2), with the - // second standard conversion sequence either an identity - // conversion or, if the conversion function returns an - // entity of a type that is a derived class of the parameter - // type, a derived-to-base Conversion. 
- if (!Best->FinalConversion.DirectBinding) - break; - - ICS.setUserDefined(); - ICS.UserDefined.Before = Best->Conversions[0].Standard; - ICS.UserDefined.After = Best->FinalConversion; - ICS.UserDefined.ConversionFunction = Best->Function; - ICS.UserDefined.EllipsisConversion = false; - assert(ICS.UserDefined.After.ReferenceBinding && - ICS.UserDefined.After.DirectBinding && - "Expected a direct reference binding!"); - return ICS; - - case OR_Ambiguous: - ICS.setAmbiguous(); - for (OverloadCandidateSet::iterator Cand = CandidateSet.begin(); - Cand != CandidateSet.end(); ++Cand) - if (Cand->Viable) - ICS.Ambiguous.addConversion(Cand->Function); + // When a parameter of reference type binds directly (8.5.3) + // to an argument expression, the implicit conversion sequence + // is the identity conversion, unless the argument expression + // has a type that is a derived class of the parameter type, + // in which case the implicit conversion sequence is a + // derived-to-base Conversion (13.3.3.1). + ICS.setStandard(); + ICS.Standard.First = ICK_Identity; + ICS.Standard.Second = DerivedToBase? ICK_Derived_To_Base : ICK_Identity; + ICS.Standard.Third = ICK_Identity; + ICS.Standard.FromTypePtr = T2.getAsOpaquePtr(); + ICS.Standard.setToType(0, T2); + ICS.Standard.setToType(1, T1); + ICS.Standard.setToType(2, T1); + ICS.Standard.ReferenceBinding = true; + ICS.Standard.DirectBinding = true; + ICS.Standard.RRefBinding = isRValRef; + ICS.Standard.CopyConstructor = 0; + + // Nothing more to do: the inaccessibility/ambiguity check for + // derived-to-base conversions is suppressed when we're + // computing the implicit conversion sequence (C++ + // [over.best.ics]p2). return ICS; + } - case OR_No_Viable_Function: - case OR_Deleted: - // There was no suitable conversion, or we found a deleted - // conversion; continue with other checks. 
- break; + // -- has a class type (i.e., T2 is a class type), where T1 is + // not reference-related to T2, and can be implicitly + // converted to an lvalue of type "cv3 T3," where "cv1 T1" + // is reference-compatible with "cv3 T3" 92) (this + // conversion is selected by enumerating the applicable + // conversion functions (13.3.1.6) and choosing the best + // one through overload resolution (13.3)), + if (!SuppressUserConversions && T2->isRecordType() && + !S.RequireCompleteType(DeclLoc, T2, 0) && + RefRelationship == Sema::Ref_Incompatible) { + if (FindConversionToLValue(S, ICS, DeclType, DeclLoc, + Init, T2, AllowExplicit)) + return ICS; } } - // -- Otherwise, the reference shall be to a non-volatile const - // type (i.e., cv1 shall be const), or the reference shall be an - // rvalue reference and the initializer expression shall be an rvalue. + // -- Otherwise, the reference shall be an lvalue reference to a + // non-volatile const type (i.e., cv1 shall be const), or the reference + // shall be an rvalue reference and the initializer expression shall be + // an rvalue or have a function type. // // We actually handle one oddity of C++ [over.ics.ref] at this // point, which is that, due to p2 (which short-circuits reference @@ -2746,10 +2810,26 @@ TryReferenceInit(Sema &S, Expr *&Init, QualType DeclType, // reference to bind to an rvalue. Hence the check for the presence // of "const" rather than checking for "const" being the only // qualifier. - if (!isRValRef && !T1.isConstQualified()) + // This is also the point where rvalue references and lvalue inits no longer + // go together. 
+ if ((!isRValRef && !T1.isConstQualified()) || + (isRValRef && InitCategory.isLValue())) + return ICS; + + // -- If T1 is a function type, then + // -- if T2 is the same type as T1, the reference is bound to the + // initializer expression lvalue; + // -- if T2 is a class type and the initializer expression can be + // implicitly converted to an lvalue of type T1 [...], the + // reference is bound to the function lvalue that is the result + // of the conversion; + // This is the same as for the lvalue case above, so it was handled there. + // -- otherwise, the program is ill-formed. + // This is the one difference to the lvalue case. + if (T1->isFunctionType()) return ICS; - // -- if T2 is a class type and + // -- Otherwise, if T2 is a class type and // -- the initializer expression is an rvalue and "cv1 T1" // is reference-compatible with "cv2 T2," or // @@ -2768,7 +2848,7 @@ TryReferenceInit(Sema &S, Expr *&Init, QualType DeclType, // // We're only checking the first case here, which is a direct // binding in C++0x but not in C++03. - if (InitLvalue != Expr::LV_Valid && T2->isRecordType() && + if (InitCategory.isRValue() && T2->isRecordType() && RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification) { ICS.setStandard(); ICS.Standard.First = ICK_Identity; @@ -3012,6 +3092,177 @@ bool Sema::PerformContextuallyConvertToObjCId(Expr *&From) { return true; } +/// \brief Attempt to convert the given expression to an integral or +/// enumeration type. +/// +/// This routine will attempt to convert an expression of class type to an +/// integral or enumeration type, if that class type only has a single +/// conversion to an integral or enumeration type. +/// +/// \param Loc The source location of the construct that requires the +/// conversion. +/// +/// \param FromE The expression we're converting from. +/// +/// \param NotIntDiag The diagnostic to be emitted if the expression does not +/// have integral or enumeration type. 
+/// +/// \param IncompleteDiag The diagnostic to be emitted if the expression has +/// incomplete class type. +/// +/// \param ExplicitConvDiag The diagnostic to be emitted if we're calling an +/// explicit conversion function (because no implicit conversion functions +/// were available). This is a recovery mode. +/// +/// \param ExplicitConvNote The note to be emitted with \p ExplicitConvDiag, +/// showing which conversion was picked. +/// +/// \param AmbigDiag The diagnostic to be emitted if there is more than one +/// conversion function that could convert to integral or enumeration type. +/// +/// \param AmbigNote The note to be emitted with \p AmbigDiag for each +/// usable conversion function. +/// +/// \param ConvDiag The diagnostic to be emitted if we are calling a conversion +/// function, which may be an extension in this case. +/// +/// \returns The expression, converted to an integral or enumeration type if +/// successful. +Sema::OwningExprResult +Sema::ConvertToIntegralOrEnumerationType(SourceLocation Loc, ExprArg FromE, + const PartialDiagnostic &NotIntDiag, + const PartialDiagnostic &IncompleteDiag, + const PartialDiagnostic &ExplicitConvDiag, + const PartialDiagnostic &ExplicitConvNote, + const PartialDiagnostic &AmbigDiag, + const PartialDiagnostic &AmbigNote, + const PartialDiagnostic &ConvDiag) { + Expr *From = static_cast<Expr *>(FromE.get()); + + // We can't perform any more checking for type-dependent expressions. + if (From->isTypeDependent()) + return move(FromE); + + // If the expression already has integral or enumeration type, we're golden. + QualType T = From->getType(); + if (T->isIntegralOrEnumerationType()) + return move(FromE); + + // FIXME: Check for missing '()' if T is a function type? + + // If we don't have a class type in C++, there's no way we can get an + // expression of integral or enumeration type. 
+ const RecordType *RecordTy = T->getAs<RecordType>(); + if (!RecordTy || !getLangOptions().CPlusPlus) { + Diag(Loc, NotIntDiag) + << T << From->getSourceRange(); + return move(FromE); + } + + // We must have a complete class type. + if (RequireCompleteType(Loc, T, IncompleteDiag)) + return move(FromE); + + // Look for a conversion to an integral or enumeration type. + UnresolvedSet<4> ViableConversions; + UnresolvedSet<4> ExplicitConversions; + const UnresolvedSetImpl *Conversions + = cast<CXXRecordDecl>(RecordTy->getDecl())->getVisibleConversionFunctions(); + + for (UnresolvedSetImpl::iterator I = Conversions->begin(), + E = Conversions->end(); + I != E; + ++I) { + if (CXXConversionDecl *Conversion + = dyn_cast<CXXConversionDecl>((*I)->getUnderlyingDecl())) + if (Conversion->getConversionType().getNonReferenceType() + ->isIntegralOrEnumerationType()) { + if (Conversion->isExplicit()) + ExplicitConversions.addDecl(I.getDecl(), I.getAccess()); + else + ViableConversions.addDecl(I.getDecl(), I.getAccess()); + } + } + + switch (ViableConversions.size()) { + case 0: + if (ExplicitConversions.size() == 1) { + DeclAccessPair Found = ExplicitConversions[0]; + CXXConversionDecl *Conversion + = cast<CXXConversionDecl>(Found->getUnderlyingDecl()); + + // The user probably meant to invoke the given explicit + // conversion; use it. + QualType ConvTy + = Conversion->getConversionType().getNonReferenceType(); + std::string TypeStr; + ConvTy.getAsStringInternal(TypeStr, Context.PrintingPolicy); + + Diag(Loc, ExplicitConvDiag) + << T << ConvTy + << FixItHint::CreateInsertion(From->getLocStart(), + "static_cast<" + TypeStr + ">(") + << FixItHint::CreateInsertion(PP.getLocForEndOfToken(From->getLocEnd()), + ")"); + Diag(Conversion->getLocation(), ExplicitConvNote) + << ConvTy->isEnumeralType() << ConvTy; + + // If we aren't in a SFINAE context, build a call to the + // explicit conversion function. 
+ if (isSFINAEContext()) + return ExprError(); + + CheckMemberOperatorAccess(From->getExprLoc(), From, 0, Found); + From = BuildCXXMemberCallExpr(FromE.takeAs<Expr>(), Found, Conversion); + FromE = Owned(From); + } + + // We'll complain below about a non-integral condition type. + break; + + case 1: { + // Apply this conversion. + DeclAccessPair Found = ViableConversions[0]; + CheckMemberOperatorAccess(From->getExprLoc(), From, 0, Found); + + CXXConversionDecl *Conversion + = cast<CXXConversionDecl>(Found->getUnderlyingDecl()); + QualType ConvTy + = Conversion->getConversionType().getNonReferenceType(); + if (ConvDiag.getDiagID()) { + if (isSFINAEContext()) + return ExprError(); + + Diag(Loc, ConvDiag) + << T << ConvTy->isEnumeralType() << ConvTy << From->getSourceRange(); + } + + From = BuildCXXMemberCallExpr(FromE.takeAs<Expr>(), Found, + cast<CXXConversionDecl>(Found->getUnderlyingDecl())); + FromE = Owned(From); + break; + } + + default: + Diag(Loc, AmbigDiag) + << T << From->getSourceRange(); + for (unsigned I = 0, N = ViableConversions.size(); I != N; ++I) { + CXXConversionDecl *Conv + = cast<CXXConversionDecl>(ViableConversions[I]->getUnderlyingDecl()); + QualType ConvTy = Conv->getConversionType().getNonReferenceType(); + Diag(Conv->getLocation(), AmbigNote) + << ConvTy->isEnumeralType() << ConvTy; + } + return move(FromE); + } + + if (!From->getType()->isIntegralOrEnumerationType()) + Diag(Loc, NotIntDiag) + << From->getType() << From->getSourceRange(); + + return move(FromE); +} + /// AddOverloadCandidate - Adds the given function to the set of /// candidate functions, using the given function call arguments. If /// @p SuppressUserConversions, then don't allow user-defined @@ -3476,7 +3727,7 @@ Sema::AddConversionCandidate(CXXConversionDecl *Conversion, // there are 0 arguments (i.e., nothing is allocated using ASTContext's // allocator). 
CallExpr Call(Context, &ConversionFn, 0, 0, - Conversion->getConversionType().getNonReferenceType(), + Conversion->getConversionType().getNonLValueExprType(Context), From->getLocStart()); ImplicitConversionSequence ICS = TryCopyInitialization(*this, &Call, ToType, @@ -4949,7 +5200,7 @@ Sema::isBetterOverloadCandidate(const OverloadCandidate& Cand1, // - F1 is a non-template function and F2 is a function template // specialization, or, if not that, - if (Cand1.Function && !Cand1.Function->getPrimaryTemplate() && + if ((!Cand1.Function || !Cand1.Function->getPrimaryTemplate()) && Cand2.Function && Cand2.Function->getPrimaryTemplate()) return true; @@ -5230,6 +5481,46 @@ void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand, unsigned I) { return; } + // Diagnose base -> derived pointer conversions. + unsigned BaseToDerivedConversion = 0; + if (const PointerType *FromPtrTy = FromTy->getAs<PointerType>()) { + if (const PointerType *ToPtrTy = ToTy->getAs<PointerType>()) { + if (ToPtrTy->getPointeeType().isAtLeastAsQualifiedAs( + FromPtrTy->getPointeeType()) && + !FromPtrTy->getPointeeType()->isIncompleteType() && + !ToPtrTy->getPointeeType()->isIncompleteType() && + S.IsDerivedFrom(ToPtrTy->getPointeeType(), + FromPtrTy->getPointeeType())) + BaseToDerivedConversion = 1; + } + } else if (const ObjCObjectPointerType *FromPtrTy + = FromTy->getAs<ObjCObjectPointerType>()) { + if (const ObjCObjectPointerType *ToPtrTy + = ToTy->getAs<ObjCObjectPointerType>()) + if (const ObjCInterfaceDecl *FromIface = FromPtrTy->getInterfaceDecl()) + if (const ObjCInterfaceDecl *ToIface = ToPtrTy->getInterfaceDecl()) + if (ToPtrTy->getPointeeType().isAtLeastAsQualifiedAs( + FromPtrTy->getPointeeType()) && + FromIface->isSuperClassOf(ToIface)) + BaseToDerivedConversion = 2; + } else if (const ReferenceType *ToRefTy = ToTy->getAs<ReferenceType>()) { + if (ToRefTy->getPointeeType().isAtLeastAsQualifiedAs(FromTy) && + !FromTy->isIncompleteType() && + 
!ToRefTy->getPointeeType()->isIncompleteType() && + S.IsDerivedFrom(ToRefTy->getPointeeType(), FromTy)) + BaseToDerivedConversion = 3; + } + + if (BaseToDerivedConversion) { + S.Diag(Fn->getLocation(), + diag::note_ovl_candidate_bad_base_to_derived_conv) + << (unsigned) FnKind << FnDesc + << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) + << (BaseToDerivedConversion - 1) + << FromTy << ToTy << I+1; + return; + } + // TODO: specialize more based on the kind of mismatch S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_conv) << (unsigned) FnKind << FnDesc @@ -5673,7 +5964,10 @@ Sema::PrintOverloadCandidates(OverloadCandidateSet& CandidateSet, Cands.push_back(Cand); else if (OCD == OCD_AllCandidates) { CompleteNonViableCandidate(*this, Cand, Args, NumArgs); - Cands.push_back(Cand); + if (Cand->Function || Cand->IsSurrogate) + Cands.push_back(Cand); + // Otherwise, this a non-viable builtin candidate. We do not, in general, + // want to list every possible builtin candidate. } } @@ -5683,17 +5977,26 @@ Sema::PrintOverloadCandidates(OverloadCandidateSet& CandidateSet, bool ReportedAmbiguousConversions = false; llvm::SmallVectorImpl<OverloadCandidate*>::iterator I, E; + const Diagnostic::OverloadsShown ShowOverloads = Diags.getShowOverloads(); + unsigned CandsShown = 0; for (I = Cands.begin(), E = Cands.end(); I != E; ++I) { OverloadCandidate *Cand = *I; + // Set an arbitrary limit on the number of candidate functions we'll spam + // the user with. FIXME: This limit should depend on details of the + // candidate list. + if (CandsShown >= 4 && ShowOverloads == Diagnostic::Ovl_Best) { + break; + } + ++CandsShown; + if (Cand->Function) NoteFunctionCandidate(*this, Cand, Args, NumArgs); else if (Cand->IsSurrogate) NoteSurrogateCandidate(*this, Cand); - - // This a builtin candidate. We do not, in general, want to list - // every possible builtin candidate. 
- else if (Cand->Viable) { + else { + assert(Cand->Viable && + "Non-viable built-in candidates are not added to Cands."); // Generally we only see ambiguities including viable builtin // operators if overload resolution got screwed up by an // ambiguous user-defined conversion. @@ -5709,6 +6012,9 @@ Sema::PrintOverloadCandidates(OverloadCandidateSet& CandidateSet, NoteBuiltinOperatorCandidate(*this, Opc, OpLoc, Cand); } } + + if (I != E) + Diag(OpLoc, diag::note_ovl_too_many_candidates) << int(E - I); } static bool CheckUnresolvedAccess(Sema &S, OverloadExpr *E, DeclAccessPair D) { @@ -5981,7 +6287,8 @@ FunctionDecl *Sema::ResolveSingleFunctionTemplateSpecialization(Expr *From) { // specified and it, along with any default template arguments, // identifies a single function template specialization, then the // template-id is an lvalue for the function template specialization. - FunctionTemplateDecl *FunctionTemplate = cast<FunctionTemplateDecl>(*I); + FunctionTemplateDecl *FunctionTemplate + = cast<FunctionTemplateDecl>((*I)->getUnderlyingDecl()); // C++ [over.over]p2: // If the name is a function template, template argument deduction is @@ -6159,7 +6466,7 @@ BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn, Sema::MultiExprArg(SemaRef, (void**) Args, NumArgs), CommaLocs, RParenLoc); } - + /// ResolveOverloadedCallFn - Given the call expression that calls Fn /// (which eventually refers to the declaration Func) and the call /// arguments Args/NumArgs, attempt to resolve the function call down @@ -6290,6 +6597,12 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn, } if (Input->isTypeDependent()) { + if (Fns.empty()) + return Owned(new (Context) UnaryOperator(input.takeAs<Expr>(), + Opc, + Context.DependentTy, + OpLoc)); + CXXRecordDecl *NamingClass = 0; // because lookup ignores member operators UnresolvedLookupExpr *Fn = UnresolvedLookupExpr::Create(Context, /*Dependent*/ true, NamingClass, @@ -6356,7 +6669,7 @@ 
Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn, DiagnoseUseOfDecl(Best->FoundDecl, OpLoc); // Determine the result type - QualType ResultTy = FnDecl->getResultType().getNonReferenceType(); + QualType ResultTy = FnDecl->getCallResultType(); // Build the actual expression node. Expr *FnExpr = new (Context) DeclRefExpr(FnDecl, FnDecl->getType(), @@ -6563,8 +6876,8 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc, // Determine the result type QualType ResultTy - = FnDecl->getType()->getAs<FunctionType>()->getResultType(); - ResultTy = ResultTy.getNonReferenceType(); + = FnDecl->getType()->getAs<FunctionType>() + ->getCallResultType(Context); // Build the actual expression node. Expr *FnExpr = new (Context) DeclRefExpr(FnDecl, FnDecl->getType(), @@ -6720,8 +7033,8 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, // Determine the result type QualType ResultTy - = FnDecl->getType()->getAs<FunctionType>()->getResultType(); - ResultTy = ResultTy.getNonReferenceType(); + = FnDecl->getType()->getAs<FunctionType>() + ->getCallResultType(Context); // Build the actual expression node. Expr *FnExpr = new (Context) DeclRefExpr(FnDecl, FnDecl->getType(), @@ -6909,7 +7222,7 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE, ExprOwningPtr<CXXMemberCallExpr> TheCall(this, new (Context) CXXMemberCallExpr(Context, MemExprE, Args, NumArgs, - Method->getResultType().getNonReferenceType(), + Method->getCallResultType(), RParenLoc)); // Check for a valid return type. @@ -7124,7 +7437,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Object, // Once we've built TheCall, all of the expressions are properly // owned. 
- QualType ResultTy = Method->getResultType().getNonReferenceType(); + QualType ResultTy = Method->getCallResultType(); ExprOwningPtr<CXXOperatorCallExpr> TheCall(this, new (Context) CXXOperatorCallExpr(Context, OO_Call, NewFn, MethodArgs, NumArgs + 1, @@ -7280,7 +7593,7 @@ Sema::BuildOverloadedArrowExpr(Scope *S, ExprArg BaseIn, SourceLocation OpLoc) { SourceLocation()); UsualUnaryConversions(FnExpr); - QualType ResultTy = Method->getResultType().getNonReferenceType(); + QualType ResultTy = Method->getCallResultType(); ExprOwningPtr<CXXOperatorCallExpr> TheCall(this, new (Context) CXXOperatorCallExpr(Context, OO_Arrow, FnExpr, &Base, 1, ResultTy, OpLoc)); diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp index 875b160..9c8f48b 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp @@ -92,12 +92,6 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) { if (const CXXExprWithTemporaries *Temps = dyn_cast<CXXExprWithTemporaries>(E)) E = Temps->getSubExpr(); - if (const CXXZeroInitValueExpr *Zero = dyn_cast<CXXZeroInitValueExpr>(E)) { - if (const RecordType *RecordT = Zero->getType()->getAs<RecordType>()) - if (CXXRecordDecl *RecordD = dyn_cast<CXXRecordDecl>(RecordT->getDecl())) - if (!RecordD->hasTrivialDestructor()) - return; - } if (const CallExpr *CE = dyn_cast<CallExpr>(E)) { if (E->getType()->isVoidType()) @@ -304,7 +298,7 @@ Sema::ActOnIfStmt(SourceLocation IfLoc, FullExprArg CondVal, DeclPtrTy CondVar, DiagnoseUnusedExprResult(elseStmt); CondResult.release(); - return Owned(new (Context) IfStmt(IfLoc, ConditionVar, ConditionExpr, + return Owned(new (Context) IfStmt(Context, IfLoc, ConditionVar, ConditionExpr, thenStmt, ElseLoc, elseStmt)); } @@ -400,124 +394,16 @@ static bool EqEnumVals(const std::pair<llvm::APSInt, EnumConstantDecl*>& lhs, /// GetTypeBeforeIntegralPromotion - Returns the pre-promotion type of /// potentially integral-promoted 
expression @p expr. static QualType GetTypeBeforeIntegralPromotion(const Expr* expr) { - const ImplicitCastExpr *ImplicitCast = - dyn_cast_or_null<ImplicitCastExpr>(expr); - if (ImplicitCast != NULL) { + if (const CastExpr *ImplicitCast = dyn_cast<ImplicitCastExpr>(expr)) { const Expr *ExprBeforePromotion = ImplicitCast->getSubExpr(); QualType TypeBeforePromotion = ExprBeforePromotion->getType(); - if (TypeBeforePromotion->isIntegralType()) { + if (TypeBeforePromotion->isIntegralOrEnumerationType()) { return TypeBeforePromotion; } } return expr->getType(); } -/// \brief Check (and possibly convert) the condition in a switch -/// statement in C++. -static bool CheckCXXSwitchCondition(Sema &S, SourceLocation SwitchLoc, - Expr *&CondExpr) { - if (CondExpr->isTypeDependent()) - return false; - - QualType CondType = CondExpr->getType(); - - // C++ 6.4.2.p2: - // The condition shall be of integral type, enumeration type, or of a class - // type for which a single conversion function to integral or enumeration - // type exists (12.3). If the condition is of class type, the condition is - // converted by calling that conversion function, and the result of the - // conversion is used in place of the original condition for the remainder - // of this section. Integral promotions are performed. - - // Make sure that the condition expression has a complete type, - // otherwise we'll never find any conversions. 
- if (S.RequireCompleteType(SwitchLoc, CondType, - S.PDiag(diag::err_switch_incomplete_class_type) - << CondExpr->getSourceRange())) - return true; - - UnresolvedSet<4> ViableConversions; - UnresolvedSet<4> ExplicitConversions; - if (const RecordType *RecordTy = CondType->getAs<RecordType>()) { - const UnresolvedSetImpl *Conversions - = cast<CXXRecordDecl>(RecordTy->getDecl()) - ->getVisibleConversionFunctions(); - for (UnresolvedSetImpl::iterator I = Conversions->begin(), - E = Conversions->end(); I != E; ++I) { - if (CXXConversionDecl *Conversion - = dyn_cast<CXXConversionDecl>((*I)->getUnderlyingDecl())) - if (Conversion->getConversionType().getNonReferenceType() - ->isIntegralType()) { - if (Conversion->isExplicit()) - ExplicitConversions.addDecl(I.getDecl(), I.getAccess()); - else - ViableConversions.addDecl(I.getDecl(), I.getAccess()); - } - } - - switch (ViableConversions.size()) { - case 0: - if (ExplicitConversions.size() == 1) { - DeclAccessPair Found = ExplicitConversions[0]; - CXXConversionDecl *Conversion = - cast<CXXConversionDecl>(Found->getUnderlyingDecl()); - // The user probably meant to invoke the given explicit - // conversion; use it. - QualType ConvTy - = Conversion->getConversionType().getNonReferenceType(); - std::string TypeStr; - ConvTy.getAsStringInternal(TypeStr, S.Context.PrintingPolicy); - - S.Diag(SwitchLoc, diag::err_switch_explicit_conversion) - << CondType << ConvTy << CondExpr->getSourceRange() - << FixItHint::CreateInsertion(CondExpr->getLocStart(), - "static_cast<" + TypeStr + ">(") - << FixItHint::CreateInsertion( - S.PP.getLocForEndOfToken(CondExpr->getLocEnd()), - ")"); - S.Diag(Conversion->getLocation(), diag::note_switch_conversion) - << ConvTy->isEnumeralType() << ConvTy; - - // If we aren't in a SFINAE context, build a call to the - // explicit conversion function. 
- if (S.isSFINAEContext()) - return true; - - S.CheckMemberOperatorAccess(CondExpr->getExprLoc(), - CondExpr, 0, Found); - CondExpr = S.BuildCXXMemberCallExpr(CondExpr, Found, Conversion); - } - - // We'll complain below about a non-integral condition type. - break; - - case 1: { - // Apply this conversion. - DeclAccessPair Found = ViableConversions[0]; - S.CheckMemberOperatorAccess(CondExpr->getExprLoc(), - CondExpr, 0, Found); - CondExpr = S.BuildCXXMemberCallExpr(CondExpr, Found, - cast<CXXConversionDecl>(Found->getUnderlyingDecl())); - break; - } - - default: - S.Diag(SwitchLoc, diag::err_switch_multiple_conversions) - << CondType << CondExpr->getSourceRange(); - for (unsigned I = 0, N = ViableConversions.size(); I != N; ++I) { - CXXConversionDecl *Conv - = cast<CXXConversionDecl>(ViableConversions[I]->getUnderlyingDecl()); - QualType ConvTy = Conv->getConversionType().getNonReferenceType(); - S.Diag(Conv->getLocation(), diag::note_switch_conversion) - << ConvTy->isEnumeralType() << ConvTy; - } - return true; - } - } - - return false; -} - Action::OwningStmtResult Sema::ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, ExprArg Cond, DeclPtrTy CondVar) { @@ -531,21 +417,32 @@ Sema::ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, ExprArg Cond, Cond = move(CondE); } - Expr *CondExpr = Cond.takeAs<Expr>(); - if (!CondExpr) + if (!Cond.get()) return StmtError(); - if (getLangOptions().CPlusPlus && - CheckCXXSwitchCondition(*this, SwitchLoc, CondExpr)) - return StmtError(); - + Expr *CondExpr = static_cast<Expr *>(Cond.get()); + OwningExprResult ConvertedCond + = ConvertToIntegralOrEnumerationType(SwitchLoc, move(Cond), + PDiag(diag::err_typecheck_statement_requires_integer), + PDiag(diag::err_switch_incomplete_class_type) + << CondExpr->getSourceRange(), + PDiag(diag::err_switch_explicit_conversion), + PDiag(diag::note_switch_conversion), + PDiag(diag::err_switch_multiple_conversions), + PDiag(diag::note_switch_conversion), + PDiag(0)); + if 
(ConvertedCond.isInvalid()) + return StmtError(); + + CondExpr = ConvertedCond.takeAs<Expr>(); + if (!CondVar.get()) { CondExpr = MaybeCreateCXXExprWithTemporaries(CondExpr); if (!CondExpr) return StmtError(); } - SwitchStmt *SS = new (Context) SwitchStmt(ConditionVar, CondExpr); + SwitchStmt *SS = new (Context) SwitchStmt(Context, ConditionVar, CondExpr); getSwitchStack().push_back(SS); return Owned(SS); } @@ -584,11 +481,11 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, StmtArg Switch, // be represented by the promoted type. Therefore we need to find // the pre-promotion type of the switch condition. if (!CondExpr->isTypeDependent()) { - if (!CondType->isIntegerType()) { // C99 6.8.4.2p1 - Diag(SwitchLoc, diag::err_typecheck_statement_requires_integer) - << CondType << CondExpr->getSourceRange(); + // We have already converted the expression to an integral or enumeration + // type, when we started the switch statement. If we don't have an + // appropriate type now, just return an error. + if (!CondType->isIntegralOrEnumerationType()) return StmtError(); - } if (CondExpr->isKnownToHaveBooleanValue()) { // switch(bool_expr) {...} is often a programmer error, e.g. 
@@ -838,6 +735,8 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, StmtArg Switch, llvm::APSInt Val = (*EDI)->getInitVal(); if(Val.getBitWidth() < CondWidth) Val.extend(CondWidth); + else if (Val.getBitWidth() > CondWidth) + Val.trunc(CondWidth); Val.setIsSigned(CondIsSigned); EnumVals.push_back(std::make_pair(Val, (*EDI))); } @@ -929,8 +828,8 @@ Sema::ActOnWhileStmt(SourceLocation WhileLoc, FullExprArg Cond, DiagnoseUnusedExprResult(bodyStmt); CondResult.release(); - return Owned(new (Context) WhileStmt(ConditionVar, ConditionExpr, bodyStmt, - WhileLoc)); + return Owned(new (Context) WhileStmt(Context, ConditionVar, ConditionExpr, + bodyStmt, WhileLoc)); } Action::OwningStmtResult @@ -999,9 +898,10 @@ Sema::ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, first.release(); body.release(); - return Owned(new (Context) ForStmt(First, SecondResult.takeAs<Expr>(), - ConditionVar, Third, Body, - ForLoc, LParenLoc, RParenLoc)); + return Owned(new (Context) ForStmt(Context, First, + SecondResult.takeAs<Expr>(), ConditionVar, + Third, Body, ForLoc, LParenLoc, + RParenLoc)); } Action::OwningStmtResult @@ -1517,14 +1417,14 @@ Sema::OwningStmtResult Sema::ActOnAsmStmt(SourceLocation AsmLoc, if (InTy->isIntegerType() || InTy->isPointerType()) InputDomain = AD_Int; - else if (InTy->isFloatingType()) + else if (InTy->isRealFloatingType()) InputDomain = AD_FP; else InputDomain = AD_Other; if (OutTy->isIntegerType() || OutTy->isPointerType()) OutputDomain = AD_Int; - else if (OutTy->isFloatingType()) + else if (OutTy->isRealFloatingType()) OutputDomain = AD_FP; else OutputDomain = AD_Other; diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp index 307be9d..f121954 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp @@ -27,12 +27,12 @@ using namespace clang; /// \brief Determine whether the declaration found is acceptable as the 
name /// of a template and, if so, return that template declaration. Otherwise, /// returns NULL. -static NamedDecl *isAcceptableTemplateName(ASTContext &Context, NamedDecl *D) { - if (!D) - return 0; +static NamedDecl *isAcceptableTemplateName(ASTContext &Context, + NamedDecl *Orig) { + NamedDecl *D = Orig->getUnderlyingDecl(); if (isa<TemplateDecl>(D)) - return D; + return Orig; if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D)) { // C++ [temp.local]p1: @@ -68,7 +68,7 @@ static void FilterAcceptableTemplateNames(ASTContext &C, LookupResult &R) { LookupResult::Filter filter = R.makeFilter(); while (filter.hasNext()) { NamedDecl *Orig = filter.next(); - NamedDecl *Repl = isAcceptableTemplateName(C, Orig->getUnderlyingDecl()); + NamedDecl *Repl = isAcceptableTemplateName(C, Orig); if (!Repl) filter.erase(); else if (Repl != Orig) { @@ -258,9 +258,9 @@ void Sema::LookupTemplateName(LookupResult &Found, // If we did not find any names, attempt to correct any typos. DeclarationName Name = Found.getLookupName(); if (DeclarationName Corrected = CorrectTypo(Found, S, &SS, LookupCtx, - false, CTC_CXXCasts)) { + false, CTC_CXXCasts)) { FilterAcceptableTemplateNames(Context, Found); - if (!Found.empty() && isa<TemplateDecl>(*Found.begin())) { + if (!Found.empty()) { if (LookupCtx) Diag(Found.getNameLoc(), diag::err_no_member_template_suggest) << Name << LookupCtx << Found.getLookupName() << SS.getRange() @@ -274,10 +274,10 @@ void Sema::LookupTemplateName(LookupResult &Found, if (TemplateDecl *Template = Found.getAsSingle<TemplateDecl>()) Diag(Template->getLocation(), diag::note_previous_decl) << Template->getDeclName(); - } else - Found.clear(); + } } else { Found.clear(); + Found.setLookupName(Name); } } @@ -303,7 +303,7 @@ void Sema::LookupTemplateName(LookupResult &Found, // - if the name is found in the context of the entire // postfix-expression and does not name a class template, the name // found in the class of the object expression is used, otherwise - } else 
{ + } else if (!Found.isSuppressingDiagnostics()) { // - if the name found is a class template, it must refer to the same // entity as the one found in the class of the object expression, // otherwise the program is ill-formed. @@ -311,8 +311,9 @@ void Sema::LookupTemplateName(LookupResult &Found, Found.getFoundDecl()->getCanonicalDecl() != FoundOuter.getFoundDecl()->getCanonicalDecl()) { Diag(Found.getNameLoc(), - diag::err_nested_name_member_ref_lookup_ambiguous) - << Found.getLookupName(); + diag::ext_nested_name_member_ref_lookup_ambiguous) + << Found.getLookupName() + << ObjectType; Diag(Found.getRepresentativeDecl()->getLocation(), diag::note_ambig_member_ref_object_type) << ObjectType; @@ -458,7 +459,9 @@ Sema::DeclPtrTy Sema::ActOnTypeParameter(Scope *S, bool Typename, bool Ellipsis, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, - unsigned Depth, unsigned Position) { + unsigned Depth, unsigned Position, + SourceLocation EqualLoc, + TypeTy *DefaultArg) { assert(S->isTemplateParamScope() && "Template type parameter not in template parameter scope!"); bool Invalid = false; @@ -489,42 +492,31 @@ Sema::DeclPtrTy Sema::ActOnTypeParameter(Scope *S, bool Typename, bool Ellipsis, IdResolver.AddDecl(Param); } - return DeclPtrTy::make(Param); -} - -/// ActOnTypeParameterDefault - Adds a default argument (the type -/// Default) to the given template type parameter (TypeParam). -void Sema::ActOnTypeParameterDefault(DeclPtrTy TypeParam, - SourceLocation EqualLoc, - SourceLocation DefaultLoc, - TypeTy *DefaultT) { - TemplateTypeParmDecl *Parm - = cast<TemplateTypeParmDecl>(TypeParam.getAs<Decl>()); - - TypeSourceInfo *DefaultTInfo; - GetTypeFromParser(DefaultT, &DefaultTInfo); - - assert(DefaultTInfo && "expected source information for type"); - - // C++0x [temp.param]p9: - // A default template-argument may be specified for any kind of - // template-parameter that is not a template parameter pack. 
- if (Parm->isParameterPack()) { - Diag(DefaultLoc, diag::err_template_param_pack_default_arg); - return; - } - - // C++ [temp.param]p14: - // A template-parameter shall not be used in its own default argument. - // FIXME: Implement this check! Needs a recursive walk over the types. - - // Check the template argument itself. - if (CheckTemplateArgument(Parm, DefaultTInfo)) { - Parm->setInvalidDecl(); - return; + // Handle the default argument, if provided. + if (DefaultArg) { + TypeSourceInfo *DefaultTInfo; + GetTypeFromParser(DefaultArg, &DefaultTInfo); + + assert(DefaultTInfo && "expected source information for type"); + + // C++0x [temp.param]p9: + // A default template-argument may be specified for any kind of + // template-parameter that is not a template parameter pack. + if (Ellipsis) { + Diag(EqualLoc, diag::err_template_param_pack_default_arg); + return DeclPtrTy::make(Param); + } + + // Check the template argument itself. + if (CheckTemplateArgument(Param, DefaultTInfo)) { + Param->setInvalidDecl(); + return DeclPtrTy::make(Param);; + } + + Param->setDefaultArgument(DefaultTInfo, false); } - - Parm->setDefaultArgument(DefaultTInfo, false); + + return DeclPtrTy::make(Param); } /// \brief Check that the type of a non-type template parameter is @@ -548,7 +540,7 @@ Sema::CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc) { // (optionally cv-qualified) types: // // -- integral or enumeration type, - if (T->isIntegralType() || T->isEnumeralType() || + if (T->isIntegralOrEnumerationType() || // -- pointer to object or pointer to function, (T->isPointerType() && (T->getAs<PointerType>()->getPointeeType()->isObjectType() || @@ -579,15 +571,13 @@ Sema::CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc) { return QualType(); } -/// ActOnNonTypeTemplateParameter - Called when a C++ non-type -/// template parameter (e.g., "int Size" in "template<int Size> -/// class Array") has been parsed. 
S is the current scope and D is -/// the parsed declarator. Sema::DeclPtrTy Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, - unsigned Position) { - TypeSourceInfo *TInfo = 0; - QualType T = GetTypeForDeclarator(D, S, &TInfo); + unsigned Position, + SourceLocation EqualLoc, + ExprArg DefaultArg) { + TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S); + QualType T = TInfo->getType(); assert(S->isTemplateParamScope() && "Non-type template parameter not in template parameter scope!"); @@ -621,34 +611,21 @@ Sema::DeclPtrTy Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, S->AddDecl(DeclPtrTy::make(Param)); IdResolver.AddDecl(Param); } - return DeclPtrTy::make(Param); -} - -/// \brief Adds a default argument to the given non-type template -/// parameter. -void Sema::ActOnNonTypeTemplateParameterDefault(DeclPtrTy TemplateParamD, - SourceLocation EqualLoc, - ExprArg DefaultE) { - NonTypeTemplateParmDecl *TemplateParm - = cast<NonTypeTemplateParmDecl>(TemplateParamD.getAs<Decl>()); - Expr *Default = static_cast<Expr *>(DefaultE.get()); - - // C++ [temp.param]p14: - // A template-parameter shall not be used in its own default argument. - // FIXME: Implement this check! Needs a recursive walk over the types. - - // Check the well-formedness of the default template argument. - TemplateArgument Converted; - if (CheckTemplateArgument(TemplateParm, TemplateParm->getType(), Default, - Converted)) { - TemplateParm->setInvalidDecl(); - return; + + // Check the well-formedness of the default template argument, if provided. 
+ if (Expr *Default = static_cast<Expr *>(DefaultArg.get())) { + TemplateArgument Converted; + if (CheckTemplateArgument(Param, Param->getType(), Default, Converted)) { + Param->setInvalidDecl(); + return DeclPtrTy::make(Param);; + } + + Param->setDefaultArgument(DefaultArg.takeAs<Expr>(), false); } - - TemplateParm->setDefaultArgument(DefaultE.takeAs<Expr>()); + + return DeclPtrTy::make(Param); } - /// ActOnTemplateTemplateParameter - Called when a C++ template template /// parameter (e.g. T in template <template <typename> class T> class array) /// has been parsed. S is the current scope. @@ -658,7 +635,9 @@ Sema::DeclPtrTy Sema::ActOnTemplateTemplateParameter(Scope* S, IdentifierInfo *Name, SourceLocation NameLoc, unsigned Depth, - unsigned Position) { + unsigned Position, + SourceLocation EqualLoc, + const ParsedTemplateArgument &Default) { assert(S->isTemplateParamScope() && "Template template parameter not in template parameter scope!"); @@ -668,53 +647,33 @@ Sema::DeclPtrTy Sema::ActOnTemplateTemplateParameter(Scope* S, TmpLoc, Depth, Position, Name, (TemplateParameterList*)Params); - // Make sure the parameter is valid. - // FIXME: Decl object is not currently invalidated anywhere so this doesn't - // do anything yet. However, if the template parameter list or (eventual) - // default value is ever invalidated, that will propagate here. - bool Invalid = false; - if (Invalid) { - Param->setInvalidDecl(); - } - - // If the tt-param has a name, then link the identifier into the scope - // and lookup mechanisms. + // If the template template parameter has a name, then link the identifier + // into the scope and lookup mechanisms. if (Name) { S->AddDecl(DeclPtrTy::make(Param)); IdResolver.AddDecl(Param); } - return DeclPtrTy::make(Param); -} - -/// \brief Adds a default argument to the given template template -/// parameter. 
-void Sema::ActOnTemplateTemplateParameterDefault(DeclPtrTy TemplateParamD, - SourceLocation EqualLoc, - const ParsedTemplateArgument &Default) { - TemplateTemplateParmDecl *TemplateParm - = cast<TemplateTemplateParmDecl>(TemplateParamD.getAs<Decl>()); - - // C++ [temp.param]p14: - // A template-parameter shall not be used in its own default argument. - // FIXME: Implement this check! Needs a recursive walk over the types. - - // Check only that we have a template template argument. We don't want to - // try to check well-formedness now, because our template template parameter - // might have dependent types in its template parameters, which we wouldn't - // be able to match now. - // - // If none of the template template parameter's template arguments mention - // other template parameters, we could actually perform more checking here. - // However, it isn't worth doing. - TemplateArgumentLoc DefaultArg = translateTemplateArgument(*this, Default); - if (DefaultArg.getArgument().getAsTemplate().isNull()) { - Diag(DefaultArg.getLocation(), diag::err_template_arg_not_class_template) - << DefaultArg.getSourceRange(); - return; + if (!Default.isInvalid()) { + // Check only that we have a template template argument. We don't want to + // try to check well-formedness now, because our template template parameter + // might have dependent types in its template parameters, which we wouldn't + // be able to match now. + // + // If none of the template template parameter's template arguments mention + // other template parameters, we could actually perform more checking here. + // However, it isn't worth doing. 
+ TemplateArgumentLoc DefaultArg = translateTemplateArgument(*this, Default); + if (DefaultArg.getArgument().getAsTemplate().isNull()) { + Diag(DefaultArg.getLocation(), diag::err_template_arg_not_class_template) + << DefaultArg.getSourceRange(); + return DeclPtrTy::make(Param); + } + + Param->setDefaultArgument(DefaultArg, false); } - TemplateParm->setDefaultArgument(DefaultArg); + return DeclPtrTy::make(Param); } /// ActOnTemplateParameterList - Builds a TemplateParameterList that @@ -925,7 +884,7 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK, NewClass->setDescribedClassTemplate(NewTemplate); // Build the type for the class template declaration now. - QualType T = NewTemplate->getInjectedClassNameSpecialization(Context); + QualType T = NewTemplate->getInjectedClassNameSpecialization(); T = Context.getInjectedClassNameType(NewClass, T); assert(T->isDependentType() && "Class template type is not dependent?"); (void)T; @@ -1144,7 +1103,7 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams, NewNonTypeParm->getLocation(), NewNonTypeParm->getDefaultArgument()->getSourceRange())) { NewNonTypeParm->getDefaultArgument()->Destroy(Context); - NewNonTypeParm->setDefaultArgument(0); + NewNonTypeParm->removeDefaultArgument(); } // Merge default arguments for non-type template parameters @@ -1165,7 +1124,8 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams, // expression that points to a previous template template // parameter. 
NewNonTypeParm->setDefaultArgument( - OldNonTypeParm->getDefaultArgument()); + OldNonTypeParm->getDefaultArgument(), + /*Inherited=*/ true); PreviousDefaultArgLoc = OldNonTypeParm->getDefaultArgumentLoc(); } else if (NewNonTypeParm->hasDefaultArgument()) { SawDefaultArgument = true; @@ -1180,7 +1140,7 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams, DiagnoseDefaultTemplateArgument(*this, TPC, NewTemplateParm->getLocation(), NewTemplateParm->getDefaultArgument().getSourceRange())) - NewTemplateParm->setDefaultArgument(TemplateArgumentLoc()); + NewTemplateParm->removeDefaultArgument(); // Merge default arguments for template template parameters TemplateTemplateParmDecl *OldTemplateParm @@ -1199,7 +1159,8 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams, // FIXME: We need to create a new kind of "default argument" expression // that points to a previous template template parameter. NewTemplateParm->setDefaultArgument( - OldTemplateParm->getDefaultArgument()); + OldTemplateParm->getDefaultArgument(), + /*Inherited=*/ true); PreviousDefaultArgLoc = OldTemplateParm->getDefaultArgument().getLocation(); } else if (NewTemplateParm->hasDefaultArgument()) { @@ -1272,7 +1233,8 @@ Sema::MatchTemplateParametersToScopeSpecifier(SourceLocation DeclStartLoc, TemplateParameterList **ParamLists, unsigned NumParamLists, bool IsFriend, - bool &IsExplicitSpecialization) { + bool &IsExplicitSpecialization, + bool &Invalid) { IsExplicitSpecialization = false; // Find the template-ids that occur within the nested-name-specifier. 
These @@ -1350,6 +1312,7 @@ Sema::MatchTemplateParametersToScopeSpecifier(SourceLocation DeclStartLoc, diag::err_template_spec_needs_template_parameters) << TemplateId << SS.getRange(); + Invalid = true; } else { Diag(SS.getRange().getBegin(), diag::err_template_spec_needs_header) << SS.getRange() @@ -1412,7 +1375,13 @@ Sema::MatchTemplateParametersToScopeSpecifier(SourceLocation DeclStartLoc, << ExplicitSpecializationsInSpecifier.back(); ExplicitSpecializationsInSpecifier.pop_back(); } - + + // We have a template parameter list with no corresponding scope, which + // means that the resulting template declaration can't be instantiated + // properly (we'll end up with dependent nodes when we shouldn't). + if (!isExplicitSpecHeader) + Invalid = true; + ++Idx; } } @@ -1445,7 +1414,6 @@ QualType Sema::CheckTemplateIdType(TemplateName Name, "Converted template argument list is too short!"); QualType CanonType; - bool IsCurrentInstantiation = false; if (Name.isDependent() || TemplateSpecializationType::anyDependentTemplateArguments( @@ -1502,7 +1470,6 @@ QualType Sema::CheckTemplateIdType(TemplateName Name, // class name type of the record we just found. assert(ICNT.isCanonical()); CanonType = ICNT; - IsCurrentInstantiation = true; break; } } @@ -1540,8 +1507,7 @@ QualType Sema::CheckTemplateIdType(TemplateName Name, // Build the fully-sugared type for this class template // specialization, which refers back to the class template // specialization we created or found. - return Context.getTemplateSpecializationType(Name, TemplateArgs, CanonType, - IsCurrentInstantiation); + return Context.getTemplateSpecializationType(Name, TemplateArgs, CanonType); } Action::TypeResult @@ -1687,12 +1653,18 @@ Sema::BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, /// example, given "MetaFun::template apply", the scope specifier \p /// SS will be "MetaFun::", \p TemplateKWLoc contains the location /// of the "template" keyword, and "apply" is the \p Name. 
-Sema::TemplateTy -Sema::ActOnDependentTemplateName(SourceLocation TemplateKWLoc, - CXXScopeSpec &SS, - UnqualifiedId &Name, - TypeTy *ObjectType, - bool EnteringContext) { +TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S, + SourceLocation TemplateKWLoc, + CXXScopeSpec &SS, + UnqualifiedId &Name, + TypeTy *ObjectType, + bool EnteringContext, + TemplateTy &Result) { + if (TemplateKWLoc.isValid() && S && !S->getTemplateParamParent() && + !getLangOptions().CPlusPlus0x) + Diag(TemplateKWLoc, diag::ext_template_outside_of_template) + << FixItHint::CreateRemoval(TemplateKWLoc); + DeclContext *LookupCtx = 0; if (SS.isSet()) LookupCtx = computeDeclContext(SS, EnteringContext); @@ -1714,26 +1686,25 @@ Sema::ActOnDependentTemplateName(SourceLocation TemplateKWLoc, // the "template" keyword prior to a template-name that was not a // dependent name. C++ DR468 relaxed this requirement (the // "template" keyword is now permitted). We follow the C++0x - // rules, even in C++03 mode, retroactively applying the DR. - TemplateTy Template; + // rules, even in C++03 mode with a warning, retroactively applying the DR. bool MemberOfUnknownSpecialization; TemplateNameKind TNK = isTemplateName(0, SS, Name, ObjectType, - EnteringContext, Template, + EnteringContext, Result, MemberOfUnknownSpecialization); if (TNK == TNK_Non_template && LookupCtx->isDependentContext() && isa<CXXRecordDecl>(LookupCtx) && cast<CXXRecordDecl>(LookupCtx)->hasAnyDependentBases()) { - // This is a dependent template. + // This is a dependent template. Handle it below. } else if (TNK == TNK_Non_template) { Diag(Name.getSourceRange().getBegin(), diag::err_template_kw_refers_to_non_template) << GetNameFromUnqualifiedId(Name) << Name.getSourceRange() << TemplateKWLoc; - return TemplateTy(); + return TNK_Non_template; } else { // We found something; return it. 
- return Template; + return TNK; } } @@ -1742,12 +1713,14 @@ Sema::ActOnDependentTemplateName(SourceLocation TemplateKWLoc, switch (Name.getKind()) { case UnqualifiedId::IK_Identifier: - return TemplateTy::make(Context.getDependentTemplateName(Qualifier, - Name.Identifier)); + Result = TemplateTy::make(Context.getDependentTemplateName(Qualifier, + Name.Identifier)); + return TNK_Dependent_template_name; case UnqualifiedId::IK_OperatorFunctionId: - return TemplateTy::make(Context.getDependentTemplateName(Qualifier, + Result = TemplateTy::make(Context.getDependentTemplateName(Qualifier, Name.OperatorFunctionId.Operator)); + return TNK_Dependent_template_name; case UnqualifiedId::IK_LiteralOperatorId: assert(false && "We don't support these; Parse shouldn't have allowed propagation"); @@ -1761,7 +1734,7 @@ Sema::ActOnDependentTemplateName(SourceLocation TemplateKWLoc, << GetNameFromUnqualifiedId(Name) << Name.getSourceRange() << TemplateKWLoc; - return TemplateTy(); + return TNK_Non_template; } bool Sema::CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, @@ -2768,7 +2741,7 @@ bool Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param, // conversions (4.7) are applied. 
QualType ParamType = InstantiatedParamType; QualType ArgType = Arg->getType(); - if (ParamType->isIntegralType() || ParamType->isEnumeralType()) { + if (ParamType->isIntegralOrEnumerationType()) { // C++ [temp.arg.nontype]p1: // A template-argument for a non-type, non-template // template-parameter shall be one of: @@ -2778,7 +2751,7 @@ bool Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param, // -- the name of a non-type template-parameter; or SourceLocation NonConstantLoc; llvm::APSInt Value; - if (!ArgType->isIntegralType() && !ArgType->isEnumeralType()) { + if (!ArgType->isIntegralOrEnumerationType()) { Diag(Arg->getSourceRange().getBegin(), diag::err_template_arg_not_integral_or_enumeral) << ArgType << Arg->getSourceRange(); @@ -3237,9 +3210,32 @@ Sema::TemplateParameterListsAreEqual(TemplateParameterList *New, return false; } - if (isa<TemplateTypeParmDecl>(*OldParm)) { - // Okay; all template type parameters are equivalent (since we - // know we're at the same index). + if (TemplateTypeParmDecl *OldTTP + = dyn_cast<TemplateTypeParmDecl>(*OldParm)) { + // Template type parameters are equivalent if either both are template + // type parameter packs or neither are (since we know we're at the same + // index). + TemplateTypeParmDecl *NewTTP = cast<TemplateTypeParmDecl>(*NewParm); + if (OldTTP->isParameterPack() != NewTTP->isParameterPack()) { + // FIXME: Implement the rules in C++0x [temp.arg.template]p5 that + // allow one to match a template parameter pack in the template + // parameter list of a template template parameter to one or more + // template parameters in the template parameter list of the + // corresponding template template argument. 
+ if (Complain) { + unsigned NextDiag = diag::err_template_parameter_pack_non_pack; + if (TemplateArgLoc.isValid()) { + Diag(TemplateArgLoc, + diag::err_template_arg_template_params_mismatch); + NextDiag = diag::note_template_parameter_pack_non_pack; + } + Diag(NewTTP->getLocation(), NextDiag) + << 0 << NewTTP->isParameterPack(); + Diag(OldTTP->getLocation(), diag::note_template_parameter_pack_here) + << 0 << OldTTP->isParameterPack(); + } + return false; + } } else if (NonTypeTemplateParmDecl *OldNTTP = dyn_cast<NonTypeTemplateParmDecl>(*OldParm)) { // The types of non-type template parameters must agree. @@ -3634,12 +3630,21 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, // template. // FIXME: We probably shouldn't complain about these headers for // friend declarations. + bool Invalid = false; TemplateParameterList *TemplateParams = MatchTemplateParametersToScopeSpecifier(TemplateNameLoc, SS, (TemplateParameterList**)TemplateParameterLists.get(), TemplateParameterLists.size(), TUK == TUK_Friend, - isExplicitSpecialization); + isExplicitSpecialization, + Invalid); + if (Invalid) + return true; + + unsigned NumMatchedTemplateParamLists = TemplateParameterLists.size(); + if (TemplateParams) + --NumMatchedTemplateParamLists; + if (TemplateParams && TemplateParams->size() > 0) { isPartialSpecialization = true; @@ -3660,7 +3665,7 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, Diag(NTTP->getDefaultArgumentLoc(), diag::err_default_arg_in_partial_spec) << DefArg->getSourceRange(); - NTTP->setDefaultArgument(0); + NTTP->removeDefaultArgument(); DefArg->Destroy(Context); } } else { @@ -3669,7 +3674,7 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, Diag(TTP->getDefaultArgument().getLocation(), diag::err_default_arg_in_partial_spec) << TTP->getDefaultArgument().getSourceRange(); - TTP->setDefaultArgument(TemplateArgumentLoc()); + TTP->removeDefaultArgument(); } } } @@ -3831,6 +3836,11 @@ 
Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, PrevPartial, SequenceNumber); SetNestedNameSpecifier(Partial, SS); + if (NumMatchedTemplateParamLists > 0) { + Partial->setTemplateParameterListsInfo(Context, + NumMatchedTemplateParamLists, + (TemplateParameterList**) TemplateParameterLists.release()); + } if (PrevPartial) { ClassTemplate->getPartialSpecializations().RemoveNode(PrevPartial); @@ -3888,6 +3898,11 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, Converted, PrevDecl); SetNestedNameSpecifier(Specialization, SS); + if (NumMatchedTemplateParamLists > 0) { + Specialization->setTemplateParameterListsInfo(Context, + NumMatchedTemplateParamLists, + (TemplateParameterList**) TemplateParameterLists.release()); + } if (PrevDecl) { ClassTemplate->getSpecializations().RemoveNode(PrevDecl); @@ -3955,8 +3970,11 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TypeSourceInfo *WrittenTy = Context.getTemplateSpecializationTypeInfo(Name, TemplateNameLoc, TemplateArgs, CanonType); - if (TUK != TUK_Friend) + if (TUK != TUK_Friend) { Specialization->setTypeAsWritten(WrittenTy); + if (TemplateParams) + Specialization->setTemplateKeywordLoc(TemplateParams->getTemplateLoc()); + } TemplateArgsIn.release(); // C++ [temp.expl.spec]p9: @@ -4050,7 +4068,7 @@ static void StripImplicitInstantiation(NamedDecl *D) { /// \param PrevPointOfInstantiation if valid, indicates where the previus /// declaration was instantiated (either implicitly or explicitly). /// -/// \param SuppressNew will be set to true to indicate that the new +/// \param HasNoEffect will be set to true to indicate that the new /// specialization or instantiation has no effect and should be ignored. 
/// /// \returns true if there was an error that should prevent the introduction of @@ -4061,8 +4079,8 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPointOfInstantiation, - bool &SuppressNew) { - SuppressNew = false; + bool &HasNoEffect) { + HasNoEffect = false; switch (NewTSK) { case TSK_Undeclared: @@ -4119,7 +4137,7 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, switch (PrevTSK) { case TSK_ExplicitInstantiationDeclaration: // This explicit instantiation declaration is redundant (that's okay). - SuppressNew = true; + HasNoEffect = true; return false; case TSK_Undeclared: @@ -4134,7 +4152,7 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, // of a template appears after a declaration of an explicit // specialization for that template, the explicit instantiation has no // effect. - SuppressNew = true; + HasNoEffect = true; return false; case TSK_ExplicitInstantiationDefinition: @@ -4148,7 +4166,7 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, diag::note_explicit_instantiation_definition_here); assert(PrevPointOfInstantiation.isValid() && "Explicit instantiation without point of instantiation?"); - SuppressNew = true; + HasNoEffect = true; return false; } break; @@ -4177,7 +4195,7 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, Diag(PrevDecl->getLocation(), diag::note_previous_template_specialization); } - SuppressNew = true; + HasNoEffect = true; return false; case TSK_ExplicitInstantiationDeclaration: @@ -4194,7 +4212,7 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, << PrevDecl; Diag(PrevPointOfInstantiation, diag::note_previous_explicit_instantiation); - SuppressNew = true; + HasNoEffect = true; return false; } break; @@ -4343,14 +4361,14 @@ Sema::CheckFunctionTemplateSpecialization(FunctionDecl *FD, = Specialization->getTemplateSpecializationInfo(); 
assert(SpecInfo && "Function template specialization info missing?"); - bool SuppressNew = false; + bool HasNoEffect = false; if (!isFriend && CheckSpecializationInstantiationRedecl(FD->getLocation(), TSK_ExplicitSpecialization, Specialization, SpecInfo->getTemplateSpecializationKind(), SpecInfo->getPointOfInstantiation(), - SuppressNew)) + HasNoEffect)) return true; // Mark the prior declaration as an explicit specialization, so that later @@ -4477,13 +4495,13 @@ Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) { // use occurs; no diagnostic is required. assert(MSInfo && "Member specialization info missing?"); - bool SuppressNew = false; + bool HasNoEffect = false; if (CheckSpecializationInstantiationRedecl(Member->getLocation(), TSK_ExplicitSpecialization, Instantiation, MSInfo->getTemplateSpecializationKind(), MSInfo->getPointOfInstantiation(), - SuppressNew)) + HasNoEffect)) return true; // Check the scope of this explicit specialization. @@ -4544,13 +4562,21 @@ Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) { } /// \brief Check the scope of an explicit instantiation. -static void CheckExplicitInstantiationScope(Sema &S, NamedDecl *D, +/// +/// \returns true if a serious error occurs, false otherwise. +static bool CheckExplicitInstantiationScope(Sema &S, NamedDecl *D, SourceLocation InstLoc, bool WasQualifiedName) { DeclContext *ExpectedContext = D->getDeclContext()->getEnclosingNamespaceContext()->getLookupContext(); DeclContext *CurContext = S.CurContext->getLookupContext(); + if (CurContext->isRecord()) { + S.Diag(InstLoc, diag::err_explicit_instantiation_in_class) + << D; + return true; + } + // C++0x [temp.explicit]p2: // An explicit instantiation shall appear in an enclosing namespace of its // template. 
@@ -4571,7 +4597,7 @@ static void CheckExplicitInstantiationScope(Sema &S, NamedDecl *D, : diag::warn_explicit_instantiation_out_of_scope_0x) << D; S.Diag(D->getLocation(), diag::note_explicit_instantiation_here); - return; + return false; } // C++0x [temp.explicit]p2: @@ -4580,10 +4606,10 @@ static void CheckExplicitInstantiationScope(Sema &S, NamedDecl *D, // its template is declared or, if that namespace is inline (7.3.1), any // namespace from its enclosing namespace set. if (WasQualifiedName) - return; + return false; if (CurContext->Equals(ExpectedContext)) - return; + return false; S.Diag(InstLoc, S.getLangOptions().CPlusPlus0x? @@ -4591,6 +4617,7 @@ static void CheckExplicitInstantiationScope(Sema &S, NamedDecl *D, : diag::warn_explicit_instantiation_unqualified_wrong_namespace_0x) << D << ExpectedContext; S.Diag(D->getLocation(), diag::note_explicit_instantiation_here); + return false; } /// \brief Determine whether the given scope specifier has a template-id in it. @@ -4685,42 +4712,46 @@ Sema::ActOnExplicitInstantiation(Scope *S, ClassTemplateSpecializationDecl *PrevDecl = ClassTemplate->getSpecializations().FindNodeOrInsertPos(ID, InsertPos); + TemplateSpecializationKind PrevDecl_TSK + = PrevDecl ? PrevDecl->getTemplateSpecializationKind() : TSK_Undeclared; + // C++0x [temp.explicit]p2: // [...] An explicit instantiation shall appear in an enclosing // namespace of its template. [...] // // This is C++ DR 275. 
- CheckExplicitInstantiationScope(*this, ClassTemplate, TemplateNameLoc, - SS.isSet()); + if (CheckExplicitInstantiationScope(*this, ClassTemplate, TemplateNameLoc, + SS.isSet())) + return true; ClassTemplateSpecializationDecl *Specialization = 0; bool ReusedDecl = false; + bool HasNoEffect = false; if (PrevDecl) { - bool SuppressNew = false; if (CheckSpecializationInstantiationRedecl(TemplateNameLoc, TSK, - PrevDecl, - PrevDecl->getSpecializationKind(), + PrevDecl, PrevDecl_TSK, PrevDecl->getPointOfInstantiation(), - SuppressNew)) + HasNoEffect)) return DeclPtrTy::make(PrevDecl); - if (SuppressNew) - return DeclPtrTy::make(PrevDecl); - - if (PrevDecl->getSpecializationKind() == TSK_ImplicitInstantiation || - PrevDecl->getSpecializationKind() == TSK_Undeclared) { + // Even though HasNoEffect == true means that this explicit instantiation + // has no effect on semantics, we go on to put its syntax in the AST. + + if (PrevDecl_TSK == TSK_ImplicitInstantiation || + PrevDecl_TSK == TSK_Undeclared) { // Since the only prior class template specialization with these // arguments was referenced but not declared, reuse that - // declaration node as our own, updating its source location to - // reflect our new declaration. + // declaration node as our own, updating the source location + // for the template name to reflect our new declaration. + // (Other source locations will be updated later.) Specialization = PrevDecl; Specialization->setLocation(TemplateNameLoc); PrevDecl = 0; ReusedDecl = true; } } - + if (!Specialization) { // Create a new class template specialization declaration node for // this explicit specialization. @@ -4732,15 +4763,16 @@ Sema::ActOnExplicitInstantiation(Scope *S, Converted, PrevDecl); SetNestedNameSpecifier(Specialization, SS); - if (PrevDecl) { - // Remove the previous declaration from the folding set, since we want - // to introduce a new declaration. 
- ClassTemplate->getSpecializations().RemoveNode(PrevDecl); - ClassTemplate->getSpecializations().FindNodeOrInsertPos(ID, InsertPos); - } - - // Insert the new specialization. - ClassTemplate->getSpecializations().InsertNode(Specialization, InsertPos); + if (!HasNoEffect) { + if (PrevDecl) { + // Remove the previous declaration from the folding set, since we want + // to introduce a new declaration. + ClassTemplate->getSpecializations().RemoveNode(PrevDecl); + ClassTemplate->getSpecializations().FindNodeOrInsertPos(ID, InsertPos); + } + // Insert the new specialization. + ClassTemplate->getSpecializations().InsertNode(Specialization, InsertPos); + } } // Build the fully-sugared type for this explicit instantiation as @@ -4757,12 +4789,21 @@ Sema::ActOnExplicitInstantiation(Scope *S, Specialization->setTypeAsWritten(WrittenTy); TemplateArgsIn.release(); - if (!ReusedDecl) { - // Add the explicit instantiation into its lexical context. However, - // since explicit instantiations are never found by name lookup, we - // just put it into the declaration context directly. - Specialization->setLexicalDeclContext(CurContext); - CurContext->addDecl(Specialization); + // Set source locations for keywords. + Specialization->setExternLoc(ExternLoc); + Specialization->setTemplateKeywordLoc(TemplateLoc); + + // Add the explicit instantiation into its lexical context. However, + // since explicit instantiations are never found by name lookup, we + // just put it into the declaration context directly. + Specialization->setLexicalDeclContext(CurContext); + CurContext->addDecl(Specialization); + + // Syntax is now OK, so return if it has no other effect on semantics. + if (HasNoEffect) { + // Set the template specialization kind. 
+ Specialization->setTemplateSpecializationKind(TSK); + return DeclPtrTy::make(Specialization); } // C++ [temp.explicit]p3: @@ -4777,8 +4818,10 @@ Sema::ActOnExplicitInstantiation(Scope *S, Specialization->getDefinition()); if (!Def) InstantiateClassTemplateSpecialization(TemplateNameLoc, Specialization, TSK); - else if (TSK == TSK_ExplicitInstantiationDefinition) + else if (TSK == TSK_ExplicitInstantiationDefinition) { MarkVTableUsed(TemplateNameLoc, Specialization, true); + Specialization->setPointOfInstantiation(Def->getPointOfInstantiation()); + } // Instantiate the members of this class template specialization. Def = cast_or_null<ClassTemplateSpecializationDecl>( @@ -4795,6 +4838,8 @@ Sema::ActOnExplicitInstantiation(Scope *S, InstantiateClassTemplateSpecializationMembers(TemplateNameLoc, Def, TSK); } + // Set the template specialization kind. + Specialization->setTemplateSpecializationKind(TSK); return DeclPtrTy::make(Specialization); } @@ -4847,7 +4892,7 @@ Sema::ActOnExplicitInstantiation(Scope *S, // // C++98 has the same restriction, just worded differently. 
if (!ScopeSpecifierHasTemplateId(SS)) - Diag(TemplateLoc, diag::err_explicit_instantiation_without_qualified_id) + Diag(TemplateLoc, diag::ext_explicit_instantiation_without_qualified_id) << Record << SS.getRange(); // C++0x [temp.explicit]p2: @@ -4872,15 +4917,15 @@ Sema::ActOnExplicitInstantiation(Scope *S, PrevDecl = Record; if (PrevDecl) { MemberSpecializationInfo *MSInfo = PrevDecl->getMemberSpecializationInfo(); - bool SuppressNew = false; + bool HasNoEffect = false; assert(MSInfo && "No member specialization information?"); if (CheckSpecializationInstantiationRedecl(TemplateLoc, TSK, PrevDecl, MSInfo->getTemplateSpecializationKind(), MSInfo->getPointOfInstantiation(), - SuppressNew)) + HasNoEffect)) return true; - if (SuppressNew) + if (HasNoEffect) return TagD; } @@ -4947,7 +4992,8 @@ Sema::DeclResult Sema::ActOnExplicitInstantiation(Scope *S, S = S->getParent(); // Determine the type of the declaration. - QualType R = GetTypeForDeclarator(D, S, 0); + TypeSourceInfo *T = GetTypeForDeclarator(D, S); + QualType R = T->getType(); if (R.isNull()) return true; @@ -5019,7 +5065,7 @@ Sema::DeclResult Sema::ActOnExplicitInstantiation(Scope *S, // C++98 has the same restriction, just worded differently. if (!ScopeSpecifierHasTemplateId(D.getCXXScopeSpec())) Diag(D.getIdentifierLoc(), - diag::err_explicit_instantiation_without_qualified_id) + diag::ext_explicit_instantiation_without_qualified_id) << Prev << D.getCXXScopeSpec().getRange(); // Check the scope of this explicit instantiation. @@ -5028,13 +5074,13 @@ Sema::DeclResult Sema::ActOnExplicitInstantiation(Scope *S, // Verify that it is okay to explicitly instantiate here. 
MemberSpecializationInfo *MSInfo = Prev->getMemberSpecializationInfo(); assert(MSInfo && "Missing static data member specialization info?"); - bool SuppressNew = false; + bool HasNoEffect = false; if (CheckSpecializationInstantiationRedecl(D.getIdentifierLoc(), TSK, Prev, MSInfo->getTemplateSpecializationKind(), MSInfo->getPointOfInstantiation(), - SuppressNew)) + HasNoEffect)) return true; - if (SuppressNew) + if (HasNoEffect) return DeclPtrTy(); // Instantiate static data member. @@ -5131,17 +5177,17 @@ Sema::DeclResult Sema::ActOnExplicitInstantiation(Scope *S, PrevDecl = Specialization; if (PrevDecl) { - bool SuppressNew = false; + bool HasNoEffect = false; if (CheckSpecializationInstantiationRedecl(D.getIdentifierLoc(), TSK, PrevDecl, PrevDecl->getTemplateSpecializationKind(), PrevDecl->getPointOfInstantiation(), - SuppressNew)) + HasNoEffect)) return true; // FIXME: We may still want to build some representation of this // explicit specialization. - if (SuppressNew) + if (HasNoEffect) return DeclPtrTy(); } @@ -5163,7 +5209,7 @@ Sema::DeclResult Sema::ActOnExplicitInstantiation(Scope *S, D.getCXXScopeSpec().isSet() && !ScopeSpecifierHasTemplateId(D.getCXXScopeSpec())) Diag(D.getIdentifierLoc(), - diag::err_explicit_instantiation_without_qualified_id) + diag::ext_explicit_instantiation_without_qualified_id) << Specialization << D.getCXXScopeSpec().getRange(); CheckExplicitInstantiationScope(*this, @@ -5200,31 +5246,20 @@ Sema::ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, return Context.getDependentNameType(Kwd, NNS, Name).getAsOpaquePtr(); } -static void FillTypeLoc(DependentNameTypeLoc TL, - SourceLocation TypenameLoc, - SourceRange QualifierRange, - SourceLocation NameLoc) { - TL.setKeywordLoc(TypenameLoc); - TL.setQualifierRange(QualifierRange); - TL.setNameLoc(NameLoc); -} - -static void FillTypeLoc(ElaboratedTypeLoc TL, - SourceLocation TypenameLoc, - SourceRange QualifierRange) { - // FIXME: inner locations. 
- TL.setKeywordLoc(TypenameLoc); - TL.setQualifierRange(QualifierRange); -} - Sema::TypeResult -Sema::ActOnTypenameType(SourceLocation TypenameLoc, const CXXScopeSpec &SS, - const IdentifierInfo &II, SourceLocation IdLoc) { +Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, + const CXXScopeSpec &SS, const IdentifierInfo &II, + SourceLocation IdLoc) { NestedNameSpecifier *NNS = static_cast<NestedNameSpecifier *>(SS.getScopeRep()); if (!NNS) return true; + if (TypenameLoc.isValid() && S && !S->getTemplateParamParent() && + !getLangOptions().CPlusPlus0x) + Diag(TypenameLoc, diag::ext_typename_outside_of_template) + << FixItHint::CreateRemoval(TypenameLoc); + QualType T = CheckTypenameType(ETK_Typename, NNS, II, TypenameLoc, SS.getRange(), IdLoc); if (T.isNull()) @@ -5233,44 +5268,82 @@ Sema::ActOnTypenameType(SourceLocation TypenameLoc, const CXXScopeSpec &SS, TypeSourceInfo *TSI = Context.CreateTypeSourceInfo(T); if (isa<DependentNameType>(T)) { DependentNameTypeLoc TL = cast<DependentNameTypeLoc>(TSI->getTypeLoc()); - // FIXME: fill inner type loc - FillTypeLoc(TL, TypenameLoc, SS.getRange(), IdLoc); + TL.setKeywordLoc(TypenameLoc); + TL.setQualifierRange(SS.getRange()); + TL.setNameLoc(IdLoc); } else { ElaboratedTypeLoc TL = cast<ElaboratedTypeLoc>(TSI->getTypeLoc()); - // FIXME: fill inner type loc - FillTypeLoc(TL, TypenameLoc, SS.getRange()); + TL.setKeywordLoc(TypenameLoc); + TL.setQualifierRange(SS.getRange()); + cast<TypeSpecTypeLoc>(TL.getNamedTypeLoc()).setNameLoc(IdLoc); } return CreateLocInfoType(T, TSI).getAsOpaquePtr(); } Sema::TypeResult -Sema::ActOnTypenameType(SourceLocation TypenameLoc, const CXXScopeSpec &SS, - SourceLocation TemplateLoc, TypeTy *Ty) { - QualType T = GetTypeFromParser(Ty); +Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, + const CXXScopeSpec &SS, SourceLocation TemplateLoc, + TypeTy *Ty) { + if (TypenameLoc.isValid() && S && !S->getTemplateParamParent() && + !getLangOptions().CPlusPlus0x) + 
Diag(TypenameLoc, diag::ext_typename_outside_of_template) + << FixItHint::CreateRemoval(TypenameLoc); + + TypeSourceInfo *InnerTSI = 0; + QualType T = GetTypeFromParser(Ty, &InnerTSI); NestedNameSpecifier *NNS = static_cast<NestedNameSpecifier *>(SS.getScopeRep()); - const TemplateSpecializationType *TemplateId - = T->getAs<TemplateSpecializationType>(); - assert(TemplateId && "Expected a template specialization type"); + + assert(isa<TemplateSpecializationType>(T) && + "Expected a template specialization type"); if (computeDeclContext(SS, false)) { // If we can compute a declaration context, then the "typename" // keyword was superfluous. Just build an ElaboratedType to keep // track of the nested-name-specifier. + + // Push the inner type, preserving its source locations if possible. + TypeLocBuilder Builder; + if (InnerTSI) + Builder.pushFullCopy(InnerTSI->getTypeLoc()); + else + Builder.push<TemplateSpecializationTypeLoc>(T).initialize(TemplateLoc); + T = Context.getElaboratedType(ETK_Typename, NNS, T); - TypeSourceInfo *TSI = Context.CreateTypeSourceInfo(T); - ElaboratedTypeLoc TL = cast<ElaboratedTypeLoc>(TSI->getTypeLoc()); - // FIXME: fill inner type loc - FillTypeLoc(TL, TypenameLoc, SS.getRange()); + ElaboratedTypeLoc TL = Builder.push<ElaboratedTypeLoc>(T); + TL.setKeywordLoc(TypenameLoc); + TL.setQualifierRange(SS.getRange()); + + TypeSourceInfo *TSI = Builder.getTypeSourceInfo(Context, T); return CreateLocInfoType(T, TSI).getAsOpaquePtr(); } - T = Context.getDependentNameType(ETK_Typename, NNS, TemplateId); + // TODO: it's really silly that we make a template specialization + // type earlier only to drop it again here. 
+ TemplateSpecializationType *TST = cast<TemplateSpecializationType>(T); + DependentTemplateName *DTN = + TST->getTemplateName().getAsDependentTemplateName(); + assert(DTN && "dependent template has non-dependent name?"); + T = Context.getDependentTemplateSpecializationType(ETK_Typename, NNS, + DTN->getIdentifier(), + TST->getNumArgs(), + TST->getArgs()); TypeSourceInfo *TSI = Context.CreateTypeSourceInfo(T); - DependentNameTypeLoc TL = cast<DependentNameTypeLoc>(TSI->getTypeLoc()); - // FIXME: fill inner type loc - FillTypeLoc(TL, TypenameLoc, SS.getRange(), TemplateLoc); + DependentTemplateSpecializationTypeLoc TL = + cast<DependentTemplateSpecializationTypeLoc>(TSI->getTypeLoc()); + if (InnerTSI) { + TemplateSpecializationTypeLoc TSTL = + cast<TemplateSpecializationTypeLoc>(InnerTSI->getTypeLoc()); + TL.setLAngleLoc(TSTL.getLAngleLoc()); + TL.setRAngleLoc(TSTL.getRAngleLoc()); + for (unsigned I = 0, E = TST->getNumArgs(); I != E; ++I) + TL.setArgLocInfo(I, TSTL.getArgLocInfo(I)); + } else { + TL.initializeLocal(SourceLocation()); + } + TL.setKeywordLoc(TypenameLoc); + TL.setQualifierRange(SS.getRange()); return CreateLocInfoType(T, TSI).getAsOpaquePtr(); } @@ -5297,7 +5370,7 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword, // the "typename" keyword itself is superfluous. In C++03, the // program is actually ill-formed. However, DR 382 (in C++0x CD1) // allows such extraneous "typename" keywords, and we retroactively - // apply this DR to C++03 code. In any case we continue. + // apply this DR to C++03 code with only a warning. In any case we continue. if (RequireCompleteDeclContext(SS, Ctx)) return QualType(); @@ -5317,7 +5390,7 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword, return Context.getDependentNameType(Keyword, NNS, &II); case LookupResult::Found: - if (TypeDecl *Type = dyn_cast<TypeDecl>(Result.getFoundDecl())) { + if (TypeDecl *Type = dyn_cast<TypeDecl>(Result.getFoundDecl())) { // We found a type. 
Build an ElaboratedType, since the // typename-specifier was just sugar. return Context.getElaboratedType(ETK_Typename, NNS, @@ -5399,87 +5472,9 @@ namespace { Sema::OwningExprResult TransformExpr(Expr *E) { return getSema().Owned(E->Retain()); } - - /// \brief Transforms a typename type by determining whether the type now - /// refers to a member of the current instantiation, and then - /// type-checking and building an ElaboratedType (when possible). - QualType TransformDependentNameType(TypeLocBuilder &TLB, - DependentNameTypeLoc TL, - QualType ObjectType); }; } -QualType -CurrentInstantiationRebuilder::TransformDependentNameType(TypeLocBuilder &TLB, - DependentNameTypeLoc TL, - QualType ObjectType) { - DependentNameType *T = TL.getTypePtr(); - - NestedNameSpecifier *NNS - = TransformNestedNameSpecifier(T->getQualifier(), - TL.getQualifierRange(), - ObjectType); - if (!NNS) - return QualType(); - - // If the nested-name-specifier did not change, and we cannot compute the - // context corresponding to the nested-name-specifier, then this - // typename type will not change; exit early. - CXXScopeSpec SS; - SS.setRange(TL.getQualifierRange()); - SS.setScopeRep(NNS); - - QualType Result; - if (NNS == T->getQualifier() && getSema().computeDeclContext(SS) == 0) - Result = QualType(T, 0); - - // Rebuild the typename type, which will probably turn into a - // ElaboratedType. 
- else if (const TemplateSpecializationType *TemplateId = T->getTemplateId()) { - QualType NewTemplateId - = TransformType(QualType(TemplateId, 0)); - if (NewTemplateId.isNull()) - return QualType(); - - if (NNS == T->getQualifier() && - NewTemplateId == QualType(TemplateId, 0)) - Result = QualType(T, 0); - else - Result = getDerived().RebuildDependentNameType(T->getKeyword(), - NNS, NewTemplateId); - } else - Result = getDerived().RebuildDependentNameType(T->getKeyword(), NNS, - T->getIdentifier(), - TL.getKeywordLoc(), - TL.getQualifierRange(), - TL.getNameLoc()); - - if (Result.isNull()) - return QualType(); - - if (const ElaboratedType* ElabT = Result->getAs<ElaboratedType>()) { - QualType NamedT = ElabT->getNamedType(); - if (isa<TemplateSpecializationType>(NamedT)) { - TemplateSpecializationTypeLoc NamedTLoc - = TLB.push<TemplateSpecializationTypeLoc>(NamedT); - // FIXME: fill locations - NamedTLoc.initializeLocal(TL.getNameLoc()); - } else { - TLB.pushTypeSpec(NamedT).setNameLoc(TL.getNameLoc()); - } - ElaboratedTypeLoc NewTL = TLB.push<ElaboratedTypeLoc>(Result); - NewTL.setKeywordLoc(TL.getKeywordLoc()); - NewTL.setQualifierRange(TL.getQualifierRange()); - } - else { - DependentNameTypeLoc NewTL = TLB.push<DependentNameTypeLoc>(Result); - NewTL.setKeywordLoc(TL.getKeywordLoc()); - NewTL.setQualifierRange(TL.getQualifierRange()); - NewTL.setNameLoc(TL.getNameLoc()); - } - return Result; -} - /// \brief Rebuilds a type within the context of the current instantiation. 
/// /// The type \p T is part of the type of an out-of-line member definition of diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.h b/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.h index ca59e27..b3f4651 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.h +++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.h @@ -36,10 +36,14 @@ namespace clang { /// When instantiating X<int>::Y<17>::f, the multi-level template argument /// list will contain a template argument list (int) at depth 0 and a /// template argument list (17) at depth 1. - struct MultiLevelTemplateArgumentList { + class MultiLevelTemplateArgumentList { + public: + typedef std::pair<const TemplateArgument *, unsigned> ArgList; + + private: /// \brief The template argument lists, stored from the innermost template /// argument list (first) to the outermost template argument list (last). - llvm::SmallVector<const TemplateArgumentList *, 4> TemplateArgumentLists; + llvm::SmallVector<ArgList, 4> TemplateArgumentLists; public: /// \brief Construct an empty set of template argument lists. @@ -48,7 +52,7 @@ namespace clang { /// \brief Construct a single-level template argument list. explicit MultiLevelTemplateArgumentList(const TemplateArgumentList &TemplateArgs) { - TemplateArgumentLists.push_back(&TemplateArgs); + addOuterTemplateArguments(&TemplateArgs); } /// \brief Determine the number of levels in this template argument @@ -58,8 +62,8 @@ namespace clang { /// \brief Retrieve the template argument at a given depth and index. 
const TemplateArgument &operator()(unsigned Depth, unsigned Index) const { assert(Depth < TemplateArgumentLists.size()); - assert(Index < TemplateArgumentLists[getNumLevels() - Depth - 1]->size()); - return TemplateArgumentLists[getNumLevels() - Depth - 1]->get(Index); + assert(Index < TemplateArgumentLists[getNumLevels() - Depth - 1].second); + return TemplateArgumentLists[getNumLevels() - Depth - 1].first[Index]; } /// \brief Determine whether there is a non-NULL template argument at the @@ -69,7 +73,7 @@ namespace clang { bool hasTemplateArgument(unsigned Depth, unsigned Index) const { assert(Depth < TemplateArgumentLists.size()); - if (Index >= TemplateArgumentLists[getNumLevels() - Depth - 1]->size()) + if (Index >= TemplateArgumentLists[getNumLevels() - Depth - 1].second) return false; return !(*this)(Depth, Index).isNull(); @@ -78,12 +82,21 @@ namespace clang { /// \brief Add a new outermost level to the multi-level template argument /// list. void addOuterTemplateArguments(const TemplateArgumentList *TemplateArgs) { - TemplateArgumentLists.push_back(TemplateArgs); + TemplateArgumentLists.push_back( + ArgList(TemplateArgs->getFlatArgumentList(), + TemplateArgs->flat_size())); + } + + /// \brief Add a new outmost level to the multi-level template argument + /// list. + void addOuterTemplateArguments(const TemplateArgument *Args, + unsigned NumArgs) { + TemplateArgumentLists.push_back(ArgList(Args, NumArgs)); } /// \brief Retrieve the innermost template argument list. 
- const TemplateArgumentList &getInnermost() const { - return *TemplateArgumentLists.front(); + const ArgList &getInnermost() const { + return TemplateArgumentLists.front(); } }; diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp index 88ceeca..403d554 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp @@ -2624,6 +2624,18 @@ MarkUsedTemplateParameters(Sema &SemaRef, QualType T, OnlyDeduced, Depth, Used); break; + case Type::DependentTemplateSpecialization: { + const DependentTemplateSpecializationType *Spec + = cast<DependentTemplateSpecializationType>(T); + if (!OnlyDeduced) + MarkUsedTemplateParameters(SemaRef, Spec->getQualifier(), + OnlyDeduced, Depth, Used); + for (unsigned I = 0, N = Spec->getNumArgs(); I != N; ++I) + MarkUsedTemplateParameters(SemaRef, Spec->getArg(I), OnlyDeduced, Depth, + Used); + break; + } + case Type::TypeOf: if (!OnlyDeduced) MarkUsedTemplateParameters(SemaRef, diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp index 1adf594..0cdc8a1 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp @@ -63,7 +63,8 @@ Sema::getTemplateInstantiationArgs(NamedDecl *D, if (ClassTemplateSpecializationDecl *Spec = dyn_cast<ClassTemplateSpecializationDecl>(Ctx)) { // We're done when we hit an explicit specialization. 
- if (Spec->getSpecializationKind() == TSK_ExplicitSpecialization) + if (Spec->getSpecializationKind() == TSK_ExplicitSpecialization && + !isa<ClassTemplatePartialSpecializationDecl>(Spec)) break; Result.addOuterTemplateArguments(&Spec->getTemplateInstantiationArgs()); @@ -104,6 +105,15 @@ Sema::getTemplateInstantiationArgs(NamedDecl *D, RelativeToPrimary = false; continue; } + } else if (CXXRecordDecl *Rec = dyn_cast<CXXRecordDecl>(Ctx)) { + if (ClassTemplateDecl *ClassTemplate = Rec->getDescribedClassTemplate()) { + QualType T = ClassTemplate->getInjectedClassNameSpecialization(); + const TemplateSpecializationType *TST + = cast<TemplateSpecializationType>(Context.getCanonicalType(T)); + Result.addOuterTemplateArguments(TST->getArgs(), TST->getNumArgs()); + if (ClassTemplate->isMemberSpecialization()) + break; + } } Ctx = Ctx->getParent(); @@ -620,6 +630,14 @@ namespace { QualType TransformTemplateTypeParmType(TypeLocBuilder &TLB, TemplateTypeParmTypeLoc TL, QualType ObjectType); + + Sema::OwningExprResult TransformCallExpr(CallExpr *CE) { + getSema().CallsUndergoingInstantiation.push_back(CE); + OwningExprResult Result = + TreeTransform<TemplateInstantiator>::TransformCallExpr(CE); + getSema().CallsUndergoingInstantiation.pop_back(); + return move(Result); + } }; } @@ -1049,6 +1067,9 @@ ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm, NewParm->setHasInheritedDefaultArg(OldParm->hasInheritedDefaultArg()); CurrentInstantiationScope->InstantiatedLocal(OldParm, NewParm); + // Set DeclContext if inside a Block. 
+ NewParm->setDeclContext(CurContext); + return NewParm; } @@ -1216,7 +1237,7 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation, ActOnFields(0, Instantiation->getLocation(), DeclPtrTy::make(Instantiation), Fields.data(), Fields.size(), SourceLocation(), SourceLocation(), 0); - CheckCompletedCXXClass(/*Scope=*/0, Instantiation); + CheckCompletedCXXClass(Instantiation); if (Instantiation->isInvalidDecl()) Invalid = true; @@ -1434,7 +1455,7 @@ Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation, SuppressNew) continue; - if (Function->getBody()) + if (Function->hasBody()) continue; if (TSK == TSK_ExplicitInstantiationDefinition) { @@ -1444,7 +1465,7 @@ Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation, // specialization and is only an explicit instantiation definition // of members whose definition is visible at the point of // instantiation. - if (!Pattern->getBody()) + if (!Pattern->hasBody()) continue; Function->setTemplateSpecializationKind(TSK, PointOfInstantiation); diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp index 834b86d..2fd3528 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp @@ -42,12 +42,13 @@ namespace { // FIXME: Once we get closer to completion, replace these manually-written // declarations with automatically-generated ones from - // clang/AST/DeclNodes.def. + // clang/AST/DeclNodes.inc. 
Decl *VisitTranslationUnitDecl(TranslationUnitDecl *D); Decl *VisitNamespaceDecl(NamespaceDecl *D); Decl *VisitNamespaceAliasDecl(NamespaceAliasDecl *D); Decl *VisitTypedefDecl(TypedefDecl *D); Decl *VisitVarDecl(VarDecl *D); + Decl *VisitAccessSpecDecl(AccessSpecDecl *D); Decl *VisitFieldDecl(FieldDecl *D); Decl *VisitStaticAssertDecl(StaticAssertDecl *D); Decl *VisitEnumDecl(EnumDecl *D); @@ -142,14 +143,29 @@ bool TemplateDeclInstantiator::SubstQualifier(const TagDecl *OldDecl, return false; } -// FIXME: Is this too simple? +// FIXME: Is this still too simple? void TemplateDeclInstantiator::InstantiateAttrs(Decl *Tmpl, Decl *New) { - for (const Attr *TmplAttr = Tmpl->getAttrs(); TmplAttr; + for (const Attr *TmplAttr = Tmpl->getAttrs(); TmplAttr; TmplAttr = TmplAttr->getNext()) { - + // FIXME: This should be generalized to more than just the AlignedAttr. + if (const AlignedAttr *Aligned = dyn_cast<AlignedAttr>(TmplAttr)) { + if (Aligned->isDependent()) { + // The alignment expression is not potentially evaluated. + EnterExpressionEvaluationContext Unevaluated(SemaRef, + Action::Unevaluated); + + OwningExprResult Result = SemaRef.SubstExpr(Aligned->getAlignmentExpr(), + TemplateArgs); + if (!Result.isInvalid()) + // FIXME: Is this the correct source location? + SemaRef.AddAlignedAttr(Aligned->getAlignmentExpr()->getExprLoc(), + New, Result.takeAs<Expr>()); + continue; + } + } + // FIXME: Is cloning correct for all attributes? Attr *NewAttr = TmplAttr->clone(SemaRef.Context); - New->addAttr(NewAttr); } } @@ -360,7 +376,9 @@ Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D) { Var->setLexicalDeclContext(D->getLexicalDeclContext()); Var->setAccess(D->getAccess()); - Var->setUsed(D->isUsed()); + + if (!D->isStaticDataMember()) + Var->setUsed(D->isUsed(false)); // FIXME: In theory, we could have a previous declaration for variables that // are not static data members. 
@@ -373,15 +391,16 @@ Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D) { SemaRef.CheckVariableDeclaration(Var, Previous, Redeclaration); if (D->isOutOfLine()) { - D->getLexicalDeclContext()->addDecl(Var); + if (!D->isStaticDataMember()) + D->getLexicalDeclContext()->addDecl(Var); Owner->makeDeclVisibleInContext(Var); } else { Owner->addDecl(Var); - if (Owner->isFunctionOrMethod()) SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, Var); } - + InstantiateAttrs(D, Var); + // Link instantiations of static data members back to the template from // which they were instantiated. if (Var->isStaticDataMember()) @@ -436,6 +455,14 @@ Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D) { return Var; } +Decl *TemplateDeclInstantiator::VisitAccessSpecDecl(AccessSpecDecl *D) { + AccessSpecDecl* AD + = AccessSpecDecl::Create(SemaRef.Context, D->getAccess(), Owner, + D->getAccessSpecifierLoc(), D->getColonLoc()); + Owner->addHiddenDecl(AD); + return AD; +} + Decl *TemplateDeclInstantiator::VisitFieldDecl(FieldDecl *D) { bool Invalid = false; TypeSourceInfo *DI = D->getTypeSourceInfo(); @@ -793,7 +820,7 @@ Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) { // Trigger creation of the type for the instantiation. SemaRef.Context.getInjectedClassNameType(RecordInst, - Inst->getInjectedClassNameSpecialization(SemaRef.Context)); + Inst->getInjectedClassNameSpecialization()); // Finish handling of friends. 
if (isFriend) { @@ -951,9 +978,10 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D, void *InsertPos = 0; if (FunctionTemplate && !TemplateParams) { llvm::FoldingSetNodeID ID; - FunctionTemplateSpecializationInfo::Profile(ID, - TemplateArgs.getInnermost().getFlatArgumentList(), - TemplateArgs.getInnermost().flat_size(), + std::pair<const TemplateArgument *, unsigned> Innermost + = TemplateArgs.getInnermost(); + FunctionTemplateSpecializationInfo::Profile(ID, Innermost.first, + Innermost.second, SemaRef.Context); FunctionTemplateSpecializationInfo *Info @@ -1062,8 +1090,12 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D, } } else if (FunctionTemplate) { // Record this function template specialization. + std::pair<const TemplateArgument *, unsigned> Innermost + = TemplateArgs.getInnermost(); Function->setFunctionTemplateSpecialization(FunctionTemplate, - &TemplateArgs.getInnermost(), + new (SemaRef.Context) TemplateArgumentList(SemaRef.Context, + Innermost.first, + Innermost.second), InsertPos); } else if (isFriend && D->isThisDeclarationADefinition()) { // TODO: should we remember this connection regardless of whether @@ -1154,7 +1186,7 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D, D->isThisDeclarationADefinition()) { // Check for a function body. 
const FunctionDecl *Definition = 0; - if (Function->getBody(Definition) && + if (Function->hasBody(Definition) && Definition->getTemplateSpecializationKind() == TSK_Undeclared) { SemaRef.Diag(Function->getLocation(), diag::err_redefinition) << Function->getDeclName(); @@ -1170,7 +1202,7 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D, ((*R)->getFriendObjectKind() != Decl::FOK_None)) { if (const FunctionDecl *RPattern = (*R)->getTemplateInstantiationPattern()) - if (RPattern->getBody(RPattern)) { + if (RPattern->hasBody(RPattern)) { SemaRef.Diag(Function->getLocation(), diag::err_redefinition) << Function->getDeclName(); SemaRef.Diag((*R)->getLocation(), diag::note_previous_definition); @@ -1200,9 +1232,10 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D, // template. Check whether there is already a function template // specialization for this particular set of template arguments. llvm::FoldingSetNodeID ID; - FunctionTemplateSpecializationInfo::Profile(ID, - TemplateArgs.getInnermost().getFlatArgumentList(), - TemplateArgs.getInnermost().flat_size(), + std::pair<const TemplateArgument *, unsigned> Innermost + = TemplateArgs.getInnermost(); + FunctionTemplateSpecializationInfo::Profile(ID, Innermost.first, + Innermost.second, SemaRef.Context); FunctionTemplateSpecializationInfo *Info @@ -1347,8 +1380,12 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D, Method->setDescribedFunctionTemplate(FunctionTemplate); } else if (FunctionTemplate) { // Record this function template specialization. + std::pair<const TemplateArgument *, unsigned> Innermost + = TemplateArgs.getInnermost(); Method->setFunctionTemplateSpecialization(FunctionTemplate, - &TemplateArgs.getInnermost(), + new (SemaRef.Context) TemplateArgumentList(SemaRef.Context, + Innermost.first, + Innermost.second), InsertPos); } else if (!isFriend) { // Record that this is an instantiation of a member function. 
@@ -1485,7 +1522,7 @@ Decl *TemplateDeclInstantiator::VisitNonTypeTemplateParmDecl( if (Invalid) Param->setInvalidDecl(); - Param->setDefaultArgument(D->getDefaultArgument()); + Param->setDefaultArgument(D->getDefaultArgument(), false); // Introduce this template parameter's instantiation into the instantiation // scope. @@ -1513,7 +1550,7 @@ TemplateDeclInstantiator::VisitTemplateTemplateParmDecl( = TemplateTemplateParmDecl::Create(SemaRef.Context, Owner, D->getLocation(), D->getDepth() - 1, D->getPosition(), D->getIdentifier(), InstParams); - Param->setDefaultArgument(D->getDefaultArgument()); + Param->setDefaultArgument(D->getDefaultArgument(), false); // Introduce this template parameter's instantiation into the instantiation // scope. @@ -1966,6 +2003,8 @@ TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New, Proto->getExtInfo())); } + InstantiateAttrs(Tmpl, New); + return false; } @@ -2011,7 +2050,7 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive, bool DefinitionRequired) { - if (Function->isInvalidDecl() || Function->getBody()) + if (Function->isInvalidDecl() || Function->hasBody()) return; // Never instantiate an explicit specialization. 
@@ -2568,7 +2607,7 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, ClassTemplateDecl *ClassTemplate = Record->getDescribedClassTemplate(); if (ClassTemplate) { - T = ClassTemplate->getInjectedClassNameSpecialization(Context); + T = ClassTemplate->getInjectedClassNameSpecialization(); } else if (ClassTemplatePartialSpecializationDecl *PartialSpec = dyn_cast<ClassTemplatePartialSpecializationDecl>(Record)) { ClassTemplate = PartialSpec->getSpecializedTemplate(); diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp index 35efa61..a4fc98c 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp @@ -383,8 +383,12 @@ static QualType ConvertDeclSpecToType(Sema &TheSema, } else if (DS.isTypeAltiVecVector()) { unsigned typeSize = static_cast<unsigned>(Context.getTypeSize(Result)); assert(typeSize > 0 && "type size for vector must be greater than 0 bits"); - Result = Context.getVectorType(Result, 128/typeSize, true, - DS.isTypeAltiVecPixel()); + VectorType::AltiVecSpecific AltiVecSpec = VectorType::AltiVec; + if (DS.isTypeAltiVecPixel()) + AltiVecSpec = VectorType::Pixel; + else if (DS.isTypeAltiVecBool()) + AltiVecSpec = VectorType::Bool; + Result = Context.getVectorType(Result, 128/typeSize, AltiVecSpec); } assert(DS.getTypeSpecComplex() != DeclSpec::TSC_imaginary && @@ -472,12 +476,49 @@ static std::string getPrintableNameForEntity(DeclarationName Entity) { return "type name"; } +QualType Sema::BuildQualifiedType(QualType T, SourceLocation Loc, + Qualifiers Qs) { + // Enforce C99 6.7.3p2: "Types other than pointer types derived from + // object or incomplete types shall not be restrict-qualified." 
+ if (Qs.hasRestrict()) { + unsigned DiagID = 0; + QualType ProblemTy; + + const Type *Ty = T->getCanonicalTypeInternal().getTypePtr(); + if (const ReferenceType *RTy = dyn_cast<ReferenceType>(Ty)) { + if (!RTy->getPointeeType()->isIncompleteOrObjectType()) { + DiagID = diag::err_typecheck_invalid_restrict_invalid_pointee; + ProblemTy = T->getAs<ReferenceType>()->getPointeeType(); + } + } else if (const PointerType *PTy = dyn_cast<PointerType>(Ty)) { + if (!PTy->getPointeeType()->isIncompleteOrObjectType()) { + DiagID = diag::err_typecheck_invalid_restrict_invalid_pointee; + ProblemTy = T->getAs<PointerType>()->getPointeeType(); + } + } else if (const MemberPointerType *PTy = dyn_cast<MemberPointerType>(Ty)) { + if (!PTy->getPointeeType()->isIncompleteOrObjectType()) { + DiagID = diag::err_typecheck_invalid_restrict_invalid_pointee; + ProblemTy = T->getAs<PointerType>()->getPointeeType(); + } + } else if (!Ty->isDependentType()) { + // FIXME: this deserves a proper diagnostic + DiagID = diag::err_typecheck_invalid_restrict_invalid_pointee; + ProblemTy = T; + } + + if (DiagID) { + Diag(Loc, DiagID) << ProblemTy; + Qs.removeRestrict(); + } + } + + return Context.getQualifiedType(T, Qs); +} + /// \brief Build a pointer type. /// /// \param T The type to which we'll be building a pointer. /// -/// \param Quals The cvr-qualifiers to be applied to the pointer type. -/// /// \param Loc The location of the entity whose type involves this /// pointer type or, if there is no such entity, the location of the /// type that will have pointer type. @@ -487,7 +528,7 @@ static std::string getPrintableNameForEntity(DeclarationName Entity) { /// /// \returns A suitable pointer type, if there are no /// errors. Otherwise, returns a NULL type. -QualType Sema::BuildPointerType(QualType T, unsigned Quals, +QualType Sema::BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity) { if (T->isReferenceType()) { // C++ 8.3.2p4: There shall be no ... pointers to references ... 
@@ -496,28 +537,16 @@ QualType Sema::BuildPointerType(QualType T, unsigned Quals, return QualType(); } - Qualifiers Qs = Qualifiers::fromCVRMask(Quals); - - // Enforce C99 6.7.3p2: "Types other than pointer types derived from - // object or incomplete types shall not be restrict-qualified." - if (Qs.hasRestrict() && !T->isIncompleteOrObjectType()) { - Diag(Loc, diag::err_typecheck_invalid_restrict_invalid_pointee) - << T; - Qs.removeRestrict(); - } - assert(!T->isObjCObjectType() && "Should build ObjCObjectPointerType"); // Build the pointer type. - return Context.getQualifiedType(Context.getPointerType(T), Qs); + return Context.getPointerType(T); } /// \brief Build a reference type. /// /// \param T The type to which we'll be building a reference. /// -/// \param CVR The cvr-qualifiers to be applied to the reference type. -/// /// \param Loc The location of the entity whose type involves this /// reference type or, if there is no such entity, the location of the /// type that will have reference type. @@ -528,10 +557,8 @@ QualType Sema::BuildPointerType(QualType T, unsigned Quals, /// \returns A suitable reference type, if there are no /// errors. Otherwise, returns a NULL type. QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue, - unsigned CVR, SourceLocation Loc, + SourceLocation Loc, DeclarationName Entity) { - Qualifiers Quals = Qualifiers::fromCVRMask(CVR); - bool LValueRef = SpelledAsLValue || T->getAs<LValueReferenceType>(); // C++0x [dcl.typedef]p9: If a typedef TD names a type that is a @@ -562,31 +589,10 @@ QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue, return QualType(); } - // Enforce C99 6.7.3p2: "Types other than pointer types derived from - // object or incomplete types shall not be restrict-qualified." - if (Quals.hasRestrict() && !T->isIncompleteOrObjectType()) { - Diag(Loc, diag::err_typecheck_invalid_restrict_invalid_pointee) - << T; - Quals.removeRestrict(); - } - - // C++ [dcl.ref]p1: - // [...] 
Cv-qualified references are ill-formed except when the - // cv-qualifiers are introduced through the use of a typedef - // (7.1.3) or of a template type argument (14.3), in which case - // the cv-qualifiers are ignored. - // - // We diagnose extraneous cv-qualifiers for the non-typedef, - // non-template type argument case within the parser. Here, we just - // ignore any extraneous cv-qualifiers. - Quals.removeConst(); - Quals.removeVolatile(); - // Handle restrict on references. if (LValueRef) - return Context.getQualifiedType( - Context.getLValueReferenceType(T, SpelledAsLValue), Quals); - return Context.getQualifiedType(Context.getRValueReferenceType(T), Quals); + return Context.getLValueReferenceType(T, SpelledAsLValue); + return Context.getRValueReferenceType(T); } /// \brief Build an array type. @@ -597,9 +603,6 @@ QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue, /// /// \param ArraySize Expression describing the size of the array. /// -/// \param Quals The cvr-qualifiers to be applied to the array's -/// element type. -/// /// \param Loc The location of the entity whose type involves this /// array type or, if there is no such entity, the location of the /// type that will have array type. @@ -815,7 +818,7 @@ QualType Sema::BuildFunctionType(QualType T, << T->isFunctionType() << T; return QualType(); } - + bool Invalid = false; for (unsigned Idx = 0; Idx < NumParamTypes; ++Idx) { QualType ParamType = adjustParameterType(ParamTypes[Idx]); @@ -846,10 +849,8 @@ QualType Sema::BuildFunctionType(QualType T, /// \returns a member pointer type, if successful, or a NULL type if there was /// an error. QualType Sema::BuildMemberPointerType(QualType T, QualType Class, - unsigned CVR, SourceLocation Loc, + SourceLocation Loc, DeclarationName Entity) { - Qualifiers Quals = Qualifiers::fromCVRMask(CVR); - // Verify that we're not building a pointer to pointer to function with // exception specification. 
if (CheckDistantExceptionSpec(T)) { @@ -863,7 +864,7 @@ QualType Sema::BuildMemberPointerType(QualType T, QualType Class, T = Context.getCanonicalType(T); } - // C++ 8.3.3p3: A pointer to member shall not pointer to ... a member + // C++ 8.3.3p3: A pointer to member shall not point to ... a member // with reference type, or "cv void." if (T->isReferenceType()) { Diag(Loc, diag::err_illegal_decl_mempointer_to_reference) @@ -877,24 +878,12 @@ QualType Sema::BuildMemberPointerType(QualType T, QualType Class, return QualType(); } - // Enforce C99 6.7.3p2: "Types other than pointer types derived from - // object or incomplete types shall not be restrict-qualified." - if (Quals.hasRestrict() && !T->isIncompleteOrObjectType()) { - Diag(Loc, diag::err_typecheck_invalid_restrict_invalid_pointee) - << T; - - // FIXME: If we're doing this as part of template instantiation, - // we should return immediately. - Quals.removeRestrict(); - } - if (!Class->isDependentType() && !Class->isRecordType()) { Diag(Loc, diag::err_mempointer_in_nonclass_type) << Class; return QualType(); } - return Context.getQualifiedType( - Context.getMemberPointerType(T, Class.getTypePtr()), Quals); + return Context.getMemberPointerType(T, Class.getTypePtr()); } /// \brief Build a block pointer type. @@ -912,7 +901,7 @@ QualType Sema::BuildMemberPointerType(QualType T, QualType Class, /// /// \returns A suitable block pointer type, if there are no /// errors. Otherwise, returns a NULL type. 
-QualType Sema::BuildBlockPointerType(QualType T, unsigned CVR, +QualType Sema::BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity) { if (!T->isFunctionType()) { @@ -920,8 +909,7 @@ QualType Sema::BuildBlockPointerType(QualType T, unsigned CVR, return QualType(); } - Qualifiers Quals = Qualifiers::fromCVRMask(CVR); - return Context.getQualifiedType(Context.getBlockPointerType(T), Quals); + return Context.getBlockPointerType(T); } QualType Sema::GetTypeFromParser(TypeTy *Ty, TypeSourceInfo **TInfo) { @@ -947,9 +935,11 @@ QualType Sema::GetTypeFromParser(TypeTy *Ty, TypeSourceInfo **TInfo) { /// If OwnedDecl is non-NULL, and this declarator's decl-specifier-seq /// owns the declaration of a type (e.g., the definition of a struct /// type), then *OwnedDecl will receive the owned declaration. -QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S, - TypeSourceInfo **TInfo, - TagDecl **OwnedDecl) { +/// +/// The result of this call will never be null, but the associated +/// type may be a null type if there's an unrecoverable error. +TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S, + TagDecl **OwnedDecl) { // Determine the type of the declarator. Not all forms of declarator // have a type. QualType T; @@ -980,22 +970,18 @@ QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S, // Constructors and destructors don't have return types. Use // "void" instead. T = Context.VoidTy; - - if (TInfo) - ReturnTypeInfo = Context.getTrivialTypeSourceInfo(T, - D.getName().StartLocation); break; case UnqualifiedId::IK_ConversionFunctionId: // The result type of a conversion function is the type that it // converts to. T = GetTypeFromParser(D.getName().ConversionFunctionId, - TInfo? 
&ReturnTypeInfo : 0); + &ReturnTypeInfo); break; } if (T.isNull()) - return T; + return Context.getNullTypeSourceInfo(); if (T == Context.UndeducedAutoTy) { int Error = -1; @@ -1059,8 +1045,9 @@ QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S, if (!LangOpts.Blocks) Diag(DeclType.Loc, diag::err_blocks_disable); - T = BuildBlockPointerType(T, DeclType.Cls.TypeQuals, D.getIdentifierLoc(), - Name); + T = BuildBlockPointerType(T, D.getIdentifierLoc(), Name); + if (DeclType.Cls.TypeQuals) + T = BuildQualifiedType(T, DeclType.Loc, DeclType.Cls.TypeQuals); break; case DeclaratorChunk::Pointer: // Verify that we're not building a pointer to pointer to function with @@ -1072,15 +1059,15 @@ QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S, } if (getLangOptions().ObjC1 && T->getAs<ObjCObjectType>()) { T = Context.getObjCObjectPointerType(T); - T = Context.getCVRQualifiedType(T, DeclType.Ptr.TypeQuals); + if (DeclType.Ptr.TypeQuals) + T = BuildQualifiedType(T, DeclType.Loc, DeclType.Ptr.TypeQuals); break; } - T = BuildPointerType(T, DeclType.Ptr.TypeQuals, DeclType.Loc, Name); + T = BuildPointerType(T, DeclType.Loc, Name); + if (DeclType.Ptr.TypeQuals) + T = BuildQualifiedType(T, DeclType.Loc, DeclType.Ptr.TypeQuals); break; case DeclaratorChunk::Reference: { - Qualifiers Quals; - if (DeclType.Ref.HasRestrict) Quals.addRestrict(); - // Verify that we're not building a reference to pointer to function with // exception specification. if (getLangOptions().CPlusPlus && CheckDistantExceptionSpec(T)) { @@ -1088,8 +1075,11 @@ QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S, D.setInvalidType(true); // Build the type anyway. 
} - T = BuildReferenceType(T, DeclType.Ref.LValueRef, Quals, - DeclType.Loc, Name); + T = BuildReferenceType(T, DeclType.Ref.LValueRef, DeclType.Loc, Name); + + Qualifiers Quals; + if (DeclType.Ref.HasRestrict) + T = BuildQualifiedType(T, DeclType.Loc, Qualifiers::Restrict); break; } case DeclaratorChunk::Array: { @@ -1139,6 +1129,48 @@ QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S, D.setInvalidType(true); } + // cv-qualifiers on return types are pointless except when the type is a + // class type in C++. + if (T.getCVRQualifiers() && D.getDeclSpec().getTypeQualifiers() && + (!getLangOptions().CPlusPlus || + (!T->isDependentType() && !T->isRecordType()))) { + unsigned Quals = D.getDeclSpec().getTypeQualifiers(); + std::string QualStr; + unsigned NumQuals = 0; + SourceLocation Loc; + if (Quals & Qualifiers::Const) { + Loc = D.getDeclSpec().getConstSpecLoc(); + ++NumQuals; + QualStr = "const"; + } + if (Quals & Qualifiers::Volatile) { + if (NumQuals == 0) { + Loc = D.getDeclSpec().getVolatileSpecLoc(); + QualStr = "volatile"; + } else + QualStr += " volatile"; + ++NumQuals; + } + if (Quals & Qualifiers::Restrict) { + if (NumQuals == 0) { + Loc = D.getDeclSpec().getRestrictSpecLoc(); + QualStr = "restrict"; + } else + QualStr += " restrict"; + ++NumQuals; + } + assert(NumQuals > 0 && "No known qualifiers?"); + + SemaDiagnosticBuilder DB = Diag(Loc, diag::warn_qual_return_type); + DB << QualStr << NumQuals; + if (Quals & Qualifiers::Const) + DB << FixItHint::CreateRemoval(D.getDeclSpec().getConstSpecLoc()); + if (Quals & Qualifiers::Volatile) + DB << FixItHint::CreateRemoval(D.getDeclSpec().getVolatileSpecLoc()); + if (Quals & Qualifiers::Restrict) + DB << FixItHint::CreateRemoval(D.getDeclSpec().getRestrictSpecLoc()); + } + if (getLangOptions().CPlusPlus && D.getDeclSpec().isTypeSpecOwned()) { // C++ [dcl.fct]p6: // Types shall not be defined in return or parameter types. 
@@ -1154,29 +1186,14 @@ QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S, D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef) Diag(FTI.getThrowLoc(), diag::err_exception_spec_in_typedef); - if (FTI.NumArgs == 0) { - if (getLangOptions().CPlusPlus) { - // C++ 8.3.5p2: If the parameter-declaration-clause is empty, the - // function takes no arguments. - llvm::SmallVector<QualType, 4> Exceptions; - Exceptions.reserve(FTI.NumExceptions); - for (unsigned ei = 0, ee = FTI.NumExceptions; ei != ee; ++ei) { - // FIXME: Preserve type source info. - QualType ET = GetTypeFromParser(FTI.Exceptions[ei].Ty); - // Check that the type is valid for an exception spec, and drop it - // if not. - if (!CheckSpecifiedExceptionType(ET, FTI.Exceptions[ei].Range)) - Exceptions.push_back(ET); - } - T = Context.getFunctionType(T, NULL, 0, FTI.isVariadic, FTI.TypeQuals, - FTI.hasExceptionSpec, - FTI.hasAnyExceptionSpec, - Exceptions.size(), Exceptions.data(), - FunctionType::ExtInfo()); - } else if (FTI.isVariadic) { - // We allow a zero-parameter variadic function in C if the - // function is marked with the "overloadable" - // attribute. Scan for this attribute now. + if (!FTI.NumArgs && !FTI.isVariadic && !getLangOptions().CPlusPlus) { + // Simple void foo(), where the incoming T is the result type. + T = Context.getFunctionNoProtoType(T); + } else { + // We allow a zero-parameter variadic function in C if the + // function is marked with the "overloadable" attribute. Scan + // for this attribute now. 
+ if (!FTI.NumArgs && FTI.isVariadic && !getLangOptions().CPlusPlus) { bool Overloadable = false; for (const AttributeList *Attrs = D.getAttributes(); Attrs; Attrs = Attrs->getNext()) { @@ -1188,21 +1205,20 @@ QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S, if (!Overloadable) Diag(FTI.getEllipsisLoc(), diag::err_ellipsis_first_arg); - T = Context.getFunctionType(T, NULL, 0, FTI.isVariadic, 0, - false, false, 0, 0, - FunctionType::ExtInfo()); - } else { - // Simple void foo(), where the incoming T is the result type. - T = Context.getFunctionNoProtoType(T); } - } else if (FTI.ArgInfo[0].Param == 0) { - // C99 6.7.5.3p3: Reject int(x,y,z) when it's not a function definition. - Diag(FTI.ArgInfo[0].IdentLoc, diag::err_ident_list_in_fn_declaration); - D.setInvalidType(true); - } else { + + if (FTI.NumArgs && FTI.ArgInfo[0].Param == 0) { + // C99 6.7.5.3p3: Reject int(x,y,z) when it's not a function + // definition. + Diag(FTI.ArgInfo[0].IdentLoc, diag::err_ident_list_in_fn_declaration); + D.setInvalidType(true); + break; + } + // Otherwise, we have a function with an argument list that is // potentially variadic. llvm::SmallVector<QualType, 16> ArgTys; + ArgTys.reserve(FTI.NumArgs); for (unsigned i = 0, e = FTI.NumArgs; i != e; ++i) { ParmVarDecl *Param = @@ -1278,13 +1294,6 @@ QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S, break; } case DeclaratorChunk::MemberPointer: - // Verify that we're not building a pointer to pointer to function with - // exception specification. - if (getLangOptions().CPlusPlus && CheckDistantExceptionSpec(T)) { - Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec); - D.setInvalidType(true); - // Build the type anyway. - } // The scope spec must refer to a class, or be dependent. 
QualType ClsType; if (DeclType.Mem.Scope().isInvalid()) { @@ -1323,11 +1332,12 @@ QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S, } if (!ClsType.isNull()) - T = BuildMemberPointerType(T, ClsType, DeclType.Mem.TypeQuals, - DeclType.Loc, D.getIdentifier()); + T = BuildMemberPointerType(T, ClsType, DeclType.Loc, D.getIdentifier()); if (T.isNull()) { T = Context.IntTy; D.setInvalidType(true); + } else if (DeclType.Mem.TypeQuals) { + T = BuildQualifiedType(T, DeclType.Loc, DeclType.Mem.TypeQuals); } break; } @@ -1352,18 +1362,19 @@ QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S, // for a nonstatic member function, the function type to which a pointer // to member refers, or the top-level function type of a function typedef // declaration. + bool FreeFunction = (D.getContext() != Declarator::MemberContext && + (!D.getCXXScopeSpec().isSet() || + !computeDeclContext(D.getCXXScopeSpec(), /*FIXME:*/true)->isRecord())); if (FnTy->getTypeQuals() != 0 && D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef && - ((D.getContext() != Declarator::MemberContext && - (!D.getCXXScopeSpec().isSet() || - !computeDeclContext(D.getCXXScopeSpec(), /*FIXME:*/true) - ->isRecord())) || + (FreeFunction || D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static)) { if (D.isFunctionDeclarator()) Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_function_type); else Diag(D.getIdentifierLoc(), - diag::err_invalid_qualified_typedef_function_type_use); + diag::err_invalid_qualified_typedef_function_type_use) + << FreeFunction; // Strip the cv-quals from the type. T = Context.getFunctionType(FnTy->getResultType(), FnTy->arg_type_begin(), @@ -1372,6 +1383,11 @@ QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S, } } + // If there's a constexpr specifier, treat it as a top-level const. + if (D.getDeclSpec().isConstexprSpecified()) { + T.addConst(); + } + // Process any function attributes we might have delayed from the // declaration-specifiers. 
ProcessDelayedFnAttrs(*this, T, FnAttrsFromDeclSpec); @@ -1386,14 +1402,11 @@ QualType Sema::GetTypeForDeclarator(Declarator &D, Scope *S, DiagnoseDelayedFnAttrs(*this, FnAttrsFromPreviousChunk); - if (TInfo) { - if (D.isInvalidType()) - *TInfo = 0; - else - *TInfo = GetTypeSourceInfoForDeclarator(D, T, ReturnTypeInfo); - } - - return T; + if (T.isNull()) + return Context.getNullTypeSourceInfo(); + else if (D.isInvalidType()) + return Context.getTrivialTypeSourceInfo(T); + return GetTypeSourceInfoForDeclarator(D, T, ReturnTypeInfo); } namespace { @@ -1527,6 +1540,28 @@ namespace { // FIXME: load appropriate source location. TL.setNameLoc(DS.getTypeSpecTypeLoc()); } + void VisitDependentTemplateSpecializationTypeLoc( + DependentTemplateSpecializationTypeLoc TL) { + ElaboratedTypeKeyword Keyword + = TypeWithKeyword::getKeywordForTypeSpec(DS.getTypeSpecType()); + if (Keyword == ETK_Typename) { + TypeSourceInfo *TInfo = 0; + Sema::GetTypeFromParser(DS.getTypeRep(), &TInfo); + if (TInfo) { + TL.copy(cast<DependentTemplateSpecializationTypeLoc>( + TInfo->getTypeLoc())); + return; + } + } + TL.initializeLocal(SourceLocation()); + TL.setKeywordLoc(Keyword != ETK_None + ? DS.getTypeSpecTypeLoc() + : SourceLocation()); + const CXXScopeSpec& SS = DS.getTypeSpecScope(); + TL.setQualifierRange(SS.isEmpty() ? SourceRange() : SS.getRange()); + // FIXME: load appropriate source location. + TL.setNameLoc(DS.getTypeSpecTypeLoc()); + } void VisitTypeLoc(TypeLoc TL) { // FIXME: add other typespec types and change this to an assert. @@ -1651,53 +1686,14 @@ void LocInfoType::getAsStringInternal(std::string &Str, " GetTypeFromParser"); } -/// UnwrapSimilarPointerTypes - If T1 and T2 are pointer types that -/// may be similar (C++ 4.4), replaces T1 and T2 with the type that -/// they point to and return true. If T1 and T2 aren't pointer types -/// or pointer-to-member types, or if they are not similar at this -/// level, returns false and leaves T1 and T2 unchanged. 
Top-level -/// qualifiers on T1 and T2 are ignored. This function will typically -/// be called in a loop that successively "unwraps" pointer and -/// pointer-to-member types to compare them at each level. -bool Sema::UnwrapSimilarPointerTypes(QualType& T1, QualType& T2) { - const PointerType *T1PtrType = T1->getAs<PointerType>(), - *T2PtrType = T2->getAs<PointerType>(); - if (T1PtrType && T2PtrType) { - T1 = T1PtrType->getPointeeType(); - T2 = T2PtrType->getPointeeType(); - return true; - } - - const MemberPointerType *T1MPType = T1->getAs<MemberPointerType>(), - *T2MPType = T2->getAs<MemberPointerType>(); - if (T1MPType && T2MPType && - Context.getCanonicalType(T1MPType->getClass()) == - Context.getCanonicalType(T2MPType->getClass())) { - T1 = T1MPType->getPointeeType(); - T2 = T2MPType->getPointeeType(); - return true; - } - - if (getLangOptions().ObjC1) { - const ObjCObjectPointerType *T1OPType = T1->getAs<ObjCObjectPointerType>(), - *T2OPType = T2->getAs<ObjCObjectPointerType>(); - if (T1OPType && T2OPType) { - T1 = T1OPType->getPointeeType(); - T2 = T2OPType->getPointeeType(); - return true; - } - } - return false; -} - Sema::TypeResult Sema::ActOnTypeName(Scope *S, Declarator &D) { // C99 6.7.6: Type names have no identifier. This is already validated by // the parser. 
assert(D.getIdentifier() == 0 && "Type name should have no identifier!"); - TypeSourceInfo *TInfo = 0; TagDecl *OwnedTag = 0; - QualType T = GetTypeForDeclarator(D, S, &TInfo, &OwnedTag); + TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S, &OwnedTag); + QualType T = TInfo->getType(); if (D.isInvalidType()) return true; @@ -1714,9 +1710,7 @@ Sema::TypeResult Sema::ActOnTypeName(Scope *S, Declarator &D) { << Context.getTypeDeclType(OwnedTag); } - if (TInfo) - T = CreateLocInfoType(T, TInfo); - + T = CreateLocInfoType(T, TInfo); return T.getAsOpaquePtr(); } @@ -1934,7 +1928,8 @@ bool ProcessFnAttr(Sema &S, QualType &Type, const AttributeList &Attr) { /// The raw attribute should contain precisely 1 argument, the vector size for /// the variable, measured in bytes. If curType and rawAttr are well formed, /// this routine will return a new vector type. -static void HandleVectorSizeAttr(QualType& CurType, const AttributeList &Attr, Sema &S) { +static void HandleVectorSizeAttr(QualType& CurType, const AttributeList &Attr, + Sema &S) { // Check the attribute arugments. if (Attr.getNumArgs() != 1) { S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1; @@ -1977,7 +1972,8 @@ static void HandleVectorSizeAttr(QualType& CurType, const AttributeList &Attr, S // Success! Instantiate the vector type, the number of elements is > 0, and // not required to be a power of 2, unlike GCC. - CurType = S.Context.getVectorType(CurType, vectorSize/typeSize, false, false); + CurType = S.Context.getVectorType(CurType, vectorSize/typeSize, + VectorType::NotAltiVec); } void ProcessTypeAttributeList(Sema &S, QualType &Result, diff --git a/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h b/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h index a18701e..17103c5 100644 --- a/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h +++ b/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h @@ -442,7 +442,7 @@ public: /// By default, performs semantic analysis when building the vector type. 
/// Subclasses may override this routine to provide different behavior. QualType RebuildVectorType(QualType ElementType, unsigned NumElements, - bool IsAltiVec, bool IsPixel); + VectorType::AltiVecSpecific AltiVecSpec); /// \brief Build a new extended vector type given the element type and /// number of elements. @@ -533,16 +533,30 @@ public: /// By default, builds a new DependentNameType type from the /// nested-name-specifier and the given type. Subclasses may override /// this routine to provide different behavior. - QualType RebuildDependentNameType(ElaboratedTypeKeyword Keyword, - NestedNameSpecifier *NNS, QualType T) { - if (NNS->isDependent()) { - // If the name is still dependent, just build a new dependent name type. - CXXScopeSpec SS; - SS.setScopeRep(NNS); - if (!SemaRef.computeDeclContext(SS)) - return SemaRef.Context.getDependentNameType(Keyword, NNS, - cast<TemplateSpecializationType>(T)); - } + QualType RebuildDependentTemplateSpecializationType( + ElaboratedTypeKeyword Keyword, + NestedNameSpecifier *NNS, + const IdentifierInfo *Name, + SourceLocation NameLoc, + const TemplateArgumentListInfo &Args) { + // Rebuild the template name. + // TODO: avoid TemplateName abstraction + TemplateName InstName = + getDerived().RebuildTemplateName(NNS, *Name, QualType()); + + if (InstName.isNull()) + return QualType(); + + // If it's still dependent, make a dependent specialization. + if (InstName.getAsDependentTemplateName()) + return SemaRef.Context.getDependentTemplateSpecializationType( + Keyword, NNS, Name, Args); + + // Otherwise, make an elaborated type wrapping a non-dependent + // specialization. 
+ QualType T = + getDerived().RebuildTemplateSpecializationType(InstName, NameLoc, Args); + if (T.isNull()) return QualType(); return SemaRef.Context.getElaboratedType(Keyword, NNS, T); } @@ -1160,7 +1174,9 @@ public: SS.setScopeRep(Qualifier); } - QualType BaseType = ((Expr*) Base.get())->getType(); + Expr *BaseExpr = Base.takeAs<Expr>(); + getSema().DefaultFunctionArrayConversion(BaseExpr); + QualType BaseType = BaseExpr->getType(); // FIXME: this involves duplicating earlier analysis in a lot of // cases; we should avoid this when possible. @@ -1169,8 +1185,8 @@ public: R.addDecl(FoundDecl); R.resolveKind(); - return getSema().BuildMemberReferenceExpr(move(Base), BaseType, - OpLoc, isArrow, + return getSema().BuildMemberReferenceExpr(getSema().Owned(BaseExpr), + BaseType, OpLoc, isArrow, SS, FirstQualifierInScope, R, ExplicitTemplateArgs); } @@ -1561,7 +1577,7 @@ public: /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. 
- OwningExprResult RebuildCXXZeroInitValueExpr(SourceLocation TypeStartLoc, + OwningExprResult RebuildCXXScalarValueInitExpr(SourceLocation TypeStartLoc, SourceLocation LParenLoc, QualType T, SourceLocation RParenLoc) { @@ -1580,7 +1596,7 @@ public: SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, - bool ParenTypeId, + SourceRange TypeIdParens, QualType AllocType, SourceLocation TypeLoc, SourceRange TypeRange, @@ -1592,7 +1608,7 @@ public: PlacementLParen, move(PlacementArgs), PlacementRParen, - ParenTypeId, + TypeIdParens, AllocType, TypeLoc, TypeRange, @@ -1815,7 +1831,8 @@ public: Sema::LookupMemberName); OwningExprResult Result = getSema().LookupMemberExpr(R, Base, IsArrow, /*FIME:*/IvarLoc, - SS, DeclPtrTy()); + SS, DeclPtrTy(), + false); if (Result.isInvalid()) return getSema().ExprError(); @@ -1844,7 +1861,8 @@ public: bool IsArrow = false; OwningExprResult Result = getSema().LookupMemberExpr(R, Base, IsArrow, /*FIME:*/PropertyLoc, - SS, DeclPtrTy()); + SS, DeclPtrTy(), + false); if (Result.isInvalid()) return getSema().ExprError(); @@ -1892,7 +1910,8 @@ public: Sema::LookupMemberName); OwningExprResult Result = getSema().LookupMemberExpr(R, Base, IsArrow, /*FIME:*/IsaLoc, - SS, DeclPtrTy()); + SS, DeclPtrTy(), + false); if (Result.isInvalid()) return getSema().ExprError(); @@ -1933,7 +1952,7 @@ public: Expr **Subs = (Expr **)SubExprs.release(); CallExpr *TheCall = new (SemaRef.Context) CallExpr(SemaRef.Context, Callee, Subs, NumSubExprs, - Builtin->getResultType(), + Builtin->getCallResultType(), RParenLoc); OwningExprResult OwnedCall(SemaRef.Owned(TheCall)); @@ -2405,11 +2424,11 @@ TreeTransform<Derived>::TransformQualifiedType(TypeLocBuilder &TLB, if (Result->isFunctionType() || Result->isReferenceType()) return Result; - Result = SemaRef.Context.getQualifiedType(Result, Quals); - - TLB.push<QualifiedTypeLoc>(Result); - - // No location information to preserve. 
+ if (!Quals.empty()) { + Result = SemaRef.BuildQualifiedType(Result, T.getBeginLoc(), Quals); + TLB.push<QualifiedTypeLoc>(Result); + // No location information to preserve. + } return Result; } @@ -2792,7 +2811,7 @@ QualType TreeTransform<Derived>::TransformVectorType(TypeLocBuilder &TLB, if (getDerived().AlwaysRebuild() || ElementType != T->getElementType()) { Result = getDerived().RebuildVectorType(ElementType, T->getNumElements(), - T->isAltiVec(), T->isPixel()); + T->getAltiVecSpecific()); if (Result.isNull()) return QualType(); } @@ -3298,46 +3317,23 @@ QualType TreeTransform<Derived>::TransformDependentNameType(TypeLocBuilder &TLB, if (!NNS) return QualType(); - QualType Result; - - if (const TemplateSpecializationType *TemplateId = T->getTemplateId()) { - QualType NewTemplateId - = getDerived().TransformType(QualType(TemplateId, 0)); - if (NewTemplateId.isNull()) - return QualType(); - - if (!getDerived().AlwaysRebuild() && - NNS == T->getQualifier() && - NewTemplateId == QualType(TemplateId, 0)) - return QualType(T, 0); - - Result = getDerived().RebuildDependentNameType(T->getKeyword(), NNS, - NewTemplateId); - } else { - Result = getDerived().RebuildDependentNameType(T->getKeyword(), NNS, - T->getIdentifier(), - TL.getKeywordLoc(), - TL.getQualifierRange(), - TL.getNameLoc()); - } + QualType Result + = getDerived().RebuildDependentNameType(T->getKeyword(), NNS, + T->getIdentifier(), + TL.getKeywordLoc(), + TL.getQualifierRange(), + TL.getNameLoc()); if (Result.isNull()) return QualType(); if (const ElaboratedType* ElabT = Result->getAs<ElaboratedType>()) { QualType NamedT = ElabT->getNamedType(); - if (isa<TemplateSpecializationType>(NamedT)) { - TemplateSpecializationTypeLoc NamedTLoc - = TLB.push<TemplateSpecializationTypeLoc>(NamedT); - // FIXME: fill locations - NamedTLoc.initializeLocal(TL.getNameLoc()); - } else { - TLB.pushTypeSpec(NamedT).setNameLoc(TL.getNameLoc()); - } + TLB.pushTypeSpec(NamedT).setNameLoc(TL.getNameLoc()); + ElaboratedTypeLoc 
NewTL = TLB.push<ElaboratedTypeLoc>(Result); NewTL.setKeywordLoc(TL.getKeywordLoc()); NewTL.setQualifierRange(TL.getQualifierRange()); - } - else { + } else { DependentNameTypeLoc NewTL = TLB.push<DependentNameTypeLoc>(Result); NewTL.setKeywordLoc(TL.getKeywordLoc()); NewTL.setQualifierRange(TL.getQualifierRange()); @@ -3347,6 +3343,62 @@ QualType TreeTransform<Derived>::TransformDependentNameType(TypeLocBuilder &TLB, } template<typename Derived> +QualType TreeTransform<Derived>:: + TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB, + DependentTemplateSpecializationTypeLoc TL, + QualType ObjectType) { + DependentTemplateSpecializationType *T = TL.getTypePtr(); + + NestedNameSpecifier *NNS + = getDerived().TransformNestedNameSpecifier(T->getQualifier(), + TL.getQualifierRange(), + ObjectType); + if (!NNS) + return QualType(); + + TemplateArgumentListInfo NewTemplateArgs; + NewTemplateArgs.setLAngleLoc(TL.getLAngleLoc()); + NewTemplateArgs.setRAngleLoc(TL.getRAngleLoc()); + + for (unsigned I = 0, E = T->getNumArgs(); I != E; ++I) { + TemplateArgumentLoc Loc; + if (getDerived().TransformTemplateArgument(TL.getArgLoc(I), Loc)) + return QualType(); + NewTemplateArgs.addArgument(Loc); + } + + QualType Result = getDerived().RebuildDependentTemplateSpecializationType( + T->getKeyword(), + NNS, + T->getIdentifier(), + TL.getNameLoc(), + NewTemplateArgs); + if (Result.isNull()) + return QualType(); + + if (const ElaboratedType *ElabT = dyn_cast<ElaboratedType>(Result)) { + QualType NamedT = ElabT->getNamedType(); + + // Copy information relevant to the template specialization. + TemplateSpecializationTypeLoc NamedTL + = TLB.push<TemplateSpecializationTypeLoc>(NamedT); + NamedTL.setLAngleLoc(TL.getLAngleLoc()); + NamedTL.setRAngleLoc(TL.getRAngleLoc()); + for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I) + NamedTL.setArgLocInfo(I, TL.getArgLocInfo(I)); + + // Copy information relevant to the elaborated type. 
+ ElaboratedTypeLoc NewTL = TLB.push<ElaboratedTypeLoc>(Result); + NewTL.setKeywordLoc(TL.getKeywordLoc()); + NewTL.setQualifierRange(TL.getQualifierRange()); + } else { + TypeLoc NewTL(Result, TL.getOpaqueData()); + TLB.pushFullCopy(NewTL); + } + return Result; +} + +template<typename Derived> QualType TreeTransform<Derived>::TransformObjCInterfaceType(TypeLocBuilder &TLB, ObjCInterfaceTypeLoc TL, @@ -5167,7 +5219,7 @@ TreeTransform<Derived>::TransformCXXDefaultArgExpr(CXXDefaultArgExpr *E) { template<typename Derived> Sema::OwningExprResult -TreeTransform<Derived>::TransformCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) { +TreeTransform<Derived>::TransformCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) { TemporaryBase Rebase(*this, E->getTypeBeginLoc(), DeclarationName()); QualType T = getDerived().TransformType(E->getType()); @@ -5178,10 +5230,10 @@ TreeTransform<Derived>::TransformCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) { T == E->getType()) return SemaRef.Owned(E->Retain()); - return getDerived().RebuildCXXZeroInitValueExpr(E->getTypeBeginLoc(), - /*FIXME:*/E->getTypeBeginLoc(), - T, - E->getRParenLoc()); + return getDerived().RebuildCXXScalarValueInitExpr(E->getTypeBeginLoc(), + /*FIXME:*/E->getTypeBeginLoc(), + T, + E->getRParenLoc()); } template<typename Derived> @@ -5300,7 +5352,7 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) { /*FIXME:*/E->getLocStart(), move_arg(PlacementArgs), /*FIXME:*/E->getLocStart(), - E->isParenTypeId(), + E->getTypeIdParens(), AllocType, /*FIXME:*/E->getLocStart(), /*FIXME:*/SourceRange(), @@ -6165,17 +6217,75 @@ TreeTransform<Derived>::TransformShuffleVectorExpr(ShuffleVectorExpr *E) { template<typename Derived> Sema::OwningExprResult TreeTransform<Derived>::TransformBlockExpr(BlockExpr *E) { - // FIXME: Implement this! 
- assert(false && "Cannot transform block expressions yet"); - return SemaRef.Owned(E->Retain()); + SourceLocation CaretLoc(E->getExprLoc()); + + SemaRef.ActOnBlockStart(CaretLoc, /*Scope=*/0); + BlockScopeInfo *CurBlock = SemaRef.getCurBlock(); + CurBlock->TheDecl->setIsVariadic(E->getBlockDecl()->isVariadic()); + llvm::SmallVector<ParmVarDecl*, 4> Params; + llvm::SmallVector<QualType, 4> ParamTypes; + + // Parameter substitution. + const BlockDecl *BD = E->getBlockDecl(); + for (BlockDecl::param_const_iterator P = BD->param_begin(), + EN = BD->param_end(); P != EN; ++P) { + ParmVarDecl *OldParm = (*P); + ParmVarDecl *NewParm = getDerived().TransformFunctionTypeParam(OldParm); + QualType NewType = NewParm->getType(); + Params.push_back(NewParm); + ParamTypes.push_back(NewParm->getType()); + } + + const FunctionType *BExprFunctionType = E->getFunctionType(); + QualType BExprResultType = BExprFunctionType->getResultType(); + if (!BExprResultType.isNull()) { + if (!BExprResultType->isDependentType()) + CurBlock->ReturnType = BExprResultType; + else if (BExprResultType != SemaRef.Context.DependentTy) + CurBlock->ReturnType = getDerived().TransformType(BExprResultType); + } + + // Transform the body + OwningStmtResult Body = getDerived().TransformStmt(E->getBody()); + if (Body.isInvalid()) + return SemaRef.ExprError(); + // Set the parameters on the block decl. + if (!Params.empty()) + CurBlock->TheDecl->setParams(Params.data(), Params.size()); + + QualType FunctionType = getDerived().RebuildFunctionProtoType( + CurBlock->ReturnType, + ParamTypes.data(), + ParamTypes.size(), + BD->isVariadic(), + 0); + + CurBlock->FunctionType = FunctionType; + return SemaRef.ActOnBlockStmtExpr(CaretLoc, move(Body), /*Scope=*/0); } template<typename Derived> Sema::OwningExprResult TreeTransform<Derived>::TransformBlockDeclRefExpr(BlockDeclRefExpr *E) { - // FIXME: Implement this! 
- assert(false && "Cannot transform block-related expressions yet"); - return SemaRef.Owned(E->Retain()); + NestedNameSpecifier *Qualifier = 0; + + ValueDecl *ND + = cast_or_null<ValueDecl>(getDerived().TransformDecl(E->getLocation(), + E->getDecl())); + if (!ND) + return SemaRef.ExprError(); + + if (!getDerived().AlwaysRebuild() && + ND == E->getDecl()) { + // Mark it referenced in the new context regardless. + // FIXME: this is a bit instantiation-specific. + SemaRef.MarkDeclarationReferenced(E->getLocation(), ND); + + return SemaRef.Owned(E->Retain()); + } + + return getDerived().RebuildDeclRefExpr(Qualifier, SourceLocation(), + ND, E->getLocation(), 0); } //===----------------------------------------------------------------------===// @@ -6185,14 +6295,14 @@ TreeTransform<Derived>::TransformBlockDeclRefExpr(BlockDeclRefExpr *E) { template<typename Derived> QualType TreeTransform<Derived>::RebuildPointerType(QualType PointeeType, SourceLocation Star) { - return SemaRef.BuildPointerType(PointeeType, Qualifiers(), Star, + return SemaRef.BuildPointerType(PointeeType, Star, getDerived().getBaseEntity()); } template<typename Derived> QualType TreeTransform<Derived>::RebuildBlockPointerType(QualType PointeeType, SourceLocation Star) { - return SemaRef.BuildBlockPointerType(PointeeType, Qualifiers(), Star, + return SemaRef.BuildBlockPointerType(PointeeType, Star, getDerived().getBaseEntity()); } @@ -6201,7 +6311,7 @@ QualType TreeTransform<Derived>::RebuildReferenceType(QualType ReferentType, bool WrittenAsLValue, SourceLocation Sigil) { - return SemaRef.BuildReferenceType(ReferentType, WrittenAsLValue, Qualifiers(), + return SemaRef.BuildReferenceType(ReferentType, WrittenAsLValue, Sigil, getDerived().getBaseEntity()); } @@ -6210,7 +6320,7 @@ QualType TreeTransform<Derived>::RebuildMemberPointerType(QualType PointeeType, QualType ClassType, SourceLocation Sigil) { - return SemaRef.BuildMemberPointerType(PointeeType, ClassType, Qualifiers(), + return 
SemaRef.BuildMemberPointerType(PointeeType, ClassType, Sigil, getDerived().getBaseEntity()); } @@ -6293,11 +6403,10 @@ TreeTransform<Derived>::RebuildDependentSizedArrayType(QualType ElementType, template<typename Derived> QualType TreeTransform<Derived>::RebuildVectorType(QualType ElementType, - unsigned NumElements, - bool IsAltiVec, bool IsPixel) { + unsigned NumElements, + VectorType::AltiVecSpecific AltiVecSpec) { // FIXME: semantic checking! - return SemaRef.Context.getVectorType(ElementType, NumElements, - IsAltiVec, IsPixel); + return SemaRef.Context.getVectorType(ElementType, NumElements, AltiVecSpec); } template<typename Derived> @@ -6449,13 +6558,15 @@ TreeTransform<Derived>::RebuildTemplateName(NestedNameSpecifier *Qualifier, SS.setScopeRep(Qualifier); UnqualifiedId Name; Name.setIdentifier(&II, /*FIXME:*/getDerived().getBaseLocation()); - return getSema().ActOnDependentTemplateName( - /*FIXME:*/getDerived().getBaseLocation(), - SS, - Name, - ObjectType.getAsOpaquePtr(), - /*EnteringContext=*/false) - .template getAsVal<TemplateName>(); + Sema::TemplateTy Template; + getSema().ActOnDependentTemplateName(/*Scope=*/0, + /*FIXME:*/getDerived().getBaseLocation(), + SS, + Name, + ObjectType.getAsOpaquePtr(), + /*EnteringContext=*/false, + Template); + return Template.template getAsVal<TemplateName>(); } template<typename Derived> @@ -6470,13 +6581,15 @@ TreeTransform<Derived>::RebuildTemplateName(NestedNameSpecifier *Qualifier, SourceLocation SymbolLocations[3]; // FIXME: Bogus location information. 
Name.setOperatorFunctionId(/*FIXME:*/getDerived().getBaseLocation(), Operator, SymbolLocations); - return getSema().ActOnDependentTemplateName( + Sema::TemplateTy Template; + getSema().ActOnDependentTemplateName(/*Scope=*/0, /*FIXME:*/getDerived().getBaseLocation(), - SS, - Name, - ObjectType.getAsOpaquePtr(), - /*EnteringContext=*/false) - .template getAsVal<TemplateName>(); + SS, + Name, + ObjectType.getAsOpaquePtr(), + /*EnteringContext=*/false, + Template); + return Template.template getAsVal<TemplateName>(); } template<typename Derived> |