Diffstat (limited to 'contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp')
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp | 1017
1 file changed, 756 insertions(+), 261 deletions(-)
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
index bd4e553..08970fd 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
@@ -23,6 +23,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
+#include "llvm/LLVMContext.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;
@@ -134,14 +135,16 @@ void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
llvm::Value *Location,
Qualifiers Quals,
bool IsInit) {
- if (E->getType()->isAnyComplexType())
+ // FIXME: This function should take an LValue as an argument.
+ if (E->getType()->isAnyComplexType()) {
EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
- else if (hasAggregateLLVMType(E->getType()))
- EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
+ } else if (hasAggregateLLVMType(E->getType())) {
+ CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
+ EmitAggExpr(E, AggValueSlot::forAddr(Location, Alignment, Quals,
AggValueSlot::IsDestructed_t(IsInit),
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsAliased_t(!IsInit)));
- else {
+ } else {
RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
LValue LV = MakeAddrLValue(Location, E->getType());
EmitStoreThroughLValue(RV, LV);
@@ -182,7 +185,7 @@ CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
const NamedDecl *InitializedDecl) {
if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
if (VD->hasGlobalStorage()) {
- llvm::SmallString<256> Name;
+ SmallString<256> Name;
llvm::raw_svector_ostream Out(Name);
CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
Out.flush();
@@ -209,13 +212,20 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
const CXXDestructorDecl *&ReferenceTemporaryDtor,
QualType &ObjCARCReferenceLifetimeType,
const NamedDecl *InitializedDecl) {
+ // Look through single-element init lists that claim to be lvalues. They're
+ // just syntactic wrappers in this case.
+ if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E)) {
+ if (ILE->getNumInits() == 1 && ILE->isGLValue())
+ E = ILE->getInit(0);
+ }
+
// Look through expressions for materialized temporaries (for now).
if (const MaterializeTemporaryExpr *M
= dyn_cast<MaterializeTemporaryExpr>(E)) {
// Objective-C++ ARC:
// If we are binding a reference to a temporary that has ownership, we
// need to perform retain/release operations on the temporary.
- if (CGF.getContext().getLangOptions().ObjCAutoRefCount &&
+ if (CGF.getContext().getLangOpts().ObjCAutoRefCount &&
E->getType()->isObjCLifetimeType() &&
(E->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
E->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
@@ -228,29 +238,21 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
E = DAE->getExpr();
- if (const ExprWithCleanups *TE = dyn_cast<ExprWithCleanups>(E)) {
+ if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E)) {
+ CGF.enterFullExpression(EWC);
CodeGenFunction::RunCleanupsScope Scope(CGF);
- return EmitExprForReferenceBinding(CGF, TE->getSubExpr(),
+ return EmitExprForReferenceBinding(CGF, EWC->getSubExpr(),
ReferenceTemporary,
ReferenceTemporaryDtor,
ObjCARCReferenceLifetimeType,
InitializedDecl);
}
- if (const ObjCPropertyRefExpr *PRE =
- dyn_cast<ObjCPropertyRefExpr>(E->IgnoreParenImpCasts()))
- if (PRE->getGetterResultType()->isReferenceType())
- E = PRE;
-
RValue RV;
if (E->isGLValue()) {
// Emit the expression as an lvalue.
LValue LV = CGF.EmitLValue(E);
- if (LV.isPropertyRef()) {
- RV = CGF.EmitLoadOfPropertyRefLValue(LV);
- return RV.getScalarVal();
- }
if (LV.isSimple())
return LV.getAddress();
@@ -358,10 +360,11 @@ EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
!E->getType()->isAnyComplexType()) {
ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
InitializedDecl);
+ CharUnits Alignment = CGF.getContext().getTypeAlignInChars(E->getType());
AggValueSlot::IsDestructed_t isDestructed
= AggValueSlot::IsDestructed_t(InitializedDecl != 0);
- AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Qualifiers(),
- isDestructed,
+ AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Alignment,
+ Qualifiers(), isDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
}
@@ -484,21 +487,17 @@ CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
case Qualifiers::OCL_Strong: {
bool precise = VD && VD->hasAttr<ObjCPreciseLifetimeAttr>();
CleanupKind cleanupKind = getARCCleanupKind();
- // This local is a GCC and MSVC compiler workaround.
- Destroyer *destroyer = precise ? &destroyARCStrongPrecise :
- &destroyARCStrongImprecise;
pushDestroy(cleanupKind, ReferenceTemporary, ObjCARCReferenceLifetimeType,
- *destroyer, cleanupKind & EHCleanup);
+ precise ? destroyARCStrongPrecise : destroyARCStrongImprecise,
+ cleanupKind & EHCleanup);
break;
}
case Qualifiers::OCL_Weak: {
- // This local is a GCC and MSVC compiler workaround.
- Destroyer *destroyer = &destroyARCWeak;
// __weak objects always get EH cleanups; otherwise, exceptions
// could cause really nasty crashes instead of mere leaks.
pushDestroy(NormalAndEHCleanup, ReferenceTemporary,
- ObjCARCReferenceLifetimeType, *destroyer, true);
+ ObjCARCReferenceLifetimeType, destroyARCWeak, true);
break;
}
}
@@ -512,10 +511,8 @@ CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
const llvm::Constant *Elts) {
- if (isa<llvm::ConstantAggregateZero>(Elts))
- return 0;
-
- return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue();
+ return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
+ ->getZExtValue();
}
void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
@@ -643,6 +640,9 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
switch (E->getStmtClass()) {
default: return EmitUnsupportedLValue(E, "l-value expression");
+ case Expr::ObjCPropertyRefExprClass:
+ llvm_unreachable("cannot emit a property reference directly");
+
case Expr::ObjCSelectorExprClass:
return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
case Expr::ObjCIsaExprClass:
@@ -656,6 +656,7 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::CallExprClass:
case Expr::CXXMemberCallExprClass:
case Expr::CXXOperatorCallExprClass:
+ case Expr::UserDefinedLiteralClass:
return EmitCallExprLValue(cast<CallExpr>(E));
case Expr::VAArgExprClass:
return EmitVAArgExprLValue(cast<VAArgExpr>(E));
@@ -671,17 +672,28 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitStringLiteralLValue(cast<StringLiteral>(E));
case Expr::ObjCEncodeExprClass:
return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
-
- case Expr::BlockDeclRefExprClass:
- return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E));
+ case Expr::PseudoObjectExprClass:
+ return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
+ case Expr::InitListExprClass:
+ assert(cast<InitListExpr>(E)->getNumInits() == 1 &&
+ "Only single-element init list can be lvalue.");
+ return EmitLValue(cast<InitListExpr>(E)->getInit(0));
case Expr::CXXTemporaryObjectExprClass:
case Expr::CXXConstructExprClass:
return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
case Expr::CXXBindTemporaryExprClass:
return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
- case Expr::ExprWithCleanupsClass:
- return EmitExprWithCleanupsLValue(cast<ExprWithCleanups>(E));
+ case Expr::LambdaExprClass:
+ return EmitLambdaLValue(cast<LambdaExpr>(E));
+
+ case Expr::ExprWithCleanupsClass: {
+ const ExprWithCleanups *cleanups = cast<ExprWithCleanups>(E);
+ enterFullExpression(cleanups);
+ RunCleanupsScope Scope(*this);
+ return EmitLValue(cleanups->getSubExpr());
+ }
+
case Expr::CXXScalarValueInitExprClass:
return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
case Expr::CXXDefaultArgExprClass:
@@ -693,8 +705,6 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
case Expr::ObjCIvarRefExprClass:
return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
- case Expr::ObjCPropertyRefExprClass:
- return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E));
case Expr::StmtExprClass:
return EmitStmtExprLValue(cast<StmtExpr>(E));
case Expr::UnaryOperatorClass:
@@ -726,16 +736,188 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::CXXConstCastExprClass:
case Expr::ObjCBridgedCastExprClass:
return EmitCastLValue(cast<CastExpr>(E));
-
+
case Expr::MaterializeTemporaryExprClass:
return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
}
}
+/// Given an object of the given canonical type, can we safely copy a
+/// value out of it based on its initializer?
+static bool isConstantEmittableObjectType(QualType type) {
+ assert(type.isCanonical());
+ assert(!type->isReferenceType());
+
+ // Must be const-qualified but non-volatile.
+ Qualifiers qs = type.getLocalQualifiers();
+ if (!qs.hasConst() || qs.hasVolatile()) return false;
+
+ // Otherwise, all object types satisfy this except C++ classes with
+ // mutable subobjects or non-trivial copy/destroy behavior.
+ if (const RecordType *RT = dyn_cast<RecordType>(type))
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ if (RD->hasMutableFields() || !RD->isTrivial())
+ return false;
+
+ return true;
+}
+
+/// Can we constant-emit a load of a reference to a variable of the
+/// given type? This is different from predicates like
+/// Decl::isUsableInConstantExpressions because we do want it to apply
+/// in situations that don't necessarily satisfy the language's rules
+/// for this (e.g. C++'s ODR-use rules). For example, we want to able
+/// to do this with const float variables even if those variables
+/// aren't marked 'constexpr'.
+enum ConstantEmissionKind {
+ CEK_None,
+ CEK_AsReferenceOnly,
+ CEK_AsValueOrReference,
+ CEK_AsValueOnly
+};
+static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
+ type = type.getCanonicalType();
+ if (const ReferenceType *ref = dyn_cast<ReferenceType>(type)) {
+ if (isConstantEmittableObjectType(ref->getPointeeType()))
+ return CEK_AsValueOrReference;
+ return CEK_AsReferenceOnly;
+ }
+ if (isConstantEmittableObjectType(type))
+ return CEK_AsValueOnly;
+ return CEK_None;
+}
+
+/// Try to emit a reference to the given value without producing it as
+/// an l-value. This is actually more than an optimization: we can't
+/// produce an l-value for variables that we never actually captured
+/// in a block or lambda, which means const int variables or constexpr
+/// literals or similar.
+CodeGenFunction::ConstantEmission
+CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
+ ValueDecl *value = refExpr->getDecl();
+
+ // The value needs to be an enum constant or a constant variable.
+ ConstantEmissionKind CEK;
+ if (isa<ParmVarDecl>(value)) {
+ CEK = CEK_None;
+ } else if (VarDecl *var = dyn_cast<VarDecl>(value)) {
+ CEK = checkVarTypeForConstantEmission(var->getType());
+ } else if (isa<EnumConstantDecl>(value)) {
+ CEK = CEK_AsValueOnly;
+ } else {
+ CEK = CEK_None;
+ }
+ if (CEK == CEK_None) return ConstantEmission();
+
+ Expr::EvalResult result;
+ bool resultIsReference;
+ QualType resultType;
+
+ // It's best to evaluate all the way as an r-value if that's permitted.
+ if (CEK != CEK_AsReferenceOnly &&
+ refExpr->EvaluateAsRValue(result, getContext())) {
+ resultIsReference = false;
+ resultType = refExpr->getType();
+
+ // Otherwise, try to evaluate as an l-value.
+ } else if (CEK != CEK_AsValueOnly &&
+ refExpr->EvaluateAsLValue(result, getContext())) {
+ resultIsReference = true;
+ resultType = value->getType();
+
+ // Failure.
+ } else {
+ return ConstantEmission();
+ }
+
+ // In any case, if the initializer has side-effects, abandon ship.
+ if (result.HasSideEffects)
+ return ConstantEmission();
+
+ // Emit as a constant.
+ llvm::Constant *C = CGM.EmitConstantValue(result.Val, resultType, this);
+
+ // Make sure we emit a debug reference to the global variable.
+ // This should probably fire even for
+ if (isa<VarDecl>(value)) {
+ if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
+ EmitDeclRefExprDbgValue(refExpr, C);
+ } else {
+ assert(isa<EnumConstantDecl>(value));
+ EmitDeclRefExprDbgValue(refExpr, C);
+ }
+
+ // If we emitted a reference constant, we need to dereference that.
+ if (resultIsReference)
+ return ConstantEmission::forReference(C);
+
+ return ConstantEmission::forValue(C);
+}
+
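
A minimal C++ sketch of the situation tryEmitAsConstant exists for (the identifiers are illustrative, not part of the patch): a const local that is read inside a lambda without being captured has no l-value the lambda body could load from, so its value has to be folded in as a constant.

void constant_fold_sketch() {
  const int n = 4;                 // never odr-used, hence never captured
  auto g = [] { return n * 2; };   // body reads 'n'; it must fold to the constant 4
  (void)g;
}
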
llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
- lvalue.getAlignment(), lvalue.getType(),
- lvalue.getTBAAInfo());
+ lvalue.getAlignment().getQuantity(),
+ lvalue.getType(), lvalue.getTBAAInfo());
+}
+
+static bool hasBooleanRepresentation(QualType Ty) {
+ if (Ty->isBooleanType())
+ return true;
+
+ if (const EnumType *ET = Ty->getAs<EnumType>())
+ return ET->getDecl()->getIntegerType()->isBooleanType();
+
+ if (const AtomicType *AT = Ty->getAs<AtomicType>())
+ return hasBooleanRepresentation(AT->getValueType());
+
+ return false;
+}
+
+llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
+ const EnumType *ET = Ty->getAs<EnumType>();
+ bool IsRegularCPlusPlusEnum = (getLangOpts().CPlusPlus && ET &&
+ CGM.getCodeGenOpts().StrictEnums &&
+ !ET->getDecl()->isFixed());
+ bool IsBool = hasBooleanRepresentation(Ty);
+ llvm::Type *LTy;
+ if (!IsBool && !IsRegularCPlusPlusEnum)
+ return NULL;
+
+ llvm::APInt Min;
+ llvm::APInt End;
+ if (IsBool) {
+ Min = llvm::APInt(8, 0);
+ End = llvm::APInt(8, 2);
+ LTy = Int8Ty;
+ } else {
+ const EnumDecl *ED = ET->getDecl();
+ LTy = ConvertTypeForMem(ED->getIntegerType());
+ unsigned Bitwidth = LTy->getScalarSizeInBits();
+ unsigned NumNegativeBits = ED->getNumNegativeBits();
+ unsigned NumPositiveBits = ED->getNumPositiveBits();
+
+ if (NumNegativeBits) {
+ unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
+ assert(NumBits <= Bitwidth);
+ End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
+ Min = -End;
+ } else {
+ assert(NumPositiveBits <= Bitwidth);
+ End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
+ Min = llvm::APInt(Bitwidth, 0);
+ }
+ }
+
+ if (End == Min)
+ return NULL;
+
+ llvm::Value *LowAndHigh[2];
+ LowAndHigh[0] = llvm::ConstantInt::get(LTy, Min);
+ LowAndHigh[1] = llvm::ConstantInt::get(LTy, End);
+
+ llvm::LLVMContext &C = getLLVMContext();
+ llvm::MDNode *Range = llvm::MDNode::get(C, LowAndHigh);
+ return Range;
}
llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
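
A rough sketch of what the range computation above enables once the next hunk attaches it to loads at -O1 and above (the bounds follow from the code; the source names are illustrative): a non-fixed C++ enum compiled with -fstrict-enums gets a [Min, End) range on its loads, and bool gets [0, 2) on its i8 memory form.

enum Color { Red, Green, Blue };             // NumPositiveBits == 2, no negative values
int load_color(const Color &c) { return c; } // the load of 'c' carries range [0, 4)
bool load_flag(const bool &b) { return b; }  // the i8 load of 'b' carries range [0, 2)
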
@@ -748,19 +930,20 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
Load->setAlignment(Alignment);
if (TBAAInfo)
CGM.DecorateInstruction(Load, TBAAInfo);
+ // If this is an atomic type, all normal reads must be atomic
+ if (Ty->isAtomicType())
+ Load->setAtomic(llvm::SequentiallyConsistent);
- return EmitFromMemory(Load, Ty);
-}
+ if (CGM.getCodeGenOpts().OptimizationLevel > 0)
+ if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
+ Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
-static bool isBooleanUnderlyingType(QualType Ty) {
- if (const EnumType *ET = dyn_cast<EnumType>(Ty))
- return ET->getDecl()->getIntegerType()->isBooleanType();
- return false;
+ return EmitFromMemory(Load, Ty);
}
llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
// Bool has a different representation in memory than in registers.
- if (Ty->isBooleanType() || isBooleanUnderlyingType(Ty)) {
+ if (hasBooleanRepresentation(Ty)) {
// This should really always be an i1, but sometimes it's already
// an i8, and it's awkward to track those cases down.
if (Value->getType()->isIntegerTy(1))
@@ -773,7 +956,7 @@ llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
// Bool has a different representation in memory than in registers.
- if (Ty->isBooleanType() || isBooleanUnderlyingType(Ty)) {
+ if (hasBooleanRepresentation(Ty)) {
assert(Value->getType()->isIntegerTy(8) && "memory rep of bool not i8");
return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
}
@@ -784,7 +967,8 @@ llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
bool Volatile, unsigned Alignment,
QualType Ty,
- llvm::MDNode *TBAAInfo) {
+ llvm::MDNode *TBAAInfo,
+ bool isInit) {
Value = EmitToMemory(Value, Ty);
llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
@@ -792,12 +976,15 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
Store->setAlignment(Alignment);
if (TBAAInfo)
CGM.DecorateInstruction(Store, TBAAInfo);
+ if (!isInit && Ty->isAtomicType())
+ Store->setAtomic(llvm::SequentiallyConsistent);
}
-void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue) {
+void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
+ bool isInit) {
EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
- lvalue.getAlignment(), lvalue.getType(),
- lvalue.getTBAAInfo());
+ lvalue.getAlignment().getQuantity(), lvalue.getType(),
+ lvalue.getTBAAInfo(), isInit);
}
/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
@@ -821,9 +1008,10 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
}
if (LV.isVectorElt()) {
- llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
- LV.isVolatileQualified());
- return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(),
+ llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddr(),
+ LV.isVolatileQualified());
+ Load->setAlignment(LV.getAlignment().getQuantity());
+ return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
"vecext"));
}
@@ -832,11 +1020,8 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
if (LV.isExtVectorElt())
return EmitLoadOfExtVectorElementLValue(LV);
- if (LV.isBitField())
- return EmitLoadOfBitfieldLValue(LV);
-
- assert(LV.isPropertyRef() && "Unknown LValue type!");
- return EmitLoadOfPropertyRefLValue(LV);
+ assert(LV.isBitField() && "Unknown LValue type!");
+ return EmitLoadOfBitfieldLValue(LV);
}
RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
@@ -867,8 +1052,7 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
}
// Cast to the access type.
- llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(),
- AI.AccessWidth,
+ llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), AI.AccessWidth,
CGM.getContext().getTargetAddressSpace(LV.getType()));
Ptr = Builder.CreateBitCast(Ptr, PTy);
@@ -914,8 +1098,10 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
- llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
- LV.isVolatileQualified());
+ llvm::LoadInst *Load = Builder.CreateLoad(LV.getExtVectorAddr(),
+ LV.isVolatileQualified());
+ Load->setAlignment(LV.getAlignment().getQuantity());
+ llvm::Value *Vec = Load;
const llvm::Constant *Elts = LV.getExtVectorElts();
@@ -932,10 +1118,8 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
unsigned NumResultElts = ExprVT->getNumElements();
SmallVector<llvm::Constant*, 4> Mask;
- for (unsigned i = 0; i != NumResultElts; ++i) {
- unsigned InIdx = getAccessedFieldNo(i, Elts);
- Mask.push_back(llvm::ConstantInt::get(Int32Ty, InIdx));
- }
+ for (unsigned i = 0; i != NumResultElts; ++i)
+ Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts)));
llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
@@ -948,15 +1132,19 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to the have the same type, and that type
/// is 'Ty'.
-void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst) {
+void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit) {
if (!Dst.isSimple()) {
if (Dst.isVectorElt()) {
// Read/modify/write the vector, inserting the new element.
- llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(),
- Dst.isVolatileQualified());
+ llvm::LoadInst *Load = Builder.CreateLoad(Dst.getVectorAddr(),
+ Dst.isVolatileQualified());
+ Load->setAlignment(Dst.getAlignment().getQuantity());
+ llvm::Value *Vec = Load;
Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
Dst.getVectorIdx(), "vecins");
- Builder.CreateStore(Vec, Dst.getVectorAddr(),Dst.isVolatileQualified());
+ llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getVectorAddr(),
+ Dst.isVolatileQualified());
+ Store->setAlignment(Dst.getAlignment().getQuantity());
return;
}
@@ -965,11 +1153,8 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst) {
if (Dst.isExtVectorElt())
return EmitStoreThroughExtVectorComponentLValue(Src, Dst);
- if (Dst.isBitField())
- return EmitStoreThroughBitfieldLValue(Src, Dst);
-
- assert(Dst.isPropertyRef() && "Unknown LValue type");
- return EmitStoreThroughPropertyRefLValue(Src, Dst);
+ assert(Dst.isBitField() && "Unknown LValue type");
+ return EmitStoreThroughBitfieldLValue(Src, Dst);
}
// There's special magic for assigning into an ARC-qualified l-value.
@@ -1031,7 +1216,7 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst) {
}
assert(Src.isScalar() && "Can't emit an agg store with this method");
- EmitStoreOfScalar(Src.getScalarVal(), Dst);
+ EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
}
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
@@ -1045,7 +1230,7 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
// Get the source value, truncated to the width of the bit-field.
llvm::Value *SrcVal = Src.getScalarVal();
- if (Dst.getType()->isBooleanType())
+ if (hasBooleanRepresentation(Dst.getType()))
SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);
SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
@@ -1143,8 +1328,10 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
LValue Dst) {
// This access turns into a read/modify/write of the vector. Load the input
// value now.
- llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
- Dst.isVolatileQualified());
+ llvm::LoadInst *Load = Builder.CreateLoad(Dst.getExtVectorAddr(),
+ Dst.isVolatileQualified());
+ Load->setAlignment(Dst.getAlignment().getQuantity());
+ llvm::Value *Vec = Load;
const llvm::Constant *Elts = Dst.getExtVectorElts();
llvm::Value *SrcVal = Src.getScalarVal();
@@ -1158,10 +1345,8 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
// elements and restore the vector mask since it is on the side it will be
// stored.
SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
- for (unsigned i = 0; i != NumSrcElts; ++i) {
- unsigned InIdx = getAccessedFieldNo(i, Elts);
- Mask[InIdx] = llvm::ConstantInt::get(Int32Ty, i);
- }
+ for (unsigned i = 0; i != NumSrcElts; ++i)
+ Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i);
llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
Vec = Builder.CreateShuffleVector(SrcVal,
@@ -1173,11 +1358,9 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
// FIXME: since we're shuffling with undef, can we just use the indices
// into that? This could be simpler.
SmallVector<llvm::Constant*, 4> ExtMask;
- unsigned i;
- for (i = 0; i != NumSrcElts; ++i)
- ExtMask.push_back(llvm::ConstantInt::get(Int32Ty, i));
- for (; i != NumDstElts; ++i)
- ExtMask.push_back(llvm::UndefValue::get(Int32Ty));
+ for (unsigned i = 0; i != NumSrcElts; ++i)
+ ExtMask.push_back(Builder.getInt32(i));
+ ExtMask.resize(NumDstElts, llvm::UndefValue::get(Int32Ty));
llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
llvm::Value *ExtSrcVal =
Builder.CreateShuffleVector(SrcVal,
@@ -1186,13 +1369,11 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
// build identity
SmallVector<llvm::Constant*, 4> Mask;
for (unsigned i = 0; i != NumDstElts; ++i)
- Mask.push_back(llvm::ConstantInt::get(Int32Ty, i));
+ Mask.push_back(Builder.getInt32(i));
// modify when what gets shuffled in
- for (unsigned i = 0; i != NumSrcElts; ++i) {
- unsigned Idx = getAccessedFieldNo(i, Elts);
- Mask[Idx] = llvm::ConstantInt::get(Int32Ty, i+NumDstElts);
- }
+ for (unsigned i = 0; i != NumSrcElts; ++i)
+ Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts);
llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
} else {
@@ -1206,7 +1387,9 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
}
- Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified());
+ llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getExtVectorAddr(),
+ Dst.isVolatileQualified());
+ Store->setAlignment(Dst.getAlignment().getQuantity());
}
// setObjCGCLValueClass - sets class of he lvalue for the purpose of
@@ -1215,7 +1398,7 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
LValue &LV,
bool IsMemberAccess=false) {
- if (Ctx.getLangOptions().getGC() == LangOptions::NonGC)
+ if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
return;
if (isa<ObjCIvarRefExpr>(E)) {
@@ -1323,14 +1506,19 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
"Var decl must have external storage or be a file var decl!");
llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
- if (VD->getType()->isReferenceType())
- V = CGF.Builder.CreateLoad(V);
-
- V = EmitBitCastOfLValueToProperType(CGF, V,
- CGF.getTypes().ConvertTypeForMem(E->getType()));
-
- unsigned Alignment = CGF.getContext().getDeclAlign(VD).getQuantity();
- LValue LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
+ llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
+ V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
+ CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
+ QualType T = E->getType();
+ LValue LV;
+ if (VD->getType()->isReferenceType()) {
+ llvm::LoadInst *LI = CGF.Builder.CreateLoad(V);
+ LI->setAlignment(Alignment.getQuantity());
+ V = LI;
+ LV = CGF.MakeNaturalAlignAddrLValue(V, T);
+ } else {
+ LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
+ }
setObjCGCLValueClass(CGF.getContext(), E, LV);
return LV;
}
@@ -1350,13 +1538,21 @@ static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType));
}
}
- unsigned Alignment = CGF.getContext().getDeclAlign(FD).getQuantity();
+ CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
return CGF.MakeAddrLValue(V, E->getType(), Alignment);
}
LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
const NamedDecl *ND = E->getDecl();
- unsigned Alignment = getContext().getDeclAlign(ND).getQuantity();
+ CharUnits Alignment = getContext().getDeclAlign(ND);
+ QualType T = E->getType();
+
+ // FIXME: We should be able to assert this for FunctionDecls as well!
+ // FIXME: We should be able to assert this for all DeclRefExprs, not just
+ // those with a valid source location.
+ assert((ND->isUsed(false) || !isa<VarDecl>(ND) ||
+ !E->getLocation().isValid()) &&
+ "Should not use decl without marking it used!");
if (ND->hasAttr<WeakRefAttr>()) {
const ValueDecl *VD = cast<ValueDecl>(ND);
@@ -1365,30 +1561,46 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
}
if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
-
// Check if this is a global variable.
if (VD->hasExternalStorage() || VD->isFileVarDecl())
return EmitGlobalVarDeclLValue(*this, E, VD);
+ bool isBlockVariable = VD->hasAttr<BlocksAttr>();
+
bool NonGCable = VD->hasLocalStorage() &&
!VD->getType()->isReferenceType() &&
- !VD->hasAttr<BlocksAttr>();
+ !isBlockVariable;
llvm::Value *V = LocalDeclMap[VD];
if (!V && VD->isStaticLocal())
V = CGM.getStaticLocalDeclAddress(VD);
+
+ // Use special handling for lambdas.
+ if (!V) {
+ if (FieldDecl *FD = LambdaCaptureFields.lookup(VD))
+ return EmitLValueForField(CXXABIThisValue, FD, 0);
+
+ assert(isa<BlockDecl>(CurCodeDecl) && E->refersToEnclosingLocal());
+ CharUnits alignment = getContext().getDeclAlign(VD);
+ return MakeAddrLValue(GetAddrOfBlockDecl(VD, isBlockVariable),
+ E->getType(), alignment);
+ }
+
assert(V && "DeclRefExpr not entered in LocalDeclMap?");
- if (VD->hasAttr<BlocksAttr>())
+ if (isBlockVariable)
V = BuildBlockByrefAddress(V, VD);
-
- if (VD->getType()->isReferenceType())
- V = Builder.CreateLoad(V);
- V = EmitBitCastOfLValueToProperType(*this, V,
- getTypes().ConvertTypeForMem(E->getType()));
+ LValue LV;
+ if (VD->getType()->isReferenceType()) {
+ llvm::LoadInst *LI = Builder.CreateLoad(V);
+ LI->setAlignment(Alignment.getQuantity());
+ V = LI;
+ LV = MakeNaturalAlignAddrLValue(V, T);
+ } else {
+ LV = MakeAddrLValue(V, T, Alignment);
+ }
- LValue LV = MakeAddrLValue(V, E->getType(), Alignment);
if (NonGCable) {
LV.getQuals().removeObjCGCAttr();
LV.setNonGC(true);
@@ -1401,16 +1613,6 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
return EmitFunctionDeclLValue(*this, E, fn);
llvm_unreachable("Unhandled DeclRefExpr");
-
- // an invalid LValue, but the assert will
- // ensure that this point is never reached.
- return LValue();
-}
-
-LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
- unsigned Alignment =
- getContext().getDeclAlign(E->getDecl()).getQuantity();
- return MakeAddrLValue(GetAddrOfBlockDecl(E), E->getType(), Alignment);
}
LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
@@ -1425,15 +1627,15 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
QualType T = E->getSubExpr()->getType()->getPointeeType();
assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
- LValue LV = MakeAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
+ LValue LV = MakeNaturalAlignAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
// We should not generate __weak write barrier on indirect reference
// of a pointer to object; as in void foo (__weak id *param); *param = 0;
// But, we continue to generate __strong write barrier on indirect write
// into a pointer to object.
- if (getContext().getLangOptions().ObjC1 &&
- getContext().getLangOptions().getGC() != LangOptions::NonGC &&
+ if (getContext().getLangOpts().ObjC1 &&
+ getContext().getLangOpts().getGC() != LangOptions::NonGC &&
LV.isObjCWeak())
LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
return LV;
@@ -1444,9 +1646,10 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
assert(LV.isSimple() && "real/imag on non-ordinary l-value");
llvm::Value *Addr = LV.getAddress();
- // real and imag are valid on scalars. This is a faster way of
- // testing that.
- if (!cast<llvm::PointerType>(Addr->getType())
+ // __real is valid on scalars. This is a faster way of testing that.
+ // __imag can only produce an rvalue on scalars.
+ if (E->getOpcode() == UO_Real &&
+ !cast<llvm::PointerType>(Addr->getType())
->getElementType()->isStructTy()) {
assert(E->getSubExpr()->getType()->isArithmeticType());
return LV;
@@ -1588,7 +1791,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
return LValue::MakeVectorElt(LHS.getAddress(), Idx,
- E->getBase()->getType());
+ E->getBase()->getType(), LHS.getAlignment());
}
// Extend or truncate the index type to 32 or 64-bits.
@@ -1617,7 +1820,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// We know that the pointer points to a type of the correct size, unless the
// size is a VLA or Objective-C interface.
llvm::Value *Address = 0;
- unsigned ArrayAlignment = 0;
+ CharUnits ArrayAlignment;
if (const VariableArrayType *vla =
getContext().getAsVariableArrayType(E->getType())) {
// The base must be a pointer, which is not an aggregate. Emit
@@ -1632,7 +1835,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// GEP indexes are signed, and scaling an index isn't permitted to
// signed-overflow, so we use the same semantics for our explicit
// multiply. We suppress this if overflow is not undefined behavior.
- if (getLangOptions().isSignedOverflowDefined()) {
+ if (getLangOpts().isSignedOverflowDefined()) {
Idx = Builder.CreateMul(Idx, numElements);
Address = Builder.CreateGEP(Address, Idx, "arrayidx");
} else {
@@ -1667,14 +1870,14 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// Propagate the alignment from the array itself to the result.
ArrayAlignment = ArrayLV.getAlignment();
- if (getContext().getLangOptions().isSignedOverflowDefined())
+ if (getContext().getLangOpts().isSignedOverflowDefined())
Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
else
Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
} else {
// The base must be a pointer, which is not an aggregate. Emit it.
llvm::Value *Base = EmitScalarExpr(E->getBase());
- if (getContext().getLangOptions().isSignedOverflowDefined())
+ if (getContext().getLangOpts().isSignedOverflowDefined())
Address = Builder.CreateGEP(Base, Idx, "arrayidx");
else
Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
@@ -1684,17 +1887,21 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
assert(!T.isNull() &&
"CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");
+
// Limit the alignment to that of the result type.
- if (ArrayAlignment) {
- unsigned Align = getContext().getTypeAlignInChars(T).getQuantity();
+ LValue LV;
+ if (!ArrayAlignment.isZero()) {
+ CharUnits Align = getContext().getTypeAlignInChars(T);
ArrayAlignment = std::min(Align, ArrayAlignment);
+ LV = MakeAddrLValue(Address, T, ArrayAlignment);
+ } else {
+ LV = MakeNaturalAlignAddrLValue(Address, T);
}
- LValue LV = MakeAddrLValue(Address, T, ArrayAlignment);
LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());
- if (getContext().getLangOptions().ObjC1 &&
- getContext().getLangOptions().getGC() != LangOptions::NonGC) {
+ if (getContext().getLangOpts().ObjC1 &&
+ getContext().getLangOpts().getGC() != LangOptions::NonGC) {
LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
setObjCGCLValueClass(getContext(), E, LV);
}
@@ -1702,13 +1909,11 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
}
static
-llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
+llvm::Constant *GenerateConstantVector(CGBuilderTy &Builder,
SmallVector<unsigned, 4> &Elts) {
SmallVector<llvm::Constant*, 4> CElts;
-
- llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
for (unsigned i = 0, e = Elts.size(); i != e; ++i)
- CElts.push_back(llvm::ConstantInt::get(Int32Ty, Elts[i]));
+ CElts.push_back(Builder.getInt32(Elts[i]));
return llvm::ConstantVector::get(CElts);
}
@@ -1751,22 +1956,20 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
E->getEncodedElementAccess(Indices);
if (Base.isSimple()) {
- llvm::Constant *CV = GenerateConstantVector(getLLVMContext(), Indices);
- return LValue::MakeExtVectorElt(Base.getAddress(), CV, type);
+ llvm::Constant *CV = GenerateConstantVector(Builder, Indices);
+ return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
+ Base.getAlignment());
}
assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
llvm::Constant *BaseElts = Base.getExtVectorElts();
SmallVector<llvm::Constant *, 4> CElts;
- for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
- if (isa<llvm::ConstantAggregateZero>(BaseElts))
- CElts.push_back(llvm::ConstantInt::get(Int32Ty, 0));
- else
- CElts.push_back(cast<llvm::Constant>(BaseElts->getOperand(Indices[i])));
- }
+ for (unsigned i = 0, e = Indices.size(); i != e; ++i)
+ CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
llvm::Constant *CV = llvm::ConstantVector::get(CElts);
- return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type);
+ return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type,
+ Base.getAlignment());
}
LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
@@ -1847,6 +2050,7 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value *baseAddr,
const RecordDecl *rec = field->getParent();
QualType type = field->getType();
+ CharUnits alignment = getContext().getDeclAlign(field);
bool mayAlias = rec->hasAttr<MayAliasAttr>();
@@ -1863,6 +2067,7 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value *baseAddr,
if (const ReferenceType *refType = type->getAs<ReferenceType>()) {
llvm::LoadInst *load = Builder.CreateLoad(addr, "ref");
if (cvr & Qualifiers::Volatile) load->setVolatile(true);
+ load->setAlignment(alignment.getQuantity());
if (CGM.shouldUseTBAA()) {
llvm::MDNode *tbaa;
@@ -1876,6 +2081,10 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value *baseAddr,
addr = load;
mayAlias = false;
type = refType->getPointeeType();
+ if (type->isIncompleteType())
+ alignment = CharUnits();
+ else
+ alignment = getContext().getTypeAlignInChars(type);
cvr = 0; // qualifiers don't recursively apply to referencee
}
}
@@ -1891,7 +2100,6 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value *baseAddr,
if (field->hasAttr<AnnotateAttr>())
addr = EmitFieldAnnotations(field, addr);
- unsigned alignment = getContext().getDeclAlign(field).getQuantity();
LValue LV = MakeAddrLValue(addr, type, alignment);
LV.getQuals().addCVRQualifiers(cvr);
@@ -1932,11 +2140,16 @@ CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value *BaseValue,
unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
V = Builder.CreateBitCast(V, llvmType->getPointerTo(AS));
- unsigned Alignment = getContext().getDeclAlign(Field).getQuantity();
+ CharUnits Alignment = getContext().getDeclAlign(Field);
return MakeAddrLValue(V, FieldType, Alignment);
}
LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
+ if (E->isFileScope()) {
+ llvm::Value *GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
+ return MakeAddrLValue(GlobalPtr, E->getType());
+ }
+
llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
const Expr *InitExpr = E->getInitializer();
LValue Result = MakeAddrLValue(DeclPtr, E->getType());
@@ -1957,6 +2170,8 @@ EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
return EmitAggExprToLValue(expr);
}
+ OpaqueValueMapping binding(*this, expr);
+
const Expr *condExpr = expr->getCond();
bool CondExprBool;
if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
@@ -1967,8 +2182,6 @@ EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
return EmitLValue(live);
}
- OpaqueValueMapping binding(*this, expr);
-
llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
llvm::BasicBlock *contBlock = createBasicBlock("cond.end");
@@ -2020,21 +2233,11 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_Dependent:
llvm_unreachable("dependent cast kind in IR gen!");
-
- case CK_GetObjCProperty: {
- LValue LV = EmitLValue(E->getSubExpr());
- assert(LV.isPropertyRef());
- RValue RV = EmitLoadOfPropertyRefLValue(LV);
-
- // Property is an aggregate r-value.
- if (RV.isAggregate()) {
- return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
- }
-
- // Implicit property returns an l-value.
- assert(RV.isScalar());
- return MakeAddrLValue(RV.getScalarVal(), E->getSubExpr()->getType());
- }
+
+ // These two casts are currently treated as no-ops, although they could
+ // potentially be real operations depending on the target's ABI.
+ case CK_NonAtomicToAtomic:
+ case CK_AtomicToNonAtomic:
case CK_NoOp:
case CK_LValueToRValue:
@@ -2071,11 +2274,13 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_DerivedToBaseMemberPointer:
case CK_BaseToDerivedMemberPointer:
case CK_MemberPointerToBoolean:
+ case CK_ReinterpretMemberPointer:
case CK_AnyPointerToBlockPointerCast:
case CK_ARCProduceObject:
case CK_ARCConsumeObject:
case CK_ARCReclaimReturnedObject:
- case CK_ARCExtendBlockObject: {
+ case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject: {
// These casts only produce lvalues when we're binding a reference to a
// temporary realized from a (converted) pure rvalue. Emit the expression
// as a value, copy it into a temporary, and return an lvalue referring to
@@ -2163,7 +2368,7 @@ LValue CodeGenFunction::EmitNullInitializationLValue(
}
LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
- assert(e->isGLValue() || e->getType()->isRecordType());
+ assert(OpaqueValueMappingData::shouldBindAsLValue(e));
return getOpaqueLValueMapping(e);
}
@@ -2206,7 +2411,7 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
if (const CXXPseudoDestructorExpr *PseudoDtor
= dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
QualType DestroyedType = PseudoDtor->getDestroyedType();
- if (getContext().getLangOptions().ObjCAutoRefCount &&
+ if (getContext().getLangOpts().ObjCAutoRefCount &&
DestroyedType->isObjCLifetimeType() &&
(DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong ||
DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) {
@@ -2342,7 +2547,14 @@ CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
Slot.setExternallyDestructed();
EmitAggExpr(E->getSubExpr(), Slot);
- EmitCXXTemporary(E->getTemporary(), Slot.getAddr());
+ EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddr());
+ return MakeAddrLValue(Slot.getAddr(), E->getType());
+}
+
+LValue
+CodeGenFunction::EmitLambdaLValue(const LambdaExpr *E) {
+ AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
+ EmitLambdaExpr(E, Slot);
return MakeAddrLValue(Slot.getAddr(), E->getType());
}
@@ -2427,7 +2639,8 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
CallArgList Args;
EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);
- const CGFunctionInfo &FnInfo = CGM.getTypes().getFunctionInfo(Args, FnType);
+ const CGFunctionInfo &FnInfo =
+ CGM.getTypes().arrangeFunctionCall(Args, FnType);
// C99 6.5.2.2p6:
// If the expression that denotes the called function has a type
@@ -2446,11 +2659,8 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
// through an unprototyped function type works like a *non-variadic*
// call. The way we make this work is to cast to the exact type
// of the promoted arguments.
- if (isa<FunctionNoProtoType>(FnType) &&
- !getTargetHooks().isNoProtoCallVariadic(FnType->getCallConv())) {
- assert(cast<llvm::FunctionType>(Callee->getType()->getContainedType(0))
- ->isVarArg());
- llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo, false);
+ if (isa<FunctionNoProtoType>(FnType) && !FnInfo.isVariadic()) {
+ llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
CalleeTy = CalleeTy->getPointerTo();
Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");
}
@@ -2481,7 +2691,17 @@ static void
EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
- if (E->isCmpXChg()) {
+ llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
+ llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
+
+ switch (E->getOp()) {
+ case AtomicExpr::AO__c11_atomic_init:
+ llvm_unreachable("Already handled!");
+
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__atomic_compare_exchange:
+ case AtomicExpr::AO__atomic_compare_exchange_n: {
// Note that cmpxchg only supports specifying one ordering and
// doesn't support weak cmpxchg, at least at the moment.
llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
@@ -2498,7 +2718,9 @@ EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
return;
}
- if (E->getOp() == AtomicExpr::Load) {
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ case AtomicExpr::AO__atomic_load: {
llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
Load->setAtomic(Order);
Load->setAlignment(Size);
@@ -2508,7 +2730,9 @@ EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
return;
}
- if (E->getOp() == AtomicExpr::Store) {
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__atomic_store_n: {
assert(!Dest && "Store does not return a value");
llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
LoadVal1->setAlignment(Align);
@@ -2519,25 +2743,74 @@ EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
return;
}
- llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
- switch (E->getOp()) {
- case AtomicExpr::CmpXchgWeak:
- case AtomicExpr::CmpXchgStrong:
- case AtomicExpr::Store:
- case AtomicExpr::Load: assert(0 && "Already handled!");
- case AtomicExpr::Add: Op = llvm::AtomicRMWInst::Add; break;
- case AtomicExpr::Sub: Op = llvm::AtomicRMWInst::Sub; break;
- case AtomicExpr::And: Op = llvm::AtomicRMWInst::And; break;
- case AtomicExpr::Or: Op = llvm::AtomicRMWInst::Or; break;
- case AtomicExpr::Xor: Op = llvm::AtomicRMWInst::Xor; break;
- case AtomicExpr::Xchg: Op = llvm::AtomicRMWInst::Xchg; break;
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__atomic_exchange:
+ Op = llvm::AtomicRMWInst::Xchg;
+ break;
+
+ case AtomicExpr::AO__atomic_add_fetch:
+ PostOp = llvm::Instruction::Add;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_add:
+ Op = llvm::AtomicRMWInst::Add;
+ break;
+
+ case AtomicExpr::AO__atomic_sub_fetch:
+ PostOp = llvm::Instruction::Sub;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ Op = llvm::AtomicRMWInst::Sub;
+ break;
+
+ case AtomicExpr::AO__atomic_and_fetch:
+ PostOp = llvm::Instruction::And;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_and:
+ Op = llvm::AtomicRMWInst::And;
+ break;
+
+ case AtomicExpr::AO__atomic_or_fetch:
+ PostOp = llvm::Instruction::Or;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_or:
+ Op = llvm::AtomicRMWInst::Or;
+ break;
+
+ case AtomicExpr::AO__atomic_xor_fetch:
+ PostOp = llvm::Instruction::Xor;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ Op = llvm::AtomicRMWInst::Xor;
+ break;
+
+ case AtomicExpr::AO__atomic_nand_fetch:
+ PostOp = llvm::Instruction::And;
+ // Fall through.
+ case AtomicExpr::AO__atomic_fetch_nand:
+ Op = llvm::AtomicRMWInst::Nand;
+ break;
}
+
llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
LoadVal1->setAlignment(Align);
llvm::AtomicRMWInst *RMWI =
CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
RMWI->setVolatile(E->isVolatile());
- llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(RMWI, Dest);
+
+ // For __atomic_*_fetch operations, perform the operation again to
+ // determine the value which was written.
+ llvm::Value *Result = RMWI;
+ if (PostOp)
+ Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
+ if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
+ Result = CGF.Builder.CreateNot(Result);
+ llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
StoreDest->setAlignment(Align);
}
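
In source terms, the PostOp handling above is the difference between the GNU fetch-op and op-fetch builtins; a small sketch using the standard __atomic builtins (not code from this patch):

void fetch_vs_op_fetch(int &counter) {
  int old = __atomic_fetch_add(&counter, 1, __ATOMIC_SEQ_CST);  // value before the add
  int now = __atomic_add_fetch(&counter, 1, __ATOMIC_SEQ_CST);  // value after the add:
                                                                // the atomicrmw result plus 1
  // __atomic_nand_fetch(&x, m, order) computes ~(x & m), hence the extra CreateNot above.
  (void)old; (void)now;
}
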
@@ -2562,7 +2835,9 @@ static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty,
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
- QualType MemTy = AtomicTy->getAs<AtomicType>()->getValueType();
+ QualType MemTy = AtomicTy;
+ if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
+ MemTy = AT->getValueType();
CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
uint64_t Size = sizeChars.getQuantity();
CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
@@ -2571,84 +2846,202 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
getContext().getTargetInfo().getMaxAtomicInlineWidth();
bool UseLibcall = (Size != Align || Size > MaxInlineWidth);
+
+
llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
Ptr = EmitScalarExpr(E->getPtr());
+
+ if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
+ assert(!Dest && "Init does not return a value");
+ if (!hasAggregateLLVMType(E->getVal1()->getType())) {
+ QualType PointeeType
+ = E->getPtr()->getType()->getAs<PointerType>()->getPointeeType();
+ EmitScalarInit(EmitScalarExpr(E->getVal1()),
+ LValue::MakeAddr(Ptr, PointeeType, alignChars,
+ getContext()));
+ } else if (E->getType()->isAnyComplexType()) {
+ EmitComplexExprIntoAddr(E->getVal1(), Ptr, E->isVolatile());
+ } else {
+ AggValueSlot Slot = AggValueSlot::forAddr(Ptr, alignChars,
+ AtomicTy.getQualifiers(),
+ AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+ EmitAggExpr(E->getVal1(), Slot);
+ }
+ return RValue::get(0);
+ }
+
Order = EmitScalarExpr(E->getOrder());
- if (E->isCmpXChg()) {
+
+ switch (E->getOp()) {
+ case AtomicExpr::AO__c11_atomic_init:
+ llvm_unreachable("Already handled!");
+
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ break;
+
+ case AtomicExpr::AO__atomic_load:
+ Dest = EmitScalarExpr(E->getVal1());
+ break;
+
+ case AtomicExpr::AO__atomic_store:
+ Val1 = EmitScalarExpr(E->getVal1());
+ break;
+
+ case AtomicExpr::AO__atomic_exchange:
Val1 = EmitScalarExpr(E->getVal1());
- Val2 = EmitValToTemp(*this, E->getVal2());
+ Dest = EmitScalarExpr(E->getVal2());
+ break;
+
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
+ case AtomicExpr::AO__atomic_compare_exchange:
+ Val1 = EmitScalarExpr(E->getVal1());
+ if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
+ Val2 = EmitScalarExpr(E->getVal2());
+ else
+ Val2 = EmitValToTemp(*this, E->getVal2());
OrderFail = EmitScalarExpr(E->getOrderFail());
- (void)OrderFail; // OrderFail is unused at the moment
- } else if ((E->getOp() == AtomicExpr::Add || E->getOp() == AtomicExpr::Sub) &&
- MemTy->isPointerType()) {
- // For pointers, we're required to do a bit of math: adding 1 to an int*
- // is not the same as adding 1 to a uintptr_t.
- QualType Val1Ty = E->getVal1()->getType();
- llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
- CharUnits PointeeIncAmt =
- getContext().getTypeSizeInChars(MemTy->getPointeeType());
- Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
- Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
- EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
- } else if (E->getOp() != AtomicExpr::Load) {
+ // Evaluate and discard the 'weak' argument.
+ if (E->getNumSubExprs() == 6)
+ EmitScalarExpr(E->getWeak());
+ break;
+
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ if (MemTy->isPointerType()) {
+ // For pointer arithmetic, we're required to do a bit of math:
+ // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
+ // ... but only for the C11 builtins. The GNU builtins expect the
+ // user to multiply by sizeof(T).
+ QualType Val1Ty = E->getVal1()->getType();
+ llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
+ CharUnits PointeeIncAmt =
+ getContext().getTypeSizeInChars(MemTy->getPointeeType());
+ Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
+ Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
+ EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
+ break;
+ }
+ // Fall through.
+ case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__atomic_sub_fetch:
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__atomic_store_n:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_nand:
+ case AtomicExpr::AO__atomic_and_fetch:
+ case AtomicExpr::AO__atomic_or_fetch:
+ case AtomicExpr::AO__atomic_xor_fetch:
+ case AtomicExpr::AO__atomic_nand_fetch:
Val1 = EmitValToTemp(*this, E->getVal1());
+ break;
}
- if (E->getOp() != AtomicExpr::Store && !Dest)
+ if (!E->getType()->isVoidType() && !Dest)
Dest = CreateMemTemp(E->getType(), ".atomicdst");
+ // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
if (UseLibcall) {
- // FIXME: Finalize what the libcalls are actually supposed to look like.
- // See also http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
- return EmitUnsupportedRValue(E, "atomic library call");
- }
-#if 0
- if (UseLibcall) {
- const char* LibCallName;
- switch (E->getOp()) {
- case AtomicExpr::CmpXchgWeak:
- LibCallName = "__atomic_compare_exchange_generic"; break;
- case AtomicExpr::CmpXchgStrong:
- LibCallName = "__atomic_compare_exchange_generic"; break;
- case AtomicExpr::Add: LibCallName = "__atomic_fetch_add_generic"; break;
- case AtomicExpr::Sub: LibCallName = "__atomic_fetch_sub_generic"; break;
- case AtomicExpr::And: LibCallName = "__atomic_fetch_and_generic"; break;
- case AtomicExpr::Or: LibCallName = "__atomic_fetch_or_generic"; break;
- case AtomicExpr::Xor: LibCallName = "__atomic_fetch_xor_generic"; break;
- case AtomicExpr::Xchg: LibCallName = "__atomic_exchange_generic"; break;
- case AtomicExpr::Store: LibCallName = "__atomic_store_generic"; break;
- case AtomicExpr::Load: LibCallName = "__atomic_load_generic"; break;
- }
- llvm::SmallVector<QualType, 4> Params;
+
+ llvm::SmallVector<QualType, 5> Params;
CallArgList Args;
- QualType RetTy = getContext().VoidTy;
- if (E->getOp() != AtomicExpr::Store && !E->isCmpXChg())
- Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
- getContext().VoidPtrTy);
+ // Size is always the first parameter
+ Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
+ getContext().getSizeType());
+ // Atomic address is always the second parameter
Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
getContext().VoidPtrTy);
- if (E->getOp() != AtomicExpr::Load)
+
+ const char* LibCallName;
+ QualType RetTy = getContext().VoidTy;
+ switch (E->getOp()) {
+ // There is only one libcall for compare an exchange, because there is no
+ // optimisation benefit possible from a libcall version of a weak compare
+ // and exchange.
+ // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
+ // void *desired, int success, int failure)
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__atomic_compare_exchange:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
+ LibCallName = "__atomic_compare_exchange";
+ RetTy = getContext().BoolTy;
Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
getContext().VoidPtrTy);
- if (E->isCmpXChg()) {
Args.add(RValue::get(EmitCastToVoidPtr(Val2)),
getContext().VoidPtrTy);
- RetTy = getContext().IntTy;
+ Args.add(RValue::get(Order),
+ getContext().IntTy);
+ Order = OrderFail;
+ break;
+ // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
+ // int order)
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__atomic_exchange:
+ LibCallName = "__atomic_exchange";
+ Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
+ getContext().VoidPtrTy);
+ Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
+ getContext().VoidPtrTy);
+ break;
+ // void __atomic_store(size_t size, void *mem, void *val, int order)
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__atomic_store_n:
+ LibCallName = "__atomic_store";
+ Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
+ getContext().VoidPtrTy);
+ break;
+ // void __atomic_load(size_t size, void *mem, void *return, int order)
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ LibCallName = "__atomic_load";
+ Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
+ getContext().VoidPtrTy);
+ break;
+#if 0
+ // These are only defined for 1-16 byte integers. It is not clear what
+ // their semantics would be on anything else...
+ case AtomicExpr::Add: LibCallName = "__atomic_fetch_add_generic"; break;
+ case AtomicExpr::Sub: LibCallName = "__atomic_fetch_sub_generic"; break;
+ case AtomicExpr::And: LibCallName = "__atomic_fetch_and_generic"; break;
+ case AtomicExpr::Or: LibCallName = "__atomic_fetch_or_generic"; break;
+ case AtomicExpr::Xor: LibCallName = "__atomic_fetch_xor_generic"; break;
+#endif
+ default: return EmitUnsupportedRValue(E, "atomic library call");
}
- Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
- getContext().getSizeType());
+  // Order is always the last parameter
+ Args.add(RValue::get(Order),
+ getContext().IntTy);
+
const CGFunctionInfo &FuncInfo =
- CGM.getTypes().getFunctionInfo(RetTy, Args, FunctionType::ExtInfo());
- llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo, false);
+ CGM.getTypes().arrangeFunctionCall(RetTy, Args,
+ FunctionType::ExtInfo(), RequiredArgs::All);
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
if (E->isCmpXChg())
return Res;
- if (E->getOp() == AtomicExpr::Store)
+ if (E->getType()->isVoidType())
return RValue::get(0);
return ConvertTempToRValue(*this, E->getType(), Dest);
}
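For orientation, here is a minimal standalone sketch of the argument marshalling this branch produces: size first, object address second, value pointers in the middle, ordering last, exactly as the CallArgList above is assembled. A hypothetical stub stands in for libatomic's real __atomic_compare_exchange, and the numeric order values follow the __ATOMIC_* encoding (5 = seq_cst, 2 = acquire); both are illustrative only, not the emitted runtime call.

#include <cstddef>
#include <cstring>
#include <cstdio>

// Hypothetical stand-in for libatomic's generic entry point; the real symbol
// is resolved via CGM.CreateRuntimeFunction above. Signature follows the
// comment in the switch:
//   bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
//                                  void *desired, int success, int failure)
static bool fake_atomic_compare_exchange(std::size_t size, void *obj,
                                         void *expected, void *desired,
                                         int /*success*/, int /*failure*/) {
  if (std::memcmp(obj, expected, size) == 0) {
    std::memcpy(obj, desired, size);
    return true;
  }
  std::memcpy(expected, obj, size);
  return false;
}

struct Big { long v[4]; };   // too wide for a native cmpxchg on most targets

int main() {
  Big obj = {{1, 2, 3, 4}}, expected = obj, desired = {{5, 6, 7, 8}};
  // Size first, atomic address second, order(s) last -- the same marshalling
  // order as the libcall branch above builds.
  bool ok = fake_atomic_compare_exchange(sizeof(Big), &obj, &expected,
                                         &desired, /*success*/5, /*failure*/2);
  std::printf("exchanged: %d, obj.v[0] = %ld\n", ok, obj.v[0]);
}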
-#endif
+
llvm::Type *IPtrTy =
llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
llvm::Value *OrigDest = Dest;
@@ -2684,24 +3077,31 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
default: // invalid order
// We should not ever get here normally, but it's hard to
// enforce that in general.
- break;
+ break;
}
- if (E->getOp() == AtomicExpr::Store)
+ if (E->getType()->isVoidType())
return RValue::get(0);
return ConvertTempToRValue(*this, E->getType(), OrigDest);
}
// Long case, when Order isn't obviously constant.
+ bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
+ E->getOp() == AtomicExpr::AO__atomic_store ||
+ E->getOp() == AtomicExpr::AO__atomic_store_n;
+ bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
+ E->getOp() == AtomicExpr::AO__atomic_load ||
+ E->getOp() == AtomicExpr::AO__atomic_load_n;
+
// Create all the relevant BB's
llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
*AcqRelBB = 0, *SeqCstBB = 0;
MonotonicBB = createBasicBlock("monotonic", CurFn);
- if (E->getOp() != AtomicExpr::Store)
+ if (!IsStore)
AcquireBB = createBasicBlock("acquire", CurFn);
- if (E->getOp() != AtomicExpr::Load)
+ if (!IsLoad)
ReleaseBB = createBasicBlock("release", CurFn);
- if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store)
+ if (!IsLoad && !IsStore)
AcqRelBB = createBasicBlock("acqrel", CurFn);
SeqCstBB = createBasicBlock("seqcst", CurFn);
llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
@@ -2718,7 +3118,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::Monotonic);
Builder.CreateBr(ContBB);
- if (E->getOp() != AtomicExpr::Store) {
+ if (!IsStore) {
Builder.SetInsertPoint(AcquireBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::Acquire);
@@ -2726,14 +3126,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
SI->addCase(Builder.getInt32(1), AcquireBB);
SI->addCase(Builder.getInt32(2), AcquireBB);
}
- if (E->getOp() != AtomicExpr::Load) {
+ if (!IsLoad) {
Builder.SetInsertPoint(ReleaseBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::Release);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(3), ReleaseBB);
}
- if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store) {
+ if (!IsLoad && !IsStore) {
Builder.SetInsertPoint(AcqRelBB);
EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
llvm::AcquireRelease);
@@ -2748,7 +3148,102 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
// Cleanup and return
Builder.SetInsertPoint(ContBB);
- if (E->getOp() == AtomicExpr::Store)
+ if (E->getType()->isVoidType())
return RValue::get(0);
return ConvertTempToRValue(*this, E->getType(), OrigDest);
}
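The basic-block dispatch built above has a close source-level analogue: when the ordering is only known at run time, the operation is effectively selected by a switch over the order value. Below is a minimal sketch using the __atomic_load_n builtin, covering the load case only; release and acquire-release branches are skipped for loads just as the IsLoad checks do, and unrecognized orders fall back to monotonic like the switch's default target.

#include <cstdio>

// Hedged sketch mirroring the monotonic/acquire/seqcst blocks above for a
// load whose memory order is not a compile-time constant.
static int load_with_dynamic_order(const int *p, int order) {
  switch (order) {
  case __ATOMIC_CONSUME:
  case __ATOMIC_ACQUIRE:
    return __atomic_load_n(p, __ATOMIC_ACQUIRE);   // cases 1 and 2 -> AcquireBB
  case __ATOMIC_SEQ_CST:
    return __atomic_load_n(p, __ATOMIC_SEQ_CST);   // case 5 -> SeqCstBB
  default: // relaxed, or an invalid order: treated as monotonic, as above
    return __atomic_load_n(p, __ATOMIC_RELAXED);
  }
}

int main() {
  int x = 42;
  std::printf("%d\n", load_with_dynamic_order(&x, __ATOMIC_ACQUIRE));
}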
+
+void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
+ assert(Val->getType()->isFPOrFPVectorTy());
+ if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
+ return;
+
+ llvm::Value *ULPs = llvm::ConstantFP::get(Builder.getFloatTy(), Accuracy);
+ llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(), ULPs);
+
+ cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpaccuracy,
+ Node);
+}
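The Accuracy argument is expressed in ULPs (units in the last place) of the result, so a value such as 2.5 permits the tagged instruction to deviate from the exactly rounded answer by up to two and a half representable steps. A small standalone illustration of what that bound means numerically follows; it is plain C++ with no LLVM dependency, and the 2.5 figure is only an example bound, e.g. what OpenCL tolerates for single-precision division.

#include <cmath>
#include <cstdio>

int main() {
  float x = 1.0f / 3.0f;
  // One ULP at x: distance to the next representable float above x.
  float ulp = std::nextafterf(x, 2.0f) - x;
  // An instruction tagged with SetFPAccuracy(V, 2.5) may deviate from the
  // exactly rounded result by at most this much:
  float allowed = 2.5f * ulp;
  std::printf("x = %.9g, 1 ulp = %.3g, 2.5 ulp bound = %.3g\n",
              (double)x, (double)ulp, (double)allowed);
}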
+
+namespace {
+ struct LValueOrRValue {
+ LValue LV;
+ RValue RV;
+ };
+}
+
+static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
+ const PseudoObjectExpr *E,
+ bool forLValue,
+ AggValueSlot slot) {
+ llvm::SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
+
+ // Find the result expression, if any.
+ const Expr *resultExpr = E->getResultExpr();
+ LValueOrRValue result;
+
+ for (PseudoObjectExpr::const_semantics_iterator
+ i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
+ const Expr *semantic = *i;
+
+ // If this semantic expression is an opaque value, bind it
+ // to the result of its source expression.
+ if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
+
+ // If this is the result expression, we may need to evaluate
+ // directly into the slot.
+ typedef CodeGenFunction::OpaqueValueMappingData OVMA;
+ OVMA opaqueData;
+ if (ov == resultExpr && ov->isRValue() && !forLValue &&
+ CodeGenFunction::hasAggregateLLVMType(ov->getType()) &&
+ !ov->getType()->isAnyComplexType()) {
+ CGF.EmitAggExpr(ov->getSourceExpr(), slot);
+
+ LValue LV = CGF.MakeAddrLValue(slot.getAddr(), ov->getType());
+ opaqueData = OVMA::bind(CGF, ov, LV);
+ result.RV = slot.asRValue();
+
+ // Otherwise, emit as normal.
+ } else {
+ opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
+
+ // If this is the result, also evaluate the result now.
+ if (ov == resultExpr) {
+ if (forLValue)
+ result.LV = CGF.EmitLValue(ov);
+ else
+ result.RV = CGF.EmitAnyExpr(ov, slot);
+ }
+ }
+
+ opaques.push_back(opaqueData);
+
+ // Otherwise, if the expression is the result, evaluate it
+ // and remember the result.
+ } else if (semantic == resultExpr) {
+ if (forLValue)
+ result.LV = CGF.EmitLValue(semantic);
+ else
+ result.RV = CGF.EmitAnyExpr(semantic, slot);
+
+ // Otherwise, evaluate the expression in an ignored context.
+ } else {
+ CGF.EmitIgnoredExpr(semantic);
+ }
+ }
+
+ // Unbind all the opaques now.
+ for (unsigned i = 0, e = opaques.size(); i != e; ++i)
+ opaques[i].unbind(CGF);
+
+ return result;
+}
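The opaque-value bookkeeping above exists so that subexpressions shared between the syntactic and semantic forms of a pseudo-object expression, such as the base of an Objective-C property in a compound assignment, are evaluated exactly once. The following is a small C++ analogy of that bind/reuse pattern; the Widget and receiver names are hypothetical, and it only models the single-evaluation guarantee that OVMA::bind/unbind provide, not the actual Objective-C rewrite.

#include <cstdio>

// Hypothetical property-bearing object reached through a getter/setter pair,
// standing in for the semantic form of a property assignment.
struct Widget {
  int value = 0;
  int  get() const { return value; }
  void set(int v)  { value = v; }
};

static int calls = 0;
static Widget &receiver(Widget &w) {   // side-effecting "base" expression
  ++calls;
  return w;
}

int main() {
  Widget w;
  // Semantic expansion of "receiver(w).prop += 2" with an opaque value:
  // the base is evaluated once, bound, then reused by getter and setter,
  // which is what the opaque-value mapping above arranges.
  Widget &ov = receiver(w);     // bind the opaque value
  ov.set(ov.get() + 2);         // getter, compute, setter all reuse it
  std::printf("value = %d, base evaluated %d time(s)\n", w.value, calls);
}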
+
+RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
+ AggValueSlot slot) {
+ return emitPseudoObjectExpr(*this, E, false, slot).RV;
+}
+
+LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
+ return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
+}