Diffstat (limited to 'contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp')
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp | 85
1 file changed, 50 insertions(+), 35 deletions(-)
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
index 272baac..d86534d 100644
--- a/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
@@ -698,6 +698,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
     std::pair<llvm::Value*, unsigned> Dest =
         EmitPointerWithAlignment(E->getArg(0));
     Value *SizeVal = EmitScalarExpr(E->getArg(1));
+    EmitNonNullArgCheck(RValue::get(Dest.first), E->getArg(0)->getType(),
+                        E->getArg(0)->getExprLoc(), FD, 0);
     Builder.CreateMemSet(Dest.first, Builder.getInt8(0), SizeVal,
                          Dest.second, false);
     return RValue::get(Dest.first);
@@ -710,6 +712,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
         EmitPointerWithAlignment(E->getArg(1));
     Value *SizeVal = EmitScalarExpr(E->getArg(2));
     unsigned Align = std::min(Dest.second, Src.second);
+    EmitNonNullArgCheck(RValue::get(Dest.first), E->getArg(0)->getType(),
+                        E->getArg(0)->getExprLoc(), FD, 0);
+    EmitNonNullArgCheck(RValue::get(Src.first), E->getArg(1)->getType(),
+                        E->getArg(1)->getExprLoc(), FD, 1);
     Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
     return RValue::get(Dest.first);
   }
@@ -767,6 +773,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
         EmitPointerWithAlignment(E->getArg(1));
     Value *SizeVal = EmitScalarExpr(E->getArg(2));
     unsigned Align = std::min(Dest.second, Src.second);
+    EmitNonNullArgCheck(RValue::get(Dest.first), E->getArg(0)->getType(),
+                        E->getArg(0)->getExprLoc(), FD, 0);
+    EmitNonNullArgCheck(RValue::get(Src.first), E->getArg(1)->getType(),
+                        E->getArg(1)->getExprLoc(), FD, 1);
     Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
     return RValue::get(Dest.first);
   }
@@ -777,6 +787,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
     Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                          Builder.getInt8Ty());
     Value *SizeVal = EmitScalarExpr(E->getArg(2));
+    EmitNonNullArgCheck(RValue::get(Dest.first), E->getArg(0)->getType(),
+                        E->getArg(0)->getExprLoc(), FD, 0);
     Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
     return RValue::get(Dest.first);
   }
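
The first four hunks make the same change for __builtin_bzero, __builtin_memcpy, __builtin_memmove, and __builtin_memset: each pointer operand is now passed through EmitNonNullArgCheck before the memory intrinsic is emitted, so UBSan can diagnose a null pointer reaching the builtin at run time instead of the call being silent undefined behavior. A minimal sketch of the kind of caller this affects (a hypothetical test case, assuming the builtin's declaration carries the nonnull attribute and the program is built with -fsanitize=nonnull-attribute):

    // Hypothetical test, not part of this patch.
    // Build: clang++ -fsanitize=nonnull-attribute null_memcpy.cpp
    #include <cstring>

    int main() {
      char buf[8] = {};
      const char *src = nullptr;  // null source pointer
      // With this change, clang emits nonnull checks for both pointer
      // arguments before the llvm.memcpy intrinsic, so the sanitizer
      // reports the null argument here.
      std::memcpy(buf, src, sizeof(buf));
      return 0;
    }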
@@ -3479,6 +3491,13 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
     }
   }

+  // Find out if any arguments are required to be integer constant
+  // expressions.
+  unsigned ICEArguments = 0;
+  ASTContext::GetBuiltinTypeError Error;
+  getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
+  assert(Error == ASTContext::GE_None && "Should not codegen an error");
+
   SmallVector<Value*, 4> Ops;
   llvm::Value *Align = nullptr;
   for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
@@ -3541,7 +3560,17 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
         continue;
       }
     }
-    Ops.push_back(EmitScalarExpr(E->getArg(i)));
+
+    if ((ICEArguments & (1 << i)) == 0) {
+      Ops.push_back(EmitScalarExpr(E->getArg(i)));
+    } else {
+      // If this is required to be a constant, constant fold it so that we know
+      // that the generated intrinsic gets a ConstantInt.
+      llvm::APSInt Result;
+      bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
+      assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
+      Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
+    }
   }

   switch (BuiltinID) {
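
ICEArguments, as filled in by GetBuiltinType, is a bitmask with bit i set when argument i of the builtin must be an integer constant expression; the loop above folds exactly those arguments to a ConstantInt and emits everything else as an ordinary scalar expression. A standalone sketch of that bitmask convention (the helper name below is illustrative, not clang API):

    #include <cstdio>

    // Bit ArgNo of ICEArguments is set when argument ArgNo must be an
    // integer constant expression ("ICE").
    static bool mustBeConstant(unsigned ICEArguments, unsigned ArgNo) {
      return (ICEArguments & (1u << ArgNo)) != 0;
    }

    int main() {
      unsigned ICEArguments = 0x4;  // only argument 2 must be constant
      for (unsigned i = 0; i < 4; ++i)
        std::printf("arg %u: %s\n", i,
                    mustBeConstant(ICEArguments, i)
                        ? "constant fold to ConstantInt"
                        : "EmitScalarExpr");
      return 0;
    }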
@@ -4001,38 +4030,6 @@ Value *CodeGenFunction::vectorWrapScalar8(Value *Op) {
   return Op;
 }

-Value *CodeGenFunction::
-emitVectorWrappedScalar8Intrinsic(unsigned Int, SmallVectorImpl<Value*> &Ops,
-                                  const char *Name) {
-  // i8 is not a legal types for AArch64, so we can't just use
-  // a normal overloaded intrinsic call for these scalar types. Instead
-  // we'll build 64-bit vectors w/ lane zero being our input values and
-  // perform the operation on that. The back end can pattern match directly
-  // to the scalar instruction.
-  Ops[0] = vectorWrapScalar8(Ops[0]);
-  Ops[1] = vectorWrapScalar8(Ops[1]);
-  llvm::Type *VTy = llvm::VectorType::get(Int8Ty, 8);
-  Value *V = EmitNeonCall(CGM.getIntrinsic(Int, VTy), Ops, Name);
-  Constant *CI = ConstantInt::get(SizeTy, 0);
-  return Builder.CreateExtractElement(V, CI, "lane0");
-}
-
-Value *CodeGenFunction::
-emitVectorWrappedScalar16Intrinsic(unsigned Int, SmallVectorImpl<Value*> &Ops,
-                                   const char *Name) {
-  // i16 is not a legal types for AArch64, so we can't just use
-  // a normal overloaded intrinsic call for these scalar types. Instead
-  // we'll build 64-bit vectors w/ lane zero being our input values and
-  // perform the operation on that. The back end can pattern match directly
-  // to the scalar instruction.
-  Ops[0] = vectorWrapScalar16(Ops[0]);
-  Ops[1] = vectorWrapScalar16(Ops[1]);
-  llvm::Type *VTy = llvm::VectorType::get(Int16Ty, 4);
-  Value *V = EmitNeonCall(CGM.getIntrinsic(Int, VTy), Ops, Name);
-  Constant *CI = ConstantInt::get(SizeTy, 0);
-  return Builder.CreateExtractElement(V, CI, "lane0");
-}
-
 Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
                                                const CallExpr *E) {
   unsigned HintID = static_cast<unsigned>(-1);
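
The two deleted helpers wrapped an i8 or i16 scalar operation in a 64-bit vector because those scalar types are not legal on AArch64: put the inputs in lane 0, run the vector intrinsic, and extract lane 0 again, leaving the backend to pattern-match the result back to a scalar instruction. The wrap/operate/extract shape itself, sketched with IRBuilder outside of CodeGenFunction (illustrative only; a plain vector add stands in for the target intrinsic):

    #include "llvm/IR/IRBuilder.h"

    using namespace llvm;

    Value *wrappedScalarAdd8(IRBuilder<> &B, Value *A0, Value *A1) {
      // Build <8 x i8> vectors with the scalars in lane 0, as
      // vectorWrapScalar8 does; the remaining lanes stay undef.
      Type *VTy = VectorType::get(B.getInt8Ty(), 8);
      Value *V0 = B.CreateInsertElement(UndefValue::get(VTy), A0, B.getInt64(0));
      Value *V1 = B.CreateInsertElement(UndefValue::get(VTy), A1, B.getInt64(0));
      // Operate on the whole vector...
      Value *V = B.CreateAdd(V0, V1);
      // ...and take lane 0 back out as the scalar result.
      return B.CreateExtractElement(V, B.getInt64(0), "lane0");
    }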
@@ -4242,9 +4239,27 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
     return Builder.CreateCall(F, {Arg0, Arg1});
   }

+  // Find out if any arguments are required to be integer constant
+  // expressions.
+  unsigned ICEArguments = 0;
+  ASTContext::GetBuiltinTypeError Error;
+  getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
+  assert(Error == ASTContext::GE_None && "Should not codegen an error");
+
   llvm::SmallVector<Value*, 4> Ops;
-  for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++)
-    Ops.push_back(EmitScalarExpr(E->getArg(i)));
+  for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
+    if ((ICEArguments & (1 << i)) == 0) {
+      Ops.push_back(EmitScalarExpr(E->getArg(i)));
+    } else {
+      // If this is required to be a constant, constant fold it so that we know
+      // that the generated intrinsic gets a ConstantInt.
+      llvm::APSInt Result;
+      bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
+      assert(IsConst && "Constant arg isn't actually constant?");
+      (void)IsConst;
+      Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
+    }
+  }

   auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap);
   const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
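
This AArch64 hunk mirrors the ARM change above. At the source level, the arguments this machinery covers are things like NEON lane indices, which must be integer constant expressions; Sema rejects non-constant values before codegen runs, so the fold here is always safe. An illustrative use (standard arm_neon.h, not from this patch):

    #include <arm_neon.h>

    int32_t lane1(int32x4_t v) {
      // The lane index must be an integer constant expression, so by
      // the time codegen runs it can be folded straight to a
      // ConstantInt for the intrinsic call.
      return vgetq_lane_s32(v, 1);
      // int i = 1; vgetq_lane_s32(v, i);  // rejected: lane must be constant
    }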