Diffstat (limited to 'lib/CodeGen/CGBuiltin.cpp')
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp | 495
1 file changed, 282 insertions(+), 213 deletions(-)
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 1566bd9e6697..ec0ca424220e 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -27,24 +27,34 @@ using namespace clang;
using namespace CodeGen;
using namespace llvm;
-static void EmitMemoryBarrier(CodeGenFunction &CGF,
- bool LoadLoad, bool LoadStore,
- bool StoreLoad, bool StoreStore,
- bool Device) {
- Value *True = CGF.Builder.getTrue();
- Value *False = CGF.Builder.getFalse();
- Value *C[5] = { LoadLoad ? True : False,
- LoadStore ? True : False,
- StoreLoad ? True : False,
- StoreStore ? True : False,
- Device ? True : False };
- CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::memory_barrier), C);
+/// getBuiltinLibFunction - Given a builtin id for a function like
+/// "__builtin_fabsf", return a Function* for "fabsf".
+llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
+ unsigned BuiltinID) {
+ assert(Context.BuiltinInfo.isLibFunction(BuiltinID));
+
+ // Get the name, skip over the __builtin_ prefix (if necessary).
+ StringRef Name;
+ GlobalDecl D(FD);
+
+ // If the builtin has been declared explicitly with an assembler label,
+ // use the mangled name. This differs from the plain label on platforms
+ // that prefix labels.
+ if (FD->hasAttr<AsmLabelAttr>())
+ Name = getMangledName(D);
+ else
+ Name = Context.BuiltinInfo.GetName(BuiltinID) + 10;
+
+ llvm::FunctionType *Ty =
+ cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
+
+ return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}
/// Emit the conversions required to turn the given value into an
/// integer of the given size.
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
- QualType T, const llvm::IntegerType *IntType) {
+ QualType T, llvm::IntegerType *IntType) {
V = CGF.EmitToMemory(V, T);
if (V->getType()->isPointerTy())
@@ -55,7 +65,7 @@ static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
}
static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
- QualType T, const llvm::Type *ResultType) {
+ QualType T, llvm::Type *ResultType) {
V = CGF.EmitFromMemory(V, T);
if (ResultType->isPointerTy())
@@ -65,25 +75,11 @@ static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
return V;
}
-// The atomic builtins are also full memory barriers. This is a utility for
-// wrapping a call to the builtins with memory barriers.
-static Value *EmitCallWithBarrier(CodeGenFunction &CGF, Value *Fn,
- ArrayRef<Value *> Args) {
- // FIXME: We need a target hook for whether this applies to device memory or
- // not.
- bool Device = true;
-
- // Create barriers both before and after the call.
- EmitMemoryBarrier(CGF, true, true, true, true, Device);
- Value *Result = CGF.Builder.CreateCall(Fn, Args);
- EmitMemoryBarrier(CGF, true, true, true, true, Device);
- return Result;
-}
-
/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
- Intrinsic::ID Id, const CallExpr *E) {
+ llvm::AtomicRMWInst::BinOp Kind,
+ const CallExpr *E) {
QualType T = E->getType();
assert(E->getArg(0)->getType()->isPointerType());
assert(CGF.getContext().hasSameUnqualifiedType(T,
@@ -99,16 +95,15 @@ static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
CGF.getContext().getTypeSize(T));
llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
- llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
- llvm::Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes);
-
llvm::Value *Args[2];
Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
Args[1] = CGF.EmitScalarExpr(E->getArg(1));
- const llvm::Type *ValueType = Args[1]->getType();
+ llvm::Type *ValueType = Args[1]->getType();
Args[1] = EmitToInt(CGF, Args[1], T, IntType);
- llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args);
+ llvm::Value *Result =
+ CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
+ llvm::SequentiallyConsistent);
Result = EmitFromInt(CGF, Result, T, ValueType);
return RValue::get(Result);
}
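Note on this hunk: each __sync_fetch_and_* builtin now lowers to a single atomicrmw instruction rather than an intrinsic call bracketed by explicit memory barriers. A minimal standalone sketch of the IRBuilder call the helper relies on (illustrative only, not part of the patch; it assumes LLVM 3.0-era include paths and the pre-C++11-enum ordering spelling llvm::SequentiallyConsistent):

#include "llvm/Support/IRBuilder.h"

// Emit "old = atomicrmw add iN* Ptr, iN Val seq_cst" and return the prior value.
static llvm::Value *emitFetchAndAdd(llvm::IRBuilder<> &Builder,
                                    llvm::Value *Ptr, llvm::Value *Val) {
  return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Add, Ptr, Val,
                                 llvm::SequentiallyConsistent);
}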
@@ -117,7 +112,8 @@ static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
- Intrinsic::ID Id, const CallExpr *E,
+ llvm::AtomicRMWInst::BinOp Kind,
+ const CallExpr *E,
Instruction::BinaryOps Op) {
QualType T = E->getType();
assert(E->getArg(0)->getType()->isPointerType());
@@ -134,16 +130,15 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
CGF.getContext().getTypeSize(T));
llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
- llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
- llvm::Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes);
-
llvm::Value *Args[2];
Args[1] = CGF.EmitScalarExpr(E->getArg(1));
- const llvm::Type *ValueType = Args[1]->getType();
+ llvm::Type *ValueType = Args[1]->getType();
Args[1] = EmitToInt(CGF, Args[1], T, IntType);
Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
- llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args);
+ llvm::Value *Result =
+ CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
+ llvm::SequentiallyConsistent);
Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
Result = EmitFromInt(CGF, Result, T, ValueType);
return RValue::get(Result);
@@ -157,21 +152,26 @@ static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
StringRef FnName;
switch (ValTyP->getKind()) {
- default: assert(0 && "Isn't a scalar fp type!");
+ default: llvm_unreachable("Isn't a scalar fp type!");
case BuiltinType::Float: FnName = "fabsf"; break;
case BuiltinType::Double: FnName = "fabs"; break;
case BuiltinType::LongDouble: FnName = "fabsl"; break;
}
// The prototype is something that takes and returns whatever V's type is.
- llvm::Type *ArgTys[] = { V->getType() };
- llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), ArgTys,
+ llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), V->getType(),
false);
llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(FT, FnName);
return CGF.Builder.CreateCall(Fn, V, "abs");
}
+static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *Fn,
+ const CallExpr *E, llvm::Value *calleeValue) {
+ return CGF.EmitCall(E->getCallee()->getType(), calleeValue,
+ ReturnValueSlot(), E->arg_begin(), E->arg_end(), Fn);
+}
+
RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
unsigned BuiltinID, const CallExpr *E) {
// See if we can constant fold this builtin. If so, don't emit it at all.
@@ -195,7 +195,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_va_start:
case Builtin::BI__builtin_va_end: {
Value *ArgValue = EmitVAListRef(E->getArg(0));
- const llvm::Type *DestType = Int8PtrTy;
+ llvm::Type *DestType = Int8PtrTy;
if (ArgValue->getType() != DestType)
ArgValue = Builder.CreateBitCast(ArgValue, DestType,
ArgValue->getName().data());
@@ -208,7 +208,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *DstPtr = EmitVAListRef(E->getArg(0));
Value *SrcPtr = EmitVAListRef(E->getArg(1));
- const llvm::Type *Type = Int8PtrTy;
+ llvm::Type *Type = Int8PtrTy;
DstPtr = Builder.CreateBitCast(DstPtr, Type);
SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
@@ -236,8 +236,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
- const llvm::Type *ResultType = ConvertType(E->getType());
- Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Result = Builder.CreateCall(F, ArgValue);
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
@@ -251,8 +251,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
- const llvm::Type *ResultType = ConvertType(E->getType());
- Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Result = Builder.CreateCall(F, ArgValue);
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
@@ -267,9 +267,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
- const llvm::Type *ResultType = ConvertType(E->getType());
- Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue, "tmp"),
- llvm::ConstantInt::get(ArgType, 1), "tmp");
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue),
+ llvm::ConstantInt::get(ArgType, 1));
Value *Zero = llvm::Constant::getNullValue(ArgType);
Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
@@ -287,10 +287,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
- const llvm::Type *ResultType = ConvertType(E->getType());
- Value *Tmp = Builder.CreateCall(F, ArgValue, "tmp");
- Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1),
- "tmp");
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Tmp = Builder.CreateCall(F, ArgValue);
+ Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
@@ -304,8 +303,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
- const llvm::Type *ResultType = ConvertType(E->getType());
- Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Result = Builder.CreateCall(F, ArgValue);
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
@@ -327,7 +326,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::bswap, ArgType);
- return RValue::get(Builder.CreateCall(F, ArgValue, "tmp"));
+ return RValue::get(Builder.CreateCall(F, ArgValue));
}
case Builtin::BI__builtin_object_size: {
// We pass this builtin onto the optimizer so that it can
@@ -381,7 +380,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Exponent = EmitScalarExpr(E->getArg(1));
llvm::Type *ArgType = Base->getType();
Value *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
- return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
+ return RValue::get(Builder.CreateCall2(F, Base, Exponent));
}
case Builtin::BI__builtin_isgreater:
@@ -396,7 +395,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *RHS = EmitScalarExpr(E->getArg(1));
switch (BuiltinID) {
- default: assert(0 && "Unknown ordered comparison");
+ default: llvm_unreachable("Unknown ordered comparison");
case Builtin::BI__builtin_isgreater:
LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
break;
@@ -417,13 +416,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
break;
}
// ZExt bool to int type.
- return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()),
- "tmp"));
+ return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
}
case Builtin::BI__builtin_isnan: {
Value *V = EmitScalarExpr(E->getArg(0));
V = Builder.CreateFCmpUNO(V, V, "cmp");
- return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()), "tmp"));
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
case Builtin::BI__builtin_isinf: {
@@ -432,7 +430,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
V = EmitFAbs(*this, V, E->getArg(0)->getType());
V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()),"isinf");
- return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()), "tmp"));
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
// TODO: BI__builtin_isinf_sign
@@ -457,7 +455,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
}
case Builtin::BI__builtin_isfinite: {
- // isfinite(x) --> x == x && fabs(x) != infinity; }
+ // isfinite(x) --> x == x && fabs(x) != infinity;
Value *V = EmitScalarExpr(E->getArg(0));
Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
@@ -471,7 +469,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_fpclassify: {
Value *V = EmitScalarExpr(E->getArg(5));
- const llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
+ llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
// Create Result
BasicBlock *Begin = Builder.GetInsertBlock();
@@ -530,7 +528,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BIalloca:
case Builtin::BI__builtin_alloca: {
Value *Size = EmitScalarExpr(E->getArg(0));
- return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size, "tmp"));
+ return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size));
}
case Builtin::BIbzero:
case Builtin::BI__builtin_bzero: {
@@ -550,11 +548,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin___memcpy_chk: {
// fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
- if (!E->getArg(2)->isEvaluatable(CGM.getContext()) ||
- !E->getArg(3)->isEvaluatable(CGM.getContext()))
+ llvm::APSInt Size, DstSize;
+ if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
+ !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
break;
- llvm::APSInt Size = E->getArg(2)->EvaluateAsInt(CGM.getContext());
- llvm::APSInt DstSize = E->getArg(3)->EvaluateAsInt(CGM.getContext());
if (Size.ugt(DstSize))
break;
Value *Dest = EmitScalarExpr(E->getArg(0));
@@ -575,11 +572,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin___memmove_chk: {
// fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
- if (!E->getArg(2)->isEvaluatable(CGM.getContext()) ||
- !E->getArg(3)->isEvaluatable(CGM.getContext()))
+ llvm::APSInt Size, DstSize;
+ if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
+ !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
break;
- llvm::APSInt Size = E->getArg(2)->EvaluateAsInt(CGM.getContext());
- llvm::APSInt DstSize = E->getArg(3)->EvaluateAsInt(CGM.getContext());
if (Size.ugt(DstSize))
break;
Value *Dest = EmitScalarExpr(E->getArg(0));
@@ -608,11 +604,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
}
case Builtin::BI__builtin___memset_chk: {
// fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
- if (!E->getArg(2)->isEvaluatable(CGM.getContext()) ||
- !E->getArg(3)->isEvaluatable(CGM.getContext()))
+ llvm::APSInt Size, DstSize;
+ if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
+ !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
break;
- llvm::APSInt Size = E->getArg(2)->EvaluateAsInt(CGM.getContext());
- llvm::APSInt DstSize = E->getArg(3)->EvaluateAsInt(CGM.getContext());
if (Size.ugt(DstSize))
break;
Value *Address = EmitScalarExpr(E->getArg(0));
@@ -640,13 +635,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
}
case Builtin::BI__builtin_return_address: {
Value *Depth = EmitScalarExpr(E->getArg(0));
- Depth = Builder.CreateIntCast(Depth, Int32Ty, false, "tmp");
+ Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
return RValue::get(Builder.CreateCall(F, Depth));
}
case Builtin::BI__builtin_frame_address: {
Value *Depth = EmitScalarExpr(E->getArg(0));
- Depth = Builder.CreateIntCast(Depth, Int32Ty, false, "tmp");
+ Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
Value *F = CGM.getIntrinsic(Intrinsic::frameaddress);
return RValue::get(Builder.CreateCall(F, Depth));
}
@@ -661,7 +656,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Result);
}
case Builtin::BI__builtin_dwarf_sp_column: {
- const llvm::IntegerType *Ty
+ llvm::IntegerType *Ty
= cast<llvm::IntegerType>(ConvertType(E->getType()));
int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
if (Column == -1) {
@@ -680,7 +675,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Int = EmitScalarExpr(E->getArg(0));
Value *Ptr = EmitScalarExpr(E->getArg(1));
- const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
+ llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
"LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
@@ -775,82 +770,82 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_lock_test_and_set:
case Builtin::BI__sync_lock_release:
case Builtin::BI__sync_swap:
- assert(0 && "Shouldn't make it through sema");
+ llvm_unreachable("Shouldn't make it through sema");
case Builtin::BI__sync_fetch_and_add_1:
case Builtin::BI__sync_fetch_and_add_2:
case Builtin::BI__sync_fetch_and_add_4:
case Builtin::BI__sync_fetch_and_add_8:
case Builtin::BI__sync_fetch_and_add_16:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_load_add, E);
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
case Builtin::BI__sync_fetch_and_sub_1:
case Builtin::BI__sync_fetch_and_sub_2:
case Builtin::BI__sync_fetch_and_sub_4:
case Builtin::BI__sync_fetch_and_sub_8:
case Builtin::BI__sync_fetch_and_sub_16:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_load_sub, E);
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
case Builtin::BI__sync_fetch_and_or_1:
case Builtin::BI__sync_fetch_and_or_2:
case Builtin::BI__sync_fetch_and_or_4:
case Builtin::BI__sync_fetch_and_or_8:
case Builtin::BI__sync_fetch_and_or_16:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_load_or, E);
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
case Builtin::BI__sync_fetch_and_and_1:
case Builtin::BI__sync_fetch_and_and_2:
case Builtin::BI__sync_fetch_and_and_4:
case Builtin::BI__sync_fetch_and_and_8:
case Builtin::BI__sync_fetch_and_and_16:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_load_and, E);
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
case Builtin::BI__sync_fetch_and_xor_1:
case Builtin::BI__sync_fetch_and_xor_2:
case Builtin::BI__sync_fetch_and_xor_4:
case Builtin::BI__sync_fetch_and_xor_8:
case Builtin::BI__sync_fetch_and_xor_16:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_load_xor, E);
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
// Clang extensions: not overloaded yet.
case Builtin::BI__sync_fetch_and_min:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_load_min, E);
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
case Builtin::BI__sync_fetch_and_max:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_load_max, E);
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
case Builtin::BI__sync_fetch_and_umin:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umin, E);
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
case Builtin::BI__sync_fetch_and_umax:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umax, E);
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
case Builtin::BI__sync_add_and_fetch_1:
case Builtin::BI__sync_add_and_fetch_2:
case Builtin::BI__sync_add_and_fetch_4:
case Builtin::BI__sync_add_and_fetch_8:
case Builtin::BI__sync_add_and_fetch_16:
- return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_add, E,
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
llvm::Instruction::Add);
case Builtin::BI__sync_sub_and_fetch_1:
case Builtin::BI__sync_sub_and_fetch_2:
case Builtin::BI__sync_sub_and_fetch_4:
case Builtin::BI__sync_sub_and_fetch_8:
case Builtin::BI__sync_sub_and_fetch_16:
- return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_sub, E,
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
llvm::Instruction::Sub);
case Builtin::BI__sync_and_and_fetch_1:
case Builtin::BI__sync_and_and_fetch_2:
case Builtin::BI__sync_and_and_fetch_4:
case Builtin::BI__sync_and_and_fetch_8:
case Builtin::BI__sync_and_and_fetch_16:
- return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_and, E,
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
llvm::Instruction::And);
case Builtin::BI__sync_or_and_fetch_1:
case Builtin::BI__sync_or_and_fetch_2:
case Builtin::BI__sync_or_and_fetch_4:
case Builtin::BI__sync_or_and_fetch_8:
case Builtin::BI__sync_or_and_fetch_16:
- return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_or, E,
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
llvm::Instruction::Or);
case Builtin::BI__sync_xor_and_fetch_1:
case Builtin::BI__sync_xor_and_fetch_2:
case Builtin::BI__sync_xor_and_fetch_4:
case Builtin::BI__sync_xor_and_fetch_8:
case Builtin::BI__sync_xor_and_fetch_16:
- return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_xor, E,
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
llvm::Instruction::Xor);
case Builtin::BI__sync_val_compare_and_swap_1:
@@ -867,18 +862,16 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::IntegerType::get(getLLVMContext(),
getContext().getTypeSize(T));
llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
- llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
- Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap,
- IntrinsicTypes);
Value *Args[3];
Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
Args[1] = EmitScalarExpr(E->getArg(1));
- const llvm::Type *ValueType = Args[1]->getType();
+ llvm::Type *ValueType = Args[1]->getType();
Args[1] = EmitToInt(*this, Args[1], T, IntType);
Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);
- Value *Result = EmitCallWithBarrier(*this, AtomF, Args);
+ Value *Result = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
+ llvm::SequentiallyConsistent);
Result = EmitFromInt(*this, Result, T, ValueType);
return RValue::get(Result);
}
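The compare-and-swap builtins get the same treatment: at this point in LLVM, CreateAtomicCmpXchg returns the value previously in memory (the {value, success} result form came much later), so the boolean variant in the next hunk derives success by comparing against the expected value. A hedged sketch, again assuming the 3.0-era IRBuilder API:

#include "llvm/Support/IRBuilder.h"

// Sketch of a __sync_bool_compare_and_swap-style lowering: cmpxchg yields the
// previous memory contents; the swap succeeded iff it equals the expected value.
static llvm::Value *emitBoolCmpXchg(llvm::IRBuilder<> &Builder, llvm::Value *Ptr,
                                    llvm::Value *Expected, llvm::Value *Desired) {
  llvm::Value *Prev = Builder.CreateAtomicCmpXchg(Ptr, Expected, Desired,
                                                  llvm::SequentiallyConsistent);
  return Builder.CreateICmpEQ(Prev, Expected);
}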
@@ -897,9 +890,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::IntegerType::get(getLLVMContext(),
getContext().getTypeSize(T));
llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
- llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
- Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap,
- IntrinsicTypes);
Value *Args[3];
Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
@@ -907,7 +897,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);
Value *OldVal = Args[1];
- Value *PrevVal = EmitCallWithBarrier(*this, AtomF, Args);
+ Value *PrevVal = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
+ llvm::SequentiallyConsistent);
Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
// zext bool to int.
Result = Builder.CreateZExt(Result, ConvertType(E->getType()));
@@ -919,14 +910,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_swap_4:
case Builtin::BI__sync_swap_8:
case Builtin::BI__sync_swap_16:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E);
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
case Builtin::BI__sync_lock_test_and_set_1:
case Builtin::BI__sync_lock_test_and_set_2:
case Builtin::BI__sync_lock_test_and_set_4:
case Builtin::BI__sync_lock_test_and_set_8:
case Builtin::BI__sync_lock_test_and_set_16:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E);
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
case Builtin::BI__sync_lock_release_1:
case Builtin::BI__sync_lock_release_2:
@@ -934,32 +925,95 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_lock_release_8:
case Builtin::BI__sync_lock_release_16: {
Value *Ptr = EmitScalarExpr(E->getArg(0));
- const llvm::Type *ElTy =
+ llvm::Type *ElLLVMTy =
cast<llvm::PointerType>(Ptr->getType())->getElementType();
llvm::StoreInst *Store =
- Builder.CreateStore(llvm::Constant::getNullValue(ElTy), Ptr);
- Store->setVolatile(true);
+ Builder.CreateStore(llvm::Constant::getNullValue(ElLLVMTy), Ptr);
+ QualType ElTy = E->getArg(0)->getType()->getPointeeType();
+ CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
+ Store->setAlignment(StoreSize.getQuantity());
+ Store->setAtomic(llvm::Release);
return RValue::get(0);
}
case Builtin::BI__sync_synchronize: {
- // We assume like gcc appears to, that this only applies to cached memory.
- EmitMemoryBarrier(*this, true, true, true, true, false);
+ // We assume this is supposed to correspond to a C++0x-style
+ // sequentially-consistent fence (i.e. this is only usable for
+ // synchronization, not device I/O or anything like that). This intrinsic
+ // is really badly designed in the sense that in theory, there isn't
+ // any way to safely use it... but in practice, it mostly works
+ // to use it with non-atomic loads and stores to get acquire/release
+ // semantics.
+ Builder.CreateFence(llvm::SequentiallyConsistent);
return RValue::get(0);
}
- case Builtin::BI__builtin_llvm_memory_barrier: {
- Value *C[5] = {
- EmitScalarExpr(E->getArg(0)),
- EmitScalarExpr(E->getArg(1)),
- EmitScalarExpr(E->getArg(2)),
- EmitScalarExpr(E->getArg(3)),
- EmitScalarExpr(E->getArg(4))
- };
- Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C);
+ case Builtin::BI__atomic_thread_fence:
+ case Builtin::BI__atomic_signal_fence: {
+ llvm::SynchronizationScope Scope;
+ if (BuiltinID == Builtin::BI__atomic_signal_fence)
+ Scope = llvm::SingleThread;
+ else
+ Scope = llvm::CrossThread;
+ Value *Order = EmitScalarExpr(E->getArg(0));
+ if (isa<llvm::ConstantInt>(Order)) {
+ int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+ switch (ord) {
+ case 0: // memory_order_relaxed
+ default: // invalid order
+ break;
+ case 1: // memory_order_consume
+ case 2: // memory_order_acquire
+ Builder.CreateFence(llvm::Acquire, Scope);
+ break;
+ case 3: // memory_order_release
+ Builder.CreateFence(llvm::Release, Scope);
+ break;
+ case 4: // memory_order_acq_rel
+ Builder.CreateFence(llvm::AcquireRelease, Scope);
+ break;
+ case 5: // memory_order_seq_cst
+ Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
+ break;
+ }
+ return RValue::get(0);
+ }
+
+ llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
+ AcquireBB = createBasicBlock("acquire", CurFn);
+ ReleaseBB = createBasicBlock("release", CurFn);
+ AcqRelBB = createBasicBlock("acqrel", CurFn);
+ SeqCstBB = createBasicBlock("seqcst", CurFn);
+ llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+ Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+ llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
+
+ Builder.SetInsertPoint(AcquireBB);
+ Builder.CreateFence(llvm::Acquire, Scope);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(1), AcquireBB);
+ SI->addCase(Builder.getInt32(2), AcquireBB);
+
+ Builder.SetInsertPoint(ReleaseBB);
+ Builder.CreateFence(llvm::Release, Scope);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(3), ReleaseBB);
+
+ Builder.SetInsertPoint(AcqRelBB);
+ Builder.CreateFence(llvm::AcquireRelease, Scope);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(4), AcqRelBB);
+
+ Builder.SetInsertPoint(SeqCstBB);
+ Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(5), SeqCstBB);
+
+ Builder.SetInsertPoint(ContBB);
return RValue::get(0);
}
-
+
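For a non-constant ordering argument, the code above switches on the runtime memory_order value and emits one fence per reachable ordering. The constant-argument mapping it implements can be summarized by this illustrative helper (not from the patch; enumerator values follow C++11 memory_order, and the ordering and scope spellings are the 3.0-era ones used above):

#include "llvm/Support/IRBuilder.h"

// relaxed (0) and invalid values emit nothing; consume is strengthened to acquire.
static void emitFenceForOrder(llvm::IRBuilder<> &Builder, int Order,
                              llvm::SynchronizationScope Scope) {
  switch (Order) {
  case 1: case 2: Builder.CreateFence(llvm::Acquire, Scope); break;
  case 3:         Builder.CreateFence(llvm::Release, Scope); break;
  case 4:         Builder.CreateFence(llvm::AcquireRelease, Scope); break;
  case 5:         Builder.CreateFence(llvm::SequentiallyConsistent, Scope); break;
  default: break;
  }
}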
// Library functions with special handling.
case Builtin::BIsqrt:
case Builtin::BIsqrtf:
@@ -982,7 +1036,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Exponent = EmitScalarExpr(E->getArg(1));
llvm::Type *ArgType = Base->getType();
Value *F = CGM.getIntrinsic(Intrinsic::pow, ArgType);
- return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
+ return RValue::get(Builder.CreateCall2(F, Base, Exponent));
}
case Builtin::BIfma:
@@ -997,8 +1051,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *F = CGM.getIntrinsic(Intrinsic::fma, ArgType);
return RValue::get(Builder.CreateCall3(F, FirstArg,
EmitScalarExpr(E->getArg(1)),
- EmitScalarExpr(E->getArg(2)),
- "tmp"));
+ EmitScalarExpr(E->getArg(2))));
}
case Builtin::BI__builtin_signbit:
@@ -1007,25 +1060,40 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
LLVMContext &C = CGM.getLLVMContext();
Value *Arg = EmitScalarExpr(E->getArg(0));
- const llvm::Type *ArgTy = Arg->getType();
+ llvm::Type *ArgTy = Arg->getType();
if (ArgTy->isPPC_FP128Ty())
break; // FIXME: I'm not sure what the right implementation is here.
int ArgWidth = ArgTy->getPrimitiveSizeInBits();
- const llvm::Type *ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
+ llvm::Type *ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
Value *BCArg = Builder.CreateBitCast(Arg, ArgIntTy);
Value *ZeroCmp = llvm::Constant::getNullValue(ArgIntTy);
Value *Result = Builder.CreateICmpSLT(BCArg, ZeroCmp);
return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
}
+ case Builtin::BI__builtin_annotation: {
+ llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
+ AnnVal->getType());
+
+ // Get the annotation string, go through casts. Sema requires this to be a
+ // non-wide string literal, potentially casted, so the cast<> is safe.
+ const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
+ llvm::StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
+ return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
+ }
}
- // If this is an alias for a libm function (e.g. __builtin_sin) turn it into
- // that function.
- if (getContext().BuiltinInfo.isLibFunction(BuiltinID) ||
- getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
- return EmitCall(E->getCallee()->getType(),
- CGM.getBuiltinLibFunction(FD, BuiltinID),
- ReturnValueSlot(), E->arg_begin(), E->arg_end(), FD);
+ // If this is an alias for a lib function (e.g. __builtin_sin), emit
+ // the call using the normal call path, but using the unmangled
+ // version of the function name.
+ if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
+ return emitLibraryCall(*this, FD, E,
+ CGM.getBuiltinLibFunction(FD, BuiltinID));
+
+ // If this is a predefined lib function (e.g. malloc), emit the call
+ // using exactly the normal call path.
+ if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
+ return emitLibraryCall(*this, FD, E, EmitScalarExpr(E->getCallee()));
// See if we have a target specific intrinsic.
const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
@@ -1045,7 +1113,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
assert(Error == ASTContext::GE_None && "Should not codegen an error");
Function *F = CGM.getIntrinsic(IntrinsicID);
- const llvm::FunctionType *FTy = F->getFunctionType();
+ llvm::FunctionType *FTy = F->getFunctionType();
for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
Value *ArgValue;
@@ -1064,7 +1132,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// If the intrinsic arg type is different from the builtin arg type
// we need to do a bit cast.
- const llvm::Type *PTy = FTy->getParamType(i);
+ llvm::Type *PTy = FTy->getParamType(i);
if (PTy != ArgValue->getType()) {
assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
"Must be able to losslessly bit cast to param");
@@ -1077,7 +1145,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *V = Builder.CreateCall(F, Args);
QualType BuiltinRetType = E->getType();
- const llvm::Type *RetTy = llvm::Type::getVoidTy(getLLVMContext());
+ llvm::Type *RetTy = llvm::Type::getVoidTy(getLLVMContext());
if (!BuiltinRetType->isVoidType()) RetTy = ConvertType(BuiltinRetType);
if (RetTy != V->getType()) {
@@ -1154,12 +1222,12 @@ Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
return Builder.CreateCall(F, Ops, name);
}
-Value *CodeGenFunction::EmitNeonShiftVector(Value *V, const llvm::Type *Ty,
+Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
bool neg) {
ConstantInt *CI = cast<ConstantInt>(V);
int SV = CI->getSExtValue();
- const llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
+ llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
SmallVector<llvm::Constant*, 16> CV(VTy->getNumElements(), C);
return llvm::ConstantVector::get(CV);
@@ -1193,12 +1261,12 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
const FunctionDecl *FD = E->getDirectCallee();
// Oddly people write this call without args on occasion and gcc accepts
// it - it's also marked as varargs in the description file.
- llvm::SmallVector<Value*, 2> Ops;
+ SmallVector<Value*, 2> Ops;
for (unsigned i = 0; i < E->getNumArgs(); i++)
Ops.push_back(EmitScalarExpr(E->getArg(i)));
- const llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
- const llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
- llvm::StringRef Name = FD->getName();
+ llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
+ llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
+ StringRef Name = FD->getName();
return Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
}
@@ -1223,7 +1291,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, NULL);
Value *One = llvm::ConstantInt::get(Int32Ty, 1);
- Value *Tmp = Builder.CreateAlloca(Int64Ty, One, "tmp");
+ Value *Tmp = Builder.CreateAlloca(Int64Ty, One);
Value *Val = EmitScalarExpr(E->getArg(0));
Builder.CreateStore(Val, Tmp);
@@ -1236,10 +1304,41 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall3(F, Arg0, Arg1, StPtr, "strexd");
}
- llvm::SmallVector<Value*, 4> Ops;
+ SmallVector<Value*, 4> Ops;
for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++)
Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ // vget_lane and vset_lane are not overloaded and do not have an extra
+ // argument that specifies the vector type.
+ switch (BuiltinID) {
+ default: break;
+ case ARM::BI__builtin_neon_vget_lane_i8:
+ case ARM::BI__builtin_neon_vget_lane_i16:
+ case ARM::BI__builtin_neon_vget_lane_i32:
+ case ARM::BI__builtin_neon_vget_lane_i64:
+ case ARM::BI__builtin_neon_vget_lane_f32:
+ case ARM::BI__builtin_neon_vgetq_lane_i8:
+ case ARM::BI__builtin_neon_vgetq_lane_i16:
+ case ARM::BI__builtin_neon_vgetq_lane_i32:
+ case ARM::BI__builtin_neon_vgetq_lane_i64:
+ case ARM::BI__builtin_neon_vgetq_lane_f32:
+ return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+ "vget_lane");
+ case ARM::BI__builtin_neon_vset_lane_i8:
+ case ARM::BI__builtin_neon_vset_lane_i16:
+ case ARM::BI__builtin_neon_vset_lane_i32:
+ case ARM::BI__builtin_neon_vset_lane_i64:
+ case ARM::BI__builtin_neon_vset_lane_f32:
+ case ARM::BI__builtin_neon_vsetq_lane_i8:
+ case ARM::BI__builtin_neon_vsetq_lane_i16:
+ case ARM::BI__builtin_neon_vsetq_lane_i32:
+ case ARM::BI__builtin_neon_vsetq_lane_i64:
+ case ARM::BI__builtin_neon_vsetq_lane_f32:
+ Ops.push_back(EmitScalarExpr(E->getArg(2)));
+ return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
+ }
+
+ // Get the last argument, which specifies the vector type.
llvm::APSInt Result;
const Expr *Arg = E->getArg(E->getNumArgs()-1);
if (!Arg->isIntegerConstantExpr(Result, getContext()))
@@ -1382,18 +1481,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Value *SV = llvm::ConstantVector::get(Indices);
return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext");
}
- case ARM::BI__builtin_neon_vget_lane_i8:
- case ARM::BI__builtin_neon_vget_lane_i16:
- case ARM::BI__builtin_neon_vget_lane_i32:
- case ARM::BI__builtin_neon_vget_lane_i64:
- case ARM::BI__builtin_neon_vget_lane_f32:
- case ARM::BI__builtin_neon_vgetq_lane_i8:
- case ARM::BI__builtin_neon_vgetq_lane_i16:
- case ARM::BI__builtin_neon_vgetq_lane_i32:
- case ARM::BI__builtin_neon_vgetq_lane_i64:
- case ARM::BI__builtin_neon_vgetq_lane_f32:
- return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
- "vget_lane");
case ARM::BI__builtin_neon_vhadd_v:
case ARM::BI__builtin_neon_vhaddq_v:
Int = usgn ? Intrinsic::arm_neon_vhaddu : Intrinsic::arm_neon_vhadds;
@@ -1457,9 +1544,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
- Ops[1] = Builder.CreateCall(F,
- ArrayRef<Value *>(Ops.begin() + 1, Ops.end()),
- "vld2_lane");
+ Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
@@ -1471,9 +1556,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
- Ops[1] = Builder.CreateCall(F,
- ArrayRef<Value *>(Ops.begin() + 1, Ops.end()),
- "vld3_lane");
+ Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
@@ -1486,9 +1569,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
Ops[5] = Builder.CreateBitCast(Ops[5], Ty);
Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
- Ops[1] = Builder.CreateCall(F,
- ArrayRef<Value *>(Ops.begin() + 1, Ops.end()),
- "vld3_lane");
+ Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
@@ -1508,7 +1589,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vld4_dup_v:
Int = Intrinsic::arm_neon_vld2;
break;
- default: assert(0 && "unknown vld_dup intrinsic?");
+ default: llvm_unreachable("unknown vld_dup intrinsic?");
}
Function *F = CGM.getIntrinsic(Int, Ty);
Value *Align = GetPointeeAlignment(*this, E->getArg(1));
@@ -1527,10 +1608,10 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vld4_dup_v:
Int = Intrinsic::arm_neon_vld2lane;
break;
- default: assert(0 && "unknown vld_dup intrinsic?");
+ default: llvm_unreachable("unknown vld_dup intrinsic?");
}
Function *F = CGM.getIntrinsic(Int, Ty);
- const llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
+ llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
SmallVector<Value*, 6> Args;
Args.push_back(Ops[1]);
@@ -1562,14 +1643,14 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Int = usgn ? Intrinsic::arm_neon_vminu : Intrinsic::arm_neon_vmins;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
case ARM::BI__builtin_neon_vmovl_v: {
- const llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+ llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
if (usgn)
return Builder.CreateZExt(Ops[0], Ty, "vmovl");
return Builder.CreateSExt(Ops[0], Ty, "vmovl");
}
case ARM::BI__builtin_neon_vmovn_v: {
- const llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
+ llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
}
@@ -1587,7 +1668,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals;
// The source operand type has twice as many elements of half the size.
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
- const llvm::Type *EltTy =
+ llvm::Type *EltTy =
llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
llvm::Type *NarrowTy =
llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
@@ -1602,7 +1683,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Int = usgn ? Intrinsic::arm_neon_vpaddlu : Intrinsic::arm_neon_vpaddls;
// The source operand type has twice as many elements of half the size.
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
- const llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
+ llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
llvm::Type *NarrowTy =
llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
llvm::Type *Tys[2] = { Ty, NarrowTy };
@@ -1729,18 +1810,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vrsubhn_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsubhn, Ty),
Ops, "vrsubhn");
- case ARM::BI__builtin_neon_vset_lane_i8:
- case ARM::BI__builtin_neon_vset_lane_i16:
- case ARM::BI__builtin_neon_vset_lane_i32:
- case ARM::BI__builtin_neon_vset_lane_i64:
- case ARM::BI__builtin_neon_vset_lane_f32:
- case ARM::BI__builtin_neon_vsetq_lane_i8:
- case ARM::BI__builtin_neon_vsetq_lane_i16:
- case ARM::BI__builtin_neon_vsetq_lane_i32:
- case ARM::BI__builtin_neon_vsetq_lane_i64:
- case ARM::BI__builtin_neon_vsetq_lane_f32:
- Ops.push_back(EmitScalarExpr(E->getArg(2)));
- return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
case ARM::BI__builtin_neon_vshl_v:
case ARM::BI__builtin_neon_vshlq_v:
Int = usgn ? Intrinsic::arm_neon_vshiftu : Intrinsic::arm_neon_vshifts;
@@ -1921,7 +1990,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
llvm::Value *CodeGenFunction::
-BuildVector(const llvm::SmallVectorImpl<llvm::Value*> &Ops) {
+BuildVector(const SmallVectorImpl<llvm::Value*> &Ops) {
assert((Ops.size() & (Ops.size() - 1)) == 0 &&
"Not a power-of-two sized vector!");
bool AllConstants = true;
@@ -1949,7 +2018,7 @@ BuildVector(const llvm::SmallVectorImpl<llvm::Value*> &Ops) {
Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
- llvm::SmallVector<Value*, 4> Ops;
+ SmallVector<Value*, 4> Ops;
// Find out if any arguments are required to be integer constant expressions.
unsigned ICEArguments = 0;
@@ -1983,7 +2052,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_psrlqi128:
case X86::BI__builtin_ia32_psrlwi128: {
Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext");
- const llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2);
+ llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2);
llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty),
Ops[1], Zero, "insert");
@@ -1992,7 +2061,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Intrinsic::ID ID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
- default: assert(0 && "Unsupported shift intrinsic!");
+ default: llvm_unreachable("Unsupported shift intrinsic!");
case X86::BI__builtin_ia32_pslldi128:
name = "pslldi";
ID = Intrinsic::x86_sse2_psll_d;
@@ -2046,13 +2115,13 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_psrlqi:
case X86::BI__builtin_ia32_psrlwi: {
Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext");
- const llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 1);
+ llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 1);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast");
const char *name = 0;
Intrinsic::ID ID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
- default: assert(0 && "Unsupported shift intrinsic!");
+ default: llvm_unreachable("Unsupported shift intrinsic!");
case X86::BI__builtin_ia32_pslldi:
name = "pslldi";
ID = Intrinsic::x86_mmx_psll_d;
@@ -2098,19 +2167,19 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, Ops, "cmpss");
}
case X86::BI__builtin_ia32_ldmxcsr: {
- const llvm::Type *PtrTy = Int8PtrTy;
+ llvm::Type *PtrTy = Int8PtrTy;
Value *One = llvm::ConstantInt::get(Int32Ty, 1);
- Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp");
+ Value *Tmp = Builder.CreateAlloca(Int32Ty, One);
Builder.CreateStore(Ops[0], Tmp);
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
Builder.CreateBitCast(Tmp, PtrTy));
}
case X86::BI__builtin_ia32_stmxcsr: {
- const llvm::Type *PtrTy = Int8PtrTy;
+ llvm::Type *PtrTy = Int8PtrTy;
Value *One = llvm::ConstantInt::get(Int32Ty, 1);
- Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp");
- One = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
- Builder.CreateBitCast(Tmp, PtrTy));
+ Value *Tmp = Builder.CreateAlloca(Int32Ty, One);
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
+ Builder.CreateBitCast(Tmp, PtrTy));
return Builder.CreateLoad(Tmp, "stmxcsr");
}
case X86::BI__builtin_ia32_cmppd: {
@@ -2144,7 +2213,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// If palignr is shifting the pair of input vectors less than 9 bytes,
// emit a shuffle instruction.
if (shiftVal <= 8) {
- llvm::SmallVector<llvm::Constant*, 8> Indices;
+ SmallVector<llvm::Constant*, 8> Indices;
for (unsigned i = 0; i != 8; ++i)
Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
@@ -2156,17 +2225,17 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// than 16 bytes, emit a logical right shift of the destination.
if (shiftVal < 16) {
// MMX has these as 1 x i64 vectors for some odd optimization reasons.
- const llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1);
+ llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1);
Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);
// create i32 constant
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
- return Builder.CreateCall(F, ArrayRef<Value *>(&Ops[0], 2), "palignr");
+ return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
}
- // If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
+ // If palignr is shifting the pair of vectors more than 16 bytes, emit zero.
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
case X86::BI__builtin_ia32_palignr128: {
@@ -2175,7 +2244,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// If palignr is shifting the pair of input vectors less than 17 bytes,
// emit a shuffle instruction.
if (shiftVal <= 16) {
- llvm::SmallVector<llvm::Constant*, 16> Indices;
+ SmallVector<llvm::Constant*, 16> Indices;
for (unsigned i = 0; i != 16; ++i)
Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
@@ -2186,14 +2255,14 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// If palignr is shifting the pair of input vectors more than 16 but less
// than 32 bytes, emit a logical right shift of the destination.
if (shiftVal < 32) {
- const llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
+ llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);
// create i32 constant
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
- return Builder.CreateCall(F, ArrayRef<Value *>(&Ops[0], 2), "palignr");
+ return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
}
// If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
@@ -2352,7 +2421,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
- llvm::SmallVector<Value*, 4> Ops;
+ SmallVector<Value*, 4> Ops;
for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
Ops.push_back(EmitScalarExpr(E->getArg(i)));
@@ -2373,11 +2442,11 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
{
Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
- Ops[0] = Builder.CreateGEP(Ops[1], Ops[0], "tmp");
+ Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
Ops.pop_back();
switch (BuiltinID) {
- default: assert(0 && "Unsupported ld/lvsl/lvsr intrinsic!");
+ default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
case PPC::BI__builtin_altivec_lvx:
ID = Intrinsic::ppc_altivec_lvx;
break;
@@ -2412,11 +2481,11 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_altivec_stvewx:
{
Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
- Ops[1] = Builder.CreateGEP(Ops[2], Ops[1], "tmp");
+ Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
Ops.pop_back();
switch (BuiltinID) {
- default: assert(0 && "Unsupported st intrinsic!");
+ default: llvm_unreachable("Unsupported st intrinsic!");
case PPC::BI__builtin_altivec_stvx:
ID = Intrinsic::ppc_altivec_stvx;
break;