//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;
using namespace llvm;

/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
                                                  unsigned BuiltinID) {
  assert(Context.BuiltinInfo.isLibFunction(BuiltinID));

  // Get the name, skip over the __builtin_ prefix (if necessary).
  StringRef Name;
  GlobalDecl D(FD);

  // If the builtin has been declared explicitly with an assembler label,
  // use the mangled name. This differs from the plain label on platforms
  // that prefix labels.
  if (FD->hasAttr<AsmLabelAttr>())
    Name = getMangledName(D);
  else
    Name = Context.BuiltinInfo.GetName(BuiltinID) + 10; // skip "__builtin_"

  llvm::FunctionType *Ty =
    cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));

  return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}

/// Emit the conversions required to turn the given value into an
/// integer of the given size.
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
                        QualType T, llvm::IntegerType *IntType) {
  V = CGF.EmitToMemory(V, T);

  if (V->getType()->isPointerTy())
    return CGF.Builder.CreatePtrToInt(V, IntType);

  assert(V->getType() == IntType);
  return V;
}

static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                          QualType T, llvm::Type *ResultType) {
  V = CGF.EmitFromMemory(V, T);

  if (ResultType->isPointerTy())
    return CGF.Builder.CreateIntToPtr(V, ResultType);

  assert(V->getType() == ResultType);
  return V;
}

/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
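/// For example, __sync_fetch_and_add(p, v) on an int lowers to roughly:
///   %old = atomicrmw add i32* %p, i32 %v seq_cst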
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               llvm::AtomicRMWInst::BinOp Kind,
                               const CallExpr *E) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);

  llvm::Value *Result =
      CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
                                  llvm::SequentiallyConsistent);
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}

/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   llvm::AtomicRMWInst::BinOp Kind,
                                   const CallExpr *E,
                                   Instruction::BinaryOps Op) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);

  llvm::Value *Result =
      CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
                                  llvm::SequentiallyConsistent);
  Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}

/// EmitFAbs - Emit a call to fabs/fabsf/fabsl, depending on the type of ValTy,
/// which must be a scalar floating point type.
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
  const BuiltinType *ValTyP = ValTy->getAs<BuiltinType>();
  assert(ValTyP && "isn't scalar fp type!");

  StringRef FnName;
  switch (ValTyP->getKind()) {
  default: llvm_unreachable("Isn't a scalar fp type!");
  case BuiltinType::Float: FnName = "fabsf"; break;
  case BuiltinType::Double: FnName = "fabs"; break;
  case BuiltinType::LongDouble: FnName = "fabsl"; break;
  }

  // The prototype is something that takes and returns whatever V's type is.
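  // (For example, double fabs(double) when V is a double.)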
  llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), V->getType(),
                                                   false);
  llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(FT, FnName);

  return CGF.EmitNounwindRuntimeCall(Fn, V, "abs");
}

static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *Fn,
                              const CallExpr *E, llvm::Value *calleeValue) {
  return CGF.EmitCall(E->getCallee()->getType(), calleeValue,
                      ReturnValueSlot(), E->arg_begin(), E->arg_end(), Fn);
}

/// \brief Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
/// \arg X The first argument to the llvm.*.with.overflow.*.
/// \arg Y The second argument to the llvm.*.with.overflow.*.
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
                                          const llvm::Intrinsic::ID IntrinsicID,
                                          llvm::Value *X, llvm::Value *Y,
                                          llvm::Value *&Carry) {
  // Make sure we have integers of the same width.
  assert(X->getType() == Y->getType() &&
         "Arguments must be the same type. (Did you forget to make sure both "
         "arguments have the same integer width?)");

  llvm::Value *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
  llvm::Value *Tmp = CGF.Builder.CreateCall2(Callee, X, Y);
  Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
  return CGF.Builder.CreateExtractValue(Tmp, 0);
}

RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
                                        unsigned BuiltinID, const CallExpr *E) {
  // See if we can constant fold this builtin. If so, don't emit it at all.
  Expr::EvalResult Result;
  if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
      !Result.hasSideEffects()) {
    if (Result.Val.isInt())
      return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
                                                Result.Val.getInt()));
    if (Result.Val.isFloat())
      return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
                                               Result.Val.getFloat()));
  }

  switch (BuiltinID) {
  default: break;  // Handle intrinsics and libm functions below.
  case Builtin::BI__builtin___CFStringMakeConstantString:
  case Builtin::BI__builtin___NSStringMakeConstantString:
    return RValue::get(CGM.EmitConstantExpr(E, E->getType(), 0));
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
  case Builtin::BI__builtin_va_end: {
    Value *ArgValue = EmitVAListRef(E->getArg(0));
    llvm::Type *DestType = Int8PtrTy;
    if (ArgValue->getType() != DestType)
      ArgValue = Builder.CreateBitCast(ArgValue, DestType,
                                       ArgValue->getName().data());

    Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
      Intrinsic::vaend : Intrinsic::vastart;
    return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
  }
  case Builtin::BI__builtin_va_copy: {
    Value *DstPtr = EmitVAListRef(E->getArg(0));
    Value *SrcPtr = EmitVAListRef(E->getArg(1));

    llvm::Type *Type = Int8PtrTy;

    DstPtr = Builder.CreateBitCast(DstPtr, Type);
    SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
    return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
                                           DstPtr, SrcPtr));
  }
  case Builtin::BI__builtin_abs:
  case Builtin::BI__builtin_labs:
  case Builtin::BI__builtin_llabs: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
    Value *CmpResult =
      Builder.CreateICmpSGE(ArgValue,
                            llvm::Constant::getNullValue(ArgValue->getType()),
                            "abscond");
    Value *Result =
      Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");

    return RValue::get(Result);
  }

  case Builtin::BI__builtin_conj:
  case Builtin::BI__builtin_conjf:
  case Builtin::BI__builtin_conjl: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    Value *Real = ComplexVal.first;
    Value *Imag = ComplexVal.second;
    Value *Zero =
      Imag->getType()->isFPOrFPVectorTy()
        ? llvm::ConstantFP::getZeroValueForNegation(Imag->getType())
        : llvm::Constant::getNullValue(Imag->getType());

    Imag = Builder.CreateFSub(Zero, Imag, "sub");
    return RValue::getComplex(std::make_pair(Real, Imag));
  }
  case Builtin::BI__builtin_creal:
  case Builtin::BI__builtin_crealf:
  case Builtin::BI__builtin_creall:
  case Builtin::BIcreal:
  case Builtin::BIcrealf:
  case Builtin::BIcreall: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    return RValue::get(ComplexVal.first);
  }

  case Builtin::BI__builtin_cimag:
  case Builtin::BI__builtin_cimagf:
  case Builtin::BI__builtin_cimagl:
  case Builtin::BIcimag:
  case Builtin::BIcimagf:
  case Builtin::BIcimagl: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    return RValue::get(ComplexVal.second);
  }

  case Builtin::BI__builtin_ctzs:
  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
    Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_clzs:
  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
    Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
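  // Note: the i1 operand passed to @llvm.cttz/@llvm.ctlz above says whether a
  // zero input produces an undefined result; it is true on most targets, so
  // e.g. __builtin_clz(x) typically becomes roughly:
  //   %r = call i32 @llvm.ctlz.i32(i32 %x, i1 true)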
  case Builtin::BI__builtin_ffs:
  case Builtin::BI__builtin_ffsl:
  case Builtin::BI__builtin_ffsll: {
    // ffs(x) -> x ? cttz(x) + 1 : 0
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateAdd(Builder.CreateCall2(F, ArgValue,
                                                       Builder.getTrue()),
                                   llvm::ConstantInt::get(ArgType, 1));
    Value *Zero = llvm::Constant::getNullValue(ArgType);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
    Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll: {
    // parity(x) -> ctpop(x) & 1
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateCall(F, ArgValue);
    Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_expect: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = ArgValue->getType();

    Value *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
    Value *ExpectedValue = EmitScalarExpr(E->getArg(1));

    Value *Result = Builder.CreateCall2(FnExpect, ArgValue, ExpectedValue,
                                        "expval");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_bswap16:
  case Builtin::BI__builtin_bswap32:
  case Builtin::BI__builtin_bswap64: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::bswap, ArgType);
    return RValue::get(Builder.CreateCall(F, ArgValue));
  }
  case Builtin::BI__builtin_object_size: {
    // We rely on constant folding to deal with expressions with side effects.
    assert(!E->getArg(0)->HasSideEffects(getContext()) &&
           "should have been constant folded");

    // We pass this builtin onto the optimizer so that it can
    // figure out the object size in more complex cases.
    llvm::Type *ResType = ConvertType(E->getType());

    // LLVM only supports 0 and 2, make sure that we pass along that
    // as a boolean.
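    // For example, __builtin_object_size(p, 0) on a 64-bit target becomes
    // roughly:
    //   %size = call i64 @llvm.objectsize.i64(i8* %p, i1 false)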
    Value *Ty = EmitScalarExpr(E->getArg(1));
    ConstantInt *CI = dyn_cast<ConstantInt>(Ty);
    assert(CI);
    uint64_t val = CI->getZExtValue();
    CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);

    Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType);
    return RValue::get(Builder.CreateCall2(F, EmitScalarExpr(E->getArg(0)), CI));
  }
  case Builtin::BI__builtin_prefetch: {
    Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
    // FIXME: Technically these constants should be of type 'int', yes?
    RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
      llvm::ConstantInt::get(Int32Ty, 0);
    Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
      llvm::ConstantInt::get(Int32Ty, 3);
    Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
    Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
    return RValue::get(Builder.CreateCall4(F, Address, RW, Locality, Data));
  }
  case Builtin::BI__builtin_readcyclecounter: {
    Value *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_trap: {
    Value *F = CGM.getIntrinsic(Intrinsic::trap);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__debugbreak: {
    Value *F = CGM.getIntrinsic(Intrinsic::debugtrap);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_unreachable: {
    if (SanOpts->Unreachable)
      EmitCheck(Builder.getFalse(), "builtin_unreachable",
                EmitCheckSourceLocation(E->getExprLoc()),
                ArrayRef<llvm::Value *>(), CRK_Unrecoverable);
    else
      Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("unreachable.cont"));

    return RValue::get(0);
  }

  case Builtin::BI__builtin_powi:
  case Builtin::BI__builtin_powif:
  case Builtin::BI__builtin_powil: {
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent));
  }

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered: {
    // Ordered comparisons: we know the arguments to these are matching scalar
    // floating point values.
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));

    switch (BuiltinID) {
    default: llvm_unreachable("Unknown ordered comparison");
    case Builtin::BI__builtin_isgreater:
      LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isgreaterequal:
      LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isless:
      LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessequal:
      LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessgreater:
      LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isunordered:
      LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
      break;
    }
    // ZExt bool to int type.
    return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_isnan: {
    Value *V = EmitScalarExpr(E->getArg(0));
    V = Builder.CreateFCmpUNO(V, V, "cmp");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_isinf: {
    // isinf(x) --> fabs(x) == infinity
    Value *V = EmitScalarExpr(E->getArg(0));
    V = EmitFAbs(*this, V, E->getArg(0)->getType());

    V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()), "isinf");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  // TODO: BI__builtin_isinf_sign
  //   isinf_sign(x) -> isinf(x) ? (signbit(x) ? -1 : 1) : 0

  case Builtin::BI__builtin_isnormal: {
    // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
    Value *IsLessThanInf =
      Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()), "isinf");
    APFloat Smallest = APFloat::getSmallestNormalized(
                  getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
    Value *IsNormal =
      Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
                            "isnormal");
    V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
    V = Builder.CreateAnd(V, IsNormal, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_isfinite: {
    // isfinite(x) --> x == x && fabs(x) != infinity;
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
    Value *IsNotInf =
      Builder.CreateFCmpUNE(Abs, ConstantFP::getInfinity(V->getType()), "isinf");

    V = Builder.CreateAnd(Eq, IsNotInf, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_fpclassify: {
    Value *V = EmitScalarExpr(E->getArg(5));
    llvm::Type *Ty = ConvertType(E->getArg(5)->getType());

    // Create Result
    BasicBlock *Begin = Builder.GetInsertBlock();
    BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
    Builder.SetInsertPoint(End);
    PHINode *Result =
      Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
                        "fpclassify_result");

    // if (V==0) return FP_ZERO
    Builder.SetInsertPoint(Begin);
    Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
                                          "iszero");
    Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
    BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
    Builder.CreateCondBr(IsZero, End, NotZero);
    Result->addIncoming(ZeroLiteral, Begin);

    // if (V != V) return FP_NAN
    Builder.SetInsertPoint(NotZero);
    Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
    Value *NanLiteral = EmitScalarExpr(E->getArg(0));
    BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
    Builder.CreateCondBr(IsNan, End, NotNan);
    Result->addIncoming(NanLiteral, NotZero);

    // if (fabs(V) == infinity) return FP_INFINITY
    Builder.SetInsertPoint(NotNan);
    Value *VAbs = EmitFAbs(*this, V, E->getArg(5)->getType());
    Value *IsInf =
      Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
                            "isinf");
    Value *InfLiteral = EmitScalarExpr(E->getArg(1));
    BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
    Builder.CreateCondBr(IsInf, End, NotInf);
    Result->addIncoming(InfLiteral, NotNan);

    // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
    Builder.SetInsertPoint(NotInf);
    APFloat Smallest = APFloat::getSmallestNormalized(
                  getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
    Value *IsNormal =
      Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
                            "isnormal");
    Value *NormalResult =
      Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
                           EmitScalarExpr(E->getArg(3)));
    Builder.CreateBr(End);
    Result->addIncoming(NormalResult, NotInf);

    // return Result
    Builder.SetInsertPoint(End);
    return RValue::get(Result);
  }

  case Builtin::BIalloca:
  case Builtin::BI__builtin_alloca: {
    Value *Size = EmitScalarExpr(E->getArg(0));
    return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size));
  }
  case Builtin::BIbzero:
  case Builtin::BI__builtin_bzero: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    Value *SizeVal = EmitScalarExpr(E->getArg(1));
    Builder.CreateMemSet(Dest.first, Builder.getInt8(0), SizeVal,
                         Dest.second, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }

  case Builtin::BI__builtin___memcpy_chk: {
    // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }

  case Builtin::BI__builtin_objc_memmove_collectable: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *SrcAddr = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
                                                  Address, SrcAddr, SizeVal);
    return RValue::get(Address);
  }

  case Builtin::BI__builtin___memmove_chk: {
    // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
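    // (For example, a __builtin___memmove_chk call copying a constant 10
    // bytes into an object known to hold 20 can safely be emitted as a plain
    // memmove; otherwise we break out to the normal library-call path below.)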
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }

  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BI__builtin___memset_chk: {
    // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BI__builtin_dwarf_cfa: {
    // The offset in bytes from the first argument to the CFA.
    //
    // Why on earth is this in the frontend? Is there any reason at
    // all that the backend can't reasonably determine this while
    // lowering llvm.eh.dwarf.cfa()?
    //
    // TODO: If there's a satisfactory reason, add a target hook for
    // this instead of hard-coding 0, which is correct for most targets.
    int32_t Offset = 0;

    Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
    return RValue::get(Builder.CreateCall(F,
                                      llvm::ConstantInt::get(Int32Ty, Offset)));
  }
  case Builtin::BI__builtin_return_address: {
    Value *Depth = EmitScalarExpr(E->getArg(0));
    Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
    Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_frame_address: {
    Value *Depth = EmitScalarExpr(E->getArg(0));
    Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
    Value *F = CGM.getIntrinsic(Intrinsic::frameaddress);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_extract_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_frob_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_dwarf_sp_column: {
    llvm::IntegerType *Ty
      = cast<llvm::IntegerType>(ConvertType(E->getType()));
    int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
    if (Column == -1) {
      CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
      return RValue::get(llvm::UndefValue::get(Ty));
    }
    return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
  }
  case Builtin::BI__builtin_init_dwarf_reg_size_table: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
      CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
    return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_eh_return: {
    Value *Int = EmitScalarExpr(E->getArg(0));
    Value *Ptr = EmitScalarExpr(E->getArg(1));

    llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
    assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
           "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
    Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
                                  ? Intrinsic::eh_return_i32
                                  : Intrinsic::eh_return_i64);
    Builder.CreateCall2(F, Int, Ptr);
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("builtin_eh_return.cont"));

    return RValue::get(0);
  }
  case Builtin::BI__builtin_unwind_init: {
    Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_extend_pointer: {
    // Extends a pointer to the size of an _Unwind_Word, which is
    // uint64_t on all platforms. Generally this gets poked into a
    // register and eventually used as an address, so if the
    // addressing registers are wider than pointers and the platform
    // doesn't implicitly ignore high-order bits when doing
    // addressing, we need to make sure we zext / sext based on
    // the platform's expectations.
    //
    // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html

    // Cast the pointer to intptr_t.
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");

    // If that's 64 bits, we're done.
    if (IntPtrTy->getBitWidth() == 64)
      return RValue::get(Result);

    // Otherwise, ask the codegen data what to do.
    if (getTargetHooks().extendPointerWithSExt())
      return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
    else
      return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
  }
  case Builtin::BI__builtin_setjmp: {
    // Buffer is a void**.
    Value *Buf = EmitScalarExpr(E->getArg(0));

    // Store the frame pointer to the setjmp buffer.
    Value *FrameAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
                         ConstantInt::get(Int32Ty, 0));
    Builder.CreateStore(FrameAddr, Buf);

    // Store the stack pointer to the setjmp buffer.
    Value *StackAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
    Value *StackSaveSlot =
      Builder.CreateGEP(Buf, ConstantInt::get(Int32Ty, 2));
    Builder.CreateStore(StackAddr, StackSaveSlot);

    // Call LLVM's EH setjmp, which is lightweight.
    Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
    return RValue::get(Builder.CreateCall(F, Buf));
  }
  case Builtin::BI__builtin_longjmp: {
    Value *Buf = EmitScalarExpr(E->getArg(0));
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);

    // Call LLVM's EH longjmp, which is lightweight.
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);

    // longjmp doesn't return; mark this as unreachable.
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("longjmp.cont"));

    return RValue::get(0);
  }
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_swap:
    llvm_unreachable("Shouldn't make it through sema");
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);

  // Clang extensions: not overloaded yet.
  case Builtin::BI__sync_fetch_and_min:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
  case Builtin::BI__sync_fetch_and_max:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
  case Builtin::BI__sync_fetch_and_umin:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
  case Builtin::BI__sync_fetch_and_umax:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);

  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
                                llvm::Instruction::Add);
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
                                llvm::Instruction::Sub);
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
                                llvm::Instruction::And);
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
                                llvm::Instruction::Or);
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
                                llvm::Instruction::Xor);

  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16: {
    QualType T = E->getType();
    llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

    llvm::IntegerType *IntType =
      llvm::IntegerType::get(getLLVMContext(),
                             getContext().getTypeSize(T));
    llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

    Value *Args[3];
    Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
    Args[1] = EmitScalarExpr(E->getArg(1));
    llvm::Type *ValueType = Args[1]->getType();
    Args[1] = EmitToInt(*this, Args[1], T, IntType);
    Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);

    Value *Result = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
                                                llvm::SequentiallyConsistent);
    Result = EmitFromInt(*this, Result, T, ValueType);
    return RValue::get(Result);
  }

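  // The __sync_bool variants below differ from __sync_val_compare_and_swap
  // above only in what they return: the value loaded by the cmpxchg is
  // compared against the expected value and that flag is returned instead,
  // e.g. roughly (for the 4-byte variant):
  //   %prev = cmpxchg i32* %p, i32 %old, i32 %new seq_cst
  //   %ok = icmp eq i32 %prev, %old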
  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16: {
    QualType T = E->getArg(1)->getType();
    llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

    llvm::IntegerType *IntType =
      llvm::IntegerType::get(getLLVMContext(),
                             getContext().getTypeSize(T));
    llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

    Value *Args[3];
    Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
    Args[1] = EmitToInt(*this, EmitScalarExpr(E->getArg(1)), T, IntType);
    Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);

    Value *OldVal = Args[1];
    Value *PrevVal = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
                                                 llvm::SequentiallyConsistent);
    Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
    // zext bool to int.
    Result = Builder.CreateZExt(Result, ConvertType(E->getType()));
    return RValue::get(Result);
  }

  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);

  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);

  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    QualType ElTy = E->getArg(0)->getType()->getPointeeType();
    CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
    llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
                                             StoreSize.getQuantity() * 8);
    Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
    llvm::StoreInst *Store =
      Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
    Store->setAlignment(StoreSize.getQuantity());
    Store->setAtomic(llvm::Release);
    return RValue::get(0);
  }

  case Builtin::BI__sync_synchronize: {
    // We assume this is supposed to correspond to a C++0x-style
    // sequentially-consistent fence (i.e. this is only usable for
    // synchronization, not device I/O or anything like that). This intrinsic
    // is really badly designed in the sense that in theory, there isn't
    // any way to safely use it... but in practice, it mostly works
    // to use it with non-atomic loads and stores to get acquire/release
    // semantics.
    Builder.CreateFence(llvm::SequentiallyConsistent);
    return RValue::get(0);
  }

  case Builtin::BI__c11_atomic_is_lock_free:
  case Builtin::BI__atomic_is_lock_free: {
    // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
    // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
    // _Atomic(T) is always properly-aligned.
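    // For example, __c11_atomic_is_lock_free(sizeof(_Atomic(int))) would emit
    // a call along the lines of (exact types depend on the target's size_t
    // and bool conventions):
    //   call zeroext i1 @__atomic_is_lock_free(i64 4, i8* null)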
    const char *LibCallName = "__atomic_is_lock_free";
    CallArgList Args;
    Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
             getContext().getSizeType());
    if (BuiltinID == Builtin::BI__atomic_is_lock_free)
      Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
               getContext().VoidPtrTy);
    else
      Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
               getContext().VoidPtrTy);
    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(E->getType(), Args,
                                               FunctionType::ExtInfo(),
                                               RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    return EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
  }

  case Builtin::BI__atomic_test_and_set: {
    // Look at the argument type to determine whether this is a volatile
    // operation. The parameter type is always volatile.
    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
    bool Volatile =
        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();

    Value *Ptr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
    Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
    Value *NewVal = Builder.getInt8(1);
    Value *Order = EmitScalarExpr(E->getArg(1));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      AtomicRMWInst *Result = 0;
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::Monotonic);
        break;
      case 1:  // memory_order_consume
      case 2:  // memory_order_acquire
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::Acquire);
        break;
      case 3:  // memory_order_release
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::Release);
        break;
      case 4:  // memory_order_acq_rel
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::AcquireRelease);
        break;
      case 5:  // memory_order_seq_cst
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::SequentiallyConsistent);
        break;
      }
      Result->setVolatile(Volatile);
      return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
    }

    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    llvm::BasicBlock *BBs[5] = {
      createBasicBlock("monotonic", CurFn),
      createBasicBlock("acquire", CurFn),
      createBasicBlock("release", CurFn),
      createBasicBlock("acqrel", CurFn),
      createBasicBlock("seqcst", CurFn)
    };
    llvm::AtomicOrdering Orders[5] = {
      llvm::Monotonic, llvm::Acquire, llvm::Release,
      llvm::AcquireRelease, llvm::SequentiallyConsistent
    };

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);

    Builder.SetInsertPoint(ContBB);
    PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");

    for (unsigned i = 0; i < 5; ++i) {
      Builder.SetInsertPoint(BBs[i]);
      AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                                   Ptr, NewVal, Orders[i]);
      RMW->setVolatile(Volatile);
      Result->addIncoming(RMW, BBs[i]);
      Builder.CreateBr(ContBB);
    }

    SI->addCase(Builder.getInt32(0), BBs[0]);
    SI->addCase(Builder.getInt32(1), BBs[1]);
    SI->addCase(Builder.getInt32(2), BBs[1]);
    SI->addCase(Builder.getInt32(3), BBs[2]);
    SI->addCase(Builder.getInt32(4), BBs[3]);
    SI->addCase(Builder.getInt32(5), BBs[4]);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
  }

  case Builtin::BI__atomic_clear: {
    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
    bool Volatile =
        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();

    Value *Ptr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
    Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
    Value *NewVal = Builder.getInt8(0);
    Value *Order = EmitScalarExpr(E->getArg(1));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
      Store->setAlignment(1);
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        Store->setOrdering(llvm::Monotonic);
        break;
      case 3:  // memory_order_release
        Store->setOrdering(llvm::Release);
        break;
      case 5:  // memory_order_seq_cst
        Store->setOrdering(llvm::SequentiallyConsistent);
        break;
      }
      return RValue::get(0);
    }

    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    llvm::BasicBlock *BBs[3] = {
      createBasicBlock("monotonic", CurFn),
      createBasicBlock("release", CurFn),
      createBasicBlock("seqcst", CurFn)
    };
    llvm::AtomicOrdering Orders[3] = {
      llvm::Monotonic, llvm::Release, llvm::SequentiallyConsistent
    };

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);

    for (unsigned i = 0; i < 3; ++i) {
      Builder.SetInsertPoint(BBs[i]);
      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
      Store->setAlignment(1);
      Store->setOrdering(Orders[i]);
      Builder.CreateBr(ContBB);
    }

    SI->addCase(Builder.getInt32(0), BBs[0]);
    SI->addCase(Builder.getInt32(3), BBs[1]);
    SI->addCase(Builder.getInt32(5), BBs[2]);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(0);
  }

  case Builtin::BI__atomic_thread_fence:
  case Builtin::BI__atomic_signal_fence:
  case Builtin::BI__c11_atomic_thread_fence:
  case Builtin::BI__c11_atomic_signal_fence: {
    llvm::SynchronizationScope Scope;
    if (BuiltinID == Builtin::BI__atomic_signal_fence ||
        BuiltinID == Builtin::BI__c11_atomic_signal_fence)
      Scope = llvm::SingleThread;
    else
      Scope = llvm::CrossThread;
    Value *Order = EmitScalarExpr(E->getArg(0));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        break;
      case 1:  // memory_order_consume
      case 2:  // memory_order_acquire
        Builder.CreateFence(llvm::Acquire, Scope);
        break;
      case 3:  // memory_order_release
        Builder.CreateFence(llvm::Release, Scope);
        break;
      case 4:  // memory_order_acq_rel
        Builder.CreateFence(llvm::AcquireRelease, Scope);
        break;
      case 5:  // memory_order_seq_cst
        Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
        break;
      }
      return RValue::get(0);
    }

    llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
    AcquireBB = createBasicBlock("acquire", CurFn);
    ReleaseBB = createBasicBlock("release", CurFn);
    AcqRelBB = createBasicBlock("acqrel", CurFn);
    SeqCstBB = createBasicBlock("seqcst", CurFn);
    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);

    Builder.SetInsertPoint(AcquireBB);
    Builder.CreateFence(llvm::Acquire, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);

    Builder.SetInsertPoint(ReleaseBB);
    Builder.CreateFence(llvm::Release, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);

    Builder.SetInsertPoint(AcqRelBB);
    Builder.CreateFence(llvm::AcquireRelease, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);

    Builder.SetInsertPoint(SeqCstBB);
    Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(5), SeqCstBB);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(0);
  }

  // Library functions with special handling.
  case Builtin::BIsqrt:
  case Builtin::BIsqrtf:
  case Builtin::BIsqrtl: {
    // TODO: there is currently no set of optimizer flags
    // sufficient for us to rewrite sqrt to @llvm.sqrt.
    // -fmath-errno=0 is not good enough; we need finiteness.
    // We could probably precondition the call with an ult
    // against 0, but is that worth the complexity?
    break;
  }

  case Builtin::BIpow:
  case Builtin::BIpowf:
  case Builtin::BIpowl: {
    // Rewrite pow to intrinsic if allowed.
    if (!FD->hasAttr<ConstAttr>())
      break;
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::pow, ArgType);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent));
  }

  case Builtin::BIfma:
  case Builtin::BIfmaf:
  case Builtin::BIfmal:
  case Builtin::BI__builtin_fma:
  case Builtin::BI__builtin_fmaf:
  case Builtin::BI__builtin_fmal: {
    // Rewrite fma to intrinsic.
    Value *FirstArg = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = FirstArg->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::fma, ArgType);
    return RValue::get(Builder.CreateCall3(F, FirstArg,
                                           EmitScalarExpr(E->getArg(1)),
                                           EmitScalarExpr(E->getArg(2))));
  }

  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl: {
    LLVMContext &C = CGM.getLLVMContext();

    Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgTy = Arg->getType();
    if (ArgTy->isPPC_FP128Ty())
      break; // FIXME: I'm not sure what the right implementation is here.
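    // A sketch of the lowering below, for double: reinterpret the bits as an
    // integer and test the sign bit, i.e. roughly:
    //   %bits = bitcast double %x to i64
    //   %signbit = icmp slt i64 %bits, 0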
    int ArgWidth = ArgTy->getPrimitiveSizeInBits();
    llvm::Type *ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
    Value *BCArg = Builder.CreateBitCast(Arg, ArgIntTy);
    Value *ZeroCmp = llvm::Constant::getNullValue(ArgIntTy);
    Value *Result = Builder.CreateICmpSLT(BCArg, ZeroCmp);
    return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_annotation: {
    llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
    llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
                                      AnnVal->getType());

    // Get the annotation string, go through casts. Sema requires this to be a
    // non-wide string literal, potentially cast, so the cast<> is safe.
    const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
    StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
    return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
  }
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll: {

    // We translate all of these builtins from expressions of the form:
    //   int x = ..., y = ..., carryin = ..., carryout, result;
    //   result = __builtin_addc(x, y, carryin, &carryout);
    //
    // to LLVM IR of the form:
    //
    //   %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
    //   %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
    //   %carry1 = extractvalue {i32, i1} %tmp1, 1
    //   %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
    //                                                       i32 %carryin)
    //   %result = extractvalue {i32, i1} %tmp2, 0
    //   %carry2 = extractvalue {i32, i1} %tmp2, 1
    //   %tmp3 = or i1 %carry1, %carry2
    //   %tmp4 = zext i1 %tmp3 to i32
    //   store i32 %tmp4, i32* %carryout

    // Scalarize our inputs.
    llvm::Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Value *Y = EmitScalarExpr(E->getArg(1));
    llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
    std::pair<llvm::Value*, unsigned> CarryOutPtr =
      EmitPointerWithAlignment(E->getArg(3));

    // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
    llvm::Intrinsic::ID IntrinsicId;
    switch (BuiltinID) {
    default: llvm_unreachable("Unknown multiprecision builtin id.");
    case Builtin::BI__builtin_addcs:
    case Builtin::BI__builtin_addc:
    case Builtin::BI__builtin_addcl:
    case Builtin::BI__builtin_addcll:
      IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
      break;
    case Builtin::BI__builtin_subcs:
    case Builtin::BI__builtin_subc:
    case Builtin::BI__builtin_subcl:
    case Builtin::BI__builtin_subcll:
      IntrinsicId = llvm::Intrinsic::usub_with_overflow;
      break;
    }

    // Construct our resulting LLVM IR expression.
    llvm::Value *Carry1;
    llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
                                              X, Y, Carry1);
    llvm::Value *Carry2;
    llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
                                              Sum1, Carryin, Carry2);
    llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
                                               X->getType());
    llvm::StoreInst *CarryOutStore = Builder.CreateStore(CarryOut,
                                                         CarryOutPtr.first);
    CarryOutStore->setAlignment(CarryOutPtr.second);
    return RValue::get(Sum2);
  }
  case Builtin::BI__noop:
    return RValue::get(0);
  }

  // If this is an alias for a lib function (e.g. __builtin_sin), emit
  // the call using the normal call path, but using the unmangled
  // version of the function name.
  if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
    return emitLibraryCall(*this, FD, E,
                           CGM.getBuiltinLibFunction(FD, BuiltinID));

  // If this is a predefined lib function (e.g. malloc), emit the call
  // using exactly the normal call path.
  if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
    return emitLibraryCall(*this, FD, E, EmitScalarExpr(E->getCallee()));

  // See if we have a target specific intrinsic.
  const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
  Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
  if (const char *Prefix =
      llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch()))
    IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);

  if (IntrinsicID != Intrinsic::not_intrinsic) {
    SmallVector<Value*, 16> Args;

    // Find out if any arguments are required to be integer constant
    // expressions.
    unsigned ICEArguments = 0;
    ASTContext::GetBuiltinTypeError Error;
    getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
    assert(Error == ASTContext::GE_None && "Should not codegen an error");

    Function *F = CGM.getIntrinsic(IntrinsicID);
    llvm::FunctionType *FTy = F->getFunctionType();

    for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
      Value *ArgValue;
      // If this is a normal argument, just emit it as a scalar.
      if ((ICEArguments & (1 << i)) == 0) {
        ArgValue = EmitScalarExpr(E->getArg(i));
      } else {
        // If this is required to be a constant, constant fold it so that we
        // know that the generated intrinsic gets a ConstantInt.
        llvm::APSInt Result;
        bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
        assert(IsConst && "Constant arg isn't actually constant?");
        (void)IsConst;
        ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result);
      }

      // If the intrinsic arg type is different from the builtin arg type
      // we need to do a bit cast.
      llvm::Type *PTy = FTy->getParamType(i);
      if (PTy != ArgValue->getType()) {
        assert(ArgValue->getType()->canLosslesslyBitCastTo(PTy) &&
               "Must be able to losslessly bit cast to param");
        ArgValue = Builder.CreateBitCast(ArgValue, PTy);
      }

      Args.push_back(ArgValue);
    }

    Value *V = Builder.CreateCall(F, Args);
    QualType BuiltinRetType = E->getType();

    llvm::Type *RetTy = VoidTy;
    if (!BuiltinRetType->isVoidType())
      RetTy = ConvertType(BuiltinRetType);

    if (RetTy != V->getType()) {
      assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
             "Must be able to losslessly bit cast result type");
      V = Builder.CreateBitCast(V, RetTy);
    }

    return RValue::get(V);
  }

  // See if we have a target specific builtin that needs to be lowered.
  if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
    return RValue::get(V);

  ErrorUnsupported(E, "builtin function");

  // Unknown builtin, for now just dump it out and return undef.
  return GetUndefRValue(E->getType());
}

Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E) {
  switch (getTarget().getTriple().getArch()) {
  case llvm::Triple::aarch64:
    return EmitAArch64BuiltinExpr(BuiltinID, E);
  case llvm::Triple::arm:
  case llvm::Triple::thumb:
    return EmitARMBuiltinExpr(BuiltinID, E);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return EmitX86BuiltinExpr(BuiltinID, E);
  case llvm::Triple::ppc:
  case llvm::Triple::ppc64:
    return EmitPPCBuiltinExpr(BuiltinID, E);
  default:
    return 0;
  }
}

static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
                                     NeonTypeFlags TypeFlags) {
  int IsQuad = TypeFlags.isQuad();
  switch (TypeFlags.getEltType()) {
  case NeonTypeFlags::Int8:
  case NeonTypeFlags::Poly8:
    return llvm::VectorType::get(CGF->Int8Ty, 8 << IsQuad);
  case NeonTypeFlags::Int16:
  case NeonTypeFlags::Poly16:
  case NeonTypeFlags::Float16:
    return llvm::VectorType::get(CGF->Int16Ty, 4 << IsQuad);
  case NeonTypeFlags::Int32:
    return llvm::VectorType::get(CGF->Int32Ty, 2 << IsQuad);
  case NeonTypeFlags::Int64:
    return llvm::VectorType::get(CGF->Int64Ty, 1 << IsQuad);
  case NeonTypeFlags::Float32:
    return llvm::VectorType::get(CGF->FloatTy, 2 << IsQuad);
  }
  llvm_unreachable("Invalid NeonTypeFlags element type!");
}

/// EmitNeonSplat - Broadcast element C of vector V to every lane.
Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
  unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
  Value* SV = llvm::ConstantVector::getSplat(nElts, C);
  return Builder.CreateShuffleVector(V, V, SV, "lane");
}

/// EmitNeonCall - Bitcast the operands to the parameter types of F (building
/// a constant shift-amount vector for operand 'shift', if requested), then
/// emit the call.
Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
                                     const char *name,
                                     unsigned shift, bool rightshift) {
  unsigned j = 0;
  for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
       ai != ae; ++ai, ++j)
    if (shift > 0 && shift == j)
      Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
    else
      Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);

  return Builder.CreateCall(F, Ops, name);
}

/// EmitNeonShiftVector - Build a constant shift-amount vector of type Ty,
/// negating the amount when 'neg' is set (the NEON shift intrinsics encode
/// right shifts as negative shift amounts).
Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
                                            bool neg) {
  int SV = cast<ConstantInt>(V)->getSExtValue();

  llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
  llvm::Constant *C =
      ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
  return llvm::ConstantVector::getSplat(VTy->getNumElements(), C);
}

/// EmitPointerWithAlignment - Given an expression with a pointer type, emit
/// the pointer value and determine the alignment of the type it points to.
/// Skip over implicit casts.
std::pair<llvm::Value*, unsigned>
CodeGenFunction::EmitPointerWithAlignment(const Expr *Addr) {
  assert(Addr->getType()->isPointerType());
  Addr = Addr->IgnoreParens();
  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Addr)) {
    if ((ICE->getCastKind() == CK_BitCast || ICE->getCastKind() == CK_NoOp) &&
        ICE->getSubExpr()->getType()->isPointerType()) {
      std::pair<llvm::Value*, unsigned> Ptr =
        EmitPointerWithAlignment(ICE->getSubExpr());
      Ptr.first = Builder.CreateBitCast(Ptr.first,
                                        ConvertType(Addr->getType()));
      return Ptr;
    } else if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
      LValue LV = EmitLValue(ICE->getSubExpr());
      unsigned Align = LV.getAlignment().getQuantity();
      if (!Align) {
        // FIXME: Once LValues are fixed to always set alignment,
        // zap this code.
        QualType PtTy = ICE->getSubExpr()->getType();
        if (!PtTy->isIncompleteType())
          Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
        else
          Align = 1;
      }
      return std::make_pair(LV.getAddress(), Align);
    }
  }
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Addr)) {
    if (UO->getOpcode() == UO_AddrOf) {
      LValue LV = EmitLValue(UO->getSubExpr());
      unsigned Align = LV.getAlignment().getQuantity();
      if (!Align) {
        // FIXME: Once LValues are fixed to always set alignment,
        // zap this code.
        QualType PtTy = UO->getSubExpr()->getType();
        if (!PtTy->isIncompleteType())
          Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
        else
          Align = 1;
      }
      return std::make_pair(LV.getAddress(), Align);
    }
  }

  unsigned Align = 1;
  QualType PtTy = Addr->getType()->getPointeeType();
  if (!PtTy->isIncompleteType())
    Align = getContext().getTypeAlignInChars(PtTy).getQuantity();

  return std::make_pair(EmitScalarExpr(Addr), Align);
}

Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
                                               const CallExpr *E) {
  if (BuiltinID == AArch64::BI__clear_cache) {
    assert(E->getNumArgs() == 2 &&
           "Variadic __clear_cache slipped through on AArch64");

    const FunctionDecl *FD = E->getDirectCallee();
    SmallVector<Value *, 2> Ops;
    for (unsigned i = 0; i < E->getNumArgs(); i++)
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
    llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
    llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
    StringRef Name = FD->getName();
    return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
  }

  return 0;
}

Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  if (BuiltinID == ARM::BI__clear_cache) {
    const FunctionDecl *FD = E->getDirectCallee();
    // Oddly, people write this call without args on occasion and gcc accepts
    // it - it's also marked as varargs in the description file.
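    // Emit however many arguments were actually written and forward them to
    // the __clear_cache library routine unchanged.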
    SmallVector<Value*, 2> Ops;
    for (unsigned i = 0; i < E->getNumArgs(); i++)
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
    llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
    llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
    StringRef Name = FD->getName();
    return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
  }

  if (BuiltinID == ARM::BI__builtin_arm_ldrexd) {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);

    Value *LdPtr = EmitScalarExpr(E->getArg(0));
    Value *Val = Builder.CreateCall(F, LdPtr, "ldrexd");

    // Combine the two i32 halves returned by the intrinsic into one i64.
    Value *Val0 = Builder.CreateExtractValue(Val, 1);
    Value *Val1 = Builder.CreateExtractValue(Val, 0);
    Val0 = Builder.CreateZExt(Val0, Int64Ty);
    Val1 = Builder.CreateZExt(Val1, Int64Ty);

    Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
    Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
    return Builder.CreateOr(Val, Val1);
  }

  if (BuiltinID == ARM::BI__builtin_arm_strexd) {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_strexd);
    llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, NULL);

    // Spill the i64 operand to a stack slot and reload it as two i32 halves.
    Value *One = llvm::ConstantInt::get(Int32Ty, 1);
    Value *Tmp = Builder.CreateAlloca(Int64Ty, One);
    Value *Val = EmitScalarExpr(E->getArg(0));
    Builder.CreateStore(Val, Tmp);

    Value *LdPtr = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
    Val = Builder.CreateLoad(LdPtr);

    Value *Arg0 = Builder.CreateExtractValue(Val, 0);
    Value *Arg1 = Builder.CreateExtractValue(Val, 1);
    Value *StPtr = EmitScalarExpr(E->getArg(1));
    return Builder.CreateCall3(F, Arg0, Arg1, StPtr, "strexd");
  }

  SmallVector<Value*, 4> Ops;
  llvm::Value *Align = 0;
  for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
    if (i == 0) {
      switch (BuiltinID) {
      case ARM::BI__builtin_neon_vld1_v:
      case ARM::BI__builtin_neon_vld1q_v:
      case ARM::BI__builtin_neon_vld1q_lane_v:
      case ARM::BI__builtin_neon_vld1_lane_v:
      case ARM::BI__builtin_neon_vld1_dup_v:
      case ARM::BI__builtin_neon_vld1q_dup_v:
      case ARM::BI__builtin_neon_vst1_v:
      case ARM::BI__builtin_neon_vst1q_v:
      case ARM::BI__builtin_neon_vst1q_lane_v:
      case ARM::BI__builtin_neon_vst1_lane_v:
      case ARM::BI__builtin_neon_vst2_v:
      case ARM::BI__builtin_neon_vst2q_v:
      case ARM::BI__builtin_neon_vst2_lane_v:
      case ARM::BI__builtin_neon_vst2q_lane_v:
      case ARM::BI__builtin_neon_vst3_v:
      case ARM::BI__builtin_neon_vst3q_v:
      case ARM::BI__builtin_neon_vst3_lane_v:
      case ARM::BI__builtin_neon_vst3q_lane_v:
      case ARM::BI__builtin_neon_vst4_v:
      case ARM::BI__builtin_neon_vst4q_v:
      case ARM::BI__builtin_neon_vst4_lane_v:
      case ARM::BI__builtin_neon_vst4q_lane_v:
        // Get the alignment for the argument in addition to the value;
        // we'll use it later.
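        // (The NEON vldN/vstN intrinsics take the alignment as an explicit
        // i32 operand, so it is threaded through as 'Align'.)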
        std::pair<llvm::Value*, unsigned> Src =
          EmitPointerWithAlignment(E->getArg(0));
        Ops.push_back(Src.first);
        Align = Builder.getInt32(Src.second);
        continue;
      }
    }
    if (i == 1) {
      switch (BuiltinID) {
      case ARM::BI__builtin_neon_vld2_v:
      case ARM::BI__builtin_neon_vld2q_v:
      case ARM::BI__builtin_neon_vld3_v:
      case ARM::BI__builtin_neon_vld3q_v:
      case ARM::BI__builtin_neon_vld4_v:
      case ARM::BI__builtin_neon_vld4q_v:
      case ARM::BI__builtin_neon_vld2_lane_v:
      case ARM::BI__builtin_neon_vld2q_lane_v:
      case ARM::BI__builtin_neon_vld3_lane_v:
      case ARM::BI__builtin_neon_vld3q_lane_v:
      case ARM::BI__builtin_neon_vld4_lane_v:
      case ARM::BI__builtin_neon_vld4q_lane_v:
      case ARM::BI__builtin_neon_vld2_dup_v:
      case ARM::BI__builtin_neon_vld3_dup_v:
      case ARM::BI__builtin_neon_vld4_dup_v:
        // Get the alignment for the argument in addition to the value;
        // we'll use it later.
        std::pair<llvm::Value*, unsigned> Src =
          EmitPointerWithAlignment(E->getArg(1));
        Ops.push_back(Src.first);
        Align = Builder.getInt32(Src.second);
        continue;
      }
    }
    Ops.push_back(EmitScalarExpr(E->getArg(i)));
  }

  // vget_lane and vset_lane are not overloaded and do not have an extra
  // argument that specifies the vector type.
  switch (BuiltinID) {
  default: break;
  case ARM::BI__builtin_neon_vget_lane_i8:
  case ARM::BI__builtin_neon_vget_lane_i16:
  case ARM::BI__builtin_neon_vget_lane_i32:
  case ARM::BI__builtin_neon_vget_lane_i64:
  case ARM::BI__builtin_neon_vget_lane_f32:
  case ARM::BI__builtin_neon_vgetq_lane_i8:
  case ARM::BI__builtin_neon_vgetq_lane_i16:
  case ARM::BI__builtin_neon_vgetq_lane_i32:
  case ARM::BI__builtin_neon_vgetq_lane_i64:
  case ARM::BI__builtin_neon_vgetq_lane_f32:
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  case ARM::BI__builtin_neon_vset_lane_i8:
  case ARM::BI__builtin_neon_vset_lane_i16:
  case ARM::BI__builtin_neon_vset_lane_i32:
  case ARM::BI__builtin_neon_vset_lane_i64:
  case ARM::BI__builtin_neon_vset_lane_f32:
  case ARM::BI__builtin_neon_vsetq_lane_i8:
  case ARM::BI__builtin_neon_vsetq_lane_i16:
  case ARM::BI__builtin_neon_vsetq_lane_i32:
  case ARM::BI__builtin_neon_vsetq_lane_i64:
  case ARM::BI__builtin_neon_vsetq_lane_f32:
    Ops.push_back(EmitScalarExpr(E->getArg(2)));
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
  }

  // Get the last argument, which specifies the vector type.
  llvm::APSInt Result;
  const Expr *Arg = E->getArg(E->getNumArgs()-1);
  if (!Arg->isIntegerConstantExpr(Result, getContext()))
    return 0;

  if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
      BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
    // Determine the overloaded type of this builtin.
    llvm::Type *Ty;
    if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
      Ty = FloatTy;
    else
      Ty = DoubleTy;

    // Determine whether this is an unsigned conversion or not.
    bool usgn = Result.getZExtValue() == 1;
    unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;

    // Call the appropriate intrinsic.
    Function *F = CGM.getIntrinsic(Int, Ty);
    return Builder.CreateCall(F, Ops, "vcvtr");
  }

  // Determine the type of this overloaded NEON intrinsic.
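  // The constant we just read packs the element type, signedness, and
  // quad-ness of the operation into a single NeonTypeFlags value.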
  NeonTypeFlags Type(Result.getZExtValue());
  bool usgn = Type.isUnsigned();
  bool quad = Type.isQuad();
  bool rightShift = false;

  llvm::VectorType *VTy = GetNeonType(this, Type);
  llvm::Type *Ty = VTy;
  if (!Ty)
    return 0;

  unsigned Int;
  switch (BuiltinID) {
  default: return 0;
  case ARM::BI__builtin_neon_vbsl_v:
  case ARM::BI__builtin_neon_vbslq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vbsl, Ty),
                        Ops, "vbsl");
  case ARM::BI__builtin_neon_vabd_v:
  case ARM::BI__builtin_neon_vabdq_v:
    Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
  case ARM::BI__builtin_neon_vabs_v:
  case ARM::BI__builtin_neon_vabsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, Ty),
                        Ops, "vabs");
  case ARM::BI__builtin_neon_vaddhn_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vaddhn, Ty),
                        Ops, "vaddhn");
  case ARM::BI__builtin_neon_vcale_v:
    std::swap(Ops[0], Ops[1]);
    // Fall through: vcale is vcage with the operands swapped.
  case ARM::BI__builtin_neon_vcage_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacged);
    return EmitNeonCall(F, Ops, "vcage");
  }
  case ARM::BI__builtin_neon_vcaleq_v:
    std::swap(Ops[0], Ops[1]);
    // Fall through.
  case ARM::BI__builtin_neon_vcageq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq);
    return EmitNeonCall(F, Ops, "vcage");
  }
  case ARM::BI__builtin_neon_vcalt_v:
    std::swap(Ops[0], Ops[1]);
    // Fall through.
  case ARM::BI__builtin_neon_vcagt_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtd);
    return EmitNeonCall(F, Ops, "vcagt");
  }
  case ARM::BI__builtin_neon_vcaltq_v:
    std::swap(Ops[0], Ops[1]);
    // Fall through.
  case ARM::BI__builtin_neon_vcagtq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq);
    return EmitNeonCall(F, Ops, "vcagt");
  }
  case ARM::BI__builtin_neon_vcls_v:
  case ARM::BI__builtin_neon_vclsq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcls, Ty);
    return EmitNeonCall(F, Ops, "vcls");
  }
  case ARM::BI__builtin_neon_vclz_v:
  case ARM::BI__builtin_neon_vclzq_v: {
    // Generate target-independent intrinsic; also need to add second argument
    // for whether or not clz of zero is undefined; on ARM it isn't.
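    // E.g. vclz_s32 becomes @llvm.ctlz.v2i32(%x, i1 false), since ARM's CLZ
    // of zero is well-defined.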
    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ty);
    Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
    return EmitNeonCall(F, Ops, "vclz");
  }
  case ARM::BI__builtin_neon_vcnt_v:
  case ARM::BI__builtin_neon_vcntq_v: {
    // Generate target-independent intrinsic.
    Function *F = CGM.getIntrinsic(Intrinsic::ctpop, Ty);
    return EmitNeonCall(F, Ops, "vctpop");
  }
  case ARM::BI__builtin_neon_vcvt_f16_v: {
    assert(Type.getEltType() == NeonTypeFlags::Float16 && !quad &&
           "unexpected vcvt_f16_v builtin");
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvtfp2hf);
    return EmitNeonCall(F, Ops, "vcvt");
  }
  case ARM::BI__builtin_neon_vcvt_f32_f16: {
    assert(Type.getEltType() == NeonTypeFlags::Float16 && !quad &&
           "unexpected vcvt_f32_f16 builtin");
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvthf2fp);
    return EmitNeonCall(F, Ops, "vcvt");
  }
  case ARM::BI__builtin_neon_vcvt_f32_v:
  case ARM::BI__builtin_neon_vcvtq_f32_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
    return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  case ARM::BI__builtin_neon_vcvt_s32_v:
  case ARM::BI__builtin_neon_vcvt_u32_v:
  case ARM::BI__builtin_neon_vcvtq_s32_v:
  case ARM::BI__builtin_neon_vcvtq_u32_v: {
    llvm::Type *FloatTy =
      GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
    Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
    return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
                : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
  }
  case ARM::BI__builtin_neon_vcvt_n_f32_v:
  case ARM::BI__builtin_neon_vcvtq_n_f32_v: {
    llvm::Type *FloatTy =
      GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
    llvm::Type *Tys[2] = { FloatTy, Ty };
    Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp
               : Intrinsic::arm_neon_vcvtfxs2fp;
    Function *F = CGM.getIntrinsic(Int, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case ARM::BI__builtin_neon_vcvt_n_s32_v:
  case ARM::BI__builtin_neon_vcvt_n_u32_v:
  case ARM::BI__builtin_neon_vcvtq_n_s32_v:
  case ARM::BI__builtin_neon_vcvtq_n_u32_v: {
    llvm::Type *FloatTy =
      GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
    llvm::Type *Tys[2] = { Ty, FloatTy };
    Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu
               : Intrinsic::arm_neon_vcvtfp2fxs;
    Function *F = CGM.getIntrinsic(Int, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case ARM::BI__builtin_neon_vext_v:
  case ARM::BI__builtin_neon_vextq_v: {
    int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
    SmallVector<Constant*, 16> Indices;
    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
      Indices.push_back(ConstantInt::get(Int32Ty, i+CV));

    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Value *SV = llvm::ConstantVector::get(Indices);
    return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext");
  }
  case ARM::BI__builtin_neon_vhadd_v:
  case ARM::BI__builtin_neon_vhaddq_v:
    Int = usgn ? Intrinsic::arm_neon_vhaddu : Intrinsic::arm_neon_vhadds;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhadd");
  case ARM::BI__builtin_neon_vhsub_v:
  case ARM::BI__builtin_neon_vhsubq_v:
    Int = usgn ?
          Intrinsic::arm_neon_vhsubu : Intrinsic::arm_neon_vhsubs;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhsub");
  case ARM::BI__builtin_neon_vld1_v:
  case ARM::BI__builtin_neon_vld1q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty),
                        Ops, "vld1");
  case ARM::BI__builtin_neon_vld1q_lane_v:
    // Handle 64-bit integer elements as a special case.  Use shuffles of
    // one-element vectors to avoid poor code for i64 in the backend.
    if (VTy->getElementType()->isIntegerTy(64)) {
      // Extract the other lane.
      Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
      int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
      Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
      Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
      // Load the value as a one-element vector.
      Ty = llvm::VectorType::get(VTy->getElementType(), 1);
      Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty);
      Value *Ld = Builder.CreateCall2(F, Ops[0], Align);
      // Combine them.
      SmallVector<Constant*, 2> Indices;
      Indices.push_back(ConstantInt::get(Int32Ty, 1-Lane));
      Indices.push_back(ConstantInt::get(Int32Ty, Lane));
      SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane");
    }
    // fall through
  case ARM::BI__builtin_neon_vld1_lane_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    LoadInst *Ld = Builder.CreateLoad(Ops[0]);
    Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
    return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
  }
  case ARM::BI__builtin_neon_vld1_dup_v:
  case ARM::BI__builtin_neon_vld1q_dup_v: {
    Value *V = UndefValue::get(Ty);
    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    LoadInst *Ld = Builder.CreateLoad(Ops[0]);
    Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
    Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
    return EmitNeonSplat(Ops[0], CI);
  }
  case ARM::BI__builtin_neon_vld2_v:
  case ARM::BI__builtin_neon_vld2q_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, Ty);
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld2");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld3_v:
  case ARM::BI__builtin_neon_vld3q_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, Ty);
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld3");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld4_v:
  case ARM::BI__builtin_neon_vld4q_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, Ty);
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld4");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld2_lane_v:
  case ARM::BI__builtin_neon_vld2q_lane_v: {
    Function *F =
        CGM.getIntrinsic(Intrinsic::arm_neon_vld2lane, Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops.push_back(Align);
    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld3_lane_v:
  case ARM::BI__builtin_neon_vld3q_lane_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3lane, Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
    Ops.push_back(Align);
    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld4_lane_v:
  case ARM::BI__builtin_neon_vld4q_lane_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4lane, Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
    Ops[5] = Builder.CreateBitCast(Ops[5], Ty);
    Ops.push_back(Align);
    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld2_dup_v:
  case ARM::BI__builtin_neon_vld3_dup_v:
  case ARM::BI__builtin_neon_vld4_dup_v: {
    // Handle 64-bit elements as a special case.  There is no "dup" needed.
    if (VTy->getElementType()->getPrimitiveSizeInBits() == 64) {
      switch (BuiltinID) {
      case ARM::BI__builtin_neon_vld2_dup_v:
        Int = Intrinsic::arm_neon_vld2;
        break;
      case ARM::BI__builtin_neon_vld3_dup_v:
        Int = Intrinsic::arm_neon_vld3;
        break;
      case ARM::BI__builtin_neon_vld4_dup_v:
        Int = Intrinsic::arm_neon_vld4;
        break;
      default: llvm_unreachable("unknown vld_dup intrinsic?");
      }
      Function *F = CGM.getIntrinsic(Int, Ty);
      Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld_dup");
      Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
      Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
      return Builder.CreateStore(Ops[1], Ops[0]);
    }
    switch (BuiltinID) {
    case ARM::BI__builtin_neon_vld2_dup_v:
      Int = Intrinsic::arm_neon_vld2lane;
      break;
    case ARM::BI__builtin_neon_vld3_dup_v:
      Int = Intrinsic::arm_neon_vld3lane;
      break;
    case ARM::BI__builtin_neon_vld4_dup_v:
      Int = Intrinsic::arm_neon_vld4lane;
      break;
    default: llvm_unreachable("unknown vld_dup intrinsic?");
    }
    Function *F = CGM.getIntrinsic(Int, Ty);
    llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());

    SmallVector<Value*, 6> Args;
    Args.push_back(Ops[1]);
    Args.append(STy->getNumElements(), UndefValue::get(Ty));

    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
    Args.push_back(CI);
    Args.push_back(Align);

    Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
    // Splat lane 0 to all elements in each vector of the result.
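    // The lane load above filled only lane 0 of each sub-vector in the
    // returned aggregate; broadcast that lane across the rest.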
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Value *Val = Builder.CreateExtractValue(Ops[1], i);
      Value *Elt = Builder.CreateBitCast(Val, Ty);
      Elt = EmitNeonSplat(Elt, CI);
      Elt = Builder.CreateBitCast(Elt, Val->getType());
      Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i);
    }
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vmax_v:
  case ARM::BI__builtin_neon_vmaxq_v:
    Int = usgn ? Intrinsic::arm_neon_vmaxu : Intrinsic::arm_neon_vmaxs;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
  case ARM::BI__builtin_neon_vmin_v:
  case ARM::BI__builtin_neon_vminq_v:
    Int = usgn ? Intrinsic::arm_neon_vminu : Intrinsic::arm_neon_vmins;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
  case ARM::BI__builtin_neon_vmovl_v: {
    llvm::Type *DTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
    if (usgn)
      return Builder.CreateZExt(Ops[0], Ty, "vmovl");
    return Builder.CreateSExt(Ops[0], Ty, "vmovl");
  }
  case ARM::BI__builtin_neon_vmovn_v: {
    llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
    return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
  }
  case ARM::BI__builtin_neon_vmul_v:
  case ARM::BI__builtin_neon_vmulq_v:
    assert(Type.isPoly() && "vmul builtin only supported for polynomial types");
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmulp, Ty),
                        Ops, "vmul");
  case ARM::BI__builtin_neon_vmull_v:
    Int = usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
    Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
  case ARM::BI__builtin_neon_vfma_v:
  case ARM::BI__builtin_neon_vfmaq_v: {
    Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);

    // NEON intrinsic puts accumulator first, unlike the LLVM fma.
    return Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
  }
  case ARM::BI__builtin_neon_vpadal_v:
  case ARM::BI__builtin_neon_vpadalq_v: {
    Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals;
    // The source operand type has twice as many elements of half the size.
    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
    llvm::Type *EltTy =
      llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
    llvm::Type *NarrowTy =
      llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
    llvm::Type *Tys[2] = { Ty, NarrowTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpadal");
  }
  case ARM::BI__builtin_neon_vpadd_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vpadd, Ty),
                        Ops, "vpadd");
  case ARM::BI__builtin_neon_vpaddl_v:
  case ARM::BI__builtin_neon_vpaddlq_v: {
    Int = usgn ? Intrinsic::arm_neon_vpaddlu : Intrinsic::arm_neon_vpaddls;
    // The source operand type has twice as many elements of half the size.
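    // E.g. vpaddl_s8 pairwise-adds a <8 x i8> source into a <4 x i16> result.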
    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
    llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
    llvm::Type *NarrowTy =
      llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
    llvm::Type *Tys[2] = { Ty, NarrowTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
  }
  case ARM::BI__builtin_neon_vpmax_v:
    Int = usgn ? Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
  case ARM::BI__builtin_neon_vpmin_v:
    Int = usgn ? Intrinsic::arm_neon_vpminu : Intrinsic::arm_neon_vpmins;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
  case ARM::BI__builtin_neon_vqabs_v:
  case ARM::BI__builtin_neon_vqabsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqabs, Ty),
                        Ops, "vqabs");
  case ARM::BI__builtin_neon_vqadd_v:
  case ARM::BI__builtin_neon_vqaddq_v:
    Int = usgn ? Intrinsic::arm_neon_vqaddu : Intrinsic::arm_neon_vqadds;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqadd");
  case ARM::BI__builtin_neon_vqdmlal_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlal, Ty),
                        Ops, "vqdmlal");
  case ARM::BI__builtin_neon_vqdmlsl_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlsl, Ty),
                        Ops, "vqdmlsl");
  case ARM::BI__builtin_neon_vqdmulh_v:
  case ARM::BI__builtin_neon_vqdmulhq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmulh, Ty),
                        Ops, "vqdmulh");
  case ARM::BI__builtin_neon_vqdmull_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, Ty),
                        Ops, "vqdmull");
  case ARM::BI__builtin_neon_vqmovn_v:
    Int = usgn ? Intrinsic::arm_neon_vqmovnu : Intrinsic::arm_neon_vqmovns;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqmovn");
  case ARM::BI__builtin_neon_vqmovun_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqmovnsu, Ty),
                        Ops, "vqmovun");
  case ARM::BI__builtin_neon_vqneg_v:
  case ARM::BI__builtin_neon_vqnegq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqneg, Ty),
                        Ops, "vqneg");
  case ARM::BI__builtin_neon_vqrdmulh_v:
  case ARM::BI__builtin_neon_vqrdmulhq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrdmulh, Ty),
                        Ops, "vqrdmulh");
  case ARM::BI__builtin_neon_vqrshl_v:
  case ARM::BI__builtin_neon_vqrshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vqrshiftu : Intrinsic::arm_neon_vqrshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshl");
  case ARM::BI__builtin_neon_vqrshrn_n_v:
    Int =
      usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
                        1, true);
  case ARM::BI__builtin_neon_vqrshrun_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
                        Ops, "vqrshrun_n", 1, true);
  case ARM::BI__builtin_neon_vqshl_v:
  case ARM::BI__builtin_neon_vqshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl");
  case ARM::BI__builtin_neon_vqshl_n_v:
  case ARM::BI__builtin_neon_vqshlq_n_v:
    Int = usgn ?
          Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
                        1, false);
  case ARM::BI__builtin_neon_vqshlu_n_v:
  case ARM::BI__builtin_neon_vqshluq_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftsu, Ty),
                        Ops, "vqshlu", 1, false);
  case ARM::BI__builtin_neon_vqshrn_n_v:
    Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
                        1, true);
  case ARM::BI__builtin_neon_vqshrun_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
                        Ops, "vqshrun_n", 1, true);
  case ARM::BI__builtin_neon_vqsub_v:
  case ARM::BI__builtin_neon_vqsubq_v:
    Int = usgn ? Intrinsic::arm_neon_vqsubu : Intrinsic::arm_neon_vqsubs;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqsub");
  case ARM::BI__builtin_neon_vraddhn_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vraddhn, Ty),
                        Ops, "vraddhn");
  case ARM::BI__builtin_neon_vrecpe_v:
  case ARM::BI__builtin_neon_vrecpeq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
                        Ops, "vrecpe");
  case ARM::BI__builtin_neon_vrecps_v:
  case ARM::BI__builtin_neon_vrecpsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecps, Ty),
                        Ops, "vrecps");
  case ARM::BI__builtin_neon_vrhadd_v:
  case ARM::BI__builtin_neon_vrhaddq_v:
    Int = usgn ? Intrinsic::arm_neon_vrhaddu : Intrinsic::arm_neon_vrhadds;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrhadd");
  case ARM::BI__builtin_neon_vrshl_v:
  case ARM::BI__builtin_neon_vrshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshl");
  case ARM::BI__builtin_neon_vrshrn_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
                        Ops, "vrshrn_n", 1, true);
  case ARM::BI__builtin_neon_vrshr_n_v:
  case ARM::BI__builtin_neon_vrshrq_n_v:
    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", 1, true);
  case ARM::BI__builtin_neon_vrsqrte_v:
  case ARM::BI__builtin_neon_vrsqrteq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrte, Ty),
                        Ops, "vrsqrte");
  case ARM::BI__builtin_neon_vrsqrts_v:
  case ARM::BI__builtin_neon_vrsqrtsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrts, Ty),
                        Ops, "vrsqrts");
  case ARM::BI__builtin_neon_vrsra_n_v:
  case ARM::BI__builtin_neon_vrsraq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
    Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Ty), Ops[1], Ops[2]);
    return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
  case ARM::BI__builtin_neon_vrsubhn_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsubhn, Ty),
                        Ops, "vrsubhn");
  case ARM::BI__builtin_neon_vshl_v:
  case ARM::BI__builtin_neon_vshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vshiftu : Intrinsic::arm_neon_vshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshl");
  case ARM::BI__builtin_neon_vshll_n_v:
    Int = usgn ?
          Intrinsic::arm_neon_vshiftlu : Intrinsic::arm_neon_vshiftls;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshll", 1);
  case ARM::BI__builtin_neon_vshl_n_v:
  case ARM::BI__builtin_neon_vshlq_n_v:
    Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
    return Builder.CreateShl(Builder.CreateBitCast(Ops[0], Ty), Ops[1],
                             "vshl_n");
  case ARM::BI__builtin_neon_vshrn_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftn, Ty),
                        Ops, "vshrn_n", 1, true);
  case ARM::BI__builtin_neon_vshr_n_v:
  case ARM::BI__builtin_neon_vshrq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
    if (usgn)
      return Builder.CreateLShr(Ops[0], Ops[1], "vshr_n");
    else
      return Builder.CreateAShr(Ops[0], Ops[1], "vshr_n");
  case ARM::BI__builtin_neon_vsri_n_v:
  case ARM::BI__builtin_neon_vsriq_n_v:
    rightShift = true;
    // Fall through: vsri and vsli share the vshiftins intrinsic.
  case ARM::BI__builtin_neon_vsli_n_v:
  case ARM::BI__builtin_neon_vsliq_n_v:
    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
                        Ops, "vsli_n");
  case ARM::BI__builtin_neon_vsra_n_v:
  case ARM::BI__builtin_neon_vsraq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, false);
    if (usgn)
      Ops[1] = Builder.CreateLShr(Ops[1], Ops[2], "vsra_n");
    else
      Ops[1] = Builder.CreateAShr(Ops[1], Ops[2], "vsra_n");
    return Builder.CreateAdd(Ops[0], Ops[1]);
  case ARM::BI__builtin_neon_vst1_v:
  case ARM::BI__builtin_neon_vst1q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst1q_lane_v:
    // Handle 64-bit integer elements as a special case.  Use a shuffle to get
    // a one-element vector and avoid poor code for i64 in the backend.
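    // A one-index shuffle mask built from Ops[2] pulls the addressed lane out
    // as a <1 x i64> value, which arm.neon.vst1 can then store directly.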
    if (VTy->getElementType()->isIntegerTy(64)) {
      Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
      Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
      Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
      Ops[2] = Align;
      return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
                                                 Ops[1]->getType()), Ops);
    }
    // fall through
  case ARM::BI__builtin_neon_vst1_lane_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    StoreInst *St = Builder.CreateStore(Ops[1],
                                        Builder.CreateBitCast(Ops[0], Ty));
    St->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
    return St;
  }
  case ARM::BI__builtin_neon_vst2_v:
  case ARM::BI__builtin_neon_vst2q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst2_lane_v:
  case ARM::BI__builtin_neon_vst2q_lane_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2lane, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst3_v:
  case ARM::BI__builtin_neon_vst3q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst3_lane_v:
  case ARM::BI__builtin_neon_vst3q_lane_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3lane, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst4_v:
  case ARM::BI__builtin_neon_vst4q_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vst4_lane_v:
  case ARM::BI__builtin_neon_vst4q_lane_v:
    Ops.push_back(Align);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, Ty),
                        Ops, "");
  case ARM::BI__builtin_neon_vsubhn_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vsubhn, Ty),
                        Ops, "vsubhn");
  case ARM::BI__builtin_neon_vtbl1_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
                        Ops, "vtbl1");
  case ARM::BI__builtin_neon_vtbl2_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
                        Ops, "vtbl2");
  case ARM::BI__builtin_neon_vtbl3_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
                        Ops, "vtbl3");
  case ARM::BI__builtin_neon_vtbl4_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
                        Ops, "vtbl4");
  case ARM::BI__builtin_neon_vtbx1_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
                        Ops, "vtbx1");
  case ARM::BI__builtin_neon_vtbx2_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
                        Ops, "vtbx2");
  case ARM::BI__builtin_neon_vtbx3_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
                        Ops, "vtbx3");
  case ARM::BI__builtin_neon_vtbx4_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
                        Ops, "vtbx4");
  case ARM::BI__builtin_neon_vtst_v:
  case ARM::BI__builtin_neon_vtstq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
    Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
                                ConstantAggregateZero::get(Ty));
    return Builder.CreateSExt(Ops[0],
Ty, "vtst"); 2438 } 2439 case ARM::BI__builtin_neon_vtrn_v: 2440 case ARM::BI__builtin_neon_vtrnq_v: { 2441 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); 2442 Ops[1] = Builder.CreateBitCast(Ops[1], Ty); 2443 Ops[2] = Builder.CreateBitCast(Ops[2], Ty); 2444 Value *SV = 0; 2445 2446 for (unsigned vi = 0; vi != 2; ++vi) { 2447 SmallVector<Constant*, 16> Indices; 2448 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { 2449 Indices.push_back(Builder.getInt32(i+vi)); 2450 Indices.push_back(Builder.getInt32(i+e+vi)); 2451 } 2452 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi); 2453 SV = llvm::ConstantVector::get(Indices); 2454 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn"); 2455 SV = Builder.CreateStore(SV, Addr); 2456 } 2457 return SV; 2458 } 2459 case ARM::BI__builtin_neon_vuzp_v: 2460 case ARM::BI__builtin_neon_vuzpq_v: { 2461 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); 2462 Ops[1] = Builder.CreateBitCast(Ops[1], Ty); 2463 Ops[2] = Builder.CreateBitCast(Ops[2], Ty); 2464 Value *SV = 0; 2465 2466 for (unsigned vi = 0; vi != 2; ++vi) { 2467 SmallVector<Constant*, 16> Indices; 2468 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) 2469 Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi)); 2470 2471 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi); 2472 SV = llvm::ConstantVector::get(Indices); 2473 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp"); 2474 SV = Builder.CreateStore(SV, Addr); 2475 } 2476 return SV; 2477 } 2478 case ARM::BI__builtin_neon_vzip_v: 2479 case ARM::BI__builtin_neon_vzipq_v: { 2480 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty)); 2481 Ops[1] = Builder.CreateBitCast(Ops[1], Ty); 2482 Ops[2] = Builder.CreateBitCast(Ops[2], Ty); 2483 Value *SV = 0; 2484 2485 for (unsigned vi = 0; vi != 2; ++vi) { 2486 SmallVector<Constant*, 16> Indices; 2487 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { 2488 Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1)); 2489 Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e)); 2490 } 2491 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi); 2492 SV = llvm::ConstantVector::get(Indices); 2493 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip"); 2494 SV = Builder.CreateStore(SV, Addr); 2495 } 2496 return SV; 2497 } 2498 } 2499} 2500 2501llvm::Value *CodeGenFunction:: 2502BuildVector(ArrayRef<llvm::Value*> Ops) { 2503 assert((Ops.size() & (Ops.size() - 1)) == 0 && 2504 "Not a power-of-two sized vector!"); 2505 bool AllConstants = true; 2506 for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i) 2507 AllConstants &= isa<Constant>(Ops[i]); 2508 2509 // If this is a constant vector, create a ConstantVector. 2510 if (AllConstants) { 2511 SmallVector<llvm::Constant*, 16> CstOps; 2512 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 2513 CstOps.push_back(cast<Constant>(Ops[i])); 2514 return llvm::ConstantVector::get(CstOps); 2515 } 2516 2517 // Otherwise, insertelement the values to build the vector. 
  Value *Result =
    llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size()));

  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));

  return Result;
}

Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  SmallVector<Value*, 4> Ops;

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
  assert(Error == ASTContext::GE_None && "Should not codegen an error");

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
    // If this is a normal argument, just emit it as a scalar.
    if ((ICEArguments & (1 << i)) == 0) {
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
      continue;
    }

    // If this is required to be a constant, constant fold it so that we know
    // that the generated intrinsic gets a ConstantInt.
    llvm::APSInt Result;
    bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
    assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
    Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
  }

  switch (BuiltinID) {
  default: return 0;
  case X86::BI__builtin_ia32_vec_init_v8qi:
  case X86::BI__builtin_ia32_vec_init_v4hi:
  case X86::BI__builtin_ia32_vec_init_v2si:
    return Builder.CreateBitCast(BuildVector(Ops),
                                 llvm::Type::getX86_MMXTy(getLLVMContext()));
  case X86::BI__builtin_ia32_vec_ext_v2si:
    return Builder.CreateExtractElement(Ops[0],
                                  llvm::ConstantInt::get(Ops[1]->getType(), 0));
  case X86::BI__builtin_ia32_ldmxcsr: {
    // The intrinsic takes a pointer to the new MXCSR value, so store it to a
    // stack slot first.
    llvm::Type *PtrTy = Int8PtrTy;
    Value *One = llvm::ConstantInt::get(Int32Ty, 1);
    Value *Tmp = Builder.CreateAlloca(Int32Ty, One);
    Builder.CreateStore(Ops[0], Tmp);
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
                              Builder.CreateBitCast(Tmp, PtrTy));
  }
  case X86::BI__builtin_ia32_stmxcsr: {
    // Likewise, stmxcsr writes through a pointer; read the result back from
    // the stack slot.
    llvm::Type *PtrTy = Int8PtrTy;
    Value *One = llvm::ConstantInt::get(Int32Ty, 1);
    Value *Tmp = Builder.CreateAlloca(Int32Ty, One);
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
                       Builder.CreateBitCast(Tmp, PtrTy));
    return Builder.CreateLoad(Tmp, "stmxcsr");
  }
  case X86::BI__builtin_ia32_storehps:
  case X86::BI__builtin_ia32_storelps: {
    llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
    llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);

    // Cast the value to a <2 x i64> vector.
    Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");

    // Extract element 0 (storelps) or element 1 (storehps).
    unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
    llvm::Value *Idx = llvm::ConstantInt::get(Int32Ty, Index);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");

    // Cast the pointer to i64* and store.
    Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case X86::BI__builtin_ia32_palignr: {
    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    // If palignr is shifting the pair of input vectors less than 9 bytes,
    // emit a shuffle instruction.
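    // The shuffle picks bytes shiftVal..shiftVal+7 out of the 16-byte
    // concatenation (Ops[1], Ops[0]), which is exactly PALIGNR's right shift
    // of the combined pair.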
    if (shiftVal <= 8) {
      SmallVector<llvm::Constant*, 8> Indices;
      for (unsigned i = 0; i != 8; ++i)
        Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));

      Value* SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
    }

    // If palignr is shifting the pair of input vectors more than 8 but less
    // than 16 bytes, emit a logical right shift of the destination.
    if (shiftVal < 16) {
      // MMX has these as 1 x i64 vectors for some odd optimization reasons.
      llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1);

      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
      // The shift amount is a <1 x i64> constant, in bits.
      Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);

      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
      return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
    }

    // If palignr is shifting the pair of vectors more than 16 bytes, emit zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
  case X86::BI__builtin_ia32_palignr128: {
    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    // If palignr is shifting the pair of input vectors less than 17 bytes,
    // emit a shuffle instruction.
    if (shiftVal <= 16) {
      SmallVector<llvm::Constant*, 16> Indices;
      for (unsigned i = 0; i != 16; ++i)
        Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));

      Value* SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
    }

    // If palignr is shifting the pair of input vectors more than 16 but less
    // than 32 bytes, emit a logical right shift of the destination.
    if (shiftVal < 32) {
      llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);

      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
      // The shift amount is an i32 constant, in bits.
      Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);

      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
      return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
    }

    // If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
  case X86::BI__builtin_ia32_palignr256: {
    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    // If palignr is shifting the pair of input vectors less than 17 bytes,
    // emit a shuffle instruction.
    if (shiftVal <= 16) {
      SmallVector<llvm::Constant*, 32> Indices;
      // 256-bit palignr operates on 128-bit lanes, so we need to handle that.
      for (unsigned l = 0; l != 2; ++l) {
        unsigned LaneStart = l * 16;
        unsigned LaneEnd = (l+1) * 16;
        for (unsigned i = 0; i != 16; ++i) {
          unsigned Idx = shiftVal + i + LaneStart;
          if (Idx >= LaneEnd) Idx += 16; // end of lane, switch operand
          Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx));
        }
      }

      Value* SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
    }

    // If palignr is shifting the pair of input vectors more than 16 but less
    // than 32 bytes, emit a logical right shift of the destination.
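    // Past 16 bytes every result byte comes from the first input alone, so a
    // plain logical right shift (per 128-bit lane) of that operand suffices.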
    if (shiftVal < 32) {
      llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 4);

      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
      // The shift amount is an i32 constant, in bits.
      Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);

      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_avx2_psrl_dq);
      return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
    }

    // If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
  case X86::BI__builtin_ia32_movntps:
  case X86::BI__builtin_ia32_movntps256:
  case X86::BI__builtin_ia32_movntpd:
  case X86::BI__builtin_ia32_movntpd256:
  case X86::BI__builtin_ia32_movntdq:
  case X86::BI__builtin_ia32_movntdq256:
  case X86::BI__builtin_ia32_movnti: {
    llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(),
                                           Builder.getInt32(1));

    // Convert the type of the pointer to a pointer to the stored type.
    Value *BC = Builder.CreateBitCast(Ops[0],
                                llvm::PointerType::getUnqual(Ops[1]->getType()),
                                      "cast");
    StoreInst *SI = Builder.CreateStore(Ops[1], BC);
    SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
    SI->setAlignment(16);
    return SI;
  }
  // 3DNow!
  case X86::BI__builtin_ia32_pswapdsf:
  case X86::BI__builtin_ia32_pswapdsi: {
    const char *name = 0;
    Intrinsic::ID ID = Intrinsic::not_intrinsic;
    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported intrinsic!");
    case X86::BI__builtin_ia32_pswapdsf:
    case X86::BI__builtin_ia32_pswapdsi:
      name = "pswapd";
      ID = Intrinsic::x86_3dnowa_pswapd;
      break;
    }
    llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
    Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops, name);
  }
  case X86::BI__builtin_ia32_rdrand16_step:
  case X86::BI__builtin_ia32_rdrand32_step:
  case X86::BI__builtin_ia32_rdrand64_step:
  case X86::BI__builtin_ia32_rdseed16_step:
  case X86::BI__builtin_ia32_rdseed32_step:
  case X86::BI__builtin_ia32_rdseed64_step: {
    Intrinsic::ID ID;
    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported intrinsic!");
    case X86::BI__builtin_ia32_rdrand16_step:
      ID = Intrinsic::x86_rdrand_16;
      break;
    case X86::BI__builtin_ia32_rdrand32_step:
      ID = Intrinsic::x86_rdrand_32;
      break;
    case X86::BI__builtin_ia32_rdrand64_step:
      ID = Intrinsic::x86_rdrand_64;
      break;
    case X86::BI__builtin_ia32_rdseed16_step:
      ID = Intrinsic::x86_rdseed_16;
      break;
    case X86::BI__builtin_ia32_rdseed32_step:
      ID = Intrinsic::x86_rdseed_32;
      break;
    case X86::BI__builtin_ia32_rdseed64_step:
      ID = Intrinsic::x86_rdseed_64;
      break;
    }

    // The _step builtins return the random value through the pointer operand
    // and the success flag as the call result.
    Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
    Builder.CreateStore(Builder.CreateExtractValue(Call, 0), Ops[0]);
    return Builder.CreateExtractValue(Call, 1);
  }
  }
}

Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  SmallVector<Value*, 4> Ops;

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
    Ops.push_back(EmitScalarExpr(E->getArg(i)));

  Intrinsic::ID ID = Intrinsic::not_intrinsic;

  switch (BuiltinID) {
  default: return 0;

  // vec_ld, vec_lvsl, vec_lvsr
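  // These builtins take (offset, base-pointer) operands; fold them into a
  // single address with a GEP before calling the corresponding intrinsic.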
  case PPC::BI__builtin_altivec_lvx:
  case PPC::BI__builtin_altivec_lvxl:
  case PPC::BI__builtin_altivec_lvebx:
  case PPC::BI__builtin_altivec_lvehx:
  case PPC::BI__builtin_altivec_lvewx:
  case PPC::BI__builtin_altivec_lvsl:
  case PPC::BI__builtin_altivec_lvsr:
  {
    Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);

    Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
    Ops.pop_back();

    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
    case PPC::BI__builtin_altivec_lvx:
      ID = Intrinsic::ppc_altivec_lvx;
      break;
    case PPC::BI__builtin_altivec_lvxl:
      ID = Intrinsic::ppc_altivec_lvxl;
      break;
    case PPC::BI__builtin_altivec_lvebx:
      ID = Intrinsic::ppc_altivec_lvebx;
      break;
    case PPC::BI__builtin_altivec_lvehx:
      ID = Intrinsic::ppc_altivec_lvehx;
      break;
    case PPC::BI__builtin_altivec_lvewx:
      ID = Intrinsic::ppc_altivec_lvewx;
      break;
    case PPC::BI__builtin_altivec_lvsl:
      ID = Intrinsic::ppc_altivec_lvsl;
      break;
    case PPC::BI__builtin_altivec_lvsr:
      ID = Intrinsic::ppc_altivec_lvsr;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops, "");
  }

  // vec_st
  case PPC::BI__builtin_altivec_stvx:
  case PPC::BI__builtin_altivec_stvxl:
  case PPC::BI__builtin_altivec_stvebx:
  case PPC::BI__builtin_altivec_stvehx:
  case PPC::BI__builtin_altivec_stvewx:
  {
    Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
    Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
    Ops.pop_back();

    switch (BuiltinID) {
    default: llvm_unreachable("Unsupported st intrinsic!");
    case PPC::BI__builtin_altivec_stvx:
      ID = Intrinsic::ppc_altivec_stvx;
      break;
    case PPC::BI__builtin_altivec_stvxl:
      ID = Intrinsic::ppc_altivec_stvxl;
      break;
    case PPC::BI__builtin_altivec_stvebx:
      ID = Intrinsic::ppc_altivec_stvebx;
      break;
    case PPC::BI__builtin_altivec_stvehx:
      ID = Intrinsic::ppc_altivec_stvehx;
      break;
    case PPC::BI__builtin_altivec_stvewx:
      ID = Intrinsic::ppc_altivec_stvewx;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, Ops, "");
  }
  }
}