//===-- AArch64ISelLowering.cpp - AArch64 DAG Lowering Implementation -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "aarch64-isel"
#include "AArch64.h"
#include "AArch64ISelLowering.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64TargetMachine.h"
#include "AArch64TargetObjectFile.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/CallingConv.h"

using namespace llvm;

static TargetLoweringObjectFile *createTLOF(AArch64TargetMachine &TM) {
  const AArch64Subtarget *Subtarget = &TM.getSubtarget<AArch64Subtarget>();
  assert(Subtarget->isTargetELF() && "unknown subtarget type");
  return new AArch64ElfTargetObjectFile();
}

AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)), Itins(TM.getInstrItineraryData()) {

  const AArch64Subtarget *Subtarget = &TM.getSubtarget<AArch64Subtarget>();

  // SIMD compares set the entire lane's bits to 1
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // Scalar register <-> type mapping
  addRegisterClass(MVT::i32, &AArch64::GPR32RegClass);
  addRegisterClass(MVT::i64, &AArch64::GPR64RegClass);

  if (Subtarget->hasFPARMv8()) {
    addRegisterClass(MVT::f16, &AArch64::FPR16RegClass);
    addRegisterClass(MVT::f32, &AArch64::FPR32RegClass);
    addRegisterClass(MVT::f64, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::f128, &AArch64::FPR128RegClass);
  }

  if (Subtarget->hasNEON()) {
    // And the vectors
    addRegisterClass(MVT::v1i8, &AArch64::FPR8RegClass);
    addRegisterClass(MVT::v1i16, &AArch64::FPR16RegClass);
    addRegisterClass(MVT::v1i32, &AArch64::FPR32RegClass);
    addRegisterClass(MVT::v1i64, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v1f32, &AArch64::FPR32RegClass);
    addRegisterClass(MVT::v1f64, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v8i8, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v4i16, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v2i32, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v2f32, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v16i8, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v8i16, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v4i32, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v2i64, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v4f32, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v2f64, &AArch64::FPR128RegClass);
  }

  computeRegisterProperties();

  // We combine OR nodes for bitfield and NEON BSL operations.
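  // (Illustrative example, not taken verbatim from the combines themselves: a
  // DAG like (or (and X, 0xffff0000), (and Y, 0x0000ffff)) can be selected as
  // a single BFI bitfield-insert, and the per-lane vector analogue as a NEON
  // BSL; registering the OR combine below is what lets us spot such patterns.)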
  setTargetDAGCombine(ISD::OR);

  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::SHL);

  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);

  // AArch64 does not have i1 loads, or much of anything for i1 really.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);

  setStackPointerRegisterToSaveRestore(AArch64::XSP);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);

  // We'll lower globals to wrappers for selection.
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);

  // A64 instructions have the comparison predicate attached to the user of the
  // result, but having a separate comparison is valuable for matching.
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);

  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);

  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);

  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::i64, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);

  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);

  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);

  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);

  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  // Legal floating-point operations.
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f64, Legal);

  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FCEIL, MVT::f64, Legal);

  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
  setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

  setOperationAction(ISD::FNEG, MVT::f32, Legal);
  setOperationAction(ISD::FNEG, MVT::f64, Legal);

  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f64, Legal);

  setOperationAction(ISD::FSQRT, MVT::f32, Legal);
  setOperationAction(ISD::FSQRT, MVT::f64, Legal);

  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f64, Legal);

  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f128, Legal);

  // Illegal floating-point operations.
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);

  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);

  setOperationAction(ISD::FEXP, MVT::f32, Expand);
  setOperationAction(ISD::FEXP, MVT::f64, Expand);

  setOperationAction(ISD::FEXP2, MVT::f32, Expand);
  setOperationAction(ISD::FEXP2, MVT::f64, Expand);

  setOperationAction(ISD::FLOG, MVT::f32, Expand);
  setOperationAction(ISD::FLOG, MVT::f64, Expand);

  setOperationAction(ISD::FLOG2, MVT::f32, Expand);
  setOperationAction(ISD::FLOG2, MVT::f64, Expand);

  setOperationAction(ISD::FLOG10, MVT::f32, Expand);
  setOperationAction(ISD::FLOG10, MVT::f64, Expand);

  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);

  setOperationAction(ISD::FPOWI, MVT::f32, Expand);
  setOperationAction(ISD::FPOWI, MVT::f64, Expand);

  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);

  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FSIN, MVT::f64, Expand);

  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);

  // Virtually no operation on f128 is legal, but LLVM can't expand them when
  // there's a valid register class, so we need custom operations in most
  // cases.
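  // (As a sketch of where the Custom cases below typically end up: f128
  // arithmetic is expected to be lowered to soft-float runtime calls, e.g. an
  // f128 FADD becoming a call to __addtf3. The exact libcall names are an
  // assumption here; the LowerF128ToCall-style helpers hold the authoritative
  // mapping.)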
  setOperationAction(ISD::FABS, MVT::f128, Expand);
  setOperationAction(ISD::FADD, MVT::f128, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
  setOperationAction(ISD::FCOS, MVT::f128, Expand);
  setOperationAction(ISD::FDIV, MVT::f128, Custom);
  setOperationAction(ISD::FMA, MVT::f128, Expand);
  setOperationAction(ISD::FMUL, MVT::f128, Custom);
  setOperationAction(ISD::FNEG, MVT::f128, Expand);
  setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f128, Expand);
  setOperationAction(ISD::FPOW, MVT::f128, Expand);
  setOperationAction(ISD::FREM, MVT::f128, Expand);
  setOperationAction(ISD::FRINT, MVT::f128, Expand);
  setOperationAction(ISD::FSIN, MVT::f128, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
  setOperationAction(ISD::FSQRT, MVT::f128, Expand);
  setOperationAction(ISD::FSUB, MVT::f128, Custom);
  setOperationAction(ISD::FTRUNC, MVT::f128, Expand);
  setOperationAction(ISD::SETCC, MVT::f128, Custom);
  setOperationAction(ISD::BR_CC, MVT::f128, Custom);
  setOperationAction(ISD::SELECT, MVT::f128, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);

  // Lowering for many of the conversions is actually specified by the non-f128
  // type. The LowerXXX function will be trivial when f128 isn't involved.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);

  // This prevents LLVM trying to compress double constants into a floating
  // constant-pool entry and trying to load from there. It's of doubtful
  // benefit for A64: we'd need LDR followed by FCVT, I believe.
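  // (Concretely: with an extending load, materialising a double constant
  // could become an LDR of an f32 constant-pool entry followed by a dependent
  // FCVT to f64, whereas the full-width constant needs only a single LDR, or
  // often just an FMOV immediate.)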
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand);

  setTruncStoreAction(MVT::f128, MVT::f64, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);

  setExceptionPointerRegister(AArch64::X0);
  setExceptionSelectorRegister(AArch64::X1);

  if (Subtarget->hasNEON()) {
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);

    setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i8, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2f64, Legal);

    setOperationAction(ISD::SETCC, MVT::v8i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v2i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v1i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v1f32, Custom);
    setOperationAction(ISD::SETCC, MVT::v2f32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4f32, Custom);
    setOperationAction(ISD::SETCC, MVT::v1f64, Custom);
    setOperationAction(ISD::SETCC, MVT::v2f64, Custom);

    setOperationAction(ISD::FFLOOR, MVT::v2f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v1f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);

    setOperationAction(ISD::FCEIL, MVT::v2f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v1f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);

    setOperationAction(ISD::FTRUNC, MVT::v2f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v1f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);

    setOperationAction(ISD::FRINT, MVT::v2f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v1f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v1f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);

    setOperationAction(ISD::FROUND, MVT::v2f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v1f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
  }
}

EVT AArch64TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
  // It's reasonably important that this value matches the "natural" legal
  // promotion from i1 for scalar types. Otherwise LegalizeTypes can get itself
  // in a twist (e.g. inserting an any_extend which then becomes i64 -> i64).
  if (!VT.isVector()) return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

static void getExclusiveOperation(unsigned Size, AtomicOrdering Ord,
                                  unsigned &LdrOpc,
                                  unsigned &StrOpc) {
  static const unsigned LoadBares[] = {AArch64::LDXR_byte, AArch64::LDXR_hword,
                                       AArch64::LDXR_word, AArch64::LDXR_dword};
  static const unsigned LoadAcqs[] = {AArch64::LDAXR_byte, AArch64::LDAXR_hword,
                                      AArch64::LDAXR_word, AArch64::LDAXR_dword};
  static const unsigned StoreBares[] = {AArch64::STXR_byte, AArch64::STXR_hword,
                                        AArch64::STXR_word, AArch64::STXR_dword};
  static const unsigned StoreRels[] = {AArch64::STLXR_byte, AArch64::STLXR_hword,
                                       AArch64::STLXR_word, AArch64::STLXR_dword};

  const unsigned *LoadOps, *StoreOps;
  if (Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent)
    LoadOps = LoadAcqs;
  else
    LoadOps = LoadBares;

  if (Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent)
    StoreOps = StoreRels;
  else
    StoreOps = StoreBares;

  assert(isPowerOf2_32(Size) && Size <= 8 &&
         "unsupported size for atomic binary op!");

  LdrOpc = LoadOps[Log2_32(Size)];
  StrOpc = StoreOps[Log2_32(Size)];
}

// FIXME: AArch64::DTripleRegClass and AArch64::QTripleRegClass don't really
// have value type mapped, and they are both being defined as MVT::untyped.
// Without knowing the MVT type, MachineLICM::getRegisterClassIDAndCost
// would fail to figure out the register pressure correctly.
std::pair<const TargetRegisterClass*, uint8_t>
AArch64TargetLowering::findRepresentativeClass(MVT VT) const {
  const TargetRegisterClass *RRC = 0;
  uint8_t Cost = 1;
  switch (VT.SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  case MVT::v4i64:
    RRC = &AArch64::QPairRegClass;
    Cost = 2;
    break;
  case MVT::v8i64:
    RRC = &AArch64::QQuadRegClass;
    Cost = 4;
    break;
  }
  return std::make_pair(RRC, Cost);
}

MachineBasicBlock *
AArch64TargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
                                        unsigned Size,
                                        unsigned BinOpcode) const {
  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *MF = BB->getParent();
  MachineFunction::iterator It = BB;
  ++It;

  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptr = MI->getOperand(1).getReg();
  unsigned incr = MI->getOperand(2).getReg();
  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());
  DebugLoc dl = MI->getDebugLoc();

  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();

  unsigned ldrOpc, strOpc;
  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);

  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loopMBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  const TargetRegisterClass *TRC
    = Size == 8 ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  unsigned scratch = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC);

  // thisMBB:
  // ...
  // fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  // loopMBB:
  //   ldxr dest, ptr
  //   <binop> scratch, dest, incr
  //   stxr stxr_status, scratch, ptr
  //   cbnz stxr_status, loopMBB
  //   fallthrough --> exitMBB
  BB = loopMBB;
  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
  if (BinOpcode) {
    // All arithmetic operations we'll be creating are designed to take an
    // extra shift or extend operand, which we can conveniently set to zero.

    // Operand order needs to go the other way for NAND.
    if (BinOpcode == AArch64::BICwww_lsl || BinOpcode == AArch64::BICxxx_lsl)
      BuildMI(BB, dl, TII->get(BinOpcode), scratch)
        .addReg(incr).addReg(dest).addImm(0);
    else
      BuildMI(BB, dl, TII->get(BinOpcode), scratch)
        .addReg(dest).addReg(incr).addImm(0);
  }

  // From the stxr, the register is GPR32; from the cmp it's GPR32wsp
  unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
  MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);

  BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(scratch).addReg(ptr);
  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
    .addReg(stxr_status).addMBB(loopMBB);

  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  // exitMBB:
  // ...
  BB = exitMBB;

  MI->eraseFromParent();   // The instruction is gone now.

  return BB;
}

MachineBasicBlock *
AArch64TargetLowering::emitAtomicBinaryMinMax(MachineInstr *MI,
                                              MachineBasicBlock *BB,
                                              unsigned Size,
                                              unsigned CmpOp,
                                              A64CC::CondCodes Cond) const {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *MF = BB->getParent();
  MachineFunction::iterator It = BB;
  ++It;

  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptr = MI->getOperand(1).getReg();
  unsigned incr = MI->getOperand(2).getReg();
  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());

  unsigned oldval = dest;
  DebugLoc dl = MI->getDebugLoc();

  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
  const TargetRegisterClass *TRC, *TRCsp;
  if (Size == 8) {
    TRC = &AArch64::GPR64RegClass;
    TRCsp = &AArch64::GPR64xspRegClass;
  } else {
    TRC = &AArch64::GPR32RegClass;
    TRCsp = &AArch64::GPR32wspRegClass;
  }

  unsigned ldrOpc, strOpc;
  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);

  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loopMBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  unsigned scratch = MRI.createVirtualRegister(TRC);
  MRI.constrainRegClass(scratch, TRCsp);

  // thisMBB:
  // ...
  // fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  // loopMBB:
  //   ldxr dest, ptr
  //   cmp incr, dest (, sign extend if necessary)
  //   csel scratch, dest, incr, cond
  //   stxr stxr_status, scratch, ptr
  //   cbnz stxr_status, loopMBB
  //   fallthrough --> exitMBB
  BB = loopMBB;
  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);

  // Build compare and cmov instructions.
  MRI.constrainRegClass(incr, TRCsp);
  BuildMI(BB, dl, TII->get(CmpOp))
    .addReg(incr).addReg(oldval).addImm(0);

  BuildMI(BB, dl, TII->get(Size == 8 ? AArch64::CSELxxxc : AArch64::CSELwwwc),
          scratch)
    .addReg(oldval).addReg(incr).addImm(Cond);

  unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
  MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);

  BuildMI(BB, dl, TII->get(strOpc), stxr_status)
    .addReg(scratch).addReg(ptr);
  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
    .addReg(stxr_status).addMBB(loopMBB);

  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  // exitMBB:
  // ...
  BB = exitMBB;

  MI->eraseFromParent();   // The instruction is gone now.

  return BB;
}

MachineBasicBlock *
AArch64TargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
                                         MachineBasicBlock *BB,
                                         unsigned Size) const {
  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptr = MI->getOperand(1).getReg();
  unsigned oldval = MI->getOperand(2).getReg();
  unsigned newval = MI->getOperand(3).getReg();
  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(4).getImm());
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();

  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
  const TargetRegisterClass *TRCsp;
  TRCsp = Size == 8 ? &AArch64::GPR64xspRegClass : &AArch64::GPR32wspRegClass;

  unsigned ldrOpc, strOpc;
  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);

  MachineFunction *MF = BB->getParent();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It; // insert the new blocks after the current block

  MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loop1MBB);
  MF->insert(It, loop2MBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  // thisMBB:
  // ...
  // fallthrough --> loop1MBB
  BB->addSuccessor(loop1MBB);

  // loop1MBB:
  //   ldxr dest, [ptr]
  //   cmp dest, oldval
  //   b.ne exitMBB
  BB = loop1MBB;
  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);

  unsigned CmpOp = Size == 8 ? AArch64::CMPxx_lsl : AArch64::CMPww_lsl;
  MRI.constrainRegClass(dest, TRCsp);
  BuildMI(BB, dl, TII->get(CmpOp))
    .addReg(dest).addReg(oldval).addImm(0);
  BuildMI(BB, dl, TII->get(AArch64::Bcc))
    .addImm(A64CC::NE).addMBB(exitMBB);
  BB->addSuccessor(loop2MBB);
  BB->addSuccessor(exitMBB);

  // loop2MBB:
  //   stxr stxr_status, newval, [ptr]
  //   cbnz stxr_status, loop1MBB
  BB = loop2MBB;
  unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
  MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);

  BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(newval).addReg(ptr);
  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
    .addReg(stxr_status).addMBB(loop1MBB);
  BB->addSuccessor(loop1MBB);
  BB->addSuccessor(exitMBB);

  // exitMBB:
  // ...
  BB = exitMBB;

  MI->eraseFromParent();   // The instruction is gone now.

  return BB;
}

MachineBasicBlock *
AArch64TargetLowering::EmitF128CSEL(MachineInstr *MI,
                                    MachineBasicBlock *MBB) const {
  // We materialise the F128CSEL pseudo-instruction using conditional branches
  // and loads, giving an instruction sequence like:
  //     str q0, [sp]
  //     b.ne IfTrue
  //     b Finish
  // IfTrue:
  //     str q1, [sp]
  // Finish:
  //     ldr q0, [sp]
  //
  // Using virtual registers would probably not be beneficial since COPY
  // instructions are expensive for f128 (there's no actual instruction to
  // implement them).
  //
  // An alternative would be to do an integer-CSEL on some address. E.g.:
  //     mov x0, sp
  //     add x1, sp, #16
  //     str q0, [x0]
  //     str q1, [x1]
  //     csel x0, x0, x1, ne
  //     ldr q0, [x0]
  //
  // It's unclear which approach is actually optimal.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  MachineFunction *MF = MBB->getParent();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  DebugLoc DL = MI->getDebugLoc();
  MachineFunction::iterator It = MBB;
  ++It;

  unsigned DestReg = MI->getOperand(0).getReg();
  unsigned IfTrueReg = MI->getOperand(1).getReg();
  unsigned IfFalseReg = MI->getOperand(2).getReg();
  unsigned CondCode = MI->getOperand(3).getImm();
  bool NZCVKilled = MI->getOperand(4).isKill();

  MachineBasicBlock *TrueBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *EndBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, TrueBB);
  MF->insert(It, EndBB);

  // Transfer rest of current basic-block to EndBB
  EndBB->splice(EndBB->begin(), MBB,
                llvm::next(MachineBasicBlock::iterator(MI)),
                MBB->end());
  EndBB->transferSuccessorsAndUpdatePHIs(MBB);

  // We need somewhere to store the f128 value needed.
  int ScratchFI = MF->getFrameInfo()->CreateSpillStackObject(16, 16);

  // [... start of incoming MBB ...]
  //     str qIFFALSE, [sp]
  //     b.cc IfTrue
  //     b Done
  BuildMI(MBB, DL, TII->get(AArch64::LSFP128_STR))
    .addReg(IfFalseReg)
    .addFrameIndex(ScratchFI)
    .addImm(0);
  BuildMI(MBB, DL, TII->get(AArch64::Bcc))
    .addImm(CondCode)
    .addMBB(TrueBB);
  BuildMI(MBB, DL, TII->get(AArch64::Bimm))
    .addMBB(EndBB);
  MBB->addSuccessor(TrueBB);
  MBB->addSuccessor(EndBB);

  if (!NZCVKilled) {
    // NZCV is live-through TrueBB.
    TrueBB->addLiveIn(AArch64::NZCV);
    EndBB->addLiveIn(AArch64::NZCV);
  }

  // IfTrue:
  //     str qIFTRUE, [sp]
  BuildMI(TrueBB, DL, TII->get(AArch64::LSFP128_STR))
    .addReg(IfTrueReg)
    .addFrameIndex(ScratchFI)
    .addImm(0);

  // Note: fallthrough. We can rely on LLVM adding a branch if it reorders the
  // blocks.
  TrueBB->addSuccessor(EndBB);

  // Done:
  //     ldr qDEST, [sp]
  // [... rest of incoming MBB ...]
  MachineInstr *StartOfEnd = EndBB->begin();
  BuildMI(*EndBB, StartOfEnd, DL, TII->get(AArch64::LSFP128_LDR), DestReg)
    .addFrameIndex(ScratchFI)
    .addImm(0);

  MI->eraseFromParent();
  return EndBB;
}

MachineBasicBlock *
AArch64TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                   MachineBasicBlock *MBB) const {
  switch (MI->getOpcode()) {
  default: llvm_unreachable("Unhandled instruction with custom inserter");
  case AArch64::F128CSEL:
    return EmitF128CSEL(MI, MBB);
  case AArch64::ATOMIC_LOAD_ADD_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::ADDwww_lsl);
  case AArch64::ATOMIC_LOAD_ADD_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::ADDwww_lsl);
  case AArch64::ATOMIC_LOAD_ADD_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::ADDwww_lsl);
  case AArch64::ATOMIC_LOAD_ADD_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::ADDxxx_lsl);

  case AArch64::ATOMIC_LOAD_SUB_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::SUBwww_lsl);
  case AArch64::ATOMIC_LOAD_SUB_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::SUBwww_lsl);
  case AArch64::ATOMIC_LOAD_SUB_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::SUBwww_lsl);
  case AArch64::ATOMIC_LOAD_SUB_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::SUBxxx_lsl);

  case AArch64::ATOMIC_LOAD_AND_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::ANDwww_lsl);
  case AArch64::ATOMIC_LOAD_AND_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::ANDwww_lsl);
  case AArch64::ATOMIC_LOAD_AND_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::ANDwww_lsl);
  case AArch64::ATOMIC_LOAD_AND_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::ANDxxx_lsl);

  case AArch64::ATOMIC_LOAD_OR_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::ORRwww_lsl);
  case AArch64::ATOMIC_LOAD_OR_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::ORRwww_lsl);
  case AArch64::ATOMIC_LOAD_OR_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::ORRwww_lsl);
  case AArch64::ATOMIC_LOAD_OR_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::ORRxxx_lsl);

  case AArch64::ATOMIC_LOAD_XOR_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::EORwww_lsl);
  case AArch64::ATOMIC_LOAD_XOR_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::EORwww_lsl);
  case AArch64::ATOMIC_LOAD_XOR_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::EORwww_lsl);
  case AArch64::ATOMIC_LOAD_XOR_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::EORxxx_lsl);

  case AArch64::ATOMIC_LOAD_NAND_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::BICwww_lsl);
  case AArch64::ATOMIC_LOAD_NAND_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::BICwww_lsl);
  case AArch64::ATOMIC_LOAD_NAND_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::BICwww_lsl);
  case AArch64::ATOMIC_LOAD_NAND_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::BICxxx_lsl);

  case AArch64::ATOMIC_LOAD_MIN_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::GT);
  case AArch64::ATOMIC_LOAD_MIN_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::GT);
  case AArch64::ATOMIC_LOAD_MIN_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::GT);
  case AArch64::ATOMIC_LOAD_MIN_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::GT);

  case AArch64::ATOMIC_LOAD_MAX_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::LT);
  case AArch64::ATOMIC_LOAD_MAX_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::LT);
  case AArch64::ATOMIC_LOAD_MAX_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LT);
  case AArch64::ATOMIC_LOAD_MAX_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LT);

  case AArch64::ATOMIC_LOAD_UMIN_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::HI);
  case AArch64::ATOMIC_LOAD_UMIN_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::HI);
  case AArch64::ATOMIC_LOAD_UMIN_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::HI);
  case AArch64::ATOMIC_LOAD_UMIN_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::HI);

  case AArch64::ATOMIC_LOAD_UMAX_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::LO);
  case AArch64::ATOMIC_LOAD_UMAX_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::LO);
  case AArch64::ATOMIC_LOAD_UMAX_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LO);
  case AArch64::ATOMIC_LOAD_UMAX_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LO);

  case AArch64::ATOMIC_SWAP_I8:
    return emitAtomicBinary(MI, MBB, 1, 0);
  case AArch64::ATOMIC_SWAP_I16:
    return emitAtomicBinary(MI, MBB, 2, 0);
  case AArch64::ATOMIC_SWAP_I32:
    return emitAtomicBinary(MI, MBB, 4, 0);
  case AArch64::ATOMIC_SWAP_I64:
    return emitAtomicBinary(MI, MBB, 8, 0);

  case AArch64::ATOMIC_CMP_SWAP_I8:
    return emitAtomicCmpSwap(MI, MBB, 1);
  case AArch64::ATOMIC_CMP_SWAP_I16:
    return emitAtomicCmpSwap(MI, MBB, 2);
  case AArch64::ATOMIC_CMP_SWAP_I32:
    return emitAtomicCmpSwap(MI, MBB, 4);
  case AArch64::ATOMIC_CMP_SWAP_I64:
    return emitAtomicCmpSwap(MI, MBB, 8);
  }
}

const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  case AArch64ISD::BR_CC:          return "AArch64ISD::BR_CC";
  case AArch64ISD::Call:           return "AArch64ISD::Call";
  case AArch64ISD::FPMOV:          return "AArch64ISD::FPMOV";
  case AArch64ISD::GOTLoad:        return "AArch64ISD::GOTLoad";
  case AArch64ISD::BFI:            return "AArch64ISD::BFI";
  case AArch64ISD::EXTR:           return "AArch64ISD::EXTR";
  case AArch64ISD::Ret:            return "AArch64ISD::Ret";
  case AArch64ISD::SBFX:           return "AArch64ISD::SBFX";
  case AArch64ISD::SELECT_CC:      return "AArch64ISD::SELECT_CC";
  case AArch64ISD::SETCC:          return "AArch64ISD::SETCC";
  case AArch64ISD::TC_RETURN:      return "AArch64ISD::TC_RETURN";
  case AArch64ISD::THREAD_POINTER: return "AArch64ISD::THREAD_POINTER";
  case AArch64ISD::TLSDESCCALL:    return "AArch64ISD::TLSDESCCALL";
  case AArch64ISD::WrapperLarge:   return "AArch64ISD::WrapperLarge";
  case AArch64ISD::WrapperSmall:   return "AArch64ISD::WrapperSmall";

  case AArch64ISD::NEON_BSL:
    return "AArch64ISD::NEON_BSL";
  case AArch64ISD::NEON_MOVIMM:
    return "AArch64ISD::NEON_MOVIMM";
  case AArch64ISD::NEON_MVNIMM:
    return "AArch64ISD::NEON_MVNIMM";
  case AArch64ISD::NEON_FMOVIMM:
    return "AArch64ISD::NEON_FMOVIMM";
  case AArch64ISD::NEON_CMP:
    return "AArch64ISD::NEON_CMP";
  case AArch64ISD::NEON_CMPZ:
    return "AArch64ISD::NEON_CMPZ";
  case AArch64ISD::NEON_TST:
    return "AArch64ISD::NEON_TST";
  case AArch64ISD::NEON_QSHLs:
    return "AArch64ISD::NEON_QSHLs";
  case AArch64ISD::NEON_QSHLu:
    return "AArch64ISD::NEON_QSHLu";
  case AArch64ISD::NEON_VDUP:
    return "AArch64ISD::NEON_VDUP";
  case AArch64ISD::NEON_VDUPLANE:
    return "AArch64ISD::NEON_VDUPLANE";
  case AArch64ISD::NEON_REV16:
    return "AArch64ISD::NEON_REV16";
  case AArch64ISD::NEON_REV32:
    return "AArch64ISD::NEON_REV32";
  case AArch64ISD::NEON_REV64:
    return "AArch64ISD::NEON_REV64";
  case AArch64ISD::NEON_UZP1:
    return "AArch64ISD::NEON_UZP1";
  case AArch64ISD::NEON_UZP2:
    return "AArch64ISD::NEON_UZP2";
  case AArch64ISD::NEON_ZIP1:
    return "AArch64ISD::NEON_ZIP1";
  case AArch64ISD::NEON_ZIP2:
    return "AArch64ISD::NEON_ZIP2";
  case AArch64ISD::NEON_TRN1:
    return "AArch64ISD::NEON_TRN1";
  case AArch64ISD::NEON_TRN2:
    return "AArch64ISD::NEON_TRN2";
  case AArch64ISD::NEON_LD1_UPD:
    return "AArch64ISD::NEON_LD1_UPD";
  case AArch64ISD::NEON_LD2_UPD:
    return "AArch64ISD::NEON_LD2_UPD";
  case AArch64ISD::NEON_LD3_UPD:
    return "AArch64ISD::NEON_LD3_UPD";
  case AArch64ISD::NEON_LD4_UPD:
    return "AArch64ISD::NEON_LD4_UPD";
  case AArch64ISD::NEON_ST1_UPD:
    return "AArch64ISD::NEON_ST1_UPD";
  case AArch64ISD::NEON_ST2_UPD:
    return "AArch64ISD::NEON_ST2_UPD";
  case AArch64ISD::NEON_ST3_UPD:
    return "AArch64ISD::NEON_ST3_UPD";
  case AArch64ISD::NEON_ST4_UPD:
    return "AArch64ISD::NEON_ST4_UPD";
  case AArch64ISD::NEON_LD1x2_UPD:
    return "AArch64ISD::NEON_LD1x2_UPD";
  case AArch64ISD::NEON_LD1x3_UPD:
    return "AArch64ISD::NEON_LD1x3_UPD";
  case AArch64ISD::NEON_LD1x4_UPD:
    return "AArch64ISD::NEON_LD1x4_UPD";
  case AArch64ISD::NEON_ST1x2_UPD:
    return "AArch64ISD::NEON_ST1x2_UPD";
  case AArch64ISD::NEON_ST1x3_UPD:
    return "AArch64ISD::NEON_ST1x3_UPD";
  case AArch64ISD::NEON_ST1x4_UPD:
    return "AArch64ISD::NEON_ST1x4_UPD";
  case AArch64ISD::NEON_LD2DUP:
    return "AArch64ISD::NEON_LD2DUP";
  case AArch64ISD::NEON_LD3DUP:
    return "AArch64ISD::NEON_LD3DUP";
  case AArch64ISD::NEON_LD4DUP:
    return "AArch64ISD::NEON_LD4DUP";
  case AArch64ISD::NEON_LD2DUP_UPD:
    return "AArch64ISD::NEON_LD2DUP_UPD";
  case AArch64ISD::NEON_LD3DUP_UPD:
    return "AArch64ISD::NEON_LD3DUP_UPD";
  case AArch64ISD::NEON_LD4DUP_UPD:
    return "AArch64ISD::NEON_LD4DUP_UPD";
  case AArch64ISD::NEON_LD2LN_UPD:
    return "AArch64ISD::NEON_LD2LN_UPD";
  case AArch64ISD::NEON_LD3LN_UPD:
    return "AArch64ISD::NEON_LD3LN_UPD";
  case AArch64ISD::NEON_LD4LN_UPD:
    return "AArch64ISD::NEON_LD4LN_UPD";
  case AArch64ISD::NEON_ST2LN_UPD:
    return "AArch64ISD::NEON_ST2LN_UPD";
  case AArch64ISD::NEON_ST3LN_UPD:
    return "AArch64ISD::NEON_ST3LN_UPD";
  case AArch64ISD::NEON_ST4LN_UPD:
    return "AArch64ISD::NEON_ST4LN_UPD";
  case AArch64ISD::NEON_VEXTRACT:
    return "AArch64ISD::NEON_VEXTRACT";
  default:
    return NULL;
  }
}

static const uint16_t AArch64FPRArgRegs[] = {
  AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
  AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7
};
static const unsigned NumFPRArgRegs = llvm::array_lengthof(AArch64FPRArgRegs);

static const uint16_t AArch64ArgRegs[] = {
  AArch64::X0, AArch64::X1, AArch64::X2, AArch64::X3,
  AArch64::X4, AArch64::X5, AArch64::X6, AArch64::X7
};
static const unsigned NumArgRegs = llvm::array_lengthof(AArch64ArgRegs);

static bool CC_AArch64NoMoreRegs(unsigned ValNo, MVT ValVT, MVT LocVT,
                                 CCValAssign::LocInfo LocInfo,
                                 ISD::ArgFlagsTy ArgFlags, CCState &State) {
  // Mark all remaining general purpose registers as allocated. We don't
  // backtrack: if (for example) an i128 gets put on the stack, no subsequent
  // i64 will go in registers (C.11).
  for (unsigned i = 0; i < NumArgRegs; ++i)
    State.AllocateReg(AArch64ArgRegs[i]);

  return false;
}

#include "AArch64GenCallingConv.inc"

CCAssignFn *AArch64TargetLowering::CCAssignFnForNode(CallingConv::ID CC) const {

  switch(CC) {
  default: llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
  case CallingConv::C:
    return CC_A64_APCS;
  }
}

void
AArch64TargetLowering::SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG,
                                           SDLoc DL, SDValue &Chain) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  AArch64MachineFunctionInfo *FuncInfo
    = MF.getInfo<AArch64MachineFunctionInfo>();

  SmallVector<SDValue, 8> MemOps;

  unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(AArch64ArgRegs,
                                                         NumArgRegs);
  unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(AArch64FPRArgRegs,
                                                         NumFPRArgRegs);

  unsigned GPRSaveSize = 8 * (NumArgRegs - FirstVariadicGPR);
  int GPRIdx = 0;
  if (GPRSaveSize != 0) {
    GPRIdx = MFI->CreateStackObject(GPRSaveSize, 8, false);

    SDValue FIN = DAG.getFrameIndex(GPRIdx, getPointerTy());

    for (unsigned i = FirstVariadicGPR; i < NumArgRegs; ++i) {
      unsigned VReg = MF.addLiveIn(AArch64ArgRegs[i], &AArch64::GPR64RegClass);
      SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
      SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
                                   MachinePointerInfo::getStack(i * 8),
                                   false, false, 0);
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
                        DAG.getConstant(8, getPointerTy()));
    }
  }

  if (getSubtarget()->hasFPARMv8()) {
    unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
    int FPRIdx = 0;
    // According to the AArch64 Procedure Call Standard, section B.1/B.3, we
    // can omit a register save area if we know we'll never use registers of
    // that class.
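    // (For scale: a full save area is eight registers per class, i.e. 64
    // bytes for x0-x7 and 128 bytes for q0-q7. If every FPR argument register
    // was already consumed by named arguments, FirstVariadicFPR equals
    // NumFPRArgRegs and FPRSaveSize is zero, so the block vanishes.)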
    if (FPRSaveSize != 0) {
      FPRIdx = MFI->CreateStackObject(FPRSaveSize, 16, false);

      SDValue FIN = DAG.getFrameIndex(FPRIdx, getPointerTy());

      for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) {
        unsigned VReg = MF.addLiveIn(AArch64FPRArgRegs[i],
                                     &AArch64::FPR128RegClass);
        SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128);
        SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
                                     MachinePointerInfo::getStack(i * 16),
                                     false, false, 0);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
                          DAG.getConstant(16, getPointerTy()));
      }
    }
    FuncInfo->setVariadicFPRIdx(FPRIdx);
    FuncInfo->setVariadicFPRSize(FPRSaveSize);
  }

  int StackIdx = MFI->CreateFixedObject(8, CCInfo.getNextStackOffset(), true);

  FuncInfo->setVariadicStackIdx(StackIdx);
  FuncInfo->setVariadicGPRIdx(GPRIdx);
  FuncInfo->setVariadicGPRSize(GPRSaveSize);

  if (!MemOps.empty()) {
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0],
                        MemOps.size());
  }
}

SDValue
AArch64TargetLowering::LowerFormalArguments(SDValue Chain,
                                      CallingConv::ID CallConv, bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                      SDLoc dl, SelectionDAG &DAG,
                                      SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  AArch64MachineFunctionInfo *FuncInfo
    = MF.getInfo<AArch64MachineFunctionInfo>();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForNode(CallConv));

  SmallVector<SDValue, 16> ArgValues;

  SDValue ArgValue;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    ISD::ArgFlagsTy Flags = Ins[i].Flags;

    if (Flags.isByVal()) {
      // Byval is used for small structs and HFAs in the PCS, but the system
      // should work in a non-compliant manner for larger structs.
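      // (For example, a 24-byte byval struct rounds up to NumRegs = 3
      // eight-byte slots below; the callee then sees a pointer to this
      // dedicated stack copy rather than the individual fields.)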
      EVT PtrTy = getPointerTy();
      int Size = Flags.getByValSize();
      unsigned NumRegs = (Size + 7) / 8;

      unsigned FrameIdx = MFI->CreateFixedObject(8 * NumRegs,
                                                 VA.getLocMemOffset(),
                                                 false);
      SDValue FrameIdxN = DAG.getFrameIndex(FrameIdx, PtrTy);
      InVals.push_back(FrameIdxN);

      continue;
    } else if (VA.isRegLoc()) {
      MVT RegVT = VA.getLocVT();
      const TargetRegisterClass *RC = getRegClassFor(RegVT);
      unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);

      ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
    } else { // VA.isRegLoc()
      assert(VA.isMemLoc());

      int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
                                      VA.getLocMemOffset(), true);

      SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
      ArgValue = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                             MachinePointerInfo::getFixedStack(FI),
                             false, false, false, 0);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
      break;
    case CCValAssign::SExt:
    case CCValAssign::ZExt:
    case CCValAssign::AExt: {
      unsigned DestSize = VA.getValVT().getSizeInBits();
      unsigned DestSubReg;

      switch (DestSize) {
      case 8: DestSubReg = AArch64::sub_8; break;
      case 16: DestSubReg = AArch64::sub_16; break;
      case 32: DestSubReg = AArch64::sub_32; break;
      case 64: DestSubReg = AArch64::sub_64; break;
      default: llvm_unreachable("Unexpected argument promotion");
      }

      ArgValue = SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl,
                                   VA.getValVT(), ArgValue,
                                   DAG.getTargetConstant(DestSubReg, MVT::i32)),
                         0);
      break;
    }
    }

    InVals.push_back(ArgValue);
  }

  if (isVarArg)
    SaveVarArgRegisters(CCInfo, DAG, dl, Chain);

  unsigned StackArgSize = CCInfo.getNextStackOffset();
  if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) {
    // This is a non-standard ABI so by fiat I say we're allowed to make full
    // use of the stack area to be popped, which must be aligned to 16 bytes in
    // any case:
    StackArgSize = RoundUpToAlignment(StackArgSize, 16);

    // If we're expected to restore the stack (e.g. fastcc) then we'll be
    // adding a multiple of 16.
    FuncInfo->setArgumentStackToRestore(StackArgSize);

    // This realignment carries over to the available bytes below. Our own
    // callers will guarantee the space is free by giving an aligned value to
    // CALLSEQ_START.
  }
  // Even if we're not expected to free up the space, it's useful to know how
  // much is there while considering tail calls (because we can reuse it).
  FuncInfo->setBytesInStackArgArea(StackArgSize);

  return Chain;
}

SDValue
AArch64TargetLowering::LowerReturn(SDValue Chain,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   SDLoc dl, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  // Analyze outgoing return values.
  CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv));

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    // PCS: "If the type, T, of the result of a function is such that
    // void func(T arg) would require that arg be passed as a value in a
    // register (or set of registers) according to the rules in 5.4, then the
    // result is returned in the same registers as would be used for such an
    // argument.
    //
    // Otherwise, the caller shall reserve a block of memory of sufficient
    // size and alignment to hold the result. The address of the memory block
    // shall be passed as an additional argument to the function in x8."
    //
    // This is implemented in two places. The register-return values are dealt
    // with here, more complex returns are passed as an sret parameter, which
    // means we don't have to worry about it during actual return.
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Only register-returns should be created by PCS");

    SDValue Arg = OutVals[i];

    // There's no convenient note in the ABI about this as there is for normal
    // arguments, but it says return values are passed in the same registers as
    // an argument would be. I believe that includes the comments about
    // unspecified higher bits, putting the burden of widening on the *caller*
    // for return values.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
    case CCValAssign::ZExt:
    case CCValAssign::AExt:
      // Floating-point values should only be extended when they're going into
      // memory, which can't happen here so an integer extend is acceptable.
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(AArch64ISD::Ret, dl, MVT::Other,
                     &RetOps[0], RetOps.size());
}

SDValue
AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  AArch64MachineFunctionInfo *FuncInfo
    = MF.getInfo<AArch64MachineFunctionInfo>();
  bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
  bool IsStructRet = !Outs.empty() && Outs[0].Flags.isSRet();
  bool IsSibCall = false;

  if (IsTailCall) {
    IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                    IsVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
                    Outs, OutVals, Ins, DAG);

    // A sibling call is one where we're under the usual C ABI and not planning
    // to change that but can still do a tail call:
    if (!TailCallOpt && IsTailCall)
      IsSibCall = true;
  }

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CallConv));

  // On AArch64 (and all other architectures I'm aware of) the most this has to
  // do is adjust the stack pointer.
  unsigned NumBytes = RoundUpToAlignment(CCInfo.getNextStackOffset(), 16);
  if (IsSibCall) {
    // Since we're not changing the ABI to make this a tail call, the memory
    // operands are already available in the caller's incoming argument space.
    NumBytes = 0;
  }

  // FPDiff is the byte offset of the call's argument area from the callee's.
  // Stores to callee stack arguments will be placed in FixedStackSlots offset
  // by this amount for a tail call. In a sibling call it must be 0 because the
  // caller will deallocate the entire stack and the callee still expects its
  // arguments to begin at SP+0. Completely unused for non-tail calls.
  int FPDiff = 0;

  if (IsTailCall && !IsSibCall) {
    unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();

    // FPDiff will be negative if this tail call requires more space than we
    // would automatically have in our incoming argument space. Positive if we
    // can actually shrink the stack.
    FPDiff = NumReusableBytes - NumBytes;

    // The stack pointer must be 16-byte aligned at all times it's used for a
    // memory operation, which in practice means at *all* times and in
    // particular across call boundaries. Therefore our own arguments started
    // at a 16-byte aligned SP and the delta applied for the tail call should
    // satisfy the same constraint.
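    // (Worked example: a caller with 32 bytes of incoming stack-argument
    // space making a tail call that needs only 16 gets
    // FPDiff = 32 - 16 = +16, and each outgoing store below lands at its
    // PCS offset shifted by that delta within the reused area.)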
    assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
  }

  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
                                 dl);

  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, AArch64::XSP,
                                        getPointerTy());

  SmallVector<SDValue, 8> MemOpChains;
  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    SDValue Arg = OutVals[i];

    // Callee does the actual widening, so all extensions just use an implicit
    // definition of the rest of the Loc. Aesthetically, this would be nicer as
    // an ANY_EXTEND, but that isn't valid for floating-point types and this
    // alternative works on integer types too.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
    case CCValAssign::ZExt:
    case CCValAssign::AExt: {
      unsigned SrcSize = VA.getValVT().getSizeInBits();
      unsigned SrcSubReg;

      switch (SrcSize) {
      case 8: SrcSubReg = AArch64::sub_8; break;
      case 16: SrcSubReg = AArch64::sub_16; break;
      case 32: SrcSubReg = AArch64::sub_32; break;
      case 64: SrcSubReg = AArch64::sub_64; break;
      default: llvm_unreachable("Unexpected argument promotion");
      }

      Arg = SDValue(DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                                    VA.getLocVT(),
                                    DAG.getUNDEF(VA.getLocVT()),
                                    Arg,
                                    DAG.getTargetConstant(SrcSubReg, MVT::i32)),
                    0);

      break;
    }
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      // A normal register (sub-) argument. For now we just note it down
      // because we want to copy things into registers as late as possible to
      // avoid register-pressure (and possibly worse).
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      continue;
    }

    assert(VA.isMemLoc() && "unexpected argument location");

    SDValue DstAddr;
    MachinePointerInfo DstInfo;
    if (IsTailCall) {
      uint32_t OpSize = Flags.isByVal() ? Flags.getByValSize() :
                                          VA.getLocVT().getSizeInBits();
      OpSize = (OpSize + 7) / 8;
      int32_t Offset = VA.getLocMemOffset() + FPDiff;
      int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);

      DstAddr = DAG.getFrameIndex(FI, getPointerTy());
      DstInfo = MachinePointerInfo::getFixedStack(FI);

      // Make sure any stack arguments overlapping with where we're storing are
      // loaded before this eventual operation. Otherwise they'll be clobbered.
      Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI);
    } else {
      SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset());

      DstAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
      DstInfo = MachinePointerInfo::getStack(VA.getLocMemOffset());
    }

    if (Flags.isByVal()) {
      SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i64);
      SDValue Cpy = DAG.getMemcpy(Chain, dl, DstAddr, Arg, SizeNode,
                                  Flags.getByValAlign(),
                                  /*isVolatile = */ false,
                                  /*alwaysInline = */ false,
                                  DstInfo, MachinePointerInfo(0));
      MemOpChains.push_back(Cpy);
    } else {
      // Normal stack argument, put it where it's needed.
      SDValue Store = DAG.getStore(Chain, dl, Arg, DstAddr, DstInfo,
                                   false, false, 0);
      MemOpChains.push_back(Store);
    }
  }

  // The loads and stores generated above shouldn't clash with each
  // other. Combining them with this TokenFactor notes that fact for the rest
  // of the backend.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Most of the rest of the instructions need to be glued together; we don't
  // want assignments to actual registers used by a call to be rearranged by a
  // well-meaning scheduler.
  SDValue InFlag;

  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // The linker is responsible for inserting veneers when necessary to put a
  // function call destination in range, so we don't need to bother with a
  // wrapper here.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const char *Sym = S->getSymbol();
    Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy());
  }

  // We don't usually want to end the call-sequence here because we would tidy
  // the frame up *after* the call; however, in the ABI-changing tail-call case
  // we've carefully laid out the parameters so that when sp is reset they'll
  // be in the correct location.
  if (IsTailCall && !IsSibCall) {
    Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                               DAG.getIntPtrConstant(0, true), InFlag, dl);
    InFlag = Chain.getValue(1);
  }

  // We produce the following DAG scheme for the actual call instruction:
  //     (AArch64Call Chain, Callee, reg1, ..., regn, preserveMask, inflag?)
  //
  // Most arguments aren't going to be used and just keep the values live as
  // far as LLVM is concerned. It's expected to be selected as simply "bl
  // callee" (for a direct, non-tail call).
  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  if (IsTailCall) {
    // Each tail call may have to adjust the stack by a different amount, so
    // this information must travel along with the operation for eventual
    // consumption by emitEpilogue.
    Ops.push_back(DAG.getTargetConstant(FPDiff, MVT::i32));
  }

  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  // This is used later in codegen to constrain register allocation.
  const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // If we needed glue, put it in as the last argument.
1554 if (InFlag.getNode()) 1555 Ops.push_back(InFlag); 1556 1557 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 1558 1559 if (IsTailCall) { 1560 return DAG.getNode(AArch64ISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size()); 1561 } 1562 1563 Chain = DAG.getNode(AArch64ISD::Call, dl, NodeTys, &Ops[0], Ops.size()); 1564 InFlag = Chain.getValue(1); 1565 1566 // Now we can reclaim the stack, just as well do it before working out where 1567 // our return value is. 1568 if (!IsSibCall) { 1569 uint64_t CalleePopBytes 1570 = DoesCalleeRestoreStack(CallConv, TailCallOpt) ? NumBytes : 0; 1571 1572 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), 1573 DAG.getIntPtrConstant(CalleePopBytes, true), 1574 InFlag, dl); 1575 InFlag = Chain.getValue(1); 1576 } 1577 1578 return LowerCallResult(Chain, InFlag, CallConv, 1579 IsVarArg, Ins, dl, DAG, InVals); 1580} 1581 1582SDValue 1583AArch64TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, 1584 CallingConv::ID CallConv, bool IsVarArg, 1585 const SmallVectorImpl<ISD::InputArg> &Ins, 1586 SDLoc dl, SelectionDAG &DAG, 1587 SmallVectorImpl<SDValue> &InVals) const { 1588 // Assign locations to each value returned by this call. 1589 SmallVector<CCValAssign, 16> RVLocs; 1590 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), 1591 getTargetMachine(), RVLocs, *DAG.getContext()); 1592 CCInfo.AnalyzeCallResult(Ins, CCAssignFnForNode(CallConv)); 1593 1594 for (unsigned i = 0; i != RVLocs.size(); ++i) { 1595 CCValAssign VA = RVLocs[i]; 1596 1597 // Return values that are too big to fit into registers should use an sret 1598 // pointer, so this can be a lot simpler than the main argument code. 1599 assert(VA.isRegLoc() && "Memory locations not expected for call return"); 1600 1601 SDValue Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(), 1602 InFlag); 1603 Chain = Val.getValue(1); 1604 InFlag = Val.getValue(2); 1605 1606 switch (VA.getLocInfo()) { 1607 default: llvm_unreachable("Unknown loc info!"); 1608 case CCValAssign::Full: break; 1609 case CCValAssign::BCvt: 1610 Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val); 1611 break; 1612 case CCValAssign::ZExt: 1613 case CCValAssign::SExt: 1614 case CCValAssign::AExt: 1615 // Floating-point arguments only get extended/truncated if they're going 1616 // in memory, so using the integer operation is acceptable here. 1617 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); 1618 break; 1619 } 1620 1621 InVals.push_back(Val); 1622 } 1623 1624 return Chain; 1625} 1626 1627bool 1628AArch64TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 1629 CallingConv::ID CalleeCC, 1630 bool IsVarArg, 1631 bool IsCalleeStructRet, 1632 bool IsCallerStructRet, 1633 const SmallVectorImpl<ISD::OutputArg> &Outs, 1634 const SmallVectorImpl<SDValue> &OutVals, 1635 const SmallVectorImpl<ISD::InputArg> &Ins, 1636 SelectionDAG& DAG) const { 1637 1638 // For CallingConv::C this function knows whether the ABI needs 1639 // changing. That's not true for other conventions so they will have to opt in 1640 // manually. 1641 if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C) 1642 return false; 1643 1644 const MachineFunction &MF = DAG.getMachineFunction(); 1645 const Function *CallerF = MF.getFunction(); 1646 CallingConv::ID CallerCC = CallerF->getCallingConv(); 1647 bool CCMatch = CallerCC == CalleeCC; 1648 1649 // Byval parameters hand the function a pointer directly into the stack area 1650 // we want to reuse during a tail call. 
Working around this *is* possible (see 1651 // X86) but less efficient and uglier in LowerCall. 1652 for (Function::const_arg_iterator i = CallerF->arg_begin(), 1653 e = CallerF->arg_end(); i != e; ++i) 1654 if (i->hasByValAttr()) 1655 return false; 1656 1657 if (getTargetMachine().Options.GuaranteedTailCallOpt) { 1658 if (IsTailCallConvention(CalleeCC) && CCMatch) 1659 return true; 1660 return false; 1661 } 1662 1663 // Now we search for cases where we can use a tail call without changing the 1664 // ABI. Sibcall is used in some places (particularly gcc) to refer to this 1665 // concept. 1666 1667 // I want anyone implementing a new calling convention to think long and hard 1668 // about this assert. 1669 assert((!IsVarArg || CalleeCC == CallingConv::C) 1670 && "Unexpected variadic calling convention"); 1671 1672 if (IsVarArg && !Outs.empty()) { 1673 // At least two cases here: if caller is fastcc then we can't have any 1674 // memory arguments (we'd be expected to clean up the stack afterwards). If 1675 // caller is C then we could potentially use its argument area. 1676 1677 // FIXME: for now we take the most conservative of these in both cases: 1678 // disallow all variadic memory operands. 1679 SmallVector<CCValAssign, 16> ArgLocs; 1680 CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(), 1681 getTargetMachine(), ArgLocs, *DAG.getContext()); 1682 1683 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC)); 1684 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) 1685 if (!ArgLocs[i].isRegLoc()) 1686 return false; 1687 } 1688 1689 // If the calling conventions do not match, then we'd better make sure the 1690 // results are returned in the same way as what the caller expects. 1691 if (!CCMatch) { 1692 SmallVector<CCValAssign, 16> RVLocs1; 1693 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), 1694 getTargetMachine(), RVLocs1, *DAG.getContext()); 1695 CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC)); 1696 1697 SmallVector<CCValAssign, 16> RVLocs2; 1698 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), 1699 getTargetMachine(), RVLocs2, *DAG.getContext()); 1700 CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC)); 1701 1702 if (RVLocs1.size() != RVLocs2.size()) 1703 return false; 1704 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) { 1705 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc()) 1706 return false; 1707 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo()) 1708 return false; 1709 if (RVLocs1[i].isRegLoc()) { 1710 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg()) 1711 return false; 1712 } else { 1713 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset()) 1714 return false; 1715 } 1716 } 1717 } 1718 1719 // Nothing more to check if the callee is taking no arguments 1720 if (Outs.empty()) 1721 return true; 1722 1723 SmallVector<CCValAssign, 16> ArgLocs; 1724 CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(), 1725 getTargetMachine(), ArgLocs, *DAG.getContext()); 1726 1727 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC)); 1728 1729 const AArch64MachineFunctionInfo *FuncInfo 1730 = MF.getInfo<AArch64MachineFunctionInfo>(); 1731 1732 // If the stack arguments for this call would fit into our own save area then 1733 // the call can be made tail. 
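  // (Illustrative: a caller that itself received 16 bytes of stack arguments
  // can tail-call any callee whose outgoing arguments need at most those same
  // 16 bytes.)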
  return CCInfo.getNextStackOffset() <= FuncInfo->getBytesInStackArgArea();
}

bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC,
                                                   bool TailCallOpt) const {
  return CallCC == CallingConv::Fast && TailCallOpt;
}

bool AArch64TargetLowering::IsTailCallConvention(CallingConv::ID CallCC) const {
  return CallCC == CallingConv::Fast;
}

SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
                                                   SelectionDAG &DAG,
                                                   MachineFrameInfo *MFI,
                                                   int ClobberedFI) const {
  SmallVector<SDValue, 8> ArgChains;
  int64_t FirstByte = MFI->getObjectOffset(ClobberedFI);
  int64_t LastByte = FirstByte + MFI->getObjectSize(ClobberedFI) - 1;

  // Include the original chain at the beginning of the list. When this is
  // used by target LowerCall hooks, this helps legalization find the
  // CALLSEQ_BEGIN node.
  ArgChains.push_back(Chain);

  // Add a chain value for each stack argument load that overlaps the area
  // we're about to clobber.
  for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
       UE = DAG.getEntryNode().getNode()->use_end(); U != UE; ++U)
    if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
        if (FI->getIndex() < 0) {
          int64_t InFirstByte = MFI->getObjectOffset(FI->getIndex());
          int64_t InLastByte = InFirstByte;
          InLastByte += MFI->getObjectSize(FI->getIndex()) - 1;

          if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
              (FirstByte <= InFirstByte && InFirstByte <= LastByte))
            ArgChains.push_back(SDValue(L, 1));
        }

  // Build a TokenFactor for all the chains.
  return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other,
                     &ArgChains[0], ArgChains.size());
}

static A64CC::CondCodes IntCCToA64CC(ISD::CondCode CC) {
  switch (CC) {
  case ISD::SETEQ: return A64CC::EQ;
  case ISD::SETGT: return A64CC::GT;
  case ISD::SETGE: return A64CC::GE;
  case ISD::SETLT: return A64CC::LT;
  case ISD::SETLE: return A64CC::LE;
  case ISD::SETNE: return A64CC::NE;
  case ISD::SETUGT: return A64CC::HI;
  case ISD::SETUGE: return A64CC::HS;
  case ISD::SETULT: return A64CC::LO;
  case ISD::SETULE: return A64CC::LS;
  default: llvm_unreachable("Unexpected condition code");
  }
}

bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Val) const {
  // icmp is implemented using adds/subs immediate, which take an unsigned
  // 12-bit immediate, optionally shifted left by 12 bits.

  // Negative values are handled symmetrically: we can use either ADDS or SUBS.
  if (Val < 0)
    Val = -Val;

  return (Val & ~0xfff) == 0 || (Val & ~0xfff000) == 0;
}

SDValue AArch64TargetLowering::getSelectableIntSetCC(SDValue LHS, SDValue RHS,
                                                     ISD::CondCode CC, SDValue &A64cc,
                                                     SelectionDAG &DAG, SDLoc &dl) const {
  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
    int64_t C = 0;
    EVT VT = RHSC->getValueType(0);
    bool knownInvalid = false;

    // I'm not convinced the rest of LLVM handles these edge cases properly,
    // but we can at least get it right.
    if (isSignedIntSetCC(CC)) {
      C = RHSC->getSExtValue();
    } else if (RHSC->getZExtValue() > INT64_MAX) {
      // A 64-bit constant not representable by a signed 64-bit integer is far
      // too big to fit into a SUBS immediate anyway.
1821 knownInvalid = true; 1822 } else { 1823 C = RHSC->getZExtValue(); 1824 } 1825 1826 if (!knownInvalid && !isLegalICmpImmediate(C)) { 1827 // Constant does not fit, try adjusting it by one? 1828 switch (CC) { 1829 default: break; 1830 case ISD::SETLT: 1831 case ISD::SETGE: 1832 if (isLegalICmpImmediate(C-1)) { 1833 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; 1834 RHS = DAG.getConstant(C-1, VT); 1835 } 1836 break; 1837 case ISD::SETULT: 1838 case ISD::SETUGE: 1839 if (isLegalICmpImmediate(C-1)) { 1840 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; 1841 RHS = DAG.getConstant(C-1, VT); 1842 } 1843 break; 1844 case ISD::SETLE: 1845 case ISD::SETGT: 1846 if (isLegalICmpImmediate(C+1)) { 1847 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; 1848 RHS = DAG.getConstant(C+1, VT); 1849 } 1850 break; 1851 case ISD::SETULE: 1852 case ISD::SETUGT: 1853 if (isLegalICmpImmediate(C+1)) { 1854 CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; 1855 RHS = DAG.getConstant(C+1, VT); 1856 } 1857 break; 1858 } 1859 } 1860 } 1861 1862 A64CC::CondCodes CondCode = IntCCToA64CC(CC); 1863 A64cc = DAG.getConstant(CondCode, MVT::i32); 1864 return DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS, 1865 DAG.getCondCode(CC)); 1866} 1867 1868static A64CC::CondCodes FPCCToA64CC(ISD::CondCode CC, 1869 A64CC::CondCodes &Alternative) { 1870 A64CC::CondCodes CondCode = A64CC::Invalid; 1871 Alternative = A64CC::Invalid; 1872 1873 switch (CC) { 1874 default: llvm_unreachable("Unknown FP condition!"); 1875 case ISD::SETEQ: 1876 case ISD::SETOEQ: CondCode = A64CC::EQ; break; 1877 case ISD::SETGT: 1878 case ISD::SETOGT: CondCode = A64CC::GT; break; 1879 case ISD::SETGE: 1880 case ISD::SETOGE: CondCode = A64CC::GE; break; 1881 case ISD::SETOLT: CondCode = A64CC::MI; break; 1882 case ISD::SETOLE: CondCode = A64CC::LS; break; 1883 case ISD::SETONE: CondCode = A64CC::MI; Alternative = A64CC::GT; break; 1884 case ISD::SETO: CondCode = A64CC::VC; break; 1885 case ISD::SETUO: CondCode = A64CC::VS; break; 1886 case ISD::SETUEQ: CondCode = A64CC::EQ; Alternative = A64CC::VS; break; 1887 case ISD::SETUGT: CondCode = A64CC::HI; break; 1888 case ISD::SETUGE: CondCode = A64CC::PL; break; 1889 case ISD::SETLT: 1890 case ISD::SETULT: CondCode = A64CC::LT; break; 1891 case ISD::SETLE: 1892 case ISD::SETULE: CondCode = A64CC::LE; break; 1893 case ISD::SETNE: 1894 case ISD::SETUNE: CondCode = A64CC::NE; break; 1895 } 1896 return CondCode; 1897} 1898 1899SDValue 1900AArch64TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { 1901 SDLoc DL(Op); 1902 EVT PtrVT = getPointerTy(); 1903 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 1904 1905 switch(getTargetMachine().getCodeModel()) { 1906 case CodeModel::Small: 1907 // The most efficient code is PC-relative anyway for the small memory model, 1908 // so we don't need to worry about relocation model. 
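    // This wrapper is expected to select to an ADRP/ADD pair, roughly
    // (illustrative; the label name is hypothetical):
    //   adrp x0, .Ltmp0
    //   add  x0, x0, #:lo12:.Ltmp0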
1909 return DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT, 1910 DAG.getTargetBlockAddress(BA, PtrVT, 0, 1911 AArch64II::MO_NO_FLAG), 1912 DAG.getTargetBlockAddress(BA, PtrVT, 0, 1913 AArch64II::MO_LO12), 1914 DAG.getConstant(/*Alignment=*/ 4, MVT::i32)); 1915 case CodeModel::Large: 1916 return DAG.getNode( 1917 AArch64ISD::WrapperLarge, DL, PtrVT, 1918 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G3), 1919 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G2_NC), 1920 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G1_NC), 1921 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G0_NC)); 1922 default: 1923 llvm_unreachable("Only small and large code models supported now"); 1924 } 1925} 1926 1927 1928// (BRCOND chain, val, dest) 1929SDValue 1930AArch64TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { 1931 SDLoc dl(Op); 1932 SDValue Chain = Op.getOperand(0); 1933 SDValue TheBit = Op.getOperand(1); 1934 SDValue DestBB = Op.getOperand(2); 1935 1936 // AArch64 BooleanContents is the default UndefinedBooleanContent, which means 1937 // that as the consumer we are responsible for ignoring rubbish in higher 1938 // bits. 1939 TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit, 1940 DAG.getConstant(1, MVT::i32)); 1941 1942 SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit, 1943 DAG.getConstant(0, TheBit.getValueType()), 1944 DAG.getCondCode(ISD::SETNE)); 1945 1946 return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, Chain, 1947 A64CMP, DAG.getConstant(A64CC::NE, MVT::i32), 1948 DestBB); 1949} 1950 1951// (BR_CC chain, condcode, lhs, rhs, dest) 1952SDValue 1953AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 1954 SDLoc dl(Op); 1955 SDValue Chain = Op.getOperand(0); 1956 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 1957 SDValue LHS = Op.getOperand(2); 1958 SDValue RHS = Op.getOperand(3); 1959 SDValue DestBB = Op.getOperand(4); 1960 1961 if (LHS.getValueType() == MVT::f128) { 1962 // f128 comparisons are lowered to runtime calls by a routine which sets 1963 // LHS, RHS and CC appropriately for the rest of this function to continue. 1964 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl); 1965 1966 // If softenSetCCOperands returned a scalar, we need to compare the result 1967 // against zero to select between true and false values. 1968 if (RHS.getNode() == 0) { 1969 RHS = DAG.getConstant(0, LHS.getValueType()); 1970 CC = ISD::SETNE; 1971 } 1972 } 1973 1974 if (LHS.getValueType().isInteger()) { 1975 SDValue A64cc; 1976 1977 // Integers are handled in a separate function because the combinations of 1978 // immediates and tests can get hairy and we may want to fiddle things. 1979 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl); 1980 1981 return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, 1982 Chain, CmpOp, A64cc, DestBB); 1983 } 1984 1985 // Note that some LLVM floating-point CondCodes can't be lowered to a single 1986 // conditional branch, hence FPCCToA64CC can set a second test, where either 1987 // passing is sufficient. 
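  // For example (illustrative): SETONE has no single A64 condition, so
  // FPCCToA64CC returns MI with GT as the alternative, and we emit two
  // conditional branches to the same destination below.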
  A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
  CondCode = FPCCToA64CC(CC, Alternative);
  SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
  SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
                              DAG.getCondCode(CC));
  SDValue A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
                                 Chain, SetCC, A64cc, DestBB);

  if (Alternative != A64CC::Invalid) {
    A64cc = DAG.getConstant(Alternative, MVT::i32);
    A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
                           A64BR_CC, SetCC, A64cc, DestBB);
  }

  return A64BR_CC;
}

SDValue
AArch64TargetLowering::LowerF128ToCall(SDValue Op, SelectionDAG &DAG,
                                       RTLIB::Libcall Call) const {
  ArgListTy Args;
  ArgListEntry Entry;
  for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
    EVT ArgVT = Op.getOperand(i).getValueType();
    Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
    Entry.Node = Op.getOperand(i);
    Entry.Ty = ArgTy;
    Entry.isSExt = false;
    Entry.isZExt = false;
    Args.push_back(Entry);
  }
  SDValue Callee = DAG.getExternalSymbol(getLibcallName(Call), getPointerTy());

  Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());

  // By default, the input chain to this libcall is the entry node of the
  // function. If the libcall is going to be emitted as a tail call then
  // isUsedByReturnOnly will change it to the right chain if the return
  // node which is being folded has a non-entry input chain.
  SDValue InChain = DAG.getEntryNode();

  // isTailCall may be true since the callee does not reference the caller's
  // stack frame. Check if it's in the right position.
  SDValue TCChain = InChain;
  bool isTailCall = isInTailCallPosition(DAG, Op.getNode(), TCChain);
  if (isTailCall)
    InChain = TCChain;

  TargetLowering::
  CallLoweringInfo CLI(InChain, RetTy, false, false, false, false,
                       0, getLibcallCallingConv(Call), isTailCall,
                       /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
                       Callee, Args, DAG, SDLoc(Op));
  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);

  if (!CallInfo.second.getNode())
    // It's a tail call; return the chain (which is the DAG root).
2045 return DAG.getRoot(); 2046 2047 return CallInfo.first; 2048} 2049 2050SDValue 2051AArch64TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { 2052 if (Op.getOperand(0).getValueType() != MVT::f128) { 2053 // It's legal except when f128 is involved 2054 return Op; 2055 } 2056 2057 RTLIB::Libcall LC; 2058 LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType()); 2059 2060 SDValue SrcVal = Op.getOperand(0); 2061 return makeLibCall(DAG, LC, Op.getValueType(), &SrcVal, 1, 2062 /*isSigned*/ false, SDLoc(Op)).first; 2063} 2064 2065SDValue 2066AArch64TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const { 2067 assert(Op.getValueType() == MVT::f128 && "Unexpected lowering"); 2068 2069 RTLIB::Libcall LC; 2070 LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType()); 2071 2072 return LowerF128ToCall(Op, DAG, LC); 2073} 2074 2075SDValue 2076AArch64TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 2077 bool IsSigned) const { 2078 if (Op.getOperand(0).getValueType() != MVT::f128) { 2079 // It's legal except when f128 is involved 2080 return Op; 2081 } 2082 2083 RTLIB::Libcall LC; 2084 if (IsSigned) 2085 LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(), Op.getValueType()); 2086 else 2087 LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(), Op.getValueType()); 2088 2089 return LowerF128ToCall(Op, DAG, LC); 2090} 2091 2092SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ 2093 MachineFunction &MF = DAG.getMachineFunction(); 2094 MachineFrameInfo *MFI = MF.getFrameInfo(); 2095 MFI->setReturnAddressIsTaken(true); 2096 2097 EVT VT = Op.getValueType(); 2098 SDLoc dl(Op); 2099 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2100 if (Depth) { 2101 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 2102 SDValue Offset = DAG.getConstant(8, MVT::i64); 2103 return DAG.getLoad(VT, dl, DAG.getEntryNode(), 2104 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), 2105 MachinePointerInfo(), false, false, false, 0); 2106 } 2107 2108 // Return X30, which contains the return address. Mark it an implicit live-in. 
2109 unsigned Reg = MF.addLiveIn(AArch64::X30, getRegClassFor(MVT::i64)); 2110 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, MVT::i64); 2111} 2112 2113 2114SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) 2115 const { 2116 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 2117 MFI->setFrameAddressIsTaken(true); 2118 2119 EVT VT = Op.getValueType(); 2120 SDLoc dl(Op); 2121 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2122 unsigned FrameReg = AArch64::X29; 2123 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 2124 while (Depth--) 2125 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 2126 MachinePointerInfo(), 2127 false, false, false, 0); 2128 return FrameAddr; 2129} 2130 2131SDValue 2132AArch64TargetLowering::LowerGlobalAddressELFLarge(SDValue Op, 2133 SelectionDAG &DAG) const { 2134 assert(getTargetMachine().getCodeModel() == CodeModel::Large); 2135 assert(getTargetMachine().getRelocationModel() == Reloc::Static); 2136 2137 EVT PtrVT = getPointerTy(); 2138 SDLoc dl(Op); 2139 const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op); 2140 const GlobalValue *GV = GN->getGlobal(); 2141 2142 SDValue GlobalAddr = DAG.getNode( 2143 AArch64ISD::WrapperLarge, dl, PtrVT, 2144 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G3), 2145 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G2_NC), 2146 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G1_NC), 2147 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G0_NC)); 2148 2149 if (GN->getOffset() != 0) 2150 return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr, 2151 DAG.getConstant(GN->getOffset(), PtrVT)); 2152 2153 return GlobalAddr; 2154} 2155 2156SDValue 2157AArch64TargetLowering::LowerGlobalAddressELFSmall(SDValue Op, 2158 SelectionDAG &DAG) const { 2159 assert(getTargetMachine().getCodeModel() == CodeModel::Small); 2160 2161 EVT PtrVT = getPointerTy(); 2162 SDLoc dl(Op); 2163 const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op); 2164 const GlobalValue *GV = GN->getGlobal(); 2165 unsigned Alignment = GV->getAlignment(); 2166 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2167 if (GV->isWeakForLinker() && GV->isDeclaration() && RelocM == Reloc::Static) { 2168 // Weak undefined symbols can't use ADRP/ADD pair since they should evaluate 2169 // to zero when they remain undefined. In PIC mode the GOT can take care of 2170 // this, but in absolute mode we use a constant pool load. 
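    // The load below is expected to select to a literal-pool access, roughly
    // (illustrative; the constant-pool label is hypothetical):
    //   adrp x0, .LCPI0_0
    //   ldr  x0, [x0, #:lo12:.LCPI0_0]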
2171 SDValue PoolAddr; 2172 PoolAddr = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT, 2173 DAG.getTargetConstantPool(GV, PtrVT, 0, 0, 2174 AArch64II::MO_NO_FLAG), 2175 DAG.getTargetConstantPool(GV, PtrVT, 0, 0, 2176 AArch64II::MO_LO12), 2177 DAG.getConstant(8, MVT::i32)); 2178 SDValue GlobalAddr = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), PoolAddr, 2179 MachinePointerInfo::getConstantPool(), 2180 /*isVolatile=*/ false, 2181 /*isNonTemporal=*/ true, 2182 /*isInvariant=*/ true, 8); 2183 if (GN->getOffset() != 0) 2184 return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr, 2185 DAG.getConstant(GN->getOffset(), PtrVT)); 2186 2187 return GlobalAddr; 2188 } 2189 2190 if (Alignment == 0) { 2191 const PointerType *GVPtrTy = cast<PointerType>(GV->getType()); 2192 if (GVPtrTy->getElementType()->isSized()) { 2193 Alignment 2194 = getDataLayout()->getABITypeAlignment(GVPtrTy->getElementType()); 2195 } else { 2196 // Be conservative if we can't guess, not that it really matters: 2197 // functions and labels aren't valid for loads, and the methods used to 2198 // actually calculate an address work with any alignment. 2199 Alignment = 1; 2200 } 2201 } 2202 2203 unsigned char HiFixup, LoFixup; 2204 bool UseGOT = getSubtarget()->GVIsIndirectSymbol(GV, RelocM); 2205 2206 if (UseGOT) { 2207 HiFixup = AArch64II::MO_GOT; 2208 LoFixup = AArch64II::MO_GOT_LO12; 2209 Alignment = 8; 2210 } else { 2211 HiFixup = AArch64II::MO_NO_FLAG; 2212 LoFixup = AArch64II::MO_LO12; 2213 } 2214 2215 // AArch64's small model demands the following sequence: 2216 // ADRP x0, somewhere 2217 // ADD x0, x0, #:lo12:somewhere ; (or LDR directly). 2218 SDValue GlobalRef = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT, 2219 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2220 HiFixup), 2221 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2222 LoFixup), 2223 DAG.getConstant(Alignment, MVT::i32)); 2224 2225 if (UseGOT) { 2226 GlobalRef = DAG.getNode(AArch64ISD::GOTLoad, dl, PtrVT, DAG.getEntryNode(), 2227 GlobalRef); 2228 } 2229 2230 if (GN->getOffset() != 0) 2231 return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalRef, 2232 DAG.getConstant(GN->getOffset(), PtrVT)); 2233 2234 return GlobalRef; 2235} 2236 2237SDValue 2238AArch64TargetLowering::LowerGlobalAddressELF(SDValue Op, 2239 SelectionDAG &DAG) const { 2240 // TableGen doesn't have easy access to the CodeModel or RelocationModel, so 2241 // we make those distinctions here. 2242 2243 switch (getTargetMachine().getCodeModel()) { 2244 case CodeModel::Small: 2245 return LowerGlobalAddressELFSmall(Op, DAG); 2246 case CodeModel::Large: 2247 return LowerGlobalAddressELFLarge(Op, DAG); 2248 default: 2249 llvm_unreachable("Only small and large code models supported now"); 2250 } 2251} 2252 2253SDValue AArch64TargetLowering::LowerTLSDescCall(SDValue SymAddr, 2254 SDValue DescAddr, 2255 SDLoc DL, 2256 SelectionDAG &DAG) const { 2257 EVT PtrVT = getPointerTy(); 2258 2259 // The function we need to call is simply the first entry in the GOT for this 2260 // descriptor, load it in preparation. 2261 SDValue Func, Chain; 2262 Func = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(), 2263 DescAddr); 2264 2265 // The function takes only one argument: the address of the descriptor itself 2266 // in X0. 2267 SDValue Glue; 2268 Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, AArch64::X0, DescAddr, Glue); 2269 Glue = Chain.getValue(1); 2270 2271 // Finally, there's a special calling-convention which means that the lookup 2272 // must preserve all registers (except X0, obviously). 
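  // For reference, the complete lookup is expected to select to roughly the
  // standard TLSDESC sequence (illustrative; "var" is a placeholder):
  //   adrp x0, :tlsdesc:var
  //   ldr  x1, [x0, #:tlsdesc_lo12:var]
  //   add  x0, x0, #:tlsdesc_lo12:var
  //   blr  x1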
  const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
  const AArch64RegisterInfo *A64RI
    = static_cast<const AArch64RegisterInfo *>(TRI);
  const uint32_t *Mask = A64RI->getTLSDescCallPreservedMask();

  // We're now ready to populate the argument list, as with a normal call:
  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Func);
  Ops.push_back(SymAddr);
  Ops.push_back(DAG.getRegister(AArch64::X0, PtrVT));
  Ops.push_back(DAG.getRegisterMask(Mask));
  Ops.push_back(Glue);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(AArch64ISD::TLSDESCCALL, DL, NodeTys, &Ops[0],
                      Ops.size());
  Glue = Chain.getValue(1);

  // After the call, the offset from TPIDR_EL0 is in X0; copy it out and pass
  // it back to the generic handling code.
  return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Glue);
}

SDValue
AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  assert(getSubtarget()->isTargetELF() &&
         "TLS not implemented for non-ELF targets");
  assert(getTargetMachine().getCodeModel() == CodeModel::Small
         && "TLS only supported in small memory model");
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);

  TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal());

  SDValue TPOff;
  EVT PtrVT = getPointerTy();
  SDLoc DL(Op);
  const GlobalValue *GV = GA->getGlobal();

  SDValue ThreadBase = DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT);

  if (Model == TLSModel::InitialExec) {
    TPOff = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
                        DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                                   AArch64II::MO_GOTTPREL),
                        DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                                   AArch64II::MO_GOTTPREL_LO12),
                        DAG.getConstant(8, MVT::i32));
    TPOff = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(),
                        TPOff);
  } else if (Model == TLSModel::LocalExec) {
    SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
                                               AArch64II::MO_TPREL_G1);
    SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
                                               AArch64II::MO_TPREL_G0_NC);

    TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar,
                                       DAG.getTargetConstant(1, MVT::i32)), 0);
    TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT,
                                       TPOff, LoVar,
                                       DAG.getTargetConstant(0, MVT::i32)), 0);
  } else if (Model == TLSModel::GeneralDynamic) {
    // Accesses used in this sequence go via the TLS descriptor which lives in
    // the GOT. Prepare an address we can use to handle this.
    SDValue HiDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                                AArch64II::MO_TLSDESC);
    SDValue LoDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                                AArch64II::MO_TLSDESC_LO12);
    SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
                                   HiDesc, LoDesc,
                                   DAG.getConstant(8, MVT::i32));
    SDValue SymAddr = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0);

    TPOff = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG);
  } else if (Model == TLSModel::LocalDynamic) {
    // Local-dynamic accesses proceed in two phases: first, a general-dynamic
    // TLS descriptor call against the special symbol _TLS_MODULE_BASE_
    // calculates the beginning of the module's TLS region; then a DTPREL
    // offset within that region is added.
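    // Concretely (illustrative): for a variable "static __thread int x;" we
    // compute
    //   ThreadBase = TLSDESC call on _TLS_MODULE_BASE_
    //   TPOff      = MOVZ/MOVK pair using :dtprel_g1: and :dtprel_g0_nc: on x
    // and the final address is ThreadBase + TPOff, formed at the end of this
    // function.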
2353 2354 // These accesses will need deduplicating if there's more than one. 2355 AArch64MachineFunctionInfo* MFI = DAG.getMachineFunction() 2356 .getInfo<AArch64MachineFunctionInfo>(); 2357 MFI->incNumLocalDynamicTLSAccesses(); 2358 2359 2360 // Get the location of _TLS_MODULE_BASE_: 2361 SDValue HiDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT, 2362 AArch64II::MO_TLSDESC); 2363 SDValue LoDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT, 2364 AArch64II::MO_TLSDESC_LO12); 2365 SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT, 2366 HiDesc, LoDesc, 2367 DAG.getConstant(8, MVT::i32)); 2368 SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT); 2369 2370 ThreadBase = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG); 2371 2372 // Get the variable's offset from _TLS_MODULE_BASE_ 2373 SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0, 2374 AArch64II::MO_DTPREL_G1); 2375 SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0, 2376 AArch64II::MO_DTPREL_G0_NC); 2377 2378 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar, 2379 DAG.getTargetConstant(0, MVT::i32)), 0); 2380 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT, 2381 TPOff, LoVar, 2382 DAG.getTargetConstant(0, MVT::i32)), 0); 2383 } else 2384 llvm_unreachable("Unsupported TLS access model"); 2385 2386 2387 return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff); 2388} 2389 2390SDValue 2391AArch64TargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG, 2392 bool IsSigned) const { 2393 if (Op.getValueType() != MVT::f128) { 2394 // Legal for everything except f128. 2395 return Op; 2396 } 2397 2398 RTLIB::Libcall LC; 2399 if (IsSigned) 2400 LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType()); 2401 else 2402 LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType()); 2403 2404 return LowerF128ToCall(Op, DAG, LC); 2405} 2406 2407 2408SDValue 2409AArch64TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { 2410 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); 2411 SDLoc dl(JT); 2412 EVT PtrVT = getPointerTy(); 2413 2414 // When compiling PIC, jump tables get put in the code section so a static 2415 // relocation-style is acceptable for both cases. 
2416 switch (getTargetMachine().getCodeModel()) { 2417 case CodeModel::Small: 2418 return DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT, 2419 DAG.getTargetJumpTable(JT->getIndex(), PtrVT), 2420 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 2421 AArch64II::MO_LO12), 2422 DAG.getConstant(1, MVT::i32)); 2423 case CodeModel::Large: 2424 return DAG.getNode( 2425 AArch64ISD::WrapperLarge, dl, PtrVT, 2426 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G3), 2427 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G2_NC), 2428 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G1_NC), 2429 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G0_NC)); 2430 default: 2431 llvm_unreachable("Only small and large code models supported now"); 2432 } 2433} 2434 2435// (SELECT_CC lhs, rhs, iftrue, iffalse, condcode) 2436SDValue 2437AArch64TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 2438 SDLoc dl(Op); 2439 SDValue LHS = Op.getOperand(0); 2440 SDValue RHS = Op.getOperand(1); 2441 SDValue IfTrue = Op.getOperand(2); 2442 SDValue IfFalse = Op.getOperand(3); 2443 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 2444 2445 if (LHS.getValueType() == MVT::f128) { 2446 // f128 comparisons are lowered to libcalls, but slot in nicely here 2447 // afterwards. 2448 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl); 2449 2450 // If softenSetCCOperands returned a scalar, we need to compare the result 2451 // against zero to select between true and false values. 2452 if (RHS.getNode() == 0) { 2453 RHS = DAG.getConstant(0, LHS.getValueType()); 2454 CC = ISD::SETNE; 2455 } 2456 } 2457 2458 if (LHS.getValueType().isInteger()) { 2459 SDValue A64cc; 2460 2461 // Integers are handled in a separate function because the combinations of 2462 // immediates and tests can get hairy and we may want to fiddle things. 2463 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl); 2464 2465 return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(), 2466 CmpOp, IfTrue, IfFalse, A64cc); 2467 } 2468 2469 // Note that some LLVM floating-point CondCodes can't be lowered to a single 2470 // conditional branch, hence FPCCToA64CC can set a second test, where either 2471 // passing is sufficient. 2472 A64CC::CondCodes CondCode, Alternative = A64CC::Invalid; 2473 CondCode = FPCCToA64CC(CC, Alternative); 2474 SDValue A64cc = DAG.getConstant(CondCode, MVT::i32); 2475 SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS, 2476 DAG.getCondCode(CC)); 2477 SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, 2478 Op.getValueType(), 2479 SetCC, IfTrue, IfFalse, A64cc); 2480 2481 if (Alternative != A64CC::Invalid) { 2482 A64cc = DAG.getConstant(Alternative, MVT::i32); 2483 A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(), 2484 SetCC, IfTrue, A64SELECT_CC, A64cc); 2485 2486 } 2487 2488 return A64SELECT_CC; 2489} 2490 2491// (SELECT testbit, iftrue, iffalse) 2492SDValue 2493AArch64TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 2494 SDLoc dl(Op); 2495 SDValue TheBit = Op.getOperand(0); 2496 SDValue IfTrue = Op.getOperand(1); 2497 SDValue IfFalse = Op.getOperand(2); 2498 2499 // AArch64 BooleanContents is the default UndefinedBooleanContent, which means 2500 // that as the consumer we are responsible for ignoring rubbish in higher 2501 // bits. 
2502 TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit, 2503 DAG.getConstant(1, MVT::i32)); 2504 SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit, 2505 DAG.getConstant(0, TheBit.getValueType()), 2506 DAG.getCondCode(ISD::SETNE)); 2507 2508 return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(), 2509 A64CMP, IfTrue, IfFalse, 2510 DAG.getConstant(A64CC::NE, MVT::i32)); 2511} 2512 2513static SDValue LowerVectorSETCC(SDValue Op, SelectionDAG &DAG) { 2514 SDLoc DL(Op); 2515 SDValue LHS = Op.getOperand(0); 2516 SDValue RHS = Op.getOperand(1); 2517 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 2518 EVT VT = Op.getValueType(); 2519 bool Invert = false; 2520 SDValue Op0, Op1; 2521 unsigned Opcode; 2522 2523 if (LHS.getValueType().isInteger()) { 2524 2525 // Attempt to use Vector Integer Compare Mask Test instruction. 2526 // TST = icmp ne (and (op0, op1), zero). 2527 if (CC == ISD::SETNE) { 2528 if (((LHS.getOpcode() == ISD::AND) && 2529 ISD::isBuildVectorAllZeros(RHS.getNode())) || 2530 ((RHS.getOpcode() == ISD::AND) && 2531 ISD::isBuildVectorAllZeros(LHS.getNode()))) { 2532 2533 SDValue AndOp = (LHS.getOpcode() == ISD::AND) ? LHS : RHS; 2534 SDValue NewLHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(0)); 2535 SDValue NewRHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(1)); 2536 return DAG.getNode(AArch64ISD::NEON_TST, DL, VT, NewLHS, NewRHS); 2537 } 2538 } 2539 2540 // Attempt to use Vector Integer Compare Mask against Zero instr (Signed). 2541 // Note: Compare against Zero does not support unsigned predicates. 2542 if ((ISD::isBuildVectorAllZeros(RHS.getNode()) || 2543 ISD::isBuildVectorAllZeros(LHS.getNode())) && 2544 !isUnsignedIntSetCC(CC)) { 2545 2546 // If LHS is the zero value, swap operands and CondCode. 2547 if (ISD::isBuildVectorAllZeros(LHS.getNode())) { 2548 CC = getSetCCSwappedOperands(CC); 2549 Op0 = RHS; 2550 } else 2551 Op0 = LHS; 2552 2553 // Ensure valid CondCode for Compare Mask against Zero instruction: 2554 // EQ, GE, GT, LE, LT. 2555 if (ISD::SETNE == CC) { 2556 Invert = true; 2557 CC = ISD::SETEQ; 2558 } 2559 2560 // Using constant type to differentiate integer and FP compares with zero. 2561 Op1 = DAG.getConstant(0, MVT::i32); 2562 Opcode = AArch64ISD::NEON_CMPZ; 2563 2564 } else { 2565 // Attempt to use Vector Integer Compare Mask instr (Signed/Unsigned). 2566 // Ensure valid CondCode for Compare Mask instr: EQ, GE, GT, UGE, UGT. 2567 bool Swap = false; 2568 switch (CC) { 2569 default: 2570 llvm_unreachable("Illegal integer comparison."); 2571 case ISD::SETEQ: 2572 case ISD::SETGT: 2573 case ISD::SETGE: 2574 case ISD::SETUGT: 2575 case ISD::SETUGE: 2576 break; 2577 case ISD::SETNE: 2578 Invert = true; 2579 CC = ISD::SETEQ; 2580 break; 2581 case ISD::SETULT: 2582 case ISD::SETULE: 2583 case ISD::SETLT: 2584 case ISD::SETLE: 2585 Swap = true; 2586 CC = getSetCCSwappedOperands(CC); 2587 } 2588 2589 if (Swap) 2590 std::swap(LHS, RHS); 2591 2592 Opcode = AArch64ISD::NEON_CMP; 2593 Op0 = LHS; 2594 Op1 = RHS; 2595 } 2596 2597 // Generate Compare Mask instr or Compare Mask against Zero instr. 2598 SDValue NeonCmp = 2599 DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC)); 2600 2601 if (Invert) 2602 NeonCmp = DAG.getNOT(DL, NeonCmp, VT); 2603 2604 return NeonCmp; 2605 } 2606 2607 // Now handle Floating Point cases. 2608 // Attempt to use Vector Floating Point Compare Mask against Zero instruction. 
  if (ISD::isBuildVectorAllZeros(RHS.getNode()) ||
      ISD::isBuildVectorAllZeros(LHS.getNode())) {

    // If LHS is the zero value, swap operands and CondCode.
    if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
      CC = getSetCCSwappedOperands(CC);
      Op0 = RHS;
    } else
      Op0 = LHS;

    // Using constant type to differentiate integer and FP compares with zero.
    Op1 = DAG.getConstantFP(0, MVT::f32);
    Opcode = AArch64ISD::NEON_CMPZ;
  } else {
    // Attempt to use Vector Floating Point Compare Mask instruction.
    Op0 = LHS;
    Op1 = RHS;
    Opcode = AArch64ISD::NEON_CMP;
  }

  SDValue NeonCmpAlt;
  // Some register compares have to be implemented with swapped CC and
  // operands, e.g.: OLT implemented as OGT with swapped operands.
  bool SwapIfRegArgs = false;

  // Ensure valid CondCode for FP Compare Mask against Zero instruction:
  // EQ, GE, GT, LE, LT.
  // And ensure valid CondCode for FP Compare Mask instruction: EQ, GE, GT.
  switch (CC) {
  default:
    llvm_unreachable("Illegal FP comparison");
  case ISD::SETUNE:
  case ISD::SETNE:
    Invert = true; // Fallthrough
  case ISD::SETOEQ:
  case ISD::SETEQ:
    CC = ISD::SETEQ;
    break;
  case ISD::SETOLT:
  case ISD::SETLT:
    CC = ISD::SETLT;
    SwapIfRegArgs = true;
    break;
  case ISD::SETOGT:
  case ISD::SETGT:
    CC = ISD::SETGT;
    break;
  case ISD::SETOLE:
  case ISD::SETLE:
    CC = ISD::SETLE;
    SwapIfRegArgs = true;
    break;
  case ISD::SETOGE:
  case ISD::SETGE:
    CC = ISD::SETGE;
    break;
  case ISD::SETUGE:
    Invert = true;
    CC = ISD::SETLT;
    SwapIfRegArgs = true;
    break;
  case ISD::SETULE:
    Invert = true;
    CC = ISD::SETGT;
    break;
  case ISD::SETUGT:
    Invert = true;
    CC = ISD::SETLE;
    SwapIfRegArgs = true;
    break;
  case ISD::SETULT:
    Invert = true;
    CC = ISD::SETGE;
    break;
  case ISD::SETUEQ:
    Invert = true; // Fallthrough
  case ISD::SETONE:
    // Expand this to (OGT | OLT).
    NeonCmpAlt =
        DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGT));
    CC = ISD::SETLT;
    SwapIfRegArgs = true;
    break;
  case ISD::SETUO:
    Invert = true; // Fallthrough
  case ISD::SETO:
    // Expand this to (OGE | OLT).
2696 NeonCmpAlt = 2697 DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGE)); 2698 CC = ISD::SETLT; 2699 SwapIfRegArgs = true; 2700 break; 2701 } 2702 2703 if (Opcode == AArch64ISD::NEON_CMP && SwapIfRegArgs) { 2704 CC = getSetCCSwappedOperands(CC); 2705 std::swap(Op0, Op1); 2706 } 2707 2708 // Generate FP Compare Mask instr or FP Compare Mask against Zero instr 2709 SDValue NeonCmp = DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC)); 2710 2711 if (NeonCmpAlt.getNode()) 2712 NeonCmp = DAG.getNode(ISD::OR, DL, VT, NeonCmp, NeonCmpAlt); 2713 2714 if (Invert) 2715 NeonCmp = DAG.getNOT(DL, NeonCmp, VT); 2716 2717 return NeonCmp; 2718} 2719 2720// (SETCC lhs, rhs, condcode) 2721SDValue 2722AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 2723 SDLoc dl(Op); 2724 SDValue LHS = Op.getOperand(0); 2725 SDValue RHS = Op.getOperand(1); 2726 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 2727 EVT VT = Op.getValueType(); 2728 2729 if (VT.isVector()) 2730 return LowerVectorSETCC(Op, DAG); 2731 2732 if (LHS.getValueType() == MVT::f128) { 2733 // f128 comparisons will be lowered to libcalls giving a valid LHS and RHS 2734 // for the rest of the function (some i32 or i64 values). 2735 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl); 2736 2737 // If softenSetCCOperands returned a scalar, use it. 2738 if (RHS.getNode() == 0) { 2739 assert(LHS.getValueType() == Op.getValueType() && 2740 "Unexpected setcc expansion!"); 2741 return LHS; 2742 } 2743 } 2744 2745 if (LHS.getValueType().isInteger()) { 2746 SDValue A64cc; 2747 2748 // Integers are handled in a separate function because the combinations of 2749 // immediates and tests can get hairy and we may want to fiddle things. 2750 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl); 2751 2752 return DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, 2753 CmpOp, DAG.getConstant(1, VT), DAG.getConstant(0, VT), 2754 A64cc); 2755 } 2756 2757 // Note that some LLVM floating-point CondCodes can't be lowered to a single 2758 // conditional branch, hence FPCCToA64CC can set a second test, where either 2759 // passing is sufficient. 2760 A64CC::CondCodes CondCode, Alternative = A64CC::Invalid; 2761 CondCode = FPCCToA64CC(CC, Alternative); 2762 SDValue A64cc = DAG.getConstant(CondCode, MVT::i32); 2763 SDValue CmpOp = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS, 2764 DAG.getCondCode(CC)); 2765 SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, 2766 CmpOp, DAG.getConstant(1, VT), 2767 DAG.getConstant(0, VT), A64cc); 2768 2769 if (Alternative != A64CC::Invalid) { 2770 A64cc = DAG.getConstant(Alternative, MVT::i32); 2771 A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, CmpOp, 2772 DAG.getConstant(1, VT), A64SELECT_CC, A64cc); 2773 } 2774 2775 return A64SELECT_CC; 2776} 2777 2778SDValue 2779AArch64TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { 2780 const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); 2781 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); 2782 2783 // We have to make sure we copy the entire structure: 8+8+8+4+4 = 32 bytes 2784 // rather than just 8. 
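  // The structure being copied is, in effect (field names per the AAPCS64;
  // the offsets match those stored by LowerVASTART below):
  //   struct va_list {
  //     void *__stack;   // offset 0
  //     void *__gr_top;  // offset 8
  //     void *__vr_top;  // offset 16
  //     int   __gr_offs; // offset 24
  //     int   __vr_offs; // offset 28
  //   };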
2785 return DAG.getMemcpy(Op.getOperand(0), SDLoc(Op), 2786 Op.getOperand(1), Op.getOperand(2), 2787 DAG.getConstant(32, MVT::i32), 8, false, false, 2788 MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV)); 2789} 2790 2791SDValue 2792AArch64TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { 2793 // The layout of the va_list struct is specified in the AArch64 Procedure Call 2794 // Standard, section B.3. 2795 MachineFunction &MF = DAG.getMachineFunction(); 2796 AArch64MachineFunctionInfo *FuncInfo 2797 = MF.getInfo<AArch64MachineFunctionInfo>(); 2798 SDLoc DL(Op); 2799 2800 SDValue Chain = Op.getOperand(0); 2801 SDValue VAList = Op.getOperand(1); 2802 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2803 SmallVector<SDValue, 4> MemOps; 2804 2805 // void *__stack at offset 0 2806 SDValue Stack = DAG.getFrameIndex(FuncInfo->getVariadicStackIdx(), 2807 getPointerTy()); 2808 MemOps.push_back(DAG.getStore(Chain, DL, Stack, VAList, 2809 MachinePointerInfo(SV), false, false, 0)); 2810 2811 // void *__gr_top at offset 8 2812 int GPRSize = FuncInfo->getVariadicGPRSize(); 2813 if (GPRSize > 0) { 2814 SDValue GRTop, GRTopAddr; 2815 2816 GRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList, 2817 DAG.getConstant(8, getPointerTy())); 2818 2819 GRTop = DAG.getFrameIndex(FuncInfo->getVariadicGPRIdx(), getPointerTy()); 2820 GRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), GRTop, 2821 DAG.getConstant(GPRSize, getPointerTy())); 2822 2823 MemOps.push_back(DAG.getStore(Chain, DL, GRTop, GRTopAddr, 2824 MachinePointerInfo(SV, 8), 2825 false, false, 0)); 2826 } 2827 2828 // void *__vr_top at offset 16 2829 int FPRSize = FuncInfo->getVariadicFPRSize(); 2830 if (FPRSize > 0) { 2831 SDValue VRTop, VRTopAddr; 2832 VRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList, 2833 DAG.getConstant(16, getPointerTy())); 2834 2835 VRTop = DAG.getFrameIndex(FuncInfo->getVariadicFPRIdx(), getPointerTy()); 2836 VRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), VRTop, 2837 DAG.getConstant(FPRSize, getPointerTy())); 2838 2839 MemOps.push_back(DAG.getStore(Chain, DL, VRTop, VRTopAddr, 2840 MachinePointerInfo(SV, 16), 2841 false, false, 0)); 2842 } 2843 2844 // int __gr_offs at offset 24 2845 SDValue GROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList, 2846 DAG.getConstant(24, getPointerTy())); 2847 MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-GPRSize, MVT::i32), 2848 GROffsAddr, MachinePointerInfo(SV, 24), 2849 false, false, 0)); 2850 2851 // int __vr_offs at offset 28 2852 SDValue VROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList, 2853 DAG.getConstant(28, getPointerTy())); 2854 MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-FPRSize, MVT::i32), 2855 VROffsAddr, MachinePointerInfo(SV, 28), 2856 false, false, 0)); 2857 2858 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0], 2859 MemOps.size()); 2860} 2861 2862SDValue 2863AArch64TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 2864 switch (Op.getOpcode()) { 2865 default: llvm_unreachable("Don't know how to custom lower this!"); 2866 case ISD::FADD: return LowerF128ToCall(Op, DAG, RTLIB::ADD_F128); 2867 case ISD::FSUB: return LowerF128ToCall(Op, DAG, RTLIB::SUB_F128); 2868 case ISD::FMUL: return LowerF128ToCall(Op, DAG, RTLIB::MUL_F128); 2869 case ISD::FDIV: return LowerF128ToCall(Op, DAG, RTLIB::DIV_F128); 2870 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, true); 2871 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG, false); 2872 case 
ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG, true);
  case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG, false);
  case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
  case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);

  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  case ISD::BR_CC: return LowerBR_CC(Op, DAG);
  case ISD::GlobalAddress: return LowerGlobalAddressELF(Op, DAG);
  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
  case ISD::JumpTable: return LowerJumpTable(Op, DAG);
  case ISD::SELECT: return LowerSELECT(Op, DAG);
  case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
  case ISD::SETCC: return LowerSETCC(Op, DAG);
  case ISD::VACOPY: return LowerVACOPY(Op, DAG);
  case ISD::VASTART: return LowerVASTART(Op, DAG);
  case ISD::BUILD_VECTOR:
    return LowerBUILD_VECTOR(Op, DAG, getSubtarget());
  case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
  }

  return SDValue();
}

/// Check if the specified splat value corresponds to a valid vector constant
/// for a Neon instruction with a "modified immediate" operand (e.g., MOVI).
/// If so, return the encoded 8-bit immediate and the OpCmode instruction
/// field values.
static bool isNeonModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
                              unsigned SplatBitSize, SelectionDAG &DAG,
                              bool is128Bits, NeonModImmType type, EVT &VT,
                              unsigned &Imm, unsigned &OpCmode) {
  switch (SplatBitSize) {
  default:
    llvm_unreachable("unexpected size for isNeonModifiedImm");
  case 8: {
    if (type != Neon_Mov_Imm)
      return false;
    assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
    // Neon movi per byte: Op=0, Cmode=1110.
    OpCmode = 0xe;
    Imm = SplatBits;
    VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
    break;
  }
  case 16: {
    // Neon move inst per halfword
    VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
    if ((SplatBits & ~0xff) == 0) {
      // Value = 0x00nn is 0x00nn LSL 0
      // movi: Op=0, Cmode=1000; mvni: Op=1, Cmode=1000
      // bic:  Op=1, Cmode=1001; orr:  Op=0, Cmode=1001
      // Op=x, Cmode=100y
      Imm = SplatBits;
      OpCmode = 0x8;
      break;
    }
    if ((SplatBits & ~0xff00) == 0) {
      // Value = 0xnn00 is 0x00nn LSL 8
      // movi: Op=0, Cmode=1010; mvni: Op=1, Cmode=1010
      // bic:  Op=1, Cmode=1011; orr:  Op=0, Cmode=1011
      // Op=x, Cmode=101x
      Imm = SplatBits >> 8;
      OpCmode = 0xa;
      break;
    }
    // Can't handle any other value.
    return false;
  }

  case 32: {
    // First the LSL variants (MSL is unusable by some of the instructions
    // we're interested in).

    // Neon move instr per word, shift zeros
    VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
    if ((SplatBits & ~0xff) == 0) {
      // Value = 0x000000nn is 0x000000nn LSL 0
      // movi: Op=0, Cmode=0000; mvni: Op=1, Cmode=0000
      // bic:  Op=1, Cmode=0001; orr:  Op=0, Cmode=0001
      // Op=x, Cmode=000x
      Imm = SplatBits;
      OpCmode = 0;
      break;
    }
    if ((SplatBits & ~0xff00) == 0) {
      // Value = 0x0000nn00 is 0x000000nn LSL 8
      // movi: Op=0, Cmode=0010; mvni: Op=1, Cmode=0010
      // bic:  Op=1, Cmode=0011; orr:  Op=0, Cmode=0011
      // Op=x, Cmode=001x
      Imm = SplatBits >> 8;
      OpCmode = 0x2;
      break;
    }
    if ((SplatBits & ~0xff0000) == 0) {
      // Value = 0x00nn0000 is 0x000000nn LSL 16
      // movi: Op=0, Cmode=0100; mvni: Op=1, Cmode=0100
      // bic:  Op=1, Cmode=0101; orr:  Op=0, Cmode=0101
      // Op=x, Cmode=010x
      Imm = SplatBits >> 16;
      OpCmode = 0x4;
      break;
    }
    if ((SplatBits & ~0xff000000) == 0) {
      // Value = 0xnn000000 is 0x000000nn LSL 24
      // movi: Op=0, Cmode=0110; mvni: Op=1, Cmode=0110
      // bic:  Op=1, Cmode=0111; orr:  Op=0, Cmode=0111
      // Op=x, Cmode=011x
      Imm = SplatBits >> 24;
      OpCmode = 0x6;
      break;
    }

    // Now the MSL immediates.

    // Neon move instr per word, shift ones
    if ((SplatBits & ~0xffff) == 0 &&
        ((SplatBits | SplatUndef) & 0xff) == 0xff) {
      // Value = 0x0000nnff is 0x000000nn MSL 8
      // movi: Op=0, Cmode=1100; mvni: Op=1, Cmode=1100
      // Op=x, Cmode=1100
      Imm = SplatBits >> 8;
      OpCmode = 0xc;
      break;
    }
    if ((SplatBits & ~0xffffff) == 0 &&
        ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
      // Value = 0x00nnffff is 0x000000nn MSL 16
      // movi: Op=0, Cmode=1101; mvni: Op=1, Cmode=1101
      // Op=x, Cmode=1101
      Imm = SplatBits >> 16;
      OpCmode = 0xd;
      break;
    }
    // Can't handle any other value.
    return false;
  }

  case 64: {
    if (type != Neon_Mov_Imm)
      return false;
    // Neon move instr bytemask, where each byte is either 0x00 or 0xff.
    // movi Op=1, Cmode=1110.
    OpCmode = 0x1e;
    uint64_t BitMask = 0xff;
    uint64_t Val = 0;
    unsigned ImmMask = 1;
    Imm = 0;
    for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
      if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
        Val |= BitMask;
        Imm |= ImmMask;
      } else if ((SplatBits & BitMask) != 0) {
        return false;
      }
      BitMask <<= 8;
      ImmMask <<= 1;
    }
    SplatBits = Val;
    VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
    break;
  }
  }

  return true;
}

static SDValue PerformANDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  // We're looking for an AND of an SRL; together they form a UBFX.
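  // For example (illustrative, with an i32 value x):
  //   (and (srl x, 3), 0x1f)  ==>  (UBFX x, 3, 7)
  // i.e. LSB == 3 and Width == 5; the node built below takes LSB and
  // LSB + Width - 1 as its immediate operands.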

  if (VT != MVT::i32 && VT != MVT::i64)
    return SDValue();

  if (!isa<ConstantSDNode>(N->getOperand(1)))
    return SDValue();

  uint64_t TruncMask = N->getConstantOperandVal(1);
  if (!isMask_64(TruncMask))
    return SDValue();

  uint64_t Width = CountPopulation_64(TruncMask);
  SDValue Shift = N->getOperand(0);

  if (Shift.getOpcode() != ISD::SRL)
    return SDValue();

  if (!isa<ConstantSDNode>(Shift->getOperand(1)))
    return SDValue();
  uint64_t LSB = Shift->getConstantOperandVal(1);

  if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits())
    return SDValue();

  return DAG.getNode(AArch64ISD::UBFX, DL, VT, Shift.getOperand(0),
                     DAG.getConstant(LSB, MVT::i64),
                     DAG.getConstant(LSB + Width - 1, MVT::i64));
}

/// For a true bitfield insert, the bits getting into that contiguous mask
/// should come from the low part of an existing value: they must be formed
/// from a compatible SHL operation (unless they're already low). This function
/// checks that condition and returns the least-significant bit that's
/// intended. If the operation is not a field preparation, -1 is returned.
static int32_t getLSBForBFI(SelectionDAG &DAG, SDLoc DL, EVT VT,
                            SDValue &MaskedVal, uint64_t Mask) {
  if (!isShiftedMask_64(Mask))
    return -1;

  // Now we need to alter MaskedVal so that it is an appropriate input for a
  // BFI instruction. BFI will do a left-shift by LSB before applying the mask
  // we've spotted, so in general we should pre-emptively "undo" that by making
  // sure the incoming bits have had a right-shift applied to them.
  //
  // This right shift, however, will combine with existing left/right shifts.
  // In the simplest case of a completely straight bitfield operation, it will
  // be expected to completely cancel out with an existing SHL. More
  // complicated cases (e.g. bitfield to bitfield copy) may still need a real
  // shift before the BFI.

  uint64_t LSB = countTrailingZeros(Mask);
  int64_t ShiftRightRequired = LSB;
  if (MaskedVal.getOpcode() == ISD::SHL &&
      isa<ConstantSDNode>(MaskedVal.getOperand(1))) {
    ShiftRightRequired -= MaskedVal.getConstantOperandVal(1);
    MaskedVal = MaskedVal.getOperand(0);
  } else if (MaskedVal.getOpcode() == ISD::SRL &&
             isa<ConstantSDNode>(MaskedVal.getOperand(1))) {
    ShiftRightRequired += MaskedVal.getConstantOperandVal(1);
    MaskedVal = MaskedVal.getOperand(0);
  }

  if (ShiftRightRequired > 0)
    MaskedVal = DAG.getNode(ISD::SRL, DL, VT, MaskedVal,
                            DAG.getConstant(ShiftRightRequired, MVT::i64));
  else if (ShiftRightRequired < 0) {
    // We could actually end up with a residual left shift, for example with
    // "struc.bitfield = val << 1".
    MaskedVal = DAG.getNode(ISD::SHL, DL, VT, MaskedVal,
                            DAG.getConstant(-ShiftRightRequired, MVT::i64));
  }

  return LSB;
}

/// Searches from N for an existing AArch64ISD::BFI node, possibly surrounded
/// by a mask and an extension. Returns true if a BFI was found and provides
/// information on its surroundings.
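/// For example (an illustrative case, not from the original comment): given
/// (zero_extend (and (AArch64ISD::BFI a, b, ...), 0xff)), this returns true
/// with BFI set to the inner node, Mask == 0xff and Extended == true.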
static bool findMaskedBFI(SDValue N, SDValue &BFI, uint64_t &Mask,
                          bool &Extended) {
  Extended = false;
  if (N.getOpcode() == ISD::ZERO_EXTEND) {
    Extended = true;
    N = N.getOperand(0);
  }

  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
    Mask = N->getConstantOperandVal(1);
    N = N.getOperand(0);
  } else {
    // Mask is the whole width.
    Mask = -1ULL >> (64 - N.getValueType().getSizeInBits());
  }

  if (N.getOpcode() == AArch64ISD::BFI) {
    BFI = N;
    return true;
  }

  return false;
}

/// Try to combine a subtree (rooted at an OR) into a "masked BFI" node, which
/// is roughly equivalent to (and (BFI ...), mask). This form is used because
/// it can often be further combined with a larger mask. Ultimately, we want
/// mask to be 2^32-1 or 2^64-1 so the AND can be skipped.
static SDValue tryCombineToBFI(SDNode *N,
                               TargetLowering::DAGCombinerInfo &DCI,
                               const AArch64Subtarget *Subtarget) {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  assert(N->getOpcode() == ISD::OR && "Unexpected root");

  // We need the LHS to be (and SOMETHING, MASK). Find out what that mask is
  // or abandon the effort.
  SDValue LHS = N->getOperand(0);
  if (LHS.getOpcode() != ISD::AND)
    return SDValue();

  uint64_t LHSMask;
  if (isa<ConstantSDNode>(LHS.getOperand(1)))
    LHSMask = LHS->getConstantOperandVal(1);
  else
    return SDValue();

  // We also need the RHS to be (and SOMETHING, MASK). Find out what that mask
  // is or abandon the effort.
  SDValue RHS = N->getOperand(1);
  if (RHS.getOpcode() != ISD::AND)
    return SDValue();

  uint64_t RHSMask;
  if (isa<ConstantSDNode>(RHS.getOperand(1)))
    RHSMask = RHS->getConstantOperandVal(1);
  else
    return SDValue();

  // Can't do anything if the masks are incompatible.
  if (LHSMask & RHSMask)
    return SDValue();

  // Now we need one of the masks to be a contiguous field. Without loss of
  // generality that should be the RHS one.
  SDValue Bitfield = LHS.getOperand(0);
  if (getLSBForBFI(DAG, DL, VT, Bitfield, LHSMask) != -1) {
    // We know that LHS is a candidate new value, and RHS isn't already a
    // better one.
    std::swap(LHS, RHS);
    std::swap(LHSMask, RHSMask);
  }

  // We've done our best to put the right operands in the right places, all we
  // can do now is check whether a BFI exists.
  Bitfield = RHS.getOperand(0);
  int32_t LSB = getLSBForBFI(DAG, DL, VT, Bitfield, RHSMask);
  if (LSB == -1)
    return SDValue();

  uint32_t Width = CountPopulation_64(RHSMask);
  assert(Width && "Expected non-zero bitfield width");

  SDValue BFI = DAG.getNode(AArch64ISD::BFI, DL, VT,
                            LHS.getOperand(0), Bitfield,
                            DAG.getConstant(LSB, MVT::i64),
                            DAG.getConstant(Width, MVT::i64));

  // Mask is trivial
  if ((LHSMask | RHSMask) == (-1ULL >> (64 - VT.getSizeInBits())))
    return BFI;

  return DAG.getNode(ISD::AND, DL, VT, BFI,
                     DAG.getConstant(LHSMask | RHSMask, VT));
}

/// Search for the bitwise combining (with careful masks) of a MaskedBFI and
/// its original input. This is surprisingly common because SROA splits things
/// up into i8 chunks, so the originally detected MaskedBFI may actually only
/// act on the low (say) byte of a word.
/// This is then OR'd into the rest of the word afterwards.
///
/// Basic input: (or (and OLDFIELD, MASK1), (MaskedBFI MASK2, OLDFIELD, ...)).
///
/// If MASK1 and MASK2 are compatible, we can fold the whole thing into the
/// MaskedBFI. We can also deal with a certain amount of extend/truncate being
/// involved.
static SDValue tryCombineToLargerBFI(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const AArch64Subtarget *Subtarget) {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  // First job is to hunt for a MaskedBFI on either the left or right. Swap
  // operands if it's actually on the right.
  SDValue BFI;
  SDValue PossExtraMask;
  uint64_t ExistingMask = 0;
  bool Extended = false;
  if (findMaskedBFI(N->getOperand(0), BFI, ExistingMask, Extended))
    PossExtraMask = N->getOperand(1);
  else if (findMaskedBFI(N->getOperand(1), BFI, ExistingMask, Extended))
    PossExtraMask = N->getOperand(0);
  else
    return SDValue();

  // We can only combine a BFI with another compatible mask.
  if (PossExtraMask.getOpcode() != ISD::AND ||
      !isa<ConstantSDNode>(PossExtraMask.getOperand(1)))
    return SDValue();

  uint64_t ExtraMask = PossExtraMask->getConstantOperandVal(1);

  // Masks must be compatible.
  if (ExtraMask & ExistingMask)
    return SDValue();

  SDValue OldBFIVal = BFI.getOperand(0);
  SDValue NewBFIVal = BFI.getOperand(1);
  if (Extended) {
    // We skipped a ZERO_EXTEND above, so the input to the MaskedBFIs should
    // be 32-bit and we'll be forming a 64-bit MaskedBFI. The MaskedBFI
    // arguments need to be made compatible.
    assert(VT == MVT::i64 && BFI.getValueType() == MVT::i32 &&
           "Invalid types for BFI");
    OldBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, OldBFIVal);
    NewBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, NewBFIVal);
  }

  // We need the MaskedBFI to be combined with a mask of the *same* value.
  if (PossExtraMask.getOperand(0) != OldBFIVal)
    return SDValue();

  BFI = DAG.getNode(AArch64ISD::BFI, DL, VT,
                    OldBFIVal, NewBFIVal,
                    BFI.getOperand(2), BFI.getOperand(3));

  // If the masking is trivial, we don't need to create it.
  if ((ExtraMask | ExistingMask) == (-1ULL >> (64 - VT.getSizeInBits())))
    return BFI;

  return DAG.getNode(ISD::AND, DL, VT, BFI,
                     DAG.getConstant(ExtraMask | ExistingMask, VT));
}

/// An EXTR instruction is made up of two shifts, ORed together. This helper
/// searches for and classifies those shifts.
static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount,
                         bool &FromHi) {
  if (N.getOpcode() == ISD::SHL)
    FromHi = false;
  else if (N.getOpcode() == ISD::SRL)
    FromHi = true;
  else
    return false;

  if (!isa<ConstantSDNode>(N.getOperand(1)))
    return false;

  ShiftAmount = N->getConstantOperandVal(1);
  Src = N->getOperand(0);
  return true;
}

/// The EXTR instruction extracts a contiguous chunk of bits from two existing
/// registers viewed as a high/low pair. This function looks for the pattern:
/// (or (shl VAL1, #N), (srl VAL2, #RegWidth-N)) and replaces it with an
/// EXTR. This can't quite be done in TableGen because the two immediates
/// aren't independent.
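/// An illustrative case (not from the original comment): on i32,
/// (or (shl x, 8), (srl y, 24)) becomes (AArch64ISD::EXTR x, y, 24), since
/// the two shift amounts sum to the register width.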
static SDValue tryCombineToEXTR(SDNode *N,
                                TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  assert(N->getOpcode() == ISD::OR && "Unexpected root");

  if (VT != MVT::i32 && VT != MVT::i64)
    return SDValue();

  SDValue LHS;
  uint32_t ShiftLHS = 0;
  bool LHSFromHi = false;
  if (!findEXTRHalf(N->getOperand(0), LHS, ShiftLHS, LHSFromHi))
    return SDValue();

  SDValue RHS;
  uint32_t ShiftRHS = 0;
  bool RHSFromHi = false;
  if (!findEXTRHalf(N->getOperand(1), RHS, ShiftRHS, RHSFromHi))
    return SDValue();

  // If they're both trying to come from the same half of the register,
  // they're not really an EXTR.
  if (LHSFromHi == RHSFromHi)
    return SDValue();

  if (ShiftLHS + ShiftRHS != VT.getSizeInBits())
    return SDValue();

  if (LHSFromHi) {
    std::swap(LHS, RHS);
    std::swap(ShiftLHS, ShiftRHS);
  }

  return DAG.getNode(AArch64ISD::EXTR, DL, VT,
                     LHS, RHS,
                     DAG.getConstant(ShiftRHS, MVT::i64));
}

/// Target-specific DAG combine xforms for ISD::OR.
static SDValue PerformORCombine(SDNode *N,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const AArch64Subtarget *Subtarget) {

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  // Attempt to recognise bitfield-insert operations.
  SDValue Res = tryCombineToBFI(N, DCI, Subtarget);
  if (Res.getNode())
    return Res;

  // Attempt to combine an existing MaskedBFI operation into one with a larger
  // mask.
  Res = tryCombineToLargerBFI(N, DCI, Subtarget);
  if (Res.getNode())
    return Res;

  Res = tryCombineToEXTR(N, DCI);
  if (Res.getNode())
    return Res;

  if (!Subtarget->hasNEON())
    return SDValue();

  // Attempt to use vector immediate-form BSL:
  // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant.

  SDValue N0 = N->getOperand(0);
  if (N0.getOpcode() != ISD::AND)
    return SDValue();

  SDValue N1 = N->getOperand(1);
  if (N1.getOpcode() != ISD::AND)
    return SDValue();

  if (VT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
    APInt SplatUndef;
    unsigned SplatBitSize;
    bool HasAnyUndefs;
    BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
    APInt SplatBits0;
    if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
                                      HasAnyUndefs) &&
        !HasAnyUndefs) {
      BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
      APInt SplatBits1;
      if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
                                        HasAnyUndefs) &&
          !HasAnyUndefs && SplatBits0 == ~SplatBits1) {
        // Canonicalize the vector type to make instruction selection simpler.
        EVT CanonicalVT = VT.is128BitVector() ? MVT::v16i8 : MVT::v8i8;
        SDValue Result = DAG.getNode(AArch64ISD::NEON_BSL, DL, CanonicalVT,
                                     N0->getOperand(1), N0->getOperand(0),
                                     N1->getOperand(0));
        return DAG.getNode(ISD::BITCAST, DL, VT, Result);
      }
    }
  }

  return SDValue();
}

/// Target-specific DAG combine xforms for ISD::SRA.
static SDValue PerformSRACombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  // We're looking for an SRA/SHL pair which form an SBFX.

  if (VT != MVT::i32 && VT != MVT::i64)
    return SDValue();

  if (!isa<ConstantSDNode>(N->getOperand(1)))
    return SDValue();

  uint64_t ExtraSignBits = N->getConstantOperandVal(1);
  SDValue Shift = N->getOperand(0);

  if (Shift.getOpcode() != ISD::SHL)
    return SDValue();

  if (!isa<ConstantSDNode>(Shift->getOperand(1)))
    return SDValue();

  uint64_t BitsOnLeft = Shift->getConstantOperandVal(1);
  uint64_t Width = VT.getSizeInBits() - ExtraSignBits;
  uint64_t LSB = VT.getSizeInBits() - Width - BitsOnLeft;

  if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits())
    return SDValue();

  return DAG.getNode(AArch64ISD::SBFX, DL, VT, Shift.getOperand(0),
                     DAG.getConstant(LSB, MVT::i64),
                     DAG.getConstant(LSB + Width - 1, MVT::i64));
}

/// Check if this is a valid build_vector for the immediate operand of
/// a vector shift operation, where all the elements of the build_vector
/// must have the same constant integer value.
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
  // Ignore bit_converts.
  while (Op.getOpcode() == ISD::BITCAST)
    Op = Op.getOperand(0);
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
                                    HasAnyUndefs, ElementBits) ||
      SplatBitSize > ElementBits)
    return false;
  Cnt = SplatBits.getSExtValue();
  return true;
}

/// Check if this is a valid build_vector for the immediate operand of
/// a vector shift left operation. That value must be in the range:
/// 0 <= Value < ElementBits
static bool isVShiftLImm(SDValue Op, EVT VT, int64_t &Cnt) {
  assert(VT.isVector() && "vector shift count is not a vector type");
  unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
  if (!getVShiftImm(Op, ElementBits, Cnt))
    return false;
  return (Cnt >= 0 && Cnt < ElementBits);
}

/// Check if this is a valid build_vector for the immediate operand of a
/// vector shift right operation. The value must be in the range:
/// 1 <= Value <= ElementBits
static bool isVShiftRImm(SDValue Op, EVT VT, int64_t &Cnt) {
  assert(VT.isVector() && "vector shift count is not a vector type");
  unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
  if (!getVShiftImm(Op, ElementBits, Cnt))
    return false;
  return (Cnt >= 1 && Cnt <= ElementBits);
}

/// Checks for immediate versions of vector shifts and lowers them.
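/// For example (an illustrative sketch): a v4i32 (shl x, build_vector
/// <3,3,3,3>) is rewritten so that its shift amount is a NEON_VDUP of the
/// constant 3, which instruction selection can then match as an
/// immediate-form shift.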
static SDValue PerformShiftCombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const AArch64Subtarget *ST) {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  if (N->getOpcode() == ISD::SRA && (VT == MVT::i32 || VT == MVT::i64))
    return PerformSRACombine(N, DCI);

  // Nothing to be done for scalar shifts.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!VT.isVector() || !TLI.isTypeLegal(VT))
    return SDValue();

  assert(ST->hasNEON() && "unexpected vector shift");
  int64_t Cnt;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("unexpected shift opcode");

  case ISD::SHL:
    if (isVShiftLImm(N->getOperand(1), VT, Cnt)) {
      SDValue RHS =
          DAG.getNode(AArch64ISD::NEON_VDUP, SDLoc(N->getOperand(1)), VT,
                      DAG.getConstant(Cnt, MVT::i32));
      return DAG.getNode(ISD::SHL, SDLoc(N), VT, N->getOperand(0), RHS);
    }
    break;

  case ISD::SRA:
  case ISD::SRL:
    if (isVShiftRImm(N->getOperand(1), VT, Cnt)) {
      SDValue RHS =
          DAG.getNode(AArch64ISD::NEON_VDUP, SDLoc(N->getOperand(1)), VT,
                      DAG.getConstant(Cnt, MVT::i32));
      return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N->getOperand(0), RHS);
    }
    break;
  }

  return SDValue();
}

/// Target-specific DAG combining for intrinsics.
static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
  unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();

  switch (IntNo) {
  default:
    // Don't do anything for most intrinsics.
    break;

  case Intrinsic::arm_neon_vqshifts:
  case Intrinsic::arm_neon_vqshiftu:
    EVT VT = N->getOperand(1).getValueType();
    int64_t Cnt;
    if (!isVShiftLImm(N->getOperand(2), VT, Cnt))
      break;
    unsigned VShiftOpc = (IntNo == Intrinsic::arm_neon_vqshifts)
                             ? AArch64ISD::NEON_QSHLs
                             : AArch64ISD::NEON_QSHLu;
    return DAG.getNode(VShiftOpc, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), DAG.getConstant(Cnt, MVT::i32));
  }

  return SDValue();
}

/// Target-specific DAG combine function for NEON load/store intrinsics
/// to merge base address updates.
static SDValue CombineBaseUpdate(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID ||
                      N->getOpcode() == ISD::INTRINSIC_W_CHAIN);
  unsigned AddrOpIdx = (isIntrinsic ? 2 : 1);
  SDValue Addr = N->getOperand(AddrOpIdx);

  // Search for a use of the address operand that is an increment.
  for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
       UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
    SDNode *User = *UI;
    if (User->getOpcode() != ISD::ADD ||
        UI.getUse().getResNo() != Addr.getResNo())
      continue;

    // Check that the add is independent of the load/store. Otherwise, folding
    // it would create a cycle.
    if (User->isPredecessorOf(N) || N->isPredecessorOf(User))
      continue;

    // Find the new opcode for the updating load/store.
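    // Illustrative example (not from the original source): a vld1 that loads
    // 16 bytes from pointer p, where p's only other use is (add p, 16), is
    // replaced by a NEON_LD1_UPD that also produces the post-incremented
    // pointer; the ADD's users are then redirected to that extra result.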
    bool isLoad = true;
    bool isLaneOp = false;
    unsigned NewOpc = 0;
    unsigned NumVecs = 0;
    if (isIntrinsic) {
      unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
      switch (IntNo) {
      default: llvm_unreachable("unexpected intrinsic for Neon base update");
      case Intrinsic::arm_neon_vld1: NewOpc = AArch64ISD::NEON_LD1_UPD;
        NumVecs = 1; break;
      case Intrinsic::arm_neon_vld2: NewOpc = AArch64ISD::NEON_LD2_UPD;
        NumVecs = 2; break;
      case Intrinsic::arm_neon_vld3: NewOpc = AArch64ISD::NEON_LD3_UPD;
        NumVecs = 3; break;
      case Intrinsic::arm_neon_vld4: NewOpc = AArch64ISD::NEON_LD4_UPD;
        NumVecs = 4; break;
      case Intrinsic::arm_neon_vst1: NewOpc = AArch64ISD::NEON_ST1_UPD;
        NumVecs = 1; isLoad = false; break;
      case Intrinsic::arm_neon_vst2: NewOpc = AArch64ISD::NEON_ST2_UPD;
        NumVecs = 2; isLoad = false; break;
      case Intrinsic::arm_neon_vst3: NewOpc = AArch64ISD::NEON_ST3_UPD;
        NumVecs = 3; isLoad = false; break;
      case Intrinsic::arm_neon_vst4: NewOpc = AArch64ISD::NEON_ST4_UPD;
        NumVecs = 4; isLoad = false; break;
      case Intrinsic::aarch64_neon_vld1x2: NewOpc = AArch64ISD::NEON_LD1x2_UPD;
        NumVecs = 2; break;
      case Intrinsic::aarch64_neon_vld1x3: NewOpc = AArch64ISD::NEON_LD1x3_UPD;
        NumVecs = 3; break;
      case Intrinsic::aarch64_neon_vld1x4: NewOpc = AArch64ISD::NEON_LD1x4_UPD;
        NumVecs = 4; break;
      case Intrinsic::aarch64_neon_vst1x2: NewOpc = AArch64ISD::NEON_ST1x2_UPD;
        NumVecs = 2; isLoad = false; break;
      case Intrinsic::aarch64_neon_vst1x3: NewOpc = AArch64ISD::NEON_ST1x3_UPD;
        NumVecs = 3; isLoad = false; break;
      case Intrinsic::aarch64_neon_vst1x4: NewOpc = AArch64ISD::NEON_ST1x4_UPD;
        NumVecs = 4; isLoad = false; break;
      case Intrinsic::arm_neon_vld2lane: NewOpc = AArch64ISD::NEON_LD2LN_UPD;
        NumVecs = 2; isLaneOp = true; break;
      case Intrinsic::arm_neon_vld3lane: NewOpc = AArch64ISD::NEON_LD3LN_UPD;
        NumVecs = 3; isLaneOp = true; break;
      case Intrinsic::arm_neon_vld4lane: NewOpc = AArch64ISD::NEON_LD4LN_UPD;
        NumVecs = 4; isLaneOp = true; break;
      case Intrinsic::arm_neon_vst2lane: NewOpc = AArch64ISD::NEON_ST2LN_UPD;
        NumVecs = 2; isLoad = false; isLaneOp = true; break;
      case Intrinsic::arm_neon_vst3lane: NewOpc = AArch64ISD::NEON_ST3LN_UPD;
        NumVecs = 3; isLoad = false; isLaneOp = true; break;
      case Intrinsic::arm_neon_vst4lane: NewOpc = AArch64ISD::NEON_ST4LN_UPD;
        NumVecs = 4; isLoad = false; isLaneOp = true; break;
      }
    } else {
      isLaneOp = true;
      switch (N->getOpcode()) {
      default: llvm_unreachable("unexpected opcode for Neon base update");
      case AArch64ISD::NEON_LD2DUP: NewOpc = AArch64ISD::NEON_LD2DUP_UPD;
        NumVecs = 2; break;
      case AArch64ISD::NEON_LD3DUP: NewOpc = AArch64ISD::NEON_LD3DUP_UPD;
        NumVecs = 3; break;
      case AArch64ISD::NEON_LD4DUP: NewOpc = AArch64ISD::NEON_LD4DUP_UPD;
        NumVecs = 4; break;
      }
    }

    // Find the size of memory referenced by the load/store.
    EVT VecTy;
    if (isLoad)
      VecTy = N->getValueType(0);
    else
      VecTy = N->getOperand(AddrOpIdx + 1).getValueType();
    unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
    if (isLaneOp)
      NumBytes /= VecTy.getVectorNumElements();

    // If the increment is a constant, it must match the memory ref size.
    SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
    if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
      uint32_t IncVal = CInc->getZExtValue();
      if (IncVal != NumBytes)
        continue;
      Inc = DAG.getTargetConstant(IncVal, MVT::i32);
    }

    // Create the new updating load/store node.
    EVT Tys[6];
    unsigned NumResultVecs = (isLoad ? NumVecs : 0);
    unsigned n;
    for (n = 0; n < NumResultVecs; ++n)
      Tys[n] = VecTy;
    Tys[n++] = MVT::i64;
    Tys[n] = MVT::Other;
    SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs + 2);
    SmallVector<SDValue, 8> Ops;
    Ops.push_back(N->getOperand(0)); // incoming chain
    Ops.push_back(N->getOperand(AddrOpIdx));
    Ops.push_back(Inc);
    for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) {
      Ops.push_back(N->getOperand(i));
    }
    MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N);
    SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys,
                                           Ops.data(), Ops.size(),
                                           MemInt->getMemoryVT(),
                                           MemInt->getMemOperand());

    // Update the uses.
    std::vector<SDValue> NewResults;
    for (unsigned i = 0; i < NumResultVecs; ++i) {
      NewResults.push_back(SDValue(UpdN.getNode(), i));
    }
    NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain
    DCI.CombineTo(N, NewResults);
    DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));

    break;
  }
  return SDValue();
}

/// For a VDUPLANE node N, check if its source operand is a vldN-lane (N > 1)
/// intrinsic, and if all the other uses of that intrinsic are also VDUPLANEs.
/// If so, combine them to a vldN-dup operation.
static SDValue CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);

  // Check if the VDUPLANE operand is a vldN-lane intrinsic.
  SDNode *VLD = N->getOperand(0).getNode();
  if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN)
    return SDValue();
  unsigned NumVecs = 0;
  unsigned NewOpc = 0;
  unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();
  if (IntNo == Intrinsic::arm_neon_vld2lane) {
    NumVecs = 2;
    NewOpc = AArch64ISD::NEON_LD2DUP;
  } else if (IntNo == Intrinsic::arm_neon_vld3lane) {
    NumVecs = 3;
    NewOpc = AArch64ISD::NEON_LD3DUP;
  } else if (IntNo == Intrinsic::arm_neon_vld4lane) {
    NumVecs = 4;
    NewOpc = AArch64ISD::NEON_LD4DUP;
  } else {
    return SDValue();
  }

  // First check that all the vldN-lane uses are VDUPLANEs and that the lane
  // numbers match the load.
  unsigned VLDLaneNo =
      cast<ConstantSDNode>(VLD->getOperand(NumVecs + 3))->getZExtValue();
  for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
       UI != UE; ++UI) {
    // Ignore uses of the chain result.
    if (UI.getUse().getResNo() == NumVecs)
      continue;
    SDNode *User = *UI;
    if (User->getOpcode() != AArch64ISD::NEON_VDUPLANE ||
        VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
      return SDValue();
  }

  // Create the vldN-dup node.
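  // Illustrative example (not from the original source): a vld2-lane whose
  // two vector results are each used only by NEON_VDUPLANE nodes splatting
  // the loaded lane collapses into a single NEON_LD2DUP load-and-broadcast.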
  EVT Tys[5];
  unsigned n;
  for (n = 0; n < NumVecs; ++n)
    Tys[n] = VT;
  Tys[n] = MVT::Other;
  SDVTList SDTys = DAG.getVTList(Tys, NumVecs + 1);
  SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) };
  MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD);
  SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys, Ops, 2,
                                           VLDMemInt->getMemoryVT(),
                                           VLDMemInt->getMemOperand());

  // Update the uses.
  for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
       UI != UE; ++UI) {
    unsigned ResNo = UI.getUse().getResNo();
    // Ignore uses of the chain result.
    if (ResNo == NumVecs)
      continue;
    SDNode *User = *UI;
    DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo));
  }

  // Now the vldN-lane intrinsic is dead except for its chain result.
  // Update uses of the chain.
  std::vector<SDValue> VLDDupResults;
  for (unsigned n = 0; n < NumVecs; ++n)
    VLDDupResults.push_back(SDValue(VLDDup.getNode(), n));
  VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs));
  DCI.CombineTo(VLD, VLDDupResults);

  return SDValue(N, 0);
}

SDValue
AArch64TargetLowering::PerformDAGCombine(SDNode *N,
                                         DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default: break;
  case ISD::AND: return PerformANDCombine(N, DCI);
  case ISD::OR: return PerformORCombine(N, DCI, getSubtarget());
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    return PerformShiftCombine(N, DCI, getSubtarget());
  case ISD::INTRINSIC_WO_CHAIN:
    return PerformIntrinsicCombine(N, DCI.DAG);
  case AArch64ISD::NEON_VDUPLANE:
    return CombineVLDDUP(N, DCI);
  case AArch64ISD::NEON_LD2DUP:
  case AArch64ISD::NEON_LD3DUP:
  case AArch64ISD::NEON_LD4DUP:
    return CombineBaseUpdate(N, DCI);
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_W_CHAIN:
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    case Intrinsic::arm_neon_vld1:
    case Intrinsic::arm_neon_vld2:
    case Intrinsic::arm_neon_vld3:
    case Intrinsic::arm_neon_vld4:
    case Intrinsic::arm_neon_vst1:
    case Intrinsic::arm_neon_vst2:
    case Intrinsic::arm_neon_vst3:
    case Intrinsic::arm_neon_vst4:
    case Intrinsic::arm_neon_vld2lane:
    case Intrinsic::arm_neon_vld3lane:
    case Intrinsic::arm_neon_vld4lane:
    case Intrinsic::aarch64_neon_vld1x2:
    case Intrinsic::aarch64_neon_vld1x3:
    case Intrinsic::aarch64_neon_vld1x4:
    case Intrinsic::aarch64_neon_vst1x2:
    case Intrinsic::aarch64_neon_vst1x3:
    case Intrinsic::aarch64_neon_vst1x4:
    case Intrinsic::arm_neon_vst2lane:
    case Intrinsic::arm_neon_vst3lane:
    case Intrinsic::arm_neon_vst4lane:
      return CombineBaseUpdate(N, DCI);
    default:
      break;
    }
  }
  return SDValue();
}

bool
AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f16:
  case MVT::f32:
  case MVT::f64:
    return true;
  case MVT::f128:
    return false;
  default:
    break;
  }

  return false;
}

// Check whether a BUILD_VECTOR can be represented as a VECTOR_SHUFFLE; if so,
// try to lower it via LowerVECTOR_SHUFFLE.
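// For example (an illustrative case): a v4i32 BUILD_VECTOR whose operands are
// extract_vector_elt of lanes 2 and 3 of V0 followed by lanes 0 and 1 of V1
// is re-expressed as a vector_shuffle of V0 and V1 with mask <2, 3, 4, 5>.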
bool AArch64TargetLowering::isKnownShuffleVector(SDValue Op, SelectionDAG &DAG,
                                                 SDValue &Res) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  unsigned NumElts = VT.getVectorNumElements();
  unsigned V0NumElts = 0;
  int Mask[16];
  SDValue V0, V1;

  // Check if all elements are extracted from at most two vectors.
  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue Elt = Op.getOperand(i);
    if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return false;

    if (V0.getNode() == 0) {
      V0 = Elt.getOperand(0);
      V0NumElts = V0.getValueType().getVectorNumElements();
    }
    if (Elt.getOperand(0) == V0) {
      Mask[i] = (cast<ConstantSDNode>(Elt->getOperand(1))->getZExtValue());
      continue;
    } else if (V1.getNode() == 0) {
      V1 = Elt.getOperand(0);
    }
    if (Elt.getOperand(0) == V1) {
      unsigned Lane = cast<ConstantSDNode>(Elt->getOperand(1))->getZExtValue();
      Mask[i] = (Lane + V0NumElts);
      continue;
    } else {
      return false;
    }
  }

  if (!V1.getNode() && V0NumElts == NumElts * 2) {
    V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V0,
                     DAG.getConstant(NumElts, MVT::i64));
    V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V0,
                     DAG.getConstant(0, MVT::i64));
    V0NumElts = V0.getValueType().getVectorNumElements();
  }

  if (V1.getNode() && NumElts == V0NumElts &&
      V0NumElts == V1.getValueType().getVectorNumElements()) {
    SDValue Shuffle = DAG.getVectorShuffle(VT, DL, V0, V1, Mask);
    Res = LowerVECTOR_SHUFFLE(Shuffle, DAG);
    return true;
  } else
    return false;
}

// If this is a case we can't handle, return null and let the default
// expansion code take care of it.
SDValue
AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                                         const AArch64Subtarget *ST) const {

  BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;

  bool UseNeonMov = VT.getSizeInBits() >= 64;

  // Note we favor lowering MOVI over MVNI.
  // This has implications on the definition of patterns in TableGen to select
  // BIC immediate instructions but not ORR immediate instructions.
  // If this lowering order is changed, TableGen patterns for BIC immediate and
  // ORR immediate instructions have to be updated.
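  // Illustrative example (not from the original source): a v2i32 splat of
  // 0xffffff00 has no MOVI encoding, but its complement 0x000000ff does, so
  // the MVNI path below emits MVNI with Imm == 0xff instead of falling back
  // to a constant-pool load.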
  if (UseNeonMov &&
      BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatBitSize <= 64) {
      // First attempt to use vector immediate-form MOVI.
      EVT NeonMovVT;
      unsigned Imm = 0;
      unsigned OpCmode = 0;

      if (isNeonModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
                            SplatBitSize, DAG, VT.is128BitVector(),
                            Neon_Mov_Imm, NeonMovVT, Imm, OpCmode)) {
        SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32);
        SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32);

        if (ImmVal.getNode() && OpCmodeVal.getNode()) {
          SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MOVIMM, DL, NeonMovVT,
                                        ImmVal, OpCmodeVal);
          return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov);
        }
      }

      // Then attempt to use vector immediate-form MVNI.
      uint64_t NegatedImm = (~SplatBits).getZExtValue();
      if (isNeonModifiedImm(NegatedImm, SplatUndef.getZExtValue(), SplatBitSize,
                            DAG, VT.is128BitVector(), Neon_Mvn_Imm, NeonMovVT,
                            Imm, OpCmode)) {
        SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32);
        SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32);
        if (ImmVal.getNode() && OpCmodeVal.getNode()) {
          SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MVNIMM, DL, NeonMovVT,
                                        ImmVal, OpCmodeVal);
          return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov);
        }
      }

      // Attempt to use vector immediate-form FMOV.
      if (((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) ||
          (VT == MVT::v2f64 && SplatBitSize == 64)) {
        APFloat RealVal(
            SplatBitSize == 32 ? APFloat::IEEEsingle : APFloat::IEEEdouble,
            SplatBits);
        uint32_t ImmVal;
        if (A64Imms::isFPImm(RealVal, ImmVal)) {
          SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32);
          return DAG.getNode(AArch64ISD::NEON_FMOVIMM, DL, VT, Val);
        }
      }
    }
  }

  unsigned NumElts = VT.getVectorNumElements();
  bool isOnlyLowElement = true;
  bool usesOnlyOneValue = true;
  bool hasDominantValue = false;
  bool isConstant = true;

  // Map of the number of times a particular SDValue appears in the
  // element list.
  DenseMap<SDValue, unsigned> ValueCounts;
  SDValue Value;
  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue V = Op.getOperand(i);
    if (V.getOpcode() == ISD::UNDEF)
      continue;
    if (i > 0)
      isOnlyLowElement = false;
    if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
      isConstant = false;

    ValueCounts.insert(std::make_pair(V, 0));
    unsigned &Count = ValueCounts[V];

    // Is this value dominant? (takes up more than half of the lanes)
    if (++Count > (NumElts / 2)) {
      hasDominantValue = true;
      Value = V;
    }
  }
  if (ValueCounts.size() != 1)
    usesOnlyOneValue = false;
  if (!Value.getNode() && ValueCounts.size() > 0)
    Value = ValueCounts.begin()->first;

  if (ValueCounts.size() == 0)
    return DAG.getUNDEF(VT);

  // Loads are better lowered with insert_vector_elt, so keep going if we hit
  // that case.
  if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
    return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value);

  unsigned EltSize = VT.getVectorElementType().getSizeInBits();
  if (hasDominantValue && EltSize <= 64) {
    // Use VDUP for non-constant splats.
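    // Illustrative example (not from the original source): for
    // (build_vector x, x, x, y) the dominant value x is splatted with
    // NEON_VDUP and the single differing lane is then patched back in with
    // INSERT_VECTOR_ELT.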
    if (!isConstant) {
      SDValue N;

      // If we are DUPing a value that comes directly from a vector, we could
      // just use DUPLANE. We can only do this if the lane being extracted
      // is at a constant index, as the DUP from lane instructions only have
      // constant-index forms.
      if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          isa<ConstantSDNode>(Value->getOperand(1))) {
        N = DAG.getNode(AArch64ISD::NEON_VDUPLANE, DL, VT,
                        Value->getOperand(0), Value->getOperand(1));
      } else
        N = DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Value);

      if (!usesOnlyOneValue) {
        // The dominant value was splatted as 'N', but we now have to insert
        // all differing elements.
        for (unsigned I = 0; I < NumElts; ++I) {
          if (Op.getOperand(I) == Value)
            continue;
          SmallVector<SDValue, 3> Ops;
          Ops.push_back(N);
          Ops.push_back(Op.getOperand(I));
          Ops.push_back(DAG.getConstant(I, MVT::i64));
          N = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, &Ops[0], 3);
        }
      }
      return N;
    }
    if (usesOnlyOneValue && isConstant) {
      return DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Value);
    }
  }
  // If all elements are constants and the case above didn't get hit, fall back
  // to the default expansion, which will generate a load from the constant
  // pool.
  if (isConstant)
    return SDValue();

  // Try to lower this as a VECTOR_SHUFFLE instead.
  SDValue Shuf;
  if (isKnownShuffleVector(Op, DAG, Shuf))
    return Shuf;

  // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
  // know the default expansion would otherwise fall back on something even
  // worse. For a vector with one or two non-undef values, that's
  // scalar_to_vector for the elements followed by a shuffle (provided the
  // shuffle is valid for the target) and materialization element by element
  // on the stack followed by a load for everything else.
  if (!isConstant && !usesOnlyOneValue) {
    SDValue Vec = DAG.getUNDEF(VT);
    for (unsigned i = 0; i < NumElts; ++i) {
      SDValue V = Op.getOperand(i);
      if (V.getOpcode() == ISD::UNDEF)
        continue;
      SDValue LaneIdx = DAG.getConstant(i, MVT::i64);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V, LaneIdx);
    }
    return Vec;
  }
  return SDValue();
}

/// isREVMask - Check if a vector shuffle corresponds to a REV
/// instruction with the specified blocksize. (The order of the elements
/// within each block of the vector is reversed.)
static bool isREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
  assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
         "Only possible block sizes for REV are: 16, 32, 64");

  unsigned EltSz = VT.getVectorElementType().getSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  unsigned BlockElts = M[0] + 1;
  // If the first shuffle index is UNDEF, be optimistic.
  if (M[0] < 0)
    BlockElts = BlockSize / EltSz;

  if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
    return false;

  for (unsigned i = 0; i < NumElts; ++i) {
    if (M[i] < 0)
      continue; // ignore UNDEF indices
    if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
      return false;
  }

  return true;
}

// isPermuteMask - Check whether the vector shuffle matches one of the UZP,
// ZIP or TRN instructions.
static unsigned isPermuteMask(ArrayRef<int> M, EVT VT) {
  unsigned NumElts = VT.getVectorNumElements();
  if (NumElts < 4)
    return 0;

  bool ismatch = true;

  // Check UZP1
  for (unsigned i = 0; i < NumElts; ++i) {
    if ((unsigned)M[i] != i * 2) {
      ismatch = false;
      break;
    }
  }
  if (ismatch)
    return AArch64ISD::NEON_UZP1;

  // Check UZP2
  ismatch = true;
  for (unsigned i = 0; i < NumElts; ++i) {
    if ((unsigned)M[i] != i * 2 + 1) {
      ismatch = false;
      break;
    }
  }
  if (ismatch)
    return AArch64ISD::NEON_UZP2;

  // Check ZIP1
  ismatch = true;
  for (unsigned i = 0; i < NumElts; ++i) {
    if ((unsigned)M[i] != i / 2 + NumElts * (i % 2)) {
      ismatch = false;
      break;
    }
  }
  if (ismatch)
    return AArch64ISD::NEON_ZIP1;

  // Check ZIP2
  ismatch = true;
  for (unsigned i = 0; i < NumElts; ++i) {
    if ((unsigned)M[i] != (NumElts + i) / 2 + NumElts * (i % 2)) {
      ismatch = false;
      break;
    }
  }
  if (ismatch)
    return AArch64ISD::NEON_ZIP2;

  // Check TRN1
  ismatch = true;
  for (unsigned i = 0; i < NumElts; ++i) {
    if ((unsigned)M[i] != i + (NumElts - 1) * (i % 2)) {
      ismatch = false;
      break;
    }
  }
  if (ismatch)
    return AArch64ISD::NEON_TRN1;

  // Check TRN2
  ismatch = true;
  for (unsigned i = 0; i < NumElts; ++i) {
    if ((unsigned)M[i] != 1 + i + (NumElts - 1) * (i % 2)) {
      ismatch = false;
      break;
    }
  }
  if (ismatch)
    return AArch64ISD::NEON_TRN2;

  return 0;
}

SDValue
AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());

  // Convert shuffles that are directly supported on NEON to target-specific
  // DAG nodes, instead of keeping them as shuffles and matching them again
  // during code selection. This is more efficient and avoids the possibility
  // of inconsistencies between legalization and selection.
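  // For example (an illustrative case): for a v4i32 shuffle of V1 and V2, the
  // mask <0, 2, 4, 6> selects the even lanes of the pair and maps to
  // NEON_UZP1, while <0, 4, 1, 5> interleaves the two low halves and maps to
  // NEON_ZIP1.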
  ArrayRef<int> ShuffleMask = SVN->getMask();

  unsigned EltSize = VT.getVectorElementType().getSizeInBits();
  if (EltSize > 64)
    return SDValue();

  if (isREVMask(ShuffleMask, VT, 64))
    return DAG.getNode(AArch64ISD::NEON_REV64, dl, VT, V1);
  if (isREVMask(ShuffleMask, VT, 32))
    return DAG.getNode(AArch64ISD::NEON_REV32, dl, VT, V1);
  if (isREVMask(ShuffleMask, VT, 16))
    return DAG.getNode(AArch64ISD::NEON_REV16, dl, VT, V1);

  unsigned ISDNo = isPermuteMask(ShuffleMask, VT);
  if (ISDNo)
    return DAG.getNode(ISDNo, dl, VT, V1, V2);

  // If the elements of the shuffle mask are all the same constant, we can
  // transform it into either NEON_VDUP or NEON_VDUPLANE.
  if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
    int Lane = SVN->getSplatIndex();
    // If this is an undef splat, generate it via "just" vdup, if possible.
    if (Lane == -1) Lane = 0;

    // Test if V1 is a SCALAR_TO_VECTOR.
    if (V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
      return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT, V1.getOperand(0));
    }
    // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR.
    if (V1.getOpcode() == ISD::BUILD_VECTOR) {
      bool IsScalarToVector = true;
      for (unsigned i = 0, e = V1.getNumOperands(); i != e; ++i)
        if (V1.getOperand(i).getOpcode() != ISD::UNDEF &&
            i != (unsigned)Lane) {
          IsScalarToVector = false;
          break;
        }
      if (IsScalarToVector)
        return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT,
                           V1.getOperand(Lane));
    }

    // Test if V1 is an EXTRACT_SUBVECTOR.
    if (V1.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
      int ExtLane = cast<ConstantSDNode>(V1.getOperand(1))->getZExtValue();
      return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, V1.getOperand(0),
                         DAG.getConstant(Lane + ExtLane, MVT::i64));
    }
    // Test if V1 is a CONCAT_VECTORS.
    if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
        V1.getOperand(1).getOpcode() == ISD::UNDEF) {
      SDValue Op0 = V1.getOperand(0);
      assert((unsigned)Lane < Op0.getValueType().getVectorNumElements() &&
             "Invalid vector lane access");
      return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, Op0,
                         DAG.getConstant(Lane, MVT::i64));
    }

    return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, V1,
                       DAG.getConstant(Lane, MVT::i64));
  }

  int Length = ShuffleMask.size();
  int V1EltNum = V1.getValueType().getVectorNumElements();

  // If the number of V1 elements is the same as the number of shuffle mask
  // elements and the shuffle mask values are sequential, we can transform
  // it into NEON_VEXTRACT.
  if (V1EltNum == Length) {
    // Check if the shuffle mask is sequential.
    bool IsSequential = true;
    int CurMask = ShuffleMask[0];
    for (int I = 0; I < Length; ++I) {
      if (ShuffleMask[I] != CurMask) {
        IsSequential = false;
        break;
      }
      CurMask++;
    }
    if (IsSequential) {
      assert((EltSize % 8 == 0) && "Bitsize of vector element is incorrect");
      unsigned VecSize = EltSize * V1EltNum;
      unsigned Index = (EltSize / 8) * ShuffleMask[0];
      if (VecSize == 64 || VecSize == 128)
        return DAG.getNode(AArch64ISD::NEON_VEXTRACT, dl, VT, V1, V2,
                           DAG.getConstant(Index, MVT::i64));
    }
  }

  // For a shuffle mask like "0, 1, 2, 3, 4, 5, 13, 7", try to generate an
  // insert by element from V2 into V1.
  // If the shuffle mask is like "0, 1, 10, 11, 12, 13, 14, 15", V2 is the
  // better insert target since it needs fewer inserts, so we count the
  // elements that would need inserting for both V1 and V2 and pick whichever
  // needs fewer.

  // Collect the elements that need to be inserted and their indices.
  SmallVector<int, 8> NV1Elt;
  SmallVector<int, 8> N1Index;
  SmallVector<int, 8> NV2Elt;
  SmallVector<int, 8> N2Index;
  for (int I = 0; I != Length; ++I) {
    if (ShuffleMask[I] != I) {
      NV1Elt.push_back(ShuffleMask[I]);
      N1Index.push_back(I);
    }
  }
  for (int I = 0; I != Length; ++I) {
    if (ShuffleMask[I] != (I + V1EltNum)) {
      NV2Elt.push_back(ShuffleMask[I]);
      N2Index.push_back(I);
    }
  }

  // Decide which vector to insert into. If all lanes mismatch, neither V1 nor
  // V2 is used as the insert target.
  SDValue InsV = V1;
  SmallVector<int, 8> InsMasks = NV1Elt;
  SmallVector<int, 8> InsIndex = N1Index;
  if ((int)NV1Elt.size() != Length || (int)NV2Elt.size() != Length) {
    if (NV1Elt.size() > NV2Elt.size()) {
      InsV = V2;
      InsMasks = NV2Elt;
      InsIndex = N2Index;
    }
  } else {
    InsV = DAG.getNode(ISD::UNDEF, dl, VT);
  }

  for (int I = 0, E = InsMasks.size(); I != E; ++I) {
    SDValue ExtV = V1;
    int Mask = InsMasks[I];
    if (Mask >= V1EltNum) {
      ExtV = V2;
      Mask -= V1EltNum;
    }
    // Any value type smaller than i32 is illegal in AArch64, and this lowering
    // function is called after the legalize pass, so we need to legalize the
    // result here.
    EVT EltVT;
    if (VT.getVectorElementType().isFloatingPoint())
      EltVT = (EltSize == 64) ? MVT::f64 : MVT::f32;
    else
      EltVT = (EltSize == 64) ? MVT::i64 : MVT::i32;

    if (Mask >= 0) {
      ExtV = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, ExtV,
                         DAG.getConstant(Mask, MVT::i64));
      InsV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, InsV, ExtV,
                         DAG.getConstant(InsIndex[I], MVT::i64));
    }
  }
  return InsV;
}

AArch64TargetLowering::ConstraintType
AArch64TargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'w': // An FP/SIMD vector register
      return C_RegisterClass;
    case 'I': // Constant that can be used with an ADD instruction
    case 'J': // Constant that can be used with a SUB instruction
    case 'K': // Constant that can be used with a 32-bit logical instruction
    case 'L': // Constant that can be used with a 64-bit logical instruction
    case 'M': // Constant that can be used as a 32-bit MOV immediate
    case 'N': // Constant that can be used as a 64-bit MOV immediate
    case 'Y': // Floating point constant zero
    case 'Z': // Integer constant zero
      return C_Other;
    case 'Q': // A memory reference with base register and no offset
      return C_Memory;
    case 'S': // A symbolic address
      return C_Other;
    }
  }

  // FIXME: Ump, Utf, Usa, Ush
  // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes,
  //      whatever they may be
  // Utf: A memory address suitable for ldp/stp in TF mode, whatever it may be
  // Usa: An absolute symbolic address
  // Ush: The high part (bits 32:12) of a pc-relative symbolic address
  assert(Constraint != "Ump" && Constraint != "Utf" && Constraint != "Usa" &&
         Constraint != "Ush" && "Unimplemented constraints");

  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight
AArch64TargetLowering::getSingleConstraintMatchWeight(AsmOperandInfo &Info,
                                                  const char *Constraint) const {

  llvm_unreachable("Constraint weight unimplemented");
}

void
AArch64TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                    std::string &Constraint,
                                                    std::vector<SDValue> &Ops,
                                                    SelectionDAG &DAG) const {
  SDValue Result(0, 0);

  // Only length-1 constraints are C_Other.
  if (Constraint.size() != 1) return;

  // Only C_Other constraints get lowered like this. That means constants for
  // us, so return early if there's no hope the constraint can be lowered.

  switch (Constraint[0]) {
  default: break;
  case 'I': case 'J': case 'K': case 'L':
  case 'M': case 'N': case 'Z': {
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    if (!C)
      return;

    uint64_t CVal = C->getZExtValue();
    uint32_t Bits;

    switch (Constraint[0]) {
    default:
      // FIXME: 'M' and 'N' are MOV pseudo-insts -- unsupported in assembly.
      // 'J' is a peculiarly useless SUB constraint.
      llvm_unreachable("Unimplemented C_Other constraint");
    case 'I':
      if (CVal <= 0xfff)
        break;
      return;
    case 'K':
      if (A64Imms::isLogicalImm(32, CVal, Bits))
        break;
      return;
    case 'L':
      if (A64Imms::isLogicalImm(64, CVal, Bits))
        break;
      return;
    case 'Z':
      if (CVal == 0)
        break;
      return;
    }

    Result = DAG.getTargetConstant(CVal, Op.getValueType());
    break;
  }
  case 'S': {
    // An absolute symbolic address or label reference.
    if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
      Result = DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
                                          GA->getValueType(0));
    } else if (const BlockAddressSDNode *BA
                   = dyn_cast<BlockAddressSDNode>(Op)) {
      Result = DAG.getTargetBlockAddress(BA->getBlockAddress(),
                                         BA->getValueType(0));
    } else if (const ExternalSymbolSDNode *ES
                   = dyn_cast<ExternalSymbolSDNode>(Op)) {
      Result = DAG.getTargetExternalSymbol(ES->getSymbol(),
                                           ES->getValueType(0));
    } else
      return;
    break;
  }
  case 'Y':
    if (const ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFP->isExactlyValue(0.0)) {
        Result = DAG.getTargetConstantFP(0.0, CFP->getValueType(0));
        break;
      }
    }
    return;
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  // It's an unknown constraint for us. Let generic code have a go.
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::pair<unsigned, const TargetRegisterClass*>
AArch64TargetLowering::getRegForInlineAsmConstraint(
                                                 const std::string &Constraint,
                                                 MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      if (VT.getSizeInBits() <= 32)
        return std::make_pair(0U, &AArch64::GPR32RegClass);
      else if (VT == MVT::i64)
        return std::make_pair(0U, &AArch64::GPR64RegClass);
      break;
    case 'w':
      if (VT == MVT::f16)
        return std::make_pair(0U, &AArch64::FPR16RegClass);
      else if (VT == MVT::f32)
        return std::make_pair(0U, &AArch64::FPR32RegClass);
      else if (VT.getSizeInBits() == 64)
        return std::make_pair(0U, &AArch64::FPR64RegClass);
      else if (VT.getSizeInBits() == 128)
        return std::make_pair(0U, &AArch64::FPR128RegClass);
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

/// Represent NEON load and store intrinsics as MemIntrinsicNodes.
/// The associated MachineMemOperands record the alignment specified
/// in the intrinsic calls.
bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                               const CallInst &I,
                                               unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::aarch64_neon_vld1x2:
  case Intrinsic::aarch64_neon_vld1x3:
  case Intrinsic::aarch64_neon_vld1x4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // Conservatively set memVT to the entire set of vectors loaded.
    uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8;
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    Info.vol = false; // volatile loads with NEON intrinsics not supported
    Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::aarch64_neon_vst1x2:
  case Intrinsic::aarch64_neon_vst1x3:
  case Intrinsic::aarch64_neon_vst1x4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    Info.opc = ISD::INTRINSIC_VOID;
    // Conservatively set memVT to the entire set of vectors stored.
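    // Illustrative example (not from the original source): a vst2 of two
    // v4i32 vectors stores 32 bytes, so the loop below computes NumElts == 4
    // and memVT becomes v4i64.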
    unsigned NumElts = 0;
    for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
      Type *ArgTy = I.getArgOperand(ArgI)->getType();
      if (!ArgTy->isVectorTy())
        break;
      NumElts += getDataLayout()->getTypeAllocSize(ArgTy) / 8;
    }
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    Info.vol = false; // volatile stores with NEON intrinsics not supported
    Info.readMem = false;
    Info.writeMem = true;
    return true;
  }
  default:
    break;
  }

  return false;
}