//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-isel"
#include "ARMISelLowering.h"
#include "ARM.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "ARMTargetObjectFile.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");

// This option should go away when tail calls fully work.
static cl::opt<bool>
EnableARMTailCalls("arm-tail-calls", cl::Hidden,
  cl::desc("Generate tail calls (TEMPORARY OPTION)."),
  cl::init(false));

cl::opt<bool>
EnableARMLongCalls("arm-long-calls", cl::Hidden,
  cl::desc("Generate calls via indirect call instructions"),
  cl::init(false));

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));

namespace {
  class ARMCCState : public CCState {
  public:
    ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
               const TargetMachine &TM, SmallVector<CCValAssign, 16> &locs,
               LLVMContext &C, ParmContext PC)
      : CCState(CC, isVarArg, MF, TM, locs, C) {
      assert(((PC == Call) || (PC == Prologue)) &&
             "ARMCCState users must specify whether their context is call "
             "or prologue generation.");
      CallOrPrologue = PC;
    }
  };
}

// The APCS parameter registers.
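// (r0-r3 are also the integer argument registers under AAPCS, so the same
// list serves both ABIs.)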
static const uint16_t GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};

void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
                                       MVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT, Promote);
    AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);

    setOperationAction(ISD::STORE, VT, Promote);
    AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
  }

  MVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::i64 && ElemTy != MVT::f64)
    setOperationAction(ISD::SETCC, VT, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
  if (ElemTy == MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT, Custom);
    setOperationAction(ISD::UINT_TO_FP, VT, Custom);
    setOperationAction(ISD::FP_TO_SINT, VT, Custom);
    setOperationAction(ISD::FP_TO_UINT, VT, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
  setOperationAction(ISD::SELECT, VT, Expand);
  setOperationAction(ISD::SELECT_CC, VT, Expand);
  setOperationAction(ISD::VSELECT, VT, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
  }

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT, Promote);
    AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
    setOperationAction(ISD::OR,  VT, Promote);
    AddPromotedToType (ISD::OR,  VT, PromotedBitwiseVT);
    setOperationAction(ISD::XOR, VT, Promote);
    AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT, Expand);
  setOperationAction(ISD::UDIV, VT, Expand);
  setOperationAction(ISD::FDIV, VT, Expand);
  setOperationAction(ISD::SREM, VT, Expand);
  setOperationAction(ISD::UREM, VT, Expand);
  setOperationAction(ISD::FREM, VT, Expand);
}

void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPRRegClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::QPRRegClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

static TargetLoweringObjectFile *createTLOF(TargetMachine &TM) {
  if (TM.getSubtarget<ARMSubtarget>().isTargetDarwin())
    return new TargetLoweringObjectFileMachO();

  return new ARMElfTargetObjectFile();
}

ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)) {
  Subtarget = &TM.getSubtarget<ARMSubtarget>();
  RegInfo = TM.getRegisterInfo();
  Itins = TM.getInstrItineraryData();

  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (Subtarget->isTargetDarwin()) {
    // Uses VFP for Thumb libfuncs if available.
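    // These Darwin-specific "*vfp" helpers implement the operations with VFP
    // instructions rather than with generic integer soft-float code.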
    if (Subtarget->isThumb() && Subtarget->hasVFP2()) {
      // Single-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F32, "__addsf3vfp");
      setLibcallName(RTLIB::SUB_F32, "__subsf3vfp");
      setLibcallName(RTLIB::MUL_F32, "__mulsf3vfp");
      setLibcallName(RTLIB::DIV_F32, "__divsf3vfp");

      // Double-precision floating-point arithmetic.
      setLibcallName(RTLIB::ADD_F64, "__adddf3vfp");
      setLibcallName(RTLIB::SUB_F64, "__subdf3vfp");
      setLibcallName(RTLIB::MUL_F64, "__muldf3vfp");
      setLibcallName(RTLIB::DIV_F64, "__divdf3vfp");

      // Single-precision comparisons.
      setLibcallName(RTLIB::OEQ_F32, "__eqsf2vfp");
      setLibcallName(RTLIB::UNE_F32, "__nesf2vfp");
      setLibcallName(RTLIB::OLT_F32, "__ltsf2vfp");
      setLibcallName(RTLIB::OLE_F32, "__lesf2vfp");
      setLibcallName(RTLIB::OGE_F32, "__gesf2vfp");
      setLibcallName(RTLIB::OGT_F32, "__gtsf2vfp");
      setLibcallName(RTLIB::UO_F32,  "__unordsf2vfp");
      setLibcallName(RTLIB::O_F32,   "__unordsf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F32,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F32,   ISD::SETEQ);

      // Double-precision comparisons.
      setLibcallName(RTLIB::OEQ_F64, "__eqdf2vfp");
      setLibcallName(RTLIB::UNE_F64, "__nedf2vfp");
      setLibcallName(RTLIB::OLT_F64, "__ltdf2vfp");
      setLibcallName(RTLIB::OLE_F64, "__ledf2vfp");
      setLibcallName(RTLIB::OGE_F64, "__gedf2vfp");
      setLibcallName(RTLIB::OGT_F64, "__gtdf2vfp");
      setLibcallName(RTLIB::UO_F64,  "__unorddf2vfp");
      setLibcallName(RTLIB::O_F64,   "__unorddf2vfp");

      setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
      setCmpLibcallCC(RTLIB::UO_F64,  ISD::SETNE);
      setCmpLibcallCC(RTLIB::O_F64,   ISD::SETEQ);

      // Floating-point to integer conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      setLibcallName(RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp");
      setLibcallName(RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp");
      setLibcallName(RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp");

      // Conversions between floating types.
      setLibcallName(RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp");
      setLibcallName(RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp");

      // Integer to floating-point conversions.
      // i64 conversions are done via library routines even when generating VFP
      // instructions, so use the same ones.
      // FIXME: There appears to be some naming inconsistency in ARM libgcc:
      // e.g., __floatunsidf vs. __floatunssidfvfp.
      setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
      setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
      setLibcallName(RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp");
    }
  }

  // These libcalls are not available in 32-bit.
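  // Clearing the name marks a libcall as unavailable, so 128-bit shifts are
  // expanded inline instead of being turned into calls.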
  setLibcallName(RTLIB::SHL_I128, 0);
  setLibcallName(RTLIB::SRL_I128, 0);
  setLibcallName(RTLIB::SRA_I128, 0);

  if (Subtarget->isAAPCS_ABI() && !Subtarget->isTargetDarwin()) {
    // Double-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 2
    setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd");
    setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv");
    setLibcallName(RTLIB::MUL_F64, "__aeabi_dmul");
    setLibcallName(RTLIB::SUB_F64, "__aeabi_dsub");
    setLibcallCallingConv(RTLIB::ADD_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F64, CallingConv::ARM_AAPCS);

    // Double-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 3
    setLibcallName(RTLIB::OEQ_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F64, "__aeabi_dcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F64, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F64, "__aeabi_dcmplt");
    setCmpLibcallCC(RTLIB::OLT_F64, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F64, "__aeabi_dcmple");
    setCmpLibcallCC(RTLIB::OLE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F64, "__aeabi_dcmpge");
    setCmpLibcallCC(RTLIB::OGE_F64, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F64, "__aeabi_dcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F64, ISD::SETNE);
    setLibcallName(RTLIB::UO_F64,  "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::UO_F64, ISD::SETNE);
    setLibcallName(RTLIB::O_F64,   "__aeabi_dcmpun");
    setCmpLibcallCC(RTLIB::O_F64, ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F64, CallingConv::ARM_AAPCS);

    // Single-precision floating-point arithmetic helper functions
    // RTABI chapter 4.1.2, Table 4
    setLibcallName(RTLIB::ADD_F32, "__aeabi_fadd");
    setLibcallName(RTLIB::DIV_F32, "__aeabi_fdiv");
    setLibcallName(RTLIB::MUL_F32, "__aeabi_fmul");
    setLibcallName(RTLIB::SUB_F32, "__aeabi_fsub");
    setLibcallCallingConv(RTLIB::ADD_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::DIV_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MUL_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SUB_F32, CallingConv::ARM_AAPCS);

    // Single-precision floating-point comparison helper functions
    // RTABI chapter 4.1.2, Table 5
    setLibcallName(RTLIB::OEQ_F32, "__aeabi_fcmpeq");
    setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETNE);
    setLibcallName(RTLIB::UNE_F32, "__aeabi_fcmpeq");
    setCmpLibcallCC(RTLIB::UNE_F32, ISD::SETEQ);
    setLibcallName(RTLIB::OLT_F32, "__aeabi_fcmplt");
    setCmpLibcallCC(RTLIB::OLT_F32, ISD::SETNE);
    setLibcallName(RTLIB::OLE_F32, "__aeabi_fcmple");
    setCmpLibcallCC(RTLIB::OLE_F32, ISD::SETNE);
    setLibcallName(RTLIB::OGE_F32, "__aeabi_fcmpge");
    setCmpLibcallCC(RTLIB::OGE_F32, ISD::SETNE);
    setLibcallName(RTLIB::OGT_F32, "__aeabi_fcmpgt");
    setCmpLibcallCC(RTLIB::OGT_F32, ISD::SETNE);
    setLibcallName(RTLIB::UO_F32,  "__aeabi_fcmpun");
    setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE);
    setLibcallName(RTLIB::O_F32,   "__aeabi_fcmpun");
    setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ);
    setLibcallCallingConv(RTLIB::OEQ_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UNE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLT_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OLE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGE_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::OGT_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UO_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::O_F32, CallingConv::ARM_AAPCS);

    // Floating-point to integer conversions.
    // RTABI chapter 4.1.2, Table 6
    setLibcallName(RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz");
    setLibcallName(RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz");
    setLibcallName(RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz");
    setLibcallName(RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz");
    setLibcallName(RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz");
    setLibcallName(RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz");
    setLibcallName(RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz");
    setLibcallName(RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz");
    setLibcallCallingConv(RTLIB::FPTOSINT_F64_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F64_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F64_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F64_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F32_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F32_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOSINT_F32_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPTOUINT_F32_I64, CallingConv::ARM_AAPCS);

    // Conversions between floating types.
    // RTABI chapter 4.1.2, Table 7
    setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f");
    setLibcallName(RTLIB::FPEXT_F32_F64,   "__aeabi_f2d");
    setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS);

    // Integer to floating-point conversions.
    // RTABI chapter 4.1.2, Table 8
    setLibcallName(RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d");
    setLibcallName(RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d");
    setLibcallName(RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d");
    setLibcallName(RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d");
    setLibcallName(RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f");
    setLibcallName(RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f");
    setLibcallName(RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f");
    setLibcallName(RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f");
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I32_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SINTTOFP_I64_F32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UINTTOFP_I64_F32, CallingConv::ARM_AAPCS);

    // Long long helper functions
    // RTABI chapter 4.2, Table 9
    setLibcallName(RTLIB::MUL_I64, "__aeabi_lmul");
    setLibcallName(RTLIB::SHL_I64, "__aeabi_llsl");
    setLibcallName(RTLIB::SRL_I64, "__aeabi_llsr");
    setLibcallName(RTLIB::SRA_I64, "__aeabi_lasr");
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SHL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRL_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SRA_I64, CallingConv::ARM_AAPCS);

    // Integer division functions
    // RTABI chapter 4.3.1
    setLibcallName(RTLIB::SDIV_I8,  "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I16, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv");
    setLibcallName(RTLIB::SDIV_I64, "__aeabi_ldivmod");
    setLibcallName(RTLIB::UDIV_I8,  "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I16, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I32, "__aeabi_uidiv");
    setLibcallName(RTLIB::UDIV_I64, "__aeabi_uldivmod");
    setLibcallCallingConv(RTLIB::SDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::ARM_AAPCS);

    // Memory operations
    // RTABI chapter 4.3.4
    setLibcallName(RTLIB::MEMCPY,  "__aeabi_memcpy");
    setLibcallName(RTLIB::MEMMOVE, "__aeabi_memmove");
    setLibcallName(RTLIB::MEMSET,  "__aeabi_memset");
    setLibcallCallingConv(RTLIB::MEMCPY, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MEMMOVE, CallingConv::ARM_AAPCS);
    setLibcallCallingConv(RTLIB::MEMSET, CallingConv::ARM_AAPCS);
  }

  // Use divmod compiler-rt calls for iOS 5.0 and later.
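  // These routines return the quotient and remainder together, so a div and a
  // rem of the same operands can share one call.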
  if (Subtarget->getTargetTriple().getOS() == Triple::IOS &&
      !Subtarget->getTargetTriple().isOSVersionLT(5, 0)) {
    setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
    setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
  else
    addRegisterClass(MVT::i32, &ARM::GPRRegClass);
  if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
      !Subtarget->isThumb1Only()) {
    addRegisterClass(MVT::f32, &ARM::SPRRegClass);
    if (!Subtarget->isFPOnlySP())
      addRegisterClass(MVT::f64, &ARM::DPRRegClass);

    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  }

  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    for (unsigned InnerVT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
         InnerVT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++InnerVT)
      setTruncStoreAction((MVT::SimpleValueType)VT,
                          (MVT::SimpleValueType)InnerVT, Expand);
    setLoadExtAction(ISD::SEXTLOAD, (MVT::SimpleValueType)VT, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, (MVT::SimpleValueType)VT, Expand);
    setLoadExtAction(ISD::EXTLOAD, (MVT::SimpleValueType)VT, Expand);
  }

  setOperationAction(ISD::ConstantFP, MVT::f32, Custom);

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // neither Neon nor VFP support any arithmetic operations on it.
    // The same goes for v4f32, although vadd, vsub and vmul are natively
    // supported for v4f32.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    // FIXME: Code duplication: FDIV and FREM are expanded always, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    // FIXME: Create unittest.
    // In other words, find a case where "copysign" appears in the DAG with
    // vector operands.
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    // FIXME: Code duplication: SETCC has custom operation action, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
    // FIXME: Create unittest for FNEG and for FABS.
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
    setOperationAction(ISD::FMA, MVT::v2f64, Expand);

    setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOWI, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);

    // Mark v2f32 intrinsics.
    setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
    setOperationAction(ISD::FPOWI, MVT::v2f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
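    // (e.g. a v8i16 multiply whose operands are extended from v8i8 can be
    // selected as a single vmull.)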
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    // Custom handling for some vector types to avoid expensive expansions
    setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v1i64, Expand);
    setOperationAction(ISD::SETCC, MVT::v2i64, Expand);
    // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with
    // a destination type that is wider than the source, nor does it have a
    // FP_TO_[SU]INT instruction with a narrower destination than source.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);

    setOperationAction(ISD::FP_ROUND,  MVT::v2f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand);

    // Custom expand long extensions to vectors.
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);

    // NEON does not have single instruction CTPOP for vectors with element
    // types wider than 8-bits. However, custom lowering can leverage the
    // v8i8/v16i8 vcnt instruction.
    setOperationAction(ISD::CTPOP, MVT::v2i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i16, Custom);
    setOperationAction(ISD::CTPOP, MVT::v8i16, Custom);

    // NEON only has FMA instructions as of VFP4.
    if (!Subtarget->hasVFP4()) {
      setOperationAction(ISD::FMA, MVT::v2f32, Expand);
      setOperationAction(ISD::FMA, MVT::v4f32, Expand);
    }

    setTargetDAGCombine(ISD::INTRINSIC_VOID);
    setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::SELECT_CC);
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
    setTargetDAGCombine(ISD::STORE);
    setTargetDAGCombine(ISD::FP_TO_SINT);
    setTargetDAGCombine(ISD::FP_TO_UINT);
    setTargetDAGCombine(ISD::FDIV);

    // It is legal to extload from v4i8 to v4i16 or v4i32.
    MVT Tys[6] = {MVT::v8i8, MVT::v4i8, MVT::v2i8,
                  MVT::v4i16, MVT::v2i16,
                  MVT::v2i32};
    for (unsigned i = 0; i < 6; ++i) {
      setLoadExtAction(ISD::EXTLOAD, Tys[i], Legal);
      setLoadExtAction(ISD::ZEXTLOAD, Tys[i], Legal);
      setLoadExtAction(ISD::SEXTLOAD, Tys[i], Legal);
    }
  }

  // ARM and Thumb2 support UMLAL/SMLAL.
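  // The ADDC combine looks for 64-bit adds of multiply results and forms the
  // corresponding multiply-accumulate nodes.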
  if (!Subtarget->isThumb1Only())
    setTargetDAGCombine(ISD::ADDC);

  computeRegisterProperties();

  // ARM does not have f32 extending load.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);

  // ARM does not have i1 sign extending load.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im,  MVT::i1,  Legal);
      setIndexedLoadAction(im,  MVT::i8,  Legal);
      setIndexedLoadAction(im,  MVT::i16, Legal);
      setIndexedLoadAction(im,  MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1,  Legal);
      setIndexedStoreAction(im, MVT::i8,  Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  }

  // i64 operation support.
  setOperationAction(ISD::MUL,   MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  }
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
      || (Subtarget->isThumb2() && !Subtarget->hasThumb2DSP()))
    setOperationAction(ISD::MULHS, MVT::i32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);

  if (!Subtarget->isThumb1Only()) {
    // FIXME: We should do this for Thumb1 as well.
    setOperationAction(ISD::ADDC, MVT::i32, Custom);
    setOperationAction(ISD::ADDE, MVT::i32, Custom);
    setOperationAction(ISD::SUBC, MVT::i32, Custom);
    setOperationAction(ISD::SUBE, MVT::i32, Custom);
  }

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL,  MVT::i32, Expand);
  setOperationAction(ISD::CTTZ,  MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);

  // These just redirect to CTTZ and CTLZ on ARM.
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  if (!(Subtarget->hasDivide() && Subtarget->isThumb2()) &&
      !(Subtarget->hasDivideInARMMode() && !Subtarget->isThumb())) {
    // These are expanded into libcalls if the cpu doesn't have HW divider.
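    // (__aeabi_idiv / __aeabi_uidiv on AAPCS targets, __divsi3 / __udivsi3
    // elsewhere.)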
    setOperationAction(ISD::SDIV, MVT::i32, Expand);
    setOperationAction(ISD::UDIV, MVT::i32, Expand);
  }
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
  setOperationAction(ISD::GLOBAL_OFFSET_TABLE, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VASTART,      MVT::Other, Custom);
  setOperationAction(ISD::VAARG,        MVT::Other, Expand);
  setOperationAction(ISD::VACOPY,       MVT::Other, Expand);
  setOperationAction(ISD::VAEND,        MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE,    MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  if (!Subtarget->isTargetDarwin()) {
    // Non-Darwin platforms may return values in these registers via the
    // personality function.
    setOperationAction(ISD::EHSELECTION,   MVT::i32, Expand);
    setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
    setExceptionPointerRegister(ARM::R0);
    setExceptionSelectorRegister(ARM::R1);
  }

  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
  // the default expansion.
  // FIXME: This should be checking for v6k, not just v6.
  if (Subtarget->hasDataBarrier() ||
      (Subtarget->hasV6Ops() && !Subtarget->isThumb())) {
    // membarrier needs custom lowering; the rest are legal and handled
    // normally.
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
    // Custom lowering for 64-bit ops
    setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_SWAP,      MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_MIN,  MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_MAX,  MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i64, Custom);
    // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
    setInsertFencesForAtomic(true);
  } else {
    // Set them all for expansion, which will force libcalls.
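    // (The legalizer turns these into calls to the __sync_* runtime routines.)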
    setOperationAction(ISD::ATOMIC_FENCE,     MVT::Other, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_SWAP,      MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX,  MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
    // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
    // Unordered/Monotonic case.
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
  }

  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
  if (!Subtarget->hasV6Ops()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
      !Subtarget->isThumb1Only()) {
    // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
    // iff target supports vfp2.
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget->isTargetDarwin()) {
    setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
    setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
  }

  setOperationAction(ISD::SETCC,     MVT::i32, Expand);
  setOperationAction(ISD::SETCC,     MVT::f32, Expand);
  setOperationAction(ISD::SETCC,     MVT::f64, Expand);
  setOperationAction(ISD::SELECT,    MVT::i32, Custom);
  setOperationAction(ISD::SELECT,    MVT::f32, Custom);
  setOperationAction(ISD::SELECT,    MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC,  MVT::i32,   Custom);
  setOperationAction(ISD::BR_CC,  MVT::f32,   Custom);
  setOperationAction(ISD::BR_CC,  MVT::f64,   Custom);
  setOperationAction(ISD::BR_JT,  MVT::Other, Custom);

  // We don't support sin/cos/fmod/copysign/pow
  setOperationAction(ISD::FSIN,    MVT::f64, Expand);
  setOperationAction(ISD::FSIN,    MVT::f32, Expand);
  setOperationAction(ISD::FCOS,    MVT::f32, Expand);
  setOperationAction(ISD::FCOS,    MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM,    MVT::f64, Expand);
  setOperationAction(ISD::FREM,    MVT::f32, Expand);
  if (!TM.Options.UseSoftFloat && Subtarget->hasVFP2() &&
      !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  if (!Subtarget->hasVFP4()) {
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f32, Expand);
  }

  // Various VFP goodness
  if (!TM.Options.UseSoftFloat && !Subtarget->isThumb1Only()) {
    // int <-> fp are custom expanded into bit_convert + ARMISD ops.
    if (Subtarget->hasVFP2()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    }
    // Special handling for half-precision FP.
    if (!Subtarget->hasFP16()) {
      setOperationAction(ISD::FP16_TO_FP32, MVT::f32, Expand);
      setOperationAction(ISD::FP32_TO_FP16, MVT::i32, Expand);
    }
  }

  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);

  if (Subtarget->hasV6Ops())
    setTargetDAGCombine(ISD::SRL);

  setStackPointerRegisterToSaveRestore(ARM::SP);

  if (TM.Options.UseSoftFloat || Subtarget->isThumb1Only() ||
      !Subtarget->hasVFP2())
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Hybrid);

  //// temporary - rewrite interface to use type
  MaxStoresPerMemset = 8;
  MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ?
    8 : 4;
  MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores
  MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 4 : 2;
  MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores
  MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 4 : 2;

  // On ARM arguments smaller than 4 bytes are extended, so all arguments
  // are at least 4 bytes aligned.
  setMinStackArgumentAlignment(4);

  // Prefer likely predicted branches to selects on out-of-order cores.
  PredictableSelectIsExpensive = Subtarget->isLikeA9();

  setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
}

// FIXME: It might make sense to define the representative register class as the
// nearest super-register that has a non-null superset. For example, DPR_VFP2 is
// a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
// SPR's representative would be DPR_VFP2. This should work well if register
// pressure tracking were modified such that a register use would increment the
// pressure of the register class's representative and all of its super-classes'
// representatives transitively. We have not implemented this because of the
// difficulty prior to coalescing of modeling operand register classes due to
// the common occurrence of cross class copies and subregister insertions and
// extractions.
std::pair<const TargetRegisterClass*, uint8_t>
ARMTargetLowering::findRepresentativeClass(MVT VT) const {
  const TargetRegisterClass *RRC = 0;
  uint8_t Cost = 1;
  switch (VT.SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  // Use DPR as representative register class for all floating point
  // and vector types. Since there are 32 SPR registers and 32 DPR registers,
  // the cost is 1 for both f32 and f64.
  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = &ARM::DPRRegClass;
    // When NEON is used for SP, only half of the register file is available
    // because operations that define both SP and DP results will be constrained
    // to the VFP2 class (D0-D15). We currently model this constraint prior to
    // coalescing by double-counting the SP regs. See the FIXME above.
    if (Subtarget->useNEONForSinglePrecisionFP())
      Cost = 2;
    break;
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    RRC = &ARM::DPRRegClass;
    Cost = 2;
    break;
  case MVT::v4i64:
    RRC = &ARM::DPRRegClass;
    Cost = 4;
    break;
  case MVT::v8i64:
    RRC = &ARM::DPRRegClass;
    Cost = 8;
    break;
  }
  return std::make_pair(RRC, Cost);
}

const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case ARMISD::Wrapper: return "ARMISD::Wrapper";
  case ARMISD::WrapperDYN: return "ARMISD::WrapperDYN";
  case ARMISD::WrapperPIC: return "ARMISD::WrapperPIC";
  case ARMISD::WrapperJT: return "ARMISD::WrapperJT";
  case ARMISD::CALL: return "ARMISD::CALL";
  case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK";
  case ARMISD::tCALL: return "ARMISD::tCALL";
  case ARMISD::BRCOND: return "ARMISD::BRCOND";
  case ARMISD::BR_JT: return "ARMISD::BR_JT";
  case ARMISD::BR2_JT: return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG";
  case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD";
  case ARMISD::CMP: return "ARMISD::CMP";
  case ARMISD::CMN: return "ARMISD::CMN";
  case ARMISD::CMPZ: return "ARMISD::CMPZ";
  case ARMISD::CMPFP: return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0";
  case ARMISD::BCC_i64: return "ARMISD::BCC_i64";
  case ARMISD::FMSTAT: return "ARMISD::FMSTAT";

  case ARMISD::CMOV: return "ARMISD::CMOV";

  case ARMISD::RBIT: return "ARMISD::RBIT";

  case ARMISD::FTOSI: return "ARMISD::FTOSI";
  case ARMISD::FTOUI: return "ARMISD::FTOUI";
  case ARMISD::SITOF: return "ARMISD::SITOF";
  case ARMISD::UITOF: return "ARMISD::UITOF";

  case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG";
  case ARMISD::RRX: return "ARMISD::RRX";

  case ARMISD::ADDC: return "ARMISD::ADDC";
  case ARMISD::ADDE: return "ARMISD::ADDE";
  case ARMISD::SUBC: return "ARMISD::SUBC";
  case ARMISD::SUBE: return "ARMISD::SUBE";

  case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD";
  case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR";

  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
  case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";

  case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN";

  case ARMISD::THREAD_POINTER: return "ARMISD::THREAD_POINTER";

  case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC";

  case ARMISD::MEMBARRIER: return "ARMISD::MEMBARRIER";
  case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";

  case ARMISD::PRELOAD: return "ARMISD::PRELOAD";

  case ARMISD::VCEQ: return "ARMISD::VCEQ";
  case ARMISD::VCEQZ: return "ARMISD::VCEQZ";
  case ARMISD::VCGE: return "ARMISD::VCGE";
  case ARMISD::VCGEZ: return "ARMISD::VCGEZ";
  case ARMISD::VCLEZ: return "ARMISD::VCLEZ";
  case ARMISD::VCGEU: return "ARMISD::VCGEU";
  case ARMISD::VCGT: return "ARMISD::VCGT";
  case ARMISD::VCGTZ: return "ARMISD::VCGTZ";
  case ARMISD::VCLTZ: return "ARMISD::VCLTZ";
  case ARMISD::VCGTU: return "ARMISD::VCGTU";
  case ARMISD::VTST: return "ARMISD::VTST";

  case ARMISD::VSHL: return "ARMISD::VSHL";
  case ARMISD::VSHRs: return "ARMISD::VSHRs";
  case ARMISD::VSHRu: return "ARMISD::VSHRu";
  case ARMISD::VSHLLs: return
"ARMISD::VSHLLs"; 1007 case ARMISD::VSHLLu: return "ARMISD::VSHLLu"; 1008 case ARMISD::VSHLLi: return "ARMISD::VSHLLi"; 1009 case ARMISD::VSHRN: return "ARMISD::VSHRN"; 1010 case ARMISD::VRSHRs: return "ARMISD::VRSHRs"; 1011 case ARMISD::VRSHRu: return "ARMISD::VRSHRu"; 1012 case ARMISD::VRSHRN: return "ARMISD::VRSHRN"; 1013 case ARMISD::VQSHLs: return "ARMISD::VQSHLs"; 1014 case ARMISD::VQSHLu: return "ARMISD::VQSHLu"; 1015 case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu"; 1016 case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs"; 1017 case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu"; 1018 case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu"; 1019 case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs"; 1020 case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu"; 1021 case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu"; 1022 case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu"; 1023 case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs"; 1024 case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM"; 1025 case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM"; 1026 case ARMISD::VMOVFPIMM: return "ARMISD::VMOVFPIMM"; 1027 case ARMISD::VDUP: return "ARMISD::VDUP"; 1028 case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE"; 1029 case ARMISD::VEXT: return "ARMISD::VEXT"; 1030 case ARMISD::VREV64: return "ARMISD::VREV64"; 1031 case ARMISD::VREV32: return "ARMISD::VREV32"; 1032 case ARMISD::VREV16: return "ARMISD::VREV16"; 1033 case ARMISD::VZIP: return "ARMISD::VZIP"; 1034 case ARMISD::VUZP: return "ARMISD::VUZP"; 1035 case ARMISD::VTRN: return "ARMISD::VTRN"; 1036 case ARMISD::VTBL1: return "ARMISD::VTBL1"; 1037 case ARMISD::VTBL2: return "ARMISD::VTBL2"; 1038 case ARMISD::VMULLs: return "ARMISD::VMULLs"; 1039 case ARMISD::VMULLu: return "ARMISD::VMULLu"; 1040 case ARMISD::UMLAL: return "ARMISD::UMLAL"; 1041 case ARMISD::SMLAL: return "ARMISD::SMLAL"; 1042 case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR"; 1043 case ARMISD::FMAX: return "ARMISD::FMAX"; 1044 case ARMISD::FMIN: return "ARMISD::FMIN"; 1045 case ARMISD::BFI: return "ARMISD::BFI"; 1046 case ARMISD::VORRIMM: return "ARMISD::VORRIMM"; 1047 case ARMISD::VBICIMM: return "ARMISD::VBICIMM"; 1048 case ARMISD::VBSL: return "ARMISD::VBSL"; 1049 case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP"; 1050 case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP"; 1051 case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP"; 1052 case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD"; 1053 case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD"; 1054 case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD"; 1055 case ARMISD::VLD4_UPD: return "ARMISD::VLD4_UPD"; 1056 case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD"; 1057 case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD"; 1058 case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD"; 1059 case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD"; 1060 case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD"; 1061 case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD"; 1062 case ARMISD::VST1_UPD: return "ARMISD::VST1_UPD"; 1063 case ARMISD::VST2_UPD: return "ARMISD::VST2_UPD"; 1064 case ARMISD::VST3_UPD: return "ARMISD::VST3_UPD"; 1065 case ARMISD::VST4_UPD: return "ARMISD::VST4_UPD"; 1066 case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD"; 1067 case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD"; 1068 case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD"; 1069 } 1070} 1071 1072EVT ARMTargetLowering::getSetCCResultType(EVT VT) const { 1073 if (!VT.isVector()) return getPointerTy(); 1074 return VT.changeVectorElementTypeToInteger(); 1075} 1076 1077/// getRegClassFor - Return the register class 
/// that should be used for the specified value type.
const TargetRegisterClass *ARMTargetLowering::getRegClassFor(MVT VT) const {
  // Map v4i64 to QQ registers but do not make the type legal. Similarly map
  // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
  // load / store 4 to 8 consecutive D registers.
  if (Subtarget->hasNEON()) {
    if (VT == MVT::v4i64)
      return &ARM::QQPRRegClass;
    if (VT == MVT::v8i64)
      return &ARM::QQQQPRRegClass;
  }
  return TargetLowering::getRegClassFor(VT);
}

// Create a fast isel object.
FastISel *
ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                  const TargetLibraryInfo *libInfo) const {
  return ARM::createFastISel(funcInfo, libInfo);
}

/// getMaximalGlobalOffset - Returns the maximal possible offset which can
/// be used for loads / stores from the global.
unsigned ARMTargetLowering::getMaximalGlobalOffset() const {
  return (Subtarget->isThumb1Only() ? 127 : 4095);
}

Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
  unsigned NumVals = N->getNumValues();
  if (!NumVals)
    return Sched::RegPressure;

  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue || VT == MVT::Other)
      continue;
    if (VT.isFloatingPoint() || VT.isVector())
      return Sched::ILP;
  }

  if (!N->isMachineOpcode())
    return Sched::RegPressure;

  // Loads are scheduled for latency even if the instruction itinerary
  // is not available.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());

  if (MCID.getNumDefs() == 0)
    return Sched::RegPressure;
  if (!Itins->isEmpty() &&
      Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
    return Sched::ILP;

  return Sched::RegPressure;
}

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE:  return ARMCC::NE;
  case ISD::SETEQ:  return ARMCC::EQ;
  case ISD::SETGT:  return ARMCC::GT;
  case ISD::SETGE:  return ARMCC::GE;
  case ISD::SETLT:  return ARMCC::LT;
  case ISD::SETLE:  return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
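/// Comparisons that cannot be expressed with a single ARM condition return a
/// second one in CondCode2; it is left as AL when one condition suffices.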
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
}

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "ARMGenCallingConv.inc"

/// CCAssignFnForNode - Selects the correct CCAssignFn for the
/// given CallingConvention value.
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
                                                 bool Return,
                                                 bool isVarArg) const {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
    if (Subtarget->hasVFP2() && !isVarArg) {
      if (!Subtarget->isAAPCS_ABI())
        return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
      // For AAPCS ABI targets, just use VFP variant of the calling convention.
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    }
    // Fallthrough
  case CallingConv::C: {
    // Use target triple & subtarget features to do actual dispatch.
    if (!Subtarget->isAAPCS_ABI())
      return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
    else if (Subtarget->hasVFP2() &&
             getTargetMachine().Options.FloatABIType == FloatABI::Hard &&
             !isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  }
  case CallingConv::ARM_AAPCS_VFP:
    if (!isVarArg)
      return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
    // Fallthrough
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::GHC:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
  }
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   DebugLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals,
                                   bool isThisReturn, SDValue ThisVal) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs, *DAG.getContext(), Call);
  CCInfo.AnalyzeCallResult(Ins,
                           CCAssignFnForNode(CallConv, /* Return*/ true,
                                             isVarArg));

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    // Pass 'this' value directly from the argument to return value, to avoid
    // reg unit interference
    if (i == 0 && isThisReturn) {
      assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 &&
             "unexpected return calling convention register assignment");
      InVals.push_back(ThisVal);
      continue;
    }

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, MVT::i32));
      }
    } else {
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

/// LowerMemOpCallTo - Store the argument to the stack.
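/// The slot address is formed by adding the argument's stack offset to the
/// current stack pointer.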
1314SDValue 1315ARMTargetLowering::LowerMemOpCallTo(SDValue Chain, 1316 SDValue StackPtr, SDValue Arg, 1317 DebugLoc dl, SelectionDAG &DAG, 1318 const CCValAssign &VA, 1319 ISD::ArgFlagsTy Flags) const { 1320 unsigned LocMemOffset = VA.getLocMemOffset(); 1321 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); 1322 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); 1323 return DAG.getStore(Chain, dl, Arg, PtrOff, 1324 MachinePointerInfo::getStack(LocMemOffset), 1325 false, false, 0); 1326} 1327 1328void ARMTargetLowering::PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG, 1329 SDValue Chain, SDValue &Arg, 1330 RegsToPassVector &RegsToPass, 1331 CCValAssign &VA, CCValAssign &NextVA, 1332 SDValue &StackPtr, 1333 SmallVector<SDValue, 8> &MemOpChains, 1334 ISD::ArgFlagsTy Flags) const { 1335 1336 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 1337 DAG.getVTList(MVT::i32, MVT::i32), Arg); 1338 RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd)); 1339 1340 if (NextVA.isRegLoc()) 1341 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1))); 1342 else { 1343 assert(NextVA.isMemLoc()); 1344 if (StackPtr.getNode() == 0) 1345 StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy()); 1346 1347 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1), 1348 dl, DAG, NextVA, 1349 Flags)); 1350 } 1351} 1352 1353/// LowerCall - Lowering a call into a callseq_start <- 1354/// ARMISD:CALL <- callseq_end chain. Also add input and output parameter 1355/// nodes. 1356SDValue 1357ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, 1358 SmallVectorImpl<SDValue> &InVals) const { 1359 SelectionDAG &DAG = CLI.DAG; 1360 DebugLoc &dl = CLI.DL; 1361 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs; 1362 SmallVector<SDValue, 32> &OutVals = CLI.OutVals; 1363 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins; 1364 SDValue Chain = CLI.Chain; 1365 SDValue Callee = CLI.Callee; 1366 bool &isTailCall = CLI.IsTailCall; 1367 CallingConv::ID CallConv = CLI.CallConv; 1368 bool doesNotRet = CLI.DoesNotReturn; 1369 bool isVarArg = CLI.IsVarArg; 1370 1371 MachineFunction &MF = DAG.getMachineFunction(); 1372 bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet(); 1373 bool isThisReturn = false; 1374 bool isSibCall = false; 1375 // Disable tail calls if they're not supported. 1376 if (!EnableARMTailCalls && !Subtarget->supportsTailCall()) 1377 isTailCall = false; 1378 if (isTailCall) { 1379 // Check if it's really possible to do a tail call. 1380 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, 1381 isVarArg, isStructRet, MF.getFunction()->hasStructRetAttr(), 1382 Outs, OutVals, Ins, DAG); 1383 // We don't support GuaranteedTailCallOpt for ARM, only automatically 1384 // detected sibcalls. 1385 if (isTailCall) { 1386 ++NumTailCalls; 1387 isSibCall = true; 1388 } 1389 } 1390 1391 // Analyze operands of the call, assigning locations to each operand. 1392 SmallVector<CCValAssign, 16> ArgLocs; 1393 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 1394 getTargetMachine(), ArgLocs, *DAG.getContext(), Call); 1395 CCInfo.AnalyzeCallOperands(Outs, 1396 CCAssignFnForNode(CallConv, /* Return*/ false, 1397 isVarArg)); 1398 1399 // Get a count of how many bytes are to be pushed on the stack. 1400 unsigned NumBytes = CCInfo.getNextStackOffset(); 1401 1402 // For tail calls, memory operands are available in our caller's stack. 
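  // A sibcall reuses the caller's incoming argument area, so no additional
  // stack space is reserved and no callseq_start is emitted for it below.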
1403 if (isSibCall) 1404 NumBytes = 0; 1405 1406 // Adjust the stack pointer for the new arguments... 1407 // These operations are automatically eliminated by the prolog/epilog pass 1408 if (!isSibCall) 1409 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); 1410 1411 SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy()); 1412 1413 RegsToPassVector RegsToPass; 1414 SmallVector<SDValue, 8> MemOpChains; 1415 1416 // Walk the register/memloc assignments, inserting copies/loads. In the case 1417 // of tail call optimization, arguments are handled later. 1418 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); 1419 i != e; 1420 ++i, ++realArgIdx) { 1421 CCValAssign &VA = ArgLocs[i]; 1422 SDValue Arg = OutVals[realArgIdx]; 1423 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 1424 bool isByVal = Flags.isByVal(); 1425 1426 // Promote the value if needed. 1427 switch (VA.getLocInfo()) { 1428 default: llvm_unreachable("Unknown loc info!"); 1429 case CCValAssign::Full: break; 1430 case CCValAssign::SExt: 1431 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 1432 break; 1433 case CCValAssign::ZExt: 1434 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 1435 break; 1436 case CCValAssign::AExt: 1437 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 1438 break; 1439 case CCValAssign::BCvt: 1440 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 1441 break; 1442 } 1443 1444 // f64 and v2f64 might be passed in i32 pairs and must be split into pieces 1445 if (VA.needsCustom()) { 1446 if (VA.getLocVT() == MVT::v2f64) { 1447 SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1448 DAG.getConstant(0, MVT::i32)); 1449 SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 1450 DAG.getConstant(1, MVT::i32)); 1451 1452 PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass, 1453 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags); 1454 1455 VA = ArgLocs[++i]; // skip ahead to next loc 1456 if (VA.isRegLoc()) { 1457 PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass, 1458 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags); 1459 } else { 1460 assert(VA.isMemLoc()); 1461 1462 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1, 1463 dl, DAG, VA, Flags)); 1464 } 1465 } else { 1466 PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i], 1467 StackPtr, MemOpChains, Flags); 1468 } 1469 } else if (VA.isRegLoc()) { 1470 if (realArgIdx == 0 && Flags.isReturned() && Outs[0].VT == MVT::i32) { 1471 assert(VA.getLocVT() == MVT::i32 && 1472 "unexpected calling convention register assignment"); 1473 assert(!Ins.empty() && Ins[0].VT == MVT::i32 && 1474 "unexpected use of 'returned'"); 1475 isThisReturn = true; 1476 } 1477 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 1478 } else if (isByVal) { 1479 assert(VA.isMemLoc()); 1480 unsigned offset = 0; 1481 1482 // True if this byval aggregate will be split between registers 1483 // and memory. 
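      // The words that fit in the remaining r0-r3 registers are loaded and
      // passed in registers below; any remainder is copied to its stack slot
      // with ARMISD::COPY_STRUCT_BYVAL. For example (illustrative only): a
      // 12-byte byval with only r2 and r3 still free passes 8 bytes in r2/r3
      // and copies the last 4 bytes to the outgoing stack area.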
      unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
      unsigned CurByValIdx = CCInfo.getInRegsParamsProceed();

      if (CurByValIdx < ByValArgsCount) {

        unsigned RegBegin, RegEnd;
        CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);

        EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
        unsigned int i, j;
        for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
          SDValue Const = DAG.getConstant(4*i, MVT::i32);
          SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
          SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
                                     MachinePointerInfo(),
                                     false, false, false, 0);
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(j, Load));
        }

        // If the parameter does not fit entirely in registers, "offset" (the
        // number of words passed in registers) lets us compute the stack slot
        // for the remaining part correctly.
        offset = RegEnd - RegBegin;

        CCInfo.nextInRegsParam();
      }

      if (Flags.getByValSize() > 4*offset) {
        unsigned LocMemOffset = VA.getLocMemOffset();
        SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset);
        SDValue Dst = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr,
                                  StkPtrOff);
        SDValue SrcOffset = DAG.getIntPtrConstant(4*offset);
        SDValue Src = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg, SrcOffset);
        SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset,
                                           MVT::i32);
        SDValue AlignNode = DAG.getConstant(Flags.getByValAlign(), MVT::i32);

        SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
        SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
        MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs,
                                          Ops, array_lengthof(Ops)));
      }
    } else if (!isSibCall) {
      assert(VA.isMemLoc());

      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  // Tail call byval lowering might overwrite argument registers so in case of
  // tail call optimization the copies to registers are lowered later.
  if (!isTailCall)
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }

  // For tail calls lower the arguments to the 'real' stack slot.
  if (isTailCall) {
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.

    // Do not glue the preceding CopyToReg nodes together with the nodes that
    // follow; the tail-call register copies below form their own glued
    // sequence.
1561 InFlag = SDValue(); 1562 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { 1563 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 1564 RegsToPass[i].second, InFlag); 1565 InFlag = Chain.getValue(1); 1566 } 1567 InFlag = SDValue(); 1568 } 1569 1570 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every 1571 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol 1572 // node so that legalize doesn't hack it. 1573 bool isDirect = false; 1574 bool isARMFunc = false; 1575 bool isLocalARMFunc = false; 1576 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 1577 1578 if (EnableARMLongCalls) { 1579 assert (getTargetMachine().getRelocationModel() == Reloc::Static 1580 && "long-calls with non-static relocation model!"); 1581 // Handle a global address or an external symbol. If it's not one of 1582 // those, the target's already in a register, so we don't need to do 1583 // anything extra. 1584 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1585 const GlobalValue *GV = G->getGlobal(); 1586 // Create a constant pool entry for the callee address 1587 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1588 ARMConstantPoolValue *CPV = 1589 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0); 1590 1591 // Get the address of the callee into a register 1592 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1593 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1594 Callee = DAG.getLoad(getPointerTy(), dl, 1595 DAG.getEntryNode(), CPAddr, 1596 MachinePointerInfo::getConstantPool(), 1597 false, false, false, 0); 1598 } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) { 1599 const char *Sym = S->getSymbol(); 1600 1601 // Create a constant pool entry for the callee address 1602 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1603 ARMConstantPoolValue *CPV = 1604 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, 1605 ARMPCLabelIndex, 0); 1606 // Get the address of the callee into a register 1607 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1608 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1609 Callee = DAG.getLoad(getPointerTy(), dl, 1610 DAG.getEntryNode(), CPAddr, 1611 MachinePointerInfo::getConstantPool(), 1612 false, false, false, 0); 1613 } 1614 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { 1615 const GlobalValue *GV = G->getGlobal(); 1616 isDirect = true; 1617 bool isExt = GV->isDeclaration() || GV->isWeakForLinker(); 1618 bool isStub = (isExt && Subtarget->isTargetDarwin()) && 1619 getTargetMachine().getRelocationModel() != Reloc::Static; 1620 isARMFunc = !Subtarget->isThumb() || isStub; 1621 // ARM call to a local ARM function is predicable. 1622 isLocalARMFunc = !Subtarget->isThumb() && (!isExt || !ARMInterworking); 1623 // tBX takes a register source operand. 
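    // Thumb1 without v5T has no BLX, so such a call cannot branch directly to
    // an ARM-mode callee; instead the callee address is materialized from the
    // constant pool (plus a PIC label add) and the call goes through a
    // register below.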
1624 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1625 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1626 ARMConstantPoolValue *CPV = 1627 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 4); 1628 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1629 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1630 Callee = DAG.getLoad(getPointerTy(), dl, 1631 DAG.getEntryNode(), CPAddr, 1632 MachinePointerInfo::getConstantPool(), 1633 false, false, false, 0); 1634 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1635 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1636 getPointerTy(), Callee, PICLabel); 1637 } else { 1638 // On ELF targets for PIC code, direct calls should go through the PLT 1639 unsigned OpFlags = 0; 1640 if (Subtarget->isTargetELF() && 1641 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1642 OpFlags = ARMII::MO_PLT; 1643 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags); 1644 } 1645 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 1646 isDirect = true; 1647 bool isStub = Subtarget->isTargetDarwin() && 1648 getTargetMachine().getRelocationModel() != Reloc::Static; 1649 isARMFunc = !Subtarget->isThumb() || isStub; 1650 // tBX takes a register source operand. 1651 const char *Sym = S->getSymbol(); 1652 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) { 1653 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 1654 ARMConstantPoolValue *CPV = 1655 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym, 1656 ARMPCLabelIndex, 4); 1657 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4); 1658 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 1659 Callee = DAG.getLoad(getPointerTy(), dl, 1660 DAG.getEntryNode(), CPAddr, 1661 MachinePointerInfo::getConstantPool(), 1662 false, false, false, 0); 1663 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 1664 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, 1665 getPointerTy(), Callee, PICLabel); 1666 } else { 1667 unsigned OpFlags = 0; 1668 // On ELF targets for PIC code, direct calls should go through the PLT 1669 if (Subtarget->isTargetELF() && 1670 getTargetMachine().getRelocationModel() == Reloc::PIC_) 1671 OpFlags = ARMII::MO_PLT; 1672 Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlags); 1673 } 1674 } 1675 1676 // FIXME: handle tail calls differently. 1677 unsigned CallOpc; 1678 bool HasMinSizeAttr = MF.getFunction()->getAttributes(). 1679 hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize); 1680 if (Subtarget->isThumb()) { 1681 if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) 1682 CallOpc = ARMISD::CALL_NOLINK; 1683 else 1684 CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL; 1685 } else { 1686 if (!isDirect && !Subtarget->hasV5TOps()) 1687 CallOpc = ARMISD::CALL_NOLINK; 1688 else if (doesNotRet && isDirect && Subtarget->hasRAS() && 1689 // Emit regular call when code size is the priority 1690 !HasMinSizeAttr) 1691 // "mov lr, pc; b _foo" to avoid confusing the RSP 1692 CallOpc = ARMISD::CALL_NOLINK; 1693 else 1694 CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL; 1695 } 1696 1697 std::vector<SDValue> Ops; 1698 Ops.push_back(Chain); 1699 Ops.push_back(Callee); 1700 1701 // Add argument registers to the end of the list so that they are known live 1702 // into the call. 
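  // The final call node operands are: the chain, the callee, the argument
  // registers, a register mask describing the call-preserved registers, and
  // (if present) the glue from the preceding CopyToReg nodes.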
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const uint32_t *Mask;
  const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
  const ARMBaseRegisterInfo *ARI = static_cast<const ARMBaseRegisterInfo*>(TRI);
  if (isThisReturn)
    // For 'this' returns, use the R0-preserving mask
    Mask = ARI->getThisReturnPreservedMask(CallConv);
  else
    Mask = ARI->getCallPreservedMask(CallConv);

  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (isTailCall)
    return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size());

  // Returns a chain and a flag for retval copy to use.
  Chain = DAG.getNode(CallOpc, dl, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                             DAG.getIntPtrConstant(0, true), InFlag);
  if (!Ins.empty())
    InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
                         InVals, isThisReturn,
                         isThisReturn ? OutVals[0] : SDValue());
}

/// HandleByVal - Every parameter *after* a byval parameter is passed
/// on the stack. Remember the next parameter register to allocate,
/// and then confiscate the rest of the parameter registers to ensure
/// this.
void
ARMTargetLowering::HandleByVal(
    CCState *State, unsigned &size, unsigned Align) const {
  unsigned reg = State->AllocateReg(GPRArgRegs, 4);
  assert((State->getCallOrPrologue() == Prologue ||
          State->getCallOrPrologue() == Call) &&
         "unhandled ParmContext");

  // For in-prologue parameter handling, we also introduce a stack offset
  // for byval registers: see CallingConvLower.cpp, CCState::HandleByVal.
  // This behaviour departs from the AAPCS rules (5.5 Parameter Passing) for
  // how the NSAA should be evaluated (NSAA means "next stacked argument
  // address").
  // So: NextStackOffset = NSAAOffset + SizeOfByValParamsStoredInRegs.
  // Then: NSAAOffset = NextStackOffset - SizeOfByValParamsStoredInRegs.
  unsigned NSAAOffset = State->getNextStackOffset();
  if (State->getCallOrPrologue() != Call) {
    for (unsigned i = 0, e = State->getInRegsParamsCount(); i != e; ++i) {
      unsigned RB, RE;
      State->getInRegsParamInfo(i, RB, RE);
      assert(NSAAOffset >= (RE-RB)*4 &&
             "Stack offset for byval regs no longer accounted for?");
      NSAAOffset -= (RE-RB)*4;
    }
  }
  if ((ARM::R0 <= reg) && (reg <= ARM::R3)) {
    if (Subtarget->isAAPCS_ABI() && Align > 4) {
      unsigned AlignInRegs = Align / 4;
      unsigned Waste = (ARM::R4 - reg) % AlignInRegs;
      for (unsigned i = 0; i < Waste; ++i)
        reg = State->AllocateReg(GPRArgRegs, 4);
    }
    if (reg != 0) {
      unsigned excess = 4 * (ARM::R4 - reg);

      // Special case when NSAA != SP and the parameter is larger than all the
      // remaining GPRs. In that case we cannot split the parameter; it must be
      // sent to the stack in its entirety. We also must set the NCRN to R4, so
      // exhaust the remaining registers.
      if (Subtarget->isAAPCS_ABI() && NSAAOffset != 0 && size > excess) {
        while (State->AllocateReg(GPRArgRegs, 4))
          ;
        return;
      }

      // The first register for the byval parameter is the first register that
      // wasn't allocated before this method was called, i.e. "reg".
      // If the parameter is small enough to fit in the range [reg, r4), the
      // end (one past the last) register is reg + param-size-in-regs;
      // otherwise the parameter is split between registers and the stack, and
      // the end register is r4.
      unsigned ByValRegBegin = reg;
      unsigned ByValRegEnd = (size < excess) ? reg + size/4 : ARM::R4;
      State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd);
      // Note: the first register was already allocated at the top of this
      // function; allocate the remaining registers we need.
      for (unsigned i = reg+1; i != ByValRegEnd; ++i)
        State->AllocateReg(GPRArgRegs, 4);
      // At a call site, a byval parameter that is split between
      // registers and memory needs its size truncated here. In a
      // function prologue, such byval parameters are reassembled in
      // memory, and are not truncated.
      if (State->getCallOrPrologue() == Call) {
        // Set the remaining size to 0 when the whole structure fits in
        // registers.
        if (size < excess)
          size = 0;
        else
          size -= excess;
      }
    }
  }
}

/// MatchingStackOffset - Return true if the given stack call argument is
/// already available in the same position (relatively) of the caller's
/// incoming argument stack.
static
bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
                         MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
                         const TargetInstrInfo *TII) {
  unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
  int FI = INT_MAX;
  if (Arg.getOpcode() == ISD::CopyFromReg) {
    unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
    if (!TargetRegisterInfo::isVirtualRegister(VR))
      return false;
    MachineInstr *Def = MRI->getVRegDef(VR);
    if (!Def)
      return false;
    if (!Flags.isByVal()) {
      if (!TII->isLoadFromStackSlot(Def, FI))
        return false;
    } else {
      return false;
    }
  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
    if (Flags.isByVal())
      // ByVal argument is passed in as a pointer but it's now being
      // dereferenced. e.g.
      // define @foo(%struct.X* %A) {
      //   tail call @bar(%struct.X* byval %A)
      // }
      return false;
    SDValue Ptr = Ld->getBasePtr();
    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
    if (!FINode)
      return false;
    FI = FINode->getIndex();
  } else
    return false;

  assert(FI != INT_MAX);
  if (!MFI->isFixedObjectIndex(FI))
    return false;
  return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
1867bool 1868ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, 1869 CallingConv::ID CalleeCC, 1870 bool isVarArg, 1871 bool isCalleeStructRet, 1872 bool isCallerStructRet, 1873 const SmallVectorImpl<ISD::OutputArg> &Outs, 1874 const SmallVectorImpl<SDValue> &OutVals, 1875 const SmallVectorImpl<ISD::InputArg> &Ins, 1876 SelectionDAG& DAG) const { 1877 const Function *CallerF = DAG.getMachineFunction().getFunction(); 1878 CallingConv::ID CallerCC = CallerF->getCallingConv(); 1879 bool CCMatch = CallerCC == CalleeCC; 1880 1881 // Look for obvious safe cases to perform tail call optimization that do not 1882 // require ABI changes. This is what gcc calls sibcall. 1883 1884 // Do not sibcall optimize vararg calls unless the call site is not passing 1885 // any arguments. 1886 if (isVarArg && !Outs.empty()) 1887 return false; 1888 1889 // Also avoid sibcall optimization if either caller or callee uses struct 1890 // return semantics. 1891 if (isCalleeStructRet || isCallerStructRet) 1892 return false; 1893 1894 // FIXME: Completely disable sibcall for Thumb1 since Thumb1RegisterInfo:: 1895 // emitEpilogue is not ready for them. Thumb tail calls also use t2B, as 1896 // the Thumb1 16-bit unconditional branch doesn't have sufficient relocation 1897 // support in the assembler and linker to be used. This would need to be 1898 // fixed to fully support tail calls in Thumb1. 1899 // 1900 // Doing this is tricky, since the LDM/POP instruction on Thumb doesn't take 1901 // LR. This means if we need to reload LR, it takes an extra instructions, 1902 // which outweighs the value of the tail call; but here we don't know yet 1903 // whether LR is going to be used. Probably the right approach is to 1904 // generate the tail call here and turn it back into CALL/RET in 1905 // emitEpilogue if LR is used. 1906 1907 // Thumb1 PIC calls to external symbols use BX, so they can be tail calls, 1908 // but we need to make sure there are enough registers; the only valid 1909 // registers are the 4 used for parameters. We don't currently do this 1910 // case. 1911 if (Subtarget->isThumb1Only()) 1912 return false; 1913 1914 // If the calling conventions do not match, then we'd better make sure the 1915 // results are returned in the same way as what the caller expects. 1916 if (!CCMatch) { 1917 SmallVector<CCValAssign, 16> RVLocs1; 1918 ARMCCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), 1919 getTargetMachine(), RVLocs1, *DAG.getContext(), Call); 1920 CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg)); 1921 1922 SmallVector<CCValAssign, 16> RVLocs2; 1923 ARMCCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), 1924 getTargetMachine(), RVLocs2, *DAG.getContext(), Call); 1925 CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg)); 1926 1927 if (RVLocs1.size() != RVLocs2.size()) 1928 return false; 1929 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) { 1930 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc()) 1931 return false; 1932 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo()) 1933 return false; 1934 if (RVLocs1[i].isRegLoc()) { 1935 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg()) 1936 return false; 1937 } else { 1938 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset()) 1939 return false; 1940 } 1941 } 1942 } 1943 1944 // If Caller's vararg or byval argument has been split between registers and 1945 // stack, do not perform tail call, since part of the argument is in caller's 1946 // local frame. 
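  // getArgRegsSaveSize() is non-zero when such a split happened in this
  // function's prologue, so it is used as the check here.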
1947 const ARMFunctionInfo *AFI_Caller = DAG.getMachineFunction(). 1948 getInfo<ARMFunctionInfo>(); 1949 if (AFI_Caller->getArgRegsSaveSize()) 1950 return false; 1951 1952 // If the callee takes no arguments then go on to check the results of the 1953 // call. 1954 if (!Outs.empty()) { 1955 // Check if stack adjustment is needed. For now, do not do this if any 1956 // argument is passed on the stack. 1957 SmallVector<CCValAssign, 16> ArgLocs; 1958 ARMCCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), 1959 getTargetMachine(), ArgLocs, *DAG.getContext(), Call); 1960 CCInfo.AnalyzeCallOperands(Outs, 1961 CCAssignFnForNode(CalleeCC, false, isVarArg)); 1962 if (CCInfo.getNextStackOffset()) { 1963 MachineFunction &MF = DAG.getMachineFunction(); 1964 1965 // Check if the arguments are already laid out in the right way as 1966 // the caller's fixed stack objects. 1967 MachineFrameInfo *MFI = MF.getFrameInfo(); 1968 const MachineRegisterInfo *MRI = &MF.getRegInfo(); 1969 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 1970 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); 1971 i != e; 1972 ++i, ++realArgIdx) { 1973 CCValAssign &VA = ArgLocs[i]; 1974 EVT RegVT = VA.getLocVT(); 1975 SDValue Arg = OutVals[realArgIdx]; 1976 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags; 1977 if (VA.getLocInfo() == CCValAssign::Indirect) 1978 return false; 1979 if (VA.needsCustom()) { 1980 // f64 and vector types are split into multiple registers or 1981 // register/stack-slot combinations. The types will not match 1982 // the registers; give up on memory f64 refs until we figure 1983 // out what to do about this. 1984 if (!VA.isRegLoc()) 1985 return false; 1986 if (!ArgLocs[++i].isRegLoc()) 1987 return false; 1988 if (RegVT == MVT::v2f64) { 1989 if (!ArgLocs[++i].isRegLoc()) 1990 return false; 1991 if (!ArgLocs[++i].isRegLoc()) 1992 return false; 1993 } 1994 } else if (!VA.isRegLoc()) { 1995 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, 1996 MFI, MRI, TII)) 1997 return false; 1998 } 1999 } 2000 } 2001 } 2002 2003 return true; 2004} 2005 2006bool 2007ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 2008 MachineFunction &MF, bool isVarArg, 2009 const SmallVectorImpl<ISD::OutputArg> &Outs, 2010 LLVMContext &Context) const { 2011 SmallVector<CCValAssign, 16> RVLocs; 2012 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), RVLocs, Context); 2013 return CCInfo.CheckReturn(Outs, CCAssignFnForNode(CallConv, /*Return=*/true, 2014 isVarArg)); 2015} 2016 2017SDValue 2018ARMTargetLowering::LowerReturn(SDValue Chain, 2019 CallingConv::ID CallConv, bool isVarArg, 2020 const SmallVectorImpl<ISD::OutputArg> &Outs, 2021 const SmallVectorImpl<SDValue> &OutVals, 2022 DebugLoc dl, SelectionDAG &DAG) const { 2023 2024 // CCValAssign - represent the assignment of the return value to a location. 2025 SmallVector<CCValAssign, 16> RVLocs; 2026 2027 // CCState - Info about the registers and stack slots. 2028 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), 2029 getTargetMachine(), RVLocs, *DAG.getContext(), Call); 2030 2031 // Analyze outgoing return values. 2032 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true, 2033 isVarArg)); 2034 2035 SDValue Flag; 2036 SmallVector<SDValue, 4> RetOps; 2037 RetOps.push_back(Chain); // Operand #0 = Chain (updated below) 2038 2039 // Copy the result values into the output registers. 
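  // f64 values are returned as two i32 halves via ARMISD::VMOVRRD; a v2f64
  // return is handled as two such f64 pieces. Both cases are marked "custom"
  // by the calling convention and expanded in the loop below.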
2040 for (unsigned i = 0, realRVLocIdx = 0; 2041 i != RVLocs.size(); 2042 ++i, ++realRVLocIdx) { 2043 CCValAssign &VA = RVLocs[i]; 2044 assert(VA.isRegLoc() && "Can only return in registers!"); 2045 2046 SDValue Arg = OutVals[realRVLocIdx]; 2047 2048 switch (VA.getLocInfo()) { 2049 default: llvm_unreachable("Unknown loc info!"); 2050 case CCValAssign::Full: break; 2051 case CCValAssign::BCvt: 2052 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg); 2053 break; 2054 } 2055 2056 if (VA.needsCustom()) { 2057 if (VA.getLocVT() == MVT::v2f64) { 2058 // Extract the first half and return it in two registers. 2059 SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 2060 DAG.getConstant(0, MVT::i32)); 2061 SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl, 2062 DAG.getVTList(MVT::i32, MVT::i32), Half); 2063 2064 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), HalfGPRs, Flag); 2065 Flag = Chain.getValue(1); 2066 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2067 VA = RVLocs[++i]; // skip ahead to next loc 2068 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), 2069 HalfGPRs.getValue(1), Flag); 2070 Flag = Chain.getValue(1); 2071 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2072 VA = RVLocs[++i]; // skip ahead to next loc 2073 2074 // Extract the 2nd half and fall through to handle it as an f64 value. 2075 Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg, 2076 DAG.getConstant(1, MVT::i32)); 2077 } 2078 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is 2079 // available. 2080 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl, 2081 DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1); 2082 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag); 2083 Flag = Chain.getValue(1); 2084 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2085 VA = RVLocs[++i]; // skip ahead to next loc 2086 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1), 2087 Flag); 2088 } else 2089 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); 2090 2091 // Guarantee that all emitted copies are 2092 // stuck together, avoiding something bad. 2093 Flag = Chain.getValue(1); 2094 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 2095 } 2096 2097 // Update chain and glue. 2098 RetOps[0] = Chain; 2099 if (Flag.getNode()) 2100 RetOps.push_back(Flag); 2101 2102 return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, 2103 RetOps.data(), RetOps.size()); 2104} 2105 2106bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { 2107 if (N->getNumValues() != 1) 2108 return false; 2109 if (!N->hasNUsesOfValue(1, 0)) 2110 return false; 2111 2112 SDValue TCChain = Chain; 2113 SDNode *Copy = *N->use_begin(); 2114 if (Copy->getOpcode() == ISD::CopyToReg) { 2115 // If the copy has a glue operand, we conservatively assume it isn't safe to 2116 // perform a tail call. 2117 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) 2118 return false; 2119 TCChain = Copy->getOperand(0); 2120 } else if (Copy->getOpcode() == ARMISD::VMOVRRD) { 2121 SDNode *VMov = Copy; 2122 // f64 returned in a pair of GPRs. 
2123 SmallPtrSet<SDNode*, 2> Copies; 2124 for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end(); 2125 UI != UE; ++UI) { 2126 if (UI->getOpcode() != ISD::CopyToReg) 2127 return false; 2128 Copies.insert(*UI); 2129 } 2130 if (Copies.size() > 2) 2131 return false; 2132 2133 for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end(); 2134 UI != UE; ++UI) { 2135 SDValue UseChain = UI->getOperand(0); 2136 if (Copies.count(UseChain.getNode())) 2137 // Second CopyToReg 2138 Copy = *UI; 2139 else 2140 // First CopyToReg 2141 TCChain = UseChain; 2142 } 2143 } else if (Copy->getOpcode() == ISD::BITCAST) { 2144 // f32 returned in a single GPR. 2145 if (!Copy->hasOneUse()) 2146 return false; 2147 Copy = *Copy->use_begin(); 2148 if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0)) 2149 return false; 2150 Chain = Copy->getOperand(0); 2151 } else { 2152 return false; 2153 } 2154 2155 bool HasRet = false; 2156 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); 2157 UI != UE; ++UI) { 2158 if (UI->getOpcode() != ARMISD::RET_FLAG) 2159 return false; 2160 HasRet = true; 2161 } 2162 2163 if (!HasRet) 2164 return false; 2165 2166 Chain = TCChain; 2167 return true; 2168} 2169 2170bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const { 2171 if (!EnableARMTailCalls && !Subtarget->supportsTailCall()) 2172 return false; 2173 2174 if (!CI->isTailCall()) 2175 return false; 2176 2177 return !Subtarget->isThumb1Only(); 2178} 2179 2180// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as 2181// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is 2182// one of the above mentioned nodes. It has to be wrapped because otherwise 2183// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only 2184// be used to form addressing mode. These wrapped nodes will be selected 2185// into MOVi. 2186static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) { 2187 EVT PtrVT = Op.getValueType(); 2188 // FIXME there is no actual debug info here 2189 DebugLoc dl = Op.getDebugLoc(); 2190 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); 2191 SDValue Res; 2192 if (CP->isMachineConstantPoolEntry()) 2193 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, 2194 CP->getAlignment()); 2195 else 2196 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, 2197 CP->getAlignment()); 2198 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res); 2199} 2200 2201unsigned ARMTargetLowering::getJumpTableEncoding() const { 2202 return MachineJumpTableInfo::EK_Inline; 2203} 2204 2205SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op, 2206 SelectionDAG &DAG) const { 2207 MachineFunction &MF = DAG.getMachineFunction(); 2208 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2209 unsigned ARMPCLabelIndex = 0; 2210 DebugLoc DL = Op.getDebugLoc(); 2211 EVT PtrVT = getPointerTy(); 2212 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); 2213 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2214 SDValue CPAddr; 2215 if (RelocM == Reloc::Static) { 2216 CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4); 2217 } else { 2218 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8; 2219 ARMPCLabelIndex = AFI->createPICLabelUId(); 2220 ARMConstantPoolValue *CPV = 2221 ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex, 2222 ARMCP::CPBlockAddress, PCAdj); 2223 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2224 } 2225 CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr); 2226 SDValue Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), CPAddr, 2227 MachinePointerInfo::getConstantPool(), 2228 false, false, false, 0); 2229 if (RelocM == Reloc::Static) 2230 return Result; 2231 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2232 return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel); 2233} 2234 2235// Lower ISD::GlobalTLSAddress using the "general dynamic" model 2236SDValue 2237ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, 2238 SelectionDAG &DAG) const { 2239 DebugLoc dl = GA->getDebugLoc(); 2240 EVT PtrVT = getPointerTy(); 2241 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8; 2242 MachineFunction &MF = DAG.getMachineFunction(); 2243 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2244 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2245 ARMConstantPoolValue *CPV = 2246 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, 2247 ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true); 2248 SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2249 Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument); 2250 Argument = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Argument, 2251 MachinePointerInfo::getConstantPool(), 2252 false, false, false, 0); 2253 SDValue Chain = Argument.getValue(1); 2254 2255 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2256 Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel); 2257 2258 // call __tls_get_addr. 2259 ArgListTy Args; 2260 ArgListEntry Entry; 2261 Entry.Node = Argument; 2262 Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext()); 2263 Args.push_back(Entry); 2264 // FIXME: is there useful debug info available here? 2265 TargetLowering::CallLoweringInfo CLI(Chain, 2266 (Type *) Type::getInt32Ty(*DAG.getContext()), 2267 false, false, false, false, 2268 0, CallingConv::C, /*isTailCall=*/false, 2269 /*doesNotRet=*/false, /*isReturnValueUsed=*/true, 2270 DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG, dl); 2271 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 2272 return CallResult.first; 2273} 2274 2275// Lower ISD::GlobalTLSAddress using the "initial exec" or 2276// "local exec" model. 2277SDValue 2278ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, 2279 SelectionDAG &DAG, 2280 TLSModel::Model model) const { 2281 const GlobalValue *GV = GA->getGlobal(); 2282 DebugLoc dl = GA->getDebugLoc(); 2283 SDValue Offset; 2284 SDValue Chain = DAG.getEntryNode(); 2285 EVT PtrVT = getPointerTy(); 2286 // Get the Thread Pointer 2287 SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 2288 2289 if (model == TLSModel::InitialExec) { 2290 MachineFunction &MF = DAG.getMachineFunction(); 2291 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2292 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2293 // Initial exec model. 2294 unsigned char PCAdj = Subtarget->isThumb() ? 
4 : 8; 2295 ARMConstantPoolValue *CPV = 2296 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex, 2297 ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF, 2298 true); 2299 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2300 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 2301 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2302 MachinePointerInfo::getConstantPool(), 2303 false, false, false, 0); 2304 Chain = Offset.getValue(1); 2305 2306 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2307 Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel); 2308 2309 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2310 MachinePointerInfo::getConstantPool(), 2311 false, false, false, 0); 2312 } else { 2313 // local exec model 2314 assert(model == TLSModel::LocalExec); 2315 ARMConstantPoolValue *CPV = 2316 ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF); 2317 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2318 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset); 2319 Offset = DAG.getLoad(PtrVT, dl, Chain, Offset, 2320 MachinePointerInfo::getConstantPool(), 2321 false, false, false, 0); 2322 } 2323 2324 // The address of the thread local variable is the add of the thread 2325 // pointer with the offset of the variable. 2326 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); 2327} 2328 2329SDValue 2330ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { 2331 // TODO: implement the "local dynamic" model 2332 assert(Subtarget->isTargetELF() && 2333 "TLS not implemented for non-ELF targets"); 2334 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2335 2336 TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal()); 2337 2338 switch (model) { 2339 case TLSModel::GeneralDynamic: 2340 case TLSModel::LocalDynamic: 2341 return LowerToTLSGeneralDynamicModel(GA, DAG); 2342 case TLSModel::InitialExec: 2343 case TLSModel::LocalExec: 2344 return LowerToTLSExecModels(GA, DAG, model); 2345 } 2346 llvm_unreachable("bogus TLS model"); 2347} 2348 2349SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, 2350 SelectionDAG &DAG) const { 2351 EVT PtrVT = getPointerTy(); 2352 DebugLoc dl = Op.getDebugLoc(); 2353 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2354 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 2355 bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility(); 2356 ARMConstantPoolValue *CPV = 2357 ARMConstantPoolConstant::Create(GV, 2358 UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT); 2359 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2360 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2361 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 2362 CPAddr, 2363 MachinePointerInfo::getConstantPool(), 2364 false, false, false, 0); 2365 SDValue Chain = Result.getValue(1); 2366 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT); 2367 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, GOT); 2368 if (!UseGOTOFF) 2369 Result = DAG.getLoad(PtrVT, dl, Chain, Result, 2370 MachinePointerInfo::getGOT(), 2371 false, false, false, 0); 2372 return Result; 2373 } 2374 2375 // If we have T2 ops, we can materialize the address directly via movt/movw 2376 // pair. This is always cheaper. 2377 if (Subtarget->useMovt()) { 2378 ++NumMovwMovt; 2379 // FIXME: Once remat is capable of dealing with instructions with register 2380 // operands, expand this into two nodes. 
2381 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 2382 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2383 } else { 2384 SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 2385 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2386 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2387 MachinePointerInfo::getConstantPool(), 2388 false, false, false, 0); 2389 } 2390} 2391 2392SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, 2393 SelectionDAG &DAG) const { 2394 EVT PtrVT = getPointerTy(); 2395 DebugLoc dl = Op.getDebugLoc(); 2396 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); 2397 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2398 2399 // FIXME: Enable this for static codegen when tool issues are fixed. Also 2400 // update ARMFastISel::ARMMaterializeGV. 2401 if (Subtarget->useMovt() && RelocM != Reloc::Static) { 2402 ++NumMovwMovt; 2403 // FIXME: Once remat is capable of dealing with instructions with register 2404 // operands, expand this into two nodes. 2405 if (RelocM == Reloc::Static) 2406 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT, 2407 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2408 2409 unsigned Wrapper = (RelocM == Reloc::PIC_) 2410 ? ARMISD::WrapperPIC : ARMISD::WrapperDYN; 2411 SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, 2412 DAG.getTargetGlobalAddress(GV, dl, PtrVT)); 2413 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 2414 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result, 2415 MachinePointerInfo::getGOT(), 2416 false, false, false, 0); 2417 return Result; 2418 } 2419 2420 unsigned ARMPCLabelIndex = 0; 2421 SDValue CPAddr; 2422 if (RelocM == Reloc::Static) { 2423 CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4); 2424 } else { 2425 ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); 2426 ARMPCLabelIndex = AFI->createPICLabelUId(); 2427 unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb()?4:8); 2428 ARMConstantPoolValue *CPV = 2429 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 2430 PCAdj); 2431 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2432 } 2433 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2434 2435 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2436 MachinePointerInfo::getConstantPool(), 2437 false, false, false, 0); 2438 SDValue Chain = Result.getValue(1); 2439 2440 if (RelocM == Reloc::PIC_) { 2441 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2442 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2443 } 2444 2445 if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) 2446 Result = DAG.getLoad(PtrVT, dl, Chain, Result, MachinePointerInfo::getGOT(), 2447 false, false, false, 0); 2448 2449 return Result; 2450} 2451 2452SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, 2453 SelectionDAG &DAG) const { 2454 assert(Subtarget->isTargetELF() && 2455 "GLOBAL OFFSET TABLE not implemented for non-ELF targets"); 2456 MachineFunction &MF = DAG.getMachineFunction(); 2457 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2458 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2459 EVT PtrVT = getPointerTy(); 2460 DebugLoc dl = Op.getDebugLoc(); 2461 unsigned PCAdj = Subtarget->isThumb() ? 
4 : 8; 2462 ARMConstantPoolValue *CPV = 2463 ARMConstantPoolSymbol::Create(*DAG.getContext(), "_GLOBAL_OFFSET_TABLE_", 2464 ARMPCLabelIndex, PCAdj); 2465 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2466 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2467 SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2468 MachinePointerInfo::getConstantPool(), 2469 false, false, false, 0); 2470 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2471 return DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2472} 2473 2474SDValue 2475ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { 2476 DebugLoc dl = Op.getDebugLoc(); 2477 SDValue Val = DAG.getConstant(0, MVT::i32); 2478 return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl, 2479 DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0), 2480 Op.getOperand(1), Val); 2481} 2482 2483SDValue 2484ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { 2485 DebugLoc dl = Op.getDebugLoc(); 2486 return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0), 2487 Op.getOperand(1), DAG.getConstant(0, MVT::i32)); 2488} 2489 2490SDValue 2491ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG, 2492 const ARMSubtarget *Subtarget) const { 2493 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 2494 DebugLoc dl = Op.getDebugLoc(); 2495 switch (IntNo) { 2496 default: return SDValue(); // Don't custom lower most intrinsics. 2497 case Intrinsic::arm_thread_pointer: { 2498 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2499 return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT); 2500 } 2501 case Intrinsic::eh_sjlj_lsda: { 2502 MachineFunction &MF = DAG.getMachineFunction(); 2503 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2504 unsigned ARMPCLabelIndex = AFI->createPICLabelUId(); 2505 EVT PtrVT = getPointerTy(); 2506 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 2507 SDValue CPAddr; 2508 unsigned PCAdj = (RelocM != Reloc::PIC_) 2509 ? 0 : (Subtarget->isThumb() ? 4 : 8); 2510 ARMConstantPoolValue *CPV = 2511 ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex, 2512 ARMCP::CPLSDA, PCAdj); 2513 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4); 2514 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr); 2515 SDValue Result = 2516 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), CPAddr, 2517 MachinePointerInfo::getConstantPool(), 2518 false, false, false, 0); 2519 2520 if (RelocM == Reloc::PIC_) { 2521 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, MVT::i32); 2522 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel); 2523 } 2524 return Result; 2525 } 2526 case Intrinsic::arm_neon_vmulls: 2527 case Intrinsic::arm_neon_vmullu: { 2528 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls) 2529 ? ARMISD::VMULLs : ARMISD::VMULLu; 2530 return DAG.getNode(NewOpc, Op.getDebugLoc(), Op.getValueType(), 2531 Op.getOperand(1), Op.getOperand(2)); 2532 } 2533 } 2534} 2535 2536static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, 2537 const ARMSubtarget *Subtarget) { 2538 // FIXME: handle "fence singlethread" more efficiently. 2539 DebugLoc dl = Op.getDebugLoc(); 2540 if (!Subtarget->hasDataBarrier()) { 2541 // Some ARMv6 cpus can support data barriers with an mcr instruction. 2542 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get 2543 // here. 
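    // In this branch the barrier is expressed with the mcr-based
    // ARMISD::MEMBARRIER_MCR node; targets with a real data barrier
    // instruction take the ARMISD::MEMBARRIER (DMB ISH) path below instead.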
2544 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() && 2545 "Unexpected ISD::MEMBARRIER encountered. Should be libcall!"); 2546 return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0), 2547 DAG.getConstant(0, MVT::i32)); 2548 } 2549 2550 return DAG.getNode(ARMISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0), 2551 DAG.getConstant(ARM_MB::ISH, MVT::i32)); 2552} 2553 2554static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, 2555 const ARMSubtarget *Subtarget) { 2556 // ARM pre v5TE and Thumb1 does not have preload instructions. 2557 if (!(Subtarget->isThumb2() || 2558 (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps()))) 2559 // Just preserve the chain. 2560 return Op.getOperand(0); 2561 2562 DebugLoc dl = Op.getDebugLoc(); 2563 unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1; 2564 if (!isRead && 2565 (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension())) 2566 // ARMv7 with MP extension has PLDW. 2567 return Op.getOperand(0); 2568 2569 unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); 2570 if (Subtarget->isThumb()) { 2571 // Invert the bits. 2572 isRead = ~isRead & 1; 2573 isData = ~isData & 1; 2574 } 2575 2576 return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0), 2577 Op.getOperand(1), DAG.getConstant(isRead, MVT::i32), 2578 DAG.getConstant(isData, MVT::i32)); 2579} 2580 2581static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { 2582 MachineFunction &MF = DAG.getMachineFunction(); 2583 ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>(); 2584 2585 // vastart just stores the address of the VarArgsFrameIndex slot into the 2586 // memory location argument. 2587 DebugLoc dl = Op.getDebugLoc(); 2588 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); 2589 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 2590 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 2591 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 2592 MachinePointerInfo(SV), false, false, 0); 2593} 2594 2595SDValue 2596ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA, 2597 SDValue &Root, SelectionDAG &DAG, 2598 DebugLoc dl) const { 2599 MachineFunction &MF = DAG.getMachineFunction(); 2600 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>(); 2601 2602 const TargetRegisterClass *RC; 2603 if (AFI->isThumb1OnlyFunction()) 2604 RC = &ARM::tGPRRegClass; 2605 else 2606 RC = &ARM::GPRRegClass; 2607 2608 // Transform the arguments stored in physical registers into virtual ones. 2609 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2610 SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32); 2611 2612 SDValue ArgValue2; 2613 if (NextVA.isMemLoc()) { 2614 MachineFrameInfo *MFI = MF.getFrameInfo(); 2615 int FI = MFI->CreateFixedObject(4, NextVA.getLocMemOffset(), true); 2616 2617 // Create load node to retrieve arguments from the stack. 
    SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
    ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN,
                            MachinePointerInfo::getFixedStack(FI),
                            false, false, false, 0);
  } else {
    Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
    ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
  }

  return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
}

void
ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF,
                                  unsigned InRegsParamRecordIdx,
                                  unsigned &ArgRegsSize,
                                  unsigned &ArgRegsSaveSize)
  const {
  unsigned NumGPRs;
  if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
    unsigned RBegin, REnd;
    CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
    NumGPRs = REnd - RBegin;
  } else {
    unsigned int firstUnalloced;
    firstUnalloced = CCInfo.getFirstUnallocated(GPRArgRegs,
                                                sizeof(GPRArgRegs) /
                                                sizeof(GPRArgRegs[0]));
    NumGPRs = (firstUnalloced <= 3) ? (4 - firstUnalloced) : 0;
  }

  unsigned Align = MF.getTarget().getFrameLowering()->getStackAlignment();
  ArgRegsSize = NumGPRs * 4;
  ArgRegsSaveSize = (ArgRegsSize + Align - 1) & ~(Align - 1);
}

// The remaining GPRs hold either the beginning of variable-argument
// data, or the beginning of an aggregate passed by value (usually
// byval). Either way, we allocate stack slots adjacent to the data
// provided by our caller, and store the unallocated registers there.
// If this is a variadic function, the va_list pointer will begin with
// these values; otherwise, this reassembles a (byval) structure that
// was split between registers and memory.
// Return: the frame index the registers were stored into.
int
ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
                                  DebugLoc dl, SDValue &Chain,
                                  const Value *OrigArg,
                                  unsigned InRegsParamRecordIdx,
                                  unsigned OffsetFromOrigArg,
                                  unsigned ArgOffset,
                                  bool ForceMutable) const {

  // Currently, two use-cases are possible:
  // Case #1. A non-varargs function, and we meet the first byval parameter.
  //          Set up the first unallocated register as the first byval
  //          register and consume all remaining registers (these two actions
  //          are performed by the HandleByVal method).
  //          Then, here, we initialize the stack frame with
  //          "store-reg" instructions.
  // Case #2. A varargs function that doesn't contain byval parameters.
  //          The same: consume all remaining unallocated registers and
  //          initialize the stack frame.

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned firstRegToSaveIndex, lastRegToSaveIndex;
  unsigned RBegin, REnd;
  if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
    CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
    firstRegToSaveIndex = RBegin - ARM::R0;
    lastRegToSaveIndex = REnd - ARM::R0;
  } else {
    firstRegToSaveIndex = CCInfo.getFirstUnallocated
      (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0]));
    lastRegToSaveIndex = 4;
  }

  unsigned ArgRegsSize, ArgRegsSaveSize;
  computeRegArea(CCInfo, MF, InRegsParamRecordIdx, ArgRegsSize, ArgRegsSaveSize);

  // Store any by-val regs to their spots on the stack so that they may be
  // loaded by dereferencing the formal parameter pointer or va_next.
  // Note: once the stack area for byval/varargs registers has been
  // initialized, it must not be initialized again.
  if (ArgRegsSaveSize) {

    int FrameIndex = MFI->CreateFixedObject(
                      ArgRegsSaveSize,
                      ArgOffset + ArgRegsSaveSize - ArgRegsSize,
                      false);
    SDValue FIN = DAG.getFrameIndex(FrameIndex, getPointerTy());

    SmallVector<SDValue, 4> MemOps;
    for (unsigned i = 0; firstRegToSaveIndex < lastRegToSaveIndex;
         ++firstRegToSaveIndex, ++i) {
      const TargetRegisterClass *RC;
      if (AFI->isThumb1OnlyFunction())
        RC = &ARM::tGPRRegClass;
      else
        RC = &ARM::GPRRegClass;

      unsigned VReg = MF.addLiveIn(GPRArgRegs[firstRegToSaveIndex], RC);
      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
      SDValue Store =
        DAG.getStore(Val.getValue(1), dl, Val, FIN,
                     MachinePointerInfo(OrigArg, OffsetFromOrigArg + 4*i),
                     false, false, 0);
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
                        DAG.getConstant(4, getPointerTy()));
    }

    AFI->setArgRegsSaveSize(ArgRegsSaveSize + AFI->getArgRegsSaveSize());

    if (!MemOps.empty())
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                          &MemOps[0], MemOps.size());
    return FrameIndex;
  } else
    // This will point to the next argument passed via stack.
    return MFI->CreateFixedObject(4, ArgOffset, !ForceMutable);
}

// Set up the stack frame that the va_list pointer will point into.
void
ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                                        DebugLoc dl, SDValue &Chain,
                                        unsigned ArgOffset,
                                        bool ForceMutable) const {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Store any remaining integer argument regs to their spots on the stack so
  // that they may be loaded by dereferencing the result of va_next.
  // If there are no registers to store, just point at the address after the
  // last argument passed on the stack.
  int FrameIndex =
    StoreByValRegs(CCInfo, DAG, dl, Chain, 0, CCInfo.getInRegsParamsCount(),
                   0, ArgOffset, ForceMutable);

  AFI->setVarArgsFrameIndex(FrameIndex);
}

SDValue
ARMTargetLowering::LowerFormalArguments(SDValue Chain,
                                        CallingConv::ID CallConv, bool isVarArg,
                                        const SmallVectorImpl<ISD::InputArg>
                                          &Ins,
                                        DebugLoc dl, SelectionDAG &DAG,
                                        SmallVectorImpl<SDValue> &InVals)
                                          const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                    getTargetMachine(), ArgLocs, *DAG.getContext(), Prologue);
  CCInfo.AnalyzeFormalArguments(Ins,
                                CCAssignFnForNode(CallConv, /* Return*/ false,
                                                  isVarArg));

  SmallVector<SDValue, 16> ArgValues;
  int lastInsIndex = -1;
  SDValue ArgValue;
  Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin();
  unsigned CurArgIdx = 0;

  // ArgRegsSaveSize starts at zero; it is increased each time a byval
  // parameter is met, and again if this is a varargs function.
2794 AFI->setArgRegsSaveSize(0); 2795 2796 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2797 CCValAssign &VA = ArgLocs[i]; 2798 std::advance(CurOrigArg, Ins[VA.getValNo()].OrigArgIndex - CurArgIdx); 2799 CurArgIdx = Ins[VA.getValNo()].OrigArgIndex; 2800 // Arguments stored in registers. 2801 if (VA.isRegLoc()) { 2802 EVT RegVT = VA.getLocVT(); 2803 2804 if (VA.needsCustom()) { 2805 // f64 and vector types are split up into multiple registers or 2806 // combinations of registers and stack slots. 2807 if (VA.getLocVT() == MVT::v2f64) { 2808 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i], 2809 Chain, DAG, dl); 2810 VA = ArgLocs[++i]; // skip ahead to next loc 2811 SDValue ArgValue2; 2812 if (VA.isMemLoc()) { 2813 int FI = MFI->CreateFixedObject(8, VA.getLocMemOffset(), true); 2814 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2815 ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN, 2816 MachinePointerInfo::getFixedStack(FI), 2817 false, false, false, 0); 2818 } else { 2819 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], 2820 Chain, DAG, dl); 2821 } 2822 ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64); 2823 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2824 ArgValue, ArgValue1, DAG.getIntPtrConstant(0)); 2825 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, 2826 ArgValue, ArgValue2, DAG.getIntPtrConstant(1)); 2827 } else 2828 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl); 2829 2830 } else { 2831 const TargetRegisterClass *RC; 2832 2833 if (RegVT == MVT::f32) 2834 RC = &ARM::SPRRegClass; 2835 else if (RegVT == MVT::f64) 2836 RC = &ARM::DPRRegClass; 2837 else if (RegVT == MVT::v2f64) 2838 RC = &ARM::QPRRegClass; 2839 else if (RegVT == MVT::i32) 2840 RC = AFI->isThumb1OnlyFunction() ? 2841 (const TargetRegisterClass*)&ARM::tGPRRegClass : 2842 (const TargetRegisterClass*)&ARM::GPRRegClass; 2843 else 2844 llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering"); 2845 2846 // Transform the arguments in physical registers into virtual ones. 2847 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 2848 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); 2849 } 2850 2851 // If this is an 8 or 16-bit value, it is really passed promoted 2852 // to 32 bits. Insert an assert[sz]ext to capture this, then 2853 // truncate to the right size. 2854 switch (VA.getLocInfo()) { 2855 default: llvm_unreachable("Unknown loc info!"); 2856 case CCValAssign::Full: break; 2857 case CCValAssign::BCvt: 2858 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue); 2859 break; 2860 case CCValAssign::SExt: 2861 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, 2862 DAG.getValueType(VA.getValVT())); 2863 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2864 break; 2865 case CCValAssign::ZExt: 2866 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, 2867 DAG.getValueType(VA.getValVT())); 2868 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); 2869 break; 2870 } 2871 2872 InVals.push_back(ArgValue); 2873 2874 } else { // VA.isRegLoc() 2875 2876 // sanity check 2877 assert(VA.isMemLoc()); 2878 assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered"); 2879 2880 int index = ArgLocs[i].getValNo(); 2881 2882 // Some Ins[] entries become multiple ArgLoc[] entries. 2883 // Process them only once. 2884 if (index != lastInsIndex) 2885 { 2886 ISD::ArgFlagsTy Flags = Ins[index].Flags; 2887 // FIXME: For now, all byval parameter objects are marked mutable. 
2888 // This can be changed with more analysis. 2889 // In case of tail call optimization mark all arguments mutable. 2890 // Since they could be overwritten by lowering of arguments in case of 2891 // a tail call. 2892 if (Flags.isByVal()) { 2893 unsigned CurByValIndex = CCInfo.getInRegsParamsProceed(); 2894 int FrameIndex = StoreByValRegs( 2895 CCInfo, DAG, dl, Chain, CurOrigArg, 2896 CurByValIndex, 2897 Ins[VA.getValNo()].PartOffset, 2898 VA.getLocMemOffset(), 2899 true /*force mutable frames*/); 2900 InVals.push_back(DAG.getFrameIndex(FrameIndex, getPointerTy())); 2901 CCInfo.nextInRegsParam(); 2902 } else { 2903 int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8, 2904 VA.getLocMemOffset(), true); 2905 2906 // Create load nodes to retrieve arguments from the stack. 2907 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); 2908 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, 2909 MachinePointerInfo::getFixedStack(FI), 2910 false, false, false, 0)); 2911 } 2912 lastInsIndex = index; 2913 } 2914 } 2915 } 2916 2917 // varargs 2918 if (isVarArg) 2919 VarArgStyleRegisters(CCInfo, DAG, dl, Chain, 2920 CCInfo.getNextStackOffset()); 2921 2922 return Chain; 2923} 2924 2925/// isFloatingPointZero - Return true if this is +0.0. 2926static bool isFloatingPointZero(SDValue Op) { 2927 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) 2928 return CFP->getValueAPF().isPosZero(); 2929 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { 2930 // Maybe this has already been legalized into the constant pool? 2931 if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) { 2932 SDValue WrapperOp = Op.getOperand(1).getOperand(0); 2933 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp)) 2934 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) 2935 return CFP->getValueAPF().isPosZero(); 2936 } 2937 } 2938 return false; 2939} 2940 2941/// Returns appropriate ARM CMP (cmp) and corresponding condition code for 2942/// the given operands. 2943SDValue 2944ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, 2945 SDValue &ARMcc, SelectionDAG &DAG, 2946 DebugLoc dl) const { 2947 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { 2948 unsigned C = RHSC->getZExtValue(); 2949 if (!isLegalICmpImmediate(C)) { 2950 // Constant does not fit, try adjusting it by one? 2951 switch (CC) { 2952 default: break; 2953 case ISD::SETLT: 2954 case ISD::SETGE: 2955 if (C != 0x80000000 && isLegalICmpImmediate(C-1)) { 2956 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; 2957 RHS = DAG.getConstant(C-1, MVT::i32); 2958 } 2959 break; 2960 case ISD::SETULT: 2961 case ISD::SETUGE: 2962 if (C != 0 && isLegalICmpImmediate(C-1)) { 2963 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; 2964 RHS = DAG.getConstant(C-1, MVT::i32); 2965 } 2966 break; 2967 case ISD::SETLE: 2968 case ISD::SETGT: 2969 if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) { 2970 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE; 2971 RHS = DAG.getConstant(C+1, MVT::i32); 2972 } 2973 break; 2974 case ISD::SETULE: 2975 case ISD::SETUGT: 2976 if (C != 0xffffffff && isLegalICmpImmediate(C+1)) { 2977 CC = (CC == ISD::SETULE) ? 
ISD::SETULT : ISD::SETUGE; 2978 RHS = DAG.getConstant(C+1, MVT::i32); 2979 } 2980 break; 2981 } 2982 } 2983 } 2984 2985 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 2986 ARMISD::NodeType CompareType; 2987 switch (CondCode) { 2988 default: 2989 CompareType = ARMISD::CMP; 2990 break; 2991 case ARMCC::EQ: 2992 case ARMCC::NE: 2993 // Uses only Z Flag 2994 CompareType = ARMISD::CMPZ; 2995 break; 2996 } 2997 ARMcc = DAG.getConstant(CondCode, MVT::i32); 2998 return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS); 2999} 3000 3001/// Returns a appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands. 3002SDValue 3003ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG, 3004 DebugLoc dl) const { 3005 SDValue Cmp; 3006 if (!isFloatingPointZero(RHS)) 3007 Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS); 3008 else 3009 Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS); 3010 return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp); 3011} 3012 3013/// duplicateCmp - Glue values can have only one use, so this function 3014/// duplicates a comparison node. 3015SDValue 3016ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const { 3017 unsigned Opc = Cmp.getOpcode(); 3018 DebugLoc DL = Cmp.getDebugLoc(); 3019 if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ) 3020 return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 3021 3022 assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation"); 3023 Cmp = Cmp.getOperand(0); 3024 Opc = Cmp.getOpcode(); 3025 if (Opc == ARMISD::CMPFP) 3026 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); 3027 else { 3028 assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT"); 3029 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0)); 3030 } 3031 return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp); 3032} 3033 3034SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { 3035 SDValue Cond = Op.getOperand(0); 3036 SDValue SelectTrue = Op.getOperand(1); 3037 SDValue SelectFalse = Op.getOperand(2); 3038 DebugLoc dl = Op.getDebugLoc(); 3039 3040 // Convert: 3041 // 3042 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond) 3043 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) 3044 // 3045 if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { 3046 const ConstantSDNode *CMOVTrue = 3047 dyn_cast<ConstantSDNode>(Cond.getOperand(0)); 3048 const ConstantSDNode *CMOVFalse = 3049 dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 3050 3051 if (CMOVTrue && CMOVFalse) { 3052 unsigned CMOVTrueVal = CMOVTrue->getZExtValue(); 3053 unsigned CMOVFalseVal = CMOVFalse->getZExtValue(); 3054 3055 SDValue True; 3056 SDValue False; 3057 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) { 3058 True = SelectTrue; 3059 False = SelectFalse; 3060 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) { 3061 True = SelectFalse; 3062 False = SelectTrue; 3063 } 3064 3065 if (True.getNode() && False.getNode()) { 3066 EVT VT = Op.getValueType(); 3067 SDValue ARMcc = Cond.getOperand(2); 3068 SDValue CCR = Cond.getOperand(3); 3069 SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG); 3070 assert(True.getValueType() == VT); 3071 return DAG.getNode(ARMISD::CMOV, dl, VT, True, False, ARMcc, CCR, Cmp); 3072 } 3073 } 3074 } 3075 3076 // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the 3077 // undefined bits before doing a full-word comparison with zero. 
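  // That is, (select Cond, T, F) is lowered as
  // (selectcc (and Cond, 1), 0, T, F, setne).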
3078 Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond, 3079 DAG.getConstant(1, Cond.getValueType())); 3080 3081 return DAG.getSelectCC(dl, Cond, 3082 DAG.getConstant(0, Cond.getValueType()), 3083 SelectTrue, SelectFalse, ISD::SETNE); 3084} 3085 3086SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { 3087 EVT VT = Op.getValueType(); 3088 SDValue LHS = Op.getOperand(0); 3089 SDValue RHS = Op.getOperand(1); 3090 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); 3091 SDValue TrueVal = Op.getOperand(2); 3092 SDValue FalseVal = Op.getOperand(3); 3093 DebugLoc dl = Op.getDebugLoc(); 3094 3095 if (LHS.getValueType() == MVT::i32) { 3096 SDValue ARMcc; 3097 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3098 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 3099 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,Cmp); 3100 } 3101 3102 ARMCC::CondCodes CondCode, CondCode2; 3103 FPCCToARMCC(CC, CondCode, CondCode2); 3104 3105 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 3106 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 3107 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3108 SDValue Result = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, 3109 ARMcc, CCR, Cmp); 3110 if (CondCode2 != ARMCC::AL) { 3111 SDValue ARMcc2 = DAG.getConstant(CondCode2, MVT::i32); 3112 // FIXME: Needs another CMP because flag can have but one use. 3113 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl); 3114 Result = DAG.getNode(ARMISD::CMOV, dl, VT, 3115 Result, TrueVal, ARMcc2, CCR, Cmp2); 3116 } 3117 return Result; 3118} 3119 3120/// canChangeToInt - Given the fp compare operand, return true if it is suitable 3121/// to morph to an integer compare sequence. 3122static bool canChangeToInt(SDValue Op, bool &SeenZero, 3123 const ARMSubtarget *Subtarget) { 3124 SDNode *N = Op.getNode(); 3125 if (!N->hasOneUse()) 3126 // Otherwise it requires moving the value from fp to integer registers. 3127 return false; 3128 if (!N->getNumValues()) 3129 return false; 3130 EVT VT = Op.getValueType(); 3131 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow()) 3132 // f32 case is generally profitable. f64 case only makes sense when vcmpe + 3133 // vmrs are very slow, e.g. cortex-a8. 
3134 return false; 3135 3136 if (isFloatingPointZero(Op)) { 3137 SeenZero = true; 3138 return true; 3139 } 3140 return ISD::isNormalLoad(N); 3141} 3142 3143static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { 3144 if (isFloatingPointZero(Op)) 3145 return DAG.getConstant(0, MVT::i32); 3146 3147 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) 3148 return DAG.getLoad(MVT::i32, Op.getDebugLoc(), 3149 Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(), 3150 Ld->isVolatile(), Ld->isNonTemporal(), 3151 Ld->isInvariant(), Ld->getAlignment()); 3152 3153 llvm_unreachable("Unknown VFP cmp argument!"); 3154} 3155 3156static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, 3157 SDValue &RetVal1, SDValue &RetVal2) { 3158 if (isFloatingPointZero(Op)) { 3159 RetVal1 = DAG.getConstant(0, MVT::i32); 3160 RetVal2 = DAG.getConstant(0, MVT::i32); 3161 return; 3162 } 3163 3164 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) { 3165 SDValue Ptr = Ld->getBasePtr(); 3166 RetVal1 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 3167 Ld->getChain(), Ptr, 3168 Ld->getPointerInfo(), 3169 Ld->isVolatile(), Ld->isNonTemporal(), 3170 Ld->isInvariant(), Ld->getAlignment()); 3171 3172 EVT PtrType = Ptr.getValueType(); 3173 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4); 3174 SDValue NewPtr = DAG.getNode(ISD::ADD, Op.getDebugLoc(), 3175 PtrType, Ptr, DAG.getConstant(4, PtrType)); 3176 RetVal2 = DAG.getLoad(MVT::i32, Op.getDebugLoc(), 3177 Ld->getChain(), NewPtr, 3178 Ld->getPointerInfo().getWithOffset(4), 3179 Ld->isVolatile(), Ld->isNonTemporal(), 3180 Ld->isInvariant(), NewAlign); 3181 return; 3182 } 3183 3184 llvm_unreachable("Unknown VFP cmp argument!"); 3185} 3186 3187/// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some 3188/// f32 and even f64 comparisons to integer ones. 3189SDValue 3190ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { 3191 SDValue Chain = Op.getOperand(0); 3192 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 3193 SDValue LHS = Op.getOperand(2); 3194 SDValue RHS = Op.getOperand(3); 3195 SDValue Dest = Op.getOperand(4); 3196 DebugLoc dl = Op.getDebugLoc(); 3197 3198 bool LHSSeenZero = false; 3199 bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget); 3200 bool RHSSeenZero = false; 3201 bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget); 3202 if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) { 3203 // If unsafe fp math optimization is enabled and there are no other uses of 3204 // the CMP operands, and the condition code is EQ or NE, we can optimize it 3205 // to an integer comparison. 
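    // For example, an f32 equality test against +/-0.0 becomes an integer
    // compare of (bitcast(x) & 0x7fffffff) against (bitcast(y) & 0x7fffffff);
    // clearing the sign bits keeps -0.0 == +0.0 behaving as it does in FP.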
3206 if (CC == ISD::SETOEQ) 3207 CC = ISD::SETEQ; 3208 else if (CC == ISD::SETUNE) 3209 CC = ISD::SETNE; 3210 3211 SDValue Mask = DAG.getConstant(0x7fffffff, MVT::i32); 3212 SDValue ARMcc; 3213 if (LHS.getValueType() == MVT::f32) { 3214 LHS = DAG.getNode(ISD::AND, dl, MVT::i32, 3215 bitcastf32Toi32(LHS, DAG), Mask); 3216 RHS = DAG.getNode(ISD::AND, dl, MVT::i32, 3217 bitcastf32Toi32(RHS, DAG), Mask); 3218 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 3219 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3220 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 3221 Chain, Dest, ARMcc, CCR, Cmp); 3222 } 3223 3224 SDValue LHS1, LHS2; 3225 SDValue RHS1, RHS2; 3226 expandf64Toi32(LHS, DAG, LHS1, LHS2); 3227 expandf64Toi32(RHS, DAG, RHS1, RHS2); 3228 LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask); 3229 RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask); 3230 ARMCC::CondCodes CondCode = IntCCToARMCC(CC); 3231 ARMcc = DAG.getConstant(CondCode, MVT::i32); 3232 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 3233 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest }; 3234 return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops, 7); 3235 } 3236 3237 return SDValue(); 3238} 3239 3240SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { 3241 SDValue Chain = Op.getOperand(0); 3242 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); 3243 SDValue LHS = Op.getOperand(2); 3244 SDValue RHS = Op.getOperand(3); 3245 SDValue Dest = Op.getOperand(4); 3246 DebugLoc dl = Op.getDebugLoc(); 3247 3248 if (LHS.getValueType() == MVT::i32) { 3249 SDValue ARMcc; 3250 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl); 3251 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3252 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, 3253 Chain, Dest, ARMcc, CCR, Cmp); 3254 } 3255 3256 assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64); 3257 3258 if (getTargetMachine().Options.UnsafeFPMath && 3259 (CC == ISD::SETEQ || CC == ISD::SETOEQ || 3260 CC == ISD::SETNE || CC == ISD::SETUNE)) { 3261 SDValue Result = OptimizeVFPBrcond(Op, DAG); 3262 if (Result.getNode()) 3263 return Result; 3264 } 3265 3266 ARMCC::CondCodes CondCode, CondCode2; 3267 FPCCToARMCC(CC, CondCode, CondCode2); 3268 3269 SDValue ARMcc = DAG.getConstant(CondCode, MVT::i32); 3270 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); 3271 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3272 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); 3273 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp }; 3274 SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 3275 if (CondCode2 != ARMCC::AL) { 3276 ARMcc = DAG.getConstant(CondCode2, MVT::i32); 3277 SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) }; 3278 Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops, 5); 3279 } 3280 return Res; 3281} 3282 3283SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { 3284 SDValue Chain = Op.getOperand(0); 3285 SDValue Table = Op.getOperand(1); 3286 SDValue Index = Op.getOperand(2); 3287 DebugLoc dl = Op.getDebugLoc(); 3288 3289 EVT PTy = getPointerTy(); 3290 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); 3291 ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); 3292 SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy); 3293 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); 3294 Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI, UId); 3295 Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, 
PTy)); 3296 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table); 3297 if (Subtarget->isThumb2()) { 3298 // Thumb2 uses a two-level jump. That is, it jumps into the jump table 3299 // which does another jump to the destination. This also makes it easier 3300 // to translate it to TBB / TBH later. 3301 // FIXME: This might not work if the function is extremely large. 3302 return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain, 3303 Addr, Op.getOperand(2), JTI, UId); 3304 } 3305 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { 3306 Addr = DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr, 3307 MachinePointerInfo::getJumpTable(), 3308 false, false, false, 0); 3309 Chain = Addr.getValue(1); 3310 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table); 3311 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 3312 } else { 3313 Addr = DAG.getLoad(PTy, dl, Chain, Addr, 3314 MachinePointerInfo::getJumpTable(), 3315 false, false, false, 0); 3316 Chain = Addr.getValue(1); 3317 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI, UId); 3318 } 3319} 3320 3321static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 3322 EVT VT = Op.getValueType(); 3323 DebugLoc dl = Op.getDebugLoc(); 3324 3325 if (Op.getValueType().getVectorElementType() == MVT::i32) { 3326 if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32) 3327 return Op; 3328 return DAG.UnrollVectorOp(Op.getNode()); 3329 } 3330 3331 assert(Op.getOperand(0).getValueType() == MVT::v4f32 && 3332 "Invalid type for custom lowering!"); 3333 if (VT != MVT::v4i16) 3334 return DAG.UnrollVectorOp(Op.getNode()); 3335 3336 Op = DAG.getNode(Op.getOpcode(), dl, MVT::v4i32, Op.getOperand(0)); 3337 return DAG.getNode(ISD::TRUNCATE, dl, VT, Op); 3338} 3339 3340static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 3341 EVT VT = Op.getValueType(); 3342 if (VT.isVector()) 3343 return LowerVectorFP_TO_INT(Op, DAG); 3344 3345 DebugLoc dl = Op.getDebugLoc(); 3346 unsigned Opc; 3347 3348 switch (Op.getOpcode()) { 3349 default: llvm_unreachable("Invalid opcode!"); 3350 case ISD::FP_TO_SINT: 3351 Opc = ARMISD::FTOSI; 3352 break; 3353 case ISD::FP_TO_UINT: 3354 Opc = ARMISD::FTOUI; 3355 break; 3356 } 3357 Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0)); 3358 return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op); 3359} 3360 3361static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 3362 EVT VT = Op.getValueType(); 3363 DebugLoc dl = Op.getDebugLoc(); 3364 3365 if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) { 3366 if (VT.getVectorElementType() == MVT::f32) 3367 return Op; 3368 return DAG.UnrollVectorOp(Op.getNode()); 3369 } 3370 3371 assert(Op.getOperand(0).getValueType() == MVT::v4i16 && 3372 "Invalid type for custom lowering!"); 3373 if (VT != MVT::v4f32) 3374 return DAG.UnrollVectorOp(Op.getNode()); 3375 3376 unsigned CastOpc; 3377 unsigned Opc; 3378 switch (Op.getOpcode()) { 3379 default: llvm_unreachable("Invalid opcode!"); 3380 case ISD::SINT_TO_FP: 3381 CastOpc = ISD::SIGN_EXTEND; 3382 Opc = ISD::SINT_TO_FP; 3383 break; 3384 case ISD::UINT_TO_FP: 3385 CastOpc = ISD::ZERO_EXTEND; 3386 Opc = ISD::UINT_TO_FP; 3387 break; 3388 } 3389 3390 Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0)); 3391 return DAG.getNode(Opc, dl, VT, Op); 3392} 3393 3394static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 3395 EVT VT = Op.getValueType(); 3396 if (VT.isVector()) 3397 return LowerVectorINT_TO_FP(Op, DAG); 3398 3399 DebugLoc dl = 
Op.getDebugLoc(); 3400 unsigned Opc; 3401 3402 switch (Op.getOpcode()) { 3403 default: llvm_unreachable("Invalid opcode!"); 3404 case ISD::SINT_TO_FP: 3405 Opc = ARMISD::SITOF; 3406 break; 3407 case ISD::UINT_TO_FP: 3408 Opc = ARMISD::UITOF; 3409 break; 3410 } 3411 3412 Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0)); 3413 return DAG.getNode(Opc, dl, VT, Op); 3414} 3415 3416SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { 3417 // Implement fcopysign with a fabs and a conditional fneg. 3418 SDValue Tmp0 = Op.getOperand(0); 3419 SDValue Tmp1 = Op.getOperand(1); 3420 DebugLoc dl = Op.getDebugLoc(); 3421 EVT VT = Op.getValueType(); 3422 EVT SrcVT = Tmp1.getValueType(); 3423 bool InGPR = Tmp0.getOpcode() == ISD::BITCAST || 3424 Tmp0.getOpcode() == ARMISD::VMOVDRR; 3425 bool UseNEON = !InGPR && Subtarget->hasNEON(); 3426 3427 if (UseNEON) { 3428 // Use VBSL to copy the sign bit. 3429 unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80); 3430 SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32, 3431 DAG.getTargetConstant(EncodedVal, MVT::i32)); 3432 EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64; 3433 if (VT == MVT::f64) 3434 Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT, 3435 DAG.getNode(ISD::BITCAST, dl, OpVT, Mask), 3436 DAG.getConstant(32, MVT::i32)); 3437 else /*if (VT == MVT::f32)*/ 3438 Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0); 3439 if (SrcVT == MVT::f32) { 3440 Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1); 3441 if (VT == MVT::f64) 3442 Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT, 3443 DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1), 3444 DAG.getConstant(32, MVT::i32)); 3445 } else if (VT == MVT::f32) 3446 Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64, 3447 DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1), 3448 DAG.getConstant(32, MVT::i32)); 3449 Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0); 3450 Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1); 3451 3452 SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff), 3453 MVT::i32); 3454 AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes); 3455 SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask, 3456 DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes)); 3457 3458 SDValue Res = DAG.getNode(ISD::OR, dl, OpVT, 3459 DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask), 3460 DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot)); 3461 if (VT == MVT::f32) { 3462 Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res); 3463 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res, 3464 DAG.getConstant(0, MVT::i32)); 3465 } else { 3466 Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res); 3467 } 3468 3469 return Res; 3470 } 3471 3472 // Bitcast operand 1 to i32. 3473 if (SrcVT == MVT::f64) 3474 Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 3475 &Tmp1, 1).getValue(1); 3476 Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1); 3477 3478 // Or in the signbit with integer operations. 3479 SDValue Mask1 = DAG.getConstant(0x80000000, MVT::i32); 3480 SDValue Mask2 = DAG.getConstant(0x7fffffff, MVT::i32); 3481 Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1); 3482 if (VT == MVT::f32) { 3483 Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32, 3484 DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2); 3485 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, 3486 DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1)); 3487 } 3488 3489 // f64: Or the high part with signbit and then combine two parts. 
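  // VMOVRRD splits the f64 into its low and high i32 words; the sign bit taken
  // from Tmp1 above is OR'd into the cleared high word, and VMOVDRR then
  // reassembles the f64 from the two halves.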
3490 Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), 3491 &Tmp0, 1); 3492 SDValue Lo = Tmp0.getValue(0); 3493 SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2); 3494 Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1); 3495 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi); 3496} 3497 3498SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ 3499 MachineFunction &MF = DAG.getMachineFunction(); 3500 MachineFrameInfo *MFI = MF.getFrameInfo(); 3501 MFI->setReturnAddressIsTaken(true); 3502 3503 EVT VT = Op.getValueType(); 3504 DebugLoc dl = Op.getDebugLoc(); 3505 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3506 if (Depth) { 3507 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 3508 SDValue Offset = DAG.getConstant(4, MVT::i32); 3509 return DAG.getLoad(VT, dl, DAG.getEntryNode(), 3510 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset), 3511 MachinePointerInfo(), false, false, false, 0); 3512 } 3513 3514 // Return LR, which contains the return address. Mark it an implicit live-in. 3515 unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32)); 3516 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT); 3517} 3518 3519SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { 3520 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); 3521 MFI->setFrameAddressIsTaken(true); 3522 3523 EVT VT = Op.getValueType(); 3524 DebugLoc dl = Op.getDebugLoc(); // FIXME probably not meaningful 3525 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 3526 unsigned FrameReg = (Subtarget->isThumb() || Subtarget->isTargetDarwin()) 3527 ? ARM::R7 : ARM::R11; 3528 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); 3529 while (Depth--) 3530 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, 3531 MachinePointerInfo(), 3532 false, false, false, 0); 3533 return FrameAddr; 3534} 3535 3536/// Custom Expand long vector extensions, where size(DestVec) > 2*size(SrcVec), 3537/// and size(DestVec) > 128-bits. 3538/// This is achieved by doing the one extension from the SrcVec, splitting the 3539/// result, extending these parts, and then concatenating these into the 3540/// destination. 3541static SDValue ExpandVectorExtension(SDNode *N, SelectionDAG &DAG) { 3542 SDValue Op = N->getOperand(0); 3543 EVT SrcVT = Op.getValueType(); 3544 EVT DestVT = N->getValueType(0); 3545 3546 assert(DestVT.getSizeInBits() > 128 && 3547 "Custom sext/zext expansion needs >128-bit vector."); 3548 // If this is a normal length extension, use the default expansion. 
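  // For example, a v8i8 -> v8i32 extension (64 -> 256 bits) is handled here by
  // extending to v8i16, splitting into two v4i16 halves, extending each half
  // to v4i32, and concatenating the results; other ratios are left to the
  // default expansion (the early return below).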
3549 if (SrcVT.getSizeInBits()*4 != DestVT.getSizeInBits() && 3550 SrcVT.getSizeInBits()*8 != DestVT.getSizeInBits()) 3551 return SDValue(); 3552 3553 DebugLoc dl = N->getDebugLoc(); 3554 unsigned SrcEltSize = SrcVT.getVectorElementType().getSizeInBits(); 3555 unsigned DestEltSize = DestVT.getVectorElementType().getSizeInBits(); 3556 unsigned NumElts = SrcVT.getVectorNumElements(); 3557 LLVMContext &Ctx = *DAG.getContext(); 3558 SDValue Mid, SplitLo, SplitHi, ExtLo, ExtHi; 3559 3560 EVT MidVT = EVT::getVectorVT(Ctx, EVT::getIntegerVT(Ctx, SrcEltSize*2), 3561 NumElts); 3562 EVT SplitVT = EVT::getVectorVT(Ctx, EVT::getIntegerVT(Ctx, SrcEltSize*2), 3563 NumElts/2); 3564 EVT ExtVT = EVT::getVectorVT(Ctx, EVT::getIntegerVT(Ctx, DestEltSize), 3565 NumElts/2); 3566 3567 Mid = DAG.getNode(N->getOpcode(), dl, MidVT, Op); 3568 SplitLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SplitVT, Mid, 3569 DAG.getIntPtrConstant(0)); 3570 SplitHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SplitVT, Mid, 3571 DAG.getIntPtrConstant(NumElts/2)); 3572 ExtLo = DAG.getNode(N->getOpcode(), dl, ExtVT, SplitLo); 3573 ExtHi = DAG.getNode(N->getOpcode(), dl, ExtVT, SplitHi); 3574 return DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, ExtLo, ExtHi); 3575} 3576 3577/// ExpandBITCAST - If the target supports VFP, this function is called to 3578/// expand a bit convert where either the source or destination type is i64 to 3579/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 3580/// operand type is illegal (e.g., v2f32 for a target that doesn't support 3581/// vectors), since the legalizer won't know what to do with that. 3582static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) { 3583 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3584 DebugLoc dl = N->getDebugLoc(); 3585 SDValue Op = N->getOperand(0); 3586 3587 // This function is only supposed to be called for i64 types, either as the 3588 // source or destination of the bit convert. 3589 EVT SrcVT = Op.getValueType(); 3590 EVT DstVT = N->getValueType(0); 3591 assert((SrcVT == MVT::i64 || DstVT == MVT::i64) && 3592 "ExpandBITCAST called for non-i64 type"); 3593 3594 // Turn i64->f64 into VMOVDRR. 3595 if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) { 3596 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 3597 DAG.getConstant(0, MVT::i32)); 3598 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, 3599 DAG.getConstant(1, MVT::i32)); 3600 return DAG.getNode(ISD::BITCAST, dl, DstVT, 3601 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); 3602 } 3603 3604 // Turn f64->i64 into VMOVRRD. 3605 if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) { 3606 SDValue Cvt = DAG.getNode(ARMISD::VMOVRRD, dl, 3607 DAG.getVTList(MVT::i32, MVT::i32), &Op, 1); 3608 // Merge the pieces into a single i64 value. 3609 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1)); 3610 } 3611 3612 return SDValue(); 3613} 3614 3615/// getZeroVector - Returns a vector of specified type with all zero elements. 3616/// Zero vectors are used to represent vector negation and in those cases 3617/// will be implemented with the NEON VNEG instruction. However, VNEG does 3618/// not support i64 elements, so sometimes the zero vectors will need to be 3619/// explicitly constructed. Regardless, use a canonical VMOV to create the 3620/// zero vector. 
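/// For example, a v4i32 zero is built as (v4i32 (bitcast (VMOVIMM 0))),
/// which should select to a single "vmov.i32 qN, #0".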
3621static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) { 3622 assert(VT.isVector() && "Expected a vector type"); 3623 // The canonical modified immediate encoding of a zero vector is....0! 3624 SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32); 3625 EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; 3626 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal); 3627 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 3628} 3629 3630/// LowerShiftRightParts - Lower SRA_PARTS, which returns two 3631/// i32 values and take a 2 x i32 value to shift plus a shift amount. 3632SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op, 3633 SelectionDAG &DAG) const { 3634 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 3635 EVT VT = Op.getValueType(); 3636 unsigned VTBits = VT.getSizeInBits(); 3637 DebugLoc dl = Op.getDebugLoc(); 3638 SDValue ShOpLo = Op.getOperand(0); 3639 SDValue ShOpHi = Op.getOperand(1); 3640 SDValue ShAmt = Op.getOperand(2); 3641 SDValue ARMcc; 3642 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL; 3643 3644 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS); 3645 3646 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 3647 DAG.getConstant(VTBits, MVT::i32), ShAmt); 3648 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt); 3649 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 3650 DAG.getConstant(VTBits, MVT::i32)); 3651 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt); 3652 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 3653 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt); 3654 3655 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3656 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 3657 ARMcc, DAG, dl); 3658 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt); 3659 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, 3660 CCR, Cmp); 3661 3662 SDValue Ops[2] = { Lo, Hi }; 3663 return DAG.getMergeValues(Ops, 2, dl); 3664} 3665 3666/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two 3667/// i32 values and take a 2 x i32 value to shift plus a shift amount. 
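/// Conceptually, for a 64-bit left shift by n the result is roughly:
///   Lo = ShOpLo << n
///   Hi = (n >= 32) ? ShOpLo << (n - 32)
///                  : (ShOpHi << n) | (ShOpLo >> (32 - n))
/// with the high word selected below by a CMOV on (n - 32) >= 0.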
3668SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op, 3669 SelectionDAG &DAG) const { 3670 assert(Op.getNumOperands() == 3 && "Not a double-shift!"); 3671 EVT VT = Op.getValueType(); 3672 unsigned VTBits = VT.getSizeInBits(); 3673 DebugLoc dl = Op.getDebugLoc(); 3674 SDValue ShOpLo = Op.getOperand(0); 3675 SDValue ShOpHi = Op.getOperand(1); 3676 SDValue ShAmt = Op.getOperand(2); 3677 SDValue ARMcc; 3678 3679 assert(Op.getOpcode() == ISD::SHL_PARTS); 3680 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, 3681 DAG.getConstant(VTBits, MVT::i32), ShAmt); 3682 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt); 3683 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt, 3684 DAG.getConstant(VTBits, MVT::i32)); 3685 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt); 3686 SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt); 3687 3688 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2); 3689 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); 3690 SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE, 3691 ARMcc, DAG, dl); 3692 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt); 3693 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMcc, 3694 CCR, Cmp); 3695 3696 SDValue Ops[2] = { Lo, Hi }; 3697 return DAG.getMergeValues(Ops, 2, dl); 3698} 3699 3700SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op, 3701 SelectionDAG &DAG) const { 3702 // The rounding mode is in bits 23:22 of the FPSCR. 3703 // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0 3704 // The formula we use to implement this is (((FPSCR + 1 << 22) >> 22) & 3) 3705 // so that the shift + and get folded into a bitfield extract. 3706 DebugLoc dl = Op.getDebugLoc(); 3707 SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32, 3708 DAG.getConstant(Intrinsic::arm_get_fpscr, 3709 MVT::i32)); 3710 SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR, 3711 DAG.getConstant(1U << 22, MVT::i32)); 3712 SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds, 3713 DAG.getConstant(22, MVT::i32)); 3714 return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE, 3715 DAG.getConstant(3, MVT::i32)); 3716} 3717 3718static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, 3719 const ARMSubtarget *ST) { 3720 EVT VT = N->getValueType(0); 3721 DebugLoc dl = N->getDebugLoc(); 3722 3723 if (!ST->hasV6T2Ops()) 3724 return SDValue(); 3725 3726 SDValue rbit = DAG.getNode(ARMISD::RBIT, dl, VT, N->getOperand(0)); 3727 return DAG.getNode(ISD::CTLZ, dl, VT, rbit); 3728} 3729 3730/// getCTPOP16BitCounts - Returns a v8i8/v16i8 vector containing the bit-count 3731/// for each 16-bit element from operand, repeated. The basic idea is to 3732/// leverage vcnt to get the 8-bit counts, gather and add the results. 3733/// 3734/// Trace for v4i16: 3735/// input = [v0 v1 v2 v3 ] (vi 16-bit element) 3736/// cast: N0 = [w0 w1 w2 w3 w4 w5 w6 w7] (v0 = [w0 w1], wi 8-bit element) 3737/// vcnt: N1 = [b0 b1 b2 b3 b4 b5 b6 b7] (bi = bit-count of 8-bit element wi) 3738/// vrev: N2 = [b1 b0 b3 b2 b5 b4 b7 b6] 3739/// [b0 b1 b2 b3 b4 b5 b6 b7] 3740/// +[b1 b0 b3 b2 b5 b4 b7 b6] 3741/// N3=N1+N2 = [k0 k0 k1 k1 k2 k2 k3 k3] (k0 = b0+b1 = bit-count of 16-bit v0, 3742/// vuzp: = [k0 k1 k2 k3 k0 k1 k2 k3] each ki is 8-bits) 3743static SDValue getCTPOP16BitCounts(SDNode *N, SelectionDAG &DAG) { 3744 EVT VT = N->getValueType(0); 3745 DebugLoc DL = N->getDebugLoc(); 3746 3747 EVT VT8Bit = VT.is64BitVector() ? 
MVT::v8i8 : MVT::v16i8; 3748 SDValue N0 = DAG.getNode(ISD::BITCAST, DL, VT8Bit, N->getOperand(0)); 3749 SDValue N1 = DAG.getNode(ISD::CTPOP, DL, VT8Bit, N0); 3750 SDValue N2 = DAG.getNode(ARMISD::VREV16, DL, VT8Bit, N1); 3751 SDValue N3 = DAG.getNode(ISD::ADD, DL, VT8Bit, N1, N2); 3752 return DAG.getNode(ARMISD::VUZP, DL, VT8Bit, N3, N3); 3753} 3754 3755/// lowerCTPOP16BitElements - Returns a v4i16/v8i16 vector containing the 3756/// bit-count for each 16-bit element from the operand. We need slightly 3757/// different sequencing for v4i16 and v8i16 to stay within NEON's available 3758/// 64/128-bit registers. 3759/// 3760/// Trace for v4i16: 3761/// input = [v0 v1 v2 v3 ] (vi 16-bit element) 3762/// v8i8: BitCounts = [k0 k1 k2 k3 k0 k1 k2 k3 ] (ki is the bit-count of vi) 3763/// v8i16:Extended = [k0 k1 k2 k3 k0 k1 k2 k3 ] 3764/// v4i16:Extracted = [k0 k1 k2 k3 ] 3765static SDValue lowerCTPOP16BitElements(SDNode *N, SelectionDAG &DAG) { 3766 EVT VT = N->getValueType(0); 3767 DebugLoc DL = N->getDebugLoc(); 3768 3769 SDValue BitCounts = getCTPOP16BitCounts(N, DAG); 3770 if (VT.is64BitVector()) { 3771 SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, BitCounts); 3772 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, Extended, 3773 DAG.getIntPtrConstant(0)); 3774 } else { 3775 SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8, 3776 BitCounts, DAG.getIntPtrConstant(0)); 3777 return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, Extracted); 3778 } 3779} 3780 3781/// lowerCTPOP32BitElements - Returns a v2i32/v4i32 vector containing the 3782/// bit-count for each 32-bit element from the operand. The idea here is 3783/// to split the vector into 16-bit elements, leverage the 16-bit count 3784/// routine, and then combine the results. 3785/// 3786/// Trace for v2i32 (v4i32 similar with Extracted/Extended exchanged): 3787/// input = [v0 v1 ] (vi: 32-bit elements) 3788/// Bitcast = [w0 w1 w2 w3 ] (wi: 16-bit elements, v0 = [w0 w1]) 3789/// Counts16 = [k0 k1 k2 k3 ] (ki: 16-bit elements, bit-count of wi) 3790/// vrev: N0 = [k1 k0 k3 k2 ] 3791/// [k0 k1 k2 k3 ] 3792/// N1 =+[k1 k0 k3 k2 ] 3793/// [k0 k2 k1 k3 ] 3794/// N2 =+[k1 k3 k0 k2 ] 3795/// [k0 k2 k1 k3 ] 3796/// Extended =+[k1 k3 k0 k2 ] 3797/// [k0 k2 ] 3798/// Extracted=+[k1 k3 ] 3799/// 3800static SDValue lowerCTPOP32BitElements(SDNode *N, SelectionDAG &DAG) { 3801 EVT VT = N->getValueType(0); 3802 DebugLoc DL = N->getDebugLoc(); 3803 3804 EVT VT16Bit = VT.is64BitVector() ? 
MVT::v4i16 : MVT::v8i16; 3805 3806 SDValue Bitcast = DAG.getNode(ISD::BITCAST, DL, VT16Bit, N->getOperand(0)); 3807 SDValue Counts16 = lowerCTPOP16BitElements(Bitcast.getNode(), DAG); 3808 SDValue N0 = DAG.getNode(ARMISD::VREV32, DL, VT16Bit, Counts16); 3809 SDValue N1 = DAG.getNode(ISD::ADD, DL, VT16Bit, Counts16, N0); 3810 SDValue N2 = DAG.getNode(ARMISD::VUZP, DL, VT16Bit, N1, N1); 3811 3812 if (VT.is64BitVector()) { 3813 SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, N2); 3814 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i32, Extended, 3815 DAG.getIntPtrConstant(0)); 3816 } else { 3817 SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, N2, 3818 DAG.getIntPtrConstant(0)); 3819 return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, Extracted); 3820 } 3821} 3822 3823static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG, 3824 const ARMSubtarget *ST) { 3825 EVT VT = N->getValueType(0); 3826 3827 assert(ST->hasNEON() && "Custom ctpop lowering requires NEON."); 3828 assert((VT == MVT::v2i32 || VT == MVT::v4i32 || 3829 VT == MVT::v4i16 || VT == MVT::v8i16) && 3830 "Unexpected type for custom ctpop lowering"); 3831 3832 if (VT.getVectorElementType() == MVT::i32) 3833 return lowerCTPOP32BitElements(N, DAG); 3834 else 3835 return lowerCTPOP16BitElements(N, DAG); 3836} 3837 3838static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, 3839 const ARMSubtarget *ST) { 3840 EVT VT = N->getValueType(0); 3841 DebugLoc dl = N->getDebugLoc(); 3842 3843 if (!VT.isVector()) 3844 return SDValue(); 3845 3846 // Lower vector shifts on NEON to use VSHL. 3847 assert(ST->hasNEON() && "unexpected vector shift"); 3848 3849 // Left shifts translate directly to the vshiftu intrinsic. 3850 if (N->getOpcode() == ISD::SHL) 3851 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 3852 DAG.getConstant(Intrinsic::arm_neon_vshiftu, MVT::i32), 3853 N->getOperand(0), N->getOperand(1)); 3854 3855 assert((N->getOpcode() == ISD::SRA || 3856 N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode"); 3857 3858 // NEON uses the same intrinsics for both left and right shifts. For 3859 // right shifts, the shift amounts are negative, so negate the vector of 3860 // shift amounts. 3861 EVT ShiftVT = N->getOperand(1).getValueType(); 3862 SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT, 3863 getZeroVector(ShiftVT, DAG, dl), 3864 N->getOperand(1)); 3865 Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ? 3866 Intrinsic::arm_neon_vshifts : 3867 Intrinsic::arm_neon_vshiftu); 3868 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, 3869 DAG.getConstant(vshiftInt, MVT::i32), 3870 N->getOperand(0), NegatedCount); 3871} 3872 3873static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, 3874 const ARMSubtarget *ST) { 3875 EVT VT = N->getValueType(0); 3876 DebugLoc dl = N->getDebugLoc(); 3877 3878 // We can get here for a node like i32 = ISD::SHL i32, i64 3879 if (VT != MVT::i64) 3880 return SDValue(); 3881 3882 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) && 3883 "Unknown shift to lower!"); 3884 3885 // We only lower SRA, SRL of 1 here, all others use generic lowering. 3886 if (!isa<ConstantSDNode>(N->getOperand(1)) || 3887 cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1) 3888 return SDValue(); 3889 3890 // If we are in thumb mode, we don't have RRX. 3891 if (ST->isThumb1Only()) return SDValue(); 3892 3893 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. 
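  // Roughly: the high word is shifted right by one with SRA_FLAG/SRL_FLAG,
  // which leaves the shifted-out bit in the carry flag, and the low word is
  // rotated right through that carry with RRX; i.e. something like
  // "asrs/lsrs hi, hi, #1" followed by "rrx lo, lo".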
3894 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3895 DAG.getConstant(0, MVT::i32)); 3896 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0), 3897 DAG.getConstant(1, MVT::i32)); 3898 3899 // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and 3900 // captures the result into a carry flag. 3901 unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG; 3902 Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), &Hi, 1); 3903 3904 // The low part is an ARMISD::RRX operand, which shifts the carry in. 3905 Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1)); 3906 3907 // Merge the pieces into a single i64 value. 3908 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); 3909} 3910 3911static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) { 3912 SDValue TmpOp0, TmpOp1; 3913 bool Invert = false; 3914 bool Swap = false; 3915 unsigned Opc = 0; 3916 3917 SDValue Op0 = Op.getOperand(0); 3918 SDValue Op1 = Op.getOperand(1); 3919 SDValue CC = Op.getOperand(2); 3920 EVT VT = Op.getValueType(); 3921 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); 3922 DebugLoc dl = Op.getDebugLoc(); 3923 3924 if (Op.getOperand(1).getValueType().isFloatingPoint()) { 3925 switch (SetCCOpcode) { 3926 default: llvm_unreachable("Illegal FP comparison"); 3927 case ISD::SETUNE: 3928 case ISD::SETNE: Invert = true; // Fallthrough 3929 case ISD::SETOEQ: 3930 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3931 case ISD::SETOLT: 3932 case ISD::SETLT: Swap = true; // Fallthrough 3933 case ISD::SETOGT: 3934 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3935 case ISD::SETOLE: 3936 case ISD::SETLE: Swap = true; // Fallthrough 3937 case ISD::SETOGE: 3938 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3939 case ISD::SETUGE: Swap = true; // Fallthrough 3940 case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break; 3941 case ISD::SETUGT: Swap = true; // Fallthrough 3942 case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break; 3943 case ISD::SETUEQ: Invert = true; // Fallthrough 3944 case ISD::SETONE: 3945 // Expand this to (OLT | OGT). 3946 TmpOp0 = Op0; 3947 TmpOp1 = Op1; 3948 Opc = ISD::OR; 3949 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3950 Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1); 3951 break; 3952 case ISD::SETUO: Invert = true; // Fallthrough 3953 case ISD::SETO: 3954 // Expand this to (OLT | OGE). 3955 TmpOp0 = Op0; 3956 TmpOp1 = Op1; 3957 Opc = ISD::OR; 3958 Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0); 3959 Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1); 3960 break; 3961 } 3962 } else { 3963 // Integer comparisons. 3964 switch (SetCCOpcode) { 3965 default: llvm_unreachable("Illegal integer comparison"); 3966 case ISD::SETNE: Invert = true; 3967 case ISD::SETEQ: Opc = ARMISD::VCEQ; break; 3968 case ISD::SETLT: Swap = true; 3969 case ISD::SETGT: Opc = ARMISD::VCGT; break; 3970 case ISD::SETLE: Swap = true; 3971 case ISD::SETGE: Opc = ARMISD::VCGE; break; 3972 case ISD::SETULT: Swap = true; 3973 case ISD::SETUGT: Opc = ARMISD::VCGTU; break; 3974 case ISD::SETULE: Swap = true; 3975 case ISD::SETUGE: Opc = ARMISD::VCGEU; break; 3976 } 3977 3978 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero). 3979 if (Opc == ARMISD::VCEQ) { 3980 3981 SDValue AndOp; 3982 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 3983 AndOp = Op0; 3984 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) 3985 AndOp = Op1; 3986 3987 // Ignore bitconvert. 
3988 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST) 3989 AndOp = AndOp.getOperand(0); 3990 3991 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { 3992 Opc = ARMISD::VTST; 3993 Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0)); 3994 Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1)); 3995 Invert = !Invert; 3996 } 3997 } 3998 } 3999 4000 if (Swap) 4001 std::swap(Op0, Op1); 4002 4003 // If one of the operands is a constant vector zero, attempt to fold the 4004 // comparison to a specialized compare-against-zero form. 4005 SDValue SingleOp; 4006 if (ISD::isBuildVectorAllZeros(Op1.getNode())) 4007 SingleOp = Op0; 4008 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) { 4009 if (Opc == ARMISD::VCGE) 4010 Opc = ARMISD::VCLEZ; 4011 else if (Opc == ARMISD::VCGT) 4012 Opc = ARMISD::VCLTZ; 4013 SingleOp = Op1; 4014 } 4015 4016 SDValue Result; 4017 if (SingleOp.getNode()) { 4018 switch (Opc) { 4019 case ARMISD::VCEQ: 4020 Result = DAG.getNode(ARMISD::VCEQZ, dl, VT, SingleOp); break; 4021 case ARMISD::VCGE: 4022 Result = DAG.getNode(ARMISD::VCGEZ, dl, VT, SingleOp); break; 4023 case ARMISD::VCLEZ: 4024 Result = DAG.getNode(ARMISD::VCLEZ, dl, VT, SingleOp); break; 4025 case ARMISD::VCGT: 4026 Result = DAG.getNode(ARMISD::VCGTZ, dl, VT, SingleOp); break; 4027 case ARMISD::VCLTZ: 4028 Result = DAG.getNode(ARMISD::VCLTZ, dl, VT, SingleOp); break; 4029 default: 4030 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 4031 } 4032 } else { 4033 Result = DAG.getNode(Opc, dl, VT, Op0, Op1); 4034 } 4035 4036 if (Invert) 4037 Result = DAG.getNOT(dl, Result, VT); 4038 4039 return Result; 4040} 4041 4042/// isNEONModifiedImm - Check if the specified splat value corresponds to a 4043/// valid vector constant for a NEON instruction with a "modified immediate" 4044/// operand (e.g., VMOV). If so, return the encoded value. 4045static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, 4046 unsigned SplatBitSize, SelectionDAG &DAG, 4047 EVT &VT, bool is128Bits, NEONModImmType type) { 4048 unsigned OpCmode, Imm; 4049 4050 // SplatBitSize is set to the smallest size that splats the vector, so a 4051 // zero vector will always have SplatBitSize == 8. However, NEON modified 4052 // immediate instructions others than VMOV do not support the 8-bit encoding 4053 // of a zero vector, and the default encoding of zero is supposed to be the 4054 // 32-bit version. 4055 if (SplatBits == 0) 4056 SplatBitSize = 32; 4057 4058 switch (SplatBitSize) { 4059 case 8: 4060 if (type != VMOVModImm) 4061 return SDValue(); 4062 // Any 1-byte value is OK. Op=0, Cmode=1110. 4063 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big"); 4064 OpCmode = 0xe; 4065 Imm = SplatBits; 4066 VT = is128Bits ? MVT::v16i8 : MVT::v8i8; 4067 break; 4068 4069 case 16: 4070 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero. 4071 VT = is128Bits ? MVT::v8i16 : MVT::v4i16; 4072 if ((SplatBits & ~0xff) == 0) { 4073 // Value = 0x00nn: Op=x, Cmode=100x. 4074 OpCmode = 0x8; 4075 Imm = SplatBits; 4076 break; 4077 } 4078 if ((SplatBits & ~0xff00) == 0) { 4079 // Value = 0xnn00: Op=x, Cmode=101x. 4080 OpCmode = 0xa; 4081 Imm = SplatBits >> 8; 4082 break; 4083 } 4084 return SDValue(); 4085 4086 case 32: 4087 // NEON's 32-bit VMOV supports splat values where: 4088 // * only one byte is nonzero, or 4089 // * the least significant byte is 0xff and the second byte is nonzero, or 4090 // * the least significant 2 bytes are 0xff and the third is nonzero. 4091 VT = is128Bits ? 
MVT::v4i32 : MVT::v2i32; 4092 if ((SplatBits & ~0xff) == 0) { 4093 // Value = 0x000000nn: Op=x, Cmode=000x. 4094 OpCmode = 0; 4095 Imm = SplatBits; 4096 break; 4097 } 4098 if ((SplatBits & ~0xff00) == 0) { 4099 // Value = 0x0000nn00: Op=x, Cmode=001x. 4100 OpCmode = 0x2; 4101 Imm = SplatBits >> 8; 4102 break; 4103 } 4104 if ((SplatBits & ~0xff0000) == 0) { 4105 // Value = 0x00nn0000: Op=x, Cmode=010x. 4106 OpCmode = 0x4; 4107 Imm = SplatBits >> 16; 4108 break; 4109 } 4110 if ((SplatBits & ~0xff000000) == 0) { 4111 // Value = 0xnn000000: Op=x, Cmode=011x. 4112 OpCmode = 0x6; 4113 Imm = SplatBits >> 24; 4114 break; 4115 } 4116 4117 // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC 4118 if (type == OtherModImm) return SDValue(); 4119 4120 if ((SplatBits & ~0xffff) == 0 && 4121 ((SplatBits | SplatUndef) & 0xff) == 0xff) { 4122 // Value = 0x0000nnff: Op=x, Cmode=1100. 4123 OpCmode = 0xc; 4124 Imm = SplatBits >> 8; 4125 SplatBits |= 0xff; 4126 break; 4127 } 4128 4129 if ((SplatBits & ~0xffffff) == 0 && 4130 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) { 4131 // Value = 0x00nnffff: Op=x, Cmode=1101. 4132 OpCmode = 0xd; 4133 Imm = SplatBits >> 16; 4134 SplatBits |= 0xffff; 4135 break; 4136 } 4137 4138 // Note: there are a few 32-bit splat values (specifically: 00ffff00, 4139 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not 4140 // VMOV.I32. A (very) minor optimization would be to replicate the value 4141 // and fall through here to test for a valid 64-bit splat. But, then the 4142 // caller would also need to check and handle the change in size. 4143 return SDValue(); 4144 4145 case 64: { 4146 if (type != VMOVModImm) 4147 return SDValue(); 4148 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff. 4149 uint64_t BitMask = 0xff; 4150 uint64_t Val = 0; 4151 unsigned ImmMask = 1; 4152 Imm = 0; 4153 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) { 4154 if (((SplatBits | SplatUndef) & BitMask) == BitMask) { 4155 Val |= BitMask; 4156 Imm |= ImmMask; 4157 } else if ((SplatBits & BitMask) != 0) { 4158 return SDValue(); 4159 } 4160 BitMask <<= 8; 4161 ImmMask <<= 1; 4162 } 4163 // Op=1, Cmode=1110. 4164 OpCmode = 0x1e; 4165 SplatBits = Val; 4166 VT = is128Bits ? MVT::v2i64 : MVT::v1i64; 4167 break; 4168 } 4169 4170 default: 4171 llvm_unreachable("unexpected size for isNEONModifiedImm"); 4172 } 4173 4174 unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm); 4175 return DAG.getTargetConstant(EncodedVal, MVT::i32); 4176} 4177 4178SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG, 4179 const ARMSubtarget *ST) const { 4180 if (!ST->useNEONForSinglePrecisionFP() || !ST->hasVFP3() || ST->hasD16()) 4181 return SDValue(); 4182 4183 ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op); 4184 assert(Op.getValueType() == MVT::f32 && 4185 "ConstantFP custom lowering should only occur for f32."); 4186 4187 // Try splatting with a VMOV.f32... 
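  // (This only succeeds for values encodable as the 8-bit VFP immediate,
  // e.g. +/-1.0, 2.0 or 0.5; ARM_AM::getFP32Imm returns -1 otherwise and we
  // fall through to the integer VMOV/VMVN attempts below.)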
4188 APFloat FPVal = CFP->getValueAPF(); 4189 int ImmVal = ARM_AM::getFP32Imm(FPVal); 4190 if (ImmVal != -1) { 4191 DebugLoc DL = Op.getDebugLoc(); 4192 SDValue NewVal = DAG.getTargetConstant(ImmVal, MVT::i32); 4193 SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32, 4194 NewVal); 4195 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant, 4196 DAG.getConstant(0, MVT::i32)); 4197 } 4198 4199 // If that fails, try a VMOV.i32 4200 EVT VMovVT; 4201 unsigned iVal = FPVal.bitcastToAPInt().getZExtValue(); 4202 SDValue NewVal = isNEONModifiedImm(iVal, 0, 32, DAG, VMovVT, false, 4203 VMOVModImm); 4204 if (NewVal != SDValue()) { 4205 DebugLoc DL = Op.getDebugLoc(); 4206 SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT, 4207 NewVal); 4208 SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, 4209 VecConstant); 4210 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant, 4211 DAG.getConstant(0, MVT::i32)); 4212 } 4213 4214 // Finally, try a VMVN.i32 4215 NewVal = isNEONModifiedImm(~iVal & 0xffffffff, 0, 32, DAG, VMovVT, false, 4216 VMVNModImm); 4217 if (NewVal != SDValue()) { 4218 DebugLoc DL = Op.getDebugLoc(); 4219 SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal); 4220 SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, 4221 VecConstant); 4222 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant, 4223 DAG.getConstant(0, MVT::i32)); 4224 } 4225 4226 return SDValue(); 4227} 4228 4229// check if an VEXT instruction can handle the shuffle mask when the 4230// vector sources of the shuffle are the same. 4231static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) { 4232 unsigned NumElts = VT.getVectorNumElements(); 4233 4234 // Assume that the first shuffle index is not UNDEF. Fail if it is. 4235 if (M[0] < 0) 4236 return false; 4237 4238 Imm = M[0]; 4239 4240 // If this is a VEXT shuffle, the immediate value is the index of the first 4241 // element. The other shuffle indices must be the successive elements after 4242 // the first one. 4243 unsigned ExpectedElt = Imm; 4244 for (unsigned i = 1; i < NumElts; ++i) { 4245 // Increment the expected index. If it wraps around, just follow it 4246 // back to index zero and keep going. 4247 ++ExpectedElt; 4248 if (ExpectedElt == NumElts) 4249 ExpectedElt = 0; 4250 4251 if (M[i] < 0) continue; // ignore UNDEF indices 4252 if (ExpectedElt != static_cast<unsigned>(M[i])) 4253 return false; 4254 } 4255 4256 return true; 4257} 4258 4259 4260static bool isVEXTMask(ArrayRef<int> M, EVT VT, 4261 bool &ReverseVEXT, unsigned &Imm) { 4262 unsigned NumElts = VT.getVectorNumElements(); 4263 ReverseVEXT = false; 4264 4265 // Assume that the first shuffle index is not UNDEF. Fail if it is. 4266 if (M[0] < 0) 4267 return false; 4268 4269 Imm = M[0]; 4270 4271 // If this is a VEXT shuffle, the immediate value is the index of the first 4272 // element. The other shuffle indices must be the successive elements after 4273 // the first one. 4274 unsigned ExpectedElt = Imm; 4275 for (unsigned i = 1; i < NumElts; ++i) { 4276 // Increment the expected index. If it wraps around, it may still be 4277 // a VEXT but the source vectors must be swapped. 
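    // (For example, with v8i8: <7, 8, 9, 10, 11, 12, 13, 14> is VEXT #7 of
    // (V1, V2), while <9, 10, 11, 12, 13, 14, 15, 0> wraps past the end and
    // becomes VEXT #1 of (V2, V1).)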
4278 ExpectedElt += 1; 4279 if (ExpectedElt == NumElts * 2) { 4280 ExpectedElt = 0; 4281 ReverseVEXT = true; 4282 } 4283 4284 if (M[i] < 0) continue; // ignore UNDEF indices 4285 if (ExpectedElt != static_cast<unsigned>(M[i])) 4286 return false; 4287 } 4288 4289 // Adjust the index value if the source operands will be swapped. 4290 if (ReverseVEXT) 4291 Imm -= NumElts; 4292 4293 return true; 4294} 4295 4296/// isVREVMask - Check if a vector shuffle corresponds to a VREV 4297/// instruction with the specified blocksize. (The order of the elements 4298/// within each block of the vector is reversed.) 4299static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) { 4300 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) && 4301 "Only possible block sizes for VREV are: 16, 32, 64"); 4302 4303 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 4304 if (EltSz == 64) 4305 return false; 4306 4307 unsigned NumElts = VT.getVectorNumElements(); 4308 unsigned BlockElts = M[0] + 1; 4309 // If the first shuffle index is UNDEF, be optimistic. 4310 if (M[0] < 0) 4311 BlockElts = BlockSize / EltSz; 4312 4313 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz) 4314 return false; 4315 4316 for (unsigned i = 0; i < NumElts; ++i) { 4317 if (M[i] < 0) continue; // ignore UNDEF indices 4318 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts)) 4319 return false; 4320 } 4321 4322 return true; 4323} 4324 4325static bool isVTBLMask(ArrayRef<int> M, EVT VT) { 4326 // We can handle <8 x i8> vector shuffles. If the index in the mask is out of 4327 // range, then 0 is placed into the resulting vector. So pretty much any mask 4328 // of 8 elements can work here. 4329 return VT == MVT::v8i8 && M.size() == 8; 4330} 4331 4332static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 4333 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 4334 if (EltSz == 64) 4335 return false; 4336 4337 unsigned NumElts = VT.getVectorNumElements(); 4338 WhichResult = (M[0] == 0 ? 0 : 1); 4339 for (unsigned i = 0; i < NumElts; i += 2) { 4340 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 4341 (M[i+1] >= 0 && (unsigned) M[i+1] != i + NumElts + WhichResult)) 4342 return false; 4343 } 4344 return true; 4345} 4346 4347/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of 4348/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 4349/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. 4350static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ 4351 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 4352 if (EltSz == 64) 4353 return false; 4354 4355 unsigned NumElts = VT.getVectorNumElements(); 4356 WhichResult = (M[0] == 0 ? 0 : 1); 4357 for (unsigned i = 0; i < NumElts; i += 2) { 4358 if ((M[i] >= 0 && (unsigned) M[i] != i + WhichResult) || 4359 (M[i+1] >= 0 && (unsigned) M[i+1] != i + WhichResult)) 4360 return false; 4361 } 4362 return true; 4363} 4364 4365static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 4366 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 4367 if (EltSz == 64) 4368 return false; 4369 4370 unsigned NumElts = VT.getVectorNumElements(); 4371 WhichResult = (M[0] == 0 ? 0 : 1); 4372 for (unsigned i = 0; i != NumElts; ++i) { 4373 if (M[i] < 0) continue; // ignore UNDEF indices 4374 if ((unsigned) M[i] != 2 * i + WhichResult) 4375 return false; 4376 } 4377 4378 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 
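  // Reject it here so such a shuffle is matched by the VTRN checks instead.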
4379 if (VT.is64BitVector() && EltSz == 32) 4380 return false; 4381 4382 return true; 4383} 4384 4385/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of 4386/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 4387/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>, 4388static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ 4389 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 4390 if (EltSz == 64) 4391 return false; 4392 4393 unsigned Half = VT.getVectorNumElements() / 2; 4394 WhichResult = (M[0] == 0 ? 0 : 1); 4395 for (unsigned j = 0; j != 2; ++j) { 4396 unsigned Idx = WhichResult; 4397 for (unsigned i = 0; i != Half; ++i) { 4398 int MIdx = M[i + j * Half]; 4399 if (MIdx >= 0 && (unsigned) MIdx != Idx) 4400 return false; 4401 Idx += 2; 4402 } 4403 } 4404 4405 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 4406 if (VT.is64BitVector() && EltSz == 32) 4407 return false; 4408 4409 return true; 4410} 4411 4412static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 4413 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 4414 if (EltSz == 64) 4415 return false; 4416 4417 unsigned NumElts = VT.getVectorNumElements(); 4418 WhichResult = (M[0] == 0 ? 0 : 1); 4419 unsigned Idx = WhichResult * NumElts / 2; 4420 for (unsigned i = 0; i != NumElts; i += 2) { 4421 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 4422 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx + NumElts)) 4423 return false; 4424 Idx += 1; 4425 } 4426 4427 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 4428 if (VT.is64BitVector() && EltSz == 32) 4429 return false; 4430 4431 return true; 4432} 4433 4434/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of 4435/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 4436/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. 4437static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ 4438 unsigned EltSz = VT.getVectorElementType().getSizeInBits(); 4439 if (EltSz == 64) 4440 return false; 4441 4442 unsigned NumElts = VT.getVectorNumElements(); 4443 WhichResult = (M[0] == 0 ? 0 : 1); 4444 unsigned Idx = WhichResult * NumElts / 2; 4445 for (unsigned i = 0; i != NumElts; i += 2) { 4446 if ((M[i] >= 0 && (unsigned) M[i] != Idx) || 4447 (M[i+1] >= 0 && (unsigned) M[i+1] != Idx)) 4448 return false; 4449 Idx += 1; 4450 } 4451 4452 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32. 4453 if (VT.is64BitVector() && EltSz == 32) 4454 return false; 4455 4456 return true; 4457} 4458 4459/// \return true if this is a reverse operation on an vector. 4460static bool isReverseMask(ArrayRef<int> M, EVT VT) { 4461 unsigned NumElts = VT.getVectorNumElements(); 4462 // Make sure the mask has the right size. 4463 if (NumElts != M.size()) 4464 return false; 4465 4466 // Look for <15, ..., 3, -1, 1, 0>. 4467 for (unsigned i = 0; i != NumElts; ++i) 4468 if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i)) 4469 return false; 4470 4471 return true; 4472} 4473 4474// If N is an integer constant that can be moved into a register in one 4475// instruction, return an SDValue of such a constant (will become a MOV 4476// instruction). Otherwise return null. 
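// For instance (illustrative only): on Thumb1 a small value such as 200 fits
// the 8-bit MOV immediate; in ARM/Thumb2 mode a value like 0x00AB0000 is a
// valid modified immediate (an 8-bit value rotated by an even amount), and a
// value whose bitwise complement is encodable also qualifies, since it can be
// materialized with MVN.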
4477static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, 4478 const ARMSubtarget *ST, DebugLoc dl) { 4479 uint64_t Val; 4480 if (!isa<ConstantSDNode>(N)) 4481 return SDValue(); 4482 Val = cast<ConstantSDNode>(N)->getZExtValue(); 4483 4484 if (ST->isThumb1Only()) { 4485 if (Val <= 255 || ~Val <= 255) 4486 return DAG.getConstant(Val, MVT::i32); 4487 } else { 4488 if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1) 4489 return DAG.getConstant(Val, MVT::i32); 4490 } 4491 return SDValue(); 4492} 4493 4494// If this is a case we can't handle, return null and let the default 4495// expansion code take care of it. 4496SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, 4497 const ARMSubtarget *ST) const { 4498 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); 4499 DebugLoc dl = Op.getDebugLoc(); 4500 EVT VT = Op.getValueType(); 4501 4502 APInt SplatBits, SplatUndef; 4503 unsigned SplatBitSize; 4504 bool HasAnyUndefs; 4505 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 4506 if (SplatBitSize <= 64) { 4507 // Check if an immediate VMOV works. 4508 EVT VmovVT; 4509 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 4510 SplatUndef.getZExtValue(), SplatBitSize, 4511 DAG, VmovVT, VT.is128BitVector(), 4512 VMOVModImm); 4513 if (Val.getNode()) { 4514 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); 4515 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 4516 } 4517 4518 // Try an immediate VMVN. 4519 uint64_t NegatedImm = (~SplatBits).getZExtValue(); 4520 Val = isNEONModifiedImm(NegatedImm, 4521 SplatUndef.getZExtValue(), SplatBitSize, 4522 DAG, VmovVT, VT.is128BitVector(), 4523 VMVNModImm); 4524 if (Val.getNode()) { 4525 SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); 4526 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov); 4527 } 4528 4529 // Use vmov.f32 to materialize other v2f32 and v4f32 splats. 4530 if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) { 4531 int ImmVal = ARM_AM::getFP32Imm(SplatBits); 4532 if (ImmVal != -1) { 4533 SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32); 4534 return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val); 4535 } 4536 } 4537 } 4538 } 4539 4540 // Scan through the operands to see if only one value is used. 4541 // 4542 // As an optimisation, even if more than one value is used it may be more 4543 // profitable to splat with one value then change some lanes. 4544 // 4545 // Heuristically we decide to do this if the vector has a "dominant" value, 4546 // defined as splatted to more than half of the lanes. 4547 unsigned NumElts = VT.getVectorNumElements(); 4548 bool isOnlyLowElement = true; 4549 bool usesOnlyOneValue = true; 4550 bool hasDominantValue = false; 4551 bool isConstant = true; 4552 4553 // Map of the number of times a particular SDValue appears in the 4554 // element list. 4555 DenseMap<SDValue, unsigned> ValueCounts; 4556 SDValue Value; 4557 for (unsigned i = 0; i < NumElts; ++i) { 4558 SDValue V = Op.getOperand(i); 4559 if (V.getOpcode() == ISD::UNDEF) 4560 continue; 4561 if (i > 0) 4562 isOnlyLowElement = false; 4563 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 4564 isConstant = false; 4565 4566 ValueCounts.insert(std::make_pair(V, 0)); 4567 unsigned &Count = ValueCounts[V]; 4568 4569 // Is this value dominant? 
(takes up more than half of the lanes) 4570 if (++Count > (NumElts / 2)) { 4571 hasDominantValue = true; 4572 Value = V; 4573 } 4574 } 4575 if (ValueCounts.size() != 1) 4576 usesOnlyOneValue = false; 4577 if (!Value.getNode() && ValueCounts.size() > 0) 4578 Value = ValueCounts.begin()->first; 4579 4580 if (ValueCounts.size() == 0) 4581 return DAG.getUNDEF(VT); 4582 4583 if (isOnlyLowElement) 4584 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); 4585 4586 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4587 4588 // Use VDUP for non-constant splats. For f32 constant splats, reduce to 4589 // i32 and try again. 4590 if (hasDominantValue && EltSize <= 32) { 4591 if (!isConstant) { 4592 SDValue N; 4593 4594 // If we are VDUPing a value that comes directly from a vector, that will 4595 // cause an unnecessary move to and from a GPR, where instead we could 4596 // just use VDUPLANE. We can only do this if the lane being extracted 4597 // is at a constant index, as the VDUP from lane instructions only have 4598 // constant-index forms. 4599 if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT && 4600 isa<ConstantSDNode>(Value->getOperand(1))) { 4601 // We need to create a new undef vector to use for the VDUPLANE if the 4602 // size of the vector from which we get the value is different than the 4603 // size of the vector that we need to create. We will insert the element 4604 // such that the register coalescer will remove unnecessary copies. 4605 if (VT != Value->getOperand(0).getValueType()) { 4606 ConstantSDNode *constIndex; 4607 constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1)); 4608 assert(constIndex && "The index is not a constant!"); 4609 unsigned index = constIndex->getAPIntValue().getLimitedValue() % 4610 VT.getVectorNumElements(); 4611 N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, 4612 DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT), 4613 Value, DAG.getConstant(index, MVT::i32)), 4614 DAG.getConstant(index, MVT::i32)); 4615 } else 4616 N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, 4617 Value->getOperand(0), Value->getOperand(1)); 4618 } else 4619 N = DAG.getNode(ARMISD::VDUP, dl, VT, Value); 4620 4621 if (!usesOnlyOneValue) { 4622 // The dominant value was splatted as 'N', but we now have to insert 4623 // all differing elements. 4624 for (unsigned I = 0; I < NumElts; ++I) { 4625 if (Op.getOperand(I) == Value) 4626 continue; 4627 SmallVector<SDValue, 3> Ops; 4628 Ops.push_back(N); 4629 Ops.push_back(Op.getOperand(I)); 4630 Ops.push_back(DAG.getConstant(I, MVT::i32)); 4631 N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, &Ops[0], 3); 4632 } 4633 } 4634 return N; 4635 } 4636 if (VT.getVectorElementType().isFloatingPoint()) { 4637 SmallVector<SDValue, 8> Ops; 4638 for (unsigned i = 0; i < NumElts; ++i) 4639 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32, 4640 Op.getOperand(i))); 4641 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); 4642 SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], NumElts); 4643 Val = LowerBUILD_VECTOR(Val, DAG, ST); 4644 if (Val.getNode()) 4645 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 4646 } 4647 if (usesOnlyOneValue) { 4648 SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); 4649 if (isConstant && Val.getNode()) 4650 return DAG.getNode(ARMISD::VDUP, dl, VT, Val); 4651 } 4652 } 4653 4654 // If all elements are constants and the case above didn't get hit, fall back 4655 // to the default expansion, which will generate a load from the constant 4656 // pool. 
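  // (For example, a v4i32 with four unrelated constant lanes is usually cheaper
  // as a single 16-byte literal-pool load than as a chain of scalar moves and
  // lane inserts; this is a heuristic, not a guarantee.)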
4657 if (isConstant) 4658 return SDValue(); 4659 4660 // Empirical tests suggest this is rarely worth it for vectors of length <= 2. 4661 if (NumElts >= 4) { 4662 SDValue shuffle = ReconstructShuffle(Op, DAG); 4663 if (shuffle != SDValue()) 4664 return shuffle; 4665 } 4666 4667 // Vectors with 32- or 64-bit elements can be built by directly assigning 4668 // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands 4669 // will be legalized. 4670 if (EltSize >= 32) { 4671 // Do the expansion with floating-point types, since that is what the VFP 4672 // registers are defined to use, and since i64 is not legal. 4673 EVT EltVT = EVT::getFloatingPointVT(EltSize); 4674 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 4675 SmallVector<SDValue, 8> Ops; 4676 for (unsigned i = 0; i < NumElts; ++i) 4677 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i))); 4678 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 4679 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 4680 } 4681 4682 return SDValue(); 4683} 4684 4685// Gather data to see if the operation can be modelled as a 4686// shuffle in combination with VEXTs. 4687SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op, 4688 SelectionDAG &DAG) const { 4689 DebugLoc dl = Op.getDebugLoc(); 4690 EVT VT = Op.getValueType(); 4691 unsigned NumElts = VT.getVectorNumElements(); 4692 4693 SmallVector<SDValue, 2> SourceVecs; 4694 SmallVector<unsigned, 2> MinElts; 4695 SmallVector<unsigned, 2> MaxElts; 4696 4697 for (unsigned i = 0; i < NumElts; ++i) { 4698 SDValue V = Op.getOperand(i); 4699 if (V.getOpcode() == ISD::UNDEF) 4700 continue; 4701 else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) { 4702 // A shuffle can only come from building a vector from various 4703 // elements of other vectors. 4704 return SDValue(); 4705 } else if (V.getOperand(0).getValueType().getVectorElementType() != 4706 VT.getVectorElementType()) { 4707 // This code doesn't know how to handle shuffles where the vector 4708 // element types do not match (this happens because type legalization 4709 // promotes the return type of EXTRACT_VECTOR_ELT). 4710 // FIXME: It might be appropriate to extend this code to handle 4711 // mismatched types. 4712 return SDValue(); 4713 } 4714 4715 // Record this extraction against the appropriate vector if possible... 4716 SDValue SourceVec = V.getOperand(0); 4717 // If the element number isn't a constant, we can't effectively 4718 // analyze what's going on. 4719 if (!isa<ConstantSDNode>(V.getOperand(1))) 4720 return SDValue(); 4721 unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue(); 4722 bool FoundSource = false; 4723 for (unsigned j = 0; j < SourceVecs.size(); ++j) { 4724 if (SourceVecs[j] == SourceVec) { 4725 if (MinElts[j] > EltNo) 4726 MinElts[j] = EltNo; 4727 if (MaxElts[j] < EltNo) 4728 MaxElts[j] = EltNo; 4729 FoundSource = true; 4730 break; 4731 } 4732 } 4733 4734 // Or record a new source if not... 4735 if (!FoundSource) { 4736 SourceVecs.push_back(SourceVec); 4737 MinElts.push_back(EltNo); 4738 MaxElts.push_back(EltNo); 4739 } 4740 } 4741 4742 // Currently only do something sane when at most two source vectors 4743 // involved. 4744 if (SourceVecs.size() > 2) 4745 return SDValue(); 4746 4747 SDValue ShuffleSrcs[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT) }; 4748 int VEXTOffsets[2] = {0, 0}; 4749 4750 // This loop extracts the usage patterns of the source vectors 4751 // and prepares appropriate SDValues for a shuffle if possible. 
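  // Roughly: a source that already has the result type is used directly; a
  // narrower source is not worth padding out, so we give up; a double-width
  // source is either truncated to the half that contains all of the used
  // elements, or pre-rotated with a VEXT when the used elements straddle both
  // halves.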
4752 for (unsigned i = 0; i < SourceVecs.size(); ++i) { 4753 if (SourceVecs[i].getValueType() == VT) { 4754 // No VEXT necessary 4755 ShuffleSrcs[i] = SourceVecs[i]; 4756 VEXTOffsets[i] = 0; 4757 continue; 4758 } else if (SourceVecs[i].getValueType().getVectorNumElements() < NumElts) { 4759 // It probably isn't worth padding out a smaller vector just to 4760 // break it down again in a shuffle. 4761 return SDValue(); 4762 } 4763 4764 // Since only 64-bit and 128-bit vectors are legal on ARM and 4765 // we've eliminated the other cases... 4766 assert(SourceVecs[i].getValueType().getVectorNumElements() == 2*NumElts && 4767 "unexpected vector sizes in ReconstructShuffle"); 4768 4769 if (MaxElts[i] - MinElts[i] >= NumElts) { 4770 // Span too large for a VEXT to cope 4771 return SDValue(); 4772 } 4773 4774 if (MinElts[i] >= NumElts) { 4775 // The extraction can just take the second half 4776 VEXTOffsets[i] = NumElts; 4777 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4778 SourceVecs[i], 4779 DAG.getIntPtrConstant(NumElts)); 4780 } else if (MaxElts[i] < NumElts) { 4781 // The extraction can just take the first half 4782 VEXTOffsets[i] = 0; 4783 ShuffleSrcs[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4784 SourceVecs[i], 4785 DAG.getIntPtrConstant(0)); 4786 } else { 4787 // An actual VEXT is needed 4788 VEXTOffsets[i] = MinElts[i]; 4789 SDValue VEXTSrc1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4790 SourceVecs[i], 4791 DAG.getIntPtrConstant(0)); 4792 SDValue VEXTSrc2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, 4793 SourceVecs[i], 4794 DAG.getIntPtrConstant(NumElts)); 4795 ShuffleSrcs[i] = DAG.getNode(ARMISD::VEXT, dl, VT, VEXTSrc1, VEXTSrc2, 4796 DAG.getConstant(VEXTOffsets[i], MVT::i32)); 4797 } 4798 } 4799 4800 SmallVector<int, 8> Mask; 4801 4802 for (unsigned i = 0; i < NumElts; ++i) { 4803 SDValue Entry = Op.getOperand(i); 4804 if (Entry.getOpcode() == ISD::UNDEF) { 4805 Mask.push_back(-1); 4806 continue; 4807 } 4808 4809 SDValue ExtractVec = Entry.getOperand(0); 4810 int ExtractElt = cast<ConstantSDNode>(Op.getOperand(i) 4811 .getOperand(1))->getSExtValue(); 4812 if (ExtractVec == SourceVecs[0]) { 4813 Mask.push_back(ExtractElt - VEXTOffsets[0]); 4814 } else { 4815 Mask.push_back(ExtractElt + NumElts - VEXTOffsets[1]); 4816 } 4817 } 4818 4819 // Final check before we try to produce nonsense... 4820 if (isShuffleMaskLegal(Mask, VT)) 4821 return DAG.getVectorShuffle(VT, dl, ShuffleSrcs[0], ShuffleSrcs[1], 4822 &Mask[0]); 4823 4824 return SDValue(); 4825} 4826 4827/// isShuffleMaskLegal - Targets can use this to indicate that they only 4828/// support *some* VECTOR_SHUFFLE operations, those with specific masks. 4829/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values 4830/// are assumed to be legal. 4831bool 4832ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M, 4833 EVT VT) const { 4834 if (VT.getVectorNumElements() == 4 && 4835 (VT.is128BitVector() || VT.is64BitVector())) { 4836 unsigned PFIndexes[4]; 4837 for (unsigned i = 0; i != 4; ++i) { 4838 if (M[i] < 0) 4839 PFIndexes[i] = 8; 4840 else 4841 PFIndexes[i] = M[i]; 4842 } 4843 4844 // Compute the index in the perfect shuffle table. 
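  // Each mask entry is a digit in the range [0, 8] (8 encodes "undef"), so the
  // four digits are packed in base 9. As a quick illustrative check, the mask
  // <2, 3, 0, 1> maps to 2*729 + 3*81 + 0*9 + 1 = 1702; the cost of the canned
  // expansion lives in the top two bits of the table entry.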
4845 unsigned PFTableIndex = 4846 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 4847 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 4848 unsigned Cost = (PFEntry >> 30); 4849 4850 if (Cost <= 4) 4851 return true; 4852 } 4853 4854 bool ReverseVEXT; 4855 unsigned Imm, WhichResult; 4856 4857 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 4858 return (EltSize >= 32 || 4859 ShuffleVectorSDNode::isSplatMask(&M[0], VT) || 4860 isVREVMask(M, VT, 64) || 4861 isVREVMask(M, VT, 32) || 4862 isVREVMask(M, VT, 16) || 4863 isVEXTMask(M, VT, ReverseVEXT, Imm) || 4864 isVTBLMask(M, VT) || 4865 isVTRNMask(M, VT, WhichResult) || 4866 isVUZPMask(M, VT, WhichResult) || 4867 isVZIPMask(M, VT, WhichResult) || 4868 isVTRN_v_undef_Mask(M, VT, WhichResult) || 4869 isVUZP_v_undef_Mask(M, VT, WhichResult) || 4870 isVZIP_v_undef_Mask(M, VT, WhichResult) || 4871 ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(M, VT))); 4872} 4873 4874/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 4875/// the specified operations to build the shuffle. 4876static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 4877 SDValue RHS, SelectionDAG &DAG, 4878 DebugLoc dl) { 4879 unsigned OpNum = (PFEntry >> 26) & 0x0F; 4880 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 4881 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 4882 4883 enum { 4884 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 4885 OP_VREV, 4886 OP_VDUP0, 4887 OP_VDUP1, 4888 OP_VDUP2, 4889 OP_VDUP3, 4890 OP_VEXT1, 4891 OP_VEXT2, 4892 OP_VEXT3, 4893 OP_VUZPL, // VUZP, left result 4894 OP_VUZPR, // VUZP, right result 4895 OP_VZIPL, // VZIP, left result 4896 OP_VZIPR, // VZIP, right result 4897 OP_VTRNL, // VTRN, left result 4898 OP_VTRNR // VTRN, right result 4899 }; 4900 4901 if (OpNum == OP_COPY) { 4902 if (LHSID == (1*9+2)*9+3) return LHS; 4903 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 4904 return RHS; 4905 } 4906 4907 SDValue OpLHS, OpRHS; 4908 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 4909 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 4910 EVT VT = OpLHS.getValueType(); 4911 4912 switch (OpNum) { 4913 default: llvm_unreachable("Unknown shuffle opcode!"); 4914 case OP_VREV: 4915 // VREV divides the vector in half and swaps within the half. 
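    // For example (illustrative): with 32-bit elements a v4i32 <a, b, c, d>
    // becomes <b, a, d, c> via VREV64.32, while with 16-bit elements VREV32.16
    // swaps each adjacent pair instead.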
4916 if (VT.getVectorElementType() == MVT::i32 || 4917 VT.getVectorElementType() == MVT::f32) 4918 return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS); 4919 // vrev <4 x i16> -> VREV32 4920 if (VT.getVectorElementType() == MVT::i16) 4921 return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS); 4922 // vrev <4 x i8> -> VREV16 4923 assert(VT.getVectorElementType() == MVT::i8); 4924 return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS); 4925 case OP_VDUP0: 4926 case OP_VDUP1: 4927 case OP_VDUP2: 4928 case OP_VDUP3: 4929 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, 4930 OpLHS, DAG.getConstant(OpNum-OP_VDUP0, MVT::i32)); 4931 case OP_VEXT1: 4932 case OP_VEXT2: 4933 case OP_VEXT3: 4934 return DAG.getNode(ARMISD::VEXT, dl, VT, 4935 OpLHS, OpRHS, 4936 DAG.getConstant(OpNum-OP_VEXT1+1, MVT::i32)); 4937 case OP_VUZPL: 4938 case OP_VUZPR: 4939 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 4940 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL); 4941 case OP_VZIPL: 4942 case OP_VZIPR: 4943 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 4944 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL); 4945 case OP_VTRNL: 4946 case OP_VTRNR: 4947 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 4948 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL); 4949 } 4950} 4951 4952static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, 4953 ArrayRef<int> ShuffleMask, 4954 SelectionDAG &DAG) { 4955 // Check to see if we can use the VTBL instruction. 4956 SDValue V1 = Op.getOperand(0); 4957 SDValue V2 = Op.getOperand(1); 4958 DebugLoc DL = Op.getDebugLoc(); 4959 4960 SmallVector<SDValue, 8> VTBLMask; 4961 for (ArrayRef<int>::iterator 4962 I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I) 4963 VTBLMask.push_back(DAG.getConstant(*I, MVT::i32)); 4964 4965 if (V2.getNode()->getOpcode() == ISD::UNDEF) 4966 return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1, 4967 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8, 4968 &VTBLMask[0], 8)); 4969 4970 return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2, 4971 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i8, 4972 &VTBLMask[0], 8)); 4973} 4974 4975static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op, 4976 SelectionDAG &DAG) { 4977 DebugLoc DL = Op.getDebugLoc(); 4978 SDValue OpLHS = Op.getOperand(0); 4979 EVT VT = OpLHS.getValueType(); 4980 4981 assert((VT == MVT::v8i16 || VT == MVT::v16i8) && 4982 "Expect an v8i16/v16i8 type"); 4983 OpLHS = DAG.getNode(ARMISD::VREV64, DL, VT, OpLHS); 4984 // For a v16i8 type: After the VREV, we have got <8, ...15, 8, ..., 0>. Now, 4985 // extract the first 8 bytes into the top double word and the last 8 bytes 4986 // into the bottom double word. The v8i16 case is similar. 4987 unsigned ExtractNum = (VT == MVT::v16i8) ? 8 : 4; 4988 return DAG.getNode(ARMISD::VEXT, DL, VT, OpLHS, OpLHS, 4989 DAG.getConstant(ExtractNum, MVT::i32)); 4990} 4991 4992static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { 4993 SDValue V1 = Op.getOperand(0); 4994 SDValue V2 = Op.getOperand(1); 4995 DebugLoc dl = Op.getDebugLoc(); 4996 EVT VT = Op.getValueType(); 4997 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); 4998 4999 // Convert shuffles that are directly supported on NEON to target-specific 5000 // DAG nodes, instead of keeping them as shuffles and matching them again 5001 // during code selection. This is more efficient and avoids the possibility 5002 // of inconsistencies between legalization and selection. 
5003 // FIXME: floating-point vectors should be canonicalized to integer vectors 5004 // of the same type so that they get CSEd properly. 5005 ArrayRef<int> ShuffleMask = SVN->getMask(); 5006 5007 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 5008 if (EltSize <= 32) { 5009 if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) { 5010 int Lane = SVN->getSplatIndex(); 5011 // If this is an undef splat, generate it via "just" vdup, if possible. 5012 if (Lane == -1) Lane = 0; 5013 5014 // Test if V1 is a SCALAR_TO_VECTOR. 5015 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) { 5016 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 5017 } 5018 // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR 5019 // (and probably will turn into a SCALAR_TO_VECTOR once legalization 5020 // reaches it). 5021 if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR && 5022 !isa<ConstantSDNode>(V1.getOperand(0))) { 5023 bool IsScalarToVector = true; 5024 for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i) 5025 if (V1.getOperand(i).getOpcode() != ISD::UNDEF) { 5026 IsScalarToVector = false; 5027 break; 5028 } 5029 if (IsScalarToVector) 5030 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0)); 5031 } 5032 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1, 5033 DAG.getConstant(Lane, MVT::i32)); 5034 } 5035 5036 bool ReverseVEXT; 5037 unsigned Imm; 5038 if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) { 5039 if (ReverseVEXT) 5040 std::swap(V1, V2); 5041 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2, 5042 DAG.getConstant(Imm, MVT::i32)); 5043 } 5044 5045 if (isVREVMask(ShuffleMask, VT, 64)) 5046 return DAG.getNode(ARMISD::VREV64, dl, VT, V1); 5047 if (isVREVMask(ShuffleMask, VT, 32)) 5048 return DAG.getNode(ARMISD::VREV32, dl, VT, V1); 5049 if (isVREVMask(ShuffleMask, VT, 16)) 5050 return DAG.getNode(ARMISD::VREV16, dl, VT, V1); 5051 5052 if (V2->getOpcode() == ISD::UNDEF && 5053 isSingletonVEXTMask(ShuffleMask, VT, Imm)) { 5054 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1, 5055 DAG.getConstant(Imm, MVT::i32)); 5056 } 5057 5058 // Check for Neon shuffles that modify both input vectors in place. 5059 // If both results are used, i.e., if there are two shuffles with the same 5060 // source operands and with masks corresponding to both results of one of 5061 // these operations, DAG memoization will ensure that a single node is 5062 // used for both shuffles.
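    // For example (sketch): two v8i8 shuffles of the same operands with masks
    // <0, 8, 2, 10, 4, 12, 6, 14> and <1, 9, 3, 11, 5, 13, 7, 15> describe the
    // two results of one VTRN.8, so both should end up reading from the same
    // ARMISD::VTRN node.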
5063 unsigned WhichResult; 5064 if (isVTRNMask(ShuffleMask, VT, WhichResult)) 5065 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 5066 V1, V2).getValue(WhichResult); 5067 if (isVUZPMask(ShuffleMask, VT, WhichResult)) 5068 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 5069 V1, V2).getValue(WhichResult); 5070 if (isVZIPMask(ShuffleMask, VT, WhichResult)) 5071 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 5072 V1, V2).getValue(WhichResult); 5073 5074 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) 5075 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT), 5076 V1, V1).getValue(WhichResult); 5077 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 5078 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT), 5079 V1, V1).getValue(WhichResult); 5080 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) 5081 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT), 5082 V1, V1).getValue(WhichResult); 5083 } 5084 5085 // If the shuffle is not directly supported and it has 4 elements, use 5086 // the PerfectShuffle-generated table to synthesize it from other shuffles. 5087 unsigned NumElts = VT.getVectorNumElements(); 5088 if (NumElts == 4) { 5089 unsigned PFIndexes[4]; 5090 for (unsigned i = 0; i != 4; ++i) { 5091 if (ShuffleMask[i] < 0) 5092 PFIndexes[i] = 8; 5093 else 5094 PFIndexes[i] = ShuffleMask[i]; 5095 } 5096 5097 // Compute the index in the perfect shuffle table. 5098 unsigned PFTableIndex = 5099 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3]; 5100 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 5101 unsigned Cost = (PFEntry >> 30); 5102 5103 if (Cost <= 4) 5104 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 5105 } 5106 5107 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs. 5108 if (EltSize >= 32) { 5109 // Do the expansion with floating-point types, since that is what the VFP 5110 // registers are defined to use, and since i64 is not legal. 5111 EVT EltVT = EVT::getFloatingPointVT(EltSize); 5112 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); 5113 V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1); 5114 V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2); 5115 SmallVector<SDValue, 8> Ops; 5116 for (unsigned i = 0; i < NumElts; ++i) { 5117 if (ShuffleMask[i] < 0) 5118 Ops.push_back(DAG.getUNDEF(EltVT)); 5119 else 5120 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, 5121 ShuffleMask[i] < (int)NumElts ? V1 : V2, 5122 DAG.getConstant(ShuffleMask[i] & (NumElts-1), 5123 MVT::i32))); 5124 } 5125 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); 5126 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 5127 } 5128 5129 if ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(ShuffleMask, VT)) 5130 return LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(Op, DAG); 5131 5132 if (VT == MVT::v8i8) { 5133 SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG); 5134 if (NewOp.getNode()) 5135 return NewOp; 5136 } 5137 5138 return SDValue(); 5139} 5140 5141static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 5142 // INSERT_VECTOR_ELT is legal only for immediate indexes. 5143 SDValue Lane = Op.getOperand(2); 5144 if (!isa<ConstantSDNode>(Lane)) 5145 return SDValue(); 5146 5147 return Op; 5148} 5149 5150static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { 5151 // EXTRACT_VECTOR_ELT is legal only for immediate indexes. 
5152 SDValue Lane = Op.getOperand(1); 5153 if (!isa<ConstantSDNode>(Lane)) 5154 return SDValue(); 5155 5156 SDValue Vec = Op.getOperand(0); 5157 if (Op.getValueType() == MVT::i32 && 5158 Vec.getValueType().getVectorElementType().getSizeInBits() < 32) { 5159 DebugLoc dl = Op.getDebugLoc(); 5160 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane); 5161 } 5162 5163 return Op; 5164} 5165 5166static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { 5167 // The only time a CONCAT_VECTORS operation can have legal types is when 5168 // two 64-bit vectors are concatenated to a 128-bit vector. 5169 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 && 5170 "unexpected CONCAT_VECTORS"); 5171 DebugLoc dl = Op.getDebugLoc(); 5172 SDValue Val = DAG.getUNDEF(MVT::v2f64); 5173 SDValue Op0 = Op.getOperand(0); 5174 SDValue Op1 = Op.getOperand(1); 5175 if (Op0.getOpcode() != ISD::UNDEF) 5176 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 5177 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0), 5178 DAG.getIntPtrConstant(0)); 5179 if (Op1.getOpcode() != ISD::UNDEF) 5180 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, 5181 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1), 5182 DAG.getIntPtrConstant(1)); 5183 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val); 5184} 5185 5186/// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each 5187/// element has been zero/sign-extended, depending on the isSigned parameter, 5188/// from an integer type half its size. 5189static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, 5190 bool isSigned) { 5191 // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32. 5192 EVT VT = N->getValueType(0); 5193 if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) { 5194 SDNode *BVN = N->getOperand(0).getNode(); 5195 if (BVN->getValueType(0) != MVT::v4i32 || 5196 BVN->getOpcode() != ISD::BUILD_VECTOR) 5197 return false; 5198 unsigned LoElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0; 5199 unsigned HiElt = 1 - LoElt; 5200 ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt)); 5201 ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt)); 5202 ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2)); 5203 ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2)); 5204 if (!Lo0 || !Hi0 || !Lo1 || !Hi1) 5205 return false; 5206 if (isSigned) { 5207 if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 && 5208 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32) 5209 return true; 5210 } else { 5211 if (Hi0->isNullValue() && Hi1->isNullValue()) 5212 return true; 5213 } 5214 return false; 5215 } 5216 5217 if (N->getOpcode() != ISD::BUILD_VECTOR) 5218 return false; 5219 5220 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 5221 SDNode *Elt = N->getOperand(i).getNode(); 5222 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) { 5223 unsigned EltSize = VT.getVectorElementType().getSizeInBits(); 5224 unsigned HalfSize = EltSize / 2; 5225 if (isSigned) { 5226 if (!isIntN(HalfSize, C->getSExtValue())) 5227 return false; 5228 } else { 5229 if (!isUIntN(HalfSize, C->getZExtValue())) 5230 return false; 5231 } 5232 continue; 5233 } 5234 return false; 5235 } 5236 5237 return true; 5238} 5239 5240/// isSignExtended - Check if a node is a vector value that is sign-extended 5241/// or a constant BUILD_VECTOR with sign-extended elements. 
5242static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { 5243 if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N)) 5244 return true; 5245 if (isExtendedBUILD_VECTOR(N, DAG, true)) 5246 return true; 5247 return false; 5248} 5249 5250/// isZeroExtended - Check if a node is a vector value that is zero-extended 5251/// or a constant BUILD_VECTOR with zero-extended elements. 5252static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { 5253 if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N)) 5254 return true; 5255 if (isExtendedBUILD_VECTOR(N, DAG, false)) 5256 return true; 5257 return false; 5258} 5259 5260static EVT getExtensionTo64Bits(const EVT &OrigVT) { 5261 if (OrigVT.getSizeInBits() >= 64) 5262 return OrigVT; 5263 5264 assert(OrigVT.isSimple() && "Expecting a simple value type"); 5265 5266 MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy; 5267 switch (OrigSimpleTy) { 5268 default: llvm_unreachable("Unexpected Vector Type"); 5269 case MVT::v2i8: 5270 case MVT::v2i16: 5271 return MVT::v2i32; 5272 case MVT::v4i8: 5273 return MVT::v4i16; 5274 } 5275} 5276 5277/// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total 5278/// value size to 64 bits. We need a 64-bit D register as an operand to VMULL. 5279/// We insert the required extension here to get the vector to fill a D register. 5280static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG, 5281 const EVT &OrigTy, 5282 const EVT &ExtTy, 5283 unsigned ExtOpcode) { 5284 // The vector originally had a size of OrigTy. It was then extended to ExtTy. 5285 // We expect the ExtTy to be 128-bits total. If the OrigTy is less than 5286 // 64-bits we need to insert a new extension so that it will be 64-bits. 5287 assert(ExtTy.is128BitVector() && "Unexpected extension size"); 5288 if (OrigTy.getSizeInBits() >= 64) 5289 return N; 5290 5291 // Must extend size to at least 64 bits to be used as an operand for VMULL. 5292 EVT NewVT = getExtensionTo64Bits(OrigTy); 5293 5294 return DAG.getNode(ExtOpcode, N->getDebugLoc(), NewVT, N); 5295} 5296 5297/// SkipLoadExtensionForVMULL - return a load of the original vector size that 5298/// does not do any sign/zero extension. If the original vector is less 5299/// than 64 bits, an appropriate extension will be added after the load to 5300/// reach a total size of 64 bits. We have to add the extension separately 5301/// because ARM does not have a sign/zero extending load for vectors. 5302static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) { 5303 EVT ExtendedTy = getExtensionTo64Bits(LD->getMemoryVT()); 5304 5305 // The load already has the right type. 5306 if (ExtendedTy == LD->getMemoryVT()) 5307 return DAG.getLoad(LD->getMemoryVT(), LD->getDebugLoc(), LD->getChain(), 5308 LD->getBasePtr(), LD->getPointerInfo(), LD->isVolatile(), 5309 LD->isNonTemporal(), LD->isInvariant(), 5310 LD->getAlignment()); 5311 5312 // We need to create a zextload/sextload. We cannot just create a load 5313 // followed by a zext/zext node because LowerMUL is also run during normal 5314 // operation legalization where we can't create illegal types. 
5315 return DAG.getExtLoad(LD->getExtensionType(), LD->getDebugLoc(), ExtendedTy, 5316 LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(), 5317 LD->getMemoryVT(), LD->isVolatile(), 5318 LD->isNonTemporal(), LD->getAlignment()); 5319} 5320 5321/// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND, 5322/// extending load, or BUILD_VECTOR with extended elements, return the 5323/// unextended value. The unextended vector should be 64 bits so that it can 5324/// be used as an operand to a VMULL instruction. If the original vector size 5325/// before extension is less than 64 bits we add a an extension to resize 5326/// the vector to 64 bits. 5327static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) { 5328 if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND) 5329 return AddRequiredExtensionForVMULL(N->getOperand(0), DAG, 5330 N->getOperand(0)->getValueType(0), 5331 N->getValueType(0), 5332 N->getOpcode()); 5333 5334 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) 5335 return SkipLoadExtensionForVMULL(LD, DAG); 5336 5337 // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will 5338 // have been legalized as a BITCAST from v4i32. 5339 if (N->getOpcode() == ISD::BITCAST) { 5340 SDNode *BVN = N->getOperand(0).getNode(); 5341 assert(BVN->getOpcode() == ISD::BUILD_VECTOR && 5342 BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR"); 5343 unsigned LowElt = DAG.getTargetLoweringInfo().isBigEndian() ? 1 : 0; 5344 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), MVT::v2i32, 5345 BVN->getOperand(LowElt), BVN->getOperand(LowElt+2)); 5346 } 5347 // Construct a new BUILD_VECTOR with elements truncated to half the size. 5348 assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR"); 5349 EVT VT = N->getValueType(0); 5350 unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2; 5351 unsigned NumElts = VT.getVectorNumElements(); 5352 MVT TruncVT = MVT::getIntegerVT(EltSize); 5353 SmallVector<SDValue, 8> Ops; 5354 for (unsigned i = 0; i != NumElts; ++i) { 5355 ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i)); 5356 const APInt &CInt = C->getAPIntValue(); 5357 // Element types smaller than 32 bits are not legal, so use i32 elements. 5358 // The values are implicitly truncated so sext vs. zext doesn't matter. 5359 Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), MVT::i32)); 5360 } 5361 return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), 5362 MVT::getVectorVT(TruncVT, NumElts), Ops.data(), NumElts); 5363} 5364 5365static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) { 5366 unsigned Opcode = N->getOpcode(); 5367 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 5368 SDNode *N0 = N->getOperand(0).getNode(); 5369 SDNode *N1 = N->getOperand(1).getNode(); 5370 return N0->hasOneUse() && N1->hasOneUse() && 5371 isSignExtended(N0, DAG) && isSignExtended(N1, DAG); 5372 } 5373 return false; 5374} 5375 5376static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) { 5377 unsigned Opcode = N->getOpcode(); 5378 if (Opcode == ISD::ADD || Opcode == ISD::SUB) { 5379 SDNode *N0 = N->getOperand(0).getNode(); 5380 SDNode *N1 = N->getOperand(1).getNode(); 5381 return N0->hasOneUse() && N1->hasOneUse() && 5382 isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG); 5383 } 5384 return false; 5385} 5386 5387static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { 5388 // Multiplications are only custom-lowered for 128-bit vectors so that 5389 // VMULL can be detected. Otherwise v2i64 multiplications are not legal. 
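  // For example (illustrative): a v8i16 multiply whose operands are both
  // zero-extended from v8i8 can be selected as a single VMULL.u8 on the narrow
  // 64-bit operands, rather than widening both inputs and doing a full-width
  // multiply.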
5390 EVT VT = Op.getValueType(); 5391 assert(VT.is128BitVector() && VT.isInteger() && 5392 "unexpected type for custom-lowering ISD::MUL"); 5393 SDNode *N0 = Op.getOperand(0).getNode(); 5394 SDNode *N1 = Op.getOperand(1).getNode(); 5395 unsigned NewOpc = 0; 5396 bool isMLA = false; 5397 bool isN0SExt = isSignExtended(N0, DAG); 5398 bool isN1SExt = isSignExtended(N1, DAG); 5399 if (isN0SExt && isN1SExt) 5400 NewOpc = ARMISD::VMULLs; 5401 else { 5402 bool isN0ZExt = isZeroExtended(N0, DAG); 5403 bool isN1ZExt = isZeroExtended(N1, DAG); 5404 if (isN0ZExt && isN1ZExt) 5405 NewOpc = ARMISD::VMULLu; 5406 else if (isN1SExt || isN1ZExt) { 5407 // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these 5408 // into (s/zext A * s/zext C) + (s/zext B * s/zext C) 5409 if (isN1SExt && isAddSubSExt(N0, DAG)) { 5410 NewOpc = ARMISD::VMULLs; 5411 isMLA = true; 5412 } else if (isN1ZExt && isAddSubZExt(N0, DAG)) { 5413 NewOpc = ARMISD::VMULLu; 5414 isMLA = true; 5415 } else if (isN0ZExt && isAddSubZExt(N1, DAG)) { 5416 std::swap(N0, N1); 5417 NewOpc = ARMISD::VMULLu; 5418 isMLA = true; 5419 } 5420 } 5421 5422 if (!NewOpc) { 5423 if (VT == MVT::v2i64) 5424 // Fall through to expand this. It is not legal. 5425 return SDValue(); 5426 else 5427 // Other vector multiplications are legal. 5428 return Op; 5429 } 5430 } 5431 5432 // Legalize to a VMULL instruction. 5433 DebugLoc DL = Op.getDebugLoc(); 5434 SDValue Op0; 5435 SDValue Op1 = SkipExtensionForVMULL(N1, DAG); 5436 if (!isMLA) { 5437 Op0 = SkipExtensionForVMULL(N0, DAG); 5438 assert(Op0.getValueType().is64BitVector() && 5439 Op1.getValueType().is64BitVector() && 5440 "unexpected types for extended operands to VMULL"); 5441 return DAG.getNode(NewOpc, DL, VT, Op0, Op1); 5442 } 5443 5444 // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during 5445 // isel lowering to take advantage of no-stall back to back vmul + vmla. 5446 // vmull q0, d4, d6 5447 // vmlal q0, d5, d6 5448 // is faster than 5449 // vaddl q0, d4, d5 5450 // vmovl q1, d6 5451 // vmul q0, q0, q1 5452 SDValue N00 = SkipExtensionForVMULL(N0->getOperand(0).getNode(), DAG); 5453 SDValue N01 = SkipExtensionForVMULL(N0->getOperand(1).getNode(), DAG); 5454 EVT Op1VT = Op1.getValueType(); 5455 return DAG.getNode(N0->getOpcode(), DL, VT, 5456 DAG.getNode(NewOpc, DL, VT, 5457 DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1), 5458 DAG.getNode(NewOpc, DL, VT, 5459 DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1)); 5460} 5461 5462static SDValue 5463LowerSDIV_v4i8(SDValue X, SDValue Y, DebugLoc dl, SelectionDAG &DAG) { 5464 // Convert to float 5465 // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo)); 5466 // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo)); 5467 X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X); 5468 Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y); 5469 X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X); 5470 Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y); 5471 // Get reciprocal estimate. 5472 // float4 recip = vrecpeq_f32(yf); 5473 Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 5474 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), Y); 5475 // Because char has a smaller range than uchar, we can actually get away 5476 // without any newton steps. This requires that we use a weird bias 5477 // of 0xb000, however (again, this has been exhaustively tested). 
5478 // float4 result = as_float4(as_int4(xf*recip) + 0xb000); 5479 X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y); 5480 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X); 5481 Y = DAG.getConstant(0xb000, MVT::i32); 5482 Y = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Y, Y, Y, Y); 5483 X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y); 5484 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X); 5485 // Convert back to short. 5486 X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X); 5487 X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X); 5488 return X; 5489} 5490 5491static SDValue 5492LowerSDIV_v4i16(SDValue N0, SDValue N1, DebugLoc dl, SelectionDAG &DAG) { 5493 SDValue N2; 5494 // Convert to float. 5495 // float4 yf = vcvt_f32_s32(vmovl_s16(y)); 5496 // float4 xf = vcvt_f32_s32(vmovl_s16(x)); 5497 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0); 5498 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1); 5499 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); 5500 N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); 5501 5502 // Use reciprocal estimate and one refinement step. 5503 // float4 recip = vrecpeq_f32(yf); 5504 // recip *= vrecpsq_f32(yf, recip); 5505 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 5506 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), N1); 5507 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 5508 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32), 5509 N1, N2); 5510 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 5511 // Because short has a smaller range than ushort, we can actually get away 5512 // with only a single newton step. This requires that we use a weird bias 5513 // of 89, however (again, this has been exhaustively tested). 5514 // float4 result = as_float4(as_int4(xf*recip) + 0x89); 5515 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); 5516 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); 5517 N1 = DAG.getConstant(0x89, MVT::i32); 5518 N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1); 5519 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); 5520 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); 5521 // Convert back to integer and return. 
5522 // return vmovn_s32(vcvt_s32_f32(result)); 5523 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); 5524 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); 5525 return N0; 5526} 5527 5528static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) { 5529 EVT VT = Op.getValueType(); 5530 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 5531 "unexpected type for custom-lowering ISD::SDIV"); 5532 5533 DebugLoc dl = Op.getDebugLoc(); 5534 SDValue N0 = Op.getOperand(0); 5535 SDValue N1 = Op.getOperand(1); 5536 SDValue N2, N3; 5537 5538 if (VT == MVT::v8i8) { 5539 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0); 5540 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1); 5541 5542 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 5543 DAG.getIntPtrConstant(4)); 5544 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 5545 DAG.getIntPtrConstant(4)); 5546 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 5547 DAG.getIntPtrConstant(0)); 5548 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 5549 DAG.getIntPtrConstant(0)); 5550 5551 N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16 5552 N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16 5553 5554 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 5555 N0 = LowerCONCAT_VECTORS(N0, DAG); 5556 5557 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0); 5558 return N0; 5559 } 5560 return LowerSDIV_v4i16(N0, N1, dl, DAG); 5561} 5562 5563static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) { 5564 EVT VT = Op.getValueType(); 5565 assert((VT == MVT::v4i16 || VT == MVT::v8i8) && 5566 "unexpected type for custom-lowering ISD::UDIV"); 5567 5568 DebugLoc dl = Op.getDebugLoc(); 5569 SDValue N0 = Op.getOperand(0); 5570 SDValue N1 = Op.getOperand(1); 5571 SDValue N2, N3; 5572 5573 if (VT == MVT::v8i8) { 5574 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0); 5575 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1); 5576 5577 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 5578 DAG.getIntPtrConstant(4)); 5579 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 5580 DAG.getIntPtrConstant(4)); 5581 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0, 5582 DAG.getIntPtrConstant(0)); 5583 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1, 5584 DAG.getIntPtrConstant(0)); 5585 5586 N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16 5587 N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16 5588 5589 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2); 5590 N0 = LowerCONCAT_VECTORS(N0, DAG); 5591 5592 N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8, 5593 DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, MVT::i32), 5594 N0); 5595 return N0; 5596 } 5597 5598 // v4i16 sdiv ... Convert to float. 5599 // float4 yf = vcvt_f32_s32(vmovl_u16(y)); 5600 // float4 xf = vcvt_f32_s32(vmovl_u16(x)); 5601 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0); 5602 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1); 5603 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0); 5604 SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1); 5605 5606 // Use reciprocal estimate and two refinement steps. 
5607 // float4 recip = vrecpeq_f32(yf); 5608 // recip *= vrecpsq_f32(yf, recip); 5609 // recip *= vrecpsq_f32(yf, recip); 5610 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 5611 DAG.getConstant(Intrinsic::arm_neon_vrecpe, MVT::i32), BN1); 5612 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 5613 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32), 5614 BN1, N2); 5615 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 5616 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32, 5617 DAG.getConstant(Intrinsic::arm_neon_vrecps, MVT::i32), 5618 BN1, N2); 5619 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2); 5620 // Simply multiplying by the reciprocal estimate can leave us a few ulps 5621 // too low, so we add 2 ulps (exhaustive testing shows that this is enough, 5622 // and that it will never cause us to return an answer too large). 5623 // float4 result = as_float4(as_int4(xf*recip) + 2); 5624 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2); 5625 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0); 5626 N1 = DAG.getConstant(2, MVT::i32); 5627 N1 = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, N1, N1, N1, N1); 5628 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1); 5629 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0); 5630 // Convert back to integer and return. 5631 // return vmovn_u32(vcvt_s32_f32(result)); 5632 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0); 5633 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0); 5634 return N0; 5635} 5636 5637static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { 5638 EVT VT = Op.getNode()->getValueType(0); 5639 SDVTList VTs = DAG.getVTList(VT, MVT::i32); 5640 5641 unsigned Opc; 5642 bool ExtraOp = false; 5643 switch (Op.getOpcode()) { 5644 default: llvm_unreachable("Invalid code"); 5645 case ISD::ADDC: Opc = ARMISD::ADDC; break; 5646 case ISD::ADDE: Opc = ARMISD::ADDE; ExtraOp = true; break; 5647 case ISD::SUBC: Opc = ARMISD::SUBC; break; 5648 case ISD::SUBE: Opc = ARMISD::SUBE; ExtraOp = true; break; 5649 } 5650 5651 if (!ExtraOp) 5652 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), 5653 Op.getOperand(1)); 5654 return DAG.getNode(Opc, Op->getDebugLoc(), VTs, Op.getOperand(0), 5655 Op.getOperand(1), Op.getOperand(2)); 5656} 5657 5658static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) { 5659 // Monotonic load/store is legal for all targets. 5660 if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic) 5661 return Op; 5662 5663 // Acquire/Release load/store is not legal for targets without a 5664 // dmb or equivalent available.
5665 return SDValue(); 5666} 5667 5668 5669static void 5670ReplaceATOMIC_OP_64(SDNode *Node, SmallVectorImpl<SDValue>& Results, 5671 SelectionDAG &DAG, unsigned NewOp) { 5672 DebugLoc dl = Node->getDebugLoc(); 5673 assert (Node->getValueType(0) == MVT::i64 && 5674 "Only know how to expand i64 atomics"); 5675 5676 SmallVector<SDValue, 6> Ops; 5677 Ops.push_back(Node->getOperand(0)); // Chain 5678 Ops.push_back(Node->getOperand(1)); // Ptr 5679 // Low part of Val1 5680 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 5681 Node->getOperand(2), DAG.getIntPtrConstant(0))); 5682 // High part of Val1 5683 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 5684 Node->getOperand(2), DAG.getIntPtrConstant(1))); 5685 if (NewOp == ARMISD::ATOMCMPXCHG64_DAG) { 5686 // Low part of Val2 5687 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 5688 Node->getOperand(3), DAG.getIntPtrConstant(0))); 5689 // High part of Val2 5690 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, 5691 Node->getOperand(3), DAG.getIntPtrConstant(1))); 5692 } 5693 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 5694 SDValue Result = 5695 DAG.getMemIntrinsicNode(NewOp, dl, Tys, Ops.data(), Ops.size(), MVT::i64, 5696 cast<MemSDNode>(Node)->getMemOperand()); 5697 SDValue OpsF[] = { Result.getValue(0), Result.getValue(1) }; 5698 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, OpsF, 2)); 5699 Results.push_back(Result.getValue(2)); 5700} 5701 5702SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { 5703 switch (Op.getOpcode()) { 5704 default: llvm_unreachable("Don't know how to custom lower this!"); 5705 case ISD::ConstantPool: return LowerConstantPool(Op, DAG); 5706 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG); 5707 case ISD::GlobalAddress: 5708 return Subtarget->isTargetDarwin() ?
LowerGlobalAddressDarwin(Op, DAG) : 5709 LowerGlobalAddressELF(Op, DAG); 5710 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG); 5711 case ISD::SELECT: return LowerSELECT(Op, DAG); 5712 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 5713 case ISD::BR_CC: return LowerBR_CC(Op, DAG); 5714 case ISD::BR_JT: return LowerBR_JT(Op, DAG); 5715 case ISD::VASTART: return LowerVASTART(Op, DAG); 5716 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget); 5717 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget); 5718 case ISD::SINT_TO_FP: 5719 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 5720 case ISD::FP_TO_SINT: 5721 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG); 5722 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); 5723 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 5724 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 5725 case ISD::GLOBAL_OFFSET_TABLE: return LowerGLOBAL_OFFSET_TABLE(Op, DAG); 5726 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG); 5727 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG); 5728 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, 5729 Subtarget); 5730 case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG); 5731 case ISD::SHL: 5732 case ISD::SRL: 5733 case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); 5734 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG); 5735 case ISD::SRL_PARTS: 5736 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG); 5737 case ISD::CTTZ: return LowerCTTZ(Op.getNode(), DAG, Subtarget); 5738 case ISD::CTPOP: return LowerCTPOP(Op.getNode(), DAG, Subtarget); 5739 case ISD::SETCC: return LowerVSETCC(Op, DAG); 5740 case ISD::ConstantFP: return LowerConstantFP(Op, DAG, Subtarget); 5741 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget); 5742 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 5743 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 5744 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 5745 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 5746 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 5747 case ISD::MUL: return LowerMUL(Op, DAG); 5748 case ISD::SDIV: return LowerSDIV(Op, DAG); 5749 case ISD::UDIV: return LowerUDIV(Op, DAG); 5750 case ISD::ADDC: 5751 case ISD::ADDE: 5752 case ISD::SUBC: 5753 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); 5754 case ISD::ATOMIC_LOAD: 5755 case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG); 5756 } 5757} 5758 5759/// ReplaceNodeResults - Replace the results of node with an illegal result 5760/// type with new values built out of custom code. 
5761void ARMTargetLowering::ReplaceNodeResults(SDNode *N, 5762 SmallVectorImpl<SDValue>&Results, 5763 SelectionDAG &DAG) const { 5764 SDValue Res; 5765 switch (N->getOpcode()) { 5766 default: 5767 llvm_unreachable("Don't know how to custom expand this!"); 5768 case ISD::BITCAST: 5769 Res = ExpandBITCAST(N, DAG); 5770 break; 5771 case ISD::SIGN_EXTEND: 5772 case ISD::ZERO_EXTEND: 5773 Res = ExpandVectorExtension(N, DAG); 5774 break; 5775 case ISD::SRL: 5776 case ISD::SRA: 5777 Res = Expand64BitShift(N, DAG, Subtarget); 5778 break; 5779 case ISD::ATOMIC_LOAD_ADD: 5780 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMADD64_DAG); 5781 return; 5782 case ISD::ATOMIC_LOAD_AND: 5783 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMAND64_DAG); 5784 return; 5785 case ISD::ATOMIC_LOAD_NAND: 5786 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMNAND64_DAG); 5787 return; 5788 case ISD::ATOMIC_LOAD_OR: 5789 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMOR64_DAG); 5790 return; 5791 case ISD::ATOMIC_LOAD_SUB: 5792 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMSUB64_DAG); 5793 return; 5794 case ISD::ATOMIC_LOAD_XOR: 5795 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMXOR64_DAG); 5796 return; 5797 case ISD::ATOMIC_SWAP: 5798 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMSWAP64_DAG); 5799 return; 5800 case ISD::ATOMIC_CMP_SWAP: 5801 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMCMPXCHG64_DAG); 5802 return; 5803 case ISD::ATOMIC_LOAD_MIN: 5804 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMMIN64_DAG); 5805 return; 5806 case ISD::ATOMIC_LOAD_UMIN: 5807 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMUMIN64_DAG); 5808 return; 5809 case ISD::ATOMIC_LOAD_MAX: 5810 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMMAX64_DAG); 5811 return; 5812 case ISD::ATOMIC_LOAD_UMAX: 5813 ReplaceATOMIC_OP_64(N, Results, DAG, ARMISD::ATOMUMAX64_DAG); 5814 return; 5815 } 5816 if (Res.getNode()) 5817 Results.push_back(Res); 5818} 5819 5820//===----------------------------------------------------------------------===// 5821// ARM Scheduler Hooks 5822//===----------------------------------------------------------------------===// 5823 5824MachineBasicBlock * 5825ARMTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI, 5826 MachineBasicBlock *BB, 5827 unsigned Size) const { 5828 unsigned dest = MI->getOperand(0).getReg(); 5829 unsigned ptr = MI->getOperand(1).getReg(); 5830 unsigned oldval = MI->getOperand(2).getReg(); 5831 unsigned newval = MI->getOperand(3).getReg(); 5832 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5833 DebugLoc dl = MI->getDebugLoc(); 5834 bool isThumb2 = Subtarget->isThumb2(); 5835 5836 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5837 unsigned scratch = MRI.createVirtualRegister(isThumb2 ? 5838 (const TargetRegisterClass*)&ARM::rGPRRegClass : 5839 (const TargetRegisterClass*)&ARM::GPRRegClass); 5840 5841 if (isThumb2) { 5842 MRI.constrainRegClass(dest, &ARM::rGPRRegClass); 5843 MRI.constrainRegClass(oldval, &ARM::rGPRRegClass); 5844 MRI.constrainRegClass(newval, &ARM::rGPRRegClass); 5845 } 5846 5847 unsigned ldrOpc, strOpc; 5848 switch (Size) { 5849 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 5850 case 1: 5851 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 5852 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 5853 break; 5854 case 2: 5855 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 5856 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 5857 break; 5858 case 4: 5859 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 5860 strOpc = isThumb2 ? 
ARM::t2STREX : ARM::STREX; 5861 break; 5862 } 5863 5864 MachineFunction *MF = BB->getParent(); 5865 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5866 MachineFunction::iterator It = BB; 5867 ++It; // insert the new blocks after the current block 5868 5869 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB); 5870 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB); 5871 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5872 MF->insert(It, loop1MBB); 5873 MF->insert(It, loop2MBB); 5874 MF->insert(It, exitMBB); 5875 5876 // Transfer the remainder of BB and its successor edges to exitMBB. 5877 exitMBB->splice(exitMBB->begin(), BB, 5878 llvm::next(MachineBasicBlock::iterator(MI)), 5879 BB->end()); 5880 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5881 5882 // thisMBB: 5883 // ... 5884 // fallthrough --> loop1MBB 5885 BB->addSuccessor(loop1MBB); 5886 5887 // loop1MBB: 5888 // ldrex dest, [ptr] 5889 // cmp dest, oldval 5890 // bne exitMBB 5891 BB = loop1MBB; 5892 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 5893 if (ldrOpc == ARM::t2LDREX) 5894 MIB.addImm(0); 5895 AddDefaultPred(MIB); 5896 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 5897 .addReg(dest).addReg(oldval)); 5898 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5899 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5900 BB->addSuccessor(loop2MBB); 5901 BB->addSuccessor(exitMBB); 5902 5903 // loop2MBB: 5904 // strex scratch, newval, [ptr] 5905 // cmp scratch, #0 5906 // bne loop1MBB 5907 BB = loop2MBB; 5908 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(newval).addReg(ptr); 5909 if (strOpc == ARM::t2STREX) 5910 MIB.addImm(0); 5911 AddDefaultPred(MIB); 5912 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 5913 .addReg(scratch).addImm(0)); 5914 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 5915 .addMBB(loop1MBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 5916 BB->addSuccessor(loop1MBB); 5917 BB->addSuccessor(exitMBB); 5918 5919 // exitMBB: 5920 // ... 5921 BB = exitMBB; 5922 5923 MI->eraseFromParent(); // The instruction is gone now. 5924 5925 return BB; 5926} 5927 5928MachineBasicBlock * 5929ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, 5930 unsigned Size, unsigned BinOpcode) const { 5931 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 5932 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 5933 5934 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 5935 MachineFunction *MF = BB->getParent(); 5936 MachineFunction::iterator It = BB; 5937 ++It; 5938 5939 unsigned dest = MI->getOperand(0).getReg(); 5940 unsigned ptr = MI->getOperand(1).getReg(); 5941 unsigned incr = MI->getOperand(2).getReg(); 5942 DebugLoc dl = MI->getDebugLoc(); 5943 bool isThumb2 = Subtarget->isThumb2(); 5944 5945 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 5946 if (isThumb2) { 5947 MRI.constrainRegClass(dest, &ARM::rGPRRegClass); 5948 MRI.constrainRegClass(ptr, &ARM::rGPRRegClass); 5949 } 5950 5951 unsigned ldrOpc, strOpc; 5952 switch (Size) { 5953 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 5954 case 1: 5955 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 5956 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 5957 break; 5958 case 2: 5959 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 5960 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 5961 break; 5962 case 4: 5963 ldrOpc = isThumb2 ? 
ARM::t2LDREX : ARM::LDREX; 5964 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 5965 break; 5966 } 5967 5968 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5969 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 5970 MF->insert(It, loopMBB); 5971 MF->insert(It, exitMBB); 5972 5973 // Transfer the remainder of BB and its successor edges to exitMBB. 5974 exitMBB->splice(exitMBB->begin(), BB, 5975 llvm::next(MachineBasicBlock::iterator(MI)), 5976 BB->end()); 5977 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 5978 5979 const TargetRegisterClass *TRC = isThumb2 ? 5980 (const TargetRegisterClass*)&ARM::rGPRRegClass : 5981 (const TargetRegisterClass*)&ARM::GPRRegClass; 5982 unsigned scratch = MRI.createVirtualRegister(TRC); 5983 unsigned scratch2 = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC); 5984 5985 // thisMBB: 5986 // ... 5987 // fallthrough --> loopMBB 5988 BB->addSuccessor(loopMBB); 5989 5990 // loopMBB: 5991 // ldrex dest, ptr 5992 // <binop> scratch2, dest, incr 5993 // strex scratch, scratch2, ptr 5994 // cmp scratch, #0 5995 // bne- loopMBB 5996 // fallthrough --> exitMBB 5997 BB = loopMBB; 5998 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 5999 if (ldrOpc == ARM::t2LDREX) 6000 MIB.addImm(0); 6001 AddDefaultPred(MIB); 6002 if (BinOpcode) { 6003 // operand order needs to go the other way for NAND 6004 if (BinOpcode == ARM::BICrr || BinOpcode == ARM::t2BICrr) 6005 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 6006 addReg(incr).addReg(dest)).addReg(0); 6007 else 6008 AddDefaultPred(BuildMI(BB, dl, TII->get(BinOpcode), scratch2). 6009 addReg(dest).addReg(incr)).addReg(0); 6010 } 6011 6012 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr); 6013 if (strOpc == ARM::t2STREX) 6014 MIB.addImm(0); 6015 AddDefaultPred(MIB); 6016 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 6017 .addReg(scratch).addImm(0)); 6018 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 6019 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 6020 6021 BB->addSuccessor(loopMBB); 6022 BB->addSuccessor(exitMBB); 6023 6024 // exitMBB: 6025 // ... 6026 BB = exitMBB; 6027 6028 MI->eraseFromParent(); // The instruction is gone now. 6029 6030 return BB; 6031} 6032 6033MachineBasicBlock * 6034ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI, 6035 MachineBasicBlock *BB, 6036 unsigned Size, 6037 bool signExtend, 6038 ARMCC::CondCodes Cond) const { 6039 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6040 6041 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6042 MachineFunction *MF = BB->getParent(); 6043 MachineFunction::iterator It = BB; 6044 ++It; 6045 6046 unsigned dest = MI->getOperand(0).getReg(); 6047 unsigned ptr = MI->getOperand(1).getReg(); 6048 unsigned incr = MI->getOperand(2).getReg(); 6049 unsigned oldval = dest; 6050 DebugLoc dl = MI->getDebugLoc(); 6051 bool isThumb2 = Subtarget->isThumb2(); 6052 6053 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 6054 if (isThumb2) { 6055 MRI.constrainRegClass(dest, &ARM::rGPRRegClass); 6056 MRI.constrainRegClass(ptr, &ARM::rGPRRegClass); 6057 } 6058 6059 unsigned ldrOpc, strOpc, extendOpc; 6060 switch (Size) { 6061 default: llvm_unreachable("unsupported size for AtomicCmpSwap!"); 6062 case 1: 6063 ldrOpc = isThumb2 ? ARM::t2LDREXB : ARM::LDREXB; 6064 strOpc = isThumb2 ? ARM::t2STREXB : ARM::STREXB; 6065 extendOpc = isThumb2 ? 
ARM::t2SXTB : ARM::SXTB; 6066 break; 6067 case 2: 6068 ldrOpc = isThumb2 ? ARM::t2LDREXH : ARM::LDREXH; 6069 strOpc = isThumb2 ? ARM::t2STREXH : ARM::STREXH; 6070 extendOpc = isThumb2 ? ARM::t2SXTH : ARM::SXTH; 6071 break; 6072 case 4: 6073 ldrOpc = isThumb2 ? ARM::t2LDREX : ARM::LDREX; 6074 strOpc = isThumb2 ? ARM::t2STREX : ARM::STREX; 6075 extendOpc = 0; 6076 break; 6077 } 6078 6079 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 6080 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 6081 MF->insert(It, loopMBB); 6082 MF->insert(It, exitMBB); 6083 6084 // Transfer the remainder of BB and its successor edges to exitMBB. 6085 exitMBB->splice(exitMBB->begin(), BB, 6086 llvm::next(MachineBasicBlock::iterator(MI)), 6087 BB->end()); 6088 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 6089 6090 const TargetRegisterClass *TRC = isThumb2 ? 6091 (const TargetRegisterClass*)&ARM::rGPRRegClass : 6092 (const TargetRegisterClass*)&ARM::GPRRegClass; 6093 unsigned scratch = MRI.createVirtualRegister(TRC); 6094 unsigned scratch2 = MRI.createVirtualRegister(TRC); 6095 6096 // thisMBB: 6097 // ... 6098 // fallthrough --> loopMBB 6099 BB->addSuccessor(loopMBB); 6100 6101 // loopMBB: 6102 // ldrex dest, ptr 6103 // (sign extend dest, if required) 6104 // cmp dest, incr 6105 // cmov.cond scratch2, incr, dest 6106 // strex scratch, scratch2, ptr 6107 // cmp scratch, #0 6108 // bne- loopMBB 6109 // fallthrough --> exitMBB 6110 BB = loopMBB; 6111 MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr); 6112 if (ldrOpc == ARM::t2LDREX) 6113 MIB.addImm(0); 6114 AddDefaultPred(MIB); 6115 6116 // Sign extend the value, if necessary. 6117 if (signExtend && extendOpc) { 6118 oldval = MRI.createVirtualRegister(&ARM::GPRRegClass); 6119 AddDefaultPred(BuildMI(BB, dl, TII->get(extendOpc), oldval) 6120 .addReg(dest) 6121 .addImm(0)); 6122 } 6123 6124 // Build compare and cmov instructions. 6125 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 6126 .addReg(oldval).addReg(incr)); 6127 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr), scratch2) 6128 .addReg(incr).addReg(oldval).addImm(Cond).addReg(ARM::CPSR); 6129 6130 MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr); 6131 if (strOpc == ARM::t2STREX) 6132 MIB.addImm(0); 6133 AddDefaultPred(MIB); 6134 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 6135 .addReg(scratch).addImm(0)); 6136 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 6137 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 6138 6139 BB->addSuccessor(loopMBB); 6140 BB->addSuccessor(exitMBB); 6141 6142 // exitMBB: 6143 // ... 6144 BB = exitMBB; 6145 6146 MI->eraseFromParent(); // The instruction is gone now. 6147 6148 return BB; 6149} 6150 6151MachineBasicBlock * 6152ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB, 6153 unsigned Op1, unsigned Op2, 6154 bool NeedsCarry, bool IsCmpxchg, 6155 bool IsMinMax, ARMCC::CondCodes CC) const { 6156 // This also handles ATOMIC_SWAP, indicated by Op1==0. 
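  // The 64-bit value is transferred as a register pair with LDREXD/STREXD,
  // so the code below emits the usual load-linked/store-conditional loop
  // over a pair of GPRs.  The cmpxchg and min/max variants add an early
  // branch to exitMBB that skips the store when no update is required.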
6157 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6158 6159 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6160 MachineFunction *MF = BB->getParent(); 6161 MachineFunction::iterator It = BB; 6162 ++It; 6163 6164 unsigned destlo = MI->getOperand(0).getReg(); 6165 unsigned desthi = MI->getOperand(1).getReg(); 6166 unsigned ptr = MI->getOperand(2).getReg(); 6167 unsigned vallo = MI->getOperand(3).getReg(); 6168 unsigned valhi = MI->getOperand(4).getReg(); 6169 DebugLoc dl = MI->getDebugLoc(); 6170 bool isThumb2 = Subtarget->isThumb2(); 6171 6172 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 6173 if (isThumb2) { 6174 MRI.constrainRegClass(destlo, &ARM::rGPRRegClass); 6175 MRI.constrainRegClass(desthi, &ARM::rGPRRegClass); 6176 MRI.constrainRegClass(ptr, &ARM::rGPRRegClass); 6177 } 6178 6179 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 6180 MachineBasicBlock *contBB = 0, *cont2BB = 0; 6181 if (IsCmpxchg || IsMinMax) 6182 contBB = MF->CreateMachineBasicBlock(LLVM_BB); 6183 if (IsCmpxchg) 6184 cont2BB = MF->CreateMachineBasicBlock(LLVM_BB); 6185 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 6186 6187 MF->insert(It, loopMBB); 6188 if (IsCmpxchg || IsMinMax) MF->insert(It, contBB); 6189 if (IsCmpxchg) MF->insert(It, cont2BB); 6190 MF->insert(It, exitMBB); 6191 6192 // Transfer the remainder of BB and its successor edges to exitMBB. 6193 exitMBB->splice(exitMBB->begin(), BB, 6194 llvm::next(MachineBasicBlock::iterator(MI)), 6195 BB->end()); 6196 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 6197 6198 const TargetRegisterClass *TRC = isThumb2 ? 6199 (const TargetRegisterClass*)&ARM::tGPRRegClass : 6200 (const TargetRegisterClass*)&ARM::GPRRegClass; 6201 unsigned storesuccess = MRI.createVirtualRegister(TRC); 6202 6203 // thisMBB: 6204 // ... 6205 // fallthrough --> loopMBB 6206 BB->addSuccessor(loopMBB); 6207 6208 // loopMBB: 6209 // ldrexd r2, r3, ptr 6210 // <binopa> r0, r2, incr 6211 // <binopb> r1, r3, incr 6212 // strexd storesuccess, r0, r1, ptr 6213 // cmp storesuccess, #0 6214 // bne- loopMBB 6215 // fallthrough --> exitMBB 6216 BB = loopMBB; 6217 6218 // Load 6219 if (isThumb2) { 6220 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2LDREXD)) 6221 .addReg(destlo, RegState::Define) 6222 .addReg(desthi, RegState::Define) 6223 .addReg(ptr)); 6224 } else { 6225 unsigned GPRPair0 = MRI.createVirtualRegister(&ARM::GPRPairRegClass); 6226 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::LDREXD)) 6227 .addReg(GPRPair0, RegState::Define).addReg(ptr)); 6228 // Copy r2/r3 into dest. (This copy will normally be coalesced.) 6229 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), destlo) 6230 .addReg(GPRPair0, 0, ARM::gsub_0); 6231 BuildMI(BB, dl, TII->get(TargetOpcode::COPY), desthi) 6232 .addReg(GPRPair0, 0, ARM::gsub_1); 6233 } 6234 6235 unsigned StoreLo, StoreHi; 6236 if (IsCmpxchg) { 6237 // Add early exit 6238 for (unsigned i = 0; i < 2; i++) { 6239 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : 6240 ARM::CMPrr)) 6241 .addReg(i == 0 ? destlo : desthi) 6242 .addReg(i == 0 ? vallo : valhi)); 6243 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 6244 .addMBB(exitMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 6245 BB->addSuccessor(exitMBB); 6246 BB->addSuccessor(i == 0 ? contBB : cont2BB); 6247 BB = (i == 0 ? 
contBB : cont2BB); 6248 } 6249 6250 // Copy to physregs for strexd 6251 StoreLo = MI->getOperand(5).getReg(); 6252 StoreHi = MI->getOperand(6).getReg(); 6253 } else if (Op1) { 6254 // Perform binary operation 6255 unsigned tmpRegLo = MRI.createVirtualRegister(TRC); 6256 AddDefaultPred(BuildMI(BB, dl, TII->get(Op1), tmpRegLo) 6257 .addReg(destlo).addReg(vallo)) 6258 .addReg(NeedsCarry ? ARM::CPSR : 0, getDefRegState(NeedsCarry)); 6259 unsigned tmpRegHi = MRI.createVirtualRegister(TRC); 6260 AddDefaultPred(BuildMI(BB, dl, TII->get(Op2), tmpRegHi) 6261 .addReg(desthi).addReg(valhi)) 6262 .addReg(IsMinMax ? ARM::CPSR : 0, getDefRegState(IsMinMax)); 6263 6264 StoreLo = tmpRegLo; 6265 StoreHi = tmpRegHi; 6266 } else { 6267 // Copy to physregs for strexd 6268 StoreLo = vallo; 6269 StoreHi = valhi; 6270 } 6271 if (IsMinMax) { 6272 // Compare and branch to exit block. 6273 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 6274 .addMBB(exitMBB).addImm(CC).addReg(ARM::CPSR); 6275 BB->addSuccessor(exitMBB); 6276 BB->addSuccessor(contBB); 6277 BB = contBB; 6278 StoreLo = vallo; 6279 StoreHi = valhi; 6280 } 6281 6282 // Store 6283 if (isThumb2) { 6284 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2STREXD), storesuccess) 6285 .addReg(StoreLo).addReg(StoreHi).addReg(ptr)); 6286 } else { 6287 // Marshal a pair... 6288 unsigned StorePair = MRI.createVirtualRegister(&ARM::GPRPairRegClass); 6289 unsigned UndefPair = MRI.createVirtualRegister(&ARM::GPRPairRegClass); 6290 unsigned r1 = MRI.createVirtualRegister(&ARM::GPRPairRegClass); 6291 BuildMI(BB, dl, TII->get(TargetOpcode::IMPLICIT_DEF), UndefPair); 6292 BuildMI(BB, dl, TII->get(TargetOpcode::INSERT_SUBREG), r1) 6293 .addReg(UndefPair) 6294 .addReg(StoreLo) 6295 .addImm(ARM::gsub_0); 6296 BuildMI(BB, dl, TII->get(TargetOpcode::INSERT_SUBREG), StorePair) 6297 .addReg(r1) 6298 .addReg(StoreHi) 6299 .addImm(ARM::gsub_1); 6300 6301 // ...and store it 6302 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::STREXD), storesuccess) 6303 .addReg(StorePair).addReg(ptr)); 6304 } 6305 // Cmp+jump 6306 AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 6307 .addReg(storesuccess).addImm(0)); 6308 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 6309 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 6310 6311 BB->addSuccessor(loopMBB); 6312 BB->addSuccessor(exitMBB); 6313 6314 // exitMBB: 6315 // ... 6316 BB = exitMBB; 6317 6318 MI->eraseFromParent(); // The instruction is gone now. 6319 6320 return BB; 6321} 6322 6323/// SetupEntryBlockForSjLj - Insert code into the entry block that creates and 6324/// registers the function context. 6325void ARMTargetLowering:: 6326SetupEntryBlockForSjLj(MachineInstr *MI, MachineBasicBlock *MBB, 6327 MachineBasicBlock *DispatchBB, int FI) const { 6328 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6329 DebugLoc dl = MI->getDebugLoc(); 6330 MachineFunction *MF = MBB->getParent(); 6331 MachineRegisterInfo *MRI = &MF->getRegInfo(); 6332 MachineConstantPool *MCP = MF->getConstantPool(); 6333 ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); 6334 const Function *F = MF->getFunction(); 6335 6336 bool isThumb = Subtarget->isThumb(); 6337 bool isThumb2 = Subtarget->isThumb2(); 6338 6339 unsigned PCLabelId = AFI->createPICLabelUId(); 6340 unsigned PCAdj = (isThumb || isThumb2) ? 
4 : 8; 6341 ARMConstantPoolValue *CPV = 6342 ARMConstantPoolMBB::Create(F->getContext(), DispatchBB, PCLabelId, PCAdj); 6343 unsigned CPI = MCP->getConstantPoolIndex(CPV, 4); 6344 6345 const TargetRegisterClass *TRC = isThumb ? 6346 (const TargetRegisterClass*)&ARM::tGPRRegClass : 6347 (const TargetRegisterClass*)&ARM::GPRRegClass; 6348 6349 // Grab constant pool and fixed stack memory operands. 6350 MachineMemOperand *CPMMO = 6351 MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(), 6352 MachineMemOperand::MOLoad, 4, 4); 6353 6354 MachineMemOperand *FIMMOSt = 6355 MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), 6356 MachineMemOperand::MOStore, 4, 4); 6357 6358 // Load the address of the dispatch MBB into the jump buffer. 6359 if (isThumb2) { 6360 // Incoming value: jbuf 6361 // ldr.n r5, LCPI1_1 6362 // orr r5, r5, #1 6363 // add r5, pc 6364 // str r5, [$jbuf, #+4] ; &jbuf[1] 6365 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 6366 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1) 6367 .addConstantPoolIndex(CPI) 6368 .addMemOperand(CPMMO)); 6369 // Set the low bit because of thumb mode. 6370 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 6371 AddDefaultCC( 6372 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2) 6373 .addReg(NewVReg1, RegState::Kill) 6374 .addImm(0x01))); 6375 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 6376 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3) 6377 .addReg(NewVReg2, RegState::Kill) 6378 .addImm(PCLabelId); 6379 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12)) 6380 .addReg(NewVReg3, RegState::Kill) 6381 .addFrameIndex(FI) 6382 .addImm(36) // &jbuf[1] :: pc 6383 .addMemOperand(FIMMOSt)); 6384 } else if (isThumb) { 6385 // Incoming value: jbuf 6386 // ldr.n r1, LCPI1_4 6387 // add r1, pc 6388 // mov r2, #1 6389 // orrs r1, r2 6390 // add r2, $jbuf, #+4 ; &jbuf[1] 6391 // str r1, [r2] 6392 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 6393 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1) 6394 .addConstantPoolIndex(CPI) 6395 .addMemOperand(CPMMO)); 6396 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 6397 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2) 6398 .addReg(NewVReg1, RegState::Kill) 6399 .addImm(PCLabelId); 6400 // Set the low bit because of thumb mode. 
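    // Thumb1 provides no ORR with an immediate operand, so the constant 1 is
    // materialized in a register first and combined with a register ORR.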
6401 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 6402 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3) 6403 .addReg(ARM::CPSR, RegState::Define) 6404 .addImm(1)); 6405 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 6406 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4) 6407 .addReg(ARM::CPSR, RegState::Define) 6408 .addReg(NewVReg2, RegState::Kill) 6409 .addReg(NewVReg3, RegState::Kill)); 6410 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 6411 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tADDrSPi), NewVReg5) 6412 .addFrameIndex(FI) 6413 .addImm(36)); // &jbuf[1] :: pc 6414 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi)) 6415 .addReg(NewVReg4, RegState::Kill) 6416 .addReg(NewVReg5, RegState::Kill) 6417 .addImm(0) 6418 .addMemOperand(FIMMOSt)); 6419 } else { 6420 // Incoming value: jbuf 6421 // ldr r1, LCPI1_1 6422 // add r1, pc, r1 6423 // str r1, [$jbuf, #+4] ; &jbuf[1] 6424 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 6425 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1) 6426 .addConstantPoolIndex(CPI) 6427 .addImm(0) 6428 .addMemOperand(CPMMO)); 6429 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 6430 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2) 6431 .addReg(NewVReg1, RegState::Kill) 6432 .addImm(PCLabelId)); 6433 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12)) 6434 .addReg(NewVReg2, RegState::Kill) 6435 .addFrameIndex(FI) 6436 .addImm(36) // &jbuf[1] :: pc 6437 .addMemOperand(FIMMOSt)); 6438 } 6439} 6440 6441MachineBasicBlock *ARMTargetLowering:: 6442EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const { 6443 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6444 DebugLoc dl = MI->getDebugLoc(); 6445 MachineFunction *MF = MBB->getParent(); 6446 MachineRegisterInfo *MRI = &MF->getRegInfo(); 6447 ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>(); 6448 MachineFrameInfo *MFI = MF->getFrameInfo(); 6449 int FI = MFI->getFunctionContextIndex(); 6450 6451 const TargetRegisterClass *TRC = Subtarget->isThumb() ? 6452 (const TargetRegisterClass*)&ARM::tGPRRegClass : 6453 (const TargetRegisterClass*)&ARM::GPRnopcRegClass; 6454 6455 // Get a mapping of the call site numbers to all of the landing pads they're 6456 // associated with. 6457 DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2> > CallSiteNumToLPad; 6458 unsigned MaxCSNum = 0; 6459 MachineModuleInfo &MMI = MF->getMMI(); 6460 for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E; 6461 ++BB) { 6462 if (!BB->isLandingPad()) continue; 6463 6464 // FIXME: We should assert that the EH_LABEL is the first MI in the landing 6465 // pad. 6466 for (MachineBasicBlock::iterator 6467 II = BB->begin(), IE = BB->end(); II != IE; ++II) { 6468 if (!II->isEHLabel()) continue; 6469 6470 MCSymbol *Sym = II->getOperand(0).getMCSymbol(); 6471 if (!MMI.hasCallSiteLandingPad(Sym)) continue; 6472 6473 SmallVectorImpl<unsigned> &CallSiteIdxs = MMI.getCallSiteLandingPad(Sym); 6474 for (SmallVectorImpl<unsigned>::iterator 6475 CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end(); 6476 CSI != CSE; ++CSI) { 6477 CallSiteNumToLPad[*CSI].push_back(BB); 6478 MaxCSNum = std::max(MaxCSNum, *CSI); 6479 } 6480 break; 6481 } 6482 } 6483 6484 // Get an ordered list of the machine basic blocks for the jump table. 
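  // The list is indexed by call site number and provides the jump table
  // entries; InvokeBBs collects the predecessors of every landing pad so
  // that they can later be redirected to the dispatch block.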
6485 std::vector<MachineBasicBlock*> LPadList; 6486 SmallPtrSet<MachineBasicBlock*, 64> InvokeBBs; 6487 LPadList.reserve(CallSiteNumToLPad.size()); 6488 for (unsigned I = 1; I <= MaxCSNum; ++I) { 6489 SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I]; 6490 for (SmallVectorImpl<MachineBasicBlock*>::iterator 6491 II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) { 6492 LPadList.push_back(*II); 6493 InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end()); 6494 } 6495 } 6496 6497 assert(!LPadList.empty() && 6498 "No landing pad destinations for the dispatch jump table!"); 6499 6500 // Create the jump table and associated information. 6501 MachineJumpTableInfo *JTI = 6502 MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline); 6503 unsigned MJTI = JTI->createJumpTableIndex(LPadList); 6504 unsigned UId = AFI->createJumpTableUId(); 6505 Reloc::Model RelocM = getTargetMachine().getRelocationModel(); 6506 6507 // Create the MBBs for the dispatch code. 6508 6509 // Shove the dispatch's address into the return slot in the function context. 6510 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock(); 6511 DispatchBB->setIsLandingPad(); 6512 6513 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock(); 6514 unsigned trap_opcode; 6515 if (Subtarget->isThumb()) 6516 trap_opcode = ARM::tTRAP; 6517 else 6518 trap_opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP; 6519 6520 BuildMI(TrapBB, dl, TII->get(trap_opcode)); 6521 DispatchBB->addSuccessor(TrapBB); 6522 6523 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock(); 6524 DispatchBB->addSuccessor(DispContBB); 6525 6526 // Insert and MBBs. 6527 MF->insert(MF->end(), DispatchBB); 6528 MF->insert(MF->end(), DispContBB); 6529 MF->insert(MF->end(), TrapBB); 6530 6531 // Insert code into the entry block that creates and registers the function 6532 // context. 6533 SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI); 6534 6535 MachineMemOperand *FIMMOLd = 6536 MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), 6537 MachineMemOperand::MOLoad | 6538 MachineMemOperand::MOVolatile, 4, 4); 6539 6540 MachineInstrBuilder MIB; 6541 MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup)); 6542 6543 const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII); 6544 const ARMBaseRegisterInfo &RI = AII->getRegisterInfo(); 6545 6546 // Add a register mask with no preserved registers. This results in all 6547 // registers being marked as clobbered. 
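  // (The dispatch block may be entered from any invoke via the SjLj runtime,
  // so it is safest to treat every register as clobbered here.)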
6548 MIB.addRegMask(RI.getNoPreservedMask()); 6549 6550 unsigned NumLPads = LPadList.size(); 6551 if (Subtarget->isThumb2()) { 6552 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 6553 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1) 6554 .addFrameIndex(FI) 6555 .addImm(4) 6556 .addMemOperand(FIMMOLd)); 6557 6558 if (NumLPads < 256) { 6559 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri)) 6560 .addReg(NewVReg1) 6561 .addImm(LPadList.size())); 6562 } else { 6563 unsigned VReg1 = MRI->createVirtualRegister(TRC); 6564 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1) 6565 .addImm(NumLPads & 0xFFFF)); 6566 6567 unsigned VReg2 = VReg1; 6568 if ((NumLPads & 0xFFFF0000) != 0) { 6569 VReg2 = MRI->createVirtualRegister(TRC); 6570 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2) 6571 .addReg(VReg1) 6572 .addImm(NumLPads >> 16)); 6573 } 6574 6575 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr)) 6576 .addReg(NewVReg1) 6577 .addReg(VReg2)); 6578 } 6579 6580 BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc)) 6581 .addMBB(TrapBB) 6582 .addImm(ARMCC::HI) 6583 .addReg(ARM::CPSR); 6584 6585 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 6586 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT),NewVReg3) 6587 .addJumpTableIndex(MJTI) 6588 .addImm(UId)); 6589 6590 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 6591 AddDefaultCC( 6592 AddDefaultPred( 6593 BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4) 6594 .addReg(NewVReg3, RegState::Kill) 6595 .addReg(NewVReg1) 6596 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)))); 6597 6598 BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT)) 6599 .addReg(NewVReg4, RegState::Kill) 6600 .addReg(NewVReg1) 6601 .addJumpTableIndex(MJTI) 6602 .addImm(UId); 6603 } else if (Subtarget->isThumb()) { 6604 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 6605 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1) 6606 .addFrameIndex(FI) 6607 .addImm(1) 6608 .addMemOperand(FIMMOLd)); 6609 6610 if (NumLPads < 256) { 6611 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8)) 6612 .addReg(NewVReg1) 6613 .addImm(NumLPads)); 6614 } else { 6615 MachineConstantPool *ConstantPool = MF->getConstantPool(); 6616 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 6617 const Constant *C = ConstantInt::get(Int32Ty, NumLPads); 6618 6619 // MachineConstantPool wants an explicit alignment. 
6620 unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty); 6621 if (Align == 0) 6622 Align = getDataLayout()->getTypeAllocSize(C->getType()); 6623 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); 6624 6625 unsigned VReg1 = MRI->createVirtualRegister(TRC); 6626 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci)) 6627 .addReg(VReg1, RegState::Define) 6628 .addConstantPoolIndex(Idx)); 6629 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr)) 6630 .addReg(NewVReg1) 6631 .addReg(VReg1)); 6632 } 6633 6634 BuildMI(DispatchBB, dl, TII->get(ARM::tBcc)) 6635 .addMBB(TrapBB) 6636 .addImm(ARMCC::HI) 6637 .addReg(ARM::CPSR); 6638 6639 unsigned NewVReg2 = MRI->createVirtualRegister(TRC); 6640 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2) 6641 .addReg(ARM::CPSR, RegState::Define) 6642 .addReg(NewVReg1) 6643 .addImm(2)); 6644 6645 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 6646 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3) 6647 .addJumpTableIndex(MJTI) 6648 .addImm(UId)); 6649 6650 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 6651 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4) 6652 .addReg(ARM::CPSR, RegState::Define) 6653 .addReg(NewVReg2, RegState::Kill) 6654 .addReg(NewVReg3)); 6655 6656 MachineMemOperand *JTMMOLd = 6657 MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(), 6658 MachineMemOperand::MOLoad, 4, 4); 6659 6660 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 6661 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5) 6662 .addReg(NewVReg4, RegState::Kill) 6663 .addImm(0) 6664 .addMemOperand(JTMMOLd)); 6665 6666 unsigned NewVReg6 = NewVReg5; 6667 if (RelocM == Reloc::PIC_) { 6668 NewVReg6 = MRI->createVirtualRegister(TRC); 6669 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6) 6670 .addReg(ARM::CPSR, RegState::Define) 6671 .addReg(NewVReg5, RegState::Kill) 6672 .addReg(NewVReg3)); 6673 } 6674 6675 BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr)) 6676 .addReg(NewVReg6, RegState::Kill) 6677 .addJumpTableIndex(MJTI) 6678 .addImm(UId); 6679 } else { 6680 unsigned NewVReg1 = MRI->createVirtualRegister(TRC); 6681 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1) 6682 .addFrameIndex(FI) 6683 .addImm(4) 6684 .addMemOperand(FIMMOLd)); 6685 6686 if (NumLPads < 256) { 6687 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPri)) 6688 .addReg(NewVReg1) 6689 .addImm(NumLPads)); 6690 } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) { 6691 unsigned VReg1 = MRI->createVirtualRegister(TRC); 6692 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1) 6693 .addImm(NumLPads & 0xFFFF)); 6694 6695 unsigned VReg2 = VReg1; 6696 if ((NumLPads & 0xFFFF0000) != 0) { 6697 VReg2 = MRI->createVirtualRegister(TRC); 6698 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2) 6699 .addReg(VReg1) 6700 .addImm(NumLPads >> 16)); 6701 } 6702 6703 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) 6704 .addReg(NewVReg1) 6705 .addReg(VReg2)); 6706 } else { 6707 MachineConstantPool *ConstantPool = MF->getConstantPool(); 6708 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 6709 const Constant *C = ConstantInt::get(Int32Ty, NumLPads); 6710 6711 // MachineConstantPool wants an explicit alignment. 
6712 unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty); 6713 if (Align == 0) 6714 Align = getDataLayout()->getTypeAllocSize(C->getType()); 6715 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); 6716 6717 unsigned VReg1 = MRI->createVirtualRegister(TRC); 6718 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp)) 6719 .addReg(VReg1, RegState::Define) 6720 .addConstantPoolIndex(Idx) 6721 .addImm(0)); 6722 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr)) 6723 .addReg(NewVReg1) 6724 .addReg(VReg1, RegState::Kill)); 6725 } 6726 6727 BuildMI(DispatchBB, dl, TII->get(ARM::Bcc)) 6728 .addMBB(TrapBB) 6729 .addImm(ARMCC::HI) 6730 .addReg(ARM::CPSR); 6731 6732 unsigned NewVReg3 = MRI->createVirtualRegister(TRC); 6733 AddDefaultCC( 6734 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3) 6735 .addReg(NewVReg1) 6736 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2)))); 6737 unsigned NewVReg4 = MRI->createVirtualRegister(TRC); 6738 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4) 6739 .addJumpTableIndex(MJTI) 6740 .addImm(UId)); 6741 6742 MachineMemOperand *JTMMOLd = 6743 MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(), 6744 MachineMemOperand::MOLoad, 4, 4); 6745 unsigned NewVReg5 = MRI->createVirtualRegister(TRC); 6746 AddDefaultPred( 6747 BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5) 6748 .addReg(NewVReg3, RegState::Kill) 6749 .addReg(NewVReg4) 6750 .addImm(0) 6751 .addMemOperand(JTMMOLd)); 6752 6753 if (RelocM == Reloc::PIC_) { 6754 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd)) 6755 .addReg(NewVReg5, RegState::Kill) 6756 .addReg(NewVReg4) 6757 .addJumpTableIndex(MJTI) 6758 .addImm(UId); 6759 } else { 6760 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTr)) 6761 .addReg(NewVReg5, RegState::Kill) 6762 .addJumpTableIndex(MJTI) 6763 .addImm(UId); 6764 } 6765 } 6766 6767 // Add the jump table entries as successors to the MBB. 6768 SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs; 6769 for (std::vector<MachineBasicBlock*>::iterator 6770 I = LPadList.begin(), E = LPadList.end(); I != E; ++I) { 6771 MachineBasicBlock *CurMBB = *I; 6772 if (SeenMBBs.insert(CurMBB)) 6773 DispContBB->addSuccessor(CurMBB); 6774 } 6775 6776 // N.B. the order the invoke BBs are processed in doesn't matter here. 6777 const uint16_t *SavedRegs = RI.getCalleeSavedRegs(MF); 6778 SmallVector<MachineBasicBlock*, 64> MBBLPads; 6779 for (SmallPtrSet<MachineBasicBlock*, 64>::iterator 6780 I = InvokeBBs.begin(), E = InvokeBBs.end(); I != E; ++I) { 6781 MachineBasicBlock *BB = *I; 6782 6783 // Remove the landing pad successor from the invoke block and replace it 6784 // with the new dispatch block. 6785 SmallVector<MachineBasicBlock*, 4> Successors(BB->succ_begin(), 6786 BB->succ_end()); 6787 while (!Successors.empty()) { 6788 MachineBasicBlock *SMBB = Successors.pop_back_val(); 6789 if (SMBB->isLandingPad()) { 6790 BB->removeSuccessor(SMBB); 6791 MBBLPads.push_back(SMBB); 6792 } 6793 } 6794 6795 BB->addSuccessor(DispatchBB); 6796 6797 // Find the invoke call and mark all of the callee-saved registers as 6798 // 'implicit defined' so that they're spilled. This prevents code from 6799 // moving instructions to before the EH block, where they will never be 6800 // executed. 
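    // Registers the call already defines, and registers that are not valid
    // GPRs in the current instruction set mode, are skipped.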
6801 for (MachineBasicBlock::reverse_iterator 6802 II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) { 6803 if (!II->isCall()) continue; 6804 6805 DenseMap<unsigned, bool> DefRegs; 6806 for (MachineInstr::mop_iterator 6807 OI = II->operands_begin(), OE = II->operands_end(); 6808 OI != OE; ++OI) { 6809 if (!OI->isReg()) continue; 6810 DefRegs[OI->getReg()] = true; 6811 } 6812 6813 MachineInstrBuilder MIB(*MF, &*II); 6814 6815 for (unsigned i = 0; SavedRegs[i] != 0; ++i) { 6816 unsigned Reg = SavedRegs[i]; 6817 if (Subtarget->isThumb2() && 6818 !ARM::tGPRRegClass.contains(Reg) && 6819 !ARM::hGPRRegClass.contains(Reg)) 6820 continue; 6821 if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg)) 6822 continue; 6823 if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg)) 6824 continue; 6825 if (!DefRegs[Reg]) 6826 MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead); 6827 } 6828 6829 break; 6830 } 6831 } 6832 6833 // Mark all former landing pads as non-landing pads. The dispatch is the only 6834 // landing pad now. 6835 for (SmallVectorImpl<MachineBasicBlock*>::iterator 6836 I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I) 6837 (*I)->setIsLandingPad(false); 6838 6839 // The instruction is gone now. 6840 MI->eraseFromParent(); 6841 6842 return MBB; 6843} 6844 6845static 6846MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { 6847 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(), 6848 E = MBB->succ_end(); I != E; ++I) 6849 if (*I != Succ) 6850 return *I; 6851 llvm_unreachable("Expecting a BB with two successors!"); 6852} 6853 6854MachineBasicBlock *ARMTargetLowering:: 6855EmitStructByval(MachineInstr *MI, MachineBasicBlock *BB) const { 6856 // This pseudo instruction has 3 operands: dst, src, size 6857 // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold(). 6858 // Otherwise, we will generate unrolled scalar copies. 6859 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 6860 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 6861 MachineFunction::iterator It = BB; 6862 ++It; 6863 6864 unsigned dest = MI->getOperand(0).getReg(); 6865 unsigned src = MI->getOperand(1).getReg(); 6866 unsigned SizeVal = MI->getOperand(2).getImm(); 6867 unsigned Align = MI->getOperand(3).getImm(); 6868 DebugLoc dl = MI->getDebugLoc(); 6869 6870 bool isThumb2 = Subtarget->isThumb2(); 6871 MachineFunction *MF = BB->getParent(); 6872 MachineRegisterInfo &MRI = MF->getRegInfo(); 6873 unsigned ldrOpc, strOpc, UnitSize = 0; 6874 6875 const TargetRegisterClass *TRC = isThumb2 ? 6876 (const TargetRegisterClass*)&ARM::tGPRRegClass : 6877 (const TargetRegisterClass*)&ARM::GPRRegClass; 6878 const TargetRegisterClass *TRC_Vec = 0; 6879 6880 if (Align & 1) { 6881 ldrOpc = isThumb2 ? ARM::t2LDRB_POST : ARM::LDRB_POST_IMM; 6882 strOpc = isThumb2 ? ARM::t2STRB_POST : ARM::STRB_POST_IMM; 6883 UnitSize = 1; 6884 } else if (Align & 2) { 6885 ldrOpc = isThumb2 ? ARM::t2LDRH_POST : ARM::LDRH_POST; 6886 strOpc = isThumb2 ? ARM::t2STRH_POST : ARM::STRH_POST; 6887 UnitSize = 2; 6888 } else { 6889 // Check whether we can use NEON instructions. 6890 if (!MF->getFunction()->getAttributes(). 
6891 hasAttribute(AttributeSet::FunctionIndex, 6892 Attribute::NoImplicitFloat) && 6893 Subtarget->hasNEON()) { 6894 if ((Align % 16 == 0) && SizeVal >= 16) { 6895 ldrOpc = ARM::VLD1q32wb_fixed; 6896 strOpc = ARM::VST1q32wb_fixed; 6897 UnitSize = 16; 6898 TRC_Vec = (const TargetRegisterClass*)&ARM::DPairRegClass; 6899 } 6900 else if ((Align % 8 == 0) && SizeVal >= 8) { 6901 ldrOpc = ARM::VLD1d32wb_fixed; 6902 strOpc = ARM::VST1d32wb_fixed; 6903 UnitSize = 8; 6904 TRC_Vec = (const TargetRegisterClass*)&ARM::DPRRegClass; 6905 } 6906 } 6907 // Can't use NEON instructions. 6908 if (UnitSize == 0) { 6909 ldrOpc = isThumb2 ? ARM::t2LDR_POST : ARM::LDR_POST_IMM; 6910 strOpc = isThumb2 ? ARM::t2STR_POST : ARM::STR_POST_IMM; 6911 UnitSize = 4; 6912 } 6913 } 6914 6915 unsigned BytesLeft = SizeVal % UnitSize; 6916 unsigned LoopSize = SizeVal - BytesLeft; 6917 6918 if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) { 6919 // Use LDR and STR to copy. 6920 // [scratch, srcOut] = LDR_POST(srcIn, UnitSize) 6921 // [destOut] = STR_POST(scratch, destIn, UnitSize) 6922 unsigned srcIn = src; 6923 unsigned destIn = dest; 6924 for (unsigned i = 0; i < LoopSize; i+=UnitSize) { 6925 unsigned scratch = MRI.createVirtualRegister(UnitSize >= 8 ? TRC_Vec:TRC); 6926 unsigned srcOut = MRI.createVirtualRegister(TRC); 6927 unsigned destOut = MRI.createVirtualRegister(TRC); 6928 if (UnitSize >= 8) { 6929 AddDefaultPred(BuildMI(*BB, MI, dl, 6930 TII->get(ldrOpc), scratch) 6931 .addReg(srcOut, RegState::Define).addReg(srcIn).addImm(0)); 6932 6933 AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut) 6934 .addReg(destIn).addImm(0).addReg(scratch)); 6935 } else if (isThumb2) { 6936 AddDefaultPred(BuildMI(*BB, MI, dl, 6937 TII->get(ldrOpc), scratch) 6938 .addReg(srcOut, RegState::Define).addReg(srcIn).addImm(UnitSize)); 6939 6940 AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut) 6941 .addReg(scratch).addReg(destIn) 6942 .addImm(UnitSize)); 6943 } else { 6944 AddDefaultPred(BuildMI(*BB, MI, dl, 6945 TII->get(ldrOpc), scratch) 6946 .addReg(srcOut, RegState::Define).addReg(srcIn).addReg(0) 6947 .addImm(UnitSize)); 6948 6949 AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut) 6950 .addReg(scratch).addReg(destIn) 6951 .addReg(0).addImm(UnitSize)); 6952 } 6953 srcIn = srcOut; 6954 destIn = destOut; 6955 } 6956 6957 // Handle the leftover bytes with LDRB and STRB. 6958 // [scratch, srcOut] = LDRB_POST(srcIn, 1) 6959 // [destOut] = STRB_POST(scratch, destIn, 1) 6960 ldrOpc = isThumb2 ? ARM::t2LDRB_POST : ARM::LDRB_POST_IMM; 6961 strOpc = isThumb2 ? 
ARM::t2STRB_POST : ARM::STRB_POST_IMM; 6962 for (unsigned i = 0; i < BytesLeft; i++) { 6963 unsigned scratch = MRI.createVirtualRegister(TRC); 6964 unsigned srcOut = MRI.createVirtualRegister(TRC); 6965 unsigned destOut = MRI.createVirtualRegister(TRC); 6966 if (isThumb2) { 6967 AddDefaultPred(BuildMI(*BB, MI, dl, 6968 TII->get(ldrOpc),scratch) 6969 .addReg(srcOut, RegState::Define).addReg(srcIn).addImm(1)); 6970 6971 AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut) 6972 .addReg(scratch).addReg(destIn) 6973 .addReg(0).addImm(1)); 6974 } else { 6975 AddDefaultPred(BuildMI(*BB, MI, dl, 6976 TII->get(ldrOpc),scratch) 6977 .addReg(srcOut, RegState::Define).addReg(srcIn) 6978 .addReg(0).addImm(1)); 6979 6980 AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut) 6981 .addReg(scratch).addReg(destIn) 6982 .addReg(0).addImm(1)); 6983 } 6984 srcIn = srcOut; 6985 destIn = destOut; 6986 } 6987 MI->eraseFromParent(); // The instruction is gone now. 6988 return BB; 6989 } 6990 6991 // Expand the pseudo op to a loop. 6992 // thisMBB: 6993 // ... 6994 // movw varEnd, # --> with thumb2 6995 // movt varEnd, # 6996 // ldrcp varEnd, idx --> without thumb2 6997 // fallthrough --> loopMBB 6998 // loopMBB: 6999 // PHI varPhi, varEnd, varLoop 7000 // PHI srcPhi, src, srcLoop 7001 // PHI destPhi, dst, destLoop 7002 // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize) 7003 // [destLoop] = STR_POST(scratch, destPhi, UnitSize) 7004 // subs varLoop, varPhi, #UnitSize 7005 // bne loopMBB 7006 // fallthrough --> exitMBB 7007 // exitMBB: 7008 // epilogue to handle left-over bytes 7009 // [scratch, srcOut] = LDRB_POST(srcLoop, 1) 7010 // [destOut] = STRB_POST(scratch, destLoop, 1) 7011 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB); 7012 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB); 7013 MF->insert(It, loopMBB); 7014 MF->insert(It, exitMBB); 7015 7016 // Transfer the remainder of BB and its successor edges to exitMBB. 7017 exitMBB->splice(exitMBB->begin(), BB, 7018 llvm::next(MachineBasicBlock::iterator(MI)), 7019 BB->end()); 7020 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 7021 7022 // Load an immediate to varEnd. 7023 unsigned varEnd = MRI.createVirtualRegister(TRC); 7024 if (isThumb2) { 7025 unsigned VReg1 = varEnd; 7026 if ((LoopSize & 0xFFFF0000) != 0) 7027 VReg1 = MRI.createVirtualRegister(TRC); 7028 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2MOVi16), VReg1) 7029 .addImm(LoopSize & 0xFFFF)); 7030 7031 if ((LoopSize & 0xFFFF0000) != 0) 7032 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2MOVTi16), varEnd) 7033 .addReg(VReg1) 7034 .addImm(LoopSize >> 16)); 7035 } else { 7036 MachineConstantPool *ConstantPool = MF->getConstantPool(); 7037 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext()); 7038 const Constant *C = ConstantInt::get(Int32Ty, LoopSize); 7039 7040 // MachineConstantPool wants an explicit alignment. 
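    // Fall back to the type's allocation size when no preferred alignment is
    // recorded for i32.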
7041 unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty); 7042 if (Align == 0) 7043 Align = getDataLayout()->getTypeAllocSize(C->getType()); 7044 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); 7045 7046 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::LDRcp)) 7047 .addReg(varEnd, RegState::Define) 7048 .addConstantPoolIndex(Idx) 7049 .addImm(0)); 7050 } 7051 BB->addSuccessor(loopMBB); 7052 7053 // Generate the loop body: 7054 // varPhi = PHI(varLoop, varEnd) 7055 // srcPhi = PHI(srcLoop, src) 7056 // destPhi = PHI(destLoop, dst) 7057 MachineBasicBlock *entryBB = BB; 7058 BB = loopMBB; 7059 unsigned varLoop = MRI.createVirtualRegister(TRC); 7060 unsigned varPhi = MRI.createVirtualRegister(TRC); 7061 unsigned srcLoop = MRI.createVirtualRegister(TRC); 7062 unsigned srcPhi = MRI.createVirtualRegister(TRC); 7063 unsigned destLoop = MRI.createVirtualRegister(TRC); 7064 unsigned destPhi = MRI.createVirtualRegister(TRC); 7065 7066 BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi) 7067 .addReg(varLoop).addMBB(loopMBB) 7068 .addReg(varEnd).addMBB(entryBB); 7069 BuildMI(BB, dl, TII->get(ARM::PHI), srcPhi) 7070 .addReg(srcLoop).addMBB(loopMBB) 7071 .addReg(src).addMBB(entryBB); 7072 BuildMI(BB, dl, TII->get(ARM::PHI), destPhi) 7073 .addReg(destLoop).addMBB(loopMBB) 7074 .addReg(dest).addMBB(entryBB); 7075 7076 // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize) 7077 // [destLoop] = STR_POST(scratch, destPhi, UnitSiz) 7078 unsigned scratch = MRI.createVirtualRegister(UnitSize >= 8 ? TRC_Vec:TRC); 7079 if (UnitSize >= 8) { 7080 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), scratch) 7081 .addReg(srcLoop, RegState::Define).addReg(srcPhi).addImm(0)); 7082 7083 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), destLoop) 7084 .addReg(destPhi).addImm(0).addReg(scratch)); 7085 } else if (isThumb2) { 7086 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), scratch) 7087 .addReg(srcLoop, RegState::Define).addReg(srcPhi).addImm(UnitSize)); 7088 7089 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), destLoop) 7090 .addReg(scratch).addReg(destPhi) 7091 .addImm(UnitSize)); 7092 } else { 7093 AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc), scratch) 7094 .addReg(srcLoop, RegState::Define).addReg(srcPhi).addReg(0) 7095 .addImm(UnitSize)); 7096 7097 AddDefaultPred(BuildMI(BB, dl, TII->get(strOpc), destLoop) 7098 .addReg(scratch).addReg(destPhi) 7099 .addReg(0).addImm(UnitSize)); 7100 } 7101 7102 // Decrement loop variable by UnitSize. 7103 MachineInstrBuilder MIB = BuildMI(BB, dl, 7104 TII->get(isThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop); 7105 AddDefaultCC(AddDefaultPred(MIB.addReg(varPhi).addImm(UnitSize))); 7106 MIB->getOperand(5).setReg(ARM::CPSR); 7107 MIB->getOperand(5).setIsDef(true); 7108 7109 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)) 7110 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR); 7111 7112 // loopMBB can loop back to loopMBB or fall through to exitMBB. 7113 BB->addSuccessor(loopMBB); 7114 BB->addSuccessor(exitMBB); 7115 7116 // Add epilogue to handle BytesLeft. 7117 BB = exitMBB; 7118 MachineInstr *StartOfExit = exitMBB->begin(); 7119 ldrOpc = isThumb2 ? ARM::t2LDRB_POST : ARM::LDRB_POST_IMM; 7120 strOpc = isThumb2 ? 
ARM::t2STRB_POST : ARM::STRB_POST_IMM; 7121 7122 // [scratch, srcOut] = LDRB_POST(srcLoop, 1) 7123 // [destOut] = STRB_POST(scratch, destLoop, 1) 7124 unsigned srcIn = srcLoop; 7125 unsigned destIn = destLoop; 7126 for (unsigned i = 0; i < BytesLeft; i++) { 7127 unsigned scratch = MRI.createVirtualRegister(TRC); 7128 unsigned srcOut = MRI.createVirtualRegister(TRC); 7129 unsigned destOut = MRI.createVirtualRegister(TRC); 7130 if (isThumb2) { 7131 AddDefaultPred(BuildMI(*BB, StartOfExit, dl, 7132 TII->get(ldrOpc),scratch) 7133 .addReg(srcOut, RegState::Define).addReg(srcIn).addImm(1)); 7134 7135 AddDefaultPred(BuildMI(*BB, StartOfExit, dl, TII->get(strOpc), destOut) 7136 .addReg(scratch).addReg(destIn) 7137 .addImm(1)); 7138 } else { 7139 AddDefaultPred(BuildMI(*BB, StartOfExit, dl, 7140 TII->get(ldrOpc),scratch) 7141 .addReg(srcOut, RegState::Define).addReg(srcIn).addReg(0).addImm(1)); 7142 7143 AddDefaultPred(BuildMI(*BB, StartOfExit, dl, TII->get(strOpc), destOut) 7144 .addReg(scratch).addReg(destIn) 7145 .addReg(0).addImm(1)); 7146 } 7147 srcIn = srcOut; 7148 destIn = destOut; 7149 } 7150 7151 MI->eraseFromParent(); // The instruction is gone now. 7152 return BB; 7153} 7154 7155MachineBasicBlock * 7156ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, 7157 MachineBasicBlock *BB) const { 7158 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); 7159 DebugLoc dl = MI->getDebugLoc(); 7160 bool isThumb2 = Subtarget->isThumb2(); 7161 switch (MI->getOpcode()) { 7162 default: { 7163 MI->dump(); 7164 llvm_unreachable("Unexpected instr type to insert"); 7165 } 7166 // The Thumb2 pre-indexed stores have the same MI operands, they just 7167 // define them differently in the .td files from the isel patterns, so 7168 // they need pseudos. 7169 case ARM::t2STR_preidx: 7170 MI->setDesc(TII->get(ARM::t2STR_PRE)); 7171 return BB; 7172 case ARM::t2STRB_preidx: 7173 MI->setDesc(TII->get(ARM::t2STRB_PRE)); 7174 return BB; 7175 case ARM::t2STRH_preidx: 7176 MI->setDesc(TII->get(ARM::t2STRH_PRE)); 7177 return BB; 7178 7179 case ARM::STRi_preidx: 7180 case ARM::STRBi_preidx: { 7181 unsigned NewOpc = MI->getOpcode() == ARM::STRi_preidx ? 7182 ARM::STR_PRE_IMM : ARM::STRB_PRE_IMM; 7183 // Decode the offset. 
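    // The AM2 operand encodes both the magnitude and the add/sub direction;
    // recover a signed immediate for the *_PRE_IMM instruction.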
7184 unsigned Offset = MI->getOperand(4).getImm(); 7185 bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub; 7186 Offset = ARM_AM::getAM2Offset(Offset); 7187 if (isSub) 7188 Offset = -Offset; 7189 7190 MachineMemOperand *MMO = *MI->memoperands_begin(); 7191 BuildMI(*BB, MI, dl, TII->get(NewOpc)) 7192 .addOperand(MI->getOperand(0)) // Rn_wb 7193 .addOperand(MI->getOperand(1)) // Rt 7194 .addOperand(MI->getOperand(2)) // Rn 7195 .addImm(Offset) // offset (skip GPR==zero_reg) 7196 .addOperand(MI->getOperand(5)) // pred 7197 .addOperand(MI->getOperand(6)) 7198 .addMemOperand(MMO); 7199 MI->eraseFromParent(); 7200 return BB; 7201 } 7202 case ARM::STRr_preidx: 7203 case ARM::STRBr_preidx: 7204 case ARM::STRH_preidx: { 7205 unsigned NewOpc; 7206 switch (MI->getOpcode()) { 7207 default: llvm_unreachable("unexpected opcode!"); 7208 case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break; 7209 case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break; 7210 case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break; 7211 } 7212 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc)); 7213 for (unsigned i = 0; i < MI->getNumOperands(); ++i) 7214 MIB.addOperand(MI->getOperand(i)); 7215 MI->eraseFromParent(); 7216 return BB; 7217 } 7218 case ARM::ATOMIC_LOAD_ADD_I8: 7219 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 7220 case ARM::ATOMIC_LOAD_ADD_I16: 7221 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 7222 case ARM::ATOMIC_LOAD_ADD_I32: 7223 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr); 7224 7225 case ARM::ATOMIC_LOAD_AND_I8: 7226 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 7227 case ARM::ATOMIC_LOAD_AND_I16: 7228 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 7229 case ARM::ATOMIC_LOAD_AND_I32: 7230 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 7231 7232 case ARM::ATOMIC_LOAD_OR_I8: 7233 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 7234 case ARM::ATOMIC_LOAD_OR_I16: 7235 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 7236 case ARM::ATOMIC_LOAD_OR_I32: 7237 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 7238 7239 case ARM::ATOMIC_LOAD_XOR_I8: 7240 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 7241 case ARM::ATOMIC_LOAD_XOR_I16: 7242 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 7243 case ARM::ATOMIC_LOAD_XOR_I32: 7244 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2EORrr : ARM::EORrr); 7245 7246 case ARM::ATOMIC_LOAD_NAND_I8: 7247 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 7248 case ARM::ATOMIC_LOAD_NAND_I16: 7249 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 7250 case ARM::ATOMIC_LOAD_NAND_I32: 7251 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? ARM::t2BICrr : ARM::BICrr); 7252 7253 case ARM::ATOMIC_LOAD_SUB_I8: 7254 return EmitAtomicBinary(MI, BB, 1, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 7255 case ARM::ATOMIC_LOAD_SUB_I16: 7256 return EmitAtomicBinary(MI, BB, 2, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr); 7257 case ARM::ATOMIC_LOAD_SUB_I32: 7258 return EmitAtomicBinary(MI, BB, 4, isThumb2 ? 
ARM::t2SUBrr : ARM::SUBrr); 7259 7260 case ARM::ATOMIC_LOAD_MIN_I8: 7261 return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::LT); 7262 case ARM::ATOMIC_LOAD_MIN_I16: 7263 return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::LT); 7264 case ARM::ATOMIC_LOAD_MIN_I32: 7265 return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::LT); 7266 7267 case ARM::ATOMIC_LOAD_MAX_I8: 7268 return EmitAtomicBinaryMinMax(MI, BB, 1, true, ARMCC::GT); 7269 case ARM::ATOMIC_LOAD_MAX_I16: 7270 return EmitAtomicBinaryMinMax(MI, BB, 2, true, ARMCC::GT); 7271 case ARM::ATOMIC_LOAD_MAX_I32: 7272 return EmitAtomicBinaryMinMax(MI, BB, 4, true, ARMCC::GT); 7273 7274 case ARM::ATOMIC_LOAD_UMIN_I8: 7275 return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::LO); 7276 case ARM::ATOMIC_LOAD_UMIN_I16: 7277 return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::LO); 7278 case ARM::ATOMIC_LOAD_UMIN_I32: 7279 return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::LO); 7280 7281 case ARM::ATOMIC_LOAD_UMAX_I8: 7282 return EmitAtomicBinaryMinMax(MI, BB, 1, false, ARMCC::HI); 7283 case ARM::ATOMIC_LOAD_UMAX_I16: 7284 return EmitAtomicBinaryMinMax(MI, BB, 2, false, ARMCC::HI); 7285 case ARM::ATOMIC_LOAD_UMAX_I32: 7286 return EmitAtomicBinaryMinMax(MI, BB, 4, false, ARMCC::HI); 7287 7288 case ARM::ATOMIC_SWAP_I8: return EmitAtomicBinary(MI, BB, 1, 0); 7289 case ARM::ATOMIC_SWAP_I16: return EmitAtomicBinary(MI, BB, 2, 0); 7290 case ARM::ATOMIC_SWAP_I32: return EmitAtomicBinary(MI, BB, 4, 0); 7291 7292 case ARM::ATOMIC_CMP_SWAP_I8: return EmitAtomicCmpSwap(MI, BB, 1); 7293 case ARM::ATOMIC_CMP_SWAP_I16: return EmitAtomicCmpSwap(MI, BB, 2); 7294 case ARM::ATOMIC_CMP_SWAP_I32: return EmitAtomicCmpSwap(MI, BB, 4); 7295 7296 7297 case ARM::ATOMADD6432: 7298 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ADDrr : ARM::ADDrr, 7299 isThumb2 ? ARM::t2ADCrr : ARM::ADCrr, 7300 /*NeedsCarry*/ true); 7301 case ARM::ATOMSUB6432: 7302 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr, 7303 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr, 7304 /*NeedsCarry*/ true); 7305 case ARM::ATOMOR6432: 7306 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ORRrr : ARM::ORRrr, 7307 isThumb2 ? ARM::t2ORRrr : ARM::ORRrr); 7308 case ARM::ATOMXOR6432: 7309 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2EORrr : ARM::EORrr, 7310 isThumb2 ? ARM::t2EORrr : ARM::EORrr); 7311 case ARM::ATOMAND6432: 7312 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr, 7313 isThumb2 ? ARM::t2ANDrr : ARM::ANDrr); 7314 case ARM::ATOMSWAP6432: 7315 return EmitAtomicBinary64(MI, BB, 0, 0, false); 7316 case ARM::ATOMCMPXCHG6432: 7317 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr, 7318 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr, 7319 /*NeedsCarry*/ false, /*IsCmpxchg*/true); 7320 case ARM::ATOMMIN6432: 7321 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr, 7322 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr, 7323 /*NeedsCarry*/ true, /*IsCmpxchg*/false, 7324 /*IsMinMax*/ true, ARMCC::LT); 7325 case ARM::ATOMMAX6432: 7326 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr, 7327 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr, 7328 /*NeedsCarry*/ true, /*IsCmpxchg*/false, 7329 /*IsMinMax*/ true, ARMCC::GE); 7330 case ARM::ATOMUMIN6432: 7331 return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2SUBrr : ARM::SUBrr, 7332 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr, 7333 /*NeedsCarry*/ true, /*IsCmpxchg*/false, 7334 /*IsMinMax*/ true, ARMCC::LO); 7335 case ARM::ATOMUMAX6432: 7336 return EmitAtomicBinary64(MI, BB, isThumb2 ? 
ARM::t2SUBrr : ARM::SUBrr, 7337 isThumb2 ? ARM::t2SBCrr : ARM::SBCrr, 7338 /*NeedsCarry*/ true, /*IsCmpxchg*/false, 7339 /*IsMinMax*/ true, ARMCC::HS); 7340 7341 case ARM::tMOVCCr_pseudo: { 7342 // To "insert" a SELECT_CC instruction, we actually have to insert the 7343 // diamond control-flow pattern. The incoming instruction knows the 7344 // destination vreg to set, the condition code register to branch on, the 7345 // true/false values to select between, and a branch opcode to use. 7346 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 7347 MachineFunction::iterator It = BB; 7348 ++It; 7349 7350 // thisMBB: 7351 // ... 7352 // TrueVal = ... 7353 // cmpTY ccX, r1, r2 7354 // bCC copy1MBB 7355 // fallthrough --> copy0MBB 7356 MachineBasicBlock *thisMBB = BB; 7357 MachineFunction *F = BB->getParent(); 7358 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 7359 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 7360 F->insert(It, copy0MBB); 7361 F->insert(It, sinkMBB); 7362 7363 // Transfer the remainder of BB and its successor edges to sinkMBB. 7364 sinkMBB->splice(sinkMBB->begin(), BB, 7365 llvm::next(MachineBasicBlock::iterator(MI)), 7366 BB->end()); 7367 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 7368 7369 BB->addSuccessor(copy0MBB); 7370 BB->addSuccessor(sinkMBB); 7371 7372 BuildMI(BB, dl, TII->get(ARM::tBcc)).addMBB(sinkMBB) 7373 .addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg()); 7374 7375 // copy0MBB: 7376 // %FalseValue = ... 7377 // # fallthrough to sinkMBB 7378 BB = copy0MBB; 7379 7380 // Update machine-CFG edges 7381 BB->addSuccessor(sinkMBB); 7382 7383 // sinkMBB: 7384 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 7385 // ... 7386 BB = sinkMBB; 7387 BuildMI(*BB, BB->begin(), dl, 7388 TII->get(ARM::PHI), MI->getOperand(0).getReg()) 7389 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB) 7390 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB); 7391 7392 MI->eraseFromParent(); // The pseudo instruction is gone now. 7393 return BB; 7394 } 7395 7396 case ARM::BCCi64: 7397 case ARM::BCCZi64: { 7398 // If there is an unconditional branch to the other successor, remove it. 7399 BB->erase(llvm::next(MachineBasicBlock::iterator(MI)), BB->end()); 7400 7401 // Compare both parts that make up the double comparison separately for 7402 // equality. 7403 bool RHSisZero = MI->getOpcode() == ARM::BCCZi64; 7404 7405 unsigned LHS1 = MI->getOperand(1).getReg(); 7406 unsigned LHS2 = MI->getOperand(2).getReg(); 7407 if (RHSisZero) { 7408 AddDefaultPred(BuildMI(BB, dl, 7409 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 7410 .addReg(LHS1).addImm(0)); 7411 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 7412 .addReg(LHS2).addImm(0) 7413 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 7414 } else { 7415 unsigned RHS1 = MI->getOperand(3).getReg(); 7416 unsigned RHS2 = MI->getOperand(4).getReg(); 7417 AddDefaultPred(BuildMI(BB, dl, 7418 TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 7419 .addReg(LHS1).addReg(RHS1)); 7420 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) 7421 .addReg(LHS2).addReg(RHS2) 7422 .addImm(ARMCC::EQ).addReg(ARM::CPSR); 7423 } 7424 7425 MachineBasicBlock *destMBB = MI->getOperand(RHSisZero ? 3 : 5).getMBB(); 7426 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB); 7427 if (MI->getOperand(0).getImm() == ARMCC::NE) 7428 std::swap(destMBB, exitMBB); 7429 7430 BuildMI(BB, dl, TII->get(isThumb2 ? 
ARM::t2Bcc : ARM::Bcc)) 7431 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR); 7432 if (isThumb2) 7433 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2B)).addMBB(exitMBB)); 7434 else 7435 BuildMI(BB, dl, TII->get(ARM::B)) .addMBB(exitMBB); 7436 7437 MI->eraseFromParent(); // The pseudo instruction is gone now. 7438 return BB; 7439 } 7440 7441 case ARM::Int_eh_sjlj_setjmp: 7442 case ARM::Int_eh_sjlj_setjmp_nofp: 7443 case ARM::tInt_eh_sjlj_setjmp: 7444 case ARM::t2Int_eh_sjlj_setjmp: 7445 case ARM::t2Int_eh_sjlj_setjmp_nofp: 7446 EmitSjLjDispatchBlock(MI, BB); 7447 return BB; 7448 7449 case ARM::ABS: 7450 case ARM::t2ABS: { 7451 // To insert an ABS instruction, we have to insert the 7452 // diamond control-flow pattern. The incoming instruction knows the 7453 // source vreg to test against 0, the destination vreg to set, 7454 // the condition code register to branch on, the 7455 // true/false values to select between, and a branch opcode to use. 7456 // It transforms 7457 // V1 = ABS V0 7458 // into 7459 // V2 = MOVS V0 7460 // BCC (branch to SinkBB if V0 >= 0) 7461 // RSBBB: V3 = RSBri V2, 0 (compute ABS if V2 < 0) 7462 // SinkBB: V1 = PHI(V2, V3) 7463 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 7464 MachineFunction::iterator BBI = BB; 7465 ++BBI; 7466 MachineFunction *Fn = BB->getParent(); 7467 MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB); 7468 MachineBasicBlock *SinkBB = Fn->CreateMachineBasicBlock(LLVM_BB); 7469 Fn->insert(BBI, RSBBB); 7470 Fn->insert(BBI, SinkBB); 7471 7472 unsigned int ABSSrcReg = MI->getOperand(1).getReg(); 7473 unsigned int ABSDstReg = MI->getOperand(0).getReg(); 7474 bool isThumb2 = Subtarget->isThumb2(); 7475 MachineRegisterInfo &MRI = Fn->getRegInfo(); 7476 // In Thumb mode S must not be specified if source register is the SP or 7477 // PC and if destination register is the SP, so restrict register class 7478 unsigned NewRsbDstReg = MRI.createVirtualRegister(isThumb2 ? 7479 (const TargetRegisterClass*)&ARM::rGPRRegClass : 7480 (const TargetRegisterClass*)&ARM::GPRRegClass); 7481 7482 // Transfer the remainder of BB and its successor edges to sinkMBB. 7483 SinkBB->splice(SinkBB->begin(), BB, 7484 llvm::next(MachineBasicBlock::iterator(MI)), 7485 BB->end()); 7486 SinkBB->transferSuccessorsAndUpdatePHIs(BB); 7487 7488 BB->addSuccessor(RSBBB); 7489 BB->addSuccessor(SinkBB); 7490 7491 // fall through to SinkMBB 7492 RSBBB->addSuccessor(SinkBB); 7493 7494 // insert a cmp at the end of BB 7495 AddDefaultPred(BuildMI(BB, dl, 7496 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri)) 7497 .addReg(ABSSrcReg).addImm(0)); 7498 7499 // insert a bcc with opposite CC to ARMCC::MI at the end of BB 7500 BuildMI(BB, dl, 7501 TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB) 7502 .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR); 7503 7504 // insert rsbri in RSBBB 7505 // Note: BCC and rsbri will be converted into predicated rsbmi 7506 // by if-conversion pass 7507 BuildMI(*RSBBB, RSBBB->begin(), dl, 7508 TII->get(isThumb2 ? 
ARM::t2RSBri : ARM::RSBri), NewRsbDstReg) 7509 .addReg(ABSSrcReg, RegState::Kill) 7510 .addImm(0).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0); 7511 7512 // insert PHI in SinkBB, 7513 // reuse ABSDstReg to not change uses of ABS instruction 7514 BuildMI(*SinkBB, SinkBB->begin(), dl, 7515 TII->get(ARM::PHI), ABSDstReg) 7516 .addReg(NewRsbDstReg).addMBB(RSBBB) 7517 .addReg(ABSSrcReg).addMBB(BB); 7518 7519 // remove ABS instruction 7520 MI->eraseFromParent(); 7521 7522 // return last added BB 7523 return SinkBB; 7524 } 7525 case ARM::COPY_STRUCT_BYVAL_I32: 7526 ++NumLoopByVals; 7527 return EmitStructByval(MI, BB); 7528 } 7529} 7530 7531void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI, 7532 SDNode *Node) const { 7533 if (!MI->hasPostISelHook()) { 7534 assert(!convertAddSubFlagsOpcode(MI->getOpcode()) && 7535 "Pseudo flag-setting opcodes must be marked with 'hasPostISelHook'"); 7536 return; 7537 } 7538 7539 const MCInstrDesc *MCID = &MI->getDesc(); 7540 // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB, 7541 // RSC. Coming out of isel, they have an implicit CPSR def, but the optional 7542 // operand is still set to noreg. If needed, set the optional operand's 7543 // register to CPSR, and remove the redundant implicit def. 7544 // 7545 // e.g. ADCS (..., CPSR<imp-def>) -> ADC (... opt:CPSR<def>). 7546 7547 // Rename pseudo opcodes. 7548 unsigned NewOpc = convertAddSubFlagsOpcode(MI->getOpcode()); 7549 if (NewOpc) { 7550 const ARMBaseInstrInfo *TII = 7551 static_cast<const ARMBaseInstrInfo*>(getTargetMachine().getInstrInfo()); 7552 MCID = &TII->get(NewOpc); 7553 7554 assert(MCID->getNumOperands() == MI->getDesc().getNumOperands() + 1 && 7555 "converted opcode should be the same except for cc_out"); 7556 7557 MI->setDesc(*MCID); 7558 7559 // Add the optional cc_out operand 7560 MI->addOperand(MachineOperand::CreateReg(0, /*isDef=*/true)); 7561 } 7562 unsigned ccOutIdx = MCID->getNumOperands() - 1; 7563 7564 // Any ARM instruction that sets the 's' bit should specify an optional 7565 // "cc_out" operand in the last operand position. 7566 if (!MI->hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) { 7567 assert(!NewOpc && "Optional cc_out operand required"); 7568 return; 7569 } 7570 // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it 7571 // since we already have an optional CPSR def. 7572 bool definesCPSR = false; 7573 bool deadCPSR = false; 7574 for (unsigned i = MCID->getNumOperands(), e = MI->getNumOperands(); 7575 i != e; ++i) { 7576 const MachineOperand &MO = MI->getOperand(i); 7577 if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) { 7578 definesCPSR = true; 7579 if (MO.isDead()) 7580 deadCPSR = true; 7581 MI->RemoveOperand(i); 7582 break; 7583 } 7584 } 7585 if (!definesCPSR) { 7586 assert(!NewOpc && "Optional cc_out operand required"); 7587 return; 7588 } 7589 assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag"); 7590 if (deadCPSR) { 7591 assert(!MI->getOperand(ccOutIdx).getReg() && 7592 "expect uninitialized optional cc_out operand"); 7593 return; 7594 } 7595 7596 // If this instruction was defined with an optional CPSR def and its dag node 7597 // had a live implicit CPSR def, then activate the optional CPSR def. 
7598 MachineOperand &MO = MI->getOperand(ccOutIdx); 7599 MO.setReg(ARM::CPSR); 7600 MO.setIsDef(true); 7601} 7602 7603//===----------------------------------------------------------------------===// 7604// ARM Optimization Hooks 7605//===----------------------------------------------------------------------===// 7606 7607// Helper function that checks if N is a null or all ones constant. 7608static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) { 7609 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N); 7610 if (!C) 7611 return false; 7612 return AllOnes ? C->isAllOnesValue() : C->isNullValue(); 7613} 7614 7615// Return true if N is conditionally 0 or all ones. 7616// Detects these expressions where cc is an i1 value: 7617// 7618// (select cc 0, y) [AllOnes=0] 7619// (select cc y, 0) [AllOnes=0] 7620// (zext cc) [AllOnes=0] 7621// (sext cc) [AllOnes=0/1] 7622// (select cc -1, y) [AllOnes=1] 7623// (select cc y, -1) [AllOnes=1] 7624// 7625// Invert is set when N is the null/all ones constant when CC is false. 7626// OtherOp is set to the alternative value of N. 7627static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes, 7628 SDValue &CC, bool &Invert, 7629 SDValue &OtherOp, 7630 SelectionDAG &DAG) { 7631 switch (N->getOpcode()) { 7632 default: return false; 7633 case ISD::SELECT: { 7634 CC = N->getOperand(0); 7635 SDValue N1 = N->getOperand(1); 7636 SDValue N2 = N->getOperand(2); 7637 if (isZeroOrAllOnes(N1, AllOnes)) { 7638 Invert = false; 7639 OtherOp = N2; 7640 return true; 7641 } 7642 if (isZeroOrAllOnes(N2, AllOnes)) { 7643 Invert = true; 7644 OtherOp = N1; 7645 return true; 7646 } 7647 return false; 7648 } 7649 case ISD::ZERO_EXTEND: 7650 // (zext cc) can never be the all ones value. 7651 if (AllOnes) 7652 return false; 7653 // Fall through. 7654 case ISD::SIGN_EXTEND: { 7655 EVT VT = N->getValueType(0); 7656 CC = N->getOperand(0); 7657 if (CC.getValueType() != MVT::i1) 7658 return false; 7659 Invert = !AllOnes; 7660 if (AllOnes) 7661 // When looking for an AllOnes constant, N is an sext, and the 'other' 7662 // value is 0. 7663 OtherOp = DAG.getConstant(0, VT); 7664 else if (N->getOpcode() == ISD::ZERO_EXTEND) 7665 // When looking for a 0 constant, N can be zext or sext. 7666 OtherOp = DAG.getConstant(1, VT); 7667 else 7668 OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT); 7669 return true; 7670 } 7671 } 7672} 7673 7674// Combine a constant select operand into its use: 7675// 7676// (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) 7677// (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) 7678// (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) [AllOnes=1] 7679// (or (select cc, 0, c), x) -> (select cc, x, (or, x, c)) 7680// (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c)) 7681// 7682// The transform is rejected if the select doesn't have a constant operand that 7683// is null, or all ones when AllOnes is set. 7684// 7685// Also recognize sext/zext from i1: 7686// 7687// (add (zext cc), x) -> (select cc (add x, 1), x) 7688// (add (sext cc), x) -> (select cc (add x, -1), x) 7689// 7690// These transformations eventually create predicated instructions. 7691// 7692// @param N The node to transform. 7693// @param Slct The N operand that is a select. 7694// @param OtherOp The other N operand (x above). 7695// @param DCI Context. 7696// @param AllOnes Require the select constant to be all ones instead of null. 7697// @returns The new node, or SDValue() on failure. 
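//
// Illustrative sketch of the dataflow (hypothetical operands, not taken from
// the surrounding code): for N = (add (select cc, 0, c), x) with AllOnes
// false, Slct = (select cc, 0, c) and OtherOp = x. The check in the function
// below reports Invert = false and NonConstantVal = c, so TrueVal stays x,
// FalseVal becomes (add x, c), and the result is (select cc, x, (add x, c)).
// Had the constant been in the true position, SwapSelectOps would swap
// TrueVal and FalseVal instead.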
7698static 7699SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, 7700 TargetLowering::DAGCombinerInfo &DCI, 7701 bool AllOnes = false) { 7702 SelectionDAG &DAG = DCI.DAG; 7703 EVT VT = N->getValueType(0); 7704 SDValue NonConstantVal; 7705 SDValue CCOp; 7706 bool SwapSelectOps; 7707 if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps, 7708 NonConstantVal, DAG)) 7709 return SDValue(); 7710 7711 // Slct is now known to be the desired identity constant when CC is true. 7712 SDValue TrueVal = OtherOp; 7713 SDValue FalseVal = DAG.getNode(N->getOpcode(), N->getDebugLoc(), VT, 7714 OtherOp, NonConstantVal); 7715 // Unless SwapSelectOps says CC should be false. 7716 if (SwapSelectOps) 7717 std::swap(TrueVal, FalseVal); 7718 7719 return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT, 7720 CCOp, TrueVal, FalseVal); 7721} 7722 7723// Attempt combineSelectAndUse on each operand of a commutative operator N. 7724static 7725SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes, 7726 TargetLowering::DAGCombinerInfo &DCI) { 7727 SDValue N0 = N->getOperand(0); 7728 SDValue N1 = N->getOperand(1); 7729 if (N0.getNode()->hasOneUse()) { 7730 SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes); 7731 if (Result.getNode()) 7732 return Result; 7733 } 7734 if (N1.getNode()->hasOneUse()) { 7735 SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes); 7736 if (Result.getNode()) 7737 return Result; 7738 } 7739 return SDValue(); 7740} 7741 7742// AddCombineToVPADDL - For pair-wise add on NEON, use the vpaddl instruction 7743// (only after legalization). 7744static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1, 7745 TargetLowering::DAGCombinerInfo &DCI, 7746 const ARMSubtarget *Subtarget) { 7747 7748 // Only perform the optimization after legalization and if NEON is available. We 7749 // also expect both operands to be BUILD_VECTORs. 7750 if (DCI.isBeforeLegalize() || !Subtarget->hasNEON() 7751 || N0.getOpcode() != ISD::BUILD_VECTOR 7752 || N1.getOpcode() != ISD::BUILD_VECTOR) 7753 return SDValue(); 7754 7755 // Check output type since VPADDL operand elements can only be 8, 16, or 32. 7756 EVT VT = N->getValueType(0); 7757 if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64) 7758 return SDValue(); 7759 7760 // Check that the vector operands are of the right form. 7761 // N0 and N1 are BUILD_VECTOR nodes with N number of EXTRACT_VECTOR 7762 // operands, where N is the size of the formed vector. 7763 // Each EXTRACT_VECTOR should have the same input vector and odd or even 7764 // index such that we have a pair-wise add pattern. 7765 7766 // Grab the vector that all EXTRACT_VECTOR nodes should be referencing. 7767 if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 7768 return SDValue(); 7769 SDValue Vec = N0->getOperand(0)->getOperand(0); 7770 SDNode *V = Vec.getNode(); 7771 unsigned nextIndex = 0; 7772 7773 // For each operand of the ADD that is a BUILD_VECTOR, 7774 // check to see if each of its operands is an EXTRACT_VECTOR with 7775 // the same vector and appropriate index. 7776 for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) { 7777 if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT 7778 && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 7779 7780 SDValue ExtVec0 = N0->getOperand(i); 7781 SDValue ExtVec1 = N1->getOperand(i); 7782 7783 // First operand is the vector, verify it is the same.
7784 if (V != ExtVec0->getOperand(0).getNode() || 7785 V != ExtVec1->getOperand(0).getNode()) 7786 return SDValue(); 7787 7788 // Second is the constant, verify its correct. 7789 ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1)); 7790 ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1)); 7791 7792 // For the constant, we want to see all the even or all the odd. 7793 if (!C0 || !C1 || C0->getZExtValue() != nextIndex 7794 || C1->getZExtValue() != nextIndex+1) 7795 return SDValue(); 7796 7797 // Increment index. 7798 nextIndex+=2; 7799 } else 7800 return SDValue(); 7801 } 7802 7803 // Create VPADDL node. 7804 SelectionDAG &DAG = DCI.DAG; 7805 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 7806 7807 // Build operand list. 7808 SmallVector<SDValue, 8> Ops; 7809 Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, 7810 TLI.getPointerTy())); 7811 7812 // Input is the vector. 7813 Ops.push_back(Vec); 7814 7815 // Get widened type and narrowed type. 7816 MVT widenType; 7817 unsigned numElem = VT.getVectorNumElements(); 7818 switch (VT.getVectorElementType().getSimpleVT().SimpleTy) { 7819 case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break; 7820 case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break; 7821 case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break; 7822 default: 7823 llvm_unreachable("Invalid vector element type for padd optimization."); 7824 } 7825 7826 SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), 7827 widenType, &Ops[0], Ops.size()); 7828 return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, tmp); 7829} 7830 7831static SDValue findMUL_LOHI(SDValue V) { 7832 if (V->getOpcode() == ISD::UMUL_LOHI || 7833 V->getOpcode() == ISD::SMUL_LOHI) 7834 return V; 7835 return SDValue(); 7836} 7837 7838static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode, 7839 TargetLowering::DAGCombinerInfo &DCI, 7840 const ARMSubtarget *Subtarget) { 7841 7842 if (Subtarget->isThumb1Only()) return SDValue(); 7843 7844 // Only perform the checks after legalize when the pattern is available. 7845 if (DCI.isBeforeLegalize()) return SDValue(); 7846 7847 // Look for multiply add opportunities. 7848 // The pattern is a ISD::UMUL_LOHI followed by two add nodes, where 7849 // each add nodes consumes a value from ISD::UMUL_LOHI and there is 7850 // a glue link from the first add to the second add. 7851 // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by 7852 // a S/UMLAL instruction. 7853 // loAdd UMUL_LOHI 7854 // \ / :lo \ :hi 7855 // \ / \ [no multiline comment] 7856 // ADDC | hiAdd 7857 // \ :glue / / 7858 // \ / / 7859 // ADDE 7860 // 7861 assert(AddcNode->getOpcode() == ISD::ADDC && "Expect an ADDC"); 7862 SDValue AddcOp0 = AddcNode->getOperand(0); 7863 SDValue AddcOp1 = AddcNode->getOperand(1); 7864 7865 // Check if the two operands are from the same mul_lohi node. 7866 if (AddcOp0.getNode() == AddcOp1.getNode()) 7867 return SDValue(); 7868 7869 assert(AddcNode->getNumValues() == 2 && 7870 AddcNode->getValueType(0) == MVT::i32 && 7871 AddcNode->getValueType(1) == MVT::Glue && 7872 "Expect ADDC with two result values: i32, glue"); 7873 7874 // Check that the ADDC adds the low result of the S/UMUL_LOHI. 7875 if (AddcOp0->getOpcode() != ISD::UMUL_LOHI && 7876 AddcOp0->getOpcode() != ISD::SMUL_LOHI && 7877 AddcOp1->getOpcode() != ISD::UMUL_LOHI && 7878 AddcOp1->getOpcode() != ISD::SMUL_LOHI) 7879 return SDValue(); 7880 7881 // Look for the glued ADDE. 
7882 SDNode* AddeNode = AddcNode->getGluedUser(); 7883 if (AddeNode == NULL) 7884 return SDValue(); 7885 7886 // Make sure it is really an ADDE. 7887 if (AddeNode->getOpcode() != ISD::ADDE) 7888 return SDValue(); 7889 7890 assert(AddeNode->getNumOperands() == 3 && 7891 AddeNode->getOperand(2).getValueType() == MVT::Glue && 7892 "ADDE node has the wrong inputs"); 7893 7894 // Check for the triangle shape. 7895 SDValue AddeOp0 = AddeNode->getOperand(0); 7896 SDValue AddeOp1 = AddeNode->getOperand(1); 7897 7898 // Make sure that the ADDE operands are not coming from the same node. 7899 if (AddeOp0.getNode() == AddeOp1.getNode()) 7900 return SDValue(); 7901 7902 // Find the MUL_LOHI node walking up ADDE's operands. 7903 bool IsLeftOperandMUL = false; 7904 SDValue MULOp = findMUL_LOHI(AddeOp0); 7905 if (MULOp == SDValue()) 7906 MULOp = findMUL_LOHI(AddeOp1); 7907 else 7908 IsLeftOperandMUL = true; 7909 if (MULOp == SDValue()) 7910 return SDValue(); 7911 7912 // Figure out the right opcode. 7913 unsigned Opc = MULOp->getOpcode(); 7914 unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL; 7915 7916 // Figure out the high and low input values to the MLAL node. 7917 SDValue* HiMul = &MULOp; 7918 SDValue* HiAdd = NULL; 7919 SDValue* LoMul = NULL; 7920 SDValue* LowAdd = NULL; 7921 7922 if (IsLeftOperandMUL) 7923 HiAdd = &AddeOp1; 7924 else 7925 HiAdd = &AddeOp0; 7926 7927 7928 if (AddcOp0->getOpcode() == Opc) { 7929 LoMul = &AddcOp0; 7930 LowAdd = &AddcOp1; 7931 } 7932 if (AddcOp1->getOpcode() == Opc) { 7933 LoMul = &AddcOp1; 7934 LowAdd = &AddcOp0; 7935 } 7936 7937 if (LoMul == NULL) 7938 return SDValue(); 7939 7940 if (LoMul->getNode() != HiMul->getNode()) 7941 return SDValue(); 7942 7943 // Create the merged node. 7944 SelectionDAG &DAG = DCI.DAG; 7945 7946 // Build operand list. 7947 SmallVector<SDValue, 8> Ops; 7948 Ops.push_back(LoMul->getOperand(0)); 7949 Ops.push_back(LoMul->getOperand(1)); 7950 Ops.push_back(*LowAdd); 7951 Ops.push_back(*HiAdd); 7952 7953 SDValue MLALNode = DAG.getNode(FinalOpc, AddcNode->getDebugLoc(), 7954 DAG.getVTList(MVT::i32, MVT::i32), 7955 &Ops[0], Ops.size()); 7956 7957 // Replace the ADDs' nodes uses by the MLA node's values. 7958 SDValue HiMLALResult(MLALNode.getNode(), 1); 7959 DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult); 7960 7961 SDValue LoMLALResult(MLALNode.getNode(), 0); 7962 DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult); 7963 7964 // Return original node to notify the driver to stop replacing. 7965 SDValue resNode(AddcNode, 0); 7966 return resNode; 7967} 7968 7969/// PerformADDCCombine - Target-specific dag combine transform from 7970/// ISD::ADDC, ISD::ADDE, and ISD::MUL_LOHI to MLAL. 7971static SDValue PerformADDCCombine(SDNode *N, 7972 TargetLowering::DAGCombinerInfo &DCI, 7973 const ARMSubtarget *Subtarget) { 7974 7975 return AddCombineTo64bitMLAL(N, DCI, Subtarget); 7976 7977} 7978 7979/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with 7980/// operands N0 and N1. This is a helper for PerformADDCombine that is 7981/// called with the default operands, and if that fails, with commuted 7982/// operands. 7983static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, 7984 TargetLowering::DAGCombinerInfo &DCI, 7985 const ARMSubtarget *Subtarget){ 7986 7987 // Attempt to create vpaddl for this add. 
7988 SDValue Result = AddCombineToVPADDL(N, N0, N1, DCI, Subtarget); 7989 if (Result.getNode()) 7990 return Result; 7991 7992 // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) 7993 if (N0.getNode()->hasOneUse()) { 7994 SDValue Result = combineSelectAndUse(N, N0, N1, DCI); 7995 if (Result.getNode()) return Result; 7996 } 7997 return SDValue(); 7998} 7999 8000/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD. 8001/// 8002static SDValue PerformADDCombine(SDNode *N, 8003 TargetLowering::DAGCombinerInfo &DCI, 8004 const ARMSubtarget *Subtarget) { 8005 SDValue N0 = N->getOperand(0); 8006 SDValue N1 = N->getOperand(1); 8007 8008 // First try with the default operand order. 8009 SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget); 8010 if (Result.getNode()) 8011 return Result; 8012 8013 // If that didn't work, try again with the operands commuted. 8014 return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget); 8015} 8016 8017/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB. 8018/// 8019static SDValue PerformSUBCombine(SDNode *N, 8020 TargetLowering::DAGCombinerInfo &DCI) { 8021 SDValue N0 = N->getOperand(0); 8022 SDValue N1 = N->getOperand(1); 8023 8024 // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) 8025 if (N1.getNode()->hasOneUse()) { 8026 SDValue Result = combineSelectAndUse(N, N1, N0, DCI); 8027 if (Result.getNode()) return Result; 8028 } 8029 8030 return SDValue(); 8031} 8032 8033/// PerformVMULCombine 8034/// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the 8035/// special multiplier accumulator forwarding. 8036/// vmul d3, d0, d2 8037/// vmla d3, d1, d2 8038/// is faster than 8039/// vadd d3, d0, d1 8040/// vmul d3, d3, d2 8041static SDValue PerformVMULCombine(SDNode *N, 8042 TargetLowering::DAGCombinerInfo &DCI, 8043 const ARMSubtarget *Subtarget) { 8044 if (!Subtarget->hasVMLxForwarding()) 8045 return SDValue(); 8046 8047 SelectionDAG &DAG = DCI.DAG; 8048 SDValue N0 = N->getOperand(0); 8049 SDValue N1 = N->getOperand(1); 8050 unsigned Opcode = N0.getOpcode(); 8051 if (Opcode != ISD::ADD && Opcode != ISD::SUB && 8052 Opcode != ISD::FADD && Opcode != ISD::FSUB) { 8053 Opcode = N1.getOpcode(); 8054 if (Opcode != ISD::ADD && Opcode != ISD::SUB && 8055 Opcode != ISD::FADD && Opcode != ISD::FSUB) 8056 return SDValue(); 8057 std::swap(N0, N1); 8058 } 8059 8060 EVT VT = N->getValueType(0); 8061 DebugLoc DL = N->getDebugLoc(); 8062 SDValue N00 = N0->getOperand(0); 8063 SDValue N01 = N0->getOperand(1); 8064 return DAG.getNode(Opcode, DL, VT, 8065 DAG.getNode(ISD::MUL, DL, VT, N00, N1), 8066 DAG.getNode(ISD::MUL, DL, VT, N01, N1)); 8067} 8068 8069static SDValue PerformMULCombine(SDNode *N, 8070 TargetLowering::DAGCombinerInfo &DCI, 8071 const ARMSubtarget *Subtarget) { 8072 SelectionDAG &DAG = DCI.DAG; 8073 8074 if (Subtarget->isThumb1Only()) 8075 return SDValue(); 8076 8077 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 8078 return SDValue(); 8079 8080 EVT VT = N->getValueType(0); 8081 if (VT.is64BitVector() || VT.is128BitVector()) 8082 return PerformVMULCombine(N, DCI, Subtarget); 8083 if (VT != MVT::i32) 8084 return SDValue(); 8085 8086 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 8087 if (!C) 8088 return SDValue(); 8089 8090 int64_t MulAmt = C->getSExtValue(); 8091 unsigned ShiftAmt = CountTrailingZeros_64(MulAmt); 8092 8093 ShiftAmt = ShiftAmt & (32 - 1); 8094 SDValue V = N->getOperand(0); 8095 DebugLoc DL = N->getDebugLoc(); 8096 8097 
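// Worked example for the decomposition below (hypothetical constant): for
// (mul x, 20), MulAmt = 20 has two trailing zeros, so ShiftAmt = 2 and the
// shifted multiplier is 5 = 2^2 + 1. The code therefore forms
// (add (shl x, 2), x) for the factor of 5 and finally shifts the result left
// by ShiftAmt to restore the factor of 4, yielding 20*x without an actual MUL.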
SDValue Res; 8098 MulAmt >>= ShiftAmt; 8099 8100 if (MulAmt >= 0) { 8101 if (isPowerOf2_32(MulAmt - 1)) { 8102 // (mul x, 2^N + 1) => (add (shl x, N), x) 8103 Res = DAG.getNode(ISD::ADD, DL, VT, 8104 V, 8105 DAG.getNode(ISD::SHL, DL, VT, 8106 V, 8107 DAG.getConstant(Log2_32(MulAmt - 1), 8108 MVT::i32))); 8109 } else if (isPowerOf2_32(MulAmt + 1)) { 8110 // (mul x, 2^N - 1) => (sub (shl x, N), x) 8111 Res = DAG.getNode(ISD::SUB, DL, VT, 8112 DAG.getNode(ISD::SHL, DL, VT, 8113 V, 8114 DAG.getConstant(Log2_32(MulAmt + 1), 8115 MVT::i32)), 8116 V); 8117 } else 8118 return SDValue(); 8119 } else { 8120 uint64_t MulAmtAbs = -MulAmt; 8121 if (isPowerOf2_32(MulAmtAbs + 1)) { 8122 // (mul x, -(2^N - 1)) => (sub x, (shl x, N)) 8123 Res = DAG.getNode(ISD::SUB, DL, VT, 8124 V, 8125 DAG.getNode(ISD::SHL, DL, VT, 8126 V, 8127 DAG.getConstant(Log2_32(MulAmtAbs + 1), 8128 MVT::i32))); 8129 } else if (isPowerOf2_32(MulAmtAbs - 1)) { 8130 // (mul x, -(2^N + 1)) => - (add (shl x, N), x) 8131 Res = DAG.getNode(ISD::ADD, DL, VT, 8132 V, 8133 DAG.getNode(ISD::SHL, DL, VT, 8134 V, 8135 DAG.getConstant(Log2_32(MulAmtAbs-1), 8136 MVT::i32))); 8137 Res = DAG.getNode(ISD::SUB, DL, VT, 8138 DAG.getConstant(0, MVT::i32),Res); 8139 8140 } else 8141 return SDValue(); 8142 } 8143 8144 if (ShiftAmt != 0) 8145 Res = DAG.getNode(ISD::SHL, DL, VT, 8146 Res, DAG.getConstant(ShiftAmt, MVT::i32)); 8147 8148 // Do not add new nodes to DAG combiner worklist. 8149 DCI.CombineTo(N, Res, false); 8150 return SDValue(); 8151} 8152 8153static SDValue PerformANDCombine(SDNode *N, 8154 TargetLowering::DAGCombinerInfo &DCI, 8155 const ARMSubtarget *Subtarget) { 8156 8157 // Attempt to use immediate-form VBIC 8158 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 8159 DebugLoc dl = N->getDebugLoc(); 8160 EVT VT = N->getValueType(0); 8161 SelectionDAG &DAG = DCI.DAG; 8162 8163 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 8164 return SDValue(); 8165 8166 APInt SplatBits, SplatUndef; 8167 unsigned SplatBitSize; 8168 bool HasAnyUndefs; 8169 if (BVN && 8170 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 8171 if (SplatBitSize <= 64) { 8172 EVT VbicVT; 8173 SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(), 8174 SplatUndef.getZExtValue(), SplatBitSize, 8175 DAG, VbicVT, VT.is128BitVector(), 8176 OtherModImm); 8177 if (Val.getNode()) { 8178 SDValue Input = 8179 DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0)); 8180 SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val); 8181 return DAG.getNode(ISD::BITCAST, dl, VT, Vbic); 8182 } 8183 } 8184 } 8185 8186 if (!Subtarget->isThumb1Only()) { 8187 // fold (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) 8188 SDValue Result = combineSelectAndUseCommutative(N, true, DCI); 8189 if (Result.getNode()) 8190 return Result; 8191 } 8192 8193 return SDValue(); 8194} 8195 8196/// PerformORCombine - Target-specific dag combine xforms for ISD::OR 8197static SDValue PerformORCombine(SDNode *N, 8198 TargetLowering::DAGCombinerInfo &DCI, 8199 const ARMSubtarget *Subtarget) { 8200 // Attempt to use immediate-form VORR 8201 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 8202 DebugLoc dl = N->getDebugLoc(); 8203 EVT VT = N->getValueType(0); 8204 SelectionDAG &DAG = DCI.DAG; 8205 8206 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 8207 return SDValue(); 8208 8209 APInt SplatBits, SplatUndef; 8210 unsigned SplatBitSize; 8211 bool HasAnyUndefs; 8212 if (BVN && Subtarget->hasNEON() && 8213 
BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 8214 if (SplatBitSize <= 64) { 8215 EVT VorrVT; 8216 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(), 8217 SplatUndef.getZExtValue(), SplatBitSize, 8218 DAG, VorrVT, VT.is128BitVector(), 8219 OtherModImm); 8220 if (Val.getNode()) { 8221 SDValue Input = 8222 DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0)); 8223 SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val); 8224 return DAG.getNode(ISD::BITCAST, dl, VT, Vorr); 8225 } 8226 } 8227 } 8228 8229 if (!Subtarget->isThumb1Only()) { 8230 // fold (or (select cc, 0, c), x) -> (select cc, x, (or, x, c)) 8231 SDValue Result = combineSelectAndUseCommutative(N, false, DCI); 8232 if (Result.getNode()) 8233 return Result; 8234 } 8235 8236 // The code below optimizes (or (and X, Y), Z). 8237 // The AND operand needs to have a single user to make these optimizations 8238 // profitable. 8239 SDValue N0 = N->getOperand(0); 8240 if (N0.getOpcode() != ISD::AND || !N0.hasOneUse()) 8241 return SDValue(); 8242 SDValue N1 = N->getOperand(1); 8243 8244 // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant. 8245 if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() && 8246 DAG.getTargetLoweringInfo().isTypeLegal(VT)) { 8247 APInt SplatUndef; 8248 unsigned SplatBitSize; 8249 bool HasAnyUndefs; 8250 8251 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1)); 8252 APInt SplatBits0; 8253 if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize, 8254 HasAnyUndefs) && !HasAnyUndefs) { 8255 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1)); 8256 APInt SplatBits1; 8257 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize, 8258 HasAnyUndefs) && !HasAnyUndefs && 8259 SplatBits0 == ~SplatBits1) { 8260 // Canonicalize the vector type to make instruction selection simpler. 8261 EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; 8262 SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT, 8263 N0->getOperand(1), N0->getOperand(0), 8264 N1->getOperand(0)); 8265 return DAG.getNode(ISD::BITCAST, dl, VT, Result); 8266 } 8267 } 8268 } 8269 8270 // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when 8271 // reasonable. 8272 8273 // BFI is only available on V6T2+ 8274 if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops()) 8275 return SDValue(); 8276 8277 DebugLoc DL = N->getDebugLoc(); 8278 // 1) or (and A, mask), val => ARMbfi A, val, mask 8279 // iff (val & mask) == val 8280 // 8281 // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 8282 // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2) 8283 // && mask == ~mask2 8284 // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2) 8285 // && ~mask == mask2 8286 // (i.e., copy a bitfield value into another bitfield of the same width) 8287 8288 if (VT != MVT::i32) 8289 return SDValue(); 8290 8291 SDValue N00 = N0.getOperand(0); 8292 8293 // The value and the mask need to be constants so we can verify this is 8294 // actually a bitfield set. If the mask is 0xffff, we can do better 8295 // via a movt instruction, so don't use BFI in that case. 
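// Illustrative walk-through of case (1) (hypothetical constants): with
// Mask = 0xffff00ff and Val = 0x00003400, Val lies entirely within the field
// ~Mask = 0x0000ff00 and isBitFieldInvertedMask(Mask) holds, so Val is
// shifted right by CountTrailingZeros_32(~Mask) = 8 to give 0x34 and the
// node becomes (ARMbfi A, 0x34, 0xffff00ff).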
8296 SDValue MaskOp = N0.getOperand(1); 8297 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp); 8298 if (!MaskC) 8299 return SDValue(); 8300 unsigned Mask = MaskC->getZExtValue(); 8301 if (Mask == 0xffff) 8302 return SDValue(); 8303 SDValue Res; 8304 // Case (1): or (and A, mask), val => ARMbfi A, val, mask 8305 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 8306 if (N1C) { 8307 unsigned Val = N1C->getZExtValue(); 8308 if ((Val & ~Mask) != Val) 8309 return SDValue(); 8310 8311 if (ARM::isBitFieldInvertedMask(Mask)) { 8312 Val >>= CountTrailingZeros_32(~Mask); 8313 8314 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, 8315 DAG.getConstant(Val, MVT::i32), 8316 DAG.getConstant(Mask, MVT::i32)); 8317 8318 // Do not add new nodes to DAG combiner worklist. 8319 DCI.CombineTo(N, Res, false); 8320 return SDValue(); 8321 } 8322 } else if (N1.getOpcode() == ISD::AND) { 8323 // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask 8324 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 8325 if (!N11C) 8326 return SDValue(); 8327 unsigned Mask2 = N11C->getZExtValue(); 8328 8329 // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern 8330 // as is to match. 8331 if (ARM::isBitFieldInvertedMask(Mask) && 8332 (Mask == ~Mask2)) { 8333 // The pack halfword instruction works better for masks that fit it, 8334 // so use that when it's available. 8335 if (Subtarget->hasT2ExtractPack() && 8336 (Mask == 0xffff || Mask == 0xffff0000)) 8337 return SDValue(); 8338 // 2a 8339 unsigned amt = CountTrailingZeros_32(Mask2); 8340 Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0), 8341 DAG.getConstant(amt, MVT::i32)); 8342 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res, 8343 DAG.getConstant(Mask, MVT::i32)); 8344 // Do not add new nodes to DAG combiner worklist. 8345 DCI.CombineTo(N, Res, false); 8346 return SDValue(); 8347 } else if (ARM::isBitFieldInvertedMask(~Mask) && 8348 (~Mask == Mask2)) { 8349 // The pack halfword instruction works better for masks that fit it, 8350 // so use that when it's available. 8351 if (Subtarget->hasT2ExtractPack() && 8352 (Mask2 == 0xffff || Mask2 == 0xffff0000)) 8353 return SDValue(); 8354 // 2b 8355 unsigned lsb = CountTrailingZeros_32(Mask); 8356 Res = DAG.getNode(ISD::SRL, DL, VT, N00, 8357 DAG.getConstant(lsb, MVT::i32)); 8358 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res, 8359 DAG.getConstant(Mask2, MVT::i32)); 8360 // Do not add new nodes to DAG combiner worklist. 8361 DCI.CombineTo(N, Res, false); 8362 return SDValue(); 8363 } 8364 } 8365 8366 if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) && 8367 N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) && 8368 ARM::isBitFieldInvertedMask(~Mask)) { 8369 // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask 8370 // where lsb(mask) == #shamt and masked bits of B are known zero. 8371 SDValue ShAmt = N00.getOperand(1); 8372 unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue(); 8373 unsigned LSB = CountTrailingZeros_32(Mask); 8374 if (ShAmtC != LSB) 8375 return SDValue(); 8376 8377 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0), 8378 DAG.getConstant(~Mask, MVT::i32)); 8379 8380 // Do not add new nodes to DAG combiner worklist. 
8381 DCI.CombineTo(N, Res, false); 8382 } 8383 8384 return SDValue(); 8385} 8386 8387static SDValue PerformXORCombine(SDNode *N, 8388 TargetLowering::DAGCombinerInfo &DCI, 8389 const ARMSubtarget *Subtarget) { 8390 EVT VT = N->getValueType(0); 8391 SelectionDAG &DAG = DCI.DAG; 8392 8393 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 8394 return SDValue(); 8395 8396 if (!Subtarget->isThumb1Only()) { 8397 // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c)) 8398 SDValue Result = combineSelectAndUseCommutative(N, false, DCI); 8399 if (Result.getNode()) 8400 return Result; 8401 } 8402 8403 return SDValue(); 8404} 8405 8406/// PerformBFICombine - (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff 8407/// the bits being cleared by the AND are not demanded by the BFI. 8408static SDValue PerformBFICombine(SDNode *N, 8409 TargetLowering::DAGCombinerInfo &DCI) { 8410 SDValue N1 = N->getOperand(1); 8411 if (N1.getOpcode() == ISD::AND) { 8412 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 8413 if (!N11C) 8414 return SDValue(); 8415 unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); 8416 unsigned LSB = CountTrailingZeros_32(~InvMask); 8417 unsigned Width = (32 - CountLeadingZeros_32(~InvMask)) - LSB; 8418 unsigned Mask = (1 << Width)-1; 8419 unsigned Mask2 = N11C->getZExtValue(); 8420 if ((Mask & (~Mask2)) == 0) 8421 return DCI.DAG.getNode(ARMISD::BFI, N->getDebugLoc(), N->getValueType(0), 8422 N->getOperand(0), N1.getOperand(0), 8423 N->getOperand(2)); 8424 } 8425 return SDValue(); 8426} 8427 8428/// PerformVMOVRRDCombine - Target-specific dag combine xforms for 8429/// ARMISD::VMOVRRD. 8430static SDValue PerformVMOVRRDCombine(SDNode *N, 8431 TargetLowering::DAGCombinerInfo &DCI) { 8432 // vmovrrd(vmovdrr x, y) -> x,y 8433 SDValue InDouble = N->getOperand(0); 8434 if (InDouble.getOpcode() == ARMISD::VMOVDRR) 8435 return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1)); 8436 8437 // vmovrrd(load f64) -> (load i32), (load i32) 8438 SDNode *InNode = InDouble.getNode(); 8439 if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() && 8440 InNode->getValueType(0) == MVT::f64 && 8441 InNode->getOperand(1).getOpcode() == ISD::FrameIndex && 8442 !cast<LoadSDNode>(InNode)->isVolatile()) { 8443 // TODO: Should this be done for non-FrameIndex operands? 8444 LoadSDNode *LD = cast<LoadSDNode>(InNode); 8445 8446 SelectionDAG &DAG = DCI.DAG; 8447 DebugLoc DL = LD->getDebugLoc(); 8448 SDValue BasePtr = LD->getBasePtr(); 8449 SDValue NewLD1 = DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, 8450 LD->getPointerInfo(), LD->isVolatile(), 8451 LD->isNonTemporal(), LD->isInvariant(), 8452 LD->getAlignment()); 8453 8454 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 8455 DAG.getConstant(4, MVT::i32)); 8456 SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, NewLD1.getValue(1), OffsetPtr, 8457 LD->getPointerInfo(), LD->isVolatile(), 8458 LD->isNonTemporal(), LD->isInvariant(), 8459 std::min(4U, LD->getAlignment() / 2)); 8460 8461 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1)); 8462 SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2); 8463 DCI.RemoveFromWorklist(LD); 8464 DAG.DeleteNode(LD); 8465 return Result; 8466 } 8467 8468 return SDValue(); 8469} 8470 8471/// PerformVMOVDRRCombine - Target-specific dag combine xforms for 8472/// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands. 
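///
/// Illustrative sketch (hypothetical value X, not from the surrounding code):
/// if an f64 value X was split with N = (ARMISD::VMOVRRD X) and then
/// reassembled as (ARMISD::VMOVDRR N:0, N:1), the round trip is redundant and
/// the combine below folds it to (bitcast X), looking through BITCAST nodes
/// on the two i32 halves first.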
8473static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { 8474 // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) 8475 SDValue Op0 = N->getOperand(0); 8476 SDValue Op1 = N->getOperand(1); 8477 if (Op0.getOpcode() == ISD::BITCAST) 8478 Op0 = Op0.getOperand(0); 8479 if (Op1.getOpcode() == ISD::BITCAST) 8480 Op1 = Op1.getOperand(0); 8481 if (Op0.getOpcode() == ARMISD::VMOVRRD && 8482 Op0.getNode() == Op1.getNode() && 8483 Op0.getResNo() == 0 && Op1.getResNo() == 1) 8484 return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), 8485 N->getValueType(0), Op0.getOperand(0)); 8486 return SDValue(); 8487} 8488 8489/// PerformSTORECombine - Target-specific dag combine xforms for 8490/// ISD::STORE. 8491static SDValue PerformSTORECombine(SDNode *N, 8492 TargetLowering::DAGCombinerInfo &DCI) { 8493 StoreSDNode *St = cast<StoreSDNode>(N); 8494 if (St->isVolatile()) 8495 return SDValue(); 8496 8497 // Optimize trunc store (of multiple scalars) to shuffle and store. First, 8498 // pack all of the elements in one place. Next, store to memory in fewer 8499 // chunks. 8500 SDValue StVal = St->getValue(); 8501 EVT VT = StVal.getValueType(); 8502 if (St->isTruncatingStore() && VT.isVector()) { 8503 SelectionDAG &DAG = DCI.DAG; 8504 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8505 EVT StVT = St->getMemoryVT(); 8506 unsigned NumElems = VT.getVectorNumElements(); 8507 assert(StVT != VT && "Cannot truncate to the same type"); 8508 unsigned FromEltSz = VT.getVectorElementType().getSizeInBits(); 8509 unsigned ToEltSz = StVT.getVectorElementType().getSizeInBits(); 8510 8511 // From, To sizes and ElemCount must be pow of two 8512 if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz)) return SDValue(); 8513 8514 // We are going to use the original vector elt for storing. 8515 // Accumulated smaller vector elements must be a multiple of the store size. 8516 if (0 != (NumElems * FromEltSz) % ToEltSz) return SDValue(); 8517 8518 unsigned SizeRatio = FromEltSz / ToEltSz; 8519 assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits()); 8520 8521 // Create a type on which we perform the shuffle. 8522 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(), 8523 NumElems*SizeRatio); 8524 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits()); 8525 8526 DebugLoc DL = St->getDebugLoc(); 8527 SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal); 8528 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1); 8529 for (unsigned i = 0; i < NumElems; ++i) ShuffleVec[i] = i * SizeRatio; 8530 8531 // Can't shuffle using an illegal type. 8532 if (!TLI.isTypeLegal(WideVecVT)) return SDValue(); 8533 8534 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, DL, WideVec, 8535 DAG.getUNDEF(WideVec.getValueType()), 8536 ShuffleVec.data()); 8537 // At this point all of the data is stored at the bottom of the 8538 // register. We now need to save it to mem. 8539 8540 // Find the largest store unit 8541 MVT StoreType = MVT::i8; 8542 for (unsigned tp = MVT::FIRST_INTEGER_VALUETYPE; 8543 tp < MVT::LAST_INTEGER_VALUETYPE; ++tp) { 8544 MVT Tp = (MVT::SimpleValueType)tp; 8545 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz) 8546 StoreType = Tp; 8547 } 8548 // Didn't find a legal store type. 
8549 if (!TLI.isTypeLegal(StoreType)) 8550 return SDValue(); 8551 8552 // Bitcast the original vector into a vector of store-size units 8553 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(), 8554 StoreType, VT.getSizeInBits()/EVT(StoreType).getSizeInBits()); 8555 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits()); 8556 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff); 8557 SmallVector<SDValue, 8> Chains; 8558 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8, 8559 TLI.getPointerTy()); 8560 SDValue BasePtr = St->getBasePtr(); 8561 8562 // Perform one or more big stores into memory. 8563 unsigned E = (ToEltSz*NumElems)/StoreType.getSizeInBits(); 8564 for (unsigned I = 0; I < E; I++) { 8565 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, 8566 StoreType, ShuffWide, 8567 DAG.getIntPtrConstant(I)); 8568 SDValue Ch = DAG.getStore(St->getChain(), DL, SubVec, BasePtr, 8569 St->getPointerInfo(), St->isVolatile(), 8570 St->isNonTemporal(), St->getAlignment()); 8571 BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr, 8572 Increment); 8573 Chains.push_back(Ch); 8574 } 8575 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &Chains[0], 8576 Chains.size()); 8577 } 8578 8579 if (!ISD::isNormalStore(St)) 8580 return SDValue(); 8581 8582 // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and 8583 // ARM stores of arguments in the same cache line. 8584 if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR && 8585 StVal.getNode()->hasOneUse()) { 8586 SelectionDAG &DAG = DCI.DAG; 8587 DebugLoc DL = St->getDebugLoc(); 8588 SDValue BasePtr = St->getBasePtr(); 8589 SDValue NewST1 = DAG.getStore(St->getChain(), DL, 8590 StVal.getNode()->getOperand(0), BasePtr, 8591 St->getPointerInfo(), St->isVolatile(), 8592 St->isNonTemporal(), St->getAlignment()); 8593 8594 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, 8595 DAG.getConstant(4, MVT::i32)); 8596 return DAG.getStore(NewST1.getValue(0), DL, StVal.getNode()->getOperand(1), 8597 OffsetPtr, St->getPointerInfo(), St->isVolatile(), 8598 St->isNonTemporal(), 8599 std::min(4U, St->getAlignment() / 2)); 8600 } 8601 8602 if (StVal.getValueType() != MVT::i64 || 8603 StVal.getNode()->getOpcode() != ISD::EXTRACT_VECTOR_ELT) 8604 return SDValue(); 8605 8606 // Bitcast an i64 store extracted from a vector to f64. 8607 // Otherwise, the i64 value will be legalized to a pair of i32 values. 8608 SelectionDAG &DAG = DCI.DAG; 8609 DebugLoc dl = StVal.getDebugLoc(); 8610 SDValue IntVec = StVal.getOperand(0); 8611 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 8612 IntVec.getValueType().getVectorNumElements()); 8613 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec); 8614 SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, 8615 Vec, StVal.getOperand(1)); 8616 dl = N->getDebugLoc(); 8617 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt); 8618 // Make the DAGCombiner fold the bitcasts. 8619 DCI.AddToWorklist(Vec.getNode()); 8620 DCI.AddToWorklist(ExtElt.getNode()); 8621 DCI.AddToWorklist(V.getNode()); 8622 return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(), 8623 St->getPointerInfo(), St->isVolatile(), 8624 St->isNonTemporal(), St->getAlignment(), 8625 St->getTBAAInfo()); 8626} 8627 8628/// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node 8629/// are normal, non-volatile loads. 
If so, it is profitable to bitcast an 8630/// i64 vector to have f64 elements, since the value can then be loaded 8631/// directly into a VFP register. 8632static bool hasNormalLoadOperand(SDNode *N) { 8633 unsigned NumElts = N->getValueType(0).getVectorNumElements(); 8634 for (unsigned i = 0; i < NumElts; ++i) { 8635 SDNode *Elt = N->getOperand(i).getNode(); 8636 if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile()) 8637 return true; 8638 } 8639 return false; 8640} 8641 8642/// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for 8643/// ISD::BUILD_VECTOR. 8644static SDValue PerformBUILD_VECTORCombine(SDNode *N, 8645 TargetLowering::DAGCombinerInfo &DCI){ 8646 // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X): 8647 // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value 8648 // into a pair of GPRs, which is fine when the value is used as a scalar, 8649 // but if the i64 value is converted to a vector, we need to undo the VMOVRRD. 8650 SelectionDAG &DAG = DCI.DAG; 8651 if (N->getNumOperands() == 2) { 8652 SDValue RV = PerformVMOVDRRCombine(N, DAG); 8653 if (RV.getNode()) 8654 return RV; 8655 } 8656 8657 // Load i64 elements as f64 values so that type legalization does not split 8658 // them up into i32 values. 8659 EVT VT = N->getValueType(0); 8660 if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N)) 8661 return SDValue(); 8662 DebugLoc dl = N->getDebugLoc(); 8663 SmallVector<SDValue, 8> Ops; 8664 unsigned NumElts = VT.getVectorNumElements(); 8665 for (unsigned i = 0; i < NumElts; ++i) { 8666 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i)); 8667 Ops.push_back(V); 8668 // Make the DAGCombiner fold the bitcast. 8669 DCI.AddToWorklist(V.getNode()); 8670 } 8671 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts); 8672 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, FloatVT, Ops.data(), NumElts); 8673 return DAG.getNode(ISD::BITCAST, dl, VT, BV); 8674} 8675 8676/// PerformInsertEltCombine - Target-specific dag combine xforms for 8677/// ISD::INSERT_VECTOR_ELT. 8678static SDValue PerformInsertEltCombine(SDNode *N, 8679 TargetLowering::DAGCombinerInfo &DCI) { 8680 // Bitcast an i64 load inserted into a vector to f64. 8681 // Otherwise, the i64 value will be legalized to a pair of i32 values. 8682 EVT VT = N->getValueType(0); 8683 SDNode *Elt = N->getOperand(1).getNode(); 8684 if (VT.getVectorElementType() != MVT::i64 || 8685 !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile()) 8686 return SDValue(); 8687 8688 SelectionDAG &DAG = DCI.DAG; 8689 DebugLoc dl = N->getDebugLoc(); 8690 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, 8691 VT.getVectorNumElements()); 8692 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0)); 8693 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1)); 8694 // Make the DAGCombiner fold the bitcasts. 8695 DCI.AddToWorklist(Vec.getNode()); 8696 DCI.AddToWorklist(V.getNode()); 8697 SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT, 8698 Vec, V, N->getOperand(2)); 8699 return DAG.getNode(ISD::BITCAST, dl, VT, InsElt); 8700} 8701 8702/// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for 8703/// ISD::VECTOR_SHUFFLE. 
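///
/// Illustrative example (types chosen for the sketch): a v4i32 shuffle of
/// (concat v2i32 A, undef) and (concat v2i32 B, undef) with mask <0, 4, 1, 5>
/// is rewritten below as a shuffle of (concat A, B) with undef and mask
/// <0, 2, 1, 3>, keeping both inputs in one quad register instead of two
/// half-defined ones.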
8704static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { 8705 // The LLVM shufflevector instruction does not require the shuffle mask 8706 // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does 8707 // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the 8708 // operands do not match the mask length, they are extended by concatenating 8709 // them with undef vectors. That is probably the right thing for other 8710 // targets, but for NEON it is better to concatenate two double-register 8711 // size vector operands into a single quad-register size vector. Do that 8712 // transformation here: 8713 // shuffle(concat(v1, undef), concat(v2, undef)) -> 8714 // shuffle(concat(v1, v2), undef) 8715 SDValue Op0 = N->getOperand(0); 8716 SDValue Op1 = N->getOperand(1); 8717 if (Op0.getOpcode() != ISD::CONCAT_VECTORS || 8718 Op1.getOpcode() != ISD::CONCAT_VECTORS || 8719 Op0.getNumOperands() != 2 || 8720 Op1.getNumOperands() != 2) 8721 return SDValue(); 8722 SDValue Concat0Op1 = Op0.getOperand(1); 8723 SDValue Concat1Op1 = Op1.getOperand(1); 8724 if (Concat0Op1.getOpcode() != ISD::UNDEF || 8725 Concat1Op1.getOpcode() != ISD::UNDEF) 8726 return SDValue(); 8727 // Skip the transformation if any of the types are illegal. 8728 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8729 EVT VT = N->getValueType(0); 8730 if (!TLI.isTypeLegal(VT) || 8731 !TLI.isTypeLegal(Concat0Op1.getValueType()) || 8732 !TLI.isTypeLegal(Concat1Op1.getValueType())) 8733 return SDValue(); 8734 8735 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT, 8736 Op0.getOperand(0), Op1.getOperand(0)); 8737 // Translate the shuffle mask. 8738 SmallVector<int, 16> NewMask; 8739 unsigned NumElts = VT.getVectorNumElements(); 8740 unsigned HalfElts = NumElts/2; 8741 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 8742 for (unsigned n = 0; n < NumElts; ++n) { 8743 int MaskElt = SVN->getMaskElt(n); 8744 int NewElt = -1; 8745 if (MaskElt < (int)HalfElts) 8746 NewElt = MaskElt; 8747 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts)) 8748 NewElt = HalfElts + MaskElt - NumElts; 8749 NewMask.push_back(NewElt); 8750 } 8751 return DAG.getVectorShuffle(VT, N->getDebugLoc(), NewConcat, 8752 DAG.getUNDEF(VT), NewMask.data()); 8753} 8754 8755/// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP and 8756/// NEON load/store intrinsics to merge base address updates. 8757static SDValue CombineBaseUpdate(SDNode *N, 8758 TargetLowering::DAGCombinerInfo &DCI) { 8759 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 8760 return SDValue(); 8761 8762 SelectionDAG &DAG = DCI.DAG; 8763 bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID || 8764 N->getOpcode() == ISD::INTRINSIC_W_CHAIN); 8765 unsigned AddrOpIdx = (isIntrinsic ? 2 : 1); 8766 SDValue Addr = N->getOperand(AddrOpIdx); 8767 8768 // Search for a use of the address operand that is an increment. 8769 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), 8770 UE = Addr.getNode()->use_end(); UI != UE; ++UI) { 8771 SDNode *User = *UI; 8772 if (User->getOpcode() != ISD::ADD || 8773 UI.getUse().getResNo() != Addr.getResNo()) 8774 continue; 8775 8776 // Check that the add is independent of the load/store. Otherwise, folding 8777 // it would create a cycle. 8778 if (User->isPredecessorOf(N) || N->isPredecessorOf(User)) 8779 continue; 8780 8781 // Find the new opcode for the updating load/store. 
8782 bool isLoad = true; 8783 bool isLaneOp = false; 8784 unsigned NewOpc = 0; 8785 unsigned NumVecs = 0; 8786 if (isIntrinsic) { 8787 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 8788 switch (IntNo) { 8789 default: llvm_unreachable("unexpected intrinsic for Neon base update"); 8790 case Intrinsic::arm_neon_vld1: NewOpc = ARMISD::VLD1_UPD; 8791 NumVecs = 1; break; 8792 case Intrinsic::arm_neon_vld2: NewOpc = ARMISD::VLD2_UPD; 8793 NumVecs = 2; break; 8794 case Intrinsic::arm_neon_vld3: NewOpc = ARMISD::VLD3_UPD; 8795 NumVecs = 3; break; 8796 case Intrinsic::arm_neon_vld4: NewOpc = ARMISD::VLD4_UPD; 8797 NumVecs = 4; break; 8798 case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD; 8799 NumVecs = 2; isLaneOp = true; break; 8800 case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD; 8801 NumVecs = 3; isLaneOp = true; break; 8802 case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD; 8803 NumVecs = 4; isLaneOp = true; break; 8804 case Intrinsic::arm_neon_vst1: NewOpc = ARMISD::VST1_UPD; 8805 NumVecs = 1; isLoad = false; break; 8806 case Intrinsic::arm_neon_vst2: NewOpc = ARMISD::VST2_UPD; 8807 NumVecs = 2; isLoad = false; break; 8808 case Intrinsic::arm_neon_vst3: NewOpc = ARMISD::VST3_UPD; 8809 NumVecs = 3; isLoad = false; break; 8810 case Intrinsic::arm_neon_vst4: NewOpc = ARMISD::VST4_UPD; 8811 NumVecs = 4; isLoad = false; break; 8812 case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD; 8813 NumVecs = 2; isLoad = false; isLaneOp = true; break; 8814 case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD; 8815 NumVecs = 3; isLoad = false; isLaneOp = true; break; 8816 case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD; 8817 NumVecs = 4; isLoad = false; isLaneOp = true; break; 8818 } 8819 } else { 8820 isLaneOp = true; 8821 switch (N->getOpcode()) { 8822 default: llvm_unreachable("unexpected opcode for Neon base update"); 8823 case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break; 8824 case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break; 8825 case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break; 8826 } 8827 } 8828 8829 // Find the size of memory referenced by the load/store. 8830 EVT VecTy; 8831 if (isLoad) 8832 VecTy = N->getValueType(0); 8833 else 8834 VecTy = N->getOperand(AddrOpIdx+1).getValueType(); 8835 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; 8836 if (isLaneOp) 8837 NumBytes /= VecTy.getVectorNumElements(); 8838 8839 // If the increment is a constant, it must match the memory ref size. 8840 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); 8841 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { 8842 uint64_t IncVal = CInc->getZExtValue(); 8843 if (IncVal != NumBytes) 8844 continue; 8845 } else if (NumBytes >= 3 * 16) { 8846 // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two 8847 // separate instructions that make it harder to use a non-constant update. 8848 continue; 8849 } 8850 8851 // Create the new updating load/store node. 8852 EVT Tys[6]; 8853 unsigned NumResultVecs = (isLoad ? 
NumVecs : 0); 8854 unsigned n; 8855 for (n = 0; n < NumResultVecs; ++n) 8856 Tys[n] = VecTy; 8857 Tys[n++] = MVT::i32; 8858 Tys[n] = MVT::Other; 8859 SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs+2); 8860 SmallVector<SDValue, 8> Ops; 8861 Ops.push_back(N->getOperand(0)); // incoming chain 8862 Ops.push_back(N->getOperand(AddrOpIdx)); 8863 Ops.push_back(Inc); 8864 for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) { 8865 Ops.push_back(N->getOperand(i)); 8866 } 8867 MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N); 8868 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, N->getDebugLoc(), SDTys, 8869 Ops.data(), Ops.size(), 8870 MemInt->getMemoryVT(), 8871 MemInt->getMemOperand()); 8872 8873 // Update the uses. 8874 std::vector<SDValue> NewResults; 8875 for (unsigned i = 0; i < NumResultVecs; ++i) { 8876 NewResults.push_back(SDValue(UpdN.getNode(), i)); 8877 } 8878 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain 8879 DCI.CombineTo(N, NewResults); 8880 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); 8881 8882 break; 8883 } 8884 return SDValue(); 8885} 8886 8887/// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a 8888/// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic 8889/// are also VDUPLANEs. If so, combine them to a vldN-dup operation and 8890/// return true. 8891static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { 8892 SelectionDAG &DAG = DCI.DAG; 8893 EVT VT = N->getValueType(0); 8894 // vldN-dup instructions only support 64-bit vectors for N > 1. 8895 if (!VT.is64BitVector()) 8896 return false; 8897 8898 // Check if the VDUPLANE operand is a vldN-dup intrinsic. 8899 SDNode *VLD = N->getOperand(0).getNode(); 8900 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN) 8901 return false; 8902 unsigned NumVecs = 0; 8903 unsigned NewOpc = 0; 8904 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue(); 8905 if (IntNo == Intrinsic::arm_neon_vld2lane) { 8906 NumVecs = 2; 8907 NewOpc = ARMISD::VLD2DUP; 8908 } else if (IntNo == Intrinsic::arm_neon_vld3lane) { 8909 NumVecs = 3; 8910 NewOpc = ARMISD::VLD3DUP; 8911 } else if (IntNo == Intrinsic::arm_neon_vld4lane) { 8912 NumVecs = 4; 8913 NewOpc = ARMISD::VLD4DUP; 8914 } else { 8915 return false; 8916 } 8917 8918 // First check that all the vldN-lane uses are VDUPLANEs and that the lane 8919 // numbers match the load. 8920 unsigned VLDLaneNo = 8921 cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue(); 8922 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 8923 UI != UE; ++UI) { 8924 // Ignore uses of the chain result. 8925 if (UI.getUse().getResNo() == NumVecs) 8926 continue; 8927 SDNode *User = *UI; 8928 if (User->getOpcode() != ARMISD::VDUPLANE || 8929 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue()) 8930 return false; 8931 } 8932 8933 // Create the vldN-dup node. 8934 EVT Tys[5]; 8935 unsigned n; 8936 for (n = 0; n < NumVecs; ++n) 8937 Tys[n] = VT; 8938 Tys[n] = MVT::Other; 8939 SDVTList SDTys = DAG.getVTList(Tys, NumVecs+1); 8940 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; 8941 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD); 8942 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, VLD->getDebugLoc(), SDTys, 8943 Ops, 2, VLDMemInt->getMemoryVT(), 8944 VLDMemInt->getMemOperand()); 8945 8946 // Update the uses. 
8947 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end(); 8948 UI != UE; ++UI) { 8949 unsigned ResNo = UI.getUse().getResNo(); 8950 // Ignore uses of the chain result. 8951 if (ResNo == NumVecs) 8952 continue; 8953 SDNode *User = *UI; 8954 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo)); 8955 } 8956 8957 // Now the vldN-lane intrinsic is dead except for its chain result. 8958 // Update uses of the chain. 8959 std::vector<SDValue> VLDDupResults; 8960 for (unsigned n = 0; n < NumVecs; ++n) 8961 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n)); 8962 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs)); 8963 DCI.CombineTo(VLD, VLDDupResults); 8964 8965 return true; 8966} 8967 8968/// PerformVDUPLANECombine - Target-specific dag combine xforms for 8969/// ARMISD::VDUPLANE. 8970static SDValue PerformVDUPLANECombine(SDNode *N, 8971 TargetLowering::DAGCombinerInfo &DCI) { 8972 SDValue Op = N->getOperand(0); 8973 8974 // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses 8975 // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation. 8976 if (CombineVLDDUP(N, DCI)) 8977 return SDValue(N, 0); 8978 8979 // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is 8980 // redundant. Ignore bit_converts for now; element sizes are checked below. 8981 while (Op.getOpcode() == ISD::BITCAST) 8982 Op = Op.getOperand(0); 8983 if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) 8984 return SDValue(); 8985 8986 // Make sure the VMOV element size is not bigger than the VDUPLANE elements. 8987 unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits(); 8988 // The canonical VMOV for a zero vector uses a 32-bit element size. 8989 unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 8990 unsigned EltBits; 8991 if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0) 8992 EltSize = 8; 8993 EVT VT = N->getValueType(0); 8994 if (EltSize > VT.getVectorElementType().getSizeInBits()) 8995 return SDValue(); 8996 8997 return DCI.DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op); 8998} 8999 9000// isConstVecPow2 - Return true if each vector element is a power of 2, all 9001// elements are the same constant, C, and Log2(C) ranges from 1 to 32. 9002static bool isConstVecPow2(SDValue ConstVec, bool isSigned, uint64_t &C) 9003{ 9004 integerPart cN; 9005 integerPart c0 = 0; 9006 for (unsigned I = 0, E = ConstVec.getValueType().getVectorNumElements(); 9007 I != E; I++) { 9008 ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(ConstVec.getOperand(I)); 9009 if (!C) 9010 return false; 9011 9012 bool isExact; 9013 APFloat APF = C->getValueAPF(); 9014 if (APF.convertToInteger(&cN, 64, isSigned, APFloat::rmTowardZero, &isExact) 9015 != APFloat::opOK || !isExact) 9016 return false; 9017 9018 c0 = (I == 0) ? cN : c0; 9019 if (!isPowerOf2_64(cN) || c0 != cN || Log2_64(c0) < 1 || Log2_64(c0) > 32) 9020 return false; 9021 } 9022 C = c0; 9023 return true; 9024} 9025 9026/// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD) 9027/// can replace combinations of VMUL and VCVT (floating-point to integer) 9028/// when the VMUL has a constant operand that is a power of 2. 
9029/// 9030/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): 9031/// vmul.f32 d16, d17, d16 9032/// vcvt.s32.f32 d16, d16 9033/// becomes: 9034/// vcvt.s32.f32 d16, d16, #3 9035static SDValue PerformVCVTCombine(SDNode *N, 9036 TargetLowering::DAGCombinerInfo &DCI, 9037 const ARMSubtarget *Subtarget) { 9038 SelectionDAG &DAG = DCI.DAG; 9039 SDValue Op = N->getOperand(0); 9040 9041 if (!Subtarget->hasNEON() || !Op.getValueType().isVector() || 9042 Op.getOpcode() != ISD::FMUL) 9043 return SDValue(); 9044 9045 uint64_t C; 9046 SDValue N0 = Op->getOperand(0); 9047 SDValue ConstVec = Op->getOperand(1); 9048 bool isSigned = N->getOpcode() == ISD::FP_TO_SINT; 9049 9050 if (ConstVec.getOpcode() != ISD::BUILD_VECTOR || 9051 !isConstVecPow2(ConstVec, isSigned, C)) 9052 return SDValue(); 9053 9054 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs : 9055 Intrinsic::arm_neon_vcvtfp2fxu; 9056 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), 9057 N->getValueType(0), 9058 DAG.getConstant(IntrinsicOpcode, MVT::i32), N0, 9059 DAG.getConstant(Log2_64(C), MVT::i32)); 9060} 9061 9062/// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD) 9063/// can replace combinations of VCVT (integer to floating-point) and VDIV 9064/// when the VDIV has a constant operand that is a power of 2. 9065/// 9066/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>): 9067/// vcvt.f32.s32 d16, d16 9068/// vdiv.f32 d16, d17, d16 9069/// becomes: 9070/// vcvt.f32.s32 d16, d16, #3 9071static SDValue PerformVDIVCombine(SDNode *N, 9072 TargetLowering::DAGCombinerInfo &DCI, 9073 const ARMSubtarget *Subtarget) { 9074 SelectionDAG &DAG = DCI.DAG; 9075 SDValue Op = N->getOperand(0); 9076 unsigned OpOpcode = Op.getNode()->getOpcode(); 9077 9078 if (!Subtarget->hasNEON() || !N->getValueType(0).isVector() || 9079 (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP)) 9080 return SDValue(); 9081 9082 uint64_t C; 9083 SDValue ConstVec = N->getOperand(1); 9084 bool isSigned = OpOpcode == ISD::SINT_TO_FP; 9085 9086 if (ConstVec.getOpcode() != ISD::BUILD_VECTOR || 9087 !isConstVecPow2(ConstVec, isSigned, C)) 9088 return SDValue(); 9089 9090 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp : 9091 Intrinsic::arm_neon_vcvtfxu2fp; 9092 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, N->getDebugLoc(), 9093 Op.getValueType(), 9094 DAG.getConstant(IntrinsicOpcode, MVT::i32), 9095 Op.getOperand(0), DAG.getConstant(Log2_64(C), MVT::i32)); 9096} 9097 9098/// Getvshiftimm - Check if this is a valid build_vector for the immediate 9099/// operand of a vector shift operation, where all the elements of the 9100/// build_vector must have the same constant integer value. 9101static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { 9102 // Ignore bit_converts. 9103 while (Op.getOpcode() == ISD::BITCAST) 9104 Op = Op.getOperand(0); 9105 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 9106 APInt SplatBits, SplatUndef; 9107 unsigned SplatBitSize; 9108 bool HasAnyUndefs; 9109 if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, 9110 HasAnyUndefs, ElementBits) || 9111 SplatBitSize > ElementBits) 9112 return false; 9113 Cnt = SplatBits.getSExtValue(); 9114 return true; 9115} 9116 9117/// isVShiftLImm - Check if this is a valid build_vector for the immediate 9118/// operand of a vector shift left operation. 
That value must be in the range: 9119/// 0 <= Value < ElementBits for a left shift; or 9120/// 0 <= Value <= ElementBits for a long left shift. 9121static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { 9122 assert(VT.isVector() && "vector shift count is not a vector type"); 9123 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 9124 if (! getVShiftImm(Op, ElementBits, Cnt)) 9125 return false; 9126 return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits); 9127} 9128 9129/// isVShiftRImm - Check if this is a valid build_vector for the immediate 9130/// operand of a vector shift right operation. For a shift opcode, the value 9131/// is positive, but for an intrinsic the value count must be negative. The 9132/// absolute value must be in the range: 9133/// 1 <= |Value| <= ElementBits for a right shift; or 9134/// 1 <= |Value| <= ElementBits/2 for a narrow right shift. 9135static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, 9136 int64_t &Cnt) { 9137 assert(VT.isVector() && "vector shift count is not a vector type"); 9138 unsigned ElementBits = VT.getVectorElementType().getSizeInBits(); 9139 if (! getVShiftImm(Op, ElementBits, Cnt)) 9140 return false; 9141 if (isIntrinsic) 9142 Cnt = -Cnt; 9143 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits)); 9144} 9145 9146/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics. 9147static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) { 9148 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 9149 switch (IntNo) { 9150 default: 9151 // Don't do anything for most intrinsics. 9152 break; 9153 9154 // Vector shifts: check for immediate versions and lower them. 9155 // Note: This is done during DAG combining instead of DAG legalizing because 9156 // the build_vectors for 64-bit vector element shift counts are generally 9157 // not legal, and it is hard to see their values after they get legalized to 9158 // loads from a constant pool. 9159 case Intrinsic::arm_neon_vshifts: 9160 case Intrinsic::arm_neon_vshiftu: 9161 case Intrinsic::arm_neon_vshiftls: 9162 case Intrinsic::arm_neon_vshiftlu: 9163 case Intrinsic::arm_neon_vshiftn: 9164 case Intrinsic::arm_neon_vrshifts: 9165 case Intrinsic::arm_neon_vrshiftu: 9166 case Intrinsic::arm_neon_vrshiftn: 9167 case Intrinsic::arm_neon_vqshifts: 9168 case Intrinsic::arm_neon_vqshiftu: 9169 case Intrinsic::arm_neon_vqshiftsu: 9170 case Intrinsic::arm_neon_vqshiftns: 9171 case Intrinsic::arm_neon_vqshiftnu: 9172 case Intrinsic::arm_neon_vqshiftnsu: 9173 case Intrinsic::arm_neon_vqrshiftns: 9174 case Intrinsic::arm_neon_vqrshiftnu: 9175 case Intrinsic::arm_neon_vqrshiftnsu: { 9176 EVT VT = N->getOperand(1).getValueType(); 9177 int64_t Cnt; 9178 unsigned VShiftOpc = 0; 9179 9180 switch (IntNo) { 9181 case Intrinsic::arm_neon_vshifts: 9182 case Intrinsic::arm_neon_vshiftu: 9183 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) { 9184 VShiftOpc = ARMISD::VSHL; 9185 break; 9186 } 9187 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) { 9188 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? 
9189 ARMISD::VSHRs : ARMISD::VSHRu); 9190 break; 9191 } 9192 return SDValue(); 9193 9194 case Intrinsic::arm_neon_vshiftls: 9195 case Intrinsic::arm_neon_vshiftlu: 9196 if (isVShiftLImm(N->getOperand(2), VT, true, Cnt)) 9197 break; 9198 llvm_unreachable("invalid shift count for vshll intrinsic"); 9199 9200 case Intrinsic::arm_neon_vrshifts: 9201 case Intrinsic::arm_neon_vrshiftu: 9202 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) 9203 break; 9204 return SDValue(); 9205 9206 case Intrinsic::arm_neon_vqshifts: 9207 case Intrinsic::arm_neon_vqshiftu: 9208 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 9209 break; 9210 return SDValue(); 9211 9212 case Intrinsic::arm_neon_vqshiftsu: 9213 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) 9214 break; 9215 llvm_unreachable("invalid shift count for vqshlu intrinsic"); 9216 9217 case Intrinsic::arm_neon_vshiftn: 9218 case Intrinsic::arm_neon_vrshiftn: 9219 case Intrinsic::arm_neon_vqshiftns: 9220 case Intrinsic::arm_neon_vqshiftnu: 9221 case Intrinsic::arm_neon_vqshiftnsu: 9222 case Intrinsic::arm_neon_vqrshiftns: 9223 case Intrinsic::arm_neon_vqrshiftnu: 9224 case Intrinsic::arm_neon_vqrshiftnsu: 9225 // Narrowing shifts require an immediate right shift. 9226 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt)) 9227 break; 9228 llvm_unreachable("invalid shift count for narrowing vector shift " 9229 "intrinsic"); 9230 9231 default: 9232 llvm_unreachable("unhandled vector shift"); 9233 } 9234 9235 switch (IntNo) { 9236 case Intrinsic::arm_neon_vshifts: 9237 case Intrinsic::arm_neon_vshiftu: 9238 // Opcode already set above. 9239 break; 9240 case Intrinsic::arm_neon_vshiftls: 9241 case Intrinsic::arm_neon_vshiftlu: 9242 if (Cnt == VT.getVectorElementType().getSizeInBits()) 9243 VShiftOpc = ARMISD::VSHLLi; 9244 else 9245 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshiftls ? 
9246 ARMISD::VSHLLs : ARMISD::VSHLLu); 9247 break; 9248 case Intrinsic::arm_neon_vshiftn: 9249 VShiftOpc = ARMISD::VSHRN; break; 9250 case Intrinsic::arm_neon_vrshifts: 9251 VShiftOpc = ARMISD::VRSHRs; break; 9252 case Intrinsic::arm_neon_vrshiftu: 9253 VShiftOpc = ARMISD::VRSHRu; break; 9254 case Intrinsic::arm_neon_vrshiftn: 9255 VShiftOpc = ARMISD::VRSHRN; break; 9256 case Intrinsic::arm_neon_vqshifts: 9257 VShiftOpc = ARMISD::VQSHLs; break; 9258 case Intrinsic::arm_neon_vqshiftu: 9259 VShiftOpc = ARMISD::VQSHLu; break; 9260 case Intrinsic::arm_neon_vqshiftsu: 9261 VShiftOpc = ARMISD::VQSHLsu; break; 9262 case Intrinsic::arm_neon_vqshiftns: 9263 VShiftOpc = ARMISD::VQSHRNs; break; 9264 case Intrinsic::arm_neon_vqshiftnu: 9265 VShiftOpc = ARMISD::VQSHRNu; break; 9266 case Intrinsic::arm_neon_vqshiftnsu: 9267 VShiftOpc = ARMISD::VQSHRNsu; break; 9268 case Intrinsic::arm_neon_vqrshiftns: 9269 VShiftOpc = ARMISD::VQRSHRNs; break; 9270 case Intrinsic::arm_neon_vqrshiftnu: 9271 VShiftOpc = ARMISD::VQRSHRNu; break; 9272 case Intrinsic::arm_neon_vqrshiftnsu: 9273 VShiftOpc = ARMISD::VQRSHRNsu; break; 9274 } 9275 9276 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 9277 N->getOperand(1), DAG.getConstant(Cnt, MVT::i32)); 9278 } 9279 9280 case Intrinsic::arm_neon_vshiftins: { 9281 EVT VT = N->getOperand(1).getValueType(); 9282 int64_t Cnt; 9283 unsigned VShiftOpc = 0; 9284 9285 if (isVShiftLImm(N->getOperand(3), VT, false, Cnt)) 9286 VShiftOpc = ARMISD::VSLI; 9287 else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt)) 9288 VShiftOpc = ARMISD::VSRI; 9289 else { 9290 llvm_unreachable("invalid shift count for vsli/vsri intrinsic"); 9291 } 9292 9293 return DAG.getNode(VShiftOpc, N->getDebugLoc(), N->getValueType(0), 9294 N->getOperand(1), N->getOperand(2), 9295 DAG.getConstant(Cnt, MVT::i32)); 9296 } 9297 9298 case Intrinsic::arm_neon_vqrshifts: 9299 case Intrinsic::arm_neon_vqrshiftu: 9300 // No immediate versions of these to check for. 9301 break; 9302 } 9303 9304 return SDValue(); 9305} 9306 9307/// PerformShiftCombine - Checks for immediate versions of vector shifts and 9308/// lowers them. As with the vector shift intrinsics, this is done during DAG 9309/// combining instead of DAG legalizing because the build_vectors for 64-bit 9310/// vector element shift counts are generally not legal, and it is hard to see 9311/// their values after they get legalized to loads from a constant pool. 9312static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG, 9313 const ARMSubtarget *ST) { 9314 EVT VT = N->getValueType(0); 9315 if (N->getOpcode() == ISD::SRL && VT == MVT::i32 && ST->hasV6Ops()) { 9316 // Canonicalize (srl (bswap x), 16) to (rotr (bswap x), 16) if the high 9317 // 16-bits of x is zero. This optimizes rev + lsr 16 to rev16. 9318 SDValue N1 = N->getOperand(1); 9319 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) { 9320 SDValue N0 = N->getOperand(0); 9321 if (C->getZExtValue() == 16 && N0.getOpcode() == ISD::BSWAP && 9322 DAG.MaskedValueIsZero(N0.getOperand(0), 9323 APInt::getHighBitsSet(32, 16))) 9324 return DAG.getNode(ISD::ROTR, N->getDebugLoc(), VT, N0, N1); 9325 } 9326 } 9327 9328 // Nothing to be done for scalar shifts. 
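  // For the vector case handled below, an illustrative example (not part of
  // the original comments): a uniform shift such as
  // (shl v4i32 X, <i32 3, i32 3, i32 3, i32 3>) becomes ARMISD::VSHL with an
  // i32 immediate operand and is selected as "vshl.i32 q0, q0, #3" rather
  // than a shift by a vector register.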
9329 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9330 if (!VT.isVector() || !TLI.isTypeLegal(VT)) 9331 return SDValue(); 9332 9333 assert(ST->hasNEON() && "unexpected vector shift"); 9334 int64_t Cnt; 9335 9336 switch (N->getOpcode()) { 9337 default: llvm_unreachable("unexpected shift opcode"); 9338 9339 case ISD::SHL: 9340 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) 9341 return DAG.getNode(ARMISD::VSHL, N->getDebugLoc(), VT, N->getOperand(0), 9342 DAG.getConstant(Cnt, MVT::i32)); 9343 break; 9344 9345 case ISD::SRA: 9346 case ISD::SRL: 9347 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) { 9348 unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ? 9349 ARMISD::VSHRs : ARMISD::VSHRu); 9350 return DAG.getNode(VShiftOpc, N->getDebugLoc(), VT, N->getOperand(0), 9351 DAG.getConstant(Cnt, MVT::i32)); 9352 } 9353 } 9354 return SDValue(); 9355} 9356 9357/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, 9358/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND. 9359static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, 9360 const ARMSubtarget *ST) { 9361 SDValue N0 = N->getOperand(0); 9362 9363 // Check for sign- and zero-extensions of vector extract operations of 8- 9364 // and 16-bit vector elements. NEON supports these directly. They are 9365 // handled during DAG combining because type legalization will promote them 9366 // to 32-bit types and it is messy to recognize the operations after that. 9367 if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 9368 SDValue Vec = N0.getOperand(0); 9369 SDValue Lane = N0.getOperand(1); 9370 EVT VT = N->getValueType(0); 9371 EVT EltVT = N0.getValueType(); 9372 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 9373 9374 if (VT == MVT::i32 && 9375 (EltVT == MVT::i8 || EltVT == MVT::i16) && 9376 TLI.isTypeLegal(Vec.getValueType()) && 9377 isa<ConstantSDNode>(Lane)) { 9378 9379 unsigned Opc = 0; 9380 switch (N->getOpcode()) { 9381 default: llvm_unreachable("unexpected opcode"); 9382 case ISD::SIGN_EXTEND: 9383 Opc = ARMISD::VGETLANEs; 9384 break; 9385 case ISD::ZERO_EXTEND: 9386 case ISD::ANY_EXTEND: 9387 Opc = ARMISD::VGETLANEu; 9388 break; 9389 } 9390 return DAG.getNode(Opc, N->getDebugLoc(), VT, Vec, Lane); 9391 } 9392 } 9393 9394 return SDValue(); 9395} 9396 9397/// PerformSELECT_CCCombine - Target-specific DAG combining for ISD::SELECT_CC 9398/// to match f32 max/min patterns to use NEON vmax/vmin instructions. 9399static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG, 9400 const ARMSubtarget *ST) { 9401 // If the target supports NEON, try to use vmax/vmin instructions for f32 9402 // selects like "x < y ? x : y". Unless the NoNaNsFPMath option is set, 9403 // be careful about NaNs: NEON's vmax/vmin return NaN if either operand is 9404 // a NaN; only do the transformation when it matches that behavior. 9405 9406 // For now only do this when using NEON for FP operations; if using VFP, it 9407 // is not obvious that the benefit outweighs the cost of switching to the 9408 // NEON pipeline. 
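  // Illustrative example, not part of the original comments: when LHS is
  // known not to be a NaN (e.g. under -enable-no-nans-fp-math), an f32
  // select_cc of the form "x < y ? x : y" is matched below to ARMISD::FMIN
  // and selected as "vmin.f32 d0, d0, d1".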
9409 if (!ST->hasNEON() || !ST->useNEONForSinglePrecisionFP() || 9410 N->getValueType(0) != MVT::f32) 9411 return SDValue(); 9412 9413 SDValue CondLHS = N->getOperand(0); 9414 SDValue CondRHS = N->getOperand(1); 9415 SDValue LHS = N->getOperand(2); 9416 SDValue RHS = N->getOperand(3); 9417 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get(); 9418 9419 unsigned Opcode = 0; 9420 bool IsReversed; 9421 if (DAG.isEqualTo(LHS, CondLHS) && DAG.isEqualTo(RHS, CondRHS)) { 9422 IsReversed = false; // x CC y ? x : y 9423 } else if (DAG.isEqualTo(LHS, CondRHS) && DAG.isEqualTo(RHS, CondLHS)) { 9424 IsReversed = true ; // x CC y ? y : x 9425 } else { 9426 return SDValue(); 9427 } 9428 9429 bool IsUnordered; 9430 switch (CC) { 9431 default: break; 9432 case ISD::SETOLT: 9433 case ISD::SETOLE: 9434 case ISD::SETLT: 9435 case ISD::SETLE: 9436 case ISD::SETULT: 9437 case ISD::SETULE: 9438 // If LHS is NaN, an ordered comparison will be false and the result will 9439 // be the RHS, but vmin(NaN, RHS) = NaN. Avoid this by checking that LHS 9440 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 9441 IsUnordered = (CC == ISD::SETULT || CC == ISD::SETULE); 9442 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 9443 break; 9444 // For less-than-or-equal comparisons, "+0 <= -0" will be true but vmin 9445 // will return -0, so vmin can only be used for unsafe math or if one of 9446 // the operands is known to be nonzero. 9447 if ((CC == ISD::SETLE || CC == ISD::SETOLE || CC == ISD::SETULE) && 9448 !DAG.getTarget().Options.UnsafeFPMath && 9449 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 9450 break; 9451 Opcode = IsReversed ? ARMISD::FMAX : ARMISD::FMIN; 9452 break; 9453 9454 case ISD::SETOGT: 9455 case ISD::SETOGE: 9456 case ISD::SETGT: 9457 case ISD::SETGE: 9458 case ISD::SETUGT: 9459 case ISD::SETUGE: 9460 // If LHS is NaN, an ordered comparison will be false and the result will 9461 // be the RHS, but vmax(NaN, RHS) = NaN. Avoid this by checking that LHS 9462 // != NaN. Likewise, for unordered comparisons, check for RHS != NaN. 9463 IsUnordered = (CC == ISD::SETUGT || CC == ISD::SETUGE); 9464 if (!DAG.isKnownNeverNaN(IsUnordered ? RHS : LHS)) 9465 break; 9466 // For greater-than-or-equal comparisons, "-0 >= +0" will be true but vmax 9467 // will return +0, so vmax can only be used for unsafe math or if one of 9468 // the operands is known to be nonzero. 9469 if ((CC == ISD::SETGE || CC == ISD::SETOGE || CC == ISD::SETUGE) && 9470 !DAG.getTarget().Options.UnsafeFPMath && 9471 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) 9472 break; 9473 Opcode = IsReversed ? ARMISD::FMIN : ARMISD::FMAX; 9474 break; 9475 } 9476 9477 if (!Opcode) 9478 return SDValue(); 9479 return DAG.getNode(Opcode, N->getDebugLoc(), N->getValueType(0), LHS, RHS); 9480} 9481 9482/// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV. 9483SDValue 9484ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const { 9485 SDValue Cmp = N->getOperand(4); 9486 if (Cmp.getOpcode() != ARMISD::CMPZ) 9487 // Only looking at EQ and NE cases. 
9488 return SDValue(); 9489 9490 EVT VT = N->getValueType(0); 9491 DebugLoc dl = N->getDebugLoc(); 9492 SDValue LHS = Cmp.getOperand(0); 9493 SDValue RHS = Cmp.getOperand(1); 9494 SDValue FalseVal = N->getOperand(0); 9495 SDValue TrueVal = N->getOperand(1); 9496 SDValue ARMcc = N->getOperand(2); 9497 ARMCC::CondCodes CC = 9498 (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue(); 9499 9500 // Simplify 9501 // mov r1, r0 9502 // cmp r1, x 9503 // mov r0, y 9504 // moveq r0, x 9505 // to 9506 // cmp r0, x 9507 // movne r0, y 9508 // 9509 // mov r1, r0 9510 // cmp r1, x 9511 // mov r0, x 9512 // movne r0, y 9513 // to 9514 // cmp r0, x 9515 // movne r0, y 9516 /// FIXME: Turn this into a target neutral optimization? 9517 SDValue Res; 9518 if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) { 9519 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc, 9520 N->getOperand(3), Cmp); 9521 } else if (CC == ARMCC::EQ && TrueVal == RHS) { 9522 SDValue ARMcc; 9523 SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl); 9524 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc, 9525 N->getOperand(3), NewCmp); 9526 } 9527 9528 if (Res.getNode()) { 9529 APInt KnownZero, KnownOne; 9530 DAG.ComputeMaskedBits(SDValue(N,0), KnownZero, KnownOne); 9531 // Capture demanded bits information that would be otherwise lost. 9532 if (KnownZero == 0xfffffffe) 9533 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 9534 DAG.getValueType(MVT::i1)); 9535 else if (KnownZero == 0xffffff00) 9536 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 9537 DAG.getValueType(MVT::i8)); 9538 else if (KnownZero == 0xffff0000) 9539 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res, 9540 DAG.getValueType(MVT::i16)); 9541 } 9542 9543 return Res; 9544} 9545 9546SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, 9547 DAGCombinerInfo &DCI) const { 9548 switch (N->getOpcode()) { 9549 default: break; 9550 case ISD::ADDC: return PerformADDCCombine(N, DCI, Subtarget); 9551 case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget); 9552 case ISD::SUB: return PerformSUBCombine(N, DCI); 9553 case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); 9554 case ISD::OR: return PerformORCombine(N, DCI, Subtarget); 9555 case ISD::XOR: return PerformXORCombine(N, DCI, Subtarget); 9556 case ISD::AND: return PerformANDCombine(N, DCI, Subtarget); 9557 case ARMISD::BFI: return PerformBFICombine(N, DCI); 9558 case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI); 9559 case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG); 9560 case ISD::STORE: return PerformSTORECombine(N, DCI); 9561 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI); 9562 case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI); 9563 case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG); 9564 case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI); 9565 case ISD::FP_TO_SINT: 9566 case ISD::FP_TO_UINT: return PerformVCVTCombine(N, DCI, Subtarget); 9567 case ISD::FDIV: return PerformVDIVCombine(N, DCI, Subtarget); 9568 case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG); 9569 case ISD::SHL: 9570 case ISD::SRA: 9571 case ISD::SRL: return PerformShiftCombine(N, DCI.DAG, Subtarget); 9572 case ISD::SIGN_EXTEND: 9573 case ISD::ZERO_EXTEND: 9574 case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget); 9575 case ISD::SELECT_CC: return PerformSELECT_CCCombine(N, DCI.DAG, Subtarget); 9576 case ARMISD::CMOV: return PerformCMOVCombine(N, 
                                                         DCI.DAG);
  case ARMISD::VLD2DUP:
  case ARMISD::VLD3DUP:
  case ARMISD::VLD4DUP:
    return CombineBaseUpdate(N, DCI);
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_W_CHAIN:
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    case Intrinsic::arm_neon_vld1:
    case Intrinsic::arm_neon_vld2:
    case Intrinsic::arm_neon_vld3:
    case Intrinsic::arm_neon_vld4:
    case Intrinsic::arm_neon_vld2lane:
    case Intrinsic::arm_neon_vld3lane:
    case Intrinsic::arm_neon_vld4lane:
    case Intrinsic::arm_neon_vst1:
    case Intrinsic::arm_neon_vst2:
    case Intrinsic::arm_neon_vst3:
    case Intrinsic::arm_neon_vst4:
    case Intrinsic::arm_neon_vst2lane:
    case Intrinsic::arm_neon_vst3lane:
    case Intrinsic::arm_neon_vst4lane:
      return CombineBaseUpdate(N, DCI);
    default: break;
    }
    break;
  }
  return SDValue();
}

bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
                                                          EVT VT) const {
  return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE);
}

bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const {
  // The AllowsUnaligned flag models the SCTLR.A setting in ARM CPUs.
  bool AllowsUnaligned = Subtarget->allowsUnalignedMem();

  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return false;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32: {
    // Unaligned access can use (for example) LDRB, LDRH, LDR.
    if (AllowsUnaligned) {
      if (Fast)
        *Fast = Subtarget->hasV7Ops();
      return true;
    }
    return false;
  }
  case MVT::f64:
  case MVT::v2f64: {
    // For any little-endian targets with NEON, we can support unaligned ld/st
    // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8.
    // A big-endian target may also explicitly support unaligned accesses.
    if (Subtarget->hasNEON() && (AllowsUnaligned || isLittleEndian())) {
      if (Fast)
        *Fast = true;
      return true;
    }
    return false;
  }
  }
}

static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign,
                       unsigned AlignCheck) {
  return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) &&
          (DstAlign == 0 || DstAlign % AlignCheck == 0));
}

EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size,
                                           unsigned DstAlign, unsigned SrcAlign,
                                           bool IsMemset, bool ZeroMemset,
                                           bool MemcpyStrSrc,
                                           MachineFunction &MF) const {
  const Function *F = MF.getFunction();

  // See if we can use NEON instructions for this...
  if ((!IsMemset || ZeroMemset) &&
      Subtarget->hasNEON() &&
      !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                       Attribute::NoImplicitFloat)) {
    bool Fast;
    if (Size >= 16 &&
        (memOpAlign(SrcAlign, DstAlign, 16) ||
         (allowsUnalignedMemoryAccesses(MVT::v2f64, &Fast) && Fast))) {
      return MVT::v2f64;
    } else if (Size >= 8 &&
               (memOpAlign(SrcAlign, DstAlign, 8) ||
                (allowsUnalignedMemoryAccesses(MVT::f64, &Fast) && Fast))) {
      return MVT::f64;
    }
  }

  // Lowering to i32/i16 if the size permits.
  if (Size >= 4)
    return MVT::i32;
  else if (Size >= 2)
    return MVT::i16;

  // Let the target-independent logic figure it out.
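  // Illustrative example, not part of the original comments: a 32-byte
  // memcpy with 16-byte-aligned source and destination takes the v2f64 path
  // above and is expanded as NEON-sized chunks; a 2-byte copy returns
  // MVT::i16; only a leftover single byte (or an empty copy) falls through
  // to the MVT::Other default below.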
9681 return MVT::Other; 9682} 9683 9684bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { 9685 if (Val.getOpcode() != ISD::LOAD) 9686 return false; 9687 9688 EVT VT1 = Val.getValueType(); 9689 if (!VT1.isSimple() || !VT1.isInteger() || 9690 !VT2.isSimple() || !VT2.isInteger()) 9691 return false; 9692 9693 switch (VT1.getSimpleVT().SimpleTy) { 9694 default: break; 9695 case MVT::i1: 9696 case MVT::i8: 9697 case MVT::i16: 9698 // 8-bit and 16-bit loads implicitly zero-extend to 32-bits. 9699 return true; 9700 } 9701 9702 return false; 9703} 9704 9705static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { 9706 if (V < 0) 9707 return false; 9708 9709 unsigned Scale = 1; 9710 switch (VT.getSimpleVT().SimpleTy) { 9711 default: return false; 9712 case MVT::i1: 9713 case MVT::i8: 9714 // Scale == 1; 9715 break; 9716 case MVT::i16: 9717 // Scale == 2; 9718 Scale = 2; 9719 break; 9720 case MVT::i32: 9721 // Scale == 4; 9722 Scale = 4; 9723 break; 9724 } 9725 9726 if ((V & (Scale - 1)) != 0) 9727 return false; 9728 V /= Scale; 9729 return V == (V & ((1LL << 5) - 1)); 9730} 9731 9732static bool isLegalT2AddressImmediate(int64_t V, EVT VT, 9733 const ARMSubtarget *Subtarget) { 9734 bool isNeg = false; 9735 if (V < 0) { 9736 isNeg = true; 9737 V = - V; 9738 } 9739 9740 switch (VT.getSimpleVT().SimpleTy) { 9741 default: return false; 9742 case MVT::i1: 9743 case MVT::i8: 9744 case MVT::i16: 9745 case MVT::i32: 9746 // + imm12 or - imm8 9747 if (isNeg) 9748 return V == (V & ((1LL << 8) - 1)); 9749 return V == (V & ((1LL << 12) - 1)); 9750 case MVT::f32: 9751 case MVT::f64: 9752 // Same as ARM mode. FIXME: NEON? 9753 if (!Subtarget->hasVFP2()) 9754 return false; 9755 if ((V & 3) != 0) 9756 return false; 9757 V >>= 2; 9758 return V == (V & ((1LL << 8) - 1)); 9759 } 9760} 9761 9762/// isLegalAddressImmediate - Return true if the integer value can be used 9763/// as the offset of the target addressing mode for load / store of the 9764/// given type. 9765static bool isLegalAddressImmediate(int64_t V, EVT VT, 9766 const ARMSubtarget *Subtarget) { 9767 if (V == 0) 9768 return true; 9769 9770 if (!VT.isSimple()) 9771 return false; 9772 9773 if (Subtarget->isThumb1Only()) 9774 return isLegalT1AddressImmediate(V, VT); 9775 else if (Subtarget->isThumb2()) 9776 return isLegalT2AddressImmediate(V, VT, Subtarget); 9777 9778 // ARM mode. 9779 if (V < 0) 9780 V = - V; 9781 switch (VT.getSimpleVT().SimpleTy) { 9782 default: return false; 9783 case MVT::i1: 9784 case MVT::i8: 9785 case MVT::i32: 9786 // +- imm12 9787 return V == (V & ((1LL << 12) - 1)); 9788 case MVT::i16: 9789 // +- imm8 9790 return V == (V & ((1LL << 8) - 1)); 9791 case MVT::f32: 9792 case MVT::f64: 9793 if (!Subtarget->hasVFP2()) // FIXME: NEON? 
9794 return false; 9795 if ((V & 3) != 0) 9796 return false; 9797 V >>= 2; 9798 return V == (V & ((1LL << 8) - 1)); 9799 } 9800} 9801 9802bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM, 9803 EVT VT) const { 9804 int Scale = AM.Scale; 9805 if (Scale < 0) 9806 return false; 9807 9808 switch (VT.getSimpleVT().SimpleTy) { 9809 default: return false; 9810 case MVT::i1: 9811 case MVT::i8: 9812 case MVT::i16: 9813 case MVT::i32: 9814 if (Scale == 1) 9815 return true; 9816 // r + r << imm 9817 Scale = Scale & ~1; 9818 return Scale == 2 || Scale == 4 || Scale == 8; 9819 case MVT::i64: 9820 // r + r 9821 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 9822 return true; 9823 return false; 9824 case MVT::isVoid: 9825 // Note, we allow "void" uses (basically, uses that aren't loads or 9826 // stores), because arm allows folding a scale into many arithmetic 9827 // operations. This should be made more precise and revisited later. 9828 9829 // Allow r << imm, but the imm has to be a multiple of two. 9830 if (Scale & 1) return false; 9831 return isPowerOf2_32(Scale); 9832 } 9833} 9834 9835/// isLegalAddressingMode - Return true if the addressing mode represented 9836/// by AM is legal for this target, for a load/store of the specified type. 9837bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM, 9838 Type *Ty) const { 9839 EVT VT = getValueType(Ty, true); 9840 if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget)) 9841 return false; 9842 9843 // Can never fold addr of global into load/store. 9844 if (AM.BaseGV) 9845 return false; 9846 9847 switch (AM.Scale) { 9848 case 0: // no scale reg, must be "r+i" or "r", or "i". 9849 break; 9850 case 1: 9851 if (Subtarget->isThumb1Only()) 9852 return false; 9853 // FALL THROUGH. 9854 default: 9855 // ARM doesn't support any R+R*scale+imm addr modes. 9856 if (AM.BaseOffs) 9857 return false; 9858 9859 if (!VT.isSimple()) 9860 return false; 9861 9862 if (Subtarget->isThumb2()) 9863 return isLegalT2ScaledAddressingMode(AM, VT); 9864 9865 int Scale = AM.Scale; 9866 switch (VT.getSimpleVT().SimpleTy) { 9867 default: return false; 9868 case MVT::i1: 9869 case MVT::i8: 9870 case MVT::i32: 9871 if (Scale < 0) Scale = -Scale; 9872 if (Scale == 1) 9873 return true; 9874 // r + r << imm 9875 return isPowerOf2_32(Scale & ~1); 9876 case MVT::i16: 9877 case MVT::i64: 9878 // r + r 9879 if (((unsigned)AM.HasBaseReg + Scale) <= 2) 9880 return true; 9881 return false; 9882 9883 case MVT::isVoid: 9884 // Note, we allow "void" uses (basically, uses that aren't loads or 9885 // stores), because arm allows folding a scale into many arithmetic 9886 // operations. This should be made more precise and revisited later. 9887 9888 // Allow r << imm, but the imm has to be a multiple of two. 9889 if (Scale & 1) return false; 9890 return isPowerOf2_32(Scale); 9891 } 9892 } 9893 return true; 9894} 9895 9896/// isLegalICmpImmediate - Return true if the specified immediate is legal 9897/// icmp immediate, that is the target has icmp instructions which can compare 9898/// a register against the immediate without having to materialize the 9899/// immediate into a register. 9900bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 9901 // Thumb2 and ARM modes can use cmn for negative immediates. 9902 if (!Subtarget->isThumb()) 9903 return ARM_AM::getSOImmVal(llvm::abs64(Imm)) != -1; 9904 if (Subtarget->isThumb2()) 9905 return ARM_AM::getT2SOImmVal(llvm::abs64(Imm)) != -1; 9906 // Thumb1 doesn't have cmn, and only 8-bit immediates. 
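  // Illustrative example, not part of the original comments: in ARM or
  // Thumb2 mode, "icmp eq r0, -42" is legal here because it can be selected
  // as "cmn r0, #42"; in Thumb1 only immediates in [0, 255] avoid
  // materializing the constant in a register first.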
9907 return Imm >= 0 && Imm <= 255; 9908} 9909 9910/// isLegalAddImmediate - Return true if the specified immediate is a legal add 9911/// *or sub* immediate, that is the target has add or sub instructions which can 9912/// add a register with the immediate without having to materialize the 9913/// immediate into a register. 9914bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const { 9915 // Same encoding for add/sub, just flip the sign. 9916 int64_t AbsImm = llvm::abs64(Imm); 9917 if (!Subtarget->isThumb()) 9918 return ARM_AM::getSOImmVal(AbsImm) != -1; 9919 if (Subtarget->isThumb2()) 9920 return ARM_AM::getT2SOImmVal(AbsImm) != -1; 9921 // Thumb1 only has 8-bit unsigned immediate. 9922 return AbsImm >= 0 && AbsImm <= 255; 9923} 9924 9925static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, 9926 bool isSEXTLoad, SDValue &Base, 9927 SDValue &Offset, bool &isInc, 9928 SelectionDAG &DAG) { 9929 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 9930 return false; 9931 9932 if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) { 9933 // AddressingMode 3 9934 Base = Ptr->getOperand(0); 9935 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 9936 int RHSC = (int)RHS->getZExtValue(); 9937 if (RHSC < 0 && RHSC > -256) { 9938 assert(Ptr->getOpcode() == ISD::ADD); 9939 isInc = false; 9940 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 9941 return true; 9942 } 9943 } 9944 isInc = (Ptr->getOpcode() == ISD::ADD); 9945 Offset = Ptr->getOperand(1); 9946 return true; 9947 } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) { 9948 // AddressingMode 2 9949 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 9950 int RHSC = (int)RHS->getZExtValue(); 9951 if (RHSC < 0 && RHSC > -0x1000) { 9952 assert(Ptr->getOpcode() == ISD::ADD); 9953 isInc = false; 9954 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 9955 Base = Ptr->getOperand(0); 9956 return true; 9957 } 9958 } 9959 9960 if (Ptr->getOpcode() == ISD::ADD) { 9961 isInc = true; 9962 ARM_AM::ShiftOpc ShOpcVal= 9963 ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode()); 9964 if (ShOpcVal != ARM_AM::no_shift) { 9965 Base = Ptr->getOperand(1); 9966 Offset = Ptr->getOperand(0); 9967 } else { 9968 Base = Ptr->getOperand(0); 9969 Offset = Ptr->getOperand(1); 9970 } 9971 return true; 9972 } 9973 9974 isInc = (Ptr->getOpcode() == ISD::ADD); 9975 Base = Ptr->getOperand(0); 9976 Offset = Ptr->getOperand(1); 9977 return true; 9978 } 9979 9980 // FIXME: Use VLDM / VSTM to emulate indexed FP load / store. 9981 return false; 9982} 9983 9984static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, 9985 bool isSEXTLoad, SDValue &Base, 9986 SDValue &Offset, bool &isInc, 9987 SelectionDAG &DAG) { 9988 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) 9989 return false; 9990 9991 Base = Ptr->getOperand(0); 9992 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) { 9993 int RHSC = (int)RHS->getZExtValue(); 9994 if (RHSC < 0 && RHSC > -0x100) { // 8 bits. 9995 assert(Ptr->getOpcode() == ISD::ADD); 9996 isInc = false; 9997 Offset = DAG.getConstant(-RHSC, RHS->getValueType(0)); 9998 return true; 9999 } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero. 
10000 isInc = Ptr->getOpcode() == ISD::ADD; 10001 Offset = DAG.getConstant(RHSC, RHS->getValueType(0)); 10002 return true; 10003 } 10004 } 10005 10006 return false; 10007} 10008 10009/// getPreIndexedAddressParts - returns true by value, base pointer and 10010/// offset pointer and addressing mode by reference if the node's address 10011/// can be legally represented as pre-indexed load / store address. 10012bool 10013ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, 10014 SDValue &Offset, 10015 ISD::MemIndexedMode &AM, 10016 SelectionDAG &DAG) const { 10017 if (Subtarget->isThumb1Only()) 10018 return false; 10019 10020 EVT VT; 10021 SDValue Ptr; 10022 bool isSEXTLoad = false; 10023 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 10024 Ptr = LD->getBasePtr(); 10025 VT = LD->getMemoryVT(); 10026 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 10027 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 10028 Ptr = ST->getBasePtr(); 10029 VT = ST->getMemoryVT(); 10030 } else 10031 return false; 10032 10033 bool isInc; 10034 bool isLegal = false; 10035 if (Subtarget->isThumb2()) 10036 isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 10037 Offset, isInc, DAG); 10038 else 10039 isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base, 10040 Offset, isInc, DAG); 10041 if (!isLegal) 10042 return false; 10043 10044 AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC; 10045 return true; 10046} 10047 10048/// getPostIndexedAddressParts - returns true by value, base pointer and 10049/// offset pointer and addressing mode by reference if this node can be 10050/// combined with a load / store to form a post-indexed load / store. 10051bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, 10052 SDValue &Base, 10053 SDValue &Offset, 10054 ISD::MemIndexedMode &AM, 10055 SelectionDAG &DAG) const { 10056 if (Subtarget->isThumb1Only()) 10057 return false; 10058 10059 EVT VT; 10060 SDValue Ptr; 10061 bool isSEXTLoad = false; 10062 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 10063 VT = LD->getMemoryVT(); 10064 Ptr = LD->getBasePtr(); 10065 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD; 10066 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 10067 VT = ST->getMemoryVT(); 10068 Ptr = ST->getBasePtr(); 10069 } else 10070 return false; 10071 10072 bool isInc; 10073 bool isLegal = false; 10074 if (Subtarget->isThumb2()) 10075 isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 10076 isInc, DAG); 10077 else 10078 isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset, 10079 isInc, DAG); 10080 if (!isLegal) 10081 return false; 10082 10083 if (Ptr != Base) { 10084 // Swap base ptr and offset to catch more post-index load / store when 10085 // it's legal. In Thumb2 mode, offset must be an immediate. 10086 if (Ptr == Offset && Op->getOpcode() == ISD::ADD && 10087 !Subtarget->isThumb2()) 10088 std::swap(Base, Offset); 10089 10090 // Post-indexed load / store update the base pointer. 10091 if (Ptr != Base) 10092 return false; 10093 } 10094 10095 AM = isInc ? 
ISD::POST_INC : ISD::POST_DEC; 10096 return true; 10097} 10098 10099void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 10100 APInt &KnownZero, 10101 APInt &KnownOne, 10102 const SelectionDAG &DAG, 10103 unsigned Depth) const { 10104 KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); 10105 switch (Op.getOpcode()) { 10106 default: break; 10107 case ARMISD::CMOV: { 10108 // Bits are known zero/one if known on the LHS and RHS. 10109 DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); 10110 if (KnownZero == 0 && KnownOne == 0) return; 10111 10112 APInt KnownZeroRHS, KnownOneRHS; 10113 DAG.ComputeMaskedBits(Op.getOperand(1), KnownZeroRHS, KnownOneRHS, Depth+1); 10114 KnownZero &= KnownZeroRHS; 10115 KnownOne &= KnownOneRHS; 10116 return; 10117 } 10118 } 10119} 10120 10121//===----------------------------------------------------------------------===// 10122// ARM Inline Assembly Support 10123//===----------------------------------------------------------------------===// 10124 10125bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const { 10126 // Looking for "rev" which is V6+. 10127 if (!Subtarget->hasV6Ops()) 10128 return false; 10129 10130 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue()); 10131 std::string AsmStr = IA->getAsmString(); 10132 SmallVector<StringRef, 4> AsmPieces; 10133 SplitString(AsmStr, AsmPieces, ";\n"); 10134 10135 switch (AsmPieces.size()) { 10136 default: return false; 10137 case 1: 10138 AsmStr = AsmPieces[0]; 10139 AsmPieces.clear(); 10140 SplitString(AsmStr, AsmPieces, " \t,"); 10141 10142 // rev $0, $1 10143 if (AsmPieces.size() == 3 && 10144 AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" && 10145 IA->getConstraintString().compare(0, 4, "=l,l") == 0) { 10146 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); 10147 if (Ty && Ty->getBitWidth() == 32) 10148 return IntrinsicLowering::LowerToByteSwap(CI); 10149 } 10150 break; 10151 } 10152 10153 return false; 10154} 10155 10156/// getConstraintType - Given a constraint letter, return the type of 10157/// constraint it is for this target. 10158ARMTargetLowering::ConstraintType 10159ARMTargetLowering::getConstraintType(const std::string &Constraint) const { 10160 if (Constraint.size() == 1) { 10161 switch (Constraint[0]) { 10162 default: break; 10163 case 'l': return C_RegisterClass; 10164 case 'w': return C_RegisterClass; 10165 case 'h': return C_RegisterClass; 10166 case 'x': return C_RegisterClass; 10167 case 't': return C_RegisterClass; 10168 case 'j': return C_Other; // Constant for movw. 10169 // An address with a single base register. Due to the way we 10170 // currently handle addresses it is the same as an 'r' memory constraint. 10171 case 'Q': return C_Memory; 10172 } 10173 } else if (Constraint.size() == 2) { 10174 switch (Constraint[0]) { 10175 default: break; 10176 // All 'U+' constraints are addresses. 10177 case 'U': return C_Memory; 10178 } 10179 } 10180 return TargetLowering::getConstraintType(Constraint); 10181} 10182 10183/// Examine constraint type and operand type and determine a weight value. 10184/// This object must already have been set up with the operand type 10185/// and the current alternative constraint selected. 
10186TargetLowering::ConstraintWeight 10187ARMTargetLowering::getSingleConstraintMatchWeight( 10188 AsmOperandInfo &info, const char *constraint) const { 10189 ConstraintWeight weight = CW_Invalid; 10190 Value *CallOperandVal = info.CallOperandVal; 10191 // If we don't have a value, we can't do a match, 10192 // but allow it at the lowest weight. 10193 if (CallOperandVal == NULL) 10194 return CW_Default; 10195 Type *type = CallOperandVal->getType(); 10196 // Look at the constraint type. 10197 switch (*constraint) { 10198 default: 10199 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); 10200 break; 10201 case 'l': 10202 if (type->isIntegerTy()) { 10203 if (Subtarget->isThumb()) 10204 weight = CW_SpecificReg; 10205 else 10206 weight = CW_Register; 10207 } 10208 break; 10209 case 'w': 10210 if (type->isFloatingPointTy()) 10211 weight = CW_Register; 10212 break; 10213 } 10214 return weight; 10215} 10216 10217typedef std::pair<unsigned, const TargetRegisterClass*> RCPair; 10218RCPair 10219ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, 10220 EVT VT) const { 10221 if (Constraint.size() == 1) { 10222 // GCC ARM Constraint Letters 10223 switch (Constraint[0]) { 10224 case 'l': // Low regs or general regs. 10225 if (Subtarget->isThumb()) 10226 return RCPair(0U, &ARM::tGPRRegClass); 10227 return RCPair(0U, &ARM::GPRRegClass); 10228 case 'h': // High regs or no regs. 10229 if (Subtarget->isThumb()) 10230 return RCPair(0U, &ARM::hGPRRegClass); 10231 break; 10232 case 'r': 10233 return RCPair(0U, &ARM::GPRRegClass); 10234 case 'w': 10235 if (VT == MVT::f32) 10236 return RCPair(0U, &ARM::SPRRegClass); 10237 if (VT.getSizeInBits() == 64) 10238 return RCPair(0U, &ARM::DPRRegClass); 10239 if (VT.getSizeInBits() == 128) 10240 return RCPair(0U, &ARM::QPRRegClass); 10241 break; 10242 case 'x': 10243 if (VT == MVT::f32) 10244 return RCPair(0U, &ARM::SPR_8RegClass); 10245 if (VT.getSizeInBits() == 64) 10246 return RCPair(0U, &ARM::DPR_8RegClass); 10247 if (VT.getSizeInBits() == 128) 10248 return RCPair(0U, &ARM::QPR_8RegClass); 10249 break; 10250 case 't': 10251 if (VT == MVT::f32) 10252 return RCPair(0U, &ARM::SPRRegClass); 10253 break; 10254 } 10255 } 10256 if (StringRef("{cc}").equals_lower(Constraint)) 10257 return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass); 10258 10259 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); 10260} 10261 10262/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 10263/// vector. If it is invalid, don't add anything to Ops. 10264void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op, 10265 std::string &Constraint, 10266 std::vector<SDValue>&Ops, 10267 SelectionDAG &DAG) const { 10268 SDValue Result(0, 0); 10269 10270 // Currently only support length 1 constraints. 10271 if (Constraint.length() != 1) return; 10272 10273 char ConstraintLetter = Constraint[0]; 10274 switch (ConstraintLetter) { 10275 default: break; 10276 case 'j': 10277 case 'I': case 'J': case 'K': case 'L': 10278 case 'M': case 'N': case 'O': 10279 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 10280 if (!C) 10281 return; 10282 10283 int64_t CVal64 = C->getSExtValue(); 10284 int CVal = (int) CVal64; 10285 // None of these constraints allow values larger than 32 bits. Check 10286 // that the value fits in an int. 10287 if (CVal != CVal64) 10288 return; 10289 10290 switch (ConstraintLetter) { 10291 case 'j': 10292 // Constant suitable for movw, must be between 0 and 10293 // 65535. 
10294 if (Subtarget->hasV6T2Ops()) 10295 if (CVal >= 0 && CVal <= 65535) 10296 break; 10297 return; 10298 case 'I': 10299 if (Subtarget->isThumb1Only()) { 10300 // This must be a constant between 0 and 255, for ADD 10301 // immediates. 10302 if (CVal >= 0 && CVal <= 255) 10303 break; 10304 } else if (Subtarget->isThumb2()) { 10305 // A constant that can be used as an immediate value in a 10306 // data-processing instruction. 10307 if (ARM_AM::getT2SOImmVal(CVal) != -1) 10308 break; 10309 } else { 10310 // A constant that can be used as an immediate value in a 10311 // data-processing instruction. 10312 if (ARM_AM::getSOImmVal(CVal) != -1) 10313 break; 10314 } 10315 return; 10316 10317 case 'J': 10318 if (Subtarget->isThumb()) { // FIXME thumb2 10319 // This must be a constant between -255 and -1, for negated ADD 10320 // immediates. This can be used in GCC with an "n" modifier that 10321 // prints the negated value, for use with SUB instructions. It is 10322 // not useful otherwise but is implemented for compatibility. 10323 if (CVal >= -255 && CVal <= -1) 10324 break; 10325 } else { 10326 // This must be a constant between -4095 and 4095. It is not clear 10327 // what this constraint is intended for. Implemented for 10328 // compatibility with GCC. 10329 if (CVal >= -4095 && CVal <= 4095) 10330 break; 10331 } 10332 return; 10333 10334 case 'K': 10335 if (Subtarget->isThumb1Only()) { 10336 // A 32-bit value where only one byte has a nonzero value. Exclude 10337 // zero to match GCC. This constraint is used by GCC internally for 10338 // constants that can be loaded with a move/shift combination. 10339 // It is not useful otherwise but is implemented for compatibility. 10340 if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal)) 10341 break; 10342 } else if (Subtarget->isThumb2()) { 10343 // A constant whose bitwise inverse can be used as an immediate 10344 // value in a data-processing instruction. This can be used in GCC 10345 // with a "B" modifier that prints the inverted value, for use with 10346 // BIC and MVN instructions. It is not useful otherwise but is 10347 // implemented for compatibility. 10348 if (ARM_AM::getT2SOImmVal(~CVal) != -1) 10349 break; 10350 } else { 10351 // A constant whose bitwise inverse can be used as an immediate 10352 // value in a data-processing instruction. This can be used in GCC 10353 // with a "B" modifier that prints the inverted value, for use with 10354 // BIC and MVN instructions. It is not useful otherwise but is 10355 // implemented for compatibility. 10356 if (ARM_AM::getSOImmVal(~CVal) != -1) 10357 break; 10358 } 10359 return; 10360 10361 case 'L': 10362 if (Subtarget->isThumb1Only()) { 10363 // This must be a constant between -7 and 7, 10364 // for 3-operand ADD/SUB immediate instructions. 10365 if (CVal >= -7 && CVal < 7) 10366 break; 10367 } else if (Subtarget->isThumb2()) { 10368 // A constant whose negation can be used as an immediate value in a 10369 // data-processing instruction. This can be used in GCC with an "n" 10370 // modifier that prints the negated value, for use with SUB 10371 // instructions. It is not useful otherwise but is implemented for 10372 // compatibility. 10373 if (ARM_AM::getT2SOImmVal(-CVal) != -1) 10374 break; 10375 } else { 10376 // A constant whose negation can be used as an immediate value in a 10377 // data-processing instruction. This can be used in GCC with an "n" 10378 // modifier that prints the negated value, for use with SUB 10379 // instructions. 
It is not useful otherwise but is implemented for 10380 // compatibility. 10381 if (ARM_AM::getSOImmVal(-CVal) != -1) 10382 break; 10383 } 10384 return; 10385 10386 case 'M': 10387 if (Subtarget->isThumb()) { // FIXME thumb2 10388 // This must be a multiple of 4 between 0 and 1020, for 10389 // ADD sp + immediate. 10390 if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0)) 10391 break; 10392 } else { 10393 // A power of two or a constant between 0 and 32. This is used in 10394 // GCC for the shift amount on shifted register operands, but it is 10395 // useful in general for any shift amounts. 10396 if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0)) 10397 break; 10398 } 10399 return; 10400 10401 case 'N': 10402 if (Subtarget->isThumb()) { // FIXME thumb2 10403 // This must be a constant between 0 and 31, for shift amounts. 10404 if (CVal >= 0 && CVal <= 31) 10405 break; 10406 } 10407 return; 10408 10409 case 'O': 10410 if (Subtarget->isThumb()) { // FIXME thumb2 10411 // This must be a multiple of 4 between -508 and 508, for 10412 // ADD/SUB sp = sp + immediate. 10413 if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0)) 10414 break; 10415 } 10416 return; 10417 } 10418 Result = DAG.getTargetConstant(CVal, Op.getValueType()); 10419 break; 10420 } 10421 10422 if (Result.getNode()) { 10423 Ops.push_back(Result); 10424 return; 10425 } 10426 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 10427} 10428 10429bool 10430ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 10431 // The ARM target isn't yet aware of offsets. 10432 return false; 10433} 10434 10435bool ARM::isBitFieldInvertedMask(unsigned v) { 10436 if (v == 0xffffffff) 10437 return 0; 10438 // there can be 1's on either or both "outsides", all the "inside" 10439 // bits must be 0's 10440 unsigned int lsb = 0, msb = 31; 10441 while (v & (1 << msb)) --msb; 10442 while (v & (1 << lsb)) ++lsb; 10443 for (unsigned int i = lsb; i <= msb; ++i) { 10444 if (v & (1 << i)) 10445 return 0; 10446 } 10447 return 1; 10448} 10449 10450/// isFPImmLegal - Returns true if the target can instruction select the 10451/// specified FP immediate natively. If false, the legalizer will 10452/// materialize the FP immediate as a load from a constant pool. 10453bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { 10454 if (!Subtarget->hasVFP3()) 10455 return false; 10456 if (VT == MVT::f32) 10457 return ARM_AM::getFP32Imm(Imm) != -1; 10458 if (VT == MVT::f64) 10459 return ARM_AM::getFP64Imm(Imm) != -1; 10460 return false; 10461} 10462 10463/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as 10464/// MemIntrinsicNodes. The associated MachineMemOperands record the alignment 10465/// specified in the intrinsic calls. 10466bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 10467 const CallInst &I, 10468 unsigned Intrinsic) const { 10469 switch (Intrinsic) { 10470 case Intrinsic::arm_neon_vld1: 10471 case Intrinsic::arm_neon_vld2: 10472 case Intrinsic::arm_neon_vld3: 10473 case Intrinsic::arm_neon_vld4: 10474 case Intrinsic::arm_neon_vld2lane: 10475 case Intrinsic::arm_neon_vld3lane: 10476 case Intrinsic::arm_neon_vld4lane: { 10477 Info.opc = ISD::INTRINSIC_W_CHAIN; 10478 // Conservatively set memVT to the entire set of vectors loaded. 
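    // Illustrative example, not part of the original comments: for an
    // arm_neon_vld3 returning { <4 x i32>, <4 x i32>, <4 x i32> } (48 bytes),
    // NumElts below is 6 and memVT becomes v6i64, covering all three loaded
    // vectors in a single memory operand.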
10479 uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8; 10480 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 10481 Info.ptrVal = I.getArgOperand(0); 10482 Info.offset = 0; 10483 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 10484 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 10485 Info.vol = false; // volatile loads with NEON intrinsics not supported 10486 Info.readMem = true; 10487 Info.writeMem = false; 10488 return true; 10489 } 10490 case Intrinsic::arm_neon_vst1: 10491 case Intrinsic::arm_neon_vst2: 10492 case Intrinsic::arm_neon_vst3: 10493 case Intrinsic::arm_neon_vst4: 10494 case Intrinsic::arm_neon_vst2lane: 10495 case Intrinsic::arm_neon_vst3lane: 10496 case Intrinsic::arm_neon_vst4lane: { 10497 Info.opc = ISD::INTRINSIC_VOID; 10498 // Conservatively set memVT to the entire set of vectors stored. 10499 unsigned NumElts = 0; 10500 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { 10501 Type *ArgTy = I.getArgOperand(ArgI)->getType(); 10502 if (!ArgTy->isVectorTy()) 10503 break; 10504 NumElts += getDataLayout()->getTypeAllocSize(ArgTy) / 8; 10505 } 10506 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 10507 Info.ptrVal = I.getArgOperand(0); 10508 Info.offset = 0; 10509 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1); 10510 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue(); 10511 Info.vol = false; // volatile stores with NEON intrinsics not supported 10512 Info.readMem = false; 10513 Info.writeMem = true; 10514 return true; 10515 } 10516 case Intrinsic::arm_strexd: { 10517 Info.opc = ISD::INTRINSIC_W_CHAIN; 10518 Info.memVT = MVT::i64; 10519 Info.ptrVal = I.getArgOperand(2); 10520 Info.offset = 0; 10521 Info.align = 8; 10522 Info.vol = true; 10523 Info.readMem = false; 10524 Info.writeMem = true; 10525 return true; 10526 } 10527 case Intrinsic::arm_ldrexd: { 10528 Info.opc = ISD::INTRINSIC_W_CHAIN; 10529 Info.memVT = MVT::i64; 10530 Info.ptrVal = I.getArgOperand(0); 10531 Info.offset = 0; 10532 Info.align = 8; 10533 Info.vol = true; 10534 Info.readMem = true; 10535 Info.writeMem = false; 10536 return true; 10537 } 10538 default: 10539 break; 10540 } 10541 10542 return false; 10543} 10544
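// Illustrative example, not part of the original source: a call to
// @llvm.arm.ldrexd(i8* %p) is described by the arm_ldrexd case above as a
// volatile, 8-byte-aligned, i64-sized read of %p, so the resulting
// MemIntrinsicSDNode carries a MachineMemOperand reflecting that access.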