SILowerControlFlow.cpp revision 263508
//===-- SILowerControlFlow.cpp - Use predicates for control flow ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This pass lowers the pseudo control flow instructions to real
/// machine instructions.
///
/// All control flow is handled using predicated instructions and
/// a predicate stack. Each Scalar ALU controls the operations of 64 Vector
/// ALUs. The Scalar ALU can update the predicate for any of the Vector ALUs
/// by writing to the 64-bit EXEC register (each bit corresponds to a
/// single vector ALU). Typically, for predicates, a vector ALU will write
/// to its bit of the VCC register (like EXEC, VCC is 64 bits, one for each
/// Vector ALU) and then the Scalar ALU will AND the VCC register with the
/// EXEC to update the predicates.
///
/// For example:
/// %VCC = V_CMP_GT_F32 %VGPR1, %VGPR2
/// %SGPR0 = SI_IF %VCC
///   %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0
/// %SGPR0 = SI_ELSE %SGPR0
///   %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0
/// SI_END_CF %SGPR0
///
/// becomes:
///
/// %SGPR0 = S_AND_SAVEEXEC_B64 %VCC  // Save and update the exec mask
/// %SGPR0 = S_XOR_B64 %SGPR0, %EXEC  // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label0            // This instruction is an optional
///                                   // optimization which allows us to
///                                   // branch if all the bits of
///                                   // EXEC are zero.
/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0 // Do the IF block of the branch
///
/// label0:
/// %SGPR0 = S_OR_SAVEEXEC_B64 %SGPR0 // Restore the exec mask for the ELSE block
/// %EXEC = S_XOR_B64 %SGPR0, %EXEC   // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label1            // Use our branch optimization
///                                   // instruction again.
/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0 // Do the ELSE block
/// label1:
/// %EXEC = S_OR_B64 %EXEC, %SGPR0    // Re-enable saved exec mask bits
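///
/// Loops are lowered with the same machinery. SI_BREAK ORs the lanes that
/// leave the loop into an accumulated "exited" mask, and SI_LOOP clears
/// those lanes from EXEC and branches back to the header while any lane is
/// still active. A rough sketch of the pattern emitted by Break() and
/// Loop() below (register reuse and label names are illustrative; in real
/// code the break mask flows through PHIs):
///
/// %SGPR0 = S_OR_B64 %EXEC, %SGPR0   // SI_BREAK: mark active lanes as exited
/// %EXEC = S_ANDN2_B64 %EXEC, %SGPR0 // SI_LOOP: disable exited lanes
/// S_CBRANCH_EXECNZ label_header     // Loop again while any lane is active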
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

namespace {

class SILowerControlFlowPass : public MachineFunctionPass {

private:
  static const unsigned SkipThreshold = 12;

  static char ID;
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;

  bool shouldSkip(MachineBasicBlock *From, MachineBasicBlock *To);

  void Skip(MachineInstr &From, MachineOperand &To);
  void SkipIfDead(MachineInstr &MI);

  void If(MachineInstr &MI);
  void Else(MachineInstr &MI);
  void Break(MachineInstr &MI);
  void IfBreak(MachineInstr &MI);
  void ElseBreak(MachineInstr &MI);
  void Loop(MachineInstr &MI);
  void EndCf(MachineInstr &MI);

  void Kill(MachineInstr &MI);
  void Branch(MachineInstr &MI);

  void LoadM0(MachineInstr &MI, MachineInstr *MovRel);
  void IndirectSrc(MachineInstr &MI);
  void IndirectDst(MachineInstr &MI);

public:
  SILowerControlFlowPass(TargetMachine &tm) :
    MachineFunctionPass(ID), TRI(0), TII(0) { }

  virtual bool runOnMachineFunction(MachineFunction &MF);

  const char *getPassName() const {
    return "SI Lower control flow instructions";
  }

};

} // End anonymous namespace

char SILowerControlFlowPass::ID = 0;

FunctionPass *llvm::createSILowerControlFlowPass(TargetMachine &tm) {
  return new SILowerControlFlowPass(tm);
}

bool SILowerControlFlowPass::shouldSkip(MachineBasicBlock *From,
                                        MachineBasicBlock *To) {

  unsigned NumInstr = 0;

  for (MachineBasicBlock *MBB = From; MBB != To && !MBB->succ_empty();
       MBB = *MBB->succ_begin()) {

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         NumInstr < SkipThreshold && I != E; ++I) {

      if (I->isBundle() || !I->isBundled())
        if (++NumInstr >= SkipThreshold)
          return true;
    }
  }

  return false;
}

void SILowerControlFlowPass::Skip(MachineInstr &From, MachineOperand &To) {

  if (!shouldSkip(*From.getParent()->succ_begin(), To.getMBB()))
    return;

  DebugLoc DL = From.getDebugLoc();
  BuildMI(*From.getParent(), &From, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
    .addOperand(To)
    .addReg(AMDGPU::EXEC);
}

void SILowerControlFlowPass::SkipIfDead(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  if (!shouldSkip(&MBB, &MBB.getParent()->back()))
    return;

  MachineBasicBlock::iterator Insert = &MI;
  ++Insert;

  // If the exec mask is non-zero, skip the next two instructions
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addImm(3)
    .addReg(AMDGPU::EXEC);

  // Exec mask is zero: Export to NULL target...
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::EXP))
    .addImm(0)
    .addImm(0x09) // V_008DFC_SQ_EXP_NULL
    .addImm(0)
    .addImm(1)
    .addImm(1)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0);

  // ... and terminate wavefront
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));
}
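
// Lower SI_IF. After S_AND_SAVEEXEC_B64, EXEC holds only the lanes for
// which the condition is true; the XOR then leaves the saved register
// holding exactly the lanes that were live but failed the condition,
// i.e. the lanes that still have to run the ELSE block.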
void SILowerControlFlowPass::If(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), Reg)
    .addReg(Vcc);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), Reg)
    .addReg(AMDGPU::EXEC)
    .addReg(Reg);

  Skip(MI, MI.getOperand(2));

  MI.eraseFromParent();
}

void SILowerControlFlowPass::Else(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_SAVEEXEC_B64), Dst)
    .addReg(Src); // Saved EXEC

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Dst);

  Skip(MI, MI.getOperand(2));

  MI.eraseFromParent();
}

void SILowerControlFlowPass::Break(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(AMDGPU::EXEC)
    .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlowPass::IfBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(Vcc)
    .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlowPass::ElseBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Saved = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(Saved)
    .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlowPass::Loop(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Src = MI.getOperand(0).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Src);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addOperand(MI.getOperand(1))
    .addReg(AMDGPU::EXEC);

  MI.eraseFromParent();
}

void SILowerControlFlowPass::EndCf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Reg);

  MI.eraseFromParent();
}

void SILowerControlFlowPass::Branch(MachineInstr &MI) {
  MachineBasicBlock *Next = MI.getParent()->getNextNode();
  MachineBasicBlock *Target = MI.getOperand(0).getMBB();
  if (Target == Next)
    MI.eraseFromParent();
  else
    assert(0);
}

void SILowerControlFlowPass::Kill(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  // Kill is only allowed in pixel shaders
  assert(MBB.getParent()->getInfo<SIMachineFunctionInfo>()->ShaderType ==
         ShaderType::PIXEL);

  // Clear this pixel from the exec mask if the operand is negative
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMPX_LE_F32_e32), AMDGPU::VCC)
    .addImm(0)
    .addOperand(MI.getOperand(0));

  MI.eraseFromParent();
}
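
// Load an index into M0 for V_MOVREL* and insert the MovRel instruction.
// A uniform (SGPR) index needs only a single S_MOV_B32. A divergent
// (VGPR) index is handled with a waterfall loop: V_READFIRSTLANE_B32
// picks the index of the first active lane, MovRel executes for every
// lane sharing that index, those lanes are then removed from EXEC, and
// the loop repeats until all lanes have been serviced.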
void SILowerControlFlowPass::LoadM0(MachineInstr &MI, MachineInstr *MovRel) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I = MI;

  unsigned Save = MI.getOperand(1).getReg();
  unsigned Idx = MI.getOperand(3).getReg();

  if (AMDGPU::SReg_32RegClass.contains(Idx)) {
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addReg(Idx);
    MBB.insert(I, MovRel);
    MI.eraseFromParent();
    return;
  }

  assert(AMDGPU::SReg_64RegClass.contains(Save));
  assert(AMDGPU::VReg_32RegClass.contains(Idx));

  // Save the EXEC mask
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), Save)
    .addReg(AMDGPU::EXEC);

  // Read the next variant into VCC (lower 32 bits) <- also loop target
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32_e32), AMDGPU::VCC)
    .addReg(Idx);

  // Move index from VCC into M0
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
    .addReg(AMDGPU::VCC);

  // Compare the just read M0 value to all possible Idx values
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e32), AMDGPU::VCC)
    .addReg(AMDGPU::M0)
    .addReg(Idx);

  // Update EXEC, save the original EXEC value to VCC
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), AMDGPU::VCC)
    .addReg(AMDGPU::VCC);

  // Do the actual move
  MBB.insert(I, MovRel);

  // Update EXEC, switch all done bits to 0 and all todo bits to 1
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(AMDGPU::VCC);

  // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addImm(-7)
    .addReg(AMDGPU::EXEC);

  // Restore EXEC
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
    .addReg(Save);

  MI.eraseFromParent();
}
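
// Lower SI_INDIRECT_SRC. V_MOVRELS_B32 reads the VGPR numbered by the sum
// of its source operand and M0, so the constant offset is folded into the
// source register number (SubReg + Off) while the dynamic part of the
// index goes through M0 via LoadM0. IndirectDst below is the mirror image
// using V_MOVRELD_B32 for the write side.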
void SILowerControlFlowPass::IndirectSrc(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vec = MI.getOperand(2).getReg();
  unsigned Off = MI.getOperand(4).getImm();
  unsigned SubReg = TRI->getSubReg(Vec, AMDGPU::sub0);
  if (!SubReg)
    SubReg = Vec;

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
      .addReg(SubReg + Off)
      .addReg(AMDGPU::M0, RegState::Implicit)
      .addReg(Vec, RegState::Implicit);

  LoadM0(MI, MovRel);
}

void SILowerControlFlowPass::IndirectDst(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Off = MI.getOperand(4).getImm();
  unsigned Val = MI.getOperand(5).getReg();
  unsigned SubReg = TRI->getSubReg(Dst, AMDGPU::sub0);
  if (!SubReg)
    SubReg = Dst;

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELD_B32_e32))
      .addReg(SubReg + Off, RegState::Define)
      .addReg(Val)
      .addReg(AMDGPU::M0, RegState::Implicit)
      .addReg(Dst, RegState::Implicit);

  LoadM0(MI, MovRel);
}

bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
  TII = MF.getTarget().getInstrInfo();
  TRI = MF.getTarget().getRegisterInfo();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  bool HaveKill = false;
  bool NeedM0 = false;
  bool NeedWQM = false;
  unsigned Depth = 0;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    for (MachineBasicBlock::iterator I = MBB.begin(), Next = llvm::next(I);
         I != MBB.end(); I = Next) {

      Next = llvm::next(I);
      MachineInstr &MI = *I;
      switch (MI.getOpcode()) {
        default: break;
        case AMDGPU::SI_IF:
          ++Depth;
          If(MI);
          break;

        case AMDGPU::SI_ELSE:
          Else(MI);
          break;

        case AMDGPU::SI_BREAK:
          Break(MI);
          break;

        case AMDGPU::SI_IF_BREAK:
          IfBreak(MI);
          break;

        case AMDGPU::SI_ELSE_BREAK:
          ElseBreak(MI);
          break;

        case AMDGPU::SI_LOOP:
          ++Depth;
          Loop(MI);
          break;

        case AMDGPU::SI_END_CF:
          if (--Depth == 0 && HaveKill) {
            SkipIfDead(MI);
            HaveKill = false;
          }
          EndCf(MI);
          break;

        case AMDGPU::SI_KILL:
          if (Depth == 0)
            SkipIfDead(MI);
          else
            HaveKill = true;
          Kill(MI);
          break;

        case AMDGPU::S_BRANCH:
          Branch(MI);
          break;

        case AMDGPU::SI_INDIRECT_SRC:
          IndirectSrc(MI);
          break;

        case AMDGPU::SI_INDIRECT_DST_V1:
        case AMDGPU::SI_INDIRECT_DST_V2:
        case AMDGPU::SI_INDIRECT_DST_V4:
        case AMDGPU::SI_INDIRECT_DST_V8:
        case AMDGPU::SI_INDIRECT_DST_V16:
          IndirectDst(MI);
          break;

        case AMDGPU::DS_READ_B32:
          NeedWQM = true;
          // Fall through
        case AMDGPU::DS_WRITE_B32:
        case AMDGPU::DS_ADD_U32_RTN:
          NeedM0 = true;
          break;

        case AMDGPU::V_INTERP_P1_F32:
        case AMDGPU::V_INTERP_P2_F32:
        case AMDGPU::V_INTERP_MOV_F32:
          NeedWQM = true;
          break;

      }
    }
  }

  if (NeedM0) {
    MachineBasicBlock &MBB = MF.front();
    // Initialize M0 to a value that won't cause LDS access to be discarded
    // due to offset clamping
    BuildMI(MBB, MBB.getFirstNonPHI(), DebugLoc(), TII->get(AMDGPU::S_MOV_B32),
            AMDGPU::M0).addImm(0xffffffff);
  }

  if (NeedWQM && MFI->ShaderType != ShaderType::COMPUTE) {
    MachineBasicBlock &MBB = MF.front();
    BuildMI(MBB, MBB.getFirstNonPHI(), DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
            AMDGPU::EXEC).addReg(AMDGPU::EXEC);
  }

  return true;
}