AArch64MCCodeEmitter.cpp revision 263508
//=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code =//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "mccodeemitter"
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
class AArch64MCCodeEmitter : public MCCodeEmitter {
  AArch64MCCodeEmitter(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
  void operator=(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION;
  MCContext &Ctx;

public:
  AArch64MCCodeEmitter(MCContext &ctx) : Ctx(ctx) {}

  ~AArch64MCCodeEmitter() {}

  unsigned getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups) const;

  unsigned getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups) const;

  template<int MemSize>
  unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups) const {
    return getOffsetUImm12OpValue(MI, OpIdx, Fixups, MemSize);
  }

  unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  int MemSize) const;

  unsigned getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                   SmallVectorImpl<MCFixup> &Fixups) const;
  unsigned getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                   SmallVectorImpl<MCFixup> &Fixups) const;

  unsigned getShiftRightImm8(const MCInst &MI, unsigned Op,
                             SmallVectorImpl<MCFixup> &Fixups) const;
  unsigned getShiftRightImm16(const MCInst &MI, unsigned Op,
                              SmallVectorImpl<MCFixup> &Fixups) const;
  unsigned getShiftRightImm32(const MCInst &MI, unsigned Op,
                              SmallVectorImpl<MCFixup> &Fixups) const;
  unsigned getShiftRightImm64(const MCInst &MI, unsigned Op,
                              SmallVectorImpl<MCFixup> &Fixups) const;

  unsigned getShiftLeftImm8(const MCInst &MI, unsigned Op,
                            SmallVectorImpl<MCFixup> &Fixups) const;
  unsigned getShiftLeftImm16(const MCInst &MI, unsigned Op,
                             SmallVectorImpl<MCFixup> &Fixups) const;
  unsigned getShiftLeftImm32(const MCInst &MI, unsigned Op,
                             SmallVectorImpl<MCFixup> &Fixups) const;
  unsigned getShiftLeftImm64(const MCInst &MI, unsigned Op,
                             SmallVectorImpl<MCFixup> &Fixups) const;

  // Labels are handled mostly the same way: a symbol is needed, and
  // just gets some fixup attached.
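  // For instance (a sketch only; the fixup kind below is chosen purely for
  // illustration), a branch to a still-unresolved symbol would go through a
  // specialisation such as:
  //   getLabelOpValue<AArch64::fixup_a64_condbr>(MI, OpIdx, Fixups);
  // The immediate field is encoded as 0, and a fixup of the requested kind is
  // recorded for the assembler backend (or an ELF relocation) to patch later.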
  template<AArch64::Fixups fixupDesired>
  unsigned getLabelOpValue(const MCInst &MI, unsigned OpIdx,
                           SmallVectorImpl<MCFixup> &Fixups) const;

  unsigned getLoadLitLabelOpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups) const;

  unsigned getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups) const;

  unsigned getAddressWithFixup(const MCOperand &MO,
                               unsigned FixupKind,
                               SmallVectorImpl<MCFixup> &Fixups) const;

  // getBinaryCodeForInstr - TableGen'erated function for getting the
  // binary encoding for an instruction.
  uint64_t getBinaryCodeForInstr(const MCInst &MI,
                                 SmallVectorImpl<MCFixup> &Fixups) const;

  /// getMachineOpValue - Return binary encoding of operand. If the machine
  /// operand requires relocation, record the relocation and return zero.
  unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups) const;

  void EmitByte(unsigned char C, raw_ostream &OS) const {
    OS << (char)C;
  }

  void EmitInstruction(uint32_t Val, raw_ostream &OS) const {
    // Output the constant in little endian byte order.
    for (unsigned i = 0; i != 4; ++i) {
      EmitByte(Val & 0xff, OS);
      Val >>= 8;
    }
  }

  void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups) const;

  template<int hasRs, int hasRt2> unsigned
  fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue) const;

  unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue) const;

  unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue) const;
};

} // end anonymous namespace

unsigned AArch64MCCodeEmitter::getAddressWithFixup(const MCOperand &MO,
                                       unsigned FixupKind,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  if (!MO.isExpr()) {
    // This can occur for manually decoded or constructed MCInsts, but neither
    // the assembly-parser nor instruction selection will currently produce an
    // MCInst that's not a symbol reference.
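    // (A hand-constructed MCInst whose target has already been folded to a
    // raw immediate is the plausible example; the value is passed through
    // unchanged below.)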
    assert(MO.isImm() && "Unexpected address requested");
    return MO.getImm();
  }

  const MCExpr *Expr = MO.getExpr();
  MCFixupKind Kind = MCFixupKind(FixupKind);
  Fixups.push_back(MCFixup::Create(0, Expr, Kind));

  return 0;
}

unsigned AArch64MCCodeEmitter::
getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                       SmallVectorImpl<MCFixup> &Fixups,
                       int MemSize) const {
  const MCOperand &ImmOp = MI.getOperand(OpIdx);
  if (ImmOp.isImm())
    return ImmOp.getImm();

  assert(ImmOp.isExpr() && "Unexpected operand type");
  const AArch64MCExpr *Expr = cast<AArch64MCExpr>(ImmOp.getExpr());
  unsigned FixupKind;

  switch (Expr->getKind()) {
  default: llvm_unreachable("Unexpected operand modifier");
  case AArch64MCExpr::VK_AARCH64_LO12: {
    static const unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_lo12,
                                             AArch64::fixup_a64_ldst16_lo12,
                                             AArch64::fixup_a64_ldst32_lo12,
                                             AArch64::fixup_a64_ldst64_lo12,
                                             AArch64::fixup_a64_ldst128_lo12 };
    assert(MemSize <= 16 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_GOT_LO12:
    assert(MemSize == 8 && "Invalid fixup for operation");
    FixupKind = AArch64::fixup_a64_ld64_got_lo12_nc;
    break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_dtprel_lo12,
      AArch64::fixup_a64_ldst16_dtprel_lo12,
      AArch64::fixup_a64_ldst32_dtprel_lo12,
      AArch64::fixup_a64_ldst64_dtprel_lo12
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_dtprel_lo12_nc,
      AArch64::fixup_a64_ldst16_dtprel_lo12_nc,
      AArch64::fixup_a64_ldst32_dtprel_lo12_nc,
      AArch64::fixup_a64_ldst64_dtprel_lo12_nc
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12:
    assert(MemSize == 8 && "Invalid fixup for operation");
    FixupKind = AArch64::fixup_a64_ld64_gottprel_lo12_nc;
    break;
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_tprel_lo12,
      AArch64::fixup_a64_ldst16_tprel_lo12,
      AArch64::fixup_a64_ldst32_tprel_lo12,
      AArch64::fixup_a64_ldst64_tprel_lo12
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC: {
    static const unsigned FixupsBySize[] = {
      AArch64::fixup_a64_ldst8_tprel_lo12_nc,
      AArch64::fixup_a64_ldst16_tprel_lo12_nc,
      AArch64::fixup_a64_ldst32_tprel_lo12_nc,
      AArch64::fixup_a64_ldst64_tprel_lo12_nc
    };
    assert(MemSize <= 8 && "Invalid fixup for operation");
    FixupKind = FixupsBySize[Log2_32(MemSize)];
    break;
  }
  case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
    assert(MemSize == 8 && "Invalid fixup for operation");
    FixupKind = AArch64::fixup_a64_tlsdesc_ld64_lo12_nc;
    break;
  }

  return getAddressWithFixup(ImmOp, FixupKind, Fixups);
}

unsigned
AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
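  // The symbolic case is assembly such as "add x0, x0, #:lo12:var": the
  // parser wraps the operand in an AArch64MCExpr whose VK_AARCH64_* modifier
  // the switch below maps to the matching ADD-immediate fixup kind.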
  if (MO.isImm())
    return static_cast<unsigned>(MO.getImm());

  assert(MO.isExpr());

  unsigned FixupKind = 0;
  switch (cast<AArch64MCExpr>(MO.getExpr())->getKind()) {
  default: llvm_unreachable("Invalid expression modifier");
  case AArch64MCExpr::VK_AARCH64_LO12:
    FixupKind = AArch64::fixup_a64_add_lo12; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_HI12:
    FixupKind = AArch64::fixup_a64_add_dtprel_hi12; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12:
    FixupKind = AArch64::fixup_a64_add_dtprel_lo12; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC:
    FixupKind = AArch64::fixup_a64_add_dtprel_lo12_nc; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_HI12:
    FixupKind = AArch64::fixup_a64_add_tprel_hi12; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12:
    FixupKind = AArch64::fixup_a64_add_tprel_lo12; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC:
    FixupKind = AArch64::fixup_a64_add_tprel_lo12_nc; break;
  case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
    FixupKind = AArch64::fixup_a64_tlsdesc_add_lo12_nc; break;
  }

  return getAddressWithFixup(MO, FixupKind, Fixups);
}

unsigned
AArch64MCCodeEmitter::getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups) const {

  const MCOperand &MO = MI.getOperand(OpIdx);
  if (MO.isImm())
    return static_cast<unsigned>(MO.getImm());

  assert(MO.isExpr());

  unsigned Modifier = AArch64MCExpr::VK_AARCH64_None;
  if (const AArch64MCExpr *Expr = dyn_cast<AArch64MCExpr>(MO.getExpr()))
    Modifier = Expr->getKind();

  unsigned FixupKind = 0;
  switch (Modifier) {
  case AArch64MCExpr::VK_AARCH64_None:
    FixupKind = AArch64::fixup_a64_adr_prel_page;
    break;
  case AArch64MCExpr::VK_AARCH64_GOT:
    FixupKind = AArch64::fixup_a64_adr_prel_got_page;
    break;
  case AArch64MCExpr::VK_AARCH64_GOTTPREL:
    FixupKind = AArch64::fixup_a64_adr_gottprel_page;
    break;
  case AArch64MCExpr::VK_AARCH64_TLSDESC:
    FixupKind = AArch64::fixup_a64_tlsdesc_adr_page;
    break;
  default:
    llvm_unreachable("Unknown symbol reference kind for ADRP instruction");
  }

  return getAddressWithFixup(MO, FixupKind, Fixups);
}

unsigned
AArch64MCCodeEmitter::getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups) const {

  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Only immediate expected for shift");

  return ((32 - MO.getImm()) & 0x1f) | (31 - MO.getImm()) << 6;
}

unsigned
AArch64MCCodeEmitter::getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups) const {

  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Only immediate expected for shift");

  return ((64 - MO.getImm()) & 0x3f) | (63 - MO.getImm()) << 6;
}

unsigned AArch64MCCodeEmitter::getShiftRightImm8(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
  return 8 - MI.getOperand(Op).getImm();
}

unsigned AArch64MCCodeEmitter::getShiftRightImm16(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
  return 16 - MI.getOperand(Op).getImm();
}

unsigned AArch64MCCodeEmitter::getShiftRightImm32(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
  return 32 - MI.getOperand(Op).getImm();
}

unsigned AArch64MCCodeEmitter::getShiftRightImm64(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
  return 64 - MI.getOperand(Op).getImm();
}

unsigned AArch64MCCodeEmitter::getShiftLeftImm8(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
  return MI.getOperand(Op).getImm() - 8;
}

unsigned AArch64MCCodeEmitter::getShiftLeftImm16(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
  return MI.getOperand(Op).getImm() - 16;
}

unsigned AArch64MCCodeEmitter::getShiftLeftImm32(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
  return MI.getOperand(Op).getImm() - 32;
}

unsigned AArch64MCCodeEmitter::getShiftLeftImm64(
    const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const {
  return MI.getOperand(Op).getImm() - 64;
}

template<AArch64::Fixups fixupDesired> unsigned
AArch64MCCodeEmitter::getLabelOpValue(const MCInst &MI,
                                      unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  if (MO.isExpr())
    return getAddressWithFixup(MO, fixupDesired, Fixups);

  assert(MO.isImm());
  return MO.getImm();
}

unsigned
AArch64MCCodeEmitter::getLoadLitLabelOpValue(const MCInst &MI,
                                       unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  if (MO.isImm())
    return MO.getImm();

  assert(MO.isExpr());

  unsigned FixupKind;
  if (isa<AArch64MCExpr>(MO.getExpr())) {
    assert(cast<AArch64MCExpr>(MO.getExpr())->getKind()
               == AArch64MCExpr::VK_AARCH64_GOTTPREL
           && "Invalid symbol modifier for literal load");
    FixupKind = AArch64::fixup_a64_ld_gottprel_prel19;
  } else {
    FixupKind = AArch64::fixup_a64_ld_prel;
  }

  return getAddressWithFixup(MO, FixupKind, Fixups);
}

unsigned
AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI,
                                        const MCOperand &MO,
                                        SmallVectorImpl<MCFixup> &Fixups) const {
  if (MO.isReg()) {
    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());
  } else if (MO.isImm()) {
    return static_cast<unsigned>(MO.getImm());
  }

  llvm_unreachable("Unable to encode MCOperand!");
  return 0;
}

unsigned
AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups) const {
  const MCOperand &UImm16MO = MI.getOperand(OpIdx);
  const MCOperand &ShiftMO = MI.getOperand(OpIdx + 1);

  unsigned Result = static_cast<unsigned>(ShiftMO.getImm()) << 16;

  if (UImm16MO.isImm()) {
    Result |= UImm16MO.getImm();
    return Result;
  }

  const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
  AArch64::Fixups requestedFixup;
  switch (A64E->getKind()) {
  default: llvm_unreachable("unexpected expression modifier");
  case AArch64MCExpr::VK_AARCH64_ABS_G0:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g0; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g0_nc; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G1:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g1; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g1_nc; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G2:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g2; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g2_nc; break;
  case AArch64MCExpr::VK_AARCH64_ABS_G3:
    requestedFixup = AArch64::fixup_a64_movw_uabs_g3; break;
  case AArch64MCExpr::VK_AARCH64_SABS_G0:
    requestedFixup = AArch64::fixup_a64_movw_sabs_g0; break;
  case AArch64MCExpr::VK_AARCH64_SABS_G1:
    requestedFixup = AArch64::fixup_a64_movw_sabs_g1; break;
  case AArch64MCExpr::VK_AARCH64_SABS_G2:
    requestedFixup = AArch64::fixup_a64_movw_sabs_g2; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g2; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g1; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g1_nc; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g0; break;
  case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_dtprel_g0_nc; break;
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
    requestedFixup = AArch64::fixup_a64_movw_gottprel_g1; break;
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_gottprel_g0_nc; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G2:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g2; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G1:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g1; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g1_nc; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G0:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g0; break;
  case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
    requestedFixup = AArch64::fixup_a64_movw_tprel_g0_nc; break;
  }

  return Result | getAddressWithFixup(UImm16MO, requestedFixup, Fixups);
}

template<int hasRs, int hasRt2> unsigned
AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
                                            unsigned EncodedValue) const {
  if (!hasRs) EncodedValue |= 0x001F0000;
  if (!hasRt2) EncodedValue |= 0x00007C00;

  return EncodedValue;
}

unsigned
AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue) const {
  // If one of the signed fixup kinds is applied to a MOVZ instruction, the
  // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's
  // job to ensure that any bits possibly affected by this are 0. This means we
  // must zero out bit 30 (essentially emitting a MOVN).
  MCOperand UImm16MO = MI.getOperand(1);

  // Nothing to do if there's no fixup.
  if (UImm16MO.isImm())
    return EncodedValue;

  const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
  switch (A64E->getKind()) {
  case AArch64MCExpr::VK_AARCH64_SABS_G0:
  case AArch64MCExpr::VK_AARCH64_SABS_G1:
  case AArch64MCExpr::VK_AARCH64_SABS_G2:
  case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
  case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
  case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
  case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
  case AArch64MCExpr::VK_AARCH64_TPREL_G2:
  case AArch64MCExpr::VK_AARCH64_TPREL_G1:
  case AArch64MCExpr::VK_AARCH64_TPREL_G0:
    return EncodedValue & ~(1u << 30);
  default:
    // Nothing to do for an unsigned fixup.
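    // (For example, an unsigned modifier such as :abs_g0: can only ever
    // relocate to a MOVZ, so the MOVZ/MOVN selection bit may be left alone.)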
    return EncodedValue;
  }

  llvm_unreachable("Should have returned by now");
}

unsigned
AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
                                 unsigned EncodedValue) const {
  // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
  // (i.e. all bits 1) but is ignored by the processor.
  EncodedValue |= 0x1f << 10;
  return EncodedValue;
}

MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
                                                const MCRegisterInfo &MRI,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AArch64MCCodeEmitter(Ctx);
}

void AArch64MCCodeEmitter::
EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                  SmallVectorImpl<MCFixup> &Fixups) const {
  if (MI.getOpcode() == AArch64::TLSDESCCALL) {
    // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
    // following (BLR) instruction. It doesn't emit any code itself so it
    // doesn't go through the normal TableGenerated channels.
    MCFixupKind Fixup = MCFixupKind(AArch64::fixup_a64_tlsdesc_call);
    const MCExpr *Expr;
    Expr = AArch64MCExpr::CreateTLSDesc(MI.getOperand(0).getExpr(), Ctx);
    Fixups.push_back(MCFixup::Create(0, Expr, Fixup));
    return;
  }

  uint32_t Binary = getBinaryCodeForInstr(MI, Fixups);

  EmitInstruction(Binary, OS);
}


#include "AArch64GenMCCodeEmitter.inc"
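// A worked example of the byte order produced by EmitInstruction above: RET
// encodes as the 32-bit word 0xD65F03C0, which is written to the stream
// least-significant byte first, i.e. as the byte sequence C0 03 5F D6.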