1// aarch64.cc -- aarch64 target support for gold. 2 3// Copyright (C) 2014-2017 Free Software Foundation, Inc. 4// Written by Jing Yu <jingyu@google.com> and Han Shen <shenhan@google.com>. 5 6// This file is part of gold. 7 8// This program is free software; you can redistribute it and/or modify 9// it under the terms of the GNU General Public License as published by 10// the Free Software Foundation; either version 3 of the License, or 11// (at your option) any later version. 12 13// This program is distributed in the hope that it will be useful, 14// but WITHOUT ANY WARRANTY; without even the implied warranty of 15// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16// GNU General Public License for more details. 17 18// You should have received a copy of the GNU General Public License 19// along with this program; if not, write to the Free Software 20// Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, 21// MA 02110-1301, USA. 22 23#include "gold.h" 24 25#include <cstring> 26#include <map> 27#include <set> 28 29#include "elfcpp.h" 30#include "dwarf.h" 31#include "parameters.h" 32#include "reloc.h" 33#include "aarch64.h" 34#include "object.h" 35#include "symtab.h" 36#include "layout.h" 37#include "output.h" 38#include "copy-relocs.h" 39#include "target.h" 40#include "target-reloc.h" 41#include "target-select.h" 42#include "tls.h" 43#include "freebsd.h" 44#include "nacl.h" 45#include "gc.h" 46#include "icf.h" 47#include "aarch64-reloc-property.h" 48 49// The first three .got.plt entries are reserved. 50const int32_t AARCH64_GOTPLT_RESERVE_COUNT = 3; 51 52 53namespace 54{ 55 56using namespace gold; 57 58template<int size, bool big_endian> 59class Output_data_plt_aarch64; 60 61template<int size, bool big_endian> 62class Output_data_plt_aarch64_standard; 63 64template<int size, bool big_endian> 65class Target_aarch64; 66 67template<int size, bool big_endian> 68class AArch64_relocate_functions; 69 70// Utility class dealing with insns. 
This is ported from macros in 71// bfd/elfnn-aarch64.cc, but wrapped inside a class as static members. This 72// class is used in erratum sequence scanning. 73 74template<bool big_endian> 75class AArch64_insn_utilities 76{ 77public: 78 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype; 79 80 static const int BYTES_PER_INSN; 81 82 // Zero register encoding - 31. 83 static const unsigned int AARCH64_ZR; 84 85 static unsigned int 86 aarch64_bit(Insntype insn, int pos) 87 { return ((1 << pos) & insn) >> pos; } 88 89 static unsigned int 90 aarch64_bits(Insntype insn, int pos, int l) 91 { return (insn >> pos) & ((1 << l) - 1); } 92 93 // Get the encoding field "op31" of 3-source data processing insns. "op31" is 94 // the name defined in armv8 insn manual C3.5.9. 95 static unsigned int 96 aarch64_op31(Insntype insn) 97 { return aarch64_bits(insn, 21, 3); } 98 99 // Get the encoding field "ra" of 3-source data processing insns. "ra" is the 100 // third source register. See armv8 insn manual C3.5.9. 101 static unsigned int 102 aarch64_ra(Insntype insn) 103 { return aarch64_bits(insn, 10, 5); } 104 105 static bool 106 is_adr(const Insntype insn) 107 { return (insn & 0x9F000000) == 0x10000000; } 108 109 static bool 110 is_adrp(const Insntype insn) 111 { return (insn & 0x9F000000) == 0x90000000; } 112 113 static unsigned int 114 aarch64_rm(const Insntype insn) 115 { return aarch64_bits(insn, 16, 5); } 116 117 static unsigned int 118 aarch64_rn(const Insntype insn) 119 { return aarch64_bits(insn, 5, 5); } 120 121 static unsigned int 122 aarch64_rd(const Insntype insn) 123 { return aarch64_bits(insn, 0, 5); } 124 125 static unsigned int 126 aarch64_rt(const Insntype insn) 127 { return aarch64_bits(insn, 0, 5); } 128 129 static unsigned int 130 aarch64_rt2(const Insntype insn) 131 { return aarch64_bits(insn, 10, 5); } 132 133 // Encode imm21 into adr. Signed imm21 is in the range of [-1M, 1M). 
134 static Insntype 135 aarch64_adr_encode_imm(Insntype adr, int imm21) 136 { 137 gold_assert(is_adr(adr)); 138 gold_assert(-(1 << 20) <= imm21 && imm21 < (1 << 20)); 139 const int mask19 = (1 << 19) - 1; 140 const int mask2 = 3; 141 adr &= ~((mask19 << 5) | (mask2 << 29)); 142 adr |= ((imm21 & mask2) << 29) | (((imm21 >> 2) & mask19) << 5); 143 return adr; 144 } 145 146 // Retrieve encoded adrp 33-bit signed imm value. This value is obtained by 147 // 21-bit signed imm encoded in the insn multiplied by 4k (page size) and 148 // 64-bit sign-extended, resulting in [-4G, 4G) with 12-lsb being 0. 149 static int64_t 150 aarch64_adrp_decode_imm(const Insntype adrp) 151 { 152 const int mask19 = (1 << 19) - 1; 153 const int mask2 = 3; 154 gold_assert(is_adrp(adrp)); 155 // 21-bit imm encoded in adrp. 156 uint64_t imm = ((adrp >> 29) & mask2) | (((adrp >> 5) & mask19) << 2); 157 // Retrieve msb of 21-bit-signed imm for sign extension. 158 uint64_t msbt = (imm >> 20) & 1; 159 // Real value is imm multiplied by 4k. Value now has 33-bit information. 160 int64_t value = imm << 12; 161 // Sign extend to 64-bit by repeating msbt 31 (64-33) times and merge it 162 // with value. 163 return ((((uint64_t)(1) << 32) - msbt) << 33) | value; 164 } 165 166 static bool 167 aarch64_b(const Insntype insn) 168 { return (insn & 0xFC000000) == 0x14000000; } 169 170 static bool 171 aarch64_bl(const Insntype insn) 172 { return (insn & 0xFC000000) == 0x94000000; } 173 174 static bool 175 aarch64_blr(const Insntype insn) 176 { return (insn & 0xFFFFFC1F) == 0xD63F0000; } 177 178 static bool 179 aarch64_br(const Insntype insn) 180 { return (insn & 0xFFFFFC1F) == 0xD61F0000; } 181 182 // All ld/st ops. See C4-182 of the ARM ARM. The encoding space for 183 // LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM cover prefetch ops. 
184 static bool 185 aarch64_ld(Insntype insn) { return aarch64_bit(insn, 22) == 1; } 186 187 static bool 188 aarch64_ldst(Insntype insn) 189 { return (insn & 0x0a000000) == 0x08000000; } 190 191 static bool 192 aarch64_ldst_ex(Insntype insn) 193 { return (insn & 0x3f000000) == 0x08000000; } 194 195 static bool 196 aarch64_ldst_pcrel(Insntype insn) 197 { return (insn & 0x3b000000) == 0x18000000; } 198 199 static bool 200 aarch64_ldst_nap(Insntype insn) 201 { return (insn & 0x3b800000) == 0x28000000; } 202 203 static bool 204 aarch64_ldstp_pi(Insntype insn) 205 { return (insn & 0x3b800000) == 0x28800000; } 206 207 static bool 208 aarch64_ldstp_o(Insntype insn) 209 { return (insn & 0x3b800000) == 0x29000000; } 210 211 static bool 212 aarch64_ldstp_pre(Insntype insn) 213 { return (insn & 0x3b800000) == 0x29800000; } 214 215 static bool 216 aarch64_ldst_ui(Insntype insn) 217 { return (insn & 0x3b200c00) == 0x38000000; } 218 219 static bool 220 aarch64_ldst_piimm(Insntype insn) 221 { return (insn & 0x3b200c00) == 0x38000400; } 222 223 static bool 224 aarch64_ldst_u(Insntype insn) 225 { return (insn & 0x3b200c00) == 0x38000800; } 226 227 static bool 228 aarch64_ldst_preimm(Insntype insn) 229 { return (insn & 0x3b200c00) == 0x38000c00; } 230 231 static bool 232 aarch64_ldst_ro(Insntype insn) 233 { return (insn & 0x3b200c00) == 0x38200800; } 234 235 static bool 236 aarch64_ldst_uimm(Insntype insn) 237 { return (insn & 0x3b000000) == 0x39000000; } 238 239 static bool 240 aarch64_ldst_simd_m(Insntype insn) 241 { return (insn & 0xbfbf0000) == 0x0c000000; } 242 243 static bool 244 aarch64_ldst_simd_m_pi(Insntype insn) 245 { return (insn & 0xbfa00000) == 0x0c800000; } 246 247 static bool 248 aarch64_ldst_simd_s(Insntype insn) 249 { return (insn & 0xbf9f0000) == 0x0d000000; } 250 251 static bool 252 aarch64_ldst_simd_s_pi(Insntype insn) 253 { return (insn & 0xbf800000) == 0x0d800000; } 254 255 // Classify an INSN if it is indeed a load/store. 
Return true if INSN is a 256 // LD/ST instruction otherwise return false. For scalar LD/ST instructions 257 // PAIR is FALSE, RT is returned and RT2 is set equal to RT. For LD/ST pair 258 // instructions PAIR is TRUE, RT and RT2 are returned. 259 static bool 260 aarch64_mem_op_p(Insntype insn, unsigned int *rt, unsigned int *rt2, 261 bool *pair, bool *load) 262 { 263 uint32_t opcode; 264 unsigned int r; 265 uint32_t opc = 0; 266 uint32_t v = 0; 267 uint32_t opc_v = 0; 268 269 /* Bail out quickly if INSN doesn't fall into the the load-store 270 encoding space. */ 271 if (!aarch64_ldst (insn)) 272 return false; 273 274 *pair = false; 275 *load = false; 276 if (aarch64_ldst_ex (insn)) 277 { 278 *rt = aarch64_rt (insn); 279 *rt2 = *rt; 280 if (aarch64_bit (insn, 21) == 1) 281 { 282 *pair = true; 283 *rt2 = aarch64_rt2 (insn); 284 } 285 *load = aarch64_ld (insn); 286 return true; 287 } 288 else if (aarch64_ldst_nap (insn) 289 || aarch64_ldstp_pi (insn) 290 || aarch64_ldstp_o (insn) 291 || aarch64_ldstp_pre (insn)) 292 { 293 *pair = true; 294 *rt = aarch64_rt (insn); 295 *rt2 = aarch64_rt2 (insn); 296 *load = aarch64_ld (insn); 297 return true; 298 } 299 else if (aarch64_ldst_pcrel (insn) 300 || aarch64_ldst_ui (insn) 301 || aarch64_ldst_piimm (insn) 302 || aarch64_ldst_u (insn) 303 || aarch64_ldst_preimm (insn) 304 || aarch64_ldst_ro (insn) 305 || aarch64_ldst_uimm (insn)) 306 { 307 *rt = aarch64_rt (insn); 308 *rt2 = *rt; 309 if (aarch64_ldst_pcrel (insn)) 310 *load = true; 311 opc = aarch64_bits (insn, 22, 2); 312 v = aarch64_bit (insn, 26); 313 opc_v = opc | (v << 2); 314 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3 315 || opc_v == 5 || opc_v == 7); 316 return true; 317 } 318 else if (aarch64_ldst_simd_m (insn) 319 || aarch64_ldst_simd_m_pi (insn)) 320 { 321 *rt = aarch64_rt (insn); 322 *load = aarch64_bit (insn, 22); 323 opcode = (insn >> 12) & 0xf; 324 switch (opcode) 325 { 326 case 0: 327 case 2: 328 *rt2 = *rt + 3; 329 break; 330 331 case 4: 332 case 6: 333 
*rt2 = *rt + 2; 334 break; 335 336 case 7: 337 *rt2 = *rt; 338 break; 339 340 case 8: 341 case 10: 342 *rt2 = *rt + 1; 343 break; 344 345 default: 346 return false; 347 } 348 return true; 349 } 350 else if (aarch64_ldst_simd_s (insn) 351 || aarch64_ldst_simd_s_pi (insn)) 352 { 353 *rt = aarch64_rt (insn); 354 r = (insn >> 21) & 1; 355 *load = aarch64_bit (insn, 22); 356 opcode = (insn >> 13) & 0x7; 357 switch (opcode) 358 { 359 case 0: 360 case 2: 361 case 4: 362 *rt2 = *rt + r; 363 break; 364 365 case 1: 366 case 3: 367 case 5: 368 *rt2 = *rt + (r == 0 ? 2 : 3); 369 break; 370 371 case 6: 372 *rt2 = *rt + r; 373 break; 374 375 case 7: 376 *rt2 = *rt + (r == 0 ? 2 : 3); 377 break; 378 379 default: 380 return false; 381 } 382 return true; 383 } 384 return false; 385 } // End of "aarch64_mem_op_p". 386 387 // Return true if INSN is mac insn. 388 static bool 389 aarch64_mac(Insntype insn) 390 { return (insn & 0xff000000) == 0x9b000000; } 391 392 // Return true if INSN is multiply-accumulate. 393 // (This is similar to implementaton in elfnn-aarch64.c.) 394 static bool 395 aarch64_mlxl(Insntype insn) 396 { 397 uint32_t op31 = aarch64_op31(insn); 398 if (aarch64_mac(insn) 399 && (op31 == 0 || op31 == 1 || op31 == 5) 400 /* Exclude MUL instructions which are encoded as a multiple-accumulate 401 with RA = XZR. */ 402 && aarch64_ra(insn) != AARCH64_ZR) 403 { 404 return true; 405 } 406 return false; 407 } 408}; // End of "AArch64_insn_utilities". 409 410 411// Insn length in byte. 412 413template<bool big_endian> 414const int AArch64_insn_utilities<big_endian>::BYTES_PER_INSN = 4; 415 416 417// Zero register encoding - 31. 418 419template<bool big_endian> 420const unsigned int AArch64_insn_utilities<big_endian>::AARCH64_ZR = 0x1f; 421 422 423// Output_data_got_aarch64 class. 

template<int size, bool big_endian>
class Output_data_got_aarch64 : public Output_data_got<size, big_endian>
{
 public:
  typedef typename elfcpp::Elf_types<size>::Elf_Addr Valtype;

  // SYMTAB and LAYOUT are kept so do_write can resolve forwarders and find
  // the .dynamic section / TLS segment.
  Output_data_got_aarch64(Symbol_table* symtab, Layout* layout)
    : Output_data_got<size, big_endian>(),
      symbol_table_(symtab), layout_(layout)
  { }

  // Add a static entry for the GOT entry at OFFSET.  GSYM is a global
  // symbol and R_TYPE is the code of a dynamic relocation that needs to be
  // applied in a static link.
  void
  add_static_reloc(unsigned int got_offset, unsigned int r_type, Symbol* gsym)
  { this->static_relocs_.push_back(Static_reloc(got_offset, r_type, gsym)); }


  // Add a static reloc for the GOT entry at OFFSET.  RELOBJ is an object
  // defining a local symbol with INDEX.  R_TYPE is the code of a dynamic
  // relocation that needs to be applied in a static link.
  void
  add_static_reloc(unsigned int got_offset, unsigned int r_type,
		   Sized_relobj_file<size, big_endian>* relobj,
		   unsigned int index)
  {
    this->static_relocs_.push_back(Static_reloc(got_offset, r_type, relobj,
						index));
  }


 protected:
  // Write out the GOT table, then apply any recorded static TLS relocations
  // directly to the output view (static links have no dynamic linker to do
  // it at runtime).
  void
  do_write(Output_file* of) {
    // The first entry in the GOT is the address of the .dynamic section.
    gold_assert(this->data_size() >= size / 8);
    Output_section* dynamic = this->layout_->dynamic_section();
    Valtype dynamic_addr = dynamic == NULL ? 0 : dynamic->address();
    this->replace_constant(0, dynamic_addr);
    Output_data_got<size, big_endian>::do_write(of);

    // Handling static relocs
    if (this->static_relocs_.empty())
      return;

    typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;

    gold_assert(parameters->doing_static_link());
    const off_t offset = this->offset();
    const section_size_type oview_size =
      convert_to_section_size_type(this->data_size());
    unsigned char* const oview = of->get_output_view(offset, oview_size);

    Output_segment* tls_segment = this->layout_->tls_segment();
    gold_assert(tls_segment != NULL);

    // TP-relative values are offsets past the (aligned) thread control block.
    AArch64_address aligned_tcb_address =
      align_address(Target_aarch64<size, big_endian>::TCB_SIZE,
		    tls_segment->maximum_alignment());

    for (size_t i = 0; i < this->static_relocs_.size(); ++i)
      {
	Static_reloc& reloc(this->static_relocs_[i]);
	AArch64_address value;

	if (!reloc.symbol_is_global())
	  {
	    Sized_relobj_file<size, big_endian>* object = reloc.relobj();
	    const Symbol_value<size>* psymval =
	      reloc.relobj()->local_symbol(reloc.index());

	    // We are doing static linking.  Issue an error and skip this
	    // relocation if the symbol is undefined or in a discarded_section.
	    bool is_ordinary;
	    unsigned int shndx = psymval->input_shndx(&is_ordinary);
	    if ((shndx == elfcpp::SHN_UNDEF)
		|| (is_ordinary
		    && shndx != elfcpp::SHN_UNDEF
		    && !object->is_section_included(shndx)
		    && !this->symbol_table_->is_section_folded(object, shndx)))
	      {
		gold_error(_("undefined or discarded local symbol %u from "
			     " object %s in GOT"),
			   reloc.index(), reloc.relobj()->name().c_str());
		continue;
	      }
	    value = psymval->value(object, 0);
	  }
	else
	  {
	    const Symbol* gsym = reloc.symbol();
	    gold_assert(gsym != NULL);
	    if (gsym->is_forwarder())
	      gsym = this->symbol_table_->resolve_forwards(gsym);

	    // We are doing static linking.  Issue an error and skip this
	    // relocation if the symbol is undefined or in a discarded_section
	    // unless it is a weakly_undefined symbol.
	    if ((gsym->is_defined_in_discarded_section()
		 || gsym->is_undefined())
		&& !gsym->is_weak_undefined())
	      {
		gold_error(_("undefined or discarded symbol %s in GOT"),
			   gsym->name());
		continue;
	      }

	    // Weak undefined symbols resolve to 0 in a static link.
	    if (!gsym->is_weak_undefined())
	      {
		const Sized_symbol<size>* sym =
		  static_cast<const Sized_symbol<size>*>(gsym);
		value = sym->value();
	      }
	    else
	      value = 0;
	  }

	unsigned got_offset = reloc.got_offset();
	gold_assert(got_offset < oview_size);

	// NOTE: this local Valtype (sized "size") intentionally shadows the
	// class-level Valtype typedef.
	typedef typename elfcpp::Swap<size, big_endian>::Valtype Valtype;
	Valtype* wv = reinterpret_cast<Valtype*>(oview + got_offset);
	Valtype x;
	switch (reloc.r_type())
	  {
	  case elfcpp::R_AARCH64_TLS_DTPREL64:
	    x = value;
	    break;
	  case elfcpp::R_AARCH64_TLS_TPREL64:
	    x = value + aligned_tcb_address;
	    break;
	  default:
	    gold_unreachable();
	  }
	elfcpp::Swap<size, big_endian>::writeval(wv, x);
      }

    of->write_output_view(offset, oview_size, oview);
  }

 private:
  // Symbol table of the output object.
  Symbol_table* symbol_table_;
  // A pointer to the Layout class, so that we can find the .dynamic
  // section when we write out the GOT section.
  Layout* layout_;

  // This class represent dynamic relocations that need to be applied by
  // gold because we are using TLS relocations in a static link.
  class Static_reloc
  {
   public:
    // Relocation against global symbol GSYM.
    Static_reloc(unsigned int got_offset, unsigned int r_type, Symbol* gsym)
      : got_offset_(got_offset), r_type_(r_type), symbol_is_global_(true)
    { this->u_.global.symbol = gsym; }

    // Relocation against local symbol INDEX of RELOBJ.
    Static_reloc(unsigned int got_offset, unsigned int r_type,
		 Sized_relobj_file<size, big_endian>* relobj,
		 unsigned int index)
      : got_offset_(got_offset), r_type_(r_type), symbol_is_global_(false)
    {
      this->u_.local.relobj = relobj;
      this->u_.local.index = index;
    }

    // Return the GOT offset.
    unsigned int
    got_offset() const
    { return this->got_offset_; }

    // Relocation type.
    unsigned int
    r_type() const
    { return this->r_type_; }

    // Whether the symbol is global or not.
    bool
    symbol_is_global() const
    { return this->symbol_is_global_; }

    // For a relocation against a global symbol, the global symbol.
    Symbol*
    symbol() const
    {
      gold_assert(this->symbol_is_global_);
      return this->u_.global.symbol;
    }

    // For a relocation against a local symbol, the defining object.
    Sized_relobj_file<size, big_endian>*
    relobj() const
    {
      gold_assert(!this->symbol_is_global_);
      return this->u_.local.relobj;
    }

    // For a relocation against a local symbol, the local symbol index.
    unsigned int
    index() const
    {
      gold_assert(!this->symbol_is_global_);
      return this->u_.local.index;
    }

   private:
    // GOT offset of the entry to which this relocation is applied.
    unsigned int got_offset_;
    // Type of relocation.
    unsigned int r_type_;
    // Whether this relocation is against a global symbol.
    bool symbol_is_global_;
    // A global or local symbol.
    union
    {
      struct
      {
	// For a global symbol, the symbol itself.
	Symbol* symbol;
      } global;
      struct
      {
	// For a local symbol, the object defining the symbol.
	Sized_relobj_file<size, big_endian>* relobj;
	// For a local symbol, the symbol index.
	unsigned int index;
      } local;
    } u_;
  }; // End of inner class Static_reloc

  std::vector<Static_reloc> static_relocs_;
}; // End of Output_data_got_aarch64


template<int size, bool big_endian>
class AArch64_input_section;


template<int size, bool big_endian>
class AArch64_output_section;


template<int size, bool big_endian>
class AArch64_relobj;


// Stub type enum constants.

enum
{
  ST_NONE = 0,

  // Using adrp/add pair, 4 insns (including alignment) without mem access,
  // the fastest stub.  This has a limited jump distance, which is tested by
  // aarch64_valid_for_adrp_p.
  ST_ADRP_BRANCH = 1,

  // Using ldr-absolute-address/br-register, 4 insns with 1 mem access,
  // unlimited in jump distance.
  ST_LONG_BRANCH_ABS = 2,

  // Using ldr/calculate-pcrel/jump, 8 insns (including alignment) with 1
  // mem access, slowest one.  Only used in position independent executables.
  ST_LONG_BRANCH_PCREL = 3,

  // Stub for erratum 843419 handling.
  ST_E_843419 = 4,

  // Stub for erratum 835769 handling.
  ST_E_835769 = 5,

  // Number of total stub types.
  ST_NUMBER = 6
};


// Struct that wraps insns for a particular stub.  All stub templates are
// created/initialized as constants by Stub_template_repertoire.

template<bool big_endian>
struct Stub_template
{
  // Pointer to the insn words of the template.
  const typename AArch64_insn_utilities<big_endian>::Insntype* insns;
  // Number of insns in the template.
  const int insn_num;
};


// Simple singleton class that creates/initializes/stores all types of stub
// templates.

template<bool big_endian>
class Stub_template_repertoire
{
public:
  typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;

  // Single static method to get stub template for a given stub type.
  // TYPE must be one of the ST_* enum values; the returned template is owned
  // by the function-local singleton and lives for the whole link.
  static const Stub_template<big_endian>*
  get_stub_template(int type)
  {
    static Stub_template_repertoire<big_endian> singleton;
    return singleton.stub_templates_[type];
  }

private:
  // Constructor - creates/initializes all stub templates.
  Stub_template_repertoire();
  ~Stub_template_repertoire()
  { }

  // Disallowing copy ctor and copy assignment operator.
  Stub_template_repertoire(Stub_template_repertoire&);
  Stub_template_repertoire& operator=(Stub_template_repertoire&);

  // Data that stores all insn templates, indexed by ST_* stub type.
  const Stub_template<big_endian>* stub_templates_[ST_NUMBER];
}; // End of "class Stub_template_repertoire".


// Constructor - creates/initializes all stub templates.

template<bool big_endian>
Stub_template_repertoire<big_endian>::Stub_template_repertoire()
{
  // Insn array definitions.  Insn words whose immediate fields are patched
  // at stub-relocation time are noted with the relocation name below them.
  const static Insntype ST_NONE_INSNS[] = {};

  const static Insntype ST_ADRP_BRANCH_INSNS[] =
    {
      0x90000010,	/* adrp ip0, X */
			/* ADR_PREL_PG_HI21(X) */
      0x91000210,	/* add ip0, ip0, :lo12:X */
			/* ADD_ABS_LO12_NC(X) */
      0xd61f0200,	/* br ip0 */
      0x00000000,	/* alignment padding */
    };

  const static Insntype ST_LONG_BRANCH_ABS_INSNS[] =
    {
      0x58000050,	/* ldr ip0, 0x8 */
      0xd61f0200,	/* br ip0 */
      0x00000000,	/* address field */
      0x00000000,	/* address fields */
    };

  const static Insntype ST_LONG_BRANCH_PCREL_INSNS[] =
    {
      0x58000090,	/* ldr ip0, 0x10 */
      0x10000011,	/* adr ip1, #0 */
      0x8b110210,	/* add ip0, ip0, ip1 */
      0xd61f0200,	/* br ip0 */
      0x00000000,	/* address field */
      0x00000000,	/* address field */
      0x00000000,	/* alignment padding */
      0x00000000,	/* alignment padding */
    };

  const static Insntype ST_E_843419_INSNS[] =
    {
      0x00000000,	/* Placeholder for erratum insn. */
      0x14000000,	/* b <label> */
    };

  // ST_E_835769 has the same stub template as ST_E_843419.
  const static Insntype* ST_E_835769_INSNS = ST_E_843419_INSNS;

  // Build a Stub_template for stub type T from T##_INSNS and register it.
#define install_insn_template(T) \
  const static Stub_template<big_endian> template_##T = {  \
    T##_INSNS, sizeof(T##_INSNS) / sizeof(T##_INSNS[0]) }; \
  this->stub_templates_[T] = &template_##T

  install_insn_template(ST_NONE);
  install_insn_template(ST_ADRP_BRANCH);
  install_insn_template(ST_LONG_BRANCH_ABS);
  install_insn_template(ST_LONG_BRANCH_PCREL);
  install_insn_template(ST_E_843419);
  install_insn_template(ST_E_835769);

#undef install_insn_template
}


// Base class for stubs.

template<int size, bool big_endian>
class Stub_base
{
public:
  typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
  typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;

  // Sentinel for "destination address not yet set".
  static const AArch64_address invalid_address =
    static_cast<AArch64_address>(-1);

  // Sentinel for "stub offset not yet set".
  static const section_offset_type invalid_offset =
    static_cast<section_offset_type>(-1);

  Stub_base(int type)
    : destination_address_(invalid_address),
      offset_(invalid_offset),
      type_(type)
  {}

  ~Stub_base()
  {}

  // Get stub type.
  int
  type() const
  { return this->type_; }

  // Get stub template that provides stub insn information.
  const Stub_template<big_endian>*
  stub_template() const
  {
    return Stub_template_repertoire<big_endian>::
      get_stub_template(this->type());
  }

  // Get destination address.
  AArch64_address
  destination_address() const
  {
    gold_assert(this->destination_address_ != this->invalid_address);
    return this->destination_address_;
  }

  // Set destination address.
  void
  set_destination_address(AArch64_address address)
  {
    gold_assert(address != this->invalid_address);
    this->destination_address_ = address;
  }

  // Reset the destination address.
  void
  reset_destination_address()
  { this->destination_address_ = this->invalid_address; }

  // Get offset of code stub.  For Reloc_stub, it is the offset from the
  // beginning of its containing stub table; for Erratum_stub, it is the offset
  // from the end of reloc_stubs.
  section_offset_type
  offset() const
  {
    gold_assert(this->offset_ != this->invalid_offset);
    return this->offset_;
  }

  // Set stub offset.
  void
  set_offset(section_offset_type offset)
  { this->offset_ = offset; }

  // Return the stub insn.
  const Insntype*
  insns() const
  { return this->stub_template()->insns; }

  // Return num of stub insns.
  unsigned int
  insn_num() const
  { return this->stub_template()->insn_num; }

  // Get size of the stub in bytes.
  int
  stub_size() const
  {
    return this->insn_num() *
      AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
  }

  // Write stub to output file.
  void
  write(unsigned char* view, section_size_type view_size)
  { this->do_write(view, view_size); }

protected:
  // Abstract method to be implemented by sub-classes.
  virtual void
  do_write(unsigned char*, section_size_type) = 0;

private:
  // The last insn of a stub is a jump to destination insn.  This field records
  // the destination address.
  AArch64_address destination_address_;
  // The stub offset.  Note this has different interpretations between a
  // Reloc_stub and an Erratum_stub.  For Reloc_stub this is the offset from the
  // beginning of the containing stub_table, whereas for Erratum_stub, this is
  // the offset from the end of reloc_stubs.
  section_offset_type offset_;
  // Stub type.
  const int type_;
}; // End of "Stub_base".


// Erratum stub class.  An erratum stub differs from a reloc stub in that for
// each erratum occurrence, we generate an erratum stub.  We never share erratum
// stubs, whereas for reloc stubs, different branch insns share a single reloc
// stub as long as the branch targets are the same.  (More to the point, reloc
// stubs can be shared because they're used to reach a specific target, whereas
// erratum stubs branch back to the original control flow.)

template<int size, bool big_endian>
class Erratum_stub : public Stub_base<size, big_endian>
{
public:
  typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
  typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
  typedef AArch64_insn_utilities<big_endian> Insn_utilities;
  typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;

  // Required address alignment for an erratum stub.
  static const int STUB_ADDR_ALIGN;

  // Sentinel for "erratum insn not yet recorded".
  static const Insntype invalid_insn = static_cast<Insntype>(-1);

  // RELOBJ/SHNDX/SH_OFFSET locate the erratum occurrence in the input.
  Erratum_stub(The_aarch64_relobj* relobj, int type,
	       unsigned shndx, unsigned int sh_offset)
    : Stub_base<size, big_endian>(type), relobj_(relobj),
      shndx_(shndx), sh_offset_(sh_offset),
      erratum_insn_(invalid_insn),
      erratum_address_(this->invalid_address)
  {}

  ~Erratum_stub() {}

  // Return the object that contains the erratum.
  The_aarch64_relobj*
  relobj()
  { return this->relobj_; }

  // Get section index of the erratum.
  unsigned int
  shndx() const
  { return this->shndx_; }

  // Get section offset of the erratum.
  unsigned int
  sh_offset() const
  { return this->sh_offset_; }

  // Get the erratum insn.  This is the insn located at erratum_insn_address.
  Insntype
  erratum_insn() const
  {
    gold_assert(this->erratum_insn_ != this->invalid_insn);
    return this->erratum_insn_;
  }

  // Set the insn that the erratum happens to.
  void
  set_erratum_insn(Insntype insn)
  { this->erratum_insn_ = insn; }

  // For 843419, the erratum insn is ld/st xt, [xn, #uimm], which may be a
  // relocation spot, in this case, the erratum_insn_ recorded at scanning phase
  // is no longer the one we want to write out to the stub, update erratum_insn_
  // with relocated version.  Also note that in this case xn must not be "PC", so
  // it is safe to move the erratum insn from the origin place to the stub.  For
  // 835769, the erratum insn is multiply-accumulate insn, which could not be a
  // relocation spot (assertion added though).
  void
  update_erratum_insn(Insntype insn)
  {
    gold_assert(this->erratum_insn_ != this->invalid_insn);
    switch (this->type())
      {
      case ST_E_843419:
	// The relocated insn must still be the same ld/st on the same
	// registers; only the immediate may have changed.
	gold_assert(Insn_utilities::aarch64_ldst_uimm(insn));
	gold_assert(Insn_utilities::aarch64_ldst_uimm(this->erratum_insn()));
	gold_assert(Insn_utilities::aarch64_rd(insn) ==
		    Insn_utilities::aarch64_rd(this->erratum_insn()));
	gold_assert(Insn_utilities::aarch64_rn(insn) ==
		    Insn_utilities::aarch64_rn(this->erratum_insn()));
	// Update plain ld/st insn with relocated insn.
	this->erratum_insn_ = insn;
	break;
      case ST_E_835769:
	gold_assert(insn == this->erratum_insn());
	break;
      default:
	gold_unreachable();
      }
  }


  // Return the address where an erratum must be done.
  AArch64_address
  erratum_address() const
  {
    gold_assert(this->erratum_address_ != this->invalid_address);
    return this->erratum_address_;
  }

  // Set the address where an erratum must be done.
  void
  set_erratum_address(AArch64_address addr)
  { this->erratum_address_ = addr; }

  // Comparator used to group Erratum_stubs in a set by (obj, shndx,
  // sh_offset).  We do not include 'type' in the calculation, because there is
  // at most one stub type at (obj, shndx, sh_offset).
  bool
  operator<(const Erratum_stub<size, big_endian>& k) const
  {
    if (this == &k)
      return false;
    // We group stubs by relobj.
    if (this->relobj_ != k.relobj_)
      return this->relobj_ < k.relobj_;
    // Then by section index.
    if (this->shndx_ != k.shndx_)
      return this->shndx_ < k.shndx_;
    // Lastly by section offset.
    return this->sh_offset_ < k.sh_offset_;
  }

  // Mark this stub as dropped (e.g. its erratum sequence turned out not to
  // need fixing).  Must not already be invalidated.
  void
  invalidate_erratum_stub()
  {
    gold_assert(this->relobj_ != NULL);
    this->relobj_ = NULL;
  }

  bool
  is_invalidated_erratum_stub()
  { return this->relobj_ == NULL; }

protected:
  virtual void
  do_write(unsigned char*, section_size_type);

private:
  // The object that needs to be fixed.
  The_aarch64_relobj* relobj_;
  // The shndx in the object that needs to be fixed.
  const unsigned int shndx_;
  // The section offset in the object that needs to be fixed.
  const unsigned int sh_offset_;
  // The insn to be fixed.
  Insntype erratum_insn_;
  // The address of the above insn.
  AArch64_address erratum_address_;
}; // End of "Erratum_stub".


// Erratum sub class to wrap additional info needed by 843419.  In fixing this
// erratum, we may choose to replace 'adrp' with 'adr', in this case, we need
// adrp's code position (two or three insns before erratum insn itself).

template<int size, bool big_endian>
class E843419_stub : public Erratum_stub<size, big_endian>
{
public:
  typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;

  // ADRP_SH_OFFSET is the section offset of the adrp that begins the
  // erratum sequence; SHNDX/SH_OFFSET locate the erratum insn itself.
  E843419_stub(AArch64_relobj<size, big_endian>* relobj,
	       unsigned int shndx, unsigned int sh_offset,
	       unsigned int adrp_sh_offset)
    : Erratum_stub<size, big_endian>(relobj, ST_E_843419, shndx, sh_offset),
      adrp_sh_offset_(adrp_sh_offset)
  {}

  unsigned int
  adrp_sh_offset() const
  { return this->adrp_sh_offset_; }

private:
  // Section offset of "adrp".  (We do not need an "adrp_shndx_" field, because
  // we can obtain it from its parent.)
  const unsigned int adrp_sh_offset_;
};


template<int size, bool big_endian>
const int Erratum_stub<size, big_endian>::STUB_ADDR_ALIGN = 4;

// Comparator used in set definition.
template<int size, bool big_endian>
struct Erratum_stub_less
{
  // Order by the pointed-to stubs' (relobj, shndx, sh_offset) ordering.
  bool
  operator()(const Erratum_stub<size, big_endian>* s1,
	     const Erratum_stub<size, big_endian>* s2) const
  { return *s1 < *s2; }
};

// Erratum_stub implementation for writing stub to output file.

template<int size, bool big_endian>
void
Erratum_stub<size, big_endian>::do_write(unsigned char* view, section_size_type)
{
  typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
  const Insntype* insns = this->insns();
  uint32_t num_insns = this->insn_num();
  Insntype* ip = reinterpret_cast<Insntype*>(view);
  // For current implemented erratum 843419 and 835769, the first insn in the
  // stub is always a copy of the problematic insn (in 843419, the mem access
  // insn, in 835769, the mac insn), followed by a jump-back.
  elfcpp::Swap<32, big_endian>::writeval(ip, this->erratum_insn());
  for (uint32_t i = 1; i < num_insns; ++i)
    elfcpp::Swap<32, big_endian>::writeval(ip + i, insns[i]);
}


// Reloc stub class.

template<int size, bool big_endian>
class Reloc_stub : public Stub_base<size, big_endian>
{
 public:
  typedef Reloc_stub<size, big_endian> This;
  typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;

  // Branch range.  This is used to calculate the section group size, as well as
  // determine whether a stub is needed.  (B/BL have a signed 26-bit word
  // offset, i.e. +/-128MB.)
  static const int MAX_BRANCH_OFFSET = ((1 << 25) - 1) << 2;
  static const int MIN_BRANCH_OFFSET = -((1 << 25) << 2);

  // Constant used to determine if an offset fits in the adrp instruction
  // encoding (signed 21-bit page offset).
  static const int MAX_ADRP_IMM = (1 << 20) - 1;
  static const int MIN_ADRP_IMM = -(1 << 20);

  static const int BYTES_PER_INSN = 4;
  static const int STUB_ADDR_ALIGN;

  // Determine whether the offset fits in the jump/branch instruction.
  static bool
  aarch64_valid_branch_offset_p(int64_t offset)
  { return offset >= MIN_BRANCH_OFFSET && offset <= MAX_BRANCH_OFFSET; }

  // Determine whether the offset fits in the adrp immediate field.
  static bool
  aarch64_valid_for_adrp_p(AArch64_address location, AArch64_address dest)
  {
    typedef AArch64_relocate_functions<size, big_endian> Reloc;
    int64_t adrp_imm = (Reloc::Page(dest) - Reloc::Page(location)) >> 12;
    return adrp_imm >= MIN_ADRP_IMM && adrp_imm <= MAX_ADRP_IMM;
  }

  // Determine the stub type for a certain relocation or ST_NONE, if no stub is
  // needed.
1169 static int 1170 stub_type_for_reloc(unsigned int r_type, AArch64_address address, 1171 AArch64_address target); 1172 1173 Reloc_stub(int type) 1174 : Stub_base<size, big_endian>(type) 1175 { } 1176 1177 ~Reloc_stub() 1178 { } 1179 1180 // The key class used to index the stub instance in the stub table's stub map. 1181 class Key 1182 { 1183 public: 1184 Key(int type, const Symbol* symbol, const Relobj* relobj, 1185 unsigned int r_sym, int32_t addend) 1186 : type_(type), addend_(addend) 1187 { 1188 if (symbol != NULL) 1189 { 1190 this->r_sym_ = Reloc_stub::invalid_index; 1191 this->u_.symbol = symbol; 1192 } 1193 else 1194 { 1195 gold_assert(relobj != NULL && r_sym != invalid_index); 1196 this->r_sym_ = r_sym; 1197 this->u_.relobj = relobj; 1198 } 1199 } 1200 1201 ~Key() 1202 { } 1203 1204 // Return stub type. 1205 int 1206 type() const 1207 { return this->type_; } 1208 1209 // Return the local symbol index or invalid_index. 1210 unsigned int 1211 r_sym() const 1212 { return this->r_sym_; } 1213 1214 // Return the symbol if there is one. 1215 const Symbol* 1216 symbol() const 1217 { return this->r_sym_ == invalid_index ? this->u_.symbol : NULL; } 1218 1219 // Return the relobj if there is one. 1220 const Relobj* 1221 relobj() const 1222 { return this->r_sym_ != invalid_index ? this->u_.relobj : NULL; } 1223 1224 // Whether this equals to another key k. 1225 bool 1226 eq(const Key& k) const 1227 { 1228 return ((this->type_ == k.type_) 1229 && (this->r_sym_ == k.r_sym_) 1230 && ((this->r_sym_ != Reloc_stub::invalid_index) 1231 ? (this->u_.relobj == k.u_.relobj) 1232 : (this->u_.symbol == k.u_.symbol)) 1233 && (this->addend_ == k.addend_)); 1234 } 1235 1236 // Return a hash value. 1237 size_t 1238 hash_value() const 1239 { 1240 size_t name_hash_value = gold::string_hash<char>( 1241 (this->r_sym_ != Reloc_stub::invalid_index) 1242 ? this->u_.relobj->name().c_str() 1243 : this->u_.symbol->name()); 1244 // We only have 4 stub types. 
1245 size_t stub_type_hash_value = 0x03 & this->type_; 1246 return (name_hash_value 1247 ^ stub_type_hash_value 1248 ^ ((this->r_sym_ & 0x3fff) << 2) 1249 ^ ((this->addend_ & 0xffff) << 16)); 1250 } 1251 1252 // Functors for STL associative containers. 1253 struct hash 1254 { 1255 size_t 1256 operator()(const Key& k) const 1257 { return k.hash_value(); } 1258 }; 1259 1260 struct equal_to 1261 { 1262 bool 1263 operator()(const Key& k1, const Key& k2) const 1264 { return k1.eq(k2); } 1265 }; 1266 1267 private: 1268 // Stub type. 1269 const int type_; 1270 // If this is a local symbol, this is the index in the defining object. 1271 // Otherwise, it is invalid_index for a global symbol. 1272 unsigned int r_sym_; 1273 // If r_sym_ is an invalid index, this points to a global symbol. 1274 // Otherwise, it points to a relobj. We used the unsized and target 1275 // independent Symbol and Relobj classes instead of Sized_symbol<32> and 1276 // Arm_relobj, in order to avoid making the stub class a template 1277 // as most of the stub machinery is endianness-neutral. However, it 1278 // may require a bit of casting done by users of this class. 1279 union 1280 { 1281 const Symbol* symbol; 1282 const Relobj* relobj; 1283 } u_; 1284 // Addend associated with a reloc. 1285 int32_t addend_; 1286 }; // End of inner class Reloc_stub::Key 1287 1288 protected: 1289 // This may be overridden in the child class. 1290 virtual void 1291 do_write(unsigned char*, section_size_type); 1292 1293 private: 1294 static const unsigned int invalid_index = static_cast<unsigned int>(-1); 1295}; // End of Reloc_stub 1296 1297template<int size, bool big_endian> 1298const int Reloc_stub<size, big_endian>::STUB_ADDR_ALIGN = 4; 1299 1300// Write data to output file. 
1301 1302template<int size, bool big_endian> 1303void 1304Reloc_stub<size, big_endian>:: 1305do_write(unsigned char* view, section_size_type) 1306{ 1307 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype; 1308 const uint32_t* insns = this->insns(); 1309 uint32_t num_insns = this->insn_num(); 1310 Insntype* ip = reinterpret_cast<Insntype*>(view); 1311 for (uint32_t i = 0; i < num_insns; ++i) 1312 elfcpp::Swap<32, big_endian>::writeval(ip + i, insns[i]); 1313} 1314 1315 1316// Determine the stub type for a certain relocation or ST_NONE, if no stub is 1317// needed. 1318 1319template<int size, bool big_endian> 1320inline int 1321Reloc_stub<size, big_endian>::stub_type_for_reloc( 1322 unsigned int r_type, AArch64_address location, AArch64_address dest) 1323{ 1324 int64_t branch_offset = 0; 1325 switch(r_type) 1326 { 1327 case elfcpp::R_AARCH64_CALL26: 1328 case elfcpp::R_AARCH64_JUMP26: 1329 branch_offset = dest - location; 1330 break; 1331 default: 1332 gold_unreachable(); 1333 } 1334 1335 if (aarch64_valid_branch_offset_p(branch_offset)) 1336 return ST_NONE; 1337 1338 if (aarch64_valid_for_adrp_p(location, dest)) 1339 return ST_ADRP_BRANCH; 1340 1341 // Always use PC-relative addressing in case of -shared or -pie. 1342 if (parameters->options().output_is_position_independent()) 1343 return ST_LONG_BRANCH_PCREL; 1344 1345 // This saves 2 insns per stub, compared to ST_LONG_BRANCH_PCREL. 1346 // But is only applicable to non-shared or non-pie. 1347 return ST_LONG_BRANCH_ABS; 1348} 1349 1350// A class to hold stubs for the ARM target. This contains 2 different types of 1351// stubs - reloc stubs and erratum stubs. 

template<int size, bool big_endian>
class Stub_table : public Output_data
{
 public:
  typedef Target_aarch64<size, big_endian> The_target_aarch64;
  typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
  typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
  typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
  typedef Reloc_stub<size, big_endian> The_reloc_stub;
  typedef typename The_reloc_stub::Key The_reloc_stub_key;
  typedef Erratum_stub<size, big_endian> The_erratum_stub;
  typedef Erratum_stub_less<size, big_endian> The_erratum_stub_less;
  typedef typename The_reloc_stub_key::hash The_reloc_stub_key_hash;
  typedef typename The_reloc_stub_key::equal_to The_reloc_stub_key_equal_to;
  typedef Stub_table<size, big_endian> The_stub_table;
  typedef Unordered_map<The_reloc_stub_key, The_reloc_stub*,
			The_reloc_stub_key_hash, The_reloc_stub_key_equal_to>
      Reloc_stub_map;
  typedef typename Reloc_stub_map::const_iterator Reloc_stub_map_const_iter;
  typedef Relocate_info<size, big_endian> The_relocate_info;

  // Erratum stubs are kept sorted by (relobj, shndx, sh_offset) so that all
  // stubs of one input section can be found as a contiguous range.
  typedef std::set<The_erratum_stub*, The_erratum_stub_less> Erratum_stub_set;
  typedef typename Erratum_stub_set::iterator Erratum_stub_set_iter;

  Stub_table(The_aarch64_input_section* owner)
    : Output_data(), owner_(owner), reloc_stubs_size_(0),
      erratum_stubs_size_(0), prev_data_size_(0)
  { }

  ~Stub_table()
  { }

  // Return the input section that owns this stub table.
  The_aarch64_input_section*
  owner() const
  { return owner_; }

  // Whether this stub table is empty.
  bool
  empty() const
  { return reloc_stubs_.empty() && erratum_stubs_.empty(); }

  // Return the current data size.
  off_t
  current_data_size() const
  { return this->current_data_size_for_child(); }

  // Add a STUB using KEY.  The caller is responsible for avoiding addition
  // if a STUB with the same key has already been added.
  void
  add_reloc_stub(The_reloc_stub* stub, const The_reloc_stub_key& key);

  // Add an erratum stub into the erratum stub set. The set is ordered by
  // (relobj, shndx, sh_offset).
  void
  add_erratum_stub(The_erratum_stub* stub);

  // Find if such erratum exists for any given (obj, shndx, sh_offset).
  The_erratum_stub*
  find_erratum_stub(The_aarch64_relobj* a64relobj,
		    unsigned int shndx, unsigned int sh_offset);

  // Find all the erratums for a given input section. The return value is a pair
  // of iterators [begin, end).
  std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
  find_erratum_stubs_for_input_section(The_aarch64_relobj* a64relobj,
				       unsigned int shndx);

  // Compute the erratum stub address.  Erratum stubs are laid out after all
  // reloc stubs, at the next STUB_ADDR_ALIGN boundary.
  AArch64_address
  erratum_stub_address(The_erratum_stub* stub) const
  {
    AArch64_address r = align_address(this->address() + this->reloc_stubs_size_,
				      The_erratum_stub::STUB_ADDR_ALIGN);
    r += stub->offset();
    return r;
  }

  // Finalize stubs. No-op here, just for completeness.
  void
  finalize_stubs()
  { }

  // Look up a relocation stub using KEY. Return NULL if there is none.
  The_reloc_stub*
  find_reloc_stub(The_reloc_stub_key& key)
  {
    Reloc_stub_map_const_iter p = this->reloc_stubs_.find(key);
    return (p != this->reloc_stubs_.end()) ? p->second : NULL;
  }

  // Relocate reloc stubs in this stub table. This does not relocate erratum
  // stubs.
  void
  relocate_reloc_stubs(const The_relocate_info*,
		       The_target_aarch64*,
		       Output_section*,
		       unsigned char*,
		       AArch64_address,
		       section_size_type);

  // Relocate an erratum stub.
  void
  relocate_erratum_stub(The_erratum_stub*, unsigned char*);

  // Update data size at the end of a relaxation pass.  Return true if data size
  // is different from that of the previous relaxation pass.
  bool
  update_data_size_changed_p()
  {
    // No addralign changed here.
    off_t s = align_address(this->reloc_stubs_size_,
			    The_erratum_stub::STUB_ADDR_ALIGN)
	      + this->erratum_stubs_size_;
    bool changed = (s != this->prev_data_size_);
    this->prev_data_size_ = s;
    return changed;
  }

 protected:
  // Write out section contents.
  void
  do_write(Output_file*);

  // Return the required alignment.
  uint64_t
  do_addralign() const
  {
    return std::max(The_reloc_stub::STUB_ADDR_ALIGN,
		    The_erratum_stub::STUB_ADDR_ALIGN);
  }

  // Reset address and file offset.
  void
  do_reset_address_and_file_offset()
  { this->set_current_data_size_for_child(this->prev_data_size_); }

  // Set final data size.
  void
  set_final_data_size()
  { this->set_data_size(this->current_data_size()); }

 private:
  // Relocate one reloc stub.
  void
  relocate_reloc_stub(The_reloc_stub*,
		      const The_relocate_info*,
		      The_target_aarch64*,
		      Output_section*,
		      unsigned char*,
		      AArch64_address,
		      section_size_type);

 private:
  // Owner of this stub table.
  The_aarch64_input_section* owner_;
  // The relocation stubs.
  Reloc_stub_map reloc_stubs_;
  // The erratum stubs.
  Erratum_stub_set erratum_stubs_;
  // Size of reloc stubs.
  off_t reloc_stubs_size_;
  // Size of erratum stubs.
  off_t erratum_stubs_size_;
  // data size of this in the previous pass.
  off_t prev_data_size_;
};  // End of Stub_table


// Add an erratum stub into the erratum stub set. The set is ordered by
// (relobj, shndx, sh_offset).
1522 1523template<int size, bool big_endian> 1524void 1525Stub_table<size, big_endian>::add_erratum_stub(The_erratum_stub* stub) 1526{ 1527 std::pair<Erratum_stub_set_iter, bool> ret = 1528 this->erratum_stubs_.insert(stub); 1529 gold_assert(ret.second); 1530 this->erratum_stubs_size_ = align_address( 1531 this->erratum_stubs_size_, The_erratum_stub::STUB_ADDR_ALIGN); 1532 stub->set_offset(this->erratum_stubs_size_); 1533 this->erratum_stubs_size_ += stub->stub_size(); 1534} 1535 1536 1537// Find if such erratum exists for given (obj, shndx, sh_offset). 1538 1539template<int size, bool big_endian> 1540Erratum_stub<size, big_endian>* 1541Stub_table<size, big_endian>::find_erratum_stub( 1542 The_aarch64_relobj* a64relobj, unsigned int shndx, unsigned int sh_offset) 1543{ 1544 // A dummy object used as key to search in the set. 1545 The_erratum_stub key(a64relobj, ST_NONE, 1546 shndx, sh_offset); 1547 Erratum_stub_set_iter i = this->erratum_stubs_.find(&key); 1548 if (i != this->erratum_stubs_.end()) 1549 { 1550 The_erratum_stub* stub(*i); 1551 gold_assert(stub->erratum_insn() != 0); 1552 return stub; 1553 } 1554 return NULL; 1555} 1556 1557 1558// Find all the errata for a given input section. The return value is a pair of 1559// iterators [begin, end). 

template<int size, bool big_endian>
std::pair<typename Stub_table<size, big_endian>::Erratum_stub_set_iter,
	  typename Stub_table<size, big_endian>::Erratum_stub_set_iter>
Stub_table<size, big_endian>::find_erratum_stubs_for_input_section(
    The_aarch64_relobj* a64relobj, unsigned int shndx)
{
  typedef std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter> Result_pair;
  Erratum_stub_set_iter start, end;
  // The set is ordered by (relobj, shndx, sh_offset), so the first stub of
  // this section, if any, is the lower bound of (relobj, shndx, 0).
  The_erratum_stub low_key(a64relobj, ST_NONE, shndx, 0);
  start = this->erratum_stubs_.lower_bound(&low_key);
  if (start == this->erratum_stubs_.end())
    return Result_pair(this->erratum_stubs_.end(),
		       this->erratum_stubs_.end());
  end = start;
  // Walk forward while the stubs still belong to (relobj, shndx).
  while (end != this->erratum_stubs_.end() &&
	 (*end)->relobj() == a64relobj && (*end)->shndx() == shndx)
    ++end;
  return Result_pair(start, end);
}


// Add a STUB using KEY.  The caller is responsible for avoiding addition
// if a STUB with the same key has already been added.

template<int size, bool big_endian>
void
Stub_table<size, big_endian>::add_reloc_stub(
    The_reloc_stub* stub, const The_reloc_stub_key& key)
{
  gold_assert(stub->type() == key.type());
  this->reloc_stubs_[key] = stub;

  // Assign stub offset early.  We can do this because we never remove
  // reloc stubs and they are in the beginning of the stub table.
  this->reloc_stubs_size_ = align_address(this->reloc_stubs_size_,
					  The_reloc_stub::STUB_ADDR_ALIGN);
  stub->set_offset(this->reloc_stubs_size_);
  this->reloc_stubs_size_ += stub->stub_size();
}


// Relocate an erratum stub.

template<int size, bool big_endian>
void
Stub_table<size, big_endian>::
relocate_erratum_stub(The_erratum_stub* estub,
		      unsigned char* view)
{
  // Just for convenience.
  const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;

  gold_assert(!estub->is_invalidated_erratum_stub());
  AArch64_address stub_address = this->erratum_stub_address(estub);
  // The address of "b" in the stub that is to be "relocated".
  AArch64_address stub_b_insn_address;
  // Branch offset that is to be filled in "b" insn.
  int b_offset = 0;
  switch (estub->type())
    {
    case ST_E_843419:
    case ST_E_835769:
      // The 1st insn of the erratum could be a relocation spot,
      // in this case we need to fix it with
      // "(*i)->erratum_insn()".
      elfcpp::Swap<32, big_endian>::writeval(
	  view + (stub_address - this->address()),
	  estub->erratum_insn());
      // For the erratum, the 2nd insn is a b-insn to be patched
      // (relocated) so that it jumps back to the insn after the
      // erratum spot in the original section.
      stub_b_insn_address = stub_address + 1 * BPI;
      b_offset = estub->destination_address() - stub_b_insn_address;
      AArch64_relocate_functions<size, big_endian>::construct_b(
	  view + (stub_b_insn_address - this->address()),
	  ((unsigned int)(b_offset)) & 0xfffffff);
      break;
    default:
      gold_unreachable();
      break;
    }
  // Mark the stub done so it is not relocated twice.
  estub->invalidate_erratum_stub();
}


// Relocate only reloc stubs in this stub table. This does not relocate erratum
// stubs.

template<int size, bool big_endian>
void
Stub_table<size, big_endian>::
relocate_reloc_stubs(const The_relocate_info* relinfo,
		     The_target_aarch64* target_aarch64,
		     Output_section* output_section,
		     unsigned char* view,
		     AArch64_address address,
		     section_size_type view_size)
{
  // "view_size" is the total size of the stub_table.
  gold_assert(address == this->address() &&
	      view_size == static_cast<section_size_type>(this->data_size()));
  for(Reloc_stub_map_const_iter p = this->reloc_stubs_.begin();
      p != this->reloc_stubs_.end(); ++p)
    relocate_reloc_stub(p->second, relinfo, target_aarch64, output_section,
			view, address, view_size);
}


// Relocate one reloc stub. This is a helper for
// Stub_table::relocate_reloc_stubs().

template<int size, bool big_endian>
void
Stub_table<size, big_endian>::
relocate_reloc_stub(The_reloc_stub* stub,
		    const The_relocate_info* relinfo,
		    The_target_aarch64* target_aarch64,
		    Output_section* output_section,
		    unsigned char* view,
		    AArch64_address address,
		    section_size_type view_size)
{
  // "offset" is the offset from the beginning of the stub_table.
  section_size_type offset = stub->offset();
  section_size_type stub_size = stub->stub_size();
  // "view_size" is the total size of the stub_table.
  gold_assert(offset + stub_size <= view_size);

  // Delegate to the target, which knows how to apply the stub's relocations.
  target_aarch64->relocate_reloc_stub(stub, relinfo, output_section,
				      view + offset, address + offset,
				      view_size);
}


// Write out the stubs to file.

template<int size, bool big_endian>
void
Stub_table<size, big_endian>::do_write(Output_file* of)
{
  off_t offset = this->offset();
  const section_size_type oview_size =
    convert_to_section_size_type(this->data_size());
  unsigned char* const oview = of->get_output_view(offset, oview_size);

  // Write relocation stubs.
  for (typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.begin();
      p != this->reloc_stubs_.end(); ++p)
    {
      The_reloc_stub* stub = p->second;
      AArch64_address address = this->address() + stub->offset();
      gold_assert(address ==
		  align_address(address, The_reloc_stub::STUB_ADDR_ALIGN));
      stub->write(oview + stub->offset(), stub->stub_size());
    }

  // Write erratum stubs.  They follow the reloc stubs, starting at the next
  // STUB_ADDR_ALIGN boundary; each stub's offset is relative to that point.
  unsigned int erratum_stub_start_offset =
    align_address(this->reloc_stubs_size_, The_erratum_stub::STUB_ADDR_ALIGN);
  for (typename Erratum_stub_set::iterator p = this->erratum_stubs_.begin();
       p != this->erratum_stubs_.end(); ++p)
    {
      The_erratum_stub* stub(*p);
      stub->write(oview + erratum_stub_start_offset + stub->offset(),
		  stub->stub_size());
    }

  of->write_output_view(this->offset(), oview_size, oview);
}


// AArch64_relobj class.

template<int size, bool big_endian>
class AArch64_relobj : public Sized_relobj_file<size, big_endian>
{
 public:
  typedef AArch64_relobj<size, big_endian> This;
  typedef Target_aarch64<size, big_endian> The_target_aarch64;
  typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
  typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
  typedef Stub_table<size, big_endian> The_stub_table;
  typedef Erratum_stub<size, big_endian> The_erratum_stub;
  typedef typename The_stub_table::Erratum_stub_set_iter Erratum_stub_set_iter;
  typedef std::vector<The_stub_table*> Stub_table_list;
  static const AArch64_address invalid_address =
      static_cast<AArch64_address>(-1);

  AArch64_relobj(const std::string& name, Input_file* input_file, off_t offset,
		 const typename elfcpp::Ehdr<size, big_endian>& ehdr)
    : Sized_relobj_file<size, big_endian>(name, input_file, offset, ehdr),
      stub_tables_()
  { }

  ~AArch64_relobj()
  { }

  // Return the stub table of the SHNDX-th section if there is one.
  The_stub_table*
  stub_table(unsigned int shndx) const
  {
    gold_assert(shndx < this->stub_tables_.size());
    return this->stub_tables_[shndx];
  }

  // Set STUB_TABLE to be the stub_table of the SHNDX-th section.
  void
  set_stub_table(unsigned int shndx, The_stub_table* stub_table)
  {
    gold_assert(shndx < this->stub_tables_.size());
    this->stub_tables_[shndx] = stub_table;
  }

  // Entrance to errata scanning.
  void
  scan_errata(unsigned int shndx,
	      const elfcpp::Shdr<size, big_endian>&,
	      Output_section*, const Symbol_table*,
	      The_target_aarch64*);

  // Scan all relocation sections for stub generation.
  void
  scan_sections_for_stubs(The_target_aarch64*, const Symbol_table*,
			  const Layout*);

  // Whether a section is a scannable text section.
  bool
  text_section_is_scannable(const elfcpp::Shdr<size, big_endian>&, unsigned int,
			    const Output_section*, const Symbol_table*);

  // Convert regular input section with index SHNDX to a relaxed section.
  void
  convert_input_section_to_relaxed_section(unsigned /* shndx */)
  {
    // The stubs have relocations and we need to process them after writing
    // out the stubs.  So relocation now must follow section write.
    this->set_relocs_must_follow_section_writes();
  }

  // Structure for mapping symbol position.
  struct Mapping_symbol_position
  {
    Mapping_symbol_position(unsigned int shndx, AArch64_address offset):
      shndx_(shndx), offset_(offset)
    {}

    // "<" comparator used in ordered_map container.
    bool
    operator<(const Mapping_symbol_position& p) const
    {
      return (this->shndx_ < p.shndx_
	      || (this->shndx_ == p.shndx_ && this->offset_ < p.offset_));
    }

    // Section index.
    unsigned int shndx_;

    // Section offset.
    AArch64_address offset_;
  };

  typedef std::map<Mapping_symbol_position, char> Mapping_symbol_info;

 protected:
  // Post constructor setup.
  void
  do_setup()
  {
    // Call parent's setup method.
    Sized_relobj_file<size, big_endian>::do_setup();

    // Initialize look-up tables.
    this->stub_tables_.resize(this->shnum());
  }

  virtual void
  do_relocate_sections(
      const Symbol_table* symtab, const Layout* layout,
      const unsigned char* pshdrs, Output_file* of,
      typename Sized_relobj_file<size, big_endian>::Views* pviews);

  // Count local symbols and (optionally) record mapping info.
  virtual void
  do_count_local_symbols(Stringpool_template<char>*,
			 Stringpool_template<char>*);

 private:
  // Fix all errata in the object, and for each erratum, relocate corresponding
  // erratum stub.
  void
  fix_errata_and_relocate_erratum_stubs(
      typename Sized_relobj_file<size, big_endian>::Views* pviews);

  // Try to fix erratum 843419 in an optimized way.  Return true if patch is
  // applied.
  bool
  try_fix_erratum_843419_optimized(
      The_erratum_stub*,
      typename Sized_relobj_file<size, big_endian>::View_size&);

  // Whether a section needs to be scanned for relocation stubs.
  bool
  section_needs_reloc_stub_scanning(const elfcpp::Shdr<size, big_endian>&,
				    const Relobj::Output_sections&,
				    const Symbol_table*, const unsigned char*);

  // List of stub tables, indexed by section index (shnum() entries).
  Stub_table_list stub_tables_;

  // Mapping symbol information sorted by (section index, section_offset).
  Mapping_symbol_info mapping_symbol_info_;
};  // End of AArch64_relobj


// Override to record mapping symbol information.
template<int size, bool big_endian>
void
AArch64_relobj<size, big_endian>::do_count_local_symbols(
    Stringpool_template<char>* pool, Stringpool_template<char>* dynpool)
{
  Sized_relobj_file<size, big_endian>::do_count_local_symbols(pool, dynpool);

  // Only erratum-fixing work needs mapping symbols, so skip this time consuming
  // processing if not fixing erratum.
  if (!parameters->options().fix_cortex_a53_843419()
      && !parameters->options().fix_cortex_a53_835769())
    return;

  const unsigned int loccount = this->local_symbol_count();
  if (loccount == 0)
    return;

  // Read the symbol table section header.
  const unsigned int symtab_shndx = this->symtab_shndx();
  elfcpp::Shdr<size, big_endian>
      symtabshdr(this, this->elf_file()->section_header(symtab_shndx));
  gold_assert(symtabshdr.get_sh_type() == elfcpp::SHT_SYMTAB);

  // Read the local symbols.
  const int sym_size = elfcpp::Elf_sizes<size>::sym_size;
  gold_assert(loccount == symtabshdr.get_sh_info());
  off_t locsize = loccount * sym_size;
  const unsigned char* psyms = this->get_view(symtabshdr.get_sh_offset(),
					      locsize, true, true);

  // For mapping symbol processing, we need to read the symbol names.
  unsigned int strtab_shndx = this->adjust_shndx(symtabshdr.get_sh_link());
  if (strtab_shndx >= this->shnum())
    {
      this->error(_("invalid symbol table name index: %u"), strtab_shndx);
      return;
    }

  elfcpp::Shdr<size, big_endian>
      strtabshdr(this, this->elf_file()->section_header(strtab_shndx));
  if (strtabshdr.get_sh_type() != elfcpp::SHT_STRTAB)
    {
      this->error(_("symbol table name section has wrong type: %u"),
		  static_cast<unsigned int>(strtabshdr.get_sh_type()));
      return;
    }

  const char* pnames =
    reinterpret_cast<const char*>(this->get_view(strtabshdr.get_sh_offset(),
						 strtabshdr.get_sh_size(),
						 false, false));

  // Skip the first dummy symbol.
  psyms += sym_size;
  typename Sized_relobj_file<size, big_endian>::Local_values*
    plocal_values = this->local_values();
  for (unsigned int i = 1; i < loccount; ++i, psyms += sym_size)
    {
      elfcpp::Sym<size, big_endian> sym(psyms);
      Symbol_value<size>& lv((*plocal_values)[i]);
      AArch64_address input_value = lv.input_value();

      // Check to see if this is a mapping symbol.  AArch64 mapping symbols are
      // defined in "ELF for the ARM 64-bit Architecture", Table 4-4, Mapping
      // symbols.
      // Mapping symbols could be one of the following 4 forms -
      //   a) $x
      //   b) $x.<any...>
      //   c) $d
      //   d) $d.<any...>
      const char* sym_name = pnames + sym.get_st_name();
      if (sym_name[0] == '$' && (sym_name[1] == 'x' || sym_name[1] == 'd')
	  && (sym_name[2] == '\0' || sym_name[2] == '.'))
	{
	  bool is_ordinary;
	  unsigned int input_shndx =
	    this->adjust_sym_shndx(i, sym.get_st_shndx(), &is_ordinary);
	  gold_assert(is_ordinary);

	  Mapping_symbol_position msp(input_shndx, input_value);
	  // Insert mapping_symbol_info into map whose ordering is defined by
	  // (shndx, offset_within_section).  The value is the mapping symbol
	  // kind character ('x' for code, 'd' for data).
	  this->mapping_symbol_info_[msp] = sym_name[1];
	}
    }
}


// Fix all errata in the object and for each erratum, we relocate the
// corresponding erratum stub (by calling Stub_table::relocate_erratum_stub).

template<int size, bool big_endian>
void
AArch64_relobj<size, big_endian>::fix_errata_and_relocate_erratum_stubs(
    typename Sized_relobj_file<size, big_endian>::Views* pviews)
{
  typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
  unsigned int shnum = this->shnum();
  for (unsigned int i = 1; i < shnum; ++i)
    {
      The_stub_table* stub_table = this->stub_table(i);
      if (!stub_table)
	continue;
      // All erratum stubs for this input section form a contiguous range in
      // the ordered stub set.
      std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
	ipair(stub_table->find_erratum_stubs_for_input_section(this, i));
      Erratum_stub_set_iter p = ipair.first, end = ipair.second;
      while (p != end)
	{
	  The_erratum_stub* stub = *p;
	  typename Sized_relobj_file<size, big_endian>::View_size&
	    pview((*pviews)[i]);

	  // Double check data before fix.
	  gold_assert(pview.address + stub->sh_offset()
		      == stub->erratum_address());

	  // Update previously recorded erratum insn with relocated
	  // version.
	  Insntype* ip =
	    reinterpret_cast<Insntype*>(pview.view + stub->sh_offset());
	  Insntype insn_to_fix = ip[0];
	  stub->update_erratum_insn(insn_to_fix);

	  // First try to see if erratum is 843419 and if it can be fixed
	  // without using branch-to-stub.
	  if (!try_fix_erratum_843419_optimized(stub, pview))
	    {
	      // Replace the erratum insn with a branch-to-stub.
	      AArch64_address stub_address =
		stub_table->erratum_stub_address(stub);
	      unsigned int b_offset = stub_address - stub->erratum_address();
	      AArch64_relocate_functions<size, big_endian>::construct_b(
		  pview.view + stub->sh_offset(), b_offset & 0xfffffff);
	    }

	  // Erratum fix is done (or skipped), continue to relocate erratum
	  // stub.  Note, when erratum fix is skipped (either because we
	  // proactively change the code sequence or the code sequence is
	  // changed by relaxation, etc), we can still safely relocate the
	  // erratum stub, ignoring the fact the erratum could never be
	  // executed.
	  stub_table->relocate_erratum_stub(
	      stub, pview.view + (stub_table->address() - pview.address));

	  // Next erratum stub.
	  ++p;
	}
    }
}


// This is an optimization for 843419.  This erratum requires the sequence begin
// with 'adrp', when final value calculated by adrp fits in adr, we can just
// replace 'adrp' with 'adr', so we save 2 jumps per occurrence.  (Note,
// however, in this case, we do not delete the erratum stub (too late to do
// so), it is merely generated without ever being called.)

template<int size, bool big_endian>
bool
AArch64_relobj<size, big_endian>::try_fix_erratum_843419_optimized(
    The_erratum_stub* stub,
    typename Sized_relobj_file<size, big_endian>::View_size& pview)
{
  if (stub->type() != ST_E_843419)
    return false;

  typedef AArch64_insn_utilities<big_endian> Insn_utilities;
  typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
  // Only 843419 stubs carry the adrp position, so the downcast is safe here.
  E843419_stub<size, big_endian>* e843419_stub =
    reinterpret_cast<E843419_stub<size, big_endian>*>(stub);
  AArch64_address pc = pview.address + e843419_stub->adrp_sh_offset();
  Insntype* adrp_view = reinterpret_cast<Insntype*>(
      pview.view + e843419_stub->adrp_sh_offset());
  Insntype adrp_insn = adrp_view[0];
  gold_assert(Insn_utilities::is_adrp(adrp_insn));
  // Get adrp 33-bit signed imm value.
  int64_t adrp_imm = Insn_utilities::
    aarch64_adrp_decode_imm(adrp_insn);
  // adrp - final value transferred to target register is calculated as:
  //     PC[11:0] = Zeros(12)
  //     adrp_dest_value = PC + adrp_imm;
  int64_t adrp_dest_value = (pc & ~((1 << 12) - 1)) + adrp_imm;
  // adr - final value transferred to target register is calculated as:
  //     PC + adr_imm
  // So we have:
  //     PC + adr_imm = adrp_dest_value
  //   ==>
  //     adr_imm = adrp_dest_value - PC
  int64_t adr_imm = adrp_dest_value - pc;
  // Check if imm fits in adr (21-bit signed).
  if (-(1 << 20) <= adr_imm && adr_imm < (1 << 20))
    {
      // Convert 'adrp' into 'adr' by clearing bit 31 (the op bit that
      // distinguishes the two encodings), then encode the new immediate.
      Insntype adr_insn = adrp_insn & ((1u << 31) - 1);
      adr_insn = Insn_utilities::
	aarch64_adr_encode_imm(adr_insn, adr_imm);
      elfcpp::Swap<32, big_endian>::writeval(adrp_view, adr_insn);
      return true;
    }
  return false;
}


// Relocate sections.

// Relocate this object's sections, then fix errata and relocate any stubs
// (erratum stubs and reloc stubs) that live in this object's sections.

template<int size, bool big_endian>
void
AArch64_relobj<size, big_endian>::do_relocate_sections(
    const Symbol_table* symtab, const Layout* layout,
    const unsigned char* pshdrs, Output_file* of,
    typename Sized_relobj_file<size, big_endian>::Views* pviews)
{
  // Relocate the section data.
  this->relocate_section_range(symtab, layout, pshdrs, of, pviews,
			       1, this->shnum() - 1);

  // We do not generate stubs if doing a relocatable link.
  if (parameters->options().relocatable())
    return;

  // This part only relocates erratum stubs that belong to input sections of this
  // object file.
  if (parameters->options().fix_cortex_a53_843419()
      || parameters->options().fix_cortex_a53_835769())
    this->fix_errata_and_relocate_erratum_stubs(pviews);

  Relocate_info<size, big_endian> relinfo;
  relinfo.symtab = symtab;
  relinfo.layout = layout;
  relinfo.object = this;

  // This part relocates all reloc stubs that are contained in stub_tables of
  // this object file.
  unsigned int shnum = this->shnum();
  The_target_aarch64* target = The_target_aarch64::current_target();

  for (unsigned int i = 1; i < shnum; ++i)
    {
      The_aarch64_input_section* aarch64_input_section =
	target->find_aarch64_input_section(this, i);
      // Only sections that own a non-empty stub table need work here.
      if (aarch64_input_section != NULL
	  && aarch64_input_section->is_stub_table_owner()
	  && !aarch64_input_section->stub_table()->empty())
	{
	  Output_section* os = this->output_section(i);
	  gold_assert(os != NULL);

	  // The stubs have no relocation section of their own, so reloc_shndx
	  // is SHN_UNDEF; data_shndx identifies the owning input section.
	  relinfo.reloc_shndx = elfcpp::SHN_UNDEF;
	  relinfo.reloc_shdr = NULL;
	  relinfo.data_shndx = i;
	  relinfo.data_shdr = pshdrs + i * elfcpp::Elf_sizes<size>::shdr_size;

	  typename Sized_relobj_file<size, big_endian>::View_size&
	      view_struct = (*pviews)[i];
	  gold_assert(view_struct.view != NULL);

	  // The stub table is appended after the owning section's contents;
	  // compute its position within the section's output view.
	  The_stub_table* stub_table = aarch64_input_section->stub_table();
	  off_t offset = stub_table->address() - view_struct.address;
	  unsigned char* view = view_struct.view + offset;
	  AArch64_address address = stub_table->address();
	  section_size_type view_size = stub_table->data_size();
	  stub_table->relocate_reloc_stubs(&relinfo, target, os, view, address,
					   view_size);
	}
    }
}


// Determine if an input section is scannable for stub processing. SHDR is
// the header of the section and SHNDX is the section index. OS is the output
// section for the input section and SYMTAB is the global symbol table used to
// look up ICF information.

template<int size, bool big_endian>
bool
AArch64_relobj<size, big_endian>::text_section_is_scannable(
    const elfcpp::Shdr<size, big_endian>& text_shdr,
    unsigned int text_shndx,
    const Output_section* os,
    const Symbol_table* symtab)
{
  // Skip any empty sections, unallocated sections or sections whose
  // type are not SHT_PROGBITS.
  if (text_shdr.get_sh_size() == 0
      || (text_shdr.get_sh_flags() & elfcpp::SHF_ALLOC) == 0
      || text_shdr.get_sh_type() != elfcpp::SHT_PROGBITS)
    return false;

  // Skip any discarded or ICF'ed sections.
  if (os == NULL || symtab->is_section_folded(this, text_shndx))
    return false;

  // Skip exception frame.
  if (strcmp(os->name(), ".eh_frame") == 0)
    return false ;

  // A section with an invalid output offset must at least have been turned
  // into a relaxed input section by a previous pass.
  gold_assert(!this->is_output_section_offset_invalid(text_shndx) ||
	      os->find_relaxed_input_section(this, text_shndx) != NULL);

  return true;
}


// Determine if we want to scan the SHNDX-th section for relocation stubs.
// This is a helper for AArch64_relobj::scan_sections_for_stubs().

template<int size, bool big_endian>
bool
AArch64_relobj<size, big_endian>::section_needs_reloc_stub_scanning(
    const elfcpp::Shdr<size, big_endian>& shdr,
    const Relobj::Output_sections& out_sections,
    const Symbol_table* symtab,
    const unsigned char* pshdrs)
{
  // AArch64 uses RELA relocations exclusively.
  unsigned int sh_type = shdr.get_sh_type();
  if (sh_type != elfcpp::SHT_RELA)
    return false;

  // Ignore empty section.
  off_t sh_size = shdr.get_sh_size();
  if (sh_size == 0)
    return false;

  // Ignore reloc section with unexpected symbol table. The
  // error will be reported in the final link.
  if (this->adjust_shndx(shdr.get_sh_link()) != this->symtab_shndx())
    return false;

  gold_assert(sh_type == elfcpp::SHT_RELA);
  unsigned int reloc_size = elfcpp::Elf_sizes<size>::rela_size;

  // Ignore reloc section with unexpected entsize or uneven size.
  // The error will be reported in the final link.
  if (reloc_size != shdr.get_sh_entsize() || sh_size % reloc_size != 0)
    return false;

  // Ignore reloc section with bad info. This error will be
  // reported in the final link.
  unsigned int text_shndx = this->adjust_shndx(shdr.get_sh_info());
  if (text_shndx >= this->shnum())
    return false;

  // Finally check the text section this reloc section applies to.
  const unsigned int shdr_size = elfcpp::Elf_sizes<size>::shdr_size;
  const elfcpp::Shdr<size, big_endian> text_shdr(pshdrs +
						 text_shndx * shdr_size);
  return this->text_section_is_scannable(text_shdr, text_shndx,
					 out_sections[text_shndx], symtab);
}


// Scan section SHNDX for erratum 843419 and 835769.

template<int size, bool big_endian>
void
AArch64_relobj<size, big_endian>::scan_errata(
    unsigned int shndx, const elfcpp::Shdr<size, big_endian>& shdr,
    Output_section* os, const Symbol_table* symtab,
    The_target_aarch64* target)
{
  // Only allocated, executable PROGBITS sections can contain the erratum
  // instruction sequences.
  if (shdr.get_sh_size() == 0
      || (shdr.get_sh_flags() &
	  (elfcpp::SHF_ALLOC | elfcpp::SHF_EXECINSTR)) == 0
      || shdr.get_sh_type() != elfcpp::SHT_PROGBITS)
    return;

  // Skip discarded or ICF-folded sections.
  if (!os || symtab->is_section_folded(this, shndx)) return;

  // Compute the section's output address; for a section with an invalid
  // offset, look up its relaxed input section instead.
  AArch64_address output_offset = this->get_output_section_offset(shndx);
  AArch64_address output_address;
  if (output_offset != invalid_address)
    output_address = os->address() + output_offset;
  else
    {
      const Output_relaxed_input_section* poris =
	  os->find_relaxed_input_section(this, shndx);
      if (!poris) return;
      output_address = poris->address();
    }

  section_size_type input_view_size = 0;
  const unsigned char* input_view =
    this->section_contents(shndx, &input_view_size, false);

  Mapping_symbol_position section_start(shndx, 0);
  // Find the first mapping symbol record within section shndx.
  typename Mapping_symbol_info::const_iterator p =
    this->mapping_symbol_info_.lower_bound(section_start);
  // Scan each code span (delimited by mapping symbols) in this section.
  // Only '$x' spans contain instructions worth scanning.
  while (p != this->mapping_symbol_info_.end() &&
	 p->first.shndx_ == shndx)
    {
      typename Mapping_symbol_info::const_iterator prev = p;
      ++p;
      if (prev->second == 'x')
	{
	  // Span runs from this mapping symbol to the next one in the same
	  // section, or to the end of the section.
	  section_size_type span_start =
	    convert_to_section_size_type(prev->first.offset_);
	  section_size_type span_end;
	  if (p != this->mapping_symbol_info_.end()
	      && p->first.shndx_ == shndx)
	    span_end = convert_to_section_size_type(p->first.offset_);
	  else
	    span_end = convert_to_section_size_type(shdr.get_sh_size());

	  // Here we do not share the scanning code of both errata. For 843419,
	  // only the last few insns of each page are examined, which is fast,
	  // whereas, for 835769, every insn pair needs to be checked.

	  if (parameters->options().fix_cortex_a53_843419())
	    target->scan_erratum_843419_span(
		this, shndx, span_start, span_end,
		const_cast<unsigned char*>(input_view), output_address);

	  if (parameters->options().fix_cortex_a53_835769())
	    target->scan_erratum_835769_span(
		this, shndx, span_start, span_end,
		const_cast<unsigned char*>(input_view), output_address);
	}
    }
}


// Scan relocations for stub generation.

template<int size, bool big_endian>
void
AArch64_relobj<size, big_endian>::scan_sections_for_stubs(
    The_target_aarch64* target,
    const Symbol_table* symtab,
    const Layout* layout)
{
  unsigned int shnum = this->shnum();
  const unsigned int shdr_size = elfcpp::Elf_sizes<size>::shdr_size;

  // Read the section headers.
  const unsigned char* pshdrs = this->get_view(this->elf_file()->shoff(),
					       shnum * shdr_size,
					       true, true);

  // To speed up processing, we set up hash tables for fast lookup of
  // input offsets to output addresses.
  this->initialize_input_to_output_maps();

  const Relobj::Output_sections& out_sections(this->output_sections());

  Relocate_info<size, big_endian> relinfo;
  relinfo.symtab = symtab;
  relinfo.layout = layout;
  relinfo.object = this;

  // Do relocation stubs scanning.
  const unsigned char* p = pshdrs + shdr_size;
  for (unsigned int i = 1; i < shnum; ++i, p += shdr_size)
    {
      const elfcpp::Shdr<size, big_endian> shdr(p);
      // Erratum scanning looks at the text sections themselves; reloc stub
      // scanning looks at the RELA sections that apply to them.
      if (parameters->options().fix_cortex_a53_843419()
	  || parameters->options().fix_cortex_a53_835769())
	scan_errata(i, shdr, out_sections[i], symtab, target);
      if (this->section_needs_reloc_stub_scanning(shdr, out_sections, symtab,
						  pshdrs))
	{
	  // INDEX is the text section this reloc section applies to.
	  unsigned int index = this->adjust_shndx(shdr.get_sh_info());
	  AArch64_address output_offset =
	      this->get_output_section_offset(index);
	  AArch64_address output_address;
	  if (output_offset != invalid_address)
	    {
	      output_address = out_sections[index]->address() + output_offset;
	    }
	  else
	    {
	      // Currently this only happens for a relaxed section.
	      const Output_relaxed_input_section* poris =
		  out_sections[index]->find_relaxed_input_section(this, index);
	      gold_assert(poris != NULL);
	      output_address = poris->address();
	    }

	  // Get the relocations.
	  const unsigned char* prelocs = this->get_view(shdr.get_sh_offset(),
							shdr.get_sh_size(),
							true, false);

	  // Get the section contents.
	  section_size_type input_view_size = 0;
	  const unsigned char* input_view =
	    this->section_contents(index, &input_view_size, false);

	  relinfo.reloc_shndx = i;
	  relinfo.data_shndx = index;
	  unsigned int sh_type = shdr.get_sh_type();
	  unsigned int reloc_size;
	  gold_assert (sh_type == elfcpp::SHT_RELA);
	  reloc_size = elfcpp::Elf_sizes<size>::rela_size;

	  Output_section* os = out_sections[index];
	  target->scan_section_for_stubs(&relinfo, sh_type, prelocs,
					 shdr.get_sh_size() / reloc_size,
					 os,
					 output_offset == invalid_address,
					 input_view, output_address,
					 input_view_size);
	}
    }
}


// A class to wrap an ordinary input section containing executable code.

template<int size, bool big_endian>
class AArch64_input_section : public Output_relaxed_input_section
{
 public:
  typedef Stub_table<size, big_endian> The_stub_table;

  AArch64_input_section(Relobj* relobj, unsigned int shndx)
    : Output_relaxed_input_section(relobj, shndx, 1),
      stub_table_(NULL),
      original_contents_(NULL), original_size_(0),
      original_addralign_(1)
  { }

  ~AArch64_input_section()
  { delete[] this->original_contents_; }

  // Initialize.
  void
  init();

  // Set the stub_table.
  void
  set_stub_table(The_stub_table* st)
  { this->stub_table_ = st; }

  // Whether this is a stub table owner.
  bool
  is_stub_table_owner() const
  { return this->stub_table_ != NULL && this->stub_table_->owner() == this; }

  // Return the original size of the section.
  uint32_t
  original_size() const
  { return this->original_size_; }

  // Return the stub table.
  The_stub_table*
  stub_table()
  { return stub_table_; }

 protected:
  // Write out this input section.
  void
  do_write(Output_file*);

  // Return required alignment of this.  When this owns a stub table, the
  // alignment must satisfy both the original section and the stub table.
  uint64_t
  do_addralign() const
  {
    if (this->is_stub_table_owner())
      return std::max(this->stub_table_->addralign(),
		      static_cast<uint64_t>(this->original_addralign_));
    else
      return this->original_addralign_;
  }

  // Finalize data size.
  void
  set_final_data_size();

  // Reset address and file offset.
  void
  do_reset_address_and_file_offset();

  // Output offset.  Offsets within the original section contents map
  // one-to-one; anything beyond (i.e. in the stub table) does not.
  bool
  do_output_offset(const Relobj* object, unsigned int shndx,
		   section_offset_type offset,
		   section_offset_type* poutput) const
  {
    if ((object == this->relobj())
	&& (shndx == this->shndx())
	&& (offset >= 0)
	&& (offset <=
	    convert_types<section_offset_type, uint32_t>(this->original_size_)))
      {
	*poutput = offset;
	return true;
      }
    else
      return false;
  }

 private:
  // Copying is not allowed.
  AArch64_input_section(const AArch64_input_section&);
  AArch64_input_section& operator=(const AArch64_input_section&);

  // The relocation stubs.
  The_stub_table* stub_table_;
  // Original section contents. We have to make a copy here since the file
  // containing the original section may not be locked when we need to access
  // the contents.
  unsigned char* original_contents_;
  // Section size of the original input section.
  uint32_t original_size_;
  // Address alignment of the original input section.
  uint32_t original_addralign_;
};  // End of AArch64_input_section


// Finalize data size.
2484 2485template<int size, bool big_endian> 2486void 2487AArch64_input_section<size, big_endian>::set_final_data_size() 2488{ 2489 off_t off = convert_types<off_t, uint64_t>(this->original_size_); 2490 2491 if (this->is_stub_table_owner()) 2492 { 2493 this->stub_table_->finalize_data_size(); 2494 off = align_address(off, this->stub_table_->addralign()); 2495 off += this->stub_table_->data_size(); 2496 } 2497 this->set_data_size(off); 2498} 2499 2500 2501// Reset address and file offset. 2502 2503template<int size, bool big_endian> 2504void 2505AArch64_input_section<size, big_endian>::do_reset_address_and_file_offset() 2506{ 2507 // Size of the original input section contents. 2508 off_t off = convert_types<off_t, uint64_t>(this->original_size_); 2509 2510 // If this is a stub table owner, account for the stub table size. 2511 if (this->is_stub_table_owner()) 2512 { 2513 The_stub_table* stub_table = this->stub_table_; 2514 2515 // Reset the stub table's address and file offset. The 2516 // current data size for child will be updated after that. 2517 stub_table_->reset_address_and_file_offset(); 2518 off = align_address(off, stub_table_->addralign()); 2519 off += stub_table->current_data_size(); 2520 } 2521 2522 this->set_current_data_size(off); 2523} 2524 2525 2526// Initialize an Arm_input_section. 2527 2528template<int size, bool big_endian> 2529void 2530AArch64_input_section<size, big_endian>::init() 2531{ 2532 Relobj* relobj = this->relobj(); 2533 unsigned int shndx = this->shndx(); 2534 2535 // We have to cache original size, alignment and contents to avoid locking 2536 // the original file. 2537 this->original_addralign_ = 2538 convert_types<uint32_t, uint64_t>(relobj->section_addralign(shndx)); 2539 2540 // This is not efficient but we expect only a small number of relaxed 2541 // input sections for stubs. 
2542 section_size_type section_size; 2543 const unsigned char* section_contents = 2544 relobj->section_contents(shndx, §ion_size, false); 2545 this->original_size_ = 2546 convert_types<uint32_t, uint64_t>(relobj->section_size(shndx)); 2547 2548 gold_assert(this->original_contents_ == NULL); 2549 this->original_contents_ = new unsigned char[section_size]; 2550 memcpy(this->original_contents_, section_contents, section_size); 2551 2552 // We want to make this look like the original input section after 2553 // output sections are finalized. 2554 Output_section* os = relobj->output_section(shndx); 2555 off_t offset = relobj->output_section_offset(shndx); 2556 gold_assert(os != NULL && !relobj->is_output_section_offset_invalid(shndx)); 2557 this->set_address(os->address() + offset); 2558 this->set_file_offset(os->offset() + offset); 2559 this->set_current_data_size(this->original_size_); 2560 this->finalize_data_size(); 2561} 2562 2563 2564// Write data to output file. 2565 2566template<int size, bool big_endian> 2567void 2568AArch64_input_section<size, big_endian>::do_write(Output_file* of) 2569{ 2570 // We have to write out the original section content. 2571 gold_assert(this->original_contents_ != NULL); 2572 of->write(this->offset(), this->original_contents_, 2573 this->original_size_); 2574 2575 // If this owns a stub table and it is not empty, write it. 2576 if (this->is_stub_table_owner() && !this->stub_table_->empty()) 2577 this->stub_table_->write(of); 2578} 2579 2580 2581// Arm output section class. This is defined mainly to add a number of stub 2582// generation methods. 

template<int size, bool big_endian>
class AArch64_output_section : public Output_section
{
 public:
  typedef Target_aarch64<size, big_endian> The_target_aarch64;
  typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
  typedef Stub_table<size, big_endian> The_stub_table;
  typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;

 public:
  AArch64_output_section(const char* name, elfcpp::Elf_Word type,
			 elfcpp::Elf_Xword flags)
    : Output_section(name, type, flags)
  { }

  ~AArch64_output_section() {}

  // Group input sections for stub generation.
  void
  group_sections(section_size_type, bool, Target_aarch64<size, big_endian>*,
		 const Task*);

 private:
  typedef Output_section::Input_section Input_section;
  typedef Output_section::Input_section_list Input_section_list;

  // Create a stub group.
  void
  create_stub_group(Input_section_list::const_iterator,
		    Input_section_list::const_iterator,
		    Input_section_list::const_iterator,
		    The_target_aarch64*,
		    std::vector<Output_relaxed_input_section*>&,
		    const Task*);
};  // End of AArch64_output_section


// Create a stub group for input sections from FIRST to LAST. OWNER points to
// the input section that will be the owner of the stub table.

template<int size, bool big_endian> void
AArch64_output_section<size, big_endian>::create_stub_group(
    Input_section_list::const_iterator first,
    Input_section_list::const_iterator last,
    Input_section_list::const_iterator owner,
    The_target_aarch64* target,
    std::vector<Output_relaxed_input_section*>& new_relaxed_sections,
    const Task* task)
{
  // Currently we convert ordinary input sections into relaxed sections only
  // at this point.
  The_aarch64_input_section* input_section;
  if (owner->is_relaxed_input_section())
    gold_unreachable();
  else
    {
      gold_assert(owner->is_input_section());
      // Create a new relaxed input section. We need to lock the original
      // file.
      Task_lock_obj<Object> tl(task, owner->relobj());
      input_section =
	  target->new_aarch64_input_section(owner->relobj(), owner->shndx());
      new_relaxed_sections.push_back(input_section);
    }

  // Create a stub table.
  The_stub_table* stub_table =
      target->new_stub_table(input_section);

  input_section->set_stub_table(stub_table);

  Input_section_list::const_iterator p = first;
  // Look for input sections or relaxed input sections in [first ... last].
  // Record the stub table for every member of the group.
  do
    {
      if (p->is_input_section() || p->is_relaxed_input_section())
	{
	  // The stub table information for input sections live
	  // in their objects.
	  The_aarch64_relobj* aarch64_relobj =
	      static_cast<The_aarch64_relobj*>(p->relobj());
	  aarch64_relobj->set_stub_table(p->shndx(), stub_table);
	}
    }
  while (p++ != last);
}


// Group input sections for stub generation. GROUP_SIZE is roughly the limit of
// stub groups. We grow a stub group by adding input section until the size is
// just below GROUP_SIZE. The last input section will be converted into a stub
// table owner. If STUB_ALWAYS_AFTER_BRANCH is false, we also add input sections
// after the stub table, effectively doubling the group size.
//
// This is similar to the group_sections() function in elf32-arm.c but is
// implemented differently.

template<int size, bool big_endian>
void AArch64_output_section<size, big_endian>::group_sections(
    section_size_type group_size,
    bool stubs_always_after_branch,
    Target_aarch64<size, big_endian>* target,
    const Task* task)
{
  // Grouping is a small state machine over the input section list.
  typedef enum
  {
    NO_GROUP,
    FINDING_STUB_SECTION,
    HAS_STUB_SECTION
  } State;

  std::vector<Output_relaxed_input_section*> new_relaxed_sections;

  State state = NO_GROUP;
  section_size_type off = 0;
  section_size_type group_begin_offset = 0;
  section_size_type group_end_offset = 0;
  section_size_type stub_table_end_offset = 0;
  Input_section_list::const_iterator group_begin =
      this->input_sections().end();
  Input_section_list::const_iterator stub_table =
      this->input_sections().end();
  Input_section_list::const_iterator group_end = this->input_sections().end();
  for (Input_section_list::const_iterator p = this->input_sections().begin();
       p != this->input_sections().end();
       ++p)
    {
      section_size_type section_begin_offset =
	align_address(off, p->addralign());
      section_size_type section_end_offset =
	section_begin_offset + p->data_size();

      // Check to see if we should group the previously seen sections.
      switch (state)
	{
	case NO_GROUP:
	  break;

	case FINDING_STUB_SECTION:
	  // Adding this section makes the group larger than GROUP_SIZE.
	  if (section_end_offset - group_begin_offset >= group_size)
	    {
	      if (stubs_always_after_branch)
		{
		  gold_assert(group_end != this->input_sections().end());
		  this->create_stub_group(group_begin, group_end, group_end,
					  target, new_relaxed_sections,
					  task);
		  state = NO_GROUP;
		}
	      else
		{
		  // Input sections up to stub_group_size bytes after the stub
		  // table can be handled by it too.
		  state = HAS_STUB_SECTION;
		  stub_table = group_end;
		  stub_table_end_offset = group_end_offset;
		}
	    }
	  break;

	case HAS_STUB_SECTION:
	  // Adding this section makes the post stub-section group larger
	  // than GROUP_SIZE.
	  gold_unreachable();
	  // NOT SUPPORTED YET. For completeness only.
	  if (section_end_offset - stub_table_end_offset >= group_size)
	    {
	      gold_assert(group_end != this->input_sections().end());
	      this->create_stub_group(group_begin, group_end, stub_table,
				      target, new_relaxed_sections, task);
	      state = NO_GROUP;
	    }
	  break;

	default:
	  gold_unreachable();
	}

      // If we see an input section and currently there is no group, start
      // a new one. Skip any empty sections. We look at the data size
      // instead of calling p->relobj()->section_size() to avoid locking.
      if ((p->is_input_section() || p->is_relaxed_input_section())
	  && (p->data_size() != 0))
	{
	  if (state == NO_GROUP)
	    {
	      state = FINDING_STUB_SECTION;
	      group_begin = p;
	      group_begin_offset = section_begin_offset;
	    }

	  // Keep track of the last input section seen.
	  group_end = p;
	  group_end_offset = section_end_offset;
	}

      off = section_end_offset;
    }

  // Create a stub group for any ungrouped sections.
  if (state == FINDING_STUB_SECTION || state == HAS_STUB_SECTION)
    {
      gold_assert(group_end != this->input_sections().end());
      this->create_stub_group(group_begin, group_end,
			      (state == FINDING_STUB_SECTION
			       ? group_end
			       : stub_table),
			      target, new_relaxed_sections, task);
    }

  if (!new_relaxed_sections.empty())
    this->convert_input_sections_to_relaxed_sections(new_relaxed_sections);

  // Update the section offsets
  for (size_t i = 0; i < new_relaxed_sections.size(); ++i)
    {
      The_aarch64_relobj* relobj = static_cast<The_aarch64_relobj*>(
	  new_relaxed_sections[i]->relobj());
      unsigned int shndx = new_relaxed_sections[i]->shndx();
      // Tell AArch64_relobj that this input section is converted.
      relobj->convert_input_section_to_relaxed_section(shndx);
    }
} // End of AArch64_output_section::group_sections


// Singleton table of AArch64 relocation properties, created when this
// target is selected as the default target.
AArch64_reloc_property_table* aarch64_reloc_property_table = NULL;


// The aarch64 target class.
// See the ABI at
// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0056b/IHI0056B_aaelf64.pdf
template<int size, bool big_endian>
class Target_aarch64 : public Sized_target<size, big_endian>
{
 public:
  typedef Target_aarch64<size, big_endian> This;
  typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
      Reloc_section;
  typedef Relocate_info<size, big_endian> The_relocate_info;
  typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
  typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
  typedef Reloc_stub<size, big_endian> The_reloc_stub;
  typedef Erratum_stub<size, big_endian> The_erratum_stub;
  typedef typename Reloc_stub<size, big_endian>::Key The_reloc_stub_key;
  typedef Stub_table<size, big_endian> The_stub_table;
  typedef std::vector<The_stub_table*> Stub_table_list;
  typedef typename Stub_table_list::iterator Stub_table_iterator;
  typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
  typedef AArch64_output_section<size, big_endian> The_aarch64_output_section;
  typedef Unordered_map<Section_id,
			AArch64_input_section<size, big_endian>*,
Section_id_hash> AArch64_input_section_map; 2837 typedef AArch64_insn_utilities<big_endian> Insn_utilities; 2838 const static int TCB_SIZE = size / 8 * 2; 2839 2840 Target_aarch64(const Target::Target_info* info = &aarch64_info) 2841 : Sized_target<size, big_endian>(info), 2842 got_(NULL), plt_(NULL), got_plt_(NULL), got_irelative_(NULL), 2843 got_tlsdesc_(NULL), global_offset_table_(NULL), rela_dyn_(NULL), 2844 rela_irelative_(NULL), copy_relocs_(elfcpp::R_AARCH64_COPY), 2845 got_mod_index_offset_(-1U), 2846 tlsdesc_reloc_info_(), tls_base_symbol_defined_(false), 2847 stub_tables_(), stub_group_size_(0), aarch64_input_section_map_() 2848 { } 2849 2850 // Scan the relocations to determine unreferenced sections for 2851 // garbage collection. 2852 void 2853 gc_process_relocs(Symbol_table* symtab, 2854 Layout* layout, 2855 Sized_relobj_file<size, big_endian>* object, 2856 unsigned int data_shndx, 2857 unsigned int sh_type, 2858 const unsigned char* prelocs, 2859 size_t reloc_count, 2860 Output_section* output_section, 2861 bool needs_special_offset_handling, 2862 size_t local_symbol_count, 2863 const unsigned char* plocal_symbols); 2864 2865 // Scan the relocations to look for symbol adjustments. 2866 void 2867 scan_relocs(Symbol_table* symtab, 2868 Layout* layout, 2869 Sized_relobj_file<size, big_endian>* object, 2870 unsigned int data_shndx, 2871 unsigned int sh_type, 2872 const unsigned char* prelocs, 2873 size_t reloc_count, 2874 Output_section* output_section, 2875 bool needs_special_offset_handling, 2876 size_t local_symbol_count, 2877 const unsigned char* plocal_symbols); 2878 2879 // Finalize the sections. 2880 void 2881 do_finalize_sections(Layout*, const Input_objects*, Symbol_table*); 2882 2883 // Return the value to use for a dynamic which requires special 2884 // treatment. 2885 uint64_t 2886 do_dynsym_value(const Symbol*) const; 2887 2888 // Relocate a section. 
2889 void 2890 relocate_section(const Relocate_info<size, big_endian>*, 2891 unsigned int sh_type, 2892 const unsigned char* prelocs, 2893 size_t reloc_count, 2894 Output_section* output_section, 2895 bool needs_special_offset_handling, 2896 unsigned char* view, 2897 typename elfcpp::Elf_types<size>::Elf_Addr view_address, 2898 section_size_type view_size, 2899 const Reloc_symbol_changes*); 2900 2901 // Scan the relocs during a relocatable link. 2902 void 2903 scan_relocatable_relocs(Symbol_table* symtab, 2904 Layout* layout, 2905 Sized_relobj_file<size, big_endian>* object, 2906 unsigned int data_shndx, 2907 unsigned int sh_type, 2908 const unsigned char* prelocs, 2909 size_t reloc_count, 2910 Output_section* output_section, 2911 bool needs_special_offset_handling, 2912 size_t local_symbol_count, 2913 const unsigned char* plocal_symbols, 2914 Relocatable_relocs*); 2915 2916 // Scan the relocs for --emit-relocs. 2917 void 2918 emit_relocs_scan(Symbol_table* symtab, 2919 Layout* layout, 2920 Sized_relobj_file<size, big_endian>* object, 2921 unsigned int data_shndx, 2922 unsigned int sh_type, 2923 const unsigned char* prelocs, 2924 size_t reloc_count, 2925 Output_section* output_section, 2926 bool needs_special_offset_handling, 2927 size_t local_symbol_count, 2928 const unsigned char* plocal_syms, 2929 Relocatable_relocs* rr); 2930 2931 // Relocate a section during a relocatable link. 2932 void 2933 relocate_relocs( 2934 const Relocate_info<size, big_endian>*, 2935 unsigned int sh_type, 2936 const unsigned char* prelocs, 2937 size_t reloc_count, 2938 Output_section* output_section, 2939 typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section, 2940 unsigned char* view, 2941 typename elfcpp::Elf_types<size>::Elf_Addr view_address, 2942 section_size_type view_size, 2943 unsigned char* reloc_view, 2944 section_size_type reloc_view_size); 2945 2946 // Return the symbol index to use for a target specific relocation. 
2947 // The only target specific relocation is R_AARCH64_TLSDESC for a 2948 // local symbol, which is an absolute reloc. 2949 unsigned int 2950 do_reloc_symbol_index(void*, unsigned int r_type) const 2951 { 2952 gold_assert(r_type == elfcpp::R_AARCH64_TLSDESC); 2953 return 0; 2954 } 2955 2956 // Return the addend to use for a target specific relocation. 2957 uint64_t 2958 do_reloc_addend(void* arg, unsigned int r_type, uint64_t addend) const; 2959 2960 // Return the PLT section. 2961 uint64_t 2962 do_plt_address_for_global(const Symbol* gsym) const 2963 { return this->plt_section()->address_for_global(gsym); } 2964 2965 uint64_t 2966 do_plt_address_for_local(const Relobj* relobj, unsigned int symndx) const 2967 { return this->plt_section()->address_for_local(relobj, symndx); } 2968 2969 // This function should be defined in targets that can use relocation 2970 // types to determine (implemented in local_reloc_may_be_function_pointer 2971 // and global_reloc_may_be_function_pointer) 2972 // if a function's pointer is taken. ICF uses this in safe mode to only 2973 // fold those functions whose pointer is defintely not taken. 2974 bool 2975 do_can_check_for_function_pointers() const 2976 { return true; } 2977 2978 // Return the number of entries in the PLT. 2979 unsigned int 2980 plt_entry_count() const; 2981 2982 //Return the offset of the first non-reserved PLT entry. 2983 unsigned int 2984 first_plt_entry_offset() const; 2985 2986 // Return the size of each PLT entry. 2987 unsigned int 2988 plt_entry_size() const; 2989 2990 // Create a stub table. 2991 The_stub_table* 2992 new_stub_table(The_aarch64_input_section*); 2993 2994 // Create an aarch64 input section. 2995 The_aarch64_input_section* 2996 new_aarch64_input_section(Relobj*, unsigned int); 2997 2998 // Find an aarch64 input section instance for a given OBJ and SHNDX. 
2999 The_aarch64_input_section* 3000 find_aarch64_input_section(Relobj*, unsigned int) const; 3001 3002 // Return the thread control block size. 3003 unsigned int 3004 tcb_size() const { return This::TCB_SIZE; } 3005 3006 // Scan a section for stub generation. 3007 void 3008 scan_section_for_stubs(const Relocate_info<size, big_endian>*, unsigned int, 3009 const unsigned char*, size_t, Output_section*, 3010 bool, const unsigned char*, 3011 Address, 3012 section_size_type); 3013 3014 // Scan a relocation section for stub. 3015 template<int sh_type> 3016 void 3017 scan_reloc_section_for_stubs( 3018 const The_relocate_info* relinfo, 3019 const unsigned char* prelocs, 3020 size_t reloc_count, 3021 Output_section* output_section, 3022 bool needs_special_offset_handling, 3023 const unsigned char* view, 3024 Address view_address, 3025 section_size_type); 3026 3027 // Relocate a single reloc stub. 3028 void 3029 relocate_reloc_stub(The_reloc_stub*, const Relocate_info<size, big_endian>*, 3030 Output_section*, unsigned char*, Address, 3031 section_size_type); 3032 3033 // Get the default AArch64 target. 3034 static This* 3035 current_target() 3036 { 3037 gold_assert(parameters->target().machine_code() == elfcpp::EM_AARCH64 3038 && parameters->target().get_size() == size 3039 && parameters->target().is_big_endian() == big_endian); 3040 return static_cast<This*>(parameters->sized_target<size, big_endian>()); 3041 } 3042 3043 3044 // Scan erratum 843419 for a part of a section. 3045 void 3046 scan_erratum_843419_span( 3047 AArch64_relobj<size, big_endian>*, 3048 unsigned int, 3049 const section_size_type, 3050 const section_size_type, 3051 unsigned char*, 3052 Address); 3053 3054 // Scan erratum 835769 for a part of a section. 
3055 void 3056 scan_erratum_835769_span( 3057 AArch64_relobj<size, big_endian>*, 3058 unsigned int, 3059 const section_size_type, 3060 const section_size_type, 3061 unsigned char*, 3062 Address); 3063 3064 protected: 3065 void 3066 do_select_as_default_target() 3067 { 3068 gold_assert(aarch64_reloc_property_table == NULL); 3069 aarch64_reloc_property_table = new AArch64_reloc_property_table(); 3070 } 3071 3072 // Add a new reloc argument, returning the index in the vector. 3073 size_t 3074 add_tlsdesc_info(Sized_relobj_file<size, big_endian>* object, 3075 unsigned int r_sym) 3076 { 3077 this->tlsdesc_reloc_info_.push_back(Tlsdesc_info(object, r_sym)); 3078 return this->tlsdesc_reloc_info_.size() - 1; 3079 } 3080 3081 virtual Output_data_plt_aarch64<size, big_endian>* 3082 do_make_data_plt(Layout* layout, 3083 Output_data_got_aarch64<size, big_endian>* got, 3084 Output_data_space* got_plt, 3085 Output_data_space* got_irelative) 3086 { 3087 return new Output_data_plt_aarch64_standard<size, big_endian>( 3088 layout, got, got_plt, got_irelative); 3089 } 3090 3091 3092 // do_make_elf_object to override the same function in the base class. 3093 Object* 3094 do_make_elf_object(const std::string&, Input_file*, off_t, 3095 const elfcpp::Ehdr<size, big_endian>&); 3096 3097 Output_data_plt_aarch64<size, big_endian>* 3098 make_data_plt(Layout* layout, 3099 Output_data_got_aarch64<size, big_endian>* got, 3100 Output_data_space* got_plt, 3101 Output_data_space* got_irelative) 3102 { 3103 return this->do_make_data_plt(layout, got, got_plt, got_irelative); 3104 } 3105 3106 // We only need to generate stubs, and hence perform relaxation if we are 3107 // not doing relocatable linking. 3108 virtual bool 3109 do_may_relax() const 3110 { return !parameters->options().relocatable(); } 3111 3112 // Relaxation hook. This is where we do stub generation. 
3113 virtual bool 3114 do_relax(int, const Input_objects*, Symbol_table*, Layout*, const Task*); 3115 3116 void 3117 group_sections(Layout* layout, 3118 section_size_type group_size, 3119 bool stubs_always_after_branch, 3120 const Task* task); 3121 3122 void 3123 scan_reloc_for_stub(const The_relocate_info*, unsigned int, 3124 const Sized_symbol<size>*, unsigned int, 3125 const Symbol_value<size>*, 3126 typename elfcpp::Elf_types<size>::Elf_Swxword, 3127 Address Elf_Addr); 3128 3129 // Make an output section. 3130 Output_section* 3131 do_make_output_section(const char* name, elfcpp::Elf_Word type, 3132 elfcpp::Elf_Xword flags) 3133 { return new The_aarch64_output_section(name, type, flags); } 3134 3135 private: 3136 // The class which scans relocations. 3137 class Scan 3138 { 3139 public: 3140 Scan() 3141 : issued_non_pic_error_(false) 3142 { } 3143 3144 inline void 3145 local(Symbol_table* symtab, Layout* layout, Target_aarch64* target, 3146 Sized_relobj_file<size, big_endian>* object, 3147 unsigned int data_shndx, 3148 Output_section* output_section, 3149 const elfcpp::Rela<size, big_endian>& reloc, unsigned int r_type, 3150 const elfcpp::Sym<size, big_endian>& lsym, 3151 bool is_discarded); 3152 3153 inline void 3154 global(Symbol_table* symtab, Layout* layout, Target_aarch64* target, 3155 Sized_relobj_file<size, big_endian>* object, 3156 unsigned int data_shndx, 3157 Output_section* output_section, 3158 const elfcpp::Rela<size, big_endian>& reloc, unsigned int r_type, 3159 Symbol* gsym); 3160 3161 inline bool 3162 local_reloc_may_be_function_pointer(Symbol_table* , Layout* , 3163 Target_aarch64<size, big_endian>* , 3164 Sized_relobj_file<size, big_endian>* , 3165 unsigned int , 3166 Output_section* , 3167 const elfcpp::Rela<size, big_endian>& , 3168 unsigned int r_type, 3169 const elfcpp::Sym<size, big_endian>&); 3170 3171 inline bool 3172 global_reloc_may_be_function_pointer(Symbol_table* , Layout* , 3173 Target_aarch64<size, big_endian>* , 3174 
Sized_relobj_file<size, big_endian>* , 3175 unsigned int , 3176 Output_section* , 3177 const elfcpp::Rela<size, big_endian>& , 3178 unsigned int r_type, 3179 Symbol* gsym); 3180 3181 private: 3182 static void 3183 unsupported_reloc_local(Sized_relobj_file<size, big_endian>*, 3184 unsigned int r_type); 3185 3186 static void 3187 unsupported_reloc_global(Sized_relobj_file<size, big_endian>*, 3188 unsigned int r_type, Symbol*); 3189 3190 inline bool 3191 possible_function_pointer_reloc(unsigned int r_type); 3192 3193 void 3194 check_non_pic(Relobj*, unsigned int r_type); 3195 3196 bool 3197 reloc_needs_plt_for_ifunc(Sized_relobj_file<size, big_endian>*, 3198 unsigned int r_type); 3199 3200 // Whether we have issued an error about a non-PIC compilation. 3201 bool issued_non_pic_error_; 3202 }; 3203 3204 // The class which implements relocation. 3205 class Relocate 3206 { 3207 public: 3208 Relocate() 3209 : skip_call_tls_get_addr_(false) 3210 { } 3211 3212 ~Relocate() 3213 { } 3214 3215 // Do a relocation. Return false if the caller should not issue 3216 // any warnings about this relocation. 
3217 inline bool 3218 relocate(const Relocate_info<size, big_endian>*, unsigned int, 3219 Target_aarch64*, Output_section*, size_t, const unsigned char*, 3220 const Sized_symbol<size>*, const Symbol_value<size>*, 3221 unsigned char*, typename elfcpp::Elf_types<size>::Elf_Addr, 3222 section_size_type); 3223 3224 private: 3225 inline typename AArch64_relocate_functions<size, big_endian>::Status 3226 relocate_tls(const Relocate_info<size, big_endian>*, 3227 Target_aarch64<size, big_endian>*, 3228 size_t, 3229 const elfcpp::Rela<size, big_endian>&, 3230 unsigned int r_type, const Sized_symbol<size>*, 3231 const Symbol_value<size>*, 3232 unsigned char*, 3233 typename elfcpp::Elf_types<size>::Elf_Addr); 3234 3235 inline typename AArch64_relocate_functions<size, big_endian>::Status 3236 tls_gd_to_le( 3237 const Relocate_info<size, big_endian>*, 3238 Target_aarch64<size, big_endian>*, 3239 const elfcpp::Rela<size, big_endian>&, 3240 unsigned int, 3241 unsigned char*, 3242 const Symbol_value<size>*); 3243 3244 inline typename AArch64_relocate_functions<size, big_endian>::Status 3245 tls_ld_to_le( 3246 const Relocate_info<size, big_endian>*, 3247 Target_aarch64<size, big_endian>*, 3248 const elfcpp::Rela<size, big_endian>&, 3249 unsigned int, 3250 unsigned char*, 3251 const Symbol_value<size>*); 3252 3253 inline typename AArch64_relocate_functions<size, big_endian>::Status 3254 tls_ie_to_le( 3255 const Relocate_info<size, big_endian>*, 3256 Target_aarch64<size, big_endian>*, 3257 const elfcpp::Rela<size, big_endian>&, 3258 unsigned int, 3259 unsigned char*, 3260 const Symbol_value<size>*); 3261 3262 inline typename AArch64_relocate_functions<size, big_endian>::Status 3263 tls_desc_gd_to_le( 3264 const Relocate_info<size, big_endian>*, 3265 Target_aarch64<size, big_endian>*, 3266 const elfcpp::Rela<size, big_endian>&, 3267 unsigned int, 3268 unsigned char*, 3269 const Symbol_value<size>*); 3270 3271 inline typename AArch64_relocate_functions<size, big_endian>::Status 3272 
tls_desc_gd_to_ie( 3273 const Relocate_info<size, big_endian>*, 3274 Target_aarch64<size, big_endian>*, 3275 const elfcpp::Rela<size, big_endian>&, 3276 unsigned int, 3277 unsigned char*, 3278 const Symbol_value<size>*, 3279 typename elfcpp::Elf_types<size>::Elf_Addr, 3280 typename elfcpp::Elf_types<size>::Elf_Addr); 3281 3282 bool skip_call_tls_get_addr_; 3283 3284 }; // End of class Relocate 3285 3286 // Adjust TLS relocation type based on the options and whether this 3287 // is a local symbol. 3288 static tls::Tls_optimization 3289 optimize_tls_reloc(bool is_final, int r_type); 3290 3291 // Get the GOT section, creating it if necessary. 3292 Output_data_got_aarch64<size, big_endian>* 3293 got_section(Symbol_table*, Layout*); 3294 3295 // Get the GOT PLT section. 3296 Output_data_space* 3297 got_plt_section() const 3298 { 3299 gold_assert(this->got_plt_ != NULL); 3300 return this->got_plt_; 3301 } 3302 3303 // Get the GOT section for TLSDESC entries. 3304 Output_data_got<size, big_endian>* 3305 got_tlsdesc_section() const 3306 { 3307 gold_assert(this->got_tlsdesc_ != NULL); 3308 return this->got_tlsdesc_; 3309 } 3310 3311 // Create the PLT section. 3312 void 3313 make_plt_section(Symbol_table* symtab, Layout* layout); 3314 3315 // Create a PLT entry for a global symbol. 3316 void 3317 make_plt_entry(Symbol_table*, Layout*, Symbol*); 3318 3319 // Create a PLT entry for a local STT_GNU_IFUNC symbol. 3320 void 3321 make_local_ifunc_plt_entry(Symbol_table*, Layout*, 3322 Sized_relobj_file<size, big_endian>* relobj, 3323 unsigned int local_sym_index); 3324 3325 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment. 3326 void 3327 define_tls_base_symbol(Symbol_table*, Layout*); 3328 3329 // Create the reserved PLT and GOT entries for the TLS descriptor resolver. 3330 void 3331 reserve_tlsdesc_entries(Symbol_table* symtab, Layout* layout); 3332 3333 // Create a GOT entry for the TLS module index. 
3334 unsigned int 3335 got_mod_index_entry(Symbol_table* symtab, Layout* layout, 3336 Sized_relobj_file<size, big_endian>* object); 3337 3338 // Get the PLT section. 3339 Output_data_plt_aarch64<size, big_endian>* 3340 plt_section() const 3341 { 3342 gold_assert(this->plt_ != NULL); 3343 return this->plt_; 3344 } 3345 3346 // Helper method to create erratum stubs for ST_E_843419 and ST_E_835769. For 3347 // ST_E_843419, we need an additional field for adrp offset. 3348 void create_erratum_stub( 3349 AArch64_relobj<size, big_endian>* relobj, 3350 unsigned int shndx, 3351 section_size_type erratum_insn_offset, 3352 Address erratum_address, 3353 typename Insn_utilities::Insntype erratum_insn, 3354 int erratum_type, 3355 unsigned int e843419_adrp_offset=0); 3356 3357 // Return whether this is a 3-insn erratum sequence. 3358 bool is_erratum_843419_sequence( 3359 typename elfcpp::Swap<32,big_endian>::Valtype insn1, 3360 typename elfcpp::Swap<32,big_endian>::Valtype insn2, 3361 typename elfcpp::Swap<32,big_endian>::Valtype insn3); 3362 3363 // Return whether this is a 835769 sequence. 3364 // (Similarly implemented as in elfnn-aarch64.c.) 3365 bool is_erratum_835769_sequence( 3366 typename elfcpp::Swap<32,big_endian>::Valtype, 3367 typename elfcpp::Swap<32,big_endian>::Valtype); 3368 3369 // Get the dynamic reloc section, creating it if necessary. 3370 Reloc_section* 3371 rela_dyn_section(Layout*); 3372 3373 // Get the section to use for TLSDESC relocations. 3374 Reloc_section* 3375 rela_tlsdesc_section(Layout*) const; 3376 3377 // Get the section to use for IRELATIVE relocations. 3378 Reloc_section* 3379 rela_irelative_section(Layout*); 3380 3381 // Add a potential copy relocation. 
3382 void 3383 copy_reloc(Symbol_table* symtab, Layout* layout, 3384 Sized_relobj_file<size, big_endian>* object, 3385 unsigned int shndx, Output_section* output_section, 3386 Symbol* sym, const elfcpp::Rela<size, big_endian>& reloc) 3387 { 3388 unsigned int r_type = elfcpp::elf_r_type<size>(reloc.get_r_info()); 3389 this->copy_relocs_.copy_reloc(symtab, layout, 3390 symtab->get_sized_symbol<size>(sym), 3391 object, shndx, output_section, 3392 r_type, reloc.get_r_offset(), 3393 reloc.get_r_addend(), 3394 this->rela_dyn_section(layout)); 3395 } 3396 3397 // Information about this specific target which we pass to the 3398 // general Target structure. 3399 static const Target::Target_info aarch64_info; 3400 3401 // The types of GOT entries needed for this platform. 3402 // These values are exposed to the ABI in an incremental link. 3403 // Do not renumber existing values without changing the version 3404 // number of the .gnu_incremental_inputs section. 3405 enum Got_type 3406 { 3407 GOT_TYPE_STANDARD = 0, // GOT entry for a regular symbol 3408 GOT_TYPE_TLS_OFFSET = 1, // GOT entry for TLS offset 3409 GOT_TYPE_TLS_PAIR = 2, // GOT entry for TLS module/offset pair 3410 GOT_TYPE_TLS_DESC = 3 // GOT entry for TLS_DESC pair 3411 }; 3412 3413 // This type is used as the argument to the target specific 3414 // relocation routines. The only target specific reloc is 3415 // R_AARCh64_TLSDESC against a local symbol. 3416 struct Tlsdesc_info 3417 { 3418 Tlsdesc_info(Sized_relobj_file<size, big_endian>* a_object, 3419 unsigned int a_r_sym) 3420 : object(a_object), r_sym(a_r_sym) 3421 { } 3422 3423 // The object in which the local symbol is defined. 3424 Sized_relobj_file<size, big_endian>* object; 3425 // The local symbol index in the object. 3426 unsigned int r_sym; 3427 }; 3428 3429 // The GOT section. 3430 Output_data_got_aarch64<size, big_endian>* got_; 3431 // The PLT section. 3432 Output_data_plt_aarch64<size, big_endian>* plt_; 3433 // The GOT PLT section. 
3434 Output_data_space* got_plt_; 3435 // The GOT section for IRELATIVE relocations. 3436 Output_data_space* got_irelative_; 3437 // The GOT section for TLSDESC relocations. 3438 Output_data_got<size, big_endian>* got_tlsdesc_; 3439 // The _GLOBAL_OFFSET_TABLE_ symbol. 3440 Symbol* global_offset_table_; 3441 // The dynamic reloc section. 3442 Reloc_section* rela_dyn_; 3443 // The section to use for IRELATIVE relocs. 3444 Reloc_section* rela_irelative_; 3445 // Relocs saved to avoid a COPY reloc. 3446 Copy_relocs<elfcpp::SHT_RELA, size, big_endian> copy_relocs_; 3447 // Offset of the GOT entry for the TLS module index. 3448 unsigned int got_mod_index_offset_; 3449 // We handle R_AARCH64_TLSDESC against a local symbol as a target 3450 // specific relocation. Here we store the object and local symbol 3451 // index for the relocation. 3452 std::vector<Tlsdesc_info> tlsdesc_reloc_info_; 3453 // True if the _TLS_MODULE_BASE_ symbol has been defined. 3454 bool tls_base_symbol_defined_; 3455 // List of stub_tables 3456 Stub_table_list stub_tables_; 3457 // Actual stub group size 3458 section_size_type stub_group_size_; 3459 AArch64_input_section_map aarch64_input_section_map_; 3460}; // End of Target_aarch64 3461 3462 3463template<> 3464const Target::Target_info Target_aarch64<64, false>::aarch64_info = 3465{ 3466 64, // size 3467 false, // is_big_endian 3468 elfcpp::EM_AARCH64, // machine_code 3469 false, // has_make_symbol 3470 false, // has_resolve 3471 false, // has_code_fill 3472 true, // is_default_stack_executable 3473 true, // can_icf_inline_merge_sections 3474 '\0', // wrap_char 3475 "/lib/ld.so.1", // program interpreter 3476 0x400000, // default_text_segment_address 3477 0x10000, // abi_pagesize (overridable by -z max-page-size) 3478 0x1000, // common_pagesize (overridable by -z common-page-size) 3479 false, // isolate_execinstr 3480 0, // rosegment_gap 3481 elfcpp::SHN_UNDEF, // small_common_shndx 3482 elfcpp::SHN_UNDEF, // large_common_shndx 3483 0, // 
small_common_section_flags 3484 0, // large_common_section_flags 3485 NULL, // attributes_section 3486 NULL, // attributes_vendor 3487 "_start", // entry_symbol_name 3488 32, // hash_entry_size 3489}; 3490 3491template<> 3492const Target::Target_info Target_aarch64<32, false>::aarch64_info = 3493{ 3494 32, // size 3495 false, // is_big_endian 3496 elfcpp::EM_AARCH64, // machine_code 3497 false, // has_make_symbol 3498 false, // has_resolve 3499 false, // has_code_fill 3500 true, // is_default_stack_executable 3501 false, // can_icf_inline_merge_sections 3502 '\0', // wrap_char 3503 "/lib/ld.so.1", // program interpreter 3504 0x400000, // default_text_segment_address 3505 0x10000, // abi_pagesize (overridable by -z max-page-size) 3506 0x1000, // common_pagesize (overridable by -z common-page-size) 3507 false, // isolate_execinstr 3508 0, // rosegment_gap 3509 elfcpp::SHN_UNDEF, // small_common_shndx 3510 elfcpp::SHN_UNDEF, // large_common_shndx 3511 0, // small_common_section_flags 3512 0, // large_common_section_flags 3513 NULL, // attributes_section 3514 NULL, // attributes_vendor 3515 "_start", // entry_symbol_name 3516 32, // hash_entry_size 3517}; 3518 3519template<> 3520const Target::Target_info Target_aarch64<64, true>::aarch64_info = 3521{ 3522 64, // size 3523 true, // is_big_endian 3524 elfcpp::EM_AARCH64, // machine_code 3525 false, // has_make_symbol 3526 false, // has_resolve 3527 false, // has_code_fill 3528 true, // is_default_stack_executable 3529 true, // can_icf_inline_merge_sections 3530 '\0', // wrap_char 3531 "/lib/ld.so.1", // program interpreter 3532 0x400000, // default_text_segment_address 3533 0x10000, // abi_pagesize (overridable by -z max-page-size) 3534 0x1000, // common_pagesize (overridable by -z common-page-size) 3535 false, // isolate_execinstr 3536 0, // rosegment_gap 3537 elfcpp::SHN_UNDEF, // small_common_shndx 3538 elfcpp::SHN_UNDEF, // large_common_shndx 3539 0, // small_common_section_flags 3540 0, // large_common_section_flags 
3541 NULL, // attributes_section 3542 NULL, // attributes_vendor 3543 "_start", // entry_symbol_name 3544 32, // hash_entry_size 3545}; 3546 3547template<> 3548const Target::Target_info Target_aarch64<32, true>::aarch64_info = 3549{ 3550 32, // size 3551 true, // is_big_endian 3552 elfcpp::EM_AARCH64, // machine_code 3553 false, // has_make_symbol 3554 false, // has_resolve 3555 false, // has_code_fill 3556 true, // is_default_stack_executable 3557 false, // can_icf_inline_merge_sections 3558 '\0', // wrap_char 3559 "/lib/ld.so.1", // program interpreter 3560 0x400000, // default_text_segment_address 3561 0x10000, // abi_pagesize (overridable by -z max-page-size) 3562 0x1000, // common_pagesize (overridable by -z common-page-size) 3563 false, // isolate_execinstr 3564 0, // rosegment_gap 3565 elfcpp::SHN_UNDEF, // small_common_shndx 3566 elfcpp::SHN_UNDEF, // large_common_shndx 3567 0, // small_common_section_flags 3568 0, // large_common_section_flags 3569 NULL, // attributes_section 3570 NULL, // attributes_vendor 3571 "_start", // entry_symbol_name 3572 32, // hash_entry_size 3573}; 3574 3575// Get the GOT section, creating it if necessary. 3576 3577template<int size, bool big_endian> 3578Output_data_got_aarch64<size, big_endian>* 3579Target_aarch64<size, big_endian>::got_section(Symbol_table* symtab, 3580 Layout* layout) 3581{ 3582 if (this->got_ == NULL) 3583 { 3584 gold_assert(symtab != NULL && layout != NULL); 3585 3586 // When using -z now, we can treat .got.plt as a relro section. 3587 // Without -z now, it is modified after program startup by lazy 3588 // PLT relocations. 3589 bool is_got_plt_relro = parameters->options().now(); 3590 Output_section_order got_order = (is_got_plt_relro 3591 ? ORDER_RELRO 3592 : ORDER_RELRO_LAST); 3593 Output_section_order got_plt_order = (is_got_plt_relro 3594 ? ORDER_RELRO 3595 : ORDER_NON_RELRO_FIRST); 3596 3597 // Layout of .got and .got.plt sections. 3598 // .got[0] &_DYNAMIC <-_GLOBAL_OFFSET_TABLE_ 3599 // ... 
3600 // .gotplt[0] reserved for ld.so (&linkmap) <--DT_PLTGOT 3601 // .gotplt[1] reserved for ld.so (resolver) 3602 // .gotplt[2] reserved 3603 3604 // Generate .got section. 3605 this->got_ = new Output_data_got_aarch64<size, big_endian>(symtab, 3606 layout); 3607 layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS, 3608 (elfcpp::SHF_ALLOC | elfcpp::SHF_WRITE), 3609 this->got_, got_order, true); 3610 // The first word of GOT is reserved for the address of .dynamic. 3611 // We put 0 here now. The value will be replaced later in 3612 // Output_data_got_aarch64::do_write. 3613 this->got_->add_constant(0); 3614 3615 // Define _GLOBAL_OFFSET_TABLE_ at the start of the PLT. 3616 // _GLOBAL_OFFSET_TABLE_ value points to the start of the .got section, 3617 // even if there is a .got.plt section. 3618 this->global_offset_table_ = 3619 symtab->define_in_output_data("_GLOBAL_OFFSET_TABLE_", NULL, 3620 Symbol_table::PREDEFINED, 3621 this->got_, 3622 0, 0, elfcpp::STT_OBJECT, 3623 elfcpp::STB_LOCAL, 3624 elfcpp::STV_HIDDEN, 0, 3625 false, false); 3626 3627 // Generate .got.plt section. 3628 this->got_plt_ = new Output_data_space(size / 8, "** GOT PLT"); 3629 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS, 3630 (elfcpp::SHF_ALLOC 3631 | elfcpp::SHF_WRITE), 3632 this->got_plt_, got_plt_order, 3633 is_got_plt_relro); 3634 3635 // The first three entries are reserved. 3636 this->got_plt_->set_current_data_size( 3637 AARCH64_GOTPLT_RESERVE_COUNT * (size / 8)); 3638 3639 // If there are any IRELATIVE relocations, they get GOT entries 3640 // in .got.plt after the jump slot entries. 
3641 this->got_irelative_ = new Output_data_space(size / 8, 3642 "** GOT IRELATIVE PLT"); 3643 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS, 3644 (elfcpp::SHF_ALLOC 3645 | elfcpp::SHF_WRITE), 3646 this->got_irelative_, 3647 got_plt_order, 3648 is_got_plt_relro); 3649 3650 // If there are any TLSDESC relocations, they get GOT entries in 3651 // .got.plt after the jump slot and IRELATIVE entries. 3652 this->got_tlsdesc_ = new Output_data_got<size, big_endian>(); 3653 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS, 3654 (elfcpp::SHF_ALLOC 3655 | elfcpp::SHF_WRITE), 3656 this->got_tlsdesc_, 3657 got_plt_order, 3658 is_got_plt_relro); 3659 3660 if (!is_got_plt_relro) 3661 { 3662 // Those bytes can go into the relro segment. 3663 layout->increase_relro( 3664 AARCH64_GOTPLT_RESERVE_COUNT * (size / 8)); 3665 } 3666 3667 } 3668 return this->got_; 3669} 3670 3671// Get the dynamic reloc section, creating it if necessary. 3672 3673template<int size, bool big_endian> 3674typename Target_aarch64<size, big_endian>::Reloc_section* 3675Target_aarch64<size, big_endian>::rela_dyn_section(Layout* layout) 3676{ 3677 if (this->rela_dyn_ == NULL) 3678 { 3679 gold_assert(layout != NULL); 3680 this->rela_dyn_ = new Reloc_section(parameters->options().combreloc()); 3681 layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA, 3682 elfcpp::SHF_ALLOC, this->rela_dyn_, 3683 ORDER_DYNAMIC_RELOCS, false); 3684 } 3685 return this->rela_dyn_; 3686} 3687 3688// Get the section to use for IRELATIVE relocs, creating it if 3689// necessary. These go in .rela.dyn, but only after all other dynamic 3690// relocations. They need to follow the other dynamic relocations so 3691// that they can refer to global variables initialized by those 3692// relocs. 

// Get the .rela.dyn sub-section holding IRELATIVE relocations, creating it
// if necessary.  IRELATIVE relocs must sort after all other dynamic relocs
// (see the comment above), so .rela.dyn is forced into existence first.

template<int size, bool big_endian>
typename Target_aarch64<size, big_endian>::Reloc_section*
Target_aarch64<size, big_endian>::rela_irelative_section(Layout* layout)
{
  if (this->rela_irelative_ == NULL)
    {
      // Make sure we have already created the dynamic reloc section, so
      // that the IRELATIVE relocs are laid out after it.
      this->rela_dyn_section(layout);
      this->rela_irelative_ = new Reloc_section(false);
      layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA,
				      elfcpp::SHF_ALLOC, this->rela_irelative_,
				      ORDER_DYNAMIC_RELOCS, false);
      // Both reloc data blocks must have landed in the same output section.
      gold_assert(this->rela_dyn_->output_section()
		  == this->rela_irelative_->output_section());
    }
  return this->rela_irelative_;
}


// do_make_elf_object to override the same function in the base class.  We need
// to use a target-specific sub-class of Sized_relobj_file<size, big_endian> to
// store backend specific information.  Hence we need to have our own ELF object
// creation.

template<int size, bool big_endian>
Object*
Target_aarch64<size, big_endian>::do_make_elf_object(
    const std::string& name,
    Input_file* input_file,
    off_t offset, const elfcpp::Ehdr<size, big_endian>& ehdr)
{
  int et = ehdr.get_e_type();
  // ET_EXEC files are valid input for --just-symbols/-R,
  // and we treat them as relocatable objects.
  if (et == elfcpp::ET_EXEC && input_file->just_symbols())
    return Sized_target<size, big_endian>::do_make_elf_object(
	name, input_file, offset, ehdr);
  else if (et == elfcpp::ET_REL)
    {
      // Relocatable objects get the AArch64-specific sub-class, which
      // carries per-object stub and erratum-scanning state.
      AArch64_relobj<size, big_endian>* obj =
	new AArch64_relobj<size, big_endian>(name, input_file, offset, ehdr);
      obj->setup();
      return obj;
    }
  else if (et == elfcpp::ET_DYN)
    {
      // Keep base implementation.
      Sized_dynobj<size, big_endian>* obj =
	new Sized_dynobj<size, big_endian>(name, input_file, offset, ehdr);
      obj->setup();
      return obj;
    }
  else
    {
      gold_error(_("%s: unsupported ELF file type %d"),
		 name.c_str(), et);
      return NULL;
    }
}


// Scan a relocation for stub generation.  R_TYPE is the reloc type; GSYM is
// non-NULL for a global symbol, in which case R_SYM/PSYMVAL describe it;
// ADDEND is the reloc addend and ADDRESS is the place of the branch.  If the
// branch target is out of range for the reloc, record a reloc stub in the
// stub table owning the input section.

template<int size, bool big_endian>
void
Target_aarch64<size, big_endian>::scan_reloc_for_stub(
    const Relocate_info<size, big_endian>* relinfo,
    unsigned int r_type,
    const Sized_symbol<size>* gsym,
    unsigned int r_sym,
    const Symbol_value<size>* psymval,
    typename elfcpp::Elf_types<size>::Elf_Swxword addend,
    Address address)
{
  const AArch64_relobj<size, big_endian>* aarch64_relobj =
      static_cast<AArch64_relobj<size, big_endian>*>(relinfo->object);

  Symbol_value<size> symval;
  if (gsym != NULL)
    {
      const AArch64_reloc_property* arp = aarch64_reloc_property_table->
	get_reloc_property(r_type);
      if (gsym->use_plt_offset(arp->reference_flags()))
	{
	  // This uses a PLT, change the symbol value to the PLT entry so
	  // the range check below is done against the real branch target.
	  symval.set_output_value(this->plt_address_for_global(gsym));
	  psymval = &symval;
	}
      else if (gsym->is_undefined())
	{
	  // There is no need to generate a stub symbol if the original symbol
	  // is undefined.
	  gold_debug(DEBUG_TARGET,
		     "stub: not creating a stub for undefined symbol %s in file %s",
		     gsym->name(), aarch64_relobj->name().c_str());
	  return;
	}
    }

  // Get the symbol value.
  typename Symbol_value<size>::Value value = psymval->value(aarch64_relobj, 0);

  // Compute the branch destination.  (NOTE(review): the original remark here
  // about pipelining skipping two instructions appears inherited from the ARM
  // port; no such adjustment is applied in the code below.)
  Address destination = static_cast<Address>(-1);
  switch (r_type)
    {
    case elfcpp::R_AARCH64_CALL26:
    case elfcpp::R_AARCH64_JUMP26:
      destination = value + addend;
      break;
    default:
      gold_unreachable();
    }

  // Decide whether a stub is needed for this branch distance; ST_NONE
  // means the target is directly reachable.
  int stub_type = The_reloc_stub::
    stub_type_for_reloc(r_type, address, destination);
  if (stub_type == ST_NONE)
    return;

  The_stub_table* stub_table = aarch64_relobj->stub_table(relinfo->data_shndx);
  gold_assert(stub_table != NULL);

  // Stubs are keyed by (type, symbol, object, r_sym, addend) so identical
  // branches to the same target share one stub.
  The_reloc_stub_key key(stub_type, gsym, aarch64_relobj, r_sym, addend);
  The_reloc_stub* stub = stub_table->find_reloc_stub(key);
  if (stub == NULL)
    {
      stub = new The_reloc_stub(stub_type);
      stub_table->add_reloc_stub(stub, key);
    }
  stub->set_destination_address(destination);
}  // End of Target_aarch64::scan_reloc_for_stub


// This function scans a relocation section for stub generation.
// The template parameter Relocate must be a class type which provides
// a single function, relocate(), which implements the machine
// specific part of a relocation.

// BIG_ENDIAN is the endianness of the data.  SH_TYPE is the section type:
// SHT_REL or SHT_RELA.

// PRELOCS points to the relocation data.  RELOC_COUNT is the number
// of relocs.  OUTPUT_SECTION is the output section.
// NEEDS_SPECIAL_OFFSET_HANDLING is true if input offsets need to be
// mapped to output offsets.

// VIEW is the section data, VIEW_ADDRESS is its memory address, and
// VIEW_SIZE is the size.  These refer to the input section, unless
// NEEDS_SPECIAL_OFFSET_HANDLING is true, in which case they refer to
// the output section.

template<int size, bool big_endian>
template<int sh_type>
void inline
Target_aarch64<size, big_endian>::scan_reloc_section_for_stubs(
    const Relocate_info<size, big_endian>* relinfo,
    const unsigned char* prelocs,
    size_t reloc_count,
    Output_section* /*output_section*/,
    bool /*needs_special_offset_handling*/,
    const unsigned char* /*view*/,
    Address view_address,
    section_size_type)
{
  typedef typename Reloc_types<sh_type,size,big_endian>::Reloc Reltype;

  const int reloc_size =
      Reloc_types<sh_type,size,big_endian>::reloc_size;
  AArch64_relobj<size, big_endian>* object =
      static_cast<AArch64_relobj<size, big_endian>*>(relinfo->object);
  unsigned int local_count = object->local_symbol_count();

  gold::Default_comdat_behavior default_comdat_behavior;
  Comdat_behavior comdat_behavior = CB_UNDETERMINED;

  for (size_t i = 0; i < reloc_count; ++i, prelocs += reloc_size)
    {
      Reltype reloc(prelocs);
      typename elfcpp::Elf_types<size>::Elf_WXword r_info = reloc.get_r_info();
      unsigned int r_sym = elfcpp::elf_r_sym<size>(r_info);
      unsigned int r_type = elfcpp::elf_r_type<size>(r_info);
      // Only direct branch relocs (BL / B) can ever need a range stub.
      if (r_type != elfcpp::R_AARCH64_CALL26
	  && r_type != elfcpp::R_AARCH64_JUMP26)
	continue;

      section_offset_type offset =
	  convert_to_section_size_type(reloc.get_r_offset());

      // Get the addend.
      typename elfcpp::Elf_types<size>::Elf_Swxword addend =
	  reloc.get_r_addend();

      const Sized_symbol<size>* sym;
      Symbol_value<size> symval;
      const Symbol_value<size> *psymval;
      bool is_defined_in_discarded_section;
      unsigned int shndx;
      if (r_sym < local_count)
	{
	  // Local symbol.
	  sym = NULL;
	  psymval = object->local_symbol(r_sym);

	  // If the local symbol belongs to a section we are discarding,
	  // and that section is a debug section, try to find the
	  // corresponding kept section and map this symbol to its
	  // counterpart in the kept section.  The symbol must not
	  // correspond to a section we are folding.
	  bool is_ordinary;
	  shndx = psymval->input_shndx(&is_ordinary);
	  is_defined_in_discarded_section =
	    (is_ordinary
	     && shndx != elfcpp::SHN_UNDEF
	     && !object->is_section_included(shndx)
	     && !relinfo->symtab->is_section_folded(object, shndx));

	  // We need to compute the would-be final value of this local
	  // symbol.
	  if (!is_defined_in_discarded_section)
	    {
	      typedef Sized_relobj_file<size, big_endian> ObjType;
	      if (psymval->is_section_symbol())
		symval.set_is_section_symbol();
	      typename ObjType::Compute_final_local_value_status status =
		object->compute_final_local_value(r_sym, psymval, &symval,
						  relinfo->symtab);
	      if (status == ObjType::CFLV_OK)
		{
		  // Currently we cannot handle a branch to a target in
		  // a merged section.  If this is the case, issue an error
		  // and also free the merge symbol value.
		  if (!symval.has_output_value())
		    {
		      const std::string& section_name =
			object->section_name(shndx);
		      object->error(_("cannot handle branch to local %u "
				      "in a merged section %s"),
				    r_sym, section_name.c_str());
		    }
		  psymval = &symval;
		}
	      else
		{
		  // We cannot determine the final value.
		  continue;
		}
	    }
	}
      else
	{
	  // Global symbol: resolve forwarders before computing the value.
	  const Symbol* gsym;
	  gsym = object->global_symbol(r_sym);
	  gold_assert(gsym != NULL);
	  if (gsym->is_forwarder())
	    gsym = relinfo->symtab->resolve_forwards(gsym);

	  sym = static_cast<const Sized_symbol<size>*>(gsym);
	  if (sym->has_symtab_index() && sym->symtab_index() != -1U)
	    symval.set_output_symtab_index(sym->symtab_index());
	  else
	    symval.set_no_output_symtab_entry();

	  // We need to compute the would-be final value of this global
	  // symbol.
	  const Symbol_table* symtab = relinfo->symtab;
	  const Sized_symbol<size>* sized_symbol =
	      symtab->get_sized_symbol<size>(gsym);
	  Symbol_table::Compute_final_value_status status;
	  typename elfcpp::Elf_types<size>::Elf_Addr value =
	      symtab->compute_final_value<size>(sized_symbol, &status);

	  // Skip this if the symbol has no output section.
	  if (status == Symbol_table::CFVS_NO_OUTPUT_SECTION)
	    continue;
	  symval.set_output_value(value);

	  if (gsym->type() == elfcpp::STT_TLS)
	    symval.set_is_tls_symbol();
	  else if (gsym->type() == elfcpp::STT_GNU_IFUNC)
	    symval.set_is_ifunc_symbol();
	  psymval = &symval;

	  is_defined_in_discarded_section =
	    (gsym->is_defined_in_discarded_section()
	     && gsym->is_undefined());
	  shndx = 0;
	}

      // Apply the configured comdat policy to references into discarded
      // sections: pretend (map to the kept section), warn, or ignore.
      Symbol_value<size> symval2;
      if (is_defined_in_discarded_section)
	{
	  if (comdat_behavior == CB_UNDETERMINED)
	    {
	      std::string name = object->section_name(relinfo->data_shndx);
	      comdat_behavior = default_comdat_behavior.get(name.c_str());
	    }
	  if (comdat_behavior == CB_PRETEND)
	    {
	      bool found;
	      typename elfcpp::Elf_types<size>::Elf_Addr value =
		object->map_to_kept_section(shndx, &found);
	      if (found)
		symval2.set_output_value(value + psymval->input_value());
	      else
		symval2.set_output_value(0);
	    }
	  else
	    {
	      if (comdat_behavior == CB_WARNING)
		gold_warning_at_location(relinfo, i, offset,
					 _("relocation refers to discarded "
					   "section"));
	      symval2.set_output_value(0);
	    }
	  symval2.set_no_output_symtab_entry();
	  psymval = &symval2;
	}

      this->scan_reloc_for_stub(relinfo, r_type, sym, r_sym, psymval,
				addend, view_address + offset);
    }  // End of iterating relocs in a section
}  // End of Target_aarch64::scan_reloc_section_for_stubs


// Scan an input section for stub generation.  Only SHT_RELA sections are
// expected on AArch64; dispatch to the templated reloc-section scanner.

template<int size, bool big_endian>
void
Target_aarch64<size, big_endian>::scan_section_for_stubs(
    const Relocate_info<size, big_endian>* relinfo,
    unsigned int sh_type,
    const unsigned char* prelocs,
    size_t reloc_count,
    Output_section* output_section,
    bool needs_special_offset_handling,
    const unsigned char* view,
    Address view_address,
    section_size_type view_size)
{
  gold_assert(sh_type == elfcpp::SHT_RELA);
  this->scan_reloc_section_for_stubs<elfcpp::SHT_RELA>(
      relinfo,
      prelocs,
      reloc_count,
      output_section,
      needs_special_offset_handling,
      view,
      view_address,
      view_size);
}


// Relocate a single reloc stub.  VIEW points at the stub's bytes in the
// output, ADDRESS is the stub's run-time address; patch the stub template
// so it transfers control to the stub's recorded destination.

template<int size, bool big_endian>
void Target_aarch64<size, big_endian>::
relocate_reloc_stub(The_reloc_stub* stub,
		    const The_relocate_info*,
		    Output_section*,
		    unsigned char* view,
		    Address address,
		    section_size_type)
{
  typedef AArch64_relocate_functions<size, big_endian> The_reloc_functions;
  typedef typename The_reloc_functions::Status The_reloc_functions_status;
  typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;

  Insntype* ip = reinterpret_cast<Insntype*>(view);
  int insn_number = stub->insn_num();
  const uint32_t* insns = stub->insns();
  // Check the insns in the view are really this stub type's template insns.
  for (int i = 0; i < insn_number; ++i)
    {
      Insntype insn = elfcpp::Swap<32,big_endian>::readval(ip + i);
      gold_assert(((uint32_t)insn == insns[i]));
    }

  Address dest = stub->destination_address();

  switch(stub->type())
    {
    case ST_ADRP_BRANCH:
      {
	// 1st reloc is ADR_PREL_PG_HI21
	The_reloc_functions_status status =
	    The_reloc_functions::adrp(view, dest, address);
	// An error should never arise in the above step.  If so, please
	// check 'aarch64_valid_for_adrp_p'.
	gold_assert(status == The_reloc_functions::STATUS_OKAY);

	// 2nd reloc is ADD_ABS_LO12_NC
	const AArch64_reloc_property* arp =
	    aarch64_reloc_property_table->get_reloc_property(
		elfcpp::R_AARCH64_ADD_ABS_LO12_NC);
	gold_assert(arp != NULL);
	status = The_reloc_functions::template
	    rela_general<32>(view + 4, dest, 0, arp);
	// An error should never arise, it is an "_NC" relocation.
	gold_assert(status == The_reloc_functions::STATUS_OKAY);
      }
      break;

    case ST_LONG_BRANCH_ABS:
      // 1st reloc is R_AARCH64_PREL64, at offset 8
      elfcpp::Swap<64,big_endian>::writeval(view + 8, dest);
      break;

    case ST_LONG_BRANCH_PCREL:
      {
	// "PC" calculation is the 2nd insn in the stub.
	uint64_t offset = dest - (address + 4);
	// The 64-bit offset occupies insn slots 4 and 5, i.e. byte
	// offset 16 of the stub.
	elfcpp::Swap<64,big_endian>::writeval(view + 16, offset);
      }
      break;

    default:
      gold_unreachable();
    }
}


// A class to handle the PLT data.
// This is an abstract base class that handles most of the linker details
// but does not know the actual contents of PLT entries.  The derived
// classes below fill in those details.

template<int size, bool big_endian>
class Output_data_plt_aarch64 : public Output_section_data
{
 public:
  typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
      Reloc_section;
  typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;

  Output_data_plt_aarch64(Layout* layout,
			  uint64_t addralign,
			  Output_data_got_aarch64<size, big_endian>* got,
			  Output_data_space* got_plt,
			  Output_data_space* got_irelative)
    : Output_section_data(addralign), tlsdesc_rel_(NULL), irelative_rel_(NULL),
      got_(got), got_plt_(got_plt), got_irelative_(got_irelative),
      count_(0), irelative_count_(0), tlsdesc_got_offset_(-1U)
  { this->init(layout); }

  // Initialize the PLT section (creates the .rela.plt reloc section).
  void
  init(Layout* layout);

  // Add an entry to the PLT.
  void
  add_entry(Symbol_table*, Layout*, Symbol* gsym);

  // Add an entry to the PLT for a local STT_GNU_IFUNC symbol.
  unsigned int
  add_local_ifunc_entry(Symbol_table* symtab, Layout*,
			Sized_relobj_file<size, big_endian>* relobj,
			unsigned int local_sym_index);

  // Add the relocation for a PLT entry.
  void
  add_relocation(Symbol_table*, Layout*, Symbol* gsym,
		 unsigned int got_offset);

  // Add the reserved TLSDESC_PLT entry to the PLT.
  void
  reserve_tlsdesc_entry(unsigned int got_offset)
  { this->tlsdesc_got_offset_ = got_offset; }

  // Return true if a TLSDESC_PLT entry has been reserved.
  bool
  has_tlsdesc_entry() const
  { return this->tlsdesc_got_offset_ != -1U; }

  // Return the GOT offset for the reserved TLSDESC_PLT entry.
  unsigned int
  get_tlsdesc_got_offset() const
  { return this->tlsdesc_got_offset_; }

  // Return the PLT offset of the reserved TLSDESC_PLT entry.
  // It is placed after all regular and IRELATIVE PLT entries.
  unsigned int
  get_tlsdesc_plt_offset() const
  {
    return (this->first_plt_entry_offset() +
	    (this->count_ + this->irelative_count_)
	    * this->get_plt_entry_size());
  }

  // Return the .rela.plt section data.
  Reloc_section*
  rela_plt()
  { return this->rel_; }

  // Return where the TLSDESC relocations should go.
  Reloc_section*
  rela_tlsdesc(Layout*);

  // Return where the IRELATIVE relocations should go in the PLT
  // relocations.
  Reloc_section*
  rela_irelative(Symbol_table*, Layout*);

  // Return whether we created a section for IRELATIVE relocations.
  bool
  has_irelative_section() const
  { return this->irelative_rel_ != NULL; }

  // Return the number of PLT entries.
  unsigned int
  entry_count() const
  { return this->count_ + this->irelative_count_; }

  // Return the offset of the first non-reserved PLT entry.
  unsigned int
  first_plt_entry_offset() const
  { return this->do_first_plt_entry_offset(); }

  // Return the size of a PLT entry.
  unsigned int
  get_plt_entry_size() const
  { return this->do_get_plt_entry_size(); }

  // Return the reserved tlsdesc entry size.
  unsigned int
  get_plt_tlsdesc_entry_size() const
  { return this->do_get_plt_tlsdesc_entry_size(); }

  // Return the PLT address to use for a global symbol.
  uint64_t
  address_for_global(const Symbol*);

  // Return the PLT address to use for a local symbol.
  uint64_t
  address_for_local(const Relobj*, unsigned int symndx);

 protected:
  // Fill in the first PLT entry.
  void
  fill_first_plt_entry(unsigned char* pov,
		       Address got_address,
		       Address plt_address)
  { this->do_fill_first_plt_entry(pov, got_address, plt_address); }

  // Fill in a normal PLT entry.
  void
  fill_plt_entry(unsigned char* pov,
		 Address got_address,
		 Address plt_address,
		 unsigned int got_offset,
		 unsigned int plt_offset)
  {
    this->do_fill_plt_entry(pov, got_address, plt_address,
			    got_offset, plt_offset);
  }

  // Fill in the reserved TLSDESC PLT entry.
  void
  fill_tlsdesc_entry(unsigned char* pov,
		     Address gotplt_address,
		     Address plt_address,
		     Address got_base,
		     unsigned int tlsdesc_got_offset,
		     unsigned int plt_offset)
  {
    this->do_fill_tlsdesc_entry(pov, gotplt_address, plt_address, got_base,
				tlsdesc_got_offset, plt_offset);
  }

  // The following hooks are implemented by the concrete subclass
  // (e.g. Output_data_plt_aarch64_standard), which knows the actual
  // PLT entry layout and sizes.

  virtual unsigned int
  do_first_plt_entry_offset() const = 0;

  virtual unsigned int
  do_get_plt_entry_size() const = 0;

  virtual unsigned int
  do_get_plt_tlsdesc_entry_size() const = 0;

  virtual void
  do_fill_first_plt_entry(unsigned char* pov,
			  Address got_addr,
			  Address plt_addr) = 0;

  virtual void
  do_fill_plt_entry(unsigned char* pov,
		    Address got_address,
		    Address plt_address,
		    unsigned int got_offset,
		    unsigned int plt_offset) = 0;

  virtual void
  do_fill_tlsdesc_entry(unsigned char* pov,
			Address gotplt_address,
			Address plt_address,
			Address got_base,
			unsigned int tlsdesc_got_offset,
			unsigned int plt_offset) = 0;

  void
  do_adjust_output_section(Output_section* os);

  // Write to a map file.
  void
  do_print_to_mapfile(Mapfile* mapfile) const
  { mapfile->print_output_data(this, _("** PLT")); }

 private:
  // Set the final size.
  void
  set_final_data_size();

  // Write out the PLT data.
  void
  do_write(Output_file*);

  // The reloc section.
  Reloc_section* rel_;

  // The TLSDESC relocs, if necessary. These must follow the regular
  // PLT relocs.
  Reloc_section* tlsdesc_rel_;

  // The IRELATIVE relocs, if necessary. These must follow the
  // regular PLT relocations.
  Reloc_section* irelative_rel_;

  // The .got section.
  Output_data_got_aarch64<size, big_endian>* got_;

  // The .got.plt section.
  Output_data_space* got_plt_;

  // The part of the .got.plt section used for IRELATIVE relocs.
  Output_data_space* got_irelative_;

  // The number of PLT entries.
  unsigned int count_;

  // Number of PLT entries with R_AARCH64_IRELATIVE relocs. These
  // follow the regular PLT entries.
  unsigned int irelative_count_;

  // GOT offset of the reserved TLSDESC_GOT entry for the lazy trampoline.
  // Communicated to the loader via DT_TLSDESC_GOT. The magic value -1
  // indicates an offset is not allocated.
  unsigned int tlsdesc_got_offset_;
};

// Initialize the PLT section.

template<int size, bool big_endian>
void
Output_data_plt_aarch64<size, big_endian>::init(Layout* layout)
{
  this->rel_ = new Reloc_section(false);
  layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
				  elfcpp::SHF_ALLOC, this->rel_,
				  ORDER_DYNAMIC_PLT_RELOCS, false);
}

// Set sh_entsize on the output .plt section to the PLT entry size.

template<int size, bool big_endian>
void
Output_data_plt_aarch64<size, big_endian>::do_adjust_output_section(
    Output_section* os)
{
  os->set_entsize(this->get_plt_entry_size());
}

// Add an entry to the PLT.
4362 4363template<int size, bool big_endian> 4364void 4365Output_data_plt_aarch64<size, big_endian>::add_entry(Symbol_table* symtab, 4366 Layout* layout, Symbol* gsym) 4367{ 4368 gold_assert(!gsym->has_plt_offset()); 4369 4370 unsigned int* pcount; 4371 unsigned int plt_reserved; 4372 Output_section_data_build* got; 4373 4374 if (gsym->type() == elfcpp::STT_GNU_IFUNC 4375 && gsym->can_use_relative_reloc(false)) 4376 { 4377 pcount = &this->irelative_count_; 4378 plt_reserved = 0; 4379 got = this->got_irelative_; 4380 } 4381 else 4382 { 4383 pcount = &this->count_; 4384 plt_reserved = this->first_plt_entry_offset(); 4385 got = this->got_plt_; 4386 } 4387 4388 gsym->set_plt_offset((*pcount) * this->get_plt_entry_size() 4389 + plt_reserved); 4390 4391 ++*pcount; 4392 4393 section_offset_type got_offset = got->current_data_size(); 4394 4395 // Every PLT entry needs a GOT entry which points back to the PLT 4396 // entry (this will be changed by the dynamic linker, normally 4397 // lazily when the function is called). 4398 got->set_current_data_size(got_offset + size / 8); 4399 4400 // Every PLT entry needs a reloc. 4401 this->add_relocation(symtab, layout, gsym, got_offset); 4402 4403 // Note that we don't need to save the symbol. The contents of the 4404 // PLT are independent of which symbols are used. The symbols only 4405 // appear in the relocations. 4406} 4407 4408// Add an entry to the PLT for a local STT_GNU_IFUNC symbol. Return 4409// the PLT offset. 
4410 4411template<int size, bool big_endian> 4412unsigned int 4413Output_data_plt_aarch64<size, big_endian>::add_local_ifunc_entry( 4414 Symbol_table* symtab, 4415 Layout* layout, 4416 Sized_relobj_file<size, big_endian>* relobj, 4417 unsigned int local_sym_index) 4418{ 4419 unsigned int plt_offset = this->irelative_count_ * this->get_plt_entry_size(); 4420 ++this->irelative_count_; 4421 4422 section_offset_type got_offset = this->got_irelative_->current_data_size(); 4423 4424 // Every PLT entry needs a GOT entry which points back to the PLT 4425 // entry. 4426 this->got_irelative_->set_current_data_size(got_offset + size / 8); 4427 4428 // Every PLT entry needs a reloc. 4429 Reloc_section* rela = this->rela_irelative(symtab, layout); 4430 rela->add_symbolless_local_addend(relobj, local_sym_index, 4431 elfcpp::R_AARCH64_IRELATIVE, 4432 this->got_irelative_, got_offset, 0); 4433 4434 return plt_offset; 4435} 4436 4437// Add the relocation for a PLT entry. 4438 4439template<int size, bool big_endian> 4440void 4441Output_data_plt_aarch64<size, big_endian>::add_relocation( 4442 Symbol_table* symtab, Layout* layout, Symbol* gsym, unsigned int got_offset) 4443{ 4444 if (gsym->type() == elfcpp::STT_GNU_IFUNC 4445 && gsym->can_use_relative_reloc(false)) 4446 { 4447 Reloc_section* rela = this->rela_irelative(symtab, layout); 4448 rela->add_symbolless_global_addend(gsym, elfcpp::R_AARCH64_IRELATIVE, 4449 this->got_irelative_, got_offset, 0); 4450 } 4451 else 4452 { 4453 gsym->set_needs_dynsym_entry(); 4454 this->rel_->add_global(gsym, elfcpp::R_AARCH64_JUMP_SLOT, this->got_plt_, 4455 got_offset, 0); 4456 } 4457} 4458 4459// Return where the TLSDESC relocations should go, creating it if 4460// necessary. These follow the JUMP_SLOT relocations. 

template<int size, bool big_endian>
typename Output_data_plt_aarch64<size, big_endian>::Reloc_section*
Output_data_plt_aarch64<size, big_endian>::rela_tlsdesc(Layout* layout)
{
  if (this->tlsdesc_rel_ == NULL)
    {
      this->tlsdesc_rel_ = new Reloc_section(false);
      layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
				      elfcpp::SHF_ALLOC, this->tlsdesc_rel_,
				      ORDER_DYNAMIC_PLT_RELOCS, false);
      // Both reloc subsections must land in the same output section.
      gold_assert(this->tlsdesc_rel_->output_section()
		  == this->rel_->output_section());
    }
  return this->tlsdesc_rel_;
}

// Return where the IRELATIVE relocations should go in the PLT. These
// follow the JUMP_SLOT and the TLSDESC relocations.

template<int size, bool big_endian>
typename Output_data_plt_aarch64<size, big_endian>::Reloc_section*
Output_data_plt_aarch64<size, big_endian>::rela_irelative(Symbol_table* symtab,
							  Layout* layout)
{
  if (this->irelative_rel_ == NULL)
    {
      // Make sure we have a place for the TLSDESC relocations, in
      // case we see any later on.  (This must happen before the
      // IRELATIVE section is created so the required ordering —
      // JUMP_SLOT, TLSDESC, IRELATIVE — is preserved.)
      this->rela_tlsdesc(layout);
      this->irelative_rel_ = new Reloc_section(false);
      layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
				      elfcpp::SHF_ALLOC, this->irelative_rel_,
				      ORDER_DYNAMIC_PLT_RELOCS, false);
      gold_assert(this->irelative_rel_->output_section()
		  == this->rel_->output_section());

      if (parameters->doing_static_link())
	{
	  // A statically linked executable will only have a .rela.plt
	  // section to hold R_AARCH64_IRELATIVE relocs for
	  // STT_GNU_IFUNC symbols. The library will use these
	  // symbols to locate the IRELATIVE relocs at program startup
	  // time.
	  symtab->define_in_output_data("__rela_iplt_start", NULL,
					Symbol_table::PREDEFINED,
					this->irelative_rel_, 0, 0,
					elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL,
					elfcpp::STV_HIDDEN, 0, false, true);
	  symtab->define_in_output_data("__rela_iplt_end", NULL,
					Symbol_table::PREDEFINED,
					this->irelative_rel_, 0, 0,
					elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL,
					elfcpp::STV_HIDDEN, 0, true, true);
	}
    }
  return this->irelative_rel_;
}

// Return the PLT address to use for a global symbol.

template<int size, bool big_endian>
uint64_t
Output_data_plt_aarch64<size, big_endian>::address_for_global(
    const Symbol* gsym)
{
  uint64_t offset = 0;
  // IRELATIVE entries are laid out after all regular PLT entries, so
  // an IFUNC symbol's plt_offset() is relative to the start of the
  // IRELATIVE region.
  if (gsym->type() == elfcpp::STT_GNU_IFUNC
      && gsym->can_use_relative_reloc(false))
    offset = (this->first_plt_entry_offset() +
	      this->count_ * this->get_plt_entry_size());
  return this->address() + offset + gsym->plt_offset();
}

// Return the PLT address to use for a local symbol. These are always
// IRELATIVE relocs.

template<int size, bool big_endian>
uint64_t
Output_data_plt_aarch64<size, big_endian>::address_for_local(
    const Relobj* object,
    unsigned int r_sym)
{
  return (this->address()
	  + this->first_plt_entry_offset()
	  + this->count_ * this->get_plt_entry_size()
	  + object->local_plt_offset(r_sym));
}

// Set the final size.

template<int size, bool big_endian>
void
Output_data_plt_aarch64<size, big_endian>::set_final_data_size()
{
  // Total size = reserved first entry + all regular and IRELATIVE
  // entries + the optional reserved TLSDESC entry.
  unsigned int count = this->count_ + this->irelative_count_;
  unsigned int extra_size = 0;
  if (this->has_tlsdesc_entry())
    extra_size += this->get_plt_tlsdesc_entry_size();
  this->set_data_size(this->first_plt_entry_offset()
		      + count * this->get_plt_entry_size()
		      + extra_size);
}

// The standard, concrete PLT layout.  Supplies the instruction
// templates and sizes the abstract base class needs.

template<int size, bool big_endian>
class Output_data_plt_aarch64_standard :
    public Output_data_plt_aarch64<size, big_endian>
{
 public:
  typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
  Output_data_plt_aarch64_standard(
      Layout* layout,
      Output_data_got_aarch64<size, big_endian>* got,
      Output_data_space* got_plt,
      Output_data_space* got_irelative)
    : Output_data_plt_aarch64<size, big_endian>(layout,
						size == 32 ? 4 : 8,
						got, got_plt,
						got_irelative)
  { }

 protected:
  // Return the offset of the first non-reserved PLT entry.
  virtual unsigned int
  do_first_plt_entry_offset() const
  { return this->first_plt_entry_size; }

  // Return the size of a PLT entry
  virtual unsigned int
  do_get_plt_entry_size() const
  { return this->plt_entry_size; }

  // Return the size of a tlsdesc entry
  virtual unsigned int
  do_get_plt_tlsdesc_entry_size() const
  { return this->plt_tlsdesc_entry_size; }

  virtual void
  do_fill_first_plt_entry(unsigned char* pov,
			  Address got_address,
			  Address plt_address);

  virtual void
  do_fill_plt_entry(unsigned char* pov,
		    Address got_address,
		    Address plt_address,
		    unsigned int got_offset,
		    unsigned int plt_offset);

  virtual void
  do_fill_tlsdesc_entry(unsigned char* pov,
			Address gotplt_address,
			Address plt_address,
			Address got_base,
			unsigned int tlsdesc_got_offset,
			unsigned int plt_offset);

 private:
  // The size of the first plt entry size.
  static const int first_plt_entry_size = 32;
  // The size of the plt entry size.
  static const int plt_entry_size = 16;
  // The size of the plt tlsdesc entry size.
  static const int plt_tlsdesc_entry_size = 32;
  // Template for the first PLT entry.
  static const uint32_t first_plt_entry[first_plt_entry_size / 4];
  // Template for subsequent PLT entries.
  static const uint32_t plt_entry[plt_entry_size / 4];
  // The reserved TLSDESC entry in the PLT for an executable.
  static const uint32_t tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4];
};

// The first entry in the PLT for an executable.

template<>
const uint32_t
Output_data_plt_aarch64_standard<32, false>::
    first_plt_entry[first_plt_entry_size / 4] =
{
  0xa9bf7bf0,	/* stp x16, x30, [sp, #-16]!  */
  0x90000010,	/* adrp x16, PLT_GOT+0x8  */
  0xb9400A11,	/* ldr w17, [x16, #PLT_GOT+0x8]  */
  0x11002210,	/* add w16, w16,#PLT_GOT+0x8  */
  0xd61f0220,	/* br x17  */
  0xd503201f,	/* nop */
  0xd503201f,	/* nop */
  0xd503201f,	/* nop */
};


template<>
const uint32_t
Output_data_plt_aarch64_standard<32, true>::
    first_plt_entry[first_plt_entry_size / 4] =
{
  0xa9bf7bf0,	/* stp x16, x30, [sp, #-16]!  */
  0x90000010,	/* adrp x16, PLT_GOT+0x8  */
  0xb9400A11,	/* ldr w17, [x16, #PLT_GOT+0x8]  */
  0x11002210,	/* add w16, w16,#PLT_GOT+0x8  */
  0xd61f0220,	/* br x17  */
  0xd503201f,	/* nop */
  0xd503201f,	/* nop */
  0xd503201f,	/* nop */
};


template<>
const uint32_t
Output_data_plt_aarch64_standard<64, false>::
    first_plt_entry[first_plt_entry_size / 4] =
{
  0xa9bf7bf0,	/* stp x16, x30, [sp, #-16]!  */
  0x90000010,	/* adrp x16, PLT_GOT+16  */
  0xf9400A11,	/* ldr x17, [x16, #PLT_GOT+0x10]  */
  0x91004210,	/* add x16, x16,#PLT_GOT+0x10  */
  0xd61f0220,	/* br x17  */
  0xd503201f,	/* nop */
  0xd503201f,	/* nop */
  0xd503201f,	/* nop */
};


template<>
const uint32_t
Output_data_plt_aarch64_standard<64, true>::
    first_plt_entry[first_plt_entry_size / 4] =
{
  0xa9bf7bf0,	/* stp x16, x30, [sp, #-16]!  */
  0x90000010,	/* adrp x16, PLT_GOT+16  */
  0xf9400A11,	/* ldr x17, [x16, #PLT_GOT+0x10]  */
  0x91004210,	/* add x16, x16,#PLT_GOT+0x10  */
  0xd61f0220,	/* br x17  */
  0xd503201f,	/* nop */
  0xd503201f,	/* nop */
  0xd503201f,	/* nop */
};


template<>
const uint32_t
Output_data_plt_aarch64_standard<32, false>::
    plt_entry[plt_entry_size / 4] =
{
  0x90000010,	/* adrp x16, PLTGOT + n * 4  */
  0xb9400211,	/* ldr w17, [w16, PLTGOT + n * 4] */
  0x11000210,	/* add w16, w16, :lo12:PLTGOT + n * 4  */
  0xd61f0220,	/* br x17.  */
};


template<>
const uint32_t
Output_data_plt_aarch64_standard<32, true>::
    plt_entry[plt_entry_size / 4] =
{
  0x90000010,	/* adrp x16, PLTGOT + n * 4  */
  0xb9400211,	/* ldr w17, [w16, PLTGOT + n * 4] */
  0x11000210,	/* add w16, w16, :lo12:PLTGOT + n * 4  */
  0xd61f0220,	/* br x17.  */
};


template<>
const uint32_t
Output_data_plt_aarch64_standard<64, false>::
    plt_entry[plt_entry_size / 4] =
{
  0x90000010,	/* adrp x16, PLTGOT + n * 8  */
  0xf9400211,	/* ldr x17, [x16, PLTGOT + n * 8] */
  0x91000210,	/* add x16, x16, :lo12:PLTGOT + n * 8  */
  0xd61f0220,	/* br x17.  */
};


template<>
const uint32_t
Output_data_plt_aarch64_standard<64, true>::
    plt_entry[plt_entry_size / 4] =
{
  0x90000010,	/* adrp x16, PLTGOT + n * 8  */
  0xf9400211,	/* ldr x17, [x16, PLTGOT + n * 8] */
  0x91000210,	/* add x16, x16, :lo12:PLTGOT + n * 8  */
  0xd61f0220,	/* br x17.  */
};


template<int size, bool big_endian>
void
Output_data_plt_aarch64_standard<size, big_endian>::do_fill_first_plt_entry(
    unsigned char* pov,
    Address got_address,
    Address plt_address)
{
  // PLT0 of the small PLT looks like this in ELF64 -
  // stp x16, x30, [sp, #-16]!		Save the reloc and lr on stack.
  // adrp x16, PLT_GOT + 16		Get the page base of the GOTPLT
  // ldr x17, [x16, #:lo12:PLT_GOT+16]	Load the address of the
  //					symbol resolver
  // add x16, x16, #:lo12:PLT_GOT+16	Load the lo12 bits of the
  //					GOTPLT entry for this.
  // br x17
  // PLT0 will be slightly different in ELF32 due to different got entry
  // size.
  memcpy(pov, this->first_plt_entry, this->first_plt_entry_size);
  // The 2nd .got.plt entry (the one past the reserved ones referenced
  // by PLT0).
  Address gotplt_2nd_ent = got_address + (size / 8) * 2;

  // Fill in the top 21 bits for this: ADRP x16, PLT_GOT + 8 * 2.
  // ADRP: (PG(S+A)-PG(P)) >> 12) & 0x1fffff.
  // FIXME: This only works for 64bit
  AArch64_relocate_functions<size, big_endian>::adrp(pov + 4,
						     gotplt_2nd_ent,
						     plt_address + 4);

  // Fill in R_AARCH64_LDST8_LO12
  elfcpp::Swap<32, big_endian>::writeval(
      pov + 8,
      ((this->first_plt_entry[2] & 0xffc003ff)
       | ((gotplt_2nd_ent & 0xff8) << 7)));

  // Fill in R_AARCH64_ADD_ABS_LO12
  elfcpp::Swap<32, big_endian>::writeval(
      pov + 12,
      ((this->first_plt_entry[3] & 0xffc003ff)
       | ((gotplt_2nd_ent & 0xfff) << 10)));
}


// Subsequent entries in the PLT for an executable.
// FIXME: This only works for 64bit

template<int size, bool big_endian>
void
Output_data_plt_aarch64_standard<size, big_endian>::do_fill_plt_entry(
    unsigned char* pov,
    Address got_address,
    Address plt_address,
    unsigned int got_offset,
    unsigned int plt_offset)
{
  memcpy(pov, this->plt_entry, this->plt_entry_size);

  Address gotplt_entry_address = got_address + got_offset;
  Address plt_entry_address = plt_address + plt_offset;

  // Fill in R_AARCH64_PCREL_ADR_HI21
  AArch64_relocate_functions<size, big_endian>::adrp(
      pov,
      gotplt_entry_address,
      plt_entry_address);

  // Fill in R_AARCH64_LDST64_ABS_LO12
  elfcpp::Swap<32, big_endian>::writeval(
      pov + 4,
      ((this->plt_entry[1] & 0xffc003ff)
       | ((gotplt_entry_address & 0xff8) << 7)));

  // Fill in R_AARCH64_ADD_ABS_LO12
  elfcpp::Swap<32, big_endian>::writeval(
      pov + 8,
      ((this->plt_entry[2] & 0xffc003ff)
       | ((gotplt_entry_address & 0xfff) << 10)));
}


template<>
const uint32_t
Output_data_plt_aarch64_standard<32, false>::
    tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
{
  0xa9bf0fe2,	/* stp x2, x3, [sp, #-16]!  */
  0x90000002,	/* adrp x2, 0  */
  0x90000003,	/* adrp x3, 0  */
  0xb9400042,	/* ldr w2, [w2, #0]  */
  0x11000063,	/* add w3, w3, 0  */
  0xd61f0040,	/* br x2  */
  0xd503201f,	/* nop  */
  0xd503201f,	/* nop  */
};

template<>
const uint32_t
Output_data_plt_aarch64_standard<32, true>::
    tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
{
  0xa9bf0fe2,	/* stp x2, x3, [sp, #-16]!  */
  0x90000002,	/* adrp x2, 0  */
  0x90000003,	/* adrp x3, 0  */
  0xb9400042,	/* ldr w2, [w2, #0]  */
  0x11000063,	/* add w3, w3, 0  */
  0xd61f0040,	/* br x2  */
  0xd503201f,	/* nop  */
  0xd503201f,	/* nop  */
};

template<>
const uint32_t
Output_data_plt_aarch64_standard<64, false>::
    tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
{
  0xa9bf0fe2,	/* stp x2, x3, [sp, #-16]!  */
  0x90000002,	/* adrp x2, 0  */
  0x90000003,	/* adrp x3, 0  */
  0xf9400042,	/* ldr x2, [x2, #0]  */
  0x91000063,	/* add x3, x3, 0  */
  0xd61f0040,	/* br x2  */
  0xd503201f,	/* nop  */
  0xd503201f,	/* nop  */
};

template<>
const uint32_t
Output_data_plt_aarch64_standard<64, true>::
    tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
{
  0xa9bf0fe2,	/* stp x2, x3, [sp, #-16]!  */
  0x90000002,	/* adrp x2, 0  */
  0x90000003,	/* adrp x3, 0  */
  0xf9400042,	/* ldr x2, [x2, #0]  */
  0x91000063,	/* add x3, x3, 0  */
  0xd61f0040,	/* br x2  */
  0xd503201f,	/* nop  */
  0xd503201f,	/* nop  */
};

template<int size, bool big_endian>
void
Output_data_plt_aarch64_standard<size, big_endian>::do_fill_tlsdesc_entry(
    unsigned char* pov,
    Address gotplt_address,
    Address plt_address,
    Address got_base,
    unsigned int tlsdesc_got_offset,
    unsigned int plt_offset)
{
  memcpy(pov, tlsdesc_plt_entry, plt_tlsdesc_entry_size);

  // move DT_TLSDESC_GOT address into x2
  // move .got.plt address into x3
  Address tlsdesc_got_entry = got_base + tlsdesc_got_offset;
  Address plt_entry_address = plt_address + plt_offset;

  // R_AARCH64_ADR_PREL_PG_HI21
  AArch64_relocate_functions<size, big_endian>::adrp(
      pov + 4,
      tlsdesc_got_entry,
      plt_entry_address + 4);

  // R_AARCH64_ADR_PREL_PG_HI21
  AArch64_relocate_functions<size, big_endian>::adrp(
      pov + 8,
      gotplt_address,
      plt_entry_address + 8);

  // R_AARCH64_LDST64_ABS_LO12
  elfcpp::Swap<32, big_endian>::writeval(
      pov + 12,
      ((this->tlsdesc_plt_entry[3] & 0xffc003ff)
       | ((tlsdesc_got_entry & 0xff8) << 7)));

  // R_AARCH64_ADD_ABS_LO12
  elfcpp::Swap<32, big_endian>::writeval(
      pov + 16,
      ((this->tlsdesc_plt_entry[4] & 0xffc003ff)
       | ((gotplt_address & 0xfff) << 10)));
}

// Write out the PLT. This uses the hand-coded instructions above,
// and adjusts them as needed.

template<int size, bool big_endian>
void
Output_data_plt_aarch64<size, big_endian>::do_write(Output_file* of)
{
  const off_t offset = this->offset();
  const section_size_type oview_size =
    convert_to_section_size_type(this->data_size());
  unsigned char* const oview = of->get_output_view(offset, oview_size);

  // .got.plt and .got.irelative must be contiguous in the file; we
  // write both through one view.
  const off_t got_file_offset = this->got_plt_->offset();
  gold_assert(got_file_offset + this->got_plt_->data_size()
	      == this->got_irelative_->offset());

  const section_size_type got_size =
    convert_to_section_size_type(this->got_plt_->data_size()
				 + this->got_irelative_->data_size());
  unsigned char* const got_view = of->get_output_view(got_file_offset,
						      got_size);

  unsigned char* pov = oview;

  // The base address of the .plt section.
  typename elfcpp::Elf_types<size>::Elf_Addr plt_address = this->address();
  // The base address of the PLT portion of the .got section.
  typename elfcpp::Elf_types<size>::Elf_Addr gotplt_address
    = this->got_plt_->address();

  this->fill_first_plt_entry(pov, gotplt_address, plt_address);
  pov += this->first_plt_entry_offset();

  // The first three entries in .got.plt are reserved.
  unsigned char* got_pov = got_view;
  memset(got_pov, 0, size / 8 * AARCH64_GOTPLT_RESERVE_COUNT);
  got_pov += (size / 8) * AARCH64_GOTPLT_RESERVE_COUNT;

  // Walk the PLT entries and their GOT slots in lockstep.
  unsigned int plt_offset = this->first_plt_entry_offset();
  unsigned int got_offset = (size / 8) * AARCH64_GOTPLT_RESERVE_COUNT;
  const unsigned int count = this->count_ + this->irelative_count_;
  for (unsigned int plt_index = 0;
       plt_index < count;
       ++plt_index,
	 pov += this->get_plt_entry_size(),
	 got_pov += size / 8,
	 plt_offset += this->get_plt_entry_size(),
	 got_offset += size / 8)
    {
      // Set and adjust the PLT entry itself.
      this->fill_plt_entry(pov, gotplt_address, plt_address,
			   got_offset, plt_offset);

      // Set the entry in the GOT, which points to plt0.
      elfcpp::Swap<size, big_endian>::writeval(got_pov, plt_address);
    }

  if (this->has_tlsdesc_entry())
    {
      // Set and adjust the reserved TLSDESC PLT entry.
      unsigned int tlsdesc_got_offset = this->get_tlsdesc_got_offset();
      // The base address of the .got section.
      typename elfcpp::Elf_types<size>::Elf_Addr got_base =
	this->got_->address();
      this->fill_tlsdesc_entry(pov, gotplt_address, plt_address, got_base,
			       tlsdesc_got_offset, plt_offset);
      pov += this->get_plt_tlsdesc_entry_size();
    }

  gold_assert(static_cast<section_size_type>(pov - oview) == oview_size);
  gold_assert(static_cast<section_size_type>(got_pov - got_view) == got_size);

  of->write_output_view(offset, oview_size, oview);
  of->write_output_view(got_file_offset, got_size, got_view);
}

// Telling how to update the immediate field of an instruction.
struct AArch64_howto
{
  // The immediate field mask.
  elfcpp::Elf_Xword dst_mask;

  // The offset to apply relocation immediate
  int doffset;

  // The second part offset, if the immediate field has two parts.
  // -1 if the immediate field has only one part.
5014 int doffset2; 5015}; 5016 5017static const AArch64_howto aarch64_howto[AArch64_reloc_property::INST_NUM] = 5018{ 5019 {0, -1, -1}, // DATA 5020 {0x1fffe0, 5, -1}, // MOVW [20:5]-imm16 5021 {0xffffe0, 5, -1}, // LD [23:5]-imm19 5022 {0x60ffffe0, 29, 5}, // ADR [30:29]-immlo [23:5]-immhi 5023 {0x60ffffe0, 29, 5}, // ADRP [30:29]-immlo [23:5]-immhi 5024 {0x3ffc00, 10, -1}, // ADD [21:10]-imm12 5025 {0x3ffc00, 10, -1}, // LDST [21:10]-imm12 5026 {0x7ffe0, 5, -1}, // TBZNZ [18:5]-imm14 5027 {0xffffe0, 5, -1}, // CONDB [23:5]-imm19 5028 {0x3ffffff, 0, -1}, // B [25:0]-imm26 5029 {0x3ffffff, 0, -1}, // CALL [25:0]-imm26 5030}; 5031 5032// AArch64 relocate function class 5033 5034template<int size, bool big_endian> 5035class AArch64_relocate_functions 5036{ 5037 public: 5038 typedef enum 5039 { 5040 STATUS_OKAY, // No error during relocation. 5041 STATUS_OVERFLOW, // Relocation overflow. 5042 STATUS_BAD_RELOC, // Relocation cannot be applied. 5043 } Status; 5044 5045 typedef AArch64_relocate_functions<size, big_endian> This; 5046 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address; 5047 typedef Relocate_info<size, big_endian> The_relocate_info; 5048 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj; 5049 typedef Reloc_stub<size, big_endian> The_reloc_stub; 5050 typedef Stub_table<size, big_endian> The_stub_table; 5051 typedef elfcpp::Rela<size, big_endian> The_rela; 5052 typedef typename elfcpp::Swap<size, big_endian>::Valtype AArch64_valtype; 5053 5054 // Return the page address of the address. 5055 // Page(address) = address & ~0xFFF 5056 5057 static inline AArch64_valtype 5058 Page(Address address) 5059 { 5060 return (address & (~static_cast<Address>(0xFFF))); 5061 } 5062 5063 private: 5064 // Update instruction (pointed by view) with selected bits (immed). 
5065 // val = (val & ~dst_mask) | (immed << doffset) 5066 5067 template<int valsize> 5068 static inline void 5069 update_view(unsigned char* view, 5070 AArch64_valtype immed, 5071 elfcpp::Elf_Xword doffset, 5072 elfcpp::Elf_Xword dst_mask) 5073 { 5074 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype; 5075 Valtype* wv = reinterpret_cast<Valtype*>(view); 5076 Valtype val = elfcpp::Swap<valsize, big_endian>::readval(wv); 5077 5078 // Clear immediate fields. 5079 val &= ~dst_mask; 5080 elfcpp::Swap<valsize, big_endian>::writeval(wv, 5081 static_cast<Valtype>(val | (immed << doffset))); 5082 } 5083 5084 // Update two parts of an instruction (pointed by view) with selected 5085 // bits (immed1 and immed2). 5086 // val = (val & ~dst_mask) | (immed1 << doffset1) | (immed2 << doffset2) 5087 5088 template<int valsize> 5089 static inline void 5090 update_view_two_parts( 5091 unsigned char* view, 5092 AArch64_valtype immed1, 5093 AArch64_valtype immed2, 5094 elfcpp::Elf_Xword doffset1, 5095 elfcpp::Elf_Xword doffset2, 5096 elfcpp::Elf_Xword dst_mask) 5097 { 5098 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype; 5099 Valtype* wv = reinterpret_cast<Valtype*>(view); 5100 Valtype val = elfcpp::Swap<valsize, big_endian>::readval(wv); 5101 val &= ~dst_mask; 5102 elfcpp::Swap<valsize, big_endian>::writeval(wv, 5103 static_cast<Valtype>(val | (immed1 << doffset1) | 5104 (immed2 << doffset2))); 5105 } 5106 5107 // Update adr or adrp instruction with immed. 5108 // In adr and adrp: [30:29] immlo [23:5] immhi 5109 5110 static inline void 5111 update_adr(unsigned char* view, AArch64_valtype immed) 5112 { 5113 elfcpp::Elf_Xword dst_mask = (0x3 << 29) | (0x7ffff << 5); 5114 This::template update_view_two_parts<32>( 5115 view, 5116 immed & 0x3, 5117 (immed & 0x1ffffc) >> 2, 5118 29, 5119 5, 5120 dst_mask); 5121 } 5122 5123 // Update movz/movn instruction with bits immed. 
5124 // Set instruction to movz if is_movz is true, otherwise set instruction 5125 // to movn. 5126 5127 static inline void 5128 update_movnz(unsigned char* view, 5129 AArch64_valtype immed, 5130 bool is_movz) 5131 { 5132 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype; 5133 Valtype* wv = reinterpret_cast<Valtype*>(view); 5134 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv); 5135 5136 const elfcpp::Elf_Xword doffset = 5137 aarch64_howto[AArch64_reloc_property::INST_MOVW].doffset; 5138 const elfcpp::Elf_Xword dst_mask = 5139 aarch64_howto[AArch64_reloc_property::INST_MOVW].dst_mask; 5140 5141 // Clear immediate fields and opc code. 5142 val &= ~(dst_mask | (0x3 << 29)); 5143 5144 // Set instruction to movz or movn. 5145 // movz: [30:29] is 10 movn: [30:29] is 00 5146 if (is_movz) 5147 val |= (0x2 << 29); 5148 5149 elfcpp::Swap<32, big_endian>::writeval(wv, 5150 static_cast<Valtype>(val | (immed << doffset))); 5151 } 5152 5153 public: 5154 5155 // Update selected bits in text. 5156 5157 template<int valsize> 5158 static inline typename This::Status 5159 reloc_common(unsigned char* view, Address x, 5160 const AArch64_reloc_property* reloc_property) 5161 { 5162 // Select bits from X. 5163 Address immed = reloc_property->select_x_value(x); 5164 5165 // Update view. 5166 const AArch64_reloc_property::Reloc_inst inst = 5167 reloc_property->reloc_inst(); 5168 // If it is a data relocation or instruction has 2 parts of immediate 5169 // fields, you should not call pcrela_general. 5170 gold_assert(aarch64_howto[inst].doffset2 == -1 && 5171 aarch64_howto[inst].doffset != -1); 5172 This::template update_view<valsize>(view, immed, 5173 aarch64_howto[inst].doffset, 5174 aarch64_howto[inst].dst_mask); 5175 5176 // Do check overflow or alignment if needed. 5177 return (reloc_property->checkup_x_value(x) 5178 ? This::STATUS_OKAY 5179 : This::STATUS_OVERFLOW); 5180 } 5181 5182 // Construct a B insn. 
Note, although we group it here with other relocation 5183 // operation, there is actually no 'relocation' involved here. 5184 static inline void 5185 construct_b(unsigned char* view, unsigned int branch_offset) 5186 { 5187 update_view_two_parts<32>(view, 0x05, (branch_offset >> 2), 5188 26, 0, 0xffffffff); 5189 } 5190 5191 // Do a simple rela relocation at unaligned addresses. 5192 5193 template<int valsize> 5194 static inline typename This::Status 5195 rela_ua(unsigned char* view, 5196 const Sized_relobj_file<size, big_endian>* object, 5197 const Symbol_value<size>* psymval, 5198 AArch64_valtype addend, 5199 const AArch64_reloc_property* reloc_property) 5200 { 5201 typedef typename elfcpp::Swap_unaligned<valsize, big_endian>::Valtype 5202 Valtype; 5203 typename elfcpp::Elf_types<size>::Elf_Addr x = 5204 psymval->value(object, addend); 5205 elfcpp::Swap_unaligned<valsize, big_endian>::writeval(view, 5206 static_cast<Valtype>(x)); 5207 return (reloc_property->checkup_x_value(x) 5208 ? This::STATUS_OKAY 5209 : This::STATUS_OVERFLOW); 5210 } 5211 5212 // Do a simple pc-relative relocation at unaligned addresses. 5213 5214 template<int valsize> 5215 static inline typename This::Status 5216 pcrela_ua(unsigned char* view, 5217 const Sized_relobj_file<size, big_endian>* object, 5218 const Symbol_value<size>* psymval, 5219 AArch64_valtype addend, 5220 Address address, 5221 const AArch64_reloc_property* reloc_property) 5222 { 5223 typedef typename elfcpp::Swap_unaligned<valsize, big_endian>::Valtype 5224 Valtype; 5225 Address x = psymval->value(object, addend) - address; 5226 elfcpp::Swap_unaligned<valsize, big_endian>::writeval(view, 5227 static_cast<Valtype>(x)); 5228 return (reloc_property->checkup_x_value(x) 5229 ? This::STATUS_OKAY 5230 : This::STATUS_OVERFLOW); 5231 } 5232 5233 // Do a simple rela relocation at aligned addresses. 
5234 5235 template<int valsize> 5236 static inline typename This::Status 5237 rela( 5238 unsigned char* view, 5239 const Sized_relobj_file<size, big_endian>* object, 5240 const Symbol_value<size>* psymval, 5241 AArch64_valtype addend, 5242 const AArch64_reloc_property* reloc_property) 5243 { 5244 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype; 5245 Valtype* wv = reinterpret_cast<Valtype*>(view); 5246 Address x = psymval->value(object, addend); 5247 elfcpp::Swap<valsize, big_endian>::writeval(wv,static_cast<Valtype>(x)); 5248 return (reloc_property->checkup_x_value(x) 5249 ? This::STATUS_OKAY 5250 : This::STATUS_OVERFLOW); 5251 } 5252 5253 // Do relocate. Update selected bits in text. 5254 // new_val = (val & ~dst_mask) | (immed << doffset) 5255 5256 template<int valsize> 5257 static inline typename This::Status 5258 rela_general(unsigned char* view, 5259 const Sized_relobj_file<size, big_endian>* object, 5260 const Symbol_value<size>* psymval, 5261 AArch64_valtype addend, 5262 const AArch64_reloc_property* reloc_property) 5263 { 5264 // Calculate relocation. 5265 Address x = psymval->value(object, addend); 5266 return This::template reloc_common<valsize>(view, x, reloc_property); 5267 } 5268 5269 // Do relocate. Update selected bits in text. 5270 // new val = (val & ~dst_mask) | (immed << doffset) 5271 5272 template<int valsize> 5273 static inline typename This::Status 5274 rela_general( 5275 unsigned char* view, 5276 AArch64_valtype s, 5277 AArch64_valtype addend, 5278 const AArch64_reloc_property* reloc_property) 5279 { 5280 // Calculate relocation. 5281 Address x = s + addend; 5282 return This::template reloc_common<valsize>(view, x, reloc_property); 5283 } 5284 5285 // Do address relative relocate. Update selected bits in text. 
5286 // new val = (val & ~dst_mask) | (immed << doffset) 5287 5288 template<int valsize> 5289 static inline typename This::Status 5290 pcrela_general( 5291 unsigned char* view, 5292 const Sized_relobj_file<size, big_endian>* object, 5293 const Symbol_value<size>* psymval, 5294 AArch64_valtype addend, 5295 Address address, 5296 const AArch64_reloc_property* reloc_property) 5297 { 5298 // Calculate relocation. 5299 Address x = psymval->value(object, addend) - address; 5300 return This::template reloc_common<valsize>(view, x, reloc_property); 5301 } 5302 5303 5304 // Calculate (S + A) - address, update adr instruction. 5305 5306 static inline typename This::Status 5307 adr(unsigned char* view, 5308 const Sized_relobj_file<size, big_endian>* object, 5309 const Symbol_value<size>* psymval, 5310 Address addend, 5311 Address address, 5312 const AArch64_reloc_property* /* reloc_property */) 5313 { 5314 AArch64_valtype x = psymval->value(object, addend) - address; 5315 // Pick bits [20:0] of X. 5316 AArch64_valtype immed = x & 0x1fffff; 5317 update_adr(view, immed); 5318 // Check -2^20 <= X < 2^20 5319 return (size == 64 && Bits<21>::has_overflow((x)) 5320 ? This::STATUS_OVERFLOW 5321 : This::STATUS_OKAY); 5322 } 5323 5324 // Calculate PG(S+A) - PG(address), update adrp instruction. 5325 // R_AARCH64_ADR_PREL_PG_HI21 5326 5327 static inline typename This::Status 5328 adrp( 5329 unsigned char* view, 5330 Address sa, 5331 Address address) 5332 { 5333 AArch64_valtype x = This::Page(sa) - This::Page(address); 5334 // Pick [32:12] of X. 5335 AArch64_valtype immed = (x >> 12) & 0x1fffff; 5336 update_adr(view, immed); 5337 // Check -2^32 <= X < 2^32 5338 return (size == 64 && Bits<33>::has_overflow((x)) 5339 ? This::STATUS_OVERFLOW 5340 : This::STATUS_OKAY); 5341 } 5342 5343 // Calculate PG(S+A) - PG(address), update adrp instruction. 
5344 // R_AARCH64_ADR_PREL_PG_HI21 5345 5346 static inline typename This::Status 5347 adrp(unsigned char* view, 5348 const Sized_relobj_file<size, big_endian>* object, 5349 const Symbol_value<size>* psymval, 5350 Address addend, 5351 Address address, 5352 const AArch64_reloc_property* reloc_property) 5353 { 5354 Address sa = psymval->value(object, addend); 5355 AArch64_valtype x = This::Page(sa) - This::Page(address); 5356 // Pick [32:12] of X. 5357 AArch64_valtype immed = (x >> 12) & 0x1fffff; 5358 update_adr(view, immed); 5359 return (reloc_property->checkup_x_value(x) 5360 ? This::STATUS_OKAY 5361 : This::STATUS_OVERFLOW); 5362 } 5363 5364 // Update mov[n/z] instruction. Check overflow if needed. 5365 // If X >=0, set the instruction to movz and its immediate value to the 5366 // selected bits S. 5367 // If X < 0, set the instruction to movn and its immediate value to 5368 // NOT (selected bits of). 5369 5370 static inline typename This::Status 5371 movnz(unsigned char* view, 5372 AArch64_valtype x, 5373 const AArch64_reloc_property* reloc_property) 5374 { 5375 // Select bits from X. 5376 Address immed; 5377 bool is_movz; 5378 typedef typename elfcpp::Elf_types<size>::Elf_Swxword SignedW; 5379 if (static_cast<SignedW>(x) >= 0) 5380 { 5381 immed = reloc_property->select_x_value(x); 5382 is_movz = true; 5383 } 5384 else 5385 { 5386 immed = reloc_property->select_x_value(~x);; 5387 is_movz = false; 5388 } 5389 5390 // Update movnz instruction. 5391 update_movnz(view, immed, is_movz); 5392 5393 // Do check overflow or alignment if needed. 5394 return (reloc_property->checkup_x_value(x) 5395 ? 
This::STATUS_OKAY 5396 : This::STATUS_OVERFLOW); 5397 } 5398 5399 static inline bool 5400 maybe_apply_stub(unsigned int, 5401 const The_relocate_info*, 5402 const The_rela&, 5403 unsigned char*, 5404 Address, 5405 const Sized_symbol<size>*, 5406 const Symbol_value<size>*, 5407 const Sized_relobj_file<size, big_endian>*, 5408 section_size_type); 5409 5410}; // End of AArch64_relocate_functions 5411 5412 5413// For a certain relocation type (usually jump/branch), test to see if the 5414// destination needs a stub to fulfil. If so, re-route the destination of the 5415// original instruction to the stub, note, at this time, the stub has already 5416// been generated. 5417 5418template<int size, bool big_endian> 5419bool 5420AArch64_relocate_functions<size, big_endian>:: 5421maybe_apply_stub(unsigned int r_type, 5422 const The_relocate_info* relinfo, 5423 const The_rela& rela, 5424 unsigned char* view, 5425 Address address, 5426 const Sized_symbol<size>* gsym, 5427 const Symbol_value<size>* psymval, 5428 const Sized_relobj_file<size, big_endian>* object, 5429 section_size_type current_group_size) 5430{ 5431 if (parameters->options().relocatable()) 5432 return false; 5433 5434 typename elfcpp::Elf_types<size>::Elf_Swxword addend = rela.get_r_addend(); 5435 Address branch_target = psymval->value(object, 0) + addend; 5436 int stub_type = 5437 The_reloc_stub::stub_type_for_reloc(r_type, address, branch_target); 5438 if (stub_type == ST_NONE) 5439 return false; 5440 5441 const The_aarch64_relobj* aarch64_relobj = 5442 static_cast<const The_aarch64_relobj*>(object); 5443 const AArch64_reloc_property* arp = 5444 aarch64_reloc_property_table->get_reloc_property(r_type); 5445 gold_assert(arp != NULL); 5446 5447 // We don't create stubs for undefined symbols, but do for weak. 
5448 if (gsym 5449 && !gsym->use_plt_offset(arp->reference_flags()) 5450 && gsym->is_undefined()) 5451 { 5452 gold_debug(DEBUG_TARGET, 5453 "stub: looking for a stub for undefined symbol %s in file %s", 5454 gsym->name(), aarch64_relobj->name().c_str()); 5455 return false; 5456 } 5457 5458 The_stub_table* stub_table = aarch64_relobj->stub_table(relinfo->data_shndx); 5459 gold_assert(stub_table != NULL); 5460 5461 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info()); 5462 typename The_reloc_stub::Key stub_key(stub_type, gsym, object, r_sym, addend); 5463 The_reloc_stub* stub = stub_table->find_reloc_stub(stub_key); 5464 gold_assert(stub != NULL); 5465 5466 Address new_branch_target = stub_table->address() + stub->offset(); 5467 typename elfcpp::Swap<size, big_endian>::Valtype branch_offset = 5468 new_branch_target - address; 5469 typename This::Status status = This::template 5470 rela_general<32>(view, branch_offset, 0, arp); 5471 if (status != This::STATUS_OKAY) 5472 gold_error(_("Stub is too far away, try a smaller value " 5473 "for '--stub-group-size'. The current value is 0x%lx."), 5474 static_cast<unsigned long>(current_group_size)); 5475 return true; 5476} 5477 5478 5479// Group input sections for stub generation. 5480// 5481// We group input sections in an output section so that the total size, 5482// including any padding space due to alignment is smaller than GROUP_SIZE 5483// unless the only input section in group is bigger than GROUP_SIZE already. 5484// Then an ARM stub table is created to follow the last input section 5485// in group. For each group an ARM stub table is created an is placed 5486// after the last group. If STUB_ALWAYS_AFTER_BRANCH is false, we further 5487// extend the group after the stub table. 
5488 5489template<int size, bool big_endian> 5490void 5491Target_aarch64<size, big_endian>::group_sections( 5492 Layout* layout, 5493 section_size_type group_size, 5494 bool stubs_always_after_branch, 5495 const Task* task) 5496{ 5497 // Group input sections and insert stub table 5498 Layout::Section_list section_list; 5499 layout->get_executable_sections(§ion_list); 5500 for (Layout::Section_list::const_iterator p = section_list.begin(); 5501 p != section_list.end(); 5502 ++p) 5503 { 5504 AArch64_output_section<size, big_endian>* output_section = 5505 static_cast<AArch64_output_section<size, big_endian>*>(*p); 5506 output_section->group_sections(group_size, stubs_always_after_branch, 5507 this, task); 5508 } 5509} 5510 5511 5512// Find the AArch64_input_section object corresponding to the SHNDX-th input 5513// section of RELOBJ. 5514 5515template<int size, bool big_endian> 5516AArch64_input_section<size, big_endian>* 5517Target_aarch64<size, big_endian>::find_aarch64_input_section( 5518 Relobj* relobj, unsigned int shndx) const 5519{ 5520 Section_id sid(relobj, shndx); 5521 typename AArch64_input_section_map::const_iterator p = 5522 this->aarch64_input_section_map_.find(sid); 5523 return (p != this->aarch64_input_section_map_.end()) ? p->second : NULL; 5524} 5525 5526 5527// Make a new AArch64_input_section object. 5528 5529template<int size, bool big_endian> 5530AArch64_input_section<size, big_endian>* 5531Target_aarch64<size, big_endian>::new_aarch64_input_section( 5532 Relobj* relobj, unsigned int shndx) 5533{ 5534 Section_id sid(relobj, shndx); 5535 5536 AArch64_input_section<size, big_endian>* input_section = 5537 new AArch64_input_section<size, big_endian>(relobj, shndx); 5538 input_section->init(); 5539 5540 // Register new AArch64_input_section in map for look-up. 
5541 std::pair<typename AArch64_input_section_map::iterator,bool> ins = 5542 this->aarch64_input_section_map_.insert( 5543 std::make_pair(sid, input_section)); 5544 5545 // Make sure that it we have not created another AArch64_input_section 5546 // for this input section already. 5547 gold_assert(ins.second); 5548 5549 return input_section; 5550} 5551 5552 5553// Relaxation hook. This is where we do stub generation. 5554 5555template<int size, bool big_endian> 5556bool 5557Target_aarch64<size, big_endian>::do_relax( 5558 int pass, 5559 const Input_objects* input_objects, 5560 Symbol_table* symtab, 5561 Layout* layout , 5562 const Task* task) 5563{ 5564 gold_assert(!parameters->options().relocatable()); 5565 if (pass == 1) 5566 { 5567 // We don't handle negative stub_group_size right now. 5568 this->stub_group_size_ = abs(parameters->options().stub_group_size()); 5569 if (this->stub_group_size_ == 1) 5570 { 5571 // Leave room for 4096 4-byte stub entries. If we exceed that, then we 5572 // will fail to link. The user will have to relink with an explicit 5573 // group size option. 5574 this->stub_group_size_ = The_reloc_stub::MAX_BRANCH_OFFSET - 5575 4096 * 4; 5576 } 5577 group_sections(layout, this->stub_group_size_, true, task); 5578 } 5579 else 5580 { 5581 // If this is not the first pass, addresses and file offsets have 5582 // been reset at this point, set them here. 
5583 for (Stub_table_iterator sp = this->stub_tables_.begin(); 5584 sp != this->stub_tables_.end(); ++sp) 5585 { 5586 The_stub_table* stt = *sp; 5587 The_aarch64_input_section* owner = stt->owner(); 5588 off_t off = align_address(owner->original_size(), 5589 stt->addralign()); 5590 stt->set_address_and_file_offset(owner->address() + off, 5591 owner->offset() + off); 5592 } 5593 } 5594 5595 // Scan relocs for relocation stubs 5596 for (Input_objects::Relobj_iterator op = input_objects->relobj_begin(); 5597 op != input_objects->relobj_end(); 5598 ++op) 5599 { 5600 The_aarch64_relobj* aarch64_relobj = 5601 static_cast<The_aarch64_relobj*>(*op); 5602 // Lock the object so we can read from it. This is only called 5603 // single-threaded from Layout::finalize, so it is OK to lock. 5604 Task_lock_obj<Object> tl(task, aarch64_relobj); 5605 aarch64_relobj->scan_sections_for_stubs(this, symtab, layout); 5606 } 5607 5608 bool any_stub_table_changed = false; 5609 for (Stub_table_iterator siter = this->stub_tables_.begin(); 5610 siter != this->stub_tables_.end() && !any_stub_table_changed; ++siter) 5611 { 5612 The_stub_table* stub_table = *siter; 5613 if (stub_table->update_data_size_changed_p()) 5614 { 5615 The_aarch64_input_section* owner = stub_table->owner(); 5616 uint64_t address = owner->address(); 5617 off_t offset = owner->offset(); 5618 owner->reset_address_and_file_offset(); 5619 owner->set_address_and_file_offset(address, offset); 5620 5621 any_stub_table_changed = true; 5622 } 5623 } 5624 5625 // Do not continue relaxation. 5626 bool continue_relaxation = any_stub_table_changed; 5627 if (!continue_relaxation) 5628 for (Stub_table_iterator sp = this->stub_tables_.begin(); 5629 (sp != this->stub_tables_.end()); 5630 ++sp) 5631 (*sp)->finalize_stubs(); 5632 5633 return continue_relaxation; 5634} 5635 5636 5637// Make a new Stub_table. 
5638 5639template<int size, bool big_endian> 5640Stub_table<size, big_endian>* 5641Target_aarch64<size, big_endian>::new_stub_table( 5642 AArch64_input_section<size, big_endian>* owner) 5643{ 5644 Stub_table<size, big_endian>* stub_table = 5645 new Stub_table<size, big_endian>(owner); 5646 stub_table->set_address(align_address( 5647 owner->address() + owner->data_size(), 8)); 5648 stub_table->set_file_offset(owner->offset() + owner->data_size()); 5649 stub_table->finalize_data_size(); 5650 5651 this->stub_tables_.push_back(stub_table); 5652 5653 return stub_table; 5654} 5655 5656 5657template<int size, bool big_endian> 5658uint64_t 5659Target_aarch64<size, big_endian>::do_reloc_addend( 5660 void* arg, unsigned int r_type, uint64_t) const 5661{ 5662 gold_assert(r_type == elfcpp::R_AARCH64_TLSDESC); 5663 uintptr_t intarg = reinterpret_cast<uintptr_t>(arg); 5664 gold_assert(intarg < this->tlsdesc_reloc_info_.size()); 5665 const Tlsdesc_info& ti(this->tlsdesc_reloc_info_[intarg]); 5666 const Symbol_value<size>* psymval = ti.object->local_symbol(ti.r_sym); 5667 gold_assert(psymval->is_tls_symbol()); 5668 // The value of a TLS symbol is the offset in the TLS segment. 5669 return psymval->value(ti.object, 0); 5670} 5671 5672// Return the number of entries in the PLT. 5673 5674template<int size, bool big_endian> 5675unsigned int 5676Target_aarch64<size, big_endian>::plt_entry_count() const 5677{ 5678 if (this->plt_ == NULL) 5679 return 0; 5680 return this->plt_->entry_count(); 5681} 5682 5683// Return the offset of the first non-reserved PLT entry. 5684 5685template<int size, bool big_endian> 5686unsigned int 5687Target_aarch64<size, big_endian>::first_plt_entry_offset() const 5688{ 5689 return this->plt_->first_plt_entry_offset(); 5690} 5691 5692// Return the size of each PLT entry. 
template<int size, bool big_endian>
unsigned int
Target_aarch64<size, big_endian>::plt_entry_size() const
{
  // Delegate to the PLT output data object; callers must only ask once
  // the PLT exists (this->plt_ is dereferenced unconditionally).
  return this->plt_->get_plt_entry_size();
}

// Define the _TLS_MODULE_BASE_ symbol in the TLS segment.

template<int size, bool big_endian>
void
Target_aarch64<size, big_endian>::define_tls_base_symbol(
    Symbol_table* symtab, Layout* layout)
{
  // Only attempt the definition once.
  if (this->tls_base_symbol_defined_)
    return;

  Output_segment* tls_segment = layout->tls_segment();
  if (tls_segment != NULL)
    {
      // _TLS_MODULE_BASE_ always points to the beginning of tls segment.
      symtab->define_in_output_segment("_TLS_MODULE_BASE_", NULL,
				       Symbol_table::PREDEFINED,
				       tls_segment, 0, 0,
				       elfcpp::STT_TLS,
				       elfcpp::STB_LOCAL,
				       elfcpp::STV_HIDDEN, 0,
				       Symbol::SEGMENT_START,
				       true);
    }
  // NOTE(review): the flag is set even when no TLS segment exists, so a
  // later call will not retry the definition.
  this->tls_base_symbol_defined_ = true;
}

// Create the reserved PLT and GOT entries for the TLS descriptor resolver.

template<int size, bool big_endian>
void
Target_aarch64<size, big_endian>::reserve_tlsdesc_entries(
    Symbol_table* symtab, Layout* layout)
{
  // The TLSDESC entries hang off the PLT, so make sure it exists first.
  if (this->plt_ == NULL)
    this->make_plt_section(symtab, layout);

  if (!this->plt_->has_tlsdesc_entry())
    {
      // Allocate the TLSDESC_GOT entry.
      Output_data_got_aarch64<size, big_endian>* got =
	this->got_section(symtab, layout);
      unsigned int got_offset = got->add_constant(0);

      // Allocate the TLSDESC_PLT entry.
      this->plt_->reserve_tlsdesc_entry(got_offset);
    }
}

// Create a GOT entry for the TLS module index.
template<int size, bool big_endian>
unsigned int
Target_aarch64<size, big_endian>::got_mod_index_entry(
    Symbol_table* symtab, Layout* layout,
    Sized_relobj_file<size, big_endian>* object)
{
  // Lazily create the entry; -1U marks "not yet allocated".
  if (this->got_mod_index_offset_ == -1U)
    {
      gold_assert(symtab != NULL && layout != NULL && object != NULL);
      Reloc_section* rela_dyn = this->rela_dyn_section(layout);
      Output_data_got_aarch64<size, big_endian>* got =
	this->got_section(symtab, layout);
      // The module-index entry occupies two GOT words; only the first
      // carries the DTPMOD64 dynamic relocation, the second stays zero.
      unsigned int got_offset = got->add_constant(0);
      rela_dyn->add_local(object, 0, elfcpp::R_AARCH64_TLS_DTPMOD64, got,
			  got_offset, 0);
      got->add_constant(0);
      this->got_mod_index_offset_ = got_offset;
    }
  return this->got_mod_index_offset_;
}

// Optimize the TLS relocation type based on what we know about the
// symbol.  IS_FINAL is true if the final address of this symbol is
// known at link time.

template<int size, bool big_endian>
tls::Tls_optimization
Target_aarch64<size, big_endian>::optimize_tls_reloc(bool is_final,
						     int r_type)
{
  // If we are generating a shared library, then we can't do anything
  // in the linker.
  if (parameters->options().shared())
    return tls::TLSOPT_NONE;

  switch (r_type)
    {
    case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
    case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
    case elfcpp::R_AARCH64_TLSDESC_LD_PREL19:
    case elfcpp::R_AARCH64_TLSDESC_ADR_PREL21:
    case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
    case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
    case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
    case elfcpp::R_AARCH64_TLSDESC_OFF_G1:
    case elfcpp::R_AARCH64_TLSDESC_OFF_G0_NC:
    case elfcpp::R_AARCH64_TLSDESC_LDR:
    case elfcpp::R_AARCH64_TLSDESC_ADD:
    case elfcpp::R_AARCH64_TLSDESC_CALL:
      // These are General-Dynamic which permits fully general TLS
      // access.  Since we know that we are generating an executable,
      // we can convert this to Initial-Exec.  If we also know that
      // this is a local symbol, we can further switch to Local-Exec.
      if (is_final)
	return tls::TLSOPT_TO_LE;
      return tls::TLSOPT_TO_IE;

    case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
    case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
    case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
      // These are Local-Dynamic, which refer to local symbols in the
      // dynamic TLS block.  Since we know that we are generating an
      // executable, we can switch to Local-Exec.
      return tls::TLSOPT_TO_LE;

    case elfcpp::R_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case elfcpp::R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case elfcpp::R_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
      // These are Initial-Exec relocs which get the thread offset
      // from the GOT.  If we know that we are linking against the
      // local symbol, we can switch to Local-Exec, which links the
      // thread offset into the instruction.
      if (is_final)
	return tls::TLSOPT_TO_LE;
      return tls::TLSOPT_NONE;

    case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
    case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
    case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
    case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
    case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
    case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
      // When we already have Local-Exec, there is nothing further we
      // can do.
      return tls::TLSOPT_NONE;

    default:
      gold_unreachable();
    }
}

// Returns true if this relocation type could be that of a function pointer.

template<int size, bool big_endian>
inline bool
Target_aarch64<size, big_endian>::Scan::possible_function_pointer_reloc(
    unsigned int r_type)
{
  // Address-materializing and GOT-referencing relocations may be used
  // to take the address of a function.
  switch (r_type)
    {
    case elfcpp::R_AARCH64_ADR_PREL_PG_HI21:
    case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC:
    case elfcpp::R_AARCH64_ADD_ABS_LO12_NC:
    case elfcpp::R_AARCH64_ADR_GOT_PAGE:
    case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
      {
	return true;
      }
    }
  return false;
}

// For safe ICF, scan a relocation for a local symbol to check if it
// corresponds to a function pointer being taken.  In that case mark
// the function whose pointer was taken as not foldable.

template<int size, bool big_endian>
inline bool
Target_aarch64<size, big_endian>::Scan::local_reloc_may_be_function_pointer(
    Symbol_table* ,
    Layout* ,
    Target_aarch64<size, big_endian>* ,
    Sized_relobj_file<size, big_endian>* ,
    unsigned int ,
    Output_section* ,
    const elfcpp::Rela<size, big_endian>& ,
    unsigned int r_type,
    const elfcpp::Sym<size, big_endian>&)
{
  // When building a shared library, do not fold any local symbols.
  return (parameters->options().shared()
	  || possible_function_pointer_reloc(r_type));
}

// For safe ICF, scan a relocation for a global symbol to check if it
// corresponds to a function pointer being taken.  In that case mark
// the function whose pointer was taken as not foldable.
5895 5896template<int size, bool big_endian> 5897inline bool 5898Target_aarch64<size, big_endian>::Scan::global_reloc_may_be_function_pointer( 5899 Symbol_table* , 5900 Layout* , 5901 Target_aarch64<size, big_endian>* , 5902 Sized_relobj_file<size, big_endian>* , 5903 unsigned int , 5904 Output_section* , 5905 const elfcpp::Rela<size, big_endian>& , 5906 unsigned int r_type, 5907 Symbol* gsym) 5908{ 5909 // When building a shared library, do not fold symbols whose visibility 5910 // is hidden, internal or protected. 5911 return ((parameters->options().shared() 5912 && (gsym->visibility() == elfcpp::STV_INTERNAL 5913 || gsym->visibility() == elfcpp::STV_PROTECTED 5914 || gsym->visibility() == elfcpp::STV_HIDDEN)) 5915 || possible_function_pointer_reloc(r_type)); 5916} 5917 5918// Report an unsupported relocation against a local symbol. 5919 5920template<int size, bool big_endian> 5921void 5922Target_aarch64<size, big_endian>::Scan::unsupported_reloc_local( 5923 Sized_relobj_file<size, big_endian>* object, 5924 unsigned int r_type) 5925{ 5926 gold_error(_("%s: unsupported reloc %u against local symbol"), 5927 object->name().c_str(), r_type); 5928} 5929 5930// We are about to emit a dynamic relocation of type R_TYPE. If the 5931// dynamic linker does not support it, issue an error. 5932 5933template<int size, bool big_endian> 5934void 5935Target_aarch64<size, big_endian>::Scan::check_non_pic(Relobj* object, 5936 unsigned int r_type) 5937{ 5938 gold_assert(r_type != elfcpp::R_AARCH64_NONE); 5939 5940 switch (r_type) 5941 { 5942 // These are the relocation types supported by glibc for AARCH64. 
5943 case elfcpp::R_AARCH64_NONE: 5944 case elfcpp::R_AARCH64_COPY: 5945 case elfcpp::R_AARCH64_GLOB_DAT: 5946 case elfcpp::R_AARCH64_JUMP_SLOT: 5947 case elfcpp::R_AARCH64_RELATIVE: 5948 case elfcpp::R_AARCH64_TLS_DTPREL64: 5949 case elfcpp::R_AARCH64_TLS_DTPMOD64: 5950 case elfcpp::R_AARCH64_TLS_TPREL64: 5951 case elfcpp::R_AARCH64_TLSDESC: 5952 case elfcpp::R_AARCH64_IRELATIVE: 5953 case elfcpp::R_AARCH64_ABS32: 5954 case elfcpp::R_AARCH64_ABS64: 5955 return; 5956 5957 default: 5958 break; 5959 } 5960 5961 // This prevents us from issuing more than one error per reloc 5962 // section. But we can still wind up issuing more than one 5963 // error per object file. 5964 if (this->issued_non_pic_error_) 5965 return; 5966 gold_assert(parameters->options().output_is_position_independent()); 5967 object->error(_("requires unsupported dynamic reloc; " 5968 "recompile with -fPIC")); 5969 this->issued_non_pic_error_ = true; 5970 return; 5971} 5972 5973// Return whether we need to make a PLT entry for a relocation of the 5974// given type against a STT_GNU_IFUNC symbol. 5975 5976template<int size, bool big_endian> 5977bool 5978Target_aarch64<size, big_endian>::Scan::reloc_needs_plt_for_ifunc( 5979 Sized_relobj_file<size, big_endian>* object, 5980 unsigned int r_type) 5981{ 5982 const AArch64_reloc_property* arp = 5983 aarch64_reloc_property_table->get_reloc_property(r_type); 5984 gold_assert(arp != NULL); 5985 5986 int flags = arp->reference_flags(); 5987 if (flags & Symbol::TLS_REF) 5988 { 5989 gold_error(_("%s: unsupported TLS reloc %s for IFUNC symbol"), 5990 object->name().c_str(), arp->name().c_str()); 5991 return false; 5992 } 5993 return flags != 0; 5994} 5995 5996// Scan a relocation for a local symbol. 
template<int size, bool big_endian>
inline void
Target_aarch64<size, big_endian>::Scan::local(
    Symbol_table* symtab,
    Layout* layout,
    Target_aarch64<size, big_endian>* target,
    Sized_relobj_file<size, big_endian>* object,
    unsigned int data_shndx,
    Output_section* output_section,
    const elfcpp::Rela<size, big_endian>& rela,
    unsigned int r_type,
    const elfcpp::Sym<size, big_endian>& lsym,
    bool is_discarded)
{
  // Nothing to do for a reloc in a section that has been discarded.
  if (is_discarded)
    return;

  typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
      Reloc_section;
  unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());

  // A local STT_GNU_IFUNC symbol may require a PLT entry.
  bool is_ifunc = lsym.get_st_type() == elfcpp::STT_GNU_IFUNC;
  if (is_ifunc && this->reloc_needs_plt_for_ifunc(object, r_type))
    target->make_local_ifunc_plt_entry(symtab, layout, object, r_sym);

  switch (r_type)
    {
    case elfcpp::R_AARCH64_NONE:
      break;

    // Absolute relocs narrower than the address size cannot be
    // expressed as a dynamic relocation, so they are rejected in
    // position-independent output.
    case elfcpp::R_AARCH64_ABS32:
    case elfcpp::R_AARCH64_ABS16:
      if (parameters->options().output_is_position_independent())
	{
	  gold_error(_("%s: unsupported reloc %u in pos independent link."),
		     object->name().c_str(), r_type);
	}
      break;

    case elfcpp::R_AARCH64_ABS64:
      // If building a shared library or pie, we need to mark this as a
      // dynamic relocation, so that the dynamic loader can relocate it.
      if (parameters->options().output_is_position_independent())
	{
	  Reloc_section* rela_dyn = target->rela_dyn_section(layout);
	  rela_dyn->add_local_relative(object, r_sym,
				       elfcpp::R_AARCH64_RELATIVE,
				       output_section,
				       data_shndx,
				       rela.get_r_offset(),
				       rela.get_r_addend(),
				       is_ifunc);
	}
      break;

    // PC-relative data relocs against a local symbol resolve at link
    // time; nothing to scan.
    case elfcpp::R_AARCH64_PREL64:
    case elfcpp::R_AARCH64_PREL32:
    case elfcpp::R_AARCH64_PREL16:
      break;

    case elfcpp::R_AARCH64_ADR_GOT_PAGE:
    case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
    case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
      // The above relocations are used to access GOT entries.
      {
	Output_data_got_aarch64<size, big_endian>* got =
	    target->got_section(symtab, layout);
	bool is_new = false;
	// This symbol requires a GOT entry.  For an IFUNC symbol the
	// GOT entry holds the PLT address rather than the symbol value.
	if (is_ifunc)
	  is_new = got->add_local_plt(object, r_sym, GOT_TYPE_STANDARD);
	else
	  is_new = got->add_local(object, r_sym, GOT_TYPE_STANDARD);
	// In PIC output, the GOT slot itself needs a RELATIVE dynamic
	// reloc; only emit it the first time the slot is created.
	if (is_new && parameters->options().output_is_position_independent())
	  target->rela_dyn_section(layout)->
	    add_local_relative(object,
			       r_sym,
			       elfcpp::R_AARCH64_RELATIVE,
			       got,
			       object->local_got_offset(r_sym,
							GOT_TYPE_STANDARD),
			       0,
			       false);
      }
      break;

    // MOVW absolute-address relocs; like ABS16/ABS32 they cannot be
    // represented dynamically, so they are invalid in PIC output.
    case elfcpp::R_AARCH64_MOVW_UABS_G0:        // 263
    case elfcpp::R_AARCH64_MOVW_UABS_G0_NC:     // 264
    case elfcpp::R_AARCH64_MOVW_UABS_G1:        // 265
    case elfcpp::R_AARCH64_MOVW_UABS_G1_NC:     // 266
    case elfcpp::R_AARCH64_MOVW_UABS_G2:        // 267
    case elfcpp::R_AARCH64_MOVW_UABS_G2_NC:     // 268
    case elfcpp::R_AARCH64_MOVW_UABS_G3:        // 269
    case elfcpp::R_AARCH64_MOVW_SABS_G0:        // 270
    case elfcpp::R_AARCH64_MOVW_SABS_G1:        // 271
    case elfcpp::R_AARCH64_MOVW_SABS_G2:        // 272
      if (parameters->options().output_is_position_independent())
	{
	  gold_error(_("%s: unsupported reloc %u in pos independent link."),
		     object->name().c_str(), r_type);
	}
      break;

    // PC-relative address computation and :lo12: page-offset relocs;
    // resolved at link time against a local symbol.
    case elfcpp::R_AARCH64_LD_PREL_LO19:        // 273
    case elfcpp::R_AARCH64_ADR_PREL_LO21:       // 274
    case elfcpp::R_AARCH64_ADR_PREL_PG_HI21:    // 275
    case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: // 276
    case elfcpp::R_AARCH64_ADD_ABS_LO12_NC:     // 277
    case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC:   // 278
    case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC:  // 284
    case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC:  // 285
    case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC:  // 286
    case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC: // 299
      break;

    // Control flow, pc-relative. We don't need to do anything for a relative
    // addressing relocation against a local symbol if it does not reference
    // the GOT.
    case elfcpp::R_AARCH64_TSTBR14:
    case elfcpp::R_AARCH64_CONDBR19:
    case elfcpp::R_AARCH64_JUMP26:
    case elfcpp::R_AARCH64_CALL26:
      break;

    // Initial-exec TLS model.
    case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
      {
	tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
	  optimize_tls_reloc(!parameters->options().shared(), r_type);
	// If the IE access can be relaxed to local-exec, no GOT entry
	// is needed.
	if (tlsopt == tls::TLSOPT_TO_LE)
	  break;

	layout->set_has_static_tls();
	// Create a GOT entry for the tp-relative offset.
	if (!parameters->doing_static_link())
	  {
	    // Dynamic link: let the dynamic linker fill in the offset
	    // via a TPREL64 dynamic reloc.
	    Output_data_got_aarch64<size, big_endian>* got =
		target->got_section(symtab, layout);
	    got->add_local_with_rel(object, r_sym, GOT_TYPE_TLS_OFFSET,
				    target->rela_dyn_section(layout),
				    elfcpp::R_AARCH64_TLS_TPREL64);
	  }
	else if (!object->local_has_got_offset(r_sym,
					       GOT_TYPE_TLS_OFFSET))
	  {
	    // Static link: resolve the GOT entry at link time with a
	    // static reloc instead of a dynamic one.
	    Output_data_got_aarch64<size, big_endian>* got =
		target->got_section(symtab, layout);
	    got->add_local(object, r_sym, GOT_TYPE_TLS_OFFSET);
	    unsigned int got_offset =
		object->local_got_offset(r_sym, GOT_TYPE_TLS_OFFSET);
	    const elfcpp::Elf_Xword addend = rela.get_r_addend();
	    gold_assert(addend == 0);
	    got->add_static_reloc(got_offset, elfcpp::R_AARCH64_TLS_TPREL64,
				  object, r_sym);
	  }
      }
      break;

    // General-dynamic TLS model.
    case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
    case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
      {
	tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
	  optimize_tls_reloc(!parameters->options().shared(), r_type);
	if (tlsopt == tls::TLSOPT_TO_LE)
	  {
	    layout->set_has_static_tls();
	    break;
	  }
	gold_assert(tlsopt == tls::TLSOPT_NONE);

	// Create two consecutive GOT entries: module index and
	// dtp-relative offset.
	Output_data_got_aarch64<size, big_endian>* got =
	    target->got_section(symtab, layout);
	got->add_local_pair_with_rel(object,r_sym, data_shndx,
				     GOT_TYPE_TLS_PAIR,
				     target->rela_dyn_section(layout),
				     elfcpp::R_AARCH64_TLS_DTPMOD64);
      }
      break;

    // Local-exec TLS model: only valid in non-shared output.
    case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
    case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
    case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
    case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
    case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
    case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
      {
	layout->set_has_static_tls();
	bool output_is_shared = parameters->options().shared();
	if (output_is_shared)
	  gold_error(_("%s: unsupported TLSLE reloc %u in shared code."),
		     object->name().c_str(), r_type);
      }
      break;

    // Local-dynamic TLS model.
    case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
    case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
      {
	tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
	  optimize_tls_reloc(!parameters->options().shared(), r_type);
	if (tlsopt == tls::TLSOPT_NONE)
	  {
	    // Create a GOT entry for the module index.
	    target->got_mod_index_entry(symtab, layout, object);
	  }
	else if (tlsopt != tls::TLSOPT_TO_LE)
	  unsupported_reloc_local(object, r_type);
      }
      break;

    // Other local-dynamic relocs are link-time only against local
    // symbols.
    case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
      break;

    // TLS descriptor model.
    case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
    case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
    case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
      {
	tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
	  optimize_tls_reloc(!parameters->options().shared(), r_type);
	target->define_tls_base_symbol(symtab, layout);
	if (tlsopt == tls::TLSOPT_NONE)
	  {
	    // Create reserved PLT and GOT entries for the resolver.
	    target->reserve_tlsdesc_entries(symtab, layout);

	    // Generate a double GOT entry with an R_AARCH64_TLSDESC reloc.
	    // The R_AARCH64_TLSDESC reloc is resolved lazily, so the GOT
	    // entry needs to be in an area in .got.plt, not .got. Call
	    // got_section to make sure the section has been created.
	    target->got_section(symtab, layout);
	    Output_data_got<size, big_endian>* got =
		target->got_tlsdesc_section();
	    // Note: deliberately shadows the outer r_sym with the same
	    // value.
	    unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
	    if (!object->local_has_got_offset(r_sym, GOT_TYPE_TLS_DESC))
	      {
		unsigned int got_offset = got->add_constant(0);
		got->add_constant(0);
		object->set_local_got_offset(r_sym, GOT_TYPE_TLS_DESC,
					     got_offset);
		Reloc_section* rt = target->rela_tlsdesc_section(layout);
		// We store the arguments we need in a vector, and use
		// the index into the vector as the parameter to pass
		// to the target specific routines.
		uintptr_t intarg = target->add_tlsdesc_info(object, r_sym);
		void* arg = reinterpret_cast<void*>(intarg);
		rt->add_target_specific(elfcpp::R_AARCH64_TLSDESC, arg,
					got, got_offset, 0);
	      }
	  }
	else if (tlsopt != tls::TLSOPT_TO_LE)
	  unsupported_reloc_local(object, r_type);
      }
      break;

    case elfcpp::R_AARCH64_TLSDESC_CALL:
      break;

    default:
      unsupported_reloc_local(object, r_type);
    }
}


// Report an unsupported relocation against a global symbol.
template<int size, bool big_endian>
void
Target_aarch64<size, big_endian>::Scan::unsupported_reloc_global(
    Sized_relobj_file<size, big_endian>* object,
    unsigned int r_type,
    Symbol* gsym)
{
  gold_error(_("%s: unsupported reloc %u against global symbol %s"),
	     object->name().c_str(), r_type, gsym->demangled_name().c_str());
}

// Scan a relocation for a global symbol.

template<int size, bool big_endian>
inline void
Target_aarch64<size, big_endian>::Scan::global(
    Symbol_table* symtab,
    Layout* layout,
    Target_aarch64<size, big_endian>* target,
    Sized_relobj_file<size, big_endian> * object,
    unsigned int data_shndx,
    Output_section* output_section,
    const elfcpp::Rela<size, big_endian>& rela,
    unsigned int r_type,
    Symbol* gsym)
{
  // A STT_GNU_IFUNC symbol may require a PLT entry.
  if (gsym->type() == elfcpp::STT_GNU_IFUNC
      && this->reloc_needs_plt_for_ifunc(object, r_type))
    target->make_plt_entry(symtab, layout, gsym);

  typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
    Reloc_section;
  const AArch64_reloc_property* arp =
      aarch64_reloc_property_table->get_reloc_property(r_type);
  gold_assert(arp != NULL);

  switch (r_type)
    {
    case elfcpp::R_AARCH64_NONE:
      break;

    case elfcpp::R_AARCH64_ABS16:
    case elfcpp::R_AARCH64_ABS32:
    case elfcpp::R_AARCH64_ABS64:
      {
	// Make a PLT entry if necessary.
	if (gsym->needs_plt_entry())
	  {
	    target->make_plt_entry(symtab, layout, gsym);
	    // Since this is not a PC-relative relocation, we may be
	    // taking the address of a function. In that case we need to
	    // set the entry in the dynamic symbol table to the address of
	    // the PLT entry.
	    if (gsym->is_from_dynobj() && !parameters->options().shared())
	      gsym->set_needs_dynsym_value();
	  }
	// Make a dynamic relocation if necessary.
	if (gsym->needs_dynamic_reloc(arp->reference_flags()))
	  {
	    if (!parameters->options().output_is_position_independent()
		&& gsym->may_need_copy_reloc())
	      {
		// Position-dependent executable: defer to a COPY reloc
		// (or an equivalent saved reloc) instead of a dynamic one.
		target->copy_reloc(symtab, layout, object,
				   data_shndx, output_section, gsym, rela);
	      }
	    else if (r_type == elfcpp::R_AARCH64_ABS64
		     && gsym->type() == elfcpp::STT_GNU_IFUNC
		     && gsym->can_use_relative_reloc(false)
		     && !gsym->is_from_dynobj()
		     && !gsym->is_undefined()
		     && !gsym->is_preemptible())
	      {
		// Use an IRELATIVE reloc for a locally defined STT_GNU_IFUNC
		// symbol. This makes a function address in a PIE executable
		// match the address in a shared library that it links against.
		Reloc_section* rela_dyn =
		    target->rela_irelative_section(layout);
		// Deliberately shadows the parameter r_type.
		unsigned int r_type = elfcpp::R_AARCH64_IRELATIVE;
		rela_dyn->add_symbolless_global_addend(gsym, r_type,
						       output_section, object,
						       data_shndx,
						       rela.get_r_offset(),
						       rela.get_r_addend());
	      }
	    else if (r_type == elfcpp::R_AARCH64_ABS64
		     && gsym->can_use_relative_reloc(false))
	      {
		// Pointer-sized absolute reloc to a non-preemptible
		// symbol: a RELATIVE reloc suffices.
		Reloc_section* rela_dyn = target->rela_dyn_section(layout);
		rela_dyn->add_global_relative(gsym,
					      elfcpp::R_AARCH64_RELATIVE,
					      output_section,
					      object,
					      data_shndx,
					      rela.get_r_offset(),
					      rela.get_r_addend(),
					      false);
	      }
	    else
	      {
		// Emit the reloc as-is dynamically; diagnose types the
		// dynamic linker cannot handle.
		check_non_pic(object, r_type);
		Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>*
		    rela_dyn = target->rela_dyn_section(layout);
		rela_dyn->add_global(
		    gsym, r_type, output_section, object,
		    data_shndx, rela.get_r_offset(),rela.get_r_addend());
	      }
	  }
      }
      break;

    case elfcpp::R_AARCH64_PREL16:
    case elfcpp::R_AARCH64_PREL32:
    case elfcpp::R_AARCH64_PREL64:
      // This is used to fill the GOT absolute address.
      if (gsym->needs_plt_entry())
	{
	  target->make_plt_entry(symtab, layout, gsym);
	}
      break;

    // MOVW absolute-address relocs cannot be represented as dynamic
    // relocations, so they are invalid in position-independent output.
    case elfcpp::R_AARCH64_MOVW_UABS_G0:        // 263
    case elfcpp::R_AARCH64_MOVW_UABS_G0_NC:     // 264
    case elfcpp::R_AARCH64_MOVW_UABS_G1:        // 265
    case elfcpp::R_AARCH64_MOVW_UABS_G1_NC:     // 266
    case elfcpp::R_AARCH64_MOVW_UABS_G2:        // 267
    case elfcpp::R_AARCH64_MOVW_UABS_G2_NC:     // 268
    case elfcpp::R_AARCH64_MOVW_UABS_G3:        // 269
    case elfcpp::R_AARCH64_MOVW_SABS_G0:        // 270
    case elfcpp::R_AARCH64_MOVW_SABS_G1:        // 271
    case elfcpp::R_AARCH64_MOVW_SABS_G2:        // 272
      if (parameters->options().output_is_position_independent())
	{
	  gold_error(_("%s: unsupported reloc %u in pos independent link."),
		     object->name().c_str(), r_type);
	}
      break;

    // PC-relative address computation and :lo12: page-offset relocs.
    case elfcpp::R_AARCH64_LD_PREL_LO19:        // 273
    case elfcpp::R_AARCH64_ADR_PREL_LO21:       // 274
    case elfcpp::R_AARCH64_ADR_PREL_PG_HI21:    // 275
    case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: // 276
    case elfcpp::R_AARCH64_ADD_ABS_LO12_NC:     // 277
    case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC:   // 278
    case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC:  // 284
    case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC:  // 285
    case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC:  // 286
    case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC: // 299
      {
	if (gsym->needs_plt_entry())
	  target->make_plt_entry(symtab, layout, gsym);
	// Make a dynamic relocation if necessary.
	if (gsym->needs_dynamic_reloc(arp->reference_flags()))
	  {
	    if (parameters->options().output_is_executable()
		&& gsym->may_need_copy_reloc())
	      {
		target->copy_reloc(symtab, layout, object,
				   data_shndx, output_section, gsym, rela);
	      }
	  }
	break;
      }

    case elfcpp::R_AARCH64_ADR_GOT_PAGE:
    case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
    case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
      {
	// The above relocations are used to access GOT entries.
	// Note a GOT entry is an *address* to a symbol.
	// The symbol requires a GOT entry
	Output_data_got_aarch64<size, big_endian>* got =
	    target->got_section(symtab, layout);
	if (gsym->final_value_is_known())
	  {
	    // For a STT_GNU_IFUNC symbol we want the PLT address.
	    if (gsym->type() == elfcpp::STT_GNU_IFUNC)
	      got->add_global_plt(gsym, GOT_TYPE_STANDARD);
	    else
	      got->add_global(gsym, GOT_TYPE_STANDARD);
	  }
	else
	  {
	    // If this symbol is not fully resolved, we need to add a dynamic
	    // relocation for it.
	    Reloc_section* rela_dyn = target->rela_dyn_section(layout);

	    // Use a GLOB_DAT rather than a RELATIVE reloc if:
	    //
	    // 1) The symbol may be defined in some other module.
	    // 2) We are building a shared library and this is a protected
	    // symbol; using GLOB_DAT means that the dynamic linker can use
	    // the address of the PLT in the main executable when appropriate
	    // so that function address comparisons work.
	    // 3) This is a STT_GNU_IFUNC symbol in position dependent code,
	    // again so that function address comparisons work.
	    if (gsym->is_from_dynobj()
		|| gsym->is_undefined()
		|| gsym->is_preemptible()
		|| (gsym->visibility() == elfcpp::STV_PROTECTED
		    && parameters->options().shared())
		|| (gsym->type() == elfcpp::STT_GNU_IFUNC
		    && parameters->options().output_is_position_independent()))
	      got->add_global_with_rel(gsym, GOT_TYPE_STANDARD,
				       rela_dyn, elfcpp::R_AARCH64_GLOB_DAT);
	    else
	      {
		// For a STT_GNU_IFUNC symbol we want to write the PLT
		// offset into the GOT, so that function pointer
		// comparisons work correctly.
		bool is_new;
		if (gsym->type() != elfcpp::STT_GNU_IFUNC)
		  is_new = got->add_global(gsym, GOT_TYPE_STANDARD);
		else
		  {
		    is_new = got->add_global_plt(gsym, GOT_TYPE_STANDARD);
		    // Tell the dynamic linker to use the PLT address
		    // when resolving relocations.
		    if (gsym->is_from_dynobj()
			&& !parameters->options().shared())
		      gsym->set_needs_dynsym_value();
		  }
		// Only the first creation of the GOT slot needs the
		// RELATIVE reloc.
		if (is_new)
		  {
		    rela_dyn->add_global_relative(
			gsym, elfcpp::R_AARCH64_RELATIVE,
			got,
			gsym->got_offset(GOT_TYPE_STANDARD),
			0,
			false);
		  }
	      }
	  }
	break;
      }

    case elfcpp::R_AARCH64_TSTBR14:
    case elfcpp::R_AARCH64_CONDBR19:
    case elfcpp::R_AARCH64_JUMP26:
    case elfcpp::R_AARCH64_CALL26:
      {
	if (gsym->final_value_is_known())
	  break;

	// A locally defined, non-preemptible symbol resolves directly.
	if (gsym->is_defined() &&
	    !gsym->is_from_dynobj() &&
	    !gsym->is_preemptible())
	  break;

	// Make plt entry for function call.
	target->make_plt_entry(symtab, layout, gsym);
	break;
      }

    case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
    case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:  // General dynamic
      {
	tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
	  optimize_tls_reloc(gsym->final_value_is_known(), r_type);
	if (tlsopt == tls::TLSOPT_TO_LE)
	  {
	    layout->set_has_static_tls();
	    break;
	  }
	gold_assert(tlsopt == tls::TLSOPT_NONE);

	// General dynamic.
	Output_data_got_aarch64<size, big_endian>* got =
	    target->got_section(symtab, layout);
	// Create 2 consecutive entries for module index and offset.
	got->add_global_pair_with_rel(gsym, GOT_TYPE_TLS_PAIR,
				      target->rela_dyn_section(layout),
				      elfcpp::R_AARCH64_TLS_DTPMOD64,
				      elfcpp::R_AARCH64_TLS_DTPREL64);
      }
      break;

    case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
    case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:  // Local dynamic
      {
	tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
	  optimize_tls_reloc(!parameters->options().shared(), r_type);
	if (tlsopt == tls::TLSOPT_NONE)
	  {
	    // Create a GOT entry for the module index.
	    target->got_mod_index_entry(symtab, layout, object);
	  }
	else if (tlsopt != tls::TLSOPT_TO_LE)
	  // NOTE(review): this calls unsupported_reloc_local from the
	  // global scan; unsupported_reloc_global(object, r_type, gsym)
	  // looks intended — confirm before changing.
	  unsupported_reloc_local(object, r_type);
      }
      break;

    case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:  // Other local dynamic
      break;

    case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:  // Initial executable
      {
	tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
	  optimize_tls_reloc(gsym->final_value_is_known(), r_type);
	if (tlsopt == tls::TLSOPT_TO_LE)
	  break;

	layout->set_has_static_tls();
	// Create a GOT entry for the tp-relative offset.
	Output_data_got_aarch64<size, big_endian>* got
	  = target->got_section(symtab, layout);
	if (!parameters->doing_static_link())
	  {
	    // Dynamic link: offset filled in by the dynamic linker.
	    got->add_global_with_rel(
		gsym, GOT_TYPE_TLS_OFFSET,
		target->rela_dyn_section(layout),
		elfcpp::R_AARCH64_TLS_TPREL64);
	  }
	if (!gsym->has_got_offset(GOT_TYPE_TLS_OFFSET))
	  {
	    // Static link: resolve the entry at link time.
	    got->add_global(gsym, GOT_TYPE_TLS_OFFSET);
	    unsigned int got_offset =
		gsym->got_offset(GOT_TYPE_TLS_OFFSET);
	    const elfcpp::Elf_Xword addend = rela.get_r_addend();
	    gold_assert(addend == 0);
	    got->add_static_reloc(got_offset,
				  elfcpp::R_AARCH64_TLS_TPREL64, gsym);
	  }
      }
      break;

    case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
    case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
    case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
    case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
    case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
    case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:  // Local executable
      layout->set_has_static_tls();
      if (parameters->options().shared())
	gold_error(_("%s: unsupported TLSLE reloc type %u in shared objects."),
		   object->name().c_str(), r_type);
      break;

    case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
    case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
    case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:  // TLS descriptor
      {
	target->define_tls_base_symbol(symtab, layout);
	tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
	  optimize_tls_reloc(gsym->final_value_is_known(), r_type);
	if (tlsopt == tls::TLSOPT_NONE)
	  {
	    // Create reserved PLT and GOT entries for the resolver.
	    target->reserve_tlsdesc_entries(symtab, layout);

	    // Create a double GOT entry with an R_AARCH64_TLSDESC
	    // relocation. The R_AARCH64_TLSDESC is resolved lazily, so the GOT
	    // entry needs to be in an area in .got.plt, not .got. Call
	    // got_section to make sure the section has been created.
	    target->got_section(symtab, layout);
	    Output_data_got<size, big_endian>* got =
		target->got_tlsdesc_section();
	    Reloc_section* rt = target->rela_tlsdesc_section(layout);
	    got->add_global_pair_with_rel(gsym, GOT_TYPE_TLS_DESC, rt,
					  elfcpp::R_AARCH64_TLSDESC, 0);
	  }
	else if (tlsopt == tls::TLSOPT_TO_IE)
	  {
	    // Create a GOT entry for the tp-relative offset.
	    Output_data_got<size, big_endian>* got
		= target->got_section(symtab, layout);
	    got->add_global_with_rel(gsym, GOT_TYPE_TLS_OFFSET,
				     target->rela_dyn_section(layout),
				     elfcpp::R_AARCH64_TLS_TPREL64);
	  }
	else if (tlsopt != tls::TLSOPT_TO_LE)
	  unsupported_reloc_global(object, r_type, gsym);
      }
      break;

    case elfcpp::R_AARCH64_TLSDESC_CALL:
      break;

    default:
      gold_error(_("%s: unsupported reloc type in global scan"),
		 aarch64_reloc_property_table->
		 reloc_name_in_error_message(r_type).c_str());
    }
  return;
}  // End of Scan::global


// Create the PLT section.
template<int size, bool big_endian>
void
Target_aarch64<size, big_endian>::make_plt_section(
    Symbol_table* symtab, Layout* layout)
{
  // Only create the PLT once.
  if (this->plt_ == NULL)
    {
      // Create the GOT section first.
      this->got_section(symtab, layout);

      this->plt_ = this->make_data_plt(layout, this->got_, this->got_plt_,
				       this->got_irelative_);

      layout->add_output_section_data(".plt", elfcpp::SHT_PROGBITS,
				      (elfcpp::SHF_ALLOC
				       | elfcpp::SHF_EXECINSTR),
				      this->plt_, ORDER_PLT, false);

      // Make the sh_info field of .rela.plt point to .plt.
      Output_section* rela_plt_os = this->plt_->rela_plt()->output_section();
      rela_plt_os->set_info_section(this->plt_->output_section());
    }
}

// Return the section for TLSDESC relocations.

template<int size, bool big_endian>
typename Target_aarch64<size, big_endian>::Reloc_section*
Target_aarch64<size, big_endian>::rela_tlsdesc_section(Layout* layout) const
{
  return this->plt_section()->rela_tlsdesc(layout);
}

// Create a PLT entry for a global symbol.

template<int size, bool big_endian>
void
Target_aarch64<size, big_endian>::make_plt_entry(
    Symbol_table* symtab,
    Layout* layout,
    Symbol* gsym)
{
  // Nothing to do if the symbol already has a PLT entry.
  if (gsym->has_plt_offset())
    return;

  if (this->plt_ == NULL)
    this->make_plt_section(symtab, layout);

  this->plt_->add_entry(symtab, layout, gsym);
}

// Make a PLT entry for a local STT_GNU_IFUNC symbol.

template<int size, bool big_endian>
void
Target_aarch64<size, big_endian>::make_local_ifunc_plt_entry(
    Symbol_table* symtab, Layout* layout,
    Sized_relobj_file<size, big_endian>* relobj,
    unsigned int local_sym_index)
{
  // Nothing to do if the local symbol already has a PLT entry.
  if (relobj->local_has_plt_offset(local_sym_index))
    return;
  if (this->plt_ == NULL)
    this->make_plt_section(symtab, layout);
  unsigned int plt_offset = this->plt_->add_local_ifunc_entry(symtab, layout,
							      relobj,
							      local_sym_index);
  relobj->set_local_plt_offset(local_sym_index, plt_offset);
}

// Process relocations for garbage collection, recording section
// references so unreferenced sections can be dropped.

template<int size, bool big_endian>
void
Target_aarch64<size, big_endian>::gc_process_relocs(
    Symbol_table* symtab,
    Layout* layout,
    Sized_relobj_file<size, big_endian>* object,
    unsigned int data_shndx,
    unsigned int sh_type,
    const unsigned char* prelocs,
    size_t reloc_count,
    Output_section* output_section,
    bool needs_special_offset_handling,
    size_t local_symbol_count,
    const unsigned char* plocal_symbols)
{
  typedef Target_aarch64<size, big_endian> Aarch64;
  typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
      Classify_reloc;

  // AArch64 uses only RELA relocation sections; silently ignore REL.
  if (sh_type == elfcpp::SHT_REL)
    {
      return;
    }

  gold::gc_process_relocs<size, big_endian, Aarch64, Scan, Classify_reloc>(
    symtab,
    layout,
    this,
    object,
    data_shndx,
    prelocs,
    reloc_count,
    output_section,
    needs_special_offset_handling,
    local_symbol_count,
    plocal_symbols);
}

// Scan relocations for a section.

template<int size, bool big_endian>
void
Target_aarch64<size, big_endian>::scan_relocs(
    Symbol_table* symtab,
    Layout* layout,
    Sized_relobj_file<size, big_endian>* object,
    unsigned int data_shndx,
    unsigned int sh_type,
    const unsigned char* prelocs,
    size_t reloc_count,
    Output_section* output_section,
    bool needs_special_offset_handling,
    size_t local_symbol_count,
    const unsigned char* plocal_symbols)
{
  typedef Target_aarch64<size, big_endian> Aarch64;
  typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
      Classify_reloc;

  // AArch64 uses only RELA relocation sections; REL is an input error.
  if (sh_type == elfcpp::SHT_REL)
    {
      gold_error(_("%s: unsupported REL reloc section"),
		 object->name().c_str());
      return;
    }

  gold::scan_relocs<size, big_endian, Aarch64, Scan, Classify_reloc>(
    symtab,
    layout,
    this,
    object,
    data_shndx,
    prelocs,
    reloc_count,
    output_section,
    needs_special_offset_handling,
    local_symbol_count,
    plocal_symbols);
}

// Return the value to use for a dynamic symbol which requires special
// treatment. This is how we support equality comparisons of function
// pointers across shared library boundaries, as described in the
// processor specific ABI supplement.
template<int size, bool big_endian>
uint64_t
Target_aarch64<size, big_endian>::do_dynsym_value(const Symbol* gsym) const
{
  // Only meaningful for a symbol from a shared object that has a PLT
  // entry here; its dynsym value is the PLT entry address.
  gold_assert(gsym->is_from_dynobj() && gsym->has_plt_offset());
  return this->plt_address_for_global(gsym);
}


// Finalize the sections.

template<int size, bool big_endian>
void
Target_aarch64<size, big_endian>::do_finalize_sections(
    Layout* layout,
    const Input_objects*,
    Symbol_table* symtab)
{
  const Reloc_section* rel_plt = (this->plt_ == NULL
				  ? NULL
				  : this->plt_->rela_plt());
  layout->add_target_dynamic_tags(false, this->got_plt_, rel_plt,
				  this->rela_dyn_, true, false);

  // Emit any relocs we saved in an attempt to avoid generating COPY
  // relocs.
  if (this->copy_relocs_.any_saved_relocs())
    this->copy_relocs_.emit(this->rela_dyn_section(layout));

  // Fill in some more dynamic tags.
  Output_data_dynamic* const odyn = layout->dynamic_data();
  if (odyn != NULL)
    {
      if (this->plt_ != NULL
	  && this->plt_->output_section() != NULL
	  && this->plt_->has_tlsdesc_entry())
	{
	  unsigned int plt_offset = this->plt_->get_tlsdesc_plt_offset();
	  unsigned int got_offset = this->plt_->get_tlsdesc_got_offset();
	  // The GOT size must be final before its offsets are recorded
	  // in the dynamic tags.
	  this->got_->finalize_data_size();
	  odyn->add_section_plus_offset(elfcpp::DT_TLSDESC_PLT,
					this->plt_, plt_offset);
	  odyn->add_section_plus_offset(elfcpp::DT_TLSDESC_GOT,
					this->got_, got_offset);
	}
    }

  // Set the size of the _GLOBAL_OFFSET_TABLE_ symbol to the size of
  // the .got.plt section.
  Symbol* sym = this->global_offset_table_;
  if (sym != NULL)
    {
      uint64_t data_size = this->got_plt_->current_data_size();
      symtab->get_sized_symbol<size>(sym)->set_symsize(data_size);

      // If the .got section is more than 0x8000 bytes, we add
      // 0x8000 to the value of _GLOBAL_OFFSET_TABLE_, so that 16
      // bit relocations have a greater chance of working.
      if (data_size >= 0x8000)
	symtab->get_sized_symbol<size>(sym)->set_value(
	  symtab->get_sized_symbol<size>(sym)->value() + 0x8000);
    }

  if (parameters->doing_static_link()
      && (this->plt_ == NULL || !this->plt_->has_irelative_section()))
    {
      // If linking statically, make sure that the __rela_iplt symbols
      // were defined if necessary, even if we didn't create a PLT.
      static const Define_symbol_in_segment syms[] =
	{
	  {
	    "__rela_iplt_start",	// name
	    elfcpp::PT_LOAD,		// segment_type
	    elfcpp::PF_W,		// segment_flags_set
	    elfcpp::PF(0),		// segment_flags_clear
	    0,				// value
	    0,				// size
	    elfcpp::STT_NOTYPE,		// type
	    elfcpp::STB_GLOBAL,		// binding
	    elfcpp::STV_HIDDEN,		// visibility
	    0,				// nonvis
	    Symbol::SEGMENT_START,	// offset_from_base
	    true			// only_if_ref
	  },
	  {
	    "__rela_iplt_end",		// name
	    elfcpp::PT_LOAD,		// segment_type
	    elfcpp::PF_W,		// segment_flags_set
	    elfcpp::PF(0),		// segment_flags_clear
	    0,				// value
	    0,				// size
	    elfcpp::STT_NOTYPE,		// type
	    elfcpp::STB_GLOBAL,		// binding
	    elfcpp::STV_HIDDEN,		// visibility
	    0,				// nonvis
	    Symbol::SEGMENT_START,	// offset_from_base
	    true			// only_if_ref
	  }
	};

      symtab->define_symbols(layout, 2, syms,
			     layout->script_options()->saw_sections_clause());
    }

  return;
}

// Perform a relocation.
// Perform a relocation.
//
// Apply the relocation described by the Rela entry at PRELOC to the
// section contents in VIEW, which is mapped at output ADDRESS.  GSYM is
// the global symbol (NULL for a local symbol), PSYMVAL its resolved
// value.  Returns false only to tell the caller to stop processing this
// reloc (used when a TLSGD __tls_get_addr call has been optimized away);
// returns true otherwise, even after reporting an error.

template<int size, bool big_endian>
inline bool
Target_aarch64<size, big_endian>::Relocate::relocate(
    const Relocate_info<size, big_endian>* relinfo,
    unsigned int,
    Target_aarch64<size, big_endian>* target,
    Output_section* ,
    size_t relnum,
    const unsigned char* preloc,
    const Sized_symbol<size>* gsym,
    const Symbol_value<size>* psymval,
    unsigned char* view,
    typename elfcpp::Elf_types<size>::Elf_Addr address,
    section_size_type /* view_size */)
{
  // A NULL view means there is nothing to apply (section contents are
  // not being written).
  if (view == NULL)
    return true;

  typedef AArch64_relocate_functions<size, big_endian> Reloc;

  const elfcpp::Rela<size, big_endian> rela(preloc);
  unsigned int r_type = elfcpp::elf_r_type<size>(rela.get_r_info());
  const AArch64_reloc_property* reloc_property =
      aarch64_reloc_property_table->get_reloc_property(r_type);

  if (reloc_property == NULL)
    {
      std::string reloc_name =
	  aarch64_reloc_property_table->reloc_name_in_error_message(r_type);
      gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
			     _("cannot relocate %s in object file"),
			     reloc_name.c_str());
      return true;
    }

  const Sized_relobj_file<size, big_endian>* object = relinfo->object;

  // Pick the value to use for symbols defined in the PLT.
  Symbol_value<size> symval;
  if (gsym != NULL
      && gsym->use_plt_offset(reloc_property->reference_flags()))
    {
      symval.set_output_value(target->plt_address_for_global(gsym));
      psymval = &symval;
    }
  else if (gsym == NULL && psymval->is_ifunc_symbol())
    {
      // A local IFUNC symbol resolves to its PLT entry, if one was
      // created for it during the scan phase.
      unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
      if (object->local_has_plt_offset(r_sym))
	{
	  symval.set_output_value(target->plt_address_for_local(object, r_sym));
	  psymval = &symval;
	}
    }

  const elfcpp::Elf_Xword addend = rela.get_r_addend();

  // Get the GOT offset if needed.
  // For aarch64, the GOT pointer points to the start of the GOT section.
  bool have_got_offset = false;
  int got_offset = 0;
  // got_base mirrors the 0x8000 bias applied to _GLOBAL_OFFSET_TABLE_
  // in do_finalize_sections when the GOT grows to 0x8000 bytes or more,
  // so GOT-relative offsets stay consistent with that symbol.
  int got_base = (target->got_ != NULL
		  ? (target->got_->current_data_size() >= 0x8000
		     ? 0x8000 : 0)
		  : 0);
  switch (r_type)
    {
    case elfcpp::R_AARCH64_MOVW_GOTOFF_G0:
    case elfcpp::R_AARCH64_MOVW_GOTOFF_G0_NC:
    case elfcpp::R_AARCH64_MOVW_GOTOFF_G1:
    case elfcpp::R_AARCH64_MOVW_GOTOFF_G1_NC:
    case elfcpp::R_AARCH64_MOVW_GOTOFF_G2:
    case elfcpp::R_AARCH64_MOVW_GOTOFF_G2_NC:
    case elfcpp::R_AARCH64_MOVW_GOTOFF_G3:
    case elfcpp::R_AARCH64_GOTREL64:
    case elfcpp::R_AARCH64_GOTREL32:
    case elfcpp::R_AARCH64_GOT_LD_PREL19:
    case elfcpp::R_AARCH64_LD64_GOTOFF_LO15:
    case elfcpp::R_AARCH64_ADR_GOT_PAGE:
    case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
    case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
      if (gsym != NULL)
	{
	  // The scan phase must already have allocated a GOT entry.
	  gold_assert(gsym->has_got_offset(GOT_TYPE_STANDARD));
	  got_offset = gsym->got_offset(GOT_TYPE_STANDARD) - got_base;
	}
      else
	{
	  unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
	  gold_assert(object->local_has_got_offset(r_sym, GOT_TYPE_STANDARD));
	  got_offset = (object->local_got_offset(r_sym, GOT_TYPE_STANDARD)
			- got_base);
	}
      have_got_offset = true;
      break;

    default:
      break;
    }

  typename Reloc::Status reloc_status = Reloc::STATUS_OKAY;
  typename elfcpp::Elf_types<size>::Elf_Addr value;
  switch (r_type)
    {
    case elfcpp::R_AARCH64_NONE:
      break;

    case elfcpp::R_AARCH64_ABS64:
      if (!parameters->options().apply_dynamic_relocs()
	  && parameters->options().output_is_position_independent()
	  && gsym != NULL
	  && gsym->needs_dynamic_reloc(reloc_property->reference_flags())
	  && !gsym->can_use_relative_reloc(false))
	// We have generated an absolute dynamic relocation, so do not
	// apply the relocation statically. (Works around bugs in older
	// Android dynamic linkers.)
	break;
      reloc_status = Reloc::template rela_ua<64>(
	view, object, psymval, addend, reloc_property);
      break;

    case elfcpp::R_AARCH64_ABS32:
      if (!parameters->options().apply_dynamic_relocs()
	  && parameters->options().output_is_position_independent()
	  && gsym != NULL
	  && gsym->needs_dynamic_reloc(reloc_property->reference_flags()))
	// We have generated an absolute dynamic relocation, so do not
	// apply the relocation statically. (Works around bugs in older
	// Android dynamic linkers.)
	break;
      reloc_status = Reloc::template rela_ua<32>(
	view, object, psymval, addend, reloc_property);
      break;

    case elfcpp::R_AARCH64_ABS16:
      if (!parameters->options().apply_dynamic_relocs()
	  && parameters->options().output_is_position_independent()
	  && gsym != NULL
	  && gsym->needs_dynamic_reloc(reloc_property->reference_flags()))
	// We have generated an absolute dynamic relocation, so do not
	// apply the relocation statically. (Works around bugs in older
	// Android dynamic linkers.)
	break;
      reloc_status = Reloc::template rela_ua<16>(
	view, object, psymval, addend, reloc_property);
      break;

    case elfcpp::R_AARCH64_PREL64:
      reloc_status = Reloc::template pcrela_ua<64>(
	view, object, psymval, addend, address, reloc_property);
      break;

    case elfcpp::R_AARCH64_PREL32:
      reloc_status = Reloc::template pcrela_ua<32>(
	view, object, psymval, addend, address, reloc_property);
      break;

    case elfcpp::R_AARCH64_PREL16:
      reloc_status = Reloc::template pcrela_ua<16>(
	view, object, psymval, addend, address, reloc_property);
      break;

    case elfcpp::R_AARCH64_MOVW_UABS_G0:
    case elfcpp::R_AARCH64_MOVW_UABS_G0_NC:
    case elfcpp::R_AARCH64_MOVW_UABS_G1:
    case elfcpp::R_AARCH64_MOVW_UABS_G1_NC:
    case elfcpp::R_AARCH64_MOVW_UABS_G2:
    case elfcpp::R_AARCH64_MOVW_UABS_G2_NC:
    case elfcpp::R_AARCH64_MOVW_UABS_G3:
      reloc_status = Reloc::template rela_general<32>(
	view, object, psymval, addend, reloc_property);
      break;
    case elfcpp::R_AARCH64_MOVW_SABS_G0:
    case elfcpp::R_AARCH64_MOVW_SABS_G1:
    case elfcpp::R_AARCH64_MOVW_SABS_G2:
      // Signed MOVW relocations may flip MOVZ<->MOVN depending on the
      // sign of the value; movnz handles that rewrite.
      reloc_status = Reloc::movnz(view, psymval->value(object, addend),
				  reloc_property);
      break;

    case elfcpp::R_AARCH64_LD_PREL_LO19:
      reloc_status = Reloc::template pcrela_general<32>(
	view, object, psymval, addend, address, reloc_property);
      break;

    case elfcpp::R_AARCH64_ADR_PREL_LO21:
      reloc_status = Reloc::adr(view, object, psymval, addend,
				address, reloc_property);
      break;

    case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC:
    case elfcpp::R_AARCH64_ADR_PREL_PG_HI21:
      reloc_status = Reloc::adrp(view, object, psymval, addend, address,
				 reloc_property);
      break;

    case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC:
    case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC:
    case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC:
    case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC:
    case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC:
    case elfcpp::R_AARCH64_ADD_ABS_LO12_NC:
      reloc_status = Reloc::template rela_general<32>(
	view, object, psymval, addend, reloc_property);
      break;

    case elfcpp::R_AARCH64_CALL26:
      if (this->skip_call_tls_get_addr_)
	{
	  // Double check that the TLSGD insn has been optimized away.
	  // (tls_gd_to_le/tls_ld_to_le replaced the insn before this
	  // "bl __tls_get_addr" with an ADD-immediate, opcode 0x91.)
	  typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
	  Insntype insn = elfcpp::Swap<32, big_endian>::readval(
	    reinterpret_cast<Insntype*>(view));
	  gold_assert((insn & 0xff000000) == 0x91000000);

	  reloc_status = Reloc::STATUS_OKAY;
	  this->skip_call_tls_get_addr_ = false;
	  // Return false to stop further processing this reloc.
	  return false;
	}
      // Fall through.
    case elfcpp::R_AARCH64_JUMP26:
      // If the branch target is out of range, a stub may have been
      // created; redirect the branch to it.
      if (Reloc::maybe_apply_stub(r_type, relinfo, rela, view, address,
				  gsym, psymval, object,
				  target->stub_group_size_))
	break;
      // Fall through.
    case elfcpp::R_AARCH64_TSTBR14:
    case elfcpp::R_AARCH64_CONDBR19:
      reloc_status = Reloc::template pcrela_general<32>(
	view, object, psymval, addend, address, reloc_property);
      break;

    case elfcpp::R_AARCH64_ADR_GOT_PAGE:
      gold_assert(have_got_offset);
      value = target->got_->address() + got_base + got_offset;
      reloc_status = Reloc::adrp(view, value + addend, address);
      break;

    case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
      gold_assert(have_got_offset);
      value = target->got_->address() + got_base + got_offset;
      reloc_status = Reloc::template rela_general<32>(
	view, value, addend, reloc_property);
      break;

    case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
      {
	gold_assert(have_got_offset);
	// Offset of the GOT entry from the page of the GOT base; must be
	// 8-byte aligned for the LD64 form.
	value = target->got_->address() + got_base + got_offset + addend -
	  Reloc::Page(target->got_->address() + got_base);
	if ((value & 7) != 0)
	  reloc_status = Reloc::STATUS_OVERFLOW;
	else
	  reloc_status = Reloc::template reloc_common<32>(
	    view, value, reloc_property);
	break;
      }

    case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
    case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
    case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
    case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
    case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
    case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
    case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
    case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
    case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
    case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
    case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
    case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
    case elfcpp::R_AARCH64_TLSDESC_CALL:
      // All TLS relocations are handled (and possibly relaxed) by
      // relocate_tls.
      reloc_status = relocate_tls(relinfo, target, relnum, rela, r_type,
				  gsym, psymval, view, address);
      break;

    // These are dynamic relocations, which are unexpected when linking.
    case elfcpp::R_AARCH64_COPY:
    case elfcpp::R_AARCH64_GLOB_DAT:
    case elfcpp::R_AARCH64_JUMP_SLOT:
    case elfcpp::R_AARCH64_RELATIVE:
    case elfcpp::R_AARCH64_IRELATIVE:
    case elfcpp::R_AARCH64_TLS_DTPREL64:
    case elfcpp::R_AARCH64_TLS_DTPMOD64:
    case elfcpp::R_AARCH64_TLS_TPREL64:
    case elfcpp::R_AARCH64_TLSDESC:
      gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
			     _("unexpected reloc %u in object file"),
			     r_type);
      break;

    default:
      gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
			     _("unsupported reloc %s"),
			     reloc_property->name().c_str());
      break;
    }

  // Report any errors.
  switch (reloc_status)
    {
    case Reloc::STATUS_OKAY:
      break;
    case Reloc::STATUS_OVERFLOW:
      gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
			     _("relocation overflow in %s"),
			     reloc_property->name().c_str());
      break;
    case Reloc::STATUS_BAD_RELOC:
      gold_error_at_location(
	relinfo,
	relnum,
	rela.get_r_offset(),
	_("unexpected opcode while processing relocation %s"),
	reloc_property->name().c_str());
      break;
    default:
      gold_unreachable();
    }

  return true;
}
// Handle a TLS relocation, possibly performing a TLS-model relaxation
// (GD->LE, LD->LE, IE->LE, TLSDESC->IE/LE) decided earlier during the
// scan phase via optimize_tls_reloc.  Returns the relocation status;
// errors are either reported here or signalled via STATUS_BAD_RELOC.

template<int size, bool big_endian>
inline
typename AArch64_relocate_functions<size, big_endian>::Status
Target_aarch64<size, big_endian>::Relocate::relocate_tls(
    const Relocate_info<size, big_endian>* relinfo,
    Target_aarch64<size, big_endian>* target,
    size_t relnum,
    const elfcpp::Rela<size, big_endian>& rela,
    unsigned int r_type, const Sized_symbol<size>* gsym,
    const Symbol_value<size>* psymval,
    unsigned char* view,
    typename elfcpp::Elf_types<size>::Elf_Addr address)
{
  typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
  typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;

  Output_segment* tls_segment = relinfo->layout->tls_segment();
  const elfcpp::Elf_Xword addend = rela.get_r_addend();
  const AArch64_reloc_property* reloc_property =
      aarch64_reloc_property_table->get_reloc_property(r_type);
  gold_assert(reloc_property != NULL);

  // The symbol's final value is known at link time for a local symbol in
  // a non-shared link, or when the global symbol resolves locally.
  const bool is_final = (gsym == NULL
			 ? !parameters->options().shared()
			 : gsym->final_value_is_known());
  tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
      optimize_tls_reloc(is_final, r_type);

  Sized_relobj_file<size, big_endian>* object = relinfo->object;
  int tls_got_offset_type;
  switch (r_type)
    {
    case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
    case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:  // Global-dynamic
      {
	if (tlsopt == tls::TLSOPT_TO_LE)
	  {
	    // No TLS segment: only legal if an error was already
	    // reported (e.g. undefined symbol).
	    if (tls_segment == NULL)
	      {
		gold_assert(parameters->errors()->error_count() > 0
			    || issue_undefined_symbol_error(gsym));
		return aarch64_reloc_funcs::STATUS_BAD_RELOC;
	      }
	    return tls_gd_to_le(relinfo, target, rela, r_type, view,
				psymval);
	  }
	else if (tlsopt == tls::TLSOPT_NONE)
	  {
	    tls_got_offset_type = GOT_TYPE_TLS_PAIR;
	    // Firstly get the address for the got entry.
	    typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
	    if (gsym != NULL)
	      {
		gold_assert(gsym->has_got_offset(tls_got_offset_type));
		got_entry_address = target->got_->address() +
				    gsym->got_offset(tls_got_offset_type);
	      }
	    else
	      {
		unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
		gold_assert(
		  object->local_has_got_offset(r_sym, tls_got_offset_type));
		got_entry_address = target->got_->address() +
				    object->local_got_offset(r_sym, tls_got_offset_type);
	      }

	    // Relocate the address into adrp/ld, adrp/add pair.
	    switch (r_type)
	      {
	      case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
		return aarch64_reloc_funcs::adrp(
		  view, got_entry_address + addend, address);

		break;

	      case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
		return aarch64_reloc_funcs::template rela_general<32>(
		  view, got_entry_address, addend, reloc_property);
		break;

	      default:
		gold_unreachable();
	      }
	  }
	// Reached only for TLSOPT_TO_IE, which is not implemented for
	// the GD relocations.
	gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
			       _("unsupported gd_to_ie relaxation on %u"),
			       r_type);
      }
      break;

    case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
    case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:  // Local-dynamic
      {
	if (tlsopt == tls::TLSOPT_TO_LE)
	  {
	    if (tls_segment == NULL)
	      {
		gold_assert(parameters->errors()->error_count() > 0
			    || issue_undefined_symbol_error(gsym));
		return aarch64_reloc_funcs::STATUS_BAD_RELOC;
	      }
	    return this->tls_ld_to_le(relinfo, target, rela, r_type, view,
				      psymval);
	  }

	gold_assert(tlsopt == tls::TLSOPT_NONE);
	// Relocate the field with the offset of the GOT entry for
	// the module index.
	typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
	got_entry_address = (target->got_mod_index_entry(NULL, NULL, NULL) +
			     target->got_->address());

	switch (r_type)
	  {
	  case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
	    return aarch64_reloc_funcs::adrp(
	      view, got_entry_address + addend, address);
	    break;

	  case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
	    return aarch64_reloc_funcs::template rela_general<32>(
	      view, got_entry_address, addend, reloc_property);
	    break;

	  default:
	    gold_unreachable();
	  }
      }
      break;

    case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:  // Other local-dynamic
      {
	// DTPREL relocations: value is the offset of the symbol within
	// its module's TLS block, so no TCB adjustment is applied.
	AArch64_address value = psymval->value(object, 0);
	if (tlsopt == tls::TLSOPT_TO_LE)
	  {
	    if (tls_segment == NULL)
	      {
		gold_assert(parameters->errors()->error_count() > 0
			    || issue_undefined_symbol_error(gsym));
		return aarch64_reloc_funcs::STATUS_BAD_RELOC;
	      }
	  }
	switch (r_type)
	  {
	  case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
	    return aarch64_reloc_funcs::movnz(view, value + addend,
					      reloc_property);
	    break;

	  case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
	  case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
	  case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
	    return aarch64_reloc_funcs::template rela_general<32>(
	      view, value, addend, reloc_property);
	    break;

	  default:
	    gold_unreachable();
	  }
	// We should never reach here.
      }
      break;

    case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:  // Initial-exec
      {
	if (tlsopt == tls::TLSOPT_TO_LE)
	  {
	    if (tls_segment == NULL)
	      {
		gold_assert(parameters->errors()->error_count() > 0
			    || issue_undefined_symbol_error(gsym));
		return aarch64_reloc_funcs::STATUS_BAD_RELOC;
	      }
	    return tls_ie_to_le(relinfo, target, rela, r_type, view,
				psymval);
	  }
	tls_got_offset_type = GOT_TYPE_TLS_OFFSET;

	// Firstly get the address for the got entry.
	typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
	if (gsym != NULL)
	  {
	    gold_assert(gsym->has_got_offset(tls_got_offset_type));
	    got_entry_address = target->got_->address() +
				gsym->got_offset(tls_got_offset_type);
	  }
	else
	  {
	    unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
	    gold_assert(
	      object->local_has_got_offset(r_sym, tls_got_offset_type));
	    got_entry_address = target->got_->address() +
				object->local_got_offset(r_sym, tls_got_offset_type);
	  }
	// Relocate the address into adrp/ld, adrp/add pair.
	switch (r_type)
	  {
	  case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
	    return aarch64_reloc_funcs::adrp(view, got_entry_address + addend,
					     address);
	    break;
	  case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
	    return aarch64_reloc_funcs::template rela_general<32>(
	      view, got_entry_address, addend, reloc_property);
	  default:
	    gold_unreachable();
	  }
      }
      // We shall never reach here.
      break;

    case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
    case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
    case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
    case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
    case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
    case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
      {
	gold_assert(tls_segment != NULL);
	AArch64_address value = psymval->value(object, 0);

	if (!parameters->options().shared())
	  {
	    // Local-exec: the TP offset is the symbol's offset in the
	    // TLS block plus the (aligned) TCB size.
	    AArch64_address aligned_tcb_size =
		align_address(target->tcb_size(),
			      tls_segment->maximum_alignment());
	    value += aligned_tcb_size;
	    switch (r_type)
	      {
	      case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
	      case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
	      case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
		return aarch64_reloc_funcs::movnz(view, value + addend,
						  reloc_property);
	      default:
		return aarch64_reloc_funcs::template
		  rela_general<32>(view,
				   value,
				   addend,
				   reloc_property);
	      }
	  }
	else
	  gold_error(_("%s: unsupported reloc %u "
		       "in non-static TLSLE mode."),
		     object->name().c_str(), r_type);
      }
      break;

    case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
    case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
    case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
    case elfcpp::R_AARCH64_TLSDESC_CALL:
      {
	if (tlsopt == tls::TLSOPT_TO_LE)
	  {
	    if (tls_segment == NULL)
	      {
		gold_assert(parameters->errors()->error_count() > 0
			    || issue_undefined_symbol_error(gsym));
		return aarch64_reloc_funcs::STATUS_BAD_RELOC;
	      }
	    return tls_desc_gd_to_le(relinfo, target, rela, r_type,
				     view, psymval);
	  }
	else
	  {
	    tls_got_offset_type = (tlsopt == tls::TLSOPT_TO_IE
				   ? GOT_TYPE_TLS_OFFSET
				   : GOT_TYPE_TLS_DESC);
	    unsigned int got_tlsdesc_offset = 0;
	    if (r_type != elfcpp::R_AARCH64_TLSDESC_CALL
		&& tlsopt == tls::TLSOPT_NONE)
	      {
		// We created GOT entries in the .got.tlsdesc portion of the
		// .got.plt section, but the offset stored in the symbol is the
		// offset within .got.tlsdesc.
		got_tlsdesc_offset = (target->got_->data_size()
				      + target->got_plt_section()->data_size());
	      }
	    typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
	    if (gsym != NULL)
	      {
		gold_assert(gsym->has_got_offset(tls_got_offset_type));
		got_entry_address = target->got_->address()
				    + got_tlsdesc_offset
				    + gsym->got_offset(tls_got_offset_type);
	      }
	    else
	      {
		unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
		gold_assert(
		  object->local_has_got_offset(r_sym, tls_got_offset_type));
		got_entry_address = target->got_->address() +
				    got_tlsdesc_offset +
				    object->local_got_offset(r_sym, tls_got_offset_type);
	      }
	    if (tlsopt == tls::TLSOPT_TO_IE)
	      {
		return tls_desc_gd_to_ie(relinfo, target, rela, r_type,
					 view, psymval, got_entry_address,
					 address);
	      }

	    // Now do tlsdesc relocation.
	    switch (r_type)
	      {
	      case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
		return aarch64_reloc_funcs::adrp(view,
						 got_entry_address + addend,
						 address);
		break;
	      case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
	      case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
		return aarch64_reloc_funcs::template rela_general<32>(
		  view, got_entry_address, addend, reloc_property);
		break;
	      case elfcpp::R_AARCH64_TLSDESC_CALL:
		// Nothing to patch for the call itself.
		return aarch64_reloc_funcs::STATUS_OKAY;
		break;
	      default:
		gold_unreachable();
	      }
	  }
      }
      break;

    default:
      gold_error(_("%s: unsupported TLS reloc %u."),
		 object->name().c_str(), r_type);
    }
  return aarch64_reloc_funcs::STATUS_BAD_RELOC;
}  // End of relocate_tls.
// Relax a TLS general-dynamic access sequence to local-exec.
//
// Called for both relocations of the GD pair.  On the first one
// (R_AARCH64_TLSGD_ADR_PAGE21) the whole 3-insn sequence is rewritten
// and the two TPREL relocs are applied; on the second
// (R_AARCH64_TLSGD_ADD_LO12_NC) the work is already done and we only
// verify it.  Sets skip_call_tls_get_addr_ so the following
// R_AARCH64_CALL26 to __tls_get_addr is skipped by relocate().

template<int size, bool big_endian>
inline
typename AArch64_relocate_functions<size, big_endian>::Status
Target_aarch64<size, big_endian>::Relocate::tls_gd_to_le(
    const Relocate_info<size, big_endian>* relinfo,
    Target_aarch64<size, big_endian>* target,
    const elfcpp::Rela<size, big_endian>& rela,
    unsigned int r_type,
    unsigned char* view,
    const Symbol_value<size>* psymval)
{
  typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
  typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
  typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;

  Insntype* ip = reinterpret_cast<Insntype*>(view);
  Insntype insn1 = elfcpp::Swap<32, big_endian>::readval(ip);
  Insntype insn2 = elfcpp::Swap<32, big_endian>::readval(ip + 1);
  Insntype insn3 = elfcpp::Swap<32, big_endian>::readval(ip + 2);

  if (r_type == elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC)
    {
      // This is the 2nd relocs, optimization should already have been
      // done.
      gold_assert((insn1 & 0xfff00000) == 0x91400000);
      return aarch64_reloc_funcs::STATUS_OKAY;
    }

  // The original sequence is -
  //   90000000	adrp	x0, 0 <main>
  //   91000000	add	x0, x0, #0x0
  //   94000000	bl	0 <__tls_get_addr>
  // optimized to sequence -
  //   d53bd040	mrs	x0, tpidr_el0
  //   91400000	add	x0, x0, #0x0, lsl #12
  //   91000000	add	x0, x0, #0x0

  // Unlike tls_ie_to_le, we change the 3 insns in one function call when we
  // encounter the first relocation "R_AARCH64_TLSGD_ADR_PAGE21". Because we
  // have to change "bl tls_get_addr", which does not have a corresponding tls
  // relocation type. So before proceeding, we need to make sure compiler
  // does not change the sequence.
  if(!(insn1 == 0x90000000      // adrp x0,0
       && insn2 == 0x91000000   // add x0, x0, #0x0
       && insn3 == 0x94000000)) // bl 0
    {
      // Ideally we should give up gd_to_le relaxation and do gd access.
      // However the gd_to_le relaxation decision has been made early
      // in the scan stage, where we did not allocate any GOT entry for
      // this symbol. Therefore we have to exit and report error now.
      gold_error(_("unexpected reloc insn sequence while relaxing "
		   "tls gd to le for reloc %u."), r_type);
      return aarch64_reloc_funcs::STATUS_BAD_RELOC;
    }

  // Write new insns.
  insn1 = 0xd53bd040;  // mrs x0, tpidr_el0
  insn2 = 0x91400000;  // add x0, x0, #0x0, lsl #12
  insn3 = 0x91000000;  // add x0, x0, #0x0
  elfcpp::Swap<32, big_endian>::writeval(ip, insn1);
  elfcpp::Swap<32, big_endian>::writeval(ip + 1, insn2);
  elfcpp::Swap<32, big_endian>::writeval(ip + 2, insn3);

  // Calculate tprel value: symbol offset plus the aligned TCB size.
  Output_segment* tls_segment = relinfo->layout->tls_segment();
  gold_assert(tls_segment != NULL);
  AArch64_address value = psymval->value(relinfo->object, 0);
  const elfcpp::Elf_Xword addend = rela.get_r_addend();
  AArch64_address aligned_tcb_size =
      align_address(target->tcb_size(), tls_segment->maximum_alignment());
  AArch64_address x = value + aligned_tcb_size;

  // After new insns are written, apply TLSLE relocs.
  const AArch64_reloc_property* rp1 =
      aarch64_reloc_property_table->get_reloc_property(
	elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12);
  const AArch64_reloc_property* rp2 =
      aarch64_reloc_property_table->get_reloc_property(
	elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12);
  gold_assert(rp1 != NULL && rp2 != NULL);

  typename aarch64_reloc_funcs::Status s1 =
      aarch64_reloc_funcs::template rela_general<32>(view + 4,
						     x,
						     addend,
						     rp1);
  if (s1 != aarch64_reloc_funcs::STATUS_OKAY)
    return s1;

  typename aarch64_reloc_funcs::Status s2 =
      aarch64_reloc_funcs::template rela_general<32>(view + 8,
						     x,
						     addend,
						     rp2);

  this->skip_call_tls_get_addr_ = true;
  return s2;
}  // End of tls_gd_to_le
7661 Output_segment* tls_segment = relinfo->layout->tls_segment(); 7662 gold_assert(tls_segment != NULL); 7663 AArch64_address value = psymval->value(relinfo->object, 0); 7664 const elfcpp::Elf_Xword addend = rela.get_r_addend(); 7665 AArch64_address aligned_tcb_size = 7666 align_address(target->tcb_size(), tls_segment->maximum_alignment()); 7667 AArch64_address x = value + aligned_tcb_size; 7668 7669 // After new insns are written, apply TLSLE relocs. 7670 const AArch64_reloc_property* rp1 = 7671 aarch64_reloc_property_table->get_reloc_property( 7672 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12); 7673 const AArch64_reloc_property* rp2 = 7674 aarch64_reloc_property_table->get_reloc_property( 7675 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12); 7676 gold_assert(rp1 != NULL && rp2 != NULL); 7677 7678 typename aarch64_reloc_funcs::Status s1 = 7679 aarch64_reloc_funcs::template rela_general<32>(view + 4, 7680 x, 7681 addend, 7682 rp1); 7683 if (s1 != aarch64_reloc_funcs::STATUS_OKAY) 7684 return s1; 7685 7686 typename aarch64_reloc_funcs::Status s2 = 7687 aarch64_reloc_funcs::template rela_general<32>(view + 8, 7688 x, 7689 addend, 7690 rp2); 7691 7692 this->skip_call_tls_get_addr_ = true; 7693 return s2; 7694} // End of tls_gd_to_le 7695 7696 7697template<int size, bool big_endian> 7698inline 7699typename AArch64_relocate_functions<size, big_endian>::Status 7700Target_aarch64<size, big_endian>::Relocate::tls_ld_to_le( 7701 const Relocate_info<size, big_endian>* relinfo, 7702 Target_aarch64<size, big_endian>* target, 7703 const elfcpp::Rela<size, big_endian>& rela, 7704 unsigned int r_type, 7705 unsigned char* view, 7706 const Symbol_value<size>* psymval) 7707{ 7708 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs; 7709 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype; 7710 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address; 7711 7712 Insntype* ip = reinterpret_cast<Insntype*>(view); 7713 Insntype insn1 = elfcpp::Swap<32, 
big_endian>::readval(ip); 7714 Insntype insn2 = elfcpp::Swap<32, big_endian>::readval(ip + 1); 7715 Insntype insn3 = elfcpp::Swap<32, big_endian>::readval(ip + 2); 7716 7717 if (r_type == elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC) 7718 { 7719 // This is the 2nd relocs, optimization should already have been 7720 // done. 7721 gold_assert((insn1 & 0xfff00000) == 0x91400000); 7722 return aarch64_reloc_funcs::STATUS_OKAY; 7723 } 7724 7725 // The original sequence is - 7726 // 90000000 adrp x0, 0 <main> 7727 // 91000000 add x0, x0, #0x0 7728 // 94000000 bl 0 <__tls_get_addr> 7729 // optimized to sequence - 7730 // d53bd040 mrs x0, tpidr_el0 7731 // 91400000 add x0, x0, #0x0, lsl #12 7732 // 91000000 add x0, x0, #0x0 7733 7734 // Unlike tls_ie_to_le, we change the 3 insns in one function call when we 7735 // encounter the first relocation "R_AARCH64_TLSLD_ADR_PAGE21". Because we 7736 // have to change "bl tls_get_addr", which does not have a corresponding tls 7737 // relocation type. So before proceeding, we need to make sure compiler 7738 // does not change the sequence. 7739 if(!(insn1 == 0x90000000 // adrp x0,0 7740 && insn2 == 0x91000000 // add x0, x0, #0x0 7741 && insn3 == 0x94000000)) // bl 0 7742 { 7743 // Ideally we should give up gd_to_le relaxation and do gd access. 7744 // However the gd_to_le relaxation decision has been made early 7745 // in the scan stage, where we did not allocate a GOT entry for 7746 // this symbol. Therefore we have to exit and report an error now. 7747 gold_error(_("unexpected reloc insn sequence while relaxing " 7748 "tls gd to le for reloc %u."), r_type); 7749 return aarch64_reloc_funcs::STATUS_BAD_RELOC; 7750 } 7751 7752 // Write new insns. 
// Relax a TLS initial-exec access to local-exec.
//
// Rewrites the IE adrp/ldr pair (which loads the TP offset from the
// GOT) into a movz/movk pair that materializes the offset directly.
// Each of the two relocations is rewritten independently.  The caller
// (relocate_tls) has already verified tls_segment is non-NULL.

template<int size, bool big_endian>
inline
typename AArch64_relocate_functions<size, big_endian>::Status
Target_aarch64<size, big_endian>::Relocate::tls_ie_to_le(
    const Relocate_info<size, big_endian>* relinfo,
    Target_aarch64<size, big_endian>* target,
    const elfcpp::Rela<size, big_endian>& rela,
    unsigned int r_type,
    unsigned char* view,
    const Symbol_value<size>* psymval)
{
  typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
  typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
  typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;

  AArch64_address value = psymval->value(relinfo->object, 0);
  Output_segment* tls_segment = relinfo->layout->tls_segment();
  AArch64_address aligned_tcb_address =
      align_address(target->tcb_size(), tls_segment->maximum_alignment());
  const elfcpp::Elf_Xword addend = rela.get_r_addend();
  AArch64_address x = value + addend + aligned_tcb_address;
  // "x" is the offset to tp, we can only do this if x is within
  // range [0, 2^32-1] (movz+movk can only encode a 32-bit immediate).
  if (!(size == 32 || (size == 64 && (static_cast<uint64_t>(x) >> 32) == 0)))
    {
      gold_error(_("TLS variable referred by reloc %u is too far from TP."),
		 r_type);
      return aarch64_reloc_funcs::STATUS_BAD_RELOC;
    }

  Insntype* ip = reinterpret_cast<Insntype*>(view);
  Insntype insn = elfcpp::Swap<32, big_endian>::readval(ip);
  unsigned int regno;
  Insntype newinsn;
  if (r_type == elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21)
    {
      // Generate movz, keeping the destination register of the original
      // adrp and inserting bits [31:16] of x.
      regno = (insn & 0x1f);
      newinsn = (0xd2a00000 | regno) | (((x >> 16) & 0xffff) << 5);
    }
  else if (r_type == elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC)
    {
      // Generate movk with bits [15:0] of x.  The original ldr must use
      // the same register as both source and destination.
      regno = (insn & 0x1f);
      gold_assert(regno == ((insn >> 5) & 0x1f));
      newinsn = (0xf2800000 | regno) | ((x & 0xffff) << 5);
    }
  else
    gold_unreachable();

  elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
  return aarch64_reloc_funcs::STATUS_OKAY;
}  // End of tls_ie_to_le
      // IE->LE for R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: rewrite the ldr
      // into "movk <reg>, #:(x & 0xffff)", keeping the original insn's
      // destination register.
      regno = (insn & 0x1f);
      // In the IE sequence the ldr's base register (bits 5..9) equals its
      // destination register (bits 0..4); the movz/movk rewrite relies on
      // that, so assert it.
      gold_assert(regno == ((insn >> 5) & 0x1f));
      newinsn = (0xf2800000 | regno) | ((x & 0xffff) << 5);
    }
  else
    gold_unreachable();

  elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
  return aarch64_reloc_funcs::STATUS_OKAY;
}  // End of tls_ie_to_le


// Relax a TLSDESC global-dynamic access to local-exec form.  The four
// relocated insns of the tlsdesc sequence are rewritten in place (see the
// sequence comments below); the rewritten movz/movk pair materializes the
// symbol's 32-bit offset from the thread pointer directly.  Returns
// STATUS_BAD_RELOC if that offset does not fit in 32 bits.

template<int size, bool big_endian>
inline
typename AArch64_relocate_functions<size, big_endian>::Status
Target_aarch64<size, big_endian>::Relocate::tls_desc_gd_to_le(
    const Relocate_info<size, big_endian>* relinfo,
    Target_aarch64<size, big_endian>* target,
    const elfcpp::Rela<size, big_endian>& rela,
    unsigned int r_type,
    unsigned char* view,
    const Symbol_value<size>* psymval)
{
  typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
  typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
  typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;

  // TLSDESC-GD sequence is like:
  //   adrp  x0, :tlsdesc:v1
  //   ldr   x1, [x0, #:tlsdesc_lo12:v1]
  //   add   x0, x0, :tlsdesc_lo12:v1
  //   .tlsdesccall    v1
  //   blr   x1
  // After desc_gd_to_le optimization, the sequence will be like:
  //   movz  x0, #0x0, lsl #16
  //   movk  x0, #0x10
  //   nop
  //   nop

  // Calculate tprel value.
  Output_segment* tls_segment = relinfo->layout->tls_segment();
  gold_assert(tls_segment != NULL);
  Insntype* ip = reinterpret_cast<Insntype*>(view);
  const elfcpp::Elf_Xword addend = rela.get_r_addend();
  AArch64_address value = psymval->value(relinfo->object, addend);
  // The TP offset is the symbol value plus the TCB size rounded up to the
  // TLS segment's maximum alignment.
  AArch64_address aligned_tcb_size =
      align_address(target->tcb_size(), tls_segment->maximum_alignment());
  AArch64_address x = value + aligned_tcb_size;
  // x is the offset to tp, we can only do this if x is within range
  // [0, 2^32-1]. If x is out of range, fail and exit.
  if (size == 64 && (static_cast<uint64_t>(x) >> 32) != 0)
    {
      gold_error(_("TLS variable referred by reloc %u is too far from TP. "
                   "We Can't do gd_to_le relaxation.\n"), r_type);
      return aarch64_reloc_funcs::STATUS_BAD_RELOC;
    }
  Insntype newinsn;
  // Each relocation in the sequence identifies which insn of the sequence
  // we are looking at, so dispatch on the relocation type.
  switch (r_type)
    {
    case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
    case elfcpp::R_AARCH64_TLSDESC_CALL:
      // Change to nop
      newinsn = 0xd503201f;
      break;

    case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
      // Change to movz.  Immediate field carries bits [16,32) of x.
      newinsn = 0xd2a00000 | (((x >> 16) & 0xffff) << 5);
      break;

    case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
      // Change to movk.  Immediate field carries bits [0,16) of x.
      newinsn = 0xf2800000 | ((x & 0xffff) << 5);
      break;

    default:
      gold_error(_("unsupported tlsdesc gd_to_le optimization on reloc %u"),
                 r_type);
      gold_unreachable();
    }
  elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
  return aarch64_reloc_funcs::STATUS_OKAY;
}  // End of tls_desc_gd_to_le


// Relax a TLSDESC global-dynamic access to initial-exec form.  The tlsdesc
// adrp/ldr pair is retargeted at the symbol's GOT entry (which, in IE form,
// holds the TP offset) and the remaining two insns become nops.
// GOT_ENTRY_ADDRESS is the address of that GOT entry; ADDRESS is the
// address of the insn being relocated (needed for the PC-relative adrp).

template<int size, bool big_endian>
inline
typename AArch64_relocate_functions<size, big_endian>::Status
Target_aarch64<size, big_endian>::Relocate::tls_desc_gd_to_ie(
    const Relocate_info<size, big_endian>* /* relinfo */,
    Target_aarch64<size, big_endian>* /* target */,
    const elfcpp::Rela<size, big_endian>& rela,
    unsigned int r_type,
    unsigned char* view,
    const Symbol_value<size>* /* psymval */,
    typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address,
    typename elfcpp::Elf_types<size>::Elf_Addr address)
{
  typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
  typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;

  // TLSDESC-GD sequence is like:
  //   adrp  x0, :tlsdesc:v1
  //   ldr   x1, [x0, #:tlsdesc_lo12:v1]
  //   add   x0, x0, :tlsdesc_lo12:v1
  //   .tlsdesccall    v1
  //   blr   x1
  // After desc_gd_to_ie optimization, the sequence will be like:
  //   adrp  x0, :tlsie:v1
  //   ldr   x0, [x0, :tlsie_lo12:v1]
  //   nop
  //   nop

  Insntype* ip = reinterpret_cast<Insntype*>(view);
  const elfcpp::Elf_Xword addend = rela.get_r_addend();
  Insntype newinsn;
  switch (r_type)
    {
    case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
    case elfcpp::R_AARCH64_TLSDESC_CALL:
      // Change to nop
      newinsn = 0xd503201f;
      elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
      break;

    case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
      {
        // Re-resolve the adrp against the GOT entry's page.
        return aarch64_reloc_funcs::adrp(view, got_entry_address + addend,
                                         address);
      }
      break;

    case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
      {
        // Set ldr target register to be x0 (clear the Rt field, bits 0..4).
        Insntype insn = elfcpp::Swap<32, big_endian>::readval(ip);
        insn &= 0xffffffe0;
        elfcpp::Swap<32, big_endian>::writeval(ip, insn);
        // Do relocation: apply the ldr's immediate as if it were the
        // corresponding IE relocation against the GOT entry.
        const AArch64_reloc_property* reloc_property =
            aarch64_reloc_property_table->get_reloc_property(
              elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
        return aarch64_reloc_funcs::template rela_general<32>(
          view, got_entry_address, addend, reloc_property);
      }
      break;

    default:
      gold_error(_("Don't support tlsdesc gd_to_ie optimization on reloc %u"),
                 r_type);
      gold_unreachable();
    }
  return aarch64_reloc_funcs::STATUS_OKAY;
}  // End of tls_desc_gd_to_ie

// Relocate section data.
7996 7997template<int size, bool big_endian> 7998void 7999Target_aarch64<size, big_endian>::relocate_section( 8000 const Relocate_info<size, big_endian>* relinfo, 8001 unsigned int sh_type, 8002 const unsigned char* prelocs, 8003 size_t reloc_count, 8004 Output_section* output_section, 8005 bool needs_special_offset_handling, 8006 unsigned char* view, 8007 typename elfcpp::Elf_types<size>::Elf_Addr address, 8008 section_size_type view_size, 8009 const Reloc_symbol_changes* reloc_symbol_changes) 8010{ 8011 typedef Target_aarch64<size, big_endian> Aarch64; 8012 typedef typename Target_aarch64<size, big_endian>::Relocate AArch64_relocate; 8013 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian> 8014 Classify_reloc; 8015 8016 gold_assert(sh_type == elfcpp::SHT_RELA); 8017 8018 gold::relocate_section<size, big_endian, Aarch64, AArch64_relocate, 8019 gold::Default_comdat_behavior, Classify_reloc>( 8020 relinfo, 8021 this, 8022 prelocs, 8023 reloc_count, 8024 output_section, 8025 needs_special_offset_handling, 8026 view, 8027 address, 8028 view_size, 8029 reloc_symbol_changes); 8030} 8031 8032// Scan the relocs during a relocatable link. 
8033 8034template<int size, bool big_endian> 8035void 8036Target_aarch64<size, big_endian>::scan_relocatable_relocs( 8037 Symbol_table* symtab, 8038 Layout* layout, 8039 Sized_relobj_file<size, big_endian>* object, 8040 unsigned int data_shndx, 8041 unsigned int sh_type, 8042 const unsigned char* prelocs, 8043 size_t reloc_count, 8044 Output_section* output_section, 8045 bool needs_special_offset_handling, 8046 size_t local_symbol_count, 8047 const unsigned char* plocal_symbols, 8048 Relocatable_relocs* rr) 8049{ 8050 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian> 8051 Classify_reloc; 8052 typedef gold::Default_scan_relocatable_relocs<Classify_reloc> 8053 Scan_relocatable_relocs; 8054 8055 gold_assert(sh_type == elfcpp::SHT_RELA); 8056 8057 gold::scan_relocatable_relocs<size, big_endian, Scan_relocatable_relocs>( 8058 symtab, 8059 layout, 8060 object, 8061 data_shndx, 8062 prelocs, 8063 reloc_count, 8064 output_section, 8065 needs_special_offset_handling, 8066 local_symbol_count, 8067 plocal_symbols, 8068 rr); 8069} 8070 8071// Scan the relocs for --emit-relocs. 
8072 8073template<int size, bool big_endian> 8074void 8075Target_aarch64<size, big_endian>::emit_relocs_scan( 8076 Symbol_table* symtab, 8077 Layout* layout, 8078 Sized_relobj_file<size, big_endian>* object, 8079 unsigned int data_shndx, 8080 unsigned int sh_type, 8081 const unsigned char* prelocs, 8082 size_t reloc_count, 8083 Output_section* output_section, 8084 bool needs_special_offset_handling, 8085 size_t local_symbol_count, 8086 const unsigned char* plocal_syms, 8087 Relocatable_relocs* rr) 8088{ 8089 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian> 8090 Classify_reloc; 8091 typedef gold::Default_emit_relocs_strategy<Classify_reloc> 8092 Emit_relocs_strategy; 8093 8094 gold_assert(sh_type == elfcpp::SHT_RELA); 8095 8096 gold::scan_relocatable_relocs<size, big_endian, Emit_relocs_strategy>( 8097 symtab, 8098 layout, 8099 object, 8100 data_shndx, 8101 prelocs, 8102 reloc_count, 8103 output_section, 8104 needs_special_offset_handling, 8105 local_symbol_count, 8106 plocal_syms, 8107 rr); 8108} 8109 8110// Relocate a section during a relocatable link. 

// Relocate a section during a relocatable link: adjust the relocation
// entries themselves (in RELOC_VIEW) for the -r output, rather than
// patching section contents.

template<int size, bool big_endian>
void
Target_aarch64<size, big_endian>::relocate_relocs(
    const Relocate_info<size, big_endian>* relinfo,
    unsigned int sh_type,
    const unsigned char* prelocs,
    size_t reloc_count,
    Output_section* output_section,
    typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section,
    unsigned char* view,
    typename elfcpp::Elf_types<size>::Elf_Addr view_address,
    section_size_type view_size,
    unsigned char* reloc_view,
    section_size_type reloc_view_size)
{
  typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
      Classify_reloc;

  // AArch64 only uses RELA relocation sections.
  gold_assert(sh_type == elfcpp::SHT_RELA);

  gold::relocate_relocs<size, big_endian, Classify_reloc>(
    relinfo,
    prelocs,
    reloc_count,
    output_section,
    offset_in_output_section,
    view,
    view_address,
    view_size,
    reloc_view,
    reloc_view_size);
}


// Return whether this is a 3-insn erratum sequence.
// INSN1 is known by the caller to be an adrp (scan_erratum_843419_span
// checks is_adrp before calling us); we check that INSN2 is a qualifying
// memory op and INSN3 a load/store whose base register is the adrp's
// destination.

template<int size, bool big_endian>
bool
Target_aarch64<size, big_endian>::is_erratum_843419_sequence(
    typename elfcpp::Swap<32,big_endian>::Valtype insn1,
    typename elfcpp::Swap<32,big_endian>::Valtype insn2,
    typename elfcpp::Swap<32,big_endian>::Valtype insn3)
{
  unsigned rt1, rt2;
  bool load, pair;

  // The 2nd insn is a single register load or store; or register pair
  // store.  (A load pair is excluded by the "!pair || !load" test.)
  if (Insn_utilities::aarch64_mem_op_p(insn2, &rt1, &rt2, &pair, &load)
      && (!pair || (pair && !load)))
    {
      // The 3rd insn is a load or store instruction from the "Load/store
      // register (unsigned immediate)" encoding class, using Rn as the
      // base address register.
      if (Insn_utilities::aarch64_ldst_uimm(insn3)
          && (Insn_utilities::aarch64_rn(insn3)
              == Insn_utilities::aarch64_rd(insn1)))
        return true;
    }
  return false;
}


// Return whether this is a 835769 sequence.
// (Similarly implemented as in elfnn-aarch64.c.)
// The candidate is a memory op (INSN1) immediately followed by a
// multiply-accumulate (INSN2, matched by aarch64_mlxl).

template<int size, bool big_endian>
bool
Target_aarch64<size, big_endian>::is_erratum_835769_sequence(
    typename elfcpp::Swap<32,big_endian>::Valtype insn1,
    typename elfcpp::Swap<32,big_endian>::Valtype insn2)
{
  uint32_t rt;
  uint32_t rt2 = 0;
  uint32_t rn;
  uint32_t rm;
  uint32_t ra;
  bool pair;
  bool load;

  if (Insn_utilities::aarch64_mlxl(insn2)
      && Insn_utilities::aarch64_mem_op_p (insn1, &rt, &rt2, &pair, &load))
    {
      /* Any SIMD memory op is independent of the subsequent MLA
         by definition of the erratum.  (Bit 26 of insn1 distinguishes
         SIMD&FP loads/stores.)  */
      if (Insn_utilities::aarch64_bit(insn1, 26))
        return true;

      /* If not SIMD, check for integer memory ops and MLA relationship.  */
      rn = Insn_utilities::aarch64_rn(insn2);
      ra = Insn_utilities::aarch64_ra(insn2);
      rm = Insn_utilities::aarch64_rm(insn2);

      /* If this is a load and there's a true(RAW) dependency, we are safe
         and this is not an erratum sequence.  */
      if (load &&
          (rt == rn || rt == rm || rt == ra
           || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
        return false;

      /* We conservatively put out stubs for all other cases (including
         writebacks).  */
      return true;
    }

  return false;
}


// Helper method to create erratum stub for ST_E_843419 and ST_E_835769.
8222 8223template<int size, bool big_endian> 8224void 8225Target_aarch64<size, big_endian>::create_erratum_stub( 8226 AArch64_relobj<size, big_endian>* relobj, 8227 unsigned int shndx, 8228 section_size_type erratum_insn_offset, 8229 Address erratum_address, 8230 typename Insn_utilities::Insntype erratum_insn, 8231 int erratum_type, 8232 unsigned int e843419_adrp_offset) 8233{ 8234 gold_assert(erratum_type == ST_E_843419 || erratum_type == ST_E_835769); 8235 The_stub_table* stub_table = relobj->stub_table(shndx); 8236 gold_assert(stub_table != NULL); 8237 if (stub_table->find_erratum_stub(relobj, 8238 shndx, 8239 erratum_insn_offset) == NULL) 8240 { 8241 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN; 8242 The_erratum_stub* stub; 8243 if (erratum_type == ST_E_835769) 8244 stub = new The_erratum_stub(relobj, erratum_type, shndx, 8245 erratum_insn_offset); 8246 else if (erratum_type == ST_E_843419) 8247 stub = new E843419_stub<size, big_endian>( 8248 relobj, shndx, erratum_insn_offset, e843419_adrp_offset); 8249 else 8250 gold_unreachable(); 8251 stub->set_erratum_insn(erratum_insn); 8252 stub->set_erratum_address(erratum_address); 8253 // For erratum ST_E_843419 and ST_E_835769, the destination address is 8254 // always the next insn after erratum insn. 8255 stub->set_destination_address(erratum_address + BPI); 8256 stub_table->add_erratum_stub(stub); 8257 } 8258} 8259 8260 8261// Scan erratum for section SHNDX range [output_address + span_start, 8262// output_address + span_end). Note here we do not share the code with 8263// scan_erratum_843419_span function, because for 843419 we optimize by only 8264// scanning the last few insns of a page, whereas for 835769, we need to scan 8265// every insn. 
8266 8267template<int size, bool big_endian> 8268void 8269Target_aarch64<size, big_endian>::scan_erratum_835769_span( 8270 AArch64_relobj<size, big_endian>* relobj, 8271 unsigned int shndx, 8272 const section_size_type span_start, 8273 const section_size_type span_end, 8274 unsigned char* input_view, 8275 Address output_address) 8276{ 8277 typedef typename Insn_utilities::Insntype Insntype; 8278 8279 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN; 8280 8281 // Adjust output_address and view to the start of span. 8282 output_address += span_start; 8283 input_view += span_start; 8284 8285 section_size_type span_length = span_end - span_start; 8286 section_size_type offset = 0; 8287 for (offset = 0; offset + BPI < span_length; offset += BPI) 8288 { 8289 Insntype* ip = reinterpret_cast<Insntype*>(input_view + offset); 8290 Insntype insn1 = ip[0]; 8291 Insntype insn2 = ip[1]; 8292 if (is_erratum_835769_sequence(insn1, insn2)) 8293 { 8294 Insntype erratum_insn = insn2; 8295 // "span_start + offset" is the offset for insn1. So for insn2, it is 8296 // "span_start + offset + BPI". 8297 section_size_type erratum_insn_offset = span_start + offset + BPI; 8298 Address erratum_address = output_address + offset + BPI; 8299 gold_info(_("Erratum 835769 found and fixed at \"%s\", " 8300 "section %d, offset 0x%08x."), 8301 relobj->name().c_str(), shndx, 8302 (unsigned int)(span_start + offset)); 8303 8304 this->create_erratum_stub(relobj, shndx, 8305 erratum_insn_offset, erratum_address, 8306 erratum_insn, ST_E_835769); 8307 offset += BPI; // Skip mac insn. 8308 } 8309 } 8310} // End of "Target_aarch64::scan_erratum_835769_span". 8311 8312 8313// Scan erratum for section SHNDX range 8314// [output_address + span_start, output_address + span_end). 

// Scan [OUTPUT_ADDRESS + SPAN_START, OUTPUT_ADDRESS + SPAN_END) of section
// SHNDX for erratum-843419 sequences.  Per the erratum, only sequences whose
// leading adrp sits in the last two insn slots of a 4KB page (page offsets
// 0xff8 and 0xffc) are candidates, so the scan only visits those two slots
// of each page.

template<int size, bool big_endian>
void
Target_aarch64<size, big_endian>::scan_erratum_843419_span(
    AArch64_relobj<size, big_endian>* relobj,
    unsigned int shndx,
    const section_size_type span_start,
    const section_size_type span_end,
    unsigned char* input_view,
    Address output_address)
{
  typedef typename Insn_utilities::Insntype Insntype;

  // Adjust output_address and view to the start of span.
  output_address += span_start;
  input_view += span_start;

  // Insns are 4-byte entities; an unaligned span holds no candidates.
  if ((output_address & 0x03) != 0)
    return;

  section_size_type offset = 0;
  section_size_type span_length = span_end - span_start;
  // The first instruction must be ending at 0xFF8 or 0xFFC.
  unsigned int page_offset = output_address & 0xFFF;
  // Make sure starting position, that is "output_address+offset",
  // starts at page position 0xff8 or 0xffc.
  if (page_offset < 0xff8)
    offset = 0xff8 - page_offset;
  // Need at least three insns (adrp + two more) left in the span.
  while (offset + 3 * Insn_utilities::BYTES_PER_INSN <= span_length)
    {
      Insntype* ip = reinterpret_cast<Insntype*>(input_view + offset);
      Insntype insn1 = ip[0];
      if (Insn_utilities::is_adrp(insn1))
        {
          Insntype insn2 = ip[1];
          Insntype insn3 = ip[2];
          Insntype erratum_insn;
          unsigned insn_offset;
          bool do_report = false;
          // First try the compact 3-insn form: adrp, mem op, ld/st-uimm.
          if (is_erratum_843419_sequence(insn1, insn2, insn3))
            {
              do_report = true;
              erratum_insn = insn3;
              insn_offset = 2 * Insn_utilities::BYTES_PER_INSN;
            }
          else if (offset + 4 * Insn_utilities::BYTES_PER_INSN <= span_length)
            {
              // Optionally we can have an insn between ins2 and ins3
              Insntype insn_opt = ip[2];
              // And insn_opt must not be a branch.
              if (!Insn_utilities::aarch64_b(insn_opt)
                  && !Insn_utilities::aarch64_bl(insn_opt)
                  && !Insn_utilities::aarch64_blr(insn_opt)
                  && !Insn_utilities::aarch64_br(insn_opt))
                {
                  // And insn_opt must not write to dest reg in insn1. However
                  // we do a conservative scan, which means we may fix/report
                  // more than necessary, but it doesn't hurt.

                  Insntype insn4 = ip[3];
                  if (is_erratum_843419_sequence(insn1, insn2, insn4))
                    {
                      do_report = true;
                      erratum_insn = insn4;
                      insn_offset = 3 * Insn_utilities::BYTES_PER_INSN;
                    }
                }
            }
          if (do_report)
            {
              // Report using the offset of the erratum (load/store) insn;
              // the adrp offset is also recorded for the 843419 stub.
              unsigned int erratum_insn_offset =
                span_start + offset + insn_offset;
              Address erratum_address =
                output_address + offset + insn_offset;
              create_erratum_stub(relobj, shndx,
                                  erratum_insn_offset, erratum_address,
                                  erratum_insn, ST_E_843419,
                                  span_start + offset);
            }
        }

      // Advance to next candidate instruction. We only consider instruction
      // sequences starting at a page offset of 0xff8 or 0xffc.
      page_offset = (output_address + offset) & 0xfff;
      if (page_offset == 0xff8)
        offset += 4;
      else // (page_offset == 0xffc), we move to next page's 0xff8.
        offset += 0xffc;
    }
}  // End of "Target_aarch64::scan_erratum_843419_span".


// The selector for aarch64 object files.

// Target selector, templated on ELF class (32/64) and endianness.  The four
// explicit constructor specializations below bind each combination to its
// BFD target name and emulation vector name.

template<int size, bool big_endian>
class Target_selector_aarch64 : public Target_selector
{
 public:
  Target_selector_aarch64();

  // Create the actual Target object on demand.
  virtual Target*
  do_instantiate_target()
  { return new Target_aarch64<size, big_endian>(); }
};

// 32-bit big-endian.
template<>
Target_selector_aarch64<32, true>::Target_selector_aarch64()
  : Target_selector(elfcpp::EM_AARCH64, 32, true,
                    "elf32-bigaarch64", "aarch64_elf32_be_vec")
{ }

// 32-bit little-endian.
template<>
Target_selector_aarch64<32, false>::Target_selector_aarch64()
  : Target_selector(elfcpp::EM_AARCH64, 32, false,
                    "elf32-littleaarch64", "aarch64_elf32_le_vec")
{ }

// 64-bit big-endian.
template<>
Target_selector_aarch64<64, true>::Target_selector_aarch64()
  : Target_selector(elfcpp::EM_AARCH64, 64, true,
                    "elf64-bigaarch64", "aarch64_elf64_be_vec")
{ }

// 64-bit little-endian.
template<>
Target_selector_aarch64<64, false>::Target_selector_aarch64()
  : Target_selector(elfcpp::EM_AARCH64, 64, false,
                    "elf64-littleaarch64", "aarch64_elf64_le_vec")
{ }

// File-scope instances; constructing them (presumably via the
// Target_selector base constructor) makes each selector available --
// see target-select.h for the registration mechanism.
Target_selector_aarch64<32, true> target_selector_aarch64elf32b;
Target_selector_aarch64<32, false> target_selector_aarch64elf32;
Target_selector_aarch64<64, true> target_selector_aarch64elfb;
Target_selector_aarch64<64, false> target_selector_aarch64elf;

} // End anonymous namespace.