1/* Common target dependent code for GDB on ARM systems. 2 3 Copyright (C) 1988, 1989, 1991, 1992, 1993, 1995, 1996, 1998, 1999, 2000, 4 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 5 Free Software Foundation, Inc. 6 7 This file is part of GDB. 8 9 This program is free software; you can redistribute it and/or modify 10 it under the terms of the GNU General Public License as published by 11 the Free Software Foundation; either version 3 of the License, or 12 (at your option) any later version. 13 14 This program is distributed in the hope that it will be useful, 15 but WITHOUT ANY WARRANTY; without even the implied warranty of 16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 GNU General Public License for more details. 18 19 You should have received a copy of the GNU General Public License 20 along with this program. If not, see <http://www.gnu.org/licenses/>. */ 21 22#include <ctype.h> /* XXX for isupper (). */ 23 24#include "defs.h" 25#include "frame.h" 26#include "inferior.h" 27#include "gdbcmd.h" 28#include "gdbcore.h" 29#include "gdb_string.h" 30#include "dis-asm.h" /* For register styles. */ 31#include "regcache.h" 32#include "reggroups.h" 33#include "doublest.h" 34#include "value.h" 35#include "arch-utils.h" 36#include "osabi.h" 37#include "frame-unwind.h" 38#include "frame-base.h" 39#include "trad-frame.h" 40#include "objfiles.h" 41#include "dwarf2-frame.h" 42#include "gdbtypes.h" 43#include "prologue-value.h" 44#include "target-descriptions.h" 45#include "user-regs.h" 46#include "observer.h" 47 48#include "arm-tdep.h" 49#include "gdb/sim-arm.h" 50 51#include "elf-bfd.h" 52#include "coff/internal.h" 53#include "elf/arm.h" 54 55#include "gdb_assert.h" 56#include "vec.h" 57 58#include "features/arm-with-m.c" 59 60static int arm_debug; 61 62/* Macros for setting and testing a bit in a minimal symbol that marks 63 it as Thumb function. The MSB of the minimal symbol's "info" field 64 is used for this purpose. 
65 66 MSYMBOL_SET_SPECIAL Actually sets the "special" bit. 67 MSYMBOL_IS_SPECIAL Tests the "special" bit in a minimal symbol. */ 68 69#define MSYMBOL_SET_SPECIAL(msym) \ 70 MSYMBOL_TARGET_FLAG_1 (msym) = 1 71 72#define MSYMBOL_IS_SPECIAL(msym) \ 73 MSYMBOL_TARGET_FLAG_1 (msym) 74 75/* Per-objfile data used for mapping symbols. */ 76static const struct objfile_data *arm_objfile_data_key; 77 78struct arm_mapping_symbol 79{ 80 bfd_vma value; 81 char type; 82}; 83typedef struct arm_mapping_symbol arm_mapping_symbol_s; 84DEF_VEC_O(arm_mapping_symbol_s); 85 86struct arm_per_objfile 87{ 88 VEC(arm_mapping_symbol_s) **section_maps; 89}; 90 91/* The list of available "set arm ..." and "show arm ..." commands. */ 92static struct cmd_list_element *setarmcmdlist = NULL; 93static struct cmd_list_element *showarmcmdlist = NULL; 94 95/* The type of floating-point to use. Keep this in sync with enum 96 arm_float_model, and the help string in _initialize_arm_tdep. */ 97static const char *fp_model_strings[] = 98{ 99 "auto", 100 "softfpa", 101 "fpa", 102 "softvfp", 103 "vfp", 104 NULL 105}; 106 107/* A variable that can be configured by the user. */ 108static enum arm_float_model arm_fp_model = ARM_FLOAT_AUTO; 109static const char *current_fp_model = "auto"; 110 111/* The ABI to use. Keep this in sync with arm_abi_kind. */ 112static const char *arm_abi_strings[] = 113{ 114 "auto", 115 "APCS", 116 "AAPCS", 117 NULL 118}; 119 120/* A variable that can be configured by the user. */ 121static enum arm_abi_kind arm_abi_global = ARM_ABI_AUTO; 122static const char *arm_abi_string = "auto"; 123 124/* The execution mode to assume. */ 125static const char *arm_mode_strings[] = 126 { 127 "auto", 128 "arm", 129 "thumb", 130 NULL 131 }; 132 133static const char *arm_fallback_mode_string = "auto"; 134static const char *arm_force_mode_string = "auto"; 135 136/* Internal override of the execution mode. -1 means no override, 137 0 means override to ARM mode, 1 means override to Thumb mode. 
138 The effect is the same as if arm_force_mode has been set by the 139 user (except the internal override has precedence over a user's 140 arm_force_mode override). */ 141static int arm_override_mode = -1; 142 143/* Number of different reg name sets (options). */ 144static int num_disassembly_options; 145 146/* The standard register names, and all the valid aliases for them. Note 147 that `fp', `sp' and `pc' are not added in this alias list, because they 148 have been added as builtin user registers in 149 std-regs.c:_initialize_frame_reg. */ 150static const struct 151{ 152 const char *name; 153 int regnum; 154} arm_register_aliases[] = { 155 /* Basic register numbers. */ 156 { "r0", 0 }, 157 { "r1", 1 }, 158 { "r2", 2 }, 159 { "r3", 3 }, 160 { "r4", 4 }, 161 { "r5", 5 }, 162 { "r6", 6 }, 163 { "r7", 7 }, 164 { "r8", 8 }, 165 { "r9", 9 }, 166 { "r10", 10 }, 167 { "r11", 11 }, 168 { "r12", 12 }, 169 { "r13", 13 }, 170 { "r14", 14 }, 171 { "r15", 15 }, 172 /* Synonyms (argument and variable registers). */ 173 { "a1", 0 }, 174 { "a2", 1 }, 175 { "a3", 2 }, 176 { "a4", 3 }, 177 { "v1", 4 }, 178 { "v2", 5 }, 179 { "v3", 6 }, 180 { "v4", 7 }, 181 { "v5", 8 }, 182 { "v6", 9 }, 183 { "v7", 10 }, 184 { "v8", 11 }, 185 /* Other platform-specific names for r9. */ 186 { "sb", 9 }, 187 { "tr", 9 }, 188 /* Special names. */ 189 { "ip", 12 }, 190 { "lr", 14 }, 191 /* Names used by GCC (not listed in the ARM EABI). */ 192 { "sl", 10 }, 193 /* A special name from the older ATPCS. */ 194 { "wr", 7 }, 195}; 196 197static const char *const arm_register_names[] = 198{"r0", "r1", "r2", "r3", /* 0 1 2 3 */ 199 "r4", "r5", "r6", "r7", /* 4 5 6 7 */ 200 "r8", "r9", "r10", "r11", /* 8 9 10 11 */ 201 "r12", "sp", "lr", "pc", /* 12 13 14 15 */ 202 "f0", "f1", "f2", "f3", /* 16 17 18 19 */ 203 "f4", "f5", "f6", "f7", /* 20 21 22 23 */ 204 "fps", "cpsr" }; /* 24 25 */ 205 206/* Valid register name styles. */ 207static const char **valid_disassembly_styles; 208 209/* Disassembly style to use. 
Default to "std" register names. */ 210static const char *disassembly_style; 211 212/* This is used to keep the bfd arch_info in sync with the disassembly 213 style. */ 214static void set_disassembly_style_sfunc(char *, int, 215 struct cmd_list_element *); 216static void set_disassembly_style (void); 217 218static void convert_from_extended (const struct floatformat *, const void *, 219 void *, int); 220static void convert_to_extended (const struct floatformat *, void *, 221 const void *, int); 222 223static enum register_status arm_neon_quad_read (struct gdbarch *gdbarch, 224 struct regcache *regcache, 225 int regnum, gdb_byte *buf); 226static void arm_neon_quad_write (struct gdbarch *gdbarch, 227 struct regcache *regcache, 228 int regnum, const gdb_byte *buf); 229 230struct arm_prologue_cache 231{ 232 /* The stack pointer at the time this frame was created; i.e. the 233 caller's stack pointer when this function was called. It is used 234 to identify this frame. */ 235 CORE_ADDR prev_sp; 236 237 /* The frame base for this frame is just prev_sp - frame size. 238 FRAMESIZE is the distance from the frame pointer to the 239 initial stack pointer. */ 240 241 int framesize; 242 243 /* The register used to hold the frame pointer for this frame. */ 244 int framereg; 245 246 /* Saved register offsets. */ 247 struct trad_frame_saved_reg *saved_regs; 248}; 249 250static CORE_ADDR arm_analyze_prologue (struct gdbarch *gdbarch, 251 CORE_ADDR prologue_start, 252 CORE_ADDR prologue_end, 253 struct arm_prologue_cache *cache); 254 255/* Architecture version for displaced stepping. This effects the behaviour of 256 certain instructions, and really should not be hard-wired. */ 257 258#define DISPLACED_STEPPING_ARCH_VERSION 5 259 260/* Addresses for calling Thumb functions have the bit 0 set. 261 Here are some macros to test, set, or clear bit 0 of addresses. 
*/ 262#define IS_THUMB_ADDR(addr) ((addr) & 1) 263#define MAKE_THUMB_ADDR(addr) ((addr) | 1) 264#define UNMAKE_THUMB_ADDR(addr) ((addr) & ~1) 265 266/* Set to true if the 32-bit mode is in use. */ 267 268int arm_apcs_32 = 1; 269 270/* Return the bit mask in ARM_PS_REGNUM that indicates Thumb mode. */ 271 272int 273arm_psr_thumb_bit (struct gdbarch *gdbarch) 274{ 275 if (gdbarch_tdep (gdbarch)->is_m) 276 return XPSR_T; 277 else 278 return CPSR_T; 279} 280 281/* Determine if FRAME is executing in Thumb mode. */ 282 283int 284arm_frame_is_thumb (struct frame_info *frame) 285{ 286 CORE_ADDR cpsr; 287 ULONGEST t_bit = arm_psr_thumb_bit (get_frame_arch (frame)); 288 289 /* Every ARM frame unwinder can unwind the T bit of the CPSR, either 290 directly (from a signal frame or dummy frame) or by interpreting 291 the saved LR (from a prologue or DWARF frame). So consult it and 292 trust the unwinders. */ 293 cpsr = get_frame_register_unsigned (frame, ARM_PS_REGNUM); 294 295 return (cpsr & t_bit) != 0; 296} 297 298/* Callback for VEC_lower_bound. */ 299 300static inline int 301arm_compare_mapping_symbols (const struct arm_mapping_symbol *lhs, 302 const struct arm_mapping_symbol *rhs) 303{ 304 return lhs->value < rhs->value; 305} 306 307/* Search for the mapping symbol covering MEMADDR. If one is found, 308 return its type. Otherwise, return 0. If START is non-NULL, 309 set *START to the location of the mapping symbol. */ 310 311static char 312arm_find_mapping_symbol (CORE_ADDR memaddr, CORE_ADDR *start) 313{ 314 struct obj_section *sec; 315 316 /* If there are mapping symbols, consult them. 
*/ 317 sec = find_pc_section (memaddr); 318 if (sec != NULL) 319 { 320 struct arm_per_objfile *data; 321 VEC(arm_mapping_symbol_s) *map; 322 struct arm_mapping_symbol map_key = { memaddr - obj_section_addr (sec), 323 0 }; 324 unsigned int idx; 325 326 data = objfile_data (sec->objfile, arm_objfile_data_key); 327 if (data != NULL) 328 { 329 map = data->section_maps[sec->the_bfd_section->index]; 330 if (!VEC_empty (arm_mapping_symbol_s, map)) 331 { 332 struct arm_mapping_symbol *map_sym; 333 334 idx = VEC_lower_bound (arm_mapping_symbol_s, map, &map_key, 335 arm_compare_mapping_symbols); 336 337 /* VEC_lower_bound finds the earliest ordered insertion 338 point. If the following symbol starts at this exact 339 address, we use that; otherwise, the preceding 340 mapping symbol covers this address. */ 341 if (idx < VEC_length (arm_mapping_symbol_s, map)) 342 { 343 map_sym = VEC_index (arm_mapping_symbol_s, map, idx); 344 if (map_sym->value == map_key.value) 345 { 346 if (start) 347 *start = map_sym->value + obj_section_addr (sec); 348 return map_sym->type; 349 } 350 } 351 352 if (idx > 0) 353 { 354 map_sym = VEC_index (arm_mapping_symbol_s, map, idx - 1); 355 if (start) 356 *start = map_sym->value + obj_section_addr (sec); 357 return map_sym->type; 358 } 359 } 360 } 361 } 362 363 return 0; 364} 365 366/* Determine if the program counter specified in MEMADDR is in a Thumb 367 function. This function should be called for addresses unrelated to 368 any executing frame; otherwise, prefer arm_frame_is_thumb. */ 369 370int 371arm_pc_is_thumb (struct gdbarch *gdbarch, CORE_ADDR memaddr) 372{ 373 struct obj_section *sec; 374 struct minimal_symbol *sym; 375 char type; 376 struct displaced_step_closure* dsc 377 = get_displaced_step_closure_by_addr(memaddr); 378 379 /* If checking the mode of displaced instruction in copy area, the mode 380 should be determined by instruction on the original address. 
*/ 381 if (dsc) 382 { 383 if (debug_displaced) 384 fprintf_unfiltered (gdb_stdlog, 385 "displaced: check mode of %.8lx instead of %.8lx\n", 386 (unsigned long) dsc->insn_addr, 387 (unsigned long) memaddr); 388 memaddr = dsc->insn_addr; 389 } 390 391 /* If bit 0 of the address is set, assume this is a Thumb address. */ 392 if (IS_THUMB_ADDR (memaddr)) 393 return 1; 394 395 /* Respect internal mode override if active. */ 396 if (arm_override_mode != -1) 397 return arm_override_mode; 398 399 /* If the user wants to override the symbol table, let him. */ 400 if (strcmp (arm_force_mode_string, "arm") == 0) 401 return 0; 402 if (strcmp (arm_force_mode_string, "thumb") == 0) 403 return 1; 404 405 /* ARM v6-M and v7-M are always in Thumb mode. */ 406 if (gdbarch_tdep (gdbarch)->is_m) 407 return 1; 408 409 /* If there are mapping symbols, consult them. */ 410 type = arm_find_mapping_symbol (memaddr, NULL); 411 if (type) 412 return type == 't'; 413 414 /* Thumb functions have a "special" bit set in minimal symbols. */ 415 sym = lookup_minimal_symbol_by_pc (memaddr); 416 if (sym) 417 return (MSYMBOL_IS_SPECIAL (sym)); 418 419 /* If the user wants to override the fallback mode, let them. */ 420 if (strcmp (arm_fallback_mode_string, "arm") == 0) 421 return 0; 422 if (strcmp (arm_fallback_mode_string, "thumb") == 0) 423 return 1; 424 425 /* If we couldn't find any symbol, but we're talking to a running 426 target, then trust the current value of $cpsr. This lets 427 "display/i $pc" always show the correct mode (though if there is 428 a symbol table we will not reach here, so it still may not be 429 displayed in the mode it will be executed). */ 430 if (target_has_registers) 431 return arm_frame_is_thumb (get_current_frame ()); 432 433 /* Otherwise we're out of luck; we assume ARM. */ 434 return 0; 435} 436 437/* Remove useless bits from addresses in a running program. 
*/ 438static CORE_ADDR 439arm_addr_bits_remove (struct gdbarch *gdbarch, CORE_ADDR val) 440{ 441 if (arm_apcs_32) 442 return UNMAKE_THUMB_ADDR (val); 443 else 444 return (val & 0x03fffffc); 445} 446 447/* When reading symbols, we need to zap the low bit of the address, 448 which may be set to 1 for Thumb functions. */ 449static CORE_ADDR 450arm_smash_text_address (struct gdbarch *gdbarch, CORE_ADDR val) 451{ 452 return val & ~1; 453} 454 455/* Return 1 if PC is the start of a compiler helper function which 456 can be safely ignored during prologue skipping. IS_THUMB is true 457 if the function is known to be a Thumb function due to the way it 458 is being called. */ 459static int 460skip_prologue_function (struct gdbarch *gdbarch, CORE_ADDR pc, int is_thumb) 461{ 462 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch); 463 struct minimal_symbol *msym; 464 465 msym = lookup_minimal_symbol_by_pc (pc); 466 if (msym != NULL 467 && SYMBOL_VALUE_ADDRESS (msym) == pc 468 && SYMBOL_LINKAGE_NAME (msym) != NULL) 469 { 470 const char *name = SYMBOL_LINKAGE_NAME (msym); 471 472 /* The GNU linker's Thumb call stub to foo is named 473 __foo_from_thumb. */ 474 if (strstr (name, "_from_thumb") != NULL) 475 name += 2; 476 477 /* On soft-float targets, __truncdfsf2 is called to convert promoted 478 arguments to their argument types in non-prototyped 479 functions. */ 480 if (strncmp (name, "__truncdfsf2", strlen ("__truncdfsf2")) == 0) 481 return 1; 482 if (strncmp (name, "__aeabi_d2f", strlen ("__aeabi_d2f")) == 0) 483 return 1; 484 485 /* Internal functions related to thread-local storage. */ 486 if (strncmp (name, "__tls_get_addr", strlen ("__tls_get_addr")) == 0) 487 return 1; 488 if (strncmp (name, "__aeabi_read_tp", strlen ("__aeabi_read_tp")) == 0) 489 return 1; 490 } 491 else 492 { 493 /* If we run against a stripped glibc, we may be unable to identify 494 special functions by name. 
Check for one important case, 495 __aeabi_read_tp, by comparing the *code* against the default 496 implementation (this is hand-written ARM assembler in glibc). */ 497 498 if (!is_thumb 499 && read_memory_unsigned_integer (pc, 4, byte_order_for_code) 500 == 0xe3e00a0f /* mov r0, #0xffff0fff */ 501 && read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code) 502 == 0xe240f01f) /* sub pc, r0, #31 */ 503 return 1; 504 } 505 506 return 0; 507} 508 509/* Support routines for instruction parsing. */ 510#define submask(x) ((1L << ((x) + 1)) - 1) 511#define bit(obj,st) (((obj) >> (st)) & 1) 512#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st))) 513#define sbits(obj,st,fn) \ 514 ((long) (bits(obj,st,fn) | ((long) bit(obj,fn) * ~ submask (fn - st)))) 515#define BranchDest(addr,instr) \ 516 ((CORE_ADDR) (((long) (addr)) + 8 + (sbits (instr, 0, 23) << 2))) 517 518/* Extract the immediate from instruction movw/movt of encoding T. INSN1 is 519 the first 16-bit of instruction, and INSN2 is the second 16-bit of 520 instruction. */ 521#define EXTRACT_MOVW_MOVT_IMM_T(insn1, insn2) \ 522 ((bits ((insn1), 0, 3) << 12) \ 523 | (bits ((insn1), 10, 10) << 11) \ 524 | (bits ((insn2), 12, 14) << 8) \ 525 | bits ((insn2), 0, 7)) 526 527/* Extract the immediate from instruction movw/movt of encoding A. INSN is 528 the 32-bit instruction. */ 529#define EXTRACT_MOVW_MOVT_IMM_A(insn) \ 530 ((bits ((insn), 16, 19) << 12) \ 531 | bits ((insn), 0, 11)) 532 533/* Decode immediate value; implements ThumbExpandImmediate pseudo-op. 
*/ 534 535static unsigned int 536thumb_expand_immediate (unsigned int imm) 537{ 538 unsigned int count = imm >> 7; 539 540 if (count < 8) 541 switch (count / 2) 542 { 543 case 0: 544 return imm & 0xff; 545 case 1: 546 return (imm & 0xff) | ((imm & 0xff) << 16); 547 case 2: 548 return ((imm & 0xff) << 8) | ((imm & 0xff) << 24); 549 case 3: 550 return (imm & 0xff) | ((imm & 0xff) << 8) 551 | ((imm & 0xff) << 16) | ((imm & 0xff) << 24); 552 } 553 554 return (0x80 | (imm & 0x7f)) << (32 - count); 555} 556 557/* Return 1 if the 16-bit Thumb instruction INST might change 558 control flow, 0 otherwise. */ 559 560static int 561thumb_instruction_changes_pc (unsigned short inst) 562{ 563 if ((inst & 0xff00) == 0xbd00) /* pop {rlist, pc} */ 564 return 1; 565 566 if ((inst & 0xf000) == 0xd000) /* conditional branch */ 567 return 1; 568 569 if ((inst & 0xf800) == 0xe000) /* unconditional branch */ 570 return 1; 571 572 if ((inst & 0xff00) == 0x4700) /* bx REG, blx REG */ 573 return 1; 574 575 if ((inst & 0xff87) == 0x4687) /* mov pc, REG */ 576 return 1; 577 578 if ((inst & 0xf500) == 0xb100) /* CBNZ or CBZ. */ 579 return 1; 580 581 return 0; 582} 583 584/* Return 1 if the 32-bit Thumb instruction in INST1 and INST2 585 might change control flow, 0 otherwise. */ 586 587static int 588thumb2_instruction_changes_pc (unsigned short inst1, unsigned short inst2) 589{ 590 if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000) 591 { 592 /* Branches and miscellaneous control instructions. */ 593 594 if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000) 595 { 596 /* B, BL, BLX. */ 597 return 1; 598 } 599 else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00) 600 { 601 /* SUBS PC, LR, #imm8. */ 602 return 1; 603 } 604 else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380) 605 { 606 /* Conditional branch. */ 607 return 1; 608 } 609 610 return 0; 611 } 612 613 if ((inst1 & 0xfe50) == 0xe810) 614 { 615 /* Load multiple or RFE. 
*/ 616 617 if (bit (inst1, 7) && !bit (inst1, 8)) 618 { 619 /* LDMIA or POP */ 620 if (bit (inst2, 15)) 621 return 1; 622 } 623 else if (!bit (inst1, 7) && bit (inst1, 8)) 624 { 625 /* LDMDB */ 626 if (bit (inst2, 15)) 627 return 1; 628 } 629 else if (bit (inst1, 7) && bit (inst1, 8)) 630 { 631 /* RFEIA */ 632 return 1; 633 } 634 else if (!bit (inst1, 7) && !bit (inst1, 8)) 635 { 636 /* RFEDB */ 637 return 1; 638 } 639 640 return 0; 641 } 642 643 if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00) 644 { 645 /* MOV PC or MOVS PC. */ 646 return 1; 647 } 648 649 if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000) 650 { 651 /* LDR PC. */ 652 if (bits (inst1, 0, 3) == 15) 653 return 1; 654 if (bit (inst1, 7)) 655 return 1; 656 if (bit (inst2, 11)) 657 return 1; 658 if ((inst2 & 0x0fc0) == 0x0000) 659 return 1; 660 661 return 0; 662 } 663 664 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000) 665 { 666 /* TBB. */ 667 return 1; 668 } 669 670 if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010) 671 { 672 /* TBH. */ 673 return 1; 674 } 675 676 return 0; 677} 678 679/* Analyze a Thumb prologue, looking for a recognizable stack frame 680 and frame pointer. Scan until we encounter a store that could 681 clobber the stack frame unexpectedly, or an unknown instruction. 682 Return the last address which is definitely safe to skip for an 683 initial breakpoint. 
*/ 684 685static CORE_ADDR 686thumb_analyze_prologue (struct gdbarch *gdbarch, 687 CORE_ADDR start, CORE_ADDR limit, 688 struct arm_prologue_cache *cache) 689{ 690 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); 691 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch); 692 int i; 693 pv_t regs[16]; 694 struct pv_area *stack; 695 struct cleanup *back_to; 696 CORE_ADDR offset; 697 CORE_ADDR unrecognized_pc = 0; 698 699 for (i = 0; i < 16; i++) 700 regs[i] = pv_register (i, 0); 701 stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch)); 702 back_to = make_cleanup_free_pv_area (stack); 703 704 while (start < limit) 705 { 706 unsigned short insn; 707 708 insn = read_memory_unsigned_integer (start, 2, byte_order_for_code); 709 710 if ((insn & 0xfe00) == 0xb400) /* push { rlist } */ 711 { 712 int regno; 713 int mask; 714 715 if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM])) 716 break; 717 718 /* Bits 0-7 contain a mask for registers R0-R7. Bit 8 says 719 whether to save LR (R14). */ 720 mask = (insn & 0xff) | ((insn & 0x100) << 6); 721 722 /* Calculate offsets of saved R0-R7 and LR. */ 723 for (regno = ARM_LR_REGNUM; regno >= 0; regno--) 724 if (mask & (1 << regno)) 725 { 726 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], 727 -4); 728 pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]); 729 } 730 } 731 else if ((insn & 0xff00) == 0xb000) /* add sp, #simm OR 732 sub sp, #simm */ 733 { 734 offset = (insn & 0x7f) << 2; /* get scaled offset */ 735 if (insn & 0x80) /* Check for SUB. 
*/ 736 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], 737 -offset); 738 else 739 regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], 740 offset); 741 } 742 else if ((insn & 0xf800) == 0xa800) /* add Rd, sp, #imm */ 743 regs[bits (insn, 8, 10)] = pv_add_constant (regs[ARM_SP_REGNUM], 744 (insn & 0xff) << 2); 745 else if ((insn & 0xfe00) == 0x1c00 /* add Rd, Rn, #imm */ 746 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)) 747 regs[bits (insn, 0, 2)] = pv_add_constant (regs[bits (insn, 3, 5)], 748 bits (insn, 6, 8)); 749 else if ((insn & 0xf800) == 0x3000 /* add Rd, #imm */ 750 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM)) 751 regs[bits (insn, 8, 10)] = pv_add_constant (regs[bits (insn, 8, 10)], 752 bits (insn, 0, 7)); 753 else if ((insn & 0xfe00) == 0x1800 /* add Rd, Rn, Rm */ 754 && pv_is_register (regs[bits (insn, 6, 8)], ARM_SP_REGNUM) 755 && pv_is_constant (regs[bits (insn, 3, 5)])) 756 regs[bits (insn, 0, 2)] = pv_add (regs[bits (insn, 3, 5)], 757 regs[bits (insn, 6, 8)]); 758 else if ((insn & 0xff00) == 0x4400 /* add Rd, Rm */ 759 && pv_is_constant (regs[bits (insn, 3, 6)])) 760 { 761 int rd = (bit (insn, 7) << 3) + bits (insn, 0, 2); 762 int rm = bits (insn, 3, 6); 763 regs[rd] = pv_add (regs[rd], regs[rm]); 764 } 765 else if ((insn & 0xff00) == 0x4600) /* mov hi, lo or mov lo, hi */ 766 { 767 int dst_reg = (insn & 0x7) + ((insn & 0x80) >> 4); 768 int src_reg = (insn & 0x78) >> 3; 769 regs[dst_reg] = regs[src_reg]; 770 } 771 else if ((insn & 0xf800) == 0x9000) /* str rd, [sp, #off] */ 772 { 773 /* Handle stores to the stack. Normally pushes are used, 774 but with GCC -mtpcs-frame, there may be other stores 775 in the prologue to create the frame. 
*/ 776 int regno = (insn >> 8) & 0x7; 777 pv_t addr; 778 779 offset = (insn & 0xff) << 2; 780 addr = pv_add_constant (regs[ARM_SP_REGNUM], offset); 781 782 if (pv_area_store_would_trash (stack, addr)) 783 break; 784 785 pv_area_store (stack, addr, 4, regs[regno]); 786 } 787 else if ((insn & 0xf800) == 0x6000) /* str rd, [rn, #off] */ 788 { 789 int rd = bits (insn, 0, 2); 790 int rn = bits (insn, 3, 5); 791 pv_t addr; 792 793 offset = bits (insn, 6, 10) << 2; 794 addr = pv_add_constant (regs[rn], offset); 795 796 if (pv_area_store_would_trash (stack, addr)) 797 break; 798 799 pv_area_store (stack, addr, 4, regs[rd]); 800 } 801 else if (((insn & 0xf800) == 0x7000 /* strb Rd, [Rn, #off] */ 802 || (insn & 0xf800) == 0x8000) /* strh Rd, [Rn, #off] */ 803 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM)) 804 /* Ignore stores of argument registers to the stack. */ 805 ; 806 else if ((insn & 0xf800) == 0xc800 /* ldmia Rn!, { registers } */ 807 && pv_is_register (regs[bits (insn, 8, 10)], ARM_SP_REGNUM)) 808 /* Ignore block loads from the stack, potentially copying 809 parameters from memory. */ 810 ; 811 else if ((insn & 0xf800) == 0x9800 /* ldr Rd, [Rn, #immed] */ 812 || ((insn & 0xf800) == 0x6800 /* ldr Rd, [sp, #immed] */ 813 && pv_is_register (regs[bits (insn, 3, 5)], ARM_SP_REGNUM))) 814 /* Similarly ignore single loads from the stack. */ 815 ; 816 else if ((insn & 0xffc0) == 0x0000 /* lsls Rd, Rm, #0 */ 817 || (insn & 0xffc0) == 0x1c00) /* add Rd, Rn, #0 */ 818 /* Skip register copies, i.e. saves to another register 819 instead of the stack. */ 820 ; 821 else if ((insn & 0xf800) == 0x2000) /* movs Rd, #imm */ 822 /* Recognize constant loads; even with small stacks these are necessary 823 on Thumb. */ 824 regs[bits (insn, 8, 10)] = pv_constant (bits (insn, 0, 7)); 825 else if ((insn & 0xf800) == 0x4800) /* ldr Rd, [pc, #imm] */ 826 { 827 /* Constant pool loads, for the same reason. 
*/ 828 unsigned int constant; 829 CORE_ADDR loc; 830 831 loc = start + 4 + bits (insn, 0, 7) * 4; 832 constant = read_memory_unsigned_integer (loc, 4, byte_order); 833 regs[bits (insn, 8, 10)] = pv_constant (constant); 834 } 835 else if ((insn & 0xe000) == 0xe000) 836 { 837 unsigned short inst2; 838 839 inst2 = read_memory_unsigned_integer (start + 2, 2, 840 byte_order_for_code); 841 842 if ((insn & 0xf800) == 0xf000 && (inst2 & 0xe800) == 0xe800) 843 { 844 /* BL, BLX. Allow some special function calls when 845 skipping the prologue; GCC generates these before 846 storing arguments to the stack. */ 847 CORE_ADDR nextpc; 848 int j1, j2, imm1, imm2; 849 850 imm1 = sbits (insn, 0, 10); 851 imm2 = bits (inst2, 0, 10); 852 j1 = bit (inst2, 13); 853 j2 = bit (inst2, 11); 854 855 offset = ((imm1 << 12) + (imm2 << 1)); 856 offset ^= ((!j2) << 22) | ((!j1) << 23); 857 858 nextpc = start + 4 + offset; 859 /* For BLX make sure to clear the low bits. */ 860 if (bit (inst2, 12) == 0) 861 nextpc = nextpc & 0xfffffffc; 862 863 if (!skip_prologue_function (gdbarch, nextpc, 864 bit (inst2, 12) != 0)) 865 break; 866 } 867 868 else if ((insn & 0xffd0) == 0xe900 /* stmdb Rn{!}, 869 { registers } */ 870 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM)) 871 { 872 pv_t addr = regs[bits (insn, 0, 3)]; 873 int regno; 874 875 if (pv_area_store_would_trash (stack, addr)) 876 break; 877 878 /* Calculate offsets of saved registers. 
*/ 879 for (regno = ARM_LR_REGNUM; regno >= 0; regno--) 880 if (inst2 & (1 << regno)) 881 { 882 addr = pv_add_constant (addr, -4); 883 pv_area_store (stack, addr, 4, regs[regno]); 884 } 885 886 if (insn & 0x0020) 887 regs[bits (insn, 0, 3)] = addr; 888 } 889 890 else if ((insn & 0xff50) == 0xe940 /* strd Rt, Rt2, 891 [Rn, #+/-imm]{!} */ 892 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM)) 893 { 894 int regno1 = bits (inst2, 12, 15); 895 int regno2 = bits (inst2, 8, 11); 896 pv_t addr = regs[bits (insn, 0, 3)]; 897 898 offset = inst2 & 0xff; 899 if (insn & 0x0080) 900 addr = pv_add_constant (addr, offset); 901 else 902 addr = pv_add_constant (addr, -offset); 903 904 if (pv_area_store_would_trash (stack, addr)) 905 break; 906 907 pv_area_store (stack, addr, 4, regs[regno1]); 908 pv_area_store (stack, pv_add_constant (addr, 4), 909 4, regs[regno2]); 910 911 if (insn & 0x0020) 912 regs[bits (insn, 0, 3)] = addr; 913 } 914 915 else if ((insn & 0xfff0) == 0xf8c0 /* str Rt,[Rn,+/-#imm]{!} */ 916 && (inst2 & 0x0c00) == 0x0c00 917 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM)) 918 { 919 int regno = bits (inst2, 12, 15); 920 pv_t addr = regs[bits (insn, 0, 3)]; 921 922 offset = inst2 & 0xff; 923 if (inst2 & 0x0200) 924 addr = pv_add_constant (addr, offset); 925 else 926 addr = pv_add_constant (addr, -offset); 927 928 if (pv_area_store_would_trash (stack, addr)) 929 break; 930 931 pv_area_store (stack, addr, 4, regs[regno]); 932 933 if (inst2 & 0x0100) 934 regs[bits (insn, 0, 3)] = addr; 935 } 936 937 else if ((insn & 0xfff0) == 0xf8c0 /* str.w Rt,[Rn,#imm] */ 938 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM)) 939 { 940 int regno = bits (inst2, 12, 15); 941 pv_t addr; 942 943 offset = inst2 & 0xfff; 944 addr = pv_add_constant (regs[bits (insn, 0, 3)], offset); 945 946 if (pv_area_store_would_trash (stack, addr)) 947 break; 948 949 pv_area_store (stack, addr, 4, regs[regno]); 950 } 951 952 else if ((insn & 0xffd0) == 0xf880 /* str{bh}.w 
Rt,[Rn,#imm] */ 953 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM)) 954 /* Ignore stores of argument registers to the stack. */ 955 ; 956 957 else if ((insn & 0xffd0) == 0xf800 /* str{bh} Rt,[Rn,#+/-imm] */ 958 && (inst2 & 0x0d00) == 0x0c00 959 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM)) 960 /* Ignore stores of argument registers to the stack. */ 961 ; 962 963 else if ((insn & 0xffd0) == 0xe890 /* ldmia Rn[!], 964 { registers } */ 965 && (inst2 & 0x8000) == 0x0000 966 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM)) 967 /* Ignore block loads from the stack, potentially copying 968 parameters from memory. */ 969 ; 970 971 else if ((insn & 0xffb0) == 0xe950 /* ldrd Rt, Rt2, 972 [Rn, #+/-imm] */ 973 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM)) 974 /* Similarly ignore dual loads from the stack. */ 975 ; 976 977 else if ((insn & 0xfff0) == 0xf850 /* ldr Rt,[Rn,#+/-imm] */ 978 && (inst2 & 0x0d00) == 0x0c00 979 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM)) 980 /* Similarly ignore single loads from the stack. */ 981 ; 982 983 else if ((insn & 0xfff0) == 0xf8d0 /* ldr.w Rt,[Rn,#imm] */ 984 && pv_is_register (regs[bits (insn, 0, 3)], ARM_SP_REGNUM)) 985 /* Similarly ignore single loads from the stack. 
*/ 986 ; 987 988 else if ((insn & 0xfbf0) == 0xf100 /* add.w Rd, Rn, #imm */ 989 && (inst2 & 0x8000) == 0x0000) 990 { 991 unsigned int imm = ((bits (insn, 10, 10) << 11) 992 | (bits (inst2, 12, 14) << 8) 993 | bits (inst2, 0, 7)); 994 995 regs[bits (inst2, 8, 11)] 996 = pv_add_constant (regs[bits (insn, 0, 3)], 997 thumb_expand_immediate (imm)); 998 } 999 1000 else if ((insn & 0xfbf0) == 0xf200 /* addw Rd, Rn, #imm */ 1001 && (inst2 & 0x8000) == 0x0000) 1002 { 1003 unsigned int imm = ((bits (insn, 10, 10) << 11) 1004 | (bits (inst2, 12, 14) << 8) 1005 | bits (inst2, 0, 7)); 1006 1007 regs[bits (inst2, 8, 11)] 1008 = pv_add_constant (regs[bits (insn, 0, 3)], imm); 1009 } 1010 1011 else if ((insn & 0xfbf0) == 0xf1a0 /* sub.w Rd, Rn, #imm */ 1012 && (inst2 & 0x8000) == 0x0000) 1013 { 1014 unsigned int imm = ((bits (insn, 10, 10) << 11) 1015 | (bits (inst2, 12, 14) << 8) 1016 | bits (inst2, 0, 7)); 1017 1018 regs[bits (inst2, 8, 11)] 1019 = pv_add_constant (regs[bits (insn, 0, 3)], 1020 - (CORE_ADDR) thumb_expand_immediate (imm)); 1021 } 1022 1023 else if ((insn & 0xfbf0) == 0xf2a0 /* subw Rd, Rn, #imm */ 1024 && (inst2 & 0x8000) == 0x0000) 1025 { 1026 unsigned int imm = ((bits (insn, 10, 10) << 11) 1027 | (bits (inst2, 12, 14) << 8) 1028 | bits (inst2, 0, 7)); 1029 1030 regs[bits (inst2, 8, 11)] 1031 = pv_add_constant (regs[bits (insn, 0, 3)], - (CORE_ADDR) imm); 1032 } 1033 1034 else if ((insn & 0xfbff) == 0xf04f) /* mov.w Rd, #const */ 1035 { 1036 unsigned int imm = ((bits (insn, 10, 10) << 11) 1037 | (bits (inst2, 12, 14) << 8) 1038 | bits (inst2, 0, 7)); 1039 1040 regs[bits (inst2, 8, 11)] 1041 = pv_constant (thumb_expand_immediate (imm)); 1042 } 1043 1044 else if ((insn & 0xfbf0) == 0xf240) /* movw Rd, #const */ 1045 { 1046 unsigned int imm 1047 = EXTRACT_MOVW_MOVT_IMM_T (insn, inst2); 1048 1049 regs[bits (inst2, 8, 11)] = pv_constant (imm); 1050 } 1051 1052 else if (insn == 0xea5f /* mov.w Rd,Rm */ 1053 && (inst2 & 0xf0f0) == 0) 1054 { 1055 int dst_reg = (inst2 
& 0x0f00) >> 8; 1056 int src_reg = inst2 & 0xf; 1057 regs[dst_reg] = regs[src_reg]; 1058 } 1059 1060 else if ((insn & 0xff7f) == 0xf85f) /* ldr.w Rt,<label> */ 1061 { 1062 /* Constant pool loads. */ 1063 unsigned int constant; 1064 CORE_ADDR loc; 1065 1066 offset = bits (insn, 0, 11); 1067 if (insn & 0x0080) 1068 loc = start + 4 + offset; 1069 else 1070 loc = start + 4 - offset; 1071 1072 constant = read_memory_unsigned_integer (loc, 4, byte_order); 1073 regs[bits (inst2, 12, 15)] = pv_constant (constant); 1074 } 1075 1076 else if ((insn & 0xff7f) == 0xe95f) /* ldrd Rt,Rt2,<label> */ 1077 { 1078 /* Constant pool loads. */ 1079 unsigned int constant; 1080 CORE_ADDR loc; 1081 1082 offset = bits (insn, 0, 7) << 2; 1083 if (insn & 0x0080) 1084 loc = start + 4 + offset; 1085 else 1086 loc = start + 4 - offset; 1087 1088 constant = read_memory_unsigned_integer (loc, 4, byte_order); 1089 regs[bits (inst2, 12, 15)] = pv_constant (constant); 1090 1091 constant = read_memory_unsigned_integer (loc + 4, 4, byte_order); 1092 regs[bits (inst2, 8, 11)] = pv_constant (constant); 1093 } 1094 1095 else if (thumb2_instruction_changes_pc (insn, inst2)) 1096 { 1097 /* Don't scan past anything that might change control flow. */ 1098 break; 1099 } 1100 else 1101 { 1102 /* The optimizer might shove anything into the prologue, 1103 so we just skip what we don't recognize. */ 1104 unrecognized_pc = start; 1105 } 1106 1107 start += 2; 1108 } 1109 else if (thumb_instruction_changes_pc (insn)) 1110 { 1111 /* Don't scan past anything that might change control flow. */ 1112 break; 1113 } 1114 else 1115 { 1116 /* The optimizer might shove anything into the prologue, 1117 so we just skip what we don't recognize. 
*/ 1118 unrecognized_pc = start; 1119 } 1120 1121 start += 2; 1122 } 1123 1124 if (arm_debug) 1125 fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n", 1126 paddress (gdbarch, start)); 1127 1128 if (unrecognized_pc == 0) 1129 unrecognized_pc = start; 1130 1131 if (cache == NULL) 1132 { 1133 do_cleanups (back_to); 1134 return unrecognized_pc; 1135 } 1136 1137 if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM)) 1138 { 1139 /* Frame pointer is fp. Frame size is constant. */ 1140 cache->framereg = ARM_FP_REGNUM; 1141 cache->framesize = -regs[ARM_FP_REGNUM].k; 1142 } 1143 else if (pv_is_register (regs[THUMB_FP_REGNUM], ARM_SP_REGNUM)) 1144 { 1145 /* Frame pointer is r7. Frame size is constant. */ 1146 cache->framereg = THUMB_FP_REGNUM; 1147 cache->framesize = -regs[THUMB_FP_REGNUM].k; 1148 } 1149 else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM)) 1150 { 1151 /* Try the stack pointer... this is a bit desperate. */ 1152 cache->framereg = ARM_SP_REGNUM; 1153 cache->framesize = -regs[ARM_SP_REGNUM].k; 1154 } 1155 else 1156 { 1157 /* We're just out of luck. We don't know where the frame is. */ 1158 cache->framereg = -1; 1159 cache->framesize = 0; 1160 } 1161 1162 for (i = 0; i < 16; i++) 1163 if (pv_area_find_reg (stack, gdbarch, i, &offset)) 1164 cache->saved_regs[i].addr = offset; 1165 1166 do_cleanups (back_to); 1167 return unrecognized_pc; 1168} 1169 1170 1171/* Try to analyze the instructions starting from PC, which load symbol 1172 __stack_chk_guard. Return the address of instruction after loading this 1173 symbol, set the dest register number to *BASEREG, and set the size of 1174 instructions for loading symbol in OFFSET. Return 0 if instructions are 1175 not recognized. 
*/

static CORE_ADDR
arm_analyze_load_stack_chk_guard(CORE_ADDR pc, struct gdbarch *gdbarch,
				 unsigned int *destreg, int *offset)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int is_thumb = arm_pc_is_thumb (gdbarch, pc);
  unsigned int low, high, address;

  address = 0;
  if (is_thumb)
    {
      unsigned short insn1
	= read_memory_unsigned_integer (pc, 2, byte_order_for_code);

      if ((insn1 & 0xf800) == 0x4800)	/* ldr Rd, #immed */
	{
	  *destreg = bits (insn1, 8, 10);
	  *offset = 2;
	  /* NOTE(review): this is the raw 8-bit literal-pool index, not a
	     dereferenced address like the movw/movt cases below — verify
	     that callers expect this.  */
	  address = bits (insn1, 0, 7);
	}
      else if ((insn1 & 0xfbf0) == 0xf240)	/* movw Rd, #const */
	{
	  unsigned short insn2
	    = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);

	  low = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);

	  /* Look at the following instruction pair for the matching
	     movt that supplies the high half of the address.  */
	  insn1
	    = read_memory_unsigned_integer (pc + 4, 2, byte_order_for_code);
	  insn2
	    = read_memory_unsigned_integer (pc + 6, 2, byte_order_for_code);

	  /* movt Rd, #const */
	  if ((insn1 & 0xfbc0) == 0xf2c0)
	    {
	      high = EXTRACT_MOVW_MOVT_IMM_T (insn1, insn2);
	      *destreg = bits (insn2, 8, 11);
	      *offset = 8;
	      address = (high << 16 | low);
	    }
	}
    }
  else
    {
      unsigned int insn
	= read_memory_unsigned_integer (pc, 4, byte_order_for_code);

      if ((insn & 0x0e5f0000) == 0x041f0000)	/* ldr Rd, #immed */
	{
	  /* PC-relative load; ADDRESS is the 12-bit immediate here.  */
	  address = bits (insn, 0, 11);
	  *destreg = bits (insn, 12, 15);
	  *offset = 4;
	}
      else if ((insn & 0x0ff00000) == 0x03000000)	/* movw Rd, #const */
	{
	  low = EXTRACT_MOVW_MOVT_IMM_A (insn);

	  insn
	    = read_memory_unsigned_integer (pc + 4, 4, byte_order_for_code);

	  if ((insn & 0x0ff00000) == 0x03400000)	/* movt Rd, #const */
	    {
	      high = EXTRACT_MOVW_MOVT_IMM_A (insn);
	      *destreg = bits (insn, 12, 15);
	      *offset = 8;
	      address = (high << 16 | low);
	    }
	}
    }

  return address;
}

/* Try to skip a sequence of instructions used for stack protector.  If PC
   points to the first instruction of this sequence, return the address of
   first instruction after this sequence, otherwise, return original PC.

   On arm, this sequence of instructions is composed of mainly three steps,
     Step 1: load symbol __stack_chk_guard,
     Step 2: load from address of __stack_chk_guard,
     Step 3: store it to somewhere else.

   Usually, instructions on step 2 and step 3 are the same on various ARM
   architectures.  On step 2, it is one instruction 'ldr Rx, [Rn, #0]', and
   on step 3, it is also one instruction 'str Rx, [r7, #immd]'.  However,
   instructions in step 1 vary from different ARM architectures.  On ARMv7,
   they are,

	movw	Rn, #:lower16:__stack_chk_guard
	movt	Rn, #:upper16:__stack_chk_guard

   On ARMv5t, it is,

	ldr	Rn, .Label
	....
	.Label:
	.word	__stack_chk_guard

   Since ldr/str is a very popular instruction, we can't use them as
   'fingerprint' or 'signature' of stack protector sequence.  Here we choose
   sequence {movw/movt, ldr}/ldr/str plus symbol __stack_chk_guard, if not
   stripped, as the 'fingerprint' of a stack protector code sequence.  */

static CORE_ADDR
arm_skip_stack_protector(CORE_ADDR pc, struct gdbarch *gdbarch)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  /* NOTE(review): ADDRESS appears unused in this function — verify.  */
  unsigned int address, basereg;
  struct minimal_symbol *stack_chk_guard;
  int offset;
  int is_thumb = arm_pc_is_thumb (gdbarch, pc);
  CORE_ADDR addr;

  /* Try to parse the instructions in Step 1.  */
  addr = arm_analyze_load_stack_chk_guard (pc, gdbarch,
					   &basereg, &offset);
  if (!addr)
    return pc;

  stack_chk_guard = lookup_minimal_symbol_by_pc (addr);
  /* If name of symbol doesn't start with '__stack_chk_guard', this
     instruction sequence is not for stack protector.  If symbol is
     removed, we conservatively think this sequence is for stack protector.  */
  if (stack_chk_guard
      && strncmp (SYMBOL_LINKAGE_NAME (stack_chk_guard), "__stack_chk_guard",
		  strlen ("__stack_chk_guard")) != 0)
    return pc;

  if (is_thumb)
    {
      unsigned int destreg;
      unsigned short insn
	= read_memory_unsigned_integer (pc + offset, 2, byte_order_for_code);

      /* Step 2: ldr Rd, [Rn, #immed], encoding T1.  */
      if ((insn & 0xf800) != 0x6800)
	return pc;
      /* The load must read through the register that holds the guard's
	 address (set up in Step 1).  */
      if (bits (insn, 3, 5) != basereg)
	return pc;
      destreg = bits (insn, 0, 2);

      insn = read_memory_unsigned_integer (pc + offset + 2, 2,
					   byte_order_for_code);
      /* Step 3: str Rd, [Rn, #immed], encoding T1.  */
      if ((insn & 0xf800) != 0x6000)
	return pc;
      /* The store must write the same register the guard was loaded to.  */
      if (destreg != bits (insn, 0, 2))
	return pc;
    }
  else
    {
      unsigned int destreg;
      unsigned int insn
	= read_memory_unsigned_integer (pc + offset, 4, byte_order_for_code);

      /* Step 2: ldr Rd, [Rn, #immed], encoding A1.  */
      if ((insn & 0x0e500000) != 0x04100000)
	return pc;
      if (bits (insn, 16, 19) != basereg)
	return pc;
      destreg = bits (insn, 12, 15);
      /* Step 3: str Rd, [Rn, #immed], encoding A1.  */
      insn = read_memory_unsigned_integer (pc + offset + 4,
					   4, byte_order_for_code);
      if ((insn & 0x0e500000) != 0x04000000)
	return pc;
      if (bits (insn, 12, 15) != destreg)
	return pc;
    }
  /* The size of total two instructions ldr/str is 4 on Thumb-2, while 8
     on arm.
*/
  if (is_thumb)
    return pc + offset + 4;
  else
    return pc + offset + 8;
}

/* Advance the PC across any function entry prologue instructions to
   reach some "real" code.

   The APCS (ARM Procedure Call Standard) defines the following
   prologue:

   mov ip, sp
   [stmfd sp!, {a1,a2,a3,a4}]
   stmfd sp!, {...,fp,ip,lr,pc}
   [stfe f7, [sp, #-12]!]
   [stfe f6, [sp, #-12]!]
   [stfe f5, [sp, #-12]!]
   [stfe f4, [sp, #-12]!]
   sub fp, ip, #nn @@ nn == 20 or 4 depending on second insn.  */

static CORE_ADDR
arm_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned long inst;
  CORE_ADDR skip_pc;
  CORE_ADDR func_addr, limit_pc;
  /* NOTE(review): SAL is never used in this function — candidate for
     removal; verify.  */
  struct symtab_and_line sal;

  /* See if we can determine the end of the prologue via the symbol table.
     If so, then return either PC, or the PC after the prologue, whichever
     is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
	= skip_prologue_using_sal (gdbarch, func_addr);
      struct symtab *s = find_pc_symtab (func_addr);

      /* Also skip past any stack-protector guard-load sequence that
	 follows the prologue proper.  */
      if (post_prologue_pc)
	post_prologue_pc
	  = arm_skip_stack_protector (post_prologue_pc, gdbarch);


      /* GCC always emits a line note before the prologue and another
	 one after, even if the two are at the same address or on the
	 same line.  Take advantage of this so that we do not need to
	 know every instruction that might appear in the prologue.  We
	 will have producer information for most binaries; if it is
	 missing (e.g. for -gstabs), assume the GNU tools.  */
      if (post_prologue_pc
	  && (s == NULL
	      || s->producer == NULL
	      || strncmp (s->producer, "GNU ", sizeof ("GNU ") - 1) == 0))
	return post_prologue_pc;

      if (post_prologue_pc != 0)
	{
	  CORE_ADDR analyzed_limit;

	  /* For non-GCC compilers, make sure the entire line is an
	     acceptable prologue; GDB will round this function's
	     return value up to the end of the following line so we
	     can not skip just part of a line (and we do not want to).

	     RealView does not treat the prologue specially, but does
	     associate prologue code with the opening brace; so this
	     lets us skip the first line if we think it is the opening
	     brace.  */
	  if (arm_pc_is_thumb (gdbarch, func_addr))
	    analyzed_limit = thumb_analyze_prologue (gdbarch, func_addr,
						     post_prologue_pc, NULL);
	  else
	    analyzed_limit = arm_analyze_prologue (gdbarch, func_addr,
						   post_prologue_pc, NULL);

	  if (analyzed_limit != post_prologue_pc)
	    return func_addr;

	  return post_prologue_pc;
	}
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to provide
     that bound, then use an arbitrary large number as the upper bound.  */
  /* Like arm_scan_prologue, stop no later than pc + 64.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 64;		/* Magic.  */


  /* Check if this is Thumb code.  */
  if (arm_pc_is_thumb (gdbarch, pc))
    return thumb_analyze_prologue (gdbarch, pc, limit_pc, NULL);

  /* ARM mode: recognize instructions typically found in prologues and
     skip them one at a time.  */
  for (skip_pc = pc; skip_pc < limit_pc; skip_pc += 4)
    {
      inst = read_memory_unsigned_integer (skip_pc, 4, byte_order_for_code);

      /* "mov ip, sp" is no longer a required part of the prologue.  */
      if (inst == 0xe1a0c00d)		/* mov ip, sp */
	continue;

      if ((inst & 0xfffff000) == 0xe28dc000)	/* add ip, sp #n */
	continue;

      if ((inst & 0xfffff000) == 0xe24dc000)	/* sub ip, sp #n */
	continue;

      /* Some prologues begin with "str lr, [sp, #-4]!".  */
      if (inst == 0xe52de004)		/* str lr, [sp, #-4]! */
	continue;

      if ((inst & 0xfffffff0) == 0xe92d0000)	/* stmfd sp!,{a1,a2,a3,a4} */
	continue;

      if ((inst & 0xfffff800) == 0xe92dd800)	/* stmfd sp!,{fp,ip,lr,pc} */
	continue;

      /* Any insns after this point may float into the code, if it makes
	 for better instruction scheduling, so we skip them only if we
	 find them, but still consider the function to be frame-ful.  */

      /* We may have either one sfmfd instruction here, or several stfe
	 insns, depending on the version of floating point code we
	 support.  */
      if ((inst & 0xffbf0fff) == 0xec2d0200)	/* sfmfd fn, <cnt>, [sp]! */
	continue;

      if ((inst & 0xffff8fff) == 0xed6d0103)	/* stfe fn, [sp, #-12]! */
	continue;

      if ((inst & 0xfffff000) == 0xe24cb000)	/* sub fp, ip, #nn */
	continue;

      if ((inst & 0xfffff000) == 0xe24dd000)	/* sub sp, sp, #nn */
	continue;

      if ((inst & 0xffffc000) == 0xe54b0000	/* strb r(0123),[r11,#-nn] */
	  || (inst & 0xffffc0f0) == 0xe14b00b0	/* strh r(0123),[r11,#-nn] */
	  || (inst & 0xffffc000) == 0xe50b0000)	/* str r(0123),[r11,#-nn] */
	continue;

      if ((inst & 0xffffc000) == 0xe5cd0000	/* strb r(0123),[sp,#nn] */
	  || (inst & 0xffffc0f0) == 0xe1cd00b0	/* strh r(0123),[sp,#nn] */
	  || (inst & 0xffffc000) == 0xe58d0000)	/* str r(0123),[sp,#nn] */
	continue;

      /* Un-recognized instruction; stop scanning.  */
      break;
    }

  return skip_pc;		/* End of prologue.
*/
}

/* *INDENT-OFF* */
/* Function: thumb_scan_prologue (helper function for arm_scan_prologue)
   This function decodes a Thumb function prologue to determine:
     1) the size of the stack frame
     2) which registers are saved on it
     3) the offsets of saved regs
     4) the offset from the stack pointer to the frame pointer

   A typical Thumb function prologue would create this stack frame
   (offsets relative to FP)
     old SP ->	24  stack parameters
		20  LR
		16  R7
     R7 ->	 0  local variables (16 bytes)
     SP ->	-12  additional stack space (12 bytes)
   The frame size would thus be 36 bytes, and the frame offset would be
   12 bytes.  The frame register is R7.

   The comments for thumb_skip_prolog() describe the algorithm we use
   to detect the end of the prolog.  */
/* *INDENT-ON* */

static void
thumb_scan_prologue (struct gdbarch *gdbarch, CORE_ADDR prev_pc,
		     CORE_ADDR block_addr, struct arm_prologue_cache *cache)
{
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  /* NOTE(review): CURRENT_PC is never used here — candidate for removal;
     verify.  */
  CORE_ADDR current_pc;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
				&prologue_end))
    {
      /* See comment in arm_scan_prologue for an explanation of
	 this heuristics.  */
      if (prologue_end > prologue_start + 64)
	{
	  prologue_end = prologue_start + 64;
	}
    }
  else
    /* We're in the boondocks: we have no idea where the start of the
       function is.  */
    return;

  /* Never scan past the PC we are unwinding from.  */
  prologue_end = min (prologue_end, prev_pc);

  thumb_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
}

/* Return 1 if THIS_INSTR might change control flow, 0 otherwise.  */

static int
arm_instruction_changes_pc (uint32_t this_instr)
{
  if (bits (this_instr, 28, 31) == INST_NV)
    /* Unconditional instructions.  */
    switch (bits (this_instr, 24, 27))
      {
      case 0xa:
      case 0xb:
	/* Branch with Link and change to Thumb.  */
	return 1;
      case 0xc:
      case 0xd:
      case 0xe:
	/* Coprocessor register transfer.  */
	if (bits (this_instr, 12, 15) == 15)
	  error (_("Invalid update to pc in instruction"));
	return 0;
      default:
	return 0;
      }
  else
    switch (bits (this_instr, 25, 27))
      {
      case 0x0:
	if (bits (this_instr, 23, 24) == 2 && bit (this_instr, 20) == 0)
	  {
	    /* Multiplies and extra load/stores.  */
	    if (bit (this_instr, 4) == 1 && bit (this_instr, 7) == 1)
	      /* Neither multiplies nor extension load/stores are allowed
		 to modify PC.  */
	      return 0;

	    /* Otherwise, miscellaneous instructions.  */

	    /* BX <reg>, BXJ <reg>, BLX <reg> */
	    if (bits (this_instr, 4, 27) == 0x12fff1
		|| bits (this_instr, 4, 27) == 0x12fff2
		|| bits (this_instr, 4, 27) == 0x12fff3)
	      return 1;

	    /* Other miscellaneous instructions are unpredictable if they
	       modify PC.  */
	    return 0;
	  }
	/* Data processing instruction.  Fall through.  */

      case 0x1:
	/* Data processing writing to PC changes control flow.  */
	if (bits (this_instr, 12, 15) == 15)
	  return 1;
	else
	  return 0;

      case 0x2:
      case 0x3:
	/* Media instructions and architecturally undefined instructions.  */
	if (bits (this_instr, 25, 27) == 3 && bit (this_instr, 4) == 1)
	  return 0;

	/* Stores.  */
	if (bit (this_instr, 20) == 0)
	  return 0;

	/* Loads.  */
	if (bits (this_instr, 12, 15) == ARM_PC_REGNUM)
	  return 1;
	else
	  return 0;

      case 0x4:
	/* Load/store multiple.  */
	if (bit (this_instr, 20) == 1 && bit (this_instr, 15) == 1)
	  return 1;
	else
	  return 0;

      case 0x5:
	/* Branch and branch with link.  */
	return 1;

      case 0x6:
      case 0x7:
	/* Coprocessor transfers or SWIs can not affect PC.
*/
	return 0;

      default:
	internal_error (__FILE__, __LINE__, _("bad value in switch"));
      }
}

/* Analyze an ARM mode prologue starting at PROLOGUE_START and
   continuing no further than PROLOGUE_END.  If CACHE is non-NULL,
   fill it in.  Return the first address not recognized as a prologue
   instruction.

   We recognize all the instructions typically found in ARM prologues,
   plus harmless instructions which can be skipped (either for analysis
   purposes, or a more restrictive set that can be skipped when finding
   the end of the prologue).  */

static CORE_ADDR
arm_analyze_prologue (struct gdbarch *gdbarch,
		      CORE_ADDR prologue_start, CORE_ADDR prologue_end,
		      struct arm_prologue_cache *cache)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int regno;
  CORE_ADDR offset, current_pc;
  pv_t regs[ARM_FPS_REGNUM];
  struct pv_area *stack;
  struct cleanup *back_to;
  int framereg, framesize;
  CORE_ADDR unrecognized_pc = 0;

  /* Search the prologue looking for instructions that set up the
     frame pointer, adjust the stack pointer, and save registers.

     Be careful, however, and if it doesn't look like a prologue,
     don't try to scan it.  If, for instance, a frameless function
     begins with stmfd sp!, then we will tell ourselves there is
     a frame, which will confuse stack traceback, as well as "finish"
     and other operations that rely on a knowledge of the stack
     traceback.  */

  /* Symbolic execution: each register starts as "its own entry value".  */
  for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
    regs[regno] = pv_register (regno, 0);
  stack = make_pv_area (ARM_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  back_to = make_cleanup_free_pv_area (stack);

  for (current_pc = prologue_start;
       current_pc < prologue_end;
       current_pc += 4)
    {
      unsigned int insn
	= read_memory_unsigned_integer (current_pc, 4, byte_order_for_code);

      if (insn == 0xe1a0c00d)		/* mov ip, sp */
	{
	  regs[ARM_IP_REGNUM] = regs[ARM_SP_REGNUM];
	  continue;
	}
      else if ((insn & 0xfff00000) == 0xe2800000	/* add Rd, Rn, #n */
	       && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
	{
	  unsigned imm = insn & 0xff;		/* immediate value */
	  unsigned rot = (insn & 0xf00) >> 7;	/* rotate amount */
	  int rd = bits (insn, 12, 15);
	  /* Decode the ARM "rotated immediate" operand.  */
	  imm = (imm >> rot) | (imm << (32 - rot));
	  regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], imm);
	  continue;
	}
      else if ((insn & 0xfff00000) == 0xe2400000	/* sub Rd, Rn, #n */
	       && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
	{
	  unsigned imm = insn & 0xff;		/* immediate value */
	  unsigned rot = (insn & 0xf00) >> 7;	/* rotate amount */
	  int rd = bits (insn, 12, 15);
	  imm = (imm >> rot) | (imm << (32 - rot));
	  regs[rd] = pv_add_constant (regs[bits (insn, 16, 19)], -imm);
	  continue;
	}
      else if ((insn & 0xffff0fff) == 0xe52d0004)	/* str Rd,
							   [sp, #-4]! */
	{
	  if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
	    break;
	  regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -4);
	  pv_area_store (stack, regs[ARM_SP_REGNUM], 4,
			 regs[bits (insn, 12, 15)]);
	  continue;
	}
      else if ((insn & 0xffff0000) == 0xe92d0000)
	/* stmfd sp!, {..., fp, ip, lr, pc}
	   or
	   stmfd sp!, {a1, a2, a3, a4}  */
	{
	  int mask = insn & 0xffff;

	  if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
	    break;

	  /* Calculate offsets of saved registers.  Registers are pushed
	     highest-numbered first, at descending addresses.  */
	  for (regno = ARM_PC_REGNUM; regno >= 0; regno--)
	    if (mask & (1 << regno))
	      {
		regs[ARM_SP_REGNUM]
		  = pv_add_constant (regs[ARM_SP_REGNUM], -4);
		pv_area_store (stack, regs[ARM_SP_REGNUM], 4, regs[regno]);
	      }
	}
      else if ((insn & 0xffff0000) == 0xe54b0000	/* strb rx,[r11,#-n] */
	       || (insn & 0xffff00f0) == 0xe14b00b0	/* strh rx,[r11,#-n] */
	       || (insn & 0xffffc000) == 0xe50b0000)	/* str rx,[r11,#-n] */
	{
	  /* No need to add this to saved_regs -- it's just an arg reg.  */
	  continue;
	}
      else if ((insn & 0xffff0000) == 0xe5cd0000	/* strb rx,[sp,#n] */
	       || (insn & 0xffff00f0) == 0xe1cd00b0	/* strh rx,[sp,#n] */
	       || (insn & 0xffffc000) == 0xe58d0000)	/* str rx,[sp,#n] */
	{
	  /* No need to add this to saved_regs -- it's just an arg reg.  */
	  continue;
	}
      else if ((insn & 0xfff00000) == 0xe8800000	/* stm Rn,
							   { registers } */
	       && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
	{
	  /* No need to add this to saved_regs -- it's just arg regs.  */
	  continue;
	}
      else if ((insn & 0xfffff000) == 0xe24cb000)	/* sub fp, ip #n */
	{
	  unsigned imm = insn & 0xff;		/* immediate value */
	  unsigned rot = (insn & 0xf00) >> 7;	/* rotate amount */
	  imm = (imm >> rot) | (imm << (32 - rot));
	  regs[ARM_FP_REGNUM] = pv_add_constant (regs[ARM_IP_REGNUM], -imm);
	}
      else if ((insn & 0xfffff000) == 0xe24dd000)	/* sub sp, sp #n */
	{
	  unsigned imm = insn & 0xff;		/* immediate value */
	  unsigned rot = (insn & 0xf00) >> 7;	/* rotate amount */
	  imm = (imm >> rot) | (imm << (32 - rot));
	  regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -imm);
	}
      else if ((insn & 0xffff7fff) == 0xed6d0103	/* stfe f?,
							   [sp, -#c]! */
	       && gdbarch_tdep (gdbarch)->have_fpa_registers)
	{
	  if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
	    break;

	  regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
	  regno = ARM_F0_REGNUM + ((insn >> 12) & 0x07);
	  pv_area_store (stack, regs[ARM_SP_REGNUM], 12, regs[regno]);
	}
      else if ((insn & 0xffbf0fff) == 0xec2d0200	/* sfmfd f0, 4,
							   [sp!] */
	       && gdbarch_tdep (gdbarch)->have_fpa_registers)
	{
	  int n_saved_fp_regs;
	  unsigned int fp_start_reg, fp_bound_reg;

	  if (pv_area_store_would_trash (stack, regs[ARM_SP_REGNUM]))
	    break;

	  /* The register count is encoded in the N0/N1 bits.  */
	  if ((insn & 0x800) == 0x800)		/* N0 is set */
	    {
	      if ((insn & 0x40000) == 0x40000)	/* N1 is set */
		n_saved_fp_regs = 3;
	      else
		n_saved_fp_regs = 1;
	    }
	  else
	    {
	      if ((insn & 0x40000) == 0x40000)	/* N1 is set */
		n_saved_fp_regs = 2;
	      else
		n_saved_fp_regs = 4;
	    }

	  fp_start_reg = ARM_F0_REGNUM + ((insn >> 12) & 0x7);
	  fp_bound_reg = fp_start_reg + n_saved_fp_regs;
	  for (; fp_start_reg < fp_bound_reg; fp_start_reg++)
	    {
	      regs[ARM_SP_REGNUM] = pv_add_constant (regs[ARM_SP_REGNUM], -12);
	      /* NOTE(review): FP_START_REG is incremented both here and in
		 the for-update, so only every other FP register gets
		 recorded — this looks like a bug; verify against newer
		 versions of this code.  */
	      pv_area_store (stack, regs[ARM_SP_REGNUM], 12,
			     regs[fp_start_reg++]);
	    }
	}
      else if ((insn & 0xff000000) == 0xeb000000 && cache == NULL) /* bl */
	{
	  /* Allow some special function calls when skipping the
	     prologue; GCC generates these before storing arguments to
	     the stack.  */
	  CORE_ADDR dest = BranchDest (current_pc, insn);

	  if (skip_prologue_function (gdbarch, dest, 0))
	    continue;
	  else
	    break;
	}
      else if ((insn & 0xf0000000) != 0xe0000000)
	break;			/* Condition not true, exit early.  */
      else if (arm_instruction_changes_pc (insn))
	/* Don't scan past anything that might change control flow.  */
	break;
      else if ((insn & 0xfe500000) == 0xe8100000	/* ldm */
	       && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
	/* Ignore block loads from the stack, potentially copying
	   parameters from memory.  */
	continue;
      else if ((insn & 0xfc500000) == 0xe4100000
	       && pv_is_register (regs[bits (insn, 16, 19)], ARM_SP_REGNUM))
	/* Similarly ignore single loads from the stack.  */
	continue;
      else if ((insn & 0xffff0ff0) == 0xe1a00000)
	/* MOV Rd, Rm.
Skip register copies, i.e. saves to another
	   register instead of the stack.  */
	continue;
      else
	{
	  /* The optimizer might shove anything into the prologue,
	     so we just skip what we don't recognize.  */
	  unrecognized_pc = current_pc;
	  continue;
	}
    }

  if (unrecognized_pc == 0)
    unrecognized_pc = current_pc;

  /* The frame size is just the distance from the frame register
     to the original stack pointer.  */
  if (pv_is_register (regs[ARM_FP_REGNUM], ARM_SP_REGNUM))
    {
      /* Frame pointer is fp.  */
      framereg = ARM_FP_REGNUM;
      framesize = -regs[ARM_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[ARM_SP_REGNUM], ARM_SP_REGNUM))
    {
      /* Try the stack pointer... this is a bit desperate.  */
      framereg = ARM_SP_REGNUM;
      framesize = -regs[ARM_SP_REGNUM].k;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      framereg = -1;
      framesize = 0;
    }

  if (cache)
    {
      cache->framereg = framereg;
      cache->framesize = framesize;

      /* Record the stack offsets (relative to entry SP) of every saved
	 register found by the scan.  */
      for (regno = 0; regno < ARM_FPS_REGNUM; regno++)
	if (pv_area_find_reg (stack, gdbarch, regno, &offset))
	  cache->saved_regs[regno].addr = offset;
    }

  if (arm_debug)
    fprintf_unfiltered (gdb_stdlog, "Prologue scan stopped at %s\n",
			paddress (gdbarch, unrecognized_pc));

  do_cleanups (back_to);
  return unrecognized_pc;
}

/* Scan the prologue of the function containing THIS_FRAME's PC and
   fill CACHE with the frame register, frame size and saved-register
   offsets, dispatching to the Thumb or ARM analyzer as appropriate.  */

static void
arm_scan_prologue (struct frame_info *this_frame,
		   struct arm_prologue_cache *cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* NOTE(review): REGNO, CURRENT_PC, REGS, STACK, BACK_TO and OFFSET are
     never used in this function — likely leftovers from before the body
     was factored into arm_analyze_prologue; candidates for removal.  */
  int regno;
  CORE_ADDR prologue_start, prologue_end, current_pc;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  pv_t regs[ARM_FPS_REGNUM];
  struct pv_area *stack;
  struct cleanup *back_to;
  CORE_ADDR offset;

  /* Assume there is no frame until proven otherwise.  */
  cache->framereg = ARM_SP_REGNUM;
  cache->framesize = 0;

  /* Check for Thumb prologue.  */
  if (arm_frame_is_thumb (this_frame))
    {
      thumb_scan_prologue (gdbarch, prev_pc, block_addr, cache);
      return;
    }

  /* Find the function prologue.  If we can't find the function in
     the symbol table, peek in the stack frame to find the PC.  */
  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
				&prologue_end))
    {
      /* One way to find the end of the prologue (which works well
	 for unoptimized code) is to do the following:

	    struct symtab_and_line sal = find_pc_line (prologue_start, 0);

	    if (sal.line == 0)
	      prologue_end = prev_pc;
	    else if (sal.end < prologue_end)
	      prologue_end = sal.end;

	 This mechanism is very accurate so long as the optimizer
	 doesn't move any instructions from the function body into the
	 prologue.  If this happens, sal.end will be the last
	 instruction in the first hunk of prologue code just before
	 the first instruction that the scheduler has moved from
	 the body to the prologue.

	 In order to make sure that we scan all of the prologue
	 instructions, we use a slightly less accurate mechanism which
	 may scan more than necessary.  To help compensate for this
	 lack of accuracy, the prologue scanning loop below contains
	 several clauses which'll cause the loop to terminate early if
	 an implausible prologue instruction is encountered.

	 The expression

	      prologue_start + 64

	 is a suitable endpoint since it accounts for the largest
	 possible prologue plus up to five instructions inserted by
	 the scheduler.  */

      if (prologue_end > prologue_start + 64)
	{
	  prologue_end = prologue_start + 64;	/* See above.  */
	}
    }
  else
    {
      /* We have no symbol information.  Our only option is to assume this
	 function has a standard stack frame and the normal frame register.
	 Then, we can find the value of our frame pointer on entrance to
	 the callee (or at the present moment if this is the innermost frame).
	 The value stored there should be the address of the stmfd + 8.  */
      CORE_ADDR frame_loc;
      LONGEST return_value;

      frame_loc = get_frame_register_unsigned (this_frame, ARM_FP_REGNUM);
      if (!safe_read_memory_integer (frame_loc, 4, byte_order, &return_value))
	return;
      else
	{
	  prologue_start = gdbarch_addr_bits_remove
			     (gdbarch, return_value) - 8;
	  prologue_end = prologue_start + 64;	/* See above.  */
	}
    }

  /* Never scan past the PC we are unwinding from.  */
  if (prev_pc < prologue_end)
    prologue_end = prev_pc;

  arm_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
}

/* Allocate and fill an arm_prologue_cache for THIS_FRAME by scanning
   its prologue; compute PREV_SP (the caller's SP) and rebase the
   saved-register offsets into absolute addresses.  */

static struct arm_prologue_cache *
arm_make_prologue_cache (struct frame_info *this_frame)
{
  int reg;
  struct arm_prologue_cache *cache;
  CORE_ADDR unwound_fp;

  cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);

  arm_scan_prologue (this_frame, cache);

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return cache;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by arm_scan_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  return cache;
}

/* Our frame ID for a normal frame is the current function's starting PC
   and the caller's SP when we were called.
*/

static void
arm_prologue_this_id (struct frame_info *this_frame,
		      void **this_cache,
		      struct frame_id *this_id)
{
  struct arm_prologue_cache *cache;
  struct frame_id id;
  CORE_ADDR pc, func;

  if (*this_cache == NULL)
    *this_cache = arm_make_prologue_cache (this_frame);
  cache = *this_cache;

  /* This is meant to halt the backtrace at "_start".  Leaving *this_id
     untouched yields the default outer_frame_id, ending the backtrace.  */
  pc = get_frame_pc (this_frame);
  if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return;

  /* If we've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return;

  /* Use function start address as part of the frame ID.  If we cannot
     identify the start address (due to missing symbol information),
     fall back to just using the current PC.  */
  func = get_frame_func (this_frame);
  if (!func)
    func = pc;

  id = frame_id_build (cache->prev_sp, func);
  *this_id = id;
}

/* Implementation of the prev_register method for the prologue-analysis
   unwinder.  PC, SP and CPSR need special reconstruction; everything
   else comes straight from the saved-register table.  */

static struct value *
arm_prologue_prev_register (struct frame_info *this_frame,
			    void **this_cache,
			    int prev_regnum)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct arm_prologue_cache *cache;

  if (*this_cache == NULL)
    *this_cache = arm_make_prologue_cache (this_frame);
  cache = *this_cache;

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  Also
     strip the saved T bit.  A valid LR may have the low bit set, but
     a valid PC never does.  */
  if (prev_regnum == ARM_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum,
					arm_addr_bits_remove (gdbarch, lr));
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the call.
     The value was already reconstructed into PREV_SP.  */
  if (prev_regnum == ARM_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum, cache->prev_sp);

  /* The CPSR may have been changed by the call instruction and by the
     called function.  The only bit we can reconstruct is the T bit,
     by checking the low bit of LR as of the call.  This is a reliable
     indicator of Thumb-ness except for some ARM v4T pre-interworking
     Thumb code, which could get away with a clear low bit as long as
     the called function did not use bx.  Guess that all other
     bits are unchanged; the condition flags are presumably lost,
     but the processor status is likely valid.  */
  if (prev_regnum == ARM_PS_REGNUM)
    {
      CORE_ADDR lr, cpsr;
      ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);

      cpsr = get_frame_register_unsigned (this_frame, prev_regnum);
      lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
      if (IS_THUMB_ADDR (lr))
	cpsr |= t_bit;
      else
	cpsr &= ~t_bit;
      return frame_unwind_got_constant (this_frame, prev_regnum, cpsr);
    }

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}

/* The prologue-analysis unwinder; the fallback when neither DWARF CFI
   nor exception-table information is available.  */

struct frame_unwind arm_prologue_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  arm_prologue_this_id,
  arm_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Maintain a list of ARM exception table entries per objfile, similar to the
   list of mapping symbols.
We only cache entries for standard ARM-defined
   personality routines; the cache will contain only the frame unwinding
   instructions associated with the entry (not the descriptors).  */

static const struct objfile_data *arm_exidx_data_key;

/* One cached exception-table entry: the section-relative start address
   of the region it covers, and the normalized unwind instructions
   (NULL for EXIDX_CANTUNWIND regions).  */
struct arm_exidx_entry
{
  bfd_vma addr;
  gdb_byte *entry;
};
typedef struct arm_exidx_entry arm_exidx_entry_s;
DEF_VEC_O(arm_exidx_entry_s);

/* Per-objfile exception-table cache: one vector of entries per BFD
   section, indexed by section index.  */
struct arm_exidx_data
{
  VEC(arm_exidx_entry_s) **section_maps;
};

/* Objfile-data destructor: release the per-section entry vectors.
   The entries' instruction buffers live on the objfile obstack and
   need no separate freeing.  */

static void
arm_exidx_data_free (struct objfile *objfile, void *arg)
{
  struct arm_exidx_data *data = arg;
  unsigned int i;

  for (i = 0; i < objfile->obfd->section_count; i++)
    VEC_free (arm_exidx_entry_s, data->section_maps[i]);
}

/* Ordering predicate for VEC_lower_bound: entries sort by region
   start address.  */

static inline int
arm_compare_exidx_entries (const struct arm_exidx_entry *lhs,
			   const struct arm_exidx_entry *rhs)
{
  return lhs->addr < rhs->addr;
}

/* Return the allocated (SEC_ALLOC) section of OBJFILE whose VMA range
   contains VMA, or NULL if no such section exists.  */

static struct obj_section *
arm_obj_section_from_vma (struct objfile *objfile, bfd_vma vma)
{
  struct obj_section *osect;

  ALL_OBJFILE_OSECTIONS (objfile, osect)
    if (bfd_get_section_flags (objfile->obfd,
			       osect->the_bfd_section) & SEC_ALLOC)
      {
	bfd_vma start, size;
	start = bfd_get_section_vma (objfile->obfd, osect->the_bfd_section);
	size = bfd_get_section_size (osect->the_bfd_section);

	if (start <= vma && vma < start + size)
	  return osect;
      }

  return NULL;
}

/* Parse contents of exception table and exception index sections
   of OBJFILE, and fill in the exception table entry cache.

   For each entry that refers to a standard ARM-defined personality
   routine, extract the frame unwinding instructions (from either
   the index or the table section).
The unwinding instructions
   are normalized by:
   - extracting them from the rest of the table data
   - converting to host endianness
   - appending the implicit 0xb0 ("Finish") code

   The extracted and normalized instructions are stored for later
   retrieval by the arm_find_exidx_entry routine.  */

static void
arm_exidx_new_objfile (struct objfile *objfile)
{
  struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
  struct arm_exidx_data *data;
  asection *exidx, *extab;
  bfd_vma exidx_vma = 0, extab_vma = 0;
  bfd_size_type exidx_size = 0, extab_size = 0;
  gdb_byte *exidx_data = NULL, *extab_data = NULL;
  LONGEST i;

  /* If we've already touched this file, do nothing.  */
  if (!objfile || objfile_data (objfile, arm_exidx_data_key) != NULL)
    return;

  /* Read contents of exception table and index.  */
  exidx = bfd_get_section_by_name (objfile->obfd, ".ARM.exidx");
  if (exidx)
    {
      exidx_vma = bfd_section_vma (objfile->obfd, exidx);
      exidx_size = bfd_get_section_size (exidx);
      exidx_data = xmalloc (exidx_size);
      make_cleanup (xfree, exidx_data);

      if (!bfd_get_section_contents (objfile->obfd, exidx,
				     exidx_data, 0, exidx_size))
	{
	  do_cleanups (cleanups);
	  return;
	}
    }

  extab = bfd_get_section_by_name (objfile->obfd, ".ARM.extab");
  if (extab)
    {
      extab_vma = bfd_section_vma (objfile->obfd, extab);
      extab_size = bfd_get_section_size (extab);
      extab_data = xmalloc (extab_size);
      make_cleanup (xfree, extab_data);

      if (!bfd_get_section_contents (objfile->obfd, extab,
				     extab_data, 0, extab_size))
	{
	  do_cleanups (cleanups);
	  return;
	}
    }

  /* Allocate exception table data structure.  */
  data = OBSTACK_ZALLOC (&objfile->objfile_obstack, struct arm_exidx_data);
  set_objfile_data (objfile, arm_exidx_data_key, data);
  data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack,
				       objfile->obfd->section_count,
				       VEC(arm_exidx_entry_s) *);

  /* Fill in exception table.  Each index entry is a pair of 32-bit
     words: a prel31 offset to the function start, and either an
     inline entry, a prel31 pointer into .ARM.extab, or the
     EXIDX_CANTUNWIND marker.  */
  for (i = 0; i < exidx_size / 8; i++)
    {
      struct arm_exidx_entry new_exidx_entry;
      bfd_vma idx = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8);
      bfd_vma val = bfd_h_get_32 (objfile->obfd, exidx_data + i * 8 + 4);
      bfd_vma addr = 0, word = 0;
      int n_bytes = 0, n_words = 0;
      struct obj_section *sec;
      gdb_byte *entry = NULL;

      /* Extract address of start of function: sign-extend the prel31
	 offset and make it relative to this entry's own address.  */
      idx = ((idx & 0x7fffffff) ^ 0x40000000) - 0x40000000;
      idx += exidx_vma + i * 8;

      /* Find section containing function and compute section offset.  */
      sec = arm_obj_section_from_vma (objfile, idx);
      if (sec == NULL)
	continue;
      idx -= bfd_get_section_vma (objfile->obfd, sec->the_bfd_section);

      /* Determine address of exception table entry.  */
      if (val == 1)
	{
	  /* EXIDX_CANTUNWIND -- no exception table entry present.  */
	}
      else if ((val & 0xff000000) == 0x80000000)
	{
	  /* Exception table entry embedded in .ARM.exidx
	     -- must be short form.  */
	  word = val;
	  n_bytes = 3;
	}
      else if (!(val & 0x80000000))
	{
	  /* Exception table entry in .ARM.extab.  Decode the prel31
	     offset relative to the second word of this index entry.  */
	  addr = ((val & 0x7fffffff) ^ 0x40000000) - 0x40000000;
	  addr += exidx_vma + i * 8 + 4;

	  if (addr >= extab_vma && addr + 4 <= extab_vma + extab_size)
	    {
	      word = bfd_h_get_32 (objfile->obfd,
				   extab_data + addr - extab_vma);
	      addr += 4;

	      if ((word & 0xff000000) == 0x80000000)
		{
		  /* Short form.  */
		  n_bytes = 3;
		}
	      else if ((word & 0xff000000) == 0x81000000
		       || (word & 0xff000000) == 0x82000000)
		{
		  /* Long form.  */
		  n_bytes = 2;
		  n_words = ((word >> 16) & 0xff);
		}
	      else if (!(word & 0x80000000))
		{
		  bfd_vma pers;
		  struct obj_section *pers_sec;
		  int gnu_personality = 0;

		  /* Custom personality routine.  */
		  pers = ((word & 0x7fffffff) ^ 0x40000000) - 0x40000000;
		  pers = UNMAKE_THUMB_ADDR (pers + addr - 4);

		  /* Check whether we've got one of the variants of the
		     GNU personality routines.  */
		  pers_sec = arm_obj_section_from_vma (objfile, pers);
		  if (pers_sec)
		    {
		      static const char *personality[] =
			{
			  "__gcc_personality_v0",
			  "__gxx_personality_v0",
			  "__gcj_personality_v0",
			  "__gnu_objc_personality_v0",
			  NULL
			};

		      CORE_ADDR pc = pers + obj_section_offset (pers_sec);
		      int k;

		      for (k = 0; personality[k]; k++)
			if (lookup_minimal_symbol_by_pc_name
			      (pc, personality[k], objfile))
			  {
			    gnu_personality = 1;
			    break;
			  }
		    }

		  /* If so, the next word contains a word count in the high
		     byte, followed by the same unwind instructions as the
		     pre-defined forms.  */
		  if (gnu_personality
		      && addr + 4 <= extab_vma + extab_size)
		    {
		      word = bfd_h_get_32 (objfile->obfd,
					   extab_data + addr - extab_vma);
		      addr += 4;
		      n_bytes = 3;
		      n_words = ((word >> 24) & 0xff);
		    }
		}
	    }
	}

      /* Sanity check address.  */
      if (n_words)
	if (addr < extab_vma || addr + 4 * n_words > extab_vma + extab_size)
	  n_words = n_bytes = 0;

      /* The unwind instructions reside in WORD (only the N_BYTES least
	 significant bytes are valid), followed by N_WORDS words in the
	 extab section starting at ADDR.  */
      if (n_bytes || n_words)
	{
	  gdb_byte *p = entry = obstack_alloc (&objfile->objfile_obstack,
					       n_bytes + n_words * 4 + 1);

	  while (n_bytes--)
	    *p++ = (gdb_byte) ((word >> (8 * n_bytes)) & 0xff);

	  while (n_words--)
	    {
	      word = bfd_h_get_32 (objfile->obfd,
				   extab_data + addr - extab_vma);
	      addr += 4;

	      *p++ = (gdb_byte) ((word >> 24) & 0xff);
	      *p++ = (gdb_byte) ((word >> 16) & 0xff);
	      *p++ = (gdb_byte) ((word >> 8) & 0xff);
	      *p++ = (gdb_byte) (word & 0xff);
	    }

	  /* Implied "Finish" to terminate the list.  */
	  *p++ = 0xb0;
	}

      /* Push entry onto vector.  They are guaranteed to always
	 appear in order of increasing addresses.  */
      new_exidx_entry.addr = idx;
      new_exidx_entry.entry = entry;
      VEC_safe_push (arm_exidx_entry_s,
		     data->section_maps[sec->the_bfd_section->index],
		     &new_exidx_entry);
    }

  do_cleanups (cleanups);
}

/* Search for the exception table entry covering MEMADDR.  If one is found,
   return a pointer to its data.  Otherwise, return 0.  If START is non-NULL,
   set *START to the start of the region covered by this entry.  */

static gdb_byte *
arm_find_exidx_entry (CORE_ADDR memaddr, CORE_ADDR *start)
{
  struct obj_section *sec;

  sec = find_pc_section (memaddr);
  if (sec != NULL)
    {
      struct arm_exidx_data *data;
      VEC(arm_exidx_entry_s) *map;
      struct arm_exidx_entry map_key = { memaddr - obj_section_addr (sec), 0 };
      unsigned int idx;

      data = objfile_data (sec->objfile, arm_exidx_data_key);
      if (data != NULL)
	{
	  map = data->section_maps[sec->the_bfd_section->index];
	  if (!VEC_empty (arm_exidx_entry_s, map))
	    {
	      struct arm_exidx_entry *map_sym;

	      idx = VEC_lower_bound (arm_exidx_entry_s, map, &map_key,
				     arm_compare_exidx_entries);

	      /* VEC_lower_bound finds the earliest ordered insertion
		 point.  If the following symbol starts at this exact
		 address, we use that; otherwise, the preceding
		 exception table entry covers this address.  */
	      if (idx < VEC_length (arm_exidx_entry_s, map))
		{
		  map_sym = VEC_index (arm_exidx_entry_s, map, idx);
		  if (map_sym->addr == map_key.addr)
		    {
		      if (start)
			*start = map_sym->addr + obj_section_addr (sec);
		      return map_sym->entry;
		    }
		}

	      if (idx > 0)
		{
		  map_sym = VEC_index (arm_exidx_entry_s, map, idx - 1);
		  if (start)
		    *start = map_sym->addr + obj_section_addr (sec);
		  return map_sym->entry;
		}
	    }
	}
    }

  return NULL;
}

/* Given the current frame THIS_FRAME, and its associated frame unwinding
   instruction list from the ARM exception table entry ENTRY, allocate and
   return a prologue cache structure describing how to unwind this frame.

   Return NULL if the unwinding instruction list contains a "spare",
   "reserved" or "refuse to unwind" instruction as defined in section
   "9.3 Frame unwinding instructions" of the "Exception Handling ABI
   for the ARM Architecture" document.  */

static struct arm_prologue_cache *
arm_exidx_fill_cache (struct frame_info *this_frame, gdb_byte *entry)
{
  CORE_ADDR vsp = 0;		/* Virtual stack pointer being simulated.  */
  int vsp_valid = 0;		/* Zero when VSP must be (re-)fetched.  */

  struct arm_prologue_cache *cache;
  cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);

  for (;;)
    {
      gdb_byte insn;

      /* Whenever we reload SP, we actually have to retrieve its
	 actual value in the current frame.
*/
      if (!vsp_valid)
	{
	  if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
	    {
	      int reg = cache->saved_regs[ARM_SP_REGNUM].realreg;
	      vsp = get_frame_register_unsigned (this_frame, reg);
	    }
	  else
	    {
	      CORE_ADDR addr = cache->saved_regs[ARM_SP_REGNUM].addr;
	      vsp = get_frame_memory_unsigned (this_frame, addr, 4);
	    }

	  vsp_valid = 1;
	}

      /* Decode next unwind instruction.  */
      insn = *entry++;

      if ((insn & 0xc0) == 0)
	{
	  /* 0x00-0x3f: vsp = vsp + (imm6 << 2) + 4.  */
	  int offset = insn & 0x3f;
	  vsp += (offset << 2) + 4;
	}
      else if ((insn & 0xc0) == 0x40)
	{
	  /* 0x40-0x7f: vsp = vsp - (imm6 << 2) - 4.  */
	  int offset = insn & 0x3f;
	  vsp -= (offset << 2) + 4;
	}
      else if ((insn & 0xf0) == 0x80)
	{
	  int mask = ((insn & 0xf) << 8) | *entry++;
	  int i;

	  /* The special case of an all-zero mask identifies
	     "Refuse to unwind".  We return NULL to fall back
	     to the prologue analyzer.  */
	  if (mask == 0)
	    return NULL;

	  /* Pop registers r4..r15 under mask.  */
	  for (i = 0; i < 12; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[4 + i].addr = vsp;
		vsp += 4;
	      }

	  /* Special-case popping SP -- we need to reload vsp.  */
	  if (mask & (1 << (ARM_SP_REGNUM - 4)))
	    vsp_valid = 0;
	}
      else if ((insn & 0xf0) == 0x90)
	{
	  int reg = insn & 0xf;

	  /* Reserved cases.  */
	  if (reg == ARM_SP_REGNUM || reg == ARM_PC_REGNUM)
	    return NULL;

	  /* Set SP from another register and mark VSP for reload.  */
	  cache->saved_regs[ARM_SP_REGNUM] = cache->saved_regs[reg];
	  vsp_valid = 0;
	}
      else if ((insn & 0xf0) == 0xa0)
	{
	  int count = insn & 0x7;
	  int pop_lr = (insn & 0x8) != 0;
	  int i;

	  /* Pop r4..r[4+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[4 + i].addr = vsp;
	      vsp += 4;
	    }

	  /* If indicated by flag, pop LR as well.  */
	  if (pop_lr)
	    {
	      cache->saved_regs[ARM_LR_REGNUM].addr = vsp;
	      vsp += 4;
	    }
	}
      else if (insn == 0xb0)
	{
	  /* "Finish".  We could only have updated PC by popping into it;
	     if so, it will show up as address.  Otherwise, copy LR into
	     PC.  */
	  if (!trad_frame_addr_p (cache->saved_regs, ARM_PC_REGNUM))
	    cache->saved_regs[ARM_PC_REGNUM]
	      = cache->saved_regs[ARM_LR_REGNUM];

	  /* We're done.  */
	  break;
	}
      else if (insn == 0xb1)
	{
	  int mask = *entry++;
	  int i;

	  /* All-zero mask and mask >= 16 is "spare".  */
	  if (mask == 0 || mask >= 16)
	    return NULL;

	  /* Pop r0..r3 under mask.  */
	  for (i = 0; i < 4; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[i].addr = vsp;
		vsp += 4;
	      }
	}
      else if (insn == 0xb2)
	{
	  /* vsp = vsp + 0x204 + (uleb128 << 2).  */
	  ULONGEST offset = 0;
	  unsigned shift = 0;

	  do
	    {
	      offset |= (*entry & 0x7f) << shift;
	      shift += 7;
	    }
	  while (*entry++ & 0x80);

	  vsp += 0x204 + (offset << 2);
	}
      else if (insn == 0xb3)
	{
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers D0..D15 are valid here.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop VFP double-precision registers D[start]..D[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }

	  /* Add an extra 4 bytes for FSTMFDX-style stack.  */
	  vsp += 4;
	}
      else if ((insn & 0xf8) == 0xb8)
	{
	  int count = insn & 0x7;
	  int i;

	  /* Pop VFP double-precision registers D[8]..D[8+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
	      vsp += 8;
	    }

	  /* Add an extra 4 bytes for FSTMFDX-style stack.  */
	  vsp += 4;
	}
      else if (insn == 0xc6)
	{
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers WR0..WR15 are valid.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop iwmmx registers WR[start]..WR[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_WR0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc7)
	{
	  int mask = *entry++;
	  int i;

	  /* All-zero mask and mask >= 16 is "spare".  */
	  if (mask == 0 || mask >= 16)
	    return NULL;

	  /* Pop iwmmx general-purpose registers WCGR0..WCGR3 under mask.  */
	  for (i = 0; i < 4; i++)
	    if (mask & (1 << i))
	      {
		cache->saved_regs[ARM_WCGR0_REGNUM + i].addr = vsp;
		vsp += 4;
	      }
	}
      else if ((insn & 0xf8) == 0xc0)
	{
	  int count = insn & 0x7;
	  int i;

	  /* Pop iwmmx registers WR[10]..WR[10+count].  (0xc6 and 0xc7
	     were already handled above, so this covers 0xc0..0xc5.)  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_WR0_REGNUM + 10 + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc8)
	{
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Only registers D0..D31 are valid.  */
	  if (start + count >= 16)
	    return NULL;

	  /* Pop VFP double-precision registers
	     D[16+start]..D[16+start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 16 + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if (insn == 0xc9)
	{
	  int start = *entry >> 4;
	  int count = (*entry++) & 0xf;
	  int i;

	  /* Pop VFP double-precision registers D[start]..D[start+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + start + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else if ((insn & 0xf8) == 0xd0)
	{
	  int count = insn & 0x7;
	  int i;

	  /* Pop VFP double-precision registers D[8]..D[8+count].  */
	  for (i = 0; i <= count; i++)
	    {
	      cache->saved_regs[ARM_D0_REGNUM + 8 + i].addr = vsp;
	      vsp += 8;
	    }
	}
      else
	{
	  /* Everything else is "spare".  */
	  return NULL;
	}
    }

  /* If we restore SP from a register, assume this was the frame register.
     Otherwise just fall back to SP as frame register.  */
  if (trad_frame_realreg_p (cache->saved_regs, ARM_SP_REGNUM))
    cache->framereg = cache->saved_regs[ARM_SP_REGNUM].realreg;
  else
    cache->framereg = ARM_SP_REGNUM;

  /* Determine offset to previous frame.  */
  cache->framesize
    = vsp - get_frame_register_unsigned (this_frame, cache->framereg);

  /* We already got the previous SP.  */
  cache->prev_sp = vsp;

  return cache;
}

/* Unwinding via ARM exception table entries.  Note that the sniffer
   already computes a filled-in prologue cache, which is then used
   with the same arm_prologue_this_id and arm_prologue_prev_register
   routines also used for prologue-parsing based unwinding.  */

static int
arm_exidx_unwind_sniffer (const struct frame_unwind *self,
			  struct frame_info *this_frame,
			  void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  CORE_ADDR addr_in_block, exidx_region, func_start;
  struct arm_prologue_cache *cache;
  gdb_byte *entry;

  /* See if we have an ARM exception table entry covering this address.  */
  addr_in_block = get_frame_address_in_block (this_frame);
  entry = arm_find_exidx_entry (addr_in_block, &exidx_region);
  if (!entry)
    return 0;

  /* The ARM exception table does not describe unwind information
     for arbitrary PC values, but is guaranteed to be correct only
     at call sites.  We have to decide here whether we want to use
     ARM exception table information for this frame, or fall back
     to using prologue parsing.  (Note that if we have DWARF CFI,
     this sniffer isn't even called -- CFI is always preferred.)

     Before we make this decision, however, we check whether we
     actually have *symbol* information for the current frame.
     If not, prologue parsing would not work anyway, so we might
     as well use the exception table and hope for the best.  */
  if (find_pc_partial_function (addr_in_block, NULL, &func_start, NULL))
    {
      int exc_valid = 0;

      /* If the next frame is "normal", we are at a call site in this
	 frame, so exception information is guaranteed to be valid.  */
      if (get_next_frame (this_frame)
	  && get_frame_type (get_next_frame (this_frame)) == NORMAL_FRAME)
	exc_valid = 1;

      /* We also assume exception information is valid if we're currently
	 blocked in a system call.  The system library is supposed to
	 ensure this, so that e.g. pthread cancellation works.  */
      if (arm_frame_is_thumb (this_frame))
	{
	  LONGEST insn;

	  if (safe_read_memory_integer (get_frame_pc (this_frame) - 2, 2,
					byte_order_for_code, &insn)
	      && (insn & 0xff00) == 0xdf00 /* svc */)
	    exc_valid = 1;
	}
      else
	{
	  LONGEST insn;

	  if (safe_read_memory_integer (get_frame_pc (this_frame) - 4, 4,
					byte_order_for_code, &insn)
	      && (insn & 0x0f000000) == 0x0f000000 /* svc */)
	    exc_valid = 1;
	}

      /* Bail out if we don't know that exception information is valid.
*/ 2837 if (!exc_valid) 2838 return 0; 2839 2840 /* The ARM exception index does not mark the *end* of the region 2841 covered by the entry, and some functions will not have any entry. 2842 To correctly recognize the end of the covered region, the linker 2843 should have inserted dummy records with a CANTUNWIND marker. 2844 2845 Unfortunately, current versions of GNU ld do not reliably do 2846 this, and thus we may have found an incorrect entry above. 2847 As a (temporary) sanity check, we only use the entry if it 2848 lies *within* the bounds of the function. Note that this check 2849 might reject perfectly valid entries that just happen to cover 2850 multiple functions; therefore this check ought to be removed 2851 once the linker is fixed. */ 2852 if (func_start > exidx_region) 2853 return 0; 2854 } 2855 2856 /* Decode the list of unwinding instructions into a prologue cache. 2857 Note that this may fail due to e.g. a "refuse to unwind" code. */ 2858 cache = arm_exidx_fill_cache (this_frame, entry); 2859 if (!cache) 2860 return 0; 2861 2862 *this_prologue_cache = cache; 2863 return 1; 2864} 2865 2866struct frame_unwind arm_exidx_unwind = { 2867 NORMAL_FRAME, 2868 default_frame_unwind_stop_reason, 2869 arm_prologue_this_id, 2870 arm_prologue_prev_register, 2871 NULL, 2872 arm_exidx_unwind_sniffer 2873}; 2874 2875static struct arm_prologue_cache * 2876arm_make_stub_cache (struct frame_info *this_frame) 2877{ 2878 struct arm_prologue_cache *cache; 2879 2880 cache = FRAME_OBSTACK_ZALLOC (struct arm_prologue_cache); 2881 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame); 2882 2883 cache->prev_sp = get_frame_register_unsigned (this_frame, ARM_SP_REGNUM); 2884 2885 return cache; 2886} 2887 2888/* Our frame ID for a stub frame is the current SP and LR. 
*/ 2889 2890static void 2891arm_stub_this_id (struct frame_info *this_frame, 2892 void **this_cache, 2893 struct frame_id *this_id) 2894{ 2895 struct arm_prologue_cache *cache; 2896 2897 if (*this_cache == NULL) 2898 *this_cache = arm_make_stub_cache (this_frame); 2899 cache = *this_cache; 2900 2901 *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame)); 2902} 2903 2904static int 2905arm_stub_unwind_sniffer (const struct frame_unwind *self, 2906 struct frame_info *this_frame, 2907 void **this_prologue_cache) 2908{ 2909 CORE_ADDR addr_in_block; 2910 char dummy[4]; 2911 2912 addr_in_block = get_frame_address_in_block (this_frame); 2913 if (in_plt_section (addr_in_block, NULL) 2914 /* We also use the stub winder if the target memory is unreadable 2915 to avoid having the prologue unwinder trying to read it. */ 2916 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0) 2917 return 1; 2918 2919 return 0; 2920} 2921 2922struct frame_unwind arm_stub_unwind = { 2923 NORMAL_FRAME, 2924 default_frame_unwind_stop_reason, 2925 arm_stub_this_id, 2926 arm_prologue_prev_register, 2927 NULL, 2928 arm_stub_unwind_sniffer 2929}; 2930 2931static CORE_ADDR 2932arm_normal_frame_base (struct frame_info *this_frame, void **this_cache) 2933{ 2934 struct arm_prologue_cache *cache; 2935 2936 if (*this_cache == NULL) 2937 *this_cache = arm_make_prologue_cache (this_frame); 2938 cache = *this_cache; 2939 2940 return cache->prev_sp - cache->framesize; 2941} 2942 2943struct frame_base arm_normal_base = { 2944 &arm_prologue_unwind, 2945 arm_normal_frame_base, 2946 arm_normal_frame_base, 2947 arm_normal_frame_base 2948}; 2949 2950/* Assuming THIS_FRAME is a dummy, return the frame ID of that 2951 dummy frame. The frame ID's base needs to match the TOS value 2952 saved by save_dummy_frame_tos() and returned from 2953 arm_push_dummy_call, and the PC needs to match the dummy frame's 2954 breakpoint. 
*/

static struct frame_id
arm_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_id_build (get_frame_register_unsigned (this_frame,
						      ARM_SP_REGNUM),
			 get_frame_pc (this_frame));
}

/* Given THIS_FRAME, find the previous frame's resume PC (which will
   be used to construct the previous frame's ID, after looking up the
   containing function).  */

static CORE_ADDR
arm_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  CORE_ADDR pc;
  pc = frame_unwind_register_unsigned (this_frame, ARM_PC_REGNUM);
  /* Strip the Thumb bit and any other non-address bits.  */
  return arm_addr_bits_remove (gdbarch, pc);
}

/* Return the previous frame's stack pointer as unwound from
   THIS_FRAME.  */

static CORE_ADDR
arm_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_unwind_register_unsigned (this_frame, ARM_SP_REGNUM);
}

/* DWARF2 unwinding helper for registers that need ARM-specific
   treatment (PC and CPSR); installed by arm_dwarf2_frame_init_reg.  */

static struct value *
arm_dwarf2_prev_register (struct frame_info *this_frame, void **this_cache,
			  int regnum)
{
  struct gdbarch * gdbarch = get_frame_arch (this_frame);
  CORE_ADDR lr, cpsr;
  ULONGEST t_bit = arm_psr_thumb_bit (gdbarch);

  switch (regnum)
    {
    case ARM_PC_REGNUM:
      /* The PC is normally copied from the return column, which
	 describes saves of LR.  However, that version may have an
	 extra bit set to indicate Thumb state.  The bit is not
	 part of the PC.  */
      lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum,
					arm_addr_bits_remove (gdbarch, lr));

    case ARM_PS_REGNUM:
      /* Reconstruct the T bit; see arm_prologue_prev_register for details.
*/
      cpsr = get_frame_register_unsigned (this_frame, regnum);
      lr = frame_unwind_register_unsigned (this_frame, ARM_LR_REGNUM);
      if (IS_THUMB_ADDR (lr))
	cpsr |= t_bit;
      else
	cpsr &= ~t_bit;
      return frame_unwind_got_constant (this_frame, regnum, cpsr);

    default:
      internal_error (__FILE__, __LINE__,
		      _("Unexpected register %d"), regnum);
    }
}

/* Implementation of the dwarf2_frame_set_init_reg gdbarch hook: route
   PC and CPSR through arm_dwarf2_prev_register, and define SP as the
   CFA.  */

static void
arm_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
			   struct dwarf2_frame_state_reg *reg,
			   struct frame_info *this_frame)
{
  switch (regnum)
    {
    case ARM_PC_REGNUM:
    case ARM_PS_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = arm_dwarf2_prev_register;
      break;
    case ARM_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
    }
}

/* Return true if we are in the function's epilogue, i.e. after the
   instruction that destroyed the function's stack frame.  Thumb
   (including Thumb-2) version.  */

static int
thumb_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int insn, insn2;
  int found_return = 0, found_stack_adjust = 0;
  CORE_ADDR func_start, func_end;
  CORE_ADDR scan_pc;
  gdb_byte buf[4];

  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  /* The epilogue is a sequence of instructions along the following lines:

    - add stack frame size to SP or FP
    - [if frame pointer used] restore SP from FP
    - restore registers from SP [may include PC]
    - a return-type instruction [if PC wasn't already restored]

    In a first pass, we scan forward from the current PC and verify the
    instructions we find as compatible with this sequence, ending in a
    return instruction.

    However, this is not sufficient to distinguish indirect function calls
    within a function from indirect tail calls in the epilogue in some cases.
    Therefore, if we didn't already find any SP-changing instruction during
    forward scan, we add a backward scanning heuristic to ensure we actually
    are in the epilogue.  */

  scan_pc = pc;
  while (scan_pc < func_end && !found_return)
    {
      if (target_read_memory (scan_pc, buf, 2))
	break;

      scan_pc += 2;
      insn = extract_unsigned_integer (buf, 2, byte_order_for_code);

      if ((insn & 0xff80) == 0x4700)  /* bx <Rm> */
	found_return = 1;
      else if (insn == 0x46f7)  /* mov pc, lr */
	found_return = 1;
      else if (insn == 0x46bd)  /* mov sp, r7 */
	found_stack_adjust = 1;
      else if ((insn & 0xff00) == 0xb000)  /* add sp, imm or sub sp, imm  */
	found_stack_adjust = 1;
      else if ((insn & 0xfe00) == 0xbc00)  /* pop <registers> */
	{
	  found_stack_adjust = 1;
	  if (insn & 0x0100)  /* <registers> include PC.  */
	    found_return = 1;
	}
      else if ((insn & 0xe000) == 0xe000)  /* 32-bit Thumb-2 instruction */
	{
	  if (target_read_memory (scan_pc, buf, 2))
	    break;

	  scan_pc += 2;
	  insn2 = extract_unsigned_integer (buf, 2, byte_order_for_code);

	  if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
	    {
	      found_stack_adjust = 1;
	      if (insn2 & 0x8000)  /* <registers> include PC.  */
		found_return = 1;
	    }
	  else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
		   && (insn2 & 0x0fff) == 0x0b04)
	    {
	      found_stack_adjust = 1;
	      if ((insn2 & 0xf000) == 0xf000)  /* <Rt> is PC.  */
		found_return = 1;
	    }
	  else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
		   && (insn2 & 0x0e00) == 0x0a00)
	    found_stack_adjust = 1;
	  else
	    break;
	}
      else
	break;
    }

  if (!found_return)
    return 0;

  /* Since any instruction in the epilogue sequence, with the possible
     exception of return itself, updates the stack pointer, we need to
     scan backwards for at most one instruction.  Try either a 16-bit or
     a 32-bit instruction.  This is just a heuristic, so we do not worry
     too much about false positives.  */

  if (!found_stack_adjust)
    {
      if (pc - 4 < func_start)
	return 0;
      if (target_read_memory (pc - 4, buf, 4))
	return 0;

      /* INSN is the candidate first halfword of a 32-bit instruction at
	 PC - 4; INSN2 is both its second halfword and the candidate
	 16-bit instruction at PC - 2.  */
      insn = extract_unsigned_integer (buf, 2, byte_order_for_code);
      insn2 = extract_unsigned_integer (buf + 2, 2, byte_order_for_code);

      if (insn2 == 0x46bd)  /* mov sp, r7 */
	found_stack_adjust = 1;
      else if ((insn2 & 0xff00) == 0xb000)  /* add sp, imm or sub sp, imm  */
	found_stack_adjust = 1;
      else if ((insn2 & 0xff00) == 0xbc00)  /* pop <registers> without PC */
	found_stack_adjust = 1;
      else if (insn == 0xe8bd)  /* ldm.w sp!, <registers> */
	found_stack_adjust = 1;
      else if (insn == 0xf85d  /* ldr.w <Rt>, [sp], #4 */
	       && (insn2 & 0x0fff) == 0x0b04)
	found_stack_adjust = 1;
      else if ((insn & 0xffbf) == 0xecbd  /* vldm sp!, <list> */
	       && (insn2 & 0x0e00) == 0x0a00)
	found_stack_adjust = 1;
    }

  return found_stack_adjust;
}

/* Return true if we are in the function's epilogue, i.e. after the
   instruction that destroyed the function's stack frame.
*/

static int
arm_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned int insn;
  int found_return, found_stack_adjust;
  CORE_ADDR func_start, func_end;

  /* Thumb code is handled by a separate scanner.  */
  if (arm_pc_is_thumb (gdbarch, pc))
    return thumb_in_function_epilogue_p (gdbarch, pc);

  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  /* We are in the epilogue if the previous instruction was a stack
     adjustment and the next instruction is a possible return (bx, mov
     pc, or pop).  We could have to scan backwards to find the stack
     adjustment, or forwards to find the return, but this is a decent
     approximation.  First scan forwards.  */

  found_return = 0;
  insn = read_memory_unsigned_integer (pc, 4, byte_order_for_code);
  if (bits (insn, 28, 31) != INST_NV)
    {
      if ((insn & 0x0ffffff0) == 0x012fff10)
	/* BX.  */
	found_return = 1;
      else if ((insn & 0x0ffffff0) == 0x01a0f000)
	/* MOV PC.  */
	found_return = 1;
      else if ((insn & 0x0fff0000) == 0x08bd0000
	       && (insn & 0x0000c000) != 0)
	/* POP (LDMIA), including PC or LR.  */
	found_return = 1;
    }

  if (!found_return)
    return 0;

  /* Scan backwards.  This is just a heuristic, so do not worry about
     false positives from mode changes.  */

  if (pc < func_start + 4)
    return 0;

  found_stack_adjust = 0;
  insn = read_memory_unsigned_integer (pc - 4, 4, byte_order_for_code);
  if (bits (insn, 28, 31) != INST_NV)
    {
      if ((insn & 0x0df0f000) == 0x0080d000)
	/* ADD SP (register or immediate).  */
	found_stack_adjust = 1;
      else if ((insn & 0x0df0f000) == 0x0040d000)
	/* SUB SP (register or immediate).  */
	found_stack_adjust = 1;
      else if ((insn & 0x0ffffff0) == 0x01a0d000)
	/* MOV SP.  */
	found_stack_adjust = 1;
      else if ((insn & 0x0fff0000) == 0x08bd0000)
	/* POP (LDMIA).  */
	found_stack_adjust = 1;
    }

  if (found_stack_adjust)
    return 1;

  return 0;
}


/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item
{
  int len;                      /* Number of bytes in DATA.  */
  struct stack_item *prev;      /* Next-older item, or NULL.  */
  void *data;                   /* Heap copy of the item's contents.  */
};

/* Push a copy of the LEN bytes at CONTENTS onto the stack headed by
   PREV; return the new head.  The copy is heap-allocated and owned by
   the list (freed by pop_stack_item).  */

static struct stack_item *
push_stack_item (struct stack_item *prev, const void *contents, int len)
{
  struct stack_item *si;
  si = xmalloc (sizeof (struct stack_item));
  si->data = xmalloc (len);
  si->len = len;
  si->prev = prev;
  memcpy (si->data, contents, len);
  return si;
}

/* Free the head of the stack SI and return the next item.  */

static struct stack_item *
pop_stack_item (struct stack_item *si)
{
  struct stack_item *dead = si;
  si = si->prev;
  xfree (dead->data);
  xfree (dead);
  return si;
}


/* Return the alignment (in bytes) of the given type.  */

static int
arm_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      /* Scalars are aligned to their size.  */
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
    case TYPE_CODE_COMPLEX:
      /* TODO: What about vector types?  */
      return arm_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      /* Aggregates take the largest alignment of their members.  */
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
	{
	  falign = arm_type_align (TYPE_FIELD_TYPE (t, n));
	  if (falign > align)
	    align = falign;
	}
      return align;
    }
}

/* Possible base types for a candidate for passing and returning in
   VFP registers.  */

enum arm_vfp_cprc_base_type
{
  VFP_CPRC_UNKNOWN,
  VFP_CPRC_SINGLE,
  VFP_CPRC_DOUBLE,
  VFP_CPRC_VEC64,
  VFP_CPRC_VEC128
};

/* The length of one element of base type B.  */

static unsigned
arm_vfp_cprc_unit_length (enum arm_vfp_cprc_base_type b)
{
  switch (b)
    {
    case VFP_CPRC_SINGLE:
      return 4;
    case VFP_CPRC_DOUBLE:
      return 8;
    case VFP_CPRC_VEC64:
      return 8;
    case VFP_CPRC_VEC128:
      return 16;
    default:
      internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
		      (int) b);
    }
}

/* The character ('s', 'd' or 'q') for the type of VFP register used
   for passing base type B.  */

static int
arm_vfp_cprc_reg_char (enum arm_vfp_cprc_base_type b)
{
  switch (b)
    {
    case VFP_CPRC_SINGLE:
      return 's';
    case VFP_CPRC_DOUBLE:
      return 'd';
    case VFP_CPRC_VEC64:
      return 'd';
    case VFP_CPRC_VEC128:
      return 'q';
    default:
      internal_error (__FILE__, __LINE__, _("Invalid VFP CPRC type: %d."),
		      (int) b);
    }
}

/* Determine whether T may be part of a candidate for passing and
   returning in VFP registers, ignoring the limit on the total number
   of components.  If *BASE_TYPE is VFP_CPRC_UNKNOWN, set it to the
   classification of the first valid component found; if it is not
   VFP_CPRC_UNKNOWN, all components must have the same classification
   as *BASE_TYPE.
   If it is found that T contains a type not permitted
   for passing and returning in VFP registers, a type differently
   classified from *BASE_TYPE, or two types differently classified
   from each other, return -1, otherwise return the total number of
   base-type elements found (possibly 0 in an empty structure or
   array).  Vectors and complex types are not currently supported,
   matching the generic AAPCS support.  */

static int
arm_vfp_cprc_sub_candidate (struct type *t,
			    enum arm_vfp_cprc_base_type *base_type)
{
  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    case TYPE_CODE_FLT:
      /* A float is a single element; a double is also a single
	 element, but of DOUBLE classification.  */
      switch (TYPE_LENGTH (t))
	{
	case 4:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_SINGLE;
	  else if (*base_type != VFP_CPRC_SINGLE)
	    return -1;
	  return 1;

	case 8:
	  if (*base_type == VFP_CPRC_UNKNOWN)
	    *base_type = VFP_CPRC_DOUBLE;
	  else if (*base_type != VFP_CPRC_DOUBLE)
	    return -1;
	  return 1;

	default:
	  return -1;
	}
      break;

    case TYPE_CODE_ARRAY:
      {
	int count;
	unsigned unitlen;
	count = arm_vfp_cprc_sub_candidate (TYPE_TARGET_TYPE (t), base_type);
	if (count == -1)
	  return -1;
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	/* Element count is derived from the total size, not from the
	   declared bound.  */
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	gdb_assert ((TYPE_LENGTH (t) % unitlen) == 0);
	return TYPE_LENGTH (t) / unitlen;
      }
      break;

    case TYPE_CODE_STRUCT:
      {
	int count = 0;
	unsigned unitlen;
	int i;
	/* Struct members contribute cumulatively.  */
	for (i = 0; i < TYPE_NFIELDS (t); i++)
	  {
	    int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
							base_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	/* Reject types with padding; the total size must be exactly
	   the sum of the elements.  */
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	if (TYPE_LENGTH (t) != unitlen * count)
	  return -1;
	return count;
      }

    case TYPE_CODE_UNION:
      {
	int count = 0;
	unsigned unitlen;
	int i;
	/* Union members overlap, so the count is the maximum over the
	   members, not the sum.  */
	for (i = 0; i < TYPE_NFIELDS (t); i++)
	  {
	    int sub_count = arm_vfp_cprc_sub_candidate (TYPE_FIELD_TYPE (t, i),
							base_type);
	    if (sub_count == -1)
	      return -1;
	    count = (count > sub_count ? count : sub_count);
	  }
	if (TYPE_LENGTH (t) == 0)
	  {
	    gdb_assert (count == 0);
	    return 0;
	  }
	else if (count == 0)
	  return -1;
	unitlen = arm_vfp_cprc_unit_length (*base_type);
	if (TYPE_LENGTH (t) != unitlen * count)
	  return -1;
	return count;
      }

    default:
      break;
    }

  return -1;
}

/* Determine whether T is a VFP co-processor register candidate (CPRC)
   if passed to or returned from a non-variadic function with the VFP
   ABI in effect.  Return 1 if it is, 0 otherwise.  If it is, set
   *BASE_TYPE to the base type for T and *COUNT to the number of
   elements of that base type before returning.  */

static int
arm_vfp_call_candidate (struct type *t, enum arm_vfp_cprc_base_type *base_type,
			int *count)
{
  enum arm_vfp_cprc_base_type b = VFP_CPRC_UNKNOWN;
  int c = arm_vfp_cprc_sub_candidate (t, &b);
  /* A CPRC has at most four base-type elements.  */
  if (c <= 0 || c > 4)
    return 0;
  *base_type = b;
  *count = c;
  return 1;
}

/* Return 1 if the VFP ABI should be used for passing arguments to and
   returning values from a function of type FUNC_TYPE, 0
   otherwise.  */

static int
arm_vfp_abi_for_function (struct gdbarch *gdbarch, struct type *func_type)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  /* Variadic functions always use the base ABI.  Assume that functions
     without debug info are not variadic.  */
  if (func_type && TYPE_VARARGS (check_typedef (func_type)))
    return 0;
  /* The VFP ABI is only supported as a variant of AAPCS.  */
  if (tdep->arm_abi != ARM_ABI_AAPCS)
    return 0;
  return gdbarch_tdep (gdbarch)->fp_model == ARM_FLOAT_VFP;
}

/* We currently only support passing parameters in integer registers, which
   conforms with GCC's default model, and VFP argument passing following
   the VFP variant of AAPCS.  Several other variants exist and
   we should probably support some of them based on the selected ABI.  */

static CORE_ADDR
arm_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		     struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
		     struct value **args, CORE_ADDR sp, int struct_return,
		     CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int argnum;
  int argreg;
  int nstack;
  struct stack_item *si = NULL;
  int use_vfp_abi;
  struct type *ftype;
  /* One bit per single-precision VFP register s0..s15; a set bit
     means the register is still available for argument passing.  */
  unsigned vfp_regs_free = (1 << 16) - 1;

  /* Determine the type of this function and whether the VFP ABI
     applies.  */
  ftype = check_typedef (value_type (function));
  if (TYPE_CODE (ftype) == TYPE_CODE_PTR)
    ftype = check_typedef (TYPE_TARGET_TYPE (ftype));
  use_vfp_abi = arm_vfp_abi_for_function (gdbarch, ftype);

  /* Set the return address.  For the ARM, the return breakpoint is
     always at BP_ADDR.  */
  if (arm_pc_is_thumb (gdbarch, bp_addr))
    bp_addr |= 1;
  regcache_cooked_write_unsigned (regcache, ARM_LR_REGNUM, bp_addr);

  /* Walk through the list of args and determine how large a temporary
     stack is required.  Need to take care here as structs may be
     passed on the stack, and we have to push them.  */
  nstack = 0;

  argreg = ARM_A1_REGNUM;
  nstack = 0;

  /* The struct_return pointer occupies the first parameter
     passing register.  */
  if (struct_return)
    {
      if (arm_debug)
	fprintf_unfiltered (gdb_stdlog, "struct return in %s = %s\n",
			    gdbarch_register_name (gdbarch, argreg),
			    paddress (gdbarch, struct_addr));
      regcache_cooked_write_unsigned (regcache, argreg, struct_addr);
      argreg++;
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      int len;
      struct type *arg_type;
      struct type *target_type;
      enum type_code typecode;
      const bfd_byte *val;
      int align;
      enum arm_vfp_cprc_base_type vfp_base_type;
      int vfp_base_count;
      int may_use_core_reg = 1;

      arg_type = check_typedef (value_type (args[argnum]));
      len = TYPE_LENGTH (arg_type);
      target_type = TYPE_TARGET_TYPE (arg_type);
      typecode = TYPE_CODE (arg_type);
      val = value_contents (args[argnum]);

      align = arm_type_align (arg_type);
      /* Round alignment up to a whole number of words.  */
      align = (align + INT_REGISTER_SIZE - 1) & ~(INT_REGISTER_SIZE - 1);
      /* Different ABIs have different maximum alignments.  */
      if (gdbarch_tdep (gdbarch)->arm_abi == ARM_ABI_APCS)
	{
	  /* The APCS ABI only requires word alignment.  */
	  align = INT_REGISTER_SIZE;
	}
      else
	{
	  /* The AAPCS requires at most doubleword alignment.  */
	  if (align > INT_REGISTER_SIZE * 2)
	    align = INT_REGISTER_SIZE * 2;
	}

      if (use_vfp_abi
	  && arm_vfp_call_candidate (arg_type, &vfp_base_type,
				     &vfp_base_count))
	{
	  int regno;
	  int unit_length;
	  int shift;
	  unsigned mask;

	  /* Because this is a CPRC it cannot go in a core register or
	     cause a core register to be skipped for alignment.
	     Either it goes in VFP registers and the rest of this loop
	     iteration is skipped for this argument, or it goes on the
	     stack (and the stack alignment code is correct for this
	     case).  */
	  may_use_core_reg = 0;

	  /* Find the first block of SHIFT * VFP_BASE_COUNT contiguous
	     free single-precision registers, aligned to the element
	     size.  */
	  unit_length = arm_vfp_cprc_unit_length (vfp_base_type);
	  shift = unit_length / 4;
	  mask = (1 << (shift * vfp_base_count)) - 1;
	  for (regno = 0; regno < 16; regno += shift)
	    if (((vfp_regs_free >> regno) & mask) == mask)
	      break;

	  if (regno < 16)
	    {
	      int reg_char;
	      int reg_scaled;
	      int i;

	      vfp_regs_free &= ~(mask << regno);
	      reg_scaled = regno / shift;
	      reg_char = arm_vfp_cprc_reg_char (vfp_base_type);
	      for (i = 0; i < vfp_base_count; i++)
		{
		  char name_buf[4];
		  int regnum;
		  if (reg_char == 'q')
		    arm_neon_quad_write (gdbarch, regcache, reg_scaled + i,
					 val + i * unit_length);
		  else
		    {
		      sprintf (name_buf, "%c%d", reg_char, reg_scaled + i);
		      regnum = user_reg_map_name_to_regnum (gdbarch, name_buf,
							    strlen (name_buf));
		      regcache_cooked_write (regcache, regnum,
					     val + i * unit_length);
		    }
		}
	      continue;
	    }
	  else
	    {
	      /* This CPRC could not go in VFP registers, so all VFP
		 registers are now marked as used.  */
	      vfp_regs_free = 0;
	    }
	}

      /* Push stack padding for doubleword alignment.  */
      if (nstack & (align - 1))
	{
	  si = push_stack_item (si, val, INT_REGISTER_SIZE);
	  nstack += INT_REGISTER_SIZE;
	}

      /* Doubleword aligned quantities must go in even register pairs.  */
      if (may_use_core_reg
	  && argreg <= ARM_LAST_ARG_REGNUM
	  && align > INT_REGISTER_SIZE
	  && argreg & 1)
	argreg++;

      /* If the argument is a pointer to a function, and it is a
	 Thumb function, create a LOCAL copy of the value and set
	 the THUMB bit in it.  */
      if (TYPE_CODE_PTR == typecode
	  && target_type != NULL
	  && TYPE_CODE_FUNC == TYPE_CODE (check_typedef (target_type)))
	{
	  CORE_ADDR regval = extract_unsigned_integer (val, len, byte_order);
	  if (arm_pc_is_thumb (gdbarch, regval))
	    {
	      bfd_byte *copy = alloca (len);
	      store_unsigned_integer (copy, len, byte_order,
				      MAKE_THUMB_ADDR (regval));
	      val = copy;
	    }
	}

      /* Copy the argument to general registers or the stack in
	 register-sized pieces.  Large arguments are split between
	 registers and stack.  */
      while (len > 0)
	{
	  int partial_len = len < INT_REGISTER_SIZE ? len : INT_REGISTER_SIZE;

	  if (may_use_core_reg && argreg <= ARM_LAST_ARG_REGNUM)
	    {
	      /* The argument is being passed in a general purpose
		 register.  */
	      CORE_ADDR regval
		= extract_unsigned_integer (val, partial_len, byte_order);
	      if (byte_order == BFD_ENDIAN_BIG)
		regval <<= (INT_REGISTER_SIZE - partial_len) * 8;
	      if (arm_debug)
		fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
				    argnum,
				    gdbarch_register_name
				      (gdbarch, argreg),
				    phex (regval, INT_REGISTER_SIZE));
	      regcache_cooked_write_unsigned (regcache, argreg, regval);
	      argreg++;
	    }
	  else
	    {
	      /* Push the arguments onto the stack.  */
	      if (arm_debug)
		fprintf_unfiltered (gdb_stdlog, "arg %d @ sp + %d\n",
				    argnum, nstack);
	      si = push_stack_item (si, val, INT_REGISTER_SIZE);
	      nstack += INT_REGISTER_SIZE;
	    }

	  len -= partial_len;
	  val += partial_len;
	}
    }
  /* If we have an odd number of words to push, then decrement the stack
     by one word now, so first stack argument will be dword aligned.  */
  if (nstack & 4)
    sp -= 4;

  while (si)
    {
      sp -= si->len;
      write_memory (sp, si->data, si->len);
      si = pop_stack_item (si);
    }

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, ARM_SP_REGNUM, sp);

  return sp;
}


/* Always align the frame to an 8-byte boundary.  This is required on
   some platforms and harmless on the rest.  */

static CORE_ADDR
arm_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to eight bytes.  */
  return sp & ~ (CORE_ADDR) 7;
}

/* Print the FPA status-flag bits in FLAGS as mnemonics.  */

static void
print_fpu_flags (int flags)
{
  if (flags & (1 << 0))
    fputs ("IVO ", stdout);
  if (flags & (1 << 1))
    fputs ("DVZ ", stdout);
  if (flags & (1 << 2))
    fputs ("OFL ", stdout);
  if (flags & (1 << 3))
    fputs ("UFL ", stdout);
  if (flags & (1 << 4))
    fputs ("INX ", stdout);
  putchar ('\n');
}

/* Print interesting information about the floating point processor
   (if present) or emulator.  */

static void
arm_print_float_info (struct gdbarch *gdbarch, struct ui_file *file,
		      struct frame_info *frame, const char *args)
{
  unsigned long status = get_frame_register_unsigned (frame, ARM_FPS_REGNUM);
  int type;

  type = (status >> 24) & 127;
  if (status & (1 << 31))
    printf (_("Hardware FPU type %d\n"), type);
  else
    printf (_("Software FPU type %d\n"), type);
  /* i18n: [floating point unit] mask */
  fputs (_("mask: "), stdout);
  print_fpu_flags (status >> 16);
  /* i18n: [floating point unit] flags */
  fputs (_("flags: "), stdout);
  print_fpu_flags (status);
}

/* Construct the ARM extended floating point type.
*/ 3809static struct type * 3810arm_ext_type (struct gdbarch *gdbarch) 3811{ 3812 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); 3813 3814 if (!tdep->arm_ext_type) 3815 tdep->arm_ext_type 3816 = arch_float_type (gdbarch, -1, "builtin_type_arm_ext", 3817 floatformats_arm_ext); 3818 3819 return tdep->arm_ext_type; 3820} 3821 3822static struct type * 3823arm_neon_double_type (struct gdbarch *gdbarch) 3824{ 3825 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); 3826 3827 if (tdep->neon_double_type == NULL) 3828 { 3829 struct type *t, *elem; 3830 3831 t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_d", 3832 TYPE_CODE_UNION); 3833 elem = builtin_type (gdbarch)->builtin_uint8; 3834 append_composite_type_field (t, "u8", init_vector_type (elem, 8)); 3835 elem = builtin_type (gdbarch)->builtin_uint16; 3836 append_composite_type_field (t, "u16", init_vector_type (elem, 4)); 3837 elem = builtin_type (gdbarch)->builtin_uint32; 3838 append_composite_type_field (t, "u32", init_vector_type (elem, 2)); 3839 elem = builtin_type (gdbarch)->builtin_uint64; 3840 append_composite_type_field (t, "u64", elem); 3841 elem = builtin_type (gdbarch)->builtin_float; 3842 append_composite_type_field (t, "f32", init_vector_type (elem, 2)); 3843 elem = builtin_type (gdbarch)->builtin_double; 3844 append_composite_type_field (t, "f64", elem); 3845 3846 TYPE_VECTOR (t) = 1; 3847 TYPE_NAME (t) = "neon_d"; 3848 tdep->neon_double_type = t; 3849 } 3850 3851 return tdep->neon_double_type; 3852} 3853 3854/* FIXME: The vector types are not correctly ordered on big-endian 3855 targets. Just as s0 is the low bits of d0, d0[0] is also the low 3856 bits of d0 - regardless of what unit size is being held in d0. So 3857 the offset of the first uint8 in d0 is 7, but the offset of the 3858 first float is 4. This code works as-is for little-endian 3859 targets. 
*/

static struct type *
arm_neon_quad_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Lazily build the union type used to view a 128-bit NEON "Q"
     register under its various element interpretations; cache it in
     the tdep.  */
  if (tdep->neon_quad_type == NULL)
    {
      struct type *t, *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_neon_q",
			       TYPE_CODE_UNION);
      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u8", init_vector_type (elem, 16));
      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u16", init_vector_type (elem, 8));
      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u32", init_vector_type (elem, 4));
      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u64", init_vector_type (elem, 2));
      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f32", init_vector_type (elem, 4));
      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f64", init_vector_type (elem, 2));

      TYPE_VECTOR (t) = 1;
      TYPE_NAME (t) = "neon_q";
      tdep->neon_quad_type = t;
    }

  return tdep->neon_quad_type;
}

/* Return the GDB type object for the "standard" data type of data in
   register N.  */

static struct type *
arm_register_type (struct gdbarch *gdbarch, int regnum)
{
  int num_regs = gdbarch_num_regs (gdbarch);

  /* VFP single-precision pseudo registers.  */
  if (gdbarch_tdep (gdbarch)->have_vfp_pseudos
      && regnum >= num_regs && regnum < num_regs + 32)
    return builtin_type (gdbarch)->builtin_float;

  /* NEON quad pseudo registers.  */
  if (gdbarch_tdep (gdbarch)->have_neon_pseudos
      && regnum >= num_regs + 32 && regnum < num_regs + 32 + 16)
    return arm_neon_quad_type (gdbarch);

  /* If the target description has register information, we are only
     in this function so that we can override the types of
     double-precision registers for NEON.  */
  if (tdesc_has_registers (gdbarch_target_desc (gdbarch)))
    {
      struct type *t = tdesc_register_type (gdbarch, regnum);

      if (regnum >= ARM_D0_REGNUM && regnum < ARM_D0_REGNUM + 32
	  && TYPE_CODE (t) == TYPE_CODE_FLT
	  && gdbarch_tdep (gdbarch)->have_neon)
	return arm_neon_double_type (gdbarch);
      else
	return t;
    }

  if (regnum >= ARM_F0_REGNUM && regnum < ARM_F0_REGNUM + NUM_FREGS)
    {
      if (!gdbarch_tdep (gdbarch)->have_fpa_registers)
	return builtin_type (gdbarch)->builtin_void;

      return arm_ext_type (gdbarch);
    }
  else if (regnum == ARM_SP_REGNUM)
    return builtin_type (gdbarch)->builtin_data_ptr;
  else if (regnum == ARM_PC_REGNUM)
    return builtin_type (gdbarch)->builtin_func_ptr;
  else if (regnum >= ARRAY_SIZE (arm_register_names))
    /* These registers are only supported on targets which supply
       an XML description.  */
    return builtin_type (gdbarch)->builtin_int0;
  else
    return builtin_type (gdbarch)->builtin_uint32;
}

/* Map a DWARF register REGNUM onto the appropriate GDB register
   number.  */

static int
arm_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  /* Core integer regs.  */
  if (reg >= 0 && reg <= 15)
    return reg;

  /* Legacy FPA encoding.  These were once used in a way which
     overlapped with VFP register numbering, so their use is
     discouraged, but GDB doesn't support the ARM toolchain
     which used them for VFP.  */
  if (reg >= 16 && reg <= 23)
    return ARM_F0_REGNUM + reg - 16;

  /* New assignments for the FPA registers.  */
  if (reg >= 96 && reg <= 103)
    return ARM_F0_REGNUM + reg - 96;

  /* WMMX register assignments.  */
  if (reg >= 104 && reg <= 111)
    return ARM_WCGR0_REGNUM + reg - 104;

  if (reg >= 112 && reg <= 127)
    return ARM_WR0_REGNUM + reg - 112;

  if (reg >= 192 && reg <= 199)
    return ARM_WC0_REGNUM + reg - 192;

  /* VFP v2 registers.  A double precision value is actually
     in d1 rather than s2, but the ABI only defines numbering
     for the single precision registers.  This will "just work"
     in GDB for little endian targets (we'll read eight bytes,
     starting in s0 and then progressing to s1), but will be
     reversed on big endian targets with VFP.  This won't
     be a problem for the new Neon quad registers; you're supposed
     to use DW_OP_piece for those.  */
  if (reg >= 64 && reg <= 95)
    {
      char name_buf[4];

      sprintf (name_buf, "s%d", reg - 64);
      return user_reg_map_name_to_regnum (gdbarch, name_buf,
					  strlen (name_buf));
    }

  /* VFP v3 / Neon registers.  This range is also used for VFP v2
     registers, except that it now describes d0 instead of s0.  */
  if (reg >= 256 && reg <= 287)
    {
      char name_buf[4];

      sprintf (name_buf, "d%d", reg - 256);
      return user_reg_map_name_to_regnum (gdbarch, name_buf,
					  strlen (name_buf));
    }

  /* Unknown DWARF register number.  */
  return -1;
}

/* Map GDB internal REGNUM onto the Arm simulator register numbers.
*/
static int
arm_register_sim_regno (struct gdbarch *gdbarch, int regnum)
{
  int idx = regnum;

  gdb_assert (idx >= 0 && idx < gdbarch_num_regs (gdbarch));

  /* iwMMXt data and control registers live in dedicated simulator
     register banks.  */
  if (regnum >= ARM_WR0_REGNUM && regnum <= ARM_WR15_REGNUM)
    return SIM_ARM_IWMMXT_COP0R0_REGNUM + (regnum - ARM_WR0_REGNUM);

  if (regnum >= ARM_WC0_REGNUM && regnum <= ARM_WC7_REGNUM)
    return SIM_ARM_IWMMXT_COP1R0_REGNUM + (regnum - ARM_WC0_REGNUM);

  if (regnum >= ARM_WCGR0_REGNUM && regnum <= ARM_WCGR7_REGNUM)
    return SIM_ARM_IWMMXT_COP1R8_REGNUM + (regnum - ARM_WCGR0_REGNUM);

  /* The remaining registers are laid out consecutively in GDB's
     numbering: general registers, then FPA registers, then status
     registers; map each group onto the simulator's base numbers.  */
  if (idx < NUM_GREGS)
    return SIM_ARM_R0_REGNUM + idx;
  idx -= NUM_GREGS;

  if (idx < NUM_FREGS)
    return SIM_ARM_FP0_REGNUM + idx;
  idx -= NUM_FREGS;

  if (idx < NUM_SREGS)
    return SIM_ARM_FPS_REGNUM + idx;

  internal_error (__FILE__, __LINE__, _("Bad REGNUM %d"), regnum);
}

/* NOTE: cagney/2001-08-20: Both convert_from_extended() and
   convert_to_extended() use floatformat_arm_ext_littlebyte_bigword.
   It is thought that this is the floating-point register format on
   little-endian systems.
*/

/* Convert an FPA extended-precision value at PTR to the target format
   FMT, storing the result at DBL.  ENDIANESS selects which extended
   layout the source uses.  */

static void
convert_from_extended (const struct floatformat *fmt, const void *ptr,
		       void *dbl, int endianess)
{
  DOUBLEST d;

  if (endianess == BFD_ENDIAN_BIG)
    floatformat_to_doublest (&floatformat_arm_ext_big, ptr, &d);
  else
    floatformat_to_doublest (&floatformat_arm_ext_littlebyte_bigword,
			     ptr, &d);
  floatformat_from_doublest (fmt, &d, dbl);
}

/* Inverse of convert_from_extended: convert a value in format FMT at
   PTR into the FPA extended-precision layout at DBL.  */

static void
convert_to_extended (const struct floatformat *fmt, void *dbl, const void *ptr,
		     int endianess)
{
  DOUBLEST d;

  floatformat_to_doublest (fmt, ptr, &d);
  if (endianess == BFD_ENDIAN_BIG)
    floatformat_from_doublest (&floatformat_arm_ext_big, &d, dbl);
  else
    floatformat_from_doublest (&floatformat_arm_ext_littlebyte_bigword,
			       &d, dbl);
}

/* Return non-zero if the ARM condition code COND passes given the
   CPSR flags in STATUS_REG.  */

static int
condition_true (unsigned long cond, unsigned long status_reg)
{
  if (cond == INST_AL || cond == INST_NV)
    return 1;

  switch (cond)
    {
    case INST_EQ:
      return ((status_reg & FLAG_Z) != 0);
    case INST_NE:
      return ((status_reg & FLAG_Z) == 0);
    case INST_CS:
      return ((status_reg & FLAG_C) != 0);
    case INST_CC:
      return ((status_reg & FLAG_C) == 0);
    case INST_MI:
      return ((status_reg & FLAG_N) != 0);
    case INST_PL:
      return ((status_reg & FLAG_N) == 0);
    case INST_VS:
      return ((status_reg & FLAG_V) != 0);
    case INST_VC:
      return ((status_reg & FLAG_V) == 0);
    case INST_HI:
      return ((status_reg & (FLAG_C | FLAG_Z)) == FLAG_C);
    case INST_LS:
      return ((status_reg & (FLAG_C | FLAG_Z)) != FLAG_C);
    case INST_GE:
      return (((status_reg & FLAG_N) == 0) == ((status_reg & FLAG_V) == 0));
    case INST_LT:
      return (((status_reg & FLAG_N) == 0) != ((status_reg & FLAG_V) == 0));
    case INST_GT:
      return (((status_reg & FLAG_Z) == 0)
	      && (((status_reg & FLAG_N) == 0)
		  == ((status_reg & FLAG_V) == 0)));
    case INST_LE:
      return (((status_reg & FLAG_Z) != 0)
	      || (((status_reg & FLAG_N) == 0)
		  != ((status_reg & FLAG_V) == 0)));
    }
  return 1;
}

/* Evaluate the shifter operand of the ARM data-processing instruction
   INST in FRAME.  Rm is in bits 0-3; the shift type in bits 5-6; bit 4
   selects a register-specified (vs. immediate) shift amount.  PC_VAL
   is the value PC reads as; CARRY is the current carry flag, used for
   RRX.  */

static unsigned long
shifted_reg_val (struct frame_info *frame, unsigned long inst, int carry,
		 unsigned long pc_val, unsigned long status_reg)
{
  unsigned long res, shift;
  int rm = bits (inst, 0, 3);
  unsigned long shifttype = bits (inst, 5, 6);

  if (bit (inst, 4))
    {
      /* Register-specified shift; only the low byte of Rs is used.  */
      int rs = bits (inst, 8, 11);
      shift = (rs == 15 ? pc_val + 8
			: get_frame_register_unsigned (frame, rs)) & 0xFF;
    }
  else
    shift = bits (inst, 7, 11);

  /* When Rm is PC it reads as the instruction address plus 8, or plus
     12 for register-specified shifts.  */
  res = (rm == ARM_PC_REGNUM
	 ? (pc_val + (bit (inst, 4) ? 12 : 8))
	 : get_frame_register_unsigned (frame, rm));

  switch (shifttype)
    {
    case 0:			/* LSL */
      res = shift >= 32 ? 0 : res << shift;
      break;

    case 1:			/* LSR */
      res = shift >= 32 ? 0 : res >> shift;
      break;

    case 2:			/* ASR */
      if (shift >= 32)
	shift = 31;
      res = ((res & 0x80000000L)
	     ? ~((~res) >> shift) : res >> shift);
      break;

    case 3:			/* ROR/RRX */
      shift &= 31;
      if (shift == 0)
	res = (res >> 1) | (carry ? 0x80000000L : 0);
      else
	res = (res >> shift) | (res << (32 - shift));
      break;
    }

  return res & 0xffffffff;
}

/* Return number of 1-bits in VAL.  */

static int
bitcount (unsigned long val)
{
  int nbits;
  for (nbits = 0; val != 0; nbits++)
    val &= val - 1;		/* Delete rightmost 1-bit in val.  */
  return nbits;
}

/* Return the size in bytes of the complete Thumb instruction whose
   first halfword is INST1.
 */

static int
thumb_insn_size (unsigned short inst1)
{
  /* Halfwords with the top five bits 0b11101, 0b11110 or 0b11111 are
     the first half of a 32-bit Thumb-2 instruction; everything else
     is a complete 16-bit instruction.  */
  if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
    return 4;
  else
    return 2;
}

/* Advance the IT execution state ITSTATE past one instruction and
   return the new state; the result is zero once the IT block has been
   exhausted.  */

static int
thumb_advance_itstate (unsigned int itstate)
{
  /* Preserve IT[7:5], the first three bits of the condition.  Shift
     the upcoming condition flags left by one bit.  */
  itstate = (itstate & 0xe0) | ((itstate << 1) & 0x1f);

  /* If we have finished the IT block, clear the state.  */
  if ((itstate & 0x0f) == 0)
    itstate = 0;

  return itstate;
}

/* Find the next PC after the current instruction executes.  In some
   cases we can not statically determine the answer (see the IT state
   handling in this function); in that case, a breakpoint may be
   inserted in addition to the returned PC, which will be used to set
   another breakpoint by our caller.  */

static CORE_ADDR
thumb_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned long pc_val = ((unsigned long) pc) + 4;	/* PC after prefetch */
  unsigned short inst1;
  CORE_ADDR nextpc = pc + 2;	/* Default is next instruction.  */
  unsigned long offset;
  ULONGEST status, itstate;

  nextpc = MAKE_THUMB_ADDR (nextpc);
  pc_val = MAKE_THUMB_ADDR (pc_val);

  inst1 = read_memory_unsigned_integer (pc, 2, byte_order_for_code);

  /* Thumb-2 conditional execution support.  There are eight bits in
     the CPSR which describe conditional execution state.  Once
     reconstructed (they're in a funny order), the low five bits
     describe the low bit of the condition for each instruction and
     how many instructions remain.  The high three bits describe the
     base condition.  One of the low four bits will be set if an IT
     block is active.  These bits read as zero on earlier
     processors.  */
  status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
  itstate = ((status >> 8) & 0xfc) | ((status >> 25) & 0x3);

  /* If-Then handling.  On GNU/Linux, where this routine is used, we
     use an undefined instruction as a breakpoint.  Unlike BKPT, IT
     can disable execution of the undefined instruction.  So we might
     miss the breakpoint if we set it on a skipped conditional
     instruction.  Because conditional instructions can change the
     flags, affecting the execution of further instructions, we may
     need to set two breakpoints.  */

  if (gdbarch_tdep (gdbarch)->thumb2_breakpoint != NULL)
    {
      if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0)
	{
	  /* An IT instruction.  Because this instruction does not
	     modify the flags, we can accurately predict the next
	     executed instruction.  */
	  itstate = inst1 & 0x00ff;
	  pc += thumb_insn_size (inst1);

	  /* Skip any instructions at the head of the new IT block
	     whose condition is false.  */
	  while (itstate != 0 && ! condition_true (itstate >> 4, status))
	    {
	      inst1 = read_memory_unsigned_integer (pc, 2,
						    byte_order_for_code);
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);
	    }

	  return MAKE_THUMB_ADDR (pc);
	}
      else if (itstate != 0)
	{
	  /* We are in a conditional block.  Check the condition.  */
	  if (! condition_true (itstate >> 4, status))
	    {
	      /* Advance to the next executed instruction.  */
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);

	      while (itstate != 0 && ! condition_true (itstate >> 4, status))
		{
		  inst1 = read_memory_unsigned_integer (pc, 2,
							byte_order_for_code);
		  pc += thumb_insn_size (inst1);
		  itstate = thumb_advance_itstate (itstate);
		}

	      return MAKE_THUMB_ADDR (pc);
	    }
	  else if ((itstate & 0x0f) == 0x08)
	    {
	      /* This is the last instruction of the conditional
		 block, and it is executed.  We can handle it normally
		 because the following instruction is not conditional,
		 and we must handle it normally because it is
		 permitted to branch.  Fall through.  */
	    }
	  else
	    {
	      int cond_negated;

	      /* There are conditional instructions after this one.
		 If this instruction modifies the flags, then we can
		 not predict what the next executed instruction will
		 be.  Fortunately, this instruction is architecturally
		 forbidden to branch; we know it will fall through.
		 Start by skipping past it.  */
	      pc += thumb_insn_size (inst1);
	      itstate = thumb_advance_itstate (itstate);

	      /* Set a breakpoint on the following instruction.  */
	      gdb_assert ((itstate & 0x0f) != 0);
	      arm_insert_single_step_breakpoint (gdbarch, aspace,
						 MAKE_THUMB_ADDR (pc));
	      cond_negated = (itstate >> 4) & 1;

	      /* Skip all following instructions with the same
		 condition.  If there is a later instruction in the IT
		 block with the opposite condition, set the other
		 breakpoint there.  If not, then set a breakpoint on
		 the instruction after the IT block.  */
	      do
		{
		  inst1 = read_memory_unsigned_integer (pc, 2,
							byte_order_for_code);
		  pc += thumb_insn_size (inst1);
		  itstate = thumb_advance_itstate (itstate);
		}
	      while (itstate != 0 && ((itstate >> 4) & 1) == cond_negated);

	      return MAKE_THUMB_ADDR (pc);
	    }
	}
    }
  else if (itstate & 0x0f)
    {
      /* No Thumb-2 breakpoint is available; we are in a conditional
	 block.  Check the condition.  */
      int cond = itstate >> 4;

      if (! condition_true (cond, status))
	{
	  /* Advance to the next instruction.  All the 32-bit
	     instructions share a common prefix.  */
	  if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
	    return MAKE_THUMB_ADDR (pc + 4);
	  else
	    return MAKE_THUMB_ADDR (pc + 2);
	}

      /* Otherwise, handle the instruction normally.  */
    }

  if ((inst1 & 0xff00) == 0xbd00)	/* pop {rlist, pc} */
    {
      CORE_ADDR sp;

      /* Fetch the saved PC from the stack.  It's stored above
	 all of the other registers.  */
      offset = bitcount (bits (inst1, 0, 7)) * INT_REGISTER_SIZE;
      sp = get_frame_register_unsigned (frame, ARM_SP_REGNUM);
      nextpc = read_memory_unsigned_integer (sp + offset, 4, byte_order);
    }
  else if ((inst1 & 0xf000) == 0xd000)	/* conditional branch */
    {
      unsigned long cond = bits (inst1, 8, 11);
      if (cond == 0x0f)		/* 0x0f = SWI */
	{
	  struct gdbarch_tdep *tdep;
	  tdep = gdbarch_tdep (gdbarch);

	  /* Let the OS ABI hook predict the PC after the system
	     call, if one is registered.  */
	  if (tdep->syscall_next_pc != NULL)
	    nextpc = tdep->syscall_next_pc (frame);

	}
      else if (cond != 0x0f && condition_true (cond, status))
	nextpc = pc_val + (sbits (inst1, 0, 7) << 1);
    }
  else if ((inst1 & 0xf800) == 0xe000)	/* unconditional branch */
    {
      nextpc = pc_val + (sbits (inst1, 0, 10) << 1);
    }
  else if ((inst1 & 0xe000) == 0xe000)	/* 32-bit instruction */
    {
      unsigned short inst2;
      inst2 = read_memory_unsigned_integer (pc + 2, 2, byte_order_for_code);

      /* Default to the next instruction.  */
      nextpc = pc + 4;
      nextpc = MAKE_THUMB_ADDR (nextpc);

      if ((inst1 & 0xf800) == 0xf000 && (inst2 & 0x8000) == 0x8000)
	{
	  /* Branches and miscellaneous control instructions.  */

	  if ((inst2 & 0x1000) != 0 || (inst2 & 0xd001) == 0xc000)
	    {
	      /* B, BL, BLX.  */
	      int j1, j2, imm1, imm2;

	      imm1 = sbits (inst1, 0, 10);
	      imm2 = bits (inst2, 0, 10);
	      j1 = bit (inst2, 13);
	      j2 = bit (inst2, 11);

	      offset = ((imm1 << 12) + (imm2 << 1));
	      offset ^= ((!j2) << 22) | ((!j1) << 23);

	      nextpc = pc_val + offset;
	      /* For BLX make sure to clear the low bits.  */
	      if (bit (inst2, 12) == 0)
		nextpc = nextpc & 0xfffffffc;
	    }
	  else if (inst1 == 0xf3de && (inst2 & 0xff00) == 0x3f00)
	    {
	      /* SUBS PC, LR, #imm8.  */
	      nextpc = get_frame_register_unsigned (frame, ARM_LR_REGNUM);
	      nextpc -= inst2 & 0x00ff;
	    }
	  else if ((inst2 & 0xd000) == 0x8000 && (inst1 & 0x0380) != 0x0380)
	    {
	      /* Conditional branch.  */
	      if (condition_true (bits (inst1, 6, 9), status))
		{
		  int sign, j1, j2, imm1, imm2;

		  sign = sbits (inst1, 10, 10);
		  imm1 = bits (inst1, 0, 5);
		  imm2 = bits (inst2, 0, 10);
		  j1 = bit (inst2, 13);
		  j2 = bit (inst2, 11);

		  /* SIGN is -1 or 0 (from sbits), so this
		     sign-extends the 21-bit offset.  */
		  offset = (sign << 20) + (j2 << 19) + (j1 << 18);
		  offset += (imm1 << 12) + (imm2 << 1);

		  nextpc = pc_val + offset;
		}
	    }
	}
      else if ((inst1 & 0xfe50) == 0xe810)
	{
	  /* Load multiple or RFE.  */
	  int rn, offset, load_pc = 1;

	  rn = bits (inst1, 0, 3);
	  if (bit (inst1, 7) && !bit (inst1, 8))
	    {
	      /* LDMIA or POP */
	      if (!bit (inst2, 15))
		load_pc = 0;
	      offset = bitcount (inst2) * 4 - 4;
	    }
	  else if (!bit (inst1, 7) && bit (inst1, 8))
	    {
	      /* LDMDB */
	      if (!bit (inst2, 15))
		load_pc = 0;
	      offset = -4;
	    }
	  else if (bit (inst1, 7) && bit (inst1, 8))
	    {
	      /* RFEIA */
	      offset = 0;
	    }
	  else if (!bit (inst1, 7) && !bit (inst1, 8))
	    {
	      /* RFEDB */
	      offset = -8;
	    }
	  else
	    /* Not reachable: the four cases above are exhaustive.  */
	    load_pc = 0;

	  if (load_pc)
	    {
	      CORE_ADDR addr = get_frame_register_unsigned (frame, rn);
	      nextpc = get_frame_memory_unsigned (frame, addr + offset, 4);
	    }
	}
      else if ((inst1 & 0xffef) == 0xea4f && (inst2 & 0xfff0) == 0x0f00)
	{
	  /* MOV PC or MOVS PC.  */
	  nextpc = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  nextpc = MAKE_THUMB_ADDR (nextpc);
	}
      else if ((inst1 & 0xff70) == 0xf850 && (inst2 & 0xf000) == 0xf000)
	{
	  /* LDR PC.  */
	  CORE_ADDR base;
	  int rn, load_pc = 1;

	  rn = bits (inst1, 0, 3);
	  base = get_frame_register_unsigned (frame, rn);
	  if (rn == ARM_PC_REGNUM)
	    {
	      /* PC-relative: the PC reads as the aligned instruction
		 address plus 4.  */
	      base = (base + 4) & ~(CORE_ADDR) 0x3;
	      if (bit (inst1, 7))
		base += bits (inst2, 0, 11);
	      else
		base -= bits (inst2, 0, 11);
	    }
	  else if (bit (inst1, 7))
	    base += bits (inst2, 0, 11);
	  else if (bit (inst2, 11))
	    {
	      /* Pre-indexed addressing with an 8-bit offset.  */
	      if (bit (inst2, 10))
		{
		  if (bit (inst2, 9))
		    base += bits (inst2, 0, 7);
		  else
		    base -= bits (inst2, 0, 7);
		}
	    }
	  else if ((inst2 & 0x0fc0) == 0x0000)
	    {
	      /* Register offset with optional left shift.  */
	      int shift = bits (inst2, 4, 5), rm = bits (inst2, 0, 3);
	      base += get_frame_register_unsigned (frame, rm) << shift;
	    }
	  else
	    /* Reserved.  */
	    load_pc = 0;

	  if (load_pc)
	    nextpc = get_frame_memory_unsigned (frame, base, 4);
	}
      else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf000)
	{
	  /* TBB.  */
	  CORE_ADDR tbl_reg, table, offset, length;

	  tbl_reg = bits (inst1, 0, 3);
	  if (tbl_reg == 0x0f)
	    table = pc + 4;	/* Regcache copy of PC isn't right yet.  */
	  else
	    table = get_frame_register_unsigned (frame, tbl_reg);

	  offset = get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  length = 2 * get_frame_memory_unsigned (frame, table + offset, 1);
	  nextpc = pc_val + length;
	}
      else if ((inst1 & 0xfff0) == 0xe8d0 && (inst2 & 0xfff0) == 0xf010)
	{
	  /* TBH.  */
	  CORE_ADDR tbl_reg, table, offset, length;

	  tbl_reg = bits (inst1, 0, 3);
	  if (tbl_reg == 0x0f)
	    table = pc + 4;	/* Regcache copy of PC isn't right yet.  */
	  else
	    table = get_frame_register_unsigned (frame, tbl_reg);

	  offset = 2 * get_frame_register_unsigned (frame, bits (inst2, 0, 3));
	  length = 2 * get_frame_memory_unsigned (frame, table + offset, 2);
	  nextpc = pc_val + length;
	}
    }
  else if ((inst1 & 0xff00) == 0x4700)	/* bx REG, blx REG */
    {
      if (bits (inst1, 3, 6) == 0x0f)
	nextpc = pc_val;
      else
	nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));
    }
  else if ((inst1 & 0xff87) == 0x4687)	/* mov pc, REG */
    {
      if (bits (inst1, 3, 6) == 0x0f)
	nextpc = pc_val;
      else
	nextpc = get_frame_register_unsigned (frame, bits (inst1, 3, 6));

      nextpc = MAKE_THUMB_ADDR (nextpc);
    }
  else if ((inst1 & 0xf500) == 0xb100)
    {
      /* CBNZ or CBZ.  */
      int imm = (bit (inst1, 9) << 6) + (bits (inst1, 3, 7) << 1);
      ULONGEST reg = get_frame_register_unsigned (frame, bits (inst1, 0, 2));

      /* Bit 11 distinguishes CBNZ (taken when REG != 0) from CBZ
	 (taken when REG == 0).  */
      if (bit (inst1, 11) && reg != 0)
	nextpc = pc_val + imm;
      else if (!bit (inst1, 11) && reg == 0)
	nextpc = pc_val + imm;
    }

  return nextpc;
}

/* Get the raw next address.  PC is the current program counter, in
   FRAME, which is assumed to be executing in ARM mode.

   The value returned has the execution state of the next instruction
   encoded in it.  Use IS_THUMB_ADDR () to see whether the instruction is
   in Thumb-State, and gdbarch_addr_bits_remove () to get the plain memory
   address.  */

static CORE_ADDR
arm_get_next_pc_raw (struct frame_info *frame, CORE_ADDR pc)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  unsigned long pc_val;
  unsigned long this_instr;
  unsigned long status;
  CORE_ADDR nextpc;

  pc_val = (unsigned long) pc;
  this_instr = read_memory_unsigned_integer (pc, 4, byte_order_for_code);

  status = get_frame_register_unsigned (frame, ARM_PS_REGNUM);
  nextpc = (CORE_ADDR) (pc_val + 4);	/* Default case */

  if (bits (this_instr, 28, 31) == INST_NV)
    switch (bits (this_instr, 24, 27))
      {
      case 0xa:
      case 0xb:
	{
	  /* Branch with Link and change to Thumb.  */
	  nextpc = BranchDest (pc, this_instr);
	  nextpc |= bit (this_instr, 24) << 1;
	  nextpc = MAKE_THUMB_ADDR (nextpc);
	  break;
	}
      case 0xc:
      case 0xd:
      case 0xe:
	/* Coprocessor register transfer.  */
	if (bits (this_instr, 12, 15) == 15)
	  error (_("Invalid update to pc in instruction"));
	break;
      }
  else if (condition_true (bits (this_instr, 28, 31), status))
    {
      switch (bits (this_instr, 24, 27))
	{
	case 0x0:
	case 0x1:		/* data processing */
	case 0x2:
	case 0x3:
	  {
	    unsigned long operand1, operand2, result = 0;
	    unsigned long rn;
	    int c;

	    /* Only instructions with Rd == PC can change the flow of
	       control.  */
	    if (bits (this_instr, 12, 15) != 15)
	      break;

	    if (bits (this_instr, 22, 25) == 0
		&& bits (this_instr, 4, 7) == 9)	/* multiply */
	      error (_("Invalid update to pc in instruction"));

	    /* BX <reg>, BLX <reg> */
	    if (bits (this_instr, 4, 27) == 0x12fff1
		|| bits (this_instr, 4, 27) == 0x12fff3)
	      {
		rn = bits (this_instr, 0, 3);
		nextpc = ((rn == ARM_PC_REGNUM)
			  ? (pc_val + 8)
			  : get_frame_register_unsigned (frame, rn));

		return nextpc;
	      }

	    /* Fetch the operands; a read of the PC yields the
	       instruction address plus 8.  */
	    c = (status & FLAG_C) ? 1 : 0;
	    rn = bits (this_instr, 16, 19);
	    operand1 = ((rn == ARM_PC_REGNUM)
			? (pc_val + 8)
			: get_frame_register_unsigned (frame, rn));

	    if (bit (this_instr, 25))
	      {
		/* Operand 2 is a rotated 8-bit immediate.  */
		unsigned long immval = bits (this_instr, 0, 7);
		unsigned long rotate = 2 * bits (this_instr, 8, 11);
		operand2 = ((immval >> rotate) | (immval << (32 - rotate)))
		  & 0xffffffff;
	      }
	    else		/* operand 2 is a shifted register.  */
	      operand2 = shifted_reg_val (frame, this_instr, c,
					  pc_val, status);

	    switch (bits (this_instr, 21, 24))
	      {
	      case 0x0:	/*and */
		result = operand1 & operand2;
		break;

	      case 0x1:	/*eor */
		result = operand1 ^ operand2;
		break;

	      case 0x2:	/*sub */
		result = operand1 - operand2;
		break;

	      case 0x3:	/*rsb */
		result = operand2 - operand1;
		break;

	      case 0x4:	/*add */
		result = operand1 + operand2;
		break;

	      case 0x5:	/*adc */
		result = operand1 + operand2 + c;
		break;

	      case 0x6:	/*sbc */
		result = operand1 - operand2 + c;
		break;

	      case 0x7:	/*rsc */
		result = operand2 - operand1 + c;
		break;

	      case 0x8:
	      case 0x9:
	      case 0xa:
	      case 0xb:	/* tst, teq, cmp, cmn */
		/* Comparison instructions do not write Rd; the PC is
		   unchanged.  */
		result = (unsigned long) nextpc;
		break;

	      case 0xc:	/*orr */
		result = operand1 | operand2;
		break;

	      case 0xd:	/*mov */
		/* Always step into a function.  */
		result = operand2;
		break;

	      case 0xe:	/*bic */
		result = operand1 & ~operand2;
		break;

	      case 0xf:	/*mvn */
		result = ~operand2;
		break;
	      }

	    /* In 26-bit APCS the bottom two bits of the result are
	       ignored, and we always end up in ARM state.  */
	    if (!arm_apcs_32)
	      nextpc = arm_addr_bits_remove (gdbarch, result);
	    else
	      nextpc = result;

	    break;
	  }

	case 0x4:
	case 0x5:		/* data transfer */
	case 0x6:
	case 0x7:
	  if (bit (this_instr, 20))
	    {
	      /* load */
	      if (bits (this_instr, 12, 15) == 15)
		{
		  /* rd == pc */
		  unsigned long rn;
		  unsigned long base;

		  /* Byte loads into the PC are invalid.  */
		  if (bit (this_instr, 22))
		    error (_("Invalid update to pc in instruction"));

		  /* Word load into the PC: compute the load address.  */
		  rn = bits (this_instr, 16, 19);
		  base = ((rn == ARM_PC_REGNUM)
			  ? (pc_val + 8)
			  : get_frame_register_unsigned (frame, rn));

		  if (bit (this_instr, 24))
		    {
		      /* pre-indexed */
		      int c = (status & FLAG_C) ? 1 : 0;
		      unsigned long offset =
			(bit (this_instr, 25)
			 ? shifted_reg_val (frame, this_instr, c, pc_val, status)
			 : bits (this_instr, 0, 11));

		      if (bit (this_instr, 23))
			base += offset;
		      else
			base -= offset;
		    }
		  nextpc = (CORE_ADDR) read_memory_integer ((CORE_ADDR) base,
							    4, byte_order);
		}
	    }
	  break;

	case 0x8:
	case 0x9:		/* block transfer */
	  if (bit (this_instr, 20))
	    {
	      /* LDM */
	      if (bit (this_instr, 15))
		{
		  /* loading pc */
		  int offset = 0;

		  if (bit (this_instr, 23))
		    {
		      /* up */
		      unsigned long reglist = bits (this_instr, 0, 14);
		      offset = bitcount (reglist) * 4;
		      if (bit (this_instr, 24))		/* pre */
			offset += 4;
		    }
		  else if (bit (this_instr, 24))
		    offset = -4;

		  {
		    unsigned long rn_val =
		      get_frame_register_unsigned (frame,
						   bits (this_instr, 16, 19));
		    nextpc =
		      (CORE_ADDR) read_memory_integer ((CORE_ADDR) (rn_val
								  + offset),
						       4, byte_order);
		  }
		}
	    }
	  break;

	case 0xb:		/* branch & link */
	case 0xa:		/* branch */
	  {
	    nextpc = BranchDest (pc, this_instr);
	    break;
	  }

	case 0xc:
	case 0xd:
	case 0xe:		/* coproc ops */
	  break;
	case 0xf:		/* SWI */
	  {
	    struct gdbarch_tdep *tdep;
	    tdep = gdbarch_tdep (gdbarch);

	    /* Let the OS ABI hook predict the PC after the system
	       call, if one is registered.  */
	    if (tdep->syscall_next_pc != NULL)
	      nextpc = tdep->syscall_next_pc (frame);

	  }
	  break;

	default:
	  /* Not reachable: cases 0x0-0xf are all handled above.  */
	  fprintf_filtered (gdb_stderr, _("Bad bit-field extraction\n"));
	  return (pc);
	}
    }

  return nextpc;
}

/* Determine next PC after current instruction executes.  Will call either
   arm_get_next_pc_raw or thumb_get_next_pc_raw.
Error out if infinite 4850 loop is detected. */ 4851 4852CORE_ADDR 4853arm_get_next_pc (struct frame_info *frame, CORE_ADDR pc) 4854{ 4855 CORE_ADDR nextpc; 4856 4857 if (arm_frame_is_thumb (frame)) 4858 { 4859 nextpc = thumb_get_next_pc_raw (frame, pc); 4860 if (nextpc == MAKE_THUMB_ADDR (pc)) 4861 error (_("Infinite loop detected")); 4862 } 4863 else 4864 { 4865 nextpc = arm_get_next_pc_raw (frame, pc); 4866 if (nextpc == pc) 4867 error (_("Infinite loop detected")); 4868 } 4869 4870 return nextpc; 4871} 4872 4873/* Like insert_single_step_breakpoint, but make sure we use a breakpoint 4874 of the appropriate mode (as encoded in the PC value), even if this 4875 differs from what would be expected according to the symbol tables. */ 4876 4877void 4878arm_insert_single_step_breakpoint (struct gdbarch *gdbarch, 4879 struct address_space *aspace, 4880 CORE_ADDR pc) 4881{ 4882 struct cleanup *old_chain 4883 = make_cleanup_restore_integer (&arm_override_mode); 4884 4885 arm_override_mode = IS_THUMB_ADDR (pc); 4886 pc = gdbarch_addr_bits_remove (gdbarch, pc); 4887 4888 insert_single_step_breakpoint (gdbarch, aspace, pc); 4889 4890 do_cleanups (old_chain); 4891} 4892 4893/* single_step() is called just before we want to resume the inferior, 4894 if we want to single-step it but there is no hardware or kernel 4895 single-step support. We find the target of the coming instruction 4896 and breakpoint it. */ 4897 4898int 4899arm_software_single_step (struct frame_info *frame) 4900{ 4901 struct gdbarch *gdbarch = get_frame_arch (frame); 4902 struct address_space *aspace = get_frame_address_space (frame); 4903 CORE_ADDR next_pc = arm_get_next_pc (frame, get_frame_pc (frame)); 4904 4905 arm_insert_single_step_breakpoint (gdbarch, aspace, next_pc); 4906 4907 return 1; 4908} 4909 4910/* Given BUF, which is OLD_LEN bytes ending at ENDADDR, expand 4911 the buffer to be NEW_LEN bytes ending at ENDADDR. Return 4912 NULL if an error occurs. BUF is freed. 
*/ 4913 4914static gdb_byte * 4915extend_buffer_earlier (gdb_byte *buf, CORE_ADDR endaddr, 4916 int old_len, int new_len) 4917{ 4918 gdb_byte *new_buf, *middle; 4919 int bytes_to_read = new_len - old_len; 4920 4921 new_buf = xmalloc (new_len); 4922 memcpy (new_buf + bytes_to_read, buf, old_len); 4923 xfree (buf); 4924 if (target_read_memory (endaddr - new_len, new_buf, bytes_to_read) != 0) 4925 { 4926 xfree (new_buf); 4927 return NULL; 4928 } 4929 return new_buf; 4930} 4931 4932/* An IT block is at most the 2-byte IT instruction followed by 4933 four 4-byte instructions. The furthest back we must search to 4934 find an IT block that affects the current instruction is thus 4935 2 + 3 * 4 == 14 bytes. */ 4936#define MAX_IT_BLOCK_PREFIX 14 4937 4938/* Use a quick scan if there are more than this many bytes of 4939 code. */ 4940#define IT_SCAN_THRESHOLD 32 4941 4942/* Adjust a breakpoint's address to move breakpoints out of IT blocks. 4943 A breakpoint in an IT block may not be hit, depending on the 4944 condition flags. */ 4945static CORE_ADDR 4946arm_adjust_breakpoint_address (struct gdbarch *gdbarch, CORE_ADDR bpaddr) 4947{ 4948 gdb_byte *buf; 4949 char map_type; 4950 CORE_ADDR boundary, func_start; 4951 int buf_len, buf2_len; 4952 enum bfd_endian order = gdbarch_byte_order_for_code (gdbarch); 4953 int i, any, last_it, last_it_count; 4954 4955 /* If we are using BKPT breakpoints, none of this is necessary. */ 4956 if (gdbarch_tdep (gdbarch)->thumb2_breakpoint == NULL) 4957 return bpaddr; 4958 4959 /* ARM mode does not have this problem. */ 4960 if (!arm_pc_is_thumb (gdbarch, bpaddr)) 4961 return bpaddr; 4962 4963 /* We are setting a breakpoint in Thumb code that could potentially 4964 contain an IT block. The first step is to find how much Thumb 4965 code there is; we do not need to read outside of known Thumb 4966 sequences. 
*/ 4967 map_type = arm_find_mapping_symbol (bpaddr, &boundary); 4968 if (map_type == 0) 4969 /* Thumb-2 code must have mapping symbols to have a chance. */ 4970 return bpaddr; 4971 4972 bpaddr = gdbarch_addr_bits_remove (gdbarch, bpaddr); 4973 4974 if (find_pc_partial_function (bpaddr, NULL, &func_start, NULL) 4975 && func_start > boundary) 4976 boundary = func_start; 4977 4978 /* Search for a candidate IT instruction. We have to do some fancy 4979 footwork to distinguish a real IT instruction from the second 4980 half of a 32-bit instruction, but there is no need for that if 4981 there's no candidate. */ 4982 buf_len = min (bpaddr - boundary, MAX_IT_BLOCK_PREFIX); 4983 if (buf_len == 0) 4984 /* No room for an IT instruction. */ 4985 return bpaddr; 4986 4987 buf = xmalloc (buf_len); 4988 if (target_read_memory (bpaddr - buf_len, buf, buf_len) != 0) 4989 return bpaddr; 4990 any = 0; 4991 for (i = 0; i < buf_len; i += 2) 4992 { 4993 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order); 4994 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0) 4995 { 4996 any = 1; 4997 break; 4998 } 4999 } 5000 if (any == 0) 5001 { 5002 xfree (buf); 5003 return bpaddr; 5004 } 5005 5006 /* OK, the code bytes before this instruction contain at least one 5007 halfword which resembles an IT instruction. We know that it's 5008 Thumb code, but there are still two possibilities. Either the 5009 halfword really is an IT instruction, or it is the second half of 5010 a 32-bit Thumb instruction. The only way we can tell is to 5011 scan forwards from a known instruction boundary. */ 5012 if (bpaddr - boundary > IT_SCAN_THRESHOLD) 5013 { 5014 int definite; 5015 5016 /* There's a lot of code before this instruction. Start with an 5017 optimistic search; it's easy to recognize halfwords that can 5018 not be the start of a 32-bit instruction, and use that to 5019 lock on to the instruction boundaries. 
*/ 5020 buf = extend_buffer_earlier (buf, bpaddr, buf_len, IT_SCAN_THRESHOLD); 5021 if (buf == NULL) 5022 return bpaddr; 5023 buf_len = IT_SCAN_THRESHOLD; 5024 5025 definite = 0; 5026 for (i = 0; i < buf_len - sizeof (buf) && ! definite; i += 2) 5027 { 5028 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order); 5029 if (thumb_insn_size (inst1) == 2) 5030 { 5031 definite = 1; 5032 break; 5033 } 5034 } 5035 5036 /* At this point, if DEFINITE, BUF[I] is the first place we 5037 are sure that we know the instruction boundaries, and it is far 5038 enough from BPADDR that we could not miss an IT instruction 5039 affecting BPADDR. If ! DEFINITE, give up - start from a 5040 known boundary. */ 5041 if (! definite) 5042 { 5043 buf = extend_buffer_earlier (buf, bpaddr, buf_len, 5044 bpaddr - boundary); 5045 if (buf == NULL) 5046 return bpaddr; 5047 buf_len = bpaddr - boundary; 5048 i = 0; 5049 } 5050 } 5051 else 5052 { 5053 buf = extend_buffer_earlier (buf, bpaddr, buf_len, bpaddr - boundary); 5054 if (buf == NULL) 5055 return bpaddr; 5056 buf_len = bpaddr - boundary; 5057 i = 0; 5058 } 5059 5060 /* Scan forwards. Find the last IT instruction before BPADDR. */ 5061 last_it = -1; 5062 last_it_count = 0; 5063 while (i < buf_len) 5064 { 5065 unsigned short inst1 = extract_unsigned_integer (&buf[i], 2, order); 5066 last_it_count--; 5067 if ((inst1 & 0xff00) == 0xbf00 && (inst1 & 0x000f) != 0) 5068 { 5069 last_it = i; 5070 if (inst1 & 0x0001) 5071 last_it_count = 4; 5072 else if (inst1 & 0x0002) 5073 last_it_count = 3; 5074 else if (inst1 & 0x0004) 5075 last_it_count = 2; 5076 else 5077 last_it_count = 1; 5078 } 5079 i += thumb_insn_size (inst1); 5080 } 5081 5082 xfree (buf); 5083 5084 if (last_it == -1) 5085 /* There wasn't really an IT instruction after all. */ 5086 return bpaddr; 5087 5088 if (last_it_count < 1) 5089 /* It was too far away. */ 5090 return bpaddr; 5091 5092 /* This really is a trouble spot. Move the breakpoint to the IT 5093 instruction. 
*/ 5094 return bpaddr - buf_len + last_it; 5095} 5096 5097/* ARM displaced stepping support. 5098 5099 Generally ARM displaced stepping works as follows: 5100 5101 1. When an instruction is to be single-stepped, it is first decoded by 5102 arm_process_displaced_insn (called from arm_displaced_step_copy_insn). 5103 Depending on the type of instruction, it is then copied to a scratch 5104 location, possibly in a modified form. The copy_* set of functions 5105 performs such modification, as necessary. A breakpoint is placed after 5106 the modified instruction in the scratch space to return control to GDB. 5107 Note in particular that instructions which modify the PC will no longer 5108 do so after modification. 5109 5110 2. The instruction is single-stepped, by setting the PC to the scratch 5111 location address, and resuming. Control returns to GDB when the 5112 breakpoint is hit. 5113 5114 3. A cleanup function (cleanup_*) is called corresponding to the copy_* 5115 function used for the current instruction. This function's job is to 5116 put the CPU/memory state back to what it would have been if the 5117 instruction had been executed unmodified in its original location. */ 5118 5119/* NOP instruction (mov r0, r0). */ 5120#define ARM_NOP 0xe1a00000 5121 5122/* Helper for register reads for displaced stepping. In particular, this 5123 returns the PC as it would be seen by the instruction at its original 5124 location. */ 5125 5126ULONGEST 5127displaced_read_reg (struct regcache *regs, struct displaced_step_closure *dsc, 5128 int regno) 5129{ 5130 ULONGEST ret; 5131 CORE_ADDR from = dsc->insn_addr; 5132 5133 if (regno == ARM_PC_REGNUM) 5134 { 5135 /* Compute pipeline offset: 5136 - When executing an ARM instruction, PC reads as the address of the 5137 current instruction plus 8. 5138 - When executing a Thumb instruction, PC reads as the address of the 5139 current instruction plus 4. 
*/

      if (!dsc->is_thumb)
        from += 8;
      else
        from += 4;

      if (debug_displaced)
        fprintf_unfiltered (gdb_stdlog, "displaced: read pc value %.8lx\n",
                            (unsigned long) from);
      return (ULONGEST) from;
    }
  else
    {
      regcache_cooked_read_unsigned (regs, regno, &ret);
      if (debug_displaced)
        fprintf_unfiltered (gdb_stdlog, "displaced: read r%d value %.8lx\n",
                            regno, (unsigned long) ret);
      return ret;
    }
}

/* Return nonzero if the CPSR read from REGS indicates that the inferior is
   executing in ARM mode, zero if it is executing in Thumb mode (the T bit
   of the status register is set).  */

static int
displaced_in_arm_mode (struct regcache *regs)
{
  ULONGEST ps;
  ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));

  regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);

  /* T bit clear means ARM mode.  */
  return (ps & t_bit) == 0;
}

/* Write VAL to the PC as from a branch instruction.  The execution state
   (ARM/Thumb) is left unchanged; the low bits of VAL are forced clear to
   keep the PC aligned for the current mode.  */

static void
branch_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
                 ULONGEST val)
{
  if (!dsc->is_thumb)
    /* Note: If bits 0/1 are set, this branch would be unpredictable for
       architecture versions < 6.  */
    regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
                                    val & ~(ULONGEST) 0x3);
  else
    regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
                                    val & ~(ULONGEST) 0x1);
}

/* Write VAL to the PC as from a branch-exchange instruction: bit 0 of VAL
   selects the new execution state (1 = Thumb, 0 = ARM), as for BX/BLX.  */

static void
bx_write_pc (struct regcache *regs, ULONGEST val)
{
  ULONGEST ps;
  ULONGEST t_bit = arm_psr_thumb_bit (get_regcache_arch (regs));

  regcache_cooked_read_unsigned (regs, ARM_PS_REGNUM, &ps);

  if ((val & 1) == 1)
    {
      /* Destination is Thumb: set the T bit, clear bit 0 of the PC.  */
      regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps | t_bit);
      regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffe);
    }
  else if ((val & 2) == 0)
    {
      /* Destination is word-aligned ARM: clear the T bit.  */
      regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
      regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val);
    }
  else
    {
      /* Unpredictable behaviour.  Try to do something sensible (switch to ARM
         mode, align dest to 4 bytes).  */
      warning (_("Single-stepping BX to non-word-aligned ARM instruction."));
      regcache_cooked_write_unsigned (regs, ARM_PS_REGNUM, ps & ~t_bit);
      regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM, val & 0xfffffffc);
    }
}

/* Write to the PC as if from a load instruction.  From architecture
   version 5 on, a load to the PC behaves like BX (can switch execution
   state); on older architectures it behaves like a plain branch.  */

static void
load_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
               ULONGEST val)
{
  if (DISPLACED_STEPPING_ARCH_VERSION >= 5)
    bx_write_pc (regs, val);
  else
    branch_write_pc (regs, dsc, val);
}

/* Write to the PC as if from an ALU instruction.  Only ARM-mode ALU writes
   to the PC on architecture version >= 7 interwork (behave like BX);
   otherwise this is a plain branch-style write.  */

static void
alu_write_pc (struct regcache *regs, struct displaced_step_closure *dsc,
              ULONGEST val)
{
  if (DISPLACED_STEPPING_ARCH_VERSION >= 7 && !dsc->is_thumb)
    bx_write_pc (regs, val);
  else
    branch_write_pc (regs, dsc, val);
}

/* Helper for writing to registers for displaced stepping.  Writing to the PC
   has varying effects depending on the instruction which does the write:
   this is controlled by the WRITE_PC argument.  */

void
displaced_write_reg (struct regcache *regs, struct displaced_step_closure *dsc,
                     int regno, ULONGEST val, enum pc_write_style write_pc)
{
  if (regno == ARM_PC_REGNUM)
    {
      if (debug_displaced)
        fprintf_unfiltered (gdb_stdlog, "displaced: writing pc %.8lx\n",
                            (unsigned long) val);
      switch (write_pc)
        {
        case BRANCH_WRITE_PC:
          branch_write_pc (regs, dsc, val);
          break;

        case BX_WRITE_PC:
          bx_write_pc (regs, val);
          break;

        case LOAD_WRITE_PC:
          load_write_pc (regs, dsc, val);
          break;

        case ALU_WRITE_PC:
          alu_write_pc (regs, dsc, val);
          break;

        case CANNOT_WRITE_PC:
          warning (_("Instruction wrote to PC in an unexpected way when "
                     "single-stepping"));
          break;

        default:
          internal_error (__FILE__, __LINE__,
                          _("Invalid argument to displaced_write_reg"));
        }

      /* Record that the PC was written explicitly, so later fixup code
         does not also advance it.  */
      dsc->wrote_to_pc = 1;
    }
  else
    {
      if (debug_displaced)
        fprintf_unfiltered (gdb_stdlog, "displaced: writing r%d value %.8lx\n",
                            regno, (unsigned long) val);
      regcache_cooked_write_unsigned (regs, regno, val);
    }
}

/* This function is used to concisely determine if an instruction INSN
   references PC.  Register fields of interest in INSN should have the
   corresponding fields of BITMASK set to 0b1111.  The function returns 1
   if any of these fields in INSN reference the PC (also 0b1111, r15),
   else it returns 0.  */

static int
insn_references_pc (uint32_t insn, uint32_t bitmask)
{
  uint32_t lowbit = 1;

  while (bitmask != 0)
    {
      uint32_t mask;

      /* Advance LOWBIT to the lowest set bit remaining in BITMASK.  */
      for (; lowbit && (bitmask & lowbit) == 0; lowbit <<= 1)
        ;

      if (!lowbit)
        break;

      /* Widen the single bit into a 4-bit register field mask.  */
      mask = lowbit * 0xf;

      /* All four bits set in the field encodes r15 (PC).  */
      if ((insn & mask) == mask)
        return 1;

      bitmask &= ~mask;
    }

  return 0;
}

/* The simplest copy function.
Many instructions have the same effect no
   matter what address they are executed at: in those cases, use this.  */

static int
copy_unmodified (struct gdbarch *gdbarch, uint32_t insn,
                 const char *iname, struct displaced_step_closure *dsc)
{
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying insn %.8lx, "
                        "opcode/class '%s' unmodified\n", (unsigned long) insn,
                        iname);

  /* Copy the instruction into the scratch buffer verbatim.  */
  dsc->modinsn[0] = insn;

  return 0;
}

/* Preload instructions with immediate offset.  */

/* Cleanup: restore the scratch registers (r0 always, plus r1 for the
   register-offset form) that the copy routines below borrowed.  */

static void
cleanup_preload (struct gdbarch *gdbarch,
                 struct regcache *regs, struct displaced_step_closure *dsc)
{
  displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
  if (!dsc->u.preload.immed)
    displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
}

static int
copy_preload (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
              struct displaced_step_closure *dsc)
{
  unsigned int rn = bits (insn, 16, 19);
  ULONGEST rn_val;

  /* If the instruction does not reference the PC, it can run unmodified.  */
  if (!insn_references_pc (insn, 0x000f0000ul))
    return copy_unmodified (gdbarch, insn, "preload", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
                        (unsigned long) insn);

  /* Preload instructions:

     {pli/pld} [rn, #+/-imm]
     ->
     {pli/pld} [r0, #+/-imm].  */

  /* Save r0, then substitute the original rn value (possibly the PC)
     into it.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  rn_val = displaced_read_reg (regs, dsc, rn);
  displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);

  dsc->u.preload.immed = 1;

  /* Rewrite the base register field (bits 16-19) to r0.  */
  dsc->modinsn[0] = insn & 0xfff0ffff;

  dsc->cleanup = &cleanup_preload;

  return 0;
}

/* Preload instructions with register offset.  */

static int
copy_preload_reg (struct gdbarch *gdbarch, uint32_t insn,
                  struct regcache *regs,
                  struct displaced_step_closure *dsc)
{
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rm = bits (insn, 0, 3);
  ULONGEST rn_val, rm_val;

  /* If neither rn nor rm is the PC, the insn can run unmodified.  */
  if (!insn_references_pc (insn, 0x000f000ful))
    return copy_unmodified (gdbarch, insn, "preload reg", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying preload insn %.8lx\n",
                        (unsigned long) insn);

  /* Preload register-offset instructions:

     {pli/pld} [rn, rm {, shift}]
     ->
     {pli/pld} [r0, r1 {, shift}].  */

  /* Save r0/r1, then substitute the original rn/rm values into them.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rm_val = displaced_read_reg (regs, dsc, rm);
  displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rm_val, CANNOT_WRITE_PC);

  dsc->u.preload.immed = 0;

  /* Rewrite rn -> r0 (bits 16-19) and rm -> r1 (bits 0-3).  */
  dsc->modinsn[0] = (insn & 0xfff0fff0) | 0x1;

  dsc->cleanup = &cleanup_preload;

  return 0;
}

/* Copy/cleanup coprocessor load and store instructions.
*/

/* Cleanup: restore scratch r0 and apply any base-register writeback.  */

static void
cleanup_copro_load_store (struct gdbarch *gdbarch,
                          struct regcache *regs,
                          struct displaced_step_closure *dsc)
{
  ULONGEST rn_val = displaced_read_reg (regs, dsc, 0);

  displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);

  if (dsc->u.ldst.writeback)
    displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, LOAD_WRITE_PC);
}

static int
copy_copro_load_store (struct gdbarch *gdbarch, uint32_t insn,
                       struct regcache *regs,
                       struct displaced_step_closure *dsc)
{
  unsigned int rn = bits (insn, 16, 19);
  ULONGEST rn_val;

  /* If the base register is not the PC, the insn can run unmodified.  */
  if (!insn_references_pc (insn, 0x000f0000ul))
    return copy_unmodified (gdbarch, insn, "copro load/store", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying coprocessor "
                        "load/store insn %.8lx\n", (unsigned long) insn);

  /* Coprocessor load/store instructions:

     {stc/stc2} [<Rn>, #+/-imm]  (and other immediate addressing modes)
     ->
     {stc/stc2} [r0, #+/-imm].

     ldc/ldc2 are handled identically.  */

  /* Save r0 and substitute the original base register's value into it.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  rn_val = displaced_read_reg (regs, dsc, rn);
  displaced_write_reg (regs, dsc, 0, rn_val, CANNOT_WRITE_PC);

  dsc->u.ldst.writeback = bit (insn, 25);
  dsc->u.ldst.rn = rn;

  /* Rewrite the base register field (bits 16-19) to r0.  */
  dsc->modinsn[0] = insn & 0xfff0ffff;

  dsc->cleanup = &cleanup_copro_load_store;

  return 0;
}

/* Clean up branch instructions (actually perform the branch, by setting
   PC).  */

static void
cleanup_branch (struct gdbarch *gdbarch, struct regcache *regs,
                struct displaced_step_closure *dsc)
{
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int branch_taken = condition_true (dsc->u.branch.cond, status);
  enum pc_write_style write_pc = dsc->u.branch.exchange
                                 ? BX_WRITE_PC : BRANCH_WRITE_PC;

  if (!branch_taken)
    return;

  if (dsc->u.branch.link)
    {
      /* displaced_read_reg returns the PC with the pipeline offset applied
         (+8 in ARM mode), so PC - 4 is the address of the instruction
         following the branch.  */
      ULONGEST pc = displaced_read_reg (regs, dsc, ARM_PC_REGNUM);
      displaced_write_reg (regs, dsc, ARM_LR_REGNUM, pc - 4, CANNOT_WRITE_PC);
    }

  displaced_write_reg (regs, dsc, ARM_PC_REGNUM, dsc->u.branch.dest, write_pc);
}

/* Copy B/BL/BLX instructions with immediate destinations.  */

static int
copy_b_bl_blx (struct gdbarch *gdbarch, uint32_t insn,
               struct regcache *regs, struct displaced_step_closure *dsc)
{
  unsigned int cond = bits (insn, 28, 31);
  int exchange = (cond == 0xf);
  int link = exchange || bit (insn, 24);
  CORE_ADDR from = dsc->insn_addr;
  long offset;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s immediate insn "
                        "%.8lx\n", (exchange) ? "blx" : (link) ? "bl" : "b",
                        (unsigned long) insn);

  /* Implement "BL<cond> <label>" as:

     Preparation: cond <- instruction condition
     Insn: mov r0, r0 (nop)
     Cleanup: if (condition true) { r14 <- pc; pc <- label }.

     B<cond> similar, but don't set r14 in cleanup.  */

  if (exchange)
    /* For BLX, set bit 0 of the destination.  The cleanup_branch function will
       then arrange the switch into Thumb mode.  */
    offset = (bits (insn, 0, 23) << 2) | (bit (insn, 24) << 1) | 1;
  else
    offset = bits (insn, 0, 23) << 2;

  /* Sign-extend the 26-bit branch offset.  */
  if (bit (offset, 25))
    offset = offset | ~0x3ffffff;

  dsc->u.branch.cond = cond;
  dsc->u.branch.link = link;
  dsc->u.branch.exchange = exchange;
  /* Destination is PC-relative; the PC reads as the instruction's address
     plus 8 in ARM mode.  */
  dsc->u.branch.dest = from + 8 + offset;

  dsc->modinsn[0] = ARM_NOP;

  dsc->cleanup = &cleanup_branch;

  return 0;
}

/* Copy BX/BLX with register-specified destinations.  */

static int
copy_bx_blx_reg (struct gdbarch *gdbarch, uint32_t insn,
                 struct regcache *regs, struct displaced_step_closure *dsc)
{
  unsigned int cond = bits (insn, 28, 31);
  /* BX:  x12xxx1x
     BLX: x12xxx3x.  */
  int link = bit (insn, 5);
  unsigned int rm = bits (insn, 0, 3);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying %s register insn "
                        "%.8lx\n", (link) ? "blx" : "bx",
                        (unsigned long) insn);

  /* Implement {BX,BLX}<cond> <reg>" as:

     Preparation: cond <- instruction condition
     Insn: mov r0, r0 (nop)
     Cleanup: if (condition true) { r14 <- pc; pc <- dest; }.

     Don't set r14 in cleanup for BX.  */

  dsc->u.branch.dest = displaced_read_reg (regs, dsc, rm);

  dsc->u.branch.cond = cond;
  dsc->u.branch.link = link;
  /* BX/BLX always exchange: bit 0 of the destination value selects the
     new execution state.  */
  dsc->u.branch.exchange = 1;

  dsc->modinsn[0] = ARM_NOP;

  dsc->cleanup = &cleanup_branch;

  return 0;
}

/* Copy/cleanup arithmetic/logic instruction with immediate RHS.
*/

/* Cleanup: restore scratch r0/r1, and move the result from r0 into the
   real destination register with ALU PC-write semantics if it is PC.  */

static void
cleanup_alu_imm (struct gdbarch *gdbarch,
                 struct regcache *regs, struct displaced_step_closure *dsc)
{
  ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
  displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
}

static int
copy_alu_imm (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
              struct displaced_step_closure *dsc)
{
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rd = bits (insn, 12, 15);
  unsigned int op = bits (insn, 21, 24);
  int is_mov = (op == 0xd);
  ULONGEST rd_val, rn_val;

  /* If neither rd nor rn is the PC, the insn can run unmodified.  */
  if (!insn_references_pc (insn, 0x000ff000ul))
    return copy_unmodified (gdbarch, insn, "ALU immediate", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying immediate %s insn "
                        "%.8lx\n", is_mov ? "move" : "ALU",
                        (unsigned long) insn);

  /* Instruction is of form:

     <op><cond> rd, [rn,] #imm

     Rewrite as:

     Preparation: tmp1, tmp2 <- r0, r1;
                  r0, r1 <- rd, rn
     Insn: <op><cond> r0, r1, #imm
     Cleanup: rd <- r0; r0 <- tmp1; r1 <- tmp2
  */

  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rd_val = displaced_read_reg (regs, dsc, rd);
  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
  dsc->rd = rd;

  /* Rewrite rd -> r0; MOV has no rn operand, otherwise rn -> r1.  */
  if (is_mov)
    dsc->modinsn[0] = insn & 0xfff00fff;
  else
    dsc->modinsn[0] = (insn & 0xfff00fff) | 0x10000;

  dsc->cleanup = &cleanup_alu_imm;

  return 0;
}

/* Copy/cleanup arithmetic/logic insns with register RHS.  */

/* Cleanup: restore scratch r0-r2, and move the result from r0 into the
   real destination register with ALU PC-write semantics if it is PC.  */

static void
cleanup_alu_reg (struct gdbarch *gdbarch,
                 struct regcache *regs, struct displaced_step_closure *dsc)
{
  ULONGEST rd_val;
  int i;

  rd_val = displaced_read_reg (regs, dsc, 0);

  for (i = 0; i < 3; i++)
    displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);

  displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
}

static int
copy_alu_reg (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
              struct displaced_step_closure *dsc)
{
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rm = bits (insn, 0, 3);
  unsigned int rd = bits (insn, 12, 15);
  unsigned int op = bits (insn, 21, 24);
  int is_mov = (op == 0xd);
  ULONGEST rd_val, rn_val, rm_val;

  /* If none of rd, rn, rm is the PC, the insn can run unmodified.  */
  if (!insn_references_pc (insn, 0x000ff00ful))
    return copy_unmodified (gdbarch, insn, "ALU reg", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying reg %s insn %.8lx\n",
                        is_mov ? "move" : "ALU", (unsigned long) insn);

  /* Instruction is of form:

     <op><cond> rd, [rn,] rm [, <shift>]

     Rewrite as:

     Preparation: tmp1, tmp2, tmp3 <- r0, r1, r2;
                  r0, r1, r2 <- rd, rn, rm
     Insn: <op><cond> r0, r1, r2 [, <shift>]
     Cleanup: rd <- r0; r0, r1, r2 <- tmp1, tmp2, tmp3
  */

  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[1] = displaced_read_reg (regs, dsc, 1);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  rd_val = displaced_read_reg (regs, dsc, rd);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rm_val = displaced_read_reg (regs, dsc, rm);
  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
  dsc->rd = rd;

  /* Rewrite rd -> r0 and rm -> r2; MOV has no rn, otherwise rn -> r1.  */
  if (is_mov)
    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x2;
  else
    dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x10002;

  dsc->cleanup = &cleanup_alu_reg;

  return 0;
}

/* Cleanup/copy arithmetic/logic insns with shifted register RHS.
*/

/* Cleanup: restore scratch r0-r3, and move the result from r0 into the
   real destination register with ALU PC-write semantics if it is PC.  */

static void
cleanup_alu_shifted_reg (struct gdbarch *gdbarch,
                         struct regcache *regs,
                         struct displaced_step_closure *dsc)
{
  ULONGEST rd_val = displaced_read_reg (regs, dsc, 0);
  int i;

  for (i = 0; i < 4; i++)
    displaced_write_reg (regs, dsc, i, dsc->tmp[i], CANNOT_WRITE_PC);

  displaced_write_reg (regs, dsc, dsc->rd, rd_val, ALU_WRITE_PC);
}

static int
copy_alu_shifted_reg (struct gdbarch *gdbarch, uint32_t insn,
                      struct regcache *regs,
                      struct displaced_step_closure *dsc)
{
  unsigned int rn = bits (insn, 16, 19);
  unsigned int rm = bits (insn, 0, 3);
  unsigned int rd = bits (insn, 12, 15);
  unsigned int rs = bits (insn, 8, 11);
  unsigned int op = bits (insn, 21, 24);
  int is_mov = (op == 0xd), i;
  ULONGEST rd_val, rn_val, rm_val, rs_val;

  /* If none of rd, rn, rm, rs is the PC, the insn can run unmodified.  */
  if (!insn_references_pc (insn, 0x000fff0ful))
    return copy_unmodified (gdbarch, insn, "ALU shifted reg", dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying shifted reg %s insn "
                        "%.8lx\n", is_mov ? "move" : "ALU",
                        (unsigned long) insn);

  /* Instruction is of form:

     <op><cond> rd, [rn,] rm, <shift> rs

     Rewrite as:

     Preparation: tmp1, tmp2, tmp3, tmp4 <- r0, r1, r2, r3
                  r0, r1, r2, r3 <- rd, rn, rm, rs
     Insn: <op><cond> r0, r1, r2, <shift> r3
     Cleanup: tmp5 <- r0
              r0, r1, r2, r3 <- tmp1, tmp2, tmp3, tmp4
              rd <- tmp5
  */

  for (i = 0; i < 4; i++)
    dsc->tmp[i] = displaced_read_reg (regs, dsc, i);

  rd_val = displaced_read_reg (regs, dsc, rd);
  rn_val = displaced_read_reg (regs, dsc, rn);
  rm_val = displaced_read_reg (regs, dsc, rm);
  rs_val = displaced_read_reg (regs, dsc, rs);
  displaced_write_reg (regs, dsc, 0, rd_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 1, rn_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rm_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 3, rs_val, CANNOT_WRITE_PC);
  dsc->rd = rd;

  /* Rewrite rd -> r0, rm -> r2, rs -> r3; MOV has no rn, otherwise
     rn -> r1.  */
  if (is_mov)
    dsc->modinsn[0] = (insn & 0xfff000f0) | 0x302;
  else
    dsc->modinsn[0] = (insn & 0xfff000f0) | 0x10302;

  dsc->cleanup = &cleanup_alu_shifted_reg;

  return 0;
}

/* Clean up load instructions.  */

static void
cleanup_load (struct gdbarch *gdbarch, struct regcache *regs,
              struct displaced_step_closure *dsc)
{
  ULONGEST rt_val, rt_val2 = 0, rn_val;

  /* Fetch the loaded value(s) and the possibly-written-back base address
     from the scratch registers the modified instruction used.  */
  rt_val = displaced_read_reg (regs, dsc, 0);
  if (dsc->u.ldst.xfersize == 8)
    rt_val2 = displaced_read_reg (regs, dsc, 1);
  rn_val = displaced_read_reg (regs, dsc, 2);

  /* Restore the clobbered scratch registers.  */
  displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
  if (dsc->u.ldst.xfersize > 4)
    displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
  if (!dsc->u.ldst.immed)
    displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);

  /* Handle register writeback.  */
  if (dsc->u.ldst.writeback)
    displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
  /* Put result in right place.  */
  displaced_write_reg (regs, dsc, dsc->rd, rt_val, LOAD_WRITE_PC);
  if (dsc->u.ldst.xfersize == 8)
    displaced_write_reg (regs, dsc, dsc->rd + 1, rt_val2, LOAD_WRITE_PC);
}

/* Clean up store instructions.  */

static void
cleanup_store (struct gdbarch *gdbarch, struct regcache *regs,
               struct displaced_step_closure *dsc)
{
  ULONGEST rn_val = displaced_read_reg (regs, dsc, 2);

  /* Restore the scratch registers the modified instruction used.  */
  displaced_write_reg (regs, dsc, 0, dsc->tmp[0], CANNOT_WRITE_PC);
  if (dsc->u.ldst.xfersize > 4)
    displaced_write_reg (regs, dsc, 1, dsc->tmp[1], CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, dsc->tmp[2], CANNOT_WRITE_PC);
  if (!dsc->u.ldst.immed)
    displaced_write_reg (regs, dsc, 3, dsc->tmp[3], CANNOT_WRITE_PC);
  /* NOTE(review): r4 is restored here when restore_r4 is *clear*, yet
     copy_ldr_str_ldrb_strb sets restore_r4 = 1 exactly when r4 was used as
     scratch ("Make sure it's restored afterwards").  Verify this polarity
     is intended.  */
  if (!dsc->u.ldst.restore_r4)
    displaced_write_reg (regs, dsc, 4, dsc->tmp[4], CANNOT_WRITE_PC);

  /* Writeback.  */
  if (dsc->u.ldst.writeback)
    displaced_write_reg (regs, dsc, dsc->u.ldst.rn, rn_val, CANNOT_WRITE_PC);
}

/* Copy "extra" load/store instructions.  These are halfword/doubleword
   transfers, which have a different encoding to byte/word transfers.
*/ 5843 5844static int 5845copy_extra_ld_st (struct gdbarch *gdbarch, uint32_t insn, int unpriveleged, 5846 struct regcache *regs, struct displaced_step_closure *dsc) 5847{ 5848 unsigned int op1 = bits (insn, 20, 24); 5849 unsigned int op2 = bits (insn, 5, 6); 5850 unsigned int rt = bits (insn, 12, 15); 5851 unsigned int rn = bits (insn, 16, 19); 5852 unsigned int rm = bits (insn, 0, 3); 5853 char load[12] = {0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1}; 5854 char bytesize[12] = {2, 2, 2, 2, 8, 1, 8, 1, 8, 2, 8, 2}; 5855 int immed = (op1 & 0x4) != 0; 5856 int opcode; 5857 ULONGEST rt_val, rt_val2 = 0, rn_val, rm_val = 0; 5858 5859 if (!insn_references_pc (insn, 0x000ff00ful)) 5860 return copy_unmodified (gdbarch, insn, "extra load/store", dsc); 5861 5862 if (debug_displaced) 5863 fprintf_unfiltered (gdb_stdlog, "displaced: copying %sextra load/store " 5864 "insn %.8lx\n", unpriveleged ? "unpriveleged " : "", 5865 (unsigned long) insn); 5866 5867 opcode = ((op2 << 2) | (op1 & 0x1) | ((op1 & 0x4) >> 1)) - 4; 5868 5869 if (opcode < 0) 5870 internal_error (__FILE__, __LINE__, 5871 _("copy_extra_ld_st: instruction decode error")); 5872 5873 dsc->tmp[0] = displaced_read_reg (regs, dsc, 0); 5874 dsc->tmp[1] = displaced_read_reg (regs, dsc, 1); 5875 dsc->tmp[2] = displaced_read_reg (regs, dsc, 2); 5876 if (!immed) 5877 dsc->tmp[3] = displaced_read_reg (regs, dsc, 3); 5878 5879 rt_val = displaced_read_reg (regs, dsc, rt); 5880 if (bytesize[opcode] == 8) 5881 rt_val2 = displaced_read_reg (regs, dsc, rt + 1); 5882 rn_val = displaced_read_reg (regs, dsc, rn); 5883 if (!immed) 5884 rm_val = displaced_read_reg (regs, dsc, rm); 5885 5886 displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC); 5887 if (bytesize[opcode] == 8) 5888 displaced_write_reg (regs, dsc, 1, rt_val2, CANNOT_WRITE_PC); 5889 displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC); 5890 if (!immed) 5891 displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC); 5892 5893 dsc->rd = rt; 5894 
dsc->u.ldst.xfersize = bytesize[opcode]; 5895 dsc->u.ldst.rn = rn; 5896 dsc->u.ldst.immed = immed; 5897 dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0; 5898 dsc->u.ldst.restore_r4 = 0; 5899 5900 if (immed) 5901 /* {ldr,str}<width><cond> rt, [rt2,] [rn, #imm] 5902 -> 5903 {ldr,str}<width><cond> r0, [r1,] [r2, #imm]. */ 5904 dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000; 5905 else 5906 /* {ldr,str}<width><cond> rt, [rt2,] [rn, +/-rm] 5907 -> 5908 {ldr,str}<width><cond> r0, [r1,] [r2, +/-r3]. */ 5909 dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003; 5910 5911 dsc->cleanup = load[opcode] ? &cleanup_load : &cleanup_store; 5912 5913 return 0; 5914} 5915 5916/* Copy byte/word loads and stores. */ 5917 5918static int 5919copy_ldr_str_ldrb_strb (struct gdbarch *gdbarch, uint32_t insn, 5920 struct regcache *regs, 5921 struct displaced_step_closure *dsc, int load, int byte, 5922 int usermode) 5923{ 5924 int immed = !bit (insn, 25); 5925 unsigned int rt = bits (insn, 12, 15); 5926 unsigned int rn = bits (insn, 16, 19); 5927 unsigned int rm = bits (insn, 0, 3); /* Only valid if !immed. */ 5928 ULONGEST rt_val, rn_val, rm_val = 0; 5929 5930 if (!insn_references_pc (insn, 0x000ff00ful)) 5931 return copy_unmodified (gdbarch, insn, "load/store", dsc); 5932 5933 if (debug_displaced) 5934 fprintf_unfiltered (gdb_stdlog, "displaced: copying %s%s insn %.8lx\n", 5935 load ? (byte ? "ldrb" : "ldr") 5936 : (byte ? "strb" : "str"), usermode ? 
"t" : "",
                        (unsigned long) insn);

  /* Save the scratch registers the rewritten instruction will clobber.  */
  dsc->tmp[0] = displaced_read_reg (regs, dsc, 0);
  dsc->tmp[2] = displaced_read_reg (regs, dsc, 2);
  if (!immed)
    dsc->tmp[3] = displaced_read_reg (regs, dsc, 3);
  if (!load)
    dsc->tmp[4] = displaced_read_reg (regs, dsc, 4);

  /* Read the original operand registers.  */
  rt_val = displaced_read_reg (regs, dsc, rt);
  rn_val = displaced_read_reg (regs, dsc, rn);
  if (!immed)
    rm_val = displaced_read_reg (regs, dsc, rm);

  /* Substitute the operand values into the scratch registers.  */
  displaced_write_reg (regs, dsc, 0, rt_val, CANNOT_WRITE_PC);
  displaced_write_reg (regs, dsc, 2, rn_val, CANNOT_WRITE_PC);
  if (!immed)
    displaced_write_reg (regs, dsc, 3, rm_val, CANNOT_WRITE_PC);

  dsc->rd = rt;
  dsc->u.ldst.xfersize = byte ? 1 : 4;
  dsc->u.ldst.rn = rn;
  dsc->u.ldst.immed = immed;
  dsc->u.ldst.writeback = bit (insn, 24) == 0 || bit (insn, 21) != 0;

  /* To write PC we can do:

     Before this sequence of instructions:
     r0 is the PC value got from displaced_read_reg, so r0 = from + 8;
     r2 is the Rn value got from displaced_read_reg.

     Insn1: push {pc} Write address of STR instruction + offset on stack
     Insn2: pop  {r4} Read it back from stack, r4 = addr(Insn1) + offset
     Insn3: sub r4, r4, pc   r4 = addr(Insn1) + offset - pc
                             = addr(Insn1) + offset - addr(Insn3) - 8
                             = offset - 16
     Insn4: add r4, r4, #8   r4 = offset - 8
     Insn5: add r0, r0, r4   r0 = from + 8 + offset - 8
                             = from + offset
     Insn6: str r0, [r2, #imm] (or str r0, [r2, r3])

     Otherwise we don't know what value to write for PC, since the offset is
     architecture-dependent (sometimes PC+8, sometimes PC+12).  More details
     of this can be found in Section "Saving from r15" in
     http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0204g/Cihbjifh.html */

  if (load || rt != ARM_PC_REGNUM)
    {
      dsc->u.ldst.restore_r4 = 0;

      if (immed)
        /* {ldr,str}[b]<cond> rt, [rn, #imm], etc.
           ->
           {ldr,str}[b]<cond> r0, [r2, #imm].  */
        dsc->modinsn[0] = (insn & 0xfff00fff) | 0x20000;
      else
        /* {ldr,str}[b]<cond> rt, [rn, rm], etc.
           ->
           {ldr,str}[b]<cond> r0, [r2, r3].  */
        dsc->modinsn[0] = (insn & 0xfff00ff0) | 0x20003;
    }
  else
    {
      /* We need to use r4 as scratch.  Make sure it's restored afterwards.  */
      dsc->u.ldst.restore_r4 = 1;
      dsc->modinsn[0] = 0xe92d8000;  /* push {pc} */
      dsc->modinsn[1] = 0xe8bd0010;  /* pop  {r4} */
      dsc->modinsn[2] = 0xe044400f;  /* sub r4, r4, pc.  */
      dsc->modinsn[3] = 0xe2844008;  /* add r4, r4, #8.  */
      dsc->modinsn[4] = 0xe0800004;  /* add r0, r0, r4.  */

      /* As above.  */
      if (immed)
        dsc->modinsn[5] = (insn & 0xfff00fff) | 0x20000;
      else
        dsc->modinsn[5] = (insn & 0xfff00ff0) | 0x20003;

      dsc->numinsns = 6;
    }

  dsc->cleanup = load ? &cleanup_load : &cleanup_store;

  return 0;
}

/* Cleanup LDM instructions with fully-populated register list.  This is an
   unfortunate corner case: it's impossible to implement correctly by modifying
   the instruction.  The issue is as follows: we have an instruction,

   ldm rN, {r0-r15}

   which we must rewrite to avoid loading PC.  A possible solution would be to
   do the load in two halves, something like (with suitable cleanup
   afterwards):

   mov r8, rN
   ldm[id][ab] r8!, {r0-r7}
   str r7, <temp>
   ldm[id][ab] r8, {r7-r14}
   <bkpt>

   but at present there's no suitable place for <temp>, since the scratch space
   is overwritten before the cleanup routine is called.  For now, we simply
   emulate the instruction.  */

static void
cleanup_block_load_all (struct gdbarch *gdbarch, struct regcache *regs,
                        struct displaced_step_closure *dsc)
{
  int inc = dsc->u.block.increment;
  int bump_before = dsc->u.block.before ? (inc ? 4 : -4) : 0;
  int bump_after = dsc->u.block.before ? 0 : (inc ? 4 : -4);
  uint32_t regmask = dsc->u.block.regmask;
  int regno = inc ? 0 : 15;
  CORE_ADDR xfer_addr = dsc->u.block.xfer_addr;
  int exception_return = dsc->u.block.load && dsc->u.block.user
                         && (regmask & 0x8000) != 0;
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  int do_transfer = condition_true (dsc->u.block.cond, status);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (!do_transfer)
    return;

  /* If the instruction is ldm rN, {...pc}^, I don't think there's anything
     sensible we can do here.  Complain loudly.  */
  if (exception_return)
    error (_("Cannot single-step exception return"));

  /* We don't handle any stores here for now.  */
  gdb_assert (dsc->u.block.load != 0);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: emulating block transfer: "
                        "%s %s %s\n", dsc->u.block.load ? "ldm" : "stm",
                        dsc->u.block.increment ? "inc" : "dec",
                        dsc->u.block.before ? "before" : "after");

  while (regmask)
    {
      uint32_t memword;

      /* Find the next register in the list: scan upwards for increment
         mode, downwards for decrement mode.  */
      if (inc)
        while (regno <= ARM_PC_REGNUM && (regmask & (1 << regno)) == 0)
          regno++;
      else
        while (regno >= 0 && (regmask & (1 << regno)) == 0)
          regno--;

      xfer_addr += bump_before;

      memword = read_memory_unsigned_integer (xfer_addr, 4, byte_order);
      displaced_write_reg (regs, dsc, regno, memword, LOAD_WRITE_PC);

      xfer_addr += bump_after;

      regmask &= ~(1 << regno);
    }

  if (dsc->u.block.writeback)
    displaced_write_reg (regs, dsc, dsc->u.block.rn, xfer_addr,
                         CANNOT_WRITE_PC);
}

/* Clean up an STM which included the PC in the register list.
*/ 6102 6103static void 6104cleanup_block_store_pc (struct gdbarch *gdbarch, struct regcache *regs, 6105 struct displaced_step_closure *dsc) 6106{ 6107 uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM); 6108 int store_executed = condition_true (dsc->u.block.cond, status); 6109 CORE_ADDR pc_stored_at, transferred_regs = bitcount (dsc->u.block.regmask); 6110 CORE_ADDR stm_insn_addr; 6111 uint32_t pc_val; 6112 long offset; 6113 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); 6114 6115 /* If condition code fails, there's nothing else to do. */ 6116 if (!store_executed) 6117 return; 6118 6119 if (dsc->u.block.increment) 6120 { 6121 pc_stored_at = dsc->u.block.xfer_addr + 4 * transferred_regs; 6122 6123 if (dsc->u.block.before) 6124 pc_stored_at += 4; 6125 } 6126 else 6127 { 6128 pc_stored_at = dsc->u.block.xfer_addr; 6129 6130 if (dsc->u.block.before) 6131 pc_stored_at -= 4; 6132 } 6133 6134 pc_val = read_memory_unsigned_integer (pc_stored_at, 4, byte_order); 6135 stm_insn_addr = dsc->scratch_base; 6136 offset = pc_val - stm_insn_addr; 6137 6138 if (debug_displaced) 6139 fprintf_unfiltered (gdb_stdlog, "displaced: detected PC offset %.8lx for " 6140 "STM instruction\n", offset); 6141 6142 /* Rewrite the stored PC to the proper value for the non-displaced original 6143 instruction. */ 6144 write_memory_unsigned_integer (pc_stored_at, 4, byte_order, 6145 dsc->insn_addr + offset); 6146} 6147 6148/* Clean up an LDM which includes the PC in the register list. We clumped all 6149 the registers in the transferred list into a contiguous range r0...rX (to 6150 avoid loading PC directly and losing control of the debugged program), so we 6151 must undo that here. 
 */

static void
cleanup_block_load_pc (struct gdbarch *gdbarch,
		       struct regcache *regs,
		       struct displaced_step_closure *dsc)
{
  uint32_t status = displaced_read_reg (regs, dsc, ARM_PS_REGNUM);
  /* NOTE(review): I is declared but never used here.  */
  int load_executed = condition_true (dsc->u.block.cond, status), i;
  unsigned int mask = dsc->u.block.regmask, write_reg = ARM_PC_REGNUM;
  unsigned int regs_loaded = bitcount (mask);
  unsigned int num_to_shuffle = regs_loaded, clobbered;

  /* The method employed here will fail if the register list is fully populated
     (we need to avoid loading PC directly).  */
  gdb_assert (num_to_shuffle < 16);

  if (!load_executed)
    return;

  clobbered = (1 << num_to_shuffle) - 1;

  /* The modified LDM (see copy_block_xfer) loaded its values into the
     contiguous registers r0..r(N-1).  Move each value into the register the
     original instruction named, working downwards from the highest-numbered
     destination so that no value is overwritten before it has been read.  */
  while (num_to_shuffle > 0)
    {
      if ((mask & (1 << write_reg)) != 0)
	{
	  unsigned int read_reg = num_to_shuffle - 1;

	  if (read_reg != write_reg)
	    {
	      ULONGEST rval = displaced_read_reg (regs, dsc, read_reg);
	      displaced_write_reg (regs, dsc, write_reg, rval, LOAD_WRITE_PC);
	      if (debug_displaced)
		fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: move "
				    "loaded register r%d to r%d\n"), read_reg,
				    write_reg);
	    }
	  else if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: register "
				"r%d already in the right place\n"),
				write_reg);

	  /* This register now holds its final value; it no longer needs to
	     be restored from the saved copy below.  */
	  clobbered &= ~(1 << write_reg);

	  num_to_shuffle--;
	}

      write_reg--;
    }

  /* Restore any registers we scribbled over.  */
  for (write_reg = 0; clobbered != 0; write_reg++)
    {
      if ((clobbered & (1 << write_reg)) != 0)
	{
	  displaced_write_reg (regs, dsc, write_reg, dsc->tmp[write_reg],
			       CANNOT_WRITE_PC);
	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM: restored "
				"clobbered register r%d\n"), write_reg);
	  clobbered &= ~(1 << write_reg);
	}
    }

  /* Perform register writeback manually.
 */
  /* The writeback bit was cleared in the executed copy (see copy_block_xfer),
     so emulate its effect on the base register here.  */
  if (dsc->u.block.writeback)
    {
      ULONGEST new_rn_val = dsc->u.block.xfer_addr;

      if (dsc->u.block.increment)
	new_rn_val += regs_loaded * 4;
      else
	new_rn_val -= regs_loaded * 4;

      displaced_write_reg (regs, dsc, dsc->u.block.rn, new_rn_val,
			   CANNOT_WRITE_PC);
    }
}

/* Handle ldm/stm, apart from some tricky cases which are unlikely to occur
   in user-level code (in particular exception return, ldm rn, {...pc}^).  */

static int
copy_block_xfer (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
		 struct displaced_step_closure *dsc)
{
  int load = bit (insn, 20);
  int user = bit (insn, 22);
  int increment = bit (insn, 23);
  int before = bit (insn, 24);
  int writeback = bit (insn, 21);
  int rn = bits (insn, 16, 19);

  /* Block transfers which don't mention PC can be run directly
     out-of-line.  */
  if (rn != ARM_PC_REGNUM && (insn & 0x8000) == 0)
    return copy_unmodified (gdbarch, insn, "ldm/stm", dsc);

  if (rn == ARM_PC_REGNUM)
    {
      warning (_("displaced: Unpredictable LDM or STM with "
		 "base register r15"));
      return copy_unmodified (gdbarch, insn, "unpredictable ldm/stm", dsc);
    }

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying block transfer insn "
			"%.8lx\n", (unsigned long) insn);

  /* Record the operands the cleanup routines need to undo/complete the
     transfer after the out-of-line copy has executed.  */
  dsc->u.block.xfer_addr = displaced_read_reg (regs, dsc, rn);
  dsc->u.block.rn = rn;

  dsc->u.block.load = load;
  dsc->u.block.user = user;
  dsc->u.block.increment = increment;
  dsc->u.block.before = before;
  dsc->u.block.writeback = writeback;
  dsc->u.block.cond = bits (insn, 28, 31);

  dsc->u.block.regmask = insn & 0xffff;

  if (load)
    {
      if ((insn & 0xffff) == 0xffff)
	{
	  /* LDM with a fully-populated register list.  This case is
	     particularly tricky.
  Implement for now by fully emulating the
	     instruction (which might not behave perfectly in all cases, but
	     these instructions should be rare enough for that not to matter
	     too much).  */
	  dsc->modinsn[0] = ARM_NOP;

	  dsc->cleanup = &cleanup_block_load_all;
	}
      else
	{
	  /* LDM of a list of registers which includes PC.  Implement by
	     rewriting the list of registers to be transferred into a
	     contiguous chunk r0...rX before doing the transfer, then shuffling
	     registers into the correct places in the cleanup routine.  */
	  unsigned int regmask = insn & 0xffff;
	  unsigned int num_in_list = bitcount (regmask), new_regmask, bit = 1;
	  unsigned int to = 0, from = 0, i, new_rn;

	  /* Save the low registers the rewritten list will clobber, so the
	     cleanup routine can restore any that were not real targets.  */
	  for (i = 0; i < num_in_list; i++)
	    dsc->tmp[i] = displaced_read_reg (regs, dsc, i);

	  /* Writeback makes things complicated.  We need to avoid clobbering
	     the base register with one of the registers in our modified
	     register list, but just using a different register can't work in
	     all cases, e.g.:

	       ldm r14!, {r0-r13,pc}

	     which would need to be rewritten as:

	       ldm rN!, {r0-r14}

	     but that can't work, because there's no free register for N.

	     Solve this by turning off the writeback bit, and emulating
	     writeback manually in the cleanup routine.  */

	  if (writeback)
	    insn &= ~(1 << 21);

	  new_regmask = (1 << num_in_list) - 1;

	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog, _("displaced: LDM r%d%s, "
				"{..., pc}: original reg list %.4x, modified "
				"list %.4x\n"), rn, writeback ? "!" : "",
				(int) insn & 0xffff, new_regmask);

	  dsc->modinsn[0] = (insn & ~0xffff) | (new_regmask & 0xffff);

	  dsc->cleanup = &cleanup_block_load_pc;
	}
    }
  else
    {
      /* STM of a list of registers which includes PC.
  Run the instruction
	 as-is, but out of line: this will store the wrong value for the PC,
	 so we must manually fix up the memory in the cleanup routine.
	 Doing things this way has the advantage that we can auto-detect
	 the offset of the PC write (which is architecture-dependent) in
	 the cleanup routine.  */
      dsc->modinsn[0] = insn;

      dsc->cleanup = &cleanup_block_store_pc;
    }

  return 0;
}

/* Cleanup/copy SVC (SWI) instructions.  These two functions are overridden
   for Linux, where some SVC instructions must be treated specially.  */

static void
cleanup_svc (struct gdbarch *gdbarch, struct regcache *regs,
	     struct displaced_step_closure *dsc)
{
  /* Resume at the instruction following the original (4-byte, ARM-mode)
     SVC.  */
  CORE_ADDR resume_addr = dsc->insn_addr + 4;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: cleanup for svc, resume at "
			"%.8lx\n", (unsigned long) resume_addr);

  displaced_write_reg (regs, dsc, ARM_PC_REGNUM, resume_addr, BRANCH_WRITE_PC);
}

static int
copy_svc (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
	  struct regcache *regs, struct displaced_step_closure *dsc)
{
  /* Allow OS-specific code to override SVC handling.  */
  if (dsc->u.svc.copy_svc_os)
    return dsc->u.svc.copy_svc_os (gdbarch, insn, to, regs, dsc);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying svc insn %.8lx\n",
			(unsigned long) insn);

  /* Preparation: none.
     Insn: unmodified svc.
     Cleanup: pc <- insn_addr + 4.  */

  dsc->modinsn[0] = insn;

  dsc->cleanup = &cleanup_svc;
  /* Pretend we wrote to the PC, so cleanup doesn't set PC to the next
     instruction.  */
  dsc->wrote_to_pc = 1;

  return 0;
}

/* Copy undefined instructions.
 */

static int
copy_undef (struct gdbarch *gdbarch, uint32_t insn,
	    struct displaced_step_closure *dsc)
{
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: copying undefined insn %.8lx\n",
			(unsigned long) insn);

  /* Copy the instruction unmodified; executing it out of line will raise
     the same undefined-instruction trap as executing it in place.  */
  dsc->modinsn[0] = insn;

  return 0;
}

/* Copy unpredictable instructions.  */

static int
copy_unpred (struct gdbarch *gdbarch, uint32_t insn,
	     struct displaced_step_closure *dsc)
{
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copying unpredictable insn "
			"%.8lx\n", (unsigned long) insn);

  dsc->modinsn[0] = insn;

  return 0;
}

/* The decode_* functions are instruction decoding helpers.  They mostly follow
   the presentation in the ARM ARM.  */

/* Decode the miscellaneous/memory-hint/Advanced SIMD subset of the
   unconditional instruction space (bit 27 clear; see ARM ARM).  */

static int
decode_misc_memhint_neon (struct gdbarch *gdbarch, uint32_t insn,
			  struct regcache *regs,
			  struct displaced_step_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 26), op2 = bits (insn, 4, 7);
  unsigned int rn = bits (insn, 16, 19);

  if (op1 == 0x10 && (op2 & 0x2) == 0x0 && (rn & 0xe) == 0x0)
    return copy_unmodified (gdbarch, insn, "cps", dsc);
  else if (op1 == 0x10 && op2 == 0x0 && (rn & 0xe) == 0x1)
    return copy_unmodified (gdbarch, insn, "setend", dsc);
  else if ((op1 & 0x60) == 0x20)
    return copy_unmodified (gdbarch, insn, "neon dataproc", dsc);
  else if ((op1 & 0x71) == 0x40)
    return copy_unmodified (gdbarch, insn, "neon elt/struct load/store", dsc);
  else if ((op1 & 0x77) == 0x41)
    return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
  else if ((op1 & 0x77) == 0x45)
    return copy_preload (gdbarch, insn, regs, dsc);  /* pli.  */
  else if ((op1 & 0x77) == 0x51)
    {
      if (rn != 0xf)
	return copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.
 */
      else
	return copy_unpred (gdbarch, insn, dsc);
    }
  else if ((op1 & 0x77) == 0x55)
    return copy_preload (gdbarch, insn, regs, dsc);  /* pld/pldw.  */
  else if (op1 == 0x57)
    switch (op2)
      {
      case 0x1: return copy_unmodified (gdbarch, insn, "clrex", dsc);
      case 0x4: return copy_unmodified (gdbarch, insn, "dsb", dsc);
      case 0x5: return copy_unmodified (gdbarch, insn, "dmb", dsc);
      case 0x6: return copy_unmodified (gdbarch, insn, "isb", dsc);
      default: return copy_unpred (gdbarch, insn, dsc);
      }
  else if ((op1 & 0x63) == 0x43)
    return copy_unpred (gdbarch, insn, dsc);
  else if ((op2 & 0x1) == 0x0)
    switch (op1 & ~0x80)
      {
      case 0x61:
	return copy_unmodified (gdbarch, insn, "unallocated mem hint", dsc);
      case 0x65:
	return copy_preload_reg (gdbarch, insn, regs, dsc);  /* pli reg.  */
      case 0x71: case 0x75:
        /* pld/pldw reg.  */
	return copy_preload_reg (gdbarch, insn, regs, dsc);
      case 0x63: case 0x67: case 0x73: case 0x77:
	return copy_unpred (gdbarch, insn, dsc);
      default:
	return copy_undef (gdbarch, insn, dsc);
      }
  else
    return copy_undef (gdbarch, insn, dsc);  /* Probably unreachable.  */
}

/* Decode instructions with the condition field of all-ones (the
   "unconditional" instruction space).  */

static int
decode_unconditional (struct gdbarch *gdbarch, uint32_t insn,
		      struct regcache *regs,
		      struct displaced_step_closure *dsc)
{
  if (bit (insn, 27) == 0)
    return decode_misc_memhint_neon (gdbarch, insn, regs, dsc);
  /* Switch on bits: 0bxxxxx321xxx0xxxxxxxxxxxxxxxxxxxx.
 */
  else switch (((insn & 0x7000000) >> 23) | ((insn & 0x100000) >> 20))
    {
    case 0x0: case 0x2:
      return copy_unmodified (gdbarch, insn, "srs", dsc);

    case 0x1: case 0x3:
      return copy_unmodified (gdbarch, insn, "rfe", dsc);

    case 0x4: case 0x5: case 0x6: case 0x7:
      return copy_b_bl_blx (gdbarch, insn, regs, dsc);

    case 0x8:
      switch ((insn & 0xe00000) >> 21)
	{
	case 0x1: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7:
	  /* stc/stc2.  */
	  return copy_copro_load_store (gdbarch, insn, regs, dsc);

	case 0x2:
	  return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);

	default:
	  return copy_undef (gdbarch, insn, dsc);
	}

    case 0x9:
      {
	int rn_f = (bits (insn, 16, 19) == 0xf);
	switch ((insn & 0xe00000) >> 21)
	  {
	  case 0x1: case 0x3:
	    /* ldc/ldc2 imm (undefined for rn == pc).  */
	    return rn_f ? copy_undef (gdbarch, insn, dsc)
			: copy_copro_load_store (gdbarch, insn, regs, dsc);

	  case 0x2:
	    return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);

	  case 0x4: case 0x5: case 0x6: case 0x7:
	    /* ldc/ldc2 lit (undefined for rn != pc).  */
	    return rn_f ? copy_copro_load_store (gdbarch, insn, regs, dsc)
			: copy_undef (gdbarch, insn, dsc);

	  default:
	    return copy_undef (gdbarch, insn, dsc);
	  }
      }

    case 0xa:
      return copy_unmodified (gdbarch, insn, "stc/stc2", dsc);

    case 0xb:
      if (bits (insn, 16, 19) == 0xf)
	/* ldc/ldc2 lit.
 */
	return copy_copro_load_store (gdbarch, insn, regs, dsc);
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0xc:
      if (bit (insn, 4))
	return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
      else
	return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    case 0xd:
      if (bit (insn, 4))
	return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
      else
	return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);

    default:
      return copy_undef (gdbarch, insn, dsc);
    }
}

/* Decode miscellaneous instructions in dp/misc encoding space.  */

static int
decode_miscellaneous (struct gdbarch *gdbarch, uint32_t insn,
		      struct regcache *regs,
		      struct displaced_step_closure *dsc)
{
  unsigned int op2 = bits (insn, 4, 6);
  unsigned int op = bits (insn, 21, 22);
  unsigned int op1 = bits (insn, 16, 19);

  switch (op2)
    {
    case 0x0:
      return copy_unmodified (gdbarch, insn, "mrs/msr", dsc);

    case 0x1:
      if (op == 0x1)  /* bx.  */
	return copy_bx_blx_reg (gdbarch, insn, regs, dsc);
      else if (op == 0x3)
	return copy_unmodified (gdbarch, insn, "clz", dsc);
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x2:
      if (op == 0x1)
        /* Not really supported.  */
	return copy_unmodified (gdbarch, insn, "bxj", dsc);
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x3:
      if (op == 0x1)
	return copy_bx_blx_reg (gdbarch, insn,
				regs, dsc);  /* blx register.  */
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x5:
      return copy_unmodified (gdbarch, insn, "saturating add/sub", dsc);

    case 0x7:
      if (op == 0x1)
	return copy_unmodified (gdbarch, insn, "bkpt", dsc);
      else if (op == 0x3)
        /* Not really supported.
 */
	return copy_unmodified (gdbarch, insn, "smc", dsc);
      /* Fall through to undefined for other OP values.  */

    default:
      return copy_undef (gdbarch, insn, dsc);
    }
}

/* Decode the data-processing and miscellaneous instruction space
   (instruction bits 27:26 == 00).  */

static int
decode_dp_misc (struct gdbarch *gdbarch, uint32_t insn, struct regcache *regs,
		struct displaced_step_closure *dsc)
{
  if (bit (insn, 25))
    switch (bits (insn, 20, 24))
      {
      case 0x10:
	return copy_unmodified (gdbarch, insn, "movw", dsc);

      case 0x14:
	return copy_unmodified (gdbarch, insn, "movt", dsc);

      case 0x12: case 0x16:
	return copy_unmodified (gdbarch, insn, "msr imm", dsc);

      default:
	return copy_alu_imm (gdbarch, insn, regs, dsc);
      }
  else
    {
      uint32_t op1 = bits (insn, 20, 24), op2 = bits (insn, 4, 7);

      if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0x0)
	return copy_alu_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1)
	return copy_alu_shifted_reg (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0x0)
	return decode_miscellaneous (gdbarch, insn, regs, dsc);
      else if ((op1 & 0x19) == 0x10 && (op2 & 0x9) == 0x8)
	return copy_unmodified (gdbarch, insn, "halfword mul/mla", dsc);
      else if ((op1 & 0x10) == 0x00 && op2 == 0x9)
	return copy_unmodified (gdbarch, insn, "mul/mla", dsc);
      else if ((op1 & 0x10) == 0x10 && op2 == 0x9)
	return copy_unmodified (gdbarch, insn, "synch", dsc);
      else if (op2 == 0xb || (op2 & 0xd) == 0xd)
	/* 2nd arg means "unprivileged".  */
	return copy_extra_ld_st (gdbarch, insn, (op1 & 0x12) == 0x02, regs,
				 dsc);
    }

  /* Should be unreachable.
 */
  return 1;
}

/* Decode the load/store word and unsigned byte instruction space.  The three
   trailing flag arguments passed to copy_ldr_str_ldrb_strb distinguish the
   eight variants — see that function for their meaning.  */

static int
decode_ld_st_word_ubyte (struct gdbarch *gdbarch, uint32_t insn,
			 struct regcache *regs,
			 struct displaced_step_closure *dsc)
{
  int a = bit (insn, 25), b = bit (insn, 4);
  uint32_t op1 = bits (insn, 20, 24);
  int rn_f = bits (insn, 16, 19) == 0xf;

  if ((!a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02)
      || (a && (op1 & 0x05) == 0x00 && (op1 & 0x17) != 0x02 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 0);
  else if ((!a && (op1 & 0x17) == 0x02)
	   || (a && (op1 & 0x17) == 0x02 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 0, 1);
  else if ((!a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03)
	   || (a && (op1 & 0x05) == 0x01 && (op1 & 0x17) != 0x03 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 0);
  else if ((!a && (op1 & 0x17) == 0x03)
	   || (a && (op1 & 0x17) == 0x03 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 0, 1);
  else if ((!a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06)
	   || (a && (op1 & 0x05) == 0x04 && (op1 & 0x17) != 0x06 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x06)
	   || (a && (op1 & 0x17) == 0x06 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 0, 1, 1);
  else if ((!a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07)
	   || (a && (op1 & 0x05) == 0x05 && (op1 & 0x17) != 0x07 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 0);
  else if ((!a && (op1 & 0x17) == 0x07)
	   || (a && (op1 & 0x17) == 0x07 && !b))
    return copy_ldr_str_ldrb_strb (gdbarch, insn, regs, dsc, 1, 1, 1);

  /* Should be unreachable.
 */
  return 1;
}

/* Decode the "media" instruction space (bits 27:25 == 011, bit 4 == 1,
   selected by the caller's dispatch).  */

static int
decode_media (struct gdbarch *gdbarch, uint32_t insn,
	      struct displaced_step_closure *dsc)
{
  switch (bits (insn, 20, 24))
    {
    case 0x00: case 0x01: case 0x02: case 0x03:
      return copy_unmodified (gdbarch, insn, "parallel add/sub signed", dsc);

    case 0x04: case 0x05: case 0x06: case 0x07:
      return copy_unmodified (gdbarch, insn, "parallel add/sub unsigned", dsc);

    case 0x08: case 0x09: case 0x0a: case 0x0b:
    case 0x0c: case 0x0d: case 0x0e: case 0x0f:
      return copy_unmodified (gdbarch, insn,
			      "decode/pack/unpack/saturate/reverse", dsc);

    case 0x18:
      if (bits (insn, 5, 7) == 0)  /* op2.  */
	{
	  if (bits (insn, 12, 15) == 0xf)
	    return copy_unmodified (gdbarch, insn, "usad8", dsc);
	  else
	    return copy_unmodified (gdbarch, insn, "usada8", dsc);
	}
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x1a: case 0x1b:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return copy_unmodified (gdbarch, insn, "sbfx", dsc);
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x1c: case 0x1d:
      if (bits (insn, 5, 6) == 0x0)  /* op2[1:0].  */
	{
	  if (bits (insn, 0, 3) == 0xf)
	    return copy_unmodified (gdbarch, insn, "bfc", dsc);
	  else
	    return copy_unmodified (gdbarch, insn, "bfi", dsc);
	}
      else
	return copy_undef (gdbarch, insn, dsc);

    case 0x1e: case 0x1f:
      if (bits (insn, 5, 6) == 0x2)  /* op2[1:0].  */
	return copy_unmodified (gdbarch, insn, "ubfx", dsc);
      else
	return copy_undef (gdbarch, insn, dsc);
    }

  /* Should be unreachable.
 */
  return 1;
}

/* Decode the branch / block-transfer space: bit 25 distinguishes b/bl from
   ldm/stm.  */

static int
decode_b_bl_ldmstm (struct gdbarch *gdbarch, int32_t insn,
		    struct regcache *regs, struct displaced_step_closure *dsc)
{
  if (bit (insn, 25))
    return copy_b_bl_blx (gdbarch, insn, regs, dsc);
  else
    return copy_block_xfer (gdbarch, insn, regs, dsc);
}

/* Decode VFP/Neon extension-register load/store and 64-bit transfer
   instructions.  */

static int
decode_ext_reg_ld_st (struct gdbarch *gdbarch, uint32_t insn,
		      struct regcache *regs,
		      struct displaced_step_closure *dsc)
{
  unsigned int opcode = bits (insn, 20, 24);

  switch (opcode)
    {
    case 0x04: case 0x05:  /* VFP/Neon mrrc/mcrr.  */
      return copy_unmodified (gdbarch, insn, "vfp/neon mrrc/mcrr", dsc);

    case 0x08: case 0x0a: case 0x0c: case 0x0e:
    case 0x12: case 0x16:
      return copy_unmodified (gdbarch, insn, "vfp/neon vstm/vpush", dsc);

    case 0x09: case 0x0b: case 0x0d: case 0x0f:
    case 0x13: case 0x17:
      return copy_unmodified (gdbarch, insn, "vfp/neon vldm/vpop", dsc);

    case 0x10: case 0x14: case 0x18: case 0x1c:  /* vstr.  */
    case 0x11: case 0x15: case 0x19: case 0x1d:  /* vldr.  */
      /* Note: no writeback for these instructions.  Bit 25 will always be
	 zero though (via caller), so the following works OK.  */
      return copy_copro_load_store (gdbarch, insn, regs, dsc);
    }

  /* Should be unreachable.  */
  return 1;
}

/* Decode the supervisor-call and coprocessor instruction space
   (bits 27:26 == 11).  */

static int
decode_svc_copro (struct gdbarch *gdbarch, uint32_t insn, CORE_ADDR to,
		  struct regcache *regs, struct displaced_step_closure *dsc)
{
  unsigned int op1 = bits (insn, 20, 25);
  int op = bit (insn, 4);
  unsigned int coproc = bits (insn, 8, 11);
  unsigned int rn = bits (insn, 16, 19);

  if ((op1 & 0x20) == 0x00 && (op1 & 0x3a) != 0x00 && (coproc & 0xe) == 0xa)
    return decode_ext_reg_ld_st (gdbarch, insn, regs, dsc);
  else if ((op1 & 0x21) == 0x00 && (op1 & 0x3a) != 0x00
	   && (coproc & 0xe) != 0xa)
    /* stc/stc2.
 */
    return copy_copro_load_store (gdbarch, insn, regs, dsc);
  else if ((op1 & 0x21) == 0x01 && (op1 & 0x3a) != 0x00
	   && (coproc & 0xe) != 0xa)
    /* ldc/ldc2 imm/lit.  */
    return copy_copro_load_store (gdbarch, insn, regs, dsc);
  else if ((op1 & 0x3e) == 0x00)
    return copy_undef (gdbarch, insn, dsc);
  else if ((op1 & 0x3e) == 0x04 && (coproc & 0xe) == 0xa)
    return copy_unmodified (gdbarch, insn, "neon 64bit xfer", dsc);
  else if (op1 == 0x04 && (coproc & 0xe) != 0xa)
    return copy_unmodified (gdbarch, insn, "mcrr/mcrr2", dsc);
  else if (op1 == 0x05 && (coproc & 0xe) != 0xa)
    return copy_unmodified (gdbarch, insn, "mrrc/mrrc2", dsc);
  else if ((op1 & 0x30) == 0x20 && !op)
    {
      if ((coproc & 0xe) == 0xa)
	return copy_unmodified (gdbarch, insn, "vfp dataproc", dsc);
      else
	return copy_unmodified (gdbarch, insn, "cdp/cdp2", dsc);
    }
  else if ((op1 & 0x30) == 0x20 && op)
    return copy_unmodified (gdbarch, insn, "neon 8/16/32 bit xfer", dsc);
  else if ((op1 & 0x31) == 0x20 && op && (coproc & 0xe) != 0xa)
    return copy_unmodified (gdbarch, insn, "mcr/mcr2", dsc);
  else if ((op1 & 0x31) == 0x21 && op && (coproc & 0xe) != 0xa)
    return copy_unmodified (gdbarch, insn, "mrc/mrc2", dsc);
  else if ((op1 & 0x30) == 0x30)
    return copy_svc (gdbarch, insn, to, regs, dsc);
  else
    return copy_undef (gdbarch, insn, dsc);  /* Possibly unreachable.
 */
}

/* Displaced stepping of Thumb code is not implemented; report that to the
   user rather than single-stepping wrongly.  */

static void
thumb_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
			      CORE_ADDR to, struct regcache *regs,
			      struct displaced_step_closure *dsc)
{
  error (_("Displaced stepping is only supported in ARM mode"));
}

/* Decode the instruction at FROM and fill in DSC with the modified
   instruction(s) to place in the scratch space at TO, plus the cleanup
   routine to run after the out-of-line single-step.  */

void
arm_process_displaced_insn (struct gdbarch *gdbarch, CORE_ADDR from,
			    CORE_ADDR to, struct regcache *regs,
			    struct displaced_step_closure *dsc)
{
  int err = 0;
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  uint32_t insn;

  /* Most displaced instructions use a 1-instruction scratch space, so set this
     here and override below if/when necessary.  */
  dsc->numinsns = 1;
  dsc->insn_addr = from;
  dsc->scratch_base = to;
  dsc->cleanup = NULL;
  dsc->wrote_to_pc = 0;

  if (!displaced_in_arm_mode (regs))
    return thumb_process_displaced_insn (gdbarch, from, to, regs, dsc);

  dsc->is_thumb = 0;
  dsc->insn_size = 4;
  insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: stepping insn %.8lx "
			"at %.8lx\n", (unsigned long) insn,
			(unsigned long) from);

  /* Dispatch on the top-level encoding: condition field of all-ones goes to
     the unconditional space, otherwise switch on bits 27:25 and bit 4.  */
  if ((insn & 0xf0000000) == 0xf0000000)
    err = decode_unconditional (gdbarch, insn, regs, dsc);
  else switch (((insn & 0x10) >> 4) | ((insn & 0xe000000) >> 24))
    {
    case 0x0: case 0x1: case 0x2: case 0x3:
      err = decode_dp_misc (gdbarch, insn, regs, dsc);
      break;

    case 0x4: case 0x5: case 0x6:
      err = decode_ld_st_word_ubyte (gdbarch, insn, regs, dsc);
      break;

    case 0x7:
      err = decode_media (gdbarch, insn, dsc);
      break;

    case 0x8: case 0x9: case 0xa: case 0xb:
      err = decode_b_bl_ldmstm (gdbarch, insn, regs, dsc);
      break;

    case 0xc: case 0xd: case 0xe: case 0xf:
      err = decode_svc_copro (gdbarch, insn, to, regs, dsc);
      break;
    }

  if (err)
    internal_error (__FILE__, __LINE__,
		    _("arm_process_displaced_insn: Instruction decode error"));
}

/* Actually set up the scratch space for a displaced instruction.  */

void
arm_displaced_init_closure (struct gdbarch *gdbarch, CORE_ADDR from,
			    CORE_ADDR to, struct displaced_step_closure *dsc)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  unsigned int i, len, offset;
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  /* Thumb instructions in the scratch space are 2 bytes, ARM ones 4.  */
  int size = dsc->is_thumb? 2 : 4;
  const unsigned char *bkp_insn;

  offset = 0;
  /* Poke modified instruction(s).  */
  for (i = 0; i < dsc->numinsns; i++)
    {
      if (debug_displaced)
	{
	  fprintf_unfiltered (gdb_stdlog, "displaced: writing insn ");
	  if (size == 4)
	    fprintf_unfiltered (gdb_stdlog, "%.8lx",
				dsc->modinsn[i]);
	  else if (size == 2)
	    fprintf_unfiltered (gdb_stdlog, "%.4x",
				(unsigned short)dsc->modinsn[i]);

	  fprintf_unfiltered (gdb_stdlog, " at %.8lx\n",
			      (unsigned long) to + offset);

	}
      write_memory_unsigned_integer (to + offset, size,
				     byte_order_for_code,
				     dsc->modinsn[i]);
      offset += size;
    }

  /* Choose the correct breakpoint instruction.  */
  if (dsc->is_thumb)
    {
      bkp_insn = tdep->thumb_breakpoint;
      len = tdep->thumb_breakpoint_size;
    }
  else
    {
      bkp_insn = tdep->arm_breakpoint;
      len = tdep->arm_breakpoint_size;
    }

  /* Put breakpoint afterwards.  */
  write_memory (to + offset, bkp_insn, len);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
			paddress (gdbarch, from), paddress (gdbarch, to));
}

/* Entry point for copying an instruction into scratch space for displaced
   stepping.
 */

struct displaced_step_closure *
arm_displaced_step_copy_insn (struct gdbarch *gdbarch,
			      CORE_ADDR from, CORE_ADDR to,
			      struct regcache *regs)
{
  /* The closure is heap-allocated; ownership passes to the caller
     (presumably freed by the generic displaced-stepping code — confirm).  */
  struct displaced_step_closure *dsc
    = xmalloc (sizeof (struct displaced_step_closure));
  arm_process_displaced_insn (gdbarch, from, to, regs, dsc);
  arm_displaced_init_closure (gdbarch, from, to, dsc);

  return dsc;
}

/* Entry point for cleaning things up after a displaced instruction has been
   single-stepped.  */

void
arm_displaced_step_fixup (struct gdbarch *gdbarch,
			  struct displaced_step_closure *dsc,
			  CORE_ADDR from, CORE_ADDR to,
			  struct regcache *regs)
{
  if (dsc->cleanup)
    dsc->cleanup (gdbarch, regs, dsc);

  /* Unless the stepped instruction (or its cleanup) already set the PC,
     advance it past the original instruction.  */
  if (!dsc->wrote_to_pc)
    regcache_cooked_write_unsigned (regs, ARM_PC_REGNUM,
				    dsc->insn_addr + dsc->insn_size);

}

#include "bfd-in2.h"
#include "libcoff.h"

/* Disassembler callback: print one instruction at MEMADDR, selecting Thumb
   or ARM decoding via a fake Thumb symbol handed to the opcodes library.  */

static int
gdb_print_insn_arm (bfd_vma memaddr, disassemble_info *info)
{
  struct gdbarch *gdbarch = info->application_data;

  if (arm_pc_is_thumb (gdbarch, memaddr))
    {
      static asymbol *asym;
      static combined_entry_type ce;
      static struct coff_symbol_struct csym;
      static struct bfd fake_bfd;
      static bfd_target fake_target;

      if (csym.native == NULL)
	{
	  /* Create a fake symbol vector containing a Thumb symbol.
	     This is solely so that the code in print_insn_little_arm()
	     and print_insn_big_arm() in opcodes/arm-dis.c will detect
	     the presence of a Thumb symbol and switch to decoding
	     Thumb instructions.
 */

	  fake_target.flavour = bfd_target_coff_flavour;
	  fake_bfd.xvec = &fake_target;
	  ce.u.syment.n_sclass = C_THUMBEXTFUNC;
	  csym.native = &ce;
	  csym.symbol.the_bfd = &fake_bfd;
	  csym.symbol.name = "fake";
	  asym = (asymbol *) & csym;
	}

      memaddr = UNMAKE_THUMB_ADDR (memaddr);
      info->symbols = &asym;
    }
  else
    info->symbols = NULL;

  if (info->endian == BFD_ENDIAN_BIG)
    return print_insn_big_arm (memaddr, info);
  else
    return print_insn_little_arm (memaddr, info);
}

/* The following define instruction sequences that will cause ARM
   cpu's to take an undefined instruction trap.  These are used to
   signal a breakpoint to GDB.

   The newer ARMv4T cpu's are capable of operating in ARM or Thumb
   modes.  A different instruction is required for each mode.  The ARM
   cpu's can also be big or little endian.  Thus four different
   instructions are needed to support all cases.

   Note: ARMv4 defines several new instructions that will take the
   undefined instruction trap.  ARM7TDMI is nominally ARMv4T, but does
   not in fact add the new instructions.  The new undefined
   instructions in ARMv4 are all instructions that had no defined
   behaviour in earlier chips.  There is no guarantee that they will
   raise an exception, but may be treated as NOP's.  In practice, it
   may only be safe to rely on instructions matching:

   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
   C C C C 0 1 1 x x x x x x x x x x x x x x x x x x x x 1 x x x x

   Even this may only be true if the condition predicate is true.  The
   following use a condition predicate of ALWAYS so it is always TRUE.

   There are other ways of forcing a breakpoint.  GNU/Linux, RISC iX,
   and NetBSD all use a software interrupt rather than an undefined
   instruction to force a trap.
  This can be handled by the
   abi-specific code during establishment of the gdbarch vector.  */

#define ARM_LE_BREAKPOINT {0xFE,0xDE,0xFF,0xE7}
#define ARM_BE_BREAKPOINT {0xE7,0xFF,0xDE,0xFE}
#define THUMB_LE_BREAKPOINT {0xbe,0xbe}
#define THUMB_BE_BREAKPOINT {0xbe,0xbe}

static const char arm_default_arm_le_breakpoint[] = ARM_LE_BREAKPOINT;
static const char arm_default_arm_be_breakpoint[] = ARM_BE_BREAKPOINT;
static const char arm_default_thumb_le_breakpoint[] = THUMB_LE_BREAKPOINT;
static const char arm_default_thumb_be_breakpoint[] = THUMB_BE_BREAKPOINT;

/* Determine the type and size of breakpoint to insert at PCPTR.  Uses
   the program counter value to determine whether a 16-bit or 32-bit
   breakpoint should be used.  It returns a pointer to a string of
   bytes that encode a breakpoint instruction, stores the length of
   the string to *lenptr, and adjusts the program counter (if
   necessary) to point to the actual memory location where the
   breakpoint should be inserted.  */

static const unsigned char *
arm_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr, int *lenptr)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);

  if (arm_pc_is_thumb (gdbarch, *pcptr))
    {
      *pcptr = UNMAKE_THUMB_ADDR (*pcptr);

      /* If we have a separate 32-bit breakpoint instruction for Thumb-2,
	 check whether we are replacing a 32-bit instruction.
 */
      if (tdep->thumb2_breakpoint != NULL)
	{
	  gdb_byte buf[2];
	  if (target_read_memory (*pcptr, buf, 2) == 0)
	    {
	      unsigned short inst1;
	      inst1 = extract_unsigned_integer (buf, 2, byte_order_for_code);
	      /* A first halfword matching this pattern starts a 32-bit
		 Thumb-2 instruction.  */
	      if ((inst1 & 0xe000) == 0xe000 && (inst1 & 0x1800) != 0)
		{
		  *lenptr = tdep->thumb2_breakpoint_size;
		  return tdep->thumb2_breakpoint;
		}
	    }
	}

      *lenptr = tdep->thumb_breakpoint_size;
      return tdep->thumb_breakpoint;
    }
  else
    {
      *lenptr = tdep->arm_breakpoint_size;
      return tdep->arm_breakpoint;
    }
}

/* Determine the breakpoint "kind" to report to a remote stub for a
   breakpoint at *PCPTR, reusing arm_breakpoint_from_pc for the length.  */

static void
arm_remote_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
			       int *kindptr)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  arm_breakpoint_from_pc (gdbarch, pcptr, kindptr);

  if (arm_pc_is_thumb (gdbarch, *pcptr) && *kindptr == 4)
    /* The documented magic value for a 32-bit Thumb-2 breakpoint, so
       that this is not confused with a 32-bit ARM breakpoint.  */
    *kindptr = 3;
}

/* Extract from an array REGBUF containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
arm_extract_return_value (struct type *type, struct regcache *regs,
			  gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE_FLT == TYPE_CODE (type))
    {
      switch (gdbarch_tdep (gdbarch)->fp_model)
	{
	case ARM_FLOAT_FPA:
	  {
	    /* The value is in register F0 in internal format.  We need to
	       extract the raw value and then convert it to the desired
	       internal type.
 */
	    bfd_byte tmpbuf[FP_REGISTER_SIZE];

	    regcache_cooked_read (regs, ARM_F0_REGNUM, tmpbuf);
	    convert_from_extended (floatformat_from_type (type), tmpbuf,
				   valbuf, gdbarch_byte_order (gdbarch));
	  }
	  break;

	case ARM_FLOAT_SOFT_FPA:
	case ARM_FLOAT_SOFT_VFP:
	  /* ARM_FLOAT_VFP can arise if this is a variadic function so
	     not using the VFP ABI code.  */
	case ARM_FLOAT_VFP:
	  /* Soft-float (and variadic VFP) values come back in the core
	     argument registers, r0 and — for 8-byte values — r1.  */
	  regcache_cooked_read (regs, ARM_A1_REGNUM, valbuf);
	  if (TYPE_LENGTH (type) > 4)
	    regcache_cooked_read (regs, ARM_A1_REGNUM + 1,
				  valbuf + INT_REGISTER_SIZE);
	  break;

	default:
	  internal_error (__FILE__, __LINE__,
			  _("arm_extract_return_value: "
			    "Floating point model not supported"));
	  break;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straight-forward.  Otherwise we have to play around a bit
	 more.  */
      int len = TYPE_LENGTH (type);
      int regno = ARM_A1_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > INT_REGISTER_SIZE
				   ? INT_REGISTER_SIZE : len),
				  byte_order, tmp);
	  len -= INT_REGISTER_SIZE;
	  valbuf += INT_REGISTER_SIZE;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 32-bit load instruction(s).
*/ 7220 int len = TYPE_LENGTH (type); 7221 int regno = ARM_A1_REGNUM; 7222 bfd_byte tmpbuf[INT_REGISTER_SIZE]; 7223 7224 while (len > 0) 7225 { 7226 regcache_cooked_read (regs, regno++, tmpbuf); 7227 memcpy (valbuf, tmpbuf, 7228 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len); 7229 len -= INT_REGISTER_SIZE; 7230 valbuf += INT_REGISTER_SIZE; 7231 } 7232 } 7233} 7234 7235 7236/* Will a function return an aggregate type in memory or in a 7237 register? Return 0 if an aggregate type can be returned in a 7238 register, 1 if it must be returned in memory. */ 7239 7240static int 7241arm_return_in_memory (struct gdbarch *gdbarch, struct type *type) 7242{ 7243 int nRc; 7244 enum type_code code; 7245 7246 CHECK_TYPEDEF (type); 7247 7248 /* In the ARM ABI, "integer" like aggregate types are returned in 7249 registers. For an aggregate type to be integer like, its size 7250 must be less than or equal to INT_REGISTER_SIZE and the 7251 offset of each addressable subfield must be zero. Note that bit 7252 fields are not addressable, and all addressable subfields of 7253 unions always start at offset zero. 7254 7255 This function is based on the behaviour of GCC 2.95.1. 7256 See: gcc/arm.c: arm_return_in_memory() for details. 7257 7258 Note: All versions of GCC before GCC 2.95.2 do not set up the 7259 parameters correctly for a function returning the following 7260 structure: struct { float f;}; This should be returned in memory, 7261 not a register. Richard Earnshaw sent me a patch, but I do not 7262 know of any way to detect if a function like the above has been 7263 compiled with the correct calling convention. */ 7264 7265 /* All aggregate types that won't fit in a register must be returned 7266 in memory. */ 7267 if (TYPE_LENGTH (type) > INT_REGISTER_SIZE) 7268 { 7269 return 1; 7270 } 7271 7272 /* The AAPCS says all aggregates not larger than a word are returned 7273 in a register. 
*/ 7274 if (gdbarch_tdep (gdbarch)->arm_abi != ARM_ABI_APCS) 7275 return 0; 7276 7277 /* The only aggregate types that can be returned in a register are 7278 structs and unions. Arrays must be returned in memory. */ 7279 code = TYPE_CODE (type); 7280 if ((TYPE_CODE_STRUCT != code) && (TYPE_CODE_UNION != code)) 7281 { 7282 return 1; 7283 } 7284 7285 /* Assume all other aggregate types can be returned in a register. 7286 Run a check for structures, unions and arrays. */ 7287 nRc = 0; 7288 7289 if ((TYPE_CODE_STRUCT == code) || (TYPE_CODE_UNION == code)) 7290 { 7291 int i; 7292 /* Need to check if this struct/union is "integer" like. For 7293 this to be true, its size must be less than or equal to 7294 INT_REGISTER_SIZE and the offset of each addressable 7295 subfield must be zero. Note that bit fields are not 7296 addressable, and unions always start at offset zero. If any 7297 of the subfields is a floating point type, the struct/union 7298 cannot be an integer type. */ 7299 7300 /* For each field in the object, check: 7301 1) Is it FP? --> yes, nRc = 1; 7302 2) Is it addressable (bitpos != 0) and 7303 not packed (bitsize == 0)? 7304 --> yes, nRc = 1 7305 */ 7306 7307 for (i = 0; i < TYPE_NFIELDS (type); i++) 7308 { 7309 enum type_code field_type_code; 7310 field_type_code = TYPE_CODE (check_typedef (TYPE_FIELD_TYPE (type, 7311 i))); 7312 7313 /* Is it a floating point type field? */ 7314 if (field_type_code == TYPE_CODE_FLT) 7315 { 7316 nRc = 1; 7317 break; 7318 } 7319 7320 /* If bitpos != 0, then we have to care about it. */ 7321 if (TYPE_FIELD_BITPOS (type, i) != 0) 7322 { 7323 /* Bitfields are not addressable. If the field bitsize is 7324 zero, then the field is not packed. Hence it cannot be 7325 a bitfield or any other packed type. 
*/ 7326 if (TYPE_FIELD_BITSIZE (type, i) == 0) 7327 { 7328 nRc = 1; 7329 break; 7330 } 7331 } 7332 } 7333 } 7334 7335 return nRc; 7336} 7337 7338/* Write into appropriate registers a function return value of type 7339 TYPE, given in virtual format. */ 7340 7341static void 7342arm_store_return_value (struct type *type, struct regcache *regs, 7343 const gdb_byte *valbuf) 7344{ 7345 struct gdbarch *gdbarch = get_regcache_arch (regs); 7346 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); 7347 7348 if (TYPE_CODE (type) == TYPE_CODE_FLT) 7349 { 7350 char buf[MAX_REGISTER_SIZE]; 7351 7352 switch (gdbarch_tdep (gdbarch)->fp_model) 7353 { 7354 case ARM_FLOAT_FPA: 7355 7356 convert_to_extended (floatformat_from_type (type), buf, valbuf, 7357 gdbarch_byte_order (gdbarch)); 7358 regcache_cooked_write (regs, ARM_F0_REGNUM, buf); 7359 break; 7360 7361 case ARM_FLOAT_SOFT_FPA: 7362 case ARM_FLOAT_SOFT_VFP: 7363 /* ARM_FLOAT_VFP can arise if this is a variadic function so 7364 not using the VFP ABI code. */ 7365 case ARM_FLOAT_VFP: 7366 regcache_cooked_write (regs, ARM_A1_REGNUM, valbuf); 7367 if (TYPE_LENGTH (type) > 4) 7368 regcache_cooked_write (regs, ARM_A1_REGNUM + 1, 7369 valbuf + INT_REGISTER_SIZE); 7370 break; 7371 7372 default: 7373 internal_error (__FILE__, __LINE__, 7374 _("arm_store_return_value: Floating " 7375 "point model not supported")); 7376 break; 7377 } 7378 } 7379 else if (TYPE_CODE (type) == TYPE_CODE_INT 7380 || TYPE_CODE (type) == TYPE_CODE_CHAR 7381 || TYPE_CODE (type) == TYPE_CODE_BOOL 7382 || TYPE_CODE (type) == TYPE_CODE_PTR 7383 || TYPE_CODE (type) == TYPE_CODE_REF 7384 || TYPE_CODE (type) == TYPE_CODE_ENUM) 7385 { 7386 if (TYPE_LENGTH (type) <= 4) 7387 { 7388 /* Values of one word or less are zero/sign-extended and 7389 returned in r0. 
*/ 7390 bfd_byte tmpbuf[INT_REGISTER_SIZE]; 7391 LONGEST val = unpack_long (type, valbuf); 7392 7393 store_signed_integer (tmpbuf, INT_REGISTER_SIZE, byte_order, val); 7394 regcache_cooked_write (regs, ARM_A1_REGNUM, tmpbuf); 7395 } 7396 else 7397 { 7398 /* Integral values greater than one word are stored in consecutive 7399 registers starting with r0. This will always be a multiple of 7400 the regiser size. */ 7401 int len = TYPE_LENGTH (type); 7402 int regno = ARM_A1_REGNUM; 7403 7404 while (len > 0) 7405 { 7406 regcache_cooked_write (regs, regno++, valbuf); 7407 len -= INT_REGISTER_SIZE; 7408 valbuf += INT_REGISTER_SIZE; 7409 } 7410 } 7411 } 7412 else 7413 { 7414 /* For a structure or union the behaviour is as if the value had 7415 been stored to word-aligned memory and then loaded into 7416 registers with 32-bit load instruction(s). */ 7417 int len = TYPE_LENGTH (type); 7418 int regno = ARM_A1_REGNUM; 7419 bfd_byte tmpbuf[INT_REGISTER_SIZE]; 7420 7421 while (len > 0) 7422 { 7423 memcpy (tmpbuf, valbuf, 7424 len > INT_REGISTER_SIZE ? INT_REGISTER_SIZE : len); 7425 regcache_cooked_write (regs, regno++, tmpbuf); 7426 len -= INT_REGISTER_SIZE; 7427 valbuf += INT_REGISTER_SIZE; 7428 } 7429 } 7430} 7431 7432 7433/* Handle function return values. 
*/ 7434 7435static enum return_value_convention 7436arm_return_value (struct gdbarch *gdbarch, struct type *func_type, 7437 struct type *valtype, struct regcache *regcache, 7438 gdb_byte *readbuf, const gdb_byte *writebuf) 7439{ 7440 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); 7441 enum arm_vfp_cprc_base_type vfp_base_type; 7442 int vfp_base_count; 7443 7444 if (arm_vfp_abi_for_function (gdbarch, func_type) 7445 && arm_vfp_call_candidate (valtype, &vfp_base_type, &vfp_base_count)) 7446 { 7447 int reg_char = arm_vfp_cprc_reg_char (vfp_base_type); 7448 int unit_length = arm_vfp_cprc_unit_length (vfp_base_type); 7449 int i; 7450 for (i = 0; i < vfp_base_count; i++) 7451 { 7452 if (reg_char == 'q') 7453 { 7454 if (writebuf) 7455 arm_neon_quad_write (gdbarch, regcache, i, 7456 writebuf + i * unit_length); 7457 7458 if (readbuf) 7459 arm_neon_quad_read (gdbarch, regcache, i, 7460 readbuf + i * unit_length); 7461 } 7462 else 7463 { 7464 char name_buf[4]; 7465 int regnum; 7466 7467 sprintf (name_buf, "%c%d", reg_char, i); 7468 regnum = user_reg_map_name_to_regnum (gdbarch, name_buf, 7469 strlen (name_buf)); 7470 if (writebuf) 7471 regcache_cooked_write (regcache, regnum, 7472 writebuf + i * unit_length); 7473 if (readbuf) 7474 regcache_cooked_read (regcache, regnum, 7475 readbuf + i * unit_length); 7476 } 7477 } 7478 return RETURN_VALUE_REGISTER_CONVENTION; 7479 } 7480 7481 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT 7482 || TYPE_CODE (valtype) == TYPE_CODE_UNION 7483 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY) 7484 { 7485 if (tdep->struct_return == pcc_struct_return 7486 || arm_return_in_memory (gdbarch, valtype)) 7487 return RETURN_VALUE_STRUCT_CONVENTION; 7488 } 7489 7490 if (writebuf) 7491 arm_store_return_value (valtype, regcache, writebuf); 7492 7493 if (readbuf) 7494 arm_extract_return_value (valtype, regcache, readbuf); 7495 7496 return RETURN_VALUE_REGISTER_CONVENTION; 7497} 7498 7499 7500static int 7501arm_get_longjmp_target (struct frame_info *frame, 
CORE_ADDR *pc) 7502{ 7503 struct gdbarch *gdbarch = get_frame_arch (frame); 7504 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); 7505 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); 7506 CORE_ADDR jb_addr; 7507 char buf[INT_REGISTER_SIZE]; 7508 7509 jb_addr = get_frame_register_unsigned (frame, ARM_A1_REGNUM); 7510 7511 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf, 7512 INT_REGISTER_SIZE)) 7513 return 0; 7514 7515 *pc = extract_unsigned_integer (buf, INT_REGISTER_SIZE, byte_order); 7516 return 1; 7517} 7518 7519/* Recognize GCC and GNU ld's trampolines. If we are in a trampoline, 7520 return the target PC. Otherwise return 0. */ 7521 7522CORE_ADDR 7523arm_skip_stub (struct frame_info *frame, CORE_ADDR pc) 7524{ 7525 char *name; 7526 int namelen; 7527 CORE_ADDR start_addr; 7528 7529 /* Find the starting address and name of the function containing the PC. */ 7530 if (find_pc_partial_function (pc, &name, &start_addr, NULL) == 0) 7531 return 0; 7532 7533 /* If PC is in a Thumb call or return stub, return the address of the 7534 target PC, which is in a register. The thunk functions are called 7535 _call_via_xx, where x is the register name. The possible names 7536 are r0-r9, sl, fp, ip, sp, and lr. ARM RealView has similar 7537 functions, named __ARM_call_via_r[0-7]. */ 7538 if (strncmp (name, "_call_via_", 10) == 0 7539 || strncmp (name, "__ARM_call_via_", strlen ("__ARM_call_via_")) == 0) 7540 { 7541 /* Use the name suffix to determine which register contains the 7542 target PC. 
*/ 7543 static char *table[15] = 7544 {"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", 7545 "r8", "r9", "sl", "fp", "ip", "sp", "lr" 7546 }; 7547 int regno; 7548 int offset = strlen (name) - 2; 7549 7550 for (regno = 0; regno <= 14; regno++) 7551 if (strcmp (&name[offset], table[regno]) == 0) 7552 return get_frame_register_unsigned (frame, regno); 7553 } 7554 7555 /* GNU ld generates __foo_from_arm or __foo_from_thumb for 7556 non-interworking calls to foo. We could decode the stubs 7557 to find the target but it's easier to use the symbol table. */ 7558 namelen = strlen (name); 7559 if (name[0] == '_' && name[1] == '_' 7560 && ((namelen > 2 + strlen ("_from_thumb") 7561 && strncmp (name + namelen - strlen ("_from_thumb"), "_from_thumb", 7562 strlen ("_from_thumb")) == 0) 7563 || (namelen > 2 + strlen ("_from_arm") 7564 && strncmp (name + namelen - strlen ("_from_arm"), "_from_arm", 7565 strlen ("_from_arm")) == 0))) 7566 { 7567 char *target_name; 7568 int target_len = namelen - 2; 7569 struct minimal_symbol *minsym; 7570 struct objfile *objfile; 7571 struct obj_section *sec; 7572 7573 if (name[namelen - 1] == 'b') 7574 target_len -= strlen ("_from_thumb"); 7575 else 7576 target_len -= strlen ("_from_arm"); 7577 7578 target_name = alloca (target_len + 1); 7579 memcpy (target_name, name + 2, target_len); 7580 target_name[target_len] = '\0'; 7581 7582 sec = find_pc_section (pc); 7583 objfile = (sec == NULL) ? 
NULL : sec->objfile; 7584 minsym = lookup_minimal_symbol (target_name, NULL, objfile); 7585 if (minsym != NULL) 7586 return SYMBOL_VALUE_ADDRESS (minsym); 7587 else 7588 return 0; 7589 } 7590 7591 return 0; /* not a stub */ 7592} 7593 7594static void 7595set_arm_command (char *args, int from_tty) 7596{ 7597 printf_unfiltered (_("\ 7598\"set arm\" must be followed by an apporpriate subcommand.\n")); 7599 help_list (setarmcmdlist, "set arm ", all_commands, gdb_stdout); 7600} 7601 7602static void 7603show_arm_command (char *args, int from_tty) 7604{ 7605 cmd_show_list (showarmcmdlist, from_tty, ""); 7606} 7607 7608static void 7609arm_update_current_architecture (void) 7610{ 7611 struct gdbarch_info info; 7612 7613 /* If the current architecture is not ARM, we have nothing to do. */ 7614 if (gdbarch_bfd_arch_info (target_gdbarch)->arch != bfd_arch_arm) 7615 return; 7616 7617 /* Update the architecture. */ 7618 gdbarch_info_init (&info); 7619 7620 if (!gdbarch_update_p (info)) 7621 internal_error (__FILE__, __LINE__, _("could not update architecture")); 7622} 7623 7624static void 7625set_fp_model_sfunc (char *args, int from_tty, 7626 struct cmd_list_element *c) 7627{ 7628 enum arm_float_model fp_model; 7629 7630 for (fp_model = ARM_FLOAT_AUTO; fp_model != ARM_FLOAT_LAST; fp_model++) 7631 if (strcmp (current_fp_model, fp_model_strings[fp_model]) == 0) 7632 { 7633 arm_fp_model = fp_model; 7634 break; 7635 } 7636 7637 if (fp_model == ARM_FLOAT_LAST) 7638 internal_error (__FILE__, __LINE__, _("Invalid fp model accepted: %s."), 7639 current_fp_model); 7640 7641 arm_update_current_architecture (); 7642} 7643 7644static void 7645show_fp_model (struct ui_file *file, int from_tty, 7646 struct cmd_list_element *c, const char *value) 7647{ 7648 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch); 7649 7650 if (arm_fp_model == ARM_FLOAT_AUTO 7651 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm) 7652 fprintf_filtered (file, _("\ 7653The current ARM floating 
point model is \"auto\" (currently \"%s\").\n"), 7654 fp_model_strings[tdep->fp_model]); 7655 else 7656 fprintf_filtered (file, _("\ 7657The current ARM floating point model is \"%s\".\n"), 7658 fp_model_strings[arm_fp_model]); 7659} 7660 7661static void 7662arm_set_abi (char *args, int from_tty, 7663 struct cmd_list_element *c) 7664{ 7665 enum arm_abi_kind arm_abi; 7666 7667 for (arm_abi = ARM_ABI_AUTO; arm_abi != ARM_ABI_LAST; arm_abi++) 7668 if (strcmp (arm_abi_string, arm_abi_strings[arm_abi]) == 0) 7669 { 7670 arm_abi_global = arm_abi; 7671 break; 7672 } 7673 7674 if (arm_abi == ARM_ABI_LAST) 7675 internal_error (__FILE__, __LINE__, _("Invalid ABI accepted: %s."), 7676 arm_abi_string); 7677 7678 arm_update_current_architecture (); 7679} 7680 7681static void 7682arm_show_abi (struct ui_file *file, int from_tty, 7683 struct cmd_list_element *c, const char *value) 7684{ 7685 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch); 7686 7687 if (arm_abi_global == ARM_ABI_AUTO 7688 && gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_arm) 7689 fprintf_filtered (file, _("\ 7690The current ARM ABI is \"auto\" (currently \"%s\").\n"), 7691 arm_abi_strings[tdep->arm_abi]); 7692 else 7693 fprintf_filtered (file, _("The current ARM ABI is \"%s\".\n"), 7694 arm_abi_string); 7695} 7696 7697static void 7698arm_show_fallback_mode (struct ui_file *file, int from_tty, 7699 struct cmd_list_element *c, const char *value) 7700{ 7701 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch); 7702 7703 fprintf_filtered (file, 7704 _("The current execution mode assumed " 7705 "(when symbols are unavailable) is \"%s\".\n"), 7706 arm_fallback_mode_string); 7707} 7708 7709static void 7710arm_show_force_mode (struct ui_file *file, int from_tty, 7711 struct cmd_list_element *c, const char *value) 7712{ 7713 struct gdbarch_tdep *tdep = gdbarch_tdep (target_gdbarch); 7714 7715 fprintf_filtered (file, 7716 _("The current execution mode assumed " 7717 "(even when symbols are 
available) is \"%s\".\n"), 7718 arm_force_mode_string); 7719} 7720 7721/* If the user changes the register disassembly style used for info 7722 register and other commands, we have to also switch the style used 7723 in opcodes for disassembly output. This function is run in the "set 7724 arm disassembly" command, and does that. */ 7725 7726static void 7727set_disassembly_style_sfunc (char *args, int from_tty, 7728 struct cmd_list_element *c) 7729{ 7730 set_disassembly_style (); 7731} 7732 7733/* Return the ARM register name corresponding to register I. */ 7734static const char * 7735arm_register_name (struct gdbarch *gdbarch, int i) 7736{ 7737 const int num_regs = gdbarch_num_regs (gdbarch); 7738 7739 if (gdbarch_tdep (gdbarch)->have_vfp_pseudos 7740 && i >= num_regs && i < num_regs + 32) 7741 { 7742 static const char *const vfp_pseudo_names[] = { 7743 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", 7744 "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15", 7745 "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23", 7746 "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31", 7747 }; 7748 7749 return vfp_pseudo_names[i - num_regs]; 7750 } 7751 7752 if (gdbarch_tdep (gdbarch)->have_neon_pseudos 7753 && i >= num_regs + 32 && i < num_regs + 32 + 16) 7754 { 7755 static const char *const neon_pseudo_names[] = { 7756 "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", 7757 "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15", 7758 }; 7759 7760 return neon_pseudo_names[i - num_regs - 32]; 7761 } 7762 7763 if (i >= ARRAY_SIZE (arm_register_names)) 7764 /* These registers are only supported on targets which supply 7765 an XML description. */ 7766 return ""; 7767 7768 return arm_register_names[i]; 7769} 7770 7771static void 7772set_disassembly_style (void) 7773{ 7774 int current; 7775 7776 /* Find the style that the user wants. 
*/ 7777 for (current = 0; current < num_disassembly_options; current++) 7778 if (disassembly_style == valid_disassembly_styles[current]) 7779 break; 7780 gdb_assert (current < num_disassembly_options); 7781 7782 /* Synchronize the disassembler. */ 7783 set_arm_regname_option (current); 7784} 7785 7786/* Test whether the coff symbol specific value corresponds to a Thumb 7787 function. */ 7788 7789static int 7790coff_sym_is_thumb (int val) 7791{ 7792 return (val == C_THUMBEXT 7793 || val == C_THUMBSTAT 7794 || val == C_THUMBEXTFUNC 7795 || val == C_THUMBSTATFUNC 7796 || val == C_THUMBLABEL); 7797} 7798 7799/* arm_coff_make_msymbol_special() 7800 arm_elf_make_msymbol_special() 7801 7802 These functions test whether the COFF or ELF symbol corresponds to 7803 an address in thumb code, and set a "special" bit in a minimal 7804 symbol to indicate that it does. */ 7805 7806static void 7807arm_elf_make_msymbol_special(asymbol *sym, struct minimal_symbol *msym) 7808{ 7809 if (ARM_SYM_BRANCH_TYPE (&((elf_symbol_type *)sym)->internal_elf_sym) 7810 == ST_BRANCH_TO_THUMB) 7811 MSYMBOL_SET_SPECIAL (msym); 7812} 7813 7814static void 7815arm_coff_make_msymbol_special(int val, struct minimal_symbol *msym) 7816{ 7817 if (coff_sym_is_thumb (val)) 7818 MSYMBOL_SET_SPECIAL (msym); 7819} 7820 7821static void 7822arm_objfile_data_free (struct objfile *objfile, void *arg) 7823{ 7824 struct arm_per_objfile *data = arg; 7825 unsigned int i; 7826 7827 for (i = 0; i < objfile->obfd->section_count; i++) 7828 VEC_free (arm_mapping_symbol_s, data->section_maps[i]); 7829} 7830 7831static void 7832arm_record_special_symbol (struct gdbarch *gdbarch, struct objfile *objfile, 7833 asymbol *sym) 7834{ 7835 const char *name = bfd_asymbol_name (sym); 7836 struct arm_per_objfile *data; 7837 VEC(arm_mapping_symbol_s) **map_p; 7838 struct arm_mapping_symbol new_map_sym; 7839 7840 gdb_assert (name[0] == '$'); 7841 if (name[1] != 'a' && name[1] != 't' && name[1] != 'd') 7842 return; 7843 7844 data = 
objfile_data (objfile, arm_objfile_data_key); 7845 if (data == NULL) 7846 { 7847 data = OBSTACK_ZALLOC (&objfile->objfile_obstack, 7848 struct arm_per_objfile); 7849 set_objfile_data (objfile, arm_objfile_data_key, data); 7850 data->section_maps = OBSTACK_CALLOC (&objfile->objfile_obstack, 7851 objfile->obfd->section_count, 7852 VEC(arm_mapping_symbol_s) *); 7853 } 7854 map_p = &data->section_maps[bfd_get_section (sym)->index]; 7855 7856 new_map_sym.value = sym->value; 7857 new_map_sym.type = name[1]; 7858 7859 /* Assume that most mapping symbols appear in order of increasing 7860 value. If they were randomly distributed, it would be faster to 7861 always push here and then sort at first use. */ 7862 if (!VEC_empty (arm_mapping_symbol_s, *map_p)) 7863 { 7864 struct arm_mapping_symbol *prev_map_sym; 7865 7866 prev_map_sym = VEC_last (arm_mapping_symbol_s, *map_p); 7867 if (prev_map_sym->value >= sym->value) 7868 { 7869 unsigned int idx; 7870 idx = VEC_lower_bound (arm_mapping_symbol_s, *map_p, &new_map_sym, 7871 arm_compare_mapping_symbols); 7872 VEC_safe_insert (arm_mapping_symbol_s, *map_p, idx, &new_map_sym); 7873 return; 7874 } 7875 } 7876 7877 VEC_safe_push (arm_mapping_symbol_s, *map_p, &new_map_sym); 7878} 7879 7880static void 7881arm_write_pc (struct regcache *regcache, CORE_ADDR pc) 7882{ 7883 struct gdbarch *gdbarch = get_regcache_arch (regcache); 7884 regcache_cooked_write_unsigned (regcache, ARM_PC_REGNUM, pc); 7885 7886 /* If necessary, set the T bit. */ 7887 if (arm_apcs_32) 7888 { 7889 ULONGEST val, t_bit; 7890 regcache_cooked_read_unsigned (regcache, ARM_PS_REGNUM, &val); 7891 t_bit = arm_psr_thumb_bit (gdbarch); 7892 if (arm_pc_is_thumb (gdbarch, pc)) 7893 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM, 7894 val | t_bit); 7895 else 7896 regcache_cooked_write_unsigned (regcache, ARM_PS_REGNUM, 7897 val & ~t_bit); 7898 } 7899} 7900 7901/* Read the contents of a NEON quad register, by reading from two 7902 double registers. 
This is used to implement the quad pseudo 7903 registers, and for argument passing in case the quad registers are 7904 missing; vectors are passed in quad registers when using the VFP 7905 ABI, even if a NEON unit is not present. REGNUM is the index of 7906 the quad register, in [0, 15]. */ 7907 7908static enum register_status 7909arm_neon_quad_read (struct gdbarch *gdbarch, struct regcache *regcache, 7910 int regnum, gdb_byte *buf) 7911{ 7912 char name_buf[4]; 7913 gdb_byte reg_buf[8]; 7914 int offset, double_regnum; 7915 enum register_status status; 7916 7917 sprintf (name_buf, "d%d", regnum << 1); 7918 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf, 7919 strlen (name_buf)); 7920 7921 /* d0 is always the least significant half of q0. */ 7922 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG) 7923 offset = 8; 7924 else 7925 offset = 0; 7926 7927 status = regcache_raw_read (regcache, double_regnum, reg_buf); 7928 if (status != REG_VALID) 7929 return status; 7930 memcpy (buf + offset, reg_buf, 8); 7931 7932 offset = 8 - offset; 7933 status = regcache_raw_read (regcache, double_regnum + 1, reg_buf); 7934 if (status != REG_VALID) 7935 return status; 7936 memcpy (buf + offset, reg_buf, 8); 7937 7938 return REG_VALID; 7939} 7940 7941static enum register_status 7942arm_pseudo_read (struct gdbarch *gdbarch, struct regcache *regcache, 7943 int regnum, gdb_byte *buf) 7944{ 7945 const int num_regs = gdbarch_num_regs (gdbarch); 7946 char name_buf[4]; 7947 gdb_byte reg_buf[8]; 7948 int offset, double_regnum; 7949 7950 gdb_assert (regnum >= num_regs); 7951 regnum -= num_regs; 7952 7953 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48) 7954 /* Quad-precision register. */ 7955 return arm_neon_quad_read (gdbarch, regcache, regnum - 32, buf); 7956 else 7957 { 7958 enum register_status status; 7959 7960 /* Single-precision register. */ 7961 gdb_assert (regnum < 32); 7962 7963 /* s0 is always the least significant half of d0. 
*/ 7964 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG) 7965 offset = (regnum & 1) ? 0 : 4; 7966 else 7967 offset = (regnum & 1) ? 4 : 0; 7968 7969 sprintf (name_buf, "d%d", regnum >> 1); 7970 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf, 7971 strlen (name_buf)); 7972 7973 status = regcache_raw_read (regcache, double_regnum, reg_buf); 7974 if (status == REG_VALID) 7975 memcpy (buf, reg_buf + offset, 4); 7976 return status; 7977 } 7978} 7979 7980/* Store the contents of BUF to a NEON quad register, by writing to 7981 two double registers. This is used to implement the quad pseudo 7982 registers, and for argument passing in case the quad registers are 7983 missing; vectors are passed in quad registers when using the VFP 7984 ABI, even if a NEON unit is not present. REGNUM is the index 7985 of the quad register, in [0, 15]. */ 7986 7987static void 7988arm_neon_quad_write (struct gdbarch *gdbarch, struct regcache *regcache, 7989 int regnum, const gdb_byte *buf) 7990{ 7991 char name_buf[4]; 7992 gdb_byte reg_buf[8]; 7993 int offset, double_regnum; 7994 7995 sprintf (name_buf, "d%d", regnum << 1); 7996 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf, 7997 strlen (name_buf)); 7998 7999 /* d0 is always the least significant half of q0. 
*/ 8000 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG) 8001 offset = 8; 8002 else 8003 offset = 0; 8004 8005 regcache_raw_write (regcache, double_regnum, buf + offset); 8006 offset = 8 - offset; 8007 regcache_raw_write (regcache, double_regnum + 1, buf + offset); 8008} 8009 8010static void 8011arm_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache, 8012 int regnum, const gdb_byte *buf) 8013{ 8014 const int num_regs = gdbarch_num_regs (gdbarch); 8015 char name_buf[4]; 8016 gdb_byte reg_buf[8]; 8017 int offset, double_regnum; 8018 8019 gdb_assert (regnum >= num_regs); 8020 regnum -= num_regs; 8021 8022 if (gdbarch_tdep (gdbarch)->have_neon_pseudos && regnum >= 32 && regnum < 48) 8023 /* Quad-precision register. */ 8024 arm_neon_quad_write (gdbarch, regcache, regnum - 32, buf); 8025 else 8026 { 8027 /* Single-precision register. */ 8028 gdb_assert (regnum < 32); 8029 8030 /* s0 is always the least significant half of d0. */ 8031 if (gdbarch_byte_order (gdbarch) == BFD_ENDIAN_BIG) 8032 offset = (regnum & 1) ? 0 : 4; 8033 else 8034 offset = (regnum & 1) ? 4 : 0; 8035 8036 sprintf (name_buf, "d%d", regnum >> 1); 8037 double_regnum = user_reg_map_name_to_regnum (gdbarch, name_buf, 8038 strlen (name_buf)); 8039 8040 regcache_raw_read (regcache, double_regnum, reg_buf); 8041 memcpy (reg_buf + offset, buf, 4); 8042 regcache_raw_write (regcache, double_regnum, reg_buf); 8043 } 8044} 8045 8046static struct value * 8047value_of_arm_user_reg (struct frame_info *frame, const void *baton) 8048{ 8049 const int *reg_p = baton; 8050 return value_of_register (*reg_p, frame); 8051} 8052 8053static enum gdb_osabi 8054arm_elf_osabi_sniffer (bfd *abfd) 8055{ 8056 unsigned int elfosabi; 8057 enum gdb_osabi osabi = GDB_OSABI_UNKNOWN; 8058 8059 elfosabi = elf_elfheader (abfd)->e_ident[EI_OSABI]; 8060 8061 if (elfosabi == ELFOSABI_ARM) 8062 /* GNU tools use this value. Check note sections in this case, 8063 as well. 
*/ 8064 bfd_map_over_sections (abfd, 8065 generic_elf_osabi_sniff_abi_tag_sections, 8066 &osabi); 8067 8068 /* Anything else will be handled by the generic ELF sniffer. */ 8069 return osabi; 8070} 8071 8072static int 8073arm_register_reggroup_p (struct gdbarch *gdbarch, int regnum, 8074 struct reggroup *group) 8075{ 8076 /* FPS register's type is INT, but belongs to float_reggroup. Beside 8077 this, FPS register belongs to save_regroup, restore_reggroup, and 8078 all_reggroup, of course. */ 8079 if (regnum == ARM_FPS_REGNUM) 8080 return (group == float_reggroup 8081 || group == save_reggroup 8082 || group == restore_reggroup 8083 || group == all_reggroup); 8084 else 8085 return default_register_reggroup_p (gdbarch, regnum, group); 8086} 8087 8088 8089/* Initialize the current architecture based on INFO. If possible, 8090 re-use an architecture from ARCHES, which is a list of 8091 architectures already created during this debugging session. 8092 8093 Called e.g. at program startup, when reading a core file, and when 8094 reading a binary file. */ 8095 8096static struct gdbarch * 8097arm_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches) 8098{ 8099 struct gdbarch_tdep *tdep; 8100 struct gdbarch *gdbarch; 8101 struct gdbarch_list *best_arch; 8102 enum arm_abi_kind arm_abi = arm_abi_global; 8103 enum arm_float_model fp_model = arm_fp_model; 8104 struct tdesc_arch_data *tdesc_data = NULL; 8105 int i, is_m = 0; 8106 int have_vfp_registers = 0, have_vfp_pseudos = 0, have_neon_pseudos = 0; 8107 int have_neon = 0; 8108 int have_fpa_registers = 1; 8109 const struct target_desc *tdesc = info.target_desc; 8110 8111 /* If we have an object to base this architecture on, try to determine 8112 its ABI. */ 8113 8114 if (arm_abi == ARM_ABI_AUTO && info.abfd != NULL) 8115 { 8116 int ei_osabi, e_flags; 8117 8118 switch (bfd_get_flavour (info.abfd)) 8119 { 8120 case bfd_target_aout_flavour: 8121 /* Assume it's an old APCS-style ABI. 
*/ 8122 arm_abi = ARM_ABI_APCS; 8123 break; 8124 8125 case bfd_target_coff_flavour: 8126 /* Assume it's an old APCS-style ABI. */ 8127 /* XXX WinCE? */ 8128 arm_abi = ARM_ABI_APCS; 8129 break; 8130 8131 case bfd_target_elf_flavour: 8132 ei_osabi = elf_elfheader (info.abfd)->e_ident[EI_OSABI]; 8133 e_flags = elf_elfheader (info.abfd)->e_flags; 8134 8135 if (ei_osabi == ELFOSABI_ARM) 8136 { 8137 /* GNU tools used to use this value, but do not for EABI 8138 objects. There's nowhere to tag an EABI version 8139 anyway, so assume APCS. */ 8140 arm_abi = ARM_ABI_APCS; 8141 } 8142 else if (ei_osabi == ELFOSABI_NONE) 8143 { 8144 int eabi_ver = EF_ARM_EABI_VERSION (e_flags); 8145 int attr_arch, attr_profile; 8146 8147 switch (eabi_ver) 8148 { 8149 case EF_ARM_EABI_UNKNOWN: 8150 /* Assume GNU tools. */ 8151 arm_abi = ARM_ABI_APCS; 8152 break; 8153 8154 case EF_ARM_EABI_VER4: 8155 case EF_ARM_EABI_VER5: 8156 arm_abi = ARM_ABI_AAPCS; 8157 /* EABI binaries default to VFP float ordering. 8158 They may also contain build attributes that can 8159 be used to identify if the VFP argument-passing 8160 ABI is in use. */ 8161 if (fp_model == ARM_FLOAT_AUTO) 8162 { 8163#ifdef HAVE_ELF 8164 switch (bfd_elf_get_obj_attr_int (info.abfd, 8165 OBJ_ATTR_PROC, 8166 Tag_ABI_VFP_args)) 8167 { 8168 case 0: 8169 /* "The user intended FP parameter/result 8170 passing to conform to AAPCS, base 8171 variant". */ 8172 fp_model = ARM_FLOAT_SOFT_VFP; 8173 break; 8174 case 1: 8175 /* "The user intended FP parameter/result 8176 passing to conform to AAPCS, VFP 8177 variant". */ 8178 fp_model = ARM_FLOAT_VFP; 8179 break; 8180 case 2: 8181 /* "The user intended FP parameter/result 8182 passing to conform to tool chain-specific 8183 conventions" - we don't know any such 8184 conventions, so leave it as "auto". */ 8185 break; 8186 default: 8187 /* Attribute value not mentioned in the 8188 October 2008 ABI, so leave it as 8189 "auto". 
*/ 8190 break; 8191 } 8192#else 8193 fp_model = ARM_FLOAT_SOFT_VFP; 8194#endif 8195 } 8196 break; 8197 8198 default: 8199 /* Leave it as "auto". */ 8200 warning (_("unknown ARM EABI version 0x%x"), eabi_ver); 8201 break; 8202 } 8203 8204#ifdef HAVE_ELF 8205 /* Detect M-profile programs. This only works if the 8206 executable file includes build attributes; GCC does 8207 copy them to the executable, but e.g. RealView does 8208 not. */ 8209 attr_arch = bfd_elf_get_obj_attr_int (info.abfd, OBJ_ATTR_PROC, 8210 Tag_CPU_arch); 8211 attr_profile = bfd_elf_get_obj_attr_int (info.abfd, 8212 OBJ_ATTR_PROC, 8213 Tag_CPU_arch_profile); 8214 /* GCC specifies the profile for v6-M; RealView only 8215 specifies the profile for architectures starting with 8216 V7 (as opposed to architectures with a tag 8217 numerically greater than TAG_CPU_ARCH_V7). */ 8218 if (!tdesc_has_registers (tdesc) 8219 && (attr_arch == TAG_CPU_ARCH_V6_M 8220 || attr_arch == TAG_CPU_ARCH_V6S_M 8221 || attr_profile == 'M')) 8222 tdesc = tdesc_arm_with_m; 8223#endif 8224 } 8225 8226 if (fp_model == ARM_FLOAT_AUTO) 8227 { 8228 int e_flags = elf_elfheader (info.abfd)->e_flags; 8229 8230 switch (e_flags & (EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT)) 8231 { 8232 case 0: 8233 /* Leave it as "auto". Strictly speaking this case 8234 means FPA, but almost nobody uses that now, and 8235 many toolchains fail to set the appropriate bits 8236 for the floating-point model they use. */ 8237 break; 8238 case EF_ARM_SOFT_FLOAT: 8239 fp_model = ARM_FLOAT_SOFT_FPA; 8240 break; 8241 case EF_ARM_VFP_FLOAT: 8242 fp_model = ARM_FLOAT_VFP; 8243 break; 8244 case EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT: 8245 fp_model = ARM_FLOAT_SOFT_VFP; 8246 break; 8247 } 8248 } 8249 8250 if (e_flags & EF_ARM_BE8) 8251 info.byte_order_for_code = BFD_ENDIAN_LITTLE; 8252 8253 break; 8254 8255 default: 8256 /* Leave it as "auto". */ 8257 break; 8258 } 8259 } 8260 8261 /* Check any target description for validity. 
*/ 8262 if (tdesc_has_registers (tdesc)) 8263 { 8264 /* For most registers we require GDB's default names; but also allow 8265 the numeric names for sp / lr / pc, as a convenience. */ 8266 static const char *const arm_sp_names[] = { "r13", "sp", NULL }; 8267 static const char *const arm_lr_names[] = { "r14", "lr", NULL }; 8268 static const char *const arm_pc_names[] = { "r15", "pc", NULL }; 8269 8270 const struct tdesc_feature *feature; 8271 int valid_p; 8272 8273 feature = tdesc_find_feature (tdesc, 8274 "org.gnu.gdb.arm.core"); 8275 if (feature == NULL) 8276 { 8277 feature = tdesc_find_feature (tdesc, 8278 "org.gnu.gdb.arm.m-profile"); 8279 if (feature == NULL) 8280 return NULL; 8281 else 8282 is_m = 1; 8283 } 8284 8285 tdesc_data = tdesc_data_alloc (); 8286 8287 valid_p = 1; 8288 for (i = 0; i < ARM_SP_REGNUM; i++) 8289 valid_p &= tdesc_numbered_register (feature, tdesc_data, i, 8290 arm_register_names[i]); 8291 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data, 8292 ARM_SP_REGNUM, 8293 arm_sp_names); 8294 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data, 8295 ARM_LR_REGNUM, 8296 arm_lr_names); 8297 valid_p &= tdesc_numbered_register_choices (feature, tdesc_data, 8298 ARM_PC_REGNUM, 8299 arm_pc_names); 8300 if (is_m) 8301 valid_p &= tdesc_numbered_register (feature, tdesc_data, 8302 ARM_PS_REGNUM, "xpsr"); 8303 else 8304 valid_p &= tdesc_numbered_register (feature, tdesc_data, 8305 ARM_PS_REGNUM, "cpsr"); 8306 8307 if (!valid_p) 8308 { 8309 tdesc_data_cleanup (tdesc_data); 8310 return NULL; 8311 } 8312 8313 feature = tdesc_find_feature (tdesc, 8314 "org.gnu.gdb.arm.fpa"); 8315 if (feature != NULL) 8316 { 8317 valid_p = 1; 8318 for (i = ARM_F0_REGNUM; i <= ARM_FPS_REGNUM; i++) 8319 valid_p &= tdesc_numbered_register (feature, tdesc_data, i, 8320 arm_register_names[i]); 8321 if (!valid_p) 8322 { 8323 tdesc_data_cleanup (tdesc_data); 8324 return NULL; 8325 } 8326 } 8327 else 8328 have_fpa_registers = 0; 8329 8330 feature = 
tdesc_find_feature (tdesc, 8331 "org.gnu.gdb.xscale.iwmmxt"); 8332 if (feature != NULL) 8333 { 8334 static const char *const iwmmxt_names[] = { 8335 "wR0", "wR1", "wR2", "wR3", "wR4", "wR5", "wR6", "wR7", 8336 "wR8", "wR9", "wR10", "wR11", "wR12", "wR13", "wR14", "wR15", 8337 "wCID", "wCon", "wCSSF", "wCASF", "", "", "", "", 8338 "wCGR0", "wCGR1", "wCGR2", "wCGR3", "", "", "", "", 8339 }; 8340 8341 valid_p = 1; 8342 for (i = ARM_WR0_REGNUM; i <= ARM_WR15_REGNUM; i++) 8343 valid_p 8344 &= tdesc_numbered_register (feature, tdesc_data, i, 8345 iwmmxt_names[i - ARM_WR0_REGNUM]); 8346 8347 /* Check for the control registers, but do not fail if they 8348 are missing. */ 8349 for (i = ARM_WC0_REGNUM; i <= ARM_WCASF_REGNUM; i++) 8350 tdesc_numbered_register (feature, tdesc_data, i, 8351 iwmmxt_names[i - ARM_WR0_REGNUM]); 8352 8353 for (i = ARM_WCGR0_REGNUM; i <= ARM_WCGR3_REGNUM; i++) 8354 valid_p 8355 &= tdesc_numbered_register (feature, tdesc_data, i, 8356 iwmmxt_names[i - ARM_WR0_REGNUM]); 8357 8358 if (!valid_p) 8359 { 8360 tdesc_data_cleanup (tdesc_data); 8361 return NULL; 8362 } 8363 } 8364 8365 /* If we have a VFP unit, check whether the single precision registers 8366 are present. If not, then we will synthesize them as pseudo 8367 registers. */ 8368 feature = tdesc_find_feature (tdesc, 8369 "org.gnu.gdb.arm.vfp"); 8370 if (feature != NULL) 8371 { 8372 static const char *const vfp_double_names[] = { 8373 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", 8374 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15", 8375 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23", 8376 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31", 8377 }; 8378 8379 /* Require the double precision registers. There must be either 8380 16 or 32. 
*/ 8381 valid_p = 1; 8382 for (i = 0; i < 32; i++) 8383 { 8384 valid_p &= tdesc_numbered_register (feature, tdesc_data, 8385 ARM_D0_REGNUM + i, 8386 vfp_double_names[i]); 8387 if (!valid_p) 8388 break; 8389 } 8390 if (!valid_p && i == 16) 8391 valid_p = 1; 8392 8393 /* Also require FPSCR. */ 8394 valid_p &= tdesc_numbered_register (feature, tdesc_data, 8395 ARM_FPSCR_REGNUM, "fpscr"); 8396 if (!valid_p) 8397 { 8398 tdesc_data_cleanup (tdesc_data); 8399 return NULL; 8400 } 8401 8402 if (tdesc_unnumbered_register (feature, "s0") == 0) 8403 have_vfp_pseudos = 1; 8404 8405 have_vfp_registers = 1; 8406 8407 /* If we have VFP, also check for NEON. The architecture allows 8408 NEON without VFP (integer vector operations only), but GDB 8409 does not support that. */ 8410 feature = tdesc_find_feature (tdesc, 8411 "org.gnu.gdb.arm.neon"); 8412 if (feature != NULL) 8413 { 8414 /* NEON requires 32 double-precision registers. */ 8415 if (i != 32) 8416 { 8417 tdesc_data_cleanup (tdesc_data); 8418 return NULL; 8419 } 8420 8421 /* If there are quad registers defined by the stub, use 8422 their type; otherwise (normally) provide them with 8423 the default type. */ 8424 if (tdesc_unnumbered_register (feature, "q0") == 0) 8425 have_neon_pseudos = 1; 8426 8427 have_neon = 1; 8428 } 8429 } 8430 } 8431 8432 /* If there is already a candidate, use it. */ 8433 for (best_arch = gdbarch_list_lookup_by_info (arches, &info); 8434 best_arch != NULL; 8435 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info)) 8436 { 8437 if (arm_abi != ARM_ABI_AUTO 8438 && arm_abi != gdbarch_tdep (best_arch->gdbarch)->arm_abi) 8439 continue; 8440 8441 if (fp_model != ARM_FLOAT_AUTO 8442 && fp_model != gdbarch_tdep (best_arch->gdbarch)->fp_model) 8443 continue; 8444 8445 /* There are various other properties in tdep that we do not 8446 need to check here: those derived from a target description, 8447 since gdbarches with a different target description are 8448 automatically disqualified. 
*/ 8449 8450 /* Do check is_m, though, since it might come from the binary. */ 8451 if (is_m != gdbarch_tdep (best_arch->gdbarch)->is_m) 8452 continue; 8453 8454 /* Found a match. */ 8455 break; 8456 } 8457 8458 if (best_arch != NULL) 8459 { 8460 if (tdesc_data != NULL) 8461 tdesc_data_cleanup (tdesc_data); 8462 return best_arch->gdbarch; 8463 } 8464 8465 tdep = xcalloc (1, sizeof (struct gdbarch_tdep)); 8466 gdbarch = gdbarch_alloc (&info, tdep); 8467 8468 /* Record additional information about the architecture we are defining. 8469 These are gdbarch discriminators, like the OSABI. */ 8470 tdep->arm_abi = arm_abi; 8471 tdep->fp_model = fp_model; 8472 tdep->is_m = is_m; 8473 tdep->have_fpa_registers = have_fpa_registers; 8474 tdep->have_vfp_registers = have_vfp_registers; 8475 tdep->have_vfp_pseudos = have_vfp_pseudos; 8476 tdep->have_neon_pseudos = have_neon_pseudos; 8477 tdep->have_neon = have_neon; 8478 8479 /* Breakpoints. */ 8480 switch (info.byte_order_for_code) 8481 { 8482 case BFD_ENDIAN_BIG: 8483 tdep->arm_breakpoint = arm_default_arm_be_breakpoint; 8484 tdep->arm_breakpoint_size = sizeof (arm_default_arm_be_breakpoint); 8485 tdep->thumb_breakpoint = arm_default_thumb_be_breakpoint; 8486 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_be_breakpoint); 8487 8488 break; 8489 8490 case BFD_ENDIAN_LITTLE: 8491 tdep->arm_breakpoint = arm_default_arm_le_breakpoint; 8492 tdep->arm_breakpoint_size = sizeof (arm_default_arm_le_breakpoint); 8493 tdep->thumb_breakpoint = arm_default_thumb_le_breakpoint; 8494 tdep->thumb_breakpoint_size = sizeof (arm_default_thumb_le_breakpoint); 8495 8496 break; 8497 8498 default: 8499 internal_error (__FILE__, __LINE__, 8500 _("arm_gdbarch_init: bad byte order for float format")); 8501 } 8502 8503 /* On ARM targets char defaults to unsigned. */ 8504 set_gdbarch_char_signed (gdbarch, 0); 8505 8506 /* Note: for displaced stepping, this includes the breakpoint, and one word 8507 of additional scratch space. 
This setting isn't used for anything beside 8508 displaced stepping at present. */ 8509 set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS); 8510 8511 /* This should be low enough for everything. */ 8512 tdep->lowest_pc = 0x20; 8513 tdep->jb_pc = -1; /* Longjump support not enabled by default. */ 8514 8515 /* The default, for both APCS and AAPCS, is to return small 8516 structures in registers. */ 8517 tdep->struct_return = reg_struct_return; 8518 8519 set_gdbarch_push_dummy_call (gdbarch, arm_push_dummy_call); 8520 set_gdbarch_frame_align (gdbarch, arm_frame_align); 8521 8522 set_gdbarch_write_pc (gdbarch, arm_write_pc); 8523 8524 /* Frame handling. */ 8525 set_gdbarch_dummy_id (gdbarch, arm_dummy_id); 8526 set_gdbarch_unwind_pc (gdbarch, arm_unwind_pc); 8527 set_gdbarch_unwind_sp (gdbarch, arm_unwind_sp); 8528 8529 frame_base_set_default (gdbarch, &arm_normal_base); 8530 8531 /* Address manipulation. */ 8532 set_gdbarch_smash_text_address (gdbarch, arm_smash_text_address); 8533 set_gdbarch_addr_bits_remove (gdbarch, arm_addr_bits_remove); 8534 8535 /* Advance PC across function entry code. */ 8536 set_gdbarch_skip_prologue (gdbarch, arm_skip_prologue); 8537 8538 /* Detect whether PC is in function epilogue. */ 8539 set_gdbarch_in_function_epilogue_p (gdbarch, arm_in_function_epilogue_p); 8540 8541 /* Skip trampolines. */ 8542 set_gdbarch_skip_trampoline_code (gdbarch, arm_skip_stub); 8543 8544 /* The stack grows downward. */ 8545 set_gdbarch_inner_than (gdbarch, core_addr_lessthan); 8546 8547 /* Breakpoint manipulation. */ 8548 set_gdbarch_breakpoint_from_pc (gdbarch, arm_breakpoint_from_pc); 8549 set_gdbarch_remote_breakpoint_from_pc (gdbarch, 8550 arm_remote_breakpoint_from_pc); 8551 8552 /* Information about registers, etc. 
*/ 8553 set_gdbarch_sp_regnum (gdbarch, ARM_SP_REGNUM); 8554 set_gdbarch_pc_regnum (gdbarch, ARM_PC_REGNUM); 8555 set_gdbarch_num_regs (gdbarch, ARM_NUM_REGS); 8556 set_gdbarch_register_type (gdbarch, arm_register_type); 8557 set_gdbarch_register_reggroup_p (gdbarch, arm_register_reggroup_p); 8558 8559 /* This "info float" is FPA-specific. Use the generic version if we 8560 do not have FPA. */ 8561 if (gdbarch_tdep (gdbarch)->have_fpa_registers) 8562 set_gdbarch_print_float_info (gdbarch, arm_print_float_info); 8563 8564 /* Internal <-> external register number maps. */ 8565 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, arm_dwarf_reg_to_regnum); 8566 set_gdbarch_register_sim_regno (gdbarch, arm_register_sim_regno); 8567 8568 set_gdbarch_register_name (gdbarch, arm_register_name); 8569 8570 /* Returning results. */ 8571 set_gdbarch_return_value (gdbarch, arm_return_value); 8572 8573 /* Disassembly. */ 8574 set_gdbarch_print_insn (gdbarch, gdb_print_insn_arm); 8575 8576 /* Minsymbol frobbing. */ 8577 set_gdbarch_elf_make_msymbol_special (gdbarch, arm_elf_make_msymbol_special); 8578 set_gdbarch_coff_make_msymbol_special (gdbarch, 8579 arm_coff_make_msymbol_special); 8580 set_gdbarch_record_special_symbol (gdbarch, arm_record_special_symbol); 8581 8582 /* Thumb-2 IT block support. */ 8583 set_gdbarch_adjust_breakpoint_address (gdbarch, 8584 arm_adjust_breakpoint_address); 8585 8586 /* Virtual tables. */ 8587 set_gdbarch_vbit_in_delta (gdbarch, 1); 8588 8589 /* Hook in the ABI-specific overrides, if they have been registered. */ 8590 gdbarch_init_osabi (info, gdbarch); 8591 8592 dwarf2_frame_set_init_reg (gdbarch, arm_dwarf2_frame_init_reg); 8593 8594 /* Add some default predicates. 
*/ 8595 frame_unwind_append_unwinder (gdbarch, &arm_stub_unwind); 8596 dwarf2_append_unwinders (gdbarch); 8597 frame_unwind_append_unwinder (gdbarch, &arm_exidx_unwind); 8598 frame_unwind_append_unwinder (gdbarch, &arm_prologue_unwind); 8599 8600 /* Now we have tuned the configuration, set a few final things, 8601 based on what the OS ABI has told us. */ 8602 8603 /* If the ABI is not otherwise marked, assume the old GNU APCS. EABI 8604 binaries are always marked. */ 8605 if (tdep->arm_abi == ARM_ABI_AUTO) 8606 tdep->arm_abi = ARM_ABI_APCS; 8607 8608 /* Watchpoints are not steppable. */ 8609 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1); 8610 8611 /* We used to default to FPA for generic ARM, but almost nobody 8612 uses that now, and we now provide a way for the user to force 8613 the model. So default to the most useful variant. */ 8614 if (tdep->fp_model == ARM_FLOAT_AUTO) 8615 tdep->fp_model = ARM_FLOAT_SOFT_FPA; 8616 8617 if (tdep->jb_pc >= 0) 8618 set_gdbarch_get_longjmp_target (gdbarch, arm_get_longjmp_target); 8619 8620 /* Floating point sizes and format. */ 8621 set_gdbarch_float_format (gdbarch, floatformats_ieee_single); 8622 if (tdep->fp_model == ARM_FLOAT_SOFT_FPA || tdep->fp_model == ARM_FLOAT_FPA) 8623 { 8624 set_gdbarch_double_format 8625 (gdbarch, floatformats_ieee_double_littlebyte_bigword); 8626 set_gdbarch_long_double_format 8627 (gdbarch, floatformats_ieee_double_littlebyte_bigword); 8628 } 8629 else 8630 { 8631 set_gdbarch_double_format (gdbarch, floatformats_ieee_double); 8632 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double); 8633 } 8634 8635 if (have_vfp_pseudos) 8636 { 8637 /* NOTE: These are the only pseudo registers used by 8638 the ARM target at the moment. If more are added, a 8639 little more care in numbering will be needed. 
*/ 8640 8641 int num_pseudos = 32; 8642 if (have_neon_pseudos) 8643 num_pseudos += 16; 8644 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudos); 8645 set_gdbarch_pseudo_register_read (gdbarch, arm_pseudo_read); 8646 set_gdbarch_pseudo_register_write (gdbarch, arm_pseudo_write); 8647 } 8648 8649 if (tdesc_data) 8650 { 8651 set_tdesc_pseudo_register_name (gdbarch, arm_register_name); 8652 8653 tdesc_use_registers (gdbarch, tdesc, tdesc_data); 8654 8655 /* Override tdesc_register_type to adjust the types of VFP 8656 registers for NEON. */ 8657 set_gdbarch_register_type (gdbarch, arm_register_type); 8658 } 8659 8660 /* Add standard register aliases. We add aliases even for those 8661 nanes which are used by the current architecture - it's simpler, 8662 and does no harm, since nothing ever lists user registers. */ 8663 for (i = 0; i < ARRAY_SIZE (arm_register_aliases); i++) 8664 user_reg_add (gdbarch, arm_register_aliases[i].name, 8665 value_of_arm_user_reg, &arm_register_aliases[i].regnum); 8666 8667 return gdbarch; 8668} 8669 8670static void 8671arm_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file) 8672{ 8673 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); 8674 8675 if (tdep == NULL) 8676 return; 8677 8678 fprintf_unfiltered (file, _("arm_dump_tdep: Lowest pc = 0x%lx"), 8679 (unsigned long) tdep->lowest_pc); 8680} 8681 8682extern initialize_file_ftype _initialize_arm_tdep; /* -Wmissing-prototypes */ 8683 8684void 8685_initialize_arm_tdep (void) 8686{ 8687 struct ui_file *stb; 8688 long length; 8689 struct cmd_list_element *new_set, *new_show; 8690 const char *setname; 8691 const char *setdesc; 8692 const char *const *regnames; 8693 int numregs, i, j; 8694 static char *helptext; 8695 char regdesc[1024], *rdptr = regdesc; 8696 size_t rest = sizeof (regdesc); 8697 8698 gdbarch_register (bfd_arch_arm, arm_gdbarch_init, arm_dump_tdep); 8699 8700 arm_objfile_data_key 8701 = register_objfile_data_with_cleanup (NULL, arm_objfile_data_free); 8702 8703 /* Add 
ourselves to objfile event chain. */ 8704 observer_attach_new_objfile (arm_exidx_new_objfile); 8705 arm_exidx_data_key 8706 = register_objfile_data_with_cleanup (NULL, arm_exidx_data_free); 8707 8708 /* Register an ELF OS ABI sniffer for ARM binaries. */ 8709 gdbarch_register_osabi_sniffer (bfd_arch_arm, 8710 bfd_target_elf_flavour, 8711 arm_elf_osabi_sniffer); 8712 8713 /* Initialize the standard target descriptions. */ 8714 initialize_tdesc_arm_with_m (); 8715 8716 /* Get the number of possible sets of register names defined in opcodes. */ 8717 num_disassembly_options = get_arm_regname_num_options (); 8718 8719 /* Add root prefix command for all "set arm"/"show arm" commands. */ 8720 add_prefix_cmd ("arm", no_class, set_arm_command, 8721 _("Various ARM-specific commands."), 8722 &setarmcmdlist, "set arm ", 0, &setlist); 8723 8724 add_prefix_cmd ("arm", no_class, show_arm_command, 8725 _("Various ARM-specific commands."), 8726 &showarmcmdlist, "show arm ", 0, &showlist); 8727 8728 /* Sync the opcode insn printer with our register viewer. */ 8729 parse_arm_disassembler_option ("reg-names-std"); 8730 8731 /* Initialize the array that will be passed to 8732 add_setshow_enum_cmd(). */ 8733 valid_disassembly_styles 8734 = xmalloc ((num_disassembly_options + 1) * sizeof (char *)); 8735 for (i = 0; i < num_disassembly_options; i++) 8736 { 8737 numregs = get_arm_regnames (i, &setname, &setdesc, ®names); 8738 valid_disassembly_styles[i] = setname; 8739 length = snprintf (rdptr, rest, "%s - %s\n", setname, setdesc); 8740 rdptr += length; 8741 rest -= length; 8742 /* When we find the default names, tell the disassembler to use 8743 them. */ 8744 if (!strcmp (setname, "std")) 8745 { 8746 disassembly_style = setname; 8747 set_arm_regname_option (i); 8748 } 8749 } 8750 /* Mark the end of valid options. */ 8751 valid_disassembly_styles[num_disassembly_options] = NULL; 8752 8753 /* Create the help text. 
*/ 8754 stb = mem_fileopen (); 8755 fprintf_unfiltered (stb, "%s%s%s", 8756 _("The valid values are:\n"), 8757 regdesc, 8758 _("The default is \"std\".")); 8759 helptext = ui_file_xstrdup (stb, NULL); 8760 ui_file_delete (stb); 8761 8762 add_setshow_enum_cmd("disassembler", no_class, 8763 valid_disassembly_styles, &disassembly_style, 8764 _("Set the disassembly style."), 8765 _("Show the disassembly style."), 8766 helptext, 8767 set_disassembly_style_sfunc, 8768 NULL, /* FIXME: i18n: The disassembly style is 8769 \"%s\". */ 8770 &setarmcmdlist, &showarmcmdlist); 8771 8772 add_setshow_boolean_cmd ("apcs32", no_class, &arm_apcs_32, 8773 _("Set usage of ARM 32-bit mode."), 8774 _("Show usage of ARM 32-bit mode."), 8775 _("When off, a 26-bit PC will be used."), 8776 NULL, 8777 NULL, /* FIXME: i18n: Usage of ARM 32-bit 8778 mode is %s. */ 8779 &setarmcmdlist, &showarmcmdlist); 8780 8781 /* Add a command to allow the user to force the FPU model. */ 8782 add_setshow_enum_cmd ("fpu", no_class, fp_model_strings, ¤t_fp_model, 8783 _("Set the floating point type."), 8784 _("Show the floating point type."), 8785 _("auto - Determine the FP typefrom the OS-ABI.\n\ 8786softfpa - Software FP, mixed-endian doubles on little-endian ARMs.\n\ 8787fpa - FPA co-processor (GCC compiled).\n\ 8788softvfp - Software FP with pure-endian doubles.\n\ 8789vfp - VFP co-processor."), 8790 set_fp_model_sfunc, show_fp_model, 8791 &setarmcmdlist, &showarmcmdlist); 8792 8793 /* Add a command to allow the user to force the ABI. */ 8794 add_setshow_enum_cmd ("abi", class_support, arm_abi_strings, &arm_abi_string, 8795 _("Set the ABI."), 8796 _("Show the ABI."), 8797 NULL, arm_set_abi, arm_show_abi, 8798 &setarmcmdlist, &showarmcmdlist); 8799 8800 /* Add two commands to allow the user to force the assumed 8801 execution mode. 
*/ 8802 add_setshow_enum_cmd ("fallback-mode", class_support, 8803 arm_mode_strings, &arm_fallback_mode_string, 8804 _("Set the mode assumed when symbols are unavailable."), 8805 _("Show the mode assumed when symbols are unavailable."), 8806 NULL, NULL, arm_show_fallback_mode, 8807 &setarmcmdlist, &showarmcmdlist); 8808 add_setshow_enum_cmd ("force-mode", class_support, 8809 arm_mode_strings, &arm_force_mode_string, 8810 _("Set the mode assumed even when symbols are available."), 8811 _("Show the mode assumed even when symbols are available."), 8812 NULL, NULL, arm_show_force_mode, 8813 &setarmcmdlist, &showarmcmdlist); 8814 8815 /* Debugging flag. */ 8816 add_setshow_boolean_cmd ("arm", class_maintenance, &arm_debug, 8817 _("Set ARM debugging."), 8818 _("Show ARM debugging."), 8819 _("When on, arm-specific debugging is enabled."), 8820 NULL, 8821 NULL, /* FIXME: i18n: "ARM debugging is %s. */ 8822 &setdebuglist, &showdebuglist); 8823} 8824