#ifndef INSNS_TABLE_ONLY

#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <mach-o/arm/reloc.h>
#include "as.h"
#include "flonum.h"
#include "md.h"
#include "obstack.h"
#include "xmalloc.h"
#include "symbols.h"
#include "messages.h"
#include "atof-ieee.h"
#include "input-scrub.h"
#include "sections.h"
#include "dwarf2dbg.h"
#include "arm_reloc.h"

#include "opcode/arm.h"

/* ctype wrapper, matching the ISALPHA/ISDIGIT hacks used further down. */
#define ISALNUM(xXx) (isalnum(xXx))

/*
 * These are the default cputype and cpusubtype for the arm architecture.
 */
const cpu_type_t md_cputype = CPU_TYPE_ARM;
/* Not const: the subtype may be raised later (e.g. by command-line options
   or by the architecture features actually used while assembling). */
cpu_subtype_t md_cpusubtype = CPU_SUBTYPE_ARM_V4T;

/* This is the byte sex for the arm architecture */
const enum byte_sex md_target_byte_sex = LITTLE_ENDIAN_BYTE_SEX;

/* These characters start a comment anywhere on the line */
const char md_comment_chars[] = "@";

/* These characters only start a comment at the beginning of a line */
const char md_line_comment_chars[] = "#";

/*
 * These characters can be used to separate mantissa decimal digits from
 * exponent decimal digits in floating point numbers.
 */
const char md_EXP_CHARS[] = "eE";

/*
 * The characters after a leading 0 that means this number is a floating point
 * constant as in 0f123.456 or 0d1.234E-12 (see md_EXP_CHARS above).
48 */ 49const char md_FLT_CHARS[] = "dDfF"; 50 51/* HACK here to forward declare this md_* routine, only in the ARM assembler */ 52symbolS *md_undefined_symbol (char * name ATTRIBUTE_UNUSED); 53 54/* HACKS for bfd_* and BFD_RELOC_* These would come from bfd/reloc.c */ 55typedef int bfd_reloc_code_real_type; 56typedef int bfd_vma; 57#define BFD_RELOC_UNUSED 0 58enum { 59 BFD_RELOC_ARM_IMMEDIATE = NO_RELOC+1, 60 BFD_RELOC_ARM_ADRL_IMMEDIATE, 61 BFD_RELOC_ARM_OFFSET_IMM, 62 BFD_RELOC_ARM_SHIFT_IMM, 63 BFD_RELOC_ARM_MULTI, 64 BFD_RELOC_ARM_SMI, 65 BFD_RELOC_ARM_SWI, 66 BFD_RELOC_ARM_LITERAL, 67 BFD_RELOC_ARM_OFFSET_IMM8, 68 BFD_RELOC_ARM_HWLITERAL, 69 BFD_RELOC_ARM_THUMB_ADD, 70 BFD_RELOC_ARM_THUMB_IMM, 71 BFD_RELOC_ARM_THUMB_SHIFT, 72 BFD_RELOC_ARM_THUMB_OFFSET, 73 BFD_RELOC_THUMB_PCREL_BRANCH9, 74 BFD_RELOC_THUMB_PCREL_BRANCH12, 75 BFD_RELOC_THUMB_PCREL_BLX, 76 BFD_RELOC_ARM_PCREL_BLX, 77 BFD_RELOC_ARM_CP_OFF_IMM, 78 BFD_RELOC_ARM_CP_OFF_IMM_S2, 79 BFD_RELOC_ARM_ALU_PC_G0, 80 BFD_RELOC_ARM_ALU_PC_G0_NC, 81 BFD_RELOC_ARM_ALU_PC_G1, 82 BFD_RELOC_ARM_ALU_PC_G1_NC, 83 BFD_RELOC_ARM_ALU_PC_G2, 84 BFD_RELOC_ARM_ALU_SB_G0, 85 BFD_RELOC_ARM_ALU_SB_G0_NC, 86 BFD_RELOC_ARM_ALU_SB_G1, 87 BFD_RELOC_ARM_ALU_SB_G1_NC, 88 BFD_RELOC_ARM_ALU_SB_G2, 89 BFD_RELOC_ARM_LDC_PC_G0, 90 BFD_RELOC_ARM_LDC_PC_G1, 91 BFD_RELOC_ARM_LDC_PC_G2, 92 BFD_RELOC_ARM_LDC_SB_G0, 93 BFD_RELOC_ARM_LDC_SB_G1, 94 BFD_RELOC_ARM_LDC_SB_G2, 95 BFD_RELOC_ARM_LDRS_PC_G0, 96 BFD_RELOC_ARM_LDRS_PC_G1, 97 BFD_RELOC_ARM_LDRS_PC_G2, 98 BFD_RELOC_ARM_LDRS_SB_G0, 99 BFD_RELOC_ARM_LDRS_SB_G1, 100 BFD_RELOC_ARM_LDRS_SB_G2, 101 BFD_RELOC_ARM_LDR_PC_G0, 102 BFD_RELOC_ARM_LDR_PC_G1, 103 BFD_RELOC_ARM_LDR_PC_G2, 104 BFD_RELOC_ARM_LDR_SB_G0, 105 BFD_RELOC_ARM_LDR_SB_G1, 106 BFD_RELOC_ARM_LDR_SB_G2, 107 BFD_RELOC_ARM_T32_CP_OFF_IMM, 108 BFD_RELOC_ARM_PLT32, 109 BFD_RELOC_ARM_SMC, 110 BFD_RELOC_ARM_T32_CP_OFF_IMM_S2, 111 BFD_RELOC_ARM_T32_OFFSET_IMM, 112 BFD_RELOC_ARM_T32_IMM12, 113 BFD_RELOC_ARM_T32_IMMEDIATE, 114 
BFD_RELOC_ARM_T32_ADD_IMM, 115 BFD_RELOC_ARM_T32_ADD_PC12, 116 BFD_RELOC_THUMB_PCREL_BRANCH25, 117 BFD_RELOC_THUMB_PCREL_BRANCH7, 118 BFD_RELOC_ARM_T32_OFFSET_U8, 119 BFD_RELOC_ARM_PCREL_CALL, 120 BFD_RELOC_ARM_PCREL_JUMP, 121 BFD_RELOC_8, 122 BFD_RELOC_16, 123 BFD_RELOC_RVA, 124 BFD_RELOC_32, 125 BFD_RELOC_ARM_TARGET1, 126 BFD_RELOC_ARM_ROSEGREL32, 127 BFD_RELOC_ARM_SBREL32, 128 BFD_RELOC_32_PCREL, 129 BFD_RELOC_THUMB_PCREL_BRANCH20, 130 BFD_RELOC_THUMB_PCREL_BRANCH23 = ARM_THUMB_RELOC_BR22, 131 BFD_RELOC_ARM_PCREL_BRANCH = ARM_RELOC_BR24, 132 BFD_RELOC_ARM_MOVW = ARM_RELOC_LO16, 133 BFD_RELOC_ARM_MOVT = ARM_RELOC_HI16, 134 BFD_RELOC_ARM_THUMB_MOVW = ARM_THUMB_RELOC_LO16, 135 BFD_RELOC_ARM_THUMB_MOVT = ARM_THUMB_RELOC_HI16 136}; 137 138/* HACKS for the change in gas/expr.h to change from X_seg to X_op (expr type)*/ 139#define X_op X_seg 140#define X_op_symbol X_add_symbol 141typedef enum { 142 /* An illegal expression. */ 143 O_illegal = SEG_NONE, 144} operatorT; 145 146/* HACKS for as_tsktsk() warning routine */ 147#define as_tsktsk as_warn 148 149/* STUFF FROM gas/asintl.h */ 150# define _(String) (String) 151# define N_(String) (String) 152 153/* STUFF FROM gas/as.h */ 154extern subsegT now_subseg; 155 156/* STUFF FROM gas/config/tc-arm.h */ 157#define ARM_FLAG_THUMB (1 << 0) /* The symbol is a Thumb symbol rather than an Arm symbol. */ 158#define ARM_FLAG_INTERWORK (1 << 1) /* The symbol is attached to code that supports interworking. */ 159#define THUMB_FLAG_FUNC (1 << 2) /* The symbol is attached to the start of a Thumb function. */ 160 161#define ARM_GET_FLAG(s) (*symbol_get_tc (s)) 162#define ARM_SET_FLAG(s,v) (*symbol_get_tc (s) |= (v)) 163#define ARM_RESET_FLAG(s,v) (*symbol_get_tc (s) &= ~(v)) 164 165#define ARM_IS_THUMB(s) (ARM_GET_FLAG (s) & ARM_FLAG_THUMB) 166#define ARM_IS_INTERWORK(s) (ARM_GET_FLAG (s) & ARM_FLAG_INTERWORK) 167#define THUMB_IS_FUNC(s) (ARM_GET_FLAG (s) & THUMB_FLAG_FUNC) 168 169#define ARM_SET_THUMB(s,t) ((t) ? 
ARM_SET_FLAG (s, ARM_FLAG_THUMB) : ARM_RESET_FLAG (s, ARM_FLAG_THUMB)) 170#define ARM_SET_INTERWORK(s,t) ((t) ? ARM_SET_FLAG (s, ARM_FLAG_INTERWORK) : ARM_RESET_FLAG (s, ARM_FLAG_INTERWORK)) 171#define THUMB_SET_FUNC(s,t) ((t) ? ARM_SET_FLAG (s, THUMB_FLAG_FUNC) : ARM_RESET_FLAG (s, THUMB_FLAG_FUNC)) 172 173/* STUFF FROM gas/config/tc-arm.c */ 174/* line 30 is #include "safe-ctype.h" to avoid this these HACKS are used */ 175#include <ctype.h> 176#define ISALPHA(c) isalpha(c) 177#define ISDIGIT(c) isdigit(c) 178#define TOUPPER(c) toupper(c) 179#define TOLOWER(c) tolower(c) 180 181#define streq(a, b) (strcmp (a, b) == 0) 182 183/* On darwin, default to arm 920 for now. */ 184static arm_feature_set cpu_variant /* HACK */ = ARM_FEATURE (ARM_AEXT_V4T, FPU_FPA); 185static arm_feature_set arm_arch_used; 186static arm_feature_set thumb_arch_used; 187 188#endif /* INSNS_TABLE_ONLY */ 189 190/* Constants for known architecture features. */ 191static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1; 192static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2; 193static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3; 194static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1; 195static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA; 196static const arm_feature_set fpu_any_hard = FPU_ANY_HARD; 197static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK; 198static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE; 199 200static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0); 201static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V1, 0); 202static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0); 203static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0); 204static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0); 205static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0); 206static const arm_feature_set arm_ext_v4t = ARM_FEATURE 
(ARM_EXT_V4T, 0); 207static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0); 208static const arm_feature_set arm_ext_v4t_5 = 209 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0); 210static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0); 211static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0); 212static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0); 213static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0); 214static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0); 215static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0); 216static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0); 217static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0); 218static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0); 219static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0); 220static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0); 221static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0); 222static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0); 223static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0); 224 225static const arm_feature_set arm_arch_any = ARM_ANY; 226static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1); 227static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2; 228static const arm_feature_set arm_arch_none = ARM_ARCH_NONE; 229 230static const arm_feature_set arm_cext_iwmmxt2 = 231 ARM_FEATURE (0, ARM_CEXT_IWMMXT2); 232static const arm_feature_set arm_cext_iwmmxt = 233 ARM_FEATURE (0, ARM_CEXT_IWMMXT); 234static const arm_feature_set arm_cext_xscale = 235 ARM_FEATURE (0, ARM_CEXT_XSCALE); 236static const arm_feature_set arm_cext_maverick = 237 ARM_FEATURE (0, ARM_CEXT_MAVERICK); 238static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1); 239static const arm_feature_set 
fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2); 240static const arm_feature_set fpu_vfp_ext_v1xd = 241 ARM_FEATURE (0, FPU_VFP_EXT_V1xD); 242static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1); 243static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2); 244static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3); 245static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1); 246static const arm_feature_set fpu_vfp_v3_or_neon_ext = 247 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3); 248 249#ifndef INSNS_TABLE_ONLY 250 251/* Prefix characters that indicate the start of an immediate 252 value. */ 253#define is_immediate_prefix(C) ((C) == '#' || (C) == '$') 254 255/* 0: assemble for ARM, 256 1: assemble for Thumb, 257 2: assemble for Thumb even though target CPU does not support thumb 258 instructions. */ 259int thumb_mode = 0; 260 261/* If unified_syntax is true, we are processing the new unified 262 ARM/Thumb syntax. Important differences from the old ARM mode: 263 264 - Immediate operands do not require a # prefix. 265 - Conditional affixes always appear at the end of the 266 instruction. (For backward compatibility, those instructions 267 that formerly had them in the middle, continue to accept them 268 there.) 269 - The IT instruction may appear, and if it does is validated 270 against subsequent conditional affixes. It does not generate 271 machine code. 272 273 Important differences from the old Thumb mode: 274 275 - Immediate operands do not require a # prefix. 276 - Most of the V6T2 instructions are only available in unified mode. 277 - The .N and .W suffixes are recognized and honored (it is an error 278 if they cannot be honored). 279 - All instructions set the flags if and only if they have an 's' affix. 280 - Conditional affixes may be used. They are validated against 281 preceding IT instructions. 
       Unlike ARM mode, you cannot use a
       conditional affix except in the scope of an IT instruction. */

static bfd_boolean unified_syntax = FALSE;

/* Element type of a Neon ".dt" type specifier (e.g. .s16, .f32). */
enum neon_el_type
{
  NT_invtype,
  NT_untyped,
  NT_integer,
  NT_float,
  NT_poly,
  NT_signed,
  NT_unsigned
};

/* One parsed Neon element type: kind plus bit width. */
struct neon_type_el
{
  enum neon_el_type type;
  unsigned size;
};

#endif /* INSNS_TABLE_ONLY */

#define NEON_MAX_TYPE_ELS 4

#ifndef INSNS_TABLE_ONLY

/* A full Neon type specifier: up to NEON_MAX_TYPE_ELS element types. */
struct neon_type
{
  struct neon_type_el el[NEON_MAX_TYPE_ELS];
  unsigned elems;
};

/* The instruction currently being assembled: opcode bits, condition,
   relocation info and up to six parsed operands. */
struct arm_it
{
  const char *  error;
  uint32_t      instruction;
  int           size;
  int           size_req;
  int           cond;
  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is
     appropriate. */
  int           uncond_value;
  struct neon_type vectype;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed. */
  uint32_t      relax;
  struct
  {
    bfd_reloc_code_real_type type;
    expressionS exp;
    int pc_rel;
    /* HACK_GUESS, force relocation entry to support scattered loading */
    int pcrel_reloc;
  } reloc;

  struct
  {
    unsigned reg;
    signed int imm;
    struct neon_type_el vectype;
    unsigned present    : 1;  /* Operand present. */
    unsigned isreg      : 1;  /* Operand was a register. */
    unsigned immisreg   : 1;  /* .imm field is a second register. */
    unsigned isscalar   : 1;  /* Operand is a (Neon) scalar. */
    unsigned immisalign : 1;  /* Immediate is an alignment specifier. */
    unsigned immisfloat : 1;  /* Immediate was parsed as a float. */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions. This allows us to disambiguate ARM <-> vector insns. */
    unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits. */
    unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg. */
    unsigned isquad     : 1;  /* Operand is Neon quad-precision register. */
    unsigned issingle   : 1;  /* Operand is VFP single-precision register. */
    unsigned hasreloc   : 1;  /* Operand has relocation suffix. */
    unsigned writeback  : 1;  /* Operand has trailing ! */
    unsigned preind     : 1;  /* Preindexed address. */
    unsigned postind    : 1;  /* Postindexed address. */
    unsigned negative   : 1;  /* Index register was negated. */
    unsigned shifted    : 1;  /* Shift applied to operation. */
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind). */
  } operands[6];
};

static struct arm_it inst;

#define NUM_FLOAT_VALS 8

/* Spellings of the FPA immediate constants; index is the encoded value. */
const char * fp_const[] =
{
  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
};

/* Number of littlenums required to hold an extended precision number. */
#define MAX_LITTLENUMS 6

LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];

#define FAIL    (-1)
#define SUCCESS (0)

#define SUFF_S 1
#define SUFF_D 2
#define SUFF_E 3
#define SUFF_P 4

#define CP_T_X 0x00008000
#define CP_T_Y 0x00400000

#define CONDS_BIT 0x00100000
#define LOAD_BIT  0x00100000

#define DOUBLE_LOAD_FLAG 0x00000001

/* Condition-code mnemonic -> encoded condition field. */
struct asm_cond
{
  const char * template;
  uint32_t     value;
};

#define COND_ALWAYS 0xE

/* PSR field-mask suffix (e.g. cpsr_fc) -> mask bits. */
struct asm_psr
{
  const char *template;
  uint32_t    field;
};

/* Barrier option (e.g. sy, ish) -> encoded option value. */
struct asm_barrier_opt
{
  const char *template;
  uint32_t    value;
};

/* The bit that distinguishes CPSR and SPSR. */
#define SPSR_BIT (1 << 22)

/* The individual PSR flag bits.
 */
#define PSR_c (1 << 16)
#define PSR_x (1 << 17)
#define PSR_s (1 << 18)
#define PSR_f (1 << 19)

/* Maps a relocation-suffix spelling to its bfd reloc code. */
struct reloc_entry
{
  char *name;
  bfd_reloc_code_real_type reloc;
};

/* Position of a VFP register field inside an instruction word. */
enum vfp_reg_pos
{
  VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
  VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
};

/* Addressing variants of the VFP load/store-multiple instructions. */
enum vfp_ldstm_type
{
  VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
};

/* Bits for DEFINED field in neon_typed_alias. */
#define NTA_HASTYPE  1
#define NTA_HASINDEX 2

/* Extra info attached to a register alias created with .dn/.qn: an
   optional element type and/or scalar index. */
struct neon_typed_alias
{
  unsigned char defined;
  unsigned char index;
  struct neon_type_el eltype;
};

/* ARM register categories. This includes coprocessor numbers and various
   architecture extensions' registers.
   NOTE: the order must match reg_expected_msgs[] below. */
enum arm_reg_type
{
  REG_TYPE_RN,
  REG_TYPE_CP,
  REG_TYPE_CN,
  REG_TYPE_FN,
  REG_TYPE_VFS,
  REG_TYPE_VFD,
  REG_TYPE_NQ,
  REG_TYPE_VFSD,
  REG_TYPE_NDQ,
  REG_TYPE_NSDQ,
  REG_TYPE_VFC,
  REG_TYPE_MVF,
  REG_TYPE_MVD,
  REG_TYPE_MVFX,
  REG_TYPE_MVDX,
  REG_TYPE_MVAX,
  REG_TYPE_DSPSC,
  REG_TYPE_MMXWR,
  REG_TYPE_MMXWC,
  REG_TYPE_MMXWCG,
  REG_TYPE_XSCALE,
};

/* Structure for a hash table entry for a register.
   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
   information which states whether a vector type or index is specified (for a
   register alias created with .dn or .qn). Otherwise NEON should be NULL. */
struct reg_entry
{
  const char *name;
  unsigned char number;
  unsigned char type;
  unsigned char builtin;
  struct neon_typed_alias *neon;
};

/* Diagnostics used when we don't get a register of the expected type.
 */
/* Indexed by enum arm_reg_type; keep in sync with that enum's order. */
const char *const reg_expected_msgs[] =
{
  N_("ARM register expected"),
  N_("bad or missing co-processor number"),
  N_("co-processor register expected"),
  N_("FPA register expected"),
  N_("VFP single precision register expected"),
  N_("VFP/Neon double precision register expected"),
  N_("Neon quad precision register expected"),
  N_("VFP single or double precision register expected"),
  N_("Neon double or quad precision register expected"),
  N_("VFP single, double or Neon quad precision register expected"),
  N_("VFP system register expected"),
  N_("Maverick MVF register expected"),
  N_("Maverick MVD register expected"),
  N_("Maverick MVFX register expected"),
  N_("Maverick MVDX register expected"),
  N_("Maverick MVAX register expected"),
  N_("Maverick DSPSC register expected"),
  N_("iWMMXt data register expected"),
  N_("iWMMXt control register expected"),
  N_("iWMMXt scalar register expected"),
  N_("XScale accumulator register expected"),
};

/* Some well known registers that we refer to directly elsewhere. */
#define REG_SP 13
#define REG_LR 14
#define REG_PC 15

/* ARM instructions take 4bytes in the object file, Thumb instructions
   take 2: */
#define INSN_SIZE 4

#endif /* INSNS_TABLE_ONLY */

/* One entry of the opcode table: mnemonic, operand parse codes, and the
   ARM and Thumb encodings with their architecture requirements. */
struct asm_opcode
{
  /* Basic string to match. */
  const char *template;

  /* Parameters to instruction. */
  unsigned char operands[8];

  /* Conditional tag - see opcode_lookup. */
  unsigned int tag : 4;

  /* Basic instruction code. */
  unsigned int avalue : 28;

  /* Thumb-format instruction code. */
  unsigned int tvalue;

  /* Which architecture variant provides this instruction. */
  const arm_feature_set *avariant;
  const arm_feature_set *tvariant;

  /* Function to call to encode instruction in ARM format. */
  void (* aencode) (void);

  /* Function to call to encode instruction in Thumb format. */
  void (* tencode) (void);
};

#ifndef INSNS_TABLE_ONLY

/* Defines for various bits that we will want to toggle. */
#define INST_IMMEDIATE  0x02000000
#define OFFSET_REG      0x02000000
#define HWOFFSET_IMM    0x00400000
#define SHIFT_BY_REG    0x00000010
#define PRE_INDEX       0x01000000
#define INDEX_UP        0x00800000
#define WRITE_BACK      0x00200000
#define LDM_TYPE_2_OR_3 0x00400000
#define CPSI_MMOD       0x00020000

#define LITERAL_MASK    0xf000f000
#define OPCODE_MASK     0xfe1fffff
#define V4_STR_BIT      0x00000020

#define T2_SUBS_PC_LR   0xf3de8f00

#define DATA_OP_SHIFT   21

#define T2_OPCODE_MASK    0xfe1fffff
#define T2_DATA_OP_SHIFT  21

/* Codes to distinguish the arithmetic instructions. */
#define OPCODE_AND 0
#define OPCODE_EOR 1
#define OPCODE_SUB 2
#define OPCODE_RSB 3
#define OPCODE_ADD 4
#define OPCODE_ADC 5
#define OPCODE_SBC 6
#define OPCODE_RSC 7
#define OPCODE_TST 8
#define OPCODE_TEQ 9
#define OPCODE_CMP 10
#define OPCODE_CMN 11
#define OPCODE_ORR 12
#define OPCODE_MOV 13
#define OPCODE_BIC 14
#define OPCODE_MVN 15

/* Thumb-2 data-processing opcode field values. */
#define T2_OPCODE_AND 0
#define T2_OPCODE_BIC 1
#define T2_OPCODE_ORR 2
#define T2_OPCODE_ORN 3
#define T2_OPCODE_EOR 4
#define T2_OPCODE_ADD 8
#define T2_OPCODE_ADC 10
#define T2_OPCODE_SBC 11
#define T2_OPCODE_SUB 13
#define T2_OPCODE_RSB 14

/* 16-bit Thumb instruction skeletons. */
#define T_OPCODE_MUL 0x4340
#define T_OPCODE_TST 0x4200
#define T_OPCODE_CMN 0x42c0
#define T_OPCODE_NEG 0x4240
#define T_OPCODE_MVN 0x43c0

#define T_OPCODE_ADD_R3 0x1800
#define T_OPCODE_SUB_R3 0x1a00
#define T_OPCODE_ADD_HI 0x4400
#define T_OPCODE_ADD_ST 0xb000
#define T_OPCODE_SUB_ST 0xb080
#define T_OPCODE_ADD_SP 0xa800
#define T_OPCODE_ADD_PC 0xa000
#define T_OPCODE_ADD_I8 0x3000
#define T_OPCODE_SUB_I8 0x3800
#define T_OPCODE_ADD_I3 0x1c00
#define T_OPCODE_SUB_I3 0x1e00

/* 16-bit Thumb shift/rotate skeletons (register and immediate forms). */
#define T_OPCODE_ASR_R 0x4100
#define T_OPCODE_LSL_R 0x4080
#define T_OPCODE_LSR_R 0x40c0
#define T_OPCODE_ROR_R 0x41c0
#define T_OPCODE_ASR_I 0x1000
#define T_OPCODE_LSL_I 0x0000
#define T_OPCODE_LSR_I 0x0800

#define T_OPCODE_MOV_I8 0x2000
#define T_OPCODE_CMP_I8 0x2800
#define T_OPCODE_CMP_LR 0x4280
#define T_OPCODE_MOV_HR 0x4600
#define T_OPCODE_CMP_HR 0x4500

/* 16-bit Thumb load/store skeletons (PC/SP-relative, imm and reg offset). */
#define T_OPCODE_LDR_PC 0x4800
#define T_OPCODE_LDR_SP 0x9800
#define T_OPCODE_STR_SP 0x9000
#define T_OPCODE_LDR_IW 0x6800
#define T_OPCODE_STR_IW 0x6000
#define T_OPCODE_LDR_IH 0x8800
#define T_OPCODE_STR_IH 0x8000
#define T_OPCODE_LDR_IB 0x7800
#define T_OPCODE_STR_IB 0x7000
#define T_OPCODE_LDR_RW 0x5800
#define T_OPCODE_STR_RW 0x5000
#define T_OPCODE_LDR_RH 0x5a00
#define T_OPCODE_STR_RH 0x5200
#define T_OPCODE_LDR_RB 0x5c00
#define T_OPCODE_STR_RB 0x5400

#define T_OPCODE_PUSH 0xb400
#define T_OPCODE_POP  0xbc00

#define T_OPCODE_BRANCH 0xe000

#define THUMB_SIZE 2  /* Size of thumb instruction.
*/ 665#define THUMB_PP_PC_LR 0x0100 666#define THUMB_LOAD_BIT 0x0800 667#define THUMB2_LOAD_BIT 0x00100000 668 669#define BAD_ARGS _("bad arguments to instruction") 670#define BAD_PC _("r15 not allowed here") 671#define BAD_COND _("instruction cannot be conditional") 672#define BAD_OVERLAP _("registers may not be the same") 673#define BAD_HIREG _("lo register required") 674#define BAD_THUMB32 _("instruction not supported in Thumb16 mode") 675#define BAD_ADDR_MODE _("instruction does not accept this addressing mode"); 676#define BAD_BRANCH _("branch must be last instruction in IT block") 677#define BAD_NOT_IT _("instruction not allowed in IT block") 678#define BAD_FPU _("selected FPU does not support instruction") 679 680static struct hash_control *arm_ops_hsh; 681static struct hash_control *arm_cond_hsh; 682static struct hash_control *arm_shift_hsh; 683static struct hash_control *arm_psr_hsh; 684static struct hash_control *arm_v7m_psr_hsh; 685static struct hash_control *arm_reg_hsh; 686static struct hash_control *arm_reloc_hsh; 687static struct hash_control *arm_barrier_opt_hsh; 688 689/* Stuff needed to resolve the label ambiguity 690 As: 691 ... 692 label: <insn> 693 may differ from: 694 ... 695 label: 696 <insn> 697*/ 698 699symbolS * last_label_seen; 700static int label_is_thumb_function_name = FALSE; 701 702/* Literal Pool stuff. */ 703 704#define MAX_LITERAL_POOL_SIZE 1024 705 706/* Literal pool structure. Held on a per-section 707 and per-sub-section basis. */ 708 709typedef struct literal_pool 710{ 711 expressionS literals [MAX_LITERAL_POOL_SIZE]; 712 unsigned int next_free_entry; 713 unsigned int id; 714 symbolS * symbol; 715 segT section; 716 subsegT sub_section; 717 struct literal_pool * next; 718} literal_pool; 719 720/* Pointer to a linked list of literal pools. */ 721literal_pool * list_of_pools = NULL; 722 723/* State variables for IT block handling. 
*/ 724static bfd_boolean current_it_mask = 0; 725static int current_cc; 726 727/* Separator character handling. */ 728 729#define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0) 730 731static inline int 732skip_past_char (char ** str, char c) 733{ 734 if (**str == c) 735 { 736 (*str)++; 737 return SUCCESS; 738 } 739 else 740 return FAIL; 741} 742#define skip_past_comma(str) skip_past_char (str, ',') 743 744/* Arithmetic expressions (possibly involving symbols). */ 745 746/* Return TRUE if anything in the expression is a bignum. */ 747 748static int 749walk_no_bignums (symbolS * sp) 750{ 751#ifdef NOTYET 752 if (symbol_get_value_expression (sp)->X_op == O_big) 753 return 1; 754 755 if (symbol_get_value_expression (sp)->X_add_symbol) 756 { 757 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol) 758 || (symbol_get_value_expression (sp)->X_op_symbol 759 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol))); 760 } 761#endif /* NOTYET */ 762 763 return 0; 764} 765 766static int in_my_get_expression = 0; 767 768/* Third argument to my_get_expression. */ 769#define GE_NO_PREFIX 0 770#define GE_IMM_PREFIX 1 771#define GE_OPT_PREFIX 2 772/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit) 773 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */ 774#define GE_OPT_PREFIX_BIG 3 775 776static int 777my_get_expression (expressionS * ep, char ** str, int prefix_mode) 778{ 779 char * save_in; 780 segT seg; 781 782 /* In unified syntax, all prefixes are optional. */ 783 if (unified_syntax) 784 prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? 
prefix_mode 785 : GE_OPT_PREFIX; 786 787 switch (prefix_mode) 788 { 789 case GE_NO_PREFIX: break; 790 case GE_IMM_PREFIX: 791 if (!is_immediate_prefix (**str)) 792 { 793 inst.error = _("immediate expression requires a # prefix"); 794 return FAIL; 795 } 796 (*str)++; 797 break; 798 case GE_OPT_PREFIX: 799 case GE_OPT_PREFIX_BIG: 800 if (is_immediate_prefix (**str)) 801 (*str)++; 802 break; 803 default: abort (); 804 } 805 806 memset (ep, 0, sizeof (expressionS)); 807 808 save_in = input_line_pointer; 809 input_line_pointer = *str; 810 in_my_get_expression = 1; 811 seg = expression (ep); 812 in_my_get_expression = 0; 813 814 if (ep->X_op == O_illegal) 815 { 816 /* We found a bad expression in md_operand(). */ 817 *str = input_line_pointer; 818 input_line_pointer = save_in; 819 if (inst.error == NULL) 820 inst.error = _("bad expression"); 821 return 1; 822 } 823 824#ifdef OBJ_AOUT 825 if (seg != absolute_section 826 && seg != text_section 827 && seg != data_section 828 && seg != bss_section 829 && seg != undefined_section) 830 { 831 inst.error = _("bad segment"); 832 *str = input_line_pointer; 833 input_line_pointer = save_in; 834 return 1; 835 } 836#endif 837 838 /* Get rid of any bignums now, so that we don't generate an error for which 839 we can't establish a line number later on. Big numbers are never valid 840 in instructions, which is where this routine is always called. */ 841 if (prefix_mode != GE_OPT_PREFIX_BIG 842 && (ep->X_op == O_big 843 || (ep->X_add_symbol 844 && (walk_no_bignums (ep->X_add_symbol) 845 || (ep->X_op_symbol 846 && walk_no_bignums (ep->X_op_symbol)))))) 847 { 848 inst.error = _("invalid constant"); 849 *str = input_line_pointer; 850 input_line_pointer = save_in; 851 return 1; 852 } 853 854 *str = input_line_pointer; 855 input_line_pointer = save_in; 856 return 0; 857} 858 859/* Turn a string in input_line_pointer into a floating point constant 860 of type TYPE, and store the appropriate bytes in *LITP. 
The number 861 of LITTLENUMS emitted is stored in *SIZEP. An error message is 862 returned, or NULL on OK. 863 864 Note that fp constants aren't represent in the normal way on the ARM. 865 In big endian mode, things are as expected. However, in little endian 866 mode fp constants are big-endian word-wise, and little-endian byte-wise 867 within the words. For example, (double) 1.1 in big endian mode is 868 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is 869 the byte sequence 99 99 f1 3f 9a 99 99 99. 870 871 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */ 872 873char * 874md_atof (int type, char * litP, int * sizeP) 875{ 876 int prec; 877 LITTLENUM_TYPE words[MAX_LITTLENUMS]; 878 char *t; 879 int i; 880 881 switch (type) 882 { 883 case 'f': 884 case 'F': 885 case 's': 886 case 'S': 887 prec = 2; 888 break; 889 890 case 'd': 891 case 'D': 892 case 'r': 893 case 'R': 894 prec = 4; 895 break; 896 897 case 'x': 898 case 'X': 899 prec = 6; 900 break; 901 902 case 'p': 903 case 'P': 904 prec = 6; 905 break; 906 907 default: 908 *sizeP = 0; 909 return _("bad call to MD_ATOF()"); 910 } 911 912 t = atof_ieee (input_line_pointer, type, words); 913 if (t) 914 input_line_pointer = t; 915 *sizeP = prec * 2; 916 917 if (target_big_endian) 918 { 919 for (i = 0; i < prec; i++) 920 { 921 md_number_to_chars (litP, (valueT) words[i], 2); 922 litP += 2; 923 } 924 } 925 else 926 { 927 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure)) 928 for (i = prec - 1; i >= 0; i--) 929 { 930 md_number_to_chars (litP, (valueT) words[i], 2); 931 litP += 2; 932 } 933 else 934 /* For a 4 byte float the order of elements in `words' is 1 0. 935 For an 8 byte float the order is 1 0 3 2. 
 */
	  for (i = 0; i < prec; i += 2)
	    {
	      md_number_to_chars (litP, (valueT) words[i + 1], 2);
	      md_number_to_chars (litP + 2, (valueT) words[i], 2);
	      litP += 4;
	    }
	}

      return 0;
}

/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  */
void
md_operand (expressionS * expr)
{
  if (in_my_get_expression)
    /* NOTE(review): the cast is to segT even though X_op holds an
       operatorT-style value; the numeric effect is the same but the
       cast looks copy-pasted -- confirm.  */
    expr->X_op = (segT)O_illegal;
}

/* Register parsing.  */

/* Generic register parser.  CCP points to what should be the
   beginning of a register name.  If it is indeed a valid register
   name, advance CCP over it and return the reg_entry structure;
   otherwise return NULL.  Does not issue diagnostics.  */

static struct reg_entry *
arm_reg_parse_multi (char **ccp)
{
  char *start = *ccp;
  char *p;
  struct reg_entry *reg;

  /* Honour any mandatory/optional register prefix the target defines
     (e.g. '%' or '$'); neither macro is defined in this port by default.  */
#ifdef REGISTER_PREFIX
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif
#ifdef OPTIONAL_REGISTER_PREFIX
  if (*start == OPTIONAL_REGISTER_PREFIX)
    start++;
#endif

  p = start;
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  /* Scan over the rest of the identifier: letters, digits, underscores.  */
  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  /* Look the name up in the register hash table without copying it.  */
  reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  /* Only consume the input on success.  */
  *ccp = p;
  return reg;
}

/* Accept alternative spellings for a few register classes.  CCP/START
   are the parse cursor and the start of the candidate name; REG is the
   entry found by arm_reg_parse_multi (may be NULL).  Returns the
   register number, or FAIL if no alternative syntax applies.  */

static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
                    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	uint32_t processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* NOTE(review): no break -- control falls through into the
	 REG_TYPE_MMXWC case below.  This matches the upstream gas
	 source, but confirm the fall-through is intentional; note that
	 *ccp may already have been advanced by the strtoul above.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}

/* As arm_reg_parse_multi, but the register must be of type TYPE, and the
   return value is the register number or FAIL.  */

static int
arm_reg_parse (char **ccp, enum arm_reg_type type)
{
  char *start = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (ccp);
  int ret;

  /* Do not allow a scalar (reg+index) to parse as a register.  */
  if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
    return FAIL;

  if (reg && reg->type == type)
    return reg->number;

  if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
    return ret;

  /* No match: rewind the cursor so the caller sees no consumption.  */
  *ccp = start;
  return FAIL;
}

/* Parse a Neon type specifier. *STR should point at the leading '.'
   character. Does no verification at this stage that the type fits the opcode
   properly. E.g.,

     .i32.i32.s16
     .s32.f32
     .u16

   Can all be legally parsed by this function.

   Fills in neon_type struct pointer with parsed information, and updates STR
   to point after the parsed type specifier. Returns SUCCESS if this was a legal
   type, FAIL if not.  */

static int
parse_neon_type (struct neon_type *type, char **str)
{
  char *ptr = *str;

  /* NOTE(review): TYPE is guarded against NULL here and at the bottom of
     the loop, but the while condition below dereferences it
     unconditionally -- every current caller passes a non-NULL pointer,
     so the guard is misleading rather than harmful; confirm.  */
  if (type)
    type->elems = 0;

  while (type->elems < NEON_MAX_TYPE_ELS)
    {
      enum neon_el_type thistype = NT_untyped;
      unsigned thissize = -1u;

      if (*ptr != '.')
	break;

      ptr++;

      /* Just a size without an explicit type.  */
      if (ISDIGIT (*ptr))
	goto parsesize;

      switch (TOLOWER (*ptr))
	{
	case 'i': thistype = NT_integer; break;
	case 'f': thistype = NT_float; break;
	case 'p': thistype = NT_poly; break;
	case 's': thistype = NT_signed; break;
	case 'u': thistype = NT_unsigned; break;
	case 'd':
	  /* ".d" is shorthand for a 64-bit float element.  */
	  thistype = NT_float;
	  thissize = 64;
	  ptr++;
	  goto done;
	default:
	  as_bad (_("unexpected character `%c' in type specifier"), *ptr);
	  return FAIL;
	}

      ptr++;

      /* .f is an abbreviation for .f32.  */
      if (thistype == NT_float && !ISDIGIT (*ptr))
	thissize = 32;
      else
	{
	parsesize:
	  thissize = strtoul (ptr, &ptr, 10);

	  /* Only power-of-two element sizes from 8 to 64 are legal.  */
	  if (thissize != 8 && thissize != 16 && thissize != 32
	      && thissize != 64)
	    {
	      as_bad (_("bad size %d in type specifier"), thissize);
	      return FAIL;
	    }
	}

    done:
      if (type)
	{
	  type->el[type->elems].type = thistype;
	  type->el[type->elems].size = thissize;
	  type->elems++;
	}
    }

  /* Empty/missing type is not a successful parse.  */
  if (type->elems == 0)
    return FAIL;

  *str = ptr;

  return SUCCESS;
}

/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is set
   will be the most meaningful. Avoid overwriting it with later (cascading)
   errors by calling this function.  */

static void
first_error (const char *err)
{
  if (!inst.error)
    inst.error = err;
}

/* Parse a single type, e.g. ".s32", leading period included.
 */
static int
parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
{
  char *str = *ccp;
  struct neon_type optype;

  if (*str == '.')
    {
      if (parse_neon_type (&optype, &str) == SUCCESS)
	{
	  /* Exactly one element type may follow an operand.  */
	  if (optype.elems == 1)
	    *vectype = optype.el[0];
	  else
	    {
	      first_error (_("only one type should be specified for operand"));
	      return FAIL;
	    }
	}
      else
	{
	  first_error (_("vector type expected"));
	  return FAIL;
	}
    }
  else
    return FAIL;

  *ccp = str;

  return SUCCESS;
}

/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer.  */

#define NEON_ALL_LANES		15
#define NEON_INTERLEAVE_LANES	14

/* Parse either a register or a scalar, with an optional type. Return the
   register number, and optionally fill in the actual type of the register
   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
   type/index information in *TYPEINFO.  */

static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start from a "nothing known" type/index record.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.
     Narrow the requested union type down to the concrete type of the
     register actually found.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = reg->type;

  if (type != reg->type)
    return FAIL;

  /* Inherit any type/index information attached via .dn/.qn aliases.  */
  if (reg->neon)
    atype = *reg->neon;

  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional scalar index: "[n]" or "[]" (all lanes).  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  /* NOTE(review): the index value is not range-checked here;
	     callers such as parse_scalar do the bounds check later.  */
	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}

/* Like arm_reg_parse, but allow the following extra features:
   - If RTYPE is non-zero, return the (possibly restricted) type of the
     register (e.g. Neon double or quad reg when either has been requested).
1311 - If this is a Neon vector type with additional type information, fill 1312 in the struct pointed to by VECTYPE (if non-NULL). 1313 This function will fault on encountering a scalar. 1314*/ 1315 1316static int 1317arm_typed_reg_parse (char **ccp, enum arm_reg_type type, 1318 enum arm_reg_type *rtype, struct neon_type_el *vectype) 1319{ 1320 struct neon_typed_alias atype; 1321 char *str = *ccp; 1322 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype); 1323 1324 if (reg == FAIL) 1325 return FAIL; 1326 1327 /* Do not allow a scalar (reg+index) to parse as a register. */ 1328 if ((atype.defined & NTA_HASINDEX) != 0) 1329 { 1330 first_error (_("register operand expected, but got scalar")); 1331 return FAIL; 1332 } 1333 1334 if (vectype) 1335 *vectype = atype.eltype; 1336 1337 *ccp = str; 1338 1339 return reg; 1340} 1341 1342#define NEON_SCALAR_REG(X) ((X) >> 4) 1343#define NEON_SCALAR_INDEX(X) ((X) & 15) 1344 1345/* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't 1346 have enough information to be able to do a good job bounds-checking. So, we 1347 just do easy checks here, and do further checks later. */ 1348 1349static int 1350parse_scalar (char **ccp, int elsize, struct neon_type_el *type) 1351{ 1352 int reg; 1353 char *str = *ccp; 1354 struct neon_typed_alias atype; 1355 1356 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype); 1357 1358 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0) 1359 return FAIL; 1360 1361 if (atype.index == NEON_ALL_LANES) 1362 { 1363 first_error (_("scalar must have an index")); 1364 return FAIL; 1365 } 1366 else if (atype.index >= 64 / elsize) 1367 { 1368 first_error (_("scalar index out of range")); 1369 return FAIL; 1370 } 1371 1372 if (type) 1373 *type = atype.eltype; 1374 1375 *ccp = str; 1376 1377 return reg * 16 + atype.index; 1378} 1379 1380/* Parse an ARM register list. Returns the bitmask, or FAIL. 
*/ 1381static int32_t 1382parse_reg_list (char ** strp) 1383{ 1384 char * str = * strp; 1385 int32_t range = 0; 1386 int another_range; 1387 1388 /* We come back here if we get ranges concatenated by '+' or '|'. */ 1389 do 1390 { 1391 another_range = 0; 1392 1393 if (*str == '{') 1394 { 1395 int in_range = 0; 1396 int cur_reg = -1; 1397 1398 str++; 1399 do 1400 { 1401 int reg; 1402 1403 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL) 1404 { 1405 first_error (_(reg_expected_msgs[REG_TYPE_RN])); 1406 return FAIL; 1407 } 1408 1409 if (in_range) 1410 { 1411 int i; 1412 1413 if (reg <= cur_reg) 1414 { 1415 first_error (_("bad range in register list")); 1416 return FAIL; 1417 } 1418 1419 for (i = cur_reg + 1; i < reg; i++) 1420 { 1421 if (range & (1 << i)) 1422 as_tsktsk 1423 (_("Warning: duplicated register (r%d) in register list"), 1424 i); 1425 else 1426 range |= 1 << i; 1427 } 1428 in_range = 0; 1429 } 1430 1431 if (range & (1 << reg)) 1432 as_tsktsk (_("Warning: duplicated register (r%d) in register list"), 1433 reg); 1434 else if (reg <= cur_reg) 1435 as_tsktsk (_("Warning: register range not in ascending order")); 1436 1437 range |= 1 << reg; 1438 cur_reg = reg; 1439 } 1440 while (skip_past_comma (&str) != FAIL 1441 || (in_range = 1, *str++ == '-')); 1442 str--; 1443 1444 if (*str++ != '}') 1445 { 1446 first_error (_("missing `}'")); 1447 return FAIL; 1448 } 1449 } 1450 else 1451 { 1452 expressionS expr; 1453 1454 if (my_get_expression (&expr, &str, GE_NO_PREFIX)) 1455 return FAIL; 1456 1457 if (expr.X_op == O_constant) 1458 { 1459 if (expr.X_add_number 1460 != (expr.X_add_number & 0x0000ffff)) 1461 { 1462 inst.error = _("invalid register mask"); 1463 return FAIL; 1464 } 1465 1466 if ((range & expr.X_add_number) != 0) 1467 { 1468 int regno = range & expr.X_add_number; 1469 1470 regno &= -regno; 1471 regno = (1 << regno) - 1; 1472 as_tsktsk 1473 (_("Warning: duplicated register (r%d) in register list"), 1474 regno); 1475 } 1476 1477 range |= 
expr.X_add_number; 1478 } 1479 else 1480 { 1481 if (inst.reloc.type != 0) 1482 { 1483 inst.error = _("expression too complex"); 1484 return FAIL; 1485 } 1486 1487 memcpy (&inst.reloc.exp, &expr, sizeof (expressionS)); 1488 inst.reloc.type = BFD_RELOC_ARM_MULTI; 1489 inst.reloc.pc_rel = 0; 1490 } 1491 } 1492 1493 if (*str == '|' || *str == '+') 1494 { 1495 str++; 1496 another_range = 1; 1497 } 1498 } 1499 while (another_range); 1500 1501 *strp = str; 1502 return range; 1503} 1504 1505/* Types of registers in a list. */ 1506 1507enum reg_list_els 1508{ 1509 REGLIST_VFP_S, 1510 REGLIST_VFP_D, 1511 REGLIST_NEON_D 1512}; 1513 1514/* Parse a VFP register list. If the string is invalid return FAIL. 1515 Otherwise return the number of registers, and set PBASE to the first 1516 register. Parses registers of type ETYPE. 1517 If REGLIST_NEON_D is used, several syntax enhancements are enabled: 1518 - Q registers can be used to specify pairs of D registers 1519 - { } can be omitted from around a singleton register list 1520 FIXME: This is not implemented, as it would require backtracking in 1521 some cases, e.g.: 1522 vtbl.8 d3,d4,d5 1523 This could be done (the meaning isn't really ambiguous), but doesn't 1524 fit in well with the current parsing framework. 1525 - 32 D registers may be used (also true for VFPv3). 1526 FIXME: Types are ignored in these register lists, which is probably a 1527 bug. 
 */

static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  uint32_t mask = 0;
  int i;

  if (*str != '{')
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  str++;

  /* Map the list flavour to the register type we expect to parse.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;
    }

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	{
	  max_regs = 32;
	  /* Record that this module actually used the VFPv3 feature.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_v3);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_v3);
	}
      else
	max_regs = 16;
    }

  /* Start above the legal range so the first register parsed becomes
     the running minimum.  */
  base_reg = max_regs;

  do
    {
      int setmask = 1, addregs = 1;

      /* regtype may be narrowed from NDQ to the concrete D or Q type.  */
      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  /* A Q register occupies two consecutive D-register slots.  */
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = /* HACK gettext */ (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  /* For Q registers include the second D slot of the top reg.  */
	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* NOTE(review): this consumes one character assumed to be the closing
     '}' without verifying it -- a list such as "{s0, s1]" would be
     accepted here.  TODO confirm whether callers guarantee the brace.  */
  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)
    abort ();

  *pbase = base_reg;

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}

/* True if two alias types are the same.
 */

static int
neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
{
  /* Two missing aliases compare equal; one missing, one present do not.  */
  if (!a && !b)
    return 1;

  if (!a || !b)
    return 0;

  if (a->defined != b->defined)
    return 0;

  /* Compare element types only when both actually define one.  */
  if ((a->defined & NTA_HASTYPE) != 0
      && (a->eltype.type != b->eltype.type
	  || a->eltype.size != b->eltype.size))
    return 0;

  /* Likewise for the scalar index.  */
  if ((a->defined & NTA_HASINDEX) != 0
      && (a->index != b->index))
    return 0;

  return 1;
}

/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
   The base register is put in *PBASE.
   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
   the return value.
   The register stride (minus one) is put in bit 4 of the return value.
   Bits [6:5] encode the list length (minus one).
   The type of the list elements is put in *ELTYPE, if non-NULL.  */

#define NEON_LANE(X)		((X) & 0xf)
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)

static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;	/* Lowest D register in the list; -1 = none yet.  */
  int reg_incr = -1;	/* Stride between list entries; -1 = not fixed yet.  */
  int count = 0;	/* Number of D registers accumulated so far.  */
  int lane = -1;	/* Lane index, or a NEON_*_LANES constant.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  int addregs = 1;	/* D registers contributed per parsed entry.  */
  const char *const incr_error = "register stride must be 1 or 2";
  const char *const type_error = "mismatched element/structure types in list";
  struct neon_typed_alias firsttype = { 0 };

  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First entry: fix the base register and, for Q registers,
	     the implied stride and width.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	      addregs = 2;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second entry fixes the stride for the rest of the list.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (!neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if ((atype.defined & NTA_HASINDEX) != 0)
	    {
	      /* All indexed entries must use the same lane.  */
	      if (lane == -1)
		lane = atype.index;
	      else if (lane != atype.index)
		{
		  first_error (_(type_error));
		  return FAIL;
		}
	    }
	  else if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (!neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count every D register between the low and high endpoints,
	     inclusive (dregs accounts for a Q-register top end).  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride-1 (bit 4) and count-1 (bits 5-6); see the
     NEON_LANE/NEON_REG_STRIDE/NEON_REGLIST_LENGTH macros above.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}

/* Parse an explicit relocation suffix on an expression.  This is
   either nothing, or a word in parentheses.  Note that if !OBJ_ELF,
   arm_reloc_hsh contains no entries, so this function can only
   succeed if there is no () after the word.  Returns -1 on error,
   BFD_RELOC_UNUSED if there wasn't any suffix.
 */
static int
parse_reloc (char **str)
{
  struct reloc_entry *r;
  char *p, *q;

  if (**str != '(')
    return BFD_RELOC_UNUSED;

  p = *str + 1;
  q = p;

  /* Scan to the closing paren; a comma inside the parens is malformed.  */
  while (*q && *q != ')' && *q != ',')
    q++;
  if (*q != ')')
    return -1;

  if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
    return -1;

  *str = q + 1;
  return r->reloc;
}

/* Directives: register aliases.  */

/* Record STR as an alias for register NUMBER of class TYPE.  Returns
   the new reg_entry, or 0 when the name already exists.
   NOTE(review): 0 is returned both for a harmless identical
   redefinition (silently) and for a conflicting one (with a warning);
   callers such as insert_neon_reg_alias treat any 0 as an error --
   confirm that is the intended behaviour.  */

static struct reg_entry *
insert_reg_alias (char *str, int number, int type)
{
  struct reg_entry *new;
  const char *name;

  if ((new = hash_find (arm_reg_hsh, str)) != 0)
    {
      if (new->builtin)
	as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);

      /* Only warn about a redefinition if it's not defined as the
	 same register.  */
      else if (new->number != number || new->type != type)
	as_warn (_("ignoring redefinition of register alias '%s'"), str);

      return 0;
    }

  /* The hash table keeps a pointer to the name, so it must outlive
     this call: take a private heap copy.  */
#ifdef NOTYET
  name = xstrdup (str);
#else
  name = xmalloc (strlen(str) + 1);
  strcpy((char *)name, str);
#endif
  new = xmalloc (sizeof (struct reg_entry));

  new->name = name;
  new->number = number;
  new->type = type;
  new->builtin = FALSE;
  new->neon = NULL;

  if (hash_insert (arm_reg_hsh, name, (PTR) new))
    abort ();

  return new;
}

/* As insert_reg_alias, but also attach Neon type/index information
   (ATYPE may be NULL for an untyped alias).  */

static void
insert_neon_reg_alias (char *str, int number, int type,
		       struct neon_typed_alias *atype)
{
  struct reg_entry *reg = insert_reg_alias (str, number, type);

  if (!reg)
    {
      first_error (_("attempt to redefine typed alias"));
      return;
    }

  if (atype)
    {
      /* The reg_entry owns its neon info; copy ATYPE onto the heap.  */
      reg->neon = xmalloc (sizeof (struct neon_typed_alias));
      *reg->neon = *atype;
    }
}

/* Look for the .req directive.
   This is of the form:

     new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return non-zero.  Otherwise return zero.  */

static int
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return 0;

  oldname += 6;
  if (*oldname == '\0')
    return 0;

  old = hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* Return 1 anyway: the line was a .req, just a broken one.  */
      return 1;
    }

#define TC_CASE_SENSITIVE /* HACK */
  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  /* Stack-allocated scratch copy; insert_reg_alias makes its own
     heap copy of any name it keeps.  */
  nbuf = alloca (nlen + 1);
  memcpy (nbuf, newname, nlen);
  nbuf[nlen] = '\0';

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  insert_reg_alias (nbuf, old->number, old->type);

  for (p = nbuf; *p; p++)
    *p = TOUPPER (*p);

  /* Skip the case variant if it is spelled the same as the original.  */
  if (strncmp (nbuf, newname, nlen))
    insert_reg_alias (nbuf, old->number, old->type);

  for (p = nbuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (nbuf, newname, nlen))
    insert_reg_alias (nbuf, old->number, old->type);

  return 1;
}

/* Create a Neon typed/indexed register alias using directives, e.g.:
     X .dn d5.s32[1]
     Y .qn 6.s16
     Z .dn d7
     T .dn Z[0]
   These typed registers can be used instead of the types specified after the
   Neon mnemonic, so long as all operands given have types. Types can also be
   specified directly, e.g.:
     vadd d0.s32, d1.s32, d2.s32
*/

static int
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;	/* Scratch entry for bare-number bases.  */
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend;
  int namelen;

  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return 0;

  p += 5;

  if (*p == '\0')
    return 0;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return 0;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return 0;
	}
      basereg = &mybasereg;
      /* Q<n> maps onto D-register numbering as 2*n.  */
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Inherit any type/index info already attached to the base alias.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return 0;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return 0;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return 0;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return 0;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return 0;
	}
    }

  namelen = nameend - newname;
  namebuf = alloca (namelen + 1);
  strncpy (namebuf, newname, namelen);
  namebuf[namelen] = '\0';

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  Note: p is reused here as a cursor
     over namebuf; the directive text is fully parsed by now.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  return 1;
}

/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  */
static void
s_req (uintptr_t a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}

/* Likewise for .dn used at the start of a line.  */
static void
s_dn (uintptr_t a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}

/* Likewise for .qn used at the start of a line.  */
static void
s_qn (uintptr_t a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}

/* The .unreq directive deletes an alias which was previously defined
   by .req. For example:

     my_alias .req r11
     .unreq my_alias  */

static void
s_unreq (uintptr_t a ATTRIBUTE_UNUSED)
{
  char * name;
  char saved_char;

  name = input_line_pointer;

  /* Isolate the alias name by temporarily NUL-terminating it.  */
  while (*input_line_pointer != 0
	 && *input_line_pointer != ' '
	 && *input_line_pointer != '\n')
    ++input_line_pointer;

  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  if (!*name)
    as_bad (_("invalid syntax for .unreq directive"));
  else
    {
      struct reg_entry *reg = hash_find (arm_reg_hsh, name);

      if (!reg)
	as_bad (_("unknown register alias '%s'"), name);
      else if (reg->builtin)
	as_warn (_("ignoring attempt to undefine built-in register '%s'"),
		 name);
      else
	{
	  /* Release the entry and everything it owns (name, neon info).  */
	  hash_delete (arm_reg_hsh, name);
	  free ((char *) reg->name);
	  if (reg->neon)
	    free (reg->neon);
	  free (reg);
	}
    }

  *input_line_pointer = saved_char;
  demand_empty_rest_of_line ();
}

/* Directives: Instruction set selection.  */

#ifdef OBJ_ELF
/* unused OBJ_ELF code removed */
#else
#define mapping_state(x) /* nothing */
#endif

/* Find the real, Thumb encoded start of a Thumb function.
*/ 2271 2272static symbolS * 2273find_real_start (symbolS * symbolP) 2274{ 2275 char * real_start; 2276 const char * name = S_GET_NAME (symbolP); 2277 symbolS * new_target; 2278 2279 /* This definition must agree with the one in gcc/config/arm/thumb.c. */ 2280#define STUB_NAME ".real_start_of" 2281 2282 if (name == NULL) 2283 abort (); 2284 2285 /* The compiler may generate BL instructions to local labels because 2286 it needs to perform a branch to a far away location. These labels 2287 do not have a corresponding ".real_start_of" label. We check 2288 both for S_IS_LOCAL and for a leading dot, to give a way to bypass 2289 the ".real_start_of" convention for nonlocal branches. */ 2290 if (S_IS_LOCAL (symbolP) || name[0] == '.') 2291 return symbolP; 2292 2293 real_start = malloc (strlen (name) + strlen (STUB_NAME) + 1); 2294 sprintf (real_start, "%s%s", STUB_NAME, name); 2295 new_target = symbol_find (real_start); 2296 2297#ifdef NOTYET 2298 if (new_target == NULL) 2299 { 2300 as_warn ("Failed to find real start of function: %s\n", name); 2301 new_target = symbolP; 2302 } 2303#endif 2304 2305 free (real_start); 2306 2307 return new_target; 2308} 2309 2310static void 2311opcode_select (int width) 2312{ 2313 switch (width) 2314 { 2315 case 16: 2316 if (! thumb_mode) 2317 { 2318 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t)) 2319 as_bad (_("selected processor does not support THUMB opcodes")); 2320 2321 thumb_mode = 1; 2322#ifdef NOTYET 2323 /* No need to force the alignment, since we will have been 2324 coming from ARM mode, which is word-aligned. 
*/ 2325 record_alignment (now_seg, 1); 2326#endif /* NOTYET */ 2327 } 2328 mapping_state (MAP_THUMB); 2329 break; 2330 2331 case 32: 2332 if (thumb_mode) 2333 { 2334 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)) 2335 as_bad (_("selected processor does not support ARM opcodes")); 2336 2337 thumb_mode = 0; 2338#ifdef NOTYET 2339 if (!need_pass_2) 2340 frag_align (2, 0, 0); 2341 2342 record_alignment (now_seg, 1); 2343#endif /* NOTYET */ 2344 } 2345 mapping_state (MAP_ARM); 2346 break; 2347 2348 default: 2349 as_bad (_("invalid instruction size selected (%d)"), width); 2350 } 2351} 2352 2353static void 2354s_arm (uintptr_t ignore ATTRIBUTE_UNUSED) 2355{ 2356 opcode_select (32); 2357 demand_empty_rest_of_line (); 2358} 2359 2360static void 2361s_thumb (uintptr_t ignore ATTRIBUTE_UNUSED) 2362{ 2363 opcode_select (16); 2364 demand_empty_rest_of_line (); 2365} 2366 2367static void 2368s_code (uintptr_t unused ATTRIBUTE_UNUSED) 2369{ 2370 int temp; 2371 2372 temp = get_absolute_expression (); 2373 switch (temp) 2374 { 2375 case 16: 2376 case 32: 2377 opcode_select (temp); 2378 break; 2379 2380 default: 2381 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp); 2382 } 2383} 2384 2385static void 2386s_force_thumb (uintptr_t ignore ATTRIBUTE_UNUSED) 2387{ 2388 /* If we are not already in thumb mode go into it, EVEN if 2389 the target processor does not support thumb instructions. 2390 This is used by gcc/config/arm/lib1funcs.asm for example 2391 to compile interworking support functions even if the 2392 target processor should not support interworking. */ 2393 if (! thumb_mode) 2394 { 2395 thumb_mode = 2; 2396#ifdef NOTYET 2397 record_alignment (now_seg, 1); 2398#endif /* NOTYET */ 2399 } 2400 2401 demand_empty_rest_of_line (); 2402} 2403 2404/* We will support '.thumb_func' a la binutils, but we will also support 2405 '.thumb_func /symbol_name/', to avoid the inherent pitfalls of 2406 looking for the next valid label. 
*/
/* Handle the .thumb_func directive.

   With no argument, the NEXT label defined is marked as the start of a
   Thumb function (via label_is_thumb_function_name).  With a symbol-name
   argument, the named symbol is found or created and marked directly.  */
static void
s_thumb_func (uintptr_t ignore ATTRIBUTE_UNUSED)
{
  if (is_end_of_line(*input_line_pointer))
    {
      /* No symbol specified - we'll use the next one we find. */
      if (! thumb_mode)
	opcode_select (16);

      /* The following label is the name/address of the start of a Thumb function.
         We need to know this for the interworking support.  */
      label_is_thumb_function_name = TRUE;
    }
  else
    {
      /* Symbol name specified. */
      char *name;
      int c;
      symbolS *symbolP;

      /* Skip over an opening quote, if present.  */
      if (*input_line_pointer == '"')
	name = input_line_pointer + 1;
      else
	name = input_line_pointer;

      /* get_symbol_end() NUL-terminates the name in place and returns the
	 character it overwrote; restore it after the lookup.  */
      c = get_symbol_end();
      symbolP = symbol_find_or_make (name);
      *input_line_pointer = c;
      SKIP_WHITESPACE();

      THUMB_SET_FUNC (symbolP, 1);
      /* Also record the Thumb-definition bit in the Mach-O n_desc field.  */
      symbolP->sy_desc |= N_ARM_THUMB_DEF;
    }

  demand_empty_rest_of_line ();
}

/* Perform a .set directive, but also mark the alias as
   being a thumb function.  */

static void
s_thumb_set (uintptr_t equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  name = input_line_pointer;
  delim = get_symbol_end ();
  end_name = input_line_pointer;
  /* Temporarily put the delimiter back so the ',' check below sees the
     original input; the name is re-terminated (and restored) as needed.  */
  *end_name = delim;

  if (*input_line_pointer != ',')
    {
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#define NO_LISTING /* HACK */
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, 0, 0, &zero_address_frag);
#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    } /* Make a new symbol.  */

  symbol_table_insert (symbolP);

  * end_name = delim;

#ifdef NOTYET
  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
#endif /* NOTYET */

  /* pseudo_set() parses the value expression from the input line and
     assigns it to the symbol, exactly as .set would.  */
  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
  symbolP->sy_desc |= N_ARM_THUMB_DEF;
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}

/* Directives: Mode selection.  */

/* .syntax [unified|divided] - choose the new unified syntax
   (same for Arm and Thumb encoding, modulo slight differences in what
   can be represented) or the old divergent syntax for each mode.
*/ 2542static void 2543s_syntax (uintptr_t unused ATTRIBUTE_UNUSED) 2544{ 2545 char *name, delim; 2546 2547 name = input_line_pointer; 2548 delim = get_symbol_end (); 2549 2550 if (!strcasecmp (name, "unified")) 2551 unified_syntax = TRUE; 2552 else if (!strcasecmp (name, "divided")) 2553 unified_syntax = FALSE; 2554 else 2555 { 2556 as_bad (_("unrecognized syntax mode \"%s\""), name); 2557 return; 2558 } 2559 *input_line_pointer = delim; 2560 demand_empty_rest_of_line (); 2561} 2562 2563/* Directives: sectioning and alignment. */ 2564 2565#ifdef NOTYET 2566/* Same as s_align_ptwo but align 0 => align 2. */ 2567 2568static void 2569s_align (uintptr_t unused ATTRIBUTE_UNUSED) 2570{ 2571 int temp; 2572 bfd_boolean fill_p; 2573 int32_t temp_fill; 2574 int32_t max_alignment = 15; 2575 2576 temp = get_absolute_expression (); 2577 if (temp > max_alignment) 2578 as_bad (_("alignment too large: %d assumed"), temp = max_alignment); 2579 else if (temp < 0) 2580 { 2581 as_bad (_("alignment negative. 0 assumed.")); 2582 temp = 0; 2583 } 2584 2585 if (*input_line_pointer == ',') 2586 { 2587 input_line_pointer++; 2588 temp_fill = get_absolute_expression (); 2589 fill_p = TRUE; 2590 } 2591 else 2592 { 2593 fill_p = FALSE; 2594 temp_fill = 0; 2595 } 2596 2597 if (!temp) 2598 temp = 2; 2599 2600 /* Only make a frag if we HAVE to. 
*/ 2601 if (temp 2602#ifdef NOTYET 2603 && !need_pass_2 2604#endif /* NOTYET */ 2605 ) 2606 { 2607#ifdef NOTYET 2608 if (!fill_p && subseg_text_p (now_seg)) 2609 frag_align_code (temp, 0); 2610 else 2611#endif /* NOTYET */ 2612 { 2613 char fill[4]; 2614 md_number_to_chars(fill, temp_fill, 4); 2615 frag_align (temp, fill, 0, 0); 2616 } 2617 } 2618 demand_empty_rest_of_line (); 2619 2620#ifdef NOTYET 2621 record_alignment (now_seg, temp); 2622#endif /* NOTYET */ 2623} 2624#endif /* NOTYET */ 2625 2626static void 2627s_bss (uintptr_t ignore ATTRIBUTE_UNUSED) 2628{ 2629#ifdef NOTYET 2630 /* We don't support putting frags in the BSS segment, we fake it by 2631 marking in_bss, then looking at s_skip for clues. */ 2632 subseg_set (bss_section, 0); 2633 demand_empty_rest_of_line (); 2634 mapping_state (MAP_DATA); 2635#else 2636 as_fatal(".bss directive not supported, use .zerofill for Mach-O files"); 2637#endif /* NOTYET */ 2638} 2639 2640static void 2641s_even (uintptr_t ignore ATTRIBUTE_UNUSED) 2642{ 2643 /* Never make frag if expect extra pass. */ 2644#ifdef NOTYET 2645 if (!need_pass_2) 2646#endif /* NOTYET */ 2647 frag_align (1, 0, 0, 0); 2648 2649#ifdef NOTYET 2650 record_alignment (now_seg, 1); 2651#endif /* NOTYET */ 2652 2653 demand_empty_rest_of_line (); 2654} 2655 2656/* Directives: Literal pools. */ 2657 2658static literal_pool * 2659find_literal_pool (void) 2660{ 2661 literal_pool * pool; 2662 2663 for (pool = list_of_pools; pool != NULL; pool = pool->next) 2664 { 2665 if (pool->section == now_seg 2666 && pool->sub_section == now_subseg) 2667 break; 2668 } 2669 2670 return pool; 2671} 2672 2673static literal_pool * 2674find_or_make_literal_pool (void) 2675{ 2676 /* Next literal pool ID number. */ 2677 static unsigned int latest_pool_num = 1; 2678 literal_pool * pool; 2679 2680 pool = find_literal_pool (); 2681 2682 if (pool == NULL) 2683 { 2684 /* Create a new pool. */ 2685 pool = xmalloc (sizeof (* pool)); 2686 if (! 
pool) 2687 return NULL; 2688 2689 pool->next_free_entry = 0; 2690 pool->section = now_seg; 2691 pool->sub_section = now_subseg; 2692 pool->next = list_of_pools; 2693 pool->symbol = NULL; 2694 2695 /* Add it to the list. */ 2696 list_of_pools = pool; 2697 } 2698 2699 /* New pools, and emptied pools, will have a NULL symbol. */ 2700 if (pool->symbol == NULL) 2701 { 2702 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section, 2703 (valueT) 0, &zero_address_frag); 2704 pool->id = latest_pool_num ++; 2705 } 2706 2707 /* Done. */ 2708 return pool; 2709} 2710 2711/* Add the literal in the global 'inst' 2712 structure to the relevent literal pool. */ 2713 2714static int 2715add_to_lit_pool (void) 2716{ 2717 literal_pool * pool; 2718 unsigned int entry; 2719 2720 pool = find_or_make_literal_pool (); 2721 2722 /* Check if this literal value is already in the pool. */ 2723 for (entry = 0; entry < pool->next_free_entry; entry ++) 2724 { 2725 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op) 2726 && (inst.reloc.exp.X_op == O_constant) 2727 && (pool->literals[entry].X_add_number 2728 == inst.reloc.exp.X_add_number) 2729#ifdef NOTYET 2730 && (pool->literals[entry].X_unsigned 2731 == inst.reloc.exp.X_unsigned)) 2732#else 2733 ) 2734#endif 2735 break; 2736 2737 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op) 2738 && (inst.reloc.exp.X_op == O_symbol) 2739 && (pool->literals[entry].X_add_number 2740 == inst.reloc.exp.X_add_number) 2741 && (pool->literals[entry].X_add_symbol 2742 == inst.reloc.exp.X_add_symbol) 2743 && (pool->literals[entry].X_op_symbol 2744 == inst.reloc.exp.X_op_symbol)) 2745 break; 2746 } 2747 2748 /* Do we need to create a new entry? 
*/ 2749 if (entry == pool->next_free_entry) 2750 { 2751 if (entry >= MAX_LITERAL_POOL_SIZE) 2752 { 2753 inst.error = _("literal pool overflow"); 2754 return FAIL; 2755 } 2756 2757 pool->literals[entry] = inst.reloc.exp; 2758 pool->next_free_entry += 1; 2759 } 2760 2761 inst.reloc.exp.X_op = O_symbol; 2762 inst.reloc.exp.X_add_number = ((int) entry) * 4 - 8; 2763 inst.reloc.exp.X_add_symbol = pool->symbol; 2764 2765 return SUCCESS; 2766} 2767 2768#ifdef NOTYET 2769/* Can't use symbol_new here, so have to create a symbol and then at 2770 a later date assign it a value. Thats what these functions do. */ 2771 2772static void 2773symbol_locate (symbolS * symbolP, 2774 const char * name, /* It is copied, the caller can modify. */ 2775 segT segment, /* Segment identifier (SEG_<something>). */ 2776 valueT valu, /* Symbol value. */ 2777 fragS * frag) /* Associated fragment. */ 2778{ 2779 unsigned int name_length; 2780 char * preserved_copy_of_name; 2781 2782 name_length = strlen (name) + 1; /* +1 for \0. */ 2783 obstack_grow (¬es, name, name_length); 2784 preserved_copy_of_name = obstack_finish (¬es); 2785 2786#ifdef tc_canonicalize_symbol_name 2787 preserved_copy_of_name = 2788 tc_canonicalize_symbol_name (preserved_copy_of_name); 2789#endif 2790 2791 S_SET_NAME (symbolP, preserved_copy_of_name); 2792 2793 S_SET_SEGMENT (symbolP, segment); 2794 S_SET_VALUE (symbolP, valu); 2795 symbol_clear_list_pointers (symbolP); 2796 2797 symbol_set_frag (symbolP, frag); 2798 2799 /* Link to end of symbol chain. 
*/ 2800 { 2801 extern int symbol_table_frozen; 2802 2803 if (symbol_table_frozen) 2804 abort (); 2805 } 2806 2807 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP); 2808 2809 obj_symbol_new_hook (symbolP); 2810 2811#ifdef tc_symbol_new_hook 2812 tc_symbol_new_hook (symbolP); 2813#endif 2814 2815#ifdef DEBUG_SYMS 2816 verify_symbol_chain (symbol_rootP, symbol_lastP); 2817#endif /* DEBUG_SYMS */ 2818} 2819 2820static void 2821s_ltorg (uintptr_t ignored ATTRIBUTE_UNUSED) 2822{ 2823 unsigned int entry; 2824 literal_pool * pool; 2825 char sym_name[20]; 2826 2827 pool = find_literal_pool (); 2828 if (pool == NULL 2829 || pool->symbol == NULL 2830 || pool->next_free_entry == 0) 2831 return; 2832 2833 mapping_state (MAP_DATA); 2834 2835 /* Align pool as you have word accesses. 2836 Only make a frag if we have to. */ 2837#ifdef NOTYET 2838 if (!need_pass_2) 2839#endif /* NOTYET */ 2840 frag_align (2, 0, 0, 0); 2841 2842#ifdef NOTYET 2843 record_alignment (now_seg, 2); 2844#endif /* NOTYET */ 2845 2846 sprintf (sym_name, "$$lit_\002%x", pool->id); 2847 2848 symbol_locate (pool->symbol, sym_name, now_seg, 2849 (valueT) frag_now_fix (), frag_now); 2850 symbol_table_insert (pool->symbol); 2851 2852 ARM_SET_THUMB (pool->symbol, thumb_mode); 2853 2854#if defined OBJ_COFF || defined OBJ_ELF 2855 ARM_SET_INTERWORK (pool->symbol, support_interwork); 2856#endif 2857 2858 for (entry = 0; entry < pool->next_free_entry; entry ++) 2859 /* First output the expression in the instruction to the pool. */ 2860 emit_expr (&(pool->literals[entry]), 4); /* .word */ 2861 2862 /* Mark the pool as empty. */ 2863 pool->next_free_entry = 0; 2864 pool->symbol = NULL; 2865} 2866#endif /* NOTYET */ 2867 2868/* This table describes all the machine specific pseudo-ops the assembler 2869 has to support. The fields are: 2870 pseudo-op name without dot 2871 function to call to execute this pseudo-op 2872 Integer arg to pass to the function. 
*/ 2873 2874const pseudo_typeS md_pseudo_table[] = 2875{ 2876 /* Never called because '.req' does not start a line. */ 2877 { "req", s_req, 0 }, 2878 /* Following two are likewise never called. */ 2879 { "dn", s_dn, 0 }, 2880 { "qn", s_qn, 0 }, 2881 { "unreq", s_unreq, 0 }, 2882 { "bss", s_bss, 0 }, 2883#ifdef NOTYET 2884 { "align", s_align, 0 }, 2885#endif 2886 { "arm", s_arm, 0 }, 2887 { "thumb", s_thumb, 0 }, 2888 { "code", s_code, 0 }, 2889 { "force_thumb", s_force_thumb, 0 }, 2890 { "thumb_func", s_thumb_func, 0 }, 2891 { "thumb_set", s_thumb_set, 0 }, 2892 { "even", s_even, 0 }, 2893#ifdef NOTYET 2894 { "ltorg", s_ltorg, 0 }, 2895 { "pool", s_ltorg, 0 }, 2896#endif /* NOTYET */ 2897 { "syntax", s_syntax, 0 }, 2898#ifdef NOTYET 2899 { "cpu", s_arm_cpu, 0 }, 2900 { "arch", s_arm_arch, 0 }, 2901 { "object_arch", s_arm_object_arch, 0 }, 2902 { "fpu", s_arm_fpu, 0 }, 2903#endif 2904#ifdef OBJ_ELF 2905/* unused OBJ_ELF directives removed */ 2906#else 2907 { "word", cons, 4}, 2908 2909 /* These are used for dwarf. */ 2910 {"2byte", cons, 2}, 2911 {"4byte", cons, 4}, 2912 {"8byte", cons, 8}, 2913 /* These are used for dwarf2. */ 2914 { "file", (void (*) (uintptr_t)) dwarf2_directive_file, 0 }, 2915 { "loc", dwarf2_directive_loc, 0 }, 2916 { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 }, 2917#endif 2918 { "extend", float_cons, 'x' }, 2919 { "ldouble", float_cons, 'x' }, 2920 { "packed", float_cons, 'p' }, 2921 { 0, 0, 0 } 2922}; 2923 2924/* Parser functions used exclusively in instruction operands. */ 2925 2926/* Generic immediate-value read function for use in insn parsing. 2927 STR points to the beginning of the immediate (the leading #); 2928 VAL receives the value; if the value is outside [MIN, MAX] 2929 issue an error. PREFIX_OPT is true if the immediate prefix is 2930 optional. 
*/

static int
parse_immediate (char **str, int *val, int min, int max,
		 bfd_boolean prefix_opt)
{
  expressionS exp;
  my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
  if (exp.X_op != O_constant)
    {
      inst.error = _("constant expression required");
      return FAIL;
    }

  if (exp.X_add_number < min || exp.X_add_number > max)
    {
      inst.error = _("immediate value out of range");
      return FAIL;
    }

  *val = exp.X_add_number;
  return SUCCESS;
}

/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i].  */

static int
parse_big_immediate (char **str, int i)
{
  expressionS exp;
  char *ptr = *str;

  my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);

  if (exp.X_op == O_constant)
    {
      /* Low 32 bits go in .imm; if the host gave us a full 64-bit
	 constant, the high 32 bits go in .reg with .regisimm set.  */
      inst.operands[i].imm = exp.X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp.X_add_number & ~0xffffffffl) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4.  */
	  inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp.X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
	   && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
    {
      /* For O_big, X_add_number is the littlenum count of the bignum.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg, in a (hopefully) portable way.  */
      assert (parts != 0);
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else
    return FAIL;

  *str = ptr;

  return SUCCESS;
}

/* Returns the pseudo-register number of an FPA immediate constant,
   or FAIL if there isn't a valid constant here.  */

static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char * save_in;
  expressionS exp;
  int i;
  int j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line((unsigned char) **str))
	    /* FPA immediates occupy pseudo-register numbers 8..15.  */
	    return i + 8;
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line((unsigned char) *save_in))
    {
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  /* expression() works on input_line_pointer, so swap it with *str for
     the duration and restore it afterwards.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
      if (gen_to_words (words, 5, (int32_t) 15) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}

/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000.  */

static int
is_quarter_float (unsigned imm)
{
  /* The 'B' bits (30:25) must be the complement-pattern implied by the
     sign of the exponent bit 29.  */
  int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
}

/* Parse an 8-bit "quarter-precision" floating point number of the form:
   0baBbbbbbc defgh000 00000000 00000000.
   The zero and minus-zero cases need special handling, since they can't be
   encoded in the "quarter-precision" float format, but can nonetheless be
   loaded as integer constants.
*/

static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept quarter-precision encodable values, plus +0.0/-0.0 which
	 must be loaded as integers instead.  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}

/* Shift operands.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
};

/* Parse a <shift> specifier on an ARM data processing instruction.
   This has three forms:

     (LSL|LSR|ASL|ASR|ROR) Rs
     (LSL|LSR|ASL|ASR|ROR) #imm
     RRX

   Note that ASL is assimilated to LSL in the instruction encoding, and
   RRX to ROR #0 (which cannot be written as such).  */

static int
parse_shift (char **str, int i, enum parse_shift_mode mode)
{
  const struct asm_shift_name *shift_name;
  enum shift_kind shift;
  char *s = *str;
  char *p = s;
  int reg;

  /* Scan the alphabetic shift mnemonic.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  shift_name = hash_find_n (arm_shift_hsh, *str, p - *str);

  if (shift_name == NULL)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  shift = shift_name->kind;

  /* Enforce the caller's restriction on which shift kinds are legal.  */
  switch (mode)
    {
    case NO_SHIFT_RESTRICT:
    case SHIFT_IMMEDIATE: break;

    case SHIFT_LSL_OR_ASR_IMMEDIATE:
      if (shift != SHIFT_LSL && shift != SHIFT_ASR)
	{
	  inst.error = _("'LSL' or 'ASR' required");
	  return FAIL;
	}
      break;

    case SHIFT_LSL_IMMEDIATE:
      if (shift != SHIFT_LSL)
	{
	  inst.error = _("'LSL' required");
	  return FAIL;
	}
      break;

    case SHIFT_ASR_IMMEDIATE:
      if (shift != SHIFT_ASR)
	{
	  inst.error = _("'ASR' required");
	  return FAIL;
	}
      break;

    default: abort ();
    }

  if (shift != SHIFT_RRX)
    {
      /* Whitespace can appear here if the next thing is a bare digit.  */
      skip_whitespace (p);

      if (mode == NO_SHIFT_RESTRICT
	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* Register-specified shift amount.  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;
	}
      else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
	return FAIL;
    }
  inst.operands[i].shift_kind = shift;
  inst.operands[i].shifted = 1;
  *str = p;
  return SUCCESS;
}

/* Parse a <shifter_operand> for an ARM data processing instruction:

      #<immediate>
      #<immediate>, <rotate>
      <Rm>
      <Rm>, <shift>

   where <shift> is defined by parse_shift above, and <rotate> is a
   multiple of 2 between 0 and 30.  Validation of immediate operands
   is deferred to md_apply_fix.  */

static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS expr;

  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&expr, str, GE_NO_PREFIX))
	return FAIL;

      if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      value = expr.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Convert to decoded value.  md_apply_fix will put it back.
	 (Rotate the 8-bit constant right by 'value' bits within 32.)  */
      inst.reloc.exp.X_add_number
	= (((inst.reloc.exp.X_add_number << (32 - value))
	    | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
    }

  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}

/* Group relocation information.  Each entry in the table contains the
   textual name of the relocation as may appear in assembler source
   and must end with a colon.
   Along with this textual name are the relocation codes to be used if
   the corresponding instruction is an ALU instruction (ADD or SUB only),
   an LDR, an LDRS, or an LDC.  */

struct group_reloc_table_entry
{
  const char *name;
  int alu_code;
  int ldr_code;
  int ldrs_code;
  int ldc_code;
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;

static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 } };	/* LDC */

/* Given the address of a pointer pointing to the textual name of a group
   relocation as may appear in assembler source, attempt to find its details
   in group_reloc_table.  The pointer will be updated to the character after
   the trailing colon.  On failure, FAIL will be returned; SUCCESS
   otherwise.  On success, *entry will be updated to point at the relevant
   group_reloc_table entry.  */

static int
find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
{
  unsigned int i;
  for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
    {
      int length = strlen (group_reloc_table[i].name);

      /* Match the name case-insensitively, requiring the trailing ':'.  */
      if (strncasecmp (group_reloc_table[i].name, *str, length) == 0 &&
	  (*str)[length] == ':')
	{
	  *out = &group_reloc_table[i];
	  *str += (length + 1);
	  return SUCCESS;
	}
    }

  return FAIL;
}

/* Results from operand parsing worker functions.  */

typedef enum
{
  PARSE_OPERAND_SUCCESS,
  PARSE_OPERAND_FAIL,
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;

/* Parse a <shifter_operand> for an ARM data processing instruction
   (as for parse_shifter_operand) where group relocations are allowed:

      #<immediate>
      #<immediate>, <rotate>
      #:<group_reloc>:<expression>
      <Rm>
      <Rm>, <shift>

   where <group_reloc> is one of the strings defined in group_reloc_table.
   The hashes are optional.

   Everything else is as for parse_shifter_operand.  */

static parse_operand_result
parse_shifter_operand_group_reloc (char **str, int i)
{
  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a group relocation.
     If we don't, punt the whole lot to parse_shifter_operand.  */

  if (((*str)[0] == '#' && (*str)[1] == ':')
      || (*str)[0] == ':')
    {
      struct group_reloc_table_entry *entry;

      if ((*str)[0] == '#')
	(*str) += 2;
      else
	(*str)++;

      /* Try to parse a group relocation.  Anything else is an error.  */
      if (find_group_reloc_table_entry (str, &entry) == FAIL)
	{
	  inst.error = _("unknown group relocation");
	  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
	}

      /* We now have the group relocation table entry corresponding to
	 the name in the assembler source.  Next, we parse the expression.  */
      if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL_NO_BACKTRACK;

      /* Record the relocation type (always the ALU variant here).  */
      inst.reloc.type = entry->alu_code;
      assert (inst.reloc.type != 0);

      return PARSE_OPERAND_SUCCESS;
    }
  else
    return parse_shifter_operand (str, i) == SUCCESS
	   ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;

  /* Never reached.  */
}

/* Parse all forms of an ARM address expression.  Information is written
   to inst.operands[i] and/or inst.reloc.

   Preindexed addressing (.preind=1):

   [Rn, #offset]       .reg=Rn .reloc.exp=offset
   [Rn, +/-Rm]	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn, +/-Rm, shift]  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		       .shift_kind=shift .reloc.exp=shift_imm

   These three may have a trailing ! which causes .writeback to be set also.

   Postindexed addressing (.postind=1, .writeback=1):

   [Rn], #offset       .reg=Rn .reloc.exp=offset
   [Rn], +/-Rm	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn], +/-Rm, shift  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		       .shift_kind=shift .reloc.exp=shift_imm

   Unindexed addressing (.preind=0, .postind=0):

   [Rn], {option}      .reg=Rn .imm=option .immisreg=0

   Other:

   [Rn]{!}	       shorthand for [Rn,#0]{!}
   =immediate	       .isreg=0 .reloc.exp=immediate
   label	       .reg=PC .reloc.pc_rel=1 .reloc.exp=label

  It is the caller's responsibility to check for addressing modes not
  supported by the instruction, and to set inst.reloc.type.
 */

static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
                    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  if (skip_past_char (&p, '[') == FAIL)
    {
      /* No '[' - this is either "=immediate" (load-constant pseudo op)
	 or a bare label/expression.  */
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* bare address - translate to PC-relative offset */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;
	}
      /* else a load-constant pseudo op, no special treatment needed here */

      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* Bracketed form: the base register Rn comes first.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  if (skip_past_comma (&p) == SUCCESS)
    {
      /* Something follows the base register inside the brackets:
	 an index register, an alignment specifier, a group relocation,
	 or an immediate offset.  */
      inst.operands[i].preind = 1;

      /* Optional sign before an index register.  Note that if what
	 follows turns out not to be a register, the '-' is un-consumed
	 below (p--) so the expression parser sees it.  */
      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  expressionS exp;
	  my_get_expression (&exp, &p, GE_NO_PREFIX);
	  if (exp.X_op != O_constant)
	    {
	      inst.error = _("alignment must be constant");
	      return PARSE_OPERAND_FAIL;
	    }
	  /* The alignment lives in the high-order bits of .imm so a
	     post-index register can later be OR-ed into the low bits.  */
	  inst.operands[i].imm = exp.X_add_number << 8;
	  inst.operands[i].immisalign = 1;
	  /* Alignments are not pre-indexes.  */
	  inst.operands[i].preind = 0;
	}
      else
	{
	  if (inst.operands[i].negative)
	    {
	      /* Not a register after all: give the '-' back to the
		 expression parser.  */
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations &&
	      ((*p == '#' && *(p + 1) == ':') || *p == ':'))

	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.reloc.type = entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.reloc.type = entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.reloc.type = entry->ldc_code;
		    break;

		  default:
		    assert (0);
		}

	      /* A zero entry in the table means this group relocation is
		 not valid for this instruction class.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
	      return PARSE_OPERAND_FAIL;
	}
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  /* Post-indexed form: [Rn], #offset or [Rn], +/-Rm{, shift}.  */
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      if (inst.operands[i].negative)
		{
		  /* As above: '-' was not a register sign after all.  */
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}

/* As parse_address_main, but with group relocations disabled; collapses
   the result to SUCCESS/FAIL for callers that do not distinguish the
   no-backtrack case.  */
static int
parse_address (char **str, int i)
{
  return parse_address_main (str, i, 0, 0) == PARSE_OPERAND_SUCCESS
	 ? SUCCESS : FAIL;
}

/* As parse_address, but group relocations of class TYPE are permitted
   and the full parse_operand_result is returned.  */
static parse_operand_result
parse_address_group_reloc (char **str, int i, group_reloc_type type)
{
  return parse_address_main (str, i, 1, type);
}

/* Parse an operand for a MOVW or MOVT instruction.
   An optional :lower16:/:upper16: prefix selects the BFD_RELOC_ARM_MOVW
   or BFD_RELOC_ARM_MOVT relocation; without a prefix, the operand must
   be a constant in the range 0 .. 0xffff.  */
static int
parse_half (char **str)
{
  char * p;

  p = *str;
  /* An immediate '#' prefix is optional.  */
  skip_past_char (&p, '#');
  if (strncasecmp (p, ":lower16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVW;
  else if (strncasecmp (p, ":upper16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVT;

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      /* Both recognized prefixes are 9 characters long.  */
      p += 9;
      skip_whitespace(p);
    }

  if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
    return FAIL;

  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      /* No reloc prefix: only a bare 16-bit constant is acceptable.  */
      if (inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0
	  || inst.reloc.exp.X_add_number > 0xffff)
	{
	  inst.error = _("immediate value out of range");
	  return FAIL;
	}
    }
  *str = p;
  return SUCCESS;
}

/* Miscellaneous. */

/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.  */
static int
parse_psr (char **str)
{
  char *p;
  uint32_t psr_field;
  const struct asm_psr *psr;
  char *start;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    psr_field = SPSR_BIT;
  else if (strncasecmp (p, "CPSR", 4) == 0)
    psr_field = 0;
  else
    {
      /* Neither CPSR nor SPSR: look the whole word up in the v7-M
	 special-register table instead.  NOTE(review): table contents
	 are built elsewhere in the file - presumably the M-profile
	 MRS/MSR register names; confirm against md_begin.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      psr = hash_find_n (arm_v7m_psr_hsh, start, p - start);
      if (!psr)
	return FAIL;

      *str = p;
      return psr->field;
    }

  p += 4;
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      psr = hash_find_n (arm_psr_hsh, start, p - start);
      if (!psr)
	goto error;

      psr_field |= psr->field;
    }
  else
    {
      if (ISALNUM (*p))
	goto error;    /* Garbage after "[CS]PSR".  */

      /* Bare CPSR/SPSR implies the control and flags fields.  */
      psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}

/* Parse the flags argument to CPSI[ED].  Returns FAIL on error, or a
   value suitable for splatting into the AIF field of the instruction.  */

static int
parse_cps_flags (char **str)
{
  int val = 0;
  int saw_a_flag = 0;
  char *s = *str;

  /* Accumulate any combination of the a/i/f flag letters (case
     insensitive) up to the end of string or a comma.  */
  for (;;)
    switch (*s++)
      {
      case '\0': case ',':
	goto done;

      case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
      case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
      case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;

      default:
	inst.error = _("unrecognized CPS flag");
	return FAIL;
      }

 done:
  if (saw_a_flag == 0)
    {
      inst.error = _("missing CPS flags");
      return FAIL;
    }

  /* Back up onto the terminator so the caller sees it.  */
  *str = s - 1;
  return val;
}

/* Parse an endian specifier ("BE" or "LE", case insensitive);
   returns 1 for big-endian, 0 for little-endian, FAIL for an error.
*/ 3930 3931static int 3932parse_endian_specifier (char **str) 3933{ 3934 int little_endian; 3935 char *s = *str; 3936 3937 if (strncasecmp (s, "BE", 2)) 3938 little_endian = 0; 3939 else if (strncasecmp (s, "LE", 2)) 3940 little_endian = 1; 3941 else 3942 { 3943 inst.error = _("valid endian specifiers are be or le"); 3944 return FAIL; 3945 } 3946 3947 if (ISALNUM (s[2]) || s[2] == '_') 3948 { 3949 inst.error = _("valid endian specifiers are be or le"); 3950 return FAIL; 3951 } 3952 3953 *str = s + 2; 3954 return little_endian; 3955} 3956 3957/* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a 3958 value suitable for poking into the rotate field of an sxt or sxta 3959 instruction, or FAIL on error. */ 3960 3961static int 3962parse_ror (char **str) 3963{ 3964 int rot; 3965 char *s = *str; 3966 3967 if (strncasecmp (s, "ROR", 3) == 0) 3968 s += 3; 3969 else 3970 { 3971 inst.error = _("missing rotation field after comma"); 3972 return FAIL; 3973 } 3974 3975 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL) 3976 return FAIL; 3977 3978 switch (rot) 3979 { 3980 case 0: *str = s; return 0x0; 3981 case 8: *str = s; return 0x1; 3982 case 16: *str = s; return 0x2; 3983 case 24: *str = s; return 0x3; 3984 3985 default: 3986 inst.error = _("rotation can only be 0, 8, 16, or 24"); 3987 return FAIL; 3988 } 3989} 3990 3991/* Parse a conditional code (from conds[] below). The value returned is in the 3992 range 0 .. 14, or FAIL. */ 3993static int 3994parse_cond (char **str) 3995{ 3996 char *p, *q; 3997 const struct asm_cond *c; 3998 3999 p = q = *str; 4000 while (ISALPHA (*q)) 4001 q++; 4002 4003 c = hash_find_n (arm_cond_hsh, p, q - p); 4004 if (!c) 4005 { 4006 inst.error = _("condition required"); 4007 return FAIL; 4008 } 4009 4010 *str = q; 4011 return c->value; 4012} 4013 4014/* Parse an option for a barrier instruction. Returns the encoding for the 4015 option, or FAIL. 
*/ 4016static int 4017parse_barrier (char **str) 4018{ 4019 char *p, *q; 4020 const struct asm_barrier_opt *o; 4021 4022 p = q = *str; 4023 while (ISALPHA (*q)) 4024 q++; 4025 4026 o = hash_find_n (arm_barrier_opt_hsh, p, q - p); 4027 if (!o) 4028 return FAIL; 4029 4030 *str = q; 4031 return o->value; 4032} 4033 4034/* Parse the operands of a table branch instruction. Similar to a memory 4035 operand. */ 4036static int 4037parse_tb (char **str) 4038{ 4039 char * p = *str; 4040 int reg; 4041 4042 if (skip_past_char (&p, '[') == FAIL) 4043 { 4044 inst.error = _("'[' expected"); 4045 return FAIL; 4046 } 4047 4048 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL) 4049 { 4050 inst.error = _(reg_expected_msgs[REG_TYPE_RN]); 4051 return FAIL; 4052 } 4053 inst.operands[0].reg = reg; 4054 4055 if (skip_past_comma (&p) == FAIL) 4056 { 4057 inst.error = _("',' expected"); 4058 return FAIL; 4059 } 4060 4061 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL) 4062 { 4063 inst.error = _(reg_expected_msgs[REG_TYPE_RN]); 4064 return FAIL; 4065 } 4066 inst.operands[0].imm = reg; 4067 4068 if (skip_past_comma (&p) == SUCCESS) 4069 { 4070 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL) 4071 return FAIL; 4072 if (inst.reloc.exp.X_add_number != 1) 4073 { 4074 inst.error = _("invalid shift"); 4075 return FAIL; 4076 } 4077 inst.operands[0].shifted = 1; 4078 } 4079 4080 if (skip_past_char (&p, ']') == FAIL) 4081 { 4082 inst.error = _("']' expected"); 4083 return FAIL; 4084 } 4085 *str = p; 4086 return SUCCESS; 4087} 4088 4089/* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more 4090 information on the types the operands can take and how they are encoded. 4091 Up to four operands may be read; this function handles setting the 4092 ".present" field for each read operand itself. 4093 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS, 4094 else returns FAIL. 
 */

static int
parse_neon_mov (char **str, int *which_operand)
{
  /* NOTE: i is bumped (sometimes inside the assignment statements below)
     as each operand is committed; the final value is handed back through
     WHICH_OPERAND only on success.  */
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* Case 5: a second ARM core register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two more ARM core registers follow.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;
	    }
	}
      else if (parse_big_immediate (&ptr, i) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: a second single-precision register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i++].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands. Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}

#endif /* INSNS_TABLE_ONLY */

/* Matcher codes for parse_operands.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,   /* Neon element/structure list */

  OP_NILO,      /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...) */
  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
  OP_VMOV,      /* Neon VMOV operands.  */
  OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN.  */
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,        /* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,      /*                 0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*	         -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_PSR,	/* CPSR/SPSR mask for msr */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_RVC_PSR,	/* CPSR/SPSR mask for msr, or VFP control register.  */
  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,	 /*				1 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,       /* Optional Neon double precision register */
  OP_oRNQ,       /* Optional Neon quad precision register */
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER,	 /* Option argument for a barrier instruction.  */

  /* Codes >= OP_FIRST_OPTIONAL match operands that may be omitted.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};

#ifndef INSNS_TABLE_ONLY

/* Generic instruction operand parser.  This does no encoding and no
   semantic validation; it merely squirrels values away in the inst
   structure.  Returns SUCCESS or FAIL depending on whether the
   specified grammar matched.  */
*/ 4440static int 4441parse_operands (char *str, const unsigned char *pattern) 4442{ 4443 unsigned const char *upat = pattern; 4444 char *backtrack_pos = 0; 4445 const char *backtrack_error = 0; 4446 int i, val, backtrack_index = 0; 4447 enum arm_reg_type rtype; 4448 parse_operand_result result; 4449 4450#define po_char_or_fail(chr) do { \ 4451 if (skip_past_char (&str, chr) == FAIL) \ 4452 goto bad_args; \ 4453} while (0) 4454 4455#define po_reg_or_fail(regtype) do { \ 4456 val = arm_typed_reg_parse (&str, regtype, &rtype, \ 4457 &inst.operands[i].vectype); \ 4458 if (val == FAIL) \ 4459 { \ 4460 first_error (_(reg_expected_msgs[regtype])); \ 4461 goto failure; \ 4462 } \ 4463 inst.operands[i].reg = val; \ 4464 inst.operands[i].isreg = 1; \ 4465 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \ 4466 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \ 4467 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \ 4468 || rtype == REG_TYPE_VFD \ 4469 || rtype == REG_TYPE_NQ); \ 4470} while (0) 4471 4472#define po_reg_or_goto(regtype, label) do { \ 4473 val = arm_typed_reg_parse (&str, regtype, &rtype, \ 4474 &inst.operands[i].vectype); \ 4475 if (val == FAIL) \ 4476 goto label; \ 4477 \ 4478 inst.operands[i].reg = val; \ 4479 inst.operands[i].isreg = 1; \ 4480 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \ 4481 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \ 4482 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \ 4483 || rtype == REG_TYPE_VFD \ 4484 || rtype == REG_TYPE_NQ); \ 4485} while (0) 4486 4487#define po_imm_or_fail(min, max, popt) do { \ 4488 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \ 4489 goto failure; \ 4490 inst.operands[i].imm = val; \ 4491} while (0) 4492 4493#define po_scalar_or_goto(elsz, label) do { \ 4494 val = parse_scalar (&str, elsz, &inst.operands[i].vectype); \ 4495 if (val == FAIL) \ 4496 goto label; \ 4497 inst.operands[i].reg = val; \ 4498 inst.operands[i].isscalar = 1; \ 4499} while (0) 4500 4501#define 
po_misc_or_fail(expr) do { \ 4502 if (expr) \ 4503 goto failure; \ 4504} while (0) 4505 4506#define po_misc_or_fail_no_backtrack(expr) do { \ 4507 result = expr; \ 4508 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)\ 4509 backtrack_pos = 0; \ 4510 if (result != PARSE_OPERAND_SUCCESS) \ 4511 goto failure; \ 4512} while (0) 4513 4514 skip_whitespace (str); 4515 4516 for (i = 0; upat[i] != OP_stop; i++) 4517 { 4518 if (upat[i] >= OP_FIRST_OPTIONAL) 4519 { 4520 /* Remember where we are in case we need to backtrack. */ 4521 assert (!backtrack_pos); 4522 backtrack_pos = str; 4523 backtrack_error = inst.error; 4524 backtrack_index = i; 4525 } 4526 4527 if (i > 0 && (i > 1 || inst.operands[0].present)) 4528 po_char_or_fail (','); 4529 4530 switch (upat[i]) 4531 { 4532 /* Registers */ 4533 case OP_oRRnpc: 4534 case OP_RRnpc: 4535 case OP_oRR: 4536 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break; 4537 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break; 4538 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break; 4539 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break; 4540 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break; 4541 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break; 4542 case OP_oRND: 4543 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break; 4544 case OP_RVC: po_reg_or_fail (REG_TYPE_VFC); break; 4545 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break; 4546 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break; 4547 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break; 4548 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break; 4549 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break; 4550 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break; 4551 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break; 4552 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break; 4553 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break; 4554 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break; 4555 case OP_oRNQ: 4556 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break; 4557 case OP_oRNDQ: 4558 case 
OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break; 4559 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break; 4560 case OP_oRNSDQ: 4561 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break; 4562 4563 /* Neon scalar. Using an element size of 8 means that some invalid 4564 scalars are accepted here, so deal with those in later code. */ 4565 case OP_RNSC: po_scalar_or_goto (8, failure); break; 4566 4567 /* WARNING: We can expand to two operands here. This has the potential 4568 to totally confuse the backtracking mechanism! It will be OK at 4569 least as long as we don't try to use optional args as well, 4570 though. */ 4571 case OP_NILO: 4572 { 4573 po_reg_or_goto (REG_TYPE_NDQ, try_imm); 4574 inst.operands[i].present = 1; 4575 i++; 4576 skip_past_comma (&str); 4577 po_reg_or_goto (REG_TYPE_NDQ, one_reg_only); 4578 break; 4579 one_reg_only: 4580 /* Optional register operand was omitted. Unfortunately, it's in 4581 operands[i-1] and we need it to be in inst.operands[i]. Fix that 4582 here (this is a bit grotty). */ 4583 inst.operands[i] = inst.operands[i-1]; 4584 inst.operands[i-1].present = 0; 4585 break; 4586 try_imm: 4587 /* There's a possibility of getting a 64-bit immediate here, so 4588 we need special handling. 
*/ 4589 if (parse_big_immediate (&str, i) == FAIL) 4590 { 4591 inst.error = _("immediate value is out of range"); 4592 goto failure; 4593 } 4594 } 4595 break; 4596 4597 case OP_RNDQ_I0: 4598 { 4599 po_reg_or_goto (REG_TYPE_NDQ, try_imm0); 4600 break; 4601 try_imm0: 4602 po_imm_or_fail (0, 0, TRUE); 4603 } 4604 break; 4605 4606 case OP_RVSD_I0: 4607 po_reg_or_goto (REG_TYPE_VFSD, try_imm0); 4608 break; 4609 4610 case OP_RR_RNSC: 4611 { 4612 po_scalar_or_goto (8, try_rr); 4613 break; 4614 try_rr: 4615 po_reg_or_fail (REG_TYPE_RN); 4616 } 4617 break; 4618 4619 case OP_RNSDQ_RNSC: 4620 { 4621 po_scalar_or_goto (8, try_nsdq); 4622 break; 4623 try_nsdq: 4624 po_reg_or_fail (REG_TYPE_NSDQ); 4625 } 4626 break; 4627 4628 case OP_RNDQ_RNSC: 4629 { 4630 po_scalar_or_goto (8, try_ndq); 4631 break; 4632 try_ndq: 4633 po_reg_or_fail (REG_TYPE_NDQ); 4634 } 4635 break; 4636 4637 case OP_RND_RNSC: 4638 { 4639 po_scalar_or_goto (8, try_vfd); 4640 break; 4641 try_vfd: 4642 po_reg_or_fail (REG_TYPE_VFD); 4643 } 4644 break; 4645 4646 case OP_VMOV: 4647 /* WARNING: parse_neon_mov can move the operand counter, i. If we're 4648 not careful then bad things might happen. */ 4649 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL); 4650 break; 4651 4652 case OP_RNDQ_IMVNb: 4653 { 4654 po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm); 4655 break; 4656 try_mvnimm: 4657 /* There's a possibility of getting a 64-bit immediate here, so 4658 we need special handling. 
*/ 4659 if (parse_big_immediate (&str, i) == FAIL) 4660 { 4661 inst.error = _("immediate value is out of range"); 4662 goto failure; 4663 } 4664 } 4665 break; 4666 4667 case OP_RNDQ_I63b: 4668 { 4669 po_reg_or_goto (REG_TYPE_NDQ, try_shimm); 4670 break; 4671 try_shimm: 4672 po_imm_or_fail (0, 63, TRUE); 4673 } 4674 break; 4675 4676 case OP_RRnpcb: 4677 po_char_or_fail ('['); 4678 po_reg_or_fail (REG_TYPE_RN); 4679 po_char_or_fail (']'); 4680 break; 4681 4682 case OP_RRw: 4683 case OP_oRRw: 4684 po_reg_or_fail (REG_TYPE_RN); 4685 if (skip_past_char (&str, '!') == SUCCESS) 4686 inst.operands[i].writeback = 1; 4687 break; 4688 4689 /* Immediates */ 4690 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break; 4691 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break; 4692 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break; 4693 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break; 4694 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break; 4695 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break; 4696 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break; 4697 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break; 4698 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break; 4699 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break; 4700 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break; 4701 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break; 4702 4703 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break; 4704 case OP_oI7b: 4705 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break; 4706 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break; 4707 case OP_oI31b: 4708 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break; 4709 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break; 4710 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break; 4711 4712 /* Immediate variants */ 4713 case OP_oI255c: 4714 po_char_or_fail ('{'); 4715 po_imm_or_fail (0, 255, TRUE); 4716 po_char_or_fail ('}'); 4717 break; 4718 4719 case OP_I31w: 4720 /* The expression parser chokes on a trailing !, so we have 4721 to find it 
first and zap it. */ 4722 { 4723 char *s = str; 4724 while (*s && *s != ',') 4725 s++; 4726 if (s[-1] == '!') 4727 { 4728 s[-1] = '\0'; 4729 inst.operands[i].writeback = 1; 4730 } 4731 po_imm_or_fail (0, 31, TRUE); 4732 if (str == s - 1) 4733 str = s; 4734 } 4735 break; 4736 4737 /* Expressions */ 4738 case OP_EXPi: EXPi: 4739 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str, 4740 GE_OPT_PREFIX)); 4741 break; 4742 4743 case OP_EXP: 4744 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str, 4745 GE_NO_PREFIX)); 4746 break; 4747 4748 case OP_EXPr: EXPr: 4749 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str, 4750 GE_NO_PREFIX)); 4751 if (inst.reloc.exp.X_op == O_symbol) 4752 { 4753 val = parse_reloc (&str); 4754 if (val == -1) 4755 { 4756 inst.error = _("unrecognized relocation suffix"); 4757 goto failure; 4758 } 4759 else if (val != BFD_RELOC_UNUSED) 4760 { 4761 inst.operands[i].imm = val; 4762 inst.operands[i].hasreloc = 1; 4763 } 4764 } 4765 break; 4766 4767 /* Operand for MOVW or MOVT. */ 4768 case OP_HALF: 4769 po_misc_or_fail (parse_half (&str)); 4770 break; 4771 4772 /* Register or expression */ 4773 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break; 4774 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break; 4775 4776 /* Register or immediate */ 4777 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break; 4778 I0: po_imm_or_fail (0, 0, FALSE); break; 4779 4780 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break; 4781 IF: 4782 if (!is_immediate_prefix (*str)) 4783 goto bad_args; 4784 str++; 4785 val = parse_fpa_immediate (&str); 4786 if (val == FAIL) 4787 goto failure; 4788 /* FPA immediates are encoded as registers 8-15. 4789 parse_fpa_immediate has already applied the offset. 
*/ 4790 inst.operands[i].reg = val; 4791 inst.operands[i].isreg = 1; 4792 break; 4793 4794 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break; 4795 I32z: po_imm_or_fail (0, 32, FALSE); break; 4796 4797 /* Two kinds of register */ 4798 case OP_RIWR_RIWC: 4799 { 4800 struct reg_entry *rege = arm_reg_parse_multi (&str); 4801 if (!rege 4802 || (rege->type != REG_TYPE_MMXWR 4803 && rege->type != REG_TYPE_MMXWC 4804 && rege->type != REG_TYPE_MMXWCG)) 4805 { 4806 inst.error = _("iWMMXt data or control register expected"); 4807 goto failure; 4808 } 4809 inst.operands[i].reg = rege->number; 4810 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR); 4811 } 4812 break; 4813 4814 case OP_RIWC_RIWG: 4815 { 4816 struct reg_entry *rege = arm_reg_parse_multi (&str); 4817 if (!rege 4818 || (rege->type != REG_TYPE_MMXWC 4819 && rege->type != REG_TYPE_MMXWCG)) 4820 { 4821 inst.error = _("iWMMXt control register expected"); 4822 goto failure; 4823 } 4824 inst.operands[i].reg = rege->number; 4825 inst.operands[i].isreg = 1; 4826 } 4827 break; 4828 4829 /* Misc */ 4830 case OP_CPSF: val = parse_cps_flags (&str); break; 4831 case OP_ENDI: val = parse_endian_specifier (&str); break; 4832 case OP_oROR: val = parse_ror (&str); break; 4833 case OP_PSR: val = parse_psr (&str); break; 4834 case OP_COND: val = parse_cond (&str); break; 4835 case OP_oBARRIER:val = parse_barrier (&str); break; 4836 4837 case OP_RVC_PSR: 4838 po_reg_or_goto (REG_TYPE_VFC, try_psr); 4839 inst.operands[i].isvec = 1; /* Mark VFP control reg as vector. */ 4840 break; 4841 try_psr: 4842 val = parse_psr (&str); 4843 break; 4844 4845 case OP_APSR_RR: 4846 po_reg_or_goto (REG_TYPE_RN, try_apsr); 4847 break; 4848 try_apsr: 4849 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS 4850 instruction). */ 4851 if (strncasecmp (str, "APSR_", 5) == 0) 4852 { 4853 unsigned found = 0; 4854 str += 5; 4855 while (found < 15) 4856 switch (*str++) 4857 { 4858 case 'c': found = (found & 1) ? 
16 : found | 1; break; 4859 case 'n': found = (found & 2) ? 16 : found | 2; break; 4860 case 'z': found = (found & 4) ? 16 : found | 4; break; 4861 case 'v': found = (found & 8) ? 16 : found | 8; break; 4862 default: found = 16; 4863 } 4864 if (found != 15) 4865 goto failure; 4866 inst.operands[i].isvec = 1; 4867 } 4868 else 4869 goto failure; 4870 break; 4871 4872 case OP_TB: 4873 po_misc_or_fail (parse_tb (&str)); 4874 break; 4875 4876 /* Register lists */ 4877 case OP_REGLST: 4878 val = parse_reg_list (&str); 4879 if (*str == '^') 4880 { 4881 inst.operands[1].writeback = 1; 4882 str++; 4883 } 4884 break; 4885 4886 case OP_VRSLST: 4887 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S); 4888 break; 4889 4890 case OP_VRDLST: 4891 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D); 4892 break; 4893 4894 case OP_VRSDLST: 4895 /* Allow Q registers too. */ 4896 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, 4897 REGLIST_NEON_D); 4898 if (val == FAIL) 4899 { 4900 inst.error = NULL; 4901 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, 4902 REGLIST_VFP_S); 4903 inst.operands[i].issingle = 1; 4904 } 4905 break; 4906 4907 case OP_NRDLST: 4908 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, 4909 REGLIST_NEON_D); 4910 break; 4911 4912 case OP_NSTRLST: 4913 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg, 4914 &inst.operands[i].vectype); 4915 break; 4916 4917 /* Addressing modes */ 4918 case OP_ADDR: 4919 po_misc_or_fail (parse_address (&str, i)); 4920 break; 4921 4922 case OP_ADDRGLDR: 4923 po_misc_or_fail_no_backtrack ( 4924 parse_address_group_reloc (&str, i, GROUP_LDR)); 4925 break; 4926 4927 case OP_ADDRGLDRS: 4928 po_misc_or_fail_no_backtrack ( 4929 parse_address_group_reloc (&str, i, GROUP_LDRS)); 4930 break; 4931 4932 case OP_ADDRGLDC: 4933 po_misc_or_fail_no_backtrack ( 4934 parse_address_group_reloc (&str, i, GROUP_LDC)); 4935 break; 4936 4937 case OP_SH: 4938 po_misc_or_fail 
(parse_shifter_operand (&str, i)); 4939 break; 4940 4941 case OP_SHG: 4942 po_misc_or_fail_no_backtrack ( 4943 parse_shifter_operand_group_reloc (&str, i)); 4944 break; 4945 4946 case OP_oSHll: 4947 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE)); 4948 break; 4949 4950 case OP_oSHar: 4951 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE)); 4952 break; 4953 4954 case OP_oSHllar: 4955 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE)); 4956 break; 4957 4958 default: 4959 as_fatal ("unhandled operand code %d", upat[i]); 4960 } 4961 4962 /* Various value-based sanity checks and shared operations. We 4963 do not signal immediate failures for the register constraints; 4964 this allows a syntax error to take precedence. */ 4965 switch (upat[i]) 4966 { 4967 case OP_oRRnpc: 4968 case OP_RRnpc: 4969 case OP_RRnpcb: 4970 case OP_RRw: 4971 case OP_oRRw: 4972 case OP_RRnpc_I0: 4973 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC) 4974 inst.error = BAD_PC; 4975 break; 4976 4977 case OP_CPSF: 4978 case OP_ENDI: 4979 case OP_oROR: 4980 case OP_PSR: 4981 case OP_RVC_PSR: 4982 case OP_COND: 4983 case OP_oBARRIER: 4984 case OP_REGLST: 4985 case OP_VRSLST: 4986 case OP_VRDLST: 4987 case OP_VRSDLST: 4988 case OP_NRDLST: 4989 case OP_NSTRLST: 4990 if (val == FAIL) 4991 goto failure; 4992 inst.operands[i].imm = val; 4993 break; 4994 4995 default: 4996 break; 4997 } 4998 4999 /* If we get here, this operand was successfully parsed. */ 5000 inst.operands[i].present = 1; 5001 continue; 5002 5003 bad_args: 5004 inst.error = BAD_ARGS; 5005 5006 failure: 5007 if (!backtrack_pos) 5008 { 5009 /* The parse routine should already have set inst.error, but set a 5010 defaut here just in case. */ 5011 if (!inst.error) 5012 inst.error = _("syntax error"); 5013 return FAIL; 5014 } 5015 5016 /* Do not backtrack over a trailing optional argument that 5017 absorbed some text. 
We will only fail again, with the 5018 'garbage following instruction' error message, which is 5019 probably less helpful than the current one. */ 5020 if (backtrack_index == i && backtrack_pos != str 5021 && upat[i+1] == OP_stop) 5022 { 5023 if (!inst.error) 5024 inst.error = _("syntax error"); 5025 return FAIL; 5026 } 5027 5028 /* Try again, skipping the optional argument at backtrack_pos. */ 5029 str = backtrack_pos; 5030 inst.error = backtrack_error; 5031 inst.operands[backtrack_index].present = 0; 5032 i = backtrack_index; 5033 backtrack_pos = 0; 5034 } 5035 5036 /* Check that we have parsed all the arguments. */ 5037 if (*str != '\0' && !inst.error) 5038 inst.error = _("garbage following instruction"); 5039 5040 return inst.error ? FAIL : SUCCESS; 5041} 5042 5043#undef po_char_or_fail 5044#undef po_reg_or_fail 5045#undef po_reg_or_goto 5046#undef po_imm_or_fail 5047#undef po_scalar_or_fail 5048 5049/* Shorthand macro for instruction encoding functions issuing errors. */ 5050#define constraint(expr, err) do { \ 5051 if (expr) \ 5052 { \ 5053 inst.error = err; \ 5054 return; \ 5055 } \ 5056} while (0) 5057 5058/* Functions for operand encoding. ARM, then Thumb. */ 5059 5060#define rotate_left(v, n) (v << n | v >> (32 - n)) 5061 5062/* If VAL can be encoded in the immediate field of an ARM instruction, 5063 return the encoded form. Otherwise, return FAIL. */ 5064 5065static unsigned int 5066encode_arm_immediate (unsigned int val) 5067{ 5068 unsigned int a, i; 5069 5070 for (i = 0; i < 32; i += 2) 5071 if ((a = rotate_left (val, i)) <= 0xff) 5072 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */ 5073 5074 return FAIL; 5075} 5076 5077/* If VAL can be encoded in the immediate field of a Thumb32 instruction, 5078 return the encoded form. Otherwise, return FAIL. 
*/ 5079static unsigned int 5080encode_thumb32_immediate (unsigned int val) 5081{ 5082 unsigned int a, i; 5083 5084 if (val <= 0xff) 5085 return val; 5086 5087 for (i = 1; i <= 24; i++) 5088 { 5089 a = val >> i; 5090 if ((val & ~(0xff << i)) == 0) 5091 return ((val >> i) & 0x7f) | ((32 - i) << 7); 5092 } 5093 5094 a = val & 0xff; 5095 if (val == ((a << 16) | a)) 5096 return 0x100 | a; 5097 if (val == ((a << 24) | (a << 16) | (a << 8) | a)) 5098 return 0x300 | a; 5099 5100 a = val & 0xff00; 5101 if (val == ((a << 16) | a)) 5102 return 0x200 | (a >> 8); 5103 5104 return FAIL; 5105} 5106/* Encode a VFP SP or DP register number into inst.instruction. */ 5107 5108static void 5109encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos) 5110{ 5111 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm) 5112 && reg > 15) 5113 { 5114 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3)) 5115 { 5116 if (thumb_mode) 5117 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, 5118 fpu_vfp_ext_v3); 5119 else 5120 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, 5121 fpu_vfp_ext_v3); 5122 } 5123 else 5124 { 5125 first_error (_("D register out of range for selected VFP version")); 5126 return; 5127 } 5128 } 5129 5130 switch (pos) 5131 { 5132 case VFP_REG_Sd: 5133 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22); 5134 break; 5135 5136 case VFP_REG_Sn: 5137 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7); 5138 break; 5139 5140 case VFP_REG_Sm: 5141 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5); 5142 break; 5143 5144 case VFP_REG_Dd: 5145 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22); 5146 break; 5147 5148 case VFP_REG_Dn: 5149 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7); 5150 break; 5151 5152 case VFP_REG_Dm: 5153 inst.instruction |= (reg & 15) | ((reg >> 4) << 5); 5154 break; 5155 5156 default: 5157 abort (); 5158 } 5159} 5160 5161/* Encode a <shift> in an ARM-format instruction. 
   The immediate,
   if any, is handled by md_apply_fix. */
static void
encode_arm_shift (int i)
{
  /* RRX is encoded as ROR with a zero shift count. */
  if (inst.operands[i].shift_kind == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 5;
  else
    {
      inst.instruction |= inst.operands[i].shift_kind << 5;
      if (inst.operands[i].immisreg)
        {
          /* Register-specified shift: Rs goes into bits 11:8. */
          inst.instruction |= SHIFT_BY_REG;
          inst.instruction |= inst.operands[i].imm << 8;
        }
      else
        /* Immediate shift count is inserted later by md_apply_fix. */
        inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
    }
}

/* Encode data-processing operand I (register, optionally shifted, or
   immediate) into inst.instruction. */
static void
encode_arm_shifter_operand (int i)
{
  if (inst.operands[i].isreg)
    {
      inst.instruction |= inst.operands[i].reg;
      encode_arm_shift (i);
    }
  else
    /* Immediate form: the value itself is applied via inst.reloc. */
    inst.instruction |= INST_IMMEDIATE;
}

/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encodes the base register and the P/W indexing bits for operand I,
   and rejects indexing forms the instruction cannot express.  IS_T
   selects the user-mode ("T") load/store variants, which are
   post-indexed only and use the W bit to request user-mode access. */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* Base register Rn goes into bits 19:16. */
  assert (inst.operands[i].isreg);
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
        {
          inst.error = _("instruction does not accept preindexed addressing");
          return;
        }
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
        inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      assert (inst.operands[i].writeback);
      /* For T-form instructions the W bit means "user-mode access". */
      if (is_t)
        inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when the written-back base (bits 19:16) equals the transfer
     register (bits 15:12) -- the result is unpredictable. */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
          == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
             ? _("destination register same as write-back base")
             : _("source register same as write-back base"));
}

/* inst.operands[i] was set up by parse_address.
   Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed). */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
        inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
        {
          /* RRX is encoded as ROR with a zero shift count. */
          if (inst.operands[i].shift_kind == SHIFT_RRX)
            inst.instruction |= SHIFT_ROR << 5;
          else
            {
              inst.instruction |= inst.operands[i].shift_kind << 5;
              inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
            }
        }
    }
  else /* immediate offset in inst.reloc */
    {
      /* Don't clobber a relocation the parser already selected
         (e.g. a group relocation). */
      if (inst.reloc.type == BFD_RELOC_UNUSED)
        inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
    }
}

/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed). */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no shifter field, so a scaled register index is illegal. */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
        inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      inst.instruction |= HWOFFSET_IMM;
      /* Don't clobber a relocation the parser already selected. */
      if (inst.reloc.type == BFD_RELOC_UNUSED)
        inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
    }
}

/* inst.operands[i] was set up by parse_address.
   Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).

   Returns SUCCESS or FAIL (setting inst.error on failure). */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  /* Base register Rn goes into bits 19:16. */
  inst.instruction |= inst.operands[i].reg << 16;

  assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      assert (!inst.operands[i].writeback);
      if (!unind_ok)
        {
          inst.error = _("instruction does not support unindexed addressing");
          return FAIL;
        }
      /* Unindexed form: 8-bit option field, with the U bit always set. */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
        {
          inst.error = _("pc may not be used with write-back");
          return FAIL;
        }
      if (!wb_ok)
        {
          inst.error = _("instruction does not support writeback");
          return FAIL;
        }
      inst.instruction |= WRITE_BACK;
    }

  if (reloc_override)
    inst.reloc.type = reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
            || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
           && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      /* Not a group relocation: pick the plain coprocessor-offset
         relocation for the current instruction set. */
      if (thumb_mode)
        inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
        inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  return SUCCESS;
}

/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return 1; if it can't, convert inst.instruction to a literal-pool
   load and return 0.  If this is not a valid thing to do in the
   current context, set inst.error and return 1.

   inst.operands[i] describes the destination register. */

static int
move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
{
  uint32_t tbit;

  /* Select the load bit appropriate to the encoding being built. */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  /* "=expr" is only legal on loads. */
  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return 1;
    }
  if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
    {
      inst.error = _("constant expression expected");
      return 1;
    }
  if (inst.reloc.exp.X_op == O_constant)
    {
      if (thumb_p)
        {
          if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
            {
              /* This can be done with a mov(1) instruction. */
              inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
              inst.instruction |= inst.reloc.exp.X_add_number;
              return 1;
            }
        }
      else
        {
          int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
          if (value != FAIL)
            {
              /* This can be done with a mov instruction. */
              inst.instruction &= LITERAL_MASK;
              inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
              inst.instruction |= value & 0xfff;
              return 1;
            }

          value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
          if (value != FAIL)
            {
              /* This can be done with a mvn instruction. */
              inst.instruction &= LITERAL_MASK;
              inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
              inst.instruction |= value & 0xfff;
              return 1;
            }
        }
    }

  /* Fall back to a PC-relative load from the literal pool. */
  if (add_to_lit_pool () == FAIL)
    {
      inst.error = _("literal pool insertion failed");
      return 1;
    }
  /* NOTE(review): the address is rewritten into operands[1] regardless of
     the I parameter -- presumably all callers keep the address in operand
     1; confirm before adding new call sites. */
  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
                     ? BFD_RELOC_ARM_THUMB_OFFSET
                     : (mode_3
                        ? BFD_RELOC_ARM_HWLITERAL
                        : BFD_RELOC_ARM_LITERAL));
  return 0;
}

/* Functions for instruction encoding, sorted by subarchitecture.
   First some generics; their names are taken from the conventional
   bit positions for register arguments in ARM format instructions. */

/* Nothing to encode beyond the opcode itself. */
static void
do_noargs (void)
{
}

/* Rd in bits 15:12. */
static void
do_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
}

/* Rd in bits 15:12, Rm in bits 3:0. */
static void
do_rd_rm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
}

/* Rd in bits 15:12, Rn in bits 19:16. */
static void
do_rd_rn (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
}

/* Rn in bits 19:16, Rd in bits 15:12 (operands in Rn, Rd order). */
static void
do_rn_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
}

static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.
*/ 5479 if ((inst.instruction & 0x0fbfffff) == 0x01000090) 5480 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg, 5481 _("Rn must not overlap other operands")); 5482 inst.instruction |= inst.operands[0].reg << 12; 5483 inst.instruction |= inst.operands[1].reg; 5484 inst.instruction |= Rn << 16; 5485} 5486 5487static void 5488do_rd_rn_rm (void) 5489{ 5490 inst.instruction |= inst.operands[0].reg << 12; 5491 inst.instruction |= inst.operands[1].reg << 16; 5492 inst.instruction |= inst.operands[2].reg; 5493} 5494 5495static void 5496do_rm_rd_rn (void) 5497{ 5498 inst.instruction |= inst.operands[0].reg; 5499 inst.instruction |= inst.operands[1].reg << 12; 5500 inst.instruction |= inst.operands[2].reg << 16; 5501} 5502 5503static void 5504do_imm0 (void) 5505{ 5506 inst.instruction |= inst.operands[0].imm; 5507} 5508 5509static void 5510do_rd_cpaddr (void) 5511{ 5512 inst.instruction |= inst.operands[0].reg << 12; 5513 encode_arm_cp_address (1, TRUE, TRUE, 0); 5514} 5515 5516/* ARM instructions, in alphabetical order by function name (except 5517 that wrapper functions appear immediately after the function they 5518 wrap). */ 5519 5520/* This is a pseudo-op of the form "adr rd, label" to be converted 5521 into a relative address of the form "add rd, pc, #label-.-8". */ 5522 5523static void 5524do_adr (void) 5525{ 5526 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */ 5527 5528 /* Frag hacking will turn this into a sub instruction if the offset turns 5529 out to be negative. 
*/ 5530 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE; 5531 inst.reloc.pc_rel = 1; 5532 inst.reloc.exp.X_add_number -= 8; 5533} 5534 5535/* This is a pseudo-op of the form "adrl rd, label" to be converted 5536 into a relative address of the form: 5537 add rd, pc, #low(label-.-8)" 5538 add rd, rd, #high(label-.-8)" */ 5539 5540static void 5541do_adrl (void) 5542{ 5543 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */ 5544 5545 /* Frag hacking will turn this into a sub instruction if the offset turns 5546 out to be negative. */ 5547 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE; 5548 inst.reloc.pc_rel = 1; 5549 inst.size = INSN_SIZE * 2; 5550 inst.reloc.exp.X_add_number -= 8; 5551} 5552 5553static void 5554do_arit (void) 5555{ 5556 if (!inst.operands[1].present) 5557 inst.operands[1].reg = inst.operands[0].reg; 5558 inst.instruction |= inst.operands[0].reg << 12; 5559 inst.instruction |= inst.operands[1].reg << 16; 5560 encode_arm_shifter_operand (2); 5561} 5562 5563static void 5564do_barrier (void) 5565{ 5566 if (inst.operands[0].present) 5567 { 5568 constraint ((inst.instruction & 0xf0) == 0x60 5569 && inst.operands[0].imm != 0xf, 5570 "bad barrier type"); 5571 inst.instruction |= inst.operands[0].imm; 5572 } 5573 else 5574 inst.instruction |= 0xf; 5575} 5576 5577static void 5578do_bfc (void) 5579{ 5580 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm; 5581 constraint (msb > 32, _("bit-field extends past end of register")); 5582 /* The instruction encoding stores the LSB and MSB, 5583 not the LSB and width. */ 5584 inst.instruction |= inst.operands[0].reg << 12; 5585 inst.instruction |= inst.operands[1].imm << 7; 5586 inst.instruction |= (msb - 1) << 16; 5587} 5588 5589static void 5590do_bfi (void) 5591{ 5592 unsigned int msb; 5593 5594 /* #0 in second position is alternative syntax for bfc, which is 5595 the same instruction but with REG_PC in the Rm field. 
 */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width. */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}

/* SBFX/UBFX: Rd, Rn, #lsb, #width.  The encoding stores width-1 in
   bits 20:16 and the LSB in bits 11:7. */
static void
do_bfx (void)
{
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
              _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (inst.operands[3].imm - 1) << 16;
}

/* ARM V5 breakpoint instruction (argument parse)
   BKPT <16 bit unsigned immediate>
   Instruction is not conditional.
   The bit pattern given in insns[] has the COND_ALWAYS condition,
   and it is an error if the caller tried to override that. */

static void
do_bkpt (void)
{
  /* Top 12 of 16 bits to bits 19:8. */
  inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;

  /* Bottom 4 of 16 bits to bits 3:0. */
  inst.instruction |= inst.operands[0].imm & 0xf;
}

/* Set up inst.reloc for a branch target.  A "(plt)" suffix parsed into
   operand 0 selects the PLT relocation; any other suffix is an error.
   Otherwise DEFAULT_RELOC is used.  The relocation is always pc-relative. */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
                  _("the only suffix valid here is '(plt)'"));
      inst.reloc.type = BFD_RELOC_ARM_PLT32;
    }
  else
    {
      inst.reloc.type = default_reloc;
    }
  inst.reloc.pc_rel = 1;
}

/* B{cond} <target>. */
static void
do_branch (void)
{
#ifdef OBJ_ELF
  /* EABI v4 and later uses distinct branch relocations. */
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}

/* BL{cond} <target>. */
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      /* A conditional BL cannot be converted to BLX by the linker, so
         it gets the plain jump relocation. */
      if (inst.cond == COND_ALWAYS)
        encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
        encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}

/* ARM V5 branch-link-exchange instruction (argument parse)
   BLX <target_addr> ie BLX(1)
   BLX{<condition>} <Rm> ie BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
   into inst.instruction.
   Also, the <target_addr> can be 25 bits, hence has its own reloc. */

static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
         It is not illegal to do "blx pc", just useless. */
      if (inst.operands[0].reg == REG_PC)
        as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
         conditionally, and the opcode must be adjusted.
 */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction = 0xfa000000; /* BLX(1) immediate-form opcode. */
#ifdef OBJ_ELF
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
        encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
#endif
        encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}

/* BX{cond} <Rm>. */
static void
do_bx (void)
{
  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
}


/* ARM v5TEJ. Jump to Jazelle code. */

static void
do_bxj (void)
{
  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bxj is not really useful"));

  inst.instruction |= inst.operands[0].reg;
}

/* Co-processor data operation:
   CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
   CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
static void
do_cdp (void)
{
  inst.instruction |= inst.operands[0].reg << 8;   /* coproc */
  inst.instruction |= inst.operands[1].imm << 20;  /* opcode_1 */
  inst.instruction |= inst.operands[2].reg << 12;  /* CRd */
  inst.instruction |= inst.operands[3].reg << 16;  /* CRn */
  inst.instruction |= inst.operands[4].reg;        /* CRm */
  inst.instruction |= inst.operands[5].imm << 5;   /* opcode_2 */
}

/* CMP/CMN/TST/TEQ-style comparison: Rn, <shifter operand>. */
static void
do_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_shifter_operand (1);
}

/* Transfer between coprocessor and ARM registers.
   MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
   MRC2
   MCR{cond}
   MCR2

   No special properties. */

static void
do_co_reg (void)
{
  inst.instruction |= inst.operands[0].reg << 8;   /* coproc */
  inst.instruction |= inst.operands[1].imm << 21;  /* opcode_1 */
  inst.instruction |= inst.operands[2].reg << 12;  /* Rd */
  inst.instruction |= inst.operands[3].reg << 16;  /* CRn */
  inst.instruction |= inst.operands[4].reg;        /* CRm */
  inst.instruction |= inst.operands[5].imm << 5;   /* opcode_2 */
}

/* Transfer between coprocessor register and pair of ARM registers.
   MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
   MCRR2
   MRRC{cond}
   MRRC2

   Two XScale instructions are special cases of these:

   MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
   MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0

   Result unpredictable if Rd or Rn is R15. */

static void
do_co_reg2c (void)
{
  inst.instruction |= inst.operands[0].reg << 8;   /* coproc */
  inst.instruction |= inst.operands[1].imm << 4;   /* opcode */
  inst.instruction |= inst.operands[2].reg << 12;  /* Rd */
  inst.instruction |= inst.operands[3].reg << 16;  /* Rn */
  inst.instruction |= inst.operands[4].reg;        /* CRm */
}

/* CPS interrupt-flag change: CPS<effect> <iflags>{, #<mode>}. */
static void
do_cpsi (void)
{
  inst.instruction |= inst.operands[0].imm << 6;
  if (inst.operands[1].present)
    {
      /* Supplying a mode also sets the mode-change bit. */
      inst.instruction |= CPSI_MMOD;
      inst.instruction |= inst.operands[1].imm;
    }
}

/* DBG #<option>. */
static void
do_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}

/* SDIV/UDIV: Rd, [Rn,] Rm.  Rn defaults to Rd when omitted. */
static void
do_div (void)
{
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
}

static void
do_it (void)
{
  /* There is no IT instruction in ARM mode. We
     process it but do not generate code for it.
*/ 5830 inst.size = 0; 5831} 5832 5833static void 5834do_ldmstm (void) 5835{ 5836 int base_reg = inst.operands[0].reg; 5837 int range = inst.operands[1].imm; 5838 5839 inst.instruction |= base_reg << 16; 5840 inst.instruction |= range; 5841 5842 if (inst.operands[1].writeback) 5843 inst.instruction |= LDM_TYPE_2_OR_3; 5844 5845 if (inst.operands[0].writeback) 5846 { 5847 inst.instruction |= WRITE_BACK; 5848 /* Check for unpredictable uses of writeback. */ 5849 if (inst.instruction & LOAD_BIT) 5850 { 5851 /* Not allowed in LDM type 2. */ 5852 if ((inst.instruction & LDM_TYPE_2_OR_3) 5853 && ((range & (1 << REG_PC)) == 0)) 5854 as_warn (_("writeback of base register is UNPREDICTABLE")); 5855 /* Only allowed if base reg not in list for other types. */ 5856 else if (range & (1 << base_reg)) 5857 as_warn (_("writeback of base register when in register list is UNPREDICTABLE")); 5858 } 5859 else /* STM. */ 5860 { 5861 /* Not allowed for type 2. */ 5862 if (inst.instruction & LDM_TYPE_2_OR_3) 5863 as_warn (_("writeback of base register is UNPREDICTABLE")); 5864 /* Only allowed if base reg not in list, or first in list. */ 5865 else if ((range & (1 << base_reg)) 5866 && (range & ((1 << base_reg) - 1))) 5867 as_warn (_("if writeback register is in list, it must be the lowest reg in the list")); 5868 } 5869 } 5870} 5871 5872/* ARMv5TE load-consecutive (argument parse) 5873 Mode is like LDRH. 5874 5875 LDRccD R, mode 5876 STRccD R, mode. 
*/

static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first destination register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* The second destination may be omitted; it is always Rd+1.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  if (inst.instruction & LOAD_BIT)
    {
      /* encode_arm_addr_mode_3 will diagnose overlap between the base
	 register and the first register written; we have to diagnose
	 overlap between the base and the second register written here.  */

      if (inst.operands[2].reg == inst.operands[1].reg
	  && (inst.operands[2].writeback || inst.operands[2].postind))
	as_warn (_("base register written back, and overlaps "
		   "second destination register"));

      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      else if (inst.operands[2].immisreg
	       && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
		   || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps destination register"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}

/* ARM V6 load-exclusive: LDREX Rd, [Rn] -- ARM encoding takes no
   offset, so anything but a bare base register is rejected.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
}

/* ARM V6K LDREXD: loads an even/odd destination register pair.  */
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}

/* LDR/STR word or byte.  A non-register second operand may become a
   literal-pool load via move_or_literal_pool.  */
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
}

static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant ||
		  inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}

/* Halfword and signed-byte load/store operations.  */

static void
do_ldstv4 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}

static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant ||
		  inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}

/* Co-processor register load/store.
6022 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */ 6023static void 6024do_lstc (void) 6025{ 6026 inst.instruction |= inst.operands[0].reg << 8; 6027 inst.instruction |= inst.operands[1].reg << 12; 6028 encode_arm_cp_address (2, TRUE, TRUE, 0); 6029} 6030 6031static void 6032do_mlas (void) 6033{ 6034 /* This restriction does not apply to mls (nor to mla in v6 or later). */ 6035 /* Only restrict on pre-V4 architectures - radar 4474226 */ 6036 if (inst.operands[0].reg == inst.operands[1].reg 6037 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4) 6038 && !force_cpusubtype_ALL) 6039 as_tsktsk (_("rd and rm should be different in mla")); 6040 6041 inst.instruction |= inst.operands[0].reg << 16; 6042 inst.instruction |= inst.operands[1].reg; 6043 inst.instruction |= inst.operands[2].reg << 8; 6044 inst.instruction |= inst.operands[3].reg << 12; 6045} 6046 6047static void 6048do_mov (void) 6049{ 6050 inst.instruction |= inst.operands[0].reg << 12; 6051 encode_arm_shifter_operand (1); 6052} 6053 6054/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */ 6055static void 6056do_mov16 (void) 6057{ 6058 bfd_vma imm; 6059 bfd_boolean top; 6060 6061 top = (inst.instruction & 0x00400000) != 0; 6062 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW, 6063 _(":lower16: not allowed this instruction")); 6064 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT, 6065 _(":upper16: not allowed instruction")); 6066 inst.instruction |= inst.operands[0].reg << 12; 6067 if (inst.reloc.type == BFD_RELOC_UNUSED) 6068 { 6069 imm = inst.reloc.exp.X_add_number; 6070 /* The value is in two pieces: 0:11, 16:19. 
*/ 6071 inst.instruction |= (imm & 0x00000fff); 6072 inst.instruction |= (imm & 0x0000f000) << 4; 6073 } 6074} 6075 6076static void do_vfp_nsyn_opcode (const char *); 6077 6078static int 6079do_vfp_nsyn_mrs (void) 6080{ 6081 if (inst.operands[0].isvec) 6082 { 6083 if (inst.operands[1].reg != 1) 6084 first_error (_("operand 1 must be FPSCR")); 6085 memset (&inst.operands[0], '\0', sizeof (inst.operands[0])); 6086 memset (&inst.operands[1], '\0', sizeof (inst.operands[1])); 6087 do_vfp_nsyn_opcode ("fmstat"); 6088 } 6089 else if (inst.operands[1].isvec) 6090 do_vfp_nsyn_opcode ("fmrx"); 6091 else 6092 return FAIL; 6093 6094 return SUCCESS; 6095} 6096 6097static int 6098do_vfp_nsyn_msr (void) 6099{ 6100 if (inst.operands[0].isvec) 6101 do_vfp_nsyn_opcode ("fmxr"); 6102 else 6103 return FAIL; 6104 6105 return SUCCESS; 6106} 6107 6108static void 6109do_mrs (void) 6110{ 6111 if (do_vfp_nsyn_mrs () == SUCCESS) 6112 return; 6113 6114 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */ 6115 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f)) 6116 != (PSR_c|PSR_f), 6117 _("'CPSR' or 'SPSR' expected")); 6118 inst.instruction |= inst.operands[0].reg << 12; 6119 inst.instruction |= (inst.operands[1].imm & SPSR_BIT); 6120} 6121 6122static void 6123do_vmrs (void) 6124{ 6125 if (inst.operands[0].isvec) 6126 { 6127 if (inst.operands[1].reg != 1) 6128 first_error (_("operand 1 must be FPSCR")); 6129 memset (&inst.operands[0], '\0', sizeof (inst.operands[0])); 6130 memset (&inst.operands[1], '\0', sizeof (inst.operands[1])); 6131 do_vfp_nsyn_opcode ("fmstat"); 6132 } 6133 else 6134 do_rd_rn(); 6135} 6136 6137/* Two possible forms: 6138 "{C|S}PSR_<field>, Rm", 6139 "{C|S}PSR_f, #expression". 
*/

static void
do_msr (void)
{
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: defer encoding of the constant to the fixup.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
      inst.reloc.pc_rel = 0;
    }
}

/* MUL: Rd at bit 16, Rm low, Rs at bit 8.  Rs defaults to Rd when
   only two operands are given.  */
static void
do_mul (void)
{
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  /* Only restrict on pre-V4 architectures - radar 4474226 */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4)
      && !force_cpusubtype_ALL)
    as_tsktsk (_("Rd and Rm should be different in mul"));
}

/* Long Multiply Parser
   UMULL RdLo, RdHi, Rm, Rs
   SMULL RdLo, RdHi, Rm, Rs
   UMLAL RdLo, RdHi, Rm, Rs
   SMLAL RdLo, RdHi, Rm, Rs.  */

static void
do_mull (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* Only restrict rm on pre-V4 architectures - radar 4474226 */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4)
      || force_cpusubtype_ALL)
    {
      if (inst.operands[0].reg == inst.operands[1].reg)
	as_tsktsk (_("rdhi and rdlo must be different"));
    }
  else
    {
      /* rdhi, rdlo and rm must all be different.  */
      if (inst.operands[0].reg == inst.operands[1].reg
	  || inst.operands[0].reg == inst.operands[2].reg
	  || inst.operands[1].reg == inst.operands[2].reg)
	as_tsktsk (_("rdhi, rdlo and rm must all be different"));
    }
}

static void
do_nop (void)
{
  if (inst.operands[0].present)
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;
      inst.instruction |= 0x0320f000 + inst.operands[0].imm;
    }
}

/* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
   PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
   Condition defaults to COND_ALWAYS.
   Error if Rd, Rn or Rm are R15.  */

static void
do_pkhbt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  if (inst.operands[3].present)
    encode_arm_shift (3);
}

/* ARM V6 PKHTB (Argument Parse).  */

static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}

/* ARMv5TE: Preload-Cache

    PLD <addr_mode>

  Syntactically, like LDR with B=1, W=0, L=1.  */

static void
do_pld (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}

/* ARMv7: PLI <addr_mode> */
static void
do_pli (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  inst.instruction &= ~PRE_INDEX;
}

/* PUSH/POP: rewrite as LDM/STM with SP! as the base and reuse
   do_ldmstm for the encoding and diagnostics.  */
static void
do_push_pop (void)
{
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  do_ldmstm ();
}

/* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
   word at the specified address and the following word
   respectively.
   Unconditionally executed.
   Error if Rn is R15.  */

static void
do_rfe (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
}

/* ARM V6 ssat (argument parse).
*/

static void
do_ssat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* SSAT encodes the saturate position as imm-1.  */
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}

/* ARM V6 usat (argument parse).  */

static void
do_usat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}

/* ARM V6 ssat16 (argument parse).  */

static void
do_ssat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
  inst.instruction |= inst.operands[2].reg;
}

/* ARM V6 usat16.  */
static void
do_usat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;
}

/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
   preserving the other bits.

   setend <endian_specifier>, where <endian_specifier> is either
   BE or LE.  */

static void
do_setend (void)
{
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}

/* Shift mnemonics (LSL/LSR/ASR/ROR): Rm defaults to Rd; shift amount
   is either a register (Rs) or an immediate resolved via a fixup.  */
static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
    }
  else
    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
}

/* SMC: the immediate is encoded by the fixup machinery.  */
static void
do_smc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SMC;
  inst.reloc.pc_rel = 0;
}

/* SWI/SVC: the comment number is encoded by the fixup machinery.  */
static void
do_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
  inst.reloc.pc_rel = 0;
}

/* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
   SMLAxy{cond} Rd,Rm,Rs,Rn
   SMLAWy{cond} Rd,Rm,Rs,Rn
   Error if any register is R15.  */

static void
do_smla (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}

/* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
   SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
   Error if any register is R15.
   Warning if Rdlo == Rdhi.  */

static void
do_smlal (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}

/* ARM V5E (El Segundo) signed-multiply (argument parse)
   SMULxy{cond} Rd,Rm,Rs
   Error if any register is R15.  */

static void
do_smul (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
}

/* ARM V6 srs (argument parse).  The variable fields in the encoding are
   the same for both ARM and Thumb-2.  */

static void
do_srs (void)
{
  int reg;

  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != 13, _("SRS base register must be r13"));
    }
  else
    reg = 13;  /* Base defaults to SP.  */

  inst.instruction |= reg << 16;
  inst.instruction |= inst.operands[1].imm;
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}

/* ARM V6 strex (argument parse).  */

static void
do_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* Status register must not overlap the value or base registers.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
}

/* ARM V6K STREXD: stores an even/odd register pair exclusively.  */
static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}

/* ARM V6 SXTAH extracts a 16-bit value from a register, sign
   extends it to 32-bits, and adds the result to a value in another
   register.  You can specify a rotation by 0, 8, 16, or 24 bits
   before extracting the 16-bit value.
   SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register uses R15.  */

static void
do_sxtah (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 10;
}

/* ARM V6 SXTH.

   SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register uses R15.  */

static void
do_sxth (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 10;
}

/* VFP instructions.  In a logical order: SP variant first, monad
   before dyad, arithmetic then move then load/store.
*/

static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}

static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

/* Half-precision conversions: unconditional (0xf3...) in ARM state,
   0xff... in Thumb state; a condition is a delayed error in ARM.  */
static void
do_vfp_sp_hp_cvt (void)
{
  if (thumb_mode)
    inst.instruction |= 0xff000000;
  else
    {
      inst.instruction |= 0xf3000000;
      if(inst.cond != COND_ALWAYS)
	{
	  /* delayed diagnostic */
	  inst.error = BAD_COND;
	  inst.cond = COND_ALWAYS;
	}
    }
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

static void
do_vfp_hp_sp_cvt (void)
{
  if (thumb_mode)
    inst.instruction |= 0xff000000;
  else
    {
      inst.instruction |= 0xf3000000;
      if(inst.cond != COND_ALWAYS)
	{
	  /* delayed diagnostic */
	  inst.error = BAD_COND;
	  inst.cond = COND_ALWAYS;
	}
    }
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

static void
do_vfp_t_sp_hp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

static void
do_vfp_b_sp_hp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

static void
do_vfp_t_hp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

static void
do_vfp_b_hp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

/* FMRS-style: ARM register from VFP single.  */
static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}

static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}

static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}

static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}

static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}


/* Common encoder for the single-precision load/store-multiple forms;
   only the IA-without-writeback variant needs no writeback.  */
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;
}

/* Double-precision counterpart; the X (FLDMX/FSTMX) forms transfer
   2*count+1 words.  */
static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}

static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}

static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}

static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}

static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}

/* VFPv3 instructions.  */
static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  /* 8-bit immediate split across the two 4-bit fields.  */
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}

static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}

/* Fixed-point conversion: encode the fraction bits (srcsize - imm),
   split as low bit at position 5 and the rest in the low nibble.  */
static void
vfp_conv (int srcsize)
{
  unsigned immbits = srcsize - inst.operands[1].imm;
  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}

static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}

static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}

static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}

static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}


/* FPA instructions.
Also in a logical order. */ 6869 6870static void 6871do_fpa_cmp (void) 6872{ 6873 inst.instruction |= inst.operands[0].reg << 16; 6874 inst.instruction |= inst.operands[1].reg; 6875} 6876 6877static void 6878do_fpa_ldmstm (void) 6879{ 6880 inst.instruction |= inst.operands[0].reg << 12; 6881 switch (inst.operands[1].imm) 6882 { 6883 case 1: inst.instruction |= CP_T_X; break; 6884 case 2: inst.instruction |= CP_T_Y; break; 6885 case 3: inst.instruction |= CP_T_Y | CP_T_X; break; 6886 case 4: break; 6887 default: abort (); 6888 } 6889 6890 if (inst.instruction & (PRE_INDEX | INDEX_UP)) 6891 { 6892 /* The instruction specified "ea" or "fd", so we can only accept 6893 [Rn]{!}. The instruction does not really support stacking or 6894 unstacking, so we have to emulate these by setting appropriate 6895 bits and offsets. */ 6896 constraint (inst.reloc.exp.X_op != O_constant 6897 || inst.reloc.exp.X_add_number != 0, 6898 _("this instruction does not support indexing")); 6899 6900 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback) 6901 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm; 6902 6903 if (!(inst.instruction & INDEX_UP)) 6904 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number; 6905 6906 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback) 6907 { 6908 inst.operands[2].preind = 0; 6909 inst.operands[2].postind = 1; 6910 } 6911 } 6912 6913 encode_arm_cp_address (2, TRUE, TRUE, 0); 6914} 6915 6916 6917/* iWMMXt instructions: strictly in alphabetical order. 
*/ 6918 6919static void 6920do_iwmmxt_tandorc (void) 6921{ 6922 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here")); 6923} 6924 6925static void 6926do_iwmmxt_textrc (void) 6927{ 6928 inst.instruction |= inst.operands[0].reg << 12; 6929 inst.instruction |= inst.operands[1].imm; 6930} 6931 6932static void 6933do_iwmmxt_textrm (void) 6934{ 6935 inst.instruction |= inst.operands[0].reg << 12; 6936 inst.instruction |= inst.operands[1].reg << 16; 6937 inst.instruction |= inst.operands[2].imm; 6938} 6939 6940static void 6941do_iwmmxt_tinsr (void) 6942{ 6943 inst.instruction |= inst.operands[0].reg << 16; 6944 inst.instruction |= inst.operands[1].reg << 12; 6945 inst.instruction |= inst.operands[2].imm; 6946} 6947 6948static void 6949do_iwmmxt_tmia (void) 6950{ 6951 inst.instruction |= inst.operands[0].reg << 5; 6952 inst.instruction |= inst.operands[1].reg; 6953 inst.instruction |= inst.operands[2].reg << 12; 6954} 6955 6956static void 6957do_iwmmxt_waligni (void) 6958{ 6959 inst.instruction |= inst.operands[0].reg << 12; 6960 inst.instruction |= inst.operands[1].reg << 16; 6961 inst.instruction |= inst.operands[2].reg; 6962 inst.instruction |= inst.operands[3].imm << 20; 6963} 6964 6965static void 6966do_iwmmxt_wmerge (void) 6967{ 6968 inst.instruction |= inst.operands[0].reg << 12; 6969 inst.instruction |= inst.operands[1].reg << 16; 6970 inst.instruction |= inst.operands[2].reg; 6971 inst.instruction |= inst.operands[3].imm << 21; 6972} 6973 6974static void 6975do_iwmmxt_wmov (void) 6976{ 6977 /* WMOV rD, rN is an alias for WOR rD, rN, rN. 
*/ 6978 inst.instruction |= inst.operands[0].reg << 12; 6979 inst.instruction |= inst.operands[1].reg << 16; 6980 inst.instruction |= inst.operands[1].reg; 6981} 6982 6983static void 6984do_iwmmxt_wldstbh (void) 6985{ 6986 int reloc; 6987 inst.instruction |= inst.operands[0].reg << 12; 6988 if (thumb_mode) 6989 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2; 6990 else 6991 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2; 6992 encode_arm_cp_address (1, TRUE, FALSE, reloc); 6993} 6994 6995static void 6996do_iwmmxt_wldstw (void) 6997{ 6998 /* RIWR_RIWC clears .isreg for a control register. */ 6999 if (!inst.operands[0].isreg) 7000 { 7001 constraint (inst.cond != COND_ALWAYS, BAD_COND); 7002 inst.instruction |= 0xf0000000; 7003 } 7004 7005 inst.instruction |= inst.operands[0].reg << 12; 7006 encode_arm_cp_address (1, TRUE, TRUE, 0); 7007} 7008 7009static void 7010do_iwmmxt_wldstd (void) 7011{ 7012 inst.instruction |= inst.operands[0].reg << 12; 7013 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2) 7014 && inst.operands[1].immisreg) 7015 { 7016 inst.instruction &= ~0x1a000ff; 7017 inst.instruction |= (0xf << 28); 7018 if (inst.operands[1].preind) 7019 inst.instruction |= PRE_INDEX; 7020 if (!inst.operands[1].negative) 7021 inst.instruction |= INDEX_UP; 7022 if (inst.operands[1].writeback) 7023 inst.instruction |= WRITE_BACK; 7024 inst.instruction |= inst.operands[1].reg << 16; 7025 inst.instruction |= inst.reloc.exp.X_add_number << 4; 7026 inst.instruction |= inst.operands[1].imm; 7027 } 7028 else 7029 encode_arm_cp_address (1, TRUE, FALSE, 0); 7030} 7031 7032static void 7033do_iwmmxt_wshufh (void) 7034{ 7035 inst.instruction |= inst.operands[0].reg << 12; 7036 inst.instruction |= inst.operands[1].reg << 16; 7037 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16); 7038 inst.instruction |= (inst.operands[2].imm & 0x0f); 7039} 7040 7041static void 7042do_iwmmxt_wzero (void) 7043{ 7044 /* WZERO reg is an alias for WANDN reg, reg, reg. 
     */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[0].reg << 16;
}

/* iWMMXt shift-style instructions that take either a third register
   or (on iWMMXt2 only) a 5-bit immediate.  A zero immediate is not
   encodable directly, so it is rewritten as an equivalent rotate (or,
   for doubleword size, as WOR wrd, wrn, wrn).  */
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* Bits 20..23 select the operation/size; rewrite per size.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      uint32_t wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}


/* Cirrus Maverick instructions.  Simple 2-, 3-, and 4-register
   operations first, then control, shift, and load/store.  */

/* Insns like "foo X,Y,Z".  */

static void
do_mav_triple (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

/* Insns like "foo W,X,Y,Z".
   where W=MVAX[0:3] and X,Y,Z=MVFX[0:15].
 */

static void
do_mav_quad (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.instruction |= inst.operands[3].reg;
}

/* cfmvsc32<cond> DSPSC,MVDX[15:0].  Only the source register is
   encoded; the DSPSC destination is implicit in the opcode.  */
static void
do_mav_dspsc (void)
{
  inst.instruction |= inst.operands[1].reg << 12;
}

/* Maverick shift immediate instructions.
   cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
   cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */

static void
do_mav_shift (void)
{
  int imm = inst.operands[2].imm;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
     Bit 4 should be 0.  */
  imm = (imm & 0xf) | ((imm & 0x70) << 1);

  inst.instruction |= imm;
}


/* XScale instructions.  Also sorted arithmetic before move.  */

/* Xscale multiply-accumulate (argument parse)
     MIAcc   acc0,Rm,Rs
     MIAPHcc acc0,Rm,Rs
     MIAxycc acc0,Rm,Rs.  */

static void
do_xsc_mia (void)
{
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

/* Xscale move-accumulator-register (argument parse)

     MARcc   acc0,RdLo,RdHi.  */

static void
do_xsc_mar (void)
{
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}

/* Xscale move-register-accumulator (argument parse)

     MRAcc   RdLo,RdHi,acc0.
   */

static void
do_xsc_mra (void)
{
  /* RdLo and RdHi must differ.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
}

/* Encoding functions relevant only to Thumb.  */

/* inst.operands[i] is a shifted-register operand; encode
   it into inst.instruction in the format used by Thumb32.  */

static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* A shift of 0 is canonicalized to LSL #0; ASR/LSR #32 are
	 encoded with a zero amount field.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      /* Shift amount is split: bits 4:2 go to imm3 (<<12), bits 1:0
	 to imm2 (<<6).  */
      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}


/* inst.operands[i] was set up by parse_address.  Encode it into a
   Thumb32 format load or store instruction.  Reject forms that cannot
   be used with such instructions.  If is_t is true, reject forms that
   cannot be used with a T instruction; if is_d is true, reject forms
   that cannot be used with a D instruction.
 */

static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* [Rn, Rm {, LSL #shift}] — register index form.  */
      constraint (is_pc, _("cannot use register index with PC-relative addressing"));
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* [Rn, #off] or [Rn, #off]! — the offset itself is fixed up
	 later via BFD_RELOC_ARM_T32_OFFSET_IMM.  */
      constraint (is_pc && inst.operands[i].writeback,
		  _("cannot use writeback with PC-relative addressing"));
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));

      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* [Rn], #off — parse_address guarantees writeback here.  */
      assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}

#endif /* INSNS_TABLE_ONLY */

/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.  */
#define T16_32_TAB				\
  X(adc,   4140, eb400000),			\
  X(adcs,  4140, eb500000),			\
  X(add,   1c00, eb000000),			\
  X(adds,  1c00, eb100000),			\
  X(addi,  0000, f1000000),			\
  X(addis, 0000, f1100000),			\
  X(add_pc,000f, f20f0000),			\
  X(add_sp,000d, f10d0000),			\
  X(adr,   000f, f20f0000),			\
  X(and,   4000, ea000000),			\
  X(ands,  4000, ea100000),			\
  X(asr,   1000, fa40f000),			\
  X(asrs,  1000, fa50f000),			\
  X(b,     e000, f000b000),			\
  X(bcond, d000, f0008000),			\
  X(bic,   4380, ea200000),			\
  X(bics,  4380, ea300000),			\
  X(cmn,   42c0, eb100f00),			\
  X(cmp,   2800, ebb00f00),			\
  X(cpsie, b660, f3af8400),			\
  X(cpsid, b670, f3af8600),			\
  X(cpy,   4600, ea4f0000),			\
  X(dec_sp,80dd, f1ad0d00),			\
  X(eor,   4040, ea800000),			\
  X(eors,  4040, ea900000),			\
  X(inc_sp,00dd, f10d0d00),			\
  X(ldmia, c800, e8900000),			\
  X(ldr,   6800, f8500000),			\
  X(ldrb,  7800, f8100000),			\
  X(ldrh,  8800, f8300000),			\
  X(ldrsb, 5600, f9100000),			\
  X(ldrsh, 5e00, f9300000),			\
  X(ldr_pc,4800, f85f0000),			\
  X(ldr_pc2,4800, f85f0000),			\
  X(ldr_sp,9800, f85d0000),			\
  X(lsl,   0000, fa00f000),			\
  X(lsls,  0000, fa10f000),			\
  X(lsr,   0800, fa20f000),			\
  X(lsrs,  0800, fa30f000),			\
  X(mov,   2000, ea4f0000),			\
  X(movs,  2000, ea5f0000),			\
  X(mul,   4340, fb00f000),			\
  X(muls,  4340, ffffffff), /* no 32b muls */	\
  X(mvn,   43c0, ea6f0000),			\
  X(mvns,  43c0, ea7f0000),			\
  X(neg,   4240, f1c00000), /* rsb #0 */	\
  X(negs,  4240, f1d00000), /* rsbs #0 */	\
  X(orr,   4300, ea400000),			\
  X(orrs,  4300, ea500000),			\
  X(pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(rev,   ba00, fa90f080),			\
  X(rev16, ba40, fa90f090),			\
  X(revsh, bac0, fa90f0b0),			\
  X(ror,   41c0, fa60f000),			\
  X(rors,  41c0, fa70f000),			\
  X(sbc,   4180, eb600000),			\
  X(sbcs,  4180, eb700000),			\
  X(stmia, c000, e8800000),			\
  X(str,   6000, f8400000),			\
  X(strb,  7000, f8000000),			\
  X(strh,  8000, f8200000),			\
  X(str_sp,9000, f84d0000),			\
  X(sub,   1e00, eba00000),			\
  X(subs,  1e00, ebb00000),			\
  X(subi,  8000, f1a00000),			\
  X(subis, 8000, f1b00000),			\
  X(sxtb,  b240, fa4ff080),			\
  X(sxth,  b200, fa0ff080),			\
  X(tst,   4200, ea100f00),			\
  X(uxtb,  b2c0, fa5ff080),			\
  X(uxth,  b280, fa1ff080),			\
  X(nop,   bf00, f3af8000),			\
  X(yield, bf10, f3af8001),			\
  X(wfe,   bf20, f3af8002),			\
  X(wfi,   bf30, f3af8003),			\
  X(sev,   bf40, f3af8004),

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.
 */
#define X(a,b,c) T_MNEM_##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
/* Bit 20 is the S bit in the 32-bit encodings above.  */
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB

#ifndef INSNS_TABLE_ONLY

/* Thumb instruction encoders, in alphabetical order.  */

/* ADDW or SUBW.  */
static void
do_t_add_sub_w (void)
{
  int Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;

  constraint (Rd == 15, _("PC not allowed as destination"));
  inst.instruction |= (Rn << 16) | (Rd << 8);
  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
}

/* Parse an add or subtract instruction.  We get here with inst.instruction
   equalling any of THUMB_OPCODE_add, adds, sub, or subs.  */

static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      /* In an IT block the 16-bit encoding never sets flags; outside
	 one it always does — so "narrow" is viable only when the two
	 agree.  */
      if (flags)
	narrow = (current_it_mask == 0);
      else
	narrow = (current_it_mask != 0);
      if (!inst.operands[2].isreg)
	{
	  /* Immediate third operand.  */
	  int add;

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  inst.instruction |= (inst.operands[1].present
				       ? 0x0100 : 0x0000);
		  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		  if (inst.size_req != 2)
		    inst.relax = opcode;
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      if (Rd == REG_PC)
		{
		  /* The only 32-bit immediate form with PC destination
		     is the exception-return SUBS PC, LR, #const.  */
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add)
		{
		  if (Rd == Rs)
		    {
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		  /* ... because addition is commutative! */
		  else if (Rd == Rn)
		    {
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rs << 3;
		      return;
		    }
		}
	    }
	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (divided) syntax: 16-bit encodings only.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg)	/* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.instruction |= (inst.operands[1].present
			       ? 0x0100 : 0x0000);
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}

static void
do_t_adr (void)
{
  if (unified_syntax && inst.size_req == 0 && inst.operands[0].reg <= 7)
    {
      /* Defer to section relaxation.
	 */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.reloc.pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.reloc.pc_rel = 1;

      inst.instruction |= inst.operands[0].reg << 4;
    }
}

/* Arithmetic instructions for which there is just one 16-bit
   instruction encoding, and it allows only two low registers.
   For maximal compatibility with ARM syntax, we allow three register
   operands even when Thumb-32 instructions are not available, as long
   as the first two are identical.  For instance, both "sbc r0,r1" and
   "sbc r0,r0,r1" are allowed.  */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = current_it_mask == 0;
	  else
	    narrow = current_it_mask != 0;

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* The 16-bit form is two-operand; it needs Rd == Rs.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}

/* Similarly, but for instructions where the arithmetic operation is
   commutative, so we can allow either of them to be different from
   the destination operand in a 16-bit instruction.  For instance, all
   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
   accepted.  */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = current_it_mask == 0;
	  else
	    narrow = current_it_mask != 0;

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* Commutative: either source may coincide with Rd.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.
*/ 7818 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32); 7819 7820 constraint (!inst.operands[2].isreg || inst.operands[2].shifted, 7821 _("unshifted register required")); 7822 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG); 7823 7824 inst.instruction = THUMB_OP16 (inst.instruction); 7825 inst.instruction |= Rd; 7826 7827 if (Rd == Rs) 7828 inst.instruction |= Rn << 3; 7829 else if (Rd == Rn) 7830 inst.instruction |= Rs << 3; 7831 else 7832 constraint (1, _("dest must overlap one source register")); 7833 } 7834} 7835 7836static void 7837do_t_barrier (void) 7838{ 7839 if (inst.operands[0].present) 7840 { 7841 constraint ((inst.instruction & 0xf0) == 0x60 7842 && inst.operands[0].imm != 0xf, 7843 "bad barrier type"); 7844 inst.instruction |= inst.operands[0].imm; 7845 } 7846 else 7847 inst.instruction |= 0xf; 7848} 7849 7850static void 7851do_t_bfc (void) 7852{ 7853 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm; 7854 constraint (msb > 32, _("bit-field extends past end of register")); 7855 /* The instruction encoding stores the LSB and MSB, 7856 not the LSB and width. */ 7857 inst.instruction |= inst.operands[0].reg << 8; 7858 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10; 7859 inst.instruction |= (inst.operands[1].imm & 0x03) << 6; 7860 inst.instruction |= msb - 1; 7861} 7862 7863static void 7864do_t_bfi (void) 7865{ 7866 unsigned int msb; 7867 7868 /* #0 in second position is alternative syntax for bfc, which is 7869 the same instruction but with REG_PC in the Rm field. */ 7870 if (!inst.operands[1].isreg) 7871 inst.operands[1].reg = REG_PC; 7872 7873 msb = inst.operands[2].imm + inst.operands[3].imm; 7874 constraint (msb > 32, _("bit-field extends past end of register")); 7875 /* The instruction encoding stores the LSB and MSB, 7876 not the LSB and width. 
     */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
  inst.instruction |= msb - 1;
}

/* SBFX/UBFX: operand 2 is the LSB, operand 3 the width.  The width
   is encoded as width-1.  */
static void
do_t_bfx (void)
{
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
	      _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
  inst.instruction |= inst.operands[3].imm - 1;
}

/* ARM V5 Thumb BLX (argument parse)
	BLX <target_addr>	which is BLX(1)
	BLX <Rm>		which is BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
	into inst.instruction.

   ??? How to take advantage of the additional two bits of displacement
   available in Thumb32 mode?  Need new relocation?  */

static void
do_t_blx (void)
{
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  if (inst.operands[0].isreg)
    /* We have a register, so this is BLX(2).  */
    inst.instruction |= inst.operands[0].reg << 3;
  else
    {
      /* No register.  This must be BLX(1).  */
      inst.instruction = 0xf000e800;
#ifdef OBJ_ELF
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
      inst.reloc.pc_rel = 1;
    }
}

static void
do_t_branch (void)
{
  int opcode;
  int cond;

  if (current_it_mask)
    {
      /* Conditional branches inside IT blocks are encoded as unconditional
	 branches.  */
      cond = COND_ALWAYS;
      /* A branch must be the last instruction in an IT block.  */
      constraint (current_it_mask != 0x10, BAD_BRANCH);
    }
  else
    cond = inst.cond;

  if (cond != COND_ALWAYS)
    opcode = T_MNEM_bcond;
  else
    opcode = inst.instruction;

  if (unified_syntax && inst.size_req == 4)
    {
      inst.instruction = THUMB_OP32(opcode);
      if (cond == COND_ALWAYS)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
      else
	{
	  /* 0xF is not a condition; it selects other encodings.  */
	  assert (cond != 0xF);
	  inst.instruction |= cond << 22;
	  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
    }
  else
    {
      inst.instruction = THUMB_OP16(opcode);
      if (cond == COND_ALWAYS)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      else
	{
	  inst.instruction |= cond << 8;
	  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
	}
      /* Allow section relaxation.  */
      if (unified_syntax && inst.size_req != 2)
	inst.relax = opcode;
    }

  inst.reloc.pc_rel = 1;
}

static void
do_t_bkpt (void)
{
  constraint (inst.cond != COND_ALWAYS,
	      _("instruction is always unconditional"));
  if (inst.operands[0].present)
    {
      constraint (inst.operands[0].imm > 255,
		  _("immediate value out of range"));
      inst.instruction |= inst.operands[0].imm;
    }
}

static void
do_t_branch23 (void)
{
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
  inst.reloc.pc_rel = 1;

  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
}

static void
do_t_bx (void)
{
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}

static void
do_t_bxj (void)
{
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bxj is not really useful"));

  inst.instruction |= inst.operands[0].reg << 16;
}

/* T32 CLZ encodes the source register in both the Rn (16..19) and Rm
   (0..3) fields.  */
static void
do_t_clz (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}

static void
do_t_cps (void)
{
  constraint (current_it_mask, BAD_NOT_IT);
  inst.instruction |= inst.operands[0].imm;
}

static void
do_t_cpsi (void)
{
  constraint (current_it_mask, BAD_NOT_IT);
  /* With a mode operand or an explicit .w, use the 32-bit encoding
     when the architecture has it.  */
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      /* Bit 2 of the iflags immediate is the 'A' flag.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}

/* THUMB CPY instruction (argument parse).  */

static void
do_t_cpy (void)
{
  if (inst.size_req == 4)
    {
      inst.instruction = THUMB_OP32 (T_MNEM_mov);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].reg;
    }
  else
    {
      /* 16-bit form: high bit of Rd goes to bit 7.  */
      inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
      inst.instruction |= (inst.operands[0].reg & 0x7);
      inst.instruction |= inst.operands[1].reg << 3;
    }
}

static void
do_t_cbz (void)
{
  constraint (current_it_mask, BAD_NOT_IT);
  constraint (inst.operands[0].reg > 7, BAD_HIREG);
  inst.instruction |= inst.operands[0].reg;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
}

static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}

/* SDIV/UDIV: with two operands, Rd doubles as the first source.  */
static void
do_t_div (void)
{
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
}

static void
do_t_hint (void)
{
  if (unified_syntax && inst.size_req == 4)
    inst.instruction = THUMB_OP32 (inst.instruction);
  else
    inst.instruction = THUMB_OP16 (inst.instruction);
}

static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  constraint (current_it_mask, BAD_NOT_IT);
  current_it_mask = (inst.instruction & 0xf) | 0x10;
  current_cc = cond;

  /* If the condition is a negative condition, invert the mask.
*/ 8138 if ((cond & 0x1) == 0x0) 8139 { 8140 unsigned int mask = inst.instruction & 0x000f; 8141 8142 if ((mask & 0x7) == 0) 8143 /* no conversion needed */; 8144 else if ((mask & 0x3) == 0) 8145 mask ^= 0x8; 8146 else if ((mask & 0x1) == 0) 8147 mask ^= 0xC; 8148 else 8149 mask ^= 0xE; 8150 8151 inst.instruction &= 0xfff0; 8152 inst.instruction |= mask; 8153 } 8154 8155 inst.instruction |= cond << 4; 8156} 8157 8158/* Helper function used for both push/pop and ldm/stm. */ 8159static void 8160encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback) 8161{ 8162 bfd_boolean load; 8163 8164 load = (inst.instruction & (1 << 20)) != 0; 8165 8166 if (mask & (1 << 13)) 8167 inst.error = _("SP not allowed in register list"); 8168 if (load) 8169 { 8170 if (mask & (1 << 14) 8171 && mask & (1 << 15)) 8172 inst.error = _("LR and PC should not both be in register list"); 8173 8174 if ((mask & (1 << base)) != 0 8175 && writeback) 8176 as_warn (_("base register should not be in register list " 8177 "when written back")); 8178 } 8179 else 8180 { 8181 if (mask & (1 << 15)) 8182 inst.error = _("PC not allowed in register list"); 8183 8184 if (writeback && 8185 (mask & (1 << base)) && 8186 (mask & (0xffffffff >> (32 - (base -1)))) != 0) 8187 as_warn (_("value stored for r%d is UNPREDICTABLE"), base); 8188 } 8189 8190 if ((mask & (mask - 1)) == 0) 8191 { 8192 /* Single register transfers implemented as str/ldr. */ 8193 if (writeback) 8194 { 8195 if (inst.instruction & (1 << 23)) 8196 inst.instruction = 0x00000b04; /* ia! -> [base], #4 */ 8197 else 8198 inst.instruction = 0x00000d04; /* db! -> [base, #-4]! 
*/ 8199 } 8200 else 8201 { 8202 if (inst.instruction & (1 << 23)) 8203 inst.instruction = 0x00800000; /* ia -> [base] */ 8204 else 8205 inst.instruction = 0x00000c04; /* db -> [base, #-4] */ 8206 } 8207 8208 inst.instruction |= 0xf8400000; 8209 if (load) 8210 inst.instruction |= 0x00100000; 8211 8212 mask = ffs(mask) - 1; 8213 mask <<= 12; 8214 } 8215 else if (writeback) 8216 inst.instruction |= WRITE_BACK; 8217 8218 inst.instruction |= mask; 8219 inst.instruction |= base << 16; 8220} 8221 8222static void 8223do_t_ldmstm (void) 8224{ 8225 /* This really doesn't seem worth it. */ 8226 constraint (inst.reloc.type != BFD_RELOC_UNUSED, 8227 _("expression too complex")); 8228 constraint (inst.operands[1].writeback, 8229 _("Thumb load/store multiple does not support {reglist}^")); 8230 8231 if (unified_syntax) 8232 { 8233 bfd_boolean narrow; 8234 unsigned mask; 8235 8236 narrow = FALSE; 8237 /* See if we can use a 16-bit instruction. */ 8238 if (inst.instruction < 0xffff /* not ldmdb/stmdb */ 8239 && inst.size_req != 4 8240 && !(inst.operands[1].imm & ~0xff)) 8241 { 8242 mask = 1 << inst.operands[0].reg; 8243 8244 if (inst.operands[0].reg <= 7 8245 && (inst.instruction == T_MNEM_stmia 8246 ? inst.operands[0].writeback 8247 : (inst.operands[0].writeback 8248 == !(inst.operands[1].imm & mask)))) 8249 { 8250 if (inst.instruction == T_MNEM_stmia 8251 && (inst.operands[1].imm & mask) 8252 && (inst.operands[1].imm & (mask - 1))) 8253 as_warn (_("value stored for r%d is UNPREDICTABLE"), 8254 inst.operands[0].reg); 8255 8256 inst.instruction = THUMB_OP16 (inst.instruction); 8257 inst.instruction |= inst.operands[0].reg << 8; 8258 inst.instruction |= inst.operands[1].imm; 8259 narrow = TRUE; 8260 } 8261 else if (inst.operands[0] .reg == REG_SP 8262 && inst.operands[0].writeback) 8263 { 8264 inst.instruction = THUMB_OP16 (inst.instruction == T_MNEM_stmia 8265 ? 
T_MNEM_push : T_MNEM_pop); 8266 inst.instruction |= inst.operands[1].imm; 8267 narrow = TRUE; 8268 } 8269 } 8270 8271 if (!narrow) 8272 { 8273 if (inst.instruction < 0xffff) 8274 inst.instruction = THUMB_OP32 (inst.instruction); 8275 8276 encode_thumb2_ldmstm(inst.operands[0].reg, inst.operands[1].imm, 8277 inst.operands[0].writeback); 8278 } 8279 } 8280 else 8281 { 8282 constraint (inst.operands[0].reg > 7 8283 || (inst.operands[1].imm & ~0xff), BAD_HIREG); 8284 constraint (inst.instruction != T_MNEM_ldmia 8285 && inst.instruction != T_MNEM_stmia, 8286 _("Thumb-2 instruction only valid in unified syntax")); 8287 if (inst.instruction == T_MNEM_stmia) 8288 { 8289 if (!inst.operands[0].writeback) 8290 as_warn (_("this instruction will write back the base register")); 8291 if ((inst.operands[1].imm & (1 << inst.operands[0].reg)) 8292 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1))) 8293 as_warn (_("value stored for r%d is UNPREDICTABLE"), 8294 inst.operands[0].reg); 8295 } 8296 else 8297 { 8298 if (!inst.operands[0].writeback 8299 && !(inst.operands[1].imm & (1 << inst.operands[0].reg))) 8300 as_warn (_("this instruction will write back the base register")); 8301 else if (inst.operands[0].writeback 8302 && (inst.operands[1].imm & (1 << inst.operands[0].reg))) 8303 as_warn (_("this instruction will not write back the base register")); 8304 } 8305 8306 inst.instruction = THUMB_OP16 (inst.instruction); 8307 inst.instruction |= inst.operands[0].reg << 8; 8308 inst.instruction |= inst.operands[1].imm; 8309 } 8310} 8311 8312static void 8313do_t_ldrex (void) 8314{ 8315 constraint (!inst.operands[1].isreg || !inst.operands[1].preind 8316 || inst.operands[1].postind || inst.operands[1].writeback 8317 || inst.operands[1].immisreg || inst.operands[1].shifted 8318 || inst.operands[1].negative, 8319 BAD_ADDR_MODE); 8320 8321 inst.instruction |= inst.operands[0].reg << 12; 8322 inst.instruction |= inst.operands[1].reg << 16; 8323 inst.reloc.type = 
BFD_RELOC_ARM_T32_OFFSET_U8; 8324} 8325 8326static void 8327do_t_ldrexd (void) 8328{ 8329 if (!inst.operands[1].present) 8330 { 8331 constraint (inst.operands[0].reg == REG_LR, 8332 _("r14 not allowed as first register " 8333 "when second register is omitted")); 8334 inst.operands[1].reg = inst.operands[0].reg + 1; 8335 } 8336 constraint (inst.operands[0].reg == inst.operands[1].reg, 8337 BAD_OVERLAP); 8338 8339 inst.instruction |= inst.operands[0].reg << 12; 8340 inst.instruction |= inst.operands[1].reg << 8; 8341 inst.instruction |= inst.operands[2].reg << 16; 8342} 8343 8344static void 8345do_t_ldst (void) 8346{ 8347 uint32_t opcode; 8348 int Rn; 8349 8350 opcode = inst.instruction; 8351 if (unified_syntax) 8352 { 8353 if (!inst.operands[1].isreg) 8354 { 8355 if (opcode <= 0xffff) 8356 inst.instruction = THUMB_OP32 (opcode); 8357 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE)) 8358 return; 8359 } 8360 if (inst.operands[1].isreg 8361 && !inst.operands[1].writeback 8362 && !inst.operands[1].shifted && !inst.operands[1].postind 8363 && !inst.operands[1].negative && inst.operands[0].reg <= 7 8364 && opcode <= 0xffff 8365 && inst.size_req != 4) 8366 { 8367 /* Insn may have a 16-bit form. 
*/ 8368 Rn = inst.operands[1].reg; 8369 if (inst.operands[1].immisreg) 8370 { 8371 inst.instruction = THUMB_OP16 (opcode); 8372 /* [Rn, Ri] */ 8373 if (Rn <= 7 && inst.operands[1].imm <= 7) 8374 goto op16; 8375 } 8376 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh 8377 && opcode != T_MNEM_ldrsb) 8378 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr) 8379 || (Rn == REG_SP && opcode == T_MNEM_str)) 8380 { 8381 /* [Rn, #const] */ 8382 if (Rn > 7) 8383 { 8384 if (Rn == REG_PC) 8385 { 8386 if (inst.reloc.pc_rel) 8387 opcode = T_MNEM_ldr_pc2; 8388 else 8389 opcode = T_MNEM_ldr_pc; 8390 } 8391 else 8392 { 8393 if (opcode == T_MNEM_ldr) 8394 opcode = T_MNEM_ldr_sp; 8395 else 8396 opcode = T_MNEM_str_sp; 8397 } 8398 inst.instruction = inst.operands[0].reg << 8; 8399 } 8400 else 8401 { 8402 inst.instruction = inst.operands[0].reg; 8403 inst.instruction |= inst.operands[1].reg << 3; 8404 } 8405 inst.instruction |= THUMB_OP16 (opcode); 8406 if (inst.size_req == 2) 8407 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET; 8408 else 8409 inst.relax = opcode; 8410 return; 8411 } 8412 } 8413 /* Definitely a 32-bit variant. */ 8414 inst.instruction = THUMB_OP32 (opcode); 8415 inst.instruction |= inst.operands[0].reg << 12; 8416 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE); 8417 return; 8418 } 8419 8420 constraint (inst.operands[0].reg > 7, BAD_HIREG); 8421 8422 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb) 8423 { 8424 /* Only [Rn,Rm] is acceptable. 
*/ 8425 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG); 8426 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg 8427 || inst.operands[1].postind || inst.operands[1].shifted 8428 || inst.operands[1].negative, 8429 _("Thumb does not support this addressing mode")); 8430 inst.instruction = THUMB_OP16 (inst.instruction); 8431 goto op16; 8432 } 8433 8434 inst.instruction = THUMB_OP16 (inst.instruction); 8435 if (!inst.operands[1].isreg) 8436 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE)) 8437 return; 8438 8439 constraint (!inst.operands[1].preind 8440 || inst.operands[1].shifted 8441 || inst.operands[1].writeback, 8442 _("Thumb does not support this addressing mode")); 8443 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP) 8444 { 8445 constraint (inst.instruction & 0x0600, 8446 _("byte or halfword not valid for base register")); 8447 constraint (inst.operands[1].reg == REG_PC 8448 && !(inst.instruction & THUMB_LOAD_BIT), 8449 _("r15 based store not allowed")); 8450 constraint (inst.operands[1].immisreg, 8451 _("invalid base register for register offset")); 8452 8453 if (inst.operands[1].reg == REG_PC) 8454 inst.instruction = T_OPCODE_LDR_PC; 8455 else if (inst.instruction & THUMB_LOAD_BIT) 8456 inst.instruction = T_OPCODE_LDR_SP; 8457 else 8458 inst.instruction = T_OPCODE_STR_SP; 8459 8460 inst.instruction |= inst.operands[0].reg << 8; 8461 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET; 8462 return; 8463 } 8464 8465 constraint (inst.operands[1].reg > 7, BAD_HIREG); 8466 if (!inst.operands[1].immisreg) 8467 { 8468 /* Immediate offset. */ 8469 inst.instruction |= inst.operands[0].reg; 8470 inst.instruction |= inst.operands[1].reg << 3; 8471 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET; 8472 return; 8473 } 8474 8475 /* Register offset. 
*/ 8476 constraint (inst.operands[1].imm > 7, BAD_HIREG); 8477 constraint (inst.operands[1].negative, 8478 _("Thumb does not support this addressing mode")); 8479 8480 op16: 8481 switch (inst.instruction) 8482 { 8483 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break; 8484 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break; 8485 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break; 8486 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break; 8487 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break; 8488 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break; 8489 case 0x5600 /* ldrsb */: 8490 case 0x5e00 /* ldrsh */: break; 8491 default: abort (); 8492 } 8493 8494 inst.instruction |= inst.operands[0].reg; 8495 inst.instruction |= inst.operands[1].reg << 3; 8496 inst.instruction |= inst.operands[1].imm << 6; 8497} 8498 8499static void 8500do_t_ldstd (void) 8501{ 8502 if (!inst.operands[1].present) 8503 { 8504 inst.operands[1].reg = inst.operands[0].reg + 1; 8505 constraint (inst.operands[0].reg == REG_LR, 8506 _("r14 not allowed here")); 8507 } 8508 inst.instruction |= inst.operands[0].reg << 12; 8509 inst.instruction |= inst.operands[1].reg << 8; 8510 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE); 8511 8512} 8513 8514static void 8515do_t_ldstt (void) 8516{ 8517 inst.instruction |= inst.operands[0].reg << 12; 8518 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE); 8519} 8520 8521static void 8522do_t_mla (void) 8523{ 8524 inst.instruction |= inst.operands[0].reg << 8; 8525 inst.instruction |= inst.operands[1].reg << 16; 8526 inst.instruction |= inst.operands[2].reg; 8527 inst.instruction |= inst.operands[3].reg << 12; 8528} 8529 8530static void 8531do_t_mlal (void) 8532{ 8533 inst.instruction |= inst.operands[0].reg << 12; 8534 inst.instruction |= inst.operands[1].reg << 8; 8535 inst.instruction |= inst.operands[2].reg << 16; 8536 inst.instruction |= inst.operands[3].reg; 8537} 
8538 8539static void 8540do_t_mov_cmp (void) 8541{ 8542 if (unified_syntax) 8543 { 8544 int r0off = (inst.instruction == T_MNEM_mov 8545 || inst.instruction == T_MNEM_movs) ? 8 : 16; 8546 uint32_t opcode; 8547 bfd_boolean narrow; 8548 bfd_boolean low_regs; 8549 8550 low_regs = (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7); 8551 opcode = inst.instruction; 8552 if (current_it_mask) 8553 narrow = opcode != T_MNEM_movs; 8554 else 8555 narrow = opcode != T_MNEM_movs || low_regs; 8556 if (inst.size_req == 4 8557 || inst.operands[1].shifted) 8558 narrow = FALSE; 8559 8560 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */ 8561 if (opcode == T_MNEM_movs && inst.operands[1].isreg 8562 && !inst.operands[1].shifted 8563 && inst.operands[0].reg == REG_PC 8564 && inst.operands[1].reg == REG_LR) 8565 { 8566 inst.instruction = T2_SUBS_PC_LR; 8567 return; 8568 } 8569 8570 if (!inst.operands[1].isreg) 8571 { 8572 /* Immediate operand. */ 8573 if (current_it_mask == 0 && opcode == T_MNEM_mov) 8574 narrow = 0; 8575 if (low_regs && narrow) 8576 { 8577 inst.instruction = THUMB_OP16 (opcode); 8578 inst.instruction |= inst.operands[0].reg << 8; 8579 if (inst.size_req == 2) 8580 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM; 8581 else 8582 inst.relax = opcode; 8583 } 8584 else 8585 { 8586 inst.instruction = THUMB_OP32 (inst.instruction); 8587 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000; 8588 inst.instruction |= inst.operands[0].reg << r0off; 8589 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE; 8590 } 8591 } 8592 else if (inst.operands[1].shifted && inst.operands[1].immisreg 8593 && (inst.instruction == T_MNEM_mov 8594 || inst.instruction == T_MNEM_movs)) 8595 { 8596 /* Register shifts are encoded as separate shift instructions. 
*/ 8597 bfd_boolean flags = (inst.instruction == T_MNEM_movs); 8598 8599 if (current_it_mask) 8600 narrow = !flags; 8601 else 8602 narrow = flags; 8603 8604 if (inst.size_req == 4) 8605 narrow = FALSE; 8606 8607 if (!low_regs || inst.operands[1].imm > 7) 8608 narrow = FALSE; 8609 8610 if (inst.operands[0].reg != inst.operands[1].reg) 8611 narrow = FALSE; 8612 8613 switch (inst.operands[1].shift_kind) 8614 { 8615 case SHIFT_LSL: 8616 opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl); 8617 break; 8618 case SHIFT_ASR: 8619 opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr); 8620 break; 8621 case SHIFT_LSR: 8622 opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr); 8623 break; 8624 case SHIFT_ROR: 8625 opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror); 8626 break; 8627 default: 8628 abort(); 8629 } 8630 8631 inst.instruction = opcode; 8632 if (narrow) 8633 { 8634 inst.instruction |= inst.operands[0].reg; 8635 inst.instruction |= inst.operands[1].imm << 3; 8636 } 8637 else 8638 { 8639 if (flags) 8640 inst.instruction |= CONDS_BIT; 8641 8642 inst.instruction |= inst.operands[0].reg << 8; 8643 inst.instruction |= inst.operands[1].reg << 16; 8644 inst.instruction |= inst.operands[1].imm; 8645 } 8646 } 8647 else if (!narrow) 8648 { 8649 /* Some mov with immediate shift have narrow variants. 8650 Register shifts are handled above. 
*/ 8651 if (low_regs && inst.operands[1].shifted 8652 && (inst.instruction == T_MNEM_mov 8653 || inst.instruction == T_MNEM_movs)) 8654 { 8655 if (current_it_mask) 8656 narrow = (inst.instruction == T_MNEM_mov); 8657 else 8658 narrow = (inst.instruction == T_MNEM_movs); 8659 } 8660 8661 if (narrow) 8662 { 8663 switch (inst.operands[1].shift_kind) 8664 { 8665 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break; 8666 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break; 8667 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break; 8668 default: narrow = FALSE; break; 8669 } 8670 } 8671 8672 if (narrow) 8673 { 8674 inst.instruction |= inst.operands[0].reg; 8675 inst.instruction |= inst.operands[1].reg << 3; 8676 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT; 8677 } 8678 else 8679 { 8680 inst.instruction = THUMB_OP32 (inst.instruction); 8681 inst.instruction |= inst.operands[0].reg << r0off; 8682 encode_thumb32_shifted_operand (1); 8683 } 8684 } 8685 else 8686 switch (inst.instruction) 8687 { 8688 case T_MNEM_mov: 8689 inst.instruction = T_OPCODE_MOV_HR; 8690 inst.instruction |= (inst.operands[0].reg & 0x8) << 4; 8691 inst.instruction |= (inst.operands[0].reg & 0x7); 8692 inst.instruction |= inst.operands[1].reg << 3; 8693 break; 8694 8695 case T_MNEM_movs: 8696 /* We know we have low registers at this point. 8697 Generate ADD Rd, Rs, #0. 
*/ 8698 inst.instruction = T_OPCODE_ADD_I3; 8699 inst.instruction |= inst.operands[0].reg; 8700 inst.instruction |= inst.operands[1].reg << 3; 8701 break; 8702 8703 case T_MNEM_cmp: 8704 if (low_regs) 8705 { 8706 inst.instruction = T_OPCODE_CMP_LR; 8707 inst.instruction |= inst.operands[0].reg; 8708 inst.instruction |= inst.operands[1].reg << 3; 8709 } 8710 else 8711 { 8712 inst.instruction = T_OPCODE_CMP_HR; 8713 inst.instruction |= (inst.operands[0].reg & 0x8) << 4; 8714 inst.instruction |= (inst.operands[0].reg & 0x7); 8715 inst.instruction |= inst.operands[1].reg << 3; 8716 } 8717 break; 8718 } 8719 return; 8720 } 8721 8722 inst.instruction = THUMB_OP16 (inst.instruction); 8723 if (inst.operands[1].isreg) 8724 { 8725 if (inst.operands[0].reg < 8 && inst.operands[1].reg < 8) 8726 { 8727 /* A move of two lowregs is encoded as ADD Rd, Rs, #0 8728 since a MOV instruction produces unpredictable results. */ 8729 if (inst.instruction == T_OPCODE_MOV_I8) 8730 inst.instruction = T_OPCODE_ADD_I3; 8731 else 8732 inst.instruction = T_OPCODE_CMP_LR; 8733 8734 inst.instruction |= inst.operands[0].reg; 8735 inst.instruction |= inst.operands[1].reg << 3; 8736 } 8737 else 8738 { 8739 if (inst.instruction == T_OPCODE_MOV_I8) 8740 inst.instruction = T_OPCODE_MOV_HR; 8741 else 8742 inst.instruction = T_OPCODE_CMP_HR; 8743 do_t_cpy (); 8744 } 8745 } 8746 else 8747 { 8748 constraint (inst.operands[0].reg > 7, 8749 _("only lo regs allowed with immediate")); 8750 inst.instruction |= inst.operands[0].reg << 8; 8751 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM; 8752 } 8753} 8754 8755static void 8756do_t_mov16 (void) 8757{ 8758 bfd_vma imm; 8759 bfd_boolean top; 8760 8761 top = (inst.instruction & 0x00800000) != 0; 8762 if (inst.reloc.type == BFD_RELOC_ARM_MOVW) 8763 { 8764 constraint (top, _(":lower16: not allowed this instruction")); 8765 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW; 8766 } 8767 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT) 8768 { 8769 constraint (!top, _(":upper16: not 
allowed this instruction")); 8770 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT; 8771 } 8772 8773 inst.instruction |= inst.operands[0].reg << 8; 8774 if (inst.reloc.type == BFD_RELOC_UNUSED) 8775 { 8776 imm = inst.reloc.exp.X_add_number; 8777 inst.instruction |= (imm & 0xf000) << 4; 8778 inst.instruction |= (imm & 0x0800) << 15; 8779 inst.instruction |= (imm & 0x0700) << 4; 8780 inst.instruction |= (imm & 0x00ff); 8781 } 8782} 8783 8784static void 8785do_t_mvn_tst (void) 8786{ 8787 if (unified_syntax) 8788 { 8789 int r0off = (inst.instruction == T_MNEM_mvn 8790 || inst.instruction == T_MNEM_mvns) ? 8 : 16; 8791 bfd_boolean narrow; 8792 8793 if (inst.size_req == 4 8794 || inst.instruction > 0xffff 8795 || inst.operands[1].shifted 8796 || inst.operands[0].reg > 7 || inst.operands[1].reg > 7) 8797 narrow = FALSE; 8798 else if (inst.instruction == T_MNEM_cmn) 8799 narrow = TRUE; 8800 else if (THUMB_SETS_FLAGS (inst.instruction) 8801 && (inst.cond == COND_ALWAYS || inst.instruction != T_MNEM_tst)) 8802 narrow = (current_it_mask == 0); 8803 else 8804 narrow = (current_it_mask != 0); 8805 8806 if (!inst.operands[1].isreg) 8807 { 8808 /* For an immediate, we always generate a 32-bit opcode; 8809 section relaxation will shrink it later if possible. */ 8810 if (inst.instruction < 0xffff) 8811 inst.instruction = THUMB_OP32 (inst.instruction); 8812 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000; 8813 inst.instruction |= inst.operands[0].reg << r0off; 8814 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE; 8815 } 8816 else 8817 { 8818 /* See if we can do this with a 16-bit instruction. 
*/ 8819 if (narrow) 8820 { 8821 inst.instruction = THUMB_OP16 (inst.instruction); 8822 inst.instruction |= inst.operands[0].reg; 8823 inst.instruction |= inst.operands[1].reg << 3; 8824 } 8825 else 8826 { 8827 constraint (inst.operands[1].shifted 8828 && inst.operands[1].immisreg, 8829 _("shift must be constant")); 8830 if (inst.instruction < 0xffff) 8831 inst.instruction = THUMB_OP32 (inst.instruction); 8832 inst.instruction |= inst.operands[0].reg << r0off; 8833 encode_thumb32_shifted_operand (1); 8834 } 8835 } 8836 } 8837 else 8838 { 8839 constraint (inst.instruction > 0xffff 8840 || inst.instruction == T_MNEM_mvns, BAD_THUMB32); 8841 constraint (!inst.operands[1].isreg || inst.operands[1].shifted, 8842 _("unshifted register required")); 8843 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7, 8844 BAD_HIREG); 8845 8846 inst.instruction = THUMB_OP16 (inst.instruction); 8847 inst.instruction |= inst.operands[0].reg; 8848 inst.instruction |= inst.operands[1].reg << 3; 8849 } 8850} 8851 8852static void 8853do_t_mrs (void) 8854{ 8855 int flags; 8856 8857 if (do_vfp_nsyn_mrs () == SUCCESS) 8858 return; 8859 8860 flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT); 8861 if (flags == 0) 8862 { 8863 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m), 8864 _("selected processor does not support " 8865 "requested special purpose register")); 8866 } 8867 else 8868 { 8869 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1), 8870 _("selected processor does not support " 8871 "requested special purpose register %x")); 8872 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. 
*/ 8873 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f), 8874 _("'CPSR' or 'SPSR' expected")); 8875 } 8876 8877 inst.instruction |= inst.operands[0].reg << 8; 8878 inst.instruction |= (flags & SPSR_BIT) >> 2; 8879 inst.instruction |= inst.operands[1].imm & 0xff; 8880} 8881 8882static void 8883do_t_msr (void) 8884{ 8885 int flags; 8886 8887 if (do_vfp_nsyn_msr () == SUCCESS) 8888 return; 8889 8890 constraint (!inst.operands[1].isreg, 8891 _("Thumb encoding does not support an immediate here")); 8892 flags = inst.operands[0].imm; 8893 if (flags & ~0xff) 8894 { 8895 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1), 8896 _("selected processor does not support " 8897 "requested special purpose register")); 8898 } 8899 else 8900 { 8901 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m), 8902 _("selected processor does not support " 8903 "requested special purpose register")); 8904 flags |= PSR_f; 8905 } 8906 inst.instruction |= (flags & SPSR_BIT) >> 2; 8907 inst.instruction |= (flags & ~SPSR_BIT) >> 8; 8908 inst.instruction |= (flags & 0xff); 8909 inst.instruction |= inst.operands[1].reg << 16; 8910} 8911 8912static void 8913do_t_mul (void) 8914{ 8915 if (!inst.operands[2].present) 8916 inst.operands[2].reg = inst.operands[0].reg; 8917 8918 /* There is no 32-bit MULS and no unconditional 16-bit MUL. 
*/ 8919 if (unified_syntax && inst.instruction == T_MNEM_mul 8920 && (inst.cond == COND_ALWAYS || inst.operands[0].reg > 7 8921 || inst.operands[1].reg > 7 || inst.operands[2].reg > 7 8922 || (inst.operands[0].reg != inst.operands[2].reg && 8923 inst.operands[0].reg != inst.operands[1].reg))) 8924 8925 { 8926 inst.instruction = THUMB_OP32 (inst.instruction); 8927 inst.instruction |= inst.operands[0].reg << 8; 8928 inst.instruction |= inst.operands[1].reg << 16; 8929 inst.instruction |= inst.operands[2].reg << 0; 8930 } 8931 else 8932 { 8933 constraint (!unified_syntax 8934 && inst.instruction == T_MNEM_muls, BAD_THUMB32); 8935 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7, 8936 BAD_HIREG); 8937 8938 inst.instruction = THUMB_OP16 (inst.instruction); 8939 inst.instruction |= inst.operands[0].reg; 8940 8941 if (inst.operands[0].reg == inst.operands[1].reg) 8942 inst.instruction |= inst.operands[2].reg << 3; 8943 else if (inst.operands[0].reg == inst.operands[2].reg) 8944 inst.instruction |= inst.operands[1].reg << 3; 8945 else 8946 constraint (1, _("dest must overlap one source register")); 8947 } 8948} 8949 8950static void 8951do_t_mull (void) 8952{ 8953 inst.instruction |= inst.operands[0].reg << 12; 8954 inst.instruction |= inst.operands[1].reg << 8; 8955 inst.instruction |= inst.operands[2].reg << 16; 8956 inst.instruction |= inst.operands[3].reg; 8957 8958 if (inst.operands[0].reg == inst.operands[1].reg) 8959 as_tsktsk (_("rdhi and rdlo must be different")); 8960} 8961 8962static void 8963do_t_nop (void) 8964{ 8965 if (unified_syntax && (cpu_variant.core & ARM_EXT_V6T2) == ARM_EXT_V6T2) 8966 { 8967 if (inst.size_req == 4 || inst.operands[0].imm > 15) 8968 { 8969 inst.instruction = THUMB_OP32 (inst.instruction); 8970 inst.instruction |= inst.operands[0].imm; 8971 } 8972 else 8973 { 8974 inst.instruction = THUMB_OP16 (inst.instruction); 8975 inst.instruction |= inst.operands[0].imm << 4; 8976 } 8977 } 8978 else 8979 { 8980 constraint 
(inst.operands[0].present, 8981 _("Thumb does not support NOP with hints")); 8982 inst.instruction = 0x46c0; 8983 } 8984} 8985 8986static void 8987do_t_neg (void) 8988{ 8989 if (unified_syntax) 8990 { 8991 bfd_boolean narrow; 8992 8993 if (THUMB_SETS_FLAGS (inst.instruction)) 8994 narrow = (current_it_mask == 0); 8995 else 8996 narrow = (current_it_mask != 0); 8997 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7) 8998 narrow = FALSE; 8999 if (inst.size_req == 4) 9000 narrow = FALSE; 9001 9002 if (!narrow) 9003 { 9004 inst.instruction = THUMB_OP32 (inst.instruction); 9005 inst.instruction |= inst.operands[0].reg << 8; 9006 inst.instruction |= inst.operands[1].reg << 16; 9007 } 9008 else 9009 { 9010 inst.instruction = THUMB_OP16 (inst.instruction); 9011 inst.instruction |= inst.operands[0].reg; 9012 inst.instruction |= inst.operands[1].reg << 3; 9013 } 9014 } 9015 else 9016 { 9017 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7, 9018 BAD_HIREG); 9019 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32); 9020 9021 inst.instruction = THUMB_OP16 (inst.instruction); 9022 inst.instruction |= inst.operands[0].reg; 9023 inst.instruction |= inst.operands[1].reg << 3; 9024 } 9025} 9026 9027static void 9028do_t_orn (void) 9029{ 9030 int Rs; 9031 9032 Rs = (inst.operands[1].present 9033 ? 
inst.operands[1].reg /* Rd, Rs, foo */ 9034 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */ 9035 9036 if (!inst.operands[2].isreg) 9037 { 9038 inst.instruction = 0xf0600000 | (0x00100000 & inst.instruction); 9039 inst.instruction |= inst.operands[0].reg << 8; 9040 inst.instruction |= Rs << 16; 9041 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE; 9042 } 9043 else 9044 { 9045 constraint (inst.operands[2].shifted 9046 && inst.operands[2].immisreg, 9047 _("shift must be constant")); 9048 inst.instruction |= inst.operands[0].reg << 8; 9049 inst.instruction |= Rs << 16; 9050 encode_thumb32_shifted_operand (2); 9051 } 9052} 9053 9054static void 9055do_t_pkhbt (void) 9056{ 9057 inst.instruction |= inst.operands[0].reg << 8; 9058 inst.instruction |= inst.operands[1].reg << 16; 9059 inst.instruction |= inst.operands[2].reg; 9060 if (inst.operands[3].present) 9061 { 9062 unsigned int val = inst.reloc.exp.X_add_number; 9063 constraint (inst.reloc.exp.X_op != O_constant, 9064 _("expression too complex")); 9065 inst.instruction |= (val & 0x1c) << 10; 9066 inst.instruction |= (val & 0x03) << 6; 9067 } 9068} 9069 9070static void 9071do_t_pkhtb (void) 9072{ 9073 if (!inst.operands[3].present) 9074 inst.instruction &= ~0x00000020; 9075 do_t_pkhbt (); 9076} 9077 9078static void 9079do_t_pld (void) 9080{ 9081 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE); 9082} 9083 9084static void 9085do_t_push_pop (void) 9086{ 9087 unsigned mask; 9088 9089 constraint (inst.operands[0].writeback, 9090 _("push/pop do not support {reglist}^")); 9091 constraint (inst.reloc.type != BFD_RELOC_UNUSED, 9092 _("expression too complex")); 9093 9094 mask = inst.operands[0].imm; 9095 if ((mask & ~0xff) == 0) 9096 inst.instruction = THUMB_OP16 (inst.instruction) | mask; 9097 else if ((inst.instruction == T_MNEM_push 9098 && (mask & ~0xff) == 1 << REG_LR) 9099 || (inst.instruction == T_MNEM_pop 9100 && (mask & ~0xff) == 1 << REG_PC)) 9101 { 9102 inst.instruction = THUMB_OP16 
(inst.instruction); 9103 inst.instruction |= THUMB_PP_PC_LR; 9104 inst.instruction |= mask & 0xff; 9105 } 9106 else if (unified_syntax) 9107 { 9108 inst.instruction = THUMB_OP32 (inst.instruction); 9109 encode_thumb2_ldmstm(13, mask, TRUE); 9110 } 9111 else 9112 { 9113 inst.error = _("invalid register list to push/pop instruction"); 9114 return; 9115 } 9116} 9117 9118static void 9119do_t_rbit (void) 9120{ 9121 inst.instruction |= inst.operands[0].reg << 8; 9122 /* Rm is bits 3-0 is in of *both* 16-bit halves of the opcode */ 9123 inst.instruction |= inst.operands[1].reg; 9124 inst.instruction |= inst.operands[1].reg << 16; 9125} 9126 9127static void 9128do_t_rd_rm (void) 9129{ 9130 inst.instruction |= inst.operands[0].reg << 8; 9131 inst.instruction |= inst.operands[1].reg; 9132} 9133 9134static void 9135do_t_rd_rm_rn (void) 9136{ 9137 inst.instruction |= inst.operands[0].reg << 8; 9138 inst.instruction |= inst.operands[1].reg; 9139 inst.instruction |= inst.operands[2].reg << 16; 9140} 9141 9142static void 9143do_t_rev (void) 9144{ 9145 if (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7 9146 && inst.size_req != 4) 9147 { 9148 inst.instruction = THUMB_OP16 (inst.instruction); 9149 inst.instruction |= inst.operands[0].reg; 9150 inst.instruction |= inst.operands[1].reg << 3; 9151 } 9152 else if (unified_syntax) 9153 { 9154 inst.instruction = THUMB_OP32 (inst.instruction); 9155 inst.instruction |= inst.operands[0].reg << 8; 9156 inst.instruction |= inst.operands[1].reg << 16; 9157 inst.instruction |= inst.operands[1].reg; 9158 } 9159 else 9160 inst.error = BAD_HIREG; 9161} 9162 9163static void 9164do_t_rsb (void) 9165{ 9166 int Rd, Rs; 9167 9168 Rd = inst.operands[0].reg; 9169 Rs = (inst.operands[1].present 9170 ? 
inst.operands[1].reg /* Rd, Rs, foo */ 9171 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */ 9172 9173 inst.instruction |= Rd << 8; 9174 inst.instruction |= Rs << 16; 9175 if (!inst.operands[2].isreg) 9176 { 9177 bfd_boolean narrow; 9178 9179 if ((inst.instruction & 0x00100000) != 0) 9180 narrow = (current_it_mask == 0); 9181 else 9182 narrow = (current_it_mask != 0); 9183 9184 if (Rd > 7 || Rs > 7) 9185 narrow = FALSE; 9186 9187 if (inst.size_req == 4 || !unified_syntax) 9188 narrow = FALSE; 9189 9190 if (inst.reloc.exp.X_op != O_constant 9191 || inst.reloc.exp.X_add_number != 0) 9192 narrow = FALSE; 9193 9194 /* Turn rsb #0 into 16-bit neg. We should probably do this via 9195 relaxation, but it doesn't seem worth the hassle. */ 9196 if (narrow) 9197 { 9198 inst.reloc.type = BFD_RELOC_UNUSED; 9199 inst.instruction = THUMB_OP16 (T_MNEM_negs); 9200 inst.instruction |= Rs << 3; 9201 inst.instruction |= Rd; 9202 } 9203 else 9204 { 9205 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000; 9206 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE; 9207 } 9208 } 9209 else 9210 encode_thumb32_shifted_operand (2); 9211} 9212 9213static void 9214do_t_setend (void) 9215{ 9216 constraint (current_it_mask, BAD_NOT_IT); 9217 if (inst.operands[0].imm) 9218 inst.instruction |= 0x8; 9219} 9220 9221static void 9222do_t_shift (void) 9223{ 9224 if (!inst.operands[1].present) 9225 inst.operands[1].reg = inst.operands[0].reg; 9226 9227 if (unified_syntax) 9228 { 9229 bfd_boolean narrow; 9230 int shift_kind; 9231 9232 switch (inst.instruction) 9233 { 9234 case T_MNEM_asr: 9235 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break; 9236 case T_MNEM_lsl: 9237 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break; 9238 case T_MNEM_lsr: 9239 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break; 9240 case T_MNEM_ror: 9241 case T_MNEM_rors: shift_kind = SHIFT_ROR; break; 9242 default: abort (); 9243 } 9244 9245 if (THUMB_SETS_FLAGS (inst.instruction)) 9246 narrow = (current_it_mask == 0); 9247 else 
	narrow = (current_it_mask != 0);
      /* The 16-bit encodings only reach the low registers.  */
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* There is no 16-bit immediate-ROR encoding.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      /* The 16-bit register-shift form is two-operand (Rd == Rn) and
	 low-register only.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      if (!narrow)
	{
	  /* 32-bit Thumb-2 encodings.  */
	  if (inst.operands[2].isreg)
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;
	    }
	  else
	    {
	      /* Immediate shifts are encoded as MOV{S} Rd, Rn, <shift> #imm
		 in Thumb-2.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  /* 16-bit encodings.  */
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;
	    }
	  else
	    {
	      /* Immediate 16-bit form; the shift amount is resolved via a
		 THUMB_SHIFT fixup.  */
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-unified (divided) syntax: only the 16-bit forms exist.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)	/* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction =
			     T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    /* Immediate ROR has no 16-bit encoding; reject it.  */
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}

/* Encode a three-register Thumb-2 SIMD-style instruction: Rd, Rn, Rm.  */

static void
do_t_simd (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
}

/* Encode SMC with a 16-bit constant immediate, which is scattered over
   three fields of the 32-bit encoding.  */

static void
do_t_smc (void)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  constraint (inst.reloc.exp.X_op != O_constant,
	      _("expression too complex"));
  /* The immediate is fully encoded here; no fixup remains.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
}

/* Encode SSAT: Rd, #imm, Rn {, shift}.  The saturate position is
   encoded as imm - 1.  */

static void
do_t_ssat (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm - 1;
  inst.instruction |= inst.operands[2].reg << 16;

  if (inst.operands[3].present)
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (inst.reloc.exp.X_add_number != 0)
	{
	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit */
	  /* The shift amount is split: bits [4:2] go to [14:12],
	     bits [1:0] go to [7:6].  */
	  inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
	  inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
}

/* Encode SSAT16: Rd, #imm, Rn.  Saturate position is again imm - 1.  */

static void
do_t_ssat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm - 1;
  inst.instruction |= inst.operands[2].reg << 16;
}

/* Encode STREX: Rd, Rt, [Rn {, #imm}].  Only a plain pre-indexed
   immediate-offset addressing mode (no writeback, no register offset,
   no shift, no negative offset) is accepted.  */

static void
do_t_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind ||
		 inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The offset immediate is resolved via a fixup.  */
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}

/* Encode STREXD.  When the second transfer register is omitted it
   defaults to the register following the first one.  */

static void
do_t_strexd (void)
{
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  /* Operand 0 must not overlap any other operand, and the two transfer
     registers must differ from each other.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg
	      || inst.operands[0].reg == inst.operands[3].reg
	      || inst.operands[1].reg == inst.operands[2].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 16;
}

/* Encode SXTAH-style instructions: Rd, Rn, Rm {, ROR #imm}.  The
   rotation field lands in bits [5:4]; the imm value is assumed to be
   pre-scaled by the operand parser -- TODO confirm against the parser.  */

static void
do_t_sxtah (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 4;
}

/* Encode SXTH-style instructions: Rd, Rm {, ROR #imm}.  Prefer the
   16-bit encoding when both registers are low, no 32-bit size was
   requested and there is no rotation; otherwise use the 32-bit
   encoding (unified syntax only).  */

static void
do_t_sxth (void)
{
  if (inst.instruction <= 0xffff && inst.size_req != 4
      && inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      /* 16-bit form.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
  else if (unified_syntax)
    {
      /* 32-bit form; rotation goes to bits [5:4].  */
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      /* Divided syntax only has the 16-bit encoding available.  */
      constraint (inst.operands[2].present && inst.operands[2].imm
		  != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}

/* Encode SWI/SVC: the immediate is applied later via a fixup.  */

static void
do_t_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
}

/* Encode TBB/TBH (table branch): [Rn, Rm{, LSL #1}].  Bit 4 of the
   opcode distinguishes the halfword variant.  */

static void
do_t_tb (void)
{
  int half;

  half = (inst.instruction & 0x10) != 0;
  /* Within an IT block, only a mask of 0x10 is accepted — presumably
     this means "last instruction in the block"; confirm against the
     IT-state handling elsewhere in this file.  */
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));
  constraint (inst.operands[0].imm == 15,
	      _("PC is not a valid index register"));
  /* Only the halfword form may carry a shifted index (presumably the
     mandatory LSL #1 — validated elsewhere; TODO confirm).  */
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (inst.operands[0].reg << 16) | inst.operands[0].imm;
}

/* Encode USAT: Rd, #imm, Rn {, shift}.  Unlike SSAT, the saturate
   position is encoded directly (no minus one).  */

static void
do_t_usat (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm;
  inst.instruction |= inst.operands[2].reg << 16;

  if (inst.operands[3].present)
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));
      if (inst.reloc.exp.X_add_number != 0)
	{
	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit */

	  /* Shift amount split across two fields, as for SSAT.  */
	  inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
	  inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
}

/* Encode USAT16: Rd, #imm, Rn.  */

static void
do_t_usat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm;
  inst.instruction |= inst.operands[2].reg << 16;
}

#endif /* INSNS_TABLE_ONLY */

/* Neon instruction encoder helpers.  */

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.
*/ 9533#define N_INV -1u 9534 9535struct neon_tab_entry 9536{ 9537 unsigned integer; 9538 unsigned float_or_poly; 9539 unsigned scalar_or_imm; 9540}; 9541 9542/* Map overloaded Neon opcodes to their respective encodings. */ 9543#define NEON_ENC_TAB \ 9544 X(vabd, 0x0000700, 0x1200d00, N_INV), \ 9545 X(vmax, 0x0000600, 0x0000f00, N_INV), \ 9546 X(vmin, 0x0000610, 0x0200f00, N_INV), \ 9547 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \ 9548 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \ 9549 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \ 9550 X(vadd, 0x0000800, 0x0000d00, N_INV), \ 9551 X(vsub, 0x1000800, 0x0200d00, N_INV), \ 9552 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \ 9553 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \ 9554 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \ 9555 /* Register variants of the following two instructions are encoded as 9556 vcge / vcgt with the operands reversed. */ \ 9557 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \ 9558 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \ 9559 X(vfma, N_INV, 0x0000c10, N_INV), \ 9560 X(vfms, N_INV, 0x0200c10, N_INV), \ 9561 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \ 9562 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \ 9563 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \ 9564 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. 
*/ \ 9565 X(vmlal, 0x0800800, N_INV, 0x0800240), \ 9566 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \ 9567 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \ 9568 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \ 9569 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \ 9570 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \ 9571 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \ 9572 X(vshl, 0x0000400, N_INV, 0x0800510), \ 9573 X(vqshl, 0x0000410, N_INV, 0x0800710), \ 9574 X(vand, 0x0000110, N_INV, 0x0800030), \ 9575 X(vbic, 0x0100110, N_INV, 0x0800030), \ 9576 X(veor, 0x1000110, N_INV, N_INV), \ 9577 X(vorn, 0x0300110, N_INV, 0x0800010), \ 9578 X(vorr, 0x0200110, N_INV, 0x0800010), \ 9579 X(vmvn, 0x1b00580, N_INV, 0x0800030), \ 9580 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \ 9581 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \ 9582 X(vcvtt, 0x1b30600, N_INV, 0x0800e10), /* single, half-precision.*/ \ 9583 X(vcvtb, 0x1b30600, N_INV, 0x0800e10), /* single, half-precision.*/ \ 9584 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \ 9585 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. 
*/ \ 9586 X(vst1, 0x0000000, 0x0800000, N_INV), \ 9587 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \ 9588 X(vst2, 0x0000100, 0x0800100, N_INV), \ 9589 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \ 9590 X(vst3, 0x0000200, 0x0800200, N_INV), \ 9591 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \ 9592 X(vst4, 0x0000300, 0x0800300, N_INV), \ 9593 X(vmovn, 0x1b20200, N_INV, N_INV), \ 9594 X(vtrn, 0x1b20080, N_INV, N_INV), \ 9595 X(vqmovn, 0x1b20200, N_INV, N_INV), \ 9596 X(vqmovun, 0x1b20240, N_INV, N_INV), \ 9597 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \ 9598 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \ 9599 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \ 9600 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \ 9601 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \ 9602 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \ 9603 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV) 9604 9605enum neon_opc 9606{ 9607#define X(OPC,I,F,S) N_MNEM_##OPC 9608NEON_ENC_TAB 9609#undef X 9610}; 9611 9612static const struct neon_tab_entry neon_enc_tab[] = 9613{ 9614#define X(OPC,I,F,S) { (I), (F), (S) } 9615NEON_ENC_TAB 9616#undef X 9617}; 9618 9619#define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer) 9620#define NEON_ENC_ARMREG(X) (neon_enc_tab[(X) & 0x0fffffff].integer) 9621#define NEON_ENC_POLY(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly) 9622#define NEON_ENC_FLOAT(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly) 9623#define NEON_ENC_SCALAR(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm) 9624#define NEON_ENC_IMMED(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm) 9625#define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer) 9626#define NEON_ENC_LANE(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly) 9627#define NEON_ENC_DUP(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm) 9628#define NEON_ENC_SINGLE(X) \ 9629 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000)) 9630#define NEON_ENC_DOUBLE(X) \ 9631 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000)) 9632 9633/* Define shapes for 
instruction operands. The following mnemonic characters 9634 are used in this table: 9635 9636 F - VFP S<n> register 9637 D - Neon D<n> register 9638 Q - Neon Q<n> register 9639 I - Immediate 9640 S - Scalar 9641 R - ARM register 9642 L - D<n> register list 9643 9644 This table is used to generate various data: 9645 - enumerations of the form NS_DDR to be used as arguments to 9646 neon_select_shape. 9647 - a table classifying shapes into single, double, quad, mixed. 9648 - a table used to drive neon_select_shape. 9649*/ 9650 9651#define NEON_SHAPE_DEF \ 9652 X(3, (D, D, D), DOUBLE), \ 9653 X(3, (Q, Q, Q), QUAD), \ 9654 X(3, (D, D, I), DOUBLE), \ 9655 X(3, (Q, Q, I), QUAD), \ 9656 X(3, (D, D, S), DOUBLE), \ 9657 X(3, (Q, Q, S), QUAD), \ 9658 X(2, (D, D), DOUBLE), \ 9659 X(2, (Q, Q), QUAD), \ 9660 X(2, (D, S), DOUBLE), \ 9661 X(2, (Q, S), QUAD), \ 9662 X(2, (D, R), DOUBLE), \ 9663 X(2, (Q, R), QUAD), \ 9664 X(2, (D, I), DOUBLE), \ 9665 X(2, (Q, I), QUAD), \ 9666 X(3, (D, L, D), DOUBLE), \ 9667 X(2, (D, Q), MIXED), \ 9668 X(2, (Q, D), MIXED), \ 9669 X(3, (D, Q, I), MIXED), \ 9670 X(3, (Q, D, I), MIXED), \ 9671 X(3, (Q, D, D), MIXED), \ 9672 X(3, (D, Q, Q), MIXED), \ 9673 X(3, (Q, Q, D), MIXED), \ 9674 X(3, (Q, D, S), MIXED), \ 9675 X(3, (D, Q, S), MIXED), \ 9676 X(4, (D, D, D, I), DOUBLE), \ 9677 X(4, (Q, Q, Q, I), QUAD), \ 9678 X(2, (F, F), SINGLE), \ 9679 X(3, (F, F, F), SINGLE), \ 9680 X(2, (F, I), SINGLE), \ 9681 X(2, (F, D), MIXED), \ 9682 X(2, (D, F), MIXED), \ 9683 X(3, (F, F, I), MIXED), \ 9684 X(4, (R, R, F, F), SINGLE), \ 9685 X(4, (F, F, R, R), SINGLE), \ 9686 X(3, (D, R, R), DOUBLE), \ 9687 X(3, (R, R, D), DOUBLE), \ 9688 X(2, (S, R), SINGLE), \ 9689 X(2, (R, S), SINGLE), \ 9690 X(2, (F, R), SINGLE), \ 9691 X(2, (R, F), SINGLE) 9692 9693#define S2(A,B) NS_##A##B 9694#define S3(A,B,C) NS_##A##B##C 9695#define S4(A,B,C,D) NS_##A##B##C##D 9696 9697#define X(N, L, C) S##N L 9698 9699enum neon_shape 9700{ 9701 NEON_SHAPE_DEF, 9702 NS_NULL 9703}; 9704 9705#undef 
X 9706#undef S2 9707#undef S3 9708#undef S4 9709 9710enum neon_shape_class 9711{ 9712 SC_SINGLE, 9713 SC_DOUBLE, 9714 SC_QUAD, 9715 SC_MIXED 9716}; 9717 9718#define X(N, L, C) SC_##C 9719 9720#ifndef INSNS_TABLE_ONLY 9721 9722static enum neon_shape_class neon_shape_class[] = 9723{ 9724 NEON_SHAPE_DEF 9725}; 9726 9727#undef X 9728 9729enum neon_shape_el 9730{ 9731 SE_F, 9732 SE_D, 9733 SE_Q, 9734 SE_I, 9735 SE_S, 9736 SE_R, 9737 SE_L 9738}; 9739 9740/* Register widths of above. */ 9741static unsigned neon_shape_el_size[] = 9742{ 9743 32, 9744 64, 9745 128, 9746 0, 9747 32, 9748 32, 9749 0 9750}; 9751 9752struct neon_shape_info 9753{ 9754 unsigned els; 9755 enum neon_shape_el el[NEON_MAX_TYPE_ELS]; 9756}; 9757 9758#define S2(A,B) { SE_##A, SE_##B } 9759#define S3(A,B,C) { SE_##A, SE_##B, SE_##C } 9760#define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D } 9761 9762#define X(N, L, C) { N, S##N L } 9763 9764static struct neon_shape_info neon_shape_tab[] = 9765{ 9766 NEON_SHAPE_DEF 9767}; 9768 9769#undef X 9770#undef S2 9771#undef S3 9772#undef S4 9773 9774#endif /* INSNS_TABLE_ONLY */ 9775 9776/* Bit masks used in type checking given instructions. 9777 'N_EQK' means the type must be the same as (or based on in some way) the key 9778 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is 9779 set, various other bits can be set as well in order to modify the meaning of 9780 the type constraint. 
*/ 9781 9782enum neon_type_mask 9783{ 9784 N_S8 = 0x000001, 9785 N_S16 = 0x000002, 9786 N_S32 = 0x000004, 9787 N_S64 = 0x000008, 9788 N_U8 = 0x000010, 9789 N_U16 = 0x000020, 9790 N_U32 = 0x000040, 9791 N_U64 = 0x000080, 9792 N_I8 = 0x000100, 9793 N_I16 = 0x000200, 9794 N_I32 = 0x000400, 9795 N_I64 = 0x000800, 9796 N_8 = 0x001000, 9797 N_16 = 0x002000, 9798 N_32 = 0x004000, 9799 N_64 = 0x008000, 9800 N_P8 = 0x010000, 9801 N_P16 = 0x020000, 9802 N_F16 = 0x040000, 9803 N_F32 = 0x080000, 9804 N_F64 = 0x100000, 9805 N_KEY = 0x200000, /* key element (main type specifier). */ 9806 N_EQK = 0x400000, /* given operand has the same type & size as the key. */ 9807 N_VFP = 0x800000, /* VFP mode: operand size must match register width. */ 9808 N_DBL = 0x000001, /* if N_EQK, this operand is twice the size. */ 9809 N_HLF = 0x000002, /* if N_EQK, this operand is half the size. */ 9810 N_SGN = 0x000004, /* if N_EQK, this operand is forced to be signed. */ 9811 N_UNS = 0x000008, /* if N_EQK, this operand is forced to be unsigned. */ 9812 N_INT = 0x000010, /* if N_EQK, this operand is forced to be integer. */ 9813 N_FLT = 0x000020, /* if N_EQK, this operand is forced to be float. */ 9814 N_SIZ = 0x000040, /* if N_EQK, this operand is forced to be size-only. */ 9815 N_UTYP = 0, 9816 N_MAX_NONSPECIAL = N_F64 9817}; 9818 9819#define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ) 9820 9821#define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64) 9822#define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32) 9823#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64) 9824#define N_SUF_32 (N_SU_32 | N_F32) 9825#define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64) 9826#define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32) 9827 9828/* Pass this as the first type argument to neon_check_type to ignore types 9829 altogether. 
   */
#define N_IGNORE_TYPE (N_KEY | N_EQK)

#ifndef INSNS_TABLE_ONLY

/* Select a "shape" for the current instruction (describing register types or
   sizes) from a list of alternatives. Return NS_NULL if the current instruction
   doesn't fit. For non-polymorphic shapes, checking is usually done as a
   function of operand parsing, so this function doesn't need to be called.
   Shapes should be listed in order of decreasing length.  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape in turn until one matches the parsed
     operands.  */
  for (; shape != NS_NULL; shape = va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	    /* VFP single-precision register.  */
	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad))
		matches = 0;
	      break;

	    /* Neon D register.  */
	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    /* ARM core register.  */
	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    /* Neon Q register.  */
	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    /* Immediate: neither a register nor a scalar.  */
	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    /* Scalar operand.  */
	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    /* Register lists are not distinguished here.  */
	    case SE_L:
	      break;
	    }
	}
      if (matches)
	break;
    }

  va_end (ap);

  /* Complain only if the caller actually supplied alternatives.  */
  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}

/* True if SHAPE is predominantly a quadword operation (most of the time, this
   means the Q bit should be set).  */

static int
neon_quad (enum neon_shape shape)
{
  return neon_shape_class[shape] == SC_QUAD;
}

/* Apply the modifier bits of TYPEBITS (meaningful only alongside N_EQK)
   to the key element type/size held in *G_TYPE / *G_SIZE.  */

static void
neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
		       unsigned *g_size)
{
  /* Allow modification to be made to types which are constrained to be
     based on the key element, based on bits set alongside N_EQK.  */
  if ((typebits & N_EQK) != 0)
    {
      if ((typebits & N_HLF) != 0)
	*g_size /= 2;
      else if ((typebits & N_DBL) != 0)
	*g_size *= 2;
      if ((typebits & N_SGN) != 0)
	*g_type = NT_signed;
      else if ((typebits & N_UNS) != 0)
	*g_type = NT_unsigned;
      else if ((typebits & N_INT) != 0)
	*g_type = NT_integer;
      else if ((typebits & N_FLT) != 0)
	*g_type = NT_float;
      else if ((typebits & N_SIZ) != 0)
	*g_type = NT_untyped;
    }
}

/* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
   operand type, i.e. the single type specified in a Neon instruction when it
   is the only one given.
*/ 9965 9966static struct neon_type_el 9967neon_type_promote (struct neon_type_el *key, unsigned thisarg) 9968{ 9969 struct neon_type_el dest = *key; 9970 9971 assert ((thisarg & N_EQK) != 0); 9972 9973 neon_modify_type_size (thisarg, &dest.type, &dest.size); 9974 9975 return dest; 9976} 9977 9978/* Convert Neon type and size into compact bitmask representation. */ 9979 9980static enum neon_type_mask 9981type_chk_of_el_type (enum neon_el_type type, unsigned size) 9982{ 9983 switch (type) 9984 { 9985 case NT_untyped: 9986 switch (size) 9987 { 9988 case 8: return N_8; 9989 case 16: return N_16; 9990 case 32: return N_32; 9991 case 64: return N_64; 9992 default: ; 9993 } 9994 break; 9995 9996 case NT_integer: 9997 switch (size) 9998 { 9999 case 8: return N_I8; 10000 case 16: return N_I16; 10001 case 32: return N_I32; 10002 case 64: return N_I64; 10003 default: ; 10004 } 10005 break; 10006 10007 case NT_float: 10008 switch (size) 10009 { 10010 case 16: return N_F16; 10011 case 32: return N_F32; 10012 case 64: return N_F64; 10013 default: ; 10014 } 10015 break; 10016 10017 case NT_poly: 10018 switch (size) 10019 { 10020 case 8: return N_P8; 10021 case 16: return N_P16; 10022 default: ; 10023 } 10024 break; 10025 10026 case NT_signed: 10027 switch (size) 10028 { 10029 case 8: return N_S8; 10030 case 16: return N_S16; 10031 case 32: return N_S32; 10032 case 64: return N_S64; 10033 default: ; 10034 } 10035 break; 10036 10037 case NT_unsigned: 10038 switch (size) 10039 { 10040 case 8: return N_U8; 10041 case 16: return N_U16; 10042 case 32: return N_U32; 10043 case 64: return N_U64; 10044 default: ; 10045 } 10046 break; 10047 10048 default: ; 10049 } 10050 10051 return N_UTYP; 10052} 10053 10054/* Convert compact Neon bitmask type representation to a type and size. Only 10055 handles the case where a single bit is set in the mask. 
*/ 10056 10057static int 10058el_type_of_type_chk (enum neon_el_type *type, unsigned *size, 10059 enum neon_type_mask mask) 10060{ 10061 if ((mask & N_EQK) != 0) 10062 return FAIL; 10063 10064 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0) 10065 *size = 8; 10066 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0) 10067 *size = 16; 10068 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0) 10069 *size = 32; 10070 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0) 10071 *size = 64; 10072 else 10073 return FAIL; 10074 10075 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0) 10076 *type = NT_signed; 10077 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0) 10078 *type = NT_unsigned; 10079 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0) 10080 *type = NT_integer; 10081 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0) 10082 *type = NT_untyped; 10083 else if ((mask & (N_P8 | N_P16)) != 0) 10084 *type = NT_poly; 10085 else if ((mask & (N_F32 | N_F64)) != 0) 10086 *type = NT_float; 10087 else 10088 return FAIL; 10089 10090 return SUCCESS; 10091} 10092 10093/* Modify a bitmask of allowed types. This is only needed for type 10094 relaxation. */ 10095 10096static unsigned 10097modify_types_allowed (unsigned allowed, unsigned mods) 10098{ 10099 unsigned size; 10100 enum neon_el_type type; 10101 unsigned destmask; 10102 int i; 10103 10104 destmask = 0; 10105 10106 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1) 10107 { 10108 if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS) 10109 { 10110 neon_modify_type_size (mods, &type, &size); 10111 destmask |= type_chk_of_el_type (type, size); 10112 } 10113 } 10114 10115 return destmask; 10116} 10117 10118/* Check type and return type classification. 
   The manual states (paraphrase): If one datatype is given, it indicates the
   type given in:
    - the second operand, if there is one
    - the operand, if there is no second operand
    - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands). All
   Neon instructions should call it before performing bit encoding.  */

static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  /* Returned on any failure, after reporting via first_error.  */
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      /* N_IGNORE_TYPE disables checking entirely.  */
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* Types on the mnemonic and on individual operands are mutually
     exclusive.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      /* One type was given for several operands: treat it as the key
	 type and promote the rest from it.  */
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Two passes: pass 0 records the key element's type/size/allowed
     mask; pass 1 validates every element against it.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if ((g_size == 8 && (types_allowed & N_8) != 0)
	      || (g_size == 16 && (types_allowed & N_16) != 0)
	      || (g_size == 32 && (types_allowed & N_32) != 0)
	      || (g_size == 64 && (types_allowed & N_64) != 0))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape = neon_shape_tab[ns].el[i];
		  unsigned regwidth = neon_shape_el_size[regshape], match;

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  /* Independent operand: it simply has to be one of the
		     allowed types.  */
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  /* Key-relative operand: it must equal the key type after
		     the N_EQK modifiers are applied.  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}

/* Neon-style VFP instruction forwarding. */

/* Thumb VFP instructions have 0xE in the condition field. */

static void
do_vfp_cond_or_thumb (void)
{
  if (thumb_mode)
    inst.instruction |= 0xe0000000;
  else
    inst.instruction |= inst.cond << 28;
}

/* Look up and encode a simple mnemonic, for use as a helper function for the
   Neon-style VFP syntax. This avoids duplication of bits of the insns table,
   etc. It is assumed that operand parsing has already been done, and that the
   operands are in the form expected by the given opcode (this isn't necessarily
   the same as the form in which they were parsed, hence some massaging must
   take place before this function is called).
   Checks current arch version against that in the looked-up opcode.
*/

static void
do_vfp_nsyn_opcode (const char *opname)
{
  const struct asm_opcode *opcode;

  opcode = hash_find (arm_ops_hsh, opname);

  /* Callers pass literal mnemonics that must exist in the opcode table;
     a failed lookup is an assembler-internal bug, not a user error.  */
  if (!opcode)
    abort ();

  /* Reject if the selected CPU lacks the FPU feature required for the
     current instruction set (Thumb vs. ARM variant).  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
              thumb_mode ? *opcode->tvariant : *opcode->avariant),
              _(BAD_FPU));

  if (thumb_mode)
    {
      inst.instruction = opcode->tvalue;
      opcode->tencode ();
    }
  else
    {
      /* ARM-mode VFP instructions are conditional: merge the parsed
         condition code into bits 31:28.  */
      inst.instruction = (inst.cond << 28) | opcode->avalue;
      opcode->aencode ();
    }
}

/* Forward Neon-syntax VADD/VSUB to the VFP single/double-precision
   add/subtract encoders, chosen by the register shape RS.  */

static void
do_vfp_nsyn_add_sub (enum neon_shape rs)
{
  /* At this point the low 28 bits hold the N_MNEM_* pseudo-opcode.  */
  int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;

  if (rs == NS_FFF)
    {
      if (is_add)
        do_vfp_nsyn_opcode ("fadds");
      else
        do_vfp_nsyn_opcode ("fsubs");
    }
  else
    {
      if (is_add)
        do_vfp_nsyn_opcode ("faddd");
      else
        do_vfp_nsyn_opcode ("fsubd");
    }
}

/* Check operand types to see if this is a VFP instruction, and if so call
   PFN ().
*/

static int
try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
{
  enum neon_shape rs;
  struct neon_type_el et;

  switch (args)
    {
    case 2:
      /* Two-operand form: all-single (S,S) or all-double (D,D).  */
      rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
      et = neon_check_type (2, rs,
        N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
      break;

    case 3:
      /* Three-operand form: (S,S,S) or (D,D,D).  */
      rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
      et = neon_check_type (3, rs,
        N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
      break;

    default:
      abort ();
    }

  if (et.type != NT_invtype)
    {
      /* The operand types matched a VFP shape: encode via PFN.  */
      pfn (rs);
      return SUCCESS;
    }
  else
    /* Not VFP after all: clear the type-check diagnostic so the caller
       can retry the instruction as a Neon encoding.  */
    inst.error = NULL;

  return FAIL;
}

/* Forward Neon-syntax VMLA/VMLS to the VFP multiply-accumulate
   encoders (fmacs/fnmacs, or the double-precision equivalents).  */

static void
do_vfp_nsyn_mla_mls (enum neon_shape rs)
{
  int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;

  if (rs == NS_FFF)
    {
      if (is_mla)
        do_vfp_nsyn_opcode ("fmacs");
      else
        do_vfp_nsyn_opcode ("fnmacs");
    }
  else
    {
      if (is_mla)
        do_vfp_nsyn_opcode ("fmacd");
      else
        do_vfp_nsyn_opcode ("fnmacd");
    }
}

/* Forward Neon-syntax VMUL to the VFP multiply encoder.  */

static void
do_vfp_nsyn_mul (enum neon_shape rs)
{
  if (rs == NS_FFF)
    do_vfp_nsyn_opcode ("fmuls");
  else
    do_vfp_nsyn_opcode ("fmuld");
}

/* Forward Neon-syntax fused multiply-accumulate to the VFP encoders;
   a mnemonic other than vfma selects the negated variant.  */

static void
do_vfp_nsyn_fma (enum neon_shape rs)
{
  int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;

  if (rs == NS_FFF)
    {
      if (is_fma)
        do_vfp_nsyn_opcode ("ffmacs");
      else
        do_vfp_nsyn_opcode ("fnfmacs");
    }
  else
    {
      if (is_fma)
        do_vfp_nsyn_opcode ("ffmacd");
      else
        do_vfp_nsyn_opcode ("fnfmacd");
    }
}

/* Forward Neon-syntax VABS/VNEG to the VFP fabs/fneg encoders; bit 7 of
   the pseudo-opcode distinguishes negate from absolute value.  */

static void
do_vfp_nsyn_abs_neg (enum neon_shape rs)
{
  int is_neg = (inst.instruction & 0x80) != 0;
  neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
10460 if (rs == NS_FF) 10461 { 10462 if (is_neg) 10463 do_vfp_nsyn_opcode ("fnegs"); 10464 else 10465 do_vfp_nsyn_opcode ("fabss"); 10466 } 10467 else 10468 { 10469 if (is_neg) 10470 do_vfp_nsyn_opcode ("fnegd"); 10471 else 10472 do_vfp_nsyn_opcode ("fabsd"); 10473 } 10474} 10475 10476/* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision 10477 insns belong to Neon, and are handled elsewhere. */ 10478 10479static void 10480do_vfp_nsyn_ldm_stm (int is_dbmode) 10481{ 10482 int is_ldm = (inst.instruction & (1 << 20)) != 0; 10483 if (is_ldm) 10484 { 10485 if (is_dbmode) 10486 do_vfp_nsyn_opcode ("fldmdbs"); 10487 else 10488 do_vfp_nsyn_opcode ("fldmias"); 10489 } 10490 else 10491 { 10492 if (is_dbmode) 10493 do_vfp_nsyn_opcode ("fstmdbs"); 10494 else 10495 do_vfp_nsyn_opcode ("fstmias"); 10496 } 10497} 10498 10499static void 10500do_vfp_nsyn_sqrt (void) 10501{ 10502 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL); 10503 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP); 10504 10505 if (rs == NS_FF) 10506 do_vfp_nsyn_opcode ("fsqrts"); 10507 else 10508 do_vfp_nsyn_opcode ("fsqrtd"); 10509} 10510 10511static void 10512do_vfp_nsyn_div (void) 10513{ 10514 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL); 10515 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP, 10516 N_F32 | N_F64 | N_KEY | N_VFP); 10517 10518 if (rs == NS_FFF) 10519 do_vfp_nsyn_opcode ("fdivs"); 10520 else 10521 do_vfp_nsyn_opcode ("fdivd"); 10522} 10523 10524static void 10525do_vfp_nsyn_fnma (void) 10526{ 10527 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL); 10528 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP, 10529 N_F32 | N_F64 | N_KEY | N_VFP); 10530 10531 if (rs == NS_FFF) 10532 do_vfp_nsyn_opcode ("ffnmas"); 10533 else 10534 do_vfp_nsyn_opcode ("ffnmad"); 10535} 10536 10537static void 10538do_vfp_nsyn_fnms (void) 10539{ 10540 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL); 10541 
neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP, 10542 N_F32 | N_F64 | N_KEY | N_VFP); 10543 10544 if (rs == NS_FFF) 10545 do_vfp_nsyn_opcode ("ffnmss"); 10546 else 10547 do_vfp_nsyn_opcode ("ffnmsd"); 10548} 10549 10550static void 10551do_vfp_nsyn_nmul (void) 10552{ 10553 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL); 10554 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP, 10555 N_F32 | N_F64 | N_KEY | N_VFP); 10556 10557 if (rs == NS_FFF) 10558 { 10559 inst.instruction = NEON_ENC_SINGLE (inst.instruction); 10560 do_vfp_sp_dyadic (); 10561 } 10562 else 10563 { 10564 inst.instruction = NEON_ENC_DOUBLE (inst.instruction); 10565 do_vfp_dp_rd_rn_rm (); 10566 } 10567 do_vfp_cond_or_thumb (); 10568} 10569 10570static void 10571do_vfp_nsyn_cmp (void) 10572{ 10573 if (inst.operands[1].isreg) 10574 { 10575 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL); 10576 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP); 10577 10578 if (rs == NS_FF) 10579 { 10580 inst.instruction = NEON_ENC_SINGLE (inst.instruction); 10581 do_vfp_sp_monadic (); 10582 } 10583 else 10584 { 10585 inst.instruction = NEON_ENC_DOUBLE (inst.instruction); 10586 do_vfp_dp_rd_rm (); 10587 } 10588 } 10589 else 10590 { 10591 enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL); 10592 neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK); 10593 10594 switch (inst.instruction & 0x0fffffff) 10595 { 10596 case N_MNEM_vcmp: 10597 inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp; 10598 break; 10599 case N_MNEM_vcmpe: 10600 inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe; 10601 break; 10602 default: 10603 abort (); 10604 } 10605 10606 if (rs == NS_FI) 10607 { 10608 inst.instruction = NEON_ENC_SINGLE (inst.instruction); 10609 do_vfp_sp_compare_z (); 10610 } 10611 else 10612 { 10613 inst.instruction = NEON_ENC_DOUBLE (inst.instruction); 10614 do_vfp_dp_rd (); 10615 } 10616 } 10617 do_vfp_cond_or_thumb (); 10618} 10619 10620static void 
nsyn_insert_sp (void)
{
  /* Shift the single parsed register-list operand into slot 1 and
     synthesize "sp!" as operand 0, so vpush/vpop can reuse the
     fstmdb/fldmia operand layout.  */
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
  inst.operands[0].reg = 13;	/* r13 is the stack pointer.  */
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].present = 1;
}

/* Encode VPUSH as a VFP store-multiple, decrement-before, with SP
   writeback; single vs. double precision chosen from the operand.  */

static void
do_vfp_nsyn_push (void)
{
  nsyn_insert_sp ();
  if (inst.operands[1].issingle)
    do_vfp_nsyn_opcode ("fstmdbs");
  else
    do_vfp_nsyn_opcode ("fstmdbd");
}

/* Encode VPOP as a VFP load-multiple, increment-after, with SP
   writeback.  */

static void
do_vfp_nsyn_pop (void)
{
  nsyn_insert_sp ();
  if (inst.operands[1].issingle)
    do_vfp_nsyn_opcode ("fldmias");
  else
    do_vfp_nsyn_opcode ("fldmiad");
}

/* Fix up Neon data-processing instructions, ORing in the correct bits for
   ARM mode or Thumb mode and moving the encoded bit 24 to bit 28.  */

static unsigned
neon_dp_fixup (unsigned i)
{
  if (thumb_mode)
    {
      /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode.  */
      if (i & (1 << 24))
        i |= 1 << 28;

      i &= ~(1 << 24);

      /* Thumb Neon data-processing prefix.  */
      i |= 0xef000000;
    }
  else
    /* ARM-mode Neon data-processing prefix.  */
    i |= 0xf2000000;

  return i;
}

/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  /* ffs() is 1-based: 8 -> 4, so subtracting 4 maps 8/16/32/64 to
     0/1/2/3.  Callers must pass one of those powers of two.  */
  return ffs (x) - 4;
}

/* Split a Neon register number into the low nibble and the fifth bit,
   the two pieces the encoders below splice into the instruction word.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)

/* Encode insns with bit pattern:

   |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
   |  U  |x |D  |size |  Rn |  Rd |x x x x|N|Q|M|x| Rm |

   SIZE is passed in bits. -1 means size field isn't changed, in case it has a
   different meaning for some instruction.
*/

static void
neon_three_same (int isquad, int ubit, int size)
{
  /* Rd in 15:12 with D in 22; Rn in 19:16 with N in 7; Rm in 3:0 with
     M in 5.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 20;

  /* Apply the ARM/Thumb prefix and relocate the U bit if needed.  */
  inst.instruction = neon_dp_fixup (inst.instruction);
}

/* Encode instructions of the form:

   |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
   |  U  |x |D |x x  |size |x x  |  Rd |x x x x x|Q|M|x| Rm |

   Don't write size if SIZE == -1.  */

static void
neon_two_same (int qbit, int ubit, int size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (qbit != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;

  /* Two-register forms carry the size in bits 19:18, not 21:20.  */
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 18;

  inst.instruction = neon_dp_fixup (inst.instruction);
}

/* Neon instruction encoders, in approximate order of appearance.
*/ 10734 10735static void 10736do_neon_dyadic_i_su (void) 10737{ 10738 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); 10739 struct neon_type_el et = neon_check_type (3, rs, 10740 N_EQK, N_EQK, N_SU_32 | N_KEY); 10741 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size); 10742} 10743 10744static void 10745do_neon_dyadic_i64_su (void) 10746{ 10747 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); 10748 struct neon_type_el et = neon_check_type (3, rs, 10749 N_EQK, N_EQK, N_SU_ALL | N_KEY); 10750 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size); 10751} 10752 10753static void 10754neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et, 10755 unsigned immbits) 10756{ 10757 unsigned size = et.size >> 3; 10758 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 10759 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 10760 inst.instruction |= LOW4 (inst.operands[1].reg); 10761 inst.instruction |= HI1 (inst.operands[1].reg) << 5; 10762 inst.instruction |= (isquad != 0) << 6; 10763 inst.instruction |= immbits << 16; 10764 inst.instruction |= (size >> 3) << 7; 10765 inst.instruction |= (size & 0x7) << 19; 10766 if (write_ubit) 10767 inst.instruction |= (uval != 0) << 24; 10768 10769 inst.instruction = neon_dp_fixup (inst.instruction); 10770} 10771 10772static void 10773do_neon_shl_imm (void) 10774{ 10775 if (!inst.operands[2].isreg) 10776 { 10777 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL); 10778 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL); 10779 inst.instruction = NEON_ENC_IMMED (inst.instruction); 10780 neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm); 10781 } 10782 else 10783 { 10784 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); 10785 struct neon_type_el et = neon_check_type (3, rs, 10786 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN); 10787 unsigned int tmp; 10788 10789 /* VSHL/VQSHL 3-register 
variants have syntax such as: 10790 vshl.xx Dd, Dm, Dn 10791 whereas other 3-register operations encoded by neon_three_same have 10792 syntax like: 10793 vadd.xx Dd, Dn, Dm 10794 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg 10795 here. */ 10796 tmp = inst.operands[2].reg; 10797 inst.operands[2].reg = inst.operands[1].reg; 10798 inst.operands[1].reg = tmp; 10799 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 10800 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size); 10801 } 10802} 10803 10804static void 10805do_neon_qshl_imm (void) 10806{ 10807 if (!inst.operands[2].isreg) 10808 { 10809 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL); 10810 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY); 10811 10812 inst.instruction = NEON_ENC_IMMED (inst.instruction); 10813 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, 10814 inst.operands[2].imm); 10815 } 10816 else 10817 { 10818 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); 10819 struct neon_type_el et = neon_check_type (3, rs, 10820 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN); 10821 unsigned int tmp; 10822 10823 /* See note in do_neon_shl_imm. 
*/ 10824 tmp = inst.operands[2].reg; 10825 inst.operands[2].reg = inst.operands[1].reg; 10826 inst.operands[1].reg = tmp; 10827 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 10828 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size); 10829 } 10830} 10831 10832static void 10833do_neon_rshl (void) 10834{ 10835 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); 10836 struct neon_type_el et = neon_check_type (3, rs, 10837 N_EQK, N_EQK, N_SU_ALL | N_KEY); 10838 unsigned int tmp; 10839 10840 tmp = inst.operands[2].reg; 10841 inst.operands[2].reg = inst.operands[1].reg; 10842 inst.operands[1].reg = tmp; 10843 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size); 10844} 10845 10846static int 10847neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size) 10848{ 10849 /* Handle .I8 pseudo-instructions. */ 10850 if (size == 8) 10851 { 10852 /* Unfortunately, this will make everything apart from zero out-of-range. 10853 FIXME is this the intended semantics? There doesn't seem much point in 10854 accepting .I8 if so. 
*/
      /* Duplicate the byte into both halves and fall through to the
	 16-bit cases below.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* 32-bit element: the immediate must be a single byte in one of
	 the four byte lanes (cmode 0x1/0x3/0x5/0x7).  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* Otherwise the value may still be encodable as a 16-bit pattern
	 if both halves are identical.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  /* 16-bit element: a single byte in either byte lane
     (cmode 0x9/0xb).  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}

/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  /* Each byte must be all-zeros or all-ones.  */
  return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
    && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
    && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
    && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
}

/* For immediate of above form, return 0bABCD.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  /* Collect the low bit of each byte into a 4-bit value.  */
  return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
    | ((imm & 0x01000000) >> 21);
}

/* Compress quarter-float representation to 0b...000 abcdefgh.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  /* Sign bit (31) -> bit 7; exponent low bit and mantissa high bits
     (25:19) -> bits 6:0.  Caller must have validated with
     is_quarter_float first.  */
  return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
}

/* Returns CMODE.
IMMBITS [7:0] is set to bits suitable for inserting into 10932 the instruction. *OP is passed as the initial value of the op field, and 10933 may be set to a different value depending on the constant (i.e. 10934 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not 10935 MVN). If the immediate looks like a repeated parttern then also 10936 try smaller element sizes. */ 10937 10938static int 10939neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p, 10940 unsigned *immbits, int *op, int size, 10941 enum neon_el_type type) 10942{ 10943 /* Only permit float immediates (including 0.0/-0.0) if the operand type is 10944 float. */ 10945 if (type == NT_float && !float_p) 10946 return FAIL; 10947 10948 if (type == NT_float && is_quarter_float (immlo) && immhi == 0) 10949 { 10950 if (size != 32 || *op == 1) 10951 return FAIL; 10952 *immbits = neon_qfloat_bits (immlo); 10953 return 0xf; 10954 } 10955 10956 if (size == 64) 10957 { 10958 if (neon_bits_same_in_bytes (immhi) 10959 && neon_bits_same_in_bytes (immlo)) 10960 { 10961 if (*op == 1) 10962 return FAIL; 10963 *immbits = (neon_squash_bits (immhi) << 4) 10964 | neon_squash_bits (immlo); 10965 *op = 1; 10966 return 0xe; 10967 } 10968 10969 if (immhi != immlo) 10970 return FAIL; 10971 } 10972 10973 if (size >= 32) 10974 { 10975 if (immlo == (immlo & 0x000000ff)) 10976 { 10977 *immbits = immlo; 10978 return 0x0; 10979 } 10980 else if (immlo == (immlo & 0x0000ff00)) 10981 { 10982 *immbits = immlo >> 8; 10983 return 0x2; 10984 } 10985 else if (immlo == (immlo & 0x00ff0000)) 10986 { 10987 *immbits = immlo >> 16; 10988 return 0x4; 10989 } 10990 else if (immlo == (immlo & 0xff000000)) 10991 { 10992 *immbits = immlo >> 24; 10993 return 0x6; 10994 } 10995 else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff)) 10996 { 10997 *immbits = (immlo >> 8) & 0xff; 10998 return 0xc; 10999 } 11000 else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff)) 11001 { 11002 *immbits = (immlo >> 16) & 0xff; 11003 return 
0xd; 11004 } 11005 11006 if ((immlo & 0xffff) != (immlo >> 16)) 11007 return FAIL; 11008 immlo &= 0xffff; 11009 } 11010 11011 if (size >= 16) 11012 { 11013 if (immlo == (immlo & 0x000000ff)) 11014 { 11015 *immbits = immlo; 11016 return 0x8; 11017 } 11018 else if (immlo == (immlo & 0x0000ff00)) 11019 { 11020 *immbits = immlo >> 8; 11021 return 0xa; 11022 } 11023 11024 if ((immlo & 0xff) != (immlo >> 8)) 11025 return FAIL; 11026 immlo &= 0xff; 11027 } 11028 11029 if (immlo == (immlo & 0x000000ff)) 11030 { 11031 /* Don't allow MVN with 8-bit immediate. */ 11032 if (*op == 1) 11033 return FAIL; 11034 *immbits = immlo; 11035 return 0xe; 11036 } 11037 11038 return FAIL; 11039} 11040 11041/* Write immediate bits [7:0] to the following locations: 11042 11043 |28/24|23 19|18 16|15 4|3 0| 11044 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h| 11045 11046 This function is used by VMOV/VMVN/VORR/VBIC. */ 11047 11048static void 11049neon_write_immbits (unsigned immbits) 11050{ 11051 inst.instruction |= immbits & 0xf; 11052 inst.instruction |= ((immbits >> 4) & 0x7) << 16; 11053 inst.instruction |= ((immbits >> 7) & 0x1) << 24; 11054} 11055 11056/* Invert low-order SIZE bits of XHI:XLO. */ 11057 11058static void 11059neon_invert_size (unsigned *xlo, unsigned *xhi, int size) 11060{ 11061 unsigned immlo = xlo ? *xlo : 0; 11062 unsigned immhi = xhi ? *xhi : 0; 11063 11064 switch (size) 11065 { 11066 case 8: 11067 immlo = (~immlo) & 0xff; 11068 break; 11069 11070 case 16: 11071 immlo = (~immlo) & 0xffff; 11072 break; 11073 11074 case 64: 11075 immhi = (~immhi) & 0xffffffff; 11076 /* fall through. 
*/ 11077 11078 case 32: 11079 immlo = (~immlo) & 0xffffffff; 11080 break; 11081 11082 default: 11083 abort (); 11084 } 11085 11086 if (xlo) 11087 *xlo = immlo; 11088 11089 if (xhi) 11090 *xhi = immhi; 11091} 11092 11093static void 11094do_neon_logic (void) 11095{ 11096 if (inst.operands[2].present && inst.operands[2].isreg) 11097 { 11098 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); 11099 neon_check_type (3, rs, N_IGNORE_TYPE); 11100 /* U bit and size field were set as part of the bitmask. */ 11101 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 11102 neon_three_same (neon_quad (rs), 0, -1); 11103 } 11104 else 11105 { 11106 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL); 11107 struct neon_type_el et = neon_check_type (2, rs, 11108 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK); 11109 enum neon_opc opcode = inst.instruction & 0x0fffffff; 11110 unsigned immbits; 11111 int cmode; 11112 11113 if (et.type == NT_invtype) 11114 return; 11115 11116 inst.instruction = NEON_ENC_IMMED (inst.instruction); 11117 11118 immbits = inst.operands[1].imm; 11119 if (et.size == 64) 11120 { 11121 /* .i64 is a pseudo-op, so the immediate must be a repeating 11122 pattern. */ 11123 if (immbits != (inst.operands[1].regisimm ? 11124 inst.operands[1].reg : 0)) 11125 { 11126 /* Set immbits to an invalid constant. */ 11127 immbits = 0xdeadbeef; 11128 } 11129 } 11130 11131 switch (opcode) 11132 { 11133 case N_MNEM_vbic: 11134 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size); 11135 break; 11136 11137 case N_MNEM_vorr: 11138 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size); 11139 break; 11140 11141 case N_MNEM_vand: 11142 /* Pseudo-instruction for VBIC. */ 11143 neon_invert_size (&immbits, 0, et.size); 11144 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size); 11145 break; 11146 11147 case N_MNEM_vorn: 11148 /* Pseudo-instruction for VORR. 
*/ 11149 neon_invert_size (&immbits, 0, et.size); 11150 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size); 11151 break; 11152 11153 default: 11154 abort (); 11155 } 11156 11157 if (cmode == FAIL) 11158 return; 11159 11160 inst.instruction |= neon_quad (rs) << 6; 11161 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 11162 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 11163 inst.instruction |= cmode << 8; 11164 neon_write_immbits (immbits); 11165 11166 inst.instruction = neon_dp_fixup (inst.instruction); 11167 } 11168} 11169 11170static void 11171do_neon_bitfield (void) 11172{ 11173 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); 11174 neon_check_type (3, rs, N_IGNORE_TYPE); 11175 neon_three_same (neon_quad (rs), 0, -1); 11176} 11177 11178static void 11179neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types, 11180 unsigned destbits) 11181{ 11182 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); 11183 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK, 11184 types | N_KEY); 11185 if (et.type == NT_float) 11186 { 11187 inst.instruction = NEON_ENC_FLOAT (inst.instruction); 11188 neon_three_same (neon_quad (rs), 0, -1); 11189 } 11190 else 11191 { 11192 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 11193 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size); 11194 } 11195} 11196 11197static void 11198do_neon_dyadic_if_su (void) 11199{ 11200 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0); 11201} 11202 11203static void 11204do_neon_dyadic_if_su_d (void) 11205{ 11206 /* This version only allow D registers, but that constraint is enforced during 11207 operand parsing so we don't need to do anything extra here. */ 11208 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0); 11209} 11210 11211static void 11212do_neon_dyadic_if_i_d (void) 11213{ 11214 /* The "untyped" case can't happen. Do this to stop the "U" bit being 11215 affected if we specify unsigned args. 
*/ 11216 neon_dyadic_misc (NT_untyped, N_IF_32, 0); 11217} 11218 11219enum vfp_or_neon_is_neon_bits 11220{ 11221 NEON_CHECK_CC = 1, 11222 NEON_CHECK_ARCH = 2 11223}; 11224 11225/* Call this function if an instruction which may have belonged to the VFP or 11226 Neon instruction sets, but turned out to be a Neon instruction (due to the 11227 operand types involved, etc.). We have to check and/or fix-up a couple of 11228 things: 11229 11230 - Make sure the user hasn't attempted to make a Neon instruction 11231 conditional. 11232 - Alter the value in the condition code field if necessary. 11233 - Make sure that the arch supports Neon instructions. 11234 11235 Which of these operations take place depends on bits from enum 11236 vfp_or_neon_is_neon_bits. 11237 11238 WARNING: This function has side effects! If NEON_CHECK_CC is used and the 11239 current instruction's condition is COND_ALWAYS, the condition field is 11240 changed to inst.uncond_value. This is necessary because instructions shared 11241 between VFP and Neon may be conditional for the VFP variants only, and the 11242 unconditional Neon version must have, e.g., 0xF in the condition field. */ 11243 11244static int 11245vfp_or_neon_is_neon (unsigned check) 11246{ 11247 /* Conditions are always legal in Thumb mode (IT blocks). 
*/ 11248 if (!thumb_mode && (check & NEON_CHECK_CC)) 11249 { 11250 if (inst.cond != COND_ALWAYS) 11251 { 11252 first_error (_(BAD_COND)); 11253 return FAIL; 11254 } 11255 if (inst.uncond_value != -1) 11256 inst.instruction |= inst.uncond_value << 28; 11257 } 11258 11259 if ((check & NEON_CHECK_ARCH) 11260 && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)) 11261 { 11262 first_error (_(BAD_FPU)); 11263 return FAIL; 11264 } 11265 11266 return SUCCESS; 11267} 11268 11269static void 11270do_neon_addsub_if_i (void) 11271{ 11272 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS) 11273 return; 11274 11275 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL) 11276 return; 11277 11278 /* The "untyped" case can't happen. Do this to stop the "U" bit being 11279 affected if we specify unsigned args. */ 11280 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0); 11281} 11282 11283/* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the 11284 result to be: 11285 V<op> A,B (A is operand 0, B is operand 2) 11286 to mean: 11287 V<op> A,B,A 11288 not: 11289 V<op> A,B,B 11290 so handle that case specially. */ 11291 11292static void 11293neon_exchange_operands (void) 11294{ 11295 void *scratch = alloca (sizeof (inst.operands[0])); 11296 if (inst.operands[1].present) 11297 { 11298 /* Swap operands[1] and operands[2]. 
*/
      memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
      inst.operands[1] = inst.operands[2];
      memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
    }
  else
    {
      /* Operand 1 was omitted: duplicate operand 0 into the last slot so
         "V<op> A,B" becomes "V<op> A,B,A" after the swap.  */
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[0];
    }
}

/* Encode a Neon compare.  The register-register form goes through the
   generic dyadic encoder; comparison against immediate zero uses the
   two-register-and-immediate form.  INVERT swaps the source operands
   first (presumably so LT/LE aliases reuse the GT/GE encodings --
   see neon_exchange_operands).  */

static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
        neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      /* Compare against #0: third operand is an immediate.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
        N_EQK | N_SIZ, immtypes | N_KEY);

      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Bit 10 selects the floating-point variant.  */
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
}

/* VCGE/VCGT: signed, unsigned or float element types.  */

static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
}

/* Inverted compares (operands swapped before encoding).  */

static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
}

/* VCEQ: equality accepts integer or float element types.  */

static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}

/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.
*/

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      /* 16-bit scalar: register number 0-7, element index 0-3.  */
      if (regno > 7 || elno > 3)
        goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      /* 32-bit scalar: register number 0-15, element index 0-1.  */
      if (regno > 15 || elno > 1)
        goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      first_error (_("scalar out of range for multiply instruction"));
    }

  /* Error path: value is unused once first_error has been recorded.  */
  return 0;
}

/* Encode multiply / multiply-accumulate scalar instructions.  */

static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  /* The scalar operand supplies the M:Rm field (see comment above
     neon_scalar_for_mul).  */
  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  inst.instruction = neon_dp_fixup (inst.instruction);
}

/* Encode VMLA/VMLS, which may be Neon-syntax VFP, a Neon three-register
   form, or a Neon multiply-by-scalar form, depending on the operands.  */

static void
do_neon_mac_maybe_scalar (void)
{
  /* First try the VFP interpretation of the mnemonic.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
11427 inst.instruction = NEON_ENC_SCALAR (inst.instruction); 11428 neon_mul_mac (et, neon_quad (rs)); 11429 } 11430 else 11431 { 11432 /* The "untyped" case can't happen. Do this to stop the "U" bit being 11433 affected if we specify unsigned args. */ 11434 neon_dyadic_misc (NT_untyped, N_IF_32, 0); 11435 } 11436} 11437 11438static void 11439do_neon_tst (void) 11440{ 11441 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); 11442 struct neon_type_el et = neon_check_type (3, rs, 11443 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY); 11444 neon_three_same (neon_quad (rs), 0, et.size); 11445} 11446 11447/* VMUL with 3 registers allows the P8 type. The scalar version supports the 11448 same types as the MAC equivalents. The polynomial type for this instruction 11449 is encoded the same as the integer type. */ 11450 11451static void 11452do_neon_mul (void) 11453{ 11454 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS) 11455 return; 11456 11457 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL) 11458 return; 11459 11460 if (inst.operands[2].isscalar) 11461 do_neon_mac_maybe_scalar (); 11462 else 11463 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0); 11464} 11465 11466static void 11467do_neon_fma (void) 11468{ 11469 if (try_vfp_nsyn (3, do_vfp_nsyn_fma) == SUCCESS) 11470 return; 11471 11472 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL) 11473 return; 11474 11475 neon_dyadic_misc (NT_untyped, N_IF_32, 0); 11476} 11477 11478static void 11479do_neon_qdmulh (void) 11480{ 11481 if (inst.operands[2].isscalar) 11482 { 11483 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL); 11484 struct neon_type_el et = neon_check_type (3, rs, 11485 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY); 11486 inst.instruction = NEON_ENC_SCALAR (inst.instruction); 11487 neon_mul_mac (et, neon_quad (rs)); 11488 } 11489 else 11490 { 11491 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); 11492 struct neon_type_el et = 
neon_check_type (3, rs, 11493 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY); 11494 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 11495 /* The U bit (rounding) comes from bit mask. */ 11496 neon_three_same (neon_quad (rs), 0, et.size); 11497 } 11498} 11499 11500static void 11501do_neon_fcmp_absolute (void) 11502{ 11503 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); 11504 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY); 11505 /* Size field comes from bit mask. */ 11506 neon_three_same (neon_quad (rs), 1, -1); 11507} 11508 11509static void 11510do_neon_fcmp_absolute_inv (void) 11511{ 11512 neon_exchange_operands (); 11513 do_neon_fcmp_absolute (); 11514} 11515 11516static void 11517do_neon_step (void) 11518{ 11519 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL); 11520 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY); 11521 neon_three_same (neon_quad (rs), 0, -1); 11522} 11523 11524static void 11525do_neon_abs_neg (void) 11526{ 11527 enum neon_shape rs; 11528 struct neon_type_el et; 11529 11530 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS) 11531 return; 11532 11533 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL) 11534 return; 11535 11536 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); 11537 et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY); 11538 11539 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 11540 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 11541 inst.instruction |= LOW4 (inst.operands[1].reg); 11542 inst.instruction |= HI1 (inst.operands[1].reg) << 5; 11543 inst.instruction |= neon_quad (rs) << 6; 11544 inst.instruction |= (et.type == NT_float) << 10; 11545 inst.instruction |= neon_logbits (et.size) << 18; 11546 11547 inst.instruction = neon_dp_fixup (inst.instruction); 11548} 11549 11550static void 11551do_neon_sli (void) 11552{ 11553 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL); 11554 struct neon_type_el et = neon_check_type 
(2, rs, 11555 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY); 11556 int imm = inst.operands[2].imm; 11557 constraint (imm < 0 || (unsigned)imm >= et.size, 11558 _("immediate out of range for insert")); 11559 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm); 11560} 11561 11562static void 11563do_neon_sri (void) 11564{ 11565 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL); 11566 struct neon_type_el et = neon_check_type (2, rs, 11567 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY); 11568 int imm = inst.operands[2].imm; 11569 constraint (imm < 1 || (unsigned)imm > et.size, 11570 _("immediate out of range for insert")); 11571 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm); 11572} 11573 11574static void 11575do_neon_qshlu_imm (void) 11576{ 11577 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL); 11578 struct neon_type_el et = neon_check_type (2, rs, 11579 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY); 11580 int imm = inst.operands[2].imm; 11581 constraint (imm < 0 || (unsigned)imm >= et.size, 11582 _("immediate out of range for shift")); 11583 /* Only encodes the 'U present' variant of the instruction. 11584 In this case, signed types have OP (bit 8) set to 0. 11585 Unsigned types have OP set to 1. */ 11586 inst.instruction |= (et.type == NT_unsigned) << 8; 11587 /* The rest of the bits are the same as other immediate shifts. */ 11588 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm); 11589} 11590 11591static void 11592do_neon_qmovn (void) 11593{ 11594 struct neon_type_el et = neon_check_type (2, NS_DQ, 11595 N_EQK | N_HLF, N_SU_16_64 | N_KEY); 11596 /* Saturating move where operands can be signed or unsigned, and the 11597 destination has the same signedness. 
*/ 11598 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 11599 if (et.type == NT_unsigned) 11600 inst.instruction |= 0xc0; 11601 else 11602 inst.instruction |= 0x80; 11603 neon_two_same (0, 1, et.size / 2); 11604} 11605 11606static void 11607do_neon_qmovun (void) 11608{ 11609 struct neon_type_el et = neon_check_type (2, NS_DQ, 11610 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY); 11611 /* Saturating move with unsigned results. Operands must be signed. */ 11612 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 11613 neon_two_same (0, 1, et.size / 2); 11614} 11615 11616static void 11617do_neon_rshift_sat_narrow (void) 11618{ 11619 /* FIXME: Types for narrowing. If operands are signed, results can be signed 11620 or unsigned. If operands are unsigned, results must also be unsigned. */ 11621 struct neon_type_el et = neon_check_type (2, NS_DQI, 11622 N_EQK | N_HLF, N_SU_16_64 | N_KEY); 11623 int imm = inst.operands[2].imm; 11624 /* This gets the bounds check, size encoding and immediate bits calculation 11625 right. */ 11626 et.size /= 2; 11627 11628 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for 11629 VQMOVN.I<size> <Dd>, <Qm>. */ 11630 if (imm == 0) 11631 { 11632 inst.operands[2].present = 0; 11633 inst.instruction = N_MNEM_vqmovn; 11634 do_neon_qmovn (); 11635 return; 11636 } 11637 11638 constraint (imm < 1 || (unsigned)imm > et.size, 11639 _("immediate out of range")); 11640 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm); 11641} 11642 11643static void 11644do_neon_rshift_sat_narrow_u (void) 11645{ 11646 /* FIXME: Types for narrowing. If operands are signed, results can be signed 11647 or unsigned. If operands are unsigned, results must also be unsigned. */ 11648 struct neon_type_el et = neon_check_type (2, NS_DQI, 11649 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY); 11650 int imm = inst.operands[2].imm; 11651 /* This gets the bounds check, size encoding and immediate bits calculation 11652 right. 
*/ 11653 et.size /= 2; 11654 11655 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for 11656 VQMOVUN.I<size> <Dd>, <Qm>. */ 11657 if (imm == 0) 11658 { 11659 inst.operands[2].present = 0; 11660 inst.instruction = N_MNEM_vqmovun; 11661 do_neon_qmovun (); 11662 return; 11663 } 11664 11665 constraint (imm < 1 || (unsigned)imm > et.size, 11666 _("immediate out of range")); 11667 /* FIXME: The manual is kind of unclear about what value U should have in 11668 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it 11669 must be 1. */ 11670 neon_imm_shift (TRUE, 1, 0, et, et.size - imm); 11671} 11672 11673static void 11674do_neon_movn (void) 11675{ 11676 struct neon_type_el et = neon_check_type (2, NS_DQ, 11677 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY); 11678 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 11679 neon_two_same (0, 1, et.size / 2); 11680} 11681 11682static void 11683do_neon_rshift_narrow (void) 11684{ 11685 struct neon_type_el et = neon_check_type (2, NS_DQI, 11686 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY); 11687 int imm = inst.operands[2].imm; 11688 /* This gets the bounds check, size encoding and immediate bits calculation 11689 right. */ 11690 et.size /= 2; 11691 11692 /* If immediate is zero then we are a pseudo-instruction for 11693 VMOVN.I<size> <Dd>, <Qm> */ 11694 if (imm == 0) 11695 { 11696 inst.operands[2].present = 0; 11697 inst.instruction = N_MNEM_vmovn; 11698 do_neon_movn (); 11699 return; 11700 } 11701 11702 constraint (imm < 1 || (unsigned)imm > et.size, 11703 _("immediate out of range for narrowing operation")); 11704 neon_imm_shift (FALSE, 0, 0, et, et.size - imm); 11705} 11706 11707static void 11708do_neon_shll (void) 11709{ 11710 /* FIXME: Type checking when lengthening. */ 11711 struct neon_type_el et = neon_check_type (2, NS_QDI, 11712 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY); 11713 unsigned imm = inst.operands[2].imm; 11714 11715 if (imm == et.size) 11716 { 11717 /* Maximum shift variant. 
*/ 11718 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 11719 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 11720 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 11721 inst.instruction |= LOW4 (inst.operands[1].reg); 11722 inst.instruction |= HI1 (inst.operands[1].reg) << 5; 11723 inst.instruction |= neon_logbits (et.size) << 18; 11724 11725 inst.instruction = neon_dp_fixup (inst.instruction); 11726 } 11727 else 11728 { 11729 /* A more-specific type check for non-max versions. */ 11730 et = neon_check_type (2, NS_QDI, 11731 N_EQK | N_DBL, N_SU_32 | N_KEY); 11732 inst.instruction = NEON_ENC_IMMED (inst.instruction); 11733 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm); 11734 } 11735} 11736 11737/* Check the various types for the VCVT instruction, and return which version 11738 the current instruction is. */ 11739 11740static int 11741neon_cvt_flavour (enum neon_shape rs) 11742{ 11743#define CVT_VAR(C,X,Y) \ 11744 et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \ 11745 if (et.type != NT_invtype) \ 11746 { \ 11747 inst.error = NULL; \ 11748 return (C); \ 11749 } 11750 struct neon_type_el et; 11751 unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF 11752 || rs == NS_FF) ? N_VFP : 0; 11753 /* The instruction versions which take an immediate take one register 11754 argument, which is extended to the width of the full register. Thus the 11755 "source" and "destination" registers must have the same width. Hack that 11756 here by making the size equal to the key (wider, in this case) operand. */ 11757 unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0; 11758 11759 CVT_VAR (0, N_S32, N_F32); 11760 CVT_VAR (1, N_U32, N_F32); 11761 CVT_VAR (2, N_F32, N_S32); 11762 CVT_VAR (3, N_F32, N_U32); 11763 11764 whole_reg = N_VFP; 11765 11766 /* VFP instructions. 
*/ 11767 CVT_VAR (4, N_F32, N_F64); 11768 CVT_VAR (5, N_F64, N_F32); 11769 CVT_VAR (6, N_S32, N_F64 | key); 11770 CVT_VAR (7, N_U32, N_F64 | key); 11771 CVT_VAR (8, N_F64 | key, N_S32); 11772 CVT_VAR (9, N_F64 | key, N_U32); 11773 /* VFP instructions with bitshift. */ 11774 CVT_VAR (10, N_F32 | key, N_S16); 11775 CVT_VAR (11, N_F32 | key, N_U16); 11776 CVT_VAR (12, N_F64 | key, N_S16); 11777 CVT_VAR (13, N_F64 | key, N_U16); 11778 CVT_VAR (14, N_S16, N_F32 | key); 11779 CVT_VAR (15, N_U16, N_F32 | key); 11780 CVT_VAR (16, N_S16, N_F64 | key); 11781 CVT_VAR (17, N_U16, N_F64 | key); 11782 11783 whole_reg = 0; 11784 CVT_VAR (18, N_F32, N_F16); 11785 CVT_VAR (19, N_F16, N_F32); 11786 11787 return -1; 11788#undef CVT_VAR 11789} 11790 11791/* Neon-syntax VFP conversions. */ 11792 11793static void 11794do_vfp_nsyn_cvt (enum neon_shape rs, int flavour) 11795{ 11796 const char *opname = 0; 11797 11798 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI) 11799 { 11800 /* Conversions with immediate bitshift. */ 11801 const char *enc[] = 11802 { 11803 "ftosls", 11804 "ftouls", 11805 "fsltos", 11806 "fultos", 11807 NULL, 11808 NULL, 11809 "ftosld", 11810 "ftould", 11811 "fsltod", 11812 "fultod", 11813 "fshtos", 11814 "fuhtos", 11815 "fshtod", 11816 "fuhtod", 11817 "ftoshs", 11818 "ftouhs", 11819 "ftoshd", 11820 "ftouhd" 11821 }; 11822 11823 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc)) 11824 { 11825 opname = enc[flavour]; 11826 constraint (inst.operands[0].reg != inst.operands[1].reg, 11827 _("operands 0 and 1 must be the same register")); 11828 inst.operands[1] = inst.operands[2]; 11829 memset (&inst.operands[2], '\0', sizeof (inst.operands[2])); 11830 } 11831 } 11832 else if (rs == NS_QD || rs == NS_DQ) 11833 { 11834 /* Conversions between half-percision and single-precision. 
*/ 11835 if (flavour == 18) 11836 { 11837 opname = "fcvtshp"; 11838 } 11839 else if (flavour == 19) 11840 { 11841 opname = "fcvthps"; 11842 } 11843 } 11844 else if (rs == NS_FF && (flavour == 18 || flavour == 19)) 11845 { 11846 /* 11847 * Conversions between half-percision (in top or bottom half of register) 11848 * and single-precision. The routines do_neon_cvtt() and do_neon_cvtb() 11849 * set or cleared the T bit (0x80) in the inst.instruction to pass that 11850 * info to say this is for the top half of the register (T bit set) or the 11851 * bottom half of the register (T bit cleared) information here to know 11852 * which opname to use. This is done this way because the call to 11853 * do_vfp_nsyn_opcode() will set inst.instruction and loose this info. 11854 */ 11855 if (flavour == 18) 11856 { 11857 if((inst.instruction & 0x80) == 0x80) 11858 opname = "fcvtthps"; 11859 else 11860 opname = "fcvtbhps"; 11861 } 11862 else if(flavour == 19) 11863 { 11864 if((inst.instruction & 0x80) == 0x80) 11865 opname = "fcvttshp"; 11866 else 11867 opname = "fcvtbshp"; 11868 } 11869 } 11870 else 11871 { 11872 /* Conversions without bitshift. 
*/ 11873 const char *enc[] = 11874 { 11875 "ftosizs", 11876 "ftouizs", 11877 "fsitos", 11878 "fuitos", 11879 "fcvtsd", 11880 "fcvtds", 11881 "ftosizd", 11882 "ftouizd", 11883 "fsitod", 11884 "fuitod" 11885 }; 11886 11887 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc)) 11888 opname = enc[flavour]; 11889 } 11890 11891 if (opname) 11892 do_vfp_nsyn_opcode (opname); 11893} 11894 11895static void 11896do_vfp_nsyn_cvtr (void) 11897{ 11898 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL); 11899 int flavour = neon_cvt_flavour (rs); 11900 const char *enc[] = 11901 { 11902 "ftosis", 11903 "ftouis", 11904 NULL, 11905 NULL, 11906 NULL, 11907 NULL, 11908 "ftosid", 11909 "ftouid", 11910 }; 11911 11912 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc)) 11913 { 11914 if (enc[flavour]) 11915 do_vfp_nsyn_opcode (enc[flavour]); 11916 else 11917 first_error (_("invalid instruction shape (using rounding mode)")); 11918 } 11919} 11920 11921static void 11922do_neon_cvt (void) 11923{ 11924 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ, 11925 NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL); 11926 int flavour = neon_cvt_flavour (rs); 11927 11928 /* VFP rather than Neon conversions. */ 11929 if (flavour >= 4) 11930 { 11931 do_vfp_nsyn_cvt (rs, flavour); 11932 return; 11933 } 11934 11935 switch (rs) 11936 { 11937 case NS_DDI: 11938 case NS_QQI: 11939 { 11940 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL) 11941 return; 11942 11943 /* Fixed-point conversion with #0 immediate is encoded as an 11944 integer conversion. 
*/ 11945 if (inst.operands[2].present && inst.operands[2].imm == 0) 11946 goto int_encode; 11947 unsigned immbits = 32 - inst.operands[2].imm; 11948 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 }; 11949 inst.instruction = NEON_ENC_IMMED (inst.instruction); 11950 if (flavour != -1) 11951 inst.instruction |= enctab[flavour]; 11952 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 11953 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 11954 inst.instruction |= LOW4 (inst.operands[1].reg); 11955 inst.instruction |= HI1 (inst.operands[1].reg) << 5; 11956 inst.instruction |= neon_quad (rs) << 6; 11957 inst.instruction |= 1 << 21; 11958 inst.instruction |= immbits << 16; 11959 11960 inst.instruction = neon_dp_fixup (inst.instruction); 11961 } 11962 break; 11963 11964 case NS_DD: 11965 case NS_QQ: 11966 int_encode: 11967 { 11968 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 }; 11969 11970 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 11971 11972 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL) 11973 return; 11974 11975 if (flavour != -1) 11976 inst.instruction |= enctab[flavour]; 11977 11978 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 11979 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 11980 inst.instruction |= LOW4 (inst.operands[1].reg); 11981 inst.instruction |= HI1 (inst.operands[1].reg) << 5; 11982 inst.instruction |= neon_quad (rs) << 6; 11983 inst.instruction |= 2 << 18; 11984 11985 inst.instruction = neon_dp_fixup (inst.instruction); 11986 } 11987 break; 11988 11989 default: 11990 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). 
*/ 11991 do_vfp_nsyn_cvt (rs, flavour); 11992 } 11993} 11994 11995static void 11996do_neon_cvtt (void) 11997{ 11998 /* set the T bit to say this is for the top half of the register for the 11999 routine do_vfp_nsyn_cvt() to use to pick the opname */ 12000 inst.instruction |= 0x80; 12001 do_neon_cvt (); 12002} 12003 12004static void 12005do_neon_cvtb (void) 12006{ 12007 /* clear the T bit to say this is for the bottom half of the register for the 12008 routine do_vfp_nsyn_cvt() to use to pick the opname */ 12009 inst.instruction = inst.instruction & ~(0x80); 12010 do_neon_cvt (); 12011} 12012 12013static void 12014neon_move_immediate (void) 12015{ 12016 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL); 12017 struct neon_type_el et = neon_check_type (2, rs, 12018 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK); 12019 unsigned immlo, immhi = 0, immbits; 12020 int op, cmode, float_p; 12021 12022 constraint (et.type == NT_invtype, 12023 _("operand size must be specified for immediate VMOV")); 12024 12025 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */ 12026 op = (inst.instruction & (1 << 5)) != 0; 12027 12028 immlo = inst.operands[1].imm; 12029 if (inst.operands[1].regisimm) 12030 immhi = inst.operands[1].reg; 12031 12032 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0, 12033 _("immediate has bits set outside the operand size")); 12034 12035 float_p = inst.operands[1].immisfloat; 12036 12037 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op, 12038 et.size, et.type)) == FAIL) 12039 { 12040 /* Invert relevant bits only. */ 12041 neon_invert_size (&immlo, &immhi, et.size); 12042 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable 12043 with one or the other; those cases are caught by 12044 neon_cmode_for_move_imm. 
*/ 12045 op = !op; 12046 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, 12047 &op, et.size, et.type)) == FAIL) 12048 { 12049 first_error (_("immediate out of range")); 12050 return; 12051 } 12052 } 12053 12054 inst.instruction &= ~(1 << 5); 12055 inst.instruction |= op << 5; 12056 12057 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 12058 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 12059 inst.instruction |= neon_quad (rs) << 6; 12060 inst.instruction |= cmode << 8; 12061 12062 neon_write_immbits (immbits); 12063} 12064 12065static void 12066do_neon_mvn (void) 12067{ 12068 if (inst.operands[1].isreg) 12069 { 12070 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); 12071 12072 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 12073 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 12074 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 12075 inst.instruction |= LOW4 (inst.operands[1].reg); 12076 inst.instruction |= HI1 (inst.operands[1].reg) << 5; 12077 inst.instruction |= neon_quad (rs) << 6; 12078 } 12079 else 12080 { 12081 inst.instruction = NEON_ENC_IMMED (inst.instruction); 12082 neon_move_immediate (); 12083 } 12084 12085 inst.instruction = neon_dp_fixup (inst.instruction); 12086} 12087 12088/* Encode instructions of form: 12089 12090 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0| 12091 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | 12092 12093*/ 12094 12095static void 12096neon_mixed_length (struct neon_type_el et, unsigned size) 12097{ 12098 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 12099 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 12100 inst.instruction |= LOW4 (inst.operands[1].reg) << 16; 12101 inst.instruction |= HI1 (inst.operands[1].reg) << 7; 12102 inst.instruction |= LOW4 (inst.operands[2].reg); 12103 inst.instruction |= HI1 (inst.operands[2].reg) << 5; 12104 inst.instruction |= (et.type == NT_unsigned) << 24; 12105 inst.instruction |= neon_logbits (size) 
<< 20; 12106 12107 inst.instruction = neon_dp_fixup (inst.instruction); 12108} 12109 12110static void 12111do_neon_dyadic_long (void) 12112{ 12113 /* FIXME: Type checking for lengthening op. */ 12114 struct neon_type_el et = neon_check_type (3, NS_QDD, 12115 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY); 12116 neon_mixed_length (et, et.size); 12117} 12118 12119static void 12120do_neon_abal (void) 12121{ 12122 struct neon_type_el et = neon_check_type (3, NS_QDD, 12123 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY); 12124 neon_mixed_length (et, et.size); 12125} 12126 12127static void 12128neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes) 12129{ 12130 if (inst.operands[2].isscalar) 12131 { 12132 struct neon_type_el et = neon_check_type (3, NS_QDS, 12133 N_EQK | N_DBL, N_EQK, regtypes | N_KEY); 12134 inst.instruction = NEON_ENC_SCALAR (inst.instruction); 12135 neon_mul_mac (et, et.type == NT_unsigned); 12136 } 12137 else 12138 { 12139 struct neon_type_el et = neon_check_type (3, NS_QDD, 12140 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY); 12141 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 12142 neon_mixed_length (et, et.size); 12143 } 12144} 12145 12146static void 12147do_neon_mac_maybe_scalar_long (void) 12148{ 12149 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32); 12150} 12151 12152static void 12153do_neon_dyadic_wide (void) 12154{ 12155 struct neon_type_el et = neon_check_type (3, NS_QQD, 12156 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY); 12157 neon_mixed_length (et, et.size); 12158} 12159 12160static void 12161do_neon_dyadic_narrow (void) 12162{ 12163 struct neon_type_el et = neon_check_type (3, NS_QDD, 12164 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY); 12165 /* Operand sign is unimportant, and the U bit is part of the opcode, 12166 so force the operand type to integer. 
*/ 12167 et.type = NT_integer; 12168 neon_mixed_length (et, et.size / 2); 12169} 12170 12171static void 12172do_neon_mul_sat_scalar_long (void) 12173{ 12174 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32); 12175} 12176 12177static void 12178do_neon_vmull (void) 12179{ 12180 if (inst.operands[2].isscalar) 12181 do_neon_mac_maybe_scalar_long (); 12182 else 12183 { 12184 struct neon_type_el et = neon_check_type (3, NS_QDD, 12185 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY); 12186 if (et.type == NT_poly) 12187 inst.instruction = NEON_ENC_POLY (inst.instruction); 12188 else 12189 inst.instruction = NEON_ENC_INTEGER (inst.instruction); 12190 /* For polynomial encoding, size field must be 0b00 and the U bit must be 12191 zero. Should be OK as-is. */ 12192 neon_mixed_length (et, et.size); 12193 } 12194} 12195 12196static void 12197do_neon_ext (void) 12198{ 12199 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL); 12200 struct neon_type_el et = neon_check_type (3, rs, 12201 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY); 12202 unsigned imm = (inst.operands[3].imm * et.size) / 8; 12203 constraint (imm >= (neon_quad (rs) ? 
16 : 8), _("shift out of range")); 12204 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 12205 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 12206 inst.instruction |= LOW4 (inst.operands[1].reg) << 16; 12207 inst.instruction |= HI1 (inst.operands[1].reg) << 7; 12208 inst.instruction |= LOW4 (inst.operands[2].reg); 12209 inst.instruction |= HI1 (inst.operands[2].reg) << 5; 12210 inst.instruction |= neon_quad (rs) << 6; 12211 inst.instruction |= imm << 8; 12212 12213 inst.instruction = neon_dp_fixup (inst.instruction); 12214} 12215 12216static void 12217do_neon_rev (void) 12218{ 12219 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL); 12220 struct neon_type_el et = neon_check_type (2, rs, 12221 N_EQK, N_8 | N_16 | N_32 | N_KEY); 12222 unsigned op = (inst.instruction >> 7) & 3; 12223 /* N (width of reversed regions) is encoded as part of the bitmask. We 12224 extract it here to check the elements to be reversed are smaller. 12225 Otherwise we'd get a reserved instruction. */ 12226 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 
64 : 0; 12227 assert (elsize != 0); 12228 constraint (et.size >= elsize, 12229 _("elements must be smaller than reversal region")); 12230 neon_two_same (neon_quad (rs), 1, et.size); 12231} 12232 12233static void 12234do_neon_dup (void) 12235{ 12236 if (inst.operands[1].isscalar) 12237 { 12238 enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL); 12239 struct neon_type_el et = neon_check_type (2, rs, 12240 N_EQK, N_8 | N_16 | N_32 | N_KEY); 12241 unsigned sizebits = et.size >> 3; 12242 unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg); 12243 int logsize = neon_logbits (et.size); 12244 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize; 12245 12246 if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL) 12247 return; 12248 12249 inst.instruction = NEON_ENC_SCALAR (inst.instruction); 12250 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 12251 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 12252 inst.instruction |= LOW4 (dm); 12253 inst.instruction |= HI1 (dm) << 5; 12254 inst.instruction |= neon_quad (rs) << 6; 12255 inst.instruction |= x << 17; 12256 inst.instruction |= sizebits << 16; 12257 12258 inst.instruction = neon_dp_fixup (inst.instruction); 12259 } 12260 else 12261 { 12262 enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL); 12263 struct neon_type_el et = neon_check_type (2, rs, 12264 N_8 | N_16 | N_32 | N_KEY, N_EQK); 12265 /* Duplicate ARM register to lanes of vector. 
*/ 12266 inst.instruction = NEON_ENC_ARMREG (inst.instruction); 12267 switch (et.size) 12268 { 12269 case 8: inst.instruction |= 0x400000; break; 12270 case 16: inst.instruction |= 0x000020; break; 12271 case 32: inst.instruction |= 0x000000; break; 12272 default: break; 12273 } 12274 inst.instruction |= LOW4 (inst.operands[1].reg) << 12; 12275 inst.instruction |= LOW4 (inst.operands[0].reg) << 16; 12276 inst.instruction |= HI1 (inst.operands[0].reg) << 7; 12277 inst.instruction |= neon_quad (rs) << 21; 12278 /* The encoding for this instruction is identical for the ARM and Thumb 12279 variants, except for the condition field. */ 12280 do_vfp_cond_or_thumb (); 12281 } 12282} 12283 12284/* VMOV has particularly many variations. It can be one of: 12285 0. VMOV<c><q> <Qd>, <Qm> 12286 1. VMOV<c><q> <Dd>, <Dm> 12287 (Register operations, which are VORR with Rm = Rn.) 12288 2. VMOV<c><q>.<dt> <Qd>, #<imm> 12289 3. VMOV<c><q>.<dt> <Dd>, #<imm> 12290 (Immediate loads.) 12291 4. VMOV<c><q>.<size> <Dn[x]>, <Rd> 12292 (ARM register to scalar.) 12293 5. VMOV<c><q> <Dm>, <Rd>, <Rn> 12294 (Two ARM registers to vector.) 12295 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]> 12296 (Scalar to ARM register.) 12297 7. VMOV<c><q> <Rd>, <Rn>, <Dm> 12298 (Vector to two ARM registers.) 12299 8. VMOV.F32 <Sd>, <Sm> 12300 9. VMOV.F64 <Dd>, <Dm> 12301 (VFP register moves.) 12302 10. VMOV.F32 <Sd>, #imm 12303 11. VMOV.F64 <Dd>, #imm 12304 (VFP float immediate load.) 12305 12. VMOV <Rd>, <Sm> 12306 (VFP single to ARM reg.) 12307 13. VMOV <Sd>, <Rm> 12308 (ARM reg to VFP single.) 12309 14. VMOV <Rd>, <Re>, <Sn>, <Sm> 12310 (Two ARM regs to two VFP singles.) 12311 15. VMOV <Sd>, <Se>, <Rn>, <Rm> 12312 (Two VFP singles to two ARM regs.) 12313 12314 These cases can be disambiguated using neon_select_shape, except cases 1/9 12315 and 3/11 which depend on the operand type too. 12316 12317 All the encoded bits are hardcoded by this function. 
12318 12319 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!). 12320 Cases 5, 7 may be used with VFPv2 and above. 12321 12322 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you 12323 can specify a type where it doesn't make sense to, and is ignored). 12324*/ 12325 12326static void 12327do_neon_mov (void) 12328{ 12329 enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD, 12330 NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR, 12331 NS_NULL); 12332 struct neon_type_el et; 12333 const char *ldconst = 0; 12334 12335 switch (rs) 12336 { 12337 case NS_DD: /* case 1/9. */ 12338 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY); 12339 /* It is not an error here if no type is given. */ 12340 inst.error = NULL; 12341 if (et.type == NT_float && et.size == 64) 12342 { 12343 do_vfp_nsyn_opcode ("fcpyd"); 12344 break; 12345 } 12346 /* fall through. */ 12347 12348 case NS_QQ: /* case 0/1. */ 12349 { 12350 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL) 12351 return; 12352 /* The architecture manual I have doesn't explicitly state which 12353 value the U bit should have for register->register moves, but 12354 the equivalent VORR instruction has U = 0, so do that. */ 12355 inst.instruction = 0x0200110; 12356 inst.instruction |= LOW4 (inst.operands[0].reg) << 12; 12357 inst.instruction |= HI1 (inst.operands[0].reg) << 22; 12358 inst.instruction |= LOW4 (inst.operands[1].reg); 12359 inst.instruction |= HI1 (inst.operands[1].reg) << 5; 12360 inst.instruction |= LOW4 (inst.operands[1].reg) << 16; 12361 inst.instruction |= HI1 (inst.operands[1].reg) << 7; 12362 inst.instruction |= neon_quad (rs) << 6; 12363 12364 inst.instruction = neon_dp_fixup (inst.instruction); 12365 } 12366 break; 12367 12368 case NS_DI: /* case 3/11. 
*/ 12369 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY); 12370 inst.error = NULL; 12371 if (et.type == NT_float && et.size == 64) 12372 { 12373 /* case 11 (fconstd). */ 12374 ldconst = "fconstd"; 12375 goto encode_fconstd; 12376 } 12377 /* fall through. */ 12378 12379 case NS_QI: /* case 2/3. */ 12380 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL) 12381 return; 12382 inst.instruction = 0x0800010; 12383 neon_move_immediate (); 12384 inst.instruction = neon_dp_fixup (inst.instruction); 12385 break; 12386 12387 case NS_SR: /* case 4. */ 12388 { 12389 unsigned bcdebits = 0; 12390 struct neon_type_el et = neon_check_type (2, NS_NULL, 12391 N_8 | N_16 | N_32 | N_KEY, N_EQK); 12392 int logsize = neon_logbits (et.size); 12393 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg); 12394 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg); 12395 12396 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1), 12397 _(BAD_FPU)); 12398 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1) 12399 && et.size != 32, _(BAD_FPU)); 12400 constraint (et.type == NT_invtype, _("bad type for scalar")); 12401 constraint (x >= 64 / et.size, _("scalar index out of range")); 12402 12403 switch (et.size) 12404 { 12405 case 8: bcdebits = 0x8; break; 12406 case 16: bcdebits = 0x1; break; 12407 case 32: bcdebits = 0x0; break; 12408 default: ; 12409 } 12410 12411 bcdebits |= x << logsize; 12412 12413 inst.instruction = 0xe000b10; 12414 do_vfp_cond_or_thumb (); 12415 inst.instruction |= LOW4 (dn) << 16; 12416 inst.instruction |= HI1 (dn) << 7; 12417 inst.instruction |= inst.operands[1].reg << 12; 12418 inst.instruction |= (bcdebits & 3) << 5; 12419 inst.instruction |= (bcdebits >> 2) << 21; 12420 } 12421 break; 12422 12423 case NS_DRR: /* case 5 (fmdrr). 
*/ 12424 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2), 12425 _(BAD_FPU)); 12426 12427 inst.instruction = 0xc400b10; 12428 do_vfp_cond_or_thumb (); 12429 inst.instruction |= LOW4 (inst.operands[0].reg); 12430 inst.instruction |= HI1 (inst.operands[0].reg) << 5; 12431 inst.instruction |= inst.operands[1].reg << 12; 12432 inst.instruction |= inst.operands[2].reg << 16; 12433 break; 12434 12435 case NS_RS: /* case 6. */ 12436 { 12437 struct neon_type_el et = neon_check_type (2, NS_NULL, 12438 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY); 12439 unsigned logsize = neon_logbits (et.size); 12440 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg); 12441 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg); 12442 unsigned abcdebits = 0; 12443 12444 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1), 12445 _(BAD_FPU)); 12446 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1) 12447 && et.size != 32, _(BAD_FPU)); 12448 constraint (et.type == NT_invtype, _("bad type for scalar")); 12449 constraint (x >= 64 / et.size, _("scalar index out of range")); 12450 12451 switch (et.size) 12452 { 12453 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break; 12454 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break; 12455 case 32: abcdebits = 0x00; break; 12456 default: ; 12457 } 12458 12459 abcdebits |= x << logsize; 12460 inst.instruction = 0xe100b10; 12461 do_vfp_cond_or_thumb (); 12462 inst.instruction |= LOW4 (dn) << 16; 12463 inst.instruction |= HI1 (dn) << 7; 12464 inst.instruction |= inst.operands[0].reg << 12; 12465 inst.instruction |= (abcdebits & 3) << 5; 12466 inst.instruction |= (abcdebits >> 2) << 21; 12467 } 12468 break; 12469 12470 case NS_RRD: /* case 7 (fmrrd). 
*/ 12471 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2), 12472 _(BAD_FPU)); 12473 12474 inst.instruction = 0xc500b10; 12475 do_vfp_cond_or_thumb (); 12476 inst.instruction |= inst.operands[0].reg << 12; 12477 inst.instruction |= inst.operands[1].reg << 16; 12478 inst.instruction |= LOW4 (inst.operands[2].reg); 12479 inst.instruction |= HI1 (inst.operands[2].reg) << 5; 12480 break; 12481 12482 case NS_FF: /* case 8 (fcpys). */ 12483 do_vfp_nsyn_opcode ("fcpys"); 12484 break; 12485 12486 case NS_FI: /* case 10 (fconsts). */ 12487 ldconst = "fconsts"; 12488 encode_fconstd: 12489 if (is_quarter_float (inst.operands[1].imm)) 12490 { 12491 inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm); 12492 do_vfp_nsyn_opcode (ldconst); 12493 } 12494 else 12495 first_error (_("immediate out of range")); 12496 break; 12497 12498 case NS_RF: /* case 12 (fmrs). */ 12499 do_vfp_nsyn_opcode ("fmrs"); 12500 break; 12501 12502 case NS_FR: /* case 13 (fmsr). */ 12503 do_vfp_nsyn_opcode ("fmsr"); 12504 break; 12505 12506 /* The encoders for the fmrrs and fmsrr instructions expect three operands 12507 (one of which is a list), but we have parsed four. Do some fiddling to 12508 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2 12509 expect. */ 12510 case NS_RRFF: /* case 14 (fmrrs). */ 12511 constraint (inst.operands[3].reg != inst.operands[2].reg + 1, 12512 _("VFP registers must be adjacent")); 12513 inst.operands[2].imm = 2; 12514 memset (&inst.operands[3], '\0', sizeof (inst.operands[3])); 12515 do_vfp_nsyn_opcode ("fmrrs"); 12516 break; 12517 12518 case NS_FFRR: /* case 15 (fmsrr). 
*/
      /* fmsrr: the two source S registers must form a consecutive pair;
	 shuffle the parsed four operands down to the three that
	 do_vfp_sp2_from_reg2 expects (see comment above case NS_RRFF).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    default:
      /* An unrecognized shape: if a diagnostic is already pending, let it
	 stand; otherwise this is an internal error.  */
      if (inst.error)
	break;
      abort ();
    }
}

/* Encode a Neon shift-right (and accumulate/round) by immediate.
   A shift amount of zero has no encoding of its own and is assembled
   as the equivalent VMOV.  */

static void
do_neon_rshift_round_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for shift"));
  /* The hardware encodes the shift count as (size - imm).  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
		  et.size - imm);
}

/* Encode a lengthening move (Q destination, D source; VMOVL-style —
   assumption from the N_DBL/NS_QD shape, confirm against opcode table).  */

static void
do_neon_movl (void)
{
  struct neon_type_el et = neon_check_type (2, NS_QD,
    N_EQK | N_DBL, N_SU_32 | N_KEY);
  unsigned sizebits = et.size >> 3;
  /* Element size is encoded one-hot in bits [21:19].  */
  inst.instruction |= sizebits << 19;
  neon_two_same (0, et.type == NT_unsigned, -1);
}

/* Encode VTRN (transpose) on a pair of D or Q registers.  */

static void
do_neon_trn (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  inst.instruction = NEON_ENC_INTEGER (inst.instruction);
  neon_two_same (neon_quad (rs), 1, et.size);
}

/* Encode VZIP/VUZP.  */

static void
do_neon_zip_uzp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special
case: encode as VTRN.32 <Dd>, <Dm>. */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}

/* Encode saturating absolute value / negate (VQABS/VQNEG); signed
   element types only.  */

static void
do_neon_sat_abs_neg (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}

/* Encode pairwise add-long operations (VPADDL/VPADAL-style).  */

static void
do_neon_pair_long (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
  /* Unsigned is encoded in OP field (bit 7) for these instructions.  */
  inst.instruction |= (et.type == NT_unsigned) << 7;
  neon_two_same (neon_quad (rs), 1, et.size);
}

/* Encode reciprocal/reciprocal-sqrt estimate (VRECPE/VRSQRTE);
   float vs. unsigned integer form is selected by bit 8.  */

static void
do_neon_recip_est (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
  inst.instruction |= (et.type == NT_float) << 8;
  neon_two_same (neon_quad (rs), 1, et.size);
}

/* Encode VCLS (count leading sign bits); signed element types only.  */

static void
do_neon_cls (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}

/* Encode VCLZ (count leading zeros); any integer element type.  */

static void
do_neon_clz (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}

/* Encode VCNT (population count); 8-bit elements only.  */

static void
do_neon_cnt (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_INT, N_8 | N_KEY);
  neon_two_same
(neon_quad (rs), 1, et.size);
}

/* Encode VSWP (swap two registers); no element type needed.  */

static void
do_neon_swp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  neon_two_same (neon_quad (rs), 1, -1);
}

/* Encode the table-lookup instructions (VTBL/VTBX).  Operand 1 is a
   register list of 1-4 D registers; its length minus one goes in
   bits [9:8].  */

static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  inst.instruction = neon_dp_fixup (inst.instruction);
}

/* Encode VLDM/VSTM of D registers (or defer to the VFP single-precision
   form when the list is S registers).  */

static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each D register transfers two words.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!)
must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}

/* Encode single-register VLDR/VSTR by delegating to the equivalent VFP
   mnemonic: flds/fsts for S registers, fldd/fstd for D registers.
   Bit 20 is the load/store (L) bit of the pre-set bitmask.  */

static void
do_neon_ldr_str (void)
{
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  if (inst.operands[0].issingle)
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("flds");
      else
	do_vfp_nsyn_opcode ("fsts");
    }
  else
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("fldd");
      else
	do_vfp_nsyn_opcode ("fstd");
    }
}

/* "interleave" version also handles non-interleaving register VLD1/VST1
   instructions.  */

static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
       -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
       -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
       -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.
*/
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Optional @<align> qualifier: 64/128/256-bit alignment is encoded in
     bits [5:4]; 3-register lists cannot take 128- or 256-bit alignment.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here,
     look up the right value for "type" in a table based on this value and
     the given list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  /* Only VLD1/VST1 allows a size of 64.  As the comment above states, the
     <n> of VLD<n>/VST<n> is in bits [9:8] of the initial bitmask.  */
  constraint (((inst.instruction >> 8) & 3) != 0 &&
	      et.size == 64, _("bad size for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}

/* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
   *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
   otherwise.  The variable arguments are a list of pairs of legal
   (size, align) values, terminated with -1.  Returns SUCCESS or FAIL
   (emitting a diagnostic via first_error on FAIL).  */

static int
neon_alignment_bit (int size, int align, int *do_align, ...)
{
  va_list ap;
  int result = FAIL, thissize, thisalign;

  /* No @<align> qualifier parsed: nothing to check, no bit to set.  */
  if (!inst.operands[1].immisalign)
    {
      *do_align = 0;
      return SUCCESS;
    }

  va_start (ap, do_align);

  /* Scan the (size, align) pairs until a match or the -1 terminator.  */
  do
    {
      thissize = va_arg (ap, int);
      if (thissize == -1)
	break;
      thisalign = va_arg (ap, int);

      if (size == thissize && align == thisalign)
	result = SUCCESS;
    }
  while (result != SUCCESS);

  va_end (ap);

  if (result == SUCCESS)
    *do_align = 1;
  else
    first_error (_("unsupported alignment for instruction"));

  return result;
}

/* Encode single n-element structure to/from one lane VLD<n>/VST<n>
   instructions.  <n> minus one is taken from bits [9:8] of the initial
   bitmask.  */

static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  int n = (inst.instruction >> 8) & 3;
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  switch (n)
    {
    case 0: /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1: /* VLD2 / VST2.
*/
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
				       32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	inst.instruction |= 1 << 4;
      break;

    case 2: /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3: /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    /* Size 32 admits two legal alignments (64 or 128 bits), which
	       take different encodings.  */
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}

/* Encode single n-element structure to all lanes VLD<n> instructions.  */

static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;

  if (et.type == NT_invtype)
    return;

  /* <n> minus one is held in bits [9:8] of the initial bitmask.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0: /* VLD1.
*/
      /* VLD1 to all lanes never takes a register stride of 2; the parser
	 should not have produced one.  */
      assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      /* List length 2 is encoded in bit 5.  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1: /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 8, 16, 16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      /* Register stride of 2 is encoded in bit 5.  */
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2: /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3: /* VLD4.
*/
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with 128-bit alignment use the special size
	   encoding 0x3 rather than log2(size).  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  inst.instruction |= do_align << 4;
}

/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */

static void
do_neon_ldx_stx (void)
{
  /* The lane specifier on operand 0 selects which of the three element/
     structure forms was written.  */
  switch (NEON_LANE (inst.operands[0].imm))
    {
    case NEON_INTERLEAVE_LANES:
      inst.instruction = NEON_ENC_INTERLV (inst.instruction);
      do_neon_ld_st_interleave ();
      break;

    case NEON_ALL_LANES:
      inst.instruction = NEON_ENC_DUP (inst.instruction);
      do_neon_ld_dup ();
      break;

    default:
      inst.instruction = NEON_ENC_LANE (inst.instruction);
      do_neon_ld_st_lane ();
    }

  /* L bit comes from bit mask.
*/
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Addressing mode: [Rn], Rm (post-index by register), [Rn]! (writeback,
     Rm field = 0xd), or plain [Rn] (no writeback, Rm field = 0xf).  */
  if (inst.operands[1].postind)
    {
      int postreg = inst.operands[1].imm & 0xf;
      constraint (!inst.operands[1].immisreg,
		  _("post-index must be a register"));
      /* 0xd and 0xf are reserved encodings in the Rm field.  */
      constraint (postreg == 0xd || postreg == 0xf,
		  _("bad register for post-index"));
      inst.instruction |= postreg;
    }
  else if (inst.operands[1].writeback)
    {
      inst.instruction |= 0xd;
    }
  else
    inst.instruction |= 0xf;

  /* Top byte differs between the Thumb and ARM encodings.  */
  if (thumb_mode)
    inst.instruction |= 0xf9000000;
  else
    inst.instruction |= 0xf4000000;
}


/* Overall per-instruction processing.  */

/* We need to be able to fix up arbitrary expressions in some statements.
   This is so that we can handle symbols that are an arbitrary distance from
   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
   which returns part of an address in a form which will be valid for
   a data instruction.  We do this by pushing the expression into a symbol
   in the expr_section, and creating a fix for that.
*/

/* Create a fix at FRAG+WHERE of SIZE bytes for expression EXP.
   PC_REL, PCREL_RELOC and RELOC are passed through to the fix; simple
   symbol expressions go via fix_new_exp, everything else is split into
   its add/subtract symbols and offset for fix_new.  */

static void
fix_new_arm (fragS *	frag,
	     int	where,
	     short int	size,
	     expressionS * exp,
	     int	pc_rel,
	     int	pcrel_reloc, /* HACK_GUESS */
	     int	reloc)
{
  fixS *	new_fix;

  switch (exp->X_op)
    {
#ifdef NOTYET
    case O_constant:
#endif
    case O_symbol:
#ifdef NOTYET
    case O_add:
    case O_subtract:
#endif
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel, pcrel_reloc, reloc);
      break;
    default:
      new_fix = fix_new (frag,
			 where,
			 size,
			 exp->X_add_symbol,
			 exp->X_subtract_symbol,
			 exp->X_add_number,
			 pc_rel,
			 pcrel_reloc,
			 reloc);
#ifdef NOTYET
      new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
			 pc_rel, reloc);
#endif /* NOTYET */
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = &thumb_mode;
}

/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Pull the symbol and offset the relaxation target depends on out of
     the instruction's relocation expression.  */
  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      /* Avoid make_expr_symbol() if there is no subtract symbol and the
	 symbol is just an undefined symbol or absolute, if so use that in the
	 expression.
*/
      if (inst.reloc.exp.X_subtract_symbol == NULL &&
	  inst.reloc.exp.X_add_symbol != NULL &&
	  ((inst.reloc.exp.X_add_symbol->sy_nlist.n_type & N_TYPE) == N_UNDF ||
	   (inst.reloc.exp.X_add_symbol->sy_nlist.n_type & N_TYPE) == N_ABS) )
	{
	  sym = inst.reloc.exp.X_add_symbol;
	  offset = inst.reloc.exp.X_add_number;
	}
      else
	{
	  sym = make_expr_symbol (&inst.reloc.exp);
	  offset = 0;
	}
      break;
    }
  /* Emit a variable-sized frag: THUMB_SIZE bytes now, growable up to
     INSN_SIZE, resolved later by the relaxation machinery.  */
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}

/* Write a 32-bit thumb instruction to buf.  */
static void
put_thumb32_insn (char * buf, uint32_t insn)
{
  /* Thumb-2 instructions are emitted as two little-endian halfwords,
     most-significant halfword first.  */
  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
}

/* Emit the assembled instruction in INST to the current frag, creating
   a fix for its relocation (if any) and a DWARF line entry.  STR is the
   original source line, used only for diagnostics.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax) {
    output_relax_insn();
    return;
  }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* A 2-word ARM instruction: the same word is written twice
	 (e.g. for ADRL-style expansions patched up later).  */
      assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
		 /* HACK_GUESS */ inst.reloc.pcrel_reloc,
		 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}

#endif /* INSNS_TABLE_ONLY */

/* Tag values used in struct asm_opcode's tag field.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};

#ifndef INSNS_TABLE_ONLY

/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic.

   This is not simply a hash table lookup, because of conditional
Most instructions have conditional variants, which are 13244 expressed with a _conditional affix_ to the mnemonic. If we were 13245 to encode each conditional variant as a literal string in the opcode 13246 table, it would have approximately 20,000 entries. 13247 13248 Most mnemonics take this affix as a suffix, and in unified syntax, 13249 'most' is upgraded to 'all'. However, in the divided syntax, some 13250 instructions take the affix as an infix, notably the s-variants of 13251 the arithmetic instructions. Of those instructions, all but six 13252 have the infix appear after the third character of the mnemonic. 13253 13254 Accordingly, the algorithm for looking up primary opcodes given 13255 an identifier is: 13256 13257 1. Look up the identifier in the opcode table. 13258 If we find a match, go to step U. 13259 13260 2. Look up the last two characters of the identifier in the 13261 conditions table. If we find a match, look up the first N-2 13262 characters of the identifier in the opcode table. If we 13263 find a match, go to step CE. 13264 13265 3. Look up the fourth and fifth characters of the identifier in 13266 the conditions table. If we find a match, extract those 13267 characters from the identifier, and look up the remaining 13268 characters in the opcode table. If we find a match, go 13269 to step CM. 13270 13271 4. Fail. 13272 13273 U. Examine the tag field of the opcode structure, in case this is 13274 one of the six instructions with its conditional infix in an 13275 unusual place. If it is, the tag tells us where to find the 13276 infix; look it up in the conditions table and set inst.cond 13277 accordingly. Otherwise, this is an unconditional instruction. 13278 Again set inst.cond accordingly. Return the opcode structure. 13279 13280 CE. Examine the tag field to make sure this is an instruction that 13281 should receive a conditional suffix. If it is not, fail. 
   Otherwise, set inst.cond from the suffix we already looked up,
   and return the opcode structure.

   CM. Examine the tag field to make sure this is an instruction that
   should receive a conditional infix after the third character.
   If it is not, fail.  Otherwise, undo the edits to the current
   line of input and proceed as for case CE.  */

static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];
  bfd_boolean neon_supported;

  neon_supported = ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1);

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || ((unified_syntax || neon_supported) && *end == '.'))
      break;

  if (end == base)
    return 0;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return 0;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return 0;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = hash_find_n (arm_ops_hsh, base, end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      if (unified_syntax)
	as_warn (_("conditional infixes are deprecated in unified syntax"));
      /* One of the rare instructions with its conditional infix in an
	 unusual place; the tag encodes where it is.  */
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = hash_find_n (arm_cond_hsh, affix, 2);
      assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* Cannot have a conditional suffix on a mnemonic of less than three
     characters (the suffix itself is two characters).  */
  if (end - base < 3)
    return 0;

  /* Look for suffixed mnemonic.  */
  affix = end - 2;
  cond = hash_find_n (arm_cond_hsh, affix, 2);
  opcode = hash_find_n (arm_ops_hsh, base, affix - base);
  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return 0;
	  /* else fall through */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    {
	      inst.cond = cond->value;
	    }
	  else
	    {
	      /* delayed diagnostic */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return 0;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return 0;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return 0;

  /* Temporarily splice the two-character infix out of the mnemonic,
     look the remainder up, then restore the input line unchanged.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* step CM */
      if (unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_warn (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return 0;
}

void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

#ifdef NOTYET
  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }
#endif /* NOTYET */

  memset (&inst, '\0', sizeof (inst));
  inst.reloc.type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (!create_register_alias (str, p)
	  && !create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (opcode->tag == OT_cinfix3_deprecated)
    as_warn (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.
*/ 13485 inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1; 13486 13487 if (thumb_mode) 13488 { 13489 arm_feature_set variant; 13490 13491 variant = cpu_variant; 13492 /* Only allow coprocessor instructions on Thumb-2 capable devices. */ 13493 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2)) 13494 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard); 13495 /* Check that this instruction is supported for this CPU. */ 13496 if (!opcode->tvariant 13497 || (thumb_mode == 1 13498 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant) 13499 && !force_cpusubtype_ALL)) 13500 { 13501 as_bad (_("selected processor does not support `%s'"), str); 13502 return; 13503 } 13504 if (inst.cond != COND_ALWAYS && !unified_syntax 13505 && opcode->tencode != do_t_branch) 13506 { 13507 as_bad (_("Thumb does not support conditional execution")); 13508 return; 13509 } 13510 13511 if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2) && !inst.size_req) 13512 { 13513 /* Implicit require narrow instructions on Thumb-1. This avoids 13514 relaxation accidentally introducing Thumb-2 instructions. */ 13515 if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23) 13516 inst.size_req = 2; 13517 } 13518 13519 /* Check conditional suffixes. */ 13520 if (current_it_mask) 13521 { 13522 int cond; 13523 cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1; 13524 current_it_mask <<= 1; 13525 current_it_mask &= 0x1f; 13526 /* The BKPT instruction is unconditional even in an IT block. 
*/ 13527 if (!inst.error 13528 && cond != inst.cond && opcode->tencode != do_t_bkpt) 13529 { 13530 as_bad (_("incorrect condition in IT block")); 13531 return; 13532 } 13533 } 13534 else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch) 13535 { 13536 as_bad (_("thumb conditional instruction not in IT block")); 13537 return; 13538 } 13539 13540 mapping_state (MAP_THUMB); 13541 inst.instruction = opcode->tvalue; 13542 13543 if (!parse_operands (p, opcode->operands)) 13544 opcode->tencode (); 13545 13546 /* Clear current_it_mask at the end of an IT block. */ 13547 if (current_it_mask == 0x10) 13548 current_it_mask = 0; 13549 13550 if (!(inst.error || inst.relax)) 13551 { 13552 assert (inst.instruction < 0xe800 || inst.instruction > 0xffff); 13553 inst.size = (inst.instruction > 0xffff ? 4 : 2); 13554 if (inst.size_req && inst.size_req != inst.size) 13555 { 13556 as_bad (_("cannot honor width suffix -- `%s'"), str); 13557 return; 13558 } 13559 } 13560 13561 /* Something has gone badly wrong if we try to relax a fixed size 13562 instruction. */ 13563 assert (inst.size_req == 0 || !inst.relax); 13564 13565 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, 13566 *opcode->tvariant); 13567 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly 13568 set those bits when Thumb-2 32-bit instructions are seen. ie. 13569 anything other than bl/blx. 13570 This is overly pessimistic for relaxable instructions. */ 13571 if ((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800) 13572 || inst.relax) 13573 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, 13574 arm_ext_v6t2); 13575 } 13576 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)) 13577 { 13578 /* Check that this instruction is supported for this CPU. 
*/ 13579 if (!opcode->avariant || 13580 (!ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant) && 13581 !force_cpusubtype_ALL)) 13582 { 13583 as_bad (_("selected processor does not support `%s'"), str); 13584 return; 13585 } 13586 /* Allow width suffixes with unified_syntax */ 13587 if (inst.size_req && !unified_syntax) 13588 { 13589 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str); 13590 return; 13591 } 13592 13593 mapping_state (MAP_ARM); 13594 inst.instruction = opcode->avalue; 13595 if (opcode->tag == OT_unconditionalF) 13596 inst.instruction |= 0xF << 28; 13597 else 13598 inst.instruction |= inst.cond << 28; 13599 inst.size = INSN_SIZE; 13600 if (!parse_operands (p, opcode->operands)) 13601 opcode->aencode (); 13602 /* Arm mode bx is marked as both v4T and v5 because it's still required 13603 on a hypothetical non-thumb v5 core. */ 13604 if (ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v4t) 13605 || ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v5)) 13606 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t); 13607 else 13608 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, 13609 *opcode->avariant); 13610 } 13611 else 13612 { 13613 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor " 13614 "-- `%s'"), str); 13615 return; 13616 } 13617 output_inst (str); 13618 13619/* FROM Mac OS X port */ 13620 /* 13621 * If the -g flag is present generate a line number stab for the 13622 * instruction. 13623 * 13624 * See the detailed comments about stabs in read_a_source_file() for a 13625 * description of what is going on here. 13626 */ 13627 if (flagseen['g'] && frchain_now->frch_nsect == text_nsect) 13628 { 13629 (void)symbol_new( 13630 "", 13631 68 /* N_SLINE */, 13632 text_nsect, 13633 logical_input_line /* n_desc, line number */, 13634 obstack_next_free(&frags) - frag_now->fr_literal, 13635 frag_now); 13636 } 13637 /* 13638 * If the --gdwarf2 flag is present generate a .loc for this. 
13639 */ 13640 if(debug_type == DEBUG_DWARF2 && frchain_now->frch_nsect == text_nsect){ 13641 dwarf2_loc(dwarf2_file_number, logical_input_line); 13642 } 13643 13644 /* 13645 * We are putting a machine instruction in this section so mark it as 13646 * containg some machine instructions. 13647 */ 13648 frchain_now->frch_section.flags |= S_ATTR_SOME_INSTRUCTIONS; 13649} 13650 13651/* Various frobbings of labels and their addresses. */ 13652 13653void 13654arm_start_line_hook (void) 13655{ 13656 last_label_seen = NULL; 13657} 13658 13659void 13660arm_frob_label (symbolS * sym) 13661{ 13662#ifdef NOTYET 13663 last_label_seen = sym; 13664 13665 ARM_SET_THUMB (sym, thumb_mode); 13666 13667#if defined OBJ_COFF || defined OBJ_ELF 13668 ARM_SET_INTERWORK (sym, support_interwork); 13669#endif 13670#endif /* NOTYET */ 13671 13672 /* Note - do not allow local symbols (.Lxxx) to be labeled 13673 as Thumb functions. This is because these labels, whilst 13674 they exist inside Thumb code, are not the entry points for 13675 possible ARM->Thumb calls. Also, these labels can be used 13676 as part of a computed goto or switch statement. eg gcc 13677 can generate code that looks like this: 13678 13679 ldr r2, [pc, .Laaa] 13680 lsl r3, r3, #2 13681 ldr r2, [r3, r2] 13682 mov pc, r2 13683 13684 .Lbbb: .word .Lxxx 13685 .Lccc: .word .Lyyy 13686 ..etc... 13687 .Laaa: .word Lbbb 13688 13689 The first instruction loads the address of the jump table. 13690 The second instruction converts a table index into a byte offset. 13691 The third instruction gets the jump address out of the table. 13692 The fourth instruction performs the jump. 13693 13694 If the address stored at .Laaa is that of a symbol which has the 13695 Thumb_Func bit set, then the linker will arrange for this address 13696 to have the bottom bit set, which in turn would mean that the 13697 address computation performed by the third instruction would end 13698 up with the bottom bit set. 
Since the ARM is capable of unaligned 13699 word loads, the instruction would then load the incorrect address 13700 out of the jump table, and chaos would ensue. */ 13701 if (label_is_thumb_function_name 13702 && (S_GET_NAME (sym)[0] != 'L') 13703#ifdef NOTYET 13704 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0) 13705#else 13706 && (frchain_now->frch_nsect == text_nsect)) 13707#endif 13708 { 13709 /* When the address of a Thumb function is taken the bottom 13710 bit of that address should be set. This will allow 13711 interworking between Arm and Thumb functions to work 13712 correctly. */ 13713 13714 THUMB_SET_FUNC (sym, 1); 13715 sym->sy_desc |= N_ARM_THUMB_DEF; 13716 13717 label_is_thumb_function_name = FALSE; 13718 } 13719 13720 dwarf2_emit_label (sym); 13721} 13722 13723int 13724arm_data_in_code (void) 13725{ 13726 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5)) 13727 { 13728 *input_line_pointer = '/'; 13729 input_line_pointer += 5; 13730 *input_line_pointer = 0; 13731 return 1; 13732 } 13733 13734 return 0; 13735} 13736 13737char * 13738arm_canonicalize_symbol_name (char * name) 13739{ 13740 int len; 13741 13742 if (thumb_mode && (len = strlen (name)) > 5 13743 && streq (name + len - 5, "/data")) 13744 *(name + len - 5) = 0; 13745 13746 return name; 13747} 13748 13749/* Table of all register names defined by default. The user can 13750 define additional names with .req. Note that all register names 13751 should appear in both upper and lowercase variants. Some registers 13752 also have mixed-case names. 
 */

/* REGDEF(name, number, type) builds one reg_entry; REGNUM pastes the
   number onto a prefix; REGNUM2 is like REGNUM but the entry's value is
   twice the number (used for Neon Q registers, which map onto D-register
   pairs).  REGSET/REGSETH/REGSET2 expand to sets of 16 entries.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)

static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c, CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.  */
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.  */
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(mvfr1,6,VFC), REGDEF(mvfr0,7,VFC),
  REGDEF(MVFR1,6,VFC), REGDEF(MVFR0,7,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
#undef REGDEF
#undef REGNUM
#undef REGSET

/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  Every ordering of the f/s/x/c field
   letters is accepted, so all permutations are spelled out.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},
  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};

/* Table of V7M psr names.  The value is the SYSm field used by the
   MRS/MSR special-register encodings.	*/
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	  0 }, {"APSR",		0 },
  {"iapsr",	  1 }, {"IAPSR",	1 },
  {"eapsr",	  2 }, {"EAPSR",	2 },
  {"psr",	  3 }, {"PSR",		3 },
  {"xpsr",	  3 }, {"XPSR",		3 }, {"xPSR",	  3 },
  {"ipsr",	  5 }, {"IPSR",		5 },
  {"epsr",	  6 }, {"EPSR",		6 },
  {"iepsr",	  7 }, {"IEPSR",	7 },
  {"msp",	  8 }, {"MSP",		8 },
  {"psp",	  9 }, {"PSP",		9 },
  {"primask",	  16}, {"PRIMASK",	16},
  {"basepri",	  17}, {"BASEPRI",	17},
  {"basepri_max", 18}, {"BASEPRI_MAX",	18},
  {"faultmask",	  19}, {"FAULTMASK",	19},
  {"control",	  20}, {"CONTROL",	20}
};

/* Table of all shift-in-operand names.  "asl" is accepted as a synonym
   for "lsl".  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};

/* Table of all conditional affixes.  0xF is not defined as a condition code.
 */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};

/* DMB/DSB barrier option names and their 4-bit encodings.  Several
   legacy names share an encoding with the architectural ones
   (e.g. "un" == "nsh").  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  { "sy",   0xf },
  { "un",   0x7 },
  { "st",   0xe },
  { "unst", 0x6 },
  { "ish",  0xb },
  { "ishst",0xa },
  { "nsh",  0x7 },
  { "nshst",0x6 },
  { "osh",  0x3 },
  { "oshst",0x2 }
};

#endif /* INSNS_TABLE_ONLY */

/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
       TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
       TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
       TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
       TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
       TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
       TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)

/* Mnemonic with a conditional infix in an unusual place.  Each and every variant has to
   appear in the condition table.  */
#define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
  { #m1 #m2 #m3, OPS##nops ops, \
    sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
    0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }

#define TxCM(m1, m2, op, top, nops, ops, ae, te) \
  TxCM_(m1,   , m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, eq, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, ne, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, cs, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, hs, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, cc, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, ul, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, lo, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, mi, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, pl, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, vs, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, vc, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, hi, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, ls, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, ge, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, lt, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, gt, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, le, m2, op, top, nops, ops, ae, te),	\
  TxCM_(m1, al, m2, op, top, nops, ops, ae, te)

#define TCM(m1,m2, aop, top, nops, ops, ae, te) \
       TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
#define tCM(m1,m2, aop, top, nops, ops, ae, te) \
       TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.	Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.	*/
#define CE(mnem,  op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { #m1 #m2 #m3, OPS##nops ops, \
    sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_(m1,   , m2, op, nops, ops, ae),	\
  xCM_(m1, eq, m2, op, nops, ops, ae),	\
  xCM_(m1, ne, m2, op, nops, ops, ae),	\
  xCM_(m1, cs, m2, op, nops, ops, ae),	\
  xCM_(m1, hs, m2, op, nops, ops, ae),	\
  xCM_(m1, cc, m2, op, nops, ops, ae),	\
  xCM_(m1, ul, m2, op, nops, ops, ae),	\
  xCM_(m1, lo, m2, op, nops, ops, ae),	\
  xCM_(m1, mi, m2, op, nops, ops, ae),	\
  xCM_(m1, pl, m2, op, nops, ops, ae),	\
  xCM_(m1, vs, m2, op, nops, ops, ae),	\
  xCM_(m1, vc, m2, op, nops, ops, ae),	\
  xCM_(m1, hi, m2, op, nops, ops, ae),	\
  xCM_(m1, ls, m2, op, nops, ops, ae),	\
  xCM_(m1, ge, m2, op, nops, ops, ae),	\
  xCM_(m1, lt, m2, op, nops, ops, ae),	\
  xCM_(m1, gt, m2, op, nops, ops, ae),	\
  xCM_(m1, le, m2, op, nops, ops, ae),	\
  xCM_(m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing. ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc)					\
  NCE_tag(mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc)					\
  NCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, N_MNEM_##op, N_MNEM_##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc)					\
  nCE_tag(mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc)					\
  nCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)

#define do_0 0

/* Thumb-only, unconditional.  */
#define UT(mnem,  op, nops, ops, te) TUE(mnem,	0, op, nops, ops, 0, te)

static const struct asm_opcode insns[] =
{
#define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions.
*/ 14202#define THUMB_VARIANT &arm_ext_v4t 14203 /* APPLE LOCAL */ 14204 TUE(trap, 7ffdefe, defe, 0, (), noargs, noargs), 14205 14206 tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c), 14207 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c), 14208 tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c), 14209 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c), 14210 tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub), 14211 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub), 14212 tCE(add, 0800000, add, 3, (RR, oRR, SHG), arit, t_add_sub), 14213 tC3(adds, 0900000, adds, 3, (RR, oRR, SHG), arit, t_add_sub), 14214 tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c), 14215 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c), 14216 tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3), 14217 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3), 14218 tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c), 14219 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c), 14220 tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3), 14221 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3), 14222 14223 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism 14224 for setting PSR flag bits. They are obsolete in V6 and do not 14225 have Thumb equivalents. 
*/ 14226 tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst), 14227 tC3w(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst), 14228 CL(tstp, 110f000, 2, (RR, SH), cmp), 14229 tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp), 14230 tC3w(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp), 14231 CL(cmpp, 150f000, 2, (RR, SH), cmp), 14232 tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst), 14233 tC3w(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst), 14234 CL(cmnp, 170f000, 2, (RR, SH), cmp), 14235 14236 tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp), 14237 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp), 14238 tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst), 14239 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst), 14240 14241 tCE(ldr, 4100000, ldr, 2, (RR, ADDRGLDR),ldst, t_ldst), 14242 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDRGLDR),ldst, t_ldst), 14243 tCE(str, 4000000, str, 2, (RR, ADDRGLDR),ldst, t_ldst), 14244 tC3(strb, 4400000, strb, 2, (RR, ADDRGLDR),ldst, t_ldst), 14245 14246 tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm), 14247 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm), 14248 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm), 14249 tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm), 14250 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm), 14251 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm), 14252 14253 TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi), 14254 TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi), 14255 tCE(b, a000000, b, 1, (EXPr), branch, t_branch), 14256 TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23), 14257 14258 /* Pseudo ops. */ 14259 tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr), 14260 C3(adrl, 28f0000, 2, (RR, EXP), adrl), 14261 tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop), 14262 14263 /* Thumb-compatibility pseudo ops. 
*/ 14264 tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift), 14265 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift), 14266 tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift), 14267 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift), 14268 tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift), 14269 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift), 14270 tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift), 14271 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift), 14272 tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg), 14273 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg), 14274 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop), 14275 tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop), 14276 14277 /* These may simplify to neg. */ 14278 TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb), 14279 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb), 14280 14281 TCE(rrx, 1a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rd_rm), 14282 TCE(rrxs, 1b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rd_rm), 14283 14284#undef THUMB_VARIANT 14285#define THUMB_VARIANT &arm_ext_v6 14286 TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy), 14287 14288 /* V1 instructions with no Thumb analogue prior to V6T2. 
*/ 14289#undef THUMB_VARIANT 14290#define THUMB_VARIANT &arm_ext_v6t2 14291 TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst), 14292 TC3w(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst), 14293 CL(teqp, 130f000, 2, (RR, SH), cmp), 14294 14295 TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt), 14296 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt), 14297 TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt), 14298 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt), 14299 14300 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm), 14301 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm), 14302 14303 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm), 14304 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm), 14305 14306 /* V1 instructions with no Thumb analogue at all. */ 14307 CE(rsc, 0e00000, 3, (RR, oRR, SH), arit), 14308 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit), 14309 14310 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm), 14311 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm), 14312 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm), 14313 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm), 14314 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm), 14315 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm), 14316 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm), 14317 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm), 14318 14319#undef ARM_VARIANT 14320#define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */ 14321#undef THUMB_VARIANT 14322#define THUMB_VARIANT &arm_ext_v4t 14323 tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul), 14324 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul), 14325 14326#undef THUMB_VARIANT 14327#define THUMB_VARIANT &arm_ext_v6t2 14328 TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla), 14329 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas), 14330 14331 /* Generic coprocessor instructions. 
*/ 14332 TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp), 14333 TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), 14334 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), 14335 TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), 14336 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), 14337 TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg), 14338 TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg), 14339 14340#undef ARM_VARIANT 14341#define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */ 14342 CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn), 14343 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn), 14344 14345#undef ARM_VARIANT 14346#define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */ 14347 TCE(mrs, 10f0000, f3ef8000, 2, (APSR_RR, RVC_PSR), mrs, t_mrs), 14348 TCE(msr, 120f000, f3808000, 2, (RVC_PSR, RR_EXi), msr, t_msr), 14349 14350#undef ARM_VARIANT 14351#define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */ 14352 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull), 14353 CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull), 14354 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull), 14355 CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull), 14356 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull), 14357 CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull), 14358 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull), 14359 CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull), 14360 14361#undef ARM_VARIANT 14362#define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. 
*/ 14363#undef THUMB_VARIANT 14364#define THUMB_VARIANT &arm_ext_v4t 14365 tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst), 14366 tC3(strh, 00000b0, strh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst), 14367 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst), 14368 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst), 14369 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst), 14370 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst), 14371 14372#undef ARM_VARIANT 14373#define ARM_VARIANT &arm_ext_v4t_5 14374 /* ARM Architecture 4T. */ 14375 /* Note: bx (and blx) are required on V5, even if the processor does 14376 not support Thumb. */ 14377 TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx), 14378 14379#undef ARM_VARIANT 14380#define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */ 14381#undef THUMB_VARIANT 14382#define THUMB_VARIANT &arm_ext_v5t 14383 /* Note: blx has 2 variants; the .value coded here is for 14384 BLX(2). Only this variant has conditional execution. */ 14385 TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx), 14386 TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt), 14387 14388#undef THUMB_VARIANT 14389#define THUMB_VARIANT &arm_ext_v6t2 14390 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz), 14391 TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), 14392 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), 14393 TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), 14394 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc), 14395 TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp), 14396 TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg), 14397 TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg), 14398 14399#undef ARM_VARIANT 14400#define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. 
*/ 14401 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla), 14402 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla), 14403 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla), 14404 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla), 14405 14406 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla), 14407 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla), 14408 14409 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal), 14410 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal), 14411 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal), 14412 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal), 14413 14414 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 14415 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 14416 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 14417 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 14418 14419 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 14420 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 14421 14422 TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_rd_rm_rn), 14423 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_rd_rm_rn), 14424 TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_rd_rm_rn), 14425 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_rd_rm_rn), 14426 14427#undef ARM_VARIANT 14428#define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. 
*/ 14429 TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld), 14430 TC3(ldrd, 00000d0, e8500000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd), 14431 TC3(strd, 00000f0, e8400000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd), 14432 14433 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c), 14434 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c), 14435 14436#undef ARM_VARIANT 14437#define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */ 14438 TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj), 14439 14440#undef ARM_VARIANT 14441#define ARM_VARIANT &arm_ext_v6 /* ARM V6. */ 14442#undef THUMB_VARIANT 14443#define THUMB_VARIANT &arm_ext_v6 14444 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi), 14445 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi), 14446 tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev), 14447 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev), 14448 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev), 14449 tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth), 14450 tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth), 14451 tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth), 14452 tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth), 14453 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend), 14454 14455#undef THUMB_VARIANT 14456#define THUMB_VARIANT &arm_ext_v6t2 14457 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex), 14458 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex), 14459 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c), 14460 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c), 14461 14462 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat), 14463 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat), 14464 14465/* ARM V6 not included in 
V7M (eg. integer SIMD). */ 14466#undef THUMB_VARIANT 14467#define THUMB_VARIANT &arm_ext_v6_notm 14468 TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps), 14469 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt), 14470 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb), 14471 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14472 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14473 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14474 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14475 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14476 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14477 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14478 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14479 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14480 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14481 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14482 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14483 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14484 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14485 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14486 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14487 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14488 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14489 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14490 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14491 TCE(uaddsubx, 
6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14492 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14493 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14494 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14495 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14496 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14497 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14498 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14499 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14500 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14501 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14502 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14503 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14504 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14505 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14506 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14507 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe), 14508 UF(rfeib, 9900a00, 1, (RRw), rfe), 14509 UF(rfeda, 8100a00, 1, (RRw), rfe), 14510 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe), 14511 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe), 14512 UF(rfefa, 9900a00, 1, (RRw), rfe), 14513 UF(rfeea, 8100a00, 1, (RRw), rfe), 14514 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe), 14515 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah), 14516 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah), 14517 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah), 14518 TCE(sxtb16, 68f0070, fa2ff080, 3, 
(RRnpc, RRnpc, oROR), sxth, t_sxth), 14519 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah), 14520 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah), 14521 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah), 14522 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth), 14523 TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd), 14524 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 14525 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 14526 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal), 14527 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal), 14528 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 14529 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 14530 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal), 14531 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal), 14532 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 14533 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 14534 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 14535 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 14536 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 14537 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 14538 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 14539 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 14540 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 14541 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 14542 TUF(srsia, 8c00500, e980c000, 2, (oRRw, I31w), srs, srs), 14543 UF(srsib, 
9c00500, 2, (oRRw, I31w), srs), 14544 UF(srsda, 8400500, 2, (oRRw, I31w), srs), 14545 TUF(srsdb, 9400500, e800c000, 2, (oRRw, I31w), srs, srs), 14546 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16), 14547 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal), 14548 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd), 14549 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla), 14550 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16), 14551 14552#undef ARM_VARIANT 14553#define ARM_VARIANT &arm_ext_v6k 14554#undef THUMB_VARIANT 14555#define THUMB_VARIANT &arm_ext_v6k 14556 tCE(yield, 320f001, yield, 0, (), noargs, t_hint), 14557 tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint), 14558 tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint), 14559 tCE(sev, 320f004, sev, 0, (), noargs, t_hint), 14560 14561#undef THUMB_VARIANT 14562#define THUMB_VARIANT &arm_ext_v6_notm 14563 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd), 14564 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd), 14565 14566#undef THUMB_VARIANT 14567#define THUMB_VARIANT &arm_ext_v6t2 14568 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn), 14569 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn), 14570 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn), 14571 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn), 14572 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs), 14573 14574#undef ARM_VARIANT 14575#define ARM_VARIANT &arm_ext_v6z 14576 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc), 14577 14578#undef ARM_VARIANT 14579#define ARM_VARIANT &arm_ext_v6t2 14580 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc), 14581 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi), 14582 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx), 
14583 TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx), 14584 14585 TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla), 14586 TCE(movw, 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16), 14587 TCE(movt, 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16), 14588 TCE(rbit, 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit), 14589 14590 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt), 14591 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt), 14592 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt), 14593 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt), 14594 14595 /* Thumb only instructions. */ 14596#undef ARM_VARIANT 14597#define ARM_VARIANT NULL 14598 UT(cbnz, b900, 2, (RR, EXP), t_cbz), 14599 UT(cbz, b100, 2, (RR, EXP), t_cbz), 14600 /* ARM does not really have an IT instruction, so always allow it. */ 14601#undef ARM_VARIANT 14602#define ARM_VARIANT &arm_ext_v1 14603 TUE(it, 0, bf08, 1, (COND), it, t_it), 14604 TUE(itt, 0, bf0c, 1, (COND), it, t_it), 14605 TUE(ite, 0, bf04, 1, (COND), it, t_it), 14606 TUE(ittt, 0, bf0e, 1, (COND), it, t_it), 14607 TUE(itet, 0, bf06, 1, (COND), it, t_it), 14608 TUE(itte, 0, bf0a, 1, (COND), it, t_it), 14609 TUE(itee, 0, bf02, 1, (COND), it, t_it), 14610 TUE(itttt, 0, bf0f, 1, (COND), it, t_it), 14611 TUE(itett, 0, bf07, 1, (COND), it, t_it), 14612 TUE(ittet, 0, bf0b, 1, (COND), it, t_it), 14613 TUE(iteet, 0, bf03, 1, (COND), it, t_it), 14614 TUE(ittte, 0, bf0d, 1, (COND), it, t_it), 14615 TUE(itete, 0, bf05, 1, (COND), it, t_it), 14616 TUE(ittee, 0, bf09, 1, (COND), it, t_it), 14617 TUE(iteee, 0, bf01, 1, (COND), it, t_it), 14618 14619 /* Thumb2 only instructions. 
*/ 14620#undef ARM_VARIANT 14621#define ARM_VARIANT NULL 14622 14623 TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w), 14624 TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w), 14625 TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb), 14626 TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb), 14627 TCE(orn, 0, ea600000, 3, (RR, oRR, SH), 0, t_orn), 14628 TCE(orns, 0, ea700000, 3, (RR, oRR, SH), 0, t_orn), 14629 14630 /* Thumb-2 hardware division instructions (R and M profiles only). */ 14631#undef ARM_VARIANT 14632#define ARM_VARIANT &arm_ext_v7 14633#undef THUMB_VARIANT 14634#define THUMB_VARIANT &arm_ext_div 14635 TCE(sdiv, 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div), 14636 TCE(udiv, 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div), 14637 14638 /* ARM V7 instructions. */ 14639#undef ARM_VARIANT 14640#define ARM_VARIANT &arm_ext_v7 14641#undef THUMB_VARIANT 14642#define THUMB_VARIANT &arm_ext_v7 14643 TUF(pldw, 410f000, f830f000, 1, (ADDR), pld, t_pld), 14644 TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld), 14645 TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg), 14646 TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier), 14647 TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier), 14648 TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier), 14649 14650#undef ARM_VARIANT 14651#define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). 
*/ 14652 cCE(wfs, e200110, 1, (RR), rd), 14653 cCE(rfs, e300110, 1, (RR), rd), 14654 cCE(wfc, e400110, 1, (RR), rd), 14655 cCE(rfc, e500110, 1, (RR), rd), 14656 14657 cCL(ldfs, c100100, 2, (RF, ADDRGLDC), rd_cpaddr), 14658 cCL(ldfd, c108100, 2, (RF, ADDRGLDC), rd_cpaddr), 14659 cCL(ldfe, c500100, 2, (RF, ADDRGLDC), rd_cpaddr), 14660 cCL(ldfp, c508100, 2, (RF, ADDRGLDC), rd_cpaddr), 14661 14662 cCL(stfs, c000100, 2, (RF, ADDRGLDC), rd_cpaddr), 14663 cCL(stfd, c008100, 2, (RF, ADDRGLDC), rd_cpaddr), 14664 cCL(stfe, c400100, 2, (RF, ADDRGLDC), rd_cpaddr), 14665 cCL(stfp, c408100, 2, (RF, ADDRGLDC), rd_cpaddr), 14666 14667 cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm), 14668 cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm), 14669 cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm), 14670 cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm), 14671 cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm), 14672 cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm), 14673 cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm), 14674 cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm), 14675 cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm), 14676 cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm), 14677 cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm), 14678 cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm), 14679 14680 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm), 14681 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm), 14682 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm), 14683 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm), 14684 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm), 14685 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm), 14686 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm), 14687 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm), 14688 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm), 14689 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm), 14690 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm), 14691 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm), 14692 14693 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm), 14694 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm), 14695 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm), 14696 cCL(abssz, 
e208160, 2, (RF, RF_IF), rd_rm), 14697 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm), 14698 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm), 14699 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm), 14700 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm), 14701 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm), 14702 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm), 14703 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm), 14704 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm), 14705 14706 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm), 14707 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm), 14708 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm), 14709 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm), 14710 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm), 14711 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm), 14712 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm), 14713 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm), 14714 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm), 14715 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm), 14716 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm), 14717 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm), 14718 14719 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm), 14720 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm), 14721 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm), 14722 cCL(sqtsz, e408160, 2, (RF, RF_IF), rd_rm), 14723 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm), 14724 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm), 14725 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm), 14726 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm), 14727 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm), 14728 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm), 14729 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm), 14730 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm), 14731 14732 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm), 14733 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm), 14734 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm), 14735 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm), 14736 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm), 14737 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm), 14738 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm), 14739 
cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm), 14740 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm), 14741 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm), 14742 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm), 14743 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm), 14744 14745 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm), 14746 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm), 14747 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm), 14748 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm), 14749 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm), 14750 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm), 14751 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm), 14752 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm), 14753 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm), 14754 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm), 14755 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm), 14756 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm), 14757 14758 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm), 14759 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm), 14760 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm), 14761 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm), 14762 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm), 14763 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm), 14764 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm), 14765 cCL(expdz, e7081e0, 2, (RF, RF_IF), rd_rm), 14766 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm), 14767 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm), 14768 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm), 14769 cCL(expdz, e788160, 2, (RF, RF_IF), rd_rm), 14770 14771 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm), 14772 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm), 14773 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm), 14774 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm), 14775 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm), 14776 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm), 14777 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm), 14778 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm), 14779 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm), 14780 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm), 14781 cCL(sinem, e888140, 2, (RF, RF_IF), 
rd_rm), 14782 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm), 14783 14784 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm), 14785 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm), 14786 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm), 14787 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm), 14788 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm), 14789 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm), 14790 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm), 14791 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm), 14792 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm), 14793 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm), 14794 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm), 14795 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm), 14796 14797 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm), 14798 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm), 14799 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm), 14800 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm), 14801 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm), 14802 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm), 14803 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm), 14804 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm), 14805 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm), 14806 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm), 14807 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm), 14808 cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm), 14809 14810 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm), 14811 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm), 14812 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm), 14813 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm), 14814 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm), 14815 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm), 14816 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm), 14817 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm), 14818 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm), 14819 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm), 14820 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm), 14821 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm), 14822 14823 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm), 14824 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm), 14825 cCL(acssm, ec08140, 
2, (RF, RF_IF), rd_rm), 14826 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm), 14827 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm), 14828 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm), 14829 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm), 14830 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm), 14831 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm), 14832 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm), 14833 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm), 14834 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm), 14835 14836 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm), 14837 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm), 14838 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm), 14839 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm), 14840 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm), 14841 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm), 14842 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm), 14843 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm), 14844 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm), 14845 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm), 14846 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm), 14847 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm), 14848 14849 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm), 14850 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm), 14851 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm), 14852 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm), 14853 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm), 14854 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm), 14855 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm), 14856 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm), 14857 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm), 14858 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm), 14859 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm), 14860 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm), 14861 14862 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm), 14863 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm), 14864 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm), 14865 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm), 14866 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm), 14867 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm), 14868 cCL(nrmdm, 
ef081c0, 2, (RF, RF_IF), rd_rm), 14869 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm), 14870 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm), 14871 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm), 14872 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm), 14873 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm), 14874 14875 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm), 14876 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm), 14877 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm), 14878 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm), 14879 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm), 14880 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm), 14881 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm), 14882 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm), 14883 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm), 14884 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm), 14885 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm), 14886 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm), 14887 14888 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm), 14889 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm), 14890 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm), 14891 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm), 14892 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm), 14893 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm), 14894 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm), 14895 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm), 14896 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm), 14897 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm), 14898 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm), 14899 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm), 14900 14901 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm), 14902 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm), 14903 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm), 14904 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm), 14905 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm), 14906 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), 
rd_rn_rm), 14907 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm), 14908 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm), 14909 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm), 14910 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm), 14911 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm), 14912 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm), 14913 14914 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm), 14915 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm), 14916 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm), 14917 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm), 14918 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm), 14919 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm), 14920 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm), 14921 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm), 14922 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm), 14923 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm), 14924 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm), 14925 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm), 14926 14927 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm), 14928 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm), 14929 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm), 14930 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm), 14931 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm), 14932 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm), 14933 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm), 14934 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm), 14935 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm), 14936 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm), 14937 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm), 14938 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm), 14939 14940 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm), 14941 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm), 14942 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm), 14943 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm), 14944 cCL(rdfd, e500180, 3, (RF, RF, 
RF_IF), rd_rn_rm), 14945 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm), 14946 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm), 14947 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm), 14948 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm), 14949 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm), 14950 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm), 14951 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm), 14952 14953 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm), 14954 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm), 14955 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm), 14956 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm), 14957 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm), 14958 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm), 14959 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm), 14960 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm), 14961 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm), 14962 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm), 14963 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm), 14964 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm), 14965 14966 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm), 14967 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm), 14968 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm), 14969 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm), 14970 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm), 14971 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm), 14972 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm), 14973 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm), 14974 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm), 14975 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm), 14976 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm), 14977 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm), 14978 14979 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm), 14980 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm), 14981 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm), 14982 cCL(rmfsz, e800160, 3, 
(RF, RF, RF_IF), rd_rn_rm), 14983 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm), 14984 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm), 14985 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm), 14986 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm), 14987 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm), 14988 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm), 14989 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm), 14990 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm), 14991 14992 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm), 14993 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm), 14994 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm), 14995 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm), 14996 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm), 14997 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm), 14998 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm), 14999 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm), 15000 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm), 15001 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm), 15002 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm), 15003 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm), 15004 15005 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm), 15006 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm), 15007 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm), 15008 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm), 15009 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm), 15010 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm), 15011 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm), 15012 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm), 15013 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm), 15014 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm), 15015 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm), 15016 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm), 15017 15018 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm), 15019 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm), 15020 cCL(frdsm, 
eb00140, 3, (RF, RF, RF_IF), rd_rn_rm), 15021 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm), 15022 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm), 15023 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm), 15024 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm), 15025 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm), 15026 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm), 15027 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm), 15028 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm), 15029 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm), 15030 15031 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm), 15032 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm), 15033 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm), 15034 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm), 15035 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm), 15036 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm), 15037 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm), 15038 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm), 15039 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm), 15040 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm), 15041 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm), 15042 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm), 15043 15044 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp), 15045 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp), 15046 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp), 15047 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp), 15048 15049 cCL(flts, e000110, 2, (RF, RR), rn_rd), 15050 cCL(fltsp, e000130, 2, (RF, RR), rn_rd), 15051 cCL(fltsm, e000150, 2, (RF, RR), rn_rd), 15052 cCL(fltsz, e000170, 2, (RF, RR), rn_rd), 15053 cCL(fltd, e000190, 2, (RF, RR), rn_rd), 15054 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd), 15055 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd), 15056 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd), 15057 cCL(flte, e080110, 2, (RF, RR), rn_rd), 15058 cCL(fltep, e080130, 2, (RF, RR), rn_rd), 15059 cCL(fltem, e080150, 2, (RF, RR), rn_rd), 15060 cCL(fltez, e080170, 2, (RF, 
RR), rn_rd), 15061 15062 /* The implementation of the FIX instruction is broken on some 15063 assemblers, in that it accepts a precision specifier as well as a 15064 rounding specifier, despite the fact that this is meaningless. 15065 To be more compatible, we accept it as well, though of course it 15066 does not set any bits. */ 15067 cCE(fix, e100110, 2, (RR, RF), rd_rm), 15068 cCL(fixp, e100130, 2, (RR, RF), rd_rm), 15069 cCL(fixm, e100150, 2, (RR, RF), rd_rm), 15070 cCL(fixz, e100170, 2, (RR, RF), rd_rm), 15071 cCL(fixsp, e100130, 2, (RR, RF), rd_rm), 15072 cCL(fixsm, e100150, 2, (RR, RF), rd_rm), 15073 cCL(fixsz, e100170, 2, (RR, RF), rd_rm), 15074 cCL(fixdp, e100130, 2, (RR, RF), rd_rm), 15075 cCL(fixdm, e100150, 2, (RR, RF), rd_rm), 15076 cCL(fixdz, e100170, 2, (RR, RF), rd_rm), 15077 cCL(fixep, e100130, 2, (RR, RF), rd_rm), 15078 cCL(fixem, e100150, 2, (RR, RF), rd_rm), 15079 cCL(fixez, e100170, 2, (RR, RF), rd_rm), 15080 15081 /* Instructions that were new with the real FPA, call them V2. */ 15082#undef ARM_VARIANT 15083#define ARM_VARIANT &fpu_fpa_ext_v2 15084 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm), 15085 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm), 15086 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm), 15087 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm), 15088 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm), 15089 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm), 15090 15091#undef ARM_VARIANT 15092#define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */ 15093 /* Moves and type conversions. 
*/ 15094 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic), 15095 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp), 15096 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg), 15097 cCE(fmstat, ef1fa10, 0, (), noargs), 15098 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic), 15099 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic), 15100 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic), 15101 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic), 15102 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic), 15103 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic), 15104 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn), 15105 cCE(vmrs, ef00a10, 2, (APSR_RR, RVC), vmrs), 15106 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd), 15107 cCE(vmsr, ee00a10, 2, (RVC, RR), rn_rd), 15108 15109 /* Memory operations. */ 15110 cCE(flds, d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst), 15111 cCE(fsts, d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst), 15112 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia), 15113 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia), 15114 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb), 15115 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb), 15116 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia), 15117 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia), 15118 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb), 15119 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb), 15120 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia), 15121 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia), 15122 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb), 15123 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb), 15124 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia), 15125 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia), 15126 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb), 15127 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb), 15128 15129 /* Monadic operations. 
*/ 15130 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic), 15131 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic), 15132 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic), 15133 15134 /* Dyadic operations. */ 15135 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15136 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15137 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15138 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15139 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15140 cCE(ffmacs, ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15141 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15142 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15143 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15144 cCE(fnfmacs, ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15145 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15146 cCE(ffnmas, e900a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15147 cCE(ffnmss, e900a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic), 15148 15149 /* Comparisons. */ 15150 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic), 15151 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z), 15152 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic), 15153 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z), 15154 15155#undef ARM_VARIANT 15156#define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */ 15157 /* Moves and type conversions. 
*/ 15158 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm), 15159 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt), 15160 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt), 15161 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd), 15162 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd), 15163 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn), 15164 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn), 15165 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt), 15166 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt), 15167 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt), 15168 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt), 15169 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt), 15170 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt), 15171 cCE(fcvtshp, 0b60700, 2, (RNQ, RVD), vfp_sp_hp_cvt), 15172 cCE(fcvthps, 0b60600, 2, (RVD, RNQ), vfp_hp_sp_cvt), 15173 cCE(fcvttshp, eb30ac0, 2, (RVS, RVS), vfp_t_sp_hp_cvt), 15174 cCE(fcvtbshp, eb30a40, 2, (RVS, RVS), vfp_b_sp_hp_cvt), 15175 cCE(fcvtthps, eb20ac0, 2, (RVS, RVS), vfp_t_hp_sp_cvt), 15176 cCE(fcvtbhps, eb20a40, 2, (RVS, RVS), vfp_b_hp_sp_cvt), 15177 15178 /* Memory operations. */ 15179 cCE(fldd, d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst), 15180 cCE(fstd, d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst), 15181 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia), 15182 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia), 15183 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb), 15184 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb), 15185 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia), 15186 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia), 15187 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb), 15188 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb), 15189 15190 /* Monadic operations. 
*/ 15191 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm), 15192 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm), 15193 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm), 15194 15195 /* Dyadic operations. */ 15196 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15197 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15198 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15199 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15200 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15201 cCE(ffmacd, ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15202 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15203 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15204 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15205 cCE(fnfmacd, ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15206 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15207 cCE(ffnmad, e900b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15208 cCE(ffnmsd, e900b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm), 15209 15210 /* Comparisons. */ 15211 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm), 15212 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd), 15213 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm), 15214 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd), 15215 15216#undef ARM_VARIANT 15217#define ARM_VARIANT &fpu_vfp_ext_v2 15218 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2), 15219 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2), 15220 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn), 15221 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm), 15222 15223/* Instructions which may belong to either the Neon or VFP instruction sets. 15224 Individual encoder functions perform additional architecture checks. */ 15225#undef ARM_VARIANT 15226#define ARM_VARIANT &fpu_vfp_ext_v1xd 15227#undef THUMB_VARIANT 15228#define THUMB_VARIANT &fpu_vfp_ext_v1xd 15229 /* These mnemonics are unique to VFP. 
*/ 15230 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt), 15231 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div), 15232 nCE(vnmul, vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul), 15233 nCE(vnmla, vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul), 15234 nCE(vnmls, vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul), 15235 nCE(vcmp, vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp), 15236 nCE(vcmpe, vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp), 15237 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push), 15238 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop), 15239 NCE(vcvtr, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtr), 15240 NCE(vfnma, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_fnma), 15241 NCE(vfnms, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_fnms), 15242 15243 /* Mnemonics shared by Neon and VFP. */ 15244 nCEF(vfma, vfma, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_fma), 15245 nCEF(vfms, vfms, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_fma), 15246 15247 nCEF(vmul, vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul), 15248 nCEF(vmla, vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar), 15249 nCEF(vmls, vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar), 15250 15251 nCEF(vadd, vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i), 15252 nCEF(vsub, vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i), 15253 15254 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg), 15255 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg), 15256 15257 NCE(vldm, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm), 15258 NCE(vldmia, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm), 15259 NCE(vldmdb, d100b00, 2, (RRw, VRSDLST), neon_ldm_stm), 15260 NCE(vstm, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm), 15261 NCE(vstmia, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm), 15262 NCE(vstmdb, d000b00, 2, (RRw, VRSDLST), neon_ldm_stm), 15263 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str), 15264 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str), 15265 15266 nCEF(vcvt, vcvt, 3, (RNSDQ, RNSDQ, oI32b), neon_cvt), 15267 nCEF(vcvtt, vcvtt, 2, (RVS, RVS), neon_cvtt), 15268 nCEF(vcvtb, 
vcvtt, 2, (RVS, RVS), neon_cvtb), 15269 15270 /* NOTE: All VMOV encoding is special-cased! */ 15271 NCE(vmov, 0, 1, (VMOV), neon_mov), 15272 NCE(vmovq, 0, 1, (VMOV), neon_mov), 15273 15274#undef THUMB_VARIANT 15275#define THUMB_VARIANT &fpu_neon_ext_v1 15276#undef ARM_VARIANT 15277#define ARM_VARIANT &fpu_neon_ext_v1 15278 /* Data processing with three registers of the same length. */ 15279 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */ 15280 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su), 15281 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su), 15282 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su), 15283 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su), 15284 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su), 15285 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su), 15286 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su), 15287 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su), 15288 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */ 15289 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su), 15290 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su), 15291 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su), 15292 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su), 15293 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl), 15294 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl), 15295 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl), 15296 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl), 15297 /* If not immediate, fall back to neon_dyadic_i64_su. 15298 shl_imm should accept I8 I16 I32 I64, 15299 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. 
*/ 15300 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm), 15301 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm), 15302 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm), 15303 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm), 15304 /* Logic ops, types optional & ignored. */ 15305 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic), 15306 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic), 15307 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic), 15308 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic), 15309 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic), 15310 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic), 15311 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic), 15312 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic), 15313 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic), 15314 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic), 15315 /* Bitfield ops, untyped. */ 15316 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield), 15317 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield), 15318 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield), 15319 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield), 15320 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield), 15321 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield), 15322 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */ 15323 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su), 15324 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su), 15325 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su), 15326 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su), 15327 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su), 15328 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su), 15329 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall 15330 back to neon_dyadic_if_su. 
*/ 15331 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp), 15332 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp), 15333 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp), 15334 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp), 15335 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv), 15336 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv), 15337 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv), 15338 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv), 15339 /* Comparison. Type I8 I16 I32 F32. */ 15340 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq), 15341 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq), 15342 /* As above, D registers only. */ 15343 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d), 15344 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d), 15345 /* Int and float variants, signedness unimportant. */ 15346 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar), 15347 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar), 15348 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d), 15349 /* Add/sub take types I8 I16 I32 I64 F32. */ 15350 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i), 15351 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i), 15352 /* vtst takes sizes 8, 16, 32. */ 15353 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst), 15354 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst), 15355 /* VMUL takes I8 I16 I32 F32 P8. */ 15356 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul), 15357 /* VQD{R}MULH takes S16 S32. 
*/ 15358 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh), 15359 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh), 15360 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh), 15361 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh), 15362 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute), 15363 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute), 15364 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute), 15365 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute), 15366 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv), 15367 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv), 15368 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv), 15369 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv), 15370 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step), 15371 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step), 15372 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step), 15373 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step), 15374 15375 /* Two address, int/float. Types S8 S16 S32 F32. */ 15376 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg), 15377 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg), 15378 15379 /* Data processing with two registers and a shift amount. */ 15380 /* Right shifts, and variants with rounding. 15381 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. 
*/ 15382 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm), 15383 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm), 15384 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm), 15385 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm), 15386 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm), 15387 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm), 15388 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm), 15389 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm), 15390 /* Shift and insert. Sizes accepted 8 16 32 64. */ 15391 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli), 15392 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli), 15393 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri), 15394 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri), 15395 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */ 15396 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm), 15397 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm), 15398 /* Right shift immediate, saturating & narrowing, with rounding variants. 15399 Types accepted S16 S32 S64 U16 U32 U64. */ 15400 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow), 15401 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow), 15402 /* As above, unsigned. Types accepted S16 S32 S64. */ 15403 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u), 15404 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u), 15405 /* Right shift narrowing. Types accepted I16 I32 I64. */ 15406 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow), 15407 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow), 15408 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */ 15409 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll), 15410 /* CVT with optional immediate for fixed-point variant. 
*/ 15411 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt), 15412 15413 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn), 15414 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn), 15415 15416 /* Data processing, three registers of different lengths. */ 15417 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */ 15418 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal), 15419 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long), 15420 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long), 15421 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long), 15422 /* If not scalar, fall back to neon_dyadic_long. 15423 Vector types as above, scalar types S16 S32 U16 U32. */ 15424 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long), 15425 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long), 15426 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */ 15427 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide), 15428 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide), 15429 /* Dyadic, narrowing insns. Types I16 I32 I64. */ 15430 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow), 15431 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow), 15432 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow), 15433 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow), 15434 /* Saturating doubling multiplies. Types S16 S32. */ 15435 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long), 15436 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long), 15437 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long), 15438 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types 15439 S16 S32 U16 U32. */ 15440 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull), 15441 15442 /* Extract. Size 8. 
*/ 15443 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext), 15444 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext), 15445 15446 /* Two registers, miscellaneous. */ 15447 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */ 15448 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev), 15449 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev), 15450 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev), 15451 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev), 15452 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev), 15453 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev), 15454 /* Vector replicate. Sizes 8 16 32. */ 15455 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup), 15456 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup), 15457 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */ 15458 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl), 15459 /* VMOVN. Types I16 I32 I64. */ 15460 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn), 15461 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */ 15462 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn), 15463 /* VQMOVUN. Types S16 S32 S64. */ 15464 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun), 15465 /* VZIP / VUZP. Sizes 8 16 32. */ 15466 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp), 15467 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp), 15468 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp), 15469 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp), 15470 /* VQABS / VQNEG. Types S8 S16 S32. */ 15471 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg), 15472 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg), 15473 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg), 15474 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg), 15475 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. 
*/ 15476 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long), 15477 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long), 15478 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long), 15479 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long), 15480 /* Reciprocal estimates. Types U32 F32. */ 15481 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est), 15482 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est), 15483 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est), 15484 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est), 15485 /* VCLS. Types S8 S16 S32. */ 15486 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls), 15487 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls), 15488 /* VCLZ. Types I8 I16 I32. */ 15489 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz), 15490 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz), 15491 /* VCNT. Size 8. */ 15492 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt), 15493 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt), 15494 /* Two address, untyped. */ 15495 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp), 15496 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp), 15497 /* VTRN. Sizes 8 16 32. */ 15498 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn), 15499 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn), 15500 15501 /* Table lookup. Size 8. */ 15502 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx), 15503 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx), 15504 15505#undef THUMB_VARIANT 15506#define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext 15507#undef ARM_VARIANT 15508#define ARM_VARIANT &fpu_vfp_v3_or_neon_ext 15509 /* Neon element/structure load/store. 
*/ 15510 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx), 15511 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx), 15512 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx), 15513 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx), 15514 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx), 15515 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx), 15516 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx), 15517 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx), 15518 15519#undef THUMB_VARIANT 15520#define THUMB_VARIANT &fpu_vfp_ext_v3 15521#undef ARM_VARIANT 15522#define ARM_VARIANT &fpu_vfp_ext_v3 15523 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const), 15524 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const), 15525 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16), 15526 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16), 15527 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32), 15528 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32), 15529 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16), 15530 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16), 15531 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32), 15532 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32), 15533 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16), 15534 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16), 15535 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32), 15536 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32), 15537 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16), 15538 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16), 15539 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32), 15540 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32), 15541 15542#undef THUMB_VARIANT 15543#undef ARM_VARIANT 15544#define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. 
*/ 15545 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia), 15546 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia), 15547 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia), 15548 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia), 15549 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia), 15550 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia), 15551 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar), 15552 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra), 15553 15554#undef ARM_VARIANT 15555#define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */ 15556 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc), 15557 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc), 15558 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc), 15559 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd), 15560 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd), 15561 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd), 15562 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc), 15563 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc), 15564 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc), 15565 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm), 15566 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm), 15567 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm), 15568 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm), 15569 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm), 15570 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm), 15571 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr), 15572 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr), 15573 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr), 15574 cCE(tmcr, e000110, 2, (RIWC_RIWG, RR), rn_rd), 15575 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn), 15576 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia), 15577 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia), 15578 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia), 15579 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia), 15580 cCE(tmiatb, 
e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia), 15581 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia), 15582 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn), 15583 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn), 15584 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn), 15585 cCE(tmrc, e100110, 2, (RR, RIWC_RIWG), rd_rn), 15586 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm), 15587 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc), 15588 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc), 15589 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc), 15590 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn), 15591 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn), 15592 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn), 15593 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15594 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15595 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15596 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15597 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15598 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15599 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15600 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15601 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15602 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni), 15603 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15604 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15605 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15606 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15607 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15608 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15609 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15610 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15611 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15612 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15613 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15614 cCE(wcmpeqh, 
e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15615 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15616 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15617 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15618 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15619 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15620 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15621 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15622 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh), 15623 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh), 15624 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw), 15625 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd), 15626 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15627 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15628 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15629 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15630 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15631 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15632 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15633 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15634 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15635 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15636 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15637 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15638 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15639 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15640 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15641 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15642 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15643 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15644 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov), 15645 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15646 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 
15647 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15648 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15649 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15650 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15651 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15652 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15653 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15654 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15655 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15656 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 15657 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 15658 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 15659 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 15660 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 15661 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 15662 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15663 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15664 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15665 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15666 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh), 15667 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 15668 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 15669 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 15670 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 15671 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 15672 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 15673 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 15674 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 15675 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 15676 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 15677 cCE(wsrad, ec00040, 3, 
(RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 15678 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 15679 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 15680 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 15681 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 15682 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 15683 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5), 15684 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm), 15685 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh), 15686 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh), 15687 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw), 15688 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd), 15689 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15690 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15691 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15692 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15693 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15694 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15695 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15696 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15697 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15698 cCE(wunpckehub,e0000c0, 2, (RIWR, RIWR), rd_rn), 15699 cCE(wunpckehuh,e4000c0, 2, (RIWR, RIWR), rd_rn), 15700 cCE(wunpckehuw,e8000c0, 2, (RIWR, RIWR), rd_rn), 15701 cCE(wunpckehsb,e2000c0, 2, (RIWR, RIWR), rd_rn), 15702 cCE(wunpckehsh,e6000c0, 2, (RIWR, RIWR), rd_rn), 15703 cCE(wunpckehsw,ea000c0, 2, (RIWR, RIWR), rd_rn), 15704 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15705 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15706 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15707 cCE(wunpckelub,e0000e0, 2, (RIWR, RIWR), rd_rn), 15708 cCE(wunpckeluh,e4000e0, 2, (RIWR, RIWR), rd_rn), 15709 cCE(wunpckeluw,e8000e0, 2, (RIWR, RIWR), 
rd_rn), 15710 cCE(wunpckelsb,e2000e0, 2, (RIWR, RIWR), rd_rn), 15711 cCE(wunpckelsh,e6000e0, 2, (RIWR, RIWR), rd_rn), 15712 cCE(wunpckelsw,ea000e0, 2, (RIWR, RIWR), rd_rn), 15713 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15714 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15715 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15716 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15717 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero), 15718 15719#undef ARM_VARIANT 15720#define ARM_VARIANT &arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */ 15721 cCE(torvscb, e13f190, 1, (RR), iwmmxt_tandorc), 15722 cCE(torvsch, e53f190, 1, (RR), iwmmxt_tandorc), 15723 cCE(torvscw, e93f190, 1, (RR), iwmmxt_tandorc), 15724 cCE(wabsb, e2001c0, 2, (RIWR, RIWR), rd_rn), 15725 cCE(wabsh, e6001c0, 2, (RIWR, RIWR), rd_rn), 15726 cCE(wabsw, ea001c0, 2, (RIWR, RIWR), rd_rn), 15727 cCE(wabsdiffb, e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15728 cCE(wabsdiffh, e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15729 cCE(wabsdiffw, e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15730 cCE(waddbhusl, e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15731 cCE(waddbhusm, e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15732 cCE(waddhc, e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15733 cCE(waddwc, ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15734 cCE(waddsubhx, ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15735 cCE(wavg4, e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15736 cCE(wavg4r, e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15737 cCE(wmaddsn, ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15738 cCE(wmaddsx, eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15739 cCE(wmaddun, ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15740 cCE(wmaddux, e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15741 cCE(wmerge, e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge), 15742 cCE(wmiabb, e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15743 cCE(wmiabt, e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15744 cCE(wmiatb, 
e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15745 cCE(wmiatt, e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15746 cCE(wmiabbn, e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15747 cCE(wmiabtn, e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15748 cCE(wmiatbn, e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15749 cCE(wmiattn, e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15750 cCE(wmiawbb, e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15751 cCE(wmiawbt, e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15752 cCE(wmiawtb, ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15753 cCE(wmiawtt, eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15754 cCE(wmiawbbn, ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15755 cCE(wmiawbtn, ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15756 cCE(wmiawtbn, ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15757 cCE(wmiawttn, ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15758 cCE(wmulsmr, ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15759 cCE(wmulumr, ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15760 cCE(wmulwumr, ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15761 cCE(wmulwsmr, ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15762 cCE(wmulwum, ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15763 cCE(wmulwsm, ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15764 cCE(wmulwl, eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15765 cCE(wqmiabb, e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15766 cCE(wqmiabt, e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15767 cCE(wqmiatb, ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15768 cCE(wqmiatt, eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15769 cCE(wqmiabbn, ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15770 cCE(wqmiabtn, ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15771 cCE(wqmiatbn, ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15772 cCE(wqmiattn, ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15773 cCE(wqmulm, e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15774 cCE(wqmulmr, e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15775 cCE(wqmulwm, ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15776 cCE(wqmulwmr, 
ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15777 cCE(wsubaddhx, ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm), 15778 15779#undef ARM_VARIANT 15780#define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */ 15781 cCE(cfldrs, c100400, 2, (RMF, ADDRGLDC), rd_cpaddr), 15782 cCE(cfldrd, c500400, 2, (RMD, ADDRGLDC), rd_cpaddr), 15783 cCE(cfldr32, c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr), 15784 cCE(cfldr64, c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr), 15785 cCE(cfstrs, c000400, 2, (RMF, ADDRGLDC), rd_cpaddr), 15786 cCE(cfstrd, c400400, 2, (RMD, ADDRGLDC), rd_cpaddr), 15787 cCE(cfstr32, c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr), 15788 cCE(cfstr64, c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr), 15789 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd), 15790 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn), 15791 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd), 15792 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn), 15793 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd), 15794 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn), 15795 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd), 15796 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn), 15797 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd), 15798 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn), 15799 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn), 15800 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn), 15801 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn), 15802 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn), 15803 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn), 15804 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn), 15805 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn), 15806 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn), 15807 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn), 15808 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn), 15809 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc), 15810 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd), 15811 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn), 15812 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn), 15813 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn), 
15814 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn), 15815 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn), 15816 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn), 15817 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn), 15818 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn), 15819 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn), 15820 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn), 15821 cCE(cftruncs32,e1005c0, 2, (RMFX, RMF), rd_rn), 15822 cCE(cftruncd32,e1005e0, 2, (RMFX, RMD), rd_rn), 15823 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple), 15824 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple), 15825 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift), 15826 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift), 15827 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm), 15828 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm), 15829 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm), 15830 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm), 15831 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn), 15832 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn), 15833 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn), 15834 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn), 15835 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm), 15836 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm), 15837 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm), 15838 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm), 15839 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm), 15840 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm), 15841 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn), 15842 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn), 15843 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn), 15844 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn), 15845 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm), 15846 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm), 15847 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm), 15848 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm), 15849 cCE(cfmul32, e100500, 3, 
(RMFX, RMFX, RMFX), rd_rn_rm), 15850 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm), 15851 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm), 15852 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm), 15853 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad), 15854 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad), 15855 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad), 15856 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad), 15857}; 15858#undef ARM_VARIANT 15859#undef THUMB_VARIANT 15860#undef TCE 15861#undef TCM 15862#undef TUE 15863#undef TUF 15864#undef TCC 15865#undef cCE 15866#undef cCL 15867#undef C3E 15868#undef CE 15869#undef CM 15870#undef UE 15871#undef UF 15872#undef UT 15873#undef NUF 15874#undef nUF 15875#undef NCE 15876#undef nCE 15877#undef OPS0 15878#undef OPS1 15879#undef OPS2 15880#undef OPS3 15881#undef OPS4 15882#undef OPS5 15883#undef OPS6 15884#undef do_0 15885 15886 15887#ifndef INSNS_TABLE_ONLY 15888 15889/* MD interface: bits in the object file. */ 15890 15891/* Turn an integer of n bytes (in val) into a stream of bytes appropriate 15892 for use in the a.out file, and stores them in the array pointed to by buf. 15893 This knows about the endian-ness of the target machine and does 15894 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte) 15895 2 (short) and 4 (long) Floating numbers are put out as a series of 15896 LITTLENUMS (shorts, here at least). 
*/ 15897 15898void 15899md_number_to_chars (char * buf, signed_expr_t val, int n) 15900{ 15901 if (target_big_endian) 15902 number_to_chars_bigendian (buf, val, n); 15903 else 15904 number_to_chars_littleendian (buf, val, n); 15905} 15906 15907static valueT 15908md_chars_to_number (char * buf, int n) 15909{ 15910 valueT result = 0; 15911 unsigned char * where = (unsigned char *) buf; 15912 15913 if (target_big_endian) 15914 { 15915 while (n--) 15916 { 15917 result <<= 8; 15918 result |= (*where++ & 255); 15919 } 15920 } 15921 else 15922 { 15923 while (n--) 15924 { 15925 result <<= 8; 15926 result |= (where[n] & 255); 15927 } 15928 } 15929 15930 return result; 15931} 15932 15933/* MD interface: Sections. */ 15934 15935/* Estimate the size of a frag before relaxing. Assume everything fits in 15936 2 bytes. */ 15937 15938int 15939md_estimate_size_before_relax (fragS * fragp, 15940 int segtype ATTRIBUTE_UNUSED) 15941{ 15942 fragp->fr_var = 2; 15943 return 2; 15944} 15945 15946/* FIXME - Looks like the old way "relaxation" is done in relax_section() in 15947 layout.c will need to change to drive the arm "relaxation" */ 15948const relax_typeS md_relax_table[] = { {0} }; 15949 15950/* Convert a machine dependent frag. 
*/ 15951 15952void 15953md_convert_frag (/* bfd *abfd, segT asec ATTRIBUTE_UNUSED, */ fragS *fragp) 15954{ 15955 uint32_t insn; 15956 uint32_t old_op; 15957 char *buf; 15958 expressionS exp; 15959 fixS *fixp; 15960 int reloc_type; 15961 int pc_rel; 15962 int opcode; 15963 15964 buf = fragp->fr_literal + fragp->fr_fix; 15965 15966#ifdef NOTYET 15967 old_op = bfd_get_16(abfd, buf); 15968#else 15969 old_op = md_chars_to_number(buf, THUMB_SIZE); 15970#endif 15971 if (fragp->fr_symbol) { 15972 exp.X_op = O_symbol; 15973 exp.X_add_symbol = fragp->fr_symbol; 15974 } else { 15975 exp.X_op = O_constant; 15976 } 15977 exp.X_add_number = fragp->fr_offset; 15978 opcode = fragp->fr_subtype; 15979 switch (opcode) 15980 { 15981 case T_MNEM_ldr_pc: 15982 case T_MNEM_ldr_pc2: 15983 case T_MNEM_ldr_sp: 15984 case T_MNEM_str_sp: 15985 case T_MNEM_ldr: 15986 case T_MNEM_ldrb: 15987 case T_MNEM_ldrh: 15988 case T_MNEM_str: 15989 case T_MNEM_strb: 15990 case T_MNEM_strh: 15991 if (fragp->fr_var == 4) 15992 { 15993 insn = THUMB_OP32(opcode); 15994 if ((old_op >> 12) == 4 || (old_op >> 12) == 9) 15995 { 15996 insn |= (old_op & 0x700) << 4; 15997 } 15998 else 15999 { 16000 insn |= (old_op & 7) << 12; 16001 insn |= (old_op & 0x38) << 13; 16002 } 16003 insn |= 0x00000c00; 16004 put_thumb32_insn (buf, insn); 16005 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM; 16006 } 16007 else 16008 { 16009 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET; 16010 } 16011 pc_rel = (opcode == T_MNEM_ldr_pc2); 16012 break; 16013 case T_MNEM_adr: 16014 if (fragp->fr_var == 4) 16015 { 16016 insn = THUMB_OP32 (opcode); 16017 insn |= (old_op & 0xf0) << 4; 16018 put_thumb32_insn (buf, insn); 16019 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12; 16020 } 16021 else 16022 { 16023 reloc_type = BFD_RELOC_ARM_THUMB_ADD; 16024 exp.X_add_number -= 4; 16025 } 16026 pc_rel = 1; 16027 break; 16028 case T_MNEM_mov: 16029 case T_MNEM_movs: 16030 case T_MNEM_cmp: 16031 case T_MNEM_cmn: 16032 if (fragp->fr_var == 4) 16033 { 16034 int r0off = (opcode 
== T_MNEM_mov 16035 || opcode == T_MNEM_movs) ? 0 : 8; 16036 insn = THUMB_OP32 (opcode); 16037 insn = (insn & 0xe1ffffff) | 0x10000000; 16038 insn |= (old_op & 0x700) << r0off; 16039 put_thumb32_insn (buf, insn); 16040 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE; 16041 } 16042 else 16043 { 16044 reloc_type = BFD_RELOC_ARM_THUMB_IMM; 16045 } 16046 pc_rel = 0; 16047 break; 16048 case T_MNEM_b: 16049 if (fragp->fr_var == 4) 16050 { 16051 insn = THUMB_OP32(opcode); 16052 put_thumb32_insn (buf, insn); 16053 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25; 16054 } 16055 else 16056 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12; 16057 pc_rel = 1; 16058 break; 16059 case T_MNEM_bcond: 16060 if (fragp->fr_var == 4) 16061 { 16062 insn = THUMB_OP32(opcode); 16063 insn |= (old_op & 0xf00) << 14; 16064 put_thumb32_insn (buf, insn); 16065 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20; 16066 } 16067 else 16068 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9; 16069 pc_rel = 1; 16070 break; 16071 case T_MNEM_add_sp: 16072 case T_MNEM_add_pc: 16073 case T_MNEM_inc_sp: 16074 case T_MNEM_dec_sp: 16075 if (fragp->fr_var == 4) 16076 { 16077 /* ??? Choose between add and addw. 
*/ 16078 insn = THUMB_OP32 (opcode); 16079 insn |= (old_op & 0xf0) << 4; 16080 put_thumb32_insn (buf, insn); 16081 if (opcode == T_MNEM_add_pc) 16082 reloc_type = BFD_RELOC_ARM_T32_IMM12; 16083 else 16084 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM; 16085 } 16086 else 16087 reloc_type = BFD_RELOC_ARM_THUMB_ADD; 16088 pc_rel = 0; 16089 break; 16090 16091 case T_MNEM_addi: 16092 case T_MNEM_addis: 16093 case T_MNEM_subi: 16094 case T_MNEM_subis: 16095 if (fragp->fr_var == 4) 16096 { 16097 insn = THUMB_OP32 (opcode); 16098 insn |= (old_op & 0xf0) << 4; 16099 insn |= (old_op & 0xf) << 16; 16100 put_thumb32_insn (buf, insn); 16101 if (insn & (1 << 20)) 16102 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM; 16103 else 16104 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE; 16105 } 16106 else 16107 reloc_type = BFD_RELOC_ARM_THUMB_ADD; 16108 pc_rel = 0; 16109 break; 16110 default: 16111 abort(); 16112 } 16113 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel, 16114 /* HACK GUESS, pcrel_reloc */ FALSE, 16115 reloc_type); 16116 fixp->fx_file = fragp->fr_file; 16117 fixp->fx_line = fragp->fr_line; 16118 fragp->fr_fix += fragp->fr_var; 16119} 16120 16121/* Return the size of a relaxable immediate operand instruction. 16122 SHIFT and SIZE specify the form of the allowable immediate. */ 16123static int 16124relax_immediate (fragS *fragp, int size, int shift) 16125{ 16126 offsetT offset; 16127 offsetT mask; 16128 offsetT low; 16129 16130 /* ??? Should be able to do better than this. */ 16131 if (fragp->fr_symbol) 16132 return 4; 16133 16134 low = (1 << shift) - 1; 16135 mask = (1 << (shift + size)) - (1 << shift); 16136 offset = fragp->fr_offset; 16137 /* Force misaligned offsets to 32-bit variant. */ 16138 if (offset & low) 16139 return 4; 16140 if (offset & ~mask) 16141 return 4; 16142 return 2; 16143} 16144 16145/* Get the address of a symbol during relaxation. 
*/ 16146static addressT 16147relaxed_symbol_addr(fragS *fragp, int32_t stretch) 16148{ 16149 fragS *sym_frag; 16150 addressT addr; 16151 symbolS *sym; 16152 16153 sym = fragp->fr_symbol; 16154 sym_frag = symbol_get_frag (sym); 16155 know (S_GET_SEGMENT (sym) != absolute_section 16156 || sym_frag == &zero_address_frag); 16157#ifdef NOTYET 16158 addr = S_GET_VALUE (sym) + fragp->fr_offset; 16159#else 16160 addr = fragp->fr_symbol->sy_nlist.n_value + 16161 fragp->fr_symbol->sy_frag->fr_address + 16162 fragp->fr_offset; 16163#endif 16164 16165 /* If frag has yet to be reached on this pass, assume it will 16166 move by STRETCH just as we did. If this is not so, it will 16167 be because some frag between grows, and that will force 16168 another pass. */ 16169 16170 if (stretch != 0 16171 && sym_frag->relax_marker != fragp->relax_marker) 16172 addr += stretch; 16173 16174 return addr; 16175} 16176 16177/* Return the size of a relaxable adr pseudo-instruction or PC-relative 16178 load. */ 16179static int 16180relax_adr (fragS *fragp, /* HACK asection *sec, */ int32_t stretch) 16181{ 16182 addressT addr; 16183 offsetT val; 16184 16185 /* Assume worst case for symbols not known to be in the same section. */ 16186 if (!S_IS_DEFINED(fragp->fr_symbol) 16187#ifdef NOTYET 16188 || sec != S_GET_SEGMENT (fragp->fr_symbol) 16189#endif 16190 ) 16191 return 4; 16192 16193 val = relaxed_symbol_addr(fragp, stretch); 16194 addr = fragp->fr_address + fragp->fr_fix; 16195 addr = (addr + 4) & ~3; 16196 /* Force misaligned targets to 32-bit variant. */ 16197 if (val & 3) 16198 return 4; 16199 val -= addr; 16200 if (val < 0 || val > 1020) 16201 return 4; 16202 return 2; 16203} 16204 16205/* Return the size of a relaxable add/sub immediate instruction. 
*/ 16206static int 16207relax_addsub (fragS *fragp /* HACK , asection *sec */) 16208{ 16209 char *buf; 16210 int op; 16211 16212 buf = fragp->fr_literal + fragp->fr_fix; 16213#ifdef NOTYET 16214 op = bfd_get_16(sec->owner, buf); 16215#else 16216 op = md_chars_to_number(buf, THUMB_SIZE); 16217#endif 16218 if ((op & 0xf) == ((op >> 4) & 0xf)) 16219 return relax_immediate (fragp, 8, 0); 16220 else 16221 return relax_immediate (fragp, 3, 0); 16222} 16223 16224 16225/* Return the size of a relaxable branch instruction. BITS is the 16226 size of the offset field in the narrow instruction. */ 16227 16228static int 16229relax_branch (fragS *fragp, int nsect, int bits, int32_t stretch) 16230{ 16231 addressT addr; 16232 offsetT val; 16233 offsetT limit; 16234 16235 /* Assume worst case for symbols not known to be in the same section. */ 16236 if (!S_IS_DEFINED(fragp->fr_symbol) || 16237 (fragp->fr_symbol->sy_nlist.n_type & N_TYPE) != N_SECT || 16238 fragp->fr_symbol->sy_nlist.n_sect != nsect) 16239 return 4; 16240 16241 val = relaxed_symbol_addr(fragp, stretch); 16242 addr = fragp->fr_address + fragp->fr_fix + 4; 16243 val -= addr; 16244 16245 /* Offset is a signed value *2 */ 16246 limit = 1 << bits; 16247 if (val >= limit || val < -limit) 16248 return 4; 16249 return 2; 16250} 16251 16252 16253/* Relax a machine dependent frag. This returns the amount by which 16254 the current size of the frag should change. 
*/ 16255 16256int 16257arm_relax_frag (int nsect, fragS *fragp, int32_t stretch) 16258{ 16259 int oldsize; 16260 int newsize; 16261 16262 oldsize = fragp->fr_var; 16263 switch (fragp->fr_subtype) 16264 { 16265 case T_MNEM_ldr_pc2: 16266 newsize = relax_adr(fragp, /* HACK sec, */ stretch); 16267 break; 16268 case T_MNEM_ldr_pc: 16269 case T_MNEM_ldr_sp: 16270 case T_MNEM_str_sp: 16271 newsize = relax_immediate(fragp, 8, 2); 16272 break; 16273 case T_MNEM_ldr: 16274 case T_MNEM_str: 16275 newsize = relax_immediate(fragp, 5, 2); 16276 break; 16277 case T_MNEM_ldrh: 16278 case T_MNEM_strh: 16279 newsize = relax_immediate(fragp, 5, 1); 16280 break; 16281 case T_MNEM_ldrb: 16282 case T_MNEM_strb: 16283 newsize = relax_immediate(fragp, 5, 0); 16284 break; 16285 case T_MNEM_adr: 16286 newsize = relax_adr(fragp, /* HACK sec, */ stretch); 16287 break; 16288 case T_MNEM_mov: 16289 case T_MNEM_movs: 16290 case T_MNEM_cmp: 16291 case T_MNEM_cmn: 16292 newsize = relax_immediate(fragp, 8, 0); 16293 break; 16294 case T_MNEM_b: 16295 newsize = relax_branch(fragp, nsect, 11, stretch); 16296 break; 16297 case T_MNEM_bcond: 16298 newsize = relax_branch(fragp, nsect, 8, stretch); 16299 break; 16300 case T_MNEM_add_sp: 16301 case T_MNEM_add_pc: 16302 newsize = relax_immediate (fragp, 8, 2); 16303 break; 16304 case T_MNEM_inc_sp: 16305 case T_MNEM_dec_sp: 16306 newsize = relax_immediate (fragp, 7, 2); 16307 break; 16308 case T_MNEM_addi: 16309 case T_MNEM_addis: 16310 case T_MNEM_subi: 16311 case T_MNEM_subis: 16312 newsize = relax_addsub (fragp /*, HACK sec */); 16313 break; 16314 default: 16315 abort(); 16316 } 16317 16318 fragp->fr_var = newsize; 16319 /* Freeze wide instructions that are at or before the same location as 16320 in the previous pass. This avoids infinite loops. 16321 Don't freeze them unconditionally because targets may be artificialy 16322 misaligned by the expansion of preceeding frags. 
*/ 16323 if (stretch <= 0 && newsize > 2) 16324 { 16325 md_convert_frag (/* sec->owner, sec, */ fragp); 16326 frag_wane(fragp); 16327 } 16328 16329 return newsize - oldsize; 16330} 16331 16332/* Round up a section size to the appropriate boundary. */ 16333 16334valueT 16335md_section_align (segT segment ATTRIBUTE_UNUSED, 16336 valueT size) 16337{ 16338#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT)) 16339 if (OUTPUT_FLAVOR == bfd_target_aout_flavour) 16340 { 16341 /* For a.out, force the section size to be aligned. If we don't do 16342 this, BFD will align it for us, but it will not write out the 16343 final bytes of the section. This may be a bug in BFD, but it is 16344 easier to fix it here since that is how the other a.out targets 16345 work. */ 16346 int align; 16347 16348 align = bfd_get_section_alignment (stdoutput, segment); 16349 size = ((size + (1 << align) - 1) & ((valueT) -1 << align)); 16350 } 16351#endif 16352 16353 return size; 16354} 16355 16356#ifdef NOTYET 16357/* This is called from HANDLE_ALIGN in write.c. Fill in the contents 16358 of an rs_align_code fragment. 
   */

/* Fill in the contents of an rs_align_code fragment with no-op
   instructions.  The flavour of no-op (ARM vs. Thumb, little- vs.
   big-endian byte order) is selected from the frag's tc_frag_data
   flag (set by arm_init_frag below) and the global target_big_endian
   setting.  */

void
arm_handle_align (fragS * fragP)
{
  /* Byte patterns for no-op instructions in each mode and byte order.
     NOTE(review): presumably "mov rN, rN" encodings -- confirm against
     the ARM ARM before changing.  */
  static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 };
  static char const thumb_noop[2] = { 0xc0, 0x46 };
  static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 };
  static char const thumb_bigend_noop[2] = { 0x46, 0xc0 };

  int bytes, fix, noop_size;
  char * p;
  const char * noop;

  /* Only rs_align_code frags carry code-alignment padding.  */
  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this frag must supply, and where they go.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  /* MAX_MEM_FOR_RS_ALIGN_CODE acts as a mask here (assumed to be a
     power of two minus one), limiting how much padding is written.  */
  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  /* tc_frag_data non-zero means the frag was emitted in Thumb mode.  */
  if (fragP->tc_frag_data)
    {
      if (target_big_endian)
	noop = thumb_bigend_noop;
      else
	noop = thumb_noop;
      noop_size = sizeof (thumb_noop);
    }
  else
    {
      if (target_big_endian)
	noop = arm_bigend_noop;
      else
	noop = arm_noop;
      noop_size = sizeof (arm_noop);
    }

  /* Zero-pad up to the no-op's own size first, so that the no-ops
     themselves land on an instruction boundary.  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  /* Fill the remainder with whole no-op instructions.  */
  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
  fragP->fr_var = noop_size;
}

/* Called from md_do_align.  Used to create an alignment frag in a
   code section.  N is the requested alignment and MAX the maximum
   padding allowed -- TODO(review): confirm units against the
   md_do_align caller; only MAX is range-checked here.  */

void
arm_frag_align_code (int n, int max)
{
  char * p;

  /* We assume that there will never be a requirement
     to support alignments greater than 32 bytes.  */
  if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
    as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));

  p = frag_var (rs_align_code,
		MAX_MEM_FOR_RS_ALIGN_CODE,
		1,
		(relax_substateT) max,
		(symbolS *) NULL,
		(offsetT) n,
		(char *) NULL);
  *p = 0;
}

/* Perform target specific initialisation of a frag.  Records in
   tc_frag_data whether the frag was created while assembling Thumb
   code; arm_handle_align above uses this to pick the right no-op.  */

void
arm_init_frag (fragS * fragP)
{
  /* Record whether this frag is in an ARM or a THUMB area.  */
  fragP->tc_frag_data = thumb_mode;
}
#endif /* NOTYET */

/* MD interface: Symbol and relocation handling.  */

/* Return the address within the segment that a PC-relative fixup is
   relative to.  For ARM, PC-relative fixups applied to instructions
   are generally relative to the location of the fixup plus 8 bytes.
   Thumb branches are offset by 4, and Thumb loads relative to PC
   require special handling.  */

int32_t
md_pcrel_from_section (fixS * fixP, segT seg)
{
  /* Address of the fixup itself within the output.  */
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

#ifdef NOTYET
  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;
#endif /* NOTYET */

  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

      /* Thumb PC-relative loads: +4 pipeline offset, then the bottom
	 two bits of the PC are forced clear.  */
    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BLX:
      return base + 4;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif

      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;

      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}

/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.
   Returns the GOT symbol (creating it on first use) for that one
   name under OBJ_ELF; 0 for every other name.  */

symbolS *
md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap two-character prefilter before the full string compare.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad ("GOT already in the symbol table");

	  GOT_symbol = symbol_new (name, undefined_section,
				   (valueT) 0, & zero_address_frag);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}

/* Subroutine of md_apply_fix.  Check to see if an immediate can be
   computed as two separate immediate values, added together.  We
   already know that this value cannot be computed by just one ARM
   instruction.
   */

static unsigned int
validate_immediate_twopart (unsigned int val,
			    unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  /* Try every even rotation: ARM data-processing immediates are an
     8-bit value rotated right by an even amount.  */
  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
	if (a & 0xff00)
	  {
	    /* More than 16 significant bits at this rotation: the two
	       bytes are not adjacent enough -- try the next rotation.  */
	    if (a & ~ 0xffff)
	      continue;
	    * highpart = (a >> 8) | ((i + 24) << 7);
	  }
	else if (a & 0xff0000)
	  {
	    if (a & 0xff000000)
	      continue;
	    * highpart = (a >> 16) | ((i + 16) << 7);
	  }
	else
	  {
	    assert (a & 0xff000000);
	    * highpart = (a >> 24) | ((i + 8) << 7);
	  }

	/* Low part: the bottom byte at this rotation, with the
	   rotation packed into the instruction's rotate field (i is
	   even, so i << 7 places i/2 in bits 8..11).  The high part
	   has already been stored through *HIGHPART above.  */
	return (a & 0xff) | (i << 7);
      }

  /* No rotation worked: the value cannot be split into two
     ARM immediates.  */
  return FAIL;
}

/* Validate VAL as a load/store offset immediate: at most 8 bits when
   HWSE (halfword/signed-extend form) is non-zero, at most 12 bits
   otherwise.  Returns VAL unchanged, or FAIL if out of range.  */

static int
validate_offset_imm (unsigned int val, int hwse)
{
  if ((hwse && val > 255) || val > 4095)
    return FAIL;
  return val;
}

/* Subroutine of md_apply_fix.  Do those data_ops which can take a
   negative immediate constant by altering the instruction.  A bit of
   a hack really.
	MOV <-> MVN
	AND <-> BIC
	ADC <-> SBC
   by inverting the second operand, and
	ADD <-> SUB
	CMP <-> CMN
   by negating the second operand.
   On success the opcode field of *INSTRUCTION is rewritten and the
   re-encoded immediate is returned; on failure FAIL is returned and
   *INSTRUCTION is left untouched.  */

static int
negate_data_op (uint32_t * instruction,
		uint32_t value)
{
  int op, new_inst;
  uint32_t negated, inverted;

  /* Either (or both) of these may come back as FAIL; that is checked
     after the opcode has been matched.  */
  negated = encode_arm_immediate (-value);
  inverted = encode_arm_immediate (~value);

  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* First negates.  */
    case OPCODE_SUB:		/* ADD <-> SUB */
      new_inst = OPCODE_ADD;
      value = negated;
      break;

    case OPCODE_ADD:
      new_inst = OPCODE_SUB;
      value = negated;
      break;

    case OPCODE_CMP:		/* CMP <-> CMN */
      new_inst = OPCODE_CMN;
      value = negated;
      break;

    case OPCODE_CMN:
      new_inst = OPCODE_CMP;
      value = negated;
      break;

      /* Now Inverted ops.  */
    case OPCODE_MOV:		/* MOV <-> MVN */
      new_inst = OPCODE_MVN;
      value = inverted;
      break;

    case OPCODE_MVN:
      new_inst = OPCODE_MOV;
      value = inverted;
      break;

    case OPCODE_AND:		/* AND <-> BIC */
      new_inst = OPCODE_BIC;
      value = inverted;
      break;

    case OPCODE_BIC:
      new_inst = OPCODE_AND;
      value = inverted;
      break;

    case OPCODE_ADC:		/* ADC <-> SBC */
      new_inst = OPCODE_SBC;
      value = inverted;
      break;

    case OPCODE_SBC:
      new_inst = OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  /* The chosen complement may itself not be encodable.  */
  if (value == (unsigned) FAIL)
    return FAIL;

  /* Swap the opcode in, leaving the rest of the instruction intact.  */
  *instruction &= OPCODE_MASK;
  *instruction |= new_inst << DATA_OP_SHIFT;
  return value;
}

/* Like negate_data_op, but for Thumb-2.  On success the opcode field
   of *INSTRUCTION is rewritten and the re-encoded immediate returned;
   on failure FAIL is returned and *INSTRUCTION is left untouched.  */

static unsigned int
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
{
  int op, new_inst;
  int rd;
  unsigned int negated, inverted;

  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
      value = negated;
      break;

    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      value = negated;
      break;

      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
      value = inverted;
      break;

    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      value = inverted;
      break;

      /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
      /* rd == 15 marks the TST form, which must not be converted.  */
      if (rd == 15)
	value = FAIL;
      else
	value = inverted;
      break;

    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
      value = inverted;
      break;

      /* ADC <-> SBC */
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
      value = inverted;
      break;

    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  if (value == (unsigned int)FAIL)
    return FAIL;

  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
  return value;
}

/* Read a 32-bit thumb instruction from buf, high halfword first.  */
static uint32_t
get_thumb32_insn (char * buf)
{
  uint32_t insn;
  insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
  insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);

  return insn;
}

/* We usually want to set the low bit on the address of thumb function
   symbols.  In particular .word foo - . should have the low bit set.
   Generic code tries to fold the difference of two symbols to
   a constant.  Prevent this and force a relocation when the first symbol
   is a thumb function.
*/ 16802int 16803arm_optimize_expr (expressionS *l, operatorT op, expressionS *r) 16804{ 16805#ifdef NOTYET 16806 if (op == O_subtract 16807 && l->X_op == O_symbol 16808 && r->X_op == O_symbol 16809 && THUMB_IS_FUNC (l->X_add_symbol)) 16810 { 16811 l->X_op = O_subtract; 16812 l->X_op_symbol = r->X_add_symbol; 16813 l->X_add_number -= r->X_add_number; 16814 return 1; 16815 } 16816#endif /* NOTYET */ 16817 /* Process as normal. */ 16818 return 0; 16819} 16820 16821void 16822md_apply_fix (fixS * fixP, 16823 valueT * valP, 16824 segT seg) 16825{ 16826 offsetT value = * valP; 16827 offsetT newval; 16828 unsigned int newimm; 16829 uint32_t temp; 16830 int sign; 16831 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal; 16832 16833 assert (fixP->fx_r_type <= BFD_RELOC_UNUSED); 16834 16835 /* Note whether this will delete the relocation. */ 16836 16837 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel) 16838 fixP->fx_done = 1; 16839 16840 /* On a 64-bit host, silently truncate 'value' to 32 bits for 16841 consistency with the behavior on 32-bit hosts. Remember value 16842 for emit_reloc. */ 16843 value &= 0xffffffff; 16844 value ^= 0x80000000; 16845 value -= 0x80000000; 16846 16847 *valP = value; 16848 fixP->fx_addnumber = value; 16849 16850 /* Same treatment for fixP->fx_offset. */ 16851 fixP->fx_offset &= 0xffffffff; 16852 fixP->fx_offset ^= 0x80000000; 16853 fixP->fx_offset -= 0x80000000; 16854 16855 switch (fixP->fx_r_type) 16856 { 16857#ifdef NOTYET 16858 case BFD_RELOC_NONE: 16859 /* This will need to go in the object file. */ 16860 fixP->fx_done = 0; 16861 break; 16862#endif 16863 16864 case BFD_RELOC_ARM_IMMEDIATE: 16865 /* We claim that this fixup has been processed here, 16866 even if in fact we generate an error because we do 16867 not have a reloc for it, so tc_gen_reloc will reject it. */ 16868 fixP->fx_done = 1; 16869 16870 if (fixP->fx_addsy 16871 && ! 
S_IS_DEFINED (fixP->fx_addsy)) 16872 { 16873 as_bad_where (fixP->fx_file, fixP->fx_line, 16874 _("undefined symbol %s used as an immediate value"), 16875 S_GET_NAME (fixP->fx_addsy)); 16876 break; 16877 } 16878 16879 newimm = encode_arm_immediate (value); 16880 temp = md_chars_to_number (buf, INSN_SIZE); 16881 16882 /* If the instruction will fail, see if we can fix things up by 16883 changing the opcode. */ 16884 if (newimm == (unsigned int) FAIL 16885 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL) 16886 { 16887 as_bad_where (fixP->fx_file, fixP->fx_line, 16888 _("invalid constant (0x%x) after fixup"), 16889 (uint32_t) value); 16890 break; 16891 } 16892 16893 newimm |= (temp & 0xfffff000); 16894 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE); 16895 break; 16896 16897 case BFD_RELOC_ARM_ADRL_IMMEDIATE: 16898 { 16899 unsigned int highpart = 0; 16900 unsigned int newinsn = 0xe1a00000; /* nop. */ 16901 16902 newimm = encode_arm_immediate (value); 16903 temp = md_chars_to_number (buf, INSN_SIZE); 16904 16905 /* If the instruction will fail, see if we can fix things up by 16906 changing the opcode. */ 16907 if (newimm == (unsigned int) FAIL 16908 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL) 16909 { 16910 /* No ? OK - try using two ADD instructions to generate 16911 the value. */ 16912 newimm = validate_immediate_twopart (value, & highpart); 16913 16914 /* Yes - then make sure that the second instruction is 16915 also an add. */ 16916 if (newimm != (unsigned int) FAIL) 16917 newinsn = temp; 16918 /* Still No ? Try using a negated value. */ 16919 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL) 16920 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT; 16921 /* Otherwise - give up. 
*/ 16922 else 16923 { 16924 as_bad_where (fixP->fx_file, fixP->fx_line, 16925 _("unable to compute ADRL instructions for PC offset of 0x%x"), 16926 (int32_t) value); 16927 break; 16928 } 16929 16930 /* Replace the first operand in the 2nd instruction (which 16931 is the PC) with the destination register. We have 16932 already added in the PC in the first instruction and we 16933 do not want to do it again. */ 16934 newinsn &= ~ 0xf0000; 16935 newinsn |= ((newinsn & 0x0f000) << 4); 16936 } 16937 16938 newimm |= (temp & 0xfffff000); 16939 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE); 16940 16941 highpart |= (newinsn & 0xfffff000); 16942 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE); 16943 } 16944 break; 16945 16946 case BFD_RELOC_ARM_OFFSET_IMM: 16947#ifdef NOTYET 16948 if (!fixP->fx_done && seg->use_rela_p) 16949#else 16950 if (!fixP->fx_done && 0) 16951#endif 16952 value = 0; 16953 16954 case BFD_RELOC_ARM_LITERAL: 16955 sign = value >= 0; 16956 16957 if (value < 0) 16958 value = - value; 16959 16960 if (validate_offset_imm (value, 0) == FAIL) 16961 { 16962 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL) 16963 as_bad_where (fixP->fx_file, fixP->fx_line, 16964 _("invalid literal constant: pool needs to be closer")); 16965 else 16966 as_bad_where (fixP->fx_file, fixP->fx_line, 16967 _("bad immediate value for offset (%d)"), 16968 (int32_t) value); 16969 break; 16970 } 16971 16972 newval = md_chars_to_number (buf, INSN_SIZE); 16973 newval &= 0xff7ff000; 16974 newval |= value | (sign ? 
INDEX_UP : 0); 16975 md_number_to_chars (buf, newval, INSN_SIZE); 16976 break; 16977 16978 case BFD_RELOC_ARM_OFFSET_IMM8: 16979 case BFD_RELOC_ARM_HWLITERAL: 16980 sign = value >= 0; 16981 16982 if (value < 0) 16983 value = - value; 16984 16985 if (validate_offset_imm (value, 1) == FAIL) 16986 { 16987 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL) 16988 as_bad_where (fixP->fx_file, fixP->fx_line, 16989 _("invalid literal constant: pool needs to be closer")); 16990 else 16991 as_bad_where (fixP->fx_file, fixP->fx_line, 16992 _("bad immediate value for 8-bit offset (%d)"), 16993 (int32_t) value); 16994 break; 16995 } 16996 16997 newval = md_chars_to_number (buf, INSN_SIZE); 16998 newval &= 0xff7ff0f0; 16999 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0); 17000 md_number_to_chars (buf, newval, INSN_SIZE); 17001 break; 17002 17003 case BFD_RELOC_ARM_T32_OFFSET_U8: 17004 if (value < 0 || value > 1020 || value % 4 != 0) 17005 as_bad_where (fixP->fx_file, fixP->fx_line, 17006 _("bad immediate value for offset (%d)"), (int32_t) value); 17007 value /= 4; 17008 17009 newval = md_chars_to_number (buf+2, THUMB_SIZE); 17010 newval |= value; 17011 md_number_to_chars (buf+2, newval, THUMB_SIZE); 17012 break; 17013 17014 case BFD_RELOC_ARM_T32_OFFSET_IMM: 17015 /* This is a complicated relocation used for all varieties of Thumb32 17016 load/store instruction with immediate offset: 17017 17018 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit, 17019 *4, optional writeback(W) 17020 (doubleword load/store) 17021 17022 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel 17023 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit 17024 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction) 17025 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit 17026 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit 17027 17028 Uppercase letters indicate bits that are already encoded at 17029 this point. 
Lowercase letters are our problem. For the 17030 second block of instructions, the secondary opcode nybble 17031 (bits 8..11) is present, and bit 23 is zero, even if this is 17032 a PC-relative operation. */ 17033 newval = md_chars_to_number (buf, THUMB_SIZE); 17034 newval <<= 16; 17035 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE); 17036 17037 if ((newval & 0xf0000000) == 0xe0000000) 17038 { 17039 /* Doubleword load/store: 8-bit offset, scaled by 4. */ 17040 if (value >= 0) 17041 newval |= (1 << 23); 17042 else 17043 value = -value; 17044 if (value % 4 != 0) 17045 { 17046 as_bad_where (fixP->fx_file, fixP->fx_line, 17047 _("offset not a multiple of 4")); 17048 break; 17049 } 17050 value /= 4; 17051 if (value > 0xff) 17052 { 17053 as_bad_where (fixP->fx_file, fixP->fx_line, 17054 _("offset out of range")); 17055 break; 17056 } 17057 newval &= ~0xff; 17058 } 17059 else if ((newval & 0x000f0000) == 0x000f0000) 17060 { 17061 /* PC-relative, 12-bit offset. */ 17062 if (value >= 0) 17063 newval |= (1 << 23); 17064 else 17065 value = -value; 17066 if (value > 0xfff) 17067 { 17068 as_bad_where (fixP->fx_file, fixP->fx_line, 17069 _("offset out of range")); 17070 break; 17071 } 17072 newval &= ~0xfff; 17073 } 17074 else if ((newval & 0x00000100) == 0x00000100) 17075 { 17076 /* Writeback: 8-bit, +/- offset. */ 17077 if (value >= 0) 17078 newval |= (1 << 9); 17079 else 17080 value = -value; 17081 if (value > 0xff) 17082 { 17083 as_bad_where (fixP->fx_file, fixP->fx_line, 17084 _("offset out of range")); 17085 break; 17086 } 17087 newval &= ~0xff; 17088 } 17089 else if ((newval & 0x00000f00) == 0x00000e00) 17090 { 17091 /* T-instruction: positive 8-bit offset. */ 17092 if (value < 0 || value > 0xff) 17093 { 17094 as_bad_where (fixP->fx_file, fixP->fx_line, 17095 _("offset out of range")); 17096 break; 17097 } 17098 newval &= ~0xff; 17099 newval |= value; 17100 } 17101 else 17102 { 17103 /* Positive 12-bit or negative 8-bit offset. 
*/ 17104 int limit; 17105 if (value >= 0) 17106 { 17107 newval |= (1 << 23); 17108 limit = 0xfff; 17109 } 17110 else 17111 { 17112 value = -value; 17113 limit = 0xff; 17114 } 17115 if (value > limit) 17116 { 17117 as_bad_where (fixP->fx_file, fixP->fx_line, 17118 _("offset out of range")); 17119 break; 17120 } 17121 newval &= ~limit; 17122 } 17123 17124 newval |= value; 17125 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE); 17126 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE); 17127 break; 17128 17129 case BFD_RELOC_ARM_SHIFT_IMM: 17130 newval = md_chars_to_number (buf, INSN_SIZE); 17131 if (((uint32_t) value) > 32 17132 || (value == 32 17133 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60))) 17134 { 17135 as_bad_where (fixP->fx_file, fixP->fx_line, 17136 _("shift expression is too large")); 17137 break; 17138 } 17139 17140 if (value == 0) 17141 /* Shifts of zero must be done as lsl. */ 17142 newval &= ~0x60; 17143 else if (value == 32) 17144 value = 0; 17145 newval &= 0xfffff07f; 17146 newval |= (value & 0x1f) << 7; 17147 md_number_to_chars (buf, newval, INSN_SIZE); 17148 break; 17149 17150 case BFD_RELOC_ARM_T32_IMMEDIATE: 17151 case BFD_RELOC_ARM_T32_ADD_IMM: 17152 case BFD_RELOC_ARM_T32_IMM12: 17153 case BFD_RELOC_ARM_T32_ADD_PC12: 17154 /* We claim that this fixup has been processed here, 17155 even if in fact we generate an error because we do 17156 not have a reloc for it, so tc_gen_reloc will reject it. */ 17157 fixP->fx_done = 1; 17158 17159 if (fixP->fx_addsy 17160 && ! 
S_IS_DEFINED (fixP->fx_addsy)) 17161 { 17162 as_bad_where (fixP->fx_file, fixP->fx_line, 17163 _("undefined symbol %s used as an immediate value"), 17164 S_GET_NAME (fixP->fx_addsy)); 17165 break; 17166 } 17167 17168 newval = md_chars_to_number (buf, THUMB_SIZE); 17169 newval <<= 16; 17170 newval |= md_chars_to_number (buf+2, THUMB_SIZE); 17171 17172 newimm = FAIL; 17173 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE 17174 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM) 17175 { 17176 newimm = encode_thumb32_immediate (value); 17177 if (newimm == (unsigned int) FAIL) 17178 newimm = thumb32_negate_data_op (&newval, value); 17179 } 17180 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE 17181 && newimm == (unsigned int) FAIL) 17182 { 17183 /* Turn add/sum into addw/subw. */ 17184 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM) 17185 newval = (newval & 0xfeffffff) | 0x02000000; 17186 17187 /* 12 bit immediate for addw/subw. */ 17188 if (value < 0) 17189 { 17190 value = -value; 17191 newval ^= 0x00a00000; 17192 } 17193 if (value > 0xfff) 17194 newimm = (unsigned int) FAIL; 17195 else 17196 newimm = value; 17197 } 17198 17199 if (newimm == (unsigned int)FAIL) 17200 { 17201 as_bad_where (fixP->fx_file, fixP->fx_line, 17202 _("invalid constant (0x%x) after fixup"), 17203 (uint32_t) value); 17204 break; 17205 } 17206 17207 newval |= (newimm & 0x800) << 15; 17208 newval |= (newimm & 0x700) << 4; 17209 newval |= (newimm & 0x0ff); 17210 17211 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE); 17212 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE); 17213 break; 17214 17215 case BFD_RELOC_ARM_SMC: 17216 if (((uint32_t) value) > 0xffff) 17217 as_bad_where (fixP->fx_file, fixP->fx_line, 17218 _("invalid smc expression")); 17219 newval = md_chars_to_number (buf, INSN_SIZE); 17220 newval |= (value & 0xf) | ((value & 0xfff0) << 4); 17221 md_number_to_chars (buf, newval, INSN_SIZE); 17222 break; 17223 17224 case BFD_RELOC_ARM_SWI: 17225 
if (*((int*)fixP->tc_fix_data) != 0) 17226 { 17227 if (((uint32_t) value) > 0xff) 17228 as_bad_where (fixP->fx_file, fixP->fx_line, 17229 _("invalid swi expression")); 17230 newval = md_chars_to_number (buf, THUMB_SIZE); 17231 newval |= value; 17232 md_number_to_chars (buf, newval, THUMB_SIZE); 17233 } 17234 else 17235 { 17236 if (((uint32_t) value) > 0x00ffffff) 17237 as_bad_where (fixP->fx_file, fixP->fx_line, 17238 _("invalid swi expression")); 17239 newval = md_chars_to_number (buf, INSN_SIZE); 17240 newval |= value; 17241 md_number_to_chars (buf, newval, INSN_SIZE); 17242 } 17243 break; 17244 17245 case BFD_RELOC_ARM_MULTI: 17246 if (((uint32_t) value) > 0xffff) 17247 as_bad_where (fixP->fx_file, fixP->fx_line, 17248 _("invalid expression in load/store multiple")); 17249 newval = value | md_chars_to_number (buf, INSN_SIZE); 17250 md_number_to_chars (buf, newval, INSN_SIZE); 17251 break; 17252 17253#ifdef OBJ_ELF 17254 case BFD_RELOC_ARM_PCREL_CALL: 17255 newval = md_chars_to_number (buf, INSN_SIZE); 17256 if ((newval & 0xf0000000) == 0xf0000000) 17257 temp = 1; 17258 else 17259 temp = 3; 17260 goto arm_branch_common; 17261 17262 case BFD_RELOC_ARM_PCREL_JUMP: 17263 case BFD_RELOC_ARM_PLT32: 17264#endif 17265 case BFD_RELOC_ARM_PCREL_BRANCH: 17266 temp = 3; 17267 goto arm_branch_common; 17268 17269 case BFD_RELOC_ARM_PCREL_BLX: 17270 temp = 1; 17271#ifndef NOTYET 17272 /* Our linker wants the same reloc for bl and blx. */ 17273 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_BRANCH; 17274#endif 17275 arm_branch_common: 17276 /* We are going to store value (shifted right by two) in the 17277 instruction, in a 24 bit, signed field. Bits 26 through 32 either 17278 all clear or all set and bit 0 must be clear. For B/BL bit 1 must 17279 also be be clear. 
*/ 17280 if (value & temp) 17281 as_bad_where (fixP->fx_file, fixP->fx_line, 17282 _("misaligned branch destination")); 17283 if ((value & (offsetT)0xfe000000) != (offsetT)0 17284 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000) 17285 as_bad_where (fixP->fx_file, fixP->fx_line, 17286 _("branch out of range")); 17287 17288#ifdef NOTYET 17289 if (fixP->fx_done || !seg->use_rela_p) 17290#else 17291 if (fixP->fx_done || !0) 17292#endif 17293 { 17294 newval = md_chars_to_number (buf, INSN_SIZE); 17295 newval |= (value >> 2) & 0x00ffffff; 17296 /* Set the H bit on BLX instructions. */ 17297 if (temp == 1) 17298 { 17299 if (value & 2) 17300 newval |= 0x01000000; 17301 else 17302 newval &= ~0x01000000; 17303 } 17304 md_number_to_chars (buf, newval, INSN_SIZE); 17305 } 17306 break; 17307 17308 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */ 17309 /* CBZ can only branch forward. */ 17310 17311 /* Attempts to use CBZ to branch to the next instruction 17312 (which, strictly speaking, are prohibited) will be turned into 17313 no-ops. 17314 17315 FIXME: It may be better to remove the instruction completely and 17316 perform relaxation. */ 17317 if (value == -2) 17318 { 17319 newval = md_chars_to_number (buf, THUMB_SIZE); 17320 newval = 0xbf00; /* NOP encoding T1 */ 17321 md_number_to_chars (buf, newval, THUMB_SIZE); 17322 } 17323 else 17324 { 17325 if (value & ~0x7e) 17326 as_bad_where (fixP->fx_file, fixP->fx_line, 17327 _("branch out of range")); 17328 17329#ifdef NOTYET 17330 if (fixP->fx_done || !seg->use_rela_p) 17331#else 17332 if (fixP->fx_done || !0) 17333#endif 17334 { 17335 newval = md_chars_to_number (buf, THUMB_SIZE); 17336 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3); 17337 md_number_to_chars (buf, newval, THUMB_SIZE); 17338 } 17339 } 17340 break; 17341 17342 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. 
*/ 17343 if ((value & ~0xff) && ((value & ~0xff) != ~0xff)) 17344 as_bad_where (fixP->fx_file, fixP->fx_line, 17345 _("branch out of range")); 17346 17347#ifdef NOTYET 17348 if (fixP->fx_done || !seg->use_rela_p) 17349#else 17350 if (fixP->fx_done || !0) 17351#endif 17352 { 17353 newval = md_chars_to_number (buf, THUMB_SIZE); 17354 newval |= (value & 0x1ff) >> 1; 17355 md_number_to_chars (buf, newval, THUMB_SIZE); 17356 } 17357 break; 17358 17359 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */ 17360 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff)) 17361 as_bad_where (fixP->fx_file, fixP->fx_line, 17362 _("branch out of range")); 17363 17364#ifdef NOTYET 17365 if (fixP->fx_done || !seg->use_rela_p) 17366#else 17367 if (fixP->fx_done || !0) 17368#endif 17369 { 17370 newval = md_chars_to_number (buf, THUMB_SIZE); 17371 newval |= (value & 0xfff) >> 1; 17372 md_number_to_chars (buf, newval, THUMB_SIZE); 17373 } 17374 break; 17375 17376 case BFD_RELOC_THUMB_PCREL_BRANCH20: 17377 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff)) 17378 as_bad_where (fixP->fx_file, fixP->fx_line, 17379 _("conditional branch out of range")); 17380 17381#ifdef NOTYET 17382 if (fixP->fx_done || !seg->use_rela_p) 17383#else 17384 if (fixP->fx_done || !0) 17385#endif 17386 { 17387 offsetT newval2; 17388 addressT S, J1, J2, lo, hi; 17389 17390 S = (value & 0x00100000) >> 20; 17391 J2 = (value & 0x00080000) >> 19; 17392 J1 = (value & 0x00040000) >> 18; 17393 hi = (value & 0x0003f000) >> 12; 17394 lo = (value & 0x00000ffe) >> 1; 17395 17396 newval = md_chars_to_number (buf, THUMB_SIZE); 17397 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE); 17398 newval |= (S << 10) | hi; 17399 newval2 |= (J1 << 13) | (J2 << 11) | lo; 17400 md_number_to_chars (buf, newval, THUMB_SIZE); 17401 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE); 17402 } 17403 break; 17404 17405 case BFD_RELOC_THUMB_PCREL_BLX: 17406 case BFD_RELOC_THUMB_PCREL_BRANCH23: 17407 if 
((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff)) 17408 as_bad_where (fixP->fx_file, fixP->fx_line, 17409 _("branch out of range")); 17410 17411 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX) 17412 /* For a BLX instruction, make sure that the relocation is rounded up 17413 to a word boundary. This follows the semantics of the instruction 17414 which specifies that bit 1 of the target address will come from bit 17415 1 of the base address. */ 17416 value = (value + 2) & ~ 2; 17417 17418#ifndef NOTYET 17419 /* Our linker wants the same reloc for bl and blx. */ 17420 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23; 17421#endif 17422 17423#ifdef NOTYET 17424 if (fixP->fx_done || !seg->use_rela_p) 17425#else 17426 if (fixP->fx_done || !0) 17427#endif 17428 { 17429 offsetT newval2; 17430 17431 newval = md_chars_to_number (buf, THUMB_SIZE); 17432 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE); 17433 newval |= (value & 0x7fffff) >> 12; 17434 newval2 |= (value & 0xfff) >> 1; 17435 md_number_to_chars (buf, newval, THUMB_SIZE); 17436 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE); 17437 } 17438 break; 17439 17440 case BFD_RELOC_THUMB_PCREL_BRANCH25: 17441 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff)) 17442 as_bad_where (fixP->fx_file, fixP->fx_line, 17443 _("branch out of range")); 17444#ifndef NOTYET 17445 /* Our linker wants the same reloc for bl and blx. 
*/ 17446 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23; 17447#endif 17448 17449#ifdef NOTYET 17450 if (fixP->fx_done || !seg->use_rela_p) 17451#else 17452 if (fixP->fx_done || !0) 17453#endif 17454 { 17455 offsetT newval2; 17456 addressT S, I1, I2, lo, hi; 17457 17458 S = (value & 0x01000000) >> 24; 17459 I1 = (value & 0x00800000) >> 23; 17460 I2 = (value & 0x00400000) >> 22; 17461 hi = (value & 0x003ff000) >> 12; 17462 lo = (value & 0x00000ffe) >> 1; 17463 17464 I1 = !(I1 ^ S); 17465 I2 = !(I2 ^ S); 17466 17467 newval = md_chars_to_number (buf, THUMB_SIZE); 17468 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE); 17469 newval |= (S << 10) | hi; 17470 newval2 |= (I1 << 13) | (I2 << 11) | lo; 17471 md_number_to_chars (buf, newval, THUMB_SIZE); 17472 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE); 17473 } 17474 break; 17475 17476 case BFD_RELOC_8: 17477#ifdef NOTYET 17478 if (fixP->fx_done || !seg->use_rela_p) 17479#else 17480 if (fixP->fx_done || !0) 17481#endif 17482 md_number_to_chars (buf, value, 1); 17483 break; 17484 17485 case BFD_RELOC_16: 17486#ifdef NOTYET 17487 if (fixP->fx_done || !seg->use_rela_p) 17488#else 17489 if (fixP->fx_done || !0) 17490#endif 17491 md_number_to_chars (buf, value, 2); 17492 break; 17493 17494#ifdef OBJ_ELF 17495 case BFD_RELOC_ARM_TLS_GD32: 17496 case BFD_RELOC_ARM_TLS_LE32: 17497 case BFD_RELOC_ARM_TLS_IE32: 17498 case BFD_RELOC_ARM_TLS_LDM32: 17499 case BFD_RELOC_ARM_TLS_LDO32: 17500 S_SET_THREAD_LOCAL (fixP->fx_addsy); 17501 /* fall through */ 17502 17503 case BFD_RELOC_ARM_GOT32: 17504 case BFD_RELOC_ARM_GOTOFF: 17505 case BFD_RELOC_ARM_TARGET2: 17506#ifdef NOTYET 17507 if (fixP->fx_done || !seg->use_rela_p) 17508#else 17509 if (fixP->fx_done || !0) 17510#endif 17511 md_number_to_chars (buf, 0, 4); 17512 break; 17513#endif 17514 17515 case BFD_RELOC_RVA: 17516 case BFD_RELOC_32: 17517 case BFD_RELOC_ARM_TARGET1: 17518 case BFD_RELOC_ARM_ROSEGREL32: 17519 case BFD_RELOC_ARM_SBREL32: 17520 case 
BFD_RELOC_32_PCREL: 17521#ifdef TE_PE 17522 case BFD_RELOC_32_SECREL: 17523#endif 17524#ifdef NOTYET 17525 if (fixP->fx_done || !seg->use_rela_p) 17526#else 17527 if (fixP->fx_done || !0) 17528#endif 17529#ifdef TE_WINCE 17530 /* For WinCE we only do this for pcrel fixups. */ 17531 if (fixP->fx_done || fixP->fx_pcrel) 17532#endif 17533 md_number_to_chars (buf, value, 4); 17534 break; 17535 17536#ifdef OBJ_ELF 17537 case BFD_RELOC_ARM_PREL31: 17538#ifdef NOTYET 17539 if (fixP->fx_done || !seg->use_rela_p) 17540#else 17541 if (fixP->fx_done || !0) 17542#endif 17543 { 17544 newval = md_chars_to_number (buf, 4) & 0x80000000; 17545 if ((value ^ (value >> 1)) & 0x40000000) 17546 { 17547 as_bad_where (fixP->fx_file, fixP->fx_line, 17548 _("rel31 relocation overflow")); 17549 } 17550 newval |= value & 0x7fffffff; 17551 md_number_to_chars (buf, newval, 4); 17552 } 17553 break; 17554#endif 17555 17556 case BFD_RELOC_ARM_CP_OFF_IMM: 17557 case BFD_RELOC_ARM_T32_CP_OFF_IMM: 17558 if (value < -1023 || value > 1023 || (value & 3)) 17559 as_bad_where (fixP->fx_file, fixP->fx_line, 17560 _("co-processor offset out of range")); 17561 cp_off_common: 17562 sign = value >= 0; 17563 if (value < 0) 17564 value = -value; 17565 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM 17566 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2) 17567 newval = md_chars_to_number (buf, INSN_SIZE); 17568 else 17569 newval = get_thumb32_insn (buf); 17570 newval &= 0xff7fff00; 17571 newval |= (value >> 2) | (sign ? 
INDEX_UP : 0); 17572 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM 17573 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2) 17574 md_number_to_chars (buf, newval, INSN_SIZE); 17575 else 17576 put_thumb32_insn (buf, newval); 17577 break; 17578 17579 case BFD_RELOC_ARM_CP_OFF_IMM_S2: 17580 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2: 17581 if (value < -255 || value > 255) 17582 as_bad_where (fixP->fx_file, fixP->fx_line, 17583 _("co-processor offset out of range")); 17584 value *= 4; 17585 goto cp_off_common; 17586 17587 case BFD_RELOC_ARM_THUMB_OFFSET: 17588 newval = md_chars_to_number (buf, THUMB_SIZE); 17589 /* Exactly what ranges, and where the offset is inserted depends 17590 on the type of instruction, we can establish this from the 17591 top 4 bits. */ 17592 switch (newval >> 12) 17593 { 17594 case 4: /* PC load. */ 17595 /* Thumb PC loads are somewhat odd, bit 1 of the PC is 17596 forced to zero for these loads; md_pcrel_from has already 17597 compensated for this. */ 17598 if (value & 3) 17599 as_bad_where (fixP->fx_file, fixP->fx_line, 17600 _("invalid offset, target not word aligned (0x%08x)"), 17601 (((uint32_t) fixP->fx_frag->fr_address 17602 + (uint32_t) fixP->fx_where) & ~3) 17603 + (uint32_t) value); 17604 17605 if (value & ~0x3fc) 17606 as_bad_where (fixP->fx_file, fixP->fx_line, 17607 _("invalid offset, value too big (0x%08x)"), 17608 (int32_t) value); 17609 17610 newval |= value >> 2; 17611 break; 17612 17613 case 9: /* SP load/store. */ 17614 if (value & ~0x3fc) 17615 as_bad_where (fixP->fx_file, fixP->fx_line, 17616 _("invalid offset, value too big (0x%08x)"), 17617 (int32_t) value); 17618 newval |= value >> 2; 17619 break; 17620 17621 case 6: /* Word load/store. */ 17622 if (value & ~0x7c) 17623 as_bad_where (fixP->fx_file, fixP->fx_line, 17624 _("invalid offset, value too big (0x%08x)"), 17625 (int32_t) value); 17626 newval |= value << 4; /* 6 - 2. */ 17627 break; 17628 17629 case 7: /* Byte load/store. 
*/ 17630 if (value & ~0x1f) 17631 as_bad_where (fixP->fx_file, fixP->fx_line, 17632 _("invalid offset, value too big (0x%08x)"), 17633 (int32_t) value); 17634 newval |= value << 6; 17635 break; 17636 17637 case 8: /* Halfword load/store. */ 17638 if (value & ~0x3e) 17639 as_bad_where (fixP->fx_file, fixP->fx_line, 17640 _("invalid offset, value too big (0x%08x)"), 17641 (int32_t) value); 17642 newval |= value << 5; /* 6 - 1. */ 17643 break; 17644 17645 default: 17646 as_bad_where (fixP->fx_file, fixP->fx_line, 17647 "Unable to process relocation for thumb opcode: %x", 17648 (uint32_t) newval); 17649 break; 17650 } 17651 md_number_to_chars (buf, newval, THUMB_SIZE); 17652 break; 17653 17654 case BFD_RELOC_ARM_THUMB_ADD: 17655 /* This is a complicated relocation, since we use it for all of 17656 the following immediate relocations: 17657 17658 3bit ADD/SUB 17659 8bit ADD/SUB 17660 9bit ADD/SUB SP word-aligned 17661 10bit ADD PC/SP word-aligned 17662 17663 The type of instruction being processed is encoded in the 17664 instruction field: 17665 17666 0x8000 SUB 17667 0x0100 second operand was present in assembly code 17668 0x00F0 Rd 17669 0x000F Rs 17670 */ 17671 newval = md_chars_to_number (buf, THUMB_SIZE); 17672 { 17673 int rd = (newval >> 4) & 0xf; 17674 int rs = newval & 0xf; 17675 int subtract = !!(newval & 0x8000); 17676 int two_operand = !!(newval & 0x0100); 17677 17678 /* Check for HI regs, only very restricted cases allowed: 17679 Adjusting SP, and using PC or SP to get an address. */ 17680 if ((rd > 7 && (rd != REG_SP || rs != REG_SP)) 17681 || (rs > 7 && rs != REG_SP && rs != REG_PC)) 17682 as_bad_where (fixP->fx_file, fixP->fx_line, 17683 _("invalid Hi register with immediate")); 17684 17685 /* If value is negative, choose the opposite instruction. 
*/ 17686 if (value < 0) 17687 { 17688 value = -value; 17689 subtract = !subtract; 17690 if (value < 0) 17691 as_bad_where (fixP->fx_file, fixP->fx_line, 17692 _("immediate value out of range")); 17693 } 17694 17695 if (rd == REG_SP) 17696 { 17697 if (value & ~0x1fc) 17698 as_bad_where (fixP->fx_file, fixP->fx_line, 17699 _("invalid immediate for stack address calculation")); 17700 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST; 17701 newval |= value >> 2; 17702 } 17703 else if (rs == REG_PC || rs == REG_SP) 17704 { 17705 if (subtract || value & ~0x3fc) 17706 as_bad_where (fixP->fx_file, fixP->fx_line, 17707 _("invalid immediate for address calculation (value = 0x%08x)"), 17708 (uint32_t) value); 17709 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP); 17710 newval |= rd << 8; 17711 newval |= value >> 2; 17712 } 17713 else if ((rs == rd) && (!two_operand || (value & ~0x7))) 17714 { 17715 if (value & ~0xff) 17716 as_bad_where (fixP->fx_file, fixP->fx_line, 17717 _("immediate value out of range")); 17718 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8; 17719 newval |= (rd << 8) | value; 17720 } 17721 else 17722 { 17723 if (value & ~0x7) 17724 as_bad_where (fixP->fx_file, fixP->fx_line, 17725 _("immediate value out of range")); 17726 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3; 17727 newval |= rd | (rs << 3) | (value << 6); 17728 } 17729 } 17730 md_number_to_chars (buf, newval, THUMB_SIZE); 17731 break; 17732 17733 case BFD_RELOC_ARM_THUMB_IMM: 17734 newval = md_chars_to_number (buf, THUMB_SIZE); 17735 if (value < 0 || value > 255) 17736 as_bad_where (fixP->fx_file, fixP->fx_line, 17737 _("invalid immediate: %d is too large"), 17738 (int32_t) value); 17739 newval |= value; 17740 md_number_to_chars (buf, newval, THUMB_SIZE); 17741 break; 17742 17743 case BFD_RELOC_ARM_THUMB_SHIFT: 17744 /* 5bit shift value (0..32). LSL cannot take 32. 
*/ 17745 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f; 17746 temp = newval & 0xf800; 17747 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I)) 17748 as_bad_where (fixP->fx_file, fixP->fx_line, 17749 _("invalid shift value: %d"), (int32_t) value); 17750 /* Shifts of zero must be encoded as LSL. */ 17751 if (value == 0) 17752 newval = (newval & 0x003f) | T_OPCODE_LSL_I; 17753 /* Shifts of 32 are encoded as zero. */ 17754 else if (value == 32) 17755 value = 0; 17756 newval |= value << 6; 17757 md_number_to_chars (buf, newval, THUMB_SIZE); 17758 break; 17759 17760#ifdef NOTYET 17761 case BFD_RELOC_VTABLE_INHERIT: 17762 case BFD_RELOC_VTABLE_ENTRY: 17763 fixP->fx_done = 0; 17764 return; 17765#endif 17766 17767 case BFD_RELOC_ARM_MOVW: 17768 case BFD_RELOC_ARM_MOVT: 17769 case BFD_RELOC_ARM_THUMB_MOVW: 17770 case BFD_RELOC_ARM_THUMB_MOVT: 17771#ifdef NOTYET 17772 if (fixP->fx_done || !seg->use_rela_p) 17773#endif 17774 { 17775#ifdef NOTYET 17776 /* REL format relocations are limited to a 16-bit addend. 
*/ 17777 if (!fixP->fx_done) 17778 { 17779 if (value < -0x1000 || value > 0xffff) 17780 as_bad_where (fixP->fx_file, fixP->fx_line, 17781 _("offset too big")); 17782 } 17783 else 17784#endif 17785 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT 17786 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT) 17787 { 17788 value >>= 16; 17789 } 17790 17791 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW 17792 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT) 17793 { 17794 newval = get_thumb32_insn (buf); 17795 newval &= 0xfbf08f00; 17796 newval |= (value & 0xf000) << 4; 17797 newval |= (value & 0x0800) << 15; 17798 newval |= (value & 0x0700) << 4; 17799 newval |= (value & 0x00ff); 17800 put_thumb32_insn (buf, newval); 17801 } 17802 else 17803 { 17804 newval = md_chars_to_number (buf, 4); 17805 newval &= 0xfff0f000; 17806 newval |= value & 0x0fff; 17807 newval |= (value & 0xf000) << 4; 17808 md_number_to_chars (buf, newval, 4); 17809 } 17810 } 17811 return; 17812 17813 case BFD_RELOC_ARM_ALU_PC_G0_NC: 17814 case BFD_RELOC_ARM_ALU_PC_G0: 17815 case BFD_RELOC_ARM_ALU_PC_G1_NC: 17816 case BFD_RELOC_ARM_ALU_PC_G1: 17817 case BFD_RELOC_ARM_ALU_PC_G2: 17818 case BFD_RELOC_ARM_ALU_SB_G0_NC: 17819 case BFD_RELOC_ARM_ALU_SB_G0: 17820 case BFD_RELOC_ARM_ALU_SB_G1_NC: 17821 case BFD_RELOC_ARM_ALU_SB_G1: 17822 case BFD_RELOC_ARM_ALU_SB_G2: 17823 assert (!fixP->fx_done); 17824#ifdef NOTYET 17825 if (!seg->use_rela_p) 17826#else 17827 if (!0) 17828#endif 17829 { 17830 bfd_vma insn; 17831 bfd_vma encoded_addend; 17832 bfd_vma addend_abs = abs (value); 17833 17834 /* Check that the absolute value of the addend can be 17835 expressed as an 8-bit constant plus a rotation. */ 17836 encoded_addend = encode_arm_immediate (addend_abs); 17837 if (encoded_addend == (unsigned int) FAIL) 17838 as_bad_where (fixP->fx_file, fixP->fx_line, 17839 _("the offset 0x%08x is not representable"), 17840 (uint32_t)addend_abs); 17841 17842 /* Extract the instruction. 
*/ 17843 insn = md_chars_to_number (buf, INSN_SIZE); 17844 17845 /* If the addend is positive, use an ADD instruction. 17846 Otherwise use a SUB. Take care not to destroy the S bit. */ 17847 insn &= 0xff1fffff; 17848 if (value < 0) 17849 insn |= 1 << 22; 17850 else 17851 insn |= 1 << 23; 17852 17853 /* Place the encoded addend into the first 12 bits of the 17854 instruction. */ 17855 insn &= 0xfffff000; 17856 insn |= encoded_addend; 17857 17858 /* Update the instruction. */ 17859 md_number_to_chars (buf, insn, INSN_SIZE); 17860 } 17861 break; 17862 17863 case BFD_RELOC_ARM_LDR_PC_G0: 17864 case BFD_RELOC_ARM_LDR_PC_G1: 17865 case BFD_RELOC_ARM_LDR_PC_G2: 17866 case BFD_RELOC_ARM_LDR_SB_G0: 17867 case BFD_RELOC_ARM_LDR_SB_G1: 17868 case BFD_RELOC_ARM_LDR_SB_G2: 17869 assert (!fixP->fx_done); 17870#ifdef NOTYET 17871 if (!seg->use_rela_p) 17872#else 17873 if (!0) 17874#endif 17875 { 17876 bfd_vma insn; 17877 bfd_vma addend_abs = abs (value); 17878 17879 /* Check that the absolute value of the addend can be 17880 encoded in 12 bits. */ 17881 if (addend_abs >= 0x1000) 17882 as_bad_where (fixP->fx_file, fixP->fx_line, 17883 _("bad offset 0x%08x (only 12 bits available for the magnitude)"), 17884 (uint32_t)addend_abs); 17885 17886 /* Extract the instruction. */ 17887 insn = md_chars_to_number (buf, INSN_SIZE); 17888 17889 /* If the addend is negative, clear bit 23 of the instruction. 17890 Otherwise set it. */ 17891 if (value < 0) 17892 insn &= ~(1 << 23); 17893 else 17894 insn |= 1 << 23; 17895 17896 /* Place the absolute value of the addend into the first 12 bits 17897 of the instruction. */ 17898 insn &= 0xfffff000; 17899 insn |= addend_abs; 17900 17901 /* Update the instruction. 
*/ 17902 md_number_to_chars (buf, insn, INSN_SIZE); 17903 } 17904 break; 17905 17906 case BFD_RELOC_ARM_LDRS_PC_G0: 17907 case BFD_RELOC_ARM_LDRS_PC_G1: 17908 case BFD_RELOC_ARM_LDRS_PC_G2: 17909 case BFD_RELOC_ARM_LDRS_SB_G0: 17910 case BFD_RELOC_ARM_LDRS_SB_G1: 17911 case BFD_RELOC_ARM_LDRS_SB_G2: 17912 assert (!fixP->fx_done); 17913#ifdef NOTYET 17914 if (!seg->use_rela_p) 17915#else 17916 if (!0) 17917#endif 17918 { 17919 bfd_vma insn; 17920 bfd_vma addend_abs = abs (value); 17921 17922 /* Check that the absolute value of the addend can be 17923 encoded in 8 bits. */ 17924 if (addend_abs >= 0x100) 17925 as_bad_where (fixP->fx_file, fixP->fx_line, 17926 _("bad offset 0x%08x (only 8 bits available for the magnitude)"), 17927 (uint32_t)addend_abs); 17928 17929 /* Extract the instruction. */ 17930 insn = md_chars_to_number (buf, INSN_SIZE); 17931 17932 /* If the addend is negative, clear bit 23 of the instruction. 17933 Otherwise set it. */ 17934 if (value < 0) 17935 insn &= ~(1 << 23); 17936 else 17937 insn |= 1 << 23; 17938 17939 /* Place the first four bits of the absolute value of the addend 17940 into the first 4 bits of the instruction, and the remaining 17941 four into bits 8 .. 11. */ 17942 insn &= 0xfffff0f0; 17943 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4); 17944 17945 /* Update the instruction. */ 17946 md_number_to_chars (buf, insn, INSN_SIZE); 17947 } 17948 break; 17949 17950 case BFD_RELOC_ARM_LDC_PC_G0: 17951 case BFD_RELOC_ARM_LDC_PC_G1: 17952 case BFD_RELOC_ARM_LDC_PC_G2: 17953 case BFD_RELOC_ARM_LDC_SB_G0: 17954 case BFD_RELOC_ARM_LDC_SB_G1: 17955 case BFD_RELOC_ARM_LDC_SB_G2: 17956 assert (!fixP->fx_done); 17957#ifdef NOTYET 17958 if (!seg->use_rela_p) 17959#else 17960 if (!0) 17961#endif 17962 { 17963 bfd_vma insn; 17964 bfd_vma addend_abs = abs (value); 17965 17966 /* Check that the absolute value of the addend is a multiple of 17967 four and, when divided by four, fits in 8 bits. 
*/ 17968 if (addend_abs & 0x3) 17969 as_bad_where (fixP->fx_file, fixP->fx_line, 17970 _("bad offset 0x%08x (must be word-aligned)"), 17971 (uint32_t)addend_abs); 17972 17973 if ((addend_abs >> 2) > 0xff) 17974 as_bad_where (fixP->fx_file, fixP->fx_line, 17975 _("bad offset 0x%08x (must be an 8-bit number of words)"), 17976 (uint32_t)addend_abs); 17977 17978 /* Extract the instruction. */ 17979 insn = md_chars_to_number (buf, INSN_SIZE); 17980 17981 /* If the addend is negative, clear bit 23 of the instruction. 17982 Otherwise set it. */ 17983 if (value < 0) 17984 insn &= ~(1 << 23); 17985 else 17986 insn |= 1 << 23; 17987 17988 /* Place the addend (divided by four) into the first eight 17989 bits of the instruction. */ 17990 insn &= 0xfffffff0; 17991 insn |= addend_abs >> 2; 17992 17993 /* Update the instruction. */ 17994 md_number_to_chars (buf, insn, INSN_SIZE); 17995 } 17996 break; 17997 17998 case BFD_RELOC_UNUSED: 17999 default: 18000 as_bad_where (fixP->fx_file, fixP->fx_line, 18001 _("bad relocation fixup type (%d)"), fixP->fx_r_type); 18002 } 18003#ifndef NOTYET 18004 /* Don't allow relocations to escape into the object file that aren't 18005 supported by the linker. 
*/
  if (fixP->fx_addsy != NULL
      && fixP->fx_r_type != ARM_THUMB_RELOC_BR22
      && fixP->fx_r_type != ARM_RELOC_BR24)
    as_bad_where (fixP->fx_file, fixP->fx_line,
		  _("unsupported relocation on symbol %s"),
		  S_GET_NAME (fixP->fx_addsy));
#endif
}

/* Decide whether the fixup FIXP must be emitted as a relocation entry in
   the object file (return non-zero) or may be fully resolved here by the
   assembler (return zero).  */
int
arm_force_relocation (struct fix * fixp)
{
#if defined (OBJ_COFF) && defined (TE_PE)
  if (fixp->fx_r_type == BFD_RELOC_RVA)
    return 1;
#endif

#ifdef NeXT_MOD
  /* Keep branch fixups as relocations so the linker can process them --
     except branches to assembler-local labels (names starting with 'L'),
     which are resolved here unless -L was given to keep those symbols.  */
  if (fixp->fx_r_type == BFD_RELOC_ARM_PCREL_BRANCH
      || fixp->fx_r_type == BFD_RELOC_ARM_PCREL_BLX
      || fixp->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX
      || fixp->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
      || fixp->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH25)
    {
      if (fixp->fx_addsy != NULL)
	{
	  const char *name = S_GET_NAME (fixp->fx_addsy);
	  if (! flagseen ['L'] && name && name[0] == 'L')
	    return 0;
	}
      return 1;
    }
#endif

  /* Resolve these relocations even if the symbol is extern or weak.  */
  if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
    return 0;

  /* Always leave these relocations for the linker.  */
  if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return 1;

#ifdef NOTYET
  /* Always generate relocations against function symbols.
*/
  if (fixp->fx_r_type == BFD_RELOC_32
      && fixp->fx_addsy
      && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
    return 1;

  return generic_force_reloc (fixp);
#else
  return 0;
#endif
}

/* MD interface: Finalization.  */

/* A good place to do this, although this was probably not intended
   for this kind of use.  We need to dump the literal pool before
   references are made to a null symbol pointer.  */

#ifdef NOTYET
void
arm_cleanup (void)
{
  literal_pool * pool;

  for (pool = list_of_pools; pool; pool = pool->next)
    {
      /* Put it at the end of the relevant section.  */
      subseg_set (pool->section, pool->sub_section);
#ifdef OBJ_ELF
      arm_elf_change_section ();
#endif
      s_ltorg (0);
    }
}
#endif /* NOTYET */

/* MD interface: Initialization.  */

/* Convert the textual floating-point constants in fp_const[] into their
   internal flonum representation in fp_values[]; abort()s if any constant
   fails to parse, since they are compiled-in and must be valid.  */
static void
set_constant_flonums (void)
{
  int i;

  for (i = 0; i < NUM_FLOAT_VALS; i++)
    if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
      abort ();
}

/* Auto-select Thumb mode if it's the only available instruction set for the
   given architecture.
*/

#ifdef NOTYET
static void
autoselect_thumb_from_cpu_variant (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    opcode_select (16);
}
#endif /* NOTYET */

/* md_begin() is called before assembly begins.  It builds the keyword hash
   tables used by the instruction parser (opcodes, condition codes, shift
   names, PSR names, register names, relocation names and barrier options),
   converts the compiled-in floating point constants, and then narrows
   cpu_variant to match the Mach-O cpusubtype selected on the command
   line.  */
void
md_begin (void)
{
#ifdef NOTYET
  unsigned mach;
#endif
  unsigned int i;

  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  /* Populate the hash tables from the static keyword arrays.  */
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template, (PTR) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template, (PTR) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (PTR) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template, (PTR) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (PTR) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (PTR) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
		 (PTR) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
    hash_insert (arm_reloc_hsh, reloc_names[i].name, (PTR) (reloc_names + i));
#endif

  set_constant_flonums ();

#ifdef NOTYET
  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      mcpu_cpu_opt = legacy_cpu;
    }
  else if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      mfpu_opt = legacy_fpu;
    }
  else if (!mfpu_opt)
    {
#if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.
*/
      if (mcpu_fpu_opt)
	mfpu_opt = mcpu_fpu_opt;
      else
	mfpu_opt = march_fpu_opt;
#else
      mfpu_opt = &fpu_default;
#endif
    }

  if (!mfpu_opt)
    {
      if (mcpu_cpu_opt != NULL)
	mfpu_opt = &fpu_default;
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
	mfpu_opt = &fpu_arch_vfp_v2;
      else
	mfpu_opt = &fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (!mcpu_cpu_opt)
    {
      mcpu_cpu_opt = &cpu_default;
      selected_cpu = cpu_default;
    }
#else
  if (mcpu_cpu_opt)
    selected_cpu = *mcpu_cpu_opt;
  else
    mcpu_cpu_opt = &arm_arch_any;
#endif

  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)          flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).
*/
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	  flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.  */
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.  */
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.
*/
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
#endif /* NOTYET */

  /* -force_cpusubtype_ALL overrides the -arch flag: accept everything.  */
  if (force_cpusubtype_ALL)
    {
      cpu_variant = arm_arch_full;
      archflag_cpusubtype = CPU_SUBTYPE_ARM_ALL;
    }

  /* Narrow the accepted instruction set to match the Mach-O cpusubtype
     selected by the -arch flag, if any.  */
  switch (archflag_cpusubtype)
    {
    case CPU_SUBTYPE_ARM_V5TEJ:
      {
	static const arm_feature_set arm_arch_v5tej = ARM_ARCH_V5TEJ;
	cpu_variant = arm_arch_v5tej;
      }
      break;
    case CPU_SUBTYPE_ARM_XSCALE:
      {
	static const arm_feature_set arm_arch_xscale = ARM_ARCH_XSCALE;
	cpu_variant = arm_arch_xscale;
      }
      break;
    case CPU_SUBTYPE_ARM_V6:
      {
	static const arm_feature_set arm_arch_v6zk_vfp_v2 =
	  ARM_FEATURE (ARM_AEXT_V6ZK, FPU_VFP_V2);
	cpu_variant = arm_arch_v6zk_vfp_v2;
      }
      break;
    case CPU_SUBTYPE_ARM_V7:
    case CPU_SUBTYPE_ARM_V7F:
    case CPU_SUBTYPE_ARM_V7S:
    case CPU_SUBTYPE_ARM_V7K:
      {
	static const arm_feature_set arm_arch_v7_vfp_v3_plus_neon_v1 =
	  ARM_FEATURE (ARM_AEXT_V7_ARM | ARM_EXT_V7M | ARM_EXT_DIV,
		       FPU_VFP_V3 | FPU_NEON_EXT_V1);
	cpu_variant = arm_arch_v7_vfp_v3_plus_neon_v1;
      }
      break;
    }
}

/*
 * md_end() is called from main() in as.c after assembly ends.  It is used
 * to allow target machine dependent clean up.
 */
void
md_end(void)
{
}

/*
 * md_parse_option() is called from main() in as.c to parse target machine
 * dependent command line options.  This routine returns 0 if it is passed an
 * option that is not recognized, non-zero otherwise.
 */
int
md_parse_option(
char **argP,
int *cntP,
char ***vecP)
{
	switch(**argP) {
	default:
	    /* No ARM-specific command line options are recognized here.  */
	    break;
	}
	return(0);
}

/*
 * md_number_to_imm() is the target machine dependent routine that puts out
 * a binary value of size 8, 4, 2, or 1 bytes into the specified buffer with
 * regard to a possible relocation entry (the fixP->fx_r_type field in the fixS
 * structure pointed to by fixP) for the section with the ordinal nsect.  This
 * is done in the target machine's byte sex using its relocation types.
 * In this case the byte order is little endian.
18410 */ 18411void 18412md_number_to_imm( 18413unsigned char *buf, 18414signed_expr_t val, 18415int nbytes, 18416fixS *fixP, 18417int nsect) 18418{ 18419#if 0 18420 int sign; 18421 signed_target_addr_t newval; 18422#endif 18423 18424 if(fixP->fx_r_type == NO_RELOC || 18425 fixP->fx_r_type == ARM_RELOC_VANILLA){ 18426 switch(nbytes){ 18427 case 8: 18428 *buf++ = val & 0xff; 18429 *buf++ = (val >> 8) & 0xff; 18430 *buf++ = (val >> 16) & 0xff; 18431 *buf++ = (val >> 24) & 0xff; 18432 *buf++ = (val >> 32) & 0xff; 18433 *buf++ = (val >> 40) & 0xff; 18434 *buf++ = (val >> 48) & 0xff; 18435 *buf++ = (val >> 56) & 0xff; 18436 break; 18437 case 4: 18438 *buf++ = val & 0xff; 18439 *buf++ = (val >> 8) & 0xff; 18440 *buf++ = (val >> 16) & 0xff; 18441 *buf++ = (val >> 24) & 0xff; 18442 break; 18443 case 2: 18444 *buf++ = val & 0xff; 18445 *buf++ = (val >> 8) & 0xff; 18446 break; 18447 case 1: 18448 *buf = val & 0xff; 18449 break; 18450 default: 18451 abort(); 18452 } 18453 return; 18454 } 18455 switch(fixP->fx_r_type){ 18456#if 0 18457 case ARM_RELOC_BR24: 18458 if(fixP->fx_pcrel) 18459/* GUESS this should be 4 not 8 which seems to disassemble correctly for local 18460 defined labels. 
But this seems to be off by 8 for external undefined labels 18461 and the target address is not 0 but 0xfffffff8 */ 18462 val += 4; 18463 if((val & 0xfc000000) && ((val & 0xfc000000) != 0xfc000000)){ 18464 layout_file = fixP->file; 18465 layout_line = fixP->line; 18466 as_warn("Fixup of %u too large for field width of 26 bits",val); 18467 } 18468 if((val & 0x3) != 0){ 18469 layout_file = fixP->file; 18470 layout_line = fixP->line; 18471 as_warn("Fixup of %u is not to a 4 byte address", val); 18472 } 18473 buf[0] = (val >> 2) & 0xff; 18474 buf[1] = (val >> 10) & 0xff; 18475 buf[2] = (val >> 18) & 0xff; 18476 /* buf[3] has the opcode part of the instruction */ 18477 break; 18478 18479/* Code taken and modified from tc-arm.c md_apply_fix3() line 11473 */ 18480 case BFD_RELOC_ARM_OFFSET_IMM: 18481/* GUESS this needs 4 added to val. Which then better matches what the FSF GAS 18482 produces for an ldr instruction */ 18483 val += 4; 18484 18485 sign = val >= 0; 18486 18487 if(val < 0) 18488 val = - val; 18489 18490 if(validate_offset_imm(val, 0) == FAIL){ 18491 layout_file = fixP->file; 18492 layout_line = fixP->line; 18493 as_warn("bad immediate value for offset (%d)", (int32_t) val); 18494 break; 18495 } 18496 18497 newval = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; 18498 18499 newval &= 0xff7ff000; 18500 newval |= val | (sign ? INDEX_UP : 0); 18501 18502 *buf++ = newval & 0xff; 18503 *buf++ = (newval >> 8) & 0xff; 18504 *buf++ = (newval >> 16) & 0xff; 18505 *buf++ = (newval >> 24) & 0xff; 18506 18507 break; 18508#endif 18509 18510 case BFD_RELOC_ARM_ADRL_IMMEDIATE: 18511 default: 18512 { 18513 valueT newval = val; 18514 18515 /* Die if we have more bytes than md_apply_fix3 knows how to 18516 handle. */ 18517 if (sizeof (valueT) < nbytes) 18518 abort (); 18519 18520 md_apply_fix (fixP, &newval, now_seg); 18521 } 18522 } 18523} 18524#endif /* INSNS_TABLE_ONLY */ 18525