/* aarch64-asm.c -- AArch64 assembler support.
   Copyright (C) 2012-2017 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of the GNU opcodes library.

   This library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   It is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3. If not,
   see <http://www.gnu.org/licenses/>.  */

#include "sysdep.h"
#include <stdarg.h>
#include "libiberty.h"
#include "aarch64-asm.h"

/* Utilities.  */

/* The unnamed arguments consist of the number of fields and information about
   these fields where the VALUE will be inserted into CODE.  MASK can be zero or
   the base mask of the opcode.
   N.B. the fields are required to be in such an order that the least significant
   field for VALUE comes first, e.g. the <index> in
    SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
   is in some cases encoded in H:L:M; the fields H:L:M should then be passed in
   the order of M, L, H.  */
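/* Illustrative example: an index encoded in H:L:M (with M as the least
   significant field) is inserted with
     insert_fields (code, index, 0, 3, FLD_M, FLD_L, FLD_H);
   so that M receives bit 0 of the value, L bit 1 and H bit 2, as is done
   for the AARCH64_OPND_QLF_S_H case in aarch64_ins_reglane below.  */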

static inline void
insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
{
  uint32_t num;
  const aarch64_field *field;
  enum aarch64_field_kind kind;
  va_list va;

  va_start (va, mask);
  num = va_arg (va, uint32_t);
  assert (num <= 5);
  while (num--)
    {
      kind = va_arg (va, enum aarch64_field_kind);
      field = &fields[kind];
      insert_field (kind, code, value, mask);
      value >>= field->width;
    }
  va_end (va);
}

/* Insert a raw field value VALUE into all fields in SELF->fields.
   The least significant bit goes in the final field.  */

static void
insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
		   aarch64_insn value)
{
  unsigned int i;
  enum aarch64_field_kind kind;

  for (i = ARRAY_SIZE (self->fields); i-- > 0; )
    if (self->fields[i] != FLD_NIL)
      {
	kind = self->fields[i];
	insert_field (kind, code, value, 0);
	value >>= fields[kind].width;
      }
}

/* Operand inserters.  */

/* Insert register number.  */
const char *
aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
		   aarch64_insn *code,
		   const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->reg.regno, 0);
  return NULL;
}

/* Insert register number, index and/or other data for SIMD register element
   operand, e.g. the last source operand in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
const char *
aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
		     aarch64_insn *code, const aarch64_inst *inst)
{
  /* regno */
  insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
  /* index and/or type */
  if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
    {
      int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
      if (info->type == AARCH64_OPND_En
	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
	{
	  /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
	  assert (info->idx == 1);	/* Vn */
	  aarch64_insn value = info->reglane.index << pos;
	  insert_field (FLD_imm4, code, value, 0);
	}
      else
	{
	  /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
	     imm5<3:0>	<V>
	     0000	RESERVED
	     xxx1	B
	     xx10	H
	     x100	S
	     1000	D  */
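	  /* Illustrative example: for DUP <Sd>, <Vn>.S[2], pos is 2, so the
	     value inserted into imm5 is ((2 << 1) | 1) << 2 = 20
	     (binary 10100).  */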
	  aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
	  insert_field (FLD_imm5, code, value, 0);
	}
    }
  else
    {
      /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
         or SQDMLAL <Va>.<Ta>, <Vb>.<Tb>, <Vm>.<Ts>[<index>].  */
      unsigned reglane_index = info->reglane.index;

      if (inst->opcode->op == OP_FCMLA_ELEM)
	/* Complex operand takes two elements.  */
	reglane_index *= 2;

      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_H:
	  /* H:L:M */
	  assert (reglane_index < 8);
	  insert_fields (code, reglane_index, 0, 3, FLD_M, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_S:
	  /* H:L */
	  assert (reglane_index < 4);
	  insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_D:
	  /* H */
	  assert (reglane_index < 2);
	  insert_field (FLD_H, code, reglane_index, 0);
	  break;
	default:
	  assert (0);
	}
    }
  return NULL;
}

/* Insert regno and len field of a register list operand, e.g. Vn in TBL.  */
const char *
aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
		     aarch64_insn *code,
		     const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* R */
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  /* len */
  insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
  return NULL;
}

/* Insert Rt and opcode fields for a register list operand, e.g. Vt
   in AdvSIMD load/store instructions.  */
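/* Illustrative example: LD2 {V0.4S, V1.4S}, [X0] loads structures of two
   elements using a list of two registers, so NUM below is 2, the register
   count is 2 and the opcode field is set to 0x8.  */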
const char *
aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst)
{
  aarch64_insn value = 0;
  /* Number of elements in each structure to be loaded/stored.  */
  unsigned num = get_opcode_dependent_value (inst->opcode);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* opcode */
  switch (num)
    {
    case 1:
      switch (info->reglist.num_regs)
	{
	case 1: value = 0x7; break;
	case 2: value = 0xa; break;
	case 3: value = 0x6; break;
	case 4: value = 0x2; break;
	default: assert (0);
	}
      break;
    case 2:
      value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
      break;
    case 3:
      value = 0x4;
      break;
    case 4:
      value = 0x0;
      break;
    default:
      assert (0);
    }
  insert_field (FLD_opcode, code, value, 0);

  return NULL;
}

/* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
   single structure to all lanes instructions.  */
const char *
aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
			    const aarch64_opnd_info *info, aarch64_insn *code,
			    const aarch64_inst *inst)
{
  aarch64_insn value;
  /* The opcode dependent area stores the number of elements in
     each structure to be loaded/stored.  */
  int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* S */
  value = (aarch64_insn) 0;
  if (is_ld1r && info->reglist.num_regs == 2)
    /* OP_LD1R does not have an alternating variant, but has a "two
       consecutive" form instead.  */
    value = (aarch64_insn) 1;
  insert_field (FLD_S, code, value, 0);

  return NULL;
}

/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
   operand e.g. Vt in AdvSIMD load/store single element instructions.  */
const char *
aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			   const aarch64_opnd_info *info, aarch64_insn *code,
			   const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  aarch64_field field = {0, 0};
  aarch64_insn QSsize = 0;	/* fields Q:S:size.  */
  aarch64_insn opcodeh2 = 0;	/* opcode<2:1> */

  assert (info->reglist.has_index);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* Encode the index, opcode<2:1> and size.  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Index encoded in "Q:S:size".  */
      QSsize = info->reglist.index;
      opcodeh2 = 0x0;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Index encoded in "Q:S:size<1>".  */
      QSsize = info->reglist.index << 1;
      opcodeh2 = 0x1;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* Index encoded in "Q:S".  */
      QSsize = info->reglist.index << 2;
      opcodeh2 = 0x2;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Index encoded in "Q".  */
      QSsize = info->reglist.index << 3 | 0x1;
      opcodeh2 = 0x2;
      break;
    default:
      assert (0);
    }
  insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
  gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  insert_field_2 (&field, code, opcodeh2, 0);

  return NULL;
}

/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
   SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
   or SSHR <V><d>, <V><n>, #<shift>.  */
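/* Illustrative example: a right shift of #3 on a D-sized operand is encoded
   as immh:immb = 128 - 3 = 125 (binary 1111101), matching the 1xxx pattern
   in the table below.  */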
const char *
aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code, const aarch64_inst *inst)
{
  unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
  aarch64_insn Q, imm;

  if (inst->opcode->iclass == asimdshf)
    {
      /* Q
	 immh	Q	<T>
	 0000	x	SEE AdvSIMD modified immediate
	 0001	0	8B
	 0001	1	16B
	 001x	0	4H
	 001x	1	8H
	 01xx	0	2S
	 01xx	1	4S
	 1xxx	0	RESERVED
	 1xxx	1	2D  */
      Q = (val & 0x1) ? 1 : 0;
      insert_field (FLD_Q, code, Q, inst->opcode->mask);
      val >>= 1;
    }

  assert (info->type == AARCH64_OPND_IMM_VLSR
	  || info->type == AARCH64_OPND_IMM_VLSL);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(16-UInt(immh:immb))
       001x	(32-UInt(immh:immb))
       01xx	(64-UInt(immh:immb))
       1xxx	(128-UInt(immh:immb))  */
    imm = (16 << (unsigned)val) - info->imm.value;
  else
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(UInt(immh:immb)-8)
       001x	(UInt(immh:immb)-16)
       01xx	(UInt(immh:immb)-32)
       1xxx	(UInt(immh:immb)-64)  */
    imm = info->imm.value + (8 << (unsigned)val);
  insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);

  return NULL;
}

/* Insert fields for e.g. the immediate operands in
   BFM <Wd>, <Wn>, #<immr>, #<imms>.  */
const char *
aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
		 aarch64_insn *code,
		 const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int64_t imm;

  imm = info->imm.value;
  if (operand_need_shift_by_two (self))
    imm >>= 2;
  insert_all_fields (self, code, imm);
  return NULL;
}

/* Insert immediate and its shift amount for e.g. the last operand in
     MOVZ <Wd>, #<imm16>{, LSL #<shift>}.  */
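/* Illustrative example: MOVZ W0, #0xffff, LSL #16 has a shifter amount of
   16, so hw is encoded as 16 >> 4 = 1.  */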
const char *
aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
		      aarch64_insn *code, const aarch64_inst *inst)
{
  /* imm16 */
  aarch64_ins_imm (self, info, code, inst);
  /* hw */
  insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
  return NULL;
}

/* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
     MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}.  */
const char *
aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
				  const aarch64_opnd_info *info,
				  aarch64_insn *code,
				  const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
  uint64_t imm = info->imm.value;
  enum aarch64_modifier_kind kind = info->shifter.kind;
  int amount = info->shifter.amount;
  aarch64_field field = {0, 0};

  /* a:b:c:d:e:f:g:h */
  if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
    {
      /* Either MOVI <Dd>, #<imm>
	 or     MOVI <Vd>.2D, #<imm>.
	 <imm> is a 64-bit immediate
	 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
	 encoded in "a:b:c:d:e:f:g:h".	*/
      imm = aarch64_shrink_expanded_imm8 (imm);
      assert ((int)imm >= 0);
    }
  insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);

  if (kind == AARCH64_MOD_NONE)
    return NULL;

  /* shift amount partially in cmode */
  assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
  if (kind == AARCH64_MOD_LSL)
    {
      /* AARCH64_MOD_LSL: shift zeros.  */
      int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
      assert (esize == 4 || esize == 2 || esize == 1);
      /* For 8-bit move immediate, the optional LSL #0 does not require
	 encoding.  */
      if (esize == 1)
	return NULL;
      amount >>= 3;
      if (esize == 4)
	gen_sub_field (FLD_cmode, 1, 2, &field);	/* per word */
      else
	gen_sub_field (FLD_cmode, 1, 1, &field);	/* per halfword */
    }
  else
    {
      /* AARCH64_MOD_MSL: shift ones.  */
      amount >>= 4;
      gen_sub_field (FLD_cmode, 0, 1, &field);		/* per word */
    }
  insert_field_2 (&field, code, amount, 0);

  return NULL;
}

/* Insert fields for an 8-bit floating-point immediate.  */
const char *
aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
		   aarch64_insn *code,
		   const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  insert_all_fields (self, code, info->imm.value);
  return NULL;
}

/* Insert 1-bit rotation immediate (#90 or #270).  */
const char *
aarch64_ins_imm_rotate1 (const aarch64_operand *self,
			 const aarch64_opnd_info *info,
			 aarch64_insn *code, const aarch64_inst *inst)
{
  uint64_t rot = (info->imm.value - 90) / 180;
  assert (rot < 2U);
  insert_field (self->fields[0], code, rot, inst->opcode->mask);
  return NULL;
}

/* Insert 2-bit rotation immediate (#0, #90, #180 or #270).  */
const char *
aarch64_ins_imm_rotate2 (const aarch64_operand *self,
			 const aarch64_opnd_info *info,
			 aarch64_insn *code, const aarch64_inst *inst)
{
  uint64_t rot = info->imm.value / 90;
  assert (rot < 4U);
  insert_field (self->fields[0], code, rot, inst->opcode->mask);
  return NULL;
}

/* Insert #<fbits> for the immediate operand in fp fixed-point instructions,
   e.g.  SCVTF <Dd>, <Wn>, #<fbits>.  */
const char *
aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
		   aarch64_insn *code,
		   const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, 64 - info->imm.value, 0);
  return NULL;
}

/* Insert arithmetic immediate for e.g. the last operand in
     SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}.  */
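/* Illustrative example: SUBS X0, X1, #1, LSL #12 sets the shift field to 1
   and imm12 to 1; without the LSL the shift field would be 0.  */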
const char *
aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
		  aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* shift */
  aarch64_insn value = info->shifter.amount ? 1 : 0;
  insert_field (self->fields[0], code, value, 0);
  /* imm12 (unsigned) */
  insert_field (self->fields[1], code, info->imm.value, 0);
  return NULL;
}

/* Common routine shared by aarch64_ins{,_inv}_limm.  INVERT_P says whether
   the operand should be inverted before encoding.  */
static const char *
aarch64_ins_limm_1 (const aarch64_operand *self,
		    const aarch64_opnd_info *info, aarch64_insn *code,
		    const aarch64_inst *inst, bfd_boolean invert_p)
{
  aarch64_insn value;
  uint64_t imm = info->imm.value;
  int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);

  if (invert_p)
    imm = ~imm;
  if (aarch64_logical_immediate_p (imm, esize, &value) == FALSE)
    /* The constraint check should have guaranteed this wouldn't happen.  */
    assert (0);

  insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
		 self->fields[0]);
  return NULL;
}

/* Insert logical/bitmask immediate for e.g. the last operand in
     ORR <Wd|WSP>, <Wn>, #<imm>.  */
const char *
aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
		  aarch64_insn *code, const aarch64_inst *inst)
{
  return aarch64_ins_limm_1 (self, info, code, inst,
			     inst->opcode->op == OP_BIC);
}

/* Insert a logical/bitmask immediate for the BIC alias of AND (etc.).  */
const char *
aarch64_ins_inv_limm (const aarch64_operand *self,
		      const aarch64_opnd_info *info, aarch64_insn *code,
		      const aarch64_inst *inst)
{
  return aarch64_ins_limm_1 (self, info, code, inst, TRUE);
}

/* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
   or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.  */
const char *
aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
		aarch64_insn *code, const aarch64_inst *inst)
{
  aarch64_insn value = 0;

  assert (info->idx == 0);

  /* Rt */
  aarch64_ins_regno (self, info, code, inst);
  if (inst->opcode->iclass == ldstpair_indexed
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == loadlit)
    {
      /* size */
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_Q: value = 2; break;
	default: assert (0);
	}
      insert_field (FLD_ldst_size, code, value, 0);
    }
  else
    {
      /* opc[1]:size */
      value = aarch64_get_qualifier_standard_value (info->qualifier);
      insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
    }

  return NULL;
}

/* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}].  */
const char *
aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  return NULL;
}

/* Encode the address operand for e.g.
     STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
const char *
aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  aarch64_insn S;
  enum aarch64_modifier_kind kind = info->shifter.kind;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm */
  insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  /* option */
  if (kind == AARCH64_MOD_LSL)
    kind = AARCH64_MOD_UXTX;	/* Trick to enable the table-driven encoding.  */
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* S */
  if (info->qualifier != AARCH64_OPND_QLF_S_B)
    S = info->shifter.amount != 0;
  else
    /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]:
       S	<amount>
       0	[absent]
       1	#0
       Must be #0 if <extend> is explicitly LSL.  */
    S = info->shifter.operator_present && info->shifter.amount_present;
  insert_field (FLD_S, code, S, 0);

  return NULL;
}

/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!.  */
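/* Illustrative example: STP X0, X1, [SP, #16]! uses the scaled imm7 form
   below, so the inserted immediate is 16 >> 3 = 2.  */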
const char *
aarch64_ins_addr_simm (const aarch64_operand *self,
		       const aarch64_opnd_info *info,
		       aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* simm (imm9 or imm7) */
  imm = info->addr.offset.imm;
  if (self->fields[0] == FLD_imm7)
    /* Scaled immediate in ld/st pair instructions.  */
    imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
  insert_field (self->fields[0], code, imm, 0);
  /* pre/post-index */
  if (info->addr.writeback)
    {
      assert (inst->opcode->iclass != ldst_unscaled
	      && inst->opcode->iclass != ldstnapair_offs
	      && inst->opcode->iclass != ldstpair_off
	      && inst->opcode->iclass != ldst_unpriv);
      assert (info->addr.preind != info->addr.postind);
      if (info->addr.preind)
	insert_field (self->fields[1], code, 1, 0);
    }

  return NULL;
}

/* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}].  */
const char *
aarch64_ins_addr_simm10 (const aarch64_operand *self,
			 const aarch64_opnd_info *info,
			 aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  /* simm10 */
  imm = info->addr.offset.imm >> 3;
  insert_field (self->fields[1], code, imm >> 9, 0);
  insert_field (self->fields[2], code, imm, 0);
  /* writeback */
  if (info->addr.writeback)
    {
      assert (info->addr.preind == 1 && info->addr.postind == 0);
      insert_field (self->fields[3], code, 1, 0);
    }
  return NULL;
}

/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}].  */
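/* Illustrative example: LDRSW X0, [X1, #24] scales the offset by the 4-byte
   transfer size, so uimm12 is 24 >> 2 = 6.  */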
const char *
aarch64_ins_addr_uimm12 (const aarch64_operand *self,
			 const aarch64_opnd_info *info,
			 aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));

  /* Rn */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  /* uimm12 */
  insert_field (self->fields[1], code, info->addr.offset.imm >> shift, 0);
  return NULL;
}

/* Encode the address operand for e.g.
     LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>.  */
const char *
aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
			    const aarch64_opnd_info *info, aarch64_insn *code,
			    const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm | #<amount>  */
  if (info->addr.offset.is_reg)
    insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  else
    insert_field (FLD_Rm, code, 0x1f, 0);
  return NULL;
}

/* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>.  */
const char *
aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
		  const aarch64_opnd_info *info, aarch64_insn *code,
		  const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* cond */
  insert_field (FLD_cond, code, info->cond->value, 0);
  return NULL;
}

/* Encode the system register operand for e.g. MRS <Xt>, <systemreg>.  */
const char *
aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
		    const aarch64_opnd_info *info, aarch64_insn *code,
		    const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* op0:op1:CRn:CRm:op2 */
  insert_fields (code, info->sysreg, inst->opcode->mask, 5,
		 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
  return NULL;
}

/* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>.  */
const char *
aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* op1:op2 */
  insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
		 FLD_op2, FLD_op1);
  return NULL;
}

/* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>.  */
const char *
aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* op1:CRn:CRm:op2 */
  insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
		 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
  return NULL;
}

/* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>.  */

const char *
aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
		     const aarch64_opnd_info *info, aarch64_insn *code,
		     const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* CRm */
  insert_field (FLD_CRm, code, info->barrier->value, 0);
  return NULL;
}

/* Encode the prefetch operation option operand for e.g.
     PRFM <prfop>, [<Xn|SP>{, #<pimm>}].  */

const char *
aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
		   const aarch64_opnd_info *info, aarch64_insn *code,
		   const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* prfop in Rt */
  insert_field (FLD_Rt, code, info->prfop->value, 0);
  return NULL;
}

/* Encode the hint number for instructions that alias HINT but take an
   operand.  */

const char *
aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
		  const aarch64_opnd_info *info, aarch64_insn *code,
		  const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* CRm:op2.  */
  insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
  return NULL;
}

/* Encode the extended register operand for e.g.
     STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
const char *
aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  enum aarch64_modifier_kind kind;

  /* Rm */
  insert_field (FLD_Rm, code, info->reg.regno, 0);
  /* option */
  kind = info->shifter.kind;
  if (kind == AARCH64_MOD_LSL)
    kind = info->qualifier == AARCH64_OPND_QLF_W
      ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* imm3 */
  insert_field (FLD_imm3, code, info->shifter.amount, 0);

  return NULL;
}

/* Encode the shifted register operand for e.g.
     SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}.  */
const char *
aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  /* Rm */
  insert_field (FLD_Rm, code, info->reg.regno, 0);
  /* shift */
  insert_field (FLD_shift, code,
		aarch64_get_operand_modifier_value (info->shifter.kind), 0);
  /* imm6 */
  insert_field (FLD_imm6, code, info->shifter.amount, 0);

  return NULL;
}

/* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
   where <simm4> is a 4-bit signed value and where <factor> is 1 plus
   SELF's operand-dependent value.  fields[0] specifies the field that
   holds <base>.  <simm4> is encoded in the SVE_imm4 field.  */
const char *
aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code,
			       const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int factor = 1 + get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
  return NULL;
}

/* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
   where <simm6> is a 6-bit signed value and where <factor> is 1 plus
   SELF's operand-dependent value.  fields[0] specifies the field that
   holds <base>.  <simm6> is encoded in the SVE_imm6 field.  */
const char *
aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code,
			       const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int factor = 1 + get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
  return NULL;
}

/* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
   where <simm9> is a 9-bit signed value and where <factor> is 1 plus
   SELF's operand-dependent value.  fields[0] specifies the field that
   holds <base>.  <simm9> is encoded in the concatenation of the SVE_imm6
   and imm3 fields, with imm3 being the less-significant part.  */
const char *
aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code,
			       const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int factor = 1 + get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_fields (code, info->addr.offset.imm / factor, 0,
		 2, FLD_imm3, FLD_SVE_imm6);
  return NULL;
}

/* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
   is a 4-bit signed number and where <shift> is SELF's operand-dependent
   value.  fields[0] specifies the base register field.  */
const char *
aarch64_ins_sve_addr_ri_s4 (const aarch64_operand *self,
			    const aarch64_opnd_info *info, aarch64_insn *code,
			    const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
  return NULL;
}

/* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
   is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
   value.  fields[0] specifies the base register field.  */
const char *
aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
			    const aarch64_opnd_info *info, aarch64_insn *code,
			    const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
  return NULL;
}

/* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
   is SELF's operand-dependent value.  fields[0] specifies the base
   register field and fields[1] specifies the offset register field.  */
const char *
aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
			     const aarch64_opnd_info *info, aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  return NULL;
}

/* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
   <shift> is SELF's operand-dependent value.  fields[0] specifies the
   base register field, fields[1] specifies the offset register field and
   fields[2] is a single-bit field that selects SXTW over UXTW.  */
const char *
aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
			     const aarch64_opnd_info *info, aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  if (info->shifter.kind == AARCH64_MOD_UXTW)
    insert_field (self->fields[2], code, 0, 0);
  else
    insert_field (self->fields[2], code, 1, 0);
  return NULL;
}

/* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
   5-bit unsigned number and where <shift> is SELF's operand-dependent value.
   fields[0] specifies the base register field.  */
const char *
aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
			    const aarch64_opnd_info *info, aarch64_insn *code,
			    const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
  return NULL;
}

/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
   where <modifier> is fixed by the instruction and where <msz> is a
   2-bit unsigned number.  fields[0] specifies the base register field
   and fields[1] specifies the offset register field.  */
static const char *
aarch64_ext_sve_addr_zz (const aarch64_operand *self,
			 const aarch64_opnd_info *info, aarch64_insn *code)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
  return NULL;
}

/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
const char *
aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
			     const aarch64_opnd_info *info, aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  return aarch64_ext_sve_addr_zz (self, info, code);
}

/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
const char *
aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
			      const aarch64_opnd_info *info,
			      aarch64_insn *code,
			      const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  return aarch64_ext_sve_addr_zz (self, info, code);
}

/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
const char *
aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
			      const aarch64_opnd_info *info,
			      aarch64_insn *code,
			      const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  return aarch64_ext_sve_addr_zz (self, info, code);
}

/* Encode an SVE ADD/SUB immediate.  */
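/* Illustrative examples: #7 is encoded as 7; #1, LSL #8 is encoded as
   (1 & 0xff) | 256 = 0x101; #0x300 with no explicit shift is encoded as
   ((0x300 / 256) & 0xff) | 256 = 0x103.  */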
const char *
aarch64_ins_sve_aimm (const aarch64_operand *self,
		      const aarch64_opnd_info *info, aarch64_insn *code,
		      const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  if (info->shifter.amount == 8)
    insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
  else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
    insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
  else
    insert_all_fields (self, code, info->imm.value & 0xff);
  return NULL;
}

/* Encode an SVE CPY/DUP immediate.  */
const char *
aarch64_ins_sve_asimm (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst)
{
  return aarch64_ins_sve_aimm (self, info, code, inst);
}

/* Encode Zn[MM], where MM has a 7-bit triangular encoding.  The fields
   array specifies which field to use for Zn.  MM is encoded in the
   concatenation of imm5 and SVE_tszh, with imm5 being the less
   significant part.  */
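/* Illustrative example: for a .S element (esize 4) with index 1, the value
   inserted is (1 * 2 + 1) * 4 = 12, giving imm5 = 12 (binary 01100) and
   SVE_tszh = 0.  */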
const char *
aarch64_ins_sve_index (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
  insert_field (self->fields[0], code, info->reglane.regno, 0);
  insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
		 2, FLD_imm5, FLD_SVE_tszh);
  return NULL;
}

/* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM.  */
const char *
aarch64_ins_sve_limm_mov (const aarch64_operand *self,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst)
{
  return aarch64_ins_limm (self, info, code, inst);
}

/* Encode Zn[MM], where Zn occupies the least-significant part of the field
   and where MM occupies the most-significant part.  The operand-dependent
   value specifies the number of bits in Zn.  */
const char *
aarch64_ins_sve_quad_index (const aarch64_operand *self,
			    const aarch64_opnd_info *info, aarch64_insn *code,
			    const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  unsigned int reg_bits = get_operand_specific_data (self);
  assert (info->reglane.regno < (1U << reg_bits));
  unsigned int val = (info->reglane.index << reg_bits) + info->reglane.regno;
  insert_all_fields (self, code, val);
  return NULL;
}

/* Encode {Zn.<T> - Zm.<T>}.  The fields array specifies which field
   to use for Zn.  */
const char *
aarch64_ins_sve_reglist (const aarch64_operand *self,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  return NULL;
}

/* Encode <pattern>{, MUL #<amount>}.  The fields array specifies which
   fields to use for <pattern>.  <amount> - 1 is encoded in the SVE_imm4
   field.  */
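/* Illustrative example: <pattern>, MUL #4 puts 4 - 1 = 3 into the SVE_imm4
   field.  */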
const char *
aarch64_ins_sve_scale (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  insert_all_fields (self, code, info->imm.value);
  insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
  return NULL;
}

/* Encode an SVE shift left immediate.  */
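/* Illustrative example: a left shift of #3 on .H elements (esize 2) is
   encoded as 8 * 2 + 3 = 19 and inserted with insert_all_fields.  */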
const char *
aarch64_ins_sve_shlimm (const aarch64_operand *self,
			const aarch64_opnd_info *info, aarch64_insn *code,
			const aarch64_inst *inst)
{
  const aarch64_opnd_info *prev_operand;
  unsigned int esize;

  assert (info->idx > 0);
  prev_operand = &inst->operands[info->idx - 1];
  esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
  insert_all_fields (self, code, 8 * esize + info->imm.value);
  return NULL;
}

/* Encode an SVE shift right immediate.  */
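/* Illustrative example: a right shift of #5 on .S elements (esize 4) is
   encoded as 16 * 4 - 5 = 59 and inserted with insert_all_fields.  */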
const char *
aarch64_ins_sve_shrimm (const aarch64_operand *self,
			const aarch64_opnd_info *info, aarch64_insn *code,
			const aarch64_inst *inst)
{
  const aarch64_opnd_info *prev_operand;
  unsigned int esize;

  assert (info->idx > 0);
  prev_operand = &inst->operands[info->idx - 1];
  esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
  insert_all_fields (self, code, 16 * esize - info->imm.value);
  return NULL;
}

/* Encode a single-bit immediate that selects between #0.5 and #1.0.
   The fields array specifies which field to use.  */
const char *
aarch64_ins_sve_float_half_one (const aarch64_operand *self,
				const aarch64_opnd_info *info,
				aarch64_insn *code,
				const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  if (info->imm.value == 0x3f000000)
    insert_field (self->fields[0], code, 0, 0);
  else
    insert_field (self->fields[0], code, 1, 0);
  return NULL;
}

/* Encode a single-bit immediate that selects between #0.5 and #2.0.
   The fields array specifies which field to use.  */
const char *
aarch64_ins_sve_float_half_two (const aarch64_operand *self,
				const aarch64_opnd_info *info,
				aarch64_insn *code,
				const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  if (info->imm.value == 0x3f000000)
    insert_field (self->fields[0], code, 0, 0);
  else
    insert_field (self->fields[0], code, 1, 0);
  return NULL;
}

/* Encode a single-bit immediate that selects between #0.0 and #1.0.
   The fields array specifies which field to use.  */
const char *
aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
				const aarch64_opnd_info *info,
				aarch64_insn *code,
				const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  if (info->imm.value == 0)
    insert_field (self->fields[0], code, 0, 0);
  else
    insert_field (self->fields[0], code, 1, 0);
  return NULL;
}

/* Miscellaneous encoding functions.  */

/* Encode size[0], i.e. bit 22, for
     e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */

static void
encode_asimd_fcvt (aarch64_inst *inst)
{
  aarch64_insn value;
  aarch64_field field = {0, 0};
  enum aarch64_opnd_qualifier qualifier;

  switch (inst->opcode->op)
    {
    case OP_FCVTN:
    case OP_FCVTN2:
      /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
      qualifier = inst->operands[1].qualifier;
      break;
    case OP_FCVTL:
    case OP_FCVTL2:
      /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
      qualifier = inst->operands[0].qualifier;
      break;
    default:
      assert (0);
    }
  assert (qualifier == AARCH64_OPND_QLF_V_4S
	  || qualifier == AARCH64_OPND_QLF_V_2D);
  value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, value, 0);
}

/* Encode size[0], i.e. bit 22, for
     e.g. FCVTXN <Vb><d>, <Va><n>.  */

static void
encode_asisd_fcvtxn (aarch64_inst *inst)
{
  aarch64_insn val = 1;
  aarch64_field field = {0, 0};
  assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, val, 0);
}

/* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>.  */
static void
encode_fcvt (aarch64_inst *inst)
{
  aarch64_insn val;
  const aarch64_field field = {15, 2};

  /* opc dstsize */
  switch (inst->operands[0].qualifier)
    {
    case AARCH64_OPND_QLF_S_S: val = 0; break;
    case AARCH64_OPND_QLF_S_D: val = 1; break;
    case AARCH64_OPND_QLF_S_H: val = 3; break;
    default: abort ();
    }
  insert_field_2 (&field, &inst->value, val, 0);

  return;
}

/* Return the index in qualifiers_list that INST is using.  Should only
   be called once the qualifiers are known to be valid.  */

static int
aarch64_get_variant (struct aarch64_inst *inst)
{
  int i, nops, variant;

  nops = aarch64_num_of_operands (inst->opcode);
  for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
    {
      for (i = 0; i < nops; ++i)
	if (inst->opcode->qualifiers_list[variant][i]
	    != inst->operands[i].qualifier)
	  break;
      if (i == nops)
	return variant;
    }
  abort ();
}

/* Do miscellaneous encodings that are not common enough to be driven by
   flags.  */

static void
do_misc_encoding (aarch64_inst *inst)
{
  unsigned int value;

  switch (inst->opcode->op)
    {
    case OP_FCVT:
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      encode_asisd_fcvtxn (inst);
      break;
    case OP_MOV_P_P:
    case OP_MOVS_P_P:
      /* Copy Pn to Pm and Pg.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
      break;
    case OP_MOV_Z_P_Z:
      /* Copy Zd to Zm.  */
      value = extract_field (FLD_SVE_Zd, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_V:
      /* Fill in the zero immediate.  */
      insert_fields (&inst->value, 1 << aarch64_get_variant (inst), 0,
		     2, FLD_imm5, FLD_SVE_tszh);
      break;
    case OP_MOV_Z_Z:
      /* Copy Zn to Zm.  */
      value = extract_field (FLD_SVE_Zn, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_Zi:
      break;
    case OP_MOVM_P_P_P:
      /* Copy Pd to Pm.  */
      value = extract_field (FLD_SVE_Pd, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_MOVZS_P_P_P:
    case OP_MOVZ_P_P_P:
      /* Copy Pn to Pm.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_NOTS_P_P_P_Z:
    case OP_NOT_P_P_P_Z:
      /* Copy Pg to Pm.  */
      value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    default: break;
    }
}

/* Encode the 'size' and 'Q' field for e.g. SHADD.  */
static void
encode_sizeq (aarch64_inst *inst)
{
  aarch64_insn sizeq;
  enum aarch64_field_kind kind;
  int idx;

  /* Get the index of the operand whose information we are going to use
     to encode the size and Q fields.
     This is deduced from the possible valid qualifier lists.  */
  idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
	       aarch64_get_qualifier_name (inst->operands[idx].qualifier));
  sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
  /* Q */
  insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
  /* size */
  if (inst->opcode->iclass == asisdlse
     || inst->opcode->iclass == asisdlsep
     || inst->opcode->iclass == asisdlso
     || inst->opcode->iclass == asisdlsop)
    kind = FLD_vldst_size;
  else
    kind = FLD_size;
  insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
}

/* Opcodes that have fields shared by multiple operands are usually flagged
   with flags.  In this function, we detect such flags and use the
   information in one of the related operands to do the encoding.  The 'one'
   operand is not an arbitrary operand, but one of the operands that has
   enough information for such an encoding.  */

static void
do_special_encoding (struct aarch64_inst *inst)
{
  int idx;
  aarch64_insn value = 0;

  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);

  /* Condition for truly conditional executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    {
      insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
    }
  if (inst->opcode->flags & F_SF)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      if (inst->opcode->flags & F_N)
	insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_LSE_SZ)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_lse_sz, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  if (inst->opcode->flags & F_FPTYPE)
    {
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_H: value = 3; break;
	default: assert (0);
	}
      insert_field (FLD_type, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SSIZE)
    {
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
	      && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_T)
    {
      int num;	/* num of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      idx = 0;
      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_SIMD_REG
	      && qualifier >= AARCH64_OPND_QLF_V_8B
	      && qualifier <= AARCH64_OPND_QLF_V_2D);
      /* imm5<3:0>	q	<t>
	 0000		x	reserved
	 xxx1		0	8b
	 xxx1		1	16b
	 xx10		0	4h
	 xx10		1	8h
	 x100		0	2s
	 x100		1	4s
	 1000		0	reserved
	 1000		1	2d  */
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
	/* Otherwise use the result operand, which has to be an integer
	   register.  */
	idx = 0;
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
	      == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
		    aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  if (inst->opcode->flags & F_LDS_SIZE)
    {
      /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      insert_field_2 (&field, &inst->value,
		      1 - aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}

/* Some instructions (including all SVE ones) use the instruction class
   to describe how a qualifiers_list index is represented in the instruction
   encoding.  If INST is such an instruction, encode the chosen qualifier
   variant.  */

static void
aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
{
  switch (inst->opcode->iclass)
    {
    case sve_cpy:
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_14, FLD_size);
      break;

    case sve_index:
    case sve_shift_pred:
    case sve_shift_unpred:
      /* For indices and shift amounts, the variant is encoded as
	 part of the immediate.  */
      break;

    case sve_limm:
      /* For sve_limm, the .B, .H, and .S forms are just a convenience
	 and depend on the immediate.  They don't have a separate
	 encoding.  */
      break;

    case sve_misc:
      /* sve_misc instructions have only a single variant.  */
      break;

    case sve_movprfx:
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_16, FLD_size);
      break;

    case sve_pred_zm:
      insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_bhs:
    case sve_size_bhsd:
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd:
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst) + 1, 0);
      break;

    case sve_size_sd:
      insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
      break;

    default:
      break;
    }
}

/* Converters converting an alias opcode instruction to its real form.  */

/* ROR <Wd>, <Ws>, #<shift>
     is equivalent to:
   EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
static void
convert_ror_to_extr (aarch64_inst *inst)
{
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
}

/* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
     is equivalent to:
   USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0.  */
static void
convert_xtl_to_shll (aarch64_inst *inst)
{
  inst->operands[2].qualifier = inst->operands[1].qualifier;
  inst->operands[2].imm.value = 0;
}

/* Convert
     LSR <Xd>, <Xn>, #<shift>
   to
     UBFM <Xd>, <Xn>, #<shift>, #63.  */
static void
convert_sr_to_bfm (aarch64_inst *inst)
{
  inst->operands[3].imm.value =
    inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
}

/* Convert MOV to ORR.  */
static void
convert_mov_to_orr (aarch64_inst *inst)
{
  /* MOV <Vd>.<T>, <Vn>.<T>
     is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.  */
  copy_operand_info (inst, 2, 1);
}

/* When <imms> >= <immr>, the instruction written:
     SBFX <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1).  */

static void
convert_bfx_to_bfm (aarch64_inst *inst)
{
  int64_t lsb, width;

  /* Convert the operand.  */
  lsb = inst->operands[2].imm.value;
  width = inst->operands[3].imm.value;
  inst->operands[2].imm.value = lsb;
  inst->operands[3].imm.value = lsb + width - 1;
}

/* When <imms> < <immr>, the instruction written:
     SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1).  */
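/* Illustrative example: SBFIZ X0, X1, #8, #4 becomes SBFM X0, X1, #56, #3,
   since (64 - 8) & 0x3f = 56 and 4 - 1 = 3.  */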

static void
convert_bfi_to_bfm (aarch64_inst *inst)
{
  int64_t lsb, width;

  /* Convert the operand.  */
  lsb = inst->operands[2].imm.value;
  width = inst->operands[3].imm.value;
  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
    {
      inst->operands[2].imm.value = (32 - lsb) & 0x1f;
      inst->operands[3].imm.value = width - 1;
    }
  else
    {
      inst->operands[2].imm.value = (64 - lsb) & 0x3f;
      inst->operands[3].imm.value = width - 1;
    }
}

/* The instruction written:
     BFC <Xd>, #<lsb>, #<width>
   is equivalent to:
     BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1).  */

static void
convert_bfc_to_bfm (aarch64_inst *inst)
{
  int64_t lsb, width;

  /* Insert XZR.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  copy_operand_info (inst, 0, 0);
  inst->operands[1].reg.regno = 0x1f;

1631  /* Convert the immedate operand.  */
1632  lsb = inst->operands[2].imm.value;
1633  width = inst->operands[3].imm.value;
1634  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1635    {
1636      inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1637      inst->operands[3].imm.value = width - 1;
1638    }
1639  else
1640    {
1641      inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1642      inst->operands[3].imm.value = width - 1;
1643    }
1644}
1645
1646/* The instruction written:
1647     LSL <Xd>, <Xn>, #<shift>
1648   is equivalent to:
1649     UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>).  */
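/* For example, LSL X0, X1, #8 assembles as UBFM X0, X1, #56, #55, and
   LSL W0, W1, #8 as UBFM W0, W1, #24, #23.  */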

static void
convert_lsl_to_ubfm (aarch64_inst *inst)
{
  int64_t shift = inst->operands[2].imm.value;

  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
    {
      inst->operands[2].imm.value = (32 - shift) & 0x1f;
      inst->operands[3].imm.value = 31 - shift;
    }
  else
    {
      inst->operands[2].imm.value = (64 - shift) & 0x3f;
      inst->operands[3].imm.value = 63 - shift;
    }
}

/* CINC <Wd>, <Wn>, <cond>
     is equivalent to:
   CSINC <Wd>, <Wn>, <Wn>, invert(<cond>).  */
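/* For example, CINC W0, W1, EQ assembles as CSINC W0, W1, W1, NE; the same
   conversion also handles CINV/CSINV and CNEG/CSNEG.  */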

static void
convert_to_csel (aarch64_inst *inst)
{
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}

/* CSET <Wd>, <cond>
     is equivalent to:
   CSINC <Wd>, WZR, WZR, invert(<cond>).  */
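/* For example, CSET W0, EQ assembles as CSINC W0, WZR, WZR, NE, and CSETM
   maps to CSINV in the same way.  */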

static void
convert_cset_to_csinc (aarch64_inst *inst)
{
  copy_operand_info (inst, 3, 1);
  copy_operand_info (inst, 2, 0);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[2].reg.regno = 0x1f;
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}

/* MOV <Wd>, #<imm>
     is equivalent to:
   MOVZ <Wd>, #<imm16>, LSL #<shift>.  */
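/* For example, MOV X0, #0x10000 assembles as MOVZ X0, #0x1, LSL #16; the
   OP_MOV_IMM_WIDEN (MOVN) case below works on the bitwise NOT of the value.  */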

static void
convert_mov_to_movewide (aarch64_inst *inst)
{
  int is32;
  uint32_t shift_amount;
  uint64_t value;

  switch (inst->opcode->op)
    {
    case OP_MOV_IMM_WIDE:
      value = inst->operands[1].imm.value;
      break;
    case OP_MOV_IMM_WIDEN:
      value = ~inst->operands[1].imm.value;
      break;
    default:
      assert (0);
    }
  inst->operands[1].type = AARCH64_OPND_HALF;
  is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
  if (! aarch64_wide_constant_p (value, is32, &shift_amount))
    /* The constraint check should have guaranteed this wouldn't happen.  */
    assert (0);
  value >>= shift_amount;
  value &= 0xffff;
  inst->operands[1].imm.value = value;
  inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
  inst->operands[1].shifter.amount = shift_amount;
}

/* MOV <Wd>, #<imm>
     is equivalent to:
   ORR <Wd>, WZR, #<imm>.  */
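/* For example, MOV X0, #0xff00ff00ff00ff00 cannot be encoded as a move-wide
   but is a valid bitmask immediate, so it assembles as
   ORR X0, XZR, #0xff00ff00ff00ff00.  */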

static void
convert_mov_to_movebitmask (aarch64_inst *inst)
{
  copy_operand_info (inst, 2, 1);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[1].skip = 0;
}

/* Some alias opcodes are assembled by being converted to their real form.  */

static void
convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
{
  const aarch64_opcode *alias = inst->opcode;

  if ((alias->flags & F_CONV) == 0)
    goto convert_to_real_return;

  switch (alias->op)
    {
    case OP_ASR_IMM:
    case OP_LSR_IMM:
      convert_sr_to_bfm (inst);
      break;
    case OP_LSL_IMM:
      convert_lsl_to_ubfm (inst);
      break;
    case OP_CINC:
    case OP_CINV:
    case OP_CNEG:
      convert_to_csel (inst);
      break;
    case OP_CSET:
    case OP_CSETM:
      convert_cset_to_csinc (inst);
      break;
    case OP_UBFX:
    case OP_BFXIL:
    case OP_SBFX:
      convert_bfx_to_bfm (inst);
      break;
    case OP_SBFIZ:
    case OP_BFI:
    case OP_UBFIZ:
      convert_bfi_to_bfm (inst);
      break;
    case OP_BFC:
      convert_bfc_to_bfm (inst);
      break;
    case OP_MOV_V:
      convert_mov_to_orr (inst);
      break;
    case OP_MOV_IMM_WIDE:
    case OP_MOV_IMM_WIDEN:
      convert_mov_to_movewide (inst);
      break;
    case OP_MOV_IMM_LOG:
      convert_mov_to_movebitmask (inst);
      break;
    case OP_ROR_IMM:
      convert_ror_to_extr (inst);
      break;
    case OP_SXTL:
    case OP_SXTL2:
    case OP_UXTL:
    case OP_UXTL2:
      convert_xtl_to_shll (inst);
      break;
    default:
      break;
    }

convert_to_real_return:
  aarch64_replace_opcode (inst, real);
}

/* Encode *INST_ORI of the opcode OPCODE.
   Return the encoded result in *CODE and, if QLF_SEQ is not NULL, return the
   matched operand qualifier sequence in *QLF_SEQ.  */

int
aarch64_opcode_encode (const aarch64_opcode *opcode,
		       const aarch64_inst *inst_ori, aarch64_insn *code,
		       aarch64_opnd_qualifier_t *qlf_seq,
		       aarch64_operand_error *mismatch_detail)
{
  int i;
  const aarch64_opcode *aliased;
  aarch64_opnd_info *info;
  aarch64_inst copy, *inst;

  DEBUG_TRACE ("enter with %s", opcode->name);

  /* Create a copy of *INST_ORI, so that we can do any change we want.  */
  copy = *inst_ori;
  inst = &copy;

  assert (inst->opcode == NULL || inst->opcode == opcode);
  if (inst->opcode == NULL)
    inst->opcode = opcode;

  /* Constrain the operands.
     After passing this, the encoding is guaranteed to succeed.  */
  if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
    {
      DEBUG_TRACE ("FAIL since operand constraint not met");
      return 0;
    }

  /* Get the base value.
     Note: this has to be before the aliasing handling below in order to
     get the base value from the alias opcode before we move on to the
     aliased opcode for encoding.  */
  inst->value = opcode->opcode;

  /* No need to do anything else if the opcode does not have any operand.  */
  if (aarch64_num_of_operands (opcode) == 0)
    goto encoding_exit;

  /* Assign operand indexes and check types.  Also put the matched
     operand qualifiers in *QLF_SEQ to return.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      assert (opcode->operands[i] == inst->operands[i].type);
      inst->operands[i].idx = i;
      if (qlf_seq != NULL)
	*qlf_seq = inst->operands[i].qualifier;
    }

  aliased = aarch64_find_real_opcode (opcode);
  /* If the opcode is an alias and it does not ask for direct encoding by
     itself, the instruction will be transformed to the form of the real
     opcode and the encoding will be carried out using the rules for the
     aliased opcode.  */
  if (aliased != NULL && (opcode->flags & F_CONV))
    {
      DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
		   aliased->name, opcode->name);
      /* Convert the operands to the form of the real opcode.  */
      convert_to_real (inst, aliased);
      opcode = aliased;
    }

  /* Call the inserter of each operand.  */
  info = inst->operands;
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
    {
      const aarch64_operand *opnd;
      enum aarch64_opnd type = opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
	break;
      if (info->skip)
	{
	  DEBUG_TRACE ("skip the incomplete operand %d", i);
	  continue;
	}
      opnd = &aarch64_operands[type];
      if (operand_has_inserter (opnd))
	aarch64_insert_operand (opnd, info, &inst->value, inst);
    }

  /* Call opcode encoders indicated by flags.  */
  if (opcode_has_special_coder (opcode))
    do_special_encoding (inst);

  /* Possibly use the instruction class to encode the chosen qualifier
     variant.  */
  aarch64_encode_variant_using_iclass (inst);

encoding_exit:
  DEBUG_TRACE ("exit with %s", opcode->name);

  *code = inst->value;

  return 1;
}

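/* Illustrative usage sketch (not part of the library): a caller such as an
   assembler back end would populate an aarch64_inst with the parsed opcode
   and operands and then call aarch64_opcode_encode to obtain the 32-bit
   instruction image.  The emit_word and report_encoding_error helpers here
   are hypothetical stand-ins for the caller's own routines.

     aarch64_insn binary;
     aarch64_operand_error error;

     if (aarch64_opcode_encode (inst.opcode, &inst, &binary, NULL, &error))
       emit_word (binary);
     else
       report_encoding_error (&error);  */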