1/* RTL simplification functions for GNU compiler.
2   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4   Free Software Foundation, Inc.
5
6This file is part of GCC.
7
8GCC is free software; you can redistribute it and/or modify it under
9the terms of the GNU General Public License as published by the Free
10Software Foundation; either version 2, or (at your option) any later
11version.
12
13GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14WARRANTY; without even the implied warranty of MERCHANTABILITY or
15FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16for more details.
17
18You should have received a copy of the GNU General Public License
19along with GCC; see the file COPYING.  If not, write to the Free
20Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
2102110-1301, USA.  */
22
23
24#include "config.h"
25#include "system.h"
26#include "coretypes.h"
27#include "tm.h"
28#include "rtl.h"
29#include "tree.h"
30#include "tm_p.h"
31#include "regs.h"
32#include "hard-reg-set.h"
33#include "flags.h"
34#include "real.h"
35#include "insn-config.h"
36#include "recog.h"
37#include "function.h"
38#include "expr.h"
39#include "toplev.h"
40#include "output.h"
41#include "ggc.h"
42#include "target.h"
43
44/* Simplification and canonicalization of RTL.  */
45
46/* Much code operates on (low, high) pairs; the low value is an
47   unsigned wide int, the high value a signed wide int.  We
48   occasionally need to sign extend from low to high as if low were a
49   signed wide int.  */
50#define HWI_SIGN_EXTEND(low) \
51 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
52
53static rtx neg_const_int (enum machine_mode, rtx);
54static bool plus_minus_operand_p (rtx);
55static int simplify_plus_minus_op_data_cmp (const void *, const void *);
56static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
57static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
58				  unsigned int);
59static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60					   rtx, rtx);
61static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
62					    enum machine_mode, rtx, rtx);
63static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
64static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
65					rtx, rtx, rtx, rtx);
66
67/* Negate a CONST_INT rtx, truncating (because a conversion from a
68   maximally negative number can overflow).  */
69static rtx
70neg_const_int (enum machine_mode mode, rtx i)
71{
72  return gen_int_mode (- INTVAL (i), mode);
73}
74
75/* Test whether expression, X, is an immediate constant that represents
76   the most significant bit of machine mode MODE.  */
77
78bool
79mode_signbit_p (enum machine_mode mode, rtx x)
80{
81  unsigned HOST_WIDE_INT val;
82  unsigned int width;
83
84  if (GET_MODE_CLASS (mode) != MODE_INT)
85    return false;
86
87  width = GET_MODE_BITSIZE (mode);
88  if (width == 0)
89    return false;
90
91  if (width <= HOST_BITS_PER_WIDE_INT
92      && GET_CODE (x) == CONST_INT)
93    val = INTVAL (x);
94  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
95	   && GET_CODE (x) == CONST_DOUBLE
96	   && CONST_DOUBLE_LOW (x) == 0)
97    {
98      val = CONST_DOUBLE_HIGH (x);
99      width -= HOST_BITS_PER_WIDE_INT;
100    }
101  else
102    return false;
103
104  if (width < HOST_BITS_PER_WIDE_INT)
105    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
106  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
107}
108
109/* Make a binary operation by properly ordering the operands and
110   seeing if the expression folds.  */
111
112rtx
113simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
114		     rtx op1)
115{
116  rtx tem;
117
118  /* If this simplifies, do it.  */
119  tem = simplify_binary_operation (code, mode, op0, op1);
120  if (tem)
121    return tem;
122
123  /* Put complex operands first and constants second if commutative.  */
124  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
125      && swap_commutative_operands_p (op0, op1))
126    tem = op0, op0 = op1, op1 = tem;
127
128  return gen_rtx_fmt_ee (code, mode, op0, op1);
129}
130
131/* If X is a MEM referencing the constant pool, return the real value.
132   Otherwise return X.  */
133rtx
134avoid_constant_pool_reference (rtx x)
135{
136  rtx c, tmp, addr;
137  enum machine_mode cmode;
138  HOST_WIDE_INT offset = 0;
139
140  switch (GET_CODE (x))
141    {
142    case MEM:
143      break;
144
145    case FLOAT_EXTEND:
146      /* Handle float extensions of constant pool references.  */
147      tmp = XEXP (x, 0);
148      c = avoid_constant_pool_reference (tmp);
149      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
150	{
151	  REAL_VALUE_TYPE d;
152
153	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
154	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
155	}
156      return x;
157
158    default:
159      return x;
160    }
161
162  addr = XEXP (x, 0);
163
164  /* Call target hook to avoid the effects of -fpic etc....  */
165  addr = targetm.delegitimize_address (addr);
166
167  /* Split the address into a base and integer offset.  */
168  if (GET_CODE (addr) == CONST
169      && GET_CODE (XEXP (addr, 0)) == PLUS
170      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
171    {
172      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
173      addr = XEXP (XEXP (addr, 0), 0);
174    }
175
176  if (GET_CODE (addr) == LO_SUM)
177    addr = XEXP (addr, 1);
178
179  /* If this is a constant pool reference, we can turn it into its
180     constant and hope that simplifications happen.  */
181  if (GET_CODE (addr) == SYMBOL_REF
182      && CONSTANT_POOL_ADDRESS_P (addr))
183    {
184      c = get_pool_constant (addr);
185      cmode = get_pool_mode (addr);
186
187      /* If we're accessing the constant in a different mode than it was
188         originally stored, attempt to fix that up via subreg simplifications.
189         If that fails we have no choice but to return the original memory.  */
190      if (offset != 0 || cmode != GET_MODE (x))
191        {
192          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
193          if (tem && CONSTANT_P (tem))
194            return tem;
195        }
196      else
197        return c;
198    }
199
200  return x;
201}
202
203/* Return true if X is a MEM referencing the constant pool.  */
204
205bool
206constant_pool_reference_p (rtx x)
207{
208  return avoid_constant_pool_reference (x) != x;
209}
210
211/* Make a unary operation by first seeing if it folds and otherwise making
212   the specified operation.  */
213
214rtx
215simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
216		    enum machine_mode op_mode)
217{
218  rtx tem;
219
220  /* If this simplifies, use it.  */
221  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
222    return tem;
223
224  return gen_rtx_fmt_e (code, mode, op);
225}
226
227/* Likewise for ternary operations.  */
228
229rtx
230simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
231		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
232{
233  rtx tem;
234
235  /* If this simplifies, use it.  */
236  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
237					      op0, op1, op2)))
238    return tem;
239
240  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
241}
242
243/* Likewise, for relational operations.
244   CMP_MODE specifies mode comparison is done in.  */
245
246rtx
247simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
248			 enum machine_mode cmp_mode, rtx op0, rtx op1)
249{
250  rtx tem;
251
252  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
253						 op0, op1)))
254    return tem;
255
256  return gen_rtx_fmt_ee (code, mode, op0, op1);
257}
258
259/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
260   resulting RTX.  Return a new RTX which is as simplified as possible.  */
261
262rtx
263simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
264{
265  enum rtx_code code = GET_CODE (x);
266  enum machine_mode mode = GET_MODE (x);
267  enum machine_mode op_mode;
268  rtx op0, op1, op2;
269
270  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
271     to build a new expression substituting recursively.  If we can't do
272     anything, return our input.  */
273
274  if (x == old_rtx)
275    return new_rtx;
276
277  switch (GET_RTX_CLASS (code))
278    {
279    case RTX_UNARY:
280      op0 = XEXP (x, 0);
281      op_mode = GET_MODE (op0);
282      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
283      if (op0 == XEXP (x, 0))
284	return x;
285      return simplify_gen_unary (code, mode, op0, op_mode);
286
287    case RTX_BIN_ARITH:
288    case RTX_COMM_ARITH:
289      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
290      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
291      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
292	return x;
293      return simplify_gen_binary (code, mode, op0, op1);
294
295    case RTX_COMPARE:
296    case RTX_COMM_COMPARE:
297      op0 = XEXP (x, 0);
298      op1 = XEXP (x, 1);
299      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
300      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
301      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
302      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
303	return x;
304      return simplify_gen_relational (code, mode, op_mode, op0, op1);
305
306    case RTX_TERNARY:
307    case RTX_BITFIELD_OPS:
308      op0 = XEXP (x, 0);
309      op_mode = GET_MODE (op0);
310      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
311      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
312      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
313      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
314	return x;
315      if (op_mode == VOIDmode)
316	op_mode = GET_MODE (op0);
317      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
318
319    case RTX_EXTRA:
320      /* The only case we try to handle is a SUBREG.  */
321      if (code == SUBREG)
322	{
323	  op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
324	  if (op0 == SUBREG_REG (x))
325	    return x;
326	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
327				     GET_MODE (SUBREG_REG (x)),
328				     SUBREG_BYTE (x));
329	  return op0 ? op0 : x;
330	}
331      break;
332
333    case RTX_OBJ:
334      if (code == MEM)
335	{
336	  op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
337	  if (op0 == XEXP (x, 0))
338	    return x;
339	  return replace_equiv_address_nv (x, op0);
340	}
341      else if (code == LO_SUM)
342	{
343	  op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
344	  op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
345
346	  /* (lo_sum (high x) x) -> x  */
347	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
348	    return op1;
349
350	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
351	    return x;
352	  return gen_rtx_LO_SUM (mode, op0, op1);
353	}
354      else if (code == REG)
355	{
356	  if (rtx_equal_p (x, old_rtx))
357	    return new_rtx;
358	}
359      break;
360
361    default:
362      break;
363    }
364  return x;
365}
366
367/* Try to simplify a unary operation CODE whose output mode is to be
368   MODE with input operand OP whose mode was originally OP_MODE.
369   Return zero if no simplification can be made.  */
370rtx
371simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
372			  rtx op, enum machine_mode op_mode)
373{
374  rtx trueop, tem;
375
376  if (GET_CODE (op) == CONST)
377    op = XEXP (op, 0);
378
379  trueop = avoid_constant_pool_reference (op);
380
381  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
382  if (tem)
383    return tem;
384
385  return simplify_unary_operation_1 (code, mode, op);
386}
387
388/* Perform some simplifications we can do even if the operands
389   aren't constant.  */
390static rtx
391simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
392{
393  enum rtx_code reversed;
394  rtx temp;
395
396  switch (code)
397    {
398    case NOT:
399      /* (not (not X)) == X.  */
400      if (GET_CODE (op) == NOT)
401	return XEXP (op, 0);
402
403      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
404	 comparison is all ones.   */
405      if (COMPARISON_P (op)
406	  && (mode == BImode || STORE_FLAG_VALUE == -1)
407	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
408	return simplify_gen_relational (reversed, mode, VOIDmode,
409					XEXP (op, 0), XEXP (op, 1));
410
411      /* (not (plus X -1)) can become (neg X).  */
412      if (GET_CODE (op) == PLUS
413	  && XEXP (op, 1) == constm1_rtx)
414	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
415
416      /* Similarly, (not (neg X)) is (plus X -1).  */
417      if (GET_CODE (op) == NEG)
418	return plus_constant (XEXP (op, 0), -1);
419
420      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
421      if (GET_CODE (op) == XOR
422	  && GET_CODE (XEXP (op, 1)) == CONST_INT
423	  && (temp = simplify_unary_operation (NOT, mode,
424					       XEXP (op, 1), mode)) != 0)
425	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
426
427      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
428      if (GET_CODE (op) == PLUS
429	  && GET_CODE (XEXP (op, 1)) == CONST_INT
430	  && mode_signbit_p (mode, XEXP (op, 1))
431	  && (temp = simplify_unary_operation (NOT, mode,
432					       XEXP (op, 1), mode)) != 0)
433	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
434
435
436      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
437	 operands other than 1, but that is not valid.  We could do a
438	 similar simplification for (not (lshiftrt C X)) where C is
439	 just the sign bit, but this doesn't seem common enough to
440	 bother with.  */
441      if (GET_CODE (op) == ASHIFT
442	  && XEXP (op, 0) == const1_rtx)
443	{
444	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
445	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
446	}
447
448      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
449	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
450	 so we can perform the above simplification.  */
451
452      if (STORE_FLAG_VALUE == -1
453	  && GET_CODE (op) == ASHIFTRT
454	  && GET_CODE (XEXP (op, 1)) == CONST_INT
455	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
456	return simplify_gen_relational (GE, mode, VOIDmode,
457					XEXP (op, 0), const0_rtx);
458
459
460      if (GET_CODE (op) == SUBREG
461	  && subreg_lowpart_p (op)
462	  && (GET_MODE_SIZE (GET_MODE (op))
463	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
464	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
465	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
466	{
467	  enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
468	  rtx x;
469
470	  x = gen_rtx_ROTATE (inner_mode,
471			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
472						  inner_mode),
473			      XEXP (SUBREG_REG (op), 1));
474	  return rtl_hooks.gen_lowpart_no_emit (mode, x);
475	}
476
477      /* Apply De Morgan's laws to reduce number of patterns for machines
478	 with negating logical insns (and-not, nand, etc.).  If result has
479	 only one NOT, put it first, since that is how the patterns are
480	 coded.  */
481
482      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
483	{
484	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
485	  enum machine_mode op_mode;
486
487	  op_mode = GET_MODE (in1);
488	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
489
490	  op_mode = GET_MODE (in2);
491	  if (op_mode == VOIDmode)
492	    op_mode = mode;
493	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
494
495	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
496	    {
497	      rtx tem = in2;
498	      in2 = in1; in1 = tem;
499	    }
500
501	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
502				 mode, in1, in2);
503	}
504      break;
505
506    case NEG:
507      /* (neg (neg X)) == X.  */
508      if (GET_CODE (op) == NEG)
509	return XEXP (op, 0);
510
511      /* (neg (plus X 1)) can become (not X).  */
512      if (GET_CODE (op) == PLUS
513	  && XEXP (op, 1) == const1_rtx)
514	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
515
516      /* Similarly, (neg (not X)) is (plus X 1).  */
517      if (GET_CODE (op) == NOT)
518	return plus_constant (XEXP (op, 0), 1);
519
520      /* (neg (minus X Y)) can become (minus Y X).  This transformation
521	 isn't safe for modes with signed zeros, since if X and Y are
522	 both +0, (minus Y X) is the same as (minus X Y).  If the
523	 rounding mode is towards +infinity (or -infinity) then the two
524	 expressions will be rounded differently.  */
525      if (GET_CODE (op) == MINUS
526	  && !HONOR_SIGNED_ZEROS (mode)
527	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
528	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
529
530      if (GET_CODE (op) == PLUS
531	  && !HONOR_SIGNED_ZEROS (mode)
532	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
533	{
534	  /* (neg (plus A C)) is simplified to (minus -C A).  */
535	  if (GET_CODE (XEXP (op, 1)) == CONST_INT
536	      || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
537	    {
538	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
539	      if (temp)
540		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
541	    }
542
543	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
544	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
545	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
546	}
547
548      /* (neg (mult A B)) becomes (mult (neg A) B).
549	 This works even for floating-point values.  */
550      if (GET_CODE (op) == MULT
551	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
552	{
553	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
554	  return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
555	}
556
557      /* NEG commutes with ASHIFT since it is multiplication.  Only do
558	 this if we can then eliminate the NEG (e.g., if the operand
559	 is a constant).  */
560      if (GET_CODE (op) == ASHIFT)
561	{
562	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
563	  if (temp)
564	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
565	}
566
567      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
568	 C is equal to the width of MODE minus 1.  */
569      if (GET_CODE (op) == ASHIFTRT
570	  && GET_CODE (XEXP (op, 1)) == CONST_INT
571	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
572	return simplify_gen_binary (LSHIFTRT, mode,
573				    XEXP (op, 0), XEXP (op, 1));
574
575      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
576	 C is equal to the width of MODE minus 1.  */
577      if (GET_CODE (op) == LSHIFTRT
578	  && GET_CODE (XEXP (op, 1)) == CONST_INT
579	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
580	return simplify_gen_binary (ASHIFTRT, mode,
581				    XEXP (op, 0), XEXP (op, 1));
582
583      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
584      if (GET_CODE (op) == XOR
585	  && XEXP (op, 1) == const1_rtx
586	  && nonzero_bits (XEXP (op, 0), mode) == 1)
587	return plus_constant (XEXP (op, 0), -1);
588
589      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
590      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
591      if (GET_CODE (op) == LT
592	  && XEXP (op, 1) == const0_rtx
593	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
594	{
595	  enum machine_mode inner = GET_MODE (XEXP (op, 0));
596	  int isize = GET_MODE_BITSIZE (inner);
597	  if (STORE_FLAG_VALUE == 1)
598	    {
599	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
600					  GEN_INT (isize - 1));
601	      if (mode == inner)
602		return temp;
603	      if (GET_MODE_BITSIZE (mode) > isize)
604		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
605	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
606	    }
607	  else if (STORE_FLAG_VALUE == -1)
608	    {
609	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
610					  GEN_INT (isize - 1));
611	      if (mode == inner)
612		return temp;
613	      if (GET_MODE_BITSIZE (mode) > isize)
614		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
615	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
616	    }
617	}
618      break;
619
620    case TRUNCATE:
621      /* We can't handle truncation to a partial integer mode here
622         because we don't know the real bitsize of the partial
623         integer mode.  */
624      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
625        break;
626
627      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
628      if ((GET_CODE (op) == SIGN_EXTEND
629	   || GET_CODE (op) == ZERO_EXTEND)
630	  && GET_MODE (XEXP (op, 0)) == mode)
631	return XEXP (op, 0);
632
633      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
634	 (OP:SI foo:SI) if OP is NEG or ABS.  */
635      if ((GET_CODE (op) == ABS
636	   || GET_CODE (op) == NEG)
637	  && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
638	      || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
639	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
640	return simplify_gen_unary (GET_CODE (op), mode,
641				   XEXP (XEXP (op, 0), 0), mode);
642
643      /* (truncate:A (subreg:B (truncate:C X) 0)) is
644	 (truncate:A X).  */
645      if (GET_CODE (op) == SUBREG
646	  && GET_CODE (SUBREG_REG (op)) == TRUNCATE
647	  && subreg_lowpart_p (op))
648	return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
649				   GET_MODE (XEXP (SUBREG_REG (op), 0)));
650
651      /* If we know that the value is already truncated, we can
652         replace the TRUNCATE with a SUBREG.  Note that this is also
653         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
654         modes we just have to apply a different definition for
655         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
656         since this will cause problems with the umulXi3_highpart
657         patterns.  */
658      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
659				 GET_MODE_BITSIZE (GET_MODE (op)))
660	   ? (num_sign_bit_copies (op, GET_MODE (op))
661	      > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
662				- GET_MODE_BITSIZE (mode)))
663	   : truncated_to_mode (mode, op))
664	  && ! (GET_CODE (op) == LSHIFTRT
665		&& GET_CODE (XEXP (op, 0)) == MULT))
666	return rtl_hooks.gen_lowpart_no_emit (mode, op);
667
668      /* A truncate of a comparison can be replaced with a subreg if
669         STORE_FLAG_VALUE permits.  This is like the previous test,
670         but it works even if the comparison is done in a mode larger
671         than HOST_BITS_PER_WIDE_INT.  */
672      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
673	  && COMPARISON_P (op)
674	  && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
675	return rtl_hooks.gen_lowpart_no_emit (mode, op);
676      break;
677
678    case FLOAT_TRUNCATE:
679      if (DECIMAL_FLOAT_MODE_P (mode))
680	break;
681
682      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
683      if (GET_CODE (op) == FLOAT_EXTEND
684	  && GET_MODE (XEXP (op, 0)) == mode)
685	return XEXP (op, 0);
686
687      /* (float_truncate:SF (float_truncate:DF foo:XF))
688         = (float_truncate:SF foo:XF).
689	 This may eliminate double rounding, so it is unsafe.
690
691         (float_truncate:SF (float_extend:XF foo:DF))
692         = (float_truncate:SF foo:DF).
693
694         (float_truncate:DF (float_extend:XF foo:SF))
695         = (float_extend:SF foo:DF).  */
696      if ((GET_CODE (op) == FLOAT_TRUNCATE
697	   && flag_unsafe_math_optimizations)
698	  || GET_CODE (op) == FLOAT_EXTEND)
699	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
700							    0)))
701				   > GET_MODE_SIZE (mode)
702				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
703				   mode,
704				   XEXP (op, 0), mode);
705
706      /*  (float_truncate (float x)) is (float x)  */
707      if (GET_CODE (op) == FLOAT
708	  && (flag_unsafe_math_optimizations
709	      || ((unsigned)significand_size (GET_MODE (op))
710		  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
711		      - num_sign_bit_copies (XEXP (op, 0),
712					     GET_MODE (XEXP (op, 0)))))))
713	return simplify_gen_unary (FLOAT, mode,
714				   XEXP (op, 0),
715				   GET_MODE (XEXP (op, 0)));
716
717      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
718	 (OP:SF foo:SF) if OP is NEG or ABS.  */
719      if ((GET_CODE (op) == ABS
720	   || GET_CODE (op) == NEG)
721	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
722	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
723	return simplify_gen_unary (GET_CODE (op), mode,
724				   XEXP (XEXP (op, 0), 0), mode);
725
726      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
727	 is (float_truncate:SF x).  */
728      if (GET_CODE (op) == SUBREG
729	  && subreg_lowpart_p (op)
730	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
731	return SUBREG_REG (op);
732      break;
733
734    case FLOAT_EXTEND:
735      if (DECIMAL_FLOAT_MODE_P (mode))
736	break;
737
738      /*  (float_extend (float_extend x)) is (float_extend x)
739
740	  (float_extend (float x)) is (float x) assuming that double
741	  rounding can't happen.
742          */
743      if (GET_CODE (op) == FLOAT_EXTEND
744	  || (GET_CODE (op) == FLOAT
745	      && ((unsigned)significand_size (GET_MODE (op))
746		  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
747		      - num_sign_bit_copies (XEXP (op, 0),
748					     GET_MODE (XEXP (op, 0)))))))
749	return simplify_gen_unary (GET_CODE (op), mode,
750				   XEXP (op, 0),
751				   GET_MODE (XEXP (op, 0)));
752
753      break;
754
755    case ABS:
756      /* (abs (neg <foo>)) -> (abs <foo>) */
757      if (GET_CODE (op) == NEG)
758	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
759				   GET_MODE (XEXP (op, 0)));
760
761      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
762         do nothing.  */
763      if (GET_MODE (op) == VOIDmode)
764	break;
765
766      /* If operand is something known to be positive, ignore the ABS.  */
767      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
768	  || ((GET_MODE_BITSIZE (GET_MODE (op))
769	       <= HOST_BITS_PER_WIDE_INT)
770	      && ((nonzero_bits (op, GET_MODE (op))
771		   & ((HOST_WIDE_INT) 1
772		      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
773		  == 0)))
774	return op;
775
776      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
777      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
778	return gen_rtx_NEG (mode, op);
779
780      break;
781
782    case FFS:
783      /* (ffs (*_extend <X>)) = (ffs <X>) */
784      if (GET_CODE (op) == SIGN_EXTEND
785	  || GET_CODE (op) == ZERO_EXTEND)
786	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
787				   GET_MODE (XEXP (op, 0)));
788      break;
789
790    case POPCOUNT:
791    case PARITY:
792      /* (pop* (zero_extend <X>)) = (pop* <X>) */
793      if (GET_CODE (op) == ZERO_EXTEND)
794	return simplify_gen_unary (code, mode, XEXP (op, 0),
795				   GET_MODE (XEXP (op, 0)));
796      break;
797
798    case FLOAT:
799      /* (float (sign_extend <X>)) = (float <X>).  */
800      if (GET_CODE (op) == SIGN_EXTEND)
801	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
802				   GET_MODE (XEXP (op, 0)));
803      break;
804
805    case SIGN_EXTEND:
806      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
807	 becomes just the MINUS if its mode is MODE.  This allows
808	 folding switch statements on machines using casesi (such as
809	 the VAX).  */
810      if (GET_CODE (op) == TRUNCATE
811	  && GET_MODE (XEXP (op, 0)) == mode
812	  && GET_CODE (XEXP (op, 0)) == MINUS
813	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
814	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
815	return XEXP (op, 0);
816
817      /* Check for a sign extension of a subreg of a promoted
818	 variable, where the promotion is sign-extended, and the
819	 target mode is the same as the variable's promotion.  */
820      if (GET_CODE (op) == SUBREG
821	  && SUBREG_PROMOTED_VAR_P (op)
822	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
823	  && GET_MODE (XEXP (op, 0)) == mode)
824	return XEXP (op, 0);
825
826#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
827      if (! POINTERS_EXTEND_UNSIGNED
828	  && mode == Pmode && GET_MODE (op) == ptr_mode
829	  && (CONSTANT_P (op)
830	      || (GET_CODE (op) == SUBREG
831		  && REG_P (SUBREG_REG (op))
832		  && REG_POINTER (SUBREG_REG (op))
833		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
834	return convert_memory_address (Pmode, op);
835#endif
836      break;
837
838    case ZERO_EXTEND:
839      /* Check for a zero extension of a subreg of a promoted
840	 variable, where the promotion is zero-extended, and the
841	 target mode is the same as the variable's promotion.  */
842      if (GET_CODE (op) == SUBREG
843	  && SUBREG_PROMOTED_VAR_P (op)
844	  && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
845	  && GET_MODE (XEXP (op, 0)) == mode)
846	return XEXP (op, 0);
847
848#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
849      if (POINTERS_EXTEND_UNSIGNED > 0
850	  && mode == Pmode && GET_MODE (op) == ptr_mode
851	  && (CONSTANT_P (op)
852	      || (GET_CODE (op) == SUBREG
853		  && REG_P (SUBREG_REG (op))
854		  && REG_POINTER (SUBREG_REG (op))
855		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
856	return convert_memory_address (Pmode, op);
857#endif
858      break;
859
860    default:
861      break;
862    }
863
864  return 0;
865}
866
867/* Try to compute the value of a unary operation CODE whose output mode is to
868   be MODE with input operand OP whose mode was originally OP_MODE.
869   Return zero if the value cannot be computed.  */
870rtx
871simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
872				rtx op, enum machine_mode op_mode)
873{
874  unsigned int width = GET_MODE_BITSIZE (mode);
875
876  if (code == VEC_DUPLICATE)
877    {
878      gcc_assert (VECTOR_MODE_P (mode));
879      if (GET_MODE (op) != VOIDmode)
880      {
881	if (!VECTOR_MODE_P (GET_MODE (op)))
882	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
883	else
884	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
885						(GET_MODE (op)));
886      }
887      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
888	  || GET_CODE (op) == CONST_VECTOR)
889	{
890          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
891          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
892	  rtvec v = rtvec_alloc (n_elts);
893	  unsigned int i;
894
895	  if (GET_CODE (op) != CONST_VECTOR)
896	    for (i = 0; i < n_elts; i++)
897	      RTVEC_ELT (v, i) = op;
898	  else
899	    {
900	      enum machine_mode inmode = GET_MODE (op);
901              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
902              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
903
904	      gcc_assert (in_n_elts < n_elts);
905	      gcc_assert ((n_elts % in_n_elts) == 0);
906	      for (i = 0; i < n_elts; i++)
907	        RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
908	    }
909	  return gen_rtx_CONST_VECTOR (mode, v);
910	}
911    }
912
913  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
914    {
915      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
916      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
917      enum machine_mode opmode = GET_MODE (op);
918      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
919      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
920      rtvec v = rtvec_alloc (n_elts);
921      unsigned int i;
922
923      gcc_assert (op_n_elts == n_elts);
924      for (i = 0; i < n_elts; i++)
925	{
926	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
927					    CONST_VECTOR_ELT (op, i),
928					    GET_MODE_INNER (opmode));
929	  if (!x)
930	    return 0;
931	  RTVEC_ELT (v, i) = x;
932	}
933      return gen_rtx_CONST_VECTOR (mode, v);
934    }
935
936  /* The order of these tests is critical so that, for example, we don't
937     check the wrong mode (input vs. output) for a conversion operation,
938     such as FIX.  At some point, this should be simplified.  */
939
940  if (code == FLOAT && GET_MODE (op) == VOIDmode
941      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
942    {
943      HOST_WIDE_INT hv, lv;
944      REAL_VALUE_TYPE d;
945
946      if (GET_CODE (op) == CONST_INT)
947	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
948      else
949	lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);
950
951      REAL_VALUE_FROM_INT (d, lv, hv, mode);
952      d = real_value_truncate (mode, d);
953      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
954    }
955  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
956	   && (GET_CODE (op) == CONST_DOUBLE
957	       || GET_CODE (op) == CONST_INT))
958    {
959      HOST_WIDE_INT hv, lv;
960      REAL_VALUE_TYPE d;
961
962      if (GET_CODE (op) == CONST_INT)
963	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
964      else
965	lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);
966
967      if (op_mode == VOIDmode)
968	{
969	  /* We don't know how to interpret negative-looking numbers in
970	     this case, so don't try to fold those.  */
971	  if (hv < 0)
972	    return 0;
973	}
974      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
975	;
976      else
977	hv = 0, lv &= GET_MODE_MASK (op_mode);
978
979      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
980      d = real_value_truncate (mode, d);
981      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
982    }
983
984  if (GET_CODE (op) == CONST_INT
985      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
986    {
987      HOST_WIDE_INT arg0 = INTVAL (op);
988      HOST_WIDE_INT val;
989
990      switch (code)
991	{
992	case NOT:
993	  val = ~ arg0;
994	  break;
995
996	case NEG:
997	  val = - arg0;
998	  break;
999
1000	case ABS:
1001	  val = (arg0 >= 0 ? arg0 : - arg0);
1002	  break;
1003
1004	case FFS:
1005	  /* Don't use ffs here.  Instead, get low order bit and then its
1006	     number.  If arg0 is zero, this will return 0, as desired.  */
1007	  arg0 &= GET_MODE_MASK (mode);
1008	  val = exact_log2 (arg0 & (- arg0)) + 1;
1009	  break;
1010
1011	case CLZ:
1012	  arg0 &= GET_MODE_MASK (mode);
1013	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1014	    ;
1015	  else
1016	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1017	  break;
1018
1019	case CTZ:
1020	  arg0 &= GET_MODE_MASK (mode);
1021	  if (arg0 == 0)
1022	    {
1023	      /* Even if the value at zero is undefined, we have to come
1024		 up with some replacement.  Seems good enough.  */
1025	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1026		val = GET_MODE_BITSIZE (mode);
1027	    }
1028	  else
1029	    val = exact_log2 (arg0 & -arg0);
1030	  break;
1031
1032	case POPCOUNT:
1033	  arg0 &= GET_MODE_MASK (mode);
1034	  val = 0;
1035	  while (arg0)
1036	    val++, arg0 &= arg0 - 1;
1037	  break;
1038
1039	case PARITY:
1040	  arg0 &= GET_MODE_MASK (mode);
1041	  val = 0;
1042	  while (arg0)
1043	    val++, arg0 &= arg0 - 1;
1044	  val &= 1;
1045	  break;
1046
1047	case BSWAP:
1048	  return 0;
1049
1050	case TRUNCATE:
1051	  val = arg0;
1052	  break;
1053
1054	case ZERO_EXTEND:
1055	  /* When zero-extending a CONST_INT, we need to know its
1056             original mode.  */
1057	  gcc_assert (op_mode != VOIDmode);
1058	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1059	    {
1060	      /* If we were really extending the mode,
1061		 we would have to distinguish between zero-extension
1062		 and sign-extension.  */
1063	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1064	      val = arg0;
1065	    }
1066	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1067	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1068	  else
1069	    return 0;
1070	  break;
1071
1072	case SIGN_EXTEND:
1073	  if (op_mode == VOIDmode)
1074	    op_mode = mode;
1075	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1076	    {
1077	      /* If we were really extending the mode,
1078		 we would have to distinguish between zero-extension
1079		 and sign-extension.  */
1080	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1081	      val = arg0;
1082	    }
1083	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1084	    {
1085	      val
1086		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1087	      if (val
1088		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1089		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1090	    }
1091	  else
1092	    return 0;
1093	  break;
1094
1095	case SQRT:
1096	case FLOAT_EXTEND:
1097	case FLOAT_TRUNCATE:
1098	case SS_TRUNCATE:
1099	case US_TRUNCATE:
1100	case SS_NEG:
1101	  return 0;
1102
1103	default:
1104	  gcc_unreachable ();
1105	}
1106
1107      return gen_int_mode (val, mode);
1108    }
1109
1110  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
1111     for a DImode operation on a CONST_INT.  */
1112  else if (GET_MODE (op) == VOIDmode
1113	   && width <= HOST_BITS_PER_WIDE_INT * 2
1114	   && (GET_CODE (op) == CONST_DOUBLE
1115	       || GET_CODE (op) == CONST_INT))
1116    {
1117      unsigned HOST_WIDE_INT l1, lv;
1118      HOST_WIDE_INT h1, hv;
1119
1120      if (GET_CODE (op) == CONST_DOUBLE)
1121	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1122      else
1123	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1124
1125      switch (code)
1126	{
1127	case NOT:
1128	  lv = ~ l1;
1129	  hv = ~ h1;
1130	  break;
1131
1132	case NEG:
1133	  neg_double (l1, h1, &lv, &hv);
1134	  break;
1135
1136	case ABS:
1137	  if (h1 < 0)
1138	    neg_double (l1, h1, &lv, &hv);
1139	  else
1140	    lv = l1, hv = h1;
1141	  break;
1142
1143	case FFS:
1144	  hv = 0;
1145	  if (l1 == 0)
1146	    {
1147	      if (h1 == 0)
1148		lv = 0;
1149	      else
1150		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1151	    }
1152	  else
1153	    lv = exact_log2 (l1 & -l1) + 1;
1154	  break;
1155
1156	case CLZ:
1157	  hv = 0;
1158	  if (h1 != 0)
1159	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1160	      - HOST_BITS_PER_WIDE_INT;
1161	  else if (l1 != 0)
1162	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1163	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1164	    lv = GET_MODE_BITSIZE (mode);
1165	  break;
1166
1167	case CTZ:
1168	  hv = 0;
1169	  if (l1 != 0)
1170	    lv = exact_log2 (l1 & -l1);
1171	  else if (h1 != 0)
1172	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1173	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1174	    lv = GET_MODE_BITSIZE (mode);
1175	  break;
1176
1177	case POPCOUNT:
1178	  hv = 0;
1179	  lv = 0;
1180	  while (l1)
1181	    lv++, l1 &= l1 - 1;
1182	  while (h1)
1183	    lv++, h1 &= h1 - 1;
1184	  break;
1185
1186	case PARITY:
1187	  hv = 0;
1188	  lv = 0;
1189	  while (l1)
1190	    lv++, l1 &= l1 - 1;
1191	  while (h1)
1192	    lv++, h1 &= h1 - 1;
1193	  lv &= 1;
1194	  break;
1195
1196	case TRUNCATE:
1197	  /* This is just a change-of-mode, so do nothing.  */
1198	  lv = l1, hv = h1;
1199	  break;
1200
1201	case ZERO_EXTEND:
1202	  gcc_assert (op_mode != VOIDmode);
1203
1204	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1205	    return 0;
1206
1207	  hv = 0;
1208	  lv = l1 & GET_MODE_MASK (op_mode);
1209	  break;
1210
1211	case SIGN_EXTEND:
1212	  if (op_mode == VOIDmode
1213	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1214	    return 0;
1215	  else
1216	    {
1217	      lv = l1 & GET_MODE_MASK (op_mode);
1218	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1219		  && (lv & ((HOST_WIDE_INT) 1
1220			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1221		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1222
1223	      hv = HWI_SIGN_EXTEND (lv);
1224	    }
1225	  break;
1226
1227	case SQRT:
1228	  return 0;
1229
1230	default:
1231	  return 0;
1232	}
1233
1234      return immed_double_const (lv, hv, mode);
1235    }
1236
1237  else if (GET_CODE (op) == CONST_DOUBLE
1238	   && SCALAR_FLOAT_MODE_P (mode))
1239    {
1240      REAL_VALUE_TYPE d, t;
1241      REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1242
1243      switch (code)
1244	{
1245	case SQRT:
1246	  if (HONOR_SNANS (mode) && real_isnan (&d))
1247	    return 0;
1248	  real_sqrt (&t, mode, &d);
1249	  d = t;
1250	  break;
1251	case ABS:
1252	  d = REAL_VALUE_ABS (d);
1253	  break;
1254	case NEG:
1255	  d = REAL_VALUE_NEGATE (d);
1256	  break;
1257	case FLOAT_TRUNCATE:
1258	  d = real_value_truncate (mode, d);
1259	  break;
1260	case FLOAT_EXTEND:
1261	  /* All this does is change the mode.  */
1262	  break;
1263	case FIX:
1264	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1265	  break;
1266	case NOT:
1267	  {
1268	    long tmp[4];
1269	    int i;
1270
1271	    real_to_target (tmp, &d, GET_MODE (op));
1272	    for (i = 0; i < 4; i++)
1273	      tmp[i] = ~tmp[i];
1274	    real_from_target (&d, tmp, mode);
1275	    break;
1276	  }
1277	default:
1278	  gcc_unreachable ();
1279	}
1280      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1281    }
1282
1283  else if (GET_CODE (op) == CONST_DOUBLE
1284	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1285	   && GET_MODE_CLASS (mode) == MODE_INT
1286	   && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1287    {
1288      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1289	 operators are intentionally left unspecified (to ease implementation
1290	 by target backends), for consistency, this routine implements the
1291	 same semantics for constant folding as used by the middle-end.  */
1292
1293      /* This was formerly used only for non-IEEE float.
1294	 eggert@twinsun.com says it is safe for IEEE also.  */
1295      HOST_WIDE_INT xh, xl, th, tl;
1296      REAL_VALUE_TYPE x, t;
1297      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1298      switch (code)
1299	{
1300	case FIX:
1301	  if (REAL_VALUE_ISNAN (x))
1302	    return const0_rtx;
1303
1304	  /* Test against the signed upper bound.  */
1305	  if (width > HOST_BITS_PER_WIDE_INT)
1306	    {
1307	      th = ((unsigned HOST_WIDE_INT) 1
1308		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1309	      tl = -1;
1310	    }
1311	  else
1312	    {
1313	      th = 0;
1314	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1315	    }
1316	  real_from_integer (&t, VOIDmode, tl, th, 0);
1317	  if (REAL_VALUES_LESS (t, x))
1318	    {
1319	      xh = th;
1320	      xl = tl;
1321	      break;
1322	    }
1323
1324	  /* Test against the signed lower bound.  */
1325	  if (width > HOST_BITS_PER_WIDE_INT)
1326	    {
1327	      th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1328	      tl = 0;
1329	    }
1330	  else
1331	    {
1332	      th = -1;
1333	      tl = (HOST_WIDE_INT) -1 << (width - 1);
1334	    }
1335	  real_from_integer (&t, VOIDmode, tl, th, 0);
1336	  if (REAL_VALUES_LESS (x, t))
1337	    {
1338	      xh = th;
1339	      xl = tl;
1340	      break;
1341	    }
1342	  REAL_VALUE_TO_INT (&xl, &xh, x);
1343	  break;
1344
1345	case UNSIGNED_FIX:
1346	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1347	    return const0_rtx;
1348
1349	  /* Test against the unsigned upper bound.  */
1350	  if (width == 2*HOST_BITS_PER_WIDE_INT)
1351	    {
1352	      th = -1;
1353	      tl = -1;
1354	    }
1355	  else if (width >= HOST_BITS_PER_WIDE_INT)
1356	    {
1357	      th = ((unsigned HOST_WIDE_INT) 1
1358		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1359	      tl = -1;
1360	    }
1361	  else
1362	    {
1363	      th = 0;
1364	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1365	    }
1366	  real_from_integer (&t, VOIDmode, tl, th, 1);
1367	  if (REAL_VALUES_LESS (t, x))
1368	    {
1369	      xh = th;
1370	      xl = tl;
1371	      break;
1372	    }
1373
1374	  REAL_VALUE_TO_INT (&xl, &xh, x);
1375	  break;
1376
1377	default:
1378	  gcc_unreachable ();
1379	}
1380      return immed_double_const (xl, xh, mode);
1381    }
1382
1383  return NULL_RTX;
1384}
1385
1386/* Subroutine of simplify_binary_operation to simplify a commutative,
1387   associative binary operation CODE with result mode MODE, operating
1388   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1389   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
1390   canonicalization is possible.  */
1391
1392static rtx
1393simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1394				rtx op0, rtx op1)
1395{
1396  rtx tem;
1397
1398  /* Linearize the operator to the left.  */
1399  if (GET_CODE (op1) == code)
1400    {
1401      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
1402      if (GET_CODE (op0) == code)
1403	{
1404	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1405	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1406	}
1407
1408      /* "a op (b op c)" becomes "(b op c) op a".  */
1409      if (! swap_commutative_operands_p (op1, op0))
1410	return simplify_gen_binary (code, mode, op1, op0);
1411
1412      tem = op0;
1413      op0 = op1;
1414      op1 = tem;
1415    }
1416
1417  if (GET_CODE (op0) == code)
1418    {
1419      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
1420      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1421	{
1422	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1423	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1424	}
1425
1426      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
1427      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1428	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1429	    : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1430      if (tem != 0)
1431        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1432
1433      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
1434      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1435	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1436	    : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1437      if (tem != 0)
1438        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1439    }
1440
1441  return 0;
1442}
1443
1444
1445/* Simplify a binary operation CODE with result mode MODE, operating on OP0
1446   and OP1.  Return 0 if no simplification is possible.
1447
1448   Don't use this for relational operations such as EQ or LT.
1449   Use simplify_relational_operation instead.  */
1450rtx
1451simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1452			   rtx op0, rtx op1)
1453{
1454  rtx trueop0, trueop1;
1455  rtx tem;
1456
1457  /* Relational operations don't work here.  We must know the mode
1458     of the operands in order to do the comparison correctly.
1459     Assuming a full word can give incorrect results.
1460     Consider comparing 128 with -128 in QImode.  */
1461  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1462  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1463
1464  /* Make sure the constant is second.  */
1465  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1466      && swap_commutative_operands_p (op0, op1))
1467    {
1468      tem = op0, op0 = op1, op1 = tem;
1469    }
1470
1471  trueop0 = avoid_constant_pool_reference (op0);
1472  trueop1 = avoid_constant_pool_reference (op1);
1473
1474  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1475  if (tem)
1476    return tem;
1477  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1478}
1479
1480/* Subroutine of simplify_binary_operation.  Simplify a binary operation
1481   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
1482   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1483   actual constants.  */
1484
1485static rtx
1486simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1487			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1488{
1489  rtx tem, reversed, opleft, opright;
1490  HOST_WIDE_INT val;
1491  unsigned int width = GET_MODE_BITSIZE (mode);
1492
1493  /* Even if we can't compute a constant result,
1494     there are some cases worth simplifying.  */
1495
1496  switch (code)
1497    {
1498    case PLUS:
1499      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
1500	 when x is NaN, infinite, or finite and nonzero.  They aren't
1501	 when x is -0 and the rounding mode is not towards -infinity,
1502	 since (-0) + 0 is then 0.  */
1503      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1504	return op0;
1505
1506      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
1507	 transformations are safe even for IEEE.  */
1508      if (GET_CODE (op0) == NEG)
1509	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1510      else if (GET_CODE (op1) == NEG)
1511	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1512
1513      /* (~a) + 1 -> -a */
1514      if (INTEGRAL_MODE_P (mode)
1515	  && GET_CODE (op0) == NOT
1516	  && trueop1 == const1_rtx)
1517	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1518
1519      /* Handle both-operands-constant cases.  We can only add
1520	 CONST_INTs to constants since the sum of relocatable symbols
1521	 can't be handled by most assemblers.  Don't add CONST_INT
1522	 to CONST_INT since overflow won't be computed properly if wider
1523	 than HOST_BITS_PER_WIDE_INT.  */
1524
1525      if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1526	  && GET_CODE (op1) == CONST_INT)
1527	return plus_constant (op0, INTVAL (op1));
1528      else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1529	       && GET_CODE (op0) == CONST_INT)
1530	return plus_constant (op1, INTVAL (op0));
1531
1532      /* See if this is something like X * C - X or vice versa or
1533	 if the multiplication is written as a shift.  If so, we can
1534	 distribute and make a new multiply, shift, or maybe just
1535	 have X (if C is 2 in the example above).  But don't make
1536	 something more expensive than we had before.  */
1537
1538      if (SCALAR_INT_MODE_P (mode))
1539	{
1540	  HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1541	  unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1542	  rtx lhs = op0, rhs = op1;
1543
1544	  if (GET_CODE (lhs) == NEG)
1545	    {
1546	      coeff0l = -1;
1547	      coeff0h = -1;
1548	      lhs = XEXP (lhs, 0);
1549	    }
1550	  else if (GET_CODE (lhs) == MULT
1551		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1552	    {
1553	      coeff0l = INTVAL (XEXP (lhs, 1));
1554	      coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1555	      lhs = XEXP (lhs, 0);
1556	    }
1557	  else if (GET_CODE (lhs) == ASHIFT
1558		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1559		   && INTVAL (XEXP (lhs, 1)) >= 0
1560		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1561	    {
1562	      coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1563	      coeff0h = 0;
1564	      lhs = XEXP (lhs, 0);
1565	    }
1566
1567	  if (GET_CODE (rhs) == NEG)
1568	    {
1569	      coeff1l = -1;
1570	      coeff1h = -1;
1571	      rhs = XEXP (rhs, 0);
1572	    }
1573	  else if (GET_CODE (rhs) == MULT
1574		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1575	    {
1576	      coeff1l = INTVAL (XEXP (rhs, 1));
1577	      coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1578	      rhs = XEXP (rhs, 0);
1579	    }
1580	  else if (GET_CODE (rhs) == ASHIFT
1581		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1582		   && INTVAL (XEXP (rhs, 1)) >= 0
1583		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1584	    {
1585	      coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1586	      coeff1h = 0;
1587	      rhs = XEXP (rhs, 0);
1588	    }
1589
1590	  if (rtx_equal_p (lhs, rhs))
1591	    {
1592	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
1593	      rtx coeff;
1594	      unsigned HOST_WIDE_INT l;
1595	      HOST_WIDE_INT h;
1596
1597	      add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1598	      coeff = immed_double_const (l, h, mode);
1599
1600	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1601	      return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1602		? tem : 0;
1603	    }
1604	}
1605
1606      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
1607      if ((GET_CODE (op1) == CONST_INT
1608	   || GET_CODE (op1) == CONST_DOUBLE)
1609	  && GET_CODE (op0) == XOR
1610	  && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1611	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1612	  && mode_signbit_p (mode, op1))
1613	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1614				    simplify_gen_binary (XOR, mode, op1,
1615							 XEXP (op0, 1)));
1616
1617      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
1618      if (GET_CODE (op0) == MULT
1619	  && GET_CODE (XEXP (op0, 0)) == NEG)
1620	{
1621	  rtx in1, in2;
1622
1623	  in1 = XEXP (XEXP (op0, 0), 0);
1624	  in2 = XEXP (op0, 1);
1625	  return simplify_gen_binary (MINUS, mode, op1,
1626				      simplify_gen_binary (MULT, mode,
1627							   in1, in2));
1628	}
1629
1630      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1631	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1632	 is 1.  */
1633      if (COMPARISON_P (op0)
1634	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1635	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1636	  && (reversed = reversed_comparison (op0, mode)))
1637	return
1638	  simplify_gen_unary (NEG, mode, reversed, mode);
1639
1640      /* If one of the operands is a PLUS or a MINUS, see if we can
1641	 simplify this by the associative law.
1642	 Don't use the associative law for floating point.
1643	 The inaccuracy makes it nonassociative,
1644	 and subtle programs can break if operations are associated.  */
1645
1646      if (INTEGRAL_MODE_P (mode)
1647	  && (plus_minus_operand_p (op0)
1648	      || plus_minus_operand_p (op1))
1649	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1650	return tem;
1651
1652      /* Reassociate floating point addition only when the user
1653	 specifies unsafe math optimizations.  */
1654      if (FLOAT_MODE_P (mode)
1655	  && flag_unsafe_math_optimizations)
1656	{
1657	  tem = simplify_associative_operation (code, mode, op0, op1);
1658	  if (tem)
1659	    return tem;
1660	}
1661      break;
1662
1663    case COMPARE:
1664#ifdef HAVE_cc0
1665      /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1666	 using cc0, in which case we want to leave it as a COMPARE
1667	 so we can distinguish it from a register-register-copy.
1668
1669	 In IEEE floating point, x-0 is not the same as x.  */
1670
1671      if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1672	   || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1673	  && trueop1 == CONST0_RTX (mode))
1674	return op0;
1675#endif
1676
1677      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
1678      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1679	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1680	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1681	{
1682	  rtx xop00 = XEXP (op0, 0);
1683	  rtx xop10 = XEXP (op1, 0);
1684
1685#ifdef HAVE_cc0
1686	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1687#else
1688	    if (REG_P (xop00) && REG_P (xop10)
1689		&& GET_MODE (xop00) == GET_MODE (xop10)
1690		&& REGNO (xop00) == REGNO (xop10)
1691		&& GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1692		&& GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1693#endif
1694	      return xop00;
1695	}
1696      break;
1697
1698    case MINUS:
1699      /* We can't assume x-x is 0 even with non-IEEE floating point,
1700	 but since it is zero except in very strange circumstances, we
1701	 will treat it as zero with -funsafe-math-optimizations.  */
1702      if (rtx_equal_p (trueop0, trueop1)
1703	  && ! side_effects_p (op0)
1704	  && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1705	return CONST0_RTX (mode);
1706
1707      /* Change subtraction from zero into negation.  (0 - x) is the
1708	 same as -x when x is NaN, infinite, or finite and nonzero.
1709	 But if the mode has signed zeros, and does not round towards
1710	 -infinity, then 0 - 0 is 0, not -0.  */
1711      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1712	return simplify_gen_unary (NEG, mode, op1, mode);
1713
1714      /* (-1 - a) is ~a.  */
1715      if (trueop0 == constm1_rtx)
1716	return simplify_gen_unary (NOT, mode, op1, mode);
1717
1718      /* Subtracting 0 has no effect unless the mode has signed zeros
1719	 and supports rounding towards -infinity.  In such a case,
1720	 0 - 0 is -0.  */
1721      if (!(HONOR_SIGNED_ZEROS (mode)
1722	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1723	  && trueop1 == CONST0_RTX (mode))
1724	return op0;
1725
1726      /* See if this is something like X * C - X or vice versa or
1727	 if the multiplication is written as a shift.  If so, we can
1728	 distribute and make a new multiply, shift, or maybe just
1729	 have X (if C is 2 in the example above).  But don't make
1730	 something more expensive than we had before.  */
1731
1732      if (SCALAR_INT_MODE_P (mode))
1733	{
1734	  HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1735	  unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1736	  rtx lhs = op0, rhs = op1;
1737
1738	  if (GET_CODE (lhs) == NEG)
1739	    {
1740	      coeff0l = -1;
1741	      coeff0h = -1;
1742	      lhs = XEXP (lhs, 0);
1743	    }
1744	  else if (GET_CODE (lhs) == MULT
1745		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1746	    {
1747	      coeff0l = INTVAL (XEXP (lhs, 1));
1748	      coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1749	      lhs = XEXP (lhs, 0);
1750	    }
1751	  else if (GET_CODE (lhs) == ASHIFT
1752		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1753		   && INTVAL (XEXP (lhs, 1)) >= 0
1754		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1755	    {
1756	      coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1757	      coeff0h = 0;
1758	      lhs = XEXP (lhs, 0);
1759	    }
1760
1761	  if (GET_CODE (rhs) == NEG)
1762	    {
1763	      negcoeff1l = 1;
1764	      negcoeff1h = 0;
1765	      rhs = XEXP (rhs, 0);
1766	    }
1767	  else if (GET_CODE (rhs) == MULT
1768		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1769	    {
1770	      negcoeff1l = -INTVAL (XEXP (rhs, 1));
1771	      negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1772	      rhs = XEXP (rhs, 0);
1773	    }
1774	  else if (GET_CODE (rhs) == ASHIFT
1775		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1776		   && INTVAL (XEXP (rhs, 1)) >= 0
1777		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1778	    {
1779	      negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1780	      negcoeff1h = -1;
1781	      rhs = XEXP (rhs, 0);
1782	    }
1783
1784	  if (rtx_equal_p (lhs, rhs))
1785	    {
1786	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
1787	      rtx coeff;
1788	      unsigned HOST_WIDE_INT l;
1789	      HOST_WIDE_INT h;
1790
1791	      add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1792	      coeff = immed_double_const (l, h, mode);
1793
1794	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1795	      return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1796		? tem : 0;
1797	    }
1798	}
1799
1800      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
1801      if (GET_CODE (op1) == NEG)
1802	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1803
1804      /* (-x - c) may be simplified as (-c - x).  */
1805      if (GET_CODE (op0) == NEG
1806	  && (GET_CODE (op1) == CONST_INT
1807	      || GET_CODE (op1) == CONST_DOUBLE))
1808	{
1809	  tem = simplify_unary_operation (NEG, mode, op1, mode);
1810	  if (tem)
1811	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1812	}
1813
1814      /* Don't let a relocatable value get a negative coeff.  */
1815      if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1816	return simplify_gen_binary (PLUS, mode,
1817				    op0,
1818				    neg_const_int (mode, op1));
1819
1820      /* (x - (x & y)) -> (x & ~y) */
1821      if (GET_CODE (op1) == AND)
1822	{
1823	  if (rtx_equal_p (op0, XEXP (op1, 0)))
1824	    {
1825	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1826					GET_MODE (XEXP (op1, 1)));
1827	      return simplify_gen_binary (AND, mode, op0, tem);
1828	    }
1829	  if (rtx_equal_p (op0, XEXP (op1, 1)))
1830	    {
1831	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1832					GET_MODE (XEXP (op1, 0)));
1833	      return simplify_gen_binary (AND, mode, op0, tem);
1834	    }
1835	}
1836
1837      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1838	 by reversing the comparison code if valid.  */
1839      if (STORE_FLAG_VALUE == 1
1840	  && trueop0 == const1_rtx
1841	  && COMPARISON_P (op1)
1842	  && (reversed = reversed_comparison (op1, mode)))
1843	return reversed;
1844
1845      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
1846      if (GET_CODE (op1) == MULT
1847	  && GET_CODE (XEXP (op1, 0)) == NEG)
1848	{
1849	  rtx in1, in2;
1850
1851	  in1 = XEXP (XEXP (op1, 0), 0);
1852	  in2 = XEXP (op1, 1);
1853	  return simplify_gen_binary (PLUS, mode,
1854				      simplify_gen_binary (MULT, mode,
1855							   in1, in2),
1856				      op0);
1857	}
1858
1859      /* Canonicalize (minus (neg A) (mult B C)) to
1860	 (minus (mult (neg B) C) A).  */
1861      if (GET_CODE (op1) == MULT
1862	  && GET_CODE (op0) == NEG)
1863	{
1864	  rtx in1, in2;
1865
1866	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1867	  in2 = XEXP (op1, 1);
1868	  return simplify_gen_binary (MINUS, mode,
1869				      simplify_gen_binary (MULT, mode,
1870							   in1, in2),
1871				      XEXP (op0, 0));
1872	}
1873
1874      /* If one of the operands is a PLUS or a MINUS, see if we can
1875	 simplify this by the associative law.  This will, for example,
1876         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1877	 Don't use the associative law for floating point.
1878	 The inaccuracy makes it nonassociative,
1879	 and subtle programs can break if operations are associated.  */
1880
1881      if (INTEGRAL_MODE_P (mode)
1882	  && (plus_minus_operand_p (op0)
1883	      || plus_minus_operand_p (op1))
1884	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1885	return tem;
1886      break;
1887
1888    case MULT:
1889      if (trueop1 == constm1_rtx)
1890	return simplify_gen_unary (NEG, mode, op0, mode);
1891
1892      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
1893	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
1894	 when the mode has signed zeros, since multiplying a negative
1895	 number by 0 will give -0, not 0.  */
1896      if (!HONOR_NANS (mode)
1897	  && !HONOR_SIGNED_ZEROS (mode)
1898	  && trueop1 == CONST0_RTX (mode)
1899	  && ! side_effects_p (op0))
1900	return op1;
1901
1902      /* In IEEE floating point, x*1 is not equivalent to x for
1903	 signalling NaNs.  */
1904      if (!HONOR_SNANS (mode)
1905	  && trueop1 == CONST1_RTX (mode))
1906	return op0;
1907
1908      /* Convert multiply by constant power of two into shift unless
1909	 we are still generating RTL.  This test is a kludge.  */
1910      if (GET_CODE (trueop1) == CONST_INT
1911	  && (val = exact_log2 (INTVAL (trueop1))) >= 0
1912	  /* If the mode is larger than the host word size, and the
1913	     uppermost bit is set, then this isn't a power of two due
1914	     to implicit sign extension.  */
1915	  && (width <= HOST_BITS_PER_WIDE_INT
1916	      || val != HOST_BITS_PER_WIDE_INT - 1))
1917	return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1918
1919      /* Likewise for multipliers wider than a word.  */
1920      if (GET_CODE (trueop1) == CONST_DOUBLE
1921	  && (GET_MODE (trueop1) == VOIDmode
1922	      || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1923	  && GET_MODE (op0) == mode
1924	  && CONST_DOUBLE_LOW (trueop1) == 0
1925	  && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1926	return simplify_gen_binary (ASHIFT, mode, op0,
1927				    GEN_INT (val + HOST_BITS_PER_WIDE_INT));
1928
1929      /* x*2 is x+x and x*(-1) is -x */
1930      if (GET_CODE (trueop1) == CONST_DOUBLE
1931	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
1932	  && GET_MODE (op0) == mode)
1933	{
1934	  REAL_VALUE_TYPE d;
1935	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1936
1937	  if (REAL_VALUES_EQUAL (d, dconst2))
1938	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1939
1940	  if (!HONOR_SNANS (mode)
1941	      && REAL_VALUES_EQUAL (d, dconstm1))
1942	    return simplify_gen_unary (NEG, mode, op0, mode);
1943	}
1944
1945      /* Optimize -x * -x as x * x.  */
1946      if (FLOAT_MODE_P (mode)
1947	  && GET_CODE (op0) == NEG
1948	  && GET_CODE (op1) == NEG
1949	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
1950	  && !side_effects_p (XEXP (op0, 0)))
1951	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
1952
1953      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
1954      if (SCALAR_FLOAT_MODE_P (mode)
1955	  && GET_CODE (op0) == ABS
1956	  && GET_CODE (op1) == ABS
1957	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
1958	  && !side_effects_p (XEXP (op0, 0)))
1959	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
1960
1961      /* Reassociate multiplication, but for floating point MULTs
1962	 only when the user specifies unsafe math optimizations.  */
1963      if (! FLOAT_MODE_P (mode)
1964	  || flag_unsafe_math_optimizations)
1965	{
1966	  tem = simplify_associative_operation (code, mode, op0, op1);
1967	  if (tem)
1968	    return tem;
1969	}
1970      break;
1971
1972    case IOR:
1973      if (trueop1 == const0_rtx)
1974	return op0;
1975      if (GET_CODE (trueop1) == CONST_INT
1976	  && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1977	      == GET_MODE_MASK (mode)))
1978	return op1;
1979      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1980	return op0;
1981      /* A | (~A) -> -1 */
1982      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1983	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1984	  && ! side_effects_p (op0)
1985	  && SCALAR_INT_MODE_P (mode))
1986	return constm1_rtx;
1987
1988      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
1989      if (GET_CODE (op1) == CONST_INT
1990	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1991	  && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
1992	return op1;
1993
1994      /* Convert (A & B) | A to A.  */
1995      if (GET_CODE (op0) == AND
1996	  && (rtx_equal_p (XEXP (op0, 0), op1)
1997	      || rtx_equal_p (XEXP (op0, 1), op1))
1998	  && ! side_effects_p (XEXP (op0, 0))
1999	  && ! side_effects_p (XEXP (op0, 1)))
2000	return op1;
2001
2002      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2003         mode size to (rotate A CX).  */
2004
2005      if (GET_CODE (op1) == ASHIFT
2006          || GET_CODE (op1) == SUBREG)
2007        {
2008	  opleft = op1;
2009	  opright = op0;
2010	}
2011      else
2012        {
2013	  opright = op1;
2014	  opleft = op0;
2015	}
2016
2017      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2018          && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2019          && GET_CODE (XEXP (opleft, 1)) == CONST_INT
2020          && GET_CODE (XEXP (opright, 1)) == CONST_INT
2021          && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2022              == GET_MODE_BITSIZE (mode)))
2023        return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2024
2025      /* Same, but for ashift that has been "simplified" to a wider mode
2026        by simplify_shift_const.  */
2027
2028      if (GET_CODE (opleft) == SUBREG
2029          && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2030          && GET_CODE (opright) == LSHIFTRT
2031          && GET_CODE (XEXP (opright, 0)) == SUBREG
2032          && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2033          && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2034          && (GET_MODE_SIZE (GET_MODE (opleft))
2035              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2036          && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2037                          SUBREG_REG (XEXP (opright, 0)))
2038          && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2039          && GET_CODE (XEXP (opright, 1)) == CONST_INT
2040          && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2041              == GET_MODE_BITSIZE (mode)))
2042        return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2043                               XEXP (SUBREG_REG (opleft), 1));
2044
2045      /* If we have (ior (and (X C1) C2)), simplify this by making
2046	 C1 as small as possible if C1 actually changes.  */
2047      if (GET_CODE (op1) == CONST_INT
2048	  && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2049	      || INTVAL (op1) > 0)
2050	  && GET_CODE (op0) == AND
2051	  && GET_CODE (XEXP (op0, 1)) == CONST_INT
2052	  && GET_CODE (op1) == CONST_INT
2053	  && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2054	return simplify_gen_binary (IOR, mode,
2055				    simplify_gen_binary
2056					  (AND, mode, XEXP (op0, 0),
2057					   GEN_INT (INTVAL (XEXP (op0, 1))
2058						    & ~INTVAL (op1))),
2059				    op1);
2060
2061      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2062         a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
2063	 the PLUS does not affect any of the bits in OP1: then we can do
2064	 the IOR as a PLUS and we can associate.  This is valid if OP1
2065         can be safely shifted left C bits.  */
2066      if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2067          && GET_CODE (XEXP (op0, 0)) == PLUS
2068          && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2069          && GET_CODE (XEXP (op0, 1)) == CONST_INT
2070          && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2071        {
2072          int count = INTVAL (XEXP (op0, 1));
2073          HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2074
2075          if (mask >> count == INTVAL (trueop1)
2076              && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2077	    return simplify_gen_binary (ASHIFTRT, mode,
2078					plus_constant (XEXP (op0, 0), mask),
2079					XEXP (op0, 1));
2080        }
2081
2082      tem = simplify_associative_operation (code, mode, op0, op1);
2083      if (tem)
2084	return tem;
2085      break;
2086
2087    case XOR:
2088      if (trueop1 == const0_rtx)
2089	return op0;
2090      if (GET_CODE (trueop1) == CONST_INT
2091	  && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2092	      == GET_MODE_MASK (mode)))
2093	return simplify_gen_unary (NOT, mode, op0, mode);
2094      if (rtx_equal_p (trueop0, trueop1)
2095	  && ! side_effects_p (op0)
2096	  && GET_MODE_CLASS (mode) != MODE_CC)
2097	 return CONST0_RTX (mode);
2098
2099      /* Canonicalize XOR of the most significant bit to PLUS.  */
2100      if ((GET_CODE (op1) == CONST_INT
2101	   || GET_CODE (op1) == CONST_DOUBLE)
2102	  && mode_signbit_p (mode, op1))
2103	return simplify_gen_binary (PLUS, mode, op0, op1);
2104      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
2105      if ((GET_CODE (op1) == CONST_INT
2106	   || GET_CODE (op1) == CONST_DOUBLE)
2107	  && GET_CODE (op0) == PLUS
2108	  && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2109	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2110	  && mode_signbit_p (mode, XEXP (op0, 1)))
2111	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2112				    simplify_gen_binary (XOR, mode, op1,
2113							 XEXP (op0, 1)));
2114
2115      /* If we are XORing two things that have no bits in common,
2116	 convert them into an IOR.  This helps to detect rotation encoded
2117	 using those methods and possibly other simplifications.  */
2118
2119      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2120	  && (nonzero_bits (op0, mode)
2121	      & nonzero_bits (op1, mode)) == 0)
2122	return (simplify_gen_binary (IOR, mode, op0, op1));
2123
2124      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2125	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2126	 (NOT y).  */
2127      {
2128	int num_negated = 0;
2129
2130	if (GET_CODE (op0) == NOT)
2131	  num_negated++, op0 = XEXP (op0, 0);
2132	if (GET_CODE (op1) == NOT)
2133	  num_negated++, op1 = XEXP (op1, 0);
2134
2135	if (num_negated == 2)
2136	  return simplify_gen_binary (XOR, mode, op0, op1);
2137	else if (num_negated == 1)
2138	  return simplify_gen_unary (NOT, mode,
2139				     simplify_gen_binary (XOR, mode, op0, op1),
2140				     mode);
2141      }
2142
2143      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
2144	 correspond to a machine insn or result in further simplifications
2145	 if B is a constant.  */
2146
2147      if (GET_CODE (op0) == AND
2148	  && rtx_equal_p (XEXP (op0, 1), op1)
2149	  && ! side_effects_p (op1))
2150	return simplify_gen_binary (AND, mode,
2151				    simplify_gen_unary (NOT, mode,
2152							XEXP (op0, 0), mode),
2153				    op1);
2154
2155      else if (GET_CODE (op0) == AND
2156	       && rtx_equal_p (XEXP (op0, 0), op1)
2157	       && ! side_effects_p (op1))
2158	return simplify_gen_binary (AND, mode,
2159				    simplify_gen_unary (NOT, mode,
2160							XEXP (op0, 1), mode),
2161				    op1);
2162
2163      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2164	 comparison if STORE_FLAG_VALUE is 1.  */
2165      if (STORE_FLAG_VALUE == 1
2166	  && trueop1 == const1_rtx
2167	  && COMPARISON_P (op0)
2168	  && (reversed = reversed_comparison (op0, mode)))
2169	return reversed;
2170
2171      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2172	 is (lt foo (const_int 0)), so we can perform the above
2173	 simplification if STORE_FLAG_VALUE is 1.  */
2174
2175      if (STORE_FLAG_VALUE == 1
2176	  && trueop1 == const1_rtx
2177	  && GET_CODE (op0) == LSHIFTRT
2178	  && GET_CODE (XEXP (op0, 1)) == CONST_INT
2179	  && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2180	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2181
2182      /* (xor (comparison foo bar) (const_int sign-bit))
2183	 when STORE_FLAG_VALUE is the sign bit.  */
2184      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2185	  && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2186	      == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2187	  && trueop1 == const_true_rtx
2188	  && COMPARISON_P (op0)
2189	  && (reversed = reversed_comparison (op0, mode)))
2190	return reversed;
2191
2192      break;
2193
2194      tem = simplify_associative_operation (code, mode, op0, op1);
2195      if (tem)
2196	return tem;
2197      break;
2198
2199    case AND:
2200      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2201	return trueop1;
2202      /* If we are turning off bits already known off in OP0, we need
2203	 not do an AND.  */
2204      if (GET_CODE (trueop1) == CONST_INT
2205	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2206	  && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2207	return op0;
2208      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2209	  && GET_MODE_CLASS (mode) != MODE_CC)
2210	return op0;
2211      /* A & (~A) -> 0 */
2212      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2213	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2214	  && ! side_effects_p (op0)
2215	  && GET_MODE_CLASS (mode) != MODE_CC)
2216	return CONST0_RTX (mode);
2217
2218      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2219	 there are no nonzero bits of C outside of X's mode.  */
2220      if ((GET_CODE (op0) == SIGN_EXTEND
2221	   || GET_CODE (op0) == ZERO_EXTEND)
2222	  && GET_CODE (trueop1) == CONST_INT
2223	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2224	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2225	      & INTVAL (trueop1)) == 0)
2226	{
2227	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2228	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2229				     gen_int_mode (INTVAL (trueop1),
2230						   imode));
2231	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2232	}
2233
2234      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2235	 insn (and may simplify more).  */
2236      if (GET_CODE (op0) == XOR
2237	  && rtx_equal_p (XEXP (op0, 0), op1)
2238	  && ! side_effects_p (op1))
2239	return simplify_gen_binary (AND, mode,
2240				    simplify_gen_unary (NOT, mode,
2241							XEXP (op0, 1), mode),
2242				    op1);
2243
2244      if (GET_CODE (op0) == XOR
2245	  && rtx_equal_p (XEXP (op0, 1), op1)
2246	  && ! side_effects_p (op1))
2247	return simplify_gen_binary (AND, mode,
2248				    simplify_gen_unary (NOT, mode,
2249							XEXP (op0, 0), mode),
2250				    op1);
2251
2252      /* Similarly for (~(A ^ B)) & A.  */
2253      if (GET_CODE (op0) == NOT
2254	  && GET_CODE (XEXP (op0, 0)) == XOR
2255	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2256	  && ! side_effects_p (op1))
2257	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2258
2259      if (GET_CODE (op0) == NOT
2260	  && GET_CODE (XEXP (op0, 0)) == XOR
2261	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2262	  && ! side_effects_p (op1))
2263	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2264
2265      /* Convert (A | B) & A to A.  */
2266      if (GET_CODE (op0) == IOR
2267	  && (rtx_equal_p (XEXP (op0, 0), op1)
2268	      || rtx_equal_p (XEXP (op0, 1), op1))
2269	  && ! side_effects_p (XEXP (op0, 0))
2270	  && ! side_effects_p (XEXP (op0, 1)))
2271	return op1;
2272
2273      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2274	 ((A & N) + B) & M -> (A + B) & M
2275	 Similarly if (N & M) == 0,
2276	 ((A | N) + B) & M -> (A + B) & M
2277	 and for - instead of + and/or ^ instead of |.  */
2278      if (GET_CODE (trueop1) == CONST_INT
2279	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2280	  && ~INTVAL (trueop1)
2281	  && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2282	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2283	{
2284	  rtx pmop[2];
2285	  int which;
2286
2287	  pmop[0] = XEXP (op0, 0);
2288	  pmop[1] = XEXP (op0, 1);
2289
2290	  for (which = 0; which < 2; which++)
2291	    {
2292	      tem = pmop[which];
2293	      switch (GET_CODE (tem))
2294		{
2295		case AND:
2296		  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2297		      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2298		      == INTVAL (trueop1))
2299		    pmop[which] = XEXP (tem, 0);
2300		  break;
2301		case IOR:
2302		case XOR:
2303		  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2304		      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2305		    pmop[which] = XEXP (tem, 0);
2306		  break;
2307		default:
2308		  break;
2309		}
2310	    }
2311
2312	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2313	    {
2314	      tem = simplify_gen_binary (GET_CODE (op0), mode,
2315					 pmop[0], pmop[1]);
2316	      return simplify_gen_binary (code, mode, tem, op1);
2317	    }
2318	}
2319      tem = simplify_associative_operation (code, mode, op0, op1);
2320      if (tem)
2321	return tem;
2322      break;
2323
2324    case UDIV:
2325      /* 0/x is 0 (or x&0 if x has side-effects).  */
2326      if (trueop0 == CONST0_RTX (mode))
2327	{
2328	  if (side_effects_p (op1))
2329	    return simplify_gen_binary (AND, mode, op1, trueop0);
2330	  return trueop0;
2331	}
2332      /* x/1 is x.  */
2333      if (trueop1 == CONST1_RTX (mode))
2334	return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2335      /* Convert divide by power of two into shift.  */
2336      if (GET_CODE (trueop1) == CONST_INT
2337	  && (val = exact_log2 (INTVAL (trueop1))) > 0)
2338	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2339      break;
2340
2341    case DIV:
2342      /* Handle floating point and integers separately.  */
2343      if (SCALAR_FLOAT_MODE_P (mode))
2344	{
2345	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
2346	     safe for modes with NaNs, since 0.0 / 0.0 will then be
2347	     NaN rather than 0.0.  Nor is it safe for modes with signed
2348	     zeros, since dividing 0 by a negative number gives -0.0  */
2349	  if (trueop0 == CONST0_RTX (mode)
2350	      && !HONOR_NANS (mode)
2351	      && !HONOR_SIGNED_ZEROS (mode)
2352	      && ! side_effects_p (op1))
2353	    return op0;
2354	  /* x/1.0 is x.  */
2355	  if (trueop1 == CONST1_RTX (mode)
2356	      && !HONOR_SNANS (mode))
2357	    return op0;
2358
2359	  if (GET_CODE (trueop1) == CONST_DOUBLE
2360	      && trueop1 != CONST0_RTX (mode))
2361	    {
2362	      REAL_VALUE_TYPE d;
2363	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2364
2365	      /* x/-1.0 is -x.  */
2366	      if (REAL_VALUES_EQUAL (d, dconstm1)
2367		  && !HONOR_SNANS (mode))
2368		return simplify_gen_unary (NEG, mode, op0, mode);
2369
2370	      /* Change FP division by a constant into multiplication.
2371		 Only do this with -funsafe-math-optimizations.  */
2372	      if (flag_unsafe_math_optimizations
2373		  && !REAL_VALUES_EQUAL (d, dconst0))
2374		{
2375		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2376		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2377		  return simplify_gen_binary (MULT, mode, op0, tem);
2378		}
2379	    }
2380	}
2381      else
2382	{
2383	  /* 0/x is 0 (or x&0 if x has side-effects).  */
2384	  if (trueop0 == CONST0_RTX (mode))
2385	    {
2386	      if (side_effects_p (op1))
2387		return simplify_gen_binary (AND, mode, op1, trueop0);
2388	      return trueop0;
2389	    }
2390	  /* x/1 is x.  */
2391	  if (trueop1 == CONST1_RTX (mode))
2392	    return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2393	  /* x/-1 is -x.  */
2394	  if (trueop1 == constm1_rtx)
2395	    {
2396	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2397	      return simplify_gen_unary (NEG, mode, x, mode);
2398	    }
2399	}
2400      break;
2401
2402    case UMOD:
2403      /* 0%x is 0 (or x&0 if x has side-effects).  */
2404      if (trueop0 == CONST0_RTX (mode))
2405	{
2406	  if (side_effects_p (op1))
2407	    return simplify_gen_binary (AND, mode, op1, trueop0);
2408	  return trueop0;
2409	}
2410      /* x%1 is 0 (of x&0 if x has side-effects).  */
2411      if (trueop1 == CONST1_RTX (mode))
2412	{
2413	  if (side_effects_p (op0))
2414	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2415	  return CONST0_RTX (mode);
2416	}
2417      /* Implement modulus by power of two as AND.  */
2418      if (GET_CODE (trueop1) == CONST_INT
2419	  && exact_log2 (INTVAL (trueop1)) > 0)
2420	return simplify_gen_binary (AND, mode, op0,
2421				    GEN_INT (INTVAL (op1) - 1));
2422      break;
2423
2424    case MOD:
2425      /* 0%x is 0 (or x&0 if x has side-effects).  */
2426      if (trueop0 == CONST0_RTX (mode))
2427	{
2428	  if (side_effects_p (op1))
2429	    return simplify_gen_binary (AND, mode, op1, trueop0);
2430	  return trueop0;
2431	}
2432      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
2433      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2434	{
2435	  if (side_effects_p (op0))
2436	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2437	  return CONST0_RTX (mode);
2438	}
2439      break;
2440
2441    case ROTATERT:
2442    case ROTATE:
2443    case ASHIFTRT:
2444      if (trueop1 == CONST0_RTX (mode))
2445	return op0;
2446      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2447	return op0;
2448      /* Rotating ~0 always results in ~0.  */
2449      if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2450	  && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2451	  && ! side_effects_p (op1))
2452	return op0;
2453      break;
2454
2455    case ASHIFT:
2456    case SS_ASHIFT:
2457      if (trueop1 == CONST0_RTX (mode))
2458	return op0;
2459      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2460	return op0;
2461      break;
2462
2463    case LSHIFTRT:
2464      if (trueop1 == CONST0_RTX (mode))
2465	return op0;
2466      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2467	return op0;
2468      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
2469      if (GET_CODE (op0) == CLZ
2470	  && GET_CODE (trueop1) == CONST_INT
2471	  && STORE_FLAG_VALUE == 1
2472	  && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2473	{
2474	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2475	  unsigned HOST_WIDE_INT zero_val = 0;
2476
2477	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2478	      && zero_val == GET_MODE_BITSIZE (imode)
2479	      && INTVAL (trueop1) == exact_log2 (zero_val))
2480	    return simplify_gen_relational (EQ, mode, imode,
2481					    XEXP (op0, 0), const0_rtx);
2482	}
2483      break;
2484
2485    case SMIN:
2486      if (width <= HOST_BITS_PER_WIDE_INT
2487	  && GET_CODE (trueop1) == CONST_INT
2488	  && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2489	  && ! side_effects_p (op0))
2490	return op1;
2491      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2492	return op0;
2493      tem = simplify_associative_operation (code, mode, op0, op1);
2494      if (tem)
2495	return tem;
2496      break;
2497
2498    case SMAX:
2499      if (width <= HOST_BITS_PER_WIDE_INT
2500	  && GET_CODE (trueop1) == CONST_INT
2501	  && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2502	      == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2503	  && ! side_effects_p (op0))
2504	return op1;
2505      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2506	return op0;
2507      tem = simplify_associative_operation (code, mode, op0, op1);
2508      if (tem)
2509	return tem;
2510      break;
2511
2512    case UMIN:
2513      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2514	return op1;
2515      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2516	return op0;
2517      tem = simplify_associative_operation (code, mode, op0, op1);
2518      if (tem)
2519	return tem;
2520      break;
2521
2522    case UMAX:
2523      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2524	return op1;
2525      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2526	return op0;
2527      tem = simplify_associative_operation (code, mode, op0, op1);
2528      if (tem)
2529	return tem;
2530      break;
2531
2532    case SS_PLUS:
2533    case US_PLUS:
2534    case SS_MINUS:
2535    case US_MINUS:
2536      /* ??? There are simplifications that can be done.  */
2537      return 0;
2538
2539    case VEC_SELECT:
2540      if (!VECTOR_MODE_P (mode))
2541	{
2542	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2543	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2544	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
2545	  gcc_assert (XVECLEN (trueop1, 0) == 1);
2546	  gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2547
2548	  if (GET_CODE (trueop0) == CONST_VECTOR)
2549	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2550						      (trueop1, 0, 0)));
2551	}
2552      else
2553	{
2554	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2555	  gcc_assert (GET_MODE_INNER (mode)
2556		      == GET_MODE_INNER (GET_MODE (trueop0)));
2557	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
2558
2559	  if (GET_CODE (trueop0) == CONST_VECTOR)
2560	    {
2561	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2562	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2563	      rtvec v = rtvec_alloc (n_elts);
2564	      unsigned int i;
2565
2566	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2567	      for (i = 0; i < n_elts; i++)
2568		{
2569		  rtx x = XVECEXP (trueop1, 0, i);
2570
2571		  gcc_assert (GET_CODE (x) == CONST_INT);
2572		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2573						       INTVAL (x));
2574		}
2575
2576	      return gen_rtx_CONST_VECTOR (mode, v);
2577	    }
2578	}
2579
2580      if (XVECLEN (trueop1, 0) == 1
2581	  && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2582	  && GET_CODE (trueop0) == VEC_CONCAT)
2583	{
2584	  rtx vec = trueop0;
2585	  int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2586
2587	  /* Try to find the element in the VEC_CONCAT.  */
2588	  while (GET_MODE (vec) != mode
2589		 && GET_CODE (vec) == VEC_CONCAT)
2590	    {
2591	      HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2592	      if (offset < vec_size)
2593		vec = XEXP (vec, 0);
2594	      else
2595		{
2596		  offset -= vec_size;
2597		  vec = XEXP (vec, 1);
2598		}
2599	      vec = avoid_constant_pool_reference (vec);
2600	    }
2601
2602	  if (GET_MODE (vec) == mode)
2603	    return vec;
2604	}
2605
2606      return 0;
2607    case VEC_CONCAT:
2608      {
2609	enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2610				      ? GET_MODE (trueop0)
2611				      : GET_MODE_INNER (mode));
2612	enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2613				      ? GET_MODE (trueop1)
2614				      : GET_MODE_INNER (mode));
2615
2616	gcc_assert (VECTOR_MODE_P (mode));
2617	gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2618		    == GET_MODE_SIZE (mode));
2619
2620	if (VECTOR_MODE_P (op0_mode))
2621	  gcc_assert (GET_MODE_INNER (mode)
2622		      == GET_MODE_INNER (op0_mode));
2623	else
2624	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2625
2626	if (VECTOR_MODE_P (op1_mode))
2627	  gcc_assert (GET_MODE_INNER (mode)
2628		      == GET_MODE_INNER (op1_mode));
2629	else
2630	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2631
2632	if ((GET_CODE (trueop0) == CONST_VECTOR
2633	     || GET_CODE (trueop0) == CONST_INT
2634	     || GET_CODE (trueop0) == CONST_DOUBLE)
2635	    && (GET_CODE (trueop1) == CONST_VECTOR
2636		|| GET_CODE (trueop1) == CONST_INT
2637		|| GET_CODE (trueop1) == CONST_DOUBLE))
2638	  {
2639	    int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2640	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2641	    rtvec v = rtvec_alloc (n_elts);
2642	    unsigned int i;
2643	    unsigned in_n_elts = 1;
2644
2645	    if (VECTOR_MODE_P (op0_mode))
2646	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2647	    for (i = 0; i < n_elts; i++)
2648	      {
2649		if (i < in_n_elts)
2650		  {
2651		    if (!VECTOR_MODE_P (op0_mode))
2652		      RTVEC_ELT (v, i) = trueop0;
2653		    else
2654		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2655		  }
2656		else
2657		  {
2658		    if (!VECTOR_MODE_P (op1_mode))
2659		      RTVEC_ELT (v, i) = trueop1;
2660		    else
2661		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2662							   i - in_n_elts);
2663		  }
2664	      }
2665
2666	    return gen_rtx_CONST_VECTOR (mode, v);
2667	  }
2668      }
2669      return 0;
2670
2671    default:
2672      gcc_unreachable ();
2673    }
2674
2675  return 0;
2676}
2677
2678rtx
2679simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2680				 rtx op0, rtx op1)
2681{
2682  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2683  HOST_WIDE_INT val;
2684  unsigned int width = GET_MODE_BITSIZE (mode);
2685
2686  if (VECTOR_MODE_P (mode)
2687      && code != VEC_CONCAT
2688      && GET_CODE (op0) == CONST_VECTOR
2689      && GET_CODE (op1) == CONST_VECTOR)
2690    {
2691      unsigned n_elts = GET_MODE_NUNITS (mode);
2692      enum machine_mode op0mode = GET_MODE (op0);
2693      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2694      enum machine_mode op1mode = GET_MODE (op1);
2695      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2696      rtvec v = rtvec_alloc (n_elts);
2697      unsigned int i;
2698
2699      gcc_assert (op0_n_elts == n_elts);
2700      gcc_assert (op1_n_elts == n_elts);
2701      for (i = 0; i < n_elts; i++)
2702	{
2703	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2704					     CONST_VECTOR_ELT (op0, i),
2705					     CONST_VECTOR_ELT (op1, i));
2706	  if (!x)
2707	    return 0;
2708	  RTVEC_ELT (v, i) = x;
2709	}
2710
2711      return gen_rtx_CONST_VECTOR (mode, v);
2712    }
2713
2714  if (VECTOR_MODE_P (mode)
2715      && code == VEC_CONCAT
2716      && CONSTANT_P (op0) && CONSTANT_P (op1))
2717    {
2718      unsigned n_elts = GET_MODE_NUNITS (mode);
2719      rtvec v = rtvec_alloc (n_elts);
2720
2721      gcc_assert (n_elts >= 2);
2722      if (n_elts == 2)
2723	{
2724	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2725	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2726
2727	  RTVEC_ELT (v, 0) = op0;
2728	  RTVEC_ELT (v, 1) = op1;
2729	}
2730      else
2731	{
2732	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2733	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2734	  unsigned i;
2735
2736	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2737	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2738	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2739
2740	  for (i = 0; i < op0_n_elts; ++i)
2741	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2742	  for (i = 0; i < op1_n_elts; ++i)
2743	    RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2744	}
2745
2746      return gen_rtx_CONST_VECTOR (mode, v);
2747    }
2748
2749  if (SCALAR_FLOAT_MODE_P (mode)
2750      && GET_CODE (op0) == CONST_DOUBLE
2751      && GET_CODE (op1) == CONST_DOUBLE
2752      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2753    {
2754      if (code == AND
2755	  || code == IOR
2756	  || code == XOR)
2757	{
2758	  long tmp0[4];
2759	  long tmp1[4];
2760	  REAL_VALUE_TYPE r;
2761	  int i;
2762
2763	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2764			  GET_MODE (op0));
2765	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2766			  GET_MODE (op1));
2767	  for (i = 0; i < 4; i++)
2768	    {
2769	      switch (code)
2770	      {
2771	      case AND:
2772		tmp0[i] &= tmp1[i];
2773		break;
2774	      case IOR:
2775		tmp0[i] |= tmp1[i];
2776		break;
2777	      case XOR:
2778		tmp0[i] ^= tmp1[i];
2779		break;
2780	      default:
2781		gcc_unreachable ();
2782	      }
2783	    }
2784	   real_from_target (&r, tmp0, mode);
2785	   return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2786	}
2787      else
2788	{
2789	  REAL_VALUE_TYPE f0, f1, value, result;
2790	  bool inexact;
2791
2792	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2793	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2794	  real_convert (&f0, mode, &f0);
2795	  real_convert (&f1, mode, &f1);
2796
2797	  if (HONOR_SNANS (mode)
2798	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2799	    return 0;
2800
2801	  if (code == DIV
2802	      && REAL_VALUES_EQUAL (f1, dconst0)
2803	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2804	    return 0;
2805
2806	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2807	      && flag_trapping_math
2808	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2809	    {
2810	      int s0 = REAL_VALUE_NEGATIVE (f0);
2811	      int s1 = REAL_VALUE_NEGATIVE (f1);
2812
2813	      switch (code)
2814		{
2815		case PLUS:
2816		  /* Inf + -Inf = NaN plus exception.  */
2817		  if (s0 != s1)
2818		    return 0;
2819		  break;
2820		case MINUS:
2821		  /* Inf - Inf = NaN plus exception.  */
2822		  if (s0 == s1)
2823		    return 0;
2824		  break;
2825		case DIV:
2826		  /* Inf / Inf = NaN plus exception.  */
2827		  return 0;
2828		default:
2829		  break;
2830		}
2831	    }
2832
2833	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2834	      && flag_trapping_math
2835	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2836		  || (REAL_VALUE_ISINF (f1)
2837		      && REAL_VALUES_EQUAL (f0, dconst0))))
2838	    /* Inf * 0 = NaN plus exception.  */
2839	    return 0;
2840
2841	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2842				     &f0, &f1);
2843	  real_convert (&result, mode, &value);
2844
2845	  /* Don't constant fold this floating point operation if
2846	     the result has overflowed and flag_trapping_math.  */
2847
2848	  if (flag_trapping_math
2849	      && MODE_HAS_INFINITIES (mode)
2850	      && REAL_VALUE_ISINF (result)
2851	      && !REAL_VALUE_ISINF (f0)
2852	      && !REAL_VALUE_ISINF (f1))
2853	    /* Overflow plus exception.  */
2854	    return 0;
2855
2856	  /* Don't constant fold this floating point operation if the
2857	     result may dependent upon the run-time rounding mode and
2858	     flag_rounding_math is set, or if GCC's software emulation
2859	     is unable to accurately represent the result.  */
2860
2861	  if ((flag_rounding_math
2862	       || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2863		   && !flag_unsafe_math_optimizations))
2864	      && (inexact || !real_identical (&result, &value)))
2865	    return NULL_RTX;
2866
2867	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2868	}
2869    }
2870
2871  /* We can fold some multi-word operations.  */
2872  if (GET_MODE_CLASS (mode) == MODE_INT
2873      && width == HOST_BITS_PER_WIDE_INT * 2
2874      && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2875      && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2876    {
2877      unsigned HOST_WIDE_INT l1, l2, lv, lt;
2878      HOST_WIDE_INT h1, h2, hv, ht;
2879
2880      if (GET_CODE (op0) == CONST_DOUBLE)
2881	l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2882      else
2883	l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2884
2885      if (GET_CODE (op1) == CONST_DOUBLE)
2886	l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2887      else
2888	l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2889
2890      switch (code)
2891	{
2892	case MINUS:
2893	  /* A - B == A + (-B).  */
2894	  neg_double (l2, h2, &lv, &hv);
2895	  l2 = lv, h2 = hv;
2896
2897	  /* Fall through....  */
2898
2899	case PLUS:
2900	  add_double (l1, h1, l2, h2, &lv, &hv);
2901	  break;
2902
2903	case MULT:
2904	  mul_double (l1, h1, l2, h2, &lv, &hv);
2905	  break;
2906
2907	case DIV:
2908	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2909				    &lv, &hv, &lt, &ht))
2910	    return 0;
2911	  break;
2912
2913	case MOD:
2914	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2915				    &lt, &ht, &lv, &hv))
2916	    return 0;
2917	  break;
2918
2919	case UDIV:
2920	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2921				    &lv, &hv, &lt, &ht))
2922	    return 0;
2923	  break;
2924
2925	case UMOD:
2926	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2927				    &lt, &ht, &lv, &hv))
2928	    return 0;
2929	  break;
2930
2931	case AND:
2932	  lv = l1 & l2, hv = h1 & h2;
2933	  break;
2934
2935	case IOR:
2936	  lv = l1 | l2, hv = h1 | h2;
2937	  break;
2938
2939	case XOR:
2940	  lv = l1 ^ l2, hv = h1 ^ h2;
2941	  break;
2942
2943	case SMIN:
2944	  if (h1 < h2
2945	      || (h1 == h2
2946		  && ((unsigned HOST_WIDE_INT) l1
2947		      < (unsigned HOST_WIDE_INT) l2)))
2948	    lv = l1, hv = h1;
2949	  else
2950	    lv = l2, hv = h2;
2951	  break;
2952
2953	case SMAX:
2954	  if (h1 > h2
2955	      || (h1 == h2
2956		  && ((unsigned HOST_WIDE_INT) l1
2957		      > (unsigned HOST_WIDE_INT) l2)))
2958	    lv = l1, hv = h1;
2959	  else
2960	    lv = l2, hv = h2;
2961	  break;
2962
2963	case UMIN:
2964	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2965	      || (h1 == h2
2966		  && ((unsigned HOST_WIDE_INT) l1
2967		      < (unsigned HOST_WIDE_INT) l2)))
2968	    lv = l1, hv = h1;
2969	  else
2970	    lv = l2, hv = h2;
2971	  break;
2972
2973	case UMAX:
2974	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2975	      || (h1 == h2
2976		  && ((unsigned HOST_WIDE_INT) l1
2977		      > (unsigned HOST_WIDE_INT) l2)))
2978	    lv = l1, hv = h1;
2979	  else
2980	    lv = l2, hv = h2;
2981	  break;
2982
2983	case LSHIFTRT:   case ASHIFTRT:
2984	case ASHIFT:
2985	case ROTATE:     case ROTATERT:
2986	  if (SHIFT_COUNT_TRUNCATED)
2987	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2988
2989	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2990	    return 0;
2991
2992	  if (code == LSHIFTRT || code == ASHIFTRT)
2993	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2994			   code == ASHIFTRT);
2995	  else if (code == ASHIFT)
2996	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2997	  else if (code == ROTATE)
2998	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2999	  else /* code == ROTATERT */
3000	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3001	  break;
3002
3003	default:
3004	  return 0;
3005	}
3006
3007      return immed_double_const (lv, hv, mode);
3008    }
3009
3010  if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3011      && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3012    {
3013      /* Get the integer argument values in two forms:
3014         zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */
3015
3016      arg0 = INTVAL (op0);
3017      arg1 = INTVAL (op1);
3018
3019      if (width < HOST_BITS_PER_WIDE_INT)
3020        {
3021          arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3022          arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3023
3024          arg0s = arg0;
3025          if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3026	    arg0s |= ((HOST_WIDE_INT) (-1) << width);
3027
3028	  arg1s = arg1;
3029	  if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3030	    arg1s |= ((HOST_WIDE_INT) (-1) << width);
3031	}
3032      else
3033	{
3034	  arg0s = arg0;
3035	  arg1s = arg1;
3036	}
3037
3038      /* Compute the value of the arithmetic.  */
3039
3040      switch (code)
3041	{
3042	case PLUS:
3043	  val = arg0s + arg1s;
3044	  break;
3045
3046	case MINUS:
3047	  val = arg0s - arg1s;
3048	  break;
3049
3050	case MULT:
3051	  val = arg0s * arg1s;
3052	  break;
3053
3054	case DIV:
3055	  if (arg1s == 0
3056	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3057		  && arg1s == -1))
3058	    return 0;
3059	  val = arg0s / arg1s;
3060	  break;
3061
3062	case MOD:
3063	  if (arg1s == 0
3064	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3065		  && arg1s == -1))
3066	    return 0;
3067	  val = arg0s % arg1s;
3068	  break;
3069
3070	case UDIV:
3071	  if (arg1 == 0
3072	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3073		  && arg1s == -1))
3074	    return 0;
3075	  val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3076	  break;
3077
3078	case UMOD:
3079	  if (arg1 == 0
3080	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3081		  && arg1s == -1))
3082	    return 0;
3083	  val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3084	  break;
3085
3086	case AND:
3087	  val = arg0 & arg1;
3088	  break;
3089
3090	case IOR:
3091	  val = arg0 | arg1;
3092	  break;
3093
3094	case XOR:
3095	  val = arg0 ^ arg1;
3096	  break;
3097
3098	case LSHIFTRT:
3099	case ASHIFT:
3100	case ASHIFTRT:
3101	  /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3102	     the value is in range.  We can't return any old value for
3103	     out-of-range arguments because either the middle-end (via
3104	     shift_truncation_mask) or the back-end might be relying on
3105	     target-specific knowledge.  Nor can we rely on
3106	     shift_truncation_mask, since the shift might not be part of an
3107	     ashlM3, lshrM3 or ashrM3 instruction.  */
3108	  if (SHIFT_COUNT_TRUNCATED)
3109	    arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3110	  else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3111	    return 0;
3112
3113	  val = (code == ASHIFT
3114		 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3115		 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3116
3117	  /* Sign-extend the result for arithmetic right shifts.  */
3118	  if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3119	    val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3120	  break;
3121
3122	case ROTATERT:
3123	  if (arg1 < 0)
3124	    return 0;
3125
3126	  arg1 %= width;
3127	  val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3128		 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3129	  break;
3130
3131	case ROTATE:
3132	  if (arg1 < 0)
3133	    return 0;
3134
3135	  arg1 %= width;
3136	  val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3137		 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3138	  break;
3139
3140	case COMPARE:
3141	  /* Do nothing here.  */
3142	  return 0;
3143
3144	case SMIN:
3145	  val = arg0s <= arg1s ? arg0s : arg1s;
3146	  break;
3147
3148	case UMIN:
3149	  val = ((unsigned HOST_WIDE_INT) arg0
3150		 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3151	  break;
3152
3153	case SMAX:
3154	  val = arg0s > arg1s ? arg0s : arg1s;
3155	  break;
3156
3157	case UMAX:
3158	  val = ((unsigned HOST_WIDE_INT) arg0
3159		 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3160	  break;
3161
3162	case SS_PLUS:
3163	case US_PLUS:
3164	case SS_MINUS:
3165	case US_MINUS:
3166	case SS_ASHIFT:
3167	  /* ??? There are simplifications that can be done.  */
3168	  return 0;
3169
3170	default:
3171	  gcc_unreachable ();
3172	}
3173
3174      return gen_int_mode (val, mode);
3175    }
3176
3177  return NULL_RTX;
3178}
3179
3180
3181
3182/* Simplify a PLUS or MINUS, at least one of whose operands may be another
3183   PLUS or MINUS.
3184
3185   Rather than test for specific case, we do this by a brute-force method
3186   and do all possible simplifications until no more changes occur.  Then
3187   we rebuild the operation.  */
3188
3189struct simplify_plus_minus_op_data
3190{
3191  rtx op;
3192  short neg;
3193};
3194
3195static int
3196simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3197{
3198  const struct simplify_plus_minus_op_data *d1 = p1;
3199  const struct simplify_plus_minus_op_data *d2 = p2;
3200  int result;
3201
3202  result = (commutative_operand_precedence (d2->op)
3203	    - commutative_operand_precedence (d1->op));
3204  if (result)
3205    return result;
3206
3207  /* Group together equal REGs to do more simplification.  */
3208  if (REG_P (d1->op) && REG_P (d2->op))
3209    return REGNO (d1->op) - REGNO (d2->op);
3210  else
3211    return 0;
3212}
3213
3214static rtx
3215simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3216		     rtx op1)
3217{
3218  struct simplify_plus_minus_op_data ops[8];
3219  rtx result, tem;
3220  int n_ops = 2, input_ops = 2;
3221  int changed, n_constants = 0, canonicalized = 0;
3222  int i, j;
3223
3224  memset (ops, 0, sizeof ops);
3225
3226  /* Set up the two operands and then expand them until nothing has been
3227     changed.  If we run out of room in our array, give up; this should
3228     almost never happen.  */
3229
3230  ops[0].op = op0;
3231  ops[0].neg = 0;
3232  ops[1].op = op1;
3233  ops[1].neg = (code == MINUS);
3234
3235  do
3236    {
3237      changed = 0;
3238
3239      for (i = 0; i < n_ops; i++)
3240	{
3241	  rtx this_op = ops[i].op;
3242	  int this_neg = ops[i].neg;
3243	  enum rtx_code this_code = GET_CODE (this_op);
3244
3245	  switch (this_code)
3246	    {
3247	    case PLUS:
3248	    case MINUS:
3249	      if (n_ops == 7)
3250		return NULL_RTX;
3251
3252	      ops[n_ops].op = XEXP (this_op, 1);
3253	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3254	      n_ops++;
3255
3256	      ops[i].op = XEXP (this_op, 0);
3257	      input_ops++;
3258	      changed = 1;
3259	      canonicalized |= this_neg;
3260	      break;
3261
3262	    case NEG:
3263	      ops[i].op = XEXP (this_op, 0);
3264	      ops[i].neg = ! this_neg;
3265	      changed = 1;
3266	      canonicalized = 1;
3267	      break;
3268
3269	    case CONST:
3270	      if (n_ops < 7
3271		  && GET_CODE (XEXP (this_op, 0)) == PLUS
3272		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3273		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3274		{
3275		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
3276		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3277		  ops[n_ops].neg = this_neg;
3278		  n_ops++;
3279		  changed = 1;
3280	          canonicalized = 1;
3281		}
3282	      break;
3283
3284	    case NOT:
3285	      /* ~a -> (-a - 1) */
3286	      if (n_ops != 7)
3287		{
3288		  ops[n_ops].op = constm1_rtx;
3289		  ops[n_ops++].neg = this_neg;
3290		  ops[i].op = XEXP (this_op, 0);
3291		  ops[i].neg = !this_neg;
3292		  changed = 1;
3293	          canonicalized = 1;
3294		}
3295	      break;
3296
3297	    case CONST_INT:
3298	      n_constants++;
3299	      if (this_neg)
3300		{
3301		  ops[i].op = neg_const_int (mode, this_op);
3302		  ops[i].neg = 0;
3303		  changed = 1;
3304	          canonicalized = 1;
3305		}
3306	      break;
3307
3308	    default:
3309	      break;
3310	    }
3311	}
3312    }
3313  while (changed);
3314
3315  if (n_constants > 1)
3316    canonicalized = 1;
3317
3318  gcc_assert (n_ops >= 2);
3319
3320  /* If we only have two operands, we can avoid the loops.  */
3321  if (n_ops == 2)
3322    {
3323      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3324      rtx lhs, rhs;
3325
3326      /* Get the two operands.  Be careful with the order, especially for
3327	 the cases where code == MINUS.  */
3328      if (ops[0].neg && ops[1].neg)
3329	{
3330	  lhs = gen_rtx_NEG (mode, ops[0].op);
3331	  rhs = ops[1].op;
3332	}
3333      else if (ops[0].neg)
3334	{
3335	  lhs = ops[1].op;
3336	  rhs = ops[0].op;
3337	}
3338      else
3339	{
3340	  lhs = ops[0].op;
3341	  rhs = ops[1].op;
3342	}
3343
3344      return simplify_const_binary_operation (code, mode, lhs, rhs);
3345    }
3346
3347  /* Now simplify each pair of operands until nothing changes.  */
3348  do
3349    {
3350      /* Insertion sort is good enough for an eight-element array.  */
3351      for (i = 1; i < n_ops; i++)
3352        {
3353          struct simplify_plus_minus_op_data save;
3354          j = i - 1;
3355          if (simplify_plus_minus_op_data_cmp (&ops[j], &ops[i]) < 0)
3356	    continue;
3357
3358          canonicalized = 1;
3359          save = ops[i];
3360          do
3361	    ops[j + 1] = ops[j];
3362          while (j-- && simplify_plus_minus_op_data_cmp (&ops[j], &save) > 0);
3363          ops[j + 1] = save;
3364        }
3365
3366      /* This is only useful the first time through.  */
3367      if (!canonicalized)
3368        return NULL_RTX;
3369
3370      changed = 0;
3371      for (i = n_ops - 1; i > 0; i--)
3372	for (j = i - 1; j >= 0; j--)
3373	  {
3374	    rtx lhs = ops[j].op, rhs = ops[i].op;
3375	    int lneg = ops[j].neg, rneg = ops[i].neg;
3376
3377	    if (lhs != 0 && rhs != 0)
3378	      {
3379		enum rtx_code ncode = PLUS;
3380
3381		if (lneg != rneg)
3382		  {
3383		    ncode = MINUS;
3384		    if (lneg)
3385		      tem = lhs, lhs = rhs, rhs = tem;
3386		  }
3387		else if (swap_commutative_operands_p (lhs, rhs))
3388		  tem = lhs, lhs = rhs, rhs = tem;
3389
3390		if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3391		    && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3392		  {
3393		    rtx tem_lhs, tem_rhs;
3394
3395		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3396		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3397		    tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3398
3399		    if (tem && !CONSTANT_P (tem))
3400		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
3401		  }
3402		else
3403		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3404
3405		/* Reject "simplifications" that just wrap the two
3406		   arguments in a CONST.  Failure to do so can result
3407		   in infinite recursion with simplify_binary_operation
3408		   when it calls us to simplify CONST operations.  */
3409		if (tem
3410		    && ! (GET_CODE (tem) == CONST
3411			  && GET_CODE (XEXP (tem, 0)) == ncode
3412			  && XEXP (XEXP (tem, 0), 0) == lhs
3413			  && XEXP (XEXP (tem, 0), 1) == rhs))
3414		  {
3415		    lneg &= rneg;
3416		    if (GET_CODE (tem) == NEG)
3417		      tem = XEXP (tem, 0), lneg = !lneg;
3418		    if (GET_CODE (tem) == CONST_INT && lneg)
3419		      tem = neg_const_int (mode, tem), lneg = 0;
3420
3421		    ops[i].op = tem;
3422		    ops[i].neg = lneg;
3423		    ops[j].op = NULL_RTX;
3424		    changed = 1;
3425		  }
3426	      }
3427	  }
3428
3429      /* Pack all the operands to the lower-numbered entries.  */
3430      for (i = 0, j = 0; j < n_ops; j++)
3431        if (ops[j].op)
3432          {
3433	    ops[i] = ops[j];
3434	    i++;
3435          }
3436      n_ops = i;
3437    }
3438  while (changed);
3439
3440  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
3441  if (n_ops == 2
3442      && GET_CODE (ops[1].op) == CONST_INT
3443      && CONSTANT_P (ops[0].op)
3444      && ops[0].neg)
3445    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3446
3447  /* We suppressed creation of trivial CONST expressions in the
3448     combination loop to avoid recursion.  Create one manually now.
3449     The combination loop should have ensured that there is exactly
3450     one CONST_INT, and the sort will have ensured that it is last
3451     in the array and that any other constant will be next-to-last.  */
3452
3453  if (n_ops > 1
3454      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3455      && CONSTANT_P (ops[n_ops - 2].op))
3456    {
3457      rtx value = ops[n_ops - 1].op;
3458      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3459	value = neg_const_int (mode, value);
3460      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3461      n_ops--;
3462    }
3463
3464  /* Put a non-negated operand first, if possible.  */
3465
3466  for (i = 0; i < n_ops && ops[i].neg; i++)
3467    continue;
3468  if (i == n_ops)
3469    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3470  else if (i != 0)
3471    {
3472      tem = ops[0].op;
3473      ops[0] = ops[i];
3474      ops[i].op = tem;
3475      ops[i].neg = 1;
3476    }
3477
3478  /* Now make the result by performing the requested operations.  */
3479  result = ops[0].op;
3480  for (i = 1; i < n_ops; i++)
3481    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3482			     mode, result, ops[i].op);
3483
3484  return result;
3485}
3486
3487/* Check whether an operand is suitable for calling simplify_plus_minus.  */
3488static bool
3489plus_minus_operand_p (rtx x)
3490{
3491  return GET_CODE (x) == PLUS
3492         || GET_CODE (x) == MINUS
3493	 || (GET_CODE (x) == CONST
3494	     && GET_CODE (XEXP (x, 0)) == PLUS
3495	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3496	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3497}
3498
3499/* Like simplify_binary_operation except used for relational operators.
3500   MODE is the mode of the result. If MODE is VOIDmode, both operands must
3501   not also be VOIDmode.
3502
3503   CMP_MODE specifies in which mode the comparison is done in, so it is
3504   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
3505   the operands or, if both are VOIDmode, the operands are compared in
3506   "infinite precision".  */
3507rtx
3508simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3509			       enum machine_mode cmp_mode, rtx op0, rtx op1)
3510{
3511  rtx tem, trueop0, trueop1;
3512
3513  if (cmp_mode == VOIDmode)
3514    cmp_mode = GET_MODE (op0);
3515  if (cmp_mode == VOIDmode)
3516    cmp_mode = GET_MODE (op1);
3517
3518  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3519  if (tem)
3520    {
3521      if (SCALAR_FLOAT_MODE_P (mode))
3522	{
3523          if (tem == const0_rtx)
3524            return CONST0_RTX (mode);
3525#ifdef FLOAT_STORE_FLAG_VALUE
3526	  {
3527	    REAL_VALUE_TYPE val;
3528	    val = FLOAT_STORE_FLAG_VALUE (mode);
3529	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3530	  }
3531#else
3532	  return NULL_RTX;
3533#endif
3534	}
3535      if (VECTOR_MODE_P (mode))
3536	{
3537	  if (tem == const0_rtx)
3538	    return CONST0_RTX (mode);
3539#ifdef VECTOR_STORE_FLAG_VALUE
3540	  {
3541	    int i, units;
3542	    rtvec v;
3543
3544	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3545	    if (val == NULL_RTX)
3546	      return NULL_RTX;
3547	    if (val == const1_rtx)
3548	      return CONST1_RTX (mode);
3549
3550	    units = GET_MODE_NUNITS (mode);
3551	    v = rtvec_alloc (units);
3552	    for (i = 0; i < units; i++)
3553	      RTVEC_ELT (v, i) = val;
3554	    return gen_rtx_raw_CONST_VECTOR (mode, v);
3555	  }
3556#else
3557	  return NULL_RTX;
3558#endif
3559	}
3560
3561      return tem;
3562    }
3563
3564  /* For the following tests, ensure const0_rtx is op1.  */
3565  if (swap_commutative_operands_p (op0, op1)
3566      || (op0 == const0_rtx && op1 != const0_rtx))
3567    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3568
3569  /* If op0 is a compare, extract the comparison arguments from it.  */
3570  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3571    return simplify_relational_operation (code, mode, VOIDmode,
3572				          XEXP (op0, 0), XEXP (op0, 1));
3573
3574  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3575      || CC0_P (op0))
3576    return NULL_RTX;
3577
3578  trueop0 = avoid_constant_pool_reference (op0);
3579  trueop1 = avoid_constant_pool_reference (op1);
3580  return simplify_relational_operation_1 (code, mode, cmp_mode,
3581		  			  trueop0, trueop1);
3582}
3583
3584/* This part of simplify_relational_operation is only used when CMP_MODE
3585   is not in class MODE_CC (i.e. it is a real comparison).
3586
3587   MODE is the mode of the result, while CMP_MODE specifies in which
3588   mode the comparison is done in, so it is the mode of the operands.  */
3589
3590static rtx
3591simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3592				 enum machine_mode cmp_mode, rtx op0, rtx op1)
3593{
3594  enum rtx_code op0code = GET_CODE (op0);
3595
3596  if (GET_CODE (op1) == CONST_INT)
3597    {
3598      if (INTVAL (op1) == 0 && COMPARISON_P (op0))
3599	{
3600	  /* If op0 is a comparison, extract the comparison arguments
3601	     from it.  */
3602	  if (code == NE)
3603	    {
3604	      if (GET_MODE (op0) == mode)
3605		return simplify_rtx (op0);
3606	      else
3607		return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3608					        XEXP (op0, 0), XEXP (op0, 1));
3609	    }
3610	  else if (code == EQ)
3611	    {
3612	      enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3613	      if (new_code != UNKNOWN)
3614	        return simplify_gen_relational (new_code, mode, VOIDmode,
3615					        XEXP (op0, 0), XEXP (op0, 1));
3616	    }
3617	}
3618    }
3619
3620  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
3621  if ((code == EQ || code == NE)
3622      && (op0code == PLUS || op0code == MINUS)
3623      && CONSTANT_P (op1)
3624      && CONSTANT_P (XEXP (op0, 1))
3625      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3626    {
3627      rtx x = XEXP (op0, 0);
3628      rtx c = XEXP (op0, 1);
3629
3630      c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3631			       cmp_mode, op1, c);
3632      return simplify_gen_relational (code, mode, cmp_mode, x, c);
3633    }
3634
3635  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
3636     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
3637  if (code == NE
3638      && op1 == const0_rtx
3639      && GET_MODE_CLASS (mode) == MODE_INT
3640      && cmp_mode != VOIDmode
3641      /* ??? Work-around BImode bugs in the ia64 backend.  */
3642      && mode != BImode
3643      && cmp_mode != BImode
3644      && nonzero_bits (op0, cmp_mode) == 1
3645      && STORE_FLAG_VALUE == 1)
3646    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3647	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3648	   : lowpart_subreg (mode, op0, cmp_mode);
3649
3650  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
3651  if ((code == EQ || code == NE)
3652      && op1 == const0_rtx
3653      && op0code == XOR)
3654    return simplify_gen_relational (code, mode, cmp_mode,
3655				    XEXP (op0, 0), XEXP (op0, 1));
3656
3657  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
3658  if ((code == EQ || code == NE)
3659      && op0code == XOR
3660      && rtx_equal_p (XEXP (op0, 0), op1)
3661      && !side_effects_p (XEXP (op0, 0)))
3662    return simplify_gen_relational (code, mode, cmp_mode,
3663				    XEXP (op0, 1), const0_rtx);
3664
3665  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
3666  if ((code == EQ || code == NE)
3667      && op0code == XOR
3668      && rtx_equal_p (XEXP (op0, 1), op1)
3669      && !side_effects_p (XEXP (op0, 1)))
3670    return simplify_gen_relational (code, mode, cmp_mode,
3671				    XEXP (op0, 0), const0_rtx);
3672
3673  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
3674  if ((code == EQ || code == NE)
3675      && op0code == XOR
3676      && (GET_CODE (op1) == CONST_INT
3677	  || GET_CODE (op1) == CONST_DOUBLE)
3678      && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3679	  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3680    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3681				    simplify_gen_binary (XOR, cmp_mode,
3682							 XEXP (op0, 1), op1));
3683
3684  return NULL_RTX;
3685}
3686
3687/* Check if the given comparison (done in the given MODE) is actually a
3688   tautology or a contradiction.
3689   If no simplification is possible, this function returns zero.
3690   Otherwise, it returns either const_true_rtx or const0_rtx.  */
3691
3692rtx
3693simplify_const_relational_operation (enum rtx_code code,
3694				     enum machine_mode mode,
3695				     rtx op0, rtx op1)
3696{
3697  int equal, op0lt, op0ltu, op1lt, op1ltu;
3698  rtx tem;
3699  rtx trueop0;
3700  rtx trueop1;
3701
3702  gcc_assert (mode != VOIDmode
3703	      || (GET_MODE (op0) == VOIDmode
3704		  && GET_MODE (op1) == VOIDmode));
3705
3706  /* If op0 is a compare, extract the comparison arguments from it.  */
3707  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3708    {
3709      op1 = XEXP (op0, 1);
3710      op0 = XEXP (op0, 0);
3711
3712      if (GET_MODE (op0) != VOIDmode)
3713	mode = GET_MODE (op0);
3714      else if (GET_MODE (op1) != VOIDmode)
3715	mode = GET_MODE (op1);
3716      else
3717	return 0;
3718    }
3719
3720  /* We can't simplify MODE_CC values since we don't know what the
3721     actual comparison is.  */
3722  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3723    return 0;
3724
3725  /* Make sure the constant is second.  */
3726  if (swap_commutative_operands_p (op0, op1))
3727    {
3728      tem = op0, op0 = op1, op1 = tem;
3729      code = swap_condition (code);
3730    }
3731
3732  trueop0 = avoid_constant_pool_reference (op0);
3733  trueop1 = avoid_constant_pool_reference (op1);
3734
3735  /* For integer comparisons of A and B maybe we can simplify A - B and can
3736     then simplify a comparison of that with zero.  If A and B are both either
3737     a register or a CONST_INT, this can't help; testing for these cases will
3738     prevent infinite recursion here and speed things up.
3739
3740     We can only do this for EQ and NE comparisons as otherwise we may
3741     lose or introduce overflow which we cannot disregard as undefined as
3742     we do not know the signedness of the operation on either the left or
3743     the right hand side of the comparison.  */
3744
3745  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3746      && (code == EQ || code == NE)
3747      && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3748	    && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3749      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3750      /* We cannot do this if tem is a nonzero address.  */
3751      && ! nonzero_address_p (tem))
3752    return simplify_const_relational_operation (signed_condition (code),
3753						mode, tem, const0_rtx);
3754
3755  if (! HONOR_NANS (mode) && code == ORDERED)
3756    return const_true_rtx;
3757
3758  if (! HONOR_NANS (mode) && code == UNORDERED)
3759    return const0_rtx;
3760
3761  /* For modes without NaNs, if the two operands are equal, we know the
3762     result except if they have side-effects.  */
3763  if (! HONOR_NANS (GET_MODE (trueop0))
3764      && rtx_equal_p (trueop0, trueop1)
3765      && ! side_effects_p (trueop0))
3766    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3767
3768  /* If the operands are floating-point constants, see if we can fold
3769     the result.  */
3770  else if (GET_CODE (trueop0) == CONST_DOUBLE
3771	   && GET_CODE (trueop1) == CONST_DOUBLE
3772	   && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3773    {
3774      REAL_VALUE_TYPE d0, d1;
3775
3776      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3777      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3778
3779      /* Comparisons are unordered iff at least one of the values is NaN.  */
3780      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3781	switch (code)
3782	  {
3783	  case UNEQ:
3784	  case UNLT:
3785	  case UNGT:
3786	  case UNLE:
3787	  case UNGE:
3788	  case NE:
3789	  case UNORDERED:
3790	    return const_true_rtx;
3791	  case EQ:
3792	  case LT:
3793	  case GT:
3794	  case LE:
3795	  case GE:
3796	  case LTGT:
3797	  case ORDERED:
3798	    return const0_rtx;
3799	  default:
3800	    return 0;
3801	  }
3802
3803      equal = REAL_VALUES_EQUAL (d0, d1);
3804      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3805      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3806    }
3807
3808  /* Otherwise, see if the operands are both integers.  */
3809  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3810	   && (GET_CODE (trueop0) == CONST_DOUBLE
3811	       || GET_CODE (trueop0) == CONST_INT)
3812	   && (GET_CODE (trueop1) == CONST_DOUBLE
3813	       || GET_CODE (trueop1) == CONST_INT))
3814    {
3815      int width = GET_MODE_BITSIZE (mode);
3816      HOST_WIDE_INT l0s, h0s, l1s, h1s;
3817      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3818
3819      /* Get the two words comprising each integer constant.  */
3820      if (GET_CODE (trueop0) == CONST_DOUBLE)
3821	{
3822	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3823	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3824	}
3825      else
3826	{
3827	  l0u = l0s = INTVAL (trueop0);
3828	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
3829	}
3830
3831      if (GET_CODE (trueop1) == CONST_DOUBLE)
3832	{
3833	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3834	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3835	}
3836      else
3837	{
3838	  l1u = l1s = INTVAL (trueop1);
3839	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
3840	}
3841
3842      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3843	 we have to sign or zero-extend the values.  */
3844      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3845	{
3846	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3847	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3848
3849	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3850	    l0s |= ((HOST_WIDE_INT) (-1) << width);
3851
3852	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3853	    l1s |= ((HOST_WIDE_INT) (-1) << width);
3854	}
3855      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3856	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3857
3858      equal = (h0u == h1u && l0u == l1u);
3859      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3860      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3861      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3862      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3863    }
3864
3865  /* Otherwise, there are some code-specific tests we can make.  */
3866  else
3867    {
3868      /* Optimize comparisons with upper and lower bounds.  */
3869      if (SCALAR_INT_MODE_P (mode)
3870	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3871	{
3872	  rtx mmin, mmax;
3873	  int sign;
3874
3875	  if (code == GEU
3876	      || code == LEU
3877	      || code == GTU
3878	      || code == LTU)
3879	    sign = 0;
3880	  else
3881	    sign = 1;
3882
3883	  get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3884
3885	  tem = NULL_RTX;
3886	  switch (code)
3887	    {
3888	    case GEU:
3889	    case GE:
3890	      /* x >= min is always true.  */
3891	      if (rtx_equal_p (trueop1, mmin))
3892		tem = const_true_rtx;
3893	      else
3894	      break;
3895
3896	    case LEU:
3897	    case LE:
3898	      /* x <= max is always true.  */
3899	      if (rtx_equal_p (trueop1, mmax))
3900		tem = const_true_rtx;
3901	      break;
3902
3903	    case GTU:
3904	    case GT:
3905	      /* x > max is always false.  */
3906	      if (rtx_equal_p (trueop1, mmax))
3907		tem = const0_rtx;
3908	      break;
3909
3910	    case LTU:
3911	    case LT:
3912	      /* x < min is always false.  */
3913	      if (rtx_equal_p (trueop1, mmin))
3914		tem = const0_rtx;
3915	      break;
3916
3917	    default:
3918	      break;
3919	    }
3920	  if (tem == const0_rtx
3921	      || tem == const_true_rtx)
3922	    return tem;
3923	}
3924
3925      switch (code)
3926	{
3927	case EQ:
3928	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
3929	    return const0_rtx;
3930	  break;
3931
3932	case NE:
3933	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
3934	    return const_true_rtx;
3935	  break;
3936
3937	case LT:
3938	  /* Optimize abs(x) < 0.0.  */
3939	  if (trueop1 == CONST0_RTX (mode)
3940	      && !HONOR_SNANS (mode)
3941	      && (!INTEGRAL_MODE_P (mode)
3942		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
3943	    {
3944	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3945						       : trueop0;
3946	      if (GET_CODE (tem) == ABS)
3947		{
3948		  if (INTEGRAL_MODE_P (mode)
3949		      && (issue_strict_overflow_warning
3950			  (WARN_STRICT_OVERFLOW_CONDITIONAL)))
3951		    warning (OPT_Wstrict_overflow,
3952			     ("assuming signed overflow does not occur when "
3953			      "assuming abs (x) < 0 is false"));
3954		  return const0_rtx;
3955		}
3956	    }
3957	  break;
3958
3959	case GE:
3960	  /* Optimize abs(x) >= 0.0.  */
3961	  if (trueop1 == CONST0_RTX (mode)
3962	      && !HONOR_NANS (mode)
3963	      && (!INTEGRAL_MODE_P (mode)
3964		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
3965	    {
3966	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3967						       : trueop0;
3968	      if (GET_CODE (tem) == ABS)
3969		{
3970		  if (INTEGRAL_MODE_P (mode)
3971		      && (issue_strict_overflow_warning
3972			  (WARN_STRICT_OVERFLOW_CONDITIONAL)))
3973		    warning (OPT_Wstrict_overflow,
3974			     ("assuming signed overflow does not occur when "
3975			      "assuming abs (x) >= 0 is true"));
3976		  return const_true_rtx;
3977		}
3978	    }
3979	  break;
3980
3981	case UNGE:
3982	  /* Optimize ! (abs(x) < 0.0).  */
3983	  if (trueop1 == CONST0_RTX (mode))
3984	    {
3985	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3986						       : trueop0;
3987	      if (GET_CODE (tem) == ABS)
3988		return const_true_rtx;
3989	    }
3990	  break;
3991
3992	default:
3993	  break;
3994	}
3995
3996      return 0;
3997    }
3998
3999  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
4000     as appropriate.  */
4001  switch (code)
4002    {
4003    case EQ:
4004    case UNEQ:
4005      return equal ? const_true_rtx : const0_rtx;
4006    case NE:
4007    case LTGT:
4008      return ! equal ? const_true_rtx : const0_rtx;
4009    case LT:
4010    case UNLT:
4011      return op0lt ? const_true_rtx : const0_rtx;
4012    case GT:
4013    case UNGT:
4014      return op1lt ? const_true_rtx : const0_rtx;
4015    case LTU:
4016      return op0ltu ? const_true_rtx : const0_rtx;
4017    case GTU:
4018      return op1ltu ? const_true_rtx : const0_rtx;
4019    case LE:
4020    case UNLE:
4021      return equal || op0lt ? const_true_rtx : const0_rtx;
4022    case GE:
4023    case UNGE:
4024      return equal || op1lt ? const_true_rtx : const0_rtx;
4025    case LEU:
4026      return equal || op0ltu ? const_true_rtx : const0_rtx;
4027    case GEU:
4028      return equal || op1ltu ? const_true_rtx : const0_rtx;
4029    case ORDERED:
4030      return const_true_rtx;
4031    case UNORDERED:
4032      return const0_rtx;
4033    default:
4034      gcc_unreachable ();
4035    }
4036}
4037
4038/* Simplify CODE, an operation with result mode MODE and three operands,
4039   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
4040   a constant.  Return 0 if no simplifications is possible.  */
4041
4042rtx
4043simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4044			    enum machine_mode op0_mode, rtx op0, rtx op1,
4045			    rtx op2)
4046{
4047  unsigned int width = GET_MODE_BITSIZE (mode);
4048
4049  /* VOIDmode means "infinite" precision.  */
4050  if (width == 0)
4051    width = HOST_BITS_PER_WIDE_INT;
4052
4053  switch (code)
4054    {
4055    case SIGN_EXTRACT:
4056    case ZERO_EXTRACT:
4057      if (GET_CODE (op0) == CONST_INT
4058	  && GET_CODE (op1) == CONST_INT
4059	  && GET_CODE (op2) == CONST_INT
4060	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4061	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4062	{
4063	  /* Extracting a bit-field from a constant */
4064	  HOST_WIDE_INT val = INTVAL (op0);
4065
4066	  if (BITS_BIG_ENDIAN)
4067	    val >>= (GET_MODE_BITSIZE (op0_mode)
4068		     - INTVAL (op2) - INTVAL (op1));
4069	  else
4070	    val >>= INTVAL (op2);
4071
4072	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4073	    {
4074	      /* First zero-extend.  */
4075	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4076	      /* If desired, propagate sign bit.  */
4077	      if (code == SIGN_EXTRACT
4078		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4079		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4080	    }
4081
4082	  /* Clear the bits that don't belong in our mode,
4083	     unless they and our sign bit are all one.
4084	     So we get either a reasonable negative value or a reasonable
4085	     unsigned value for this mode.  */
4086	  if (width < HOST_BITS_PER_WIDE_INT
4087	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4088		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
4089	    val &= ((HOST_WIDE_INT) 1 << width) - 1;
4090
4091	  return gen_int_mode (val, mode);
4092	}
4093      break;
4094
4095    case IF_THEN_ELSE:
4096      if (GET_CODE (op0) == CONST_INT)
4097	return op0 != const0_rtx ? op1 : op2;
4098
4099      /* Convert c ? a : a into "a".  */
4100      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4101	return op1;
4102
4103      /* Convert a != b ? a : b into "a".  */
4104      if (GET_CODE (op0) == NE
4105	  && ! side_effects_p (op0)
4106	  && ! HONOR_NANS (mode)
4107	  && ! HONOR_SIGNED_ZEROS (mode)
4108	  && ((rtx_equal_p (XEXP (op0, 0), op1)
4109	       && rtx_equal_p (XEXP (op0, 1), op2))
4110	      || (rtx_equal_p (XEXP (op0, 0), op2)
4111		  && rtx_equal_p (XEXP (op0, 1), op1))))
4112	return op1;
4113
4114      /* Convert a == b ? a : b into "b".  */
4115      if (GET_CODE (op0) == EQ
4116	  && ! side_effects_p (op0)
4117	  && ! HONOR_NANS (mode)
4118	  && ! HONOR_SIGNED_ZEROS (mode)
4119	  && ((rtx_equal_p (XEXP (op0, 0), op1)
4120	       && rtx_equal_p (XEXP (op0, 1), op2))
4121	      || (rtx_equal_p (XEXP (op0, 0), op2)
4122		  && rtx_equal_p (XEXP (op0, 1), op1))))
4123	return op2;
4124
4125      if (COMPARISON_P (op0) && ! side_effects_p (op0))
4126	{
4127	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4128					? GET_MODE (XEXP (op0, 1))
4129					: GET_MODE (XEXP (op0, 0)));
4130	  rtx temp;
4131
4132	  /* Look for happy constants in op1 and op2.  */
4133	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4134	    {
4135	      HOST_WIDE_INT t = INTVAL (op1);
4136	      HOST_WIDE_INT f = INTVAL (op2);
4137
4138	      if (t == STORE_FLAG_VALUE && f == 0)
4139	        code = GET_CODE (op0);
4140	      else if (t == 0 && f == STORE_FLAG_VALUE)
4141		{
4142		  enum rtx_code tmp;
4143		  tmp = reversed_comparison_code (op0, NULL_RTX);
4144		  if (tmp == UNKNOWN)
4145		    break;
4146		  code = tmp;
4147		}
4148	      else
4149		break;
4150
4151	      return simplify_gen_relational (code, mode, cmp_mode,
4152					      XEXP (op0, 0), XEXP (op0, 1));
4153	    }
4154
4155	  if (cmp_mode == VOIDmode)
4156	    cmp_mode = op0_mode;
4157	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4158			  			cmp_mode, XEXP (op0, 0),
4159						XEXP (op0, 1));
4160
4161	  /* See if any simplifications were possible.  */
4162	  if (temp)
4163	    {
4164	      if (GET_CODE (temp) == CONST_INT)
4165		return temp == const0_rtx ? op2 : op1;
4166	      else if (temp)
4167	        return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4168	    }
4169	}
4170      break;
4171
4172    case VEC_MERGE:
4173      gcc_assert (GET_MODE (op0) == mode);
4174      gcc_assert (GET_MODE (op1) == mode);
4175      gcc_assert (VECTOR_MODE_P (mode));
4176      op2 = avoid_constant_pool_reference (op2);
4177      if (GET_CODE (op2) == CONST_INT)
4178	{
4179          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4180	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4181	  int mask = (1 << n_elts) - 1;
4182
4183	  if (!(INTVAL (op2) & mask))
4184	    return op1;
4185	  if ((INTVAL (op2) & mask) == mask)
4186	    return op0;
4187
4188	  op0 = avoid_constant_pool_reference (op0);
4189	  op1 = avoid_constant_pool_reference (op1);
4190	  if (GET_CODE (op0) == CONST_VECTOR
4191	      && GET_CODE (op1) == CONST_VECTOR)
4192	    {
4193	      rtvec v = rtvec_alloc (n_elts);
4194	      unsigned int i;
4195
4196	      for (i = 0; i < n_elts; i++)
4197		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4198				    ? CONST_VECTOR_ELT (op0, i)
4199				    : CONST_VECTOR_ELT (op1, i));
4200	      return gen_rtx_CONST_VECTOR (mode, v);
4201	    }
4202	}
4203      break;
4204
4205    default:
4206      gcc_unreachable ();
4207    }
4208
4209  return 0;
4210}
4211
4212/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4213   returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4214
4215   Works by unpacking OP into a collection of 8-bit values
4216   represented as a little-endian array of 'unsigned char', selecting by BYTE,
4217   and then repacking them again for OUTERMODE.  */
4218
4219static rtx
4220simplify_immed_subreg (enum machine_mode outermode, rtx op,
4221		       enum machine_mode innermode, unsigned int byte)
4222{
4223  /* We support up to 512-bit values (for V8DFmode).  */
4224  enum {
4225    max_bitsize = 512,
4226    value_bit = 8,
4227    value_mask = (1 << value_bit) - 1
4228  };
4229  unsigned char value[max_bitsize / value_bit];
4230  int value_start;
4231  int i;
4232  int elem;
4233
4234  int num_elem;
4235  rtx * elems;
4236  int elem_bitsize;
4237  rtx result_s;
4238  rtvec result_v = NULL;
4239  enum mode_class outer_class;
4240  enum machine_mode outer_submode;
4241
4242  /* Some ports misuse CCmode.  */
4243  if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4244    return op;
4245
4246  /* We have no way to represent a complex constant at the rtl level.  */
4247  if (COMPLEX_MODE_P (outermode))
4248    return NULL_RTX;
4249
4250  /* Unpack the value.  */
4251
4252  if (GET_CODE (op) == CONST_VECTOR)
4253    {
4254      num_elem = CONST_VECTOR_NUNITS (op);
4255      elems = &CONST_VECTOR_ELT (op, 0);
4256      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4257    }
4258  else
4259    {
4260      num_elem = 1;
4261      elems = &op;
4262      elem_bitsize = max_bitsize;
4263    }
4264  /* If this asserts, it is too complicated; reducing value_bit may help.  */
4265  gcc_assert (BITS_PER_UNIT % value_bit == 0);
4266  /* I don't know how to handle endianness of sub-units.  */
4267  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4268
4269  for (elem = 0; elem < num_elem; elem++)
4270    {
4271      unsigned char * vp;
4272      rtx el = elems[elem];
4273
4274      /* Vectors are kept in target memory order.  (This is probably
4275	 a mistake.)  */
4276      {
4277	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4278	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4279			  / BITS_PER_UNIT);
4280	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4281	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4282	unsigned bytele = (subword_byte % UNITS_PER_WORD
4283			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4284	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4285      }
4286
4287      switch (GET_CODE (el))
4288	{
4289	case CONST_INT:
4290	  for (i = 0;
4291	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4292	       i += value_bit)
4293	    *vp++ = INTVAL (el) >> i;
4294	  /* CONST_INTs are always logically sign-extended.  */
4295	  for (; i < elem_bitsize; i += value_bit)
4296	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
4297	  break;
4298
4299	case CONST_DOUBLE:
4300	  if (GET_MODE (el) == VOIDmode)
4301	    {
4302	      /* If this triggers, someone should have generated a
4303		 CONST_INT instead.  */
4304	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4305
4306	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4307		*vp++ = CONST_DOUBLE_LOW (el) >> i;
4308	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4309		{
4310		  *vp++
4311		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4312		  i += value_bit;
4313		}
4314	      /* It shouldn't matter what's done here, so fill it with
4315		 zero.  */
4316	      for (; i < elem_bitsize; i += value_bit)
4317		*vp++ = 0;
4318	    }
4319	  else
4320	    {
4321	      long tmp[max_bitsize / 32];
4322	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4323
4324	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4325	      gcc_assert (bitsize <= elem_bitsize);
4326	      gcc_assert (bitsize % value_bit == 0);
4327
4328	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4329			      GET_MODE (el));
4330
4331	      /* real_to_target produces its result in words affected by
4332		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
4333		 and use WORDS_BIG_ENDIAN instead; see the documentation
4334	         of SUBREG in rtl.texi.  */
4335	      for (i = 0; i < bitsize; i += value_bit)
4336		{
4337		  int ibase;
4338		  if (WORDS_BIG_ENDIAN)
4339		    ibase = bitsize - 1 - i;
4340		  else
4341		    ibase = i;
4342		  *vp++ = tmp[ibase / 32] >> i % 32;
4343		}
4344
4345	      /* It shouldn't matter what's done here, so fill it with
4346		 zero.  */
4347	      for (; i < elem_bitsize; i += value_bit)
4348		*vp++ = 0;
4349	    }
4350	  break;
4351
4352	default:
4353	  gcc_unreachable ();
4354	}
4355    }
4356
4357  /* Now, pick the right byte to start with.  */
4358  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
4359     case is paradoxical SUBREGs, which shouldn't be adjusted since they
4360     will already have offset 0.  */
4361  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4362    {
4363      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4364			- byte);
4365      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4366      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4367      byte = (subword_byte % UNITS_PER_WORD
4368	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4369    }
4370
4371  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
4372     so if it's become negative it will instead be very large.)  */
4373  gcc_assert (byte < GET_MODE_SIZE (innermode));
4374
4375  /* Convert from bytes to chunks of size value_bit.  */
4376  value_start = byte * (BITS_PER_UNIT / value_bit);
4377
4378  /* Re-pack the value.  */
4379
4380  if (VECTOR_MODE_P (outermode))
4381    {
4382      num_elem = GET_MODE_NUNITS (outermode);
4383      result_v = rtvec_alloc (num_elem);
4384      elems = &RTVEC_ELT (result_v, 0);
4385      outer_submode = GET_MODE_INNER (outermode);
4386    }
4387  else
4388    {
4389      num_elem = 1;
4390      elems = &result_s;
4391      outer_submode = outermode;
4392    }
4393
4394  outer_class = GET_MODE_CLASS (outer_submode);
4395  elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4396
4397  gcc_assert (elem_bitsize % value_bit == 0);
4398  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4399
4400  for (elem = 0; elem < num_elem; elem++)
4401    {
4402      unsigned char *vp;
4403
4404      /* Vectors are stored in target memory order.  (This is probably
4405	 a mistake.)  */
4406      {
4407	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4408	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4409			  / BITS_PER_UNIT);
4410	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4411	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4412	unsigned bytele = (subword_byte % UNITS_PER_WORD
4413			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4414	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4415      }
4416
4417      switch (outer_class)
4418	{
4419	case MODE_INT:
4420	case MODE_PARTIAL_INT:
4421	  {
4422	    unsigned HOST_WIDE_INT hi = 0, lo = 0;
4423
4424	    for (i = 0;
4425		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4426		 i += value_bit)
4427	      lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4428	    for (; i < elem_bitsize; i += value_bit)
4429	      hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4430		     << (i - HOST_BITS_PER_WIDE_INT));
4431
4432	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
4433	       know why.  */
4434	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4435	      elems[elem] = gen_int_mode (lo, outer_submode);
4436	    else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4437	      elems[elem] = immed_double_const (lo, hi, outer_submode);
4438	    else
4439	      return NULL_RTX;
4440	  }
4441	  break;
4442
4443	case MODE_FLOAT:
4444	case MODE_DECIMAL_FLOAT:
4445	  {
4446	    REAL_VALUE_TYPE r;
4447	    long tmp[max_bitsize / 32];
4448
4449	    /* real_from_target wants its input in words affected by
4450	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
4451	       and use WORDS_BIG_ENDIAN instead; see the documentation
4452	       of SUBREG in rtl.texi.  */
4453	    for (i = 0; i < max_bitsize / 32; i++)
4454	      tmp[i] = 0;
4455	    for (i = 0; i < elem_bitsize; i += value_bit)
4456	      {
4457		int ibase;
4458		if (WORDS_BIG_ENDIAN)
4459		  ibase = elem_bitsize - 1 - i;
4460		else
4461		  ibase = i;
4462		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4463	      }
4464
4465	    real_from_target (&r, tmp, outer_submode);
4466	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4467	  }
4468	  break;
4469
4470	default:
4471	  gcc_unreachable ();
4472	}
4473    }
4474  if (VECTOR_MODE_P (outermode))
4475    return gen_rtx_CONST_VECTOR (outermode, result_v);
4476  else
4477    return result_s;
4478}
4479
4480/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4481   Return 0 if no simplifications are possible.  */
4482rtx
4483simplify_subreg (enum machine_mode outermode, rtx op,
4484		 enum machine_mode innermode, unsigned int byte)
4485{
4486  /* Little bit of sanity checking.  */
4487  gcc_assert (innermode != VOIDmode);
4488  gcc_assert (outermode != VOIDmode);
4489  gcc_assert (innermode != BLKmode);
4490  gcc_assert (outermode != BLKmode);
4491
4492  gcc_assert (GET_MODE (op) == innermode
4493	      || GET_MODE (op) == VOIDmode);
4494
4495  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4496  gcc_assert (byte < GET_MODE_SIZE (innermode));
4497
4498  if (outermode == innermode && !byte)
4499    return op;
4500
4501  if (GET_CODE (op) == CONST_INT
4502      || GET_CODE (op) == CONST_DOUBLE
4503      || GET_CODE (op) == CONST_VECTOR)
4504    return simplify_immed_subreg (outermode, op, innermode, byte);
4505
4506  /* Changing mode twice with SUBREG => just change it once,
4507     or not at all if changing back op starting mode.  */
4508  if (GET_CODE (op) == SUBREG)
4509    {
4510      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4511      int final_offset = byte + SUBREG_BYTE (op);
4512      rtx newx;
4513
4514      if (outermode == innermostmode
4515	  && byte == 0 && SUBREG_BYTE (op) == 0)
4516	return SUBREG_REG (op);
4517
4518      /* The SUBREG_BYTE represents offset, as if the value were stored
4519	 in memory.  Irritating exception is paradoxical subreg, where
4520	 we define SUBREG_BYTE to be 0.  On big endian machines, this
4521	 value should be negative.  For a moment, undo this exception.  */
4522      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4523	{
4524	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4525	  if (WORDS_BIG_ENDIAN)
4526	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4527	  if (BYTES_BIG_ENDIAN)
4528	    final_offset += difference % UNITS_PER_WORD;
4529	}
4530      if (SUBREG_BYTE (op) == 0
4531	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4532	{
4533	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4534	  if (WORDS_BIG_ENDIAN)
4535	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4536	  if (BYTES_BIG_ENDIAN)
4537	    final_offset += difference % UNITS_PER_WORD;
4538	}
4539
4540      /* See whether resulting subreg will be paradoxical.  */
4541      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4542	{
4543	  /* In nonparadoxical subregs we can't handle negative offsets.  */
4544	  if (final_offset < 0)
4545	    return NULL_RTX;
4546	  /* Bail out in case resulting subreg would be incorrect.  */
4547	  if (final_offset % GET_MODE_SIZE (outermode)
4548	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4549	    return NULL_RTX;
4550	}
4551      else
4552	{
4553	  int offset = 0;
4554	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4555
4556	  /* In paradoxical subreg, see if we are still looking on lower part.
4557	     If so, our SUBREG_BYTE will be 0.  */
4558	  if (WORDS_BIG_ENDIAN)
4559	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4560	  if (BYTES_BIG_ENDIAN)
4561	    offset += difference % UNITS_PER_WORD;
4562	  if (offset == final_offset)
4563	    final_offset = 0;
4564	  else
4565	    return NULL_RTX;
4566	}
4567
4568      /* Recurse for further possible simplifications.  */
4569      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4570			      final_offset);
4571      if (newx)
4572	return newx;
4573      if (validate_subreg (outermode, innermostmode,
4574			   SUBREG_REG (op), final_offset))
4575        return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4576      return NULL_RTX;
4577    }
4578
4579  /* Merge implicit and explicit truncations.  */
4580
4581  if (GET_CODE (op) == TRUNCATE
4582      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4583      && subreg_lowpart_offset (outermode, innermode) == byte)
4584    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4585			       GET_MODE (XEXP (op, 0)));
4586
4587  /* SUBREG of a hard register => just change the register number
4588     and/or mode.  If the hard register is not valid in that mode,
4589     suppress this simplification.  If the hard register is the stack,
4590     frame, or argument pointer, leave this as a SUBREG.  */
4591
4592  if (REG_P (op)
4593      && REGNO (op) < FIRST_PSEUDO_REGISTER
4594#ifdef CANNOT_CHANGE_MODE_CLASS
4595      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4596	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4597	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4598#endif
4599      && ((reload_completed && !frame_pointer_needed)
4600	  || (REGNO (op) != FRAME_POINTER_REGNUM
4601#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4602	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4603#endif
4604	     ))
4605#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4606      && REGNO (op) != ARG_POINTER_REGNUM
4607#endif
4608      && REGNO (op) != STACK_POINTER_REGNUM
4609      && subreg_offset_representable_p (REGNO (op), innermode,
4610					byte, outermode))
4611    {
4612      unsigned int regno = REGNO (op);
4613      unsigned int final_regno
4614	= regno + subreg_regno_offset (regno, innermode, byte, outermode);
4615
4616      /* ??? We do allow it if the current REG is not valid for
4617	 its mode.  This is a kludge to work around how float/complex
4618	 arguments are passed on 32-bit SPARC and should be fixed.  */
4619      if (HARD_REGNO_MODE_OK (final_regno, outermode)
4620	  || ! HARD_REGNO_MODE_OK (regno, innermode))
4621	{
4622	  rtx x;
4623	  int final_offset = byte;
4624
4625	  /* Adjust offset for paradoxical subregs.  */
4626	  if (byte == 0
4627	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4628	    {
4629	      int difference = (GET_MODE_SIZE (innermode)
4630				- GET_MODE_SIZE (outermode));
4631	      if (WORDS_BIG_ENDIAN)
4632		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4633	      if (BYTES_BIG_ENDIAN)
4634		final_offset += difference % UNITS_PER_WORD;
4635	    }
4636
4637	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
4638
4639	  /* Propagate original regno.  We don't have any way to specify
4640	     the offset inside original regno, so do so only for lowpart.
4641	     The information is used only by alias analysis that can not
4642	     grog partial register anyway.  */
4643
4644	  if (subreg_lowpart_offset (outermode, innermode) == byte)
4645	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4646	  return x;
4647	}
4648    }
4649
4650  /* If we have a SUBREG of a register that we are replacing and we are
4651     replacing it with a MEM, make a new MEM and try replacing the
4652     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
4653     or if we would be widening it.  */
4654
4655  if (MEM_P (op)
4656      && ! mode_dependent_address_p (XEXP (op, 0))
4657      /* Allow splitting of volatile memory references in case we don't
4658         have instruction to move the whole thing.  */
4659      && (! MEM_VOLATILE_P (op)
4660	  || ! have_insn_for (SET, innermode))
4661      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4662    return adjust_address_nv (op, outermode, byte);
4663
4664  /* Handle complex values represented as CONCAT
4665     of real and imaginary part.  */
4666  if (GET_CODE (op) == CONCAT)
4667    {
4668      unsigned int inner_size, final_offset;
4669      rtx part, res;
4670
4671      inner_size = GET_MODE_UNIT_SIZE (innermode);
4672      part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
4673      final_offset = byte % inner_size;
4674      if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
4675	return NULL_RTX;
4676
4677      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4678      if (res)
4679	return res;
4680      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4681	return gen_rtx_SUBREG (outermode, part, final_offset);
4682      return NULL_RTX;
4683    }
4684
4685  /* Optimize SUBREG truncations of zero and sign extended values.  */
4686  if ((GET_CODE (op) == ZERO_EXTEND
4687       || GET_CODE (op) == SIGN_EXTEND)
4688      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4689    {
4690      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
4691
4692      /* If we're requesting the lowpart of a zero or sign extension,
4693	 there are three possibilities.  If the outermode is the same
4694	 as the origmode, we can omit both the extension and the subreg.
4695	 If the outermode is not larger than the origmode, we can apply
4696	 the truncation without the extension.  Finally, if the outermode
4697	 is larger than the origmode, but both are integer modes, we
4698	 can just extend to the appropriate mode.  */
4699      if (bitpos == 0)
4700	{
4701	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4702	  if (outermode == origmode)
4703	    return XEXP (op, 0);
4704	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4705	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4706					subreg_lowpart_offset (outermode,
4707							       origmode));
4708	  if (SCALAR_INT_MODE_P (outermode))
4709	    return simplify_gen_unary (GET_CODE (op), outermode,
4710				       XEXP (op, 0), origmode);
4711	}
4712
4713      /* A SUBREG resulting from a zero extension may fold to zero if
4714	 it extracts higher bits that the ZERO_EXTEND's source bits.  */
4715      if (GET_CODE (op) == ZERO_EXTEND
4716	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4717	return CONST0_RTX (outermode);
4718    }
4719
4720  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
4721     to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4722     the outer subreg is effectively a truncation to the original mode.  */
4723  if ((GET_CODE (op) == LSHIFTRT
4724       || GET_CODE (op) == ASHIFTRT)
4725      && SCALAR_INT_MODE_P (outermode)
4726      /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
4727	 to avoid the possibility that an outer LSHIFTRT shifts by more
4728	 than the sign extension's sign_bit_copies and introduces zeros
4729	 into the high bits of the result.  */
4730      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4731      && GET_CODE (XEXP (op, 1)) == CONST_INT
4732      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4733      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4734      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4735      && subreg_lsb_1 (outermode, innermode, byte) == 0)
4736    return simplify_gen_binary (ASHIFTRT, outermode,
4737				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4738
4739  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
4740     to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4741     the outer subreg is effectively a truncation to the original mode.  */
4742  if ((GET_CODE (op) == LSHIFTRT
4743       || GET_CODE (op) == ASHIFTRT)
4744      && SCALAR_INT_MODE_P (outermode)
4745      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4746      && GET_CODE (XEXP (op, 1)) == CONST_INT
4747      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4748      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4749      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4750      && subreg_lsb_1 (outermode, innermode, byte) == 0)
4751    return simplify_gen_binary (LSHIFTRT, outermode,
4752				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4753
4754  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
4755     to (ashift:QI (x:QI) C), where C is a suitable small constant and
4756     the outer subreg is effectively a truncation to the original mode.  */
4757  if (GET_CODE (op) == ASHIFT
4758      && SCALAR_INT_MODE_P (outermode)
4759      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4760      && GET_CODE (XEXP (op, 1)) == CONST_INT
4761      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4762	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4763      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4764      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4765      && subreg_lsb_1 (outermode, innermode, byte) == 0)
4766    return simplify_gen_binary (ASHIFT, outermode,
4767				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4768
4769  return NULL_RTX;
4770}
4771
4772/* Make a SUBREG operation or equivalent if it folds.  */
4773
4774rtx
4775simplify_gen_subreg (enum machine_mode outermode, rtx op,
4776		     enum machine_mode innermode, unsigned int byte)
4777{
4778  rtx newx;
4779
4780  newx = simplify_subreg (outermode, op, innermode, byte);
4781  if (newx)
4782    return newx;
4783
4784  if (GET_CODE (op) == SUBREG
4785      || GET_CODE (op) == CONCAT
4786      || GET_MODE (op) == VOIDmode)
4787    return NULL_RTX;
4788
4789  if (validate_subreg (outermode, innermode, op, byte))
4790    return gen_rtx_SUBREG (outermode, op, byte);
4791
4792  return NULL_RTX;
4793}
4794
4795/* Simplify X, an rtx expression.
4796
4797   Return the simplified expression or NULL if no simplifications
4798   were possible.
4799
4800   This is the preferred entry point into the simplification routines;
4801   however, we still allow passes to call the more specific routines.
4802
4803   Right now GCC has three (yes, three) major bodies of RTL simplification
4804   code that need to be unified.
4805
4806	1. fold_rtx in cse.c.  This code uses various CSE specific
4807	   information to aid in RTL simplification.
4808
4809	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
4810	   it uses combine specific information to aid in RTL
4811	   simplification.
4812
4813	3. The routines in this file.
4814
4815
4816   Long term we want to only have one body of simplification code; to
4817   get to that state I recommend the following steps:
4818
4819	1. Pour over fold_rtx & simplify_rtx and move any simplifications
4820	   which are not pass dependent state into these routines.
4821
4822	2. As code is moved by #1, change fold_rtx & simplify_rtx to
4823	   use this routine whenever possible.
4824
4825	3. Allow for pass dependent state to be provided to these
4826	   routines and add simplifications based on the pass dependent
4827	   state.  Remove code from cse.c & combine.c that becomes
4828	   redundant/dead.
4829
4830    It will take time, but ultimately the compiler will be easier to
4831    maintain and improve.  It's totally silly that when we add a
4832    simplification that it needs to be added to 4 places (3 for RTL
4833    simplification and 1 for tree simplification.  */
4834
4835rtx
4836simplify_rtx (rtx x)
4837{
4838  enum rtx_code code = GET_CODE (x);
4839  enum machine_mode mode = GET_MODE (x);
4840
4841  switch (GET_RTX_CLASS (code))
4842    {
4843    case RTX_UNARY:
4844      return simplify_unary_operation (code, mode,
4845				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4846    case RTX_COMM_ARITH:
4847      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4848	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4849
4850      /* Fall through....  */
4851
4852    case RTX_BIN_ARITH:
4853      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4854
4855    case RTX_TERNARY:
4856    case RTX_BITFIELD_OPS:
4857      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4858					 XEXP (x, 0), XEXP (x, 1),
4859					 XEXP (x, 2));
4860
4861    case RTX_COMPARE:
4862    case RTX_COMM_COMPARE:
4863      return simplify_relational_operation (code, mode,
4864                                            ((GET_MODE (XEXP (x, 0))
4865                                             != VOIDmode)
4866                                            ? GET_MODE (XEXP (x, 0))
4867                                            : GET_MODE (XEXP (x, 1))),
4868                                            XEXP (x, 0),
4869                                            XEXP (x, 1));
4870
4871    case RTX_EXTRA:
4872      if (code == SUBREG)
4873	return simplify_gen_subreg (mode, SUBREG_REG (x),
4874				    GET_MODE (SUBREG_REG (x)),
4875				    SUBREG_BYTE (x));
4876      break;
4877
4878    case RTX_OBJ:
4879      if (code == LO_SUM)
4880	{
4881	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
4882	  if (GET_CODE (XEXP (x, 0)) == HIGH
4883	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
4884	  return XEXP (x, 1);
4885	}
4886      break;
4887
4888    default:
4889      break;
4890    }
4891  return NULL;
4892}
4893