/* Alias analysis for GNU C
   Copyright (C) 1997-2015 Free Software Foundation, Inc.
   Contributed by John Carr (jfc@mit.edu).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "varasm.h"
#include "hashtab.h"
#include "hard-reg-set.h"
#include "function.h"
#include "flags.h"
#include "statistics.h"
#include "real.h"
#include "fixed-value.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "stmt.h"
#include "expr.h"
#include "tm_p.h"
#include "regs.h"
#include "diagnostic-core.h"
#include "cselib.h"
#include "hash-map.h"
#include "langhooks.h"
#include "timevar.h"
#include "dumpfile.h"
#include "target.h"
#include "dominance.h"
#include "cfg.h"
#include "cfganal.h"
#include "predict.h"
#include "basic-block.h"
#include "df.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-ssa.h"
#include "rtl-iter.h"

/* The aliasing API provided here solves related but different problems:

   Say there exists (in C)

   struct X {
     struct Y y1;
     struct Z z2;
   } x1, *px1,  *px2;

   struct Y y2, *py;
   struct Z z2, *pz;


   py = &x1.y1;
   px2 = &x1;

   Consider the four questions:

   Can a store to x1 interfere with px2->y1?
   Can a store to x1 interfere with px2->z2?
   Can a store to x1 change the value pointed to by py?
   Can a store to x1 change the value pointed to by pz?

   The answers to these questions are yes, yes, yes, and maybe.

   The first two questions can be answered with a simple examination
   of the type system.  If structure X contains a field of type Y then
   a store through a pointer to an X can overwrite any field that is
   contained (recursively) in an X (unless we know that px1 != px2).

   The last two questions can be solved in the same way as the first
   two questions but this is too conservative.  The observation is
   that in some cases we can know which (if any) fields are addressed
   and whether those addresses are used in bad ways.  This analysis may
   be language specific.  In C, arbitrary operations may be applied to
   pointers.  However, there is some indication that this may be too
   conservative for some C++ types.

   The pass ipa-type-escape does this analysis for the types whose
   instances do not escape across the compilation boundary.

   Historically in GCC, these two problems were combined and a single
   data structure was used to represent the solution to both.  We now
   have two similar but different data structures.  The data structure
   used to solve the last two questions is similar to the first, but
   does not contain the fields whose addresses are never taken.  For
   types that do escape the compilation unit, the data structures will
   have identical information.
*/
125
126/* The alias sets assigned to MEMs assist the back-end in determining
127   which MEMs can alias which other MEMs.  In general, two MEMs in
128   different alias sets cannot alias each other, with one important
129   exception.  Consider something like:
130
131     struct S { int i; double d; };
132
133   a store to an `S' can alias something of either type `int' or type
134   `double'.  (However, a store to an `int' cannot alias a `double'
135   and vice versa.)  We indicate this via a tree structure that looks
136   like:
137	   struct S
138	    /   \
139	   /     \
140	 |/_     _\|
141	 int    double
142
143   (The arrows are directed and point downwards.)
144    In this situation we say the alias set for `struct S' is the
145   `superset' and that those for `int' and `double' are `subsets'.
146
147   To see whether two alias sets can point to the same memory, we must
148   see if either alias set is a subset of the other. We need not trace
149   past immediate descendants, however, since we propagate all
150   grandchildren up one level.
151
152   Alias set zero is implicitly a superset of all other alias sets.
153   However, this is no actual entry for alias set zero.  It is an
154   error to attempt to explicitly construct a subset of zero.  */
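
/* An illustrative sketch (not executed anywhere in this file): given a
   hypothetical tree S_TYPE for the `struct S' above, the component
   recording done by record_component_aliases amounts to

     alias_set_type s_set = get_alias_set (s_type);
     record_alias_subset (s_set, get_alias_set (integer_type_node));
     record_alias_subset (s_set, get_alias_set (double_type_node));

   after which alias_sets_conflict_p between the `struct S' set and the
   `int' set is nonzero in either argument order, while the `int' and
   `double' sets still do not conflict with each other.  The concrete
   alias set numbers are whatever new_alias_set hands out.  */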

struct alias_set_traits : default_hashmap_traits
{
  template<typename T>
  static bool
  is_empty (T &e)
  {
    return e.m_key == INT_MIN;
  }

  template<typename T>
  static bool
  is_deleted (T &e)
  {
    return e.m_key == (INT_MIN + 1);
  }

  template<typename T> static void mark_empty (T &e) { e.m_key = INT_MIN; }

  template<typename T>
  static void
  mark_deleted (T &e)
  {
    e.m_key = INT_MIN + 1;
  }
};
181
182struct GTY(()) alias_set_entry_d {
183  /* The alias set number, as stored in MEM_ALIAS_SET.  */
184  alias_set_type alias_set;
185
186  /* Nonzero if would have a child of zero: this effectively makes this
187     alias set the same as alias set zero.  */
188  int has_zero_child;
189
190  /* The children of the alias set.  These are not just the immediate
191     children, but, in fact, all descendants.  So, if we have:
192
193       struct T { struct S s; float f; }
194
195     continuing our example above, the children here will be all of
196     `int', `double', `float', and `struct S'.  */
197  hash_map<int, int, alias_set_traits> *children;
198};
199typedef struct alias_set_entry_d *alias_set_entry;

static int rtx_equal_for_memref_p (const_rtx, const_rtx);
static int memrefs_conflict_p (int, rtx, int, rtx, HOST_WIDE_INT);
static void record_set (rtx, const_rtx, void *);
static int base_alias_check (rtx, rtx, rtx, rtx, machine_mode,
			     machine_mode);
static rtx find_base_value (rtx);
static int mems_in_disjoint_alias_sets_p (const_rtx, const_rtx);
static alias_set_entry get_alias_set_entry (alias_set_type);
static tree decl_for_component_ref (tree);
static int write_dependence_p (const_rtx,
			       const_rtx, machine_mode, rtx,
			       bool, bool, bool);

static void memory_modified_1 (rtx, const_rtx, void *);

/* Set up all info needed to perform alias analysis on memory references.  */

/* Returns the size in bytes of the mode of X.  */
#define SIZE_FOR_MODE(X) (GET_MODE_SIZE (GET_MODE (X)))

/* Cap the number of passes we make over the insns propagating alias
   information through set chains.
   ??? 10 is a completely arbitrary choice.  This should be based on the
   maximum loop depth in the CFG, but we do not have this information
   available (even if current_loops _is_ available).  */
#define MAX_ALIAS_LOOP_PASSES 10

/* reg_base_value[N] gives an address to which register N is related.
   If all sets after the first add or subtract to the current value
   or otherwise modify it so it does not point to a different top level
   object, reg_base_value[N] is equal to the address part of the source
   of the first set.

   A base address can be an ADDRESS, SYMBOL_REF, or LABEL_REF.  ADDRESS
   expressions represent three types of base:

     1. incoming arguments.  There is just one ADDRESS to represent all
	arguments, since we do not know at this level whether accesses
	based on different arguments can alias.  The ADDRESS has id 0.

     2. stack_pointer_rtx, frame_pointer_rtx, hard_frame_pointer_rtx
	(if distinct from frame_pointer_rtx) and arg_pointer_rtx.
	Each of these rtxes has a separate ADDRESS associated with it,
	each with a negative id.

	GCC is (and is required to be) precise in which register it
	chooses to access a particular region of stack.  We can therefore
	assume that accesses based on one of these rtxes do not alias
	accesses based on another of these rtxes.

     3. bases that are derived from malloc()ed memory (REG_NOALIAS).
	Each such piece of memory has a separate ADDRESS associated
	with it, each with an id greater than 0.

   Accesses based on one ADDRESS do not alias accesses based on other
   ADDRESSes.  Accesses based on ADDRESSes in groups (2) and (3) do not
   alias globals either; the ADDRESSes have Pmode to indicate this.
   The ADDRESS in group (1) _may_ alias globals; it has VOIDmode to
   indicate this.  */
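
/* For example (an illustrative sketch only), in

     int g;
     int f (int *p) { return *p + g; }

   the load from `*p' has the argument ADDRESS (group 1, VOIDmode) as its
   base value, while the load from `g' has a SYMBOL_REF base.  Because the
   argument ADDRESS may alias globals, base_alias_check cannot tell these
   two accesses apart from their bases alone.  An access based on
   stack_pointer_rtx (group 2) would, in contrast, be disambiguated from
   both of them.  */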

static GTY(()) vec<rtx, va_gc> *reg_base_value;
static rtx *new_reg_base_value;

/* The single VOIDmode ADDRESS that represents all argument bases.
   It has id 0.  */
static GTY(()) rtx arg_base_value;

/* Used to allocate unique ids to each REG_NOALIAS ADDRESS.  */
static int unique_id;

/* We preserve a copy of the old array around to reduce the amount of
   garbage produced.  About 8% of the garbage produced was attributed
   to this array.  */
static GTY((deletable)) vec<rtx, va_gc> *old_reg_base_value;

/* Values of XINT (address, 0) of Pmode ADDRESS rtxes for special
   registers.  */
#define UNIQUE_BASE_VALUE_SP	-1
#define UNIQUE_BASE_VALUE_ARGP	-2
#define UNIQUE_BASE_VALUE_FP	-3
#define UNIQUE_BASE_VALUE_HFP	-4

#define static_reg_base_value \
  (this_target_rtl->x_static_reg_base_value)

#define REG_BASE_VALUE(X)					\
  (REGNO (X) < vec_safe_length (reg_base_value)			\
   ? (*reg_base_value)[REGNO (X)] : 0)

/* Vector indexed by N giving the initial (unchanging) value known for
   pseudo-register N.  This vector is initialized in init_alias_analysis,
   and does not change until end_alias_analysis is called.  */
static GTY(()) vec<rtx, va_gc> *reg_known_value;

/* Vector recording for each reg_known_value whether it is due to a
   REG_EQUIV note.  Future passes (viz., reload) may replace the
   pseudo with the equivalent expression and so we account for the
   dependences that would be introduced if that happens.

   The REG_EQUIV notes created in assign_parms may mention the arg
   pointer, and there are explicit insns in the RTL that modify the
   arg pointer.  Thus we must ensure that such insns don't get
   scheduled across each other because that would invalidate the
   REG_EQUIV notes.  One could argue that the REG_EQUIV notes are
   wrong, but solving the problem in the scheduler will likely give
   better code, so we do it here.  */
static sbitmap reg_known_equiv_p;

/* True when scanning insns from the start of the rtl to the
   NOTE_INSN_FUNCTION_BEG note.  */
static bool copying_arguments;


/* The vector used to store the various alias set entries.  */
static GTY (()) vec<alias_set_entry, va_gc> *alias_sets;

/* Build a decomposed reference object for querying the alias-oracle
   from the MEM rtx and store it in *REF.
   Returns false if MEM is not suitable for the alias-oracle.  */

static bool
ao_ref_from_mem (ao_ref *ref, const_rtx mem)
{
  tree expr = MEM_EXPR (mem);
  tree base;

  if (!expr)
    return false;

  ao_ref_init (ref, expr);

  /* Get the base of the reference and see if we have to reject or
     adjust it.  */
  base = ao_ref_base (ref);
  if (base == NULL_TREE)
    return false;

  /* The tree oracle doesn't like bases that are neither decls
     nor indirect references of SSA names.  */
  if (!(DECL_P (base)
	|| (TREE_CODE (base) == MEM_REF
	    && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
	|| (TREE_CODE (base) == TARGET_MEM_REF
	    && TREE_CODE (TMR_BASE (base)) == SSA_NAME)))
    return false;

  /* If this is a reference based on a partitioned decl replace the
     base with a MEM_REF of the pointer representative we
     created during stack slot partitioning.  */
  if (TREE_CODE (base) == VAR_DECL
      && ! is_global_var (base)
      && cfun->gimple_df->decls_to_pointers != NULL)
    {
      tree *namep = cfun->gimple_df->decls_to_pointers->get (base);
      if (namep)
	ref->base = build_simple_mem_ref (*namep);
    }

  ref->ref_alias_set = MEM_ALIAS_SET (mem);

  /* If MEM_OFFSET or MEM_SIZE are unknown what we got from MEM_EXPR
     is conservative, so trust it.  */
  if (!MEM_OFFSET_KNOWN_P (mem)
      || !MEM_SIZE_KNOWN_P (mem))
    return true;

  /* If MEM_OFFSET/MEM_SIZE get us outside of ref->offset/ref->max_size
     drop ref->ref.  */
  if (MEM_OFFSET (mem) < 0
      || (ref->max_size != -1
	  && ((MEM_OFFSET (mem) + MEM_SIZE (mem)) * BITS_PER_UNIT
	      > ref->max_size)))
    ref->ref = NULL_TREE;

  /* Refine size and offset we got from analyzing MEM_EXPR by using
     MEM_SIZE and MEM_OFFSET.  */

  ref->offset += MEM_OFFSET (mem) * BITS_PER_UNIT;
  ref->size = MEM_SIZE (mem) * BITS_PER_UNIT;

  /* The MEM may extend into adjacent fields, so adjust max_size if
     necessary.  */
  if (ref->max_size != -1
      && ref->size > ref->max_size)
    ref->max_size = ref->size;

  /* If MEM_OFFSET and MEM_SIZE get us outside of the base object of
     the MEM_EXPR punt.  This happens for STRICT_ALIGNMENT targets a lot.  */
  if (MEM_EXPR (mem) != get_spill_slot_decl (false)
      && (ref->offset < 0
	  || (DECL_P (ref->base)
	      && (DECL_SIZE (ref->base) == NULL_TREE
		  || TREE_CODE (DECL_SIZE (ref->base)) != INTEGER_CST
		  || wi::ltu_p (wi::to_offset (DECL_SIZE (ref->base)),
				ref->offset + ref->size)))))
    return false;

  return true;
}
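
/* A small worked example of the refinement above (illustrative numbers,
   assuming BITS_PER_UNIT is 8): if MEM_EXPR describes an access with
   ref->offset == 0 and ref->max_size == 128 bits, and the MEM carries
   MEM_OFFSET == 4 and MEM_SIZE == 8 (bytes), then ref->ref is kept because
   (4 + 8) * BITS_PER_UNIT == 96 does not exceed max_size, and afterwards
   ref->offset is 32 bits and ref->size is 64 bits.  */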

/* Query the alias-oracle on whether the two memory rtx X and MEM may
   alias.  If TBAA_P is set also apply TBAA.  Returns true if the
   two rtxen may alias, false otherwise.  */

static bool
rtx_refs_may_alias_p (const_rtx x, const_rtx mem, bool tbaa_p)
{
  ao_ref ref1, ref2;

  if (!ao_ref_from_mem (&ref1, x)
      || !ao_ref_from_mem (&ref2, mem))
    return true;

  return refs_may_alias_p_1 (&ref1, &ref2,
			     tbaa_p
			     && MEM_ALIAS_SET (x) != 0
			     && MEM_ALIAS_SET (mem) != 0);
}

/* Returns a pointer to the alias set entry for ALIAS_SET, if there is
   such an entry, or NULL otherwise.  */

static inline alias_set_entry
get_alias_set_entry (alias_set_type alias_set)
{
  return (*alias_sets)[alias_set];
}

/* Returns nonzero if the alias sets for MEM1 and MEM2 are such that
   the two MEMs cannot alias each other.  */

static inline int
mems_in_disjoint_alias_sets_p (const_rtx mem1, const_rtx mem2)
{
  return (flag_strict_aliasing
	  && ! alias_sets_conflict_p (MEM_ALIAS_SET (mem1),
				      MEM_ALIAS_SET (mem2)));
}

/* Return true if the first alias set is a subset of the second.  */

bool
alias_set_subset_of (alias_set_type set1, alias_set_type set2)
{
  alias_set_entry ase;

  /* Everything is a subset of the "aliases everything" set.  */
  if (set2 == 0)
    return true;

  /* Otherwise, check if set1 is a subset of set2.  */
  ase = get_alias_set_entry (set2);
  if (ase != 0
      && (ase->has_zero_child
	  || ase->children->get (set1)))
    return true;
  return false;
}

/* Return 1 if the two specified alias sets may conflict.  */

int
alias_sets_conflict_p (alias_set_type set1, alias_set_type set2)
{
  alias_set_entry ase;

  /* The easy case.  */
  if (alias_sets_must_conflict_p (set1, set2))
    return 1;

  /* See if the first alias set is a subset of the second.  */
  ase = get_alias_set_entry (set1);
  if (ase != 0
      && (ase->has_zero_child
	  || ase->children->get (set2)))
    return 1;

  /* Now do the same, but with the alias sets reversed.  */
  ase = get_alias_set_entry (set2);
  if (ase != 0
      && (ase->has_zero_child
	  || ase->children->get (set1)))
    return 1;

  /* The two alias sets are distinct and neither one is the
     child of the other.  Therefore, they cannot conflict.  */
  return 0;
}

/* Return 1 if the two specified alias sets will always conflict.  */

int
alias_sets_must_conflict_p (alias_set_type set1, alias_set_type set2)
{
  if (set1 == 0 || set2 == 0 || set1 == set2)
    return 1;

  return 0;
}
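
/* To make the asymmetry between the two predicates concrete (a sketch;
   the concrete set numbers are arbitrary): with `struct S { int i;
   double d; }' from the overview comment,

     alias_sets_conflict_p (S-set, int-set)       returns 1  (subset)
     alias_sets_must_conflict_p (S-set, int-set)  returns 0  (distinct,
							      both nonzero)
     alias_sets_must_conflict_p (0, int-set)      returns 1  (set zero
							      conflicts with
							      everything)

   so callers that need a guarantee of conflict, such as
   objects_must_conflict_p below, must use the _must_ variant.  */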

/* Return 1 if any MEM object of type T1 will always conflict (using the
   dependency routines in this file) with any MEM object of type T2.
   This is used when allocating temporary storage.  If T1 and/or T2 are
   NULL_TREE, it means we know nothing about the storage.  */

int
objects_must_conflict_p (tree t1, tree t2)
{
  alias_set_type set1, set2;

  /* If neither has a type specified, we don't know if they'll conflict
     because we may be using them to store objects of various types, for
     example the argument and local variables areas of inlined functions.  */
  if (t1 == 0 && t2 == 0)
    return 0;

  /* If they are the same type, they must conflict.  */
  if (t1 == t2
      /* Likewise if both are volatile.  */
      || (t1 != 0 && TYPE_VOLATILE (t1) && t2 != 0 && TYPE_VOLATILE (t2)))
    return 1;

  set1 = t1 ? get_alias_set (t1) : 0;
  set2 = t2 ? get_alias_set (t2) : 0;

  /* We can't use alias_sets_conflict_p because we must make sure
     that every subtype of t1 will conflict with every subtype of
     t2 for which a pair of subobjects of these respective subtypes
     overlaps on the stack.  */
  return alias_sets_must_conflict_p (set1, set2);
}

/* Return the outermost parent of component present in the chain of
   component references handled by get_inner_reference in T with the
   following property:
     - the component is non-addressable, or
     - the parent has alias set zero,
   or NULL_TREE if no such parent exists.  In the former cases, the alias
   set of this parent is the alias set that must be used for T itself.  */

tree
component_uses_parent_alias_set_from (const_tree t)
{
  const_tree found = NULL_TREE;

  while (handled_component_p (t))
    {
      switch (TREE_CODE (t))
	{
	case COMPONENT_REF:
	  if (DECL_NONADDRESSABLE_P (TREE_OPERAND (t, 1)))
	    found = t;
	  break;

	case ARRAY_REF:
	case ARRAY_RANGE_REF:
	  if (TYPE_NONALIASED_COMPONENT (TREE_TYPE (TREE_OPERAND (t, 0))))
	    found = t;
	  break;

	case REALPART_EXPR:
	case IMAGPART_EXPR:
	  break;

	case BIT_FIELD_REF:
	case VIEW_CONVERT_EXPR:
	  /* Bitfields and casts are never addressable.  */
	  found = t;
	  break;

	default:
	  gcc_unreachable ();
	}

      if (get_alias_set (TREE_TYPE (TREE_OPERAND (t, 0))) == 0)
	found = t;

      t = TREE_OPERAND (t, 0);
    }

  if (found)
    return TREE_OPERAND (found, 0);

  return NULL_TREE;
}
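
/* For instance (illustrative only): for a reference such as
   VIEW_CONVERT_EXPR<float>(i), where `i' is an `int' variable, the
   VIEW_CONVERT_EXPR itself is recorded as FOUND, so the function returns
   the tree for `i'; the access is therefore given the alias set of `int'
   (the parent) rather than that of `float'.  */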


/* Return whether the pointer-type T effective for aliasing may
   access everything and thus the reference has to be assigned
   alias-set zero.  */

static bool
ref_all_alias_ptr_type_p (const_tree t)
{
  return (TREE_CODE (TREE_TYPE (t)) == VOID_TYPE
	  || TYPE_REF_CAN_ALIAS_ALL (t));
}

/* Return the alias set for the memory pointed to by T, which may be
   either a type or an expression.  Return -1 if there is nothing
   special about dereferencing T.  */

static alias_set_type
get_deref_alias_set_1 (tree t)
{
  /* All we care about is the type.  */
  if (! TYPE_P (t))
    t = TREE_TYPE (t);

  /* If we have an INDIRECT_REF via a void pointer, we don't
     know anything about what that might alias.  Likewise if the
     pointer is marked that way.  */
  if (ref_all_alias_ptr_type_p (t))
    return 0;

  return -1;
}

/* Return the alias set for the memory pointed to by T, which may be
   either a type or an expression.  */

alias_set_type
get_deref_alias_set (tree t)
{
  /* If we're not doing any alias analysis, just assume everything
     aliases everything else.  */
  if (!flag_strict_aliasing)
    return 0;

  alias_set_type set = get_deref_alias_set_1 (t);

  /* Fall back to the alias-set of the pointed-to type.  */
  if (set == -1)
    {
      if (! TYPE_P (t))
	t = TREE_TYPE (t);
      set = get_alias_set (TREE_TYPE (t));
    }

  return set;
}
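
/* Illustrative examples (a sketch; concrete set numbers vary):

     get_deref_alias_set (build_pointer_type (integer_type_node))
       yields the alias set of `int', via the fallback above;
     get_deref_alias_set (ptr_type_node)
       yields 0, since dereferencing `void *' may access anything.

   With -fno-strict-aliasing both calls simply return 0.  */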

/* Return the pointer-type relevant for TBAA purposes from the
   memory reference tree *T or NULL_TREE in which case *T is
   adjusted to point to the outermost component reference that
   can be used for assigning an alias set.  */

static tree
reference_alias_ptr_type_1 (tree *t)
{
  tree inner;

  /* Get the base object of the reference.  */
  inner = *t;
  while (handled_component_p (inner))
    {
      /* If there is a VIEW_CONVERT_EXPR in the chain we cannot use
	 the type of any component references that wrap it to
	 determine the alias-set.  */
      if (TREE_CODE (inner) == VIEW_CONVERT_EXPR)
	*t = TREE_OPERAND (inner, 0);
      inner = TREE_OPERAND (inner, 0);
    }

  /* Handle pointer dereferences here, they can override the
     alias-set.  */
  if (INDIRECT_REF_P (inner)
      && ref_all_alias_ptr_type_p (TREE_TYPE (TREE_OPERAND (inner, 0))))
    return TREE_TYPE (TREE_OPERAND (inner, 0));
  else if (TREE_CODE (inner) == TARGET_MEM_REF)
    return TREE_TYPE (TMR_OFFSET (inner));
  else if (TREE_CODE (inner) == MEM_REF
	   && ref_all_alias_ptr_type_p (TREE_TYPE (TREE_OPERAND (inner, 1))))
    return TREE_TYPE (TREE_OPERAND (inner, 1));

  /* If the innermost reference is a MEM_REF that has a
     conversion embedded treat it like a VIEW_CONVERT_EXPR above,
     using the memory access type for determining the alias-set.  */
  if (TREE_CODE (inner) == MEM_REF
      && (TYPE_MAIN_VARIANT (TREE_TYPE (inner))
	  != TYPE_MAIN_VARIANT
	       (TREE_TYPE (TREE_TYPE (TREE_OPERAND (inner, 1))))))
    return TREE_TYPE (TREE_OPERAND (inner, 1));

  /* Otherwise, pick up the outermost object that we could have
     a pointer to.  */
  tree tem = component_uses_parent_alias_set_from (*t);
  if (tem)
    *t = tem;

  return NULL_TREE;
}

/* Return the pointer-type relevant for TBAA purposes from the
   gimple memory reference tree T.  This is the type to be used for
   the offset operand of MEM_REF or TARGET_MEM_REF replacements of T
   and guarantees that get_alias_set will return the same alias
   set for T and the replacement.  */

tree
reference_alias_ptr_type (tree t)
{
  tree ptype = reference_alias_ptr_type_1 (&t);
  /* If there is a given pointer type for aliasing purposes, return it.  */
  if (ptype != NULL_TREE)
    return ptype;

  /* Otherwise build one from the outermost component reference we
     may use.  */
  if (TREE_CODE (t) == MEM_REF
      || TREE_CODE (t) == TARGET_MEM_REF)
    return TREE_TYPE (TREE_OPERAND (t, 1));
  else
    return build_pointer_type (TYPE_MAIN_VARIANT (TREE_TYPE (t)));
}

/* Return whether the pointer-types T1 and T2 used to determine
   two alias sets of two references will yield the same answer
   from get_deref_alias_set.  */

bool
alias_ptr_types_compatible_p (tree t1, tree t2)
{
  if (TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2))
    return true;

  if (ref_all_alias_ptr_type_p (t1)
      || ref_all_alias_ptr_type_p (t2))
    return false;

  return (TYPE_MAIN_VARIANT (TREE_TYPE (t1))
	  == TYPE_MAIN_VARIANT (TREE_TYPE (t2)));
}

/* Return the alias set for T, which may be either a type or an
   expression.  Call language-specific routine for help, if needed.  */

alias_set_type
get_alias_set (tree t)
{
  alias_set_type set;

  /* If we're not doing any alias analysis, just assume everything
     aliases everything else.  Also return 0 if this or its type is
     an error.  */
  if (! flag_strict_aliasing || t == error_mark_node
      || (! TYPE_P (t)
	  && (TREE_TYPE (t) == 0 || TREE_TYPE (t) == error_mark_node)))
    return 0;

  /* We can be passed either an expression or a type.  This and the
     language-specific routine may make mutually-recursive calls to each other
     to figure out what to do.  At each juncture, we see if this is a tree
     that the language may need to handle specially.  First handle things that
     aren't types.  */
  if (! TYPE_P (t))
    {
      /* Give the language a chance to do something with this tree
	 before we look at it.  */
      STRIP_NOPS (t);
      set = lang_hooks.get_alias_set (t);
      if (set != -1)
	return set;

      /* Get the alias pointer-type to use or the outermost object
         that we could have a pointer to.  */
      tree ptype = reference_alias_ptr_type_1 (&t);
      if (ptype != NULL)
	return get_deref_alias_set (ptype);

      /* If we've already determined the alias set for a decl, just return
	 it.  This is necessary for C++ anonymous unions, whose component
	 variables don't look like union members (boo!).  */
      if (TREE_CODE (t) == VAR_DECL
	  && DECL_RTL_SET_P (t) && MEM_P (DECL_RTL (t)))
	return MEM_ALIAS_SET (DECL_RTL (t));

      /* Now all we care about is the type.  */
      t = TREE_TYPE (t);
    }

  /* Variant qualifiers don't affect the alias set, so get the main
     variant.  */
  t = TYPE_MAIN_VARIANT (t);

  /* Always use the canonical type as well.  If this is a type that
     requires structural comparisons to identify compatible types
     use alias set zero.  */
  if (TYPE_STRUCTURAL_EQUALITY_P (t))
    {
      /* Allow the language to specify another alias set for this
	 type.  */
      set = lang_hooks.get_alias_set (t);
      if (set != -1)
	return set;
      return 0;
    }

  t = TYPE_CANONICAL (t);

  /* The canonical type should not require structural equality checks.  */
  gcc_checking_assert (!TYPE_STRUCTURAL_EQUALITY_P (t));

  /* If this is a type with a known alias set, return it.  */
  if (TYPE_ALIAS_SET_KNOWN_P (t))
    return TYPE_ALIAS_SET (t);

  /* We don't want to set TYPE_ALIAS_SET for incomplete types.  */
  if (!COMPLETE_TYPE_P (t))
    {
      /* For arrays with unknown size the conservative answer is the
	 alias set of the element type.  */
      if (TREE_CODE (t) == ARRAY_TYPE)
	return get_alias_set (TREE_TYPE (t));

      /* But return zero as a conservative answer for incomplete types.  */
      return 0;
    }

  /* See if the language has special handling for this type.  */
  set = lang_hooks.get_alias_set (t);
  if (set != -1)
    return set;

  /* There are no objects of FUNCTION_TYPE, so there's no point in
     using up an alias set for them.  (There are, of course, pointers
     and references to functions, but that's different.)  */
  else if (TREE_CODE (t) == FUNCTION_TYPE || TREE_CODE (t) == METHOD_TYPE)
    set = 0;

  /* Unless the language specifies otherwise, let vector types alias
     their components.  This avoids some nasty type punning issues in
     normal usage.  And indeed lets vectors be treated more like an
     array slice.  */
  else if (TREE_CODE (t) == VECTOR_TYPE)
    set = get_alias_set (TREE_TYPE (t));

  /* Unless the language specifies otherwise, treat array types the
     same as their components.  This avoids the asymmetry we get
     through recording the components.  Consider accessing a
     character(kind=1) through a reference to a character(kind=1)[1:1].
     Or consider if we want to assign integer(kind=4)[0:D.1387] and
     integer(kind=4)[4] the same alias set or not.
     Just be pragmatic here and make sure the array and its element
     type get the same alias set assigned.  */
  else if (TREE_CODE (t) == ARRAY_TYPE && !TYPE_NONALIASED_COMPONENT (t))
    set = get_alias_set (TREE_TYPE (t));

  /* From the former common C and C++ langhook implementation:

     Unfortunately, there is no canonical form of a pointer type.
     In particular, if we have `typedef int I', then `int *', and
     `I *' are different types.  So, we have to pick a canonical
     representative.  We do this below.

     Technically, this approach is actually more conservative than
     it needs to be.  In particular, `const int *' and `int *'
     should be in different alias sets, according to the C and C++
     standard, since their types are not the same, and so,
     technically, an `int **' and `const int **' cannot point at
     the same thing.

     But, the standard is wrong.  In particular, this code is
     legal C++:

     int *ip;
     int **ipp = &ip;
     const int* const* cipp = ipp;
     And, it doesn't make sense for that to be legal unless you
     can dereference IPP and CIPP.  So, we ignore cv-qualifiers on
     the pointed-to types.  This issue has been reported to the
     C++ committee.

     In addition to the above canonicalization issue, with LTO
     we should also canonicalize `T (*)[]' to `T *' avoiding
     alias issues with pointer-to element types and pointer-to
     array types.

     Likewise we need to deal with the situation of incomplete
     pointed-to types and make `*(struct X **)&a' and
     `*(struct X {} **)&a' alias.  Otherwise we will have to
     guarantee that all pointer-to incomplete type variants
     will be replaced by pointer-to complete type variants if
     they are available.

     With LTO the convenient situation of using `void *' to
     access and store any pointer type will also become
     more apparent (and `void *' is just another pointer-to
     incomplete type).  Assigning alias-set zero to `void *'
     and all pointer-to incomplete types is not an appealing
     solution.  Assigning an effective alias-set zero only
     affecting pointers might be - by recording proper subset
     relationships of all pointer alias-sets.

     Pointer-to function types are another grey area which
     needs caution.  Globbing them all into one alias-set
     or the above effective zero set would work.

     For now just assign the same alias-set to all pointers.
     That's simple and avoids all the above problems.  */
  else if (POINTER_TYPE_P (t)
	   && t != ptr_type_node)
    set = get_alias_set (ptr_type_node);

  /* Otherwise make a new alias set for this type.  */
  else
    {
      /* Each canonical type gets its own alias set, so canonical types
	 shouldn't form a tree.  It doesn't really matter for types
	 we handle specially above, so only check it where it possibly
	 would result in a bogus alias set.  */
      gcc_checking_assert (TYPE_CANONICAL (t) == t);

      set = new_alias_set ();
    }

  TYPE_ALIAS_SET (t) = set;

  /* If this is an aggregate type or a complex type, we must record any
     component aliasing information.  */
  if (AGGREGATE_TYPE_P (t) || TREE_CODE (t) == COMPLEX_TYPE)
    record_component_aliases (t);

  return set;
}
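
/* A consequence of the pointer handling above, shown on a small example
   (illustrative only): objects of type `int *' and `long *' share one
   alias set, so in

     void f (int **ipp, long **lpp) { *ipp = (int *) *lpp; }

   the load through `lpp' and the store through `ipp' are assumed to
   conflict even though the pointed-to pointer types differ.  Only the
   accesses to the pointers themselves are affected; an access to what
   `*ipp' points to still uses the alias set of `int'.  */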

/* Return a brand-new alias set.  */

alias_set_type
new_alias_set (void)
{
  if (flag_strict_aliasing)
    {
      if (alias_sets == 0)
	vec_safe_push (alias_sets, (alias_set_entry) 0);
      vec_safe_push (alias_sets, (alias_set_entry) 0);
      return alias_sets->length () - 1;
    }
  else
    return 0;
}

/* Indicate that things in SUBSET can alias things in SUPERSET, but that
   not everything that aliases SUPERSET also aliases SUBSET.  For example,
   in C, a store to an `int' can alias a load of a structure containing an
   `int', and vice versa.  But it can't alias a load of a 'double' member
   of the same structure.  Here, the structure would be the SUPERSET and
   `int' the SUBSET.  This relationship is also described in the comment at
   the beginning of this file.

   This function should be called only once per SUPERSET/SUBSET pair.

   It is illegal for SUPERSET to be zero; everything is implicitly a
   subset of alias set zero.  */

void
record_alias_subset (alias_set_type superset, alias_set_type subset)
{
  alias_set_entry superset_entry;
  alias_set_entry subset_entry;

  /* It is possible in complex type situations for both sets to be the same,
     in which case we can ignore this operation.  */
  if (superset == subset)
    return;

  gcc_assert (superset);

  superset_entry = get_alias_set_entry (superset);
  if (superset_entry == 0)
    {
      /* Create an entry for the SUPERSET, so that we have a place to
	 attach the SUBSET.  */
      superset_entry = ggc_cleared_alloc<alias_set_entry_d> ();
      superset_entry->alias_set = superset;
      superset_entry->children
	= hash_map<int, int, alias_set_traits>::create_ggc (64);
      superset_entry->has_zero_child = 0;
      (*alias_sets)[superset] = superset_entry;
    }

  if (subset == 0)
    superset_entry->has_zero_child = 1;
  else
    {
      subset_entry = get_alias_set_entry (subset);
      /* If there is an entry for the subset, enter all of its children
	 (if they are not already present) as children of the SUPERSET.  */
      if (subset_entry)
	{
	  if (subset_entry->has_zero_child)
	    superset_entry->has_zero_child = 1;

	  hash_map<int, int, alias_set_traits>::iterator iter
	    = subset_entry->children->begin ();
	  for (; iter != subset_entry->children->end (); ++iter)
	    superset_entry->children->put ((*iter).first, (*iter).second);
	}

      /* Enter the SUBSET itself as a child of the SUPERSET.  */
      superset_entry->children->put (subset, 0);
    }
}

/* Record that component types of TYPE, if any, are part of that type for
   aliasing purposes.  For record types, we only record component types
   for fields that are not marked non-addressable.  For array types, we
   only record the component type if it is not marked non-aliased.  */

void
record_component_aliases (tree type)
{
  alias_set_type superset = get_alias_set (type);
  tree field;

  if (superset == 0)
    return;

  switch (TREE_CODE (type))
    {
    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      for (field = TYPE_FIELDS (type); field != 0; field = DECL_CHAIN (field))
	if (TREE_CODE (field) == FIELD_DECL && !DECL_NONADDRESSABLE_P (field))
	  record_alias_subset (superset, get_alias_set (TREE_TYPE (field)));
      break;

    case COMPLEX_TYPE:
      record_alias_subset (superset, get_alias_set (TREE_TYPE (type)));
      break;

    /* VECTOR_TYPE and ARRAY_TYPE share the alias set with their
       element type.  */

    default:
      break;
    }
}
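
/* Continuing the example from the head of the file (a sketch only): for

     struct S { int i; double d; };
     struct T { struct S s; float f; };

   record_component_aliases on `struct T' records `struct S' and `float'
   as children, and because record_alias_subset copies a subset's children
   into the superset, the entry for `struct T' ends up with `struct S',
   `int', `double' and `float' as children, exactly as described above the
   definition of alias_set_entry_d.  */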

/* Allocate an alias set for use in storing and reading from the varargs
   spill area.  */

static GTY(()) alias_set_type varargs_set = -1;

alias_set_type
get_varargs_alias_set (void)
{
#if 1
  /* We now lower VA_ARG_EXPR, and there's currently no way to attach the
     varargs alias set to an INDIRECT_REF (FIXME!), so we can't
     consistently use the varargs alias set for loads from the varargs
     area.  So don't use it anywhere.  */
  return 0;
#else
  if (varargs_set == -1)
    varargs_set = new_alias_set ();

  return varargs_set;
#endif
}

/* Likewise, but used for the fixed portions of the frame, e.g., register
   save areas.  */

static GTY(()) alias_set_type frame_set = -1;

alias_set_type
get_frame_alias_set (void)
{
  if (frame_set == -1)
    frame_set = new_alias_set ();

  return frame_set;
}

/* Create a new, unique base with id ID.  */

static rtx
unique_base_value (HOST_WIDE_INT id)
{
  return gen_rtx_ADDRESS (Pmode, id);
}

/* Return true if accesses based on any other base value cannot alias
   those based on X.  */

static bool
unique_base_value_p (rtx x)
{
  return GET_CODE (x) == ADDRESS && GET_MODE (x) == Pmode;
}

/* Return true if X is known to be a base value.  */

static bool
known_base_value_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case LABEL_REF:
    case SYMBOL_REF:
      return true;

    case ADDRESS:
      /* Arguments may or may not be bases; we don't know for sure.  */
      return GET_MODE (x) != VOIDmode;

    default:
      return false;
    }
}

/* Inside SRC, the source of a SET, find a base address.  */

static rtx
find_base_value (rtx src)
{
  unsigned int regno;

#if defined (FIND_BASE_TERM)
  /* Try machine-dependent ways to find the base term.  */
  src = FIND_BASE_TERM (src);
#endif

  switch (GET_CODE (src))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return src;

    case REG:
      regno = REGNO (src);
      /* At the start of a function, argument registers have known base
	 values which may be lost later.  Returning an ADDRESS
	 expression here allows optimization based on argument values
	 even when the argument registers are used for other purposes.  */
      if (regno < FIRST_PSEUDO_REGISTER && copying_arguments)
	return new_reg_base_value[regno];

      /* If a pseudo has a known base value, return it.  Do not do this
	 for non-fixed hard regs since it can result in a circular
	 dependency chain for registers which have values at function entry.

	 The test above is not sufficient because the scheduler may move
	 a copy out of an arg reg past the NOTE_INSN_FUNCTION_BEGIN.  */
      if ((regno >= FIRST_PSEUDO_REGISTER || fixed_regs[regno])
	  && regno < vec_safe_length (reg_base_value))
	{
	  /* If we're inside init_alias_analysis, use new_reg_base_value
	     to reduce the number of relaxation iterations.  */
	  if (new_reg_base_value && new_reg_base_value[regno]
	      && DF_REG_DEF_COUNT (regno) == 1)
	    return new_reg_base_value[regno];

	  if ((*reg_base_value)[regno])
	    return (*reg_base_value)[regno];
	}

      return 0;

    case MEM:
      /* Check for an argument passed in memory.  Only record in the
	 copying-arguments block; it is too hard to track changes
	 otherwise.  */
      if (copying_arguments
	  && (XEXP (src, 0) == arg_pointer_rtx
	      || (GET_CODE (XEXP (src, 0)) == PLUS
		  && XEXP (XEXP (src, 0), 0) == arg_pointer_rtx)))
	return arg_base_value;
      return 0;

    case CONST:
      src = XEXP (src, 0);
      if (GET_CODE (src) != PLUS && GET_CODE (src) != MINUS)
	break;

      /* ... fall through ...  */

    case PLUS:
    case MINUS:
      {
	rtx temp, src_0 = XEXP (src, 0), src_1 = XEXP (src, 1);

	/* If either operand is a REG that is a known pointer, then it
	   is the base.  */
	if (REG_P (src_0) && REG_POINTER (src_0))
	  return find_base_value (src_0);
	if (REG_P (src_1) && REG_POINTER (src_1))
	  return find_base_value (src_1);

	/* If either operand is a REG, then see if we already have
	   a known value for it.  */
	if (REG_P (src_0))
	  {
	    temp = find_base_value (src_0);
	    if (temp != 0)
	      src_0 = temp;
	  }

	if (REG_P (src_1))
	  {
	    temp = find_base_value (src_1);
	    if (temp != 0)
	      src_1 = temp;
	  }

	/* If either base is a named object or a special address
	   (like an argument or stack reference), then use it for the
	   base term.  */
	if (src_0 != 0 && known_base_value_p (src_0))
	  return src_0;

	if (src_1 != 0 && known_base_value_p (src_1))
	  return src_1;

	/* Guess which operand is the base address:
	   If either operand is a symbol, then it is the base.  If
	   either operand is a CONST_INT, then the other is the base.  */
	if (CONST_INT_P (src_1) || CONSTANT_P (src_0))
	  return find_base_value (src_0);
	else if (CONST_INT_P (src_0) || CONSTANT_P (src_1))
	  return find_base_value (src_1);

	return 0;
      }

    case LO_SUM:
      /* The standard form is (lo_sum reg sym) so look only at the
	 second operand.  */
      return find_base_value (XEXP (src, 1));

    case AND:
      /* If the second operand is constant set the base
	 address to the first operand.  */
      if (CONST_INT_P (XEXP (src, 1)) && INTVAL (XEXP (src, 1)) != 0)
	return find_base_value (XEXP (src, 0));
      return 0;

    case TRUNCATE:
      /* As we do not know which address space the pointer is referring to, we can
	 handle this only if the target does not support different pointer or
	 address modes depending on the address space.  */
      if (!target_default_pointer_address_modes_p ())
	break;
      if (GET_MODE_SIZE (GET_MODE (src)) < GET_MODE_SIZE (Pmode))
	break;
      /* Fall through.  */
    case HIGH:
    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return find_base_value (XEXP (src, 0));

    case ZERO_EXTEND:
    case SIGN_EXTEND:	/* used for NT/Alpha pointers */
      /* As we do not know which address space the pointer is referring to, we can
	 handle this only if the target does not support different pointer or
	 address modes depending on the address space.  */
      if (!target_default_pointer_address_modes_p ())
	break;

      {
	rtx temp = find_base_value (XEXP (src, 0));

	if (temp != 0 && CONSTANT_P (temp))
	  temp = convert_memory_address (Pmode, temp);

	return temp;
      }

    default:
      break;
    }

  return 0;
}
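
/* A worked illustration (hypothetical RTL; the register number is made
   up): suppose pseudo 100 was set from `(symbol_ref "buf")', so its
   reg_base_value entry is that SYMBOL_REF.  Then

     find_base_value ((plus:SI (reg:SI 100) (const_int 4)))

   finds a known value for the REG operand, sees that the other operand is
   a CONST_INT, and returns the SYMBOL_REF; accesses through this address
   are therefore treated as based on `buf'.  */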

/* Called from init_alias_analysis indirectly through note_stores,
   or directly if DEST is a register with a REG_NOALIAS note attached.
   SET is null in the latter case.  */

/* While scanning insns to find base values, reg_seen[N] is nonzero if
   register N has been set in this function.  */
static sbitmap reg_seen;

static void
record_set (rtx dest, const_rtx set, void *data ATTRIBUTE_UNUSED)
{
  unsigned regno;
  rtx src;
  int n;

  if (!REG_P (dest))
    return;

  regno = REGNO (dest);

  gcc_checking_assert (regno < reg_base_value->length ());

  /* If this spans multiple hard registers, then we must indicate that every
     register has an unusable value.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    n = hard_regno_nregs[regno][GET_MODE (dest)];
  else
    n = 1;
  if (n != 1)
    {
      while (--n >= 0)
	{
	  bitmap_set_bit (reg_seen, regno + n);
	  new_reg_base_value[regno + n] = 0;
	}
      return;
    }

  if (set)
    {
      /* A CLOBBER wipes out any old value but does not prevent a previously
	 unset register from acquiring a base address (i.e. reg_seen is not
	 set).  */
      if (GET_CODE (set) == CLOBBER)
	{
	  new_reg_base_value[regno] = 0;
	  return;
	}
      src = SET_SRC (set);
    }
  else
    {
      /* There's a REG_NOALIAS note against DEST.  */
      if (bitmap_bit_p (reg_seen, regno))
	{
	  new_reg_base_value[regno] = 0;
	  return;
	}
      bitmap_set_bit (reg_seen, regno);
      new_reg_base_value[regno] = unique_base_value (unique_id++);
      return;
    }

  /* If this is not the first set of REGNO, see whether the new value
     is related to the old one.  There are two cases of interest:

	(1) The register might be assigned an entirely new value
	    that has the same base term as the original set.

	(2) The set might be a simple self-modification that
	    cannot change REGNO's base value.

     If neither case holds, reject the original base value as invalid.
     Note that the following situation is not detected:

	 extern int x, y;  int *p = &x; p += (&y-&x);

     ANSI C does not allow computing the difference of addresses
     of distinct top level objects.  */
  if (new_reg_base_value[regno] != 0
      && find_base_value (src) != new_reg_base_value[regno])
    switch (GET_CODE (src))
      {
      case LO_SUM:
      case MINUS:
	if (XEXP (src, 0) != dest && XEXP (src, 1) != dest)
	  new_reg_base_value[regno] = 0;
	break;
      case PLUS:
	/* If the value we add in the PLUS is also a valid base value,
	   this might be the actual base value, and the original value
	   an index.  */
	{
	  rtx other = NULL_RTX;

	  if (XEXP (src, 0) == dest)
	    other = XEXP (src, 1);
	  else if (XEXP (src, 1) == dest)
	    other = XEXP (src, 0);

	  if (! other || find_base_value (other))
	    new_reg_base_value[regno] = 0;
	  break;
	}
      case AND:
	if (XEXP (src, 0) != dest || !CONST_INT_P (XEXP (src, 1)))
	  new_reg_base_value[regno] = 0;
	break;
      default:
	new_reg_base_value[regno] = 0;
	break;
      }
  /* If this is the first set of a register, record the value.  */
  else if ((regno >= FIRST_PSEUDO_REGISTER || ! fixed_regs[regno])
	   && ! bitmap_bit_p (reg_seen, regno) && new_reg_base_value[regno] == 0)
    new_reg_base_value[regno] = find_base_value (src);

  bitmap_set_bit (reg_seen, regno);
}

/* Return REG_BASE_VALUE for REGNO.  The selective scheduler uses this to
   avoid using hard registers with non-null REG_BASE_VALUE for renaming.  */
rtx
get_reg_base_value (unsigned int regno)
{
  return (*reg_base_value)[regno];
}

/* If a value is known for REGNO, return it.  */

rtx
get_reg_known_value (unsigned int regno)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < vec_safe_length (reg_known_value))
	return (*reg_known_value)[regno];
    }
  return NULL;
}

/* Set it.  */

static void
set_reg_known_value (unsigned int regno, rtx val)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < vec_safe_length (reg_known_value))
	(*reg_known_value)[regno] = val;
    }
}

/* Similarly for reg_known_equiv_p.  */

bool
get_reg_known_equiv_p (unsigned int regno)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < vec_safe_length (reg_known_value))
	return bitmap_bit_p (reg_known_equiv_p, regno);
    }
  return false;
}

static void
set_reg_known_equiv_p (unsigned int regno, bool val)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      regno -= FIRST_PSEUDO_REGISTER;
      if (regno < vec_safe_length (reg_known_value))
	{
	  if (val)
	    bitmap_set_bit (reg_known_equiv_p, regno);
	  else
	    bitmap_clear_bit (reg_known_equiv_p, regno);
	}
    }
}


/* Returns a canonical version of X, from the point of view of alias
   analysis.  (For example, if X is a MEM whose address is a register,
   and the register has a known value (say a SYMBOL_REF), then a MEM
   whose address is the SYMBOL_REF is returned.)  */

rtx
canon_rtx (rtx x)
{
  /* Recursively look for equivalences.  */
  if (REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER)
    {
      rtx t = get_reg_known_value (REGNO (x));
      if (t == x)
	return x;
      if (t)
	return canon_rtx (t);
    }

  if (GET_CODE (x) == PLUS)
    {
      rtx x0 = canon_rtx (XEXP (x, 0));
      rtx x1 = canon_rtx (XEXP (x, 1));

      if (x0 != XEXP (x, 0) || x1 != XEXP (x, 1))
	{
	  if (CONST_INT_P (x0))
	    return plus_constant (GET_MODE (x), x1, INTVAL (x0));
	  else if (CONST_INT_P (x1))
	    return plus_constant (GET_MODE (x), x0, INTVAL (x1));
	  return gen_rtx_PLUS (GET_MODE (x), x0, x1);
	}
    }

  /* This gives us much better alias analysis when called from
     the loop optimizer.   Note we want to leave the original
     MEM alone, but need to return the canonicalized MEM with
     all the flags with their original values.  */
  else if (MEM_P (x))
    x = replace_equiv_address_nv (x, canon_rtx (XEXP (x, 0)));

  return x;
}

/* Return 1 if X and Y are identical-looking rtx's.
   Expect that X and Y have already been canonicalized.

   We use the data in reg_known_value above to see if two registers with
   different numbers are, in fact, equivalent.  */

static int
rtx_equal_for_memref_p (const_rtx x, const_rtx y)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0 && y == 0)
    return 1;
  if (x == 0 || y == 0)
    return 0;

  if (x == y)
    return 1;

  code = GET_CODE (x);
  /* Rtx's of different codes cannot be equal.  */
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
     (REG:SI x) and (REG:HI x) are NOT equivalent.  */

  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* Some RTL can be compared without a recursive examination.  */
  switch (code)
    {
    case REG:
      return REGNO (x) == REGNO (y);

    case LABEL_REF:
      return LABEL_REF_LABEL (x) == LABEL_REF_LABEL (y);

    case SYMBOL_REF:
      return XSTR (x, 0) == XSTR (y, 0);

    case ENTRY_VALUE:
      /* This is magic, don't go through canonicalization et al.  */
      return rtx_equal_p (ENTRY_VALUE_EXP (x), ENTRY_VALUE_EXP (y));

    case VALUE:
    CASE_CONST_UNIQUE:
      /* Pointer equality guarantees equality for these nodes.  */
      return 0;

    default:
      break;
    }

  /* canon_rtx knows how to handle plus.  No need to canonicalize.  */
  if (code == PLUS)
    return ((rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 0))
	     && rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 1)))
	    || (rtx_equal_for_memref_p (XEXP (x, 0), XEXP (y, 1))
		&& rtx_equal_for_memref_p (XEXP (x, 1), XEXP (y, 0))));
  /* For commutative operations, the RTXs match if the operands match in any
     order.  Also handle the simple binary and unary cases without a loop.  */
  if (COMMUTATIVE_P (x))
    {
      rtx xop0 = canon_rtx (XEXP (x, 0));
      rtx yop0 = canon_rtx (XEXP (y, 0));
      rtx yop1 = canon_rtx (XEXP (y, 1));

      return ((rtx_equal_for_memref_p (xop0, yop0)
	       && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop1))
	      || (rtx_equal_for_memref_p (xop0, yop1)
		  && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)), yop0)));
    }
  else if (NON_COMMUTATIVE_P (x))
    {
      return (rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
				      canon_rtx (XEXP (y, 0)))
	      && rtx_equal_for_memref_p (canon_rtx (XEXP (x, 1)),
					 canon_rtx (XEXP (y, 1))));
    }
  else if (UNARY_P (x))
    return rtx_equal_for_memref_p (canon_rtx (XEXP (x, 0)),
				   canon_rtx (XEXP (y, 0)));

  /* Compare the elements.  If any pair of corresponding elements
     fails to match, return 0 for the whole thing.

     Limit cases to types which actually appear in addresses.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return 0;
	  break;

	case 'E':
	  /* Two vectors must have the same length.  */
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return 0;

	  /* And the corresponding elements must match.  */
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (rtx_equal_for_memref_p (canon_rtx (XVECEXP (x, i, j)),
					canon_rtx (XVECEXP (y, i, j))) == 0)
	      return 0;
	  break;

	case 'e':
	  if (rtx_equal_for_memref_p (canon_rtx (XEXP (x, i)),
				      canon_rtx (XEXP (y, i))) == 0)
	    return 0;
	  break;

	  /* This can happen for asm operands.  */
	case 's':
	  if (strcmp (XSTR (x, i), XSTR (y, i)))
	    return 0;
	  break;

	/* This can happen for an asm which clobbers memory.  */
	case '0':
	  break;

	  /* It is believed that rtx's at this level will never
	     contain anything but integers and other rtx's,
	     except for within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  gcc_unreachable ();
	}
    }
  return 1;
}

static rtx
find_base_term (rtx x)
{
  cselib_val *val;
  struct elt_loc_list *l, *f;
  rtx ret;

#if defined (FIND_BASE_TERM)
  /* Try machine-dependent ways to find the base term.  */
  x = FIND_BASE_TERM (x);
#endif

  switch (GET_CODE (x))
    {
    case REG:
      return REG_BASE_VALUE (x);

    case TRUNCATE:
      /* As we do not know which address space the pointer is referring to, we can
	 handle this only if the target does not support different pointer or
	 address modes depending on the address space.  */
      if (!target_default_pointer_address_modes_p ())
	return 0;
      if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (Pmode))
	return 0;
      /* Fall through.  */
    case HIGH:
    case PRE_INC:
    case PRE_DEC:
    case POST_INC:
    case POST_DEC:
    case PRE_MODIFY:
    case POST_MODIFY:
      return find_base_term (XEXP (x, 0));

    case ZERO_EXTEND:
    case SIGN_EXTEND:	/* Used for Alpha/NT pointers */
      /* As we do not know which address space the pointer is referring to, we can
	 handle this only if the target does not support different pointer or
	 address modes depending on the address space.  */
      if (!target_default_pointer_address_modes_p ())
	return 0;

      {
	rtx temp = find_base_term (XEXP (x, 0));

	if (temp != 0 && CONSTANT_P (temp))
	  temp = convert_memory_address (Pmode, temp);

	return temp;
      }

    case VALUE:
      val = CSELIB_VAL_PTR (x);
      ret = NULL_RTX;

      if (!val)
	return ret;

      if (cselib_sp_based_value_p (val))
	return static_reg_base_value[STACK_POINTER_REGNUM];

      f = val->locs;
      /* Temporarily reset val->locs to avoid infinite recursion.  */
      val->locs = NULL;

      for (l = f; l; l = l->next)
	if (GET_CODE (l->loc) == VALUE
	    && CSELIB_VAL_PTR (l->loc)->locs
	    && !CSELIB_VAL_PTR (l->loc)->locs->next
	    && CSELIB_VAL_PTR (l->loc)->locs->loc == x)
	  continue;
	else if ((ret = find_base_term (l->loc)) != 0)
	  break;

      val->locs = f;
      return ret;

    case LO_SUM:
      /* The standard form is (lo_sum reg sym) so look only at the
         second operand.  */
      return find_base_term (XEXP (x, 1));

    case CONST:
      x = XEXP (x, 0);
      if (GET_CODE (x) != PLUS && GET_CODE (x) != MINUS)
	return 0;
      /* Fall through.  */
    case PLUS:
    case MINUS:
      {
	rtx tmp1 = XEXP (x, 0);
	rtx tmp2 = XEXP (x, 1);

	/* This is a little bit tricky since we have to determine which of
	   the two operands represents the real base address.  Otherwise this
	   routine may return the index register instead of the base register.

	   That may cause us to believe no aliasing was possible, when in
	   fact aliasing is possible.

	   We use a few simple tests to guess the base register.  Additional
	   tests can certainly be added.  For example, if one of the operands
	   is a shift or multiply, then it must be the index register and the
	   other operand is the base register.  */

	if (tmp1 == pic_offset_table_rtx && CONSTANT_P (tmp2))
	  return find_base_term (tmp2);

	/* If either operand is known to be a pointer, then prefer it
	   to determine the base term.  */
	if (REG_P (tmp1) && REG_POINTER (tmp1))
	  ;
	else if (REG_P (tmp2) && REG_POINTER (tmp2))
	  std::swap (tmp1, tmp2);
	/* If the second argument is a constant that has a base term,
	   prefer it over the variable tmp1.  See PR64025.  */
1769	else if (CONSTANT_P (tmp2) && !CONST_INT_P (tmp2))
1770	  std::swap (tmp1, tmp2);
1771
1772	/* Go ahead and find the base term for both operands.  If either base
1773	   term is from a pointer or is a named object or a special address
1774	   (like an argument or stack reference), then use it for the
1775	   base term.  */
1776	rtx base = find_base_term (tmp1);
1777	if (base != NULL_RTX
1778	    && ((REG_P (tmp1) && REG_POINTER (tmp1))
1779		 || known_base_value_p (base)))
1780	  return base;
1781	base = find_base_term (tmp2);
1782	if (base != NULL_RTX
1783	    && ((REG_P (tmp2) && REG_POINTER (tmp2))
1784		 || known_base_value_p (base)))
1785	  return base;
1786
1787	/* We could not determine which of the two operands was the
1788	   base register and which was the index.  So we can determine
1789	   nothing from the base alias check.  */
1790	return 0;
1791      }
1792
1793    case AND:
1794      if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) != 0)
1795	return find_base_term (XEXP (x, 0));
1796      return 0;
1797
1798    case SYMBOL_REF:
1799    case LABEL_REF:
1800      return x;
1801
1802    default:
1803      return 0;
1804    }
1805}
1806
1807/* Return true if accesses to address X may alias accesses based
1808   on the stack pointer.  */
1809
1810bool
1811may_be_sp_based_p (rtx x)
1812{
1813  rtx base = find_base_term (x);
1814  return !base || base == static_reg_base_value[STACK_POINTER_REGNUM];
1815}
1816
1817/* Return 0 if the addresses X and Y are known to point to different
1818   objects, 1 if they might be pointers to the same object.  */
1819
1820static int
1821base_alias_check (rtx x, rtx x_base, rtx y, rtx y_base,
1822		  machine_mode x_mode, machine_mode y_mode)
1823{
1824  /* If the address itself has no known base see if a known equivalent
1825     value has one.  If either address still has no known base, nothing
1826     is known about aliasing.  */
1827  if (x_base == 0)
1828    {
1829      rtx x_c;
1830
1831      if (! flag_expensive_optimizations || (x_c = canon_rtx (x)) == x)
1832	return 1;
1833
1834      x_base = find_base_term (x_c);
1835      if (x_base == 0)
1836	return 1;
1837    }
1838
1839  if (y_base == 0)
1840    {
1841      rtx y_c;
1842      if (! flag_expensive_optimizations || (y_c = canon_rtx (y)) == y)
1843	return 1;
1844
1845      y_base = find_base_term (y_c);
1846      if (y_base == 0)
1847	return 1;
1848    }
1849
1850  /* If the base addresses are equal nothing is known about aliasing.  */
1851  if (rtx_equal_p (x_base, y_base))
1852    return 1;
1853
  /* The base addresses are different expressions.  If they are not accessed
     via AND, there is no conflict.  We can bring knowledge of object
     alignment into play here.  For example, on alpha, "char a, b;" can
     alias one another, though "char a; long b;" cannot.  AND addresses may
     implicitly alias surrounding objects; i.e. unaligned access in DImode
     via AND address can alias all surrounding object types except those
     with alignment 8 or higher.  */
1861  if (GET_CODE (x) == AND && GET_CODE (y) == AND)
1862    return 1;
1863  if (GET_CODE (x) == AND
1864      && (!CONST_INT_P (XEXP (x, 1))
1865	  || (int) GET_MODE_UNIT_SIZE (y_mode) < -INTVAL (XEXP (x, 1))))
1866    return 1;
1867  if (GET_CODE (y) == AND
1868      && (!CONST_INT_P (XEXP (y, 1))
1869	  || (int) GET_MODE_UNIT_SIZE (x_mode) < -INTVAL (XEXP (y, 1))))
1870    return 1;
1871
1872  /* Differing symbols not accessed via AND never alias.  */
1873  if (GET_CODE (x_base) != ADDRESS && GET_CODE (y_base) != ADDRESS)
1874    return 0;
1875
1876  if (unique_base_value_p (x_base) || unique_base_value_p (y_base))
1877    return 0;
1878
1879  return 1;
1880}
1881
1882/* Return TRUE if EXPR refers to a VALUE whose uid is greater than
1883   (or equal to) that of V.  */
1884
1885static bool
1886refs_newer_value_p (const_rtx expr, rtx v)
1887{
1888  int minuid = CSELIB_VAL_PTR (v)->uid;
1889  subrtx_iterator::array_type array;
1890  FOR_EACH_SUBRTX (iter, array, expr, NONCONST)
1891    if (GET_CODE (*iter) == VALUE && CSELIB_VAL_PTR (*iter)->uid >= minuid)
1892      return true;
1893  return false;
1894}
1895
1896/* Convert the address X into something we can use.  This is done by returning
1897   it unchanged unless it is a VALUE or VALUE +/- constant; for VALUE
1898   we call cselib to get a more useful rtx.  */
1899
1900rtx
1901get_addr (rtx x)
1902{
1903  cselib_val *v;
1904  struct elt_loc_list *l;
1905
1906  if (GET_CODE (x) != VALUE)
1907    {
1908      if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
1909	  && GET_CODE (XEXP (x, 0)) == VALUE
1910	  && CONST_SCALAR_INT_P (XEXP (x, 1)))
1911	{
1912	  rtx op0 = get_addr (XEXP (x, 0));
1913	  if (op0 != XEXP (x, 0))
1914	    {
1915	      if (GET_CODE (x) == PLUS
1916		  && GET_CODE (XEXP (x, 1)) == CONST_INT)
1917		return plus_constant (GET_MODE (x), op0, INTVAL (XEXP (x, 1)));
1918	      return simplify_gen_binary (GET_CODE (x), GET_MODE (x),
1919					  op0, XEXP (x, 1));
1920	    }
1921	}
1922      return x;
1923    }
1924  v = CSELIB_VAL_PTR (x);
1925  if (v)
1926    {
1927      bool have_equivs = cselib_have_permanent_equivalences ();
1928      if (have_equivs)
1929	v = canonical_cselib_val (v);
1930      for (l = v->locs; l; l = l->next)
1931	if (CONSTANT_P (l->loc))
1932	  return l->loc;
1933      for (l = v->locs; l; l = l->next)
1934	if (!REG_P (l->loc) && !MEM_P (l->loc)
1935	    /* Avoid infinite recursion when potentially dealing with
1936	       var-tracking artificial equivalences, by skipping the
1937	       equivalences themselves, and not choosing expressions
1938	       that refer to newer VALUEs.  */
1939	    && (!have_equivs
1940		|| (GET_CODE (l->loc) != VALUE
1941		    && !refs_newer_value_p (l->loc, x))))
1942	  return l->loc;
1943      if (have_equivs)
1944	{
1945	  for (l = v->locs; l; l = l->next)
1946	    if (REG_P (l->loc)
1947		|| (GET_CODE (l->loc) != VALUE
1948		    && !refs_newer_value_p (l->loc, x)))
1949	      return l->loc;
1950	  /* Return the canonical value.  */
1951	  return v->val_rtx;
1952	}
1953      if (v->locs)
1954	return v->locs->loc;
1955    }
1956  return x;
1957}
1958
1959/*  Return the address of the (N_REFS + 1)th memory reference to ADDR
1960    where SIZE is the size in bytes of the memory reference.  If ADDR
1961    is not modified by the memory reference then ADDR is returned.  */
1962
1963static rtx
1964addr_side_effect_eval (rtx addr, int size, int n_refs)
1965{
1966  int offset = 0;
1967
1968  switch (GET_CODE (addr))
1969    {
1970    case PRE_INC:
1971      offset = (n_refs + 1) * size;
1972      break;
1973    case PRE_DEC:
1974      offset = -(n_refs + 1) * size;
1975      break;
1976    case POST_INC:
1977      offset = n_refs * size;
1978      break;
1979    case POST_DEC:
1980      offset = -n_refs * size;
1981      break;
1982
1983    default:
1984      return addr;
1985    }
1986
1987  if (offset)
1988    addr = gen_rtx_PLUS (GET_MODE (addr), XEXP (addr, 0),
1989			 gen_int_mode (offset, GET_MODE (addr)));
1990  else
1991    addr = XEXP (addr, 0);
1992  addr = canon_rtx (addr);
1993
1994  return addr;
1995}
1996
1997/* Return TRUE if an object X sized at XSIZE bytes and another object
1998   Y sized at YSIZE bytes, starting C bytes after X, may overlap.  If
1999   any of the sizes is zero, assume an overlap, otherwise use the
2000   absolute value of the sizes as the actual sizes.  */
2001
2002static inline bool
2003offset_overlap_p (HOST_WIDE_INT c, int xsize, int ysize)
2004{
2005  return (xsize == 0 || ysize == 0
2006	  || (c >= 0
2007	      ? (abs (xsize) > c)
2008	      : (abs (ysize) > -c)));
2009}
2010
2011/* Return one if X and Y (memory addresses) reference the
2012   same location in memory or if the references overlap.
   Return zero if they do not overlap; otherwise return
   minus one, in which case they still might reference the same location.
2015
2016   C is an offset accumulator.  When
2017   C is nonzero, we are testing aliases between X and Y + C.
2018   XSIZE is the size in bytes of the X reference,
2019   similarly YSIZE is the size in bytes for Y.
2020   Expect that canon_rtx has been already called for X and Y.
2021
2022   If XSIZE or YSIZE is zero, we do not know the amount of memory being
2023   referenced (the reference was BLKmode), so make the most pessimistic
2024   assumptions.
2025
2026   If XSIZE or YSIZE is negative, we may access memory outside the object
2027   being referenced as a side effect.  This can happen when using AND to
2028   align memory references, as is done on the Alpha.
2029
   It would be nice to notice that varying addresses cannot conflict with
   the frame pointer if no local variables had their addresses taken, but
   that is too hard for now.
2032
2033   ???  Contrary to the tree alias oracle this does not return
2034   one for X + non-constant and Y + non-constant when X and Y are equal.
2035   If that is fixed the TBAA hack for union type-punning can be removed.  */
2036
2037static int
2038memrefs_conflict_p (int xsize, rtx x, int ysize, rtx y, HOST_WIDE_INT c)
2039{
2040  if (GET_CODE (x) == VALUE)
2041    {
2042      if (REG_P (y))
2043	{
2044	  struct elt_loc_list *l = NULL;
2045	  if (CSELIB_VAL_PTR (x))
2046	    for (l = canonical_cselib_val (CSELIB_VAL_PTR (x))->locs;
2047		 l; l = l->next)
2048	      if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, y))
2049		break;
2050	  if (l)
2051	    x = y;
2052	  else
2053	    x = get_addr (x);
2054	}
2055      /* Don't call get_addr if y is the same VALUE.  */
2056      else if (x != y)
2057	x = get_addr (x);
2058    }
2059  if (GET_CODE (y) == VALUE)
2060    {
2061      if (REG_P (x))
2062	{
2063	  struct elt_loc_list *l = NULL;
2064	  if (CSELIB_VAL_PTR (y))
2065	    for (l = canonical_cselib_val (CSELIB_VAL_PTR (y))->locs;
2066		 l; l = l->next)
2067	      if (REG_P (l->loc) && rtx_equal_for_memref_p (l->loc, x))
2068		break;
2069	  if (l)
2070	    y = x;
2071	  else
2072	    y = get_addr (y);
2073	}
2074      /* Don't call get_addr if x is the same VALUE.  */
2075      else if (y != x)
2076	y = get_addr (y);
2077    }
2078  if (GET_CODE (x) == HIGH)
2079    x = XEXP (x, 0);
2080  else if (GET_CODE (x) == LO_SUM)
2081    x = XEXP (x, 1);
2082  else
2083    x = addr_side_effect_eval (x, abs (xsize), 0);
2084  if (GET_CODE (y) == HIGH)
2085    y = XEXP (y, 0);
2086  else if (GET_CODE (y) == LO_SUM)
2087    y = XEXP (y, 1);
2088  else
2089    y = addr_side_effect_eval (y, abs (ysize), 0);
2090
2091  if (rtx_equal_for_memref_p (x, y))
2092    {
2093      return offset_overlap_p (c, xsize, ysize);
2094    }
2095
2096  /* This code used to check for conflicts involving stack references and
2097     globals but the base address alias code now handles these cases.  */
2098
2099  if (GET_CODE (x) == PLUS)
2100    {
2101      /* The fact that X is canonicalized means that this
2102	 PLUS rtx is canonicalized.  */
2103      rtx x0 = XEXP (x, 0);
2104      rtx x1 = XEXP (x, 1);
2105
2106      if (GET_CODE (y) == PLUS)
2107	{
2108	  /* The fact that Y is canonicalized means that this
2109	     PLUS rtx is canonicalized.  */
2110	  rtx y0 = XEXP (y, 0);
2111	  rtx y1 = XEXP (y, 1);
2112
2113	  if (rtx_equal_for_memref_p (x1, y1))
2114	    return memrefs_conflict_p (xsize, x0, ysize, y0, c);
2115	  if (rtx_equal_for_memref_p (x0, y0))
2116	    return memrefs_conflict_p (xsize, x1, ysize, y1, c);
2117	  if (CONST_INT_P (x1))
2118	    {
2119	      if (CONST_INT_P (y1))
2120		return memrefs_conflict_p (xsize, x0, ysize, y0,
2121					   c - INTVAL (x1) + INTVAL (y1));
2122	      else
2123		return memrefs_conflict_p (xsize, x0, ysize, y,
2124					   c - INTVAL (x1));
2125	    }
2126	  else if (CONST_INT_P (y1))
2127	    return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
2128
2129	  return -1;
2130	}
2131      else if (CONST_INT_P (x1))
2132	return memrefs_conflict_p (xsize, x0, ysize, y, c - INTVAL (x1));
2133    }
2134  else if (GET_CODE (y) == PLUS)
2135    {
2136      /* The fact that Y is canonicalized means that this
2137	 PLUS rtx is canonicalized.  */
2138      rtx y0 = XEXP (y, 0);
2139      rtx y1 = XEXP (y, 1);
2140
2141      if (CONST_INT_P (y1))
2142	return memrefs_conflict_p (xsize, x, ysize, y0, c + INTVAL (y1));
2143      else
2144	return -1;
2145    }
2146
2147  if (GET_CODE (x) == GET_CODE (y))
2148    switch (GET_CODE (x))
2149      {
2150      case MULT:
2151	{
2152	  /* Handle cases where we expect the second operands to be the
2153	     same, and check only whether the first operand would conflict
2154	     or not.  */
2155	  rtx x0, y0;
2156	  rtx x1 = canon_rtx (XEXP (x, 1));
2157	  rtx y1 = canon_rtx (XEXP (y, 1));
2158	  if (! rtx_equal_for_memref_p (x1, y1))
2159	    return -1;
2160	  x0 = canon_rtx (XEXP (x, 0));
2161	  y0 = canon_rtx (XEXP (y, 0));
2162	  if (rtx_equal_for_memref_p (x0, y0))
2163	    return offset_overlap_p (c, xsize, ysize);
2164
2165	  /* Can't properly adjust our sizes.  */
2166	  if (!CONST_INT_P (x1))
2167	    return -1;
2168	  xsize /= INTVAL (x1);
2169	  ysize /= INTVAL (x1);
2170	  c /= INTVAL (x1);
2171	  return memrefs_conflict_p (xsize, x0, ysize, y0, c);
2172	}
2173
2174      default:
2175	break;
2176      }
2177
2178  /* Deal with alignment ANDs by adjusting offset and size so as to
2179     cover the maximum range, without taking any previously known
2180     alignment into account.  Make a size negative after such an
     adjustment, so that, if we end up with e.g. two SYMBOL_REFs, we
2182     assume a potential overlap, because they may end up in contiguous
2183     memory locations and the stricter-alignment access may span over
2184     part of both.  */
2185  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1)))
2186    {
2187      HOST_WIDE_INT sc = INTVAL (XEXP (x, 1));
2188      unsigned HOST_WIDE_INT uc = sc;
2189      if (sc < 0 && -uc == (uc & -uc))
2190	{
2191	  if (xsize > 0)
2192	    xsize = -xsize;
2193	  if (xsize)
2194	    xsize += sc + 1;
2195	  c -= sc + 1;
2196	  return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2197				     ysize, y, c);
2198	}
2199    }
2200  if (GET_CODE (y) == AND && CONST_INT_P (XEXP (y, 1)))
2201    {
2202      HOST_WIDE_INT sc = INTVAL (XEXP (y, 1));
2203      unsigned HOST_WIDE_INT uc = sc;
2204      if (sc < 0 && -uc == (uc & -uc))
2205	{
2206	  if (ysize > 0)
2207	    ysize = -ysize;
2208	  if (ysize)
2209	    ysize += sc + 1;
2210	  c += sc + 1;
2211	  return memrefs_conflict_p (xsize, x,
2212				     ysize, canon_rtx (XEXP (y, 0)), c);
2213	}
2214    }
2215
2216  if (CONSTANT_P (x))
2217    {
2218      if (CONST_INT_P (x) && CONST_INT_P (y))
2219	{
2220	  c += (INTVAL (y) - INTVAL (x));
2221	  return offset_overlap_p (c, xsize, ysize);
2222	}
2223
2224      if (GET_CODE (x) == CONST)
2225	{
2226	  if (GET_CODE (y) == CONST)
2227	    return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2228				       ysize, canon_rtx (XEXP (y, 0)), c);
2229	  else
2230	    return memrefs_conflict_p (xsize, canon_rtx (XEXP (x, 0)),
2231				       ysize, y, c);
2232	}
2233      if (GET_CODE (y) == CONST)
2234	return memrefs_conflict_p (xsize, x, ysize,
2235				   canon_rtx (XEXP (y, 0)), c);
2236
2237      /* Assume a potential overlap for symbolic addresses that went
2238	 through alignment adjustments (i.e., that have negative
2239	 sizes), because we can't know how far they are from each
2240	 other.  */
2241      if (CONSTANT_P (y))
2242	return (xsize < 0 || ysize < 0 || offset_overlap_p (c, xsize, ysize));
2243
2244      return -1;
2245    }
2246
2247  return -1;
2248}
2249
2250/* Functions to compute memory dependencies.
2251
2252   Since we process the insns in execution order, we can build tables
2253   to keep track of what registers are fixed (and not aliased), what registers
2254   are varying in known ways, and what registers are varying in unknown
2255   ways.
2256
2257   If both memory references are volatile, then there must always be a
2258   dependence between the two references, since their order can not be
2259   changed.  A volatile and non-volatile reference can be interchanged
2260   though.
2261
2262   We also must allow AND addresses, because they may generate accesses
2263   outside the object being referenced.  This is used to generate aligned
2264   addresses from unaligned addresses, for instance, the alpha
2265   storeqi_unaligned pattern.  */
2266
2267/* Read dependence: X is read after read in MEM takes place.  There can
2268   only be a dependence here if both reads are volatile, or if either is
2269   an explicit barrier.  */
2270
2271int
2272read_dependence (const_rtx mem, const_rtx x)
2273{
2274  if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2275    return true;
2276  if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2277      || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2278    return true;
2279  return false;
2280}
2281
2282/* Look at the bottom of the COMPONENT_REF list for a DECL, and return it.  */
2283
2284static tree
2285decl_for_component_ref (tree x)
2286{
2287  do
2288    {
2289      x = TREE_OPERAND (x, 0);
2290    }
2291  while (x && TREE_CODE (x) == COMPONENT_REF);
2292
2293  return x && DECL_P (x) ? x : NULL_TREE;
2294}
2295
2296/* Walk up the COMPONENT_REF list in X and adjust *OFFSET to compensate
2297   for the offset of the field reference.  *KNOWN_P says whether the
2298   offset is known.  */
2299
2300static void
2301adjust_offset_for_component_ref (tree x, bool *known_p,
2302				 HOST_WIDE_INT *offset)
2303{
2304  if (!*known_p)
2305    return;
2306  do
2307    {
2308      tree xoffset = component_ref_field_offset (x);
2309      tree field = TREE_OPERAND (x, 1);
2310      if (TREE_CODE (xoffset) != INTEGER_CST)
2311	{
2312	  *known_p = false;
2313	  return;
2314	}
2315
2316      offset_int woffset
2317	= (wi::to_offset (xoffset)
2318	   + wi::lrshift (wi::to_offset (DECL_FIELD_BIT_OFFSET (field)),
2319			  LOG2_BITS_PER_UNIT));
2320      if (!wi::fits_uhwi_p (woffset))
2321	{
2322	  *known_p = false;
2323	  return;
2324	}
2325      *offset += woffset.to_uhwi ();
2326
2327      x = TREE_OPERAND (x, 0);
2328    }
2329  while (x && TREE_CODE (x) == COMPONENT_REF);
2330}
2331
2332/* Return nonzero if we can determine the exprs corresponding to memrefs
2333   X and Y and they do not overlap.
   If LOOP_INVARIANT is set, skip offset-based disambiguation.  */
2335
2336int
2337nonoverlapping_memrefs_p (const_rtx x, const_rtx y, bool loop_invariant)
2338{
2339  tree exprx = MEM_EXPR (x), expry = MEM_EXPR (y);
2340  rtx rtlx, rtly;
2341  rtx basex, basey;
2342  bool moffsetx_known_p, moffsety_known_p;
2343  HOST_WIDE_INT moffsetx = 0, moffsety = 0;
2344  HOST_WIDE_INT offsetx = 0, offsety = 0, sizex, sizey, tem;
2345
2346  /* Unless both have exprs, we can't tell anything.  */
2347  if (exprx == 0 || expry == 0)
2348    return 0;
2349
2350  /* For spill-slot accesses make sure we have valid offsets.  */
2351  if ((exprx == get_spill_slot_decl (false)
2352       && ! MEM_OFFSET_KNOWN_P (x))
2353      || (expry == get_spill_slot_decl (false)
2354	  && ! MEM_OFFSET_KNOWN_P (y)))
2355    return 0;
2356
2357  /* If the field reference test failed, look at the DECLs involved.  */
2358  moffsetx_known_p = MEM_OFFSET_KNOWN_P (x);
2359  if (moffsetx_known_p)
2360    moffsetx = MEM_OFFSET (x);
2361  if (TREE_CODE (exprx) == COMPONENT_REF)
2362    {
2363      tree t = decl_for_component_ref (exprx);
2364      if (! t)
2365	return 0;
2366      adjust_offset_for_component_ref (exprx, &moffsetx_known_p, &moffsetx);
2367      exprx = t;
2368    }
2369
2370  moffsety_known_p = MEM_OFFSET_KNOWN_P (y);
2371  if (moffsety_known_p)
2372    moffsety = MEM_OFFSET (y);
2373  if (TREE_CODE (expry) == COMPONENT_REF)
2374    {
2375      tree t = decl_for_component_ref (expry);
2376      if (! t)
2377	return 0;
2378      adjust_offset_for_component_ref (expry, &moffsety_known_p, &moffsety);
2379      expry = t;
2380    }
2381
2382  if (! DECL_P (exprx) || ! DECL_P (expry))
2383    return 0;
2384
2385  /* With invalid code we can end up storing into the constant pool.
2386     Bail out to avoid ICEing when creating RTL for this.
2387     See gfortran.dg/lto/20091028-2_0.f90.  */
2388  if (TREE_CODE (exprx) == CONST_DECL
2389      || TREE_CODE (expry) == CONST_DECL)
2390    return 1;
2391
2392  rtlx = DECL_RTL (exprx);
2393  rtly = DECL_RTL (expry);
2394
2395  /* If either RTL is not a MEM, it must be a REG or CONCAT, meaning they
2396     can't overlap unless they are the same because we never reuse that part
2397     of the stack frame used for locals for spilled pseudos.  */
2398  if ((!MEM_P (rtlx) || !MEM_P (rtly))
2399      && ! rtx_equal_p (rtlx, rtly))
2400    return 1;
2401
2402  /* If we have MEMs referring to different address spaces (which can
2403     potentially overlap), we cannot easily tell from the addresses
2404     whether the references overlap.  */
2405  if (MEM_P (rtlx) && MEM_P (rtly)
2406      && MEM_ADDR_SPACE (rtlx) != MEM_ADDR_SPACE (rtly))
2407    return 0;
2408
2409  /* Get the base and offsets of both decls.  If either is a register, we
     know both are and are the same, so use that as the base.  The only way
2411     we can avoid overlap is if we can deduce that they are nonoverlapping
2412     pieces of that decl, which is very rare.  */
2413  basex = MEM_P (rtlx) ? XEXP (rtlx, 0) : rtlx;
2414  if (GET_CODE (basex) == PLUS && CONST_INT_P (XEXP (basex, 1)))
2415    offsetx = INTVAL (XEXP (basex, 1)), basex = XEXP (basex, 0);
2416
2417  basey = MEM_P (rtly) ? XEXP (rtly, 0) : rtly;
2418  if (GET_CODE (basey) == PLUS && CONST_INT_P (XEXP (basey, 1)))
2419    offsety = INTVAL (XEXP (basey, 1)), basey = XEXP (basey, 0);
2420
2421  /* If the bases are different, we know they do not overlap if both
2422     are constants or if one is a constant and the other a pointer into the
2423     stack frame.  Otherwise a different base means we can't tell if they
2424     overlap or not.  */
2425  if (! rtx_equal_p (basex, basey))
2426    return ((CONSTANT_P (basex) && CONSTANT_P (basey))
2427	    || (CONSTANT_P (basex) && REG_P (basey)
2428		&& REGNO_PTR_FRAME_P (REGNO (basey)))
2429	    || (CONSTANT_P (basey) && REG_P (basex)
2430		&& REGNO_PTR_FRAME_P (REGNO (basex))));
2431
  /* Offset-based disambiguation is not appropriate for the loop-invariant
     case.  */
2433  if (loop_invariant)
2434    return 0;
2435
2436  sizex = (!MEM_P (rtlx) ? (int) GET_MODE_SIZE (GET_MODE (rtlx))
2437	   : MEM_SIZE_KNOWN_P (rtlx) ? MEM_SIZE (rtlx)
2438	   : -1);
2439  sizey = (!MEM_P (rtly) ? (int) GET_MODE_SIZE (GET_MODE (rtly))
2440	   : MEM_SIZE_KNOWN_P (rtly) ? MEM_SIZE (rtly)
2441	   : -1);
2442
2443  /* If we have an offset for either memref, it can update the values computed
2444     above.  */
2445  if (moffsetx_known_p)
2446    offsetx += moffsetx, sizex -= moffsetx;
2447  if (moffsety_known_p)
2448    offsety += moffsety, sizey -= moffsety;
2449
2450  /* If a memref has both a size and an offset, we can use the smaller size.
2451     We can't do this if the offset isn't known because we must view this
2452     memref as being anywhere inside the DECL's MEM.  */
2453  if (MEM_SIZE_KNOWN_P (x) && moffsetx_known_p)
2454    sizex = MEM_SIZE (x);
2455  if (MEM_SIZE_KNOWN_P (y) && moffsety_known_p)
2456    sizey = MEM_SIZE (y);
2457
2458  /* Put the values of the memref with the lower offset in X's values.  */
2459  if (offsetx > offsety)
2460    {
2461      tem = offsetx, offsetx = offsety, offsety = tem;
2462      tem = sizex, sizex = sizey, sizey = tem;
2463    }
2464
2465  /* If we don't know the size of the lower-offset value, we can't tell
2466     if they conflict.  Otherwise, we do the test.  */
2467  return sizex >= 0 && offsety >= offsetx + sizex;
2468}
2469
2470/* Helper for true_dependence and canon_true_dependence.
2471   Checks for true dependence: X is read after store in MEM takes place.
2472
2473   If MEM_CANONICALIZED is FALSE, then X_ADDR and MEM_ADDR should be
2474   NULL_RTX, and the canonical addresses of MEM and X are both computed
2475   here.  If MEM_CANONICALIZED, then MEM must be already canonicalized.
2476
   If X_ADDR is non-NULL, it is used in preference to XEXP (x, 0).
2478
2479   Returns 1 if there is a true dependence, 0 otherwise.  */
2480
2481static int
2482true_dependence_1 (const_rtx mem, machine_mode mem_mode, rtx mem_addr,
2483		   const_rtx x, rtx x_addr, bool mem_canonicalized)
2484{
2485  rtx true_mem_addr;
2486  rtx base;
2487  int ret;
2488
2489  gcc_checking_assert (mem_canonicalized ? (mem_addr != NULL_RTX)
2490		       : (mem_addr == NULL_RTX && x_addr == NULL_RTX));
2491
2492  if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2493    return 1;
2494
2495  /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2496     This is used in epilogue deallocation functions, and in cselib.  */
2497  if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2498    return 1;
2499  if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2500    return 1;
2501  if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2502      || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2503    return 1;
2504
2505  if (! x_addr)
2506    x_addr = XEXP (x, 0);
2507  x_addr = get_addr (x_addr);
2508
2509  if (! mem_addr)
2510    {
2511      mem_addr = XEXP (mem, 0);
2512      if (mem_mode == VOIDmode)
2513	mem_mode = GET_MODE (mem);
2514    }
2515  true_mem_addr = get_addr (mem_addr);
2516
2517  /* Read-only memory is by definition never modified, and therefore can't
2518     conflict with anything.  However, don't assume anything when AND
     addresses are involved and leave it to the code below to determine
2520     dependence.  We don't expect to find read-only set on MEM, but
2521     stupid user tricks can produce them, so don't die.  */
2522  if (MEM_READONLY_P (x)
2523      && GET_CODE (x_addr) != AND
2524      && GET_CODE (true_mem_addr) != AND)
2525    return 0;
2526
2527  /* If we have MEMs referring to different address spaces (which can
2528     potentially overlap), we cannot easily tell from the addresses
2529     whether the references overlap.  */
2530  if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2531    return 1;
2532
2533  base = find_base_term (x_addr);
2534  if (base && (GET_CODE (base) == LABEL_REF
2535	       || (GET_CODE (base) == SYMBOL_REF
2536		   && CONSTANT_POOL_ADDRESS_P (base))))
2537    return 0;
2538
2539  rtx mem_base = find_base_term (true_mem_addr);
2540  if (! base_alias_check (x_addr, base, true_mem_addr, mem_base,
2541			  GET_MODE (x), mem_mode))
2542    return 0;
2543
2544  x_addr = canon_rtx (x_addr);
2545  if (!mem_canonicalized)
2546    mem_addr = canon_rtx (true_mem_addr);
2547
2548  if ((ret = memrefs_conflict_p (GET_MODE_SIZE (mem_mode), mem_addr,
2549				 SIZE_FOR_MODE (x), x_addr, 0)) != -1)
2550    return ret;
2551
2552  if (mems_in_disjoint_alias_sets_p (x, mem))
2553    return 0;
2554
2555  if (nonoverlapping_memrefs_p (mem, x, false))
2556    return 0;
2557
2558  return rtx_refs_may_alias_p (x, mem, true);
2559}
2560
2561/* True dependence: X is read after store in MEM takes place.  */
2562
2563int
2564true_dependence (const_rtx mem, machine_mode mem_mode, const_rtx x)
2565{
2566  return true_dependence_1 (mem, mem_mode, NULL_RTX,
2567			    x, NULL_RTX, /*mem_canonicalized=*/false);
2568}
2569
2570/* Canonical true dependence: X is read after store in MEM takes place.
2571   Variant of true_dependence which assumes MEM has already been
2572   canonicalized (hence we no longer do that here).
2573   The mem_addr argument has been added, since true_dependence_1 computed
2574   this value prior to canonicalizing.  */
2575
2576int
2577canon_true_dependence (const_rtx mem, machine_mode mem_mode, rtx mem_addr,
2578		       const_rtx x, rtx x_addr)
2579{
2580  return true_dependence_1 (mem, mem_mode, mem_addr,
2581			    x, x_addr, /*mem_canonicalized=*/true);
2582}
2583
2584/* Returns nonzero if a write to X might alias a previous read from
2585   (or, if WRITEP is true, a write to) MEM.
   If X_CANONICALIZED is true, then X_ADDR is the canonicalized address of X,
2587   and X_MODE the mode for that access.
2588   If MEM_CANONICALIZED is true, MEM is canonicalized.  */
2589
2590static int
2591write_dependence_p (const_rtx mem,
2592		    const_rtx x, machine_mode x_mode, rtx x_addr,
2593		    bool mem_canonicalized, bool x_canonicalized, bool writep)
2594{
2595  rtx mem_addr;
2596  rtx true_mem_addr, true_x_addr;
2597  rtx base;
2598  int ret;
2599
2600  gcc_checking_assert (x_canonicalized
2601		       ? (x_addr != NULL_RTX && x_mode != VOIDmode)
2602		       : (x_addr == NULL_RTX && x_mode == VOIDmode));
2603
2604  if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2605    return 1;
2606
2607  /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2608     This is used in epilogue deallocation functions.  */
2609  if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2610    return 1;
2611  if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2612    return 1;
2613  if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2614      || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2615    return 1;
2616
2617  if (!x_addr)
2618    x_addr = XEXP (x, 0);
2619  true_x_addr = get_addr (x_addr);
2620
2621  mem_addr = XEXP (mem, 0);
2622  true_mem_addr = get_addr (mem_addr);
2623
2624  /* A read from read-only memory can't conflict with read-write memory.
     Don't assume anything when AND addresses are involved and leave it to
2626     the code below to determine dependence.  */
2627  if (!writep
2628      && MEM_READONLY_P (mem)
2629      && GET_CODE (true_x_addr) != AND
2630      && GET_CODE (true_mem_addr) != AND)
2631    return 0;
2632
2633  /* If we have MEMs referring to different address spaces (which can
2634     potentially overlap), we cannot easily tell from the addresses
2635     whether the references overlap.  */
2636  if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2637    return 1;
2638
2639  base = find_base_term (true_mem_addr);
2640  if (! writep
2641      && base
2642      && (GET_CODE (base) == LABEL_REF
2643	  || (GET_CODE (base) == SYMBOL_REF
2644	      && CONSTANT_POOL_ADDRESS_P (base))))
2645    return 0;
2646
2647  rtx x_base = find_base_term (true_x_addr);
2648  if (! base_alias_check (true_x_addr, x_base, true_mem_addr, base,
2649			  GET_MODE (x), GET_MODE (mem)))
2650    return 0;
2651
2652  if (!x_canonicalized)
2653    {
2654      x_addr = canon_rtx (true_x_addr);
2655      x_mode = GET_MODE (x);
2656    }
2657  if (!mem_canonicalized)
2658    mem_addr = canon_rtx (true_mem_addr);
2659
2660  if ((ret = memrefs_conflict_p (SIZE_FOR_MODE (mem), mem_addr,
2661				 GET_MODE_SIZE (x_mode), x_addr, 0)) != -1)
2662    return ret;
2663
2664  if (nonoverlapping_memrefs_p (x, mem, false))
2665    return 0;
2666
2667  return rtx_refs_may_alias_p (x, mem, false);
2668}
2669
2670/* Anti dependence: X is written after read in MEM takes place.  */
2671
2672int
2673anti_dependence (const_rtx mem, const_rtx x)
2674{
2675  return write_dependence_p (mem, x, VOIDmode, NULL_RTX,
2676			     /*mem_canonicalized=*/false,
2677			     /*x_canonicalized*/false, /*writep=*/false);
2678}
2679
2680/* Likewise, but we already have a canonicalized MEM, and X_ADDR for X.
2681   Also, consider X in X_MODE (which might be from an enclosing
2682   STRICT_LOW_PART / ZERO_EXTRACT).
2683   If MEM_CANONICALIZED is true, MEM is canonicalized.  */
2684
2685int
2686canon_anti_dependence (const_rtx mem, bool mem_canonicalized,
2687		       const_rtx x, machine_mode x_mode, rtx x_addr)
2688{
2689  return write_dependence_p (mem, x, x_mode, x_addr,
2690			     mem_canonicalized, /*x_canonicalized=*/true,
2691			     /*writep=*/false);
2692}
2693
2694/* Output dependence: X is written after store in MEM takes place.  */
2695
2696int
2697output_dependence (const_rtx mem, const_rtx x)
2698{
2699  return write_dependence_p (mem, x, VOIDmode, NULL_RTX,
2700			     /*mem_canonicalized=*/false,
2701			     /*x_canonicalized*/false, /*writep=*/true);
2702}
2703
2704/* Likewise, but we already have a canonicalized MEM, and X_ADDR for X.
2705   Also, consider X in X_MODE (which might be from an enclosing
2706   STRICT_LOW_PART / ZERO_EXTRACT).
2707   If MEM_CANONICALIZED is true, MEM is canonicalized.  */
2708
2709int
2710canon_output_dependence (const_rtx mem, bool mem_canonicalized,
2711			 const_rtx x, machine_mode x_mode, rtx x_addr)
2712{
2713  return write_dependence_p (mem, x, x_mode, x_addr,
2714			     mem_canonicalized, /*x_canonicalized=*/true,
2715			     /*writep=*/true);
2716}
2717
2718
2719
/* Check whether X may be aliased with MEM.  Don't do offset-based
   memory disambiguation or TBAA.  */
2722int
2723may_alias_p (const_rtx mem, const_rtx x)
2724{
2725  rtx x_addr, mem_addr;
2726
2727  if (MEM_VOLATILE_P (x) && MEM_VOLATILE_P (mem))
2728    return 1;
2729
2730  /* (mem:BLK (scratch)) is a special mechanism to conflict with everything.
2731     This is used in epilogue deallocation functions.  */
2732  if (GET_MODE (x) == BLKmode && GET_CODE (XEXP (x, 0)) == SCRATCH)
2733    return 1;
2734  if (GET_MODE (mem) == BLKmode && GET_CODE (XEXP (mem, 0)) == SCRATCH)
2735    return 1;
2736  if (MEM_ALIAS_SET (x) == ALIAS_SET_MEMORY_BARRIER
2737      || MEM_ALIAS_SET (mem) == ALIAS_SET_MEMORY_BARRIER)
2738    return 1;
2739
2740  x_addr = XEXP (x, 0);
2741  x_addr = get_addr (x_addr);
2742
2743  mem_addr = XEXP (mem, 0);
2744  mem_addr = get_addr (mem_addr);
2745
2746  /* Read-only memory is by definition never modified, and therefore can't
2747     conflict with anything.  However, don't assume anything when AND
     addresses are involved and leave it to the code below to determine
2749     dependence.  We don't expect to find read-only set on MEM, but
2750     stupid user tricks can produce them, so don't die.  */
2751  if (MEM_READONLY_P (x)
2752      && GET_CODE (x_addr) != AND
2753      && GET_CODE (mem_addr) != AND)
2754    return 0;
2755
2756  /* If we have MEMs referring to different address spaces (which can
2757     potentially overlap), we cannot easily tell from the addresses
2758     whether the references overlap.  */
2759  if (MEM_ADDR_SPACE (mem) != MEM_ADDR_SPACE (x))
2760    return 1;
2761
2762  rtx x_base = find_base_term (x_addr);
2763  rtx mem_base = find_base_term (mem_addr);
2764  if (! base_alias_check (x_addr, x_base, mem_addr, mem_base,
2765			  GET_MODE (x), GET_MODE (mem_addr)))
2766    return 0;
2767
2768  if (nonoverlapping_memrefs_p (mem, x, true))
2769    return 0;
2770
  /* TBAA is not valid for the loop-invariant case.  */
2772  return rtx_refs_may_alias_p (x, mem, false);
2773}
2774
2775void
2776init_alias_target (void)
2777{
2778  int i;
2779
2780  if (!arg_base_value)
2781    arg_base_value = gen_rtx_ADDRESS (VOIDmode, 0);
2782
2783  memset (static_reg_base_value, 0, sizeof static_reg_base_value);
2784
2785  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2786    /* Check whether this register can hold an incoming pointer
2787       argument.  FUNCTION_ARG_REGNO_P tests outgoing register
2788       numbers, so translate if necessary due to register windows.  */
2789    if (FUNCTION_ARG_REGNO_P (OUTGOING_REGNO (i))
2790	&& HARD_REGNO_MODE_OK (i, Pmode))
2791      static_reg_base_value[i] = arg_base_value;
2792
2793  static_reg_base_value[STACK_POINTER_REGNUM]
2794    = unique_base_value (UNIQUE_BASE_VALUE_SP);
2795  static_reg_base_value[ARG_POINTER_REGNUM]
2796    = unique_base_value (UNIQUE_BASE_VALUE_ARGP);
2797  static_reg_base_value[FRAME_POINTER_REGNUM]
2798    = unique_base_value (UNIQUE_BASE_VALUE_FP);
2799#if !HARD_FRAME_POINTER_IS_FRAME_POINTER
2800  static_reg_base_value[HARD_FRAME_POINTER_REGNUM]
2801    = unique_base_value (UNIQUE_BASE_VALUE_HFP);
2802#endif
2803}
2804
/* Set MEMORY_MODIFIED when X modifies DATA (which is assumed
   to be a memory reference).  */
2807static bool memory_modified;
2808static void
2809memory_modified_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
2810{
2811  if (MEM_P (x))
2812    {
2813      if (anti_dependence (x, (const_rtx)data) || output_dependence (x, (const_rtx)data))
2814	memory_modified = true;
2815    }
2816}
2817
2818
/* Return true when INSN may modify the memory contents of MEM
   (i.e. the contents at MEM's address can be modified).  */
2821bool
2822memory_modified_in_insn_p (const_rtx mem, const_rtx insn)
2823{
2824  if (!INSN_P (insn))
2825    return false;
2826  memory_modified = false;
2827  note_stores (PATTERN (insn), memory_modified_1, CONST_CAST_RTX(mem));
2828  return memory_modified;
2829}
2830
/* Return TRUE if the destination of a set is an rtx identical to
   ITEM.  */
2833static inline bool
2834set_dest_equal_p (const_rtx set, const_rtx item)
2835{
2836  rtx dest = SET_DEST (set);
2837  return rtx_equal_p (dest, item);
2838}
2839
2840/* Like memory_modified_in_insn_p, but return TRUE if INSN will
2841   *DEFINITELY* modify the memory contents of MEM.  */
2842bool
2843memory_must_be_modified_in_insn_p (const_rtx mem, const_rtx insn)
2844{
2845  if (!INSN_P (insn))
2846    return false;
2847  insn = PATTERN (insn);
2848  if (GET_CODE (insn) == SET)
2849    return set_dest_equal_p (insn, mem);
2850  else if (GET_CODE (insn) == PARALLEL)
2851    {
2852      int i;
2853      for (i = 0; i < XVECLEN (insn, 0); i++)
2854	{
2855	  rtx sub = XVECEXP (insn, 0, i);
2856	  if (GET_CODE (sub) == SET
	      && set_dest_equal_p (sub, mem))
2858	    return true;
2859	}
2860    }
2861  return false;
2862}
2863
2864/* Initialize the aliasing machinery.  Initialize the REG_KNOWN_VALUE
2865   array.  */
2866
2867void
2868init_alias_analysis (void)
2869{
2870  unsigned int maxreg = max_reg_num ();
2871  int changed, pass;
2872  int i;
2873  unsigned int ui;
2874  rtx_insn *insn;
2875  rtx val;
2876  int rpo_cnt;
2877  int *rpo;
2878
2879  timevar_push (TV_ALIAS_ANALYSIS);
2880
2881  vec_safe_grow_cleared (reg_known_value, maxreg - FIRST_PSEUDO_REGISTER);
2882  reg_known_equiv_p = sbitmap_alloc (maxreg - FIRST_PSEUDO_REGISTER);
2883  bitmap_clear (reg_known_equiv_p);
2884
2885  /* If we have memory allocated from the previous run, use it.  */
2886  if (old_reg_base_value)
2887    reg_base_value = old_reg_base_value;
2888
2889  if (reg_base_value)
2890    reg_base_value->truncate (0);
2891
2892  vec_safe_grow_cleared (reg_base_value, maxreg);
2893
2894  new_reg_base_value = XNEWVEC (rtx, maxreg);
2895  reg_seen = sbitmap_alloc (maxreg);
2896
2897  /* The basic idea is that each pass through this loop will use the
2898     "constant" information from the previous pass to propagate alias
2899     information through another level of assignments.
2900
2901     The propagation is done on the CFG in reverse post-order, to propagate
2902     things forward as far as possible in each iteration.
2903
2904     This could get expensive if the assignment chains are long.  Maybe
2905     we should throttle the number of iterations, possibly based on
2906     the optimization level or flag_expensive_optimizations.
2907
2908     We could propagate more information in the first pass by making use
2909     of DF_REG_DEF_COUNT to determine immediately that the alias information
2910     for a pseudo is "constant".
2911
2912     A program with an uninitialized variable can cause an infinite loop
2913     here.  Instead of doing a full dataflow analysis to detect such problems
2914     we just cap the number of iterations for the loop.
2915
2916     The state of the arrays for the set chain in question does not matter
2917     since the program has undefined behavior.  */
2918
2919  rpo = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
2920  rpo_cnt = pre_and_rev_post_order_compute (NULL, rpo, false);
2921
2922  pass = 0;
2923  do
2924    {
2925      /* Assume nothing will change this iteration of the loop.  */
2926      changed = 0;
2927
2928      /* We want to assign the same IDs each iteration of this loop, so
2929	 start counting from one each iteration of the loop.  */
2930      unique_id = 1;
2931
2932      /* We're at the start of the function each iteration through the
2933	 loop, so we're copying arguments.  */
2934      copying_arguments = true;
2935
2936      /* Wipe the potential alias information clean for this pass.  */
2937      memset (new_reg_base_value, 0, maxreg * sizeof (rtx));
2938
2939      /* Wipe the reg_seen array clean.  */
2940      bitmap_clear (reg_seen);
2941
2942      /* Initialize the alias information for this pass.  */
2943      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2944	if (static_reg_base_value[i])
2945	  {
2946	    new_reg_base_value[i] = static_reg_base_value[i];
2947	    bitmap_set_bit (reg_seen, i);
2948	  }
2949
2950      /* Walk the insns adding values to the new_reg_base_value array.  */
2951      for (i = 0; i < rpo_cnt; i++)
2952	{
2953	  basic_block bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]);
2954	  FOR_BB_INSNS (bb, insn)
2955	    {
2956	      if (NONDEBUG_INSN_P (insn))
2957		{
2958		  rtx note, set;
2959
2960#if defined (HAVE_prologue) || defined (HAVE_epilogue)
2961		  /* The prologue/epilogue insns are not threaded onto the
2962		     insn chain until after reload has completed.  Thus,
2963		     there is no sense wasting time checking if INSN is in
2964		     the prologue/epilogue until after reload has completed.  */
2965		  if (reload_completed
2966		      && prologue_epilogue_contains (insn))
2967		    continue;
2968#endif
2969
		  /* If this insn has a noalias note, process it.  Otherwise,
2971		     scan for sets.  A simple set will have no side effects
2972		     which could change the base value of any other register.  */
2973
2974		  if (GET_CODE (PATTERN (insn)) == SET
2975		      && REG_NOTES (insn) != 0
2976		      && find_reg_note (insn, REG_NOALIAS, NULL_RTX))
2977		    record_set (SET_DEST (PATTERN (insn)), NULL_RTX, NULL);
2978		  else
2979		    note_stores (PATTERN (insn), record_set, NULL);
2980
2981		  set = single_set (insn);
2982
2983		  if (set != 0
2984		      && REG_P (SET_DEST (set))
2985		      && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
2986		    {
2987		      unsigned int regno = REGNO (SET_DEST (set));
2988		      rtx src = SET_SRC (set);
2989		      rtx t;
2990
2991		      note = find_reg_equal_equiv_note (insn);
2992		      if (note && REG_NOTE_KIND (note) == REG_EQUAL
2993			  && DF_REG_DEF_COUNT (regno) != 1)
2994			note = NULL_RTX;
2995
2996		      if (note != NULL_RTX
2997			  && GET_CODE (XEXP (note, 0)) != EXPR_LIST
2998			  && ! rtx_varies_p (XEXP (note, 0), 1)
2999			  && ! reg_overlap_mentioned_p (SET_DEST (set),
3000							XEXP (note, 0)))
3001			{
3002			  set_reg_known_value (regno, XEXP (note, 0));
3003			  set_reg_known_equiv_p (regno,
3004						 REG_NOTE_KIND (note) == REG_EQUIV);
3005			}
3006		      else if (DF_REG_DEF_COUNT (regno) == 1
3007			       && GET_CODE (src) == PLUS
3008			       && REG_P (XEXP (src, 0))
3009			       && (t = get_reg_known_value (REGNO (XEXP (src, 0))))
3010			       && CONST_INT_P (XEXP (src, 1)))
3011			{
3012			  t = plus_constant (GET_MODE (src), t,
3013					     INTVAL (XEXP (src, 1)));
3014			  set_reg_known_value (regno, t);
3015			  set_reg_known_equiv_p (regno, false);
3016			}
3017		      else if (DF_REG_DEF_COUNT (regno) == 1
3018			       && ! rtx_varies_p (src, 1))
3019			{
3020			  set_reg_known_value (regno, src);
3021			  set_reg_known_equiv_p (regno, false);
3022			}
3023		    }
3024		}
3025	      else if (NOTE_P (insn)
3026		       && NOTE_KIND (insn) == NOTE_INSN_FUNCTION_BEG)
3027		copying_arguments = false;
3028	    }
3029	}
3030
3031      /* Now propagate values from new_reg_base_value to reg_base_value.  */
3032      gcc_assert (maxreg == (unsigned int) max_reg_num ());
3033
3034      for (ui = 0; ui < maxreg; ui++)
3035	{
3036	  if (new_reg_base_value[ui]
3037	      && new_reg_base_value[ui] != (*reg_base_value)[ui]
3038	      && ! rtx_equal_p (new_reg_base_value[ui], (*reg_base_value)[ui]))
3039	    {
3040	      (*reg_base_value)[ui] = new_reg_base_value[ui];
3041	      changed = 1;
3042	    }
3043	}
3044    }
3045  while (changed && ++pass < MAX_ALIAS_LOOP_PASSES);
3046  XDELETEVEC (rpo);
3047
3048  /* Fill in the remaining entries.  */
3049  FOR_EACH_VEC_ELT (*reg_known_value, i, val)
3050    {
3051      int regno = i + FIRST_PSEUDO_REGISTER;
3052      if (! val)
3053	set_reg_known_value (regno, regno_reg_rtx[regno]);
3054    }
3055
3056  /* Clean up.  */
3057  free (new_reg_base_value);
3058  new_reg_base_value = 0;
3059  sbitmap_free (reg_seen);
3060  reg_seen = 0;
3061  timevar_pop (TV_ALIAS_ANALYSIS);
3062}
3063
3064/* Equate REG_BASE_VALUE (reg1) to REG_BASE_VALUE (reg2).
3065   Special API for var-tracking pass purposes.  */
3066
3067void
3068vt_equate_reg_base_value (const_rtx reg1, const_rtx reg2)
3069{
3070  (*reg_base_value)[REGNO (reg1)] = REG_BASE_VALUE (reg2);
3071}
3072
3073void
3074end_alias_analysis (void)
3075{
3076  old_reg_base_value = reg_base_value;
3077  vec_free (reg_known_value);
3078  sbitmap_free (reg_known_equiv_p);
3079}
3080
3081#include "gt-alias.h"
3082