/* Vectorizer
   Copyright (C) 2003-2015 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Loop and basic block vectorizer.

  This file contains drivers for the three vectorizers:
  (1) loop vectorizer (inter-iteration parallelism),
  (2) loop-aware SLP (intra-iteration parallelism) (invoked by the loop
      vectorizer)
  (3) BB vectorizer (out-of-loops), aka SLP

  The rest of the vectorizer's code is organized as follows:
  - tree-vect-loop.c - loop specific parts such as reductions, etc. These are
    used by drivers (1) and (2).
  - tree-vect-loop-manip.c - vectorizer's loop control-flow utilities, used by
    drivers (1) and (2).
  - tree-vect-slp.c - BB vectorization specific analysis and transformation,
    used by drivers (2) and (3).
  - tree-vect-stmts.c - statements analysis and transformation (used by all).
  - tree-vect-data-refs.c - vectorizer specific data-refs analysis and
    manipulations (used by all).
  - tree-vect-patterns.c - vectorizable code patterns detector (used by all).

  Here's a poor attempt at illustrating that:

     tree-vectorizer.c:
     loop_vect()  loop_aware_slp()  slp_vect()
          |        /           \          /
          |       /             \        /
          tree-vect-loop.c  tree-vect-slp.c
                | \      \  /      /   |
                |  \      \/      /    |
                |   \     /\     /     |
                |    \   /  \   /      |
         tree-vect-stmts.c  tree-vect-data-refs.c
                       \      /
                    tree-vect-patterns.c
*/

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "dumpfile.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "tree-pretty-print.h"
#include "predict.h"
#include "hard-reg-set.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "gimple-ssa.h"
#include "hash-map.h"
#include "plugin-api.h"
#include "ipa-ref.h"
#include "cgraph.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "tree-ssa-loop-manip.h"
#include "tree-cfg.h"
#include "cfgloop.h"
#include "tree-vectorizer.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "dbgcnt.h"
#include "gimple-fold.h"
#include "tree-scalar-evolution.h"


/* Loop or bb location.  */
source_location vect_location;

/* Vector mapping GIMPLE stmt to stmt_vec_info.  */
vec<vec_void_p> stmt_vec_info_vec;

/* For mapping simduid to vectorization factor.  */

struct simduid_to_vf : typed_free_remove<simduid_to_vf>
{
  unsigned int simduid;
  int vf;

  /* hash_table support.  */
  typedef simduid_to_vf value_type;
  typedef simduid_to_vf compare_type;
  static inline hashval_t hash (const value_type *);
  static inline int equal (const value_type *, const compare_type *);
};

inline hashval_t
simduid_to_vf::hash (const value_type *p)
{
  return p->simduid;
}

inline int
simduid_to_vf::equal (const value_type *p1, const value_type *p2)
{
  return p1->simduid == p2->simduid;
}

/* This hash maps the OMP simd array to the corresponding simduid used
   to index into it.  For example,

        _7 = GOMP_SIMD_LANE (simduid.0)
        ...
        ...
        D.1737[_7] = stuff;

   maps the OMP simd array D.1737[] to the DECL_UID of simduid.0.  */

struct simd_array_to_simduid : typed_free_remove<simd_array_to_simduid>
{
  tree decl;
  unsigned int simduid;

  /* hash_table support.  */
  typedef simd_array_to_simduid value_type;
  typedef simd_array_to_simduid compare_type;
  static inline hashval_t hash (const value_type *);
  static inline int equal (const value_type *, const compare_type *);
};

inline hashval_t
simd_array_to_simduid::hash (const value_type *p)
{
  return DECL_UID (p->decl);
}

inline int
simd_array_to_simduid::equal (const value_type *p1, const value_type *p2)
{
  return p1->decl == p2->decl;
}

/* Fold IFN_GOMP_SIMD_LANE, IFN_GOMP_SIMD_VF and IFN_GOMP_SIMD_LAST_LANE
   into their corresponding constants.  */
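/* For illustration only: assuming a loop whose recorded vectorization
   factor is 8 (or 1 if nothing was recorded in HTAB), the calls

        _5 = GOMP_SIMD_VF (simduid.0);
        _7 = GOMP_SIMD_LANE (simduid.0);
        _9 = GOMP_SIMD_LAST_LANE (simduid.0, _3);

   are folded to _5 = 8, _7 = 0 and _9 = _3 respectively.  */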

static void
adjust_simduid_builtins (hash_table<simduid_to_vf> *htab)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
        {
          unsigned int vf = 1;
          enum internal_fn ifn;
          gimple stmt = gsi_stmt (i);
          tree t;
          if (!is_gimple_call (stmt)
              || !gimple_call_internal_p (stmt))
            continue;
          ifn = gimple_call_internal_fn (stmt);
          switch (ifn)
            {
            case IFN_GOMP_SIMD_LANE:
            case IFN_GOMP_SIMD_VF:
            case IFN_GOMP_SIMD_LAST_LANE:
              break;
            default:
              continue;
            }
          tree arg = gimple_call_arg (stmt, 0);
          gcc_assert (arg != NULL_TREE);
          gcc_assert (TREE_CODE (arg) == SSA_NAME);
          simduid_to_vf *p = NULL, data;
          data.simduid = DECL_UID (SSA_NAME_VAR (arg));
          if (htab)
            {
              p = htab->find (&data);
              if (p)
                vf = p->vf;
            }
          switch (ifn)
            {
            case IFN_GOMP_SIMD_VF:
              t = build_int_cst (unsigned_type_node, vf);
              break;
            case IFN_GOMP_SIMD_LANE:
              t = build_int_cst (unsigned_type_node, 0);
              break;
            case IFN_GOMP_SIMD_LAST_LANE:
              t = gimple_call_arg (stmt, 1);
              break;
            default:
              gcc_unreachable ();
            }
          update_call_from_tree (&i, t);
        }
    }
}

/* Helper structure for note_simd_array_uses.  */

struct note_simd_array_uses_struct
{
  hash_table<simd_array_to_simduid> **htab;
  unsigned int simduid;
};

/* Callback for note_simd_array_uses, called through walk_gimple_op.  */

static tree
note_simd_array_uses_cb (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct note_simd_array_uses_struct *ns
    = (struct note_simd_array_uses_struct *) wi->info;

  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  else if (VAR_P (*tp)
           && lookup_attribute ("omp simd array", DECL_ATTRIBUTES (*tp))
           && DECL_CONTEXT (*tp) == current_function_decl)
    {
      simd_array_to_simduid data;
      if (!*ns->htab)
        *ns->htab = new hash_table<simd_array_to_simduid> (15);
      data.decl = *tp;
      data.simduid = ns->simduid;
      simd_array_to_simduid **slot = (*ns->htab)->find_slot (&data, INSERT);
      if (*slot == NULL)
        {
          simd_array_to_simduid *p = XNEW (simd_array_to_simduid);
          *p = data;
          *slot = p;
        }
      else if ((*slot)->simduid != ns->simduid)
        (*slot)->simduid = -1U;
      *walk_subtrees = 0;
    }
  return NULL_TREE;
}

/* Find "omp simd array" temporaries and map them to corresponding
   simduid.  */
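/* In the D.1737 example above this records D.1737 -> DECL_UID of
   simduid.0.  If the same array is found to be used with more than one
   simduid, the entry is invalidated with -1U and shrink_simd_arrays
   leaves that array alone.  */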

static void
note_simd_array_uses (hash_table<simd_array_to_simduid> **htab)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  struct walk_stmt_info wi;
  struct note_simd_array_uses_struct ns;

  memset (&wi, 0, sizeof (wi));
  wi.info = &ns;
  ns.htab = htab;

  FOR_EACH_BB_FN (bb, cfun)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
        gimple stmt = gsi_stmt (gsi);
        if (!is_gimple_call (stmt) || !gimple_call_internal_p (stmt))
          continue;
        switch (gimple_call_internal_fn (stmt))
          {
          case IFN_GOMP_SIMD_LANE:
          case IFN_GOMP_SIMD_VF:
          case IFN_GOMP_SIMD_LAST_LANE:
            break;
          default:
            continue;
          }
        tree lhs = gimple_call_lhs (stmt);
        if (lhs == NULL_TREE)
          continue;
        imm_use_iterator use_iter;
        gimple use_stmt;
        ns.simduid = DECL_UID (SSA_NAME_VAR (gimple_call_arg (stmt, 0)));
        FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, lhs)
          if (!is_gimple_debug (use_stmt))
            walk_gimple_op (use_stmt, note_simd_array_uses_cb, &wi);
      }
}

/* Shrink arrays with "omp simd array" attribute to the corresponding
   vectorization factor.  */
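/* Purely illustrative: with a recorded VF of 8, a temporary that was
   created earlier with a conservatively large element count, say

        int D.1737[64];

   is given the type int[8] and re-laid out.  Entries invalidated with
   -1U are skipped, and VF 1 is used when no factor was recorded for
   the simduid.  */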

static void
shrink_simd_arrays
  (hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab,
   hash_table<simduid_to_vf> *simduid_to_vf_htab)
{
  for (hash_table<simd_array_to_simduid>::iterator iter
         = simd_array_to_simduid_htab->begin ();
       iter != simd_array_to_simduid_htab->end (); ++iter)
    if ((*iter)->simduid != -1U)
      {
        tree decl = (*iter)->decl;
        int vf = 1;
        if (simduid_to_vf_htab)
          {
            simduid_to_vf *p = NULL, data;
            data.simduid = (*iter)->simduid;
            p = simduid_to_vf_htab->find (&data);
            if (p)
              vf = p->vf;
          }
        tree atype
          = build_array_type_nelts (TREE_TYPE (TREE_TYPE (decl)), vf);
        TREE_TYPE (decl) = atype;
        relayout_decl (decl);
      }

  delete simd_array_to_simduid_htab;
}

/* A helper function to free data refs.  */

void
vect_destroy_datarefs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  vec<data_reference_p> datarefs;
  struct data_reference *dr;
  unsigned int i;

  if (loop_vinfo)
    datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  else
    datarefs = BB_VINFO_DATAREFS (bb_vinfo);

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    if (dr->aux)
      {
        free (dr->aux);
        dr->aux = NULL;
      }

  free_data_refs (datarefs);
}


/* If LOOP has been versioned during ifcvt, return the internal call
   guarding it.  */
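/* The guard emitted by if-conversion looks roughly like this (loop
   numbers purely illustrative):

        _1 = LOOP_VECTORIZED (4, 5);
        if (_1 != 0)
          goto <if-converted copy, loop 4>;
        else
          goto <scalar copy, loop 5>;

   Starting from LOOP's preheader, single-predecessor blocks are walked
   upwards until a statement is found; if that is a GIMPLE_COND
   immediately preceded by such a call naming LOOP, the call is
   returned.  */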

static gimple
vect_loop_vectorized_call (struct loop *loop)
{
  basic_block bb = loop_preheader_edge (loop)->src;
  gimple g;
  do
    {
      g = last_stmt (bb);
      if (g)
        break;
      if (!single_pred_p (bb))
        break;
      bb = single_pred (bb);
    }
  while (1);
  if (g && gimple_code (g) == GIMPLE_COND)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (g);
      gsi_prev (&gsi);
      if (!gsi_end_p (gsi))
        {
          g = gsi_stmt (gsi);
          if (is_gimple_call (g)
              && gimple_call_internal_p (g)
              && gimple_call_internal_fn (g) == IFN_LOOP_VECTORIZED
              && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->num
                  || tree_to_shwi (gimple_call_arg (g, 1)) == loop->num))
            return g;
        }
    }
  return NULL;
}

/* Fold LOOP_VECTORIZED internal call G to VALUE and
   update any immediate uses of its LHS.  */
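/* Continuing the illustration above, folding with boolean_true_node
   turns

        _1 = LOOP_VECTORIZED (4, 5);
        if (_1 != 0)

   into

        _1 = 1;
        if (1 != 0)

   after which the callers' TODO_cleanup_cfg removes the now dead copy
   of the loop.  */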

static void
fold_loop_vectorized_call (gimple g, tree value)
{
  tree lhs = gimple_call_lhs (g);
  use_operand_p use_p;
  imm_use_iterator iter;
  gimple use_stmt;
  gimple_stmt_iterator gsi = gsi_for_stmt (g);

  update_call_from_tree (&gsi, value);
  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
    {
      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
        SET_USE (use_p, value);
      update_stmt (use_stmt);
    }
}

/* Function vectorize_loops.

   Entry point to loop vectorization phase.  */
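/* In outline: analyze each original loop and transform the
   vectorizable ones (loop copies created along the way get higher
   numbers and are not re-visited); record the vectorization factor of
   loops with a simduid; fold the LOOP_VECTORIZED guards of
   if-converted loops that were not vectorized to false; release the
   per-loop loop_vec_info data; finally fold the GOMP_SIMD_* builtins
   and shrink the "omp simd array" temporaries.  */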

unsigned
vectorize_loops (void)
{
  unsigned int i;
  unsigned int num_vectorized_loops = 0;
  unsigned int vect_loops_num;
  struct loop *loop;
  hash_table<simduid_to_vf> *simduid_to_vf_htab = NULL;
  hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
  bool any_ifcvt_loops = false;
  unsigned ret = 0;

  vect_loops_num = number_of_loops (cfun);

  /* Bail out if there are no loops.  */
  if (vect_loops_num <= 1)
    return 0;

  if (cfun->has_simduid_loops)
    note_simd_array_uses (&simd_array_to_simduid_htab);

  init_stmt_vec_info_vec ();

  /*  ----------- Analyze loops. -----------  */

  /* If a loop was duplicated, it gets a bigger number than all
     previously defined loops.  This fact allows us to run over the
     initial loops only, skipping newly generated ones.  */
  FOR_EACH_LOOP (loop, 0)
    if (loop->dont_vectorize)
      any_ifcvt_loops = true;
    else if ((flag_tree_loop_vectorize
              && optimize_loop_nest_for_speed_p (loop))
             || loop->force_vectorize)
      {
        loop_vec_info loop_vinfo;
        vect_location = find_loop_location (loop);
        if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOCATION
            && dump_enabled_p ())
          dump_printf (MSG_NOTE, "\nAnalyzing loop at %s:%d\n",
                       LOCATION_FILE (vect_location),
                       LOCATION_LINE (vect_location));

        loop_vinfo = vect_analyze_loop (loop);
        loop->aux = loop_vinfo;

        if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
          continue;

        if (!dbg_cnt (vect_loop))
          break;

        gimple loop_vectorized_call = vect_loop_vectorized_call (loop);
        if (loop_vectorized_call)
          {
            tree arg = gimple_call_arg (loop_vectorized_call, 1);
            basic_block *bbs;
            unsigned int i;
            struct loop *scalar_loop = get_loop (cfun, tree_to_shwi (arg));

            LOOP_VINFO_SCALAR_LOOP (loop_vinfo) = scalar_loop;
            gcc_checking_assert (vect_loop_vectorized_call
                                   (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
                                 == loop_vectorized_call);
            bbs = get_loop_body (scalar_loop);
            for (i = 0; i < scalar_loop->num_nodes; i++)
              {
                basic_block bb = bbs[i];
                gimple_stmt_iterator gsi;
                for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
                     gsi_next (&gsi))
                  {
                    gimple phi = gsi_stmt (gsi);
                    gimple_set_uid (phi, 0);
                  }
                for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
                     gsi_next (&gsi))
                  {
                    gimple stmt = gsi_stmt (gsi);
                    gimple_set_uid (stmt, 0);
                  }
              }
            free (bbs);
          }

        if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOCATION
            && dump_enabled_p ())
          dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
                           "loop vectorized\n");
        vect_transform_loop (loop_vinfo);
        num_vectorized_loops++;
        /* Now that the loop has been vectorized, allow it to be unrolled
           etc.  */
        loop->force_vectorize = false;

        if (loop->simduid)
          {
            simduid_to_vf *simduid_to_vf_data = XNEW (simduid_to_vf);
            if (!simduid_to_vf_htab)
              simduid_to_vf_htab = new hash_table<simduid_to_vf> (15);
            simduid_to_vf_data->simduid = DECL_UID (loop->simduid);
            simduid_to_vf_data->vf = loop_vinfo->vectorization_factor;
            *simduid_to_vf_htab->find_slot (simduid_to_vf_data, INSERT)
              = simduid_to_vf_data;
          }

        if (loop_vectorized_call)
          {
            fold_loop_vectorized_call (loop_vectorized_call, boolean_true_node);
            ret |= TODO_cleanup_cfg;
          }
      }

  vect_location = UNKNOWN_LOCATION;

  statistics_counter_event (cfun, "Vectorized loops", num_vectorized_loops);
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vectorized %u loops in function.\n",
                     num_vectorized_loops);

  /*  ----------- Finalize. -----------  */

  if (any_ifcvt_loops)
    for (i = 1; i < vect_loops_num; i++)
      {
        loop = get_loop (cfun, i);
        if (loop && loop->dont_vectorize)
          {
            gimple g = vect_loop_vectorized_call (loop);
            if (g)
              {
                fold_loop_vectorized_call (g, boolean_false_node);
                ret |= TODO_cleanup_cfg;
              }
          }
      }

  for (i = 1; i < vect_loops_num; i++)
    {
      loop_vec_info loop_vinfo;

      loop = get_loop (cfun, i);
      if (!loop)
        continue;
      loop_vinfo = (loop_vec_info) loop->aux;
      destroy_loop_vec_info (loop_vinfo, true);
      loop->aux = NULL;
    }

  free_stmt_vec_info_vec ();

  /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE} builtins.  */
  if (cfun->has_simduid_loops)
    adjust_simduid_builtins (simduid_to_vf_htab);

  /* Shrink any "omp simd array" temporary arrays to the
     actual vectorization factors.  */
  if (simd_array_to_simduid_htab)
    shrink_simd_arrays (simd_array_to_simduid_htab, simduid_to_vf_htab);
  delete simduid_to_vf_htab;
  cfun->has_simduid_loops = false;

  if (num_vectorized_loops > 0)
    {
      /* If we vectorized any loop, only virtual SSA form needs to be
         updated.
         ???  Also while we try hard to update loop-closed SSA form we
         fail to properly do this in some corner cases (see PR56286).  */
      rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa_only_virtuals);
      return TODO_cleanup_cfg;
    }

  return ret;
}


/* Entry point to the simduid cleanup pass.  */

namespace {

const pass_data pass_data_simduid_cleanup =
{
  GIMPLE_PASS, /* type */
  "simduid", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_simduid_cleanup : public gimple_opt_pass
{
public:
  pass_simduid_cleanup (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_simduid_cleanup, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_simduid_cleanup (m_ctxt); }
  virtual bool gate (function *fun) { return fun->has_simduid_loops; }
  virtual unsigned int execute (function *);

}; // class pass_simduid_cleanup

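/* If the loop vectorizer has not processed the function (it clears
   has_simduid_loops once it has), fold the GOMP_SIMD_* builtins here
   with the default vectorization factor of 1 and shrink the
   "omp simd array" temporaries accordingly.  */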
unsigned int
pass_simduid_cleanup::execute (function *fun)
{
  hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;

  note_simd_array_uses (&simd_array_to_simduid_htab);

  /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE} builtins.  */
  adjust_simduid_builtins (NULL);

  /* Shrink any "omp simd array" temporary arrays to the
     actual vectorization factors.  */
  if (simd_array_to_simduid_htab)
    shrink_simd_arrays (simd_array_to_simduid_htab, NULL);
  fun->has_simduid_loops = false;
  return 0;
}

}  // anon namespace

gimple_opt_pass *
make_pass_simduid_cleanup (gcc::context *ctxt)
{
  return new pass_simduid_cleanup (ctxt);
}


/* Entry point to basic block SLP phase.  */

namespace {

const pass_data pass_data_slp_vectorize =
{
  GIMPLE_PASS, /* type */
  "slp", /* name */
  OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
  TV_TREE_SLP_VECTORIZATION, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_slp_vectorize : public gimple_opt_pass
{
public:
  pass_slp_vectorize (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_slp_vectorize, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_slp_vectorize (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_slp_vectorize != 0; }
  virtual unsigned int execute (function *);

}; // class pass_slp_vectorize

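/* Main entry of the BB SLP pass: try to vectorize each basic block on
   its own.  Outside of the loop pipeline the loop optimizer and scalar
   evolutions are initialized here and torn down again at the end.  */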
unsigned int
pass_slp_vectorize::execute (function *fun)
{
  basic_block bb;

  bool in_loop_pipeline = scev_initialized_p ();
  if (!in_loop_pipeline)
    {
      loop_optimizer_init (LOOPS_NORMAL);
      scev_initialize ();
    }

  init_stmt_vec_info_vec ();

  FOR_EACH_BB_FN (bb, fun)
    {
      vect_location = find_bb_location (bb);

      if (vect_slp_analyze_bb (bb))
        {
          if (!dbg_cnt (vect_slp))
            break;

          vect_slp_transform_bb (bb);
          if (dump_enabled_p ())
            dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
                             "basic block vectorized\n");
        }
    }

  free_stmt_vec_info_vec ();

  if (!in_loop_pipeline)
    {
      scev_finalize ();
      loop_optimizer_finalize ();
    }

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_slp_vectorize (gcc::context *ctxt)
{
  return new pass_slp_vectorize (ctxt);
}


/* Increase alignment of global arrays to improve vectorization potential.
   TODO:
   - Consider also structs that have an array field.
   - Use ipa analysis to prune arrays that can't be vectorized?
     This should involve global alignment analysis and in the future also
     array padding.  */
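/* For example (alignment values are target-dependent and purely
   illustrative), a global such as

        static int a[256];

   whose DECL_ALIGN is below the TYPE_ALIGN of the vector type chosen
   for "int" (say 32 vs. 128 bits for a 16-byte vector) has its
   alignment raised, provided vect_can_force_dr_alignment_p allows
   it.  */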

static unsigned int
increase_alignment (void)
{
  varpool_node *vnode;

  vect_location = UNKNOWN_LOCATION;

  /* Increase the alignment of all global arrays for vectorization.  */
  FOR_EACH_DEFINED_VARIABLE (vnode)
    {
      tree vectype, decl = vnode->decl;
      tree t;
      unsigned int alignment;

      t = TREE_TYPE (decl);
      if (TREE_CODE (t) != ARRAY_TYPE)
        continue;
      vectype = get_vectype_for_scalar_type (strip_array_types (t));
      if (!vectype)
        continue;
      alignment = TYPE_ALIGN (vectype);
      if (DECL_ALIGN (decl) >= alignment)
        continue;

      if (vect_can_force_dr_alignment_p (decl, alignment))
        {
          vnode->increase_alignment (TYPE_ALIGN (vectype));
          dump_printf (MSG_NOTE, "Increasing alignment of decl: ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, decl);
          dump_printf (MSG_NOTE, "\n");
        }
    }
  return 0;
}


namespace {

const pass_data pass_data_ipa_increase_alignment =
{
  SIMPLE_IPA_PASS, /* type */
  "increase_alignment", /* name */
  OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
  TV_IPA_OPT, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_ipa_increase_alignment : public simple_ipa_opt_pass
{
public:
  pass_ipa_increase_alignment (gcc::context *ctxt)
    : simple_ipa_opt_pass (pass_data_ipa_increase_alignment, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_section_anchors && flag_tree_loop_vectorize;
    }

  virtual unsigned int execute (function *) { return increase_alignment (); }

}; // class pass_ipa_increase_alignment

} // anon namespace

simple_ipa_opt_pass *
make_pass_ipa_increase_alignment (gcc::context *ctxt)
{
  return new pass_ipa_increase_alignment (ctxt);
}
