Lines Matching defs:loop

164    correspond to deeper loop nesting levels.  */
245 /* A structure describing the main elements of a parallel loop. */
249 struct omp_for_data_loop loop;
305 /* The reduction clause may be nested inside a loop directive.
410 /* Extract the header elements of parallel loop FOR_STMT and store
419 struct omp_for_data_loop *loop;
433 fd->loops = &fd->loop;
498 loop = &fd->loop;
500 loop = loops + i;
502 loop = &dummy_loop;
504 loop->v = gimple_omp_for_index (for_stmt, i);
505 gcc_assert (SSA_VAR_P (loop->v));
506 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
507 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
508 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
509 loop->n1 = gimple_omp_for_initial (for_stmt, i);
511 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
512 loop->n2 = gimple_omp_for_final (for_stmt, i);
513 switch (loop->cond_code)
525 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
526 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
528 loop->n2 = fold_build2_loc (loc,
529 PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
530 build_int_cst (TREE_TYPE (loop->n2), 1));
531 loop->cond_code = LT_EXPR;
534 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
535 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
537 loop->n2 = fold_build2_loc (loc,
538 MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
539 build_int_cst (TREE_TYPE (loop->n2), 1));
540 loop->cond_code = GT_EXPR;
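
The hits at 504-540 come from the loop-header extraction that canonicalizes the condition: a "<=" bound becomes "<" with n2 bumped by one (pointer or integer), and ">=" becomes ">" with n2 lowered by one. A minimal standalone C sketch of the equivalent source-level rewrite (illustrative only, not GCC internals):

    #include <assert.h>

    /* "v <= n2" is rewritten as "v < n2 + 1" so later code only has to
       handle LT/GT conditions; the GE case symmetrically uses n2 - 1.  */
    static int count_le (int n1, int n2, int step)
    {
      int iters = 0;
      for (int v = n1; v <= n2; v += step)
        iters++;
      return iters;
    }

    static int count_lt_canonical (int n1, int n2, int step)
    {
      int iters = 0;
      for (int v = n1; v < n2 + 1; v += step)
        iters++;
      return iters;
    }

    int main (void)
    {
      assert (count_le (0, 9, 2) == count_lt_canonical (0, 9, 2));
      assert (count_le (3, 3, 1) == count_lt_canonical (3, 3, 1));
      return 0;
    }
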
551 loop->step = TREE_OPERAND (t, 1);
554 loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
557 loop->step = TREE_OPERAND (t, 1);
558 loop->step = fold_build1_loc (loc,
559 NEGATE_EXPR, TREE_TYPE (loop->step),
560 loop->step);
571 iter_type = TREE_TYPE (loop->v);
574 < TYPE_PRECISION (TREE_TYPE (loop->v)))
577 (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
581 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
583 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
584 && TYPE_PRECISION (TREE_TYPE (loop->v))
589 if (loop->cond_code == LT_EXPR)
591 PLUS_EXPR, TREE_TYPE (loop->v),
592 loop->n2, loop->step);
594 n = loop->n1;
599 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
604 if (loop->cond_code == LT_EXPR)
606 n1 = loop->n1;
608 PLUS_EXPR, TREE_TYPE (loop->v),
609 loop->n2, loop->step);
614 MINUS_EXPR, TREE_TYPE (loop->v),
615 loop->n2, loop->step);
616 n2 = loop->n1;
628 t = fold_binary (loop->cond_code, boolean_type_node,
629 fold_convert (TREE_TYPE (loop->v), loop->n1),
630 fold_convert (TREE_TYPE (loop->v), loop->n2));
634 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
635 && TREE_CONSTANT (loop->n1)
636 && TREE_CONSTANT (loop->n2)
637 && TREE_CODE (loop->step) == INTEGER_CST)
639 tree itype = TREE_TYPE (loop->v);
643 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
646 fold_convert_loc (loc, itype, loop->step), t);
648 fold_convert_loc (loc, itype, loop->n2));
650 fold_convert_loc (loc, itype, loop->n1));
651 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
656 loop->step)));
659 fold_convert_loc (loc, itype, loop->step));
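
The hits at 628-659 fold the trip count at compile time when n1, n2 and step are constants: step plus an adjustment of -1 (LT) or +1 (GT) is added to n2 - n1, and the sum is divided by step. A standalone sketch of that arithmetic for the LT case, checked against an actual loop (illustrative only):

    #include <assert.h>

    /* Trip count of "for (v = n1; v < n2; v += step)" with step > 0:
       (n2 - n1 + step - 1) / step, i.e. a ceiling division.  */
    static int trip_count_lt (int n1, int n2, int step)
    {
      return (n2 - n1 + step - 1) / step;
    }

    int main (void)
    {
      int iters = 0;
      for (int v = 0; v < 10; v += 3)
        iters++;
      assert (iters == 4 && trip_count_lt (0, 10, 3) == 4);
      assert (trip_count_lt (5, 5, 1) == 0);   /* empty loop */
      return 0;
    }
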
700 fd->loop.v = *collapse_iter;
701 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
702 fd->loop.n2 = *collapse_count;
703 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
704 fd->loop.cond_code = LT_EXPR;
713 fd->chunk_size = build_int_cst (TREE_TYPE (fd->loop.v), 1);
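
The hits at 700-713 show how a collapse > 1 nest is re-driven by a single logical variable: n1 = 0, n2 = the pre-computed element count, step = 1, condition LT. A standalone sketch of the resulting iteration scheme, with the original indices recovered by division and modulo (illustrative only):

    #include <assert.h>

    int main (void)
    {
      enum { N = 4, M = 3 };
      int visited = 0;

      /* One flat loop over 0 .. N*M with step 1 replaces the two-level
         nest; i and j are reconstructed from the logical index.  */
      for (int iter = 0; iter < N * M; iter++)
        {
          int i = iter / M;
          int j = iter % M;
          assert (i >= 0 && i < N && j >= 0 && j < M);
          visited++;
        }
      assert (visited == N * M);
      return 0;
    }
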
773 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
783 if (!is_gimple_min_invariant (fd.loop.n1)
784 || !is_gimple_min_invariant (fd.loop.n2)
785 || !is_gimple_min_invariant (fd.loop.step)
810 n1 = fd.loop.n1;
811 n2 = fd.loop.n2;
834 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
899 /* If this is a combined parallel loop, we need to determine
902 static loops and any kind of ordered loop. In the first
903 case, we already open code the loop so there is no need
905 parallel loop call would still need extra synchronization
2320 /* We need two temporaries with fd.loop.v type (istart/iend)
2326 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
2610 "gang, worker and vector may occur only once in a loop nest");
2614 "gang, worker and vector must occur in this order in a loop nest");
2971 "a loop region with an ordered clause");
2978 "a loop region with an ordered clause");
3807 /* For reduction in SIMD loop, defer adding the
3976 tree n1 = fd->loop.n1;
3977 tree step = fd->loop.step;
3983 && fd->loop.cond_code == GT_EXPR)
5584 loop tree arrange for the child function to fixup loops. */
5650 of the combined collapse > 1 loop constructs, generate code like:
5673 of the combined loop constructs, just initialize COUNTS array
5708 tree t, type = TREE_TYPE (fd->loop.v);
5716 && TREE_CODE (fd->loop.n2) != INTEGER_CST)
5719 isn't supposed to be handled, as the inner loop doesn't
5741 if (SSA_VAR_P (fd->loop.n2)
5774 assign_stmt = gimple_build_assign (fd->loop.n2,
5822 if (SSA_VAR_P (fd->loop.n2))
5827 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
5828 expand_omp_build_assign (gsi, fd->loop.n2, t);
5841 if this loop doesn't have an inner loop construct combined with it.
5842 If it does have an inner loop construct combined with it and the
5853 /* If fd->loop.n2 is constant, then no propagation of the counts
5855 if (TREE_CODE (fd->loop.n2) == INTEGER_CST)
5862 isn't supposed to be handled, as the inner loop doesn't
5884 tree type = TREE_TYPE (fd->loop.v);
6014 loop with any schedule. Given parameters:
6033 If this is a combined omp parallel loop, instead of the call to
6035 If this is gimple_omp_for_combined_p loop, then instead of assigning
6127 type = TREE_TYPE (fd->loop.v);
6140 if (fd->loop.cond_code == LT_EXPR)
6142 n1 = fd->loop.n1;
6143 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
6147 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
6148 n2 = fd->loop.n1;
6191 some loop has zero iterations. But the body shouldn't
6208 /* In a combined parallel loop, emit a call to
6217 /* If this is not a combined parallel loop, emit a call to
6221 t2 = fold_convert (fd->iter_type, fd->loop.step);
6222 t1 = fd->loop.n2;
6223 t0 = fd->loop.n1;
6279 fd->loop.cond_code == LT_EXPR ? 1 : 0);
6301 /* Iteration setup for sequential loop goes in L0_BB. */
6302 tree startvar = fd->loop.v;
6346 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (iend)))
6347 assign_stmt = gimple_build_assign (fd->loop.v, iend);
6349 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, iend);
6358 loop goes in the CONT_BB. */
6368 t = fold_build_pointer_plus (vmain, fd->loop.step);
6370 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
6378 t = build2 (fd->loop.cond_code, boolean_type_node,
6406 /* Add the loop cleanup function. */
6476 struct loop *outer_loop = alloc_loop ();
6483 struct loop *loop = alloc_loop ();
6484 loop->header = l1_bb;
6485 /* The loop may have multiple latches. */
6486 add_loop (loop, outer_loop);
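
The hits from 6014 through 6486 belong to the generic expansion, where the generated code repeatedly asks the runtime for a sub-range [istart, iend) of the iteration space (the two istart/iend temporaries mentioned at 2320) and runs a sequential loop over each range. A standalone sketch of that control shape; get_next_range is a hypothetical stand-in for the runtime start/next calls, not a real API:

    #include <assert.h>

    /* Hypothetical stand-in for the runtime scheduler: hands out the
       iteration space [0, 10) in chunks of 4.  */
    static int get_next_range (int *istart, int *iend)
    {
      static int pos = 0;
      if (pos >= 10)
        return 0;
      *istart = pos;
      *iend = pos + 4 < 10 ? pos + 4 : 10;
      pos = *iend;
      return 1;
    }

    int main (void)
    {
      int istart, iend, sum = 0;

      /* Shape of the expanded loop: fetch a range, run the sequential
         sub-loop over it, repeat until the runtime says "done".  */
      while (get_next_range (&istart, &iend))
        for (int v = istart; v < iend; v++)
          sum += v;
      assert (sum == 45);   /* 0 + 1 + ... + 9 */
      return 0;
    }
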
6493 loop with static schedule and no specified chunk size. Given
6549 itype = type = TREE_TYPE (fd->loop.v);
6586 t = fold_binary (fd->loop.cond_code, boolean_type_node,
6587 fold_convert (type, fd->loop.n1),
6588 fold_convert (type, fd->loop.n2));
6593 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
6596 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
6599 gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
6656 n1 = fd->loop.n1;
6657 n2 = fd->loop.n2;
6658 step = fd->loop.step;
6677 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
6681 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
6734 tree startvar = fd->loop.v;
6777 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
6778 assign_stmt = gimple_build_assign (fd->loop.v, e);
6780 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e);
6788 /* The code controlling the sequential loop replaces the
6809 t = build2 (fd->loop.cond_code, boolean_type_node,
6873 struct loop *loop = alloc_loop ();
6874 loop->header = body_bb;
6876 loop->latch = cont_bb;
6877 add_loop (loop, body_bb->loop_father);
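
The hits from 6493 through 6877 are from the open-coded static schedule without a chunk size, where each thread computes its own contiguous block of the trip count. A standalone sketch of the standard partitioning such a lowering produces; the q/tt arithmetic here is illustrative, not copied from the hits:

    #include <assert.h>

    /* Thread TID's half-open block [start, end) of N iterations divided
       statically over NTHREADS threads; the first N % NTHREADS threads
       each get one extra iteration.  */
    static void static_block (long n, long nthreads, long tid,
                              long *start, long *end)
    {
      long q = n / nthreads;
      long tt = n % nthreads;
      if (tid < tt)
        {
          *start = tid * (q + 1);
          *end = *start + q + 1;
        }
      else
        {
          *start = tt * (q + 1) + (tid - tt) * q;
          *end = *start + q;
        }
    }

    int main (void)
    {
      long covered = 0, s, e;
      for (long tid = 0; tid < 4; tid++)
        {
          static_block (10, 4, tid, &s, &e);
          covered += e - s;
        }
      assert (covered == 10);   /* blocks of 3, 3, 2, 2 cover everything */
      return 0;
    }
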
6883 loop with static schedule and a specified chunk size. Given
6902 if the loop is not entered
6939 itype = type = TREE_TYPE (fd->loop.v);
6980 t = fold_binary (fd->loop.cond_code, boolean_type_node,
6981 fold_convert (type, fd->loop.n1),
6982 fold_convert (type, fd->loop.n2));
6987 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
6990 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
6993 gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
7050 n1 = fd->loop.n1;
7051 n2 = fd->loop.n2;
7052 step = fd->loop.step;
7074 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
7078 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
7138 tree startvar = fd->loop.v;
7182 if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e)))
7183 assign_stmt = gimple_build_assign (fd->loop.v, e);
7185 assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e);
7193 /* The code controlling the sequential loop goes in CONT_BB,
7212 t = build2 (fd->loop.cond_code, boolean_type_node,
7303 /* A special case -- fd->loop.v is not yet computed in
7305 if (t == fd->loop.v)
7342 struct loop *trip_loop = alloc_loop ();
7349 struct loop *loop = alloc_loop ();
7350 loop->header = body_bb;
7352 loop->latch = cont_bb;
7353 add_loop (loop, trip_loop);
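
The hits from 6883 through 7353 cover the static schedule with an explicit chunk size, where an outer "trip" loop (the trip_loop at 7342) walks a thread's chunks round-robin: thread TID takes chunks TID, TID + nthreads, TID + 2*nthreads, and so on. A standalone sketch of that chunk assignment (illustrative only):

    #include <assert.h>

    int main (void)
    {
      enum { N = 10, NTHREADS = 3, CHUNK = 2 };
      int count[NTHREADS] = { 0, 0, 0 };

      for (int tid = 0; tid < NTHREADS; tid++)
        /* Trip loop: the chunk index advances by NTHREADS each time.  */
        for (int trip = tid; trip * CHUNK < N; trip += NTHREADS)
          {
            int s = trip * CHUNK;
            int e = s + CHUNK < N ? s + CHUNK : N;
            for (int v = s; v < e; v++)
              count[tid]++;
          }

      assert (count[0] + count[1] + count[2] == N);
      return 0;
    }
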
7358 /* A subroutine of expand_omp_for. Generate code for _Cilk_for loop.
7465 t = fold_build2 (MULT_EXPR, TREE_TYPE (fd->loop.step),
7466 fold_convert (TREE_TYPE (fd->loop.step), ind_var),
7467 fd->loop.step);
7468 if (POINTER_TYPE_P (TREE_TYPE (fd->loop.n1)))
7469 t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (fd->loop.n1),
7470 fd->loop.n1, fold_convert (sizetype, t));
7472 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loop.n1),
7473 fd->loop.n1, fold_convert (TREE_TYPE (fd->loop.n1), t));
7474 t = fold_convert (TREE_TYPE (fd->loop.v), t);
7475 expand_omp_build_assign (&gsi, fd->loop.v, t);
7517 struct loop *loop = alloc_loop ();
7518 loop->header = l1_bb;
7519 loop->latch = cont_bb;
7520 add_loop (loop, l1_bb->loop_father);
7521 loop->safelen = INT_MAX;
7546 loop. Given parameters:
7623 type = TREE_TYPE (fd->loop.v);
7664 n1 = fd->loop.n1;
7665 n2 = fd->loop.n2;
7676 expand_omp_build_assign (&gsi, fd->loop.v,
7687 expand_omp_build_assign (&gsi, fd->loop.v,
7688 fold_convert (type, fd->loop.n1));
7711 t = fold_build_pointer_plus (fd->loop.v, fd->loop.step);
7713 t = fold_build2 (PLUS_EXPR, type, fd->loop.v, fd->loop.step);
7714 expand_omp_build_assign (&gsi, fd->loop.v, t);
7771 t = build2 (fd->loop.cond_code, boolean_type_node, fd->loop.v, t);
7817 struct loop *loop = alloc_loop ();
7818 loop->header = l1_bb;
7819 loop->latch = cont_bb;
7820 add_loop (loop, l1_bb->loop_father);
7822 loop->safelen = INT_MAX;
7827 loop->safelen = 0;
7830 loop->safelen = INT_MAX;
7832 loop->safelen = tree_to_uhwi (safelen);
7833 if (loop->safelen == 1)
7834 loop->safelen = 0;
7838 loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
7841 /* If not -fno-tree-loop-vectorize, hint that we want to vectorize
7842 the loop. */
7847 && loop->safelen > 1)
7849 loop->force_vectorize = true;
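
The hits at 7817-7849 record the SIMD clause information on the loop structure for the vectorizer: no safelen clause means no limit (INT_MAX), an explicit safelen(1) is stored as 0, and safelen > 1 with loop vectorization enabled sets force_vectorize. A standalone sketch of that mapping, consistent with the hits; has_clause and value are stand-ins for the clause lookup:

    #include <limits.h>
    #include <assert.h>

    static int map_safelen (int has_clause, long long value)
    {
      int safelen;
      if (!has_clause || value > INT_MAX)
        safelen = INT_MAX;      /* no clause, or too large: no limit */
      else
        safelen = (int) value;
      if (safelen == 1)
        safelen = 0;            /* safelen(1): nothing worth vectorizing */
      return safelen;
    }

    int main (void)
    {
      assert (map_safelen (0, 0) == INT_MAX);
      assert (map_safelen (1, 8) == 8);
      assert (map_safelen (1, 1) == 0);
      return 0;
    }
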
7858 /* Expand the OMP loop defined by REGION. */
8444 Let optabs.c take care of expanding any compare-and-swap loop. */
8660 struct loop *loop = alloc_loop ();
8661 loop->header = loop_header;
8662 loop->latch = store_bb;
8663 add_loop (loop, loop_header->loop_father);
8786 as a compare and swap loop. */
8956 loop tree arrange for the child function to fixup loops. */
9785 /* Create for loop.
9803 /* Insert the loop header label here. */
9806 /* Exit loop if ix >= nthreads. */
9812 /* Insert the loop body label here. */
9882 /* Go back to the top of the loop. */
9885 /* Place the loop exit label here. */
10524 for a lastprivate clause. Given a loop control predicate of (V
10537 cond_code = fd->loop.cond_code;
10542 if (tree_fits_shwi_p (fd->loop.step))
10544 HOST_WIDE_INT step = tree_to_shwi (fd->loop.step);
10549 tree n2 = fd->loop.n2;
10560 n2 = fold_convert (TREE_TYPE (n2), outer_fd.loop.n2);
10563 cond = build2 (cond_code, boolean_type_node, fd->loop.v, n2);
10574 vinit = fd->loop.n1;
10576 && tree_fits_shwi_p (fd->loop.n2)
10577 && ! integer_zerop (fd->loop.n2))
10578 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
10584 gimplify_assign (fd->loop.v, vinit, body_p);
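
The hits at 10524-10584 lower the lastprivate bookkeeping for the loop variable: a condition built from fd->loop.v and n2 guards the final assignment, and v is pre-initialized from n1 (or 0 in the special case at 10576-10578) so the check is well-defined even when no iteration runs. At the source level, lastprivate means the listed variable leaves the construct with the value from the sequentially last iteration; a minimal OpenMP example (compile with -fopenmp; plain serial C otherwise):

    #include <assert.h>

    int main (void)
    {
      int last = -1;

      /* After the loop, "last" must hold the value assigned in the
         sequentially last iteration (i == 9), whichever thread ran it.  */
    #pragma omp parallel for lastprivate(last)
      for (int i = 0; i < 10; i++)
        last = i;

      assert (last == 9);
      return 0;
    }
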
10589 /* Lower code for an OMP loop directive. */
10612 /* Move declaration of temporaries in the loop body before we make
10634 /* We need two temporaries with fd.loop.v type (istart/iend)
10640 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
10710 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
10711 fd.loop.v));
10713 /* After the loop, add exit clauses. */
10723 /* Region exit marker goes at the end of the loop body. */
12352 corresponds to the case that the body of the loop
12859 as part of the eventual loop. Create such temporary array and
12880 part of the loop.
13360 /* Mostly annotate the loop for the vectorizer (the rest is done below). */
13361 struct loop *loop = alloc_loop ();
13363 loop->safelen = node->simdclone->simdlen;
13364 loop->force_vectorize = true;
13365 loop->header = body_bb;
13370 gimple_stmt_iterator gsi = gsi_last_bb (loop->header);
13393 make_edge (loop->header, incr_bb, EDGE_TRUE_VALUE);
13394 FALLTHRU_EDGE (loop->header)->flags = EDGE_FALSE_VALUE;
13408 loop->latch = latch_bb;
13601 add_loop (loop, loop->header->loop_father);