/* Generic SSA value propagation engine.
   Copyright (C) 2004-2015 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 3, or (at your option) any
   later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "flags.h"
#include "tm_p.h"
#include "predict.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "dumpfile.h"
#include "bitmap.h"
#include "sbitmap.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa.h"
#include "tree-ssa-propagate.h"
#include "langhooks.h"
#include "value-prof.h"
#include "domwalk.h"
#include "cfgloop.h"
#include "tree-cfgcleanup.h"

/* This file implements a generic value propagation engine based on
   the same propagation used by the SSA-CCP algorithm [1].

   Propagation is performed by simulating the execution of every
   statement that produces the value being propagated.  Simulation
   proceeds as follows:

   1- Initially, all edges of the CFG are marked not executable and
      the CFG worklist is seeded with all the statements in the entry
      basic block (block 0).

   2- Every statement S is simulated with a call to the call-back
      function SSA_PROP_VISIT_STMT.  This evaluation may produce 3
      results:

      SSA_PROP_NOT_INTERESTING: Statement S produces nothing of
	 interest and does not affect any of the work lists.

      SSA_PROP_VARYING: The value produced by S cannot be determined
	 at compile time.  Further simulation of S is not required.
	 If S is a conditional jump, all the outgoing edges for the
	 block are considered executable and added to the work
	 list.

      SSA_PROP_INTERESTING: S produces a value that can be computed
	 at compile time.  Its result can be propagated into the
	 statements that feed from S.  Furthermore, if S is a
	 conditional jump, only the edge known to be taken is added
	 to the work list.  Edges that are known not to execute are
	 never simulated.

   3- PHI nodes are simulated with a call to SSA_PROP_VISIT_PHI.  The
      return value from SSA_PROP_VISIT_PHI has the same semantics as
      described in #2.

   4- Three work lists are kept.  Statements are only added to these
      lists if they produce an SSA_PROP_INTERESTING or an
      SSA_PROP_VARYING result.

	CFG_BLOCKS contains the list of blocks to be simulated.
	Blocks are added to this list if their incoming edges are
	found executable.

	VARYING_SSA_EDGES contains the list of statements that feed
	from statements that produce an SSA_PROP_VARYING result.
	These are simulated first to speed up processing.

	INTERESTING_SSA_EDGES contains the list of statements that
	feed from statements that produce an SSA_PROP_INTERESTING
	result.

   5- Simulation terminates when all three work lists are drained.

   Before calling ssa_propagate, it is important to initialize
   prop_simulate_again_p for every statement in the program.  This
   initialization allows an implementation to specify which
   statements should never be simulated.

   It is also important to compute def-use information before calling
   ssa_propagate.

   References:

     [1] Constant propagation with conditional branches,
	 Wegman and Zadeck, ACM TOPLAS 13(2):181-210, 1991.

     [2] Building an Optimizing Compiler,
	 Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.

     [3] Advanced Compiler Design and Implementation,
	 Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6  */

/* Function pointers used to parameterize the propagation engine.  */
static ssa_prop_visit_stmt_fn ssa_prop_visit_stmt;
static ssa_prop_visit_phi_fn ssa_prop_visit_phi;

/* Keep track of statements that have been added to one of the SSA
   edges worklists.  This flag is used to avoid visiting statements
   unnecessarily when draining an SSA edge worklist.  If while
   simulating a basic block, we find a statement with
   STMT_IN_SSA_EDGE_WORKLIST set, we clear it to prevent SSA edge
   processing from visiting it again.

   NOTE: users of the propagation engine are not allowed to use
   the GF_PLF_1 flag.  */
#define STMT_IN_SSA_EDGE_WORKLIST	GF_PLF_1

/* A bitmap to keep track of executable blocks in the CFG.  */
static sbitmap executable_blocks;

/* Array of control flow edges on the worklist.  */
static vec<basic_block> cfg_blocks;

static unsigned int cfg_blocks_num = 0;
static int cfg_blocks_tail;
static int cfg_blocks_head;

static sbitmap bb_in_list;

/* Worklist of SSA edges which will need reexamination as their
   definition has changed.  SSA edges are def-use edges in the SSA
   web.  For each D-U edge, we store the target statement or PHI node
   U.  */
static vec<gimple> interesting_ssa_edges;

/* Identical to INTERESTING_SSA_EDGES.  For performance reasons, the
   list of SSA edges is split into two.  One contains all SSA edges
   that need to be reexamined because their lattice value changed to
   varying (this worklist), and the other contains all other SSA edges
   to be reexamined (INTERESTING_SSA_EDGES).

   Since most values in the program are VARYING, the ideal situation
   is to move them to that lattice value as quickly as possible.
   Thus, it doesn't make sense to process any other type of lattice
   value until all VARYING values are propagated fully, which is one
   thing using the VARYING worklist achieves.  In addition, if we
   don't use a separate worklist for VARYING edges, we end up with
   situations where lattice values move from
   UNDEFINED->INTERESTING->VARYING instead of UNDEFINED->VARYING.  */
static vec<gimple> varying_ssa_edges;
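
/* As a sketch of how a client pass drives this engine: the callback
   names below are hypothetical, and a real pass would additionally
   maintain its own lattice of values (compare tree-ssa-ccp.c).

     static enum ssa_prop_result
     my_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
     {
       ... evaluate STMT against the lattice; on success, set *OUTPUT_P
	   to the SSA name whose lattice value changed, or *TAKEN_EDGE_P
	   to the single outgoing edge known to be taken ...
       return SSA_PROP_NOT_INTERESTING;
     }

     static enum ssa_prop_result
     my_visit_phi (gphi *phi)
     {
       ... merge the lattice values of the PHI arguments ...
       return SSA_PROP_VARYING;
     }

     ...
     ssa_propagate (my_visit_stmt, my_visit_phi);
*/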

/* Return true if the block worklist is empty.  */

static inline bool
cfg_blocks_empty_p (void)
{
  return (cfg_blocks_num == 0);
}


/* Add a basic block to the worklist.  The block must not be already
   in the worklist, and it must not be the ENTRY or EXIT block.  */

static void
cfg_blocks_add (basic_block bb)
{
  bool head = false;

  gcc_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
	      && bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
  gcc_assert (!bitmap_bit_p (bb_in_list, bb->index));

  if (cfg_blocks_empty_p ())
    {
      cfg_blocks_tail = cfg_blocks_head = 0;
      cfg_blocks_num = 1;
    }
  else
    {
      cfg_blocks_num++;
      if (cfg_blocks_num > cfg_blocks.length ())
	{
	  /* We have to grow the array now.  Adjust the queue to occupy
	     the full space of the original array.  We do not need to
	     initialize the newly allocated portion of the array
	     because we keep track of CFG_BLOCKS_HEAD and
	     CFG_BLOCKS_TAIL.  */
	  cfg_blocks_tail = cfg_blocks.length ();
	  cfg_blocks_head = 0;
	  cfg_blocks.safe_grow (2 * cfg_blocks_tail);
	}
      /* Minor optimization: we prefer to see blocks with more
	 predecessors later, because there is more of a chance that
	 the incoming edges will be executable.  */
      else if (EDGE_COUNT (bb->preds)
	       >= EDGE_COUNT (cfg_blocks[cfg_blocks_head]->preds))
	cfg_blocks_tail = ((cfg_blocks_tail + 1) % cfg_blocks.length ());
      else
	{
	  if (cfg_blocks_head == 0)
	    cfg_blocks_head = cfg_blocks.length ();
	  --cfg_blocks_head;
	  head = true;
	}
    }

  cfg_blocks[head ? cfg_blocks_head : cfg_blocks_tail] = bb;
  bitmap_set_bit (bb_in_list, bb->index);
}


/* Remove a block from the worklist.  */

static basic_block
cfg_blocks_get (void)
{
  basic_block bb;

  bb = cfg_blocks[cfg_blocks_head];

  gcc_assert (!cfg_blocks_empty_p ());
  gcc_assert (bb);

  cfg_blocks_head = ((cfg_blocks_head + 1) % cfg_blocks.length ());
  --cfg_blocks_num;
  bitmap_clear_bit (bb_in_list, bb->index);

  return bb;
}


/* We have just defined a new value for VAR.  If IS_VARYING is true,
   add all immediate uses of VAR to VARYING_SSA_EDGES, otherwise add
   them to INTERESTING_SSA_EDGES.  */

static void
add_ssa_edge (tree var, bool is_varying)
{
  imm_use_iterator iter;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, iter, var)
    {
      gimple use_stmt = USE_STMT (use_p);

      if (prop_simulate_again_p (use_stmt)
	  && !gimple_plf (use_stmt, STMT_IN_SSA_EDGE_WORKLIST))
	{
	  gimple_set_plf (use_stmt, STMT_IN_SSA_EDGE_WORKLIST, true);
	  if (is_varying)
	    varying_ssa_edges.safe_push (use_stmt);
	  else
	    interesting_ssa_edges.safe_push (use_stmt);
	}
    }
}
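
/* For instance, given the illustrative GIMPLE fragment (not part of
   this file)

     a_2 = b_1 + 4;
     if (a_2 > 10) ...

   discovering a new lattice value for a_2 and calling
   add_ssa_edge (a_2, ...) queues the conditional jump, the only
   immediate use of a_2, for (re-)simulation.  */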

/* Add edge E to the control flow worklist.  */

static void
add_control_edge (edge e)
{
  basic_block bb = e->dest;
  if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return;

  /* If the edge had already been executed, skip it.  */
  if (e->flags & EDGE_EXECUTABLE)
    return;

  e->flags |= EDGE_EXECUTABLE;

  /* If the block is already in the list, we're done.  */
  if (bitmap_bit_p (bb_in_list, bb->index))
    return;

  cfg_blocks_add (bb);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\nAdding Destination of edge (%d -> %d) to worklist\n",
	     e->src->index, e->dest->index);
}


/* Simulate the execution of STMT and update the work lists accordingly.  */

static void
simulate_stmt (gimple stmt)
{
  enum ssa_prop_result val = SSA_PROP_NOT_INTERESTING;
  edge taken_edge = NULL;
  tree output_name = NULL_TREE;

  /* Don't bother visiting statements that are already
     considered varying by the propagator.  */
  if (!prop_simulate_again_p (stmt))
    return;

  if (gimple_code (stmt) == GIMPLE_PHI)
    {
      val = ssa_prop_visit_phi (as_a <gphi *> (stmt));
      output_name = gimple_phi_result (stmt);
    }
  else
    val = ssa_prop_visit_stmt (stmt, &taken_edge, &output_name);

  if (val == SSA_PROP_VARYING)
    {
      prop_set_simulate_again (stmt, false);

      /* If the statement produced a new varying value, add the SSA
	 edges coming out of OUTPUT_NAME.  */
      if (output_name)
	add_ssa_edge (output_name, true);

      /* If STMT transfers control out of its basic block, add
	 all outgoing edges to the work list.  */
      if (stmt_ends_bb_p (stmt))
	{
	  edge e;
	  edge_iterator ei;
	  basic_block bb = gimple_bb (stmt);
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    add_control_edge (e);
	}
    }
  else if (val == SSA_PROP_INTERESTING)
    {
      /* If the statement produced a new value, add the SSA edges coming
	 out of OUTPUT_NAME.  */
      if (output_name)
	add_ssa_edge (output_name, false);

      /* If we know which edge is going to be taken out of this block,
	 add it to the CFG work list.  */
      if (taken_edge)
	add_control_edge (taken_edge);
    }
}

/* Process an SSA edge worklist.  WORKLIST is the SSA edge worklist to
   drain.  This pops statements off the given WORKLIST and processes
   them until there are no more statements on WORKLIST.
   We take a pointer to WORKLIST because it may be reallocated when an
   SSA edge is added to it in simulate_stmt.  */

static void
process_ssa_edge_worklist (vec<gimple> *worklist)
{
  /* Drain the entire worklist.  */
  while (worklist->length () > 0)
    {
      basic_block bb;

      /* Pull the statement to simulate off the worklist.  */
      gimple stmt = worklist->pop ();

      /* If this statement was already visited by simulate_block, then
	 we don't need to visit it again here.  */
      if (!gimple_plf (stmt, STMT_IN_SSA_EDGE_WORKLIST))
	continue;

      /* STMT is no longer in a worklist.  */
      gimple_set_plf (stmt, STMT_IN_SSA_EDGE_WORKLIST, false);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "\nSimulating statement (from ssa_edges): ");
	  print_gimple_stmt (dump_file, stmt, 0, dump_flags);
	}

      bb = gimple_bb (stmt);

      /* PHI nodes are always visited, regardless of whether or not
	 the destination block is executable.  Otherwise, visit the
	 statement only if its block is marked executable.  */
      if (gimple_code (stmt) == GIMPLE_PHI
	  || bitmap_bit_p (executable_blocks, bb->index))
	simulate_stmt (stmt);
    }
}

/* Simulate the execution of BLOCK.  Evaluate the statement associated
   with each variable reference inside the block.  */

static void
simulate_block (basic_block block)
{
  gimple_stmt_iterator gsi;

  /* There is nothing to do for the exit block.  */
  if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\nSimulating block %d\n", block->index);

  /* Always simulate PHI nodes, even if we have simulated this block
     before.  */
  for (gsi = gsi_start_phis (block); !gsi_end_p (gsi); gsi_next (&gsi))
    simulate_stmt (gsi_stmt (gsi));

  /* If this is the first time we've simulated this block, then we
     must simulate each of its statements.  */
  if (!bitmap_bit_p (executable_blocks, block->index))
    {
      gimple_stmt_iterator j;
      unsigned int normal_edge_count;
      edge e, normal_edge;
      edge_iterator ei;

      /* Note that we have simulated this block.  */
      bitmap_set_bit (executable_blocks, block->index);

      for (j = gsi_start_bb (block); !gsi_end_p (j); gsi_next (&j))
	{
	  gimple stmt = gsi_stmt (j);

	  /* If this statement is already in the worklist then
	     "cancel" it.  The reevaluation implied by the worklist
	     entry will produce the same value we generate here and
	     thus reevaluating it again from the worklist is
	     pointless.  */
	  if (gimple_plf (stmt, STMT_IN_SSA_EDGE_WORKLIST))
	    gimple_set_plf (stmt, STMT_IN_SSA_EDGE_WORKLIST, false);

	  simulate_stmt (stmt);
	}

      /* We cannot predict when abnormal and EH edges will be executed, so
	 once a block is considered executable, we consider any
	 outgoing abnormal edges as executable.

	 TODO: This is not exactly true.  Simplifying a statement might
	 prove it non-throwing, and a computed goto can be handled when
	 the destination is known.

	 At the same time, if this block has only one successor that is
	 reached by non-abnormal edges, then add that successor to the
	 worklist.  */
      normal_edge_count = 0;
      normal_edge = NULL;
      FOR_EACH_EDGE (e, ei, block->succs)
	{
	  if (e->flags & (EDGE_ABNORMAL | EDGE_EH))
	    add_control_edge (e);
	  else
	    {
	      normal_edge_count++;
	      normal_edge = e;
	    }
	}

      if (normal_edge_count == 1)
	add_control_edge (normal_edge);
    }
}


/* Initialize local data structures and work lists.  */

static void
ssa_prop_init (void)
{
  edge e;
  edge_iterator ei;
  basic_block bb;

  /* Worklists of SSA edges.  */
  interesting_ssa_edges.create (20);
  varying_ssa_edges.create (20);

  executable_blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
  bitmap_clear (executable_blocks);

  bb_in_list = sbitmap_alloc (last_basic_block_for_fn (cfun));
  bitmap_clear (bb_in_list);

  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_immediate_uses (dump_file);

  cfg_blocks.create (20);
  cfg_blocks.safe_grow_cleared (20);

  /* Initially assume that every edge in the CFG is not executable
     (including the edges coming out of the entry block).  */
  FOR_ALL_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator si;

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	gimple_set_plf (gsi_stmt (si), STMT_IN_SSA_EDGE_WORKLIST, false);

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	gimple_set_plf (gsi_stmt (si), STMT_IN_SSA_EDGE_WORKLIST, false);

      FOR_EACH_EDGE (e, ei, bb->succs)
	e->flags &= ~EDGE_EXECUTABLE;
    }

  /* Seed the algorithm by adding the successors of the entry block to the
     edge worklist.  */
  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
    add_control_edge (e);
}


/* Free allocated storage.  */

static void
ssa_prop_fini (void)
{
  interesting_ssa_edges.release ();
  varying_ssa_edges.release ();
  cfg_blocks.release ();
  sbitmap_free (bb_in_list);
  sbitmap_free (executable_blocks);
}


/* Return true if EXPR is an acceptable right-hand-side for a
   GIMPLE assignment.  We validate the entire tree, not just
   the root node, thus catching expressions that embed complex
   operands that are not permitted in GIMPLE.  This function
   is needed because the folding routines in fold-const.c
   may return such expressions in some cases, e.g., an array
   access with an embedded index addition.  It may make more
   sense to have folding routines that are sensitive to the
   constraints on GIMPLE operands, rather than abandoning any
   attempt to fold if the usual folding turns out to be too
   aggressive.  */

bool
valid_gimple_rhs_p (tree expr)
{
  enum tree_code code = TREE_CODE (expr);

  switch (TREE_CODE_CLASS (code))
    {
    case tcc_declaration:
      if (!is_gimple_variable (expr))
	return false;
      break;

    case tcc_constant:
      /* All constants are ok.  */
      break;

    case tcc_comparison:
      /* GENERIC allows comparisons with non-boolean types, reject
	 those for GIMPLE.  Let vector-typed comparisons pass - rules
	 for GENERIC and GIMPLE are the same here.  */
      if (!(INTEGRAL_TYPE_P (TREE_TYPE (expr))
	    && (TREE_CODE (TREE_TYPE (expr)) == BOOLEAN_TYPE
		|| TYPE_PRECISION (TREE_TYPE (expr)) == 1))
	  && ! VECTOR_TYPE_P (TREE_TYPE (expr)))
	return false;

      /* Fallthru.  */
    case tcc_binary:
      if (!is_gimple_val (TREE_OPERAND (expr, 0))
	  || !is_gimple_val (TREE_OPERAND (expr, 1)))
	return false;
      break;

    case tcc_unary:
      if (!is_gimple_val (TREE_OPERAND (expr, 0)))
	return false;
      break;

    case tcc_expression:
      switch (code)
	{
	case ADDR_EXPR:
	  {
	    tree t;
	    if (is_gimple_min_invariant (expr))
	      return true;
	    t = TREE_OPERAND (expr, 0);
	    while (handled_component_p (t))
	      {
		/* ??? More checks needed, see the GIMPLE verifier.  */
		if ((TREE_CODE (t) == ARRAY_REF
		     || TREE_CODE (t) == ARRAY_RANGE_REF)
		    && !is_gimple_val (TREE_OPERAND (t, 1)))
		  return false;
		t = TREE_OPERAND (t, 0);
	      }
	    if (!is_gimple_id (t))
	      return false;
	  }
	  break;

	default:
	  if (get_gimple_rhs_class (code) == GIMPLE_TERNARY_RHS)
	    {
	      if (((code == VEC_COND_EXPR || code == COND_EXPR)
		   ? !is_gimple_condexpr (TREE_OPERAND (expr, 0))
		   : !is_gimple_val (TREE_OPERAND (expr, 0)))
		  || !is_gimple_val (TREE_OPERAND (expr, 1))
		  || !is_gimple_val (TREE_OPERAND (expr, 2)))
		return false;
	      break;
	    }
	  return false;
	}
      break;

    case tcc_vl_exp:
      return false;

    case tcc_exceptional:
      if (code == CONSTRUCTOR)
	{
	  unsigned i;
	  tree elt;
	  FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (expr), i, elt)
	    if (!is_gimple_val (elt))
	      return false;
	  return true;
	}
      if (code != SSA_NAME)
	return false;
      break;

    case tcc_reference:
      if (code == BIT_FIELD_REF)
	return is_gimple_val (TREE_OPERAND (expr, 0));
      return false;

    default:
      return false;
    }

  return true;
}
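
/* For example (illustrative trees): a boolean-typed comparison
   "a_1 < b_2" is a valid GIMPLE RHS, whereas "a_1 < b_2 + 1" is not,
   because the second comparison operand is itself an expression rather
   than a gimple value.  Folding may produce the latter, which is why
   callers check with valid_gimple_rhs_p before substituting.  */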

/* Return true if EXPR is a CALL_EXPR suitable for representation
   as a single GIMPLE_CALL statement.  If the arguments require
   further gimplification, return false.  */

static bool
valid_gimple_call_p (tree expr)
{
  unsigned i, nargs;

  if (TREE_CODE (expr) != CALL_EXPR)
    return false;

  nargs = call_expr_nargs (expr);
  for (i = 0; i < nargs; i++)
    {
      tree arg = CALL_EXPR_ARG (expr, i);
      if (is_gimple_reg_type (TREE_TYPE (arg)))
	{
	  if (!is_gimple_val (arg))
	    return false;
	}
      else
	if (!is_gimple_lvalue (arg))
	  return false;
    }

  return true;
}


/* Make SSA names defined by OLD_STMT point to NEW_STMT
   as their defining statement.  */

void
move_ssa_defining_stmt_for_defs (gimple new_stmt, gimple old_stmt)
{
  tree var;
  ssa_op_iter iter;

  if (gimple_in_ssa_p (cfun))
    {
      /* Make defined SSA_NAMEs point to the new
	 statement as their definition.  */
      FOR_EACH_SSA_TREE_OPERAND (var, old_stmt, iter, SSA_OP_ALL_DEFS)
	{
	  if (TREE_CODE (var) == SSA_NAME)
	    SSA_NAME_DEF_STMT (var) = new_stmt;
	}
    }
}

/* Helper function for update_gimple_call and update_call_from_tree.
   A GIMPLE_CALL STMT is being replaced with GIMPLE_CALL NEW_STMT.  */

static void
finish_update_gimple_call (gimple_stmt_iterator *si_p, gimple new_stmt,
			   gimple stmt)
{
  gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
  move_ssa_defining_stmt_for_defs (new_stmt, stmt);
  gimple_set_vuse (new_stmt, gimple_vuse (stmt));
  gimple_set_vdef (new_stmt, gimple_vdef (stmt));
  gimple_set_location (new_stmt, gimple_location (stmt));
  if (gimple_block (new_stmt) == NULL_TREE)
    gimple_set_block (new_stmt, gimple_block (stmt));
  gsi_replace (si_p, new_stmt, false);
}

/* Update a GIMPLE_CALL statement at iterator *SI_P to call FN with
   NARGS arguments; the arguments, already in GIMPLE form, follow
   NARGS in the variable argument list.  */

bool
update_gimple_call (gimple_stmt_iterator *si_p, tree fn, int nargs, ...)
{
  va_list ap;
  gcall *new_stmt, *stmt = as_a <gcall *> (gsi_stmt (*si_p));

  gcc_assert (is_gimple_call (stmt));
  va_start (ap, nargs);
  new_stmt = gimple_build_call_valist (fn, nargs, ap);
  finish_update_gimple_call (si_p, new_stmt, stmt);
  va_end (ap);
  return true;
}
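
/* As a usage sketch (the replacement shown is hypothetical; any fndecl
   and GIMPLE-valued arguments work the same way):

     tree fndecl = builtin_decl_explicit (BUILT_IN_MEMCPY);
     update_gimple_call (&gsi, fndecl, 3, dest, src, len);

   The LHS, virtual operands and location of the old call are carried
   over by finish_update_gimple_call.  */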

/* Update a GIMPLE_CALL statement at iterator *SI_P to reflect the
   value of EXPR, which is expected to be the result of folding the
   call.  This can only be done if EXPR is a CALL_EXPR with valid
   GIMPLE operands as arguments, or if it is a suitable RHS expression
   for a GIMPLE_ASSIGN.  More complex expressions will require
   gimplification, which will introduce additional statements.  In this
   event, no update is performed, and the function returns false.
   Note that we cannot mutate a GIMPLE_CALL in-place, so we always
   replace the statement at *SI_P with an entirely new statement.
   The new statement need not be a call, e.g., if the original call
   folded to a constant.  */

bool
update_call_from_tree (gimple_stmt_iterator *si_p, tree expr)
{
  gimple stmt = gsi_stmt (*si_p);

  if (valid_gimple_call_p (expr))
    {
      /* The call has simplified to another call.  */
      tree fn = CALL_EXPR_FN (expr);
      unsigned i;
      unsigned nargs = call_expr_nargs (expr);
      vec<tree> args = vNULL;
      gcall *new_stmt;

      if (nargs > 0)
	{
	  args.create (nargs);
	  args.safe_grow_cleared (nargs);

	  for (i = 0; i < nargs; i++)
	    args[i] = CALL_EXPR_ARG (expr, i);
	}

      new_stmt = gimple_build_call_vec (fn, args);
      finish_update_gimple_call (si_p, new_stmt, stmt);
      args.release ();

      return true;
    }
  else if (valid_gimple_rhs_p (expr))
    {
      tree lhs = gimple_call_lhs (stmt);
      gimple new_stmt;

      /* The call has simplified to an expression
	 that cannot be represented as a GIMPLE_CALL.  */
      if (lhs)
	{
	  /* A value is expected.
	     Introduce a new GIMPLE_ASSIGN statement.  */
	  STRIP_USELESS_TYPE_CONVERSION (expr);
	  new_stmt = gimple_build_assign (lhs, expr);
	  move_ssa_defining_stmt_for_defs (new_stmt, stmt);
	  gimple_set_vuse (new_stmt, gimple_vuse (stmt));
	  gimple_set_vdef (new_stmt, gimple_vdef (stmt));
	}
      else if (!TREE_SIDE_EFFECTS (expr))
	{
	  /* No value is expected, and EXPR has no effect.
	     Replace it with an empty statement.  */
	  new_stmt = gimple_build_nop ();
	  if (gimple_in_ssa_p (cfun))
	    {
	      unlink_stmt_vdef (stmt);
	      release_defs (stmt);
	    }
	}
      else
	{
	  /* No value is expected, but EXPR has an effect,
	     e.g., it could be a reference to a volatile
	     variable.  Create an assignment statement
	     with a dummy (unused) lhs variable.  */
	  STRIP_USELESS_TYPE_CONVERSION (expr);
	  if (gimple_in_ssa_p (cfun))
	    lhs = make_ssa_name (TREE_TYPE (expr));
	  else
	    lhs = create_tmp_var (TREE_TYPE (expr));
	  new_stmt = gimple_build_assign (lhs, expr);
	  gimple_set_vuse (new_stmt, gimple_vuse (stmt));
	  gimple_set_vdef (new_stmt, gimple_vdef (stmt));
	  move_ssa_defining_stmt_for_defs (new_stmt, stmt);
	}
      gimple_set_location (new_stmt, gimple_location (stmt));
      gsi_replace (si_p, new_stmt, false);
      return true;
    }
  else
    /* The call simplified to an expression that is
       not a valid GIMPLE RHS.  */
    return false;
}
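
/* For example, if a call "x = strlen ("ab")" has been folded to the
   constant 2, a (hypothetical) caller can commit the folding with:

     tree expr = build_int_cst (size_type_node, 2);
     bool ok = update_call_from_tree (&gsi, expr);

   which replaces the GIMPLE_CALL by the assignment "x = 2".  */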

/* Entry point to the propagation engine.

   VISIT_STMT is called for every statement visited.
   VISIT_PHI is called for every PHI node visited.  */

void
ssa_propagate (ssa_prop_visit_stmt_fn visit_stmt,
	       ssa_prop_visit_phi_fn visit_phi)
{
  ssa_prop_visit_stmt = visit_stmt;
  ssa_prop_visit_phi = visit_phi;

  ssa_prop_init ();

  /* Iterate until the worklists are empty.  */
  while (!cfg_blocks_empty_p ()
	 || interesting_ssa_edges.length () > 0
	 || varying_ssa_edges.length () > 0)
    {
      if (!cfg_blocks_empty_p ())
	{
	  /* Pull the next block to simulate off the worklist.  */
	  basic_block dest_block = cfg_blocks_get ();
	  simulate_block (dest_block);
	}

      /* In order to move things to varying as quickly as
	 possible, process the VARYING_SSA_EDGES worklist first.  */
      process_ssa_edge_worklist (&varying_ssa_edges);

      /* Now process the INTERESTING_SSA_EDGES worklist.  */
      process_ssa_edge_worklist (&interesting_ssa_edges);
    }

  ssa_prop_fini ();
}


/* Return true if STMT is of the form 'mem_ref = RHS', where 'mem_ref'
   is a non-volatile pointer dereference, a structure reference or a
   reference to a single _DECL.  Ignore volatile memory references
   because they are not interesting for the optimizers.  */

bool
stmt_makes_single_store (gimple stmt)
{
  tree lhs;

  if (gimple_code (stmt) != GIMPLE_ASSIGN
      && gimple_code (stmt) != GIMPLE_CALL)
    return false;

  if (!gimple_vdef (stmt))
    return false;

  lhs = gimple_get_lhs (stmt);

  /* A call statement may have a null LHS.  */
  if (!lhs)
    return false;

  return (!TREE_THIS_VOLATILE (lhs)
	  && (DECL_P (lhs)
	      || REFERENCE_CLASS_P (lhs)));
}


/* Propagation statistics.  */
struct prop_stats_d
{
  long num_const_prop;
  long num_copy_prop;
  long num_stmts_folded;
  long num_dce;
};

static struct prop_stats_d prop_stats;

/* Replace USE references in statement STMT with the values returned
   by GET_VALUE.  Return true if at least one reference was replaced.  */

static bool
replace_uses_in (gimple stmt, ssa_prop_get_value_fn get_value)
{
  bool replaced = false;
  use_operand_p use;
  ssa_op_iter iter;

  FOR_EACH_SSA_USE_OPERAND (use, stmt, iter, SSA_OP_USE)
    {
      tree tuse = USE_FROM_PTR (use);
      tree val = (*get_value) (tuse);

      if (val == tuse || val == NULL_TREE)
	continue;

      if (gimple_code (stmt) == GIMPLE_ASM
	  && !may_propagate_copy_into_asm (tuse))
	continue;

      if (!may_propagate_copy (tuse, val))
	continue;

      if (TREE_CODE (val) != SSA_NAME)
	prop_stats.num_const_prop++;
      else
	prop_stats.num_copy_prop++;

      propagate_value (use, val);

      replaced = true;
    }

  return replaced;
}
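
/* For instance (illustrative only): if GET_VALUE maps b_3 to the
   constant 5, replace_uses_in rewrites "a_4 = b_3 * 2" into
   "a_4 = 5 * 2", counts one constant propagation, and returns true so
   the caller knows the statement should now be folded.  */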

/* Replace propagated values into all the arguments for PHI using the
   values returned by GET_VALUE.  */

static bool
replace_phi_args_in (gphi *phi, ssa_prop_get_value_fn get_value)
{
  size_t i;
  bool replaced = false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Folding PHI node: ");
      print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
    }

  basic_block bb = gimple_bb (phi);
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      tree arg = gimple_phi_arg_def (phi, i);

      if (TREE_CODE (arg) == SSA_NAME)
	{
	  tree val = (*get_value) (arg);

	  if (val && val != arg && may_propagate_copy (arg, val))
	    {
	      edge e = gimple_phi_arg_edge (phi, i);

	      /* Avoid propagating constants into loop latch edge
		 PHI arguments as this makes coalescing the copy
		 across this edge impossible.  Restrict this to
		 arguments defined by an assert; otherwise the
		 defining stmt would get removed without its uses
		 being replaced.  */
	      if (TREE_CODE (val) != SSA_NAME
		  && bb->loop_father->header == bb
		  && dominated_by_p (CDI_DOMINATORS, e->src, bb)
		  && is_gimple_assign (SSA_NAME_DEF_STMT (arg))
		  && (gimple_assign_rhs_code (SSA_NAME_DEF_STMT (arg))
		      == ASSERT_EXPR))
		continue;

	      if (TREE_CODE (val) != SSA_NAME)
		prop_stats.num_const_prop++;
	      else
		prop_stats.num_copy_prop++;

	      propagate_value (PHI_ARG_DEF_PTR (phi, i), val);
	      replaced = true;

	      /* If we propagated a copy and this argument flows
		 through an abnormal edge, update the replacement
		 accordingly.  */
	      if (TREE_CODE (val) == SSA_NAME
		  && e->flags & EDGE_ABNORMAL
		  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val))
		{
		  /* This can only occur for virtual operands, since
		     for the real ones SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val)
		     would prevent replacement.  */
		  gcc_checking_assert (virtual_operand_p (val));
		  SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val) = 1;
		}
	    }
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (!replaced)
	fprintf (dump_file, "No folding possible\n");
      else
	{
	  fprintf (dump_file, "Folded into: ");
	  print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
	  fprintf (dump_file, "\n");
	}
    }

  return replaced;
}


class substitute_and_fold_dom_walker : public dom_walker
{
public:
  substitute_and_fold_dom_walker (cdi_direction direction,
				  ssa_prop_get_value_fn get_value_fn_,
				  ssa_prop_fold_stmt_fn fold_fn_,
				  bool do_dce_)
    : dom_walker (direction), get_value_fn (get_value_fn_),
      fold_fn (fold_fn_), do_dce (do_dce_), something_changed (false)
  {
    stmts_to_remove.create (0);
    stmts_to_fixup.create (0);
    need_eh_cleanup = BITMAP_ALLOC (NULL);
  }
  ~substitute_and_fold_dom_walker ()
  {
    stmts_to_remove.release ();
    stmts_to_fixup.release ();
    BITMAP_FREE (need_eh_cleanup);
  }

  virtual void before_dom_children (basic_block);
  virtual void after_dom_children (basic_block) {}

  ssa_prop_get_value_fn get_value_fn;
  ssa_prop_fold_stmt_fn fold_fn;
  bool do_dce;
  bool something_changed;
  vec<gimple> stmts_to_remove;
  vec<gimple> stmts_to_fixup;
  bitmap need_eh_cleanup;
};

void
substitute_and_fold_dom_walker::before_dom_children (basic_block bb)
{
  /* Propagate known values into PHI nodes.  */
  for (gphi_iterator i = gsi_start_phis (bb);
       !gsi_end_p (i);
       gsi_next (&i))
    {
      gphi *phi = i.phi ();
      tree res = gimple_phi_result (phi);
      if (virtual_operand_p (res))
	continue;
      if (do_dce
	  && res && TREE_CODE (res) == SSA_NAME)
	{
	  tree sprime = get_value_fn (res);
	  if (sprime
	      && sprime != res
	      && may_propagate_copy (res, sprime))
	    {
	      stmts_to_remove.safe_push (phi);
	      continue;
	    }
	}
      something_changed |= replace_phi_args_in (phi, get_value_fn);
    }

  /* Propagate known values into stmts.  In some cases this exposes
     more trivially deletable stmts.  */
  for (gimple_stmt_iterator i = gsi_start_bb (bb);
       !gsi_end_p (i);
       gsi_next (&i))
    {
      bool did_replace;
      gimple stmt = gsi_stmt (i);
      enum gimple_code code = gimple_code (stmt);

      /* Ignore ASSERT_EXPRs.  They are used by VRP to generate
	 range information for names and they are discarded
	 afterwards.  */

      if (code == GIMPLE_ASSIGN
	  && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR)
	continue;

      /* There is no point propagating into a stmt whose LHS we have
	 a value for, since we can propagate that value into all of
	 its uses.  Mark it for removal instead.  */
      tree lhs = gimple_get_lhs (stmt);
      if (do_dce
	  && lhs && TREE_CODE (lhs) == SSA_NAME)
	{
	  tree sprime = get_value_fn (lhs);
	  if (sprime
	      && sprime != lhs
	      && may_propagate_copy (lhs, sprime)
	      && !stmt_could_throw_p (stmt)
	      && !gimple_has_side_effects (stmt))
	    {
	      stmts_to_remove.safe_push (stmt);
	      continue;
	    }
	}

      /* Replace the statement with its folded version and mark it
	 folded.  */
      did_replace = false;
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Folding statement: ");
	  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	}

      gimple old_stmt = stmt;
      bool was_noreturn = (is_gimple_call (stmt)
			   && gimple_call_noreturn_p (stmt));

      /* Some statements may be simplified using propagator
	 specific information.  Do this before propagating
	 into the stmt to not disturb pass specific information.  */
      if (fold_fn
	  && (*fold_fn) (&i))
	{
	  did_replace = true;
	  prop_stats.num_stmts_folded++;
	  stmt = gsi_stmt (i);
	  update_stmt (stmt);
	}

      /* Replace real uses in the statement.  */
      did_replace |= replace_uses_in (stmt, get_value_fn);

      /* If we made a replacement, fold the statement.  */
      if (did_replace)
	fold_stmt (&i, follow_single_use_edges);

      /* Now cleanup.  */
      if (did_replace)
	{
	  stmt = gsi_stmt (i);

	  /* If we cleaned up EH information from the statement,
	     remove EH edges.  */
	  if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
	    bitmap_set_bit (need_eh_cleanup, bb->index);

	  /* If we turned a call that was not noreturn into one that
	     is, schedule it for fixup.  */
	  if (!was_noreturn
	      && is_gimple_call (stmt)
	      && gimple_call_noreturn_p (stmt))
	    stmts_to_fixup.safe_push (stmt);

	  if (is_gimple_assign (stmt)
	      && (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
		  == GIMPLE_SINGLE_RHS))
	    {
	      tree rhs = gimple_assign_rhs1 (stmt);

	      if (TREE_CODE (rhs) == ADDR_EXPR)
		recompute_tree_invariant_for_addr_expr (rhs);
	    }

	  /* Determine what needs to be done to update the SSA form.  */
	  update_stmt (stmt);
	  if (!is_gimple_debug (stmt))
	    something_changed = true;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  if (did_replace)
	    {
	      fprintf (dump_file, "Folded into: ");
	      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	      fprintf (dump_file, "\n");
	    }
	  else
	    fprintf (dump_file, "Not folded\n");
	}
    }
}


/* Perform final substitution and folding of propagated values.

   GET_VALUE_FN, given an SSA name, returns the single value that
   should be substituted at every use of that name, or NULL_TREE if
   nothing is known about it.

   If FOLD_FN is non-NULL the function will be invoked on all statements
   before propagating values for pass specific simplification.

   DO_DCE is true if trivially dead stmts can be removed.

   Return TRUE when something changed.  */

bool
substitute_and_fold (ssa_prop_get_value_fn get_value_fn,
		     ssa_prop_fold_stmt_fn fold_fn,
		     bool do_dce)
{
  gcc_assert (get_value_fn);

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\nSubstituting values and folding statements\n\n");

  memset (&prop_stats, 0, sizeof (prop_stats));

  calculate_dominance_info (CDI_DOMINATORS);
  substitute_and_fold_dom_walker walker (CDI_DOMINATORS,
					 get_value_fn, fold_fn, do_dce);
  walker.walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));

  /* We cannot remove stmts during the BB walk, especially not release
     SSA names there as that destroys the lattice of our callers.
     Remove stmts in reverse order to make debug stmt creation possible.  */
  while (!walker.stmts_to_remove.is_empty ())
    {
      gimple stmt = walker.stmts_to_remove.pop ();
      if (dump_file && dump_flags & TDF_DETAILS)
	{
	  fprintf (dump_file, "Removing dead stmt ");
	  print_gimple_stmt (dump_file, stmt, 0, 0);
	  fprintf (dump_file, "\n");
	}
      prop_stats.num_dce++;
      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
      if (gimple_code (stmt) == GIMPLE_PHI)
	remove_phi_node (&gsi, true);
      else
	{
	  unlink_stmt_vdef (stmt);
	  gsi_remove (&gsi, true);
	  release_defs (stmt);
	}
    }

  if (!bitmap_empty_p (walker.need_eh_cleanup))
    gimple_purge_all_dead_eh_edges (walker.need_eh_cleanup);

  /* Fixup stmts that became noreturn calls.  This may require splitting
     blocks and thus isn't possible during the dominator walk.  Do this
     in reverse order so we don't inadvertently remove a stmt we want to
     fixup by visiting a dominating now noreturn call first.  */
  while (!walker.stmts_to_fixup.is_empty ())
    {
      gimple stmt = walker.stmts_to_fixup.pop ();
      if (dump_file && dump_flags & TDF_DETAILS)
	{
	  fprintf (dump_file, "Fixing up noreturn call ");
	  print_gimple_stmt (dump_file, stmt, 0, 0);
	  fprintf (dump_file, "\n");
	}
      fixup_noreturn_call (stmt);
    }

  statistics_counter_event (cfun, "Constants propagated",
			    prop_stats.num_const_prop);
  statistics_counter_event (cfun, "Copies propagated",
			    prop_stats.num_copy_prop);
  statistics_counter_event (cfun, "Statements folded",
			    prop_stats.num_stmts_folded);
  statistics_counter_event (cfun, "Statements deleted",
			    prop_stats.num_dce);

  return walker.something_changed;
}
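
/* For example, a client pass that records one known value per SSA name
   might finish with (hypothetical callback):

     static tree
     my_get_value (tree name)
     {
       ... return the constant or copy recorded for NAME,
	   or NULL_TREE if nothing is known ...
     }

     ...
     bool changed = substitute_and_fold (my_get_value, NULL, true);

   Passing NULL for FOLD_FN skips the pass-specific folding hook.  */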

/* Return true if we may propagate ORIG into DEST, false otherwise.  */

bool
may_propagate_copy (tree dest, tree orig)
{
  tree type_d = TREE_TYPE (dest);
  tree type_o = TREE_TYPE (orig);

  /* If ORIG is a default definition which flows in from an abnormal edge
     then the copy can be propagated.  It is important that we do so to
     avoid uninitialized copies.  */
  if (TREE_CODE (orig) == SSA_NAME
      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig)
      && SSA_NAME_IS_DEFAULT_DEF (orig)
      && (SSA_NAME_VAR (orig) == NULL_TREE
	  || TREE_CODE (SSA_NAME_VAR (orig)) == VAR_DECL))
    ;
  /* Otherwise if ORIG just flows in from an abnormal edge then the copy cannot
     be propagated.  */
  else if (TREE_CODE (orig) == SSA_NAME
	   && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig))
    return false;
  /* Similarly if DEST flows in from an abnormal edge then the copy cannot be
     propagated.  */
  else if (TREE_CODE (dest) == SSA_NAME
	   && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (dest))
    return false;

  /* Do not copy between types for which we *do* need a conversion.  */
  if (!useless_type_conversion_p (type_d, type_o))
    return false;

  /* Generally propagating virtual operands is not ok as that may
     create overlapping life-ranges.  */
  if (TREE_CODE (dest) == SSA_NAME && virtual_operand_p (dest))
    return false;

  /* Anything else is OK.  */
  return true;
}

/* Like may_propagate_copy, but use as the destination expression
   the principal expression (typically, the RHS) contained in
   statement DEST.  This is more efficient when working with the
   gimple tuples representation.  */

bool
may_propagate_copy_into_stmt (gimple dest, tree orig)
{
  tree type_d;
  tree type_o;

  /* If the statement is a switch or a single-rhs assignment,
     then the expression to be replaced by the propagation may
     be an SSA_NAME.  Fortunately, there is an explicit tree
     for the expression, so we delegate to may_propagate_copy.  */

  if (gimple_assign_single_p (dest))
    return may_propagate_copy (gimple_assign_rhs1 (dest), orig);
  else if (gswitch *dest_swtch = dyn_cast <gswitch *> (dest))
    return may_propagate_copy (gimple_switch_index (dest_swtch), orig);

  /* In other cases, the expression is not materialized, so there
     is no destination to pass to may_propagate_copy.  On the other
     hand, the expression cannot be an SSA_NAME, so the analysis
     is much simpler.  */

  if (TREE_CODE (orig) == SSA_NAME
      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (orig))
    return false;

  if (is_gimple_assign (dest))
    type_d = TREE_TYPE (gimple_assign_lhs (dest));
  else if (gimple_code (dest) == GIMPLE_COND)
    type_d = boolean_type_node;
  else if (is_gimple_call (dest)
	   && gimple_call_lhs (dest) != NULL_TREE)
    type_d = TREE_TYPE (gimple_call_lhs (dest));
  else
    gcc_unreachable ();

  type_o = TREE_TYPE (orig);

  if (!useless_type_conversion_p (type_d, type_o))
    return false;

  return true;
}

/* Similarly, but we know that we're propagating into an ASM_EXPR.  */

bool
may_propagate_copy_into_asm (tree dest ATTRIBUTE_UNUSED)
{
  return true;
}
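
/* For instance (illustrative names): given "x_1 = y_2" where both
   names have compatible types and neither occurs in an abnormal PHI,
   may_propagate_copy (x_1, y_2) returns true, so uses of x_1 may be
   rewritten to y_2.  A virtual operand such as .MEM_3 is rejected as
   the destination, since that could create overlapping live ranges.  */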

/* Common code for propagate_value and replace_exp.

   Replace use operand OP_P with VAL.  FOR_PROPAGATION indicates if the
   replacement is done to propagate a value or not.  */

static void
replace_exp_1 (use_operand_p op_p, tree val,
	       bool for_propagation ATTRIBUTE_UNUSED)
{
#if defined ENABLE_CHECKING
  tree op = USE_FROM_PTR (op_p);

  gcc_assert (!(for_propagation
		&& TREE_CODE (op) == SSA_NAME
		&& TREE_CODE (val) == SSA_NAME
		&& !may_propagate_copy (op, val)));
#endif

  if (TREE_CODE (val) == SSA_NAME)
    SET_USE (op_p, val);
  else
    SET_USE (op_p, unshare_expr (val));
}


/* Propagate the value VAL (assumed to be a constant or another SSA_NAME)
   into the operand pointed to by OP_P.

   Use this version for const/copy propagation as it will perform additional
   checks to ensure validity of the const/copy propagation.  */

void
propagate_value (use_operand_p op_p, tree val)
{
  replace_exp_1 (op_p, val, true);
}

/* Replace *OP_P with value VAL (assumed to be a constant or another SSA_NAME).

   Use this version when not const/copy propagating values.  For example,
   PRE uses this version when building expressions as they would appear
   in specific blocks taking into account actions of PHI nodes.

   The statement in which an expression has been replaced should be
   folded using fold_stmt_inplace.  */

void
replace_exp (use_operand_p op_p, tree val)
{
  replace_exp_1 (op_p, val, false);
}


/* Propagate the value VAL (assumed to be a constant or another SSA_NAME)
   into the tree pointed to by OP_P.

   Use this version for const/copy propagation when SSA operands are not
   available.  It will perform the additional checks to ensure validity of
   the const/copy propagation, but will not update any operand information.
   Be sure to mark the stmt as modified.  */

void
propagate_tree_value (tree *op_p, tree val)
{
  if (TREE_CODE (val) == SSA_NAME)
    *op_p = val;
  else
    *op_p = unshare_expr (val);
}


/* Like propagate_tree_value, but use as the operand to replace
   the principal expression (typically, the RHS) contained in the
   statement referenced by iterator GSI.  Note that it is not
   always possible to update the statement in-place, so a new
   statement may be created to replace the original.  */

void
propagate_tree_value_into_stmt (gimple_stmt_iterator *gsi, tree val)
{
  gimple stmt = gsi_stmt (*gsi);

  if (is_gimple_assign (stmt))
    {
      tree expr = NULL_TREE;
      if (gimple_assign_single_p (stmt))
	expr = gimple_assign_rhs1 (stmt);
      propagate_tree_value (&expr, val);
      gimple_assign_set_rhs_from_tree (gsi, expr);
    }
  else if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
    {
      tree lhs = NULL_TREE;
      tree rhs = build_zero_cst (TREE_TYPE (val));
      propagate_tree_value (&lhs, val);
      gimple_cond_set_code (cond_stmt, NE_EXPR);
      gimple_cond_set_lhs (cond_stmt, lhs);
      gimple_cond_set_rhs (cond_stmt, rhs);
    }
  else if (is_gimple_call (stmt)
	   && gimple_call_lhs (stmt) != NULL_TREE)
    {
      tree expr = NULL_TREE;
      bool res;
      propagate_tree_value (&expr, val);
      res = update_call_from_tree (gsi, expr);
      gcc_assert (res);
    }
  else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (stmt))
    propagate_tree_value (gimple_switch_index_ptr (swtch_stmt), val);
  else
    gcc_unreachable ();
}