1 /* Gimple Represented as Polyhedra.
2 Copyright (C) 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
3 Contributed by Sebastian Pop <sebastian.pop@inria.fr>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* This pass converts GIMPLE to GRAPHITE, performs some loop
22 transformations and then converts the resulting representation back
25 An early description of this pass can be found in the GCC Summit'06
26 paper "GRAPHITE: Polyhedral Analyses and Optimizations for GCC".
27 The wiki page http://gcc.gnu.org/wiki/Graphite contains pointers to
30 One important document to read is CLooG's internal manual:
31 http://repo.or.cz/w/cloog-ppl.git?a=blob_plain;f=doc/cloog.texi;hb=HEAD
32 that describes the data structure of loops used in this file, and
33 the functions that are used for transforming the code. */
37 #include "coretypes.h"
42 #include "basic-block.h"
43 #include "diagnostic.h"
44 #include "tree-flow.h"
46 #include "tree-dump.h"
49 #include "tree-chrec.h"
50 #include "tree-data-ref.h"
51 #include "tree-scalar-evolution.h"
52 #include "tree-pass.h"
54 #include "value-prof.h"
55 #include "pointer-set.h"
59 #include "cloog/cloog.h"
62 static VEC (scop_p, heap) *current_scops;
64 /* Converts a GMP constant V to a tree and returns it. */
67 gmp_cst_to_tree (tree type, Value v)
69 return build_int_cst (type, value_get_si (v));
72 /* Returns true when BB is in REGION. */
75 bb_in_sese_p (basic_block bb, sese region)
77 return pointer_set_contains (SESE_REGION_BBS (region), bb);
80 /* Returns true when LOOP is in the SESE region R. */
83 loop_in_sese_p (struct loop *loop, sese r)
85 return (bb_in_sese_p (loop->header, r)
86 && bb_in_sese_p (loop->latch, r));
89 /* For a USE in BB, if BB is outside REGION, mark the USE in the
90 SESE_LIVEIN and SESE_LIVEOUT sets. */
93 sese_build_livein_liveouts_use (sese region, basic_block bb, tree use)
98 if (TREE_CODE (use) != SSA_NAME)
101 ver = SSA_NAME_VERSION (use);
102 def_bb = gimple_bb (SSA_NAME_DEF_STMT (use));
104 || !bb_in_sese_p (def_bb, region)
105 || bb_in_sese_p (bb, region))
108 if (!SESE_LIVEIN_VER (region, ver))
109 SESE_LIVEIN_VER (region, ver) = BITMAP_ALLOC (NULL);
111 bitmap_set_bit (SESE_LIVEIN_VER (region, ver), bb->index);
112 bitmap_set_bit (SESE_LIVEOUT (region), ver);
115 /* Marks for rewrite all the SSA_NAMES defined in REGION and that are
116 used in BB that is outside of the REGION. */
119 sese_build_livein_liveouts_bb (sese region, basic_block bb)
121 gimple_stmt_iterator bsi;
127 FOR_EACH_EDGE (e, ei, bb->succs)
128 for (bsi = gsi_start_phis (e->dest); !gsi_end_p (bsi); gsi_next (&bsi))
129 sese_build_livein_liveouts_use (region, bb,
130 PHI_ARG_DEF_FROM_EDGE (gsi_stmt (bsi), e));
132 for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
133 FOR_EACH_SSA_TREE_OPERAND (var, gsi_stmt (bsi), iter, SSA_OP_ALL_USES)
134 sese_build_livein_liveouts_use (region, bb, var);
137 /* Build the SESE_LIVEIN and SESE_LIVEOUT for REGION. */
140 sese_build_livein_liveouts (sese region)
144 SESE_LIVEOUT (region) = BITMAP_ALLOC (NULL);
145 SESE_NUM_VER (region) = num_ssa_names;
146 SESE_LIVEIN (region) = XCNEWVEC (bitmap, SESE_NUM_VER (region));
149 sese_build_livein_liveouts_bb (region, bb);
152 /* Register basic blocks belonging to a region in a pointer set. */
155 register_bb_in_sese (basic_block entry_bb, basic_block exit_bb, sese region)
159 basic_block bb = entry_bb;
161 FOR_EACH_EDGE (e, ei, bb->succs)
163 if (!pointer_set_contains (SESE_REGION_BBS (region), e->dest) &&
164 e->dest->index != exit_bb->index)
166 pointer_set_insert (SESE_REGION_BBS (region), e->dest);
167 register_bb_in_sese (e->dest, exit_bb, region);
172 /* Builds a new SESE region from edges ENTRY and EXIT. */
175 new_sese (edge entry, edge exit)
177 sese res = XNEW (struct sese);
179 SESE_ENTRY (res) = entry;
180 SESE_EXIT (res) = exit;
181 SESE_REGION_BBS (res) = pointer_set_create ();
182 register_bb_in_sese (entry->dest, exit->dest, res);
184 SESE_LIVEOUT (res) = NULL;
185 SESE_NUM_VER (res) = 0;
186 SESE_LIVEIN (res) = NULL;
191 /* Deletes REGION. */
194 free_sese (sese region)
198 for (i = 0; i < SESE_NUM_VER (region); i++)
199 BITMAP_FREE (SESE_LIVEIN_VER (region, i));
201 if (SESE_LIVEIN (region))
202 free (SESE_LIVEIN (region));
204 if (SESE_LIVEOUT (region))
205 BITMAP_FREE (SESE_LIVEOUT (region));
207 pointer_set_destroy (SESE_REGION_BBS (region));
213 /* Debug the list of old induction variables for this SCOP. */
216 debug_oldivs (scop_p scop)
221 fprintf (stderr, "Old IVs:");
223 for (i = 0; VEC_iterate (name_tree, SCOP_OLDIVS (scop), i, oldiv); i++)
225 fprintf (stderr, "(");
226 print_generic_expr (stderr, oldiv->t, 0);
227 fprintf (stderr, ", %s, %d)\n", oldiv->name, oldiv->loop->num);
229 fprintf (stderr, "\n");
232 /* Debug the loops around basic block GB. */
235 debug_loop_vec (graphite_bb_p gb)
240 fprintf (stderr, "Loop Vec:");
242 for (i = 0; VEC_iterate (loop_p, GBB_LOOPS (gb), i, loop); i++)
243 fprintf (stderr, "%d: %d, ", i, loop ? loop->num : -1);
245 fprintf (stderr, "\n");
248 /* Returns true if stack ENTRY is a constant. */
251 iv_stack_entry_is_constant (iv_stack_entry *entry)
253 return entry->kind == iv_stack_entry_const;
256 /* Returns true if stack ENTRY is an induction variable. */
259 iv_stack_entry_is_iv (iv_stack_entry *entry)
261 return entry->kind == iv_stack_entry_iv;
264 /* Push (IV, NAME) on STACK. */
267 loop_iv_stack_push_iv (loop_iv_stack stack, tree iv, const char *name)
269 iv_stack_entry *entry = XNEW (iv_stack_entry);
270 name_tree named_iv = XNEW (struct name_tree);
273 named_iv->name = name;
275 entry->kind = iv_stack_entry_iv;
276 entry->data.iv = named_iv;
278 VEC_safe_push (iv_stack_entry_p, heap, *stack, entry);
281 /* Inserts a CONSTANT in STACK at INDEX. */
284 loop_iv_stack_insert_constant (loop_iv_stack stack, int index,
287 iv_stack_entry *entry = XNEW (iv_stack_entry);
289 entry->kind = iv_stack_entry_const;
290 entry->data.constant = constant;
292 VEC_safe_insert (iv_stack_entry_p, heap, *stack, index, entry);
295 /* Pops and frees an element out of STACK. */
298 loop_iv_stack_pop (loop_iv_stack stack)
300 iv_stack_entry_p entry = VEC_pop (iv_stack_entry_p, *stack);
302 free (entry->data.iv);
306 /* Get the IV at INDEX in STACK. */
309 loop_iv_stack_get_iv (loop_iv_stack stack, int index)
311 iv_stack_entry_p entry = VEC_index (iv_stack_entry_p, *stack, index);
312 iv_stack_entry_data data = entry->data;
314 return iv_stack_entry_is_iv (entry) ? data.iv->t : data.constant;
317 /* Get the IV from its NAME in STACK. */
320 loop_iv_stack_get_iv_from_name (loop_iv_stack stack, const char* name)
323 iv_stack_entry_p entry;
325 for (i = 0; VEC_iterate (iv_stack_entry_p, *stack, i, entry); i++)
327 name_tree iv = entry->data.iv;
328 if (!strcmp (name, iv->name))
335 /* Prints on stderr the contents of STACK. */
338 debug_loop_iv_stack (loop_iv_stack stack)
341 iv_stack_entry_p entry;
344 fprintf (stderr, "(");
346 for (i = 0; VEC_iterate (iv_stack_entry_p, *stack, i, entry); i++)
351 fprintf (stderr, " ");
353 if (iv_stack_entry_is_iv (entry))
355 name_tree iv = entry->data.iv;
356 fprintf (stderr, "%s:", iv->name);
357 print_generic_expr (stderr, iv->t, 0);
361 tree constant = entry->data.constant;
362 print_generic_expr (stderr, constant, 0);
363 fprintf (stderr, ":");
364 print_generic_expr (stderr, constant, 0);
368 fprintf (stderr, ")\n");
374 free_loop_iv_stack (loop_iv_stack stack)
377 iv_stack_entry_p entry;
379 for (i = 0; VEC_iterate (iv_stack_entry_p, *stack, i, entry); i++)
381 free (entry->data.iv);
385 VEC_free (iv_stack_entry_p, heap, *stack);
390 /* Structure containing the mapping between the CLooG's induction
391 variable and the type of the old induction variable. */
392 typedef struct ivtype_map_elt
395 const char *cloog_iv;
398 /* Print to stderr the element ELT. */
401 debug_ivtype_elt (ivtype_map_elt elt)
403 fprintf (stderr, "(%s, ", elt->cloog_iv);
404 print_generic_expr (stderr, elt->type, 0);
405 fprintf (stderr, ")\n");
408 /* Helper function for debug_ivtype_map. */
411 debug_ivtype_map_1 (void **slot, void *s ATTRIBUTE_UNUSED)
413 struct ivtype_map_elt *entry = (struct ivtype_map_elt *) *slot;
414 debug_ivtype_elt (entry);
418 /* Print to stderr all the elements of MAP. */
421 debug_ivtype_map (htab_t map)
423 htab_traverse (map, debug_ivtype_map_1, NULL);
426 /* Constructs a new SCEV_INFO_STR structure for VAR and INSTANTIATED_BELOW. */
428 static inline ivtype_map_elt
429 new_ivtype_map_elt (const char *cloog_iv, tree type)
433 res = XNEW (struct ivtype_map_elt);
434 res->cloog_iv = cloog_iv;
440 /* Computes a hash function for database element ELT. */
443 ivtype_map_elt_info (const void *elt)
445 return htab_hash_pointer (((const struct ivtype_map_elt *) elt)->cloog_iv);
448 /* Compares database elements E1 and E2. */
451 eq_ivtype_map_elts (const void *e1, const void *e2)
453 const struct ivtype_map_elt *elt1 = (const struct ivtype_map_elt *) e1;
454 const struct ivtype_map_elt *elt2 = (const struct ivtype_map_elt *) e2;
456 return (elt1->cloog_iv == elt2->cloog_iv);
461 /* Given a CLOOG_IV, returns the type that it should have in GCC land.
462 If the information is not available, i.e. in the case one of the
463 transforms created the loop, just return integer_type_node. */
466 gcc_type_for_cloog_iv (const char *cloog_iv, graphite_bb_p gbb)
468 struct ivtype_map_elt tmp;
471 tmp.cloog_iv = cloog_iv;
472 slot = htab_find_slot (GBB_CLOOG_IV_TYPES (gbb), &tmp, NO_INSERT);
475 return ((ivtype_map_elt) *slot)->type;
477 return integer_type_node;
480 /* Inserts constants derived from the USER_STMT argument list into the
481 STACK. This is needed to map old ivs to constants when loops have
485 loop_iv_stack_patch_for_consts (loop_iv_stack stack,
486 struct clast_user_stmt *user_stmt)
488 struct clast_stmt *t;
490 CloogStatement *cs = user_stmt->statement;
491 graphite_bb_p gbb = (graphite_bb_p) cloog_statement_usr (cs);
493 for (t = user_stmt->substitutions; t; t = t->next)
495 struct clast_expr *expr = (struct clast_expr *)
496 ((struct clast_assignment *)t)->RHS;
497 struct clast_term *term = (struct clast_term *) expr;
499 /* FIXME: What should be done with expr_bin, expr_red? */
500 if (expr->type == expr_term
503 loop_p loop = gbb_loop_at_index (gbb, index);
504 tree oldiv = oldiv_for_loop (GBB_SCOP (gbb), loop);
505 tree type = oldiv ? TREE_TYPE (oldiv) : integer_type_node;
506 tree value = gmp_cst_to_tree (type, term->val);
507 loop_iv_stack_insert_constant (stack, index, value);
513 /* Removes all constants in the iv STACK. */
516 loop_iv_stack_remove_constants (loop_iv_stack stack)
519 iv_stack_entry *entry;
521 for (i = 0; VEC_iterate (iv_stack_entry_p, *stack, i, entry);)
523 if (iv_stack_entry_is_constant (entry))
525 free (VEC_index (iv_stack_entry_p, *stack, i));
526 VEC_ordered_remove (iv_stack_entry_p, *stack, i);
533 /* Returns a new loop_to_cloog_loop_str structure. */
535 static inline struct loop_to_cloog_loop_str *
536 new_loop_to_cloog_loop_str (int loop_num,
538 CloogLoop *cloog_loop)
540 struct loop_to_cloog_loop_str *result;
542 result = XNEW (struct loop_to_cloog_loop_str);
543 result->loop_num = loop_num;
544 result->cloog_loop = cloog_loop;
545 result->loop_position = loop_position;
550 /* Hash function for SCOP_LOOP2CLOOG_LOOP hash table. */
553 hash_loop_to_cloog_loop (const void *elt)
555 return ((const struct loop_to_cloog_loop_str *) elt)->loop_num;
558 /* Equality function for SCOP_LOOP2CLOOG_LOOP hash table. */
561 eq_loop_to_cloog_loop (const void *el1, const void *el2)
563 const struct loop_to_cloog_loop_str *elt1, *elt2;
565 elt1 = (const struct loop_to_cloog_loop_str *) el1;
566 elt2 = (const struct loop_to_cloog_loop_str *) el2;
567 return elt1->loop_num == elt2->loop_num;
570 /* Compares two graphite bbs and returns an integer less than, equal to, or
571 greater than zero if the first argument is considered to be respectively
572 less than, equal to, or greater than the second.
573 We compare using the lexicographic order of the static schedules. */
576 gbb_compare (const void *p_1, const void *p_2)
578 const struct graphite_bb *const gbb_1
579 = *(const struct graphite_bb *const*) p_1;
580 const struct graphite_bb *const gbb_2
581 = *(const struct graphite_bb *const*) p_2;
583 return lambda_vector_compare (GBB_STATIC_SCHEDULE (gbb_1),
584 gbb_nb_loops (gbb_1) + 1,
585 GBB_STATIC_SCHEDULE (gbb_2),
586 gbb_nb_loops (gbb_2) + 1);
589 /* Sort graphite bbs in SCOP. */
592 graphite_sort_gbbs (scop_p scop)
594 VEC (graphite_bb_p, heap) *bbs = SCOP_BBS (scop);
596 qsort (VEC_address (graphite_bb_p, bbs),
597 VEC_length (graphite_bb_p, bbs),
598 sizeof (graphite_bb_p), gbb_compare);
601 /* Dump conditions of a graphite basic block GBB on FILE. */
604 dump_gbb_conditions (FILE *file, graphite_bb_p gbb)
608 VEC (gimple, heap) *conditions = GBB_CONDITIONS (gbb);
610 if (VEC_empty (gimple, conditions))
613 fprintf (file, "\tbb %d\t: cond = {", GBB_BB (gbb)->index);
615 for (i = 0; VEC_iterate (gimple, conditions, i, stmt); i++)
616 print_gimple_stmt (file, stmt, 0, 0);
618 fprintf (file, "}\n");
621 /* Converts the graphite scheduling function into a cloog scattering
622 matrix. This scattering matrix is used to limit the possible cloog
623 output to valid programs in respect to the scheduling function.
625 SCATTERING_DIMENSIONS specifies the dimensionality of the scattering
626 matrix. CLooG 0.14.0 and previous versions require, that all scattering
627 functions of one CloogProgram have the same dimensionality, therefore we
628 allow to specify it. (Should be removed in future versions) */
631 schedule_to_scattering (graphite_bb_p gb, int scattering_dimensions)
634 scop_p scop = GBB_SCOP (gb);
636 int nb_iterators = gbb_nb_loops (gb);
638 /* The cloog scattering matrix consists of these colums:
640 scattering_dimensions cols = Scattering dimensions,
641 nb_iterators cols = bb's iterators,
642 scop_nb_params cols = Parameters,
647 scattering_dimensions = 5
657 s1 s2 s3 s4 s5 i p1 p2 1
658 1 0 0 0 0 0 0 0 -4 = 0
659 0 1 0 0 0 -1 0 0 0 = 0
660 0 0 1 0 0 0 0 0 -5 = 0 */
661 int nb_params = scop_nb_params (scop);
662 int nb_cols = 1 + scattering_dimensions + nb_iterators + nb_params + 1;
663 int col_const = nb_cols - 1;
664 int col_iter_offset = 1 + scattering_dimensions;
666 CloogMatrix *scat = cloog_matrix_alloc (scattering_dimensions, nb_cols);
668 gcc_assert (scattering_dimensions >= nb_iterators * 2 + 1);
670 /* Initialize the identity matrix. */
671 for (i = 0; i < scattering_dimensions; i++)
672 value_set_si (scat->p[i][i + 1], 1);
674 /* Textual order outside the first loop */
675 value_set_si (scat->p[0][col_const], -GBB_STATIC_SCHEDULE (gb)[0]);
677 /* For all surrounding loops. */
678 for (i = 0; i < nb_iterators; i++)
680 int schedule = GBB_STATIC_SCHEDULE (gb)[i + 1];
682 /* Iterations of this loop. */
683 value_set_si (scat->p[2 * i + 1][col_iter_offset + i], -1);
685 /* Textual order inside this loop. */
686 value_set_si (scat->p[2 * i + 2][col_const], -schedule);
692 /* Print the schedules of GB to FILE with INDENT white spaces before.
693 VERBOSITY determines how verbose the code pretty printers are. */
696 print_graphite_bb (FILE *file, graphite_bb_p gb, int indent, int verbosity)
698 CloogMatrix *scattering;
701 fprintf (file, "\nGBB (\n");
703 print_loops_bb (file, GBB_BB (gb), indent+2, verbosity);
707 fprintf (file, " (domain: \n");
708 cloog_matrix_print (file, GBB_DOMAIN (gb));
709 fprintf (file, " )\n");
712 if (GBB_STATIC_SCHEDULE (gb))
714 fprintf (file, " (static schedule: ");
715 print_lambda_vector (file, GBB_STATIC_SCHEDULE (gb),
716 gbb_nb_loops (gb) + 1);
717 fprintf (file, " )\n");
722 fprintf (file, " (contained loops: \n");
723 for (i = 0; VEC_iterate (loop_p, GBB_LOOPS (gb), i, loop); i++)
725 fprintf (file, " iterator %d => NULL \n", i);
727 fprintf (file, " iterator %d => loop %d \n", i,
729 fprintf (file, " )\n");
732 if (GBB_DATA_REFS (gb))
733 dump_data_references (file, GBB_DATA_REFS (gb));
735 if (GBB_CONDITIONS (gb))
737 fprintf (file, " (conditions: \n");
738 dump_gbb_conditions (file, gb);
739 fprintf (file, " )\n");
743 && GBB_STATIC_SCHEDULE (gb))
745 fprintf (file, " (scattering: \n");
746 scattering = schedule_to_scattering (gb, 2 * gbb_nb_loops (gb) + 1);
747 cloog_matrix_print (file, scattering);
748 cloog_matrix_free (scattering);
749 fprintf (file, " )\n");
752 fprintf (file, ")\n");
755 /* Print to STDERR the schedules of GB with VERBOSITY level. */
758 debug_gbb (graphite_bb_p gb, int verbosity)
760 print_graphite_bb (stderr, gb, 0, verbosity);
764 /* Print SCOP to FILE. VERBOSITY determines how verbose the pretty
768 print_scop (FILE *file, scop_p scop, int verbosity)
773 fprintf (file, "\nSCoP_%d_%d (\n",
774 SCOP_ENTRY (scop)->index, SCOP_EXIT (scop)->index);
776 fprintf (file, " (cloog: \n");
777 cloog_program_print (file, SCOP_PROG (scop));
778 fprintf (file, " )\n");
785 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
786 print_graphite_bb (file, gb, 0, verbosity);
789 fprintf (file, ")\n");
792 /* Print all the SCOPs to FILE. VERBOSITY determines how verbose the
793 code pretty printers are. */
796 print_scops (FILE *file, int verbosity)
801 for (i = 0; VEC_iterate (scop_p, current_scops, i, scop); i++)
802 print_scop (file, scop, verbosity);
805 /* Debug SCOP. VERBOSITY determines how verbose the code pretty
809 debug_scop (scop_p scop, int verbosity)
811 print_scop (stderr, scop, verbosity);
814 /* Debug all SCOPs from CURRENT_SCOPS. VERBOSITY determines how
815 verbose the code pretty printers are. */
818 debug_scops (int verbosity)
820 print_scops (stderr, verbosity);
823 /* Pretty print to FILE the SCOP in DOT format. */
826 dot_scop_1 (FILE *file, scop_p scop)
831 basic_block entry = SCOP_ENTRY (scop);
832 basic_block exit = SCOP_EXIT (scop);
834 fprintf (file, "digraph SCoP_%d_%d {\n", entry->index,
840 fprintf (file, "%d [shape=triangle];\n", bb->index);
843 fprintf (file, "%d [shape=box];\n", bb->index);
845 if (bb_in_sese_p (bb, SCOP_REGION (scop)))
846 fprintf (file, "%d [color=red];\n", bb->index);
848 FOR_EACH_EDGE (e, ei, bb->succs)
849 fprintf (file, "%d -> %d;\n", bb->index, e->dest->index);
852 fputs ("}\n\n", file);
855 /* Display SCOP using dotty. */
858 dot_scop (scop_p scop)
860 dot_scop_1 (stderr, scop);
863 /* Pretty print all SCoPs in DOT format and mark them with different colors.
864 If there are not enough colors, paint later SCoPs gray.
866 - "*" after the node number: entry of a SCoP,
867 - "#" after the node number: exit of a SCoP,
868 - "()" entry or exit not part of SCoP. */
871 dot_all_scops_1 (FILE *file)
880 /* Disable debugging while printing graph. */
881 int tmp_dump_flags = dump_flags;
884 fprintf (file, "digraph all {\n");
888 int part_of_scop = false;
890 /* Use HTML for every bb label. So we are able to print bbs
891 which are part of two different SCoPs, with two different
892 background colors. */
893 fprintf (file, "%d [label=<\n <TABLE BORDER=\"0\" CELLBORDER=\"1\" ",
895 fprintf (file, "CELLSPACING=\"0\">\n");
897 /* Select color for SCoP. */
898 for (i = 0; VEC_iterate (scop_p, current_scops, i, scop); i++)
899 if (bb_in_sese_p (bb, SCOP_REGION (scop))
900 || (SCOP_EXIT (scop) == bb)
901 || (SCOP_ENTRY (scop) == bb))
960 fprintf (file, " <TR><TD WIDTH=\"50\" BGCOLOR=\"%s\">", color);
962 if (!bb_in_sese_p (bb, SCOP_REGION (scop)))
963 fprintf (file, " (");
965 if (bb == SCOP_ENTRY (scop)
966 && bb == SCOP_EXIT (scop))
967 fprintf (file, " %d*# ", bb->index);
968 else if (bb == SCOP_ENTRY (scop))
969 fprintf (file, " %d* ", bb->index);
970 else if (bb == SCOP_EXIT (scop))
971 fprintf (file, " %d# ", bb->index);
973 fprintf (file, " %d ", bb->index);
975 if (!bb_in_sese_p (bb, SCOP_REGION (scop)))
978 fprintf (file, "</TD></TR>\n");
984 fprintf (file, " <TR><TD WIDTH=\"50\" BGCOLOR=\"#ffffff\">");
985 fprintf (file, " %d </TD></TR>\n", bb->index);
988 fprintf (file, " </TABLE>>, shape=box, style=\"setlinewidth(0)\"]\n");
993 FOR_EACH_EDGE (e, ei, bb->succs)
994 fprintf (file, "%d -> %d;\n", bb->index, e->dest->index);
997 fputs ("}\n\n", file);
999 /* Enable debugging again. */
1000 dump_flags = tmp_dump_flags;
1003 /* Display all SCoPs using dotty. */
1006 dot_all_scops (void)
1008 /* When debugging, enable the following code. This cannot be used
1009 in production compilers because it calls "system". */
1011 FILE *stream = fopen ("/tmp/allscops.dot", "w");
1012 gcc_assert (stream);
1014 dot_all_scops_1 (stream);
1017 system ("dotty /tmp/allscops.dot");
1019 dot_all_scops_1 (stderr);
1023 /* Returns the outermost loop in SCOP that contains BB. */
1025 static struct loop *
1026 outermost_loop_in_scop (scop_p scop, basic_block bb)
1030 nest = bb->loop_father;
1031 while (loop_outer (nest)
1032 && loop_in_sese_p (loop_outer (nest), SCOP_REGION (scop)))
1033 nest = loop_outer (nest);
1038 /* Returns the block preceding the entry of SCOP. */
1041 block_before_scop (scop_p scop)
1043 return SESE_ENTRY (SCOP_REGION (scop))->src;
1046 /* Return true when EXPR is an affine function in LOOP with parameters
1047 instantiated relative to SCOP_ENTRY. */
1050 loop_affine_expr (basic_block scop_entry, struct loop *loop, tree expr)
1053 tree scev = analyze_scalar_evolution (loop, expr);
1055 scev = instantiate_scev (scop_entry, loop, scev);
1057 return (evolution_function_is_invariant_p (scev, n)
1058 || evolution_function_is_affine_multivariate_p (scev, n));
1061 /* Return true if REF or any of its subtrees contains a
1065 contains_component_ref_p (tree ref)
1070 while (handled_component_p (ref))
1072 if (TREE_CODE (ref) == COMPONENT_REF)
1075 ref = TREE_OPERAND (ref, 0);
1081 /* Return true if the operand OP is simple. */
1084 is_simple_operand (loop_p loop, gimple stmt, tree op)
1086 /* It is not a simple operand when it is a declaration, */
1088 /* or a structure, */
1089 || AGGREGATE_TYPE_P (TREE_TYPE (op))
1090 /* or a COMPONENT_REF, */
1091 || contains_component_ref_p (op)
1092 /* or a memory access that cannot be analyzed by the data
1093 reference analysis. */
1094 || ((handled_component_p (op) || INDIRECT_REF_P (op))
1095 && !stmt_simple_memref_p (loop, stmt, op)))
1101 /* Return true only when STMT is simple enough for being handled by
1102 Graphite. This depends on SCOP_ENTRY, as the parametetrs are
1103 initialized relatively to this basic block. */
1106 stmt_simple_for_scop_p (basic_block scop_entry, gimple stmt)
1108 basic_block bb = gimple_bb (stmt);
1109 struct loop *loop = bb->loop_father;
1111 /* GIMPLE_ASM and GIMPLE_CALL may embed arbitrary side effects.
1112 Calls have side-effects, except those to const or pure
1114 if (gimple_has_volatile_ops (stmt)
1115 || (gimple_code (stmt) == GIMPLE_CALL
1116 && !(gimple_call_flags (stmt) & (ECF_CONST | ECF_PURE)))
1117 || (gimple_code (stmt) == GIMPLE_ASM))
1120 switch (gimple_code (stmt))
1129 ssa_op_iter op_iter;
1130 enum tree_code code = gimple_cond_code (stmt);
1132 /* We can only handle this kind of conditional expressions.
1133 For inequalities like "if (i != 3 * k)" we need unions of
1134 polyhedrons. Expressions like "if (a)" or "if (a == 15)" need
1135 them for the else branch. */
1136 if (!(code == LT_EXPR
1139 || code == GE_EXPR))
1145 FOR_EACH_SSA_TREE_OPERAND (op, stmt, op_iter, SSA_OP_ALL_USES)
1146 if (!loop_affine_expr (scop_entry, loop, op))
1154 enum tree_code code = gimple_assign_rhs_code (stmt);
1156 switch (get_gimple_rhs_class (code))
1158 case GIMPLE_UNARY_RHS:
1159 case GIMPLE_SINGLE_RHS:
1160 return (is_simple_operand (loop, stmt, gimple_assign_lhs (stmt))
1161 && is_simple_operand (loop, stmt, gimple_assign_rhs1 (stmt)));
1163 case GIMPLE_BINARY_RHS:
1164 return (is_simple_operand (loop, stmt, gimple_assign_lhs (stmt))
1165 && is_simple_operand (loop, stmt, gimple_assign_rhs1 (stmt))
1166 && is_simple_operand (loop, stmt, gimple_assign_rhs2 (stmt)));
1168 case GIMPLE_INVALID_RHS:
1177 size_t n = gimple_call_num_args (stmt);
1178 tree lhs = gimple_call_lhs (stmt);
1180 if (lhs && !is_simple_operand (loop, stmt, lhs))
1183 for (i = 0; i < n; i++)
1184 if (!is_simple_operand (loop, stmt, gimple_call_arg (stmt, i)))
1191 /* These nodes cut a new scope. */
1198 /* Returns the statement of BB that contains a harmful operation: that
1199 can be a function call with side effects, the induction variables
1200 are not linear with respect to SCOP_ENTRY, etc. The current open
1201 scop should end before this statement. */
1204 harmful_stmt_in_bb (basic_block scop_entry, basic_block bb)
1206 gimple_stmt_iterator gsi;
1209 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1210 if (!stmt_simple_for_scop_p (scop_entry, gsi_stmt (gsi)))
1211 return gsi_stmt (gsi);
1213 stmt = last_stmt (bb);
1214 if (stmt && gimple_code (stmt) == GIMPLE_COND)
1216 tree lhs = gimple_cond_lhs (stmt);
1217 tree rhs = gimple_cond_rhs (stmt);
1219 if (TREE_CODE (TREE_TYPE (lhs)) == REAL_TYPE
1220 || TREE_CODE (TREE_TYPE (rhs)) == REAL_TYPE)
1227 /* Returns true when BB will be represented in graphite. Return false
1228 for the basic blocks that contain code eliminated in the code
1229 generation pass: i.e. induction variables and exit conditions. */
1232 graphite_stmt_p (scop_p scop, basic_block bb,
1233 VEC (data_reference_p, heap) *drs)
1235 gimple_stmt_iterator gsi;
1236 loop_p loop = bb->loop_father;
1238 if (VEC_length (data_reference_p, drs) > 0)
1241 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1243 gimple stmt = gsi_stmt (gsi);
1245 switch (gimple_code (stmt))
1247 /* Control flow expressions can be ignored, as they are
1248 represented in the iteration domains and will be
1249 regenerated by graphite. */
1257 tree var = gimple_assign_lhs (stmt);
1258 var = analyze_scalar_evolution (loop, var);
1259 var = instantiate_scev (block_before_scop (scop), loop, var);
1261 if (chrec_contains_undetermined (var))
1275 /* Store the GRAPHITE representation of BB. */
1278 new_graphite_bb (scop_p scop, basic_block bb)
1280 struct graphite_bb *gbb;
1281 VEC (data_reference_p, heap) *drs = VEC_alloc (data_reference_p, heap, 5);
1282 struct loop *nest = outermost_loop_in_scop (scop, bb);
1283 gimple_stmt_iterator gsi;
1285 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1286 find_data_references_in_stmt (nest, gsi_stmt (gsi), &drs);
1288 if (!graphite_stmt_p (scop, bb, drs))
1290 free_data_refs (drs);
1294 gbb = XNEW (struct graphite_bb);
1297 GBB_SCOP (gbb) = scop;
1298 GBB_DATA_REFS (gbb) = drs;
1299 GBB_DOMAIN (gbb) = NULL;
1300 GBB_CONDITIONS (gbb) = NULL;
1301 GBB_CONDITION_CASES (gbb) = NULL;
1302 GBB_LOOPS (gbb) = NULL;
1303 GBB_STATIC_SCHEDULE (gbb) = NULL;
1304 GBB_CLOOG_IV_TYPES (gbb) = NULL;
1305 VEC_safe_push (graphite_bb_p, heap, SCOP_BBS (scop), gbb);
1311 free_graphite_bb (struct graphite_bb *gbb)
1313 if (GBB_DOMAIN (gbb))
1314 cloog_matrix_free (GBB_DOMAIN (gbb));
1316 if (GBB_CLOOG_IV_TYPES (gbb))
1317 htab_delete (GBB_CLOOG_IV_TYPES (gbb));
1319 /* FIXME: free_data_refs is disabled for the moment, but should be
1322 free_data_refs (GBB_DATA_REFS (gbb)); */
1324 VEC_free (gimple, heap, GBB_CONDITIONS (gbb));
1325 VEC_free (gimple, heap, GBB_CONDITION_CASES (gbb));
1326 VEC_free (loop_p, heap, GBB_LOOPS (gbb));
1327 GBB_BB (gbb)->aux = 0;
1333 /* Structure containing the mapping between the old names and the new
1334 names used after block copy in the new loop context. */
1335 typedef struct rename_map_elt
1337 tree old_name, new_name;
1341 /* Print to stderr the element ELT. */
1344 debug_rename_elt (rename_map_elt elt)
1346 fprintf (stderr, "(");
1347 print_generic_expr (stderr, elt->old_name, 0);
1348 fprintf (stderr, ", ");
1349 print_generic_expr (stderr, elt->new_name, 0);
1350 fprintf (stderr, ")\n");
1353 /* Helper function for debug_rename_map. */
1356 debug_rename_map_1 (void **slot, void *s ATTRIBUTE_UNUSED)
1358 struct rename_map_elt *entry = (struct rename_map_elt *) *slot;
1359 debug_rename_elt (entry);
1363 /* Print to stderr all the elements of MAP. */
1366 debug_rename_map (htab_t map)
1368 htab_traverse (map, debug_rename_map_1, NULL);
1371 /* Constructs a new SCEV_INFO_STR structure for VAR and INSTANTIATED_BELOW. */
1373 static inline rename_map_elt
1374 new_rename_map_elt (tree old_name, tree new_name)
1378 res = XNEW (struct rename_map_elt);
1379 res->old_name = old_name;
1380 res->new_name = new_name;
1385 /* Computes a hash function for database element ELT. */
1388 rename_map_elt_info (const void *elt)
1390 return htab_hash_pointer (((const struct rename_map_elt *) elt)->old_name);
1393 /* Compares database elements E1 and E2. */
1396 eq_rename_map_elts (const void *e1, const void *e2)
1398 const struct rename_map_elt *elt1 = (const struct rename_map_elt *) e1;
1399 const struct rename_map_elt *elt2 = (const struct rename_map_elt *) e2;
1401 return (elt1->old_name == elt2->old_name);
1404 /* Returns the new name associated to OLD_NAME in MAP. */
1407 get_new_name_from_old_name (htab_t map, tree old_name)
1409 struct rename_map_elt tmp;
1412 tmp.old_name = old_name;
1413 slot = htab_find_slot (map, &tmp, NO_INSERT);
1416 return ((rename_map_elt) *slot)->new_name;
1423 /* Creates a new scop starting with ENTRY. */
1426 new_scop (edge entry, edge exit)
1428 scop_p scop = XNEW (struct scop);
1430 gcc_assert (entry && exit);
1432 SCOP_REGION (scop) = new_sese (entry, exit);
1433 SCOP_BBS (scop) = VEC_alloc (graphite_bb_p, heap, 3);
1434 SCOP_OLDIVS (scop) = VEC_alloc (name_tree, heap, 3);
1435 SCOP_LOOPS (scop) = BITMAP_ALLOC (NULL);
1436 SCOP_LOOP_NEST (scop) = VEC_alloc (loop_p, heap, 3);
1437 SCOP_ADD_PARAMS (scop) = true;
1438 SCOP_PARAMS (scop) = VEC_alloc (name_tree, heap, 3);
1439 SCOP_PROG (scop) = cloog_program_malloc ();
1440 cloog_program_set_names (SCOP_PROG (scop), cloog_names_malloc ());
1441 SCOP_LOOP2CLOOG_LOOP (scop) = htab_create (10, hash_loop_to_cloog_loop,
1442 eq_loop_to_cloog_loop,
1444 SCOP_LIVEOUT_RENAMES (scop) = htab_create (10, rename_map_elt_info,
1445 eq_rename_map_elts, free);
1452 free_scop (scop_p scop)
1456 struct graphite_bb *gb;
1459 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
1460 free_graphite_bb (gb);
1462 VEC_free (graphite_bb_p, heap, SCOP_BBS (scop));
1463 BITMAP_FREE (SCOP_LOOPS (scop));
1464 VEC_free (loop_p, heap, SCOP_LOOP_NEST (scop));
1466 for (i = 0; VEC_iterate (name_tree, SCOP_OLDIVS (scop), i, iv); i++)
1468 VEC_free (name_tree, heap, SCOP_OLDIVS (scop));
1470 for (i = 0; VEC_iterate (name_tree, SCOP_PARAMS (scop), i, p); i++)
1473 VEC_free (name_tree, heap, SCOP_PARAMS (scop));
1474 cloog_program_free (SCOP_PROG (scop));
1475 htab_delete (SCOP_LOOP2CLOOG_LOOP (scop));
1476 htab_delete (SCOP_LIVEOUT_RENAMES (scop));
1477 free_sese (SCOP_REGION (scop));
1481 /* Deletes all scops in SCOPS. */
1484 free_scops (VEC (scop_p, heap) *scops)
1489 for (i = 0; VEC_iterate (scop_p, scops, i, scop); i++)
1492 VEC_free (scop_p, heap, scops);
1495 typedef enum gbb_type {
1497 GBB_LOOP_SING_EXIT_HEADER,
1498 GBB_LOOP_MULT_EXIT_HEADER,
1505 /* Detect the type of BB. Loop headers are only marked, if they are
1506 new. This means their loop_father is different to LAST_LOOP.
1507 Otherwise they are treated like any other bb and their type can be
1511 get_bb_type (basic_block bb, struct loop *last_loop)
1513 VEC (basic_block, heap) *dom;
1515 struct loop *loop = bb->loop_father;
1517 /* Check, if we entry into a new loop. */
1518 if (loop != last_loop)
1520 if (single_exit (loop) != NULL)
1521 return GBB_LOOP_SING_EXIT_HEADER;
1522 else if (loop->num != 0)
1523 return GBB_LOOP_MULT_EXIT_HEADER;
1525 return GBB_COND_HEADER;
1528 dom = get_dominated_by (CDI_DOMINATORS, bb);
1529 nb_dom = VEC_length (basic_block, dom);
1530 VEC_free (basic_block, heap, dom);
1535 nb_suc = VEC_length (edge, bb->succs);
1537 if (nb_dom == 1 && nb_suc == 1)
1540 return GBB_COND_HEADER;
1543 /* A SCoP detection region, defined using bbs as borders.
1544 All control flow touching this region, comes in passing basic_block ENTRY and
1545 leaves passing basic_block EXIT. By using bbs instead of edges for the
1546 borders we are able to represent also regions that do not have a single
1548 But as they have a single entry basic_block and a single exit basic_block, we
1549 are able to generate for every sd_region a single entry and exit edge.
1556 / \ This region contains: {3, 4, 5, 6, 7, 8}
1564 typedef struct sd_region_p
1566 /* The entry bb dominates all bbs in the sd_region. It is part of the
1570 /* The exit bb postdominates all bbs in the sd_region, but is not
1571 part of the region. */
1575 DEF_VEC_O(sd_region);
1576 DEF_VEC_ALLOC_O(sd_region, heap);
1579 /* Moves the scops from SOURCE to TARGET and clean up SOURCE. */
1582 move_sd_regions (VEC (sd_region, heap) **source, VEC (sd_region, heap) **target)
1587 for (i = 0; VEC_iterate (sd_region, *source, i, s); i++)
1588 VEC_safe_push (sd_region, heap, *target, s);
1590 VEC_free (sd_region, heap, *source);
1593 /* Return true when it is not possible to represent the upper bound of
1594 LOOP in the polyhedral representation. */
1597 graphite_cannot_represent_loop_niter (loop_p loop)
1599 tree niter = number_of_latch_executions (loop);
1601 return chrec_contains_undetermined (niter)
1602 || !scev_is_linear_expression (niter);
1604 /* Store information needed by scopdet_* functions. */
1608 /* Where the last open scop would stop if the current BB is harmful. */
1611 /* Where the next scop would start if the current BB is harmful. */
1614 /* The bb or one of its children contains open loop exits. That means
1615 loop exit nodes that are not surrounded by a loop dominated by bb. */
1618 /* The bb or one of its children contains only structures we can handle. */
1623 static struct scopdet_info build_scops_1 (basic_block, VEC (sd_region, heap) **,
1626 /* Calculates BB infos. If bb is difficult we add valid SCoPs dominated by BB
1627 to SCOPS. TYPE is the gbb_type of BB. */
1629 static struct scopdet_info
1630 scopdet_basic_block_info (basic_block bb, VEC (sd_region, heap) **scops,
1633 struct loop *loop = bb->loop_father;
1634 struct scopdet_info result;
1637 /* XXX: ENTRY_BLOCK_PTR could be optimized in later steps. */
1638 stmt = harmful_stmt_in_bb (ENTRY_BLOCK_PTR, bb);
1639 result.difficult = (stmt != NULL);
1646 result.exits = false;
1649 /* Mark bbs terminating a SESE region difficult, if they start
1651 if (VEC_length (edge, bb->succs) > 1)
1652 result.difficult = true;
1657 result.next = single_succ (bb);
1658 result.exits = false;
1662 case GBB_LOOP_SING_EXIT_HEADER:
1664 VEC (sd_region, heap) *tmp_scops = VEC_alloc (sd_region, heap,3);
1665 struct scopdet_info sinfo;
1667 sinfo = build_scops_1 (bb, &tmp_scops, loop);
1669 result.last = single_exit (bb->loop_father)->src;
1670 result.next = single_exit (bb->loop_father)->dest;
1672 /* If we do not dominate result.next, remove it. It's either
1673 the EXIT_BLOCK_PTR, or another bb dominates it and will
1674 call the scop detection for this bb. */
1675 if (!dominated_by_p (CDI_DOMINATORS, result.next, bb))
1678 if (result.last->loop_father != loop)
1681 if (graphite_cannot_represent_loop_niter (loop))
1682 result.difficult = true;
1684 if (sinfo.difficult)
1685 move_sd_regions (&tmp_scops, scops);
1687 VEC_free (sd_region, heap, tmp_scops);
1689 result.exits = false;
1690 result.difficult |= sinfo.difficult;
1694 case GBB_LOOP_MULT_EXIT_HEADER:
1696 /* XXX: For now we just do not join loops with multiple exits. If the
1697 exits lead to the same bb it may be possible to join the loop. */
1698 VEC (sd_region, heap) *tmp_scops = VEC_alloc (sd_region, heap, 3);
1699 VEC (edge, heap) *exits = get_loop_exit_edges (loop);
1702 build_scops_1 (bb, &tmp_scops, loop);
1704 /* Scan the code dominated by this loop. This means all bbs, that are
1705 are dominated by a bb in this loop, but are not part of this loop.
1708 - The loop exit destination is dominated by the exit sources.
1710 TODO: We miss here the more complex cases:
1711 - The exit destinations are dominated by another bb inside the
1713 - The loop dominates bbs, that are not exit destinations. */
1714 for (i = 0; VEC_iterate (edge, exits, i, e); i++)
1715 if (e->src->loop_father == loop
1716 && dominated_by_p (CDI_DOMINATORS, e->dest, e->src))
1718 /* Pass loop_outer to recognize e->dest as loop header in
1720 if (e->dest->loop_father->header == e->dest)
1721 build_scops_1 (e->dest, &tmp_scops,
1722 loop_outer (e->dest->loop_father));
1724 build_scops_1 (e->dest, &tmp_scops, e->dest->loop_father);
1729 result.difficult = true;
1730 result.exits = false;
1731 move_sd_regions (&tmp_scops, scops);
1732 VEC_free (edge, heap, exits);
1735 case GBB_COND_HEADER:
1737 VEC (sd_region, heap) *tmp_scops = VEC_alloc (sd_region, heap, 3);
1738 struct scopdet_info sinfo;
1739 VEC (basic_block, heap) *dominated;
1742 basic_block last_bb = NULL;
1744 result.exits = false;
1746 /* First check the successors of BB, and check if it is possible to join
1747 the different branches. */
1748 for (i = 0; VEC_iterate (edge, bb->succs, i, e); i++)
1750 /* Ignore loop exits. They will be handled after the loop body. */
1751 if (is_loop_exit (loop, e->dest))
1753 result.exits = true;
1757 /* Do not follow edges that lead to the end of the
1758 conditions block. For example, in
1768 the edge from 0 => 6. Only check if all paths lead to
1771 if (!single_pred_p (e->dest))
1773 /* Check, if edge leads directly to the end of this
1780 if (e->dest != last_bb)
1781 result.difficult = true;
1786 if (!dominated_by_p (CDI_DOMINATORS, e->dest, bb))
1788 result.difficult = true;
1792 sinfo = build_scops_1 (e->dest, &tmp_scops, loop);
1794 result.exits |= sinfo.exits;
1795 result.last = sinfo.last;
1796 result.difficult |= sinfo.difficult;
1798 /* Checks, if all branches end at the same point.
1799 If that is true, the condition stays joinable.
1800 Have a look at the example above. */
1801 if (sinfo.last && single_succ_p (sinfo.last))
1803 basic_block next_tmp = single_succ (sinfo.last);
1808 if (next_tmp != last_bb)
1809 result.difficult = true;
1812 result.difficult = true;
1815 /* If the condition is joinable. */
1816 if (!result.exits && !result.difficult)
1818 /* Only return a next pointer if we dominate this pointer.
1819 Otherwise it will be handled by the bb dominating it. */
1820 if (dominated_by_p (CDI_DOMINATORS, last_bb, bb) && last_bb != bb)
1821 result.next = last_bb;
1825 VEC_free (sd_region, heap, tmp_scops);
1829 /* Scan remaining bbs dominated by BB. */
1830 dominated = get_dominated_by (CDI_DOMINATORS, bb);
1832 for (i = 0; VEC_iterate (basic_block, dominated, i, dom_bb); i++)
1834 /* Ignore loop exits: they will be handled after the loop body. */
1835 if (loop_depth (find_common_loop (loop, dom_bb->loop_father))
1836 < loop_depth (loop))
1838 result.exits = true;
1842 /* Ignore the bbs processed above. */
1843 if (single_pred_p (dom_bb) && single_pred (dom_bb) == bb)
1846 if (loop_depth (loop) > loop_depth (dom_bb->loop_father))
1847 sinfo = build_scops_1 (dom_bb, &tmp_scops, loop_outer (loop));
1849 sinfo = build_scops_1 (dom_bb, &tmp_scops, loop);
1852 result.exits |= sinfo.exits;
1853 result.difficult = true;
1857 VEC_free (basic_block, heap, dominated);
1860 move_sd_regions (&tmp_scops, scops);
1872 /* Creates the SCoPs and writes entry and exit points for every SCoP. */
1874 static struct scopdet_info
1875 build_scops_1 (basic_block current, VEC (sd_region, heap) **scops, loop_p loop)
1877 bool in_scop = false;
1878 sd_region open_scop;
1879 struct scopdet_info sinfo;
1881 /* Initialize result. */
1882 struct scopdet_info result;
1883 result.exits = false;
1884 result.difficult = false;
1887 open_scop.entry = NULL;
1888 open_scop.exit = NULL;
1891 /* Loop over the dominance tree. If we meet a difficult bb, close
1892 the current SCoP. Loop and condition header start a new layer,
1893 and can only be added if all bbs in deeper layers are simple. */
1894 while (current != NULL)
1896 sinfo = scopdet_basic_block_info (current, scops, get_bb_type (current,
1899 if (!in_scop && !(sinfo.exits || sinfo.difficult))
1901 open_scop.entry = current;
1902 open_scop.exit = NULL;
1905 else if (in_scop && (sinfo.exits || sinfo.difficult))
1907 open_scop.exit = current;
1908 VEC_safe_push (sd_region, heap, *scops, &open_scop);
1912 result.difficult |= sinfo.difficult;
1913 result.exits |= sinfo.exits;
1915 current = sinfo.next;
1918 /* Try to close open_scop, if we are still in an open SCoP. */
1924 for (i = 0; VEC_iterate (edge, sinfo.last->succs, i, e); i++)
1925 if (dominated_by_p (CDI_POST_DOMINATORS, sinfo.last, e->dest))
1926 open_scop.exit = e->dest;
1928 if (!open_scop.exit && open_scop.entry != sinfo.last)
1929 open_scop.exit = sinfo.last;
1932 VEC_safe_push (sd_region, heap, *scops, &open_scop);
1936 result.last = sinfo.last;
1940 /* Checks if a bb is contained in REGION. */
1943 bb_in_sd_region (basic_block bb, sd_region *region)
1945 return dominated_by_p (CDI_DOMINATORS, bb, region->entry)
1946 && !(dominated_by_p (CDI_DOMINATORS, bb, region->exit)
1947 && !dominated_by_p (CDI_DOMINATORS, region->entry,
1951 /* Returns the single entry edge of REGION, if it does not exits NULL. */
1954 find_single_entry_edge (sd_region *region)
1960 FOR_EACH_EDGE (e, ei, region->entry->preds)
1961 if (!bb_in_sd_region (e->src, region))
1976 /* Returns the single exit edge of REGION, if it does not exits NULL. */
1979 find_single_exit_edge (sd_region *region)
1985 FOR_EACH_EDGE (e, ei, region->exit->preds)
1986 if (bb_in_sd_region (e->src, region))
2001 /* Create a single entry edge for REGION. */
2004 create_single_entry_edge (sd_region *region)
2006 if (find_single_entry_edge (region))
2009 /* There are multiple predecessors for bb_3
2022 There are two edges (1->3, 2->3), that point from outside into the region,
2023 and another one (5->3), a loop latch, lead to bb_3.
2031 | |\ (3.0 -> 3.1) = single entry edge
2040 If the loop is part of the SCoP, we have to redirect the loop latches.
2046 | | (3.0 -> 3.1) = entry edge
2055 if (region->entry->loop_father->header != region->entry
2056 || dominated_by_p (CDI_DOMINATORS,
2057 loop_latch_edge (region->entry->loop_father)->src,
2060 edge forwarder = split_block_after_labels (region->entry);
2061 region->entry = forwarder->dest;
2064 /* This case is never executed, as the loop headers seem always to have a
2065 single edge pointing from outside into the loop. */
2068 #ifdef ENABLE_CHECKING
2069 gcc_assert (find_single_entry_edge (region));
2073 /* Check if the sd_region, mentioned in EDGE, has no exit bb. */
2076 sd_region_without_exit (edge e)
2078 sd_region *r = (sd_region *) e->aux;
2081 return r->exit == NULL;
2086 /* Create a single exit edge for REGION. */
2089 create_single_exit_edge (sd_region *region)
2093 edge forwarder = NULL;
2096 if (find_single_exit_edge (region))
2099 /* We create a forwarder bb (5) for all edges leaving this region
2100 (3->5, 4->5). All other edges leading to the same bb, are moved
2101 to a new bb (6). If these edges where part of another region (2->5)
2102 we update the region->exit pointer, of this region.
2104 To identify which edge belongs to which region we depend on the e->aux
2105 pointer in every edge. It points to the region of the edge or to NULL,
2106 if the edge is not part of any region.
2108 1 2 3 4 1->5 no region, 2->5 region->exit = 5,
2109 \| |/ 3->5 region->exit = NULL, 4->5 region->exit = NULL
2114 1 2 3 4 1->6 no region, 2->6 region->exit = 6,
2115 | | \/ 3->5 no region, 4->5 no region,
2117 \| / 5->6 region->exit = 6
2120 Now there is only a single exit edge (5->6). */
2121 exit = region->exit;
2122 region->exit = NULL;
2123 forwarder = make_forwarder_block (exit, &sd_region_without_exit, NULL);
2125 /* Unmark the edges, that are no longer exit edges. */
2126 FOR_EACH_EDGE (e, ei, forwarder->src->preds)
2130 /* Mark the new exit edge. */
2131 single_succ_edge (forwarder->src)->aux = region;
2133 /* Update the exit bb of all regions, where exit edges lead to
2135 FOR_EACH_EDGE (e, ei, forwarder->dest->preds)
2137 ((sd_region *) e->aux)->exit = forwarder->dest;
2139 #ifdef ENABLE_CHECKING
2140 gcc_assert (find_single_exit_edge (region));
2144 /* Unmark the exit edges of all REGIONS.
2145 See comment in "create_single_exit_edge". */
2148 unmark_exit_edges (VEC (sd_region, heap) *regions)
2155 for (i = 0; VEC_iterate (sd_region, regions, i, s); i++)
2156 FOR_EACH_EDGE (e, ei, s->exit->preds)
2161 /* Mark the exit edges of all REGIONS.
2162 See comment in "create_single_exit_edge". */
2165 mark_exit_edges (VEC (sd_region, heap) *regions)
2172 for (i = 0; VEC_iterate (sd_region, regions, i, s); i++)
2173 FOR_EACH_EDGE (e, ei, s->exit->preds)
2174 if (bb_in_sd_region (e->src, s))
2178 /* Free and compute again all the dominators information. */
2181 recompute_all_dominators (void)
2183 mark_irreducible_loops ();
2184 free_dominance_info (CDI_DOMINATORS);
2185 free_dominance_info (CDI_POST_DOMINATORS);
2186 calculate_dominance_info (CDI_DOMINATORS);
2187 calculate_dominance_info (CDI_POST_DOMINATORS);
2190 /* Verifies properties that GRAPHITE should maintain during translation. */
2193 graphite_verify (void)
2195 #ifdef ENABLE_CHECKING
2196 verify_loop_structure ();
2197 verify_dominators (CDI_DOMINATORS);
2198 verify_dominators (CDI_POST_DOMINATORS);
2200 verify_loop_closed_ssa ();
2204 /* Create for all scop regions a single entry and a single exit edge. */
2207 create_sese_edges (VEC (sd_region, heap) *regions)
2212 for (i = 0; VEC_iterate (sd_region, regions, i, s); i++)
2213 create_single_entry_edge (s);
2215 mark_exit_edges (regions);
2217 for (i = 0; VEC_iterate (sd_region, regions, i, s); i++)
2218 create_single_exit_edge (s);
2220 unmark_exit_edges (regions);
2222 fix_loop_structure (NULL);
2224 #ifdef ENABLE_CHECKING
2225 verify_loop_structure ();
2226 verify_dominators (CDI_DOMINATORS);
2231 /* Create graphite SCoPs from an array of scop detection regions. */
2234 build_graphite_scops (VEC (sd_region, heap) *scop_regions)
2239 for (i = 0; VEC_iterate (sd_region, scop_regions, i, s); i++)
2241 edge entry = find_single_entry_edge (s);
2242 edge exit = find_single_exit_edge (s);
2243 scop_p scop = new_scop (entry, exit);
2244 VEC_safe_push (scop_p, heap, current_scops, scop);
2246 /* Are there overlapping SCoPs? */
2247 #ifdef ENABLE_CHECKING
2252 for (j = 0; VEC_iterate (sd_region, scop_regions, j, s2); j++)
2254 gcc_assert (!bb_in_sd_region (s->entry, s2));
2260 /* Find static control parts. */
2265 struct loop *loop = current_loops->tree_root;
2266 VEC (sd_region, heap) *tmp_scops = VEC_alloc (sd_region, heap, 3);
2268 build_scops_1 (single_succ (ENTRY_BLOCK_PTR), &tmp_scops, loop);
2269 create_sese_edges (tmp_scops);
2270 build_graphite_scops (tmp_scops);
2271 VEC_free (sd_region, heap, tmp_scops);
2274 /* Gather the basic blocks belonging to the SCOP. */
2277 build_scop_bbs (scop_p scop)
2279 basic_block *stack = XNEWVEC (basic_block, n_basic_blocks + 1);
2280 sbitmap visited = sbitmap_alloc (last_basic_block);
2283 sbitmap_zero (visited);
2284 stack[sp++] = SCOP_ENTRY (scop);
2288 basic_block bb = stack[--sp];
2289 int depth = loop_depth (bb->loop_father);
2290 int num = bb->loop_father->num;
2294 /* Scop's exit is not in the scop. Exclude also bbs, which are
2295 dominated by the SCoP exit. These are e.g. loop latches. */
2296 if (TEST_BIT (visited, bb->index)
2297 || dominated_by_p (CDI_DOMINATORS, bb, SCOP_EXIT (scop))
2298 /* Every block in the scop is dominated by scop's entry. */
2299 || !dominated_by_p (CDI_DOMINATORS, bb, SCOP_ENTRY (scop)))
2302 new_graphite_bb (scop, bb);
2303 SET_BIT (visited, bb->index);
2305 /* First push the blocks that have to be processed last. Note
2306 that this means that the order in which the code is organized
2307 below is important: do not reorder the following code. */
2308 FOR_EACH_EDGE (e, ei, bb->succs)
2309 if (! TEST_BIT (visited, e->dest->index)
2310 && (int) loop_depth (e->dest->loop_father) < depth)
2311 stack[sp++] = e->dest;
2313 FOR_EACH_EDGE (e, ei, bb->succs)
2314 if (! TEST_BIT (visited, e->dest->index)
2315 && (int) loop_depth (e->dest->loop_father) == depth
2316 && e->dest->loop_father->num != num)
2317 stack[sp++] = e->dest;
2319 FOR_EACH_EDGE (e, ei, bb->succs)
2320 if (! TEST_BIT (visited, e->dest->index)
2321 && (int) loop_depth (e->dest->loop_father) == depth
2322 && e->dest->loop_father->num == num
2323 && EDGE_COUNT (e->dest->preds) > 1)
2324 stack[sp++] = e->dest;
2326 FOR_EACH_EDGE (e, ei, bb->succs)
2327 if (! TEST_BIT (visited, e->dest->index)
2328 && (int) loop_depth (e->dest->loop_father) == depth
2329 && e->dest->loop_father->num == num
2330 && EDGE_COUNT (e->dest->preds) == 1)
2331 stack[sp++] = e->dest;
2333 FOR_EACH_EDGE (e, ei, bb->succs)
2334 if (! TEST_BIT (visited, e->dest->index)
2335 && (int) loop_depth (e->dest->loop_father) > depth)
2336 stack[sp++] = e->dest;
2340 sbitmap_free (visited);
2343 /* Returns the number of reduction phi nodes in LOOP. */
2346 nb_reductions_in_loop (loop_p loop)
2349 gimple_stmt_iterator gsi;
2351 for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
2353 gimple phi = gsi_stmt (gsi);
2357 if (!is_gimple_reg (PHI_RESULT (phi)))
2360 scev = analyze_scalar_evolution (loop, PHI_RESULT (phi));
2361 scev = instantiate_parameters (loop, scev);
2362 if (!simple_iv (loop, loop, PHI_RESULT (phi), &iv, true))
2369 /* A LOOP is in normal form when it contains only one scalar phi node
2370 that defines the main induction variable of the loop, only one
2371 increment of the IV, and only one exit condition. */
2374 graphite_loop_normal_form (loop_p loop)
2376 struct tree_niter_desc niter;
2379 edge exit = single_dom_exit (loop);
2380 bool known_niter = number_of_iterations_exit (loop, exit, &niter, false);
2382 gcc_assert (known_niter);
2384 nit = force_gimple_operand (unshare_expr (niter.niter), &stmts, true,
2387 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
2389 /* One IV per loop. */
2390 if (nb_reductions_in_loop (loop) > 0)
2393 return canonicalize_loop_ivs (loop, NULL, &nit);
2396 /* Record LOOP as occuring in SCOP. Returns true when the operation
2400 scop_record_loop (scop_p scop, loop_p loop)
2405 if (bitmap_bit_p (SCOP_LOOPS (scop), loop->num))
2408 bitmap_set_bit (SCOP_LOOPS (scop), loop->num);
2409 VEC_safe_push (loop_p, heap, SCOP_LOOP_NEST (scop), loop);
2411 induction_var = graphite_loop_normal_form (loop);
2415 oldiv = XNEW (struct name_tree);
2416 oldiv->t = induction_var;
2417 oldiv->name = get_name (SSA_NAME_VAR (oldiv->t));
2419 VEC_safe_push (name_tree, heap, SCOP_OLDIVS (scop), oldiv);
2423 /* Build the loop nests contained in SCOP. Returns true when the
2424 operation was successful. */
2427 build_scop_loop_nests (scop_p scop)
2431 struct loop *loop0, *loop1;
2434 if (bb_in_sese_p (bb, SCOP_REGION (scop)))
2436 struct loop *loop = bb->loop_father;
2438 /* Only add loops if they are completely contained in the SCoP. */
2439 if (loop->header == bb
2440 && bb_in_sese_p (loop->latch, SCOP_REGION (scop)))
2442 if (!scop_record_loop (scop, loop))
2447 /* Make sure that the loops in the SCOP_LOOP_NEST are ordered. It
2448 can be the case that an inner loop is inserted before an outer
2449 loop. To avoid this, semi-sort once. */
2450 for (i = 0; VEC_iterate (loop_p, SCOP_LOOP_NEST (scop), i, loop0); i++)
2452 if (VEC_length (loop_p, SCOP_LOOP_NEST (scop)) == i + 1)
2455 loop1 = VEC_index (loop_p, SCOP_LOOP_NEST (scop), i + 1);
2456 if (loop0->num > loop1->num)
2458 VEC_replace (loop_p, SCOP_LOOP_NEST (scop), i, loop1);
2459 VEC_replace (loop_p, SCOP_LOOP_NEST (scop), i + 1, loop0);
2466 /* Calculate the number of loops around LOOP in the SCOP. */
2469 nb_loops_around_loop_in_scop (struct loop *l, scop_p scop)
2473 for (; loop_in_sese_p (l, SCOP_REGION (scop)); d++, l = loop_outer (l));
2478 /* Calculate the number of loops around GB in the current SCOP. */
2481 nb_loops_around_gb (graphite_bb_p gb)
2483 return nb_loops_around_loop_in_scop (gbb_loop (gb), GBB_SCOP (gb));
2486 /* Returns the dimensionality of an enclosing loop iteration domain
2487 with respect to enclosing SCoP for a given data reference REF. The
2488 returned dimensionality is homogeneous (depth of loop nest + number
2489 of SCoP parameters + const). */
2492 ref_nb_loops (data_reference_p ref)
2494 loop_p loop = loop_containing_stmt (DR_STMT (ref));
2495 scop_p scop = DR_SCOP (ref);
2497 return nb_loops_around_loop_in_scop (loop, scop) + scop_nb_params (scop) + 2;
2500 /* Build dynamic schedules for all the BBs. */
2503 build_scop_dynamic_schedules (scop_p scop)
2505 int i, dim, loop_num, row, col;
2508 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
2510 loop_num = GBB_BB (gb)->loop_father->num;
2514 dim = nb_loops_around_gb (gb);
2515 GBB_DYNAMIC_SCHEDULE (gb) = cloog_matrix_alloc (dim, dim);
2517 for (row = 0; row < GBB_DYNAMIC_SCHEDULE (gb)->NbRows; row++)
2518 for (col = 0; col < GBB_DYNAMIC_SCHEDULE (gb)->NbColumns; col++)
2520 value_set_si (GBB_DYNAMIC_SCHEDULE (gb)->p[row][col], 1);
2522 value_set_si (GBB_DYNAMIC_SCHEDULE (gb)->p[row][col], 0);
2525 GBB_DYNAMIC_SCHEDULE (gb) = NULL;
2529 /* Returns the number of loops that are identical at the beginning of
2530 the vectors A and B. */
2533 compare_prefix_loops (VEC (loop_p, heap) *a, VEC (loop_p, heap) *b)
2542 lb = VEC_length (loop_p, b);
2544 for (i = 0; VEC_iterate (loop_p, a, i, ea); i++)
2546 || ea != VEC_index (loop_p, b, i))
2552 /* Build for BB the static schedule.
2554 The STATIC_SCHEDULE is defined like this:
2573 Static schedules for A to F:
2586 build_scop_canonical_schedules (scop_p scop)
2590 int nb_loops = scop_nb_loops (scop);
2591 lambda_vector static_schedule = lambda_vector_new (nb_loops + 1);
2592 VEC (loop_p, heap) *loops_previous = NULL;
2594 /* We have to start schedules at 0 on the first component and
2595 because we cannot compare_prefix_loops against a previous loop,
2596 prefix will be equal to zero, and that index will be
2597 incremented before copying. */
2598 static_schedule[0] = -1;
2600 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
2602 int prefix = compare_prefix_loops (loops_previous, GBB_LOOPS (gb));
2603 int nb = gbb_nb_loops (gb);
2605 loops_previous = GBB_LOOPS (gb);
2606 memset (&(static_schedule[prefix + 1]), 0, sizeof (int) * (nb_loops - prefix));
2607 ++static_schedule[prefix];
2608 GBB_STATIC_SCHEDULE (gb) = lambda_vector_new (nb + 1);
2609 lambda_vector_copy (static_schedule,
2610 GBB_STATIC_SCHEDULE (gb), nb + 1);
2614 /* Build the LOOPS vector for all bbs in SCOP. */
2617 build_bb_loops (scop_p scop)
2622 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
2627 depth = nb_loops_around_gb (gb) - 1;
2629 GBB_LOOPS (gb) = VEC_alloc (loop_p, heap, 3);
2630 VEC_safe_grow_cleared (loop_p, heap, GBB_LOOPS (gb), depth + 1);
2632 loop = GBB_BB (gb)->loop_father;
2634 while (scop_contains_loop (scop, loop))
2636 VEC_replace (loop_p, GBB_LOOPS (gb), depth, loop);
2637 loop = loop_outer (loop);
2643 /* Get the index for parameter VAR in SCOP. */
2646 param_index (tree var, scop_p scop)
2652 gcc_assert (TREE_CODE (var) == SSA_NAME);
2654 for (i = 0; VEC_iterate (name_tree, SCOP_PARAMS (scop), i, p); i++)
2658 gcc_assert (SCOP_ADD_PARAMS (scop));
2660 nvar = XNEW (struct name_tree);
2663 VEC_safe_push (name_tree, heap, SCOP_PARAMS (scop), nvar);
2664 return VEC_length (name_tree, SCOP_PARAMS (scop)) - 1;
2667 /* Scan EXPR and translate it to an inequality vector INEQ that will
2668 be added, or subtracted, in the constraint domain matrix C at row
2669 R. K is the number of columns for loop iterators in C. */
2672 scan_tree_for_params (scop_p s, tree e, CloogMatrix *c, int r, Value k,
2675 int cst_col, param_col;
2677 if (e == chrec_dont_know)
2680 switch (TREE_CODE (e))
2682 case POLYNOMIAL_CHREC:
2684 tree left = CHREC_LEFT (e);
2685 tree right = CHREC_RIGHT (e);
2686 int var = CHREC_VARIABLE (e);
2688 if (TREE_CODE (right) != INTEGER_CST)
2693 int loop_col = scop_gimple_loop_depth (s, get_loop (var)) + 1;
2696 value_sub_int (c->p[r][loop_col], c->p[r][loop_col],
2697 int_cst_value (right));
2699 value_add_int (c->p[r][loop_col], c->p[r][loop_col],
2700 int_cst_value (right));
2703 switch (TREE_CODE (left))
2705 case POLYNOMIAL_CHREC:
2706 scan_tree_for_params (s, left, c, r, k, subtract);
2710 /* Constant part. */
2713 int v = int_cst_value (left);
2714 cst_col = c->NbColumns - 1;
2719 subtract = subtract ? false : true;
2723 value_sub_int (c->p[r][cst_col], c->p[r][cst_col], v);
2725 value_add_int (c->p[r][cst_col], c->p[r][cst_col], v);
2730 scan_tree_for_params (s, left, c, r, k, subtract);
2737 if (chrec_contains_symbols (TREE_OPERAND (e, 0)))
2742 gcc_assert (host_integerp (TREE_OPERAND (e, 1), 0));
2744 value_set_si (val, int_cst_value (TREE_OPERAND (e, 1)));
2745 value_multiply (k, k, val);
2748 scan_tree_for_params (s, TREE_OPERAND (e, 0), c, r, k, subtract);
2755 gcc_assert (host_integerp (TREE_OPERAND (e, 0), 0));
2757 value_set_si (val, int_cst_value (TREE_OPERAND (e, 0)));
2758 value_multiply (k, k, val);
2761 scan_tree_for_params (s, TREE_OPERAND (e, 1), c, r, k, subtract);
2766 case POINTER_PLUS_EXPR:
2767 scan_tree_for_params (s, TREE_OPERAND (e, 0), c, r, k, subtract);
2768 scan_tree_for_params (s, TREE_OPERAND (e, 1), c, r, k, subtract);
2772 scan_tree_for_params (s, TREE_OPERAND (e, 0), c, r, k, subtract);
2773 scan_tree_for_params (s, TREE_OPERAND (e, 1), c, r, k, !subtract);
2777 scan_tree_for_params (s, TREE_OPERAND (e, 0), c, r, k, !subtract);
2781 param_col = param_index (e, s);
2785 param_col += c->NbColumns - scop_nb_params (s) - 1;
2788 value_subtract (c->p[r][param_col], c->p[r][param_col], k);
2790 value_addto (c->p[r][param_col], c->p[r][param_col], k);
2797 int v = int_cst_value (e);
2798 cst_col = c->NbColumns - 1;
2803 subtract = subtract ? false : true;
2807 value_sub_int (c->p[r][cst_col], c->p[r][cst_col], v);
2809 value_add_int (c->p[r][cst_col], c->p[r][cst_col], v);
2814 case NON_LVALUE_EXPR:
2815 scan_tree_for_params (s, TREE_OPERAND (e, 0), c, r, k, subtract);
2824 /* Data structure for idx_record_params. */
2832 /* For a data reference with an ARRAY_REF as its BASE, record the
2833 parameters occurring in IDX. DTA is passed in as complementary
2834 information, and is used by the automatic walker function. This
2835 function is a callback for for_each_index. */
2838 idx_record_params (tree base, tree *idx, void *dta)
2840 struct irp_data *data = (struct irp_data *) dta;
2842 if (TREE_CODE (base) != ARRAY_REF)
2845 if (TREE_CODE (*idx) == SSA_NAME)
2848 scop_p scop = data->scop;
2849 struct loop *loop = data->loop;
2852 scev = analyze_scalar_evolution (loop, *idx);
2853 scev = instantiate_scev (block_before_scop (scop), loop, scev);
2856 value_set_si (one, 1);
2857 scan_tree_for_params (scop, scev, NULL, 0, one, false);
2864 /* Find parameters with respect to SCOP in BB. We are looking in memory
2865 access functions, conditions and loop bounds. */
2868 find_params_in_bb (scop_p scop, graphite_bb_p gb)
2871 data_reference_p dr;
2873 loop_p father = GBB_BB (gb)->loop_father;
2875 for (i = 0; VEC_iterate (data_reference_p, GBB_DATA_REFS (gb), i, dr); i++)
2877 struct irp_data irp;
2881 for_each_index (&dr->ref, idx_record_params, &irp);
2884 /* Find parameters in conditional statements. */
2885 for (i = 0; VEC_iterate (gimple, GBB_CONDITIONS (gb), i, stmt); i++)
2888 loop_p loop = father;
2892 lhs = gimple_cond_lhs (stmt);
2893 lhs = analyze_scalar_evolution (loop, lhs);
2894 lhs = instantiate_scev (block_before_scop (scop), loop, lhs);
2896 rhs = gimple_cond_rhs (stmt);
2897 rhs = analyze_scalar_evolution (loop, rhs);
2898 rhs = instantiate_scev (block_before_scop (scop), loop, rhs);
2901 scan_tree_for_params (scop, lhs, NULL, 0, one, false);
2902 value_set_si (one, 1);
2903 scan_tree_for_params (scop, rhs, NULL, 0, one, false);
2908 /* Saves in NV the name of variable P->T. */
2911 save_var_name (char **nv, int i, name_tree p)
2913 const char *name = get_name (SSA_NAME_VAR (p->t));
2917 int len = strlen (name) + 16;
2918 nv[i] = XNEWVEC (char, len);
2919 snprintf (nv[i], len, "%s_%d", name, SSA_NAME_VERSION (p->t));
2923 nv[i] = XNEWVEC (char, 16);
2924 snprintf (nv[i], 2 + 16, "T_%d", SSA_NAME_VERSION (p->t));
2930 /* Return the maximal loop depth in SCOP. */
2933 scop_max_loop_depth (scop_p scop)
2937 int max_nb_loops = 0;
2939 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gbb); i++)
2941 int nb_loops = gbb_nb_loops (gbb);
2942 if (max_nb_loops < nb_loops)
2943 max_nb_loops = nb_loops;
2946 return max_nb_loops;
2949 /* Initialize Cloog's parameter names from the names used in GIMPLE.
2950 Initialize Cloog's iterator names, using 'graphite_iterator_%d'
2951 from 0 to scop_nb_loops (scop). */
2954 initialize_cloog_names (scop_p scop)
2956 int i, nb_params = VEC_length (name_tree, SCOP_PARAMS (scop));
2957 char **params = XNEWVEC (char *, nb_params);
2958 int nb_iterators = scop_max_loop_depth (scop);
2959 int nb_scattering= cloog_program_nb_scattdims (SCOP_PROG (scop));
2960 char **iterators = XNEWVEC (char *, nb_iterators * 2);
2961 char **scattering = XNEWVEC (char *, nb_scattering);
2964 for (i = 0; VEC_iterate (name_tree, SCOP_PARAMS (scop), i, p); i++)
2965 save_var_name (params, i, p);
2967 cloog_names_set_nb_parameters (cloog_program_names (SCOP_PROG (scop)),
2969 cloog_names_set_parameters (cloog_program_names (SCOP_PROG (scop)),
2972 for (i = 0; i < nb_iterators; i++)
2975 iterators[i] = XNEWVEC (char, len);
2976 snprintf (iterators[i], len, "graphite_iterator_%d", i);
2979 cloog_names_set_nb_iterators (cloog_program_names (SCOP_PROG (scop)),
2981 cloog_names_set_iterators (cloog_program_names (SCOP_PROG (scop)),
2984 for (i = 0; i < nb_scattering; i++)
2987 scattering[i] = XNEWVEC (char, len);
2988 snprintf (scattering[i], len, "s_%d", i);
2991 cloog_names_set_nb_scattering (cloog_program_names (SCOP_PROG (scop)),
2993 cloog_names_set_scattering (cloog_program_names (SCOP_PROG (scop)),
2997 /* Record the parameters used in the SCOP. A variable is a parameter
2998 in a scop if it does not vary during the execution of that scop. */
3001 find_scop_parameters (scop_p scop)
3009 value_set_si (one, 1);
3011 /* Find the parameters used in the loop bounds. */
3012 for (i = 0; VEC_iterate (loop_p, SCOP_LOOP_NEST (scop), i, loop); i++)
3014 tree nb_iters = number_of_latch_executions (loop);
3016 if (!chrec_contains_symbols (nb_iters))
3019 nb_iters = analyze_scalar_evolution (loop, nb_iters);
3020 nb_iters = instantiate_scev (block_before_scop (scop), loop, nb_iters);
3021 scan_tree_for_params (scop, nb_iters, NULL, 0, one, false);
3026 /* Find the parameters used in data accesses. */
3027 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
3028 find_params_in_bb (scop, gb);
3030 SCOP_ADD_PARAMS (scop) = false;
3033 /* Build the context constraints for SCOP: constraints and relations
3037 build_scop_context (scop_p scop)
3039 int nb_params = scop_nb_params (scop);
3040 CloogMatrix *matrix = cloog_matrix_alloc (1, nb_params + 2);
3042 /* Insert '0 >= 0' in the context matrix, as it is not allowed to be
3045 value_set_si (matrix->p[0][0], 1);
3047 value_set_si (matrix->p[0][nb_params + 1], 0);
3049 cloog_program_set_context (SCOP_PROG (scop),
3050 cloog_domain_matrix2domain (matrix));
3051 cloog_matrix_free (matrix);
3054 /* Returns a graphite_bb from BB. */
3056 static inline graphite_bb_p
3057 gbb_from_bb (basic_block bb)
3059 return (graphite_bb_p) bb->aux;
3062 /* Builds the constraint matrix for LOOP in SCOP. NB_OUTER_LOOPS is the
3063 number of loops surrounding LOOP in SCOP. OUTER_CSTR gives the
3064 constraints matrix for the surrounding loops. */
3067 build_loop_iteration_domains (scop_p scop, struct loop *loop,
3068 CloogMatrix *outer_cstr, int nb_outer_loops)
3074 int nb_rows = outer_cstr->NbRows + 1;
3075 int nb_cols = outer_cstr->NbColumns + 1;
3077 /* Last column of CSTR is the column of constants. */
3078 int cst_col = nb_cols - 1;
3080 /* The column for the current loop is just after the columns of
3081 other outer loops. */
3082 int loop_col = nb_outer_loops + 1;
3084 tree nb_iters = number_of_latch_executions (loop);
3086 /* When the number of iterations is a constant or a parameter, we
3087 add a constraint for the upper bound of the loop. So add a row
3088 to the constraint matrix before allocating it. */
3089 if (TREE_CODE (nb_iters) == INTEGER_CST
3090 || !chrec_contains_undetermined (nb_iters))
3093 cstr = cloog_matrix_alloc (nb_rows, nb_cols);
3095 /* Copy the outer constraints. */
3096 for (i = 0; i < outer_cstr->NbRows; i++)
3098 /* Copy the eq/ineq and loops columns. */
3099 for (j = 0; j < loop_col; j++)
3100 value_assign (cstr->p[i][j], outer_cstr->p[i][j]);
3102 /* Leave an empty column in CSTR for the current loop, and then
3103 copy the parameter columns. */
3104 for (j = loop_col; j < outer_cstr->NbColumns; j++)
3105 value_assign (cstr->p[i][j + 1], outer_cstr->p[i][j]);
3109 row = outer_cstr->NbRows;
3110 value_set_si (cstr->p[row][0], 1);
3111 value_set_si (cstr->p[row][loop_col], 1);
3113 /* loop_i <= nb_iters */
3114 if (TREE_CODE (nb_iters) == INTEGER_CST)
3117 value_set_si (cstr->p[row][0], 1);
3118 value_set_si (cstr->p[row][loop_col], -1);
3120 value_set_si (cstr->p[row][cst_col],
3121 int_cst_value (nb_iters));
3123 else if (!chrec_contains_undetermined (nb_iters))
3125 /* Otherwise nb_iters contains parameters: scan the nb_iters
3126 expression and build its matrix representation. */
3130 value_set_si (cstr->p[row][0], 1);
3131 value_set_si (cstr->p[row][loop_col], -1);
3133 nb_iters = analyze_scalar_evolution (loop, nb_iters);
3134 nb_iters = instantiate_scev (block_before_scop (scop), loop, nb_iters);
3137 value_set_si (one, 1);
3138 scan_tree_for_params (scop, nb_iters, cstr, row, one, false);
3144 if (loop->inner && loop_in_sese_p (loop->inner, SCOP_REGION (scop)))
3145 build_loop_iteration_domains (scop, loop->inner, cstr, nb_outer_loops + 1);
3147 /* Only go to the next loops, if we are not at the outermost layer. These
3148 have to be handled seperately, as we can be sure, that the chain at this
3149 layer will be connected. */
3150 if (nb_outer_loops != 0 && loop->next && loop_in_sese_p (loop->next,
3151 SCOP_REGION (scop)))
3152 build_loop_iteration_domains (scop, loop->next, outer_cstr, nb_outer_loops);
3154 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
3155 if (gbb_loop (gb) == loop)
3156 GBB_DOMAIN (gb) = cloog_matrix_copy (cstr);
3158 cloog_matrix_free (cstr);
3161 /* Add conditions to the domain of GB. */
3164 add_conditions_to_domain (graphite_bb_p gb)
3168 VEC (gimple, heap) *conditions = GBB_CONDITIONS (gb);
3169 CloogMatrix *domain = GBB_DOMAIN (gb);
3170 scop_p scop = GBB_SCOP (gb);
3174 unsigned nb_new_rows = 0;
3177 if (VEC_empty (gimple, conditions))
3182 nb_rows = domain->NbRows;
3183 nb_cols = domain->NbColumns;
3188 nb_cols = nb_loops_around_gb (gb) + scop_nb_params (scop) + 2;
3191 /* Count number of necessary new rows to add the conditions to the
3193 for (i = 0; VEC_iterate (gimple, conditions, i, stmt); i++)
3195 switch (gimple_code (stmt))
3199 enum tree_code code = gimple_cond_code (stmt);
3205 /* NE and EQ statements are not supported right know. */
3221 /* Switch statements are not supported right know. */
3232 /* Enlarge the matrix. */
3234 CloogMatrix *new_domain;
3235 new_domain = cloog_matrix_alloc (nb_rows + nb_new_rows, nb_cols);
3239 for (i = 0; i < nb_rows; i++)
3240 for (j = 0; j < nb_cols; j++)
3241 value_assign (new_domain->p[i][j], domain->p[i][j]);
3243 cloog_matrix_free (domain);
3246 domain = new_domain;
3247 GBB_DOMAIN (gb) = new_domain;
3250 /* Add the conditions to the new enlarged domain matrix. */
3252 for (i = 0; VEC_iterate (gimple, conditions, i, stmt); i++)
3254 switch (gimple_code (stmt))
3259 enum tree_code code;
3262 loop_p loop = GBB_BB (gb)->loop_father;
3264 left = gimple_cond_lhs (stmt);
3265 right = gimple_cond_rhs (stmt);
3267 left = analyze_scalar_evolution (loop, left);
3268 right = analyze_scalar_evolution (loop, right);
3270 left = instantiate_scev (block_before_scop (scop), loop, left);
3271 right = instantiate_scev (block_before_scop (scop), loop, right);
3273 code = gimple_cond_code (stmt);
3275 /* The conditions for ELSE-branches are inverted. */
3276 if (VEC_index (gimple, gb->condition_cases, i) == NULL)
3277 code = invert_tree_comparison (code, false);
3282 /* NE statements are not supported right know. */
3286 value_set_si (domain->p[row][0], 1);
3288 value_set_si (one, 1);
3289 scan_tree_for_params (scop, left, domain, row, one, true);
3290 value_set_si (one, 1);
3291 scan_tree_for_params (scop, right, domain, row, one, false);
3293 value_set_si (domain->p[row][0], 1);
3294 value_set_si (one, 1);
3295 scan_tree_for_params (scop, left, domain, row, one, false);
3296 value_set_si (one, 1);
3297 scan_tree_for_params (scop, right, domain, row, one, true);
3302 value_set_si (domain->p[row][0], 1);
3304 value_set_si (one, 1);
3305 scan_tree_for_params (scop, left, domain, row, one, true);
3306 value_set_si (one, 1);
3307 scan_tree_for_params (scop, right, domain, row, one, false);
3308 value_sub_int (domain->p[row][nb_cols - 1],
3309 domain->p[row][nb_cols - 1], 1);
3314 value_set_si (domain->p[row][0], 1);
3316 value_set_si (one, 1);
3317 scan_tree_for_params (scop, left, domain, row, one, false);
3318 value_set_si (one, 1);
3319 scan_tree_for_params (scop, right, domain, row, one, true);
3320 value_sub_int (domain->p[row][nb_cols - 1],
3321 domain->p[row][nb_cols - 1], 1);
3326 value_set_si (domain->p[row][0], 1);
3328 value_set_si (one, 1);
3329 scan_tree_for_params (scop, left, domain, row, one, true);
3330 value_set_si (one, 1);
3331 scan_tree_for_params (scop, right, domain, row, one, false);
3336 value_set_si (domain->p[row][0], 1);
3338 value_set_si (one, 1);
3339 scan_tree_for_params (scop, left, domain, row, one, false);
3340 value_set_si (one, 1);
3341 scan_tree_for_params (scop, right, domain, row, one, true);
3352 /* Switch statements are not supported right know. */
3363 /* Returns true when PHI defines an induction variable in the loop
3364 containing the PHI node. */
3367 phi_node_is_iv (gimple phi)
3369 loop_p loop = gimple_bb (phi)->loop_father;
3370 tree scev = analyze_scalar_evolution (loop, gimple_phi_result (phi));
3372 return tree_contains_chrecs (scev, NULL);
3375 /* Returns true when BB contains scalar phi nodes that are not an
3376 induction variable of a loop. */
3379 bb_contains_non_iv_scalar_phi_nodes (basic_block bb)
3382 gimple_stmt_iterator si;
3384 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
3385 if (is_gimple_reg (gimple_phi_result (gsi_stmt (si))))
3387 /* Store the unique scalar PHI node: at this point, loops
3388 should be in cannonical form, so we expect to see at most
3389 one scalar phi node in the loop header. */
3391 || bb != bb->loop_father->header)
3394 phi = gsi_stmt (si);
3398 || phi_node_is_iv (phi))
3404 /* Helper recursive function. Record in CONDITIONS and CASES all
3405 conditions from 'if's and 'switch'es occurring in BB from SCOP.
3407 Returns false when the conditions contain scalar computations that
3408 depend on the condition, i.e. when there are scalar phi nodes on
3409 the junction after the condition. Only the computations occurring
3410 on memory can be handled in the polyhedral model: operations that
3411 define scalar evolutions in conditions, that can potentially be
3412 used to index memory, can't be handled by the polyhedral model. */
3415 build_scop_conditions_1 (VEC (gimple, heap) **conditions,
3416 VEC (gimple, heap) **cases, basic_block bb,
3422 basic_block bb_child, bb_iter;
3423 VEC (basic_block, heap) *dom;
3426 /* Make sure we are in the SCoP. */
3427 if (!bb_in_sese_p (bb, SCOP_REGION (scop)))
3430 if (bb_contains_non_iv_scalar_phi_nodes (bb))
3433 gbb = gbb_from_bb (bb);
3436 GBB_CONDITIONS (gbb) = VEC_copy (gimple, heap, *conditions);
3437 GBB_CONDITION_CASES (gbb) = VEC_copy (gimple, heap, *cases);
3440 dom = get_dominated_by (CDI_DOMINATORS, bb);
3442 stmt = last_stmt (bb);
3445 VEC (edge, gc) *edges;
3448 switch (gimple_code (stmt))
3452 for (i = 0; VEC_iterate (edge, edges, i, e); i++)
3453 if ((dominated_by_p (CDI_DOMINATORS, e->dest, bb))
3454 && VEC_length (edge, e->dest->preds) == 1)
3456 /* Remove the scanned block from the dominator successors. */
3457 for (j = 0; VEC_iterate (basic_block, dom, j, bb_iter); j++)
3458 if (bb_iter == e->dest)
3460 VEC_unordered_remove (basic_block, dom, j);
3464 /* Recursively scan the then or else part. */
3465 if (e->flags & EDGE_TRUE_VALUE)
3466 VEC_safe_push (gimple, heap, *cases, stmt);
3469 gcc_assert (e->flags & EDGE_FALSE_VALUE);
3470 VEC_safe_push (gimple, heap, *cases, NULL);
3473 VEC_safe_push (gimple, heap, *conditions, stmt);
3474 if (!build_scop_conditions_1 (conditions, cases, e->dest, scop))
3479 VEC_pop (gimple, *conditions);
3480 VEC_pop (gimple, *cases);
3487 gimple_stmt_iterator gsi_search_gimple_label;
3489 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
3491 basic_block bb_iter;
3493 size_t n_cases = VEC_length (gimple, *conditions);
3494 unsigned n = gimple_switch_num_labels (stmt);
3496 bb_child = label_to_block
3497 (CASE_LABEL (gimple_switch_label (stmt, i)));
3499 for (k = 0; k < n; k++)
3502 (CASE_LABEL (gimple_switch_label (stmt, k))) == bb_child)
3505 /* Switches with multiple case values for the same
3506 block are not handled. */
3508 /* Switch cases with more than one predecessor are
3510 || VEC_length (edge, bb_child->preds) != 1)
3516 /* Recursively scan the corresponding 'case' block. */
3517 for (gsi_search_gimple_label = gsi_start_bb (bb_child);
3518 !gsi_end_p (gsi_search_gimple_label);
3519 gsi_next (&gsi_search_gimple_label))
3521 gimple label = gsi_stmt (gsi_search_gimple_label);
3523 if (gimple_code (label) == GIMPLE_LABEL)
3525 tree t = gimple_label_label (label);
3527 gcc_assert (t == gimple_switch_label (stmt, i));
3528 VEC_replace (gimple, *cases, n_cases, label);
3533 if (!build_scop_conditions_1 (conditions, cases, bb_child, scop))
3539 /* Remove the scanned block from the dominator successors. */
3540 for (j = 0; VEC_iterate (basic_block, dom, j, bb_iter); j++)
3541 if (bb_iter == bb_child)
3543 VEC_unordered_remove (basic_block, dom, j);
3548 VEC_pop (gimple, *conditions);
3549 VEC_pop (gimple, *cases);
3558 /* Scan all immediate dominated successors. */
3559 for (i = 0; VEC_iterate (basic_block, dom, i, bb_child); i++)
3560 if (!build_scop_conditions_1 (conditions, cases, bb_child, scop))
3567 VEC_free (basic_block, heap, dom);
3571 /* Record all conditions from SCOP.
3573 Returns false when the conditions contain scalar computations that
3574 depend on the condition, i.e. when there are scalar phi nodes on
3575 the junction after the condition. Only the computations occurring
3576 on memory can be handled in the polyhedral model: operations that
3577 define scalar evolutions in conditions, that can potentially be
3578 used to index memory, can't be handled by the polyhedral model. */
3581 build_scop_conditions (scop_p scop)
3584 VEC (gimple, heap) *conditions = NULL;
3585 VEC (gimple, heap) *cases = NULL;
3587 res = build_scop_conditions_1 (&conditions, &cases, SCOP_ENTRY (scop), scop);
3589 VEC_free (gimple, heap, conditions);
3590 VEC_free (gimple, heap, cases);
3594 /* Traverses all the GBBs of the SCOP and add their constraints to the
3595 iteration domains. */
3598 add_conditions_to_constraints (scop_p scop)
3603 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gbb); i++)
3604 add_conditions_to_domain (gbb);
3607 /* Build the current domain matrix: the loops belonging to the current
3608 SCOP, and that vary for the execution of the current basic block.
3609 Returns false if there is no loop in SCOP. */
3612 build_scop_iteration_domain (scop_p scop)
3615 CloogMatrix *outer_cstr;
3618 /* Build cloog loop for all loops, that are in the uppermost loop layer of
3620 for (i = 0; VEC_iterate (loop_p, SCOP_LOOP_NEST (scop), i, loop); i++)
3621 if (!loop_in_sese_p (loop_outer (loop), SCOP_REGION (scop)))
3623 /* The outermost constraints is a matrix that has:
3624 -first column: eq/ineq boolean
3625 -last column: a constant
3626 -scop_nb_params columns for the parameters used in the scop. */
3627 outer_cstr = cloog_matrix_alloc (0, scop_nb_params (scop) + 2);
3628 build_loop_iteration_domains (scop, loop, outer_cstr, 0);
3629 cloog_matrix_free (outer_cstr);
3635 /* Initializes an equation CY of the access matrix using the
3636 information for a subscript from AF, relatively to the loop
3637 indexes from LOOP_NEST and parameter indexes from PARAMS. NDIM is
3638 the dimension of the array access, i.e. the number of
3639 subscripts. Returns true when the operation succeeds. */
3642 build_access_matrix_with_af (tree af, lambda_vector cy,
3643 scop_p scop, int ndim)
3647 switch (TREE_CODE (af))
3649 case POLYNOMIAL_CHREC:
3651 struct loop *outer_loop;
3652 tree left = CHREC_LEFT (af);
3653 tree right = CHREC_RIGHT (af);
3656 if (TREE_CODE (right) != INTEGER_CST)
3659 outer_loop = get_loop (CHREC_VARIABLE (af));
3660 var = nb_loops_around_loop_in_scop (outer_loop, scop);
3661 cy[var] = int_cst_value (right);
3663 switch (TREE_CODE (left))
3665 case POLYNOMIAL_CHREC:
3666 return build_access_matrix_with_af (left, cy, scop, ndim);
3669 cy[ndim - 1] = int_cst_value (left);
3673 return build_access_matrix_with_af (left, cy, scop, ndim);
3678 build_access_matrix_with_af (TREE_OPERAND (af, 0), cy, scop, ndim);
3679 build_access_matrix_with_af (TREE_OPERAND (af, 1), cy, scop, ndim);
3683 build_access_matrix_with_af (TREE_OPERAND (af, 0), cy, scop, ndim);
3684 build_access_matrix_with_af (TREE_OPERAND (af, 1), cy, scop, ndim);
3688 cy[ndim - 1] = int_cst_value (af);
3692 param_col = param_index (af, scop);
3693 cy [ndim - scop_nb_params (scop) + param_col - 1] = 1;
3697 /* FIXME: access_fn can have parameters. */
3702 /* Initialize the access matrix in the data reference REF with respect
3703 to the loop nesting LOOP_NEST. Return true when the operation
3707 build_access_matrix (data_reference_p ref, graphite_bb_p gb)
3709 int i, ndim = DR_NUM_DIMENSIONS (ref);
3710 struct access_matrix *am = GGC_NEW (struct access_matrix);
3712 AM_MATRIX (am) = VEC_alloc (lambda_vector, gc, ndim);
3713 DR_SCOP (ref) = GBB_SCOP (gb);
3715 for (i = 0; i < ndim; i++)
3717 lambda_vector v = lambda_vector_new (ref_nb_loops (ref));
3718 scop_p scop = GBB_SCOP (gb);
3719 tree af = DR_ACCESS_FN (ref, i);
3721 if (!build_access_matrix_with_af (af, v, scop, ref_nb_loops (ref)))
3724 VEC_quick_push (lambda_vector, AM_MATRIX (am), v);
3727 DR_ACCESS_MATRIX (ref) = am;
3731 /* Build the access matrices for the data references in the SCOP. */
3734 build_scop_data_accesses (scop_p scop)
3739 /* FIXME: Construction of access matrix is disabled until some
3740 pass, like the data dependence analysis, is using it. */
3743 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
3746 data_reference_p dr;
3748 /* Construct the access matrix for each data ref, with respect to
3749 the loop nest of the current BB in the considered SCOP. */
3751 VEC_iterate (data_reference_p, GBB_DATA_REFS (gb), j, dr);
3754 bool res = build_access_matrix (dr, gb);
3756 /* FIXME: At this point the DRs should always have an affine
3757 form. For the moment this fails as build_access_matrix
3758 does not build matrices with parameters. */
3764 /* Returns the tree variable from the name NAME that was given in
3765 Cloog representation. All the parameters are stored in PARAMS, and
3766 all the loop induction variables are stored in IVSTACK.
3768 FIXME: This is a hack, and Cloog should be fixed to not work with
3769 variable names represented as "char *string", but with void
3770 pointers that could be casted back to a tree. The only problem in
3771 doing that is that Cloog's pretty printer still assumes that
3772 variable names are char *strings. The solution would be to have a
3773 function pointer for pretty-printing that can be redirected to be
3774 print_generic_stmt in our case, or fprintf by default.
3775 ??? Too ugly to live. */
3778 clast_name_to_gcc (const char *name, VEC (name_tree, heap) *params,
3779 loop_iv_stack ivstack)
3786 for (i = 0; VEC_iterate (name_tree, params, i, t); i++)
3787 if (!strcmp (name, t->name))
3790 iv = loop_iv_stack_get_iv_from_name (ivstack, name);
3797 /* Returns the maximal precision type for expressions E1 and E2. */
3800 max_precision_type (tree e1, tree e2)
3802 tree type1 = TREE_TYPE (e1);
3803 tree type2 = TREE_TYPE (e2);
3804 return TYPE_PRECISION (type1) > TYPE_PRECISION (type2) ? type1 : type2;
3808 clast_to_gcc_expression (tree, struct clast_expr *, VEC (name_tree, heap) *,
3811 /* Converts a Cloog reduction expression R with reduction operation OP
3812 to a GCC expression tree of type TYPE. PARAMS is a vector of
3813 parameters of the scop, and IVSTACK contains the stack of induction
3817 clast_to_gcc_expression_red (tree type, enum tree_code op,
3818 struct clast_reduction *r,
3819 VEC (name_tree, heap) *params,
3820 loop_iv_stack ivstack)
3823 tree res = clast_to_gcc_expression (type, r->elts[0], params, ivstack);
3825 for (i = 1; i < r->n; i++)
3827 tree t = clast_to_gcc_expression (type, r->elts[i], params, ivstack);
3828 res = fold_build2 (op, type, res, t);
3833 /* Converts a Cloog AST expression E back to a GCC expression tree of
3834 type TYPE. PARAMS is a vector of parameters of the scop, and
3835 IVSTACK contains the stack of induction variables. */
3838 clast_to_gcc_expression (tree type, struct clast_expr *e,
3839 VEC (name_tree, heap) *params,
3840 loop_iv_stack ivstack)
3846 struct clast_term *t = (struct clast_term *) e;
3850 if (value_one_p (t->val))
3852 tree name = clast_name_to_gcc (t->var, params, ivstack);
3853 return fold_convert (type, name);
3856 else if (value_mone_p (t->val))
3858 tree name = clast_name_to_gcc (t->var, params, ivstack);
3859 name = fold_convert (type, name);
3860 return fold_build1 (NEGATE_EXPR, type, name);
3864 tree name = clast_name_to_gcc (t->var, params, ivstack);
3865 tree cst = gmp_cst_to_tree (type, t->val);
3866 name = fold_convert (type, name);
3867 return fold_build2 (MULT_EXPR, type, cst, name);
3871 return gmp_cst_to_tree (type, t->val);
3876 struct clast_reduction *r = (struct clast_reduction *) e;
3881 return clast_to_gcc_expression_red (type, PLUS_EXPR, r, params, ivstack);
3884 return clast_to_gcc_expression_red (type, MIN_EXPR, r, params, ivstack);
3887 return clast_to_gcc_expression_red (type, MAX_EXPR, r, params, ivstack);
3897 struct clast_binary *b = (struct clast_binary *) e;
3898 struct clast_expr *lhs = (struct clast_expr *) b->LHS;
3899 tree tl = clast_to_gcc_expression (type, lhs, params, ivstack);
3900 tree tr = gmp_cst_to_tree (type, b->RHS);
3904 case clast_bin_fdiv:
3905 return fold_build2 (FLOOR_DIV_EXPR, type, tl, tr);
3907 case clast_bin_cdiv:
3908 return fold_build2 (CEIL_DIV_EXPR, type, tl, tr);
3911 return fold_build2 (EXACT_DIV_EXPR, type, tl, tr);
3914 return fold_build2 (TRUNC_MOD_EXPR, type, tl, tr);
3928 /* Returns the type for the expression E. */
3931 gcc_type_for_clast_expr (struct clast_expr *e,
3932 VEC (name_tree, heap) *params,
3933 loop_iv_stack ivstack)
3939 struct clast_term *t = (struct clast_term *) e;
3942 return TREE_TYPE (clast_name_to_gcc (t->var, params, ivstack));
3949 struct clast_reduction *r = (struct clast_reduction *) e;
3952 return gcc_type_for_clast_expr (r->elts[0], params, ivstack);
3956 for (i = 0; i < r->n; i++)
3958 tree type = gcc_type_for_clast_expr (r->elts[i], params, ivstack);
3968 struct clast_binary *b = (struct clast_binary *) e;
3969 struct clast_expr *lhs = (struct clast_expr *) b->LHS;
3970 return gcc_type_for_clast_expr (lhs, params, ivstack);
3980 /* Returns the type for the equation CLEQ. */
3983 gcc_type_for_clast_eq (struct clast_equation *cleq,
3984 VEC (name_tree, heap) *params,
3985 loop_iv_stack ivstack)
3987 tree type = gcc_type_for_clast_expr (cleq->LHS, params, ivstack);
3991 return gcc_type_for_clast_expr (cleq->RHS, params, ivstack);
3994 /* Translates a clast equation CLEQ to a tree. */
3997 graphite_translate_clast_equation (scop_p scop,
3998 struct clast_equation *cleq,
3999 loop_iv_stack ivstack)
4001 enum tree_code comp;
4002 tree type = gcc_type_for_clast_eq (cleq, SCOP_PARAMS (scop), ivstack);
4003 tree lhs = clast_to_gcc_expression (type, cleq->LHS, SCOP_PARAMS (scop), ivstack);
4004 tree rhs = clast_to_gcc_expression (type, cleq->RHS, SCOP_PARAMS (scop), ivstack);
4006 if (cleq->sign == 0)
4009 else if (cleq->sign > 0)
4015 return fold_build2 (comp, type, lhs, rhs);
4018 /* Creates the test for the condition in STMT. */
4021 graphite_create_guard_cond_expr (scop_p scop, struct clast_guard *stmt,
4022 loop_iv_stack ivstack)
4027 for (i = 0; i < stmt->n; i++)
4029 tree eq = graphite_translate_clast_equation (scop, &stmt->eq[i], ivstack);
4032 cond = fold_build2 (TRUTH_AND_EXPR, TREE_TYPE (eq), cond, eq);
4040 /* Creates a new if region corresponding to Cloog's guard. */
4043 graphite_create_new_guard (scop_p scop, edge entry_edge,
4044 struct clast_guard *stmt,
4045 loop_iv_stack ivstack)
4047 tree cond_expr = graphite_create_guard_cond_expr (scop, stmt, ivstack);
4048 edge exit_edge = create_empty_if_region_on_edge (entry_edge, cond_expr);
4052 /* Walks a CLAST and returns the first statement in the body of a
4055 static struct clast_user_stmt *
4056 clast_get_body_of_loop (struct clast_stmt *stmt)
4059 || CLAST_STMT_IS_A (stmt, stmt_user))
4060 return (struct clast_user_stmt *) stmt;
4062 if (CLAST_STMT_IS_A (stmt, stmt_for))
4063 return clast_get_body_of_loop (((struct clast_for *) stmt)->body);
4065 if (CLAST_STMT_IS_A (stmt, stmt_guard))
4066 return clast_get_body_of_loop (((struct clast_guard *) stmt)->then);
4068 if (CLAST_STMT_IS_A (stmt, stmt_block))
4069 return clast_get_body_of_loop (((struct clast_block *) stmt)->body);
4074 /* Returns the induction variable for the loop that gets translated to
4078 gcc_type_for_iv_of_clast_loop (struct clast_for *stmt_for)
4080 struct clast_user_stmt *stmt = clast_get_body_of_loop ((struct clast_stmt *) stmt_for);
4081 const char *cloog_iv = stmt_for->iterator;
4082 CloogStatement *cs = stmt->statement;
4083 graphite_bb_p gbb = (graphite_bb_p) cloog_statement_usr (cs);
4085 return gcc_type_for_cloog_iv (cloog_iv, gbb);
4088 /* Creates a new LOOP corresponding to Cloog's STMT. Inserts an induction
4089 variable for the new LOOP. New LOOP is attached to CFG starting at
4090 ENTRY_EDGE. LOOP is inserted into the loop tree and becomes the child
4091 loop of the OUTER_LOOP. */
4093 static struct loop *
4094 graphite_create_new_loop (scop_p scop, edge entry_edge,
4095 struct clast_for *stmt, loop_iv_stack ivstack,
4098 tree type = gcc_type_for_iv_of_clast_loop (stmt);
4099 VEC (name_tree, heap) *params = SCOP_PARAMS (scop);
4100 tree lb = clast_to_gcc_expression (type, stmt->LB, params, ivstack);
4101 tree ub = clast_to_gcc_expression (type, stmt->UB, params, ivstack);
4102 tree stride = gmp_cst_to_tree (type, stmt->stride);
4103 tree ivvar = create_tmp_var (type, "graphiteIV");
4105 loop_p loop = create_empty_loop_on_edge
4106 (entry_edge, lb, stride, ub, ivvar, &iv_before,
4107 outer ? outer : entry_edge->src->loop_father);
4109 add_referenced_var (ivvar);
4110 loop_iv_stack_push_iv (ivstack, iv_before, stmt->iterator);
4114 /* Rename the SSA_NAMEs used in STMT and that appear in IVSTACK. */
4117 rename_variables_in_stmt (gimple stmt, htab_t map)
4120 use_operand_p use_p;
4122 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
4124 tree use = USE_FROM_PTR (use_p);
4125 tree new_name = get_new_name_from_old_name (map, use);
4127 replace_exp (use_p, new_name);
4133 /* Returns true if SSA_NAME is a parameter of SCOP. */
4136 is_parameter (scop_p scop, tree ssa_name)
4139 VEC (name_tree, heap) *params = SCOP_PARAMS (scop);
4142 for (i = 0; VEC_iterate (name_tree, params, i, param); i++)
4143 if (param->t == ssa_name)
4149 /* Returns true if NAME is an induction variable. */
4154 return gimple_code (SSA_NAME_DEF_STMT (name)) == GIMPLE_PHI;
4157 static void expand_scalar_variables_stmt (gimple, basic_block, scop_p,
4160 expand_scalar_variables_expr (tree, tree, enum tree_code, tree, basic_block,
4161 scop_p, htab_t, gimple_stmt_iterator *);
4163 /* Copies at GSI all the scalar computations on which the ssa_name OP0
4164 depends on in the SCOP: these are all the scalar variables used in
4165 the definition of OP0, that are defined outside BB and still in the
4166 SCOP, i.e. not a parameter of the SCOP. The expression that is
4167 returned contains only induction variables from the generated code:
4168 MAP contains the induction variables renaming mapping, and is used
4169 to translate the names of induction variables. */
4172 expand_scalar_variables_ssa_name (tree op0, basic_block bb,
4173 scop_p scop, htab_t map,
4174 gimple_stmt_iterator *gsi)
4176 tree var0, var1, type;
4178 enum tree_code subcode;
4180 if (is_parameter (scop, op0)
4182 return get_new_name_from_old_name (map, op0);
4184 def_stmt = SSA_NAME_DEF_STMT (op0);
4186 if (gimple_bb (def_stmt) == bb)
4188 /* If the defining statement is in the basic block already
4189 we do not need to create a new expression for it, we
4190 only need to ensure its operands are expanded. */
4191 expand_scalar_variables_stmt (def_stmt, bb, scop, map);
4192 return get_new_name_from_old_name (map, op0);
4196 if (gimple_code (def_stmt) != GIMPLE_ASSIGN
4197 || !bb_in_sese_p (gimple_bb (def_stmt), SCOP_REGION (scop)))
4198 return get_new_name_from_old_name (map, op0);
4200 var0 = gimple_assign_rhs1 (def_stmt);
4201 subcode = gimple_assign_rhs_code (def_stmt);
4202 var1 = gimple_assign_rhs2 (def_stmt);
4203 type = gimple_expr_type (def_stmt);
4205 return expand_scalar_variables_expr (type, var0, subcode, var1, bb, scop,
4210 /* Copies at GSI all the scalar computations on which the expression
4211 OP0 CODE OP1 depends on in the SCOP: these are all the scalar
4212 variables used in OP0 and OP1, defined outside BB and still defined
4213 in the SCOP, i.e. not a parameter of the SCOP. The expression that
4214 is returned contains only induction variables from the generated
4215 code: MAP contains the induction variables renaming mapping, and is
4216 used to translate the names of induction variables. */
4219 expand_scalar_variables_expr (tree type, tree op0, enum tree_code code,
4220 tree op1, basic_block bb, scop_p scop,
4221 htab_t map, gimple_stmt_iterator *gsi)
4223 if (TREE_CODE_CLASS (code) == tcc_constant
4224 || TREE_CODE_CLASS (code) == tcc_declaration)
4227 /* For data references we have to duplicate also its memory
4229 if (TREE_CODE_CLASS (code) == tcc_reference)
4235 tree old_name = TREE_OPERAND (op0, 0);
4236 tree expr = expand_scalar_variables_ssa_name
4237 (old_name, bb, scop, map, gsi);
4238 tree new_name = force_gimple_operand_gsi (gsi, expr, true, NULL,
4239 true, GSI_SAME_STMT);
4241 set_symbol_mem_tag (SSA_NAME_VAR (new_name),
4242 symbol_mem_tag (SSA_NAME_VAR (old_name)));
4243 return fold_build1 (code, type, new_name);
4248 tree op00 = TREE_OPERAND (op0, 0);
4249 tree op01 = TREE_OPERAND (op0, 1);
4250 tree op02 = TREE_OPERAND (op0, 2);
4251 tree op03 = TREE_OPERAND (op0, 3);
4252 tree base = expand_scalar_variables_expr
4253 (TREE_TYPE (op00), op00, TREE_CODE (op00), NULL, bb, scop,
4255 tree subscript = expand_scalar_variables_expr
4256 (TREE_TYPE (op01), op01, TREE_CODE (op01), NULL, bb, scop,
4259 return build4 (ARRAY_REF, type, base, subscript, op02, op03);
4263 /* The above cases should catch everything. */
4268 if (TREE_CODE_CLASS (code) == tcc_unary)
4270 tree op0_type = TREE_TYPE (op0);
4271 enum tree_code op0_code = TREE_CODE (op0);
4272 tree op0_expr = expand_scalar_variables_expr (op0_type, op0, op0_code,
4273 NULL, bb, scop, map, gsi);
4275 return fold_build1 (code, type, op0_expr);
4278 if (TREE_CODE_CLASS (code) == tcc_binary)
4280 tree op0_type = TREE_TYPE (op0);
4281 enum tree_code op0_code = TREE_CODE (op0);
4282 tree op0_expr = expand_scalar_variables_expr (op0_type, op0, op0_code,
4283 NULL, bb, scop, map, gsi);
4284 tree op1_type = TREE_TYPE (op1);
4285 enum tree_code op1_code = TREE_CODE (op1);
4286 tree op1_expr = expand_scalar_variables_expr (op1_type, op1, op1_code,
4287 NULL, bb, scop, map, gsi);
4289 return fold_build2 (code, type, op0_expr, op1_expr);
4292 if (code == SSA_NAME)
4293 return expand_scalar_variables_ssa_name (op0, bb, scop, map, gsi);
4299 /* Copies at the beginning of BB all the scalar computations on which
4300 STMT depends on in the SCOP: these are all the scalar variables used
4301 in STMT, defined outside BB and still defined in the SCOP, i.e. not a
4302 parameter of the SCOP. The expression that is returned contains
4303 only induction variables from the generated code: MAP contains the
4304 induction variables renaming mapping, and is used to translate the
4305 names of induction variables. */
4308 expand_scalar_variables_stmt (gimple stmt, basic_block bb, scop_p scop,
4312 use_operand_p use_p;
4313 gimple_stmt_iterator gsi = gsi_after_labels (bb);
4315 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
4317 tree use = USE_FROM_PTR (use_p);
4318 tree type = TREE_TYPE (use);
4319 enum tree_code code = TREE_CODE (use);
4320 tree use_expr = expand_scalar_variables_expr (type, use, code, NULL, bb,
4322 if (use_expr != use)
4325 force_gimple_operand_gsi (&gsi, use_expr, true, NULL,
4326 true, GSI_NEW_STMT);
4327 replace_exp (use_p, new_use);
4334 /* Copies at the beginning of BB all the scalar computations on which
4335 BB depends on in the SCOP: these are all the scalar variables used
4336 in BB, defined outside BB and still defined in the SCOP, i.e. not a
4337 parameter of the SCOP. The expression that is returned contains
4338 only induction variables from the generated code: MAP contains the
4339 induction variables renaming mapping, and is used to translate the
4340 names of induction variables. */
4343 expand_scalar_variables (basic_block bb, scop_p scop, htab_t map)
4345 gimple_stmt_iterator gsi;
4347 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
4349 gimple stmt = gsi_stmt (gsi);
4350 expand_scalar_variables_stmt (stmt, bb, scop, map);
4355 /* Rename all the SSA_NAMEs from block BB according to the MAP. */
4358 rename_variables (basic_block bb, htab_t map)
4360 gimple_stmt_iterator gsi;
4362 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4363 rename_variables_in_stmt (gsi_stmt (gsi), map);
4366 /* Remove condition from BB. */
4369 remove_condition (basic_block bb)
4371 gimple last = last_stmt (bb);
4373 if (last && gimple_code (last) == GIMPLE_COND)
4375 gimple_stmt_iterator gsi = gsi_last_bb (bb);
4376 gsi_remove (&gsi, true);
4380 /* Returns the first successor edge of BB with EDGE_TRUE_VALUE flag set. */
4383 get_true_edge_from_guard_bb (basic_block bb)
4388 FOR_EACH_EDGE (e, ei, bb->succs)
4389 if (e->flags & EDGE_TRUE_VALUE)
4396 /* Returns the first successor edge of BB with EDGE_TRUE_VALUE flag cleared. */
4399 get_false_edge_from_guard_bb (basic_block bb)
4404 FOR_EACH_EDGE (e, ei, bb->succs)
4405 if (!(e->flags & EDGE_TRUE_VALUE))
4412 /* Inserts in MAP a tuple (OLD_NAME, NEW_NAME) for the induction
4413 variables of the loops around GBB in SCOP, i.e. GBB_LOOPS.
4414 NEW_NAME is obtained from IVSTACK. IVSTACK has the same stack
4415 ordering as GBB_LOOPS. */
4418 build_iv_mapping (loop_iv_stack ivstack, htab_t map, gbb_p gbb, scop_p scop)
4424 for (i = 0; VEC_iterate (name_tree, SCOP_OLDIVS (scop), i, iv); i++)
4426 struct rename_map_elt tmp;
4428 if (!flow_bb_inside_loop_p (iv->loop, GBB_BB (gbb)))
4431 tmp.old_name = iv->t;
4432 slot = htab_find_slot (map, &tmp, INSERT);
4436 tree new_name = loop_iv_stack_get_iv (ivstack,
4437 gbb_loop_index (gbb, iv->loop));
4438 *slot = new_rename_map_elt (iv->t, new_name);
4443 /* Register in MAP the tuple (old_name, new_name). */
4446 register_old_and_new_names (htab_t map, tree old_name, tree new_name)
4448 struct rename_map_elt tmp;
4451 tmp.old_name = old_name;
4452 slot = htab_find_slot (map, &tmp, INSERT);
4455 *slot = new_rename_map_elt (old_name, new_name);
4458 /* Create a duplicate of the basic block BB. NOTE: This does not
4459 preserve SSA form. */
4462 graphite_copy_stmts_from_block (basic_block bb, basic_block new_bb, htab_t map)
4464 gimple_stmt_iterator gsi, gsi_tgt;
4466 gsi_tgt = gsi_start_bb (new_bb);
4467 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4469 def_operand_p def_p;
4470 ssa_op_iter op_iter;
4472 gimple stmt = gsi_stmt (gsi);
4475 if (gimple_code (stmt) == GIMPLE_LABEL)
4478 /* Create a new copy of STMT and duplicate STMT's virtual
4480 copy = gimple_copy (stmt);
4481 gsi_insert_after (&gsi_tgt, copy, GSI_NEW_STMT);
4482 mark_symbols_for_renaming (copy);
4484 region = lookup_stmt_eh_region (stmt);
4486 add_stmt_to_eh_region (copy, region);
4487 gimple_duplicate_stmt_histograms (cfun, copy, cfun, stmt);
4489 /* Create new names for all the definitions created by COPY and
4490 add replacement mappings for each new name. */
4491 FOR_EACH_SSA_DEF_OPERAND (def_p, copy, op_iter, SSA_OP_DEF)
4493 tree old_name = DEF_FROM_PTR (def_p);
4494 tree new_name = create_new_def_for (old_name, copy, def_p);
4495 register_old_and_new_names (map, old_name, new_name);
4500 /* Records in SCOP_LIVEOUT_RENAMES the names that are live out of
4501 the SCOP and that appear in the RENAME_MAP. */
4504 register_scop_liveout_renames (scop_p scop, htab_t rename_map)
4507 sese region = SCOP_REGION (scop);
4509 for (i = 0; i < SESE_NUM_VER (region); i++)
4510 if (bitmap_bit_p (SESE_LIVEOUT (region), i)
4511 && is_gimple_reg (ssa_name (i)))
4513 tree old_name = ssa_name (i);
4514 tree new_name = get_new_name_from_old_name (rename_map, old_name);
4516 register_old_and_new_names (SCOP_LIVEOUT_RENAMES (scop),
4517 old_name, new_name);
4521 /* Copies BB and includes in the copied BB all the statements that can
4522 be reached following the use-def chains from the memory accesses,
4523 and returns the next edge following this new block. */
4526 copy_bb_and_scalar_dependences (basic_block bb, scop_p scop,
4527 edge next_e, htab_t map)
4529 basic_block new_bb = split_edge (next_e);
4531 next_e = single_succ_edge (new_bb);
4532 graphite_copy_stmts_from_block (bb, new_bb, map);
4533 remove_condition (new_bb);
4534 rename_variables (new_bb, map);
4535 remove_phi_nodes (new_bb);
4536 expand_scalar_variables (new_bb, scop, map);
4537 register_scop_liveout_renames (scop, map);
4542 /* Helper function for htab_traverse in insert_loop_close_phis. */
4545 add_loop_exit_phis (void **slot, void *s)
4547 struct rename_map_elt *entry = (struct rename_map_elt *) *slot;
4548 tree new_name = entry->new_name;
4549 basic_block bb = (basic_block) s;
4550 gimple phi = create_phi_node (new_name, bb);
4551 tree res = create_new_def_for (gimple_phi_result (phi), phi,
4552 gimple_phi_result_ptr (phi));
4554 add_phi_arg (phi, new_name, single_pred_edge (bb));
4556 entry->new_name = res;
4561 /* Iterate over the SCOP_LIVEOUT_RENAMES (SCOP) and get tuples of the
4562 form (OLD_NAME, NEW_NAME). Insert in BB "RES = phi (NEW_NAME)",
4563 and finally register in SCOP_LIVEOUT_RENAMES (scop) the tuple
4567 insert_loop_close_phis (scop_p scop, basic_block bb)
4569 update_ssa (TODO_update_ssa);
4570 htab_traverse (SCOP_LIVEOUT_RENAMES (scop), add_loop_exit_phis, bb);
4571 update_ssa (TODO_update_ssa);
4574 /* Helper structure for htab_traverse in insert_guard_phis. */
4578 edge true_edge, false_edge;
4579 htab_t liveout_before_guard;
4582 /* Return the default name that is before the guard. */
4585 default_liveout_before_guard (htab_t liveout_before_guard, tree old_name)
4587 tree res = get_new_name_from_old_name (liveout_before_guard, old_name);
4589 if (res == old_name)
4591 if (is_gimple_reg (res))
4592 return fold_convert (TREE_TYPE (res), integer_zero_node);
4593 return gimple_default_def (cfun, res);
4599 /* Helper function for htab_traverse in insert_guard_phis. */
4602 add_guard_exit_phis (void **slot, void *s)
4604 struct rename_map_elt *entry = (struct rename_map_elt *) *slot;
4605 struct igp *i = (struct igp *) s;
4606 basic_block bb = i->bb;
4607 edge true_edge = i->true_edge;
4608 edge false_edge = i->false_edge;
4609 tree name1 = entry->new_name;
4610 tree name2 = default_liveout_before_guard (i->liveout_before_guard,
4612 gimple phi = create_phi_node (name1, bb);
4613 tree res = create_new_def_for (gimple_phi_result (phi), phi,
4614 gimple_phi_result_ptr (phi));
4616 add_phi_arg (phi, name1, true_edge);
4617 add_phi_arg (phi, name2, false_edge);
4619 entry->new_name = res;
4624 /* Iterate over the SCOP_LIVEOUT_RENAMES (SCOP) and get tuples of the
4625 form (OLD_NAME, NAME1). If there is a correspondent tuple of
4626 OLD_NAME in LIVEOUT_BEFORE_GUARD, i.e. (OLD_NAME, NAME2) then
4629 | RES = phi (NAME1 (on TRUE_EDGE), NAME2 (on FALSE_EDGE))"
4631 if there is no tuple for OLD_NAME in LIVEOUT_BEFORE_GUARD, insert
4633 | RES = phi (NAME1 (on TRUE_EDGE),
4634 | DEFAULT_DEFINITION of NAME1 (on FALSE_EDGE))".
4636 Finally register in SCOP_LIVEOUT_RENAMES (scop) the tuple
4640 insert_guard_phis (scop_p scop, basic_block bb, edge true_edge,
4641 edge false_edge, htab_t liveout_before_guard)
4645 i.true_edge = true_edge;
4646 i.false_edge = false_edge;
4647 i.liveout_before_guard = liveout_before_guard;
4649 update_ssa (TODO_update_ssa);
4650 htab_traverse (SCOP_LIVEOUT_RENAMES (scop), add_guard_exit_phis, &i);
4651 update_ssa (TODO_update_ssa);
4654 /* Helper function for htab_traverse. */
4657 copy_renames (void **slot, void *s)
4659 struct rename_map_elt *entry = (struct rename_map_elt *) *slot;
4660 htab_t res = (htab_t) s;
4661 tree old_name = entry->old_name;
4662 tree new_name = entry->new_name;
4663 struct rename_map_elt tmp;
4666 tmp.old_name = old_name;
4667 x = htab_find_slot (res, &tmp, INSERT);
4670 *x = new_rename_map_elt (old_name, new_name);
4675 /* Translates a CLAST statement STMT to GCC representation in the
4678 - NEXT_E is the edge where new generated code should be attached.
4679 - CONTEXT_LOOP is the loop in which the generated code will be placed
4681 - IVSTACK contains the surrounding loops around the statement to be
4686 translate_clast (scop_p scop, struct loop *context_loop,
4687 struct clast_stmt *stmt, edge next_e, loop_iv_stack ivstack)
4692 if (CLAST_STMT_IS_A (stmt, stmt_root))
4693 return translate_clast (scop, context_loop, stmt->next, next_e, ivstack);
4695 if (CLAST_STMT_IS_A (stmt, stmt_user))
4698 CloogStatement *cs = ((struct clast_user_stmt *) stmt)->statement;
4699 graphite_bb_p gbb = (graphite_bb_p) cloog_statement_usr (cs);
4701 if (GBB_BB (gbb) == ENTRY_BLOCK_PTR)
4704 map = htab_create (10, rename_map_elt_info, eq_rename_map_elts, free);
4705 loop_iv_stack_patch_for_consts (ivstack, (struct clast_user_stmt *) stmt);
4706 build_iv_mapping (ivstack, map, gbb, scop);
4707 next_e = copy_bb_and_scalar_dependences (GBB_BB (gbb), scop,
4710 loop_iv_stack_remove_constants (ivstack);
4711 update_ssa (TODO_update_ssa);
4712 recompute_all_dominators ();
4714 return translate_clast (scop, context_loop, stmt->next, next_e, ivstack);
4717 if (CLAST_STMT_IS_A (stmt, stmt_for))
4720 = graphite_create_new_loop (scop, next_e, (struct clast_for *) stmt,
4721 ivstack, context_loop ? context_loop
4723 edge last_e = single_exit (loop);
4725 next_e = translate_clast (scop, loop, ((struct clast_for *) stmt)->body,
4726 single_pred_edge (loop->latch), ivstack);
4727 redirect_edge_succ_nodup (next_e, loop->latch);
4729 set_immediate_dominator (CDI_DOMINATORS, next_e->dest, next_e->src);
4730 loop_iv_stack_pop (ivstack);
4731 last_e = single_succ_edge (split_edge (last_e));
4732 insert_loop_close_phis (scop, last_e->src);
4734 recompute_all_dominators ();
4736 return translate_clast (scop, context_loop, stmt->next, last_e, ivstack);
4739 if (CLAST_STMT_IS_A (stmt, stmt_guard))
4741 htab_t liveout_before_guard = htab_create (10, rename_map_elt_info,
4742 eq_rename_map_elts, free);
4743 edge last_e = graphite_create_new_guard (scop, next_e,
4744 ((struct clast_guard *) stmt),
4746 edge true_e = get_true_edge_from_guard_bb (next_e->dest);
4747 edge false_e = get_false_edge_from_guard_bb (next_e->dest);
4748 edge exit_true_e = single_succ_edge (true_e->dest);
4749 edge exit_false_e = single_succ_edge (false_e->dest);
4751 htab_traverse (SCOP_LIVEOUT_RENAMES (scop), copy_renames,
4752 liveout_before_guard);
4754 next_e = translate_clast (scop, context_loop,
4755 ((struct clast_guard *) stmt)->then,
4757 insert_guard_phis (scop, last_e->src, exit_true_e, exit_false_e,
4758 liveout_before_guard);
4759 htab_delete (liveout_before_guard);
4760 recompute_all_dominators ();
4763 return translate_clast (scop, context_loop, stmt->next, last_e, ivstack);
4766 if (CLAST_STMT_IS_A (stmt, stmt_block))
4768 next_e = translate_clast (scop, context_loop,
4769 ((struct clast_block *) stmt)->body,
4771 recompute_all_dominators ();
4773 return translate_clast (scop, context_loop, stmt->next, next_e, ivstack);
4779 /* Free the SCATTERING domain list. */
4782 free_scattering (CloogDomainList *scattering)
4786 CloogDomain *dom = cloog_domain (scattering);
4787 CloogDomainList *next = cloog_next_domain (scattering);
4789 cloog_domain_free (dom);
4795 /* Build cloog program for SCoP. */
4798 build_cloog_prog (scop_p scop)
4801 int max_nb_loops = scop_max_loop_depth (scop);
4803 CloogLoop *loop_list = NULL;
4804 CloogBlockList *block_list = NULL;
4805 CloogDomainList *scattering = NULL;
4806 CloogProgram *prog = SCOP_PROG (scop);
4807 int nbs = 2 * max_nb_loops + 1;
4808 int *scaldims = (int *) xmalloc (nbs * (sizeof (int)));
4810 cloog_program_set_nb_scattdims (prog, nbs);
4811 initialize_cloog_names (scop);
4813 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gbb); i++)
4815 /* Build new block. */
4816 CloogMatrix *domain = GBB_DOMAIN (gbb);
4817 CloogStatement *stmt = cloog_statement_alloc (GBB_BB (gbb)->index);
4818 CloogBlock *block = cloog_block_alloc (stmt, 0, NULL,
4819 nb_loops_around_gb (gbb));
4820 cloog_statement_set_usr (stmt, gbb);
4822 /* Add empty domain to all bbs, which do not yet have a domain, as they
4823 are not part of any loop. */
4826 domain = cloog_matrix_alloc (0, scop_nb_params (scop) + 2);
4827 GBB_DOMAIN (gbb) = domain;
4830 /* Build loop list. */
4832 CloogLoop *new_loop_list = cloog_loop_malloc ();
4833 cloog_loop_set_next (new_loop_list, loop_list);
4834 cloog_loop_set_domain (new_loop_list,
4835 cloog_domain_matrix2domain (domain));
4836 cloog_loop_set_block (new_loop_list, block);
4837 loop_list = new_loop_list;
4840 /* Build block list. */
4842 CloogBlockList *new_block_list = cloog_block_list_malloc ();
4844 cloog_block_list_set_next (new_block_list, block_list);
4845 cloog_block_list_set_block (new_block_list, block);
4846 block_list = new_block_list;
4849 /* Build scattering list. */
4851 /* XXX: Replace with cloog_domain_list_alloc(), when available. */
4852 CloogDomainList *new_scattering
4853 = (CloogDomainList *) xmalloc (sizeof (CloogDomainList));
4854 CloogMatrix *scat_mat = schedule_to_scattering (gbb, nbs);
4856 cloog_set_next_domain (new_scattering, scattering);
4857 cloog_set_domain (new_scattering,
4858 cloog_domain_matrix2domain (scat_mat));
4859 scattering = new_scattering;
4860 cloog_matrix_free (scat_mat);
4864 cloog_program_set_loop (prog, loop_list);
4865 cloog_program_set_blocklist (prog, block_list);
4867 for (i = 0; i < nbs; i++)
4870 cloog_program_set_scaldims (prog, scaldims);
4872 /* Extract scalar dimensions to simplify the code generation problem. */
4873 cloog_program_extract_scalars (prog, scattering);
4875 /* Apply scattering. */
4876 cloog_program_scatter (prog, scattering);
4877 free_scattering (scattering);
4879 /* Iterators corresponding to scalar dimensions have to be extracted. */
4880 cloog_names_scalarize (cloog_program_names (prog), nbs,
4881 cloog_program_scaldims (prog));
4883 /* Free blocklist. */
4885 CloogBlockList *next = cloog_program_blocklist (prog);
4889 CloogBlockList *toDelete = next;
4890 next = cloog_block_list_next (next);
4891 cloog_block_list_set_next (toDelete, NULL);
4892 cloog_block_list_set_block (toDelete, NULL);
4893 cloog_block_list_free (toDelete);
4895 cloog_program_set_blocklist (prog, NULL);
4899 /* Return the options that will be used in GLOOG. */
4901 static CloogOptions *
4902 set_cloog_options (void)
4904 CloogOptions *options = cloog_options_malloc ();
4906 /* Change cloog output language to C. If we do use FORTRAN instead, cloog
4907 will stop e.g. with "ERROR: unbounded loops not allowed in FORTRAN.", if
4908 we pass an incomplete program to cloog. */
4909 options->language = LANGUAGE_C;
4911 /* Enable complex equality spreading: removes dummy statements
4912 (assignments) in the generated code which repeats the
4913 substitution equations for statements. This is useless for
4917 /* Enable C pretty-printing mode: normalizes the substitution
4918 equations for statements. */
4921 /* Allow cloog to build strides with a stride width different to one.
4922 This example has stride = 4:
4924 for (i = 0; i < 20; i += 4)
4926 options->strides = 1;
4928 /* Disable optimizations and make cloog generate source code closer to the
4929 input. This is useful for debugging, but later we want the optimized
4932 XXX: We can not disable optimizations, as loop blocking is not working
4937 options->l = INT_MAX;
4943 /* Prints STMT to STDERR. */
4946 debug_clast_stmt (struct clast_stmt *stmt)
4948 CloogOptions *options = set_cloog_options ();
4950 pprint (stderr, stmt, 0, options);
4953 /* Find the right transform for the SCOP, and return a Cloog AST
4954 representing the new form of the program. */
4956 static struct clast_stmt *
4957 find_transform (scop_p scop)
4959 struct clast_stmt *stmt;
4960 CloogOptions *options = set_cloog_options ();
4962 /* Connect new cloog prog generation to graphite. */
4963 build_cloog_prog (scop);
4965 if (dump_file && (dump_flags & TDF_DETAILS))
4967 fprintf (dump_file, "Cloog Input [\n");
4968 cloog_program_print (dump_file, SCOP_PROG(scop));
4969 fprintf (dump_file, "]\n");
4972 SCOP_PROG (scop) = cloog_program_generate (SCOP_PROG (scop), options);
4973 stmt = cloog_clast_create (SCOP_PROG (scop), options);
4975 if (dump_file && (dump_flags & TDF_DETAILS))
4977 fprintf (dump_file, "Cloog Output[\n");
4978 pprint (dump_file, stmt, 0, options);
4979 cloog_program_dump_cloog (dump_file, SCOP_PROG (scop));
4980 fprintf (dump_file, "]\n");
4983 cloog_options_free (options);
4987 /* Remove from the CFG the REGION. */
4990 remove_sese_region (sese region)
4992 VEC (basic_block, heap) *bbs = NULL;
4993 basic_block entry_bb = SESE_ENTRY (region)->dest;
4994 basic_block exit_bb = SESE_EXIT (region)->dest;
4998 VEC_safe_push (basic_block, heap, bbs, entry_bb);
4999 gather_blocks_in_sese_region (entry_bb, exit_bb, &bbs);
5001 for (i = 0; VEC_iterate (basic_block, bbs, i, bb); i++)
5002 delete_basic_block (bb);
5004 VEC_free (basic_block, heap, bbs);
5007 typedef struct ifsese {
5014 if_region_entry (ifsese if_region)
5016 return SESE_ENTRY (if_region->region);
5020 if_region_exit (ifsese if_region)
5022 return SESE_EXIT (if_region->region);
5025 static inline basic_block
5026 if_region_get_condition_block (ifsese if_region)
5028 return if_region_entry (if_region)->dest;
5032 if_region_set_false_region (ifsese if_region, sese region)
5034 basic_block condition = if_region_get_condition_block (if_region);
5035 edge false_edge = get_false_edge_from_guard_bb (condition);
5036 basic_block dummy = false_edge->dest;
5037 edge entry_region = SESE_ENTRY (region);
5038 edge exit_region = SESE_EXIT (region);
5039 basic_block before_region = entry_region->src;
5040 basic_block last_in_region = exit_region->src;
5041 void **slot = htab_find_slot_with_hash (current_loops->exits, exit_region,
5042 htab_hash_pointer (exit_region),
5045 entry_region->flags = false_edge->flags;
5046 false_edge->flags = exit_region->flags;
5048 redirect_edge_pred (entry_region, condition);
5049 redirect_edge_pred (exit_region, before_region);
5050 redirect_edge_pred (false_edge, last_in_region);
5051 redirect_edge_succ (false_edge, single_succ (dummy));
5052 delete_basic_block (dummy);
5054 exit_region->flags = EDGE_FALLTHRU;
5055 recompute_all_dominators ();
5057 SESE_EXIT (region) = false_edge;
5058 if_region->false_region = region;
5062 struct loop_exit *loop_exit = GGC_CNEW (struct loop_exit);
5064 memcpy (loop_exit, *((struct loop_exit **) slot), sizeof (struct loop_exit));
5065 htab_clear_slot (current_loops->exits, slot);
5067 slot = htab_find_slot_with_hash (current_loops->exits, false_edge,
5068 htab_hash_pointer (false_edge),
5070 loop_exit->e = false_edge;
5072 false_edge->src->loop_father->exits->next = loop_exit;
5077 create_if_region_on_edge (edge entry, tree condition)
5081 sese sese_region = GGC_NEW (struct sese);
5082 sese true_region = GGC_NEW (struct sese);
5083 sese false_region = GGC_NEW (struct sese);
5084 ifsese if_region = GGC_NEW (struct ifsese);
5085 edge exit = create_empty_if_region_on_edge (entry, condition);
5087 if_region->region = sese_region;
5088 if_region->region->entry = entry;
5089 if_region->region->exit = exit;
5091 FOR_EACH_EDGE (e, ei, entry->dest->succs)
5093 if (e->flags & EDGE_TRUE_VALUE)
5095 true_region->entry = e;
5096 true_region->exit = single_succ_edge (e->dest);
5097 if_region->true_region = true_region;
5099 else if (e->flags & EDGE_FALSE_VALUE)
5101 false_region->entry = e;
5102 false_region->exit = single_succ_edge (e->dest);
5103 if_region->false_region = false_region;
5110 /* Moves REGION in a condition expression:
5118 move_sese_in_condition (sese region)
5120 basic_block pred_block = split_edge (SESE_ENTRY (region));
5121 ifsese if_region = NULL;
5123 SESE_ENTRY (region) = single_succ_edge (pred_block);
5124 if_region = create_if_region_on_edge (single_pred_edge (pred_block), integer_one_node);
5125 if_region_set_false_region (if_region, region);
5130 /* Add exit phis for USE on EXIT. */
5133 scop_add_exit_phis_edge (basic_block exit, tree use, edge false_e, edge true_e)
5135 gimple phi = create_phi_node (use, exit);
5137 create_new_def_for (gimple_phi_result (phi), phi,
5138 gimple_phi_result_ptr (phi));
5139 add_phi_arg (phi, use, false_e);
5140 add_phi_arg (phi, use, true_e);
5143 /* Add phi nodes for VAR that is used in LIVEIN. Phi nodes are
5144 inserted in block BB. */
5147 scop_add_exit_phis_var (basic_block bb, tree var, bitmap livein,
5148 edge false_e, edge true_e)
5151 basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
5153 if (is_gimple_reg (var))
5154 bitmap_clear_bit (livein, def_bb->index);
5156 bitmap_set_bit (livein, def_bb->index);
5158 def = BITMAP_ALLOC (NULL);
5159 bitmap_set_bit (def, def_bb->index);
5160 compute_global_livein (livein, def);
5163 scop_add_exit_phis_edge (bb, var, false_e, true_e);
5166 /* Insert in the block BB phi nodes for variables defined in REGION
5167 and used outside the REGION. The code generation moves REGION in
5168 the else clause of an "if (1)" and generates code in the then
5169 clause that is at this point empty:
5178 scop_insert_phis_for_liveouts (sese region, basic_block bb,
5179 edge false_e, edge true_e)
5184 update_ssa (TODO_update_ssa);
5186 EXECUTE_IF_SET_IN_BITMAP (SESE_LIVEOUT (region), 0, i, bi)
5187 scop_add_exit_phis_var (bb, ssa_name (i), SESE_LIVEIN_VER (region, i),
5190 update_ssa (TODO_update_ssa);
5193 /* Get the definition of NAME before the SCOP. Keep track of the
5194 basic blocks that have been VISITED in a bitmap. */
5197 get_vdef_before_scop (scop_p scop, tree name, sbitmap visited)
5200 gimple def_stmt = SSA_NAME_DEF_STMT (name);
5201 basic_block def_bb = gimple_bb (def_stmt);
5204 || !bb_in_sese_p (def_bb, SCOP_REGION (scop)))
5207 if (TEST_BIT (visited, def_bb->index))
5210 SET_BIT (visited, def_bb->index);
5212 switch (gimple_code (def_stmt))
5215 for (i = 0; i < gimple_phi_num_args (def_stmt); i++)
5217 tree arg = gimple_phi_arg_def (def_stmt, i);
5218 tree res = get_vdef_before_scop (scop, arg, visited);
5229 /* Adjust a virtual phi node PHI that is placed at the end of the
5230 generated code for SCOP:
5233 | generated code from REGION;
5237 The FALSE_E edge comes from the original code, TRUE_E edge comes
5238 from the code generated for the SCOP. */
5241 scop_adjust_vphi (scop_p scop, gimple phi, edge true_e)
5245 gcc_assert (gimple_phi_num_args (phi) == 2);
5247 for (i = 0; i < gimple_phi_num_args (phi); i++)
5248 if (gimple_phi_arg_edge (phi, i) == true_e)
5250 tree true_arg, false_arg, before_scop_arg;
5253 true_arg = gimple_phi_arg_def (phi, i);
5254 if (!SSA_NAME_IS_DEFAULT_DEF (true_arg))
5257 false_arg = gimple_phi_arg_def (phi, i == 0 ? 1 : 0);
5258 if (SSA_NAME_IS_DEFAULT_DEF (false_arg))
5261 visited = sbitmap_alloc (last_basic_block);
5262 sbitmap_zero (visited);
5263 before_scop_arg = get_vdef_before_scop (scop, false_arg, visited);
5264 gcc_assert (before_scop_arg != NULL_TREE);
5265 SET_PHI_ARG_DEF (phi, i, before_scop_arg);
5266 sbitmap_free (visited);
5270 /* Adjusts the phi nodes in the block BB for variables defined in
5271 SCOP_REGION and used outside the SCOP_REGION. The code generation
5272 moves SCOP_REGION in the else clause of an "if (1)" and generates
5273 code in the then clause:
5276 | generated code from REGION;
5280 To adjust the phi nodes after the condition, SCOP_LIVEOUT_RENAMES
5281 hash table is used: this stores for a name that is part of the
5282 LIVEOUT of SCOP_REGION its new name in the generated code. */
5285 scop_adjust_phis_for_liveouts (scop_p scop, basic_block bb, edge false_e,
5288 gimple_stmt_iterator si;
5290 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
5293 unsigned false_i = 0;
5294 gimple phi = gsi_stmt (si);
5296 if (!is_gimple_reg (PHI_RESULT (phi)))
5298 scop_adjust_vphi (scop, phi, true_e);
5302 for (i = 0; i < gimple_phi_num_args (phi); i++)
5303 if (gimple_phi_arg_edge (phi, i) == false_e)
5309 for (i = 0; i < gimple_phi_num_args (phi); i++)
5310 if (gimple_phi_arg_edge (phi, i) == true_e)
5312 tree old_name = gimple_phi_arg_def (phi, false_i);
5313 tree new_name = get_new_name_from_old_name
5314 (SCOP_LIVEOUT_RENAMES (scop), old_name);
5316 gcc_assert (old_name != new_name);
5317 SET_PHI_ARG_DEF (phi, i, new_name);
5322 /* Returns the first cloog name used in EXPR. */
5325 find_cloog_iv_in_expr (struct clast_expr *expr)
5327 struct clast_term *term = (struct clast_term *) expr;
5329 if (expr->type == expr_term
5333 if (expr->type == expr_term)
5336 if (expr->type == expr_red)
5339 struct clast_reduction *red = (struct clast_reduction *) expr;
5341 for (i = 0; i < red->n; i++)
5343 const char *res = find_cloog_iv_in_expr ((red)->elts[i]);
5353 /* Build for a clast_user_stmt USER_STMT a map between the CLAST
5354 induction variables and the corresponding GCC old induction
5355 variables. This information is stored on each GRAPHITE_BB. */
5358 compute_cloog_iv_types_1 (graphite_bb_p gbb,
5359 struct clast_user_stmt *user_stmt)
5361 struct clast_stmt *t;
5364 for (t = user_stmt->substitutions; t; t = t->next, index++)
5367 struct ivtype_map_elt tmp;
5368 struct clast_expr *expr = (struct clast_expr *)
5369 ((struct clast_assignment *)t)->RHS;
5371 /* Create an entry (clast_var, type). */
5372 tmp.cloog_iv = find_cloog_iv_in_expr (expr);
5376 slot = htab_find_slot (GBB_CLOOG_IV_TYPES (gbb), &tmp, INSERT);
5380 loop_p loop = gbb_loop_at_index (gbb, index);
5381 tree oldiv = oldiv_for_loop (GBB_SCOP (gbb), loop);
5382 tree type = oldiv ? TREE_TYPE (oldiv) : integer_type_node;
5383 *slot = new_ivtype_map_elt (tmp.cloog_iv, type);
5388 /* Walk the CLAST tree starting from STMT and build for each
5389 clast_user_stmt a map between the CLAST induction variables and the
5390 corresponding GCC old induction variables. This information is
5391 stored on each GRAPHITE_BB. */
5394 compute_cloog_iv_types (struct clast_stmt *stmt)
5399 if (CLAST_STMT_IS_A (stmt, stmt_root))
5402 if (CLAST_STMT_IS_A (stmt, stmt_user))
5404 CloogStatement *cs = ((struct clast_user_stmt *) stmt)->statement;
5405 graphite_bb_p gbb = (graphite_bb_p) cloog_statement_usr (cs);
5406 GBB_CLOOG_IV_TYPES (gbb) = htab_create (10, ivtype_map_elt_info,
5407 eq_ivtype_map_elts, free);
5408 compute_cloog_iv_types_1 (gbb, (struct clast_user_stmt *) stmt);
5412 if (CLAST_STMT_IS_A (stmt, stmt_for))
5414 struct clast_stmt *s = ((struct clast_for *) stmt)->body;
5415 compute_cloog_iv_types (s);
5419 if (CLAST_STMT_IS_A (stmt, stmt_guard))
5421 struct clast_stmt *s = ((struct clast_guard *) stmt)->then;
5422 compute_cloog_iv_types (s);
5426 if (CLAST_STMT_IS_A (stmt, stmt_block))
5428 struct clast_stmt *s = ((struct clast_block *) stmt)->body;
5429 compute_cloog_iv_types (s);
5436 compute_cloog_iv_types (stmt->next);
5439 /* GIMPLE Loop Generator: generates loops from STMT in GIMPLE form for
5440 the given SCOP. Return true if code generation succeeded. */
5443 gloog (scop_p scop, struct clast_stmt *stmt)
5445 edge new_scop_exit_edge = NULL;
5446 VEC (iv_stack_entry_p, heap) *ivstack = VEC_alloc (iv_stack_entry_p, heap,
5448 loop_p context_loop;
5449 ifsese if_region = NULL;
5451 recompute_all_dominators ();
5453 if_region = move_sese_in_condition (SCOP_REGION (scop));
5454 sese_build_livein_liveouts (SCOP_REGION (scop));
5455 scop_insert_phis_for_liveouts (SCOP_REGION (scop),
5456 if_region->region->exit->src,
5457 if_region->false_region->exit,
5458 if_region->true_region->exit);
5459 recompute_all_dominators ();
5461 context_loop = SESE_ENTRY (SCOP_REGION (scop))->src->loop_father;
5462 compute_cloog_iv_types (stmt);
5464 new_scop_exit_edge = translate_clast (scop, context_loop, stmt,
5465 if_region->true_region->entry,
5467 free_loop_iv_stack (&ivstack);
5468 cloog_clast_free (stmt);
5471 scop_adjust_phis_for_liveouts (scop,
5472 if_region->region->exit->src,
5473 if_region->false_region->exit,
5474 if_region->true_region->exit);
5476 recompute_all_dominators ();
5481 /* Returns the number of data references in SCOP. */
5484 nb_data_refs_in_scop (scop_p scop)
5490 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gbb); i++)
5491 res += VEC_length (data_reference_p, GBB_DATA_REFS (gbb));
5496 /* Move the loop at index LOOP and insert it before index NEW_LOOP_POS.
5497 This transformartion is only valid, if the loop nest between i and k is
5498 perfectly nested. Therefore we do not need to change the static schedule.
5502 for (i = 0; i < 50; i++)
5504 for (k = 5; k < 100; k++)
5507 To move k before i use:
5509 graphite_trans_bb_move_loop (A, 2, 0)
5511 for (k = 5; k < 100; k++)
5512 for (i = 0; i < 50; i++)
5518 graphite_trans_bb_move_loop (A, 0, 2)
5520 This function does not check the validity of interchanging loops.
5521 This should be checked before calling this function. */
5524 graphite_trans_bb_move_loop (graphite_bb_p gb, int loop,
5527 CloogMatrix *domain = GBB_DOMAIN (gb);
5531 gcc_assert (loop < gbb_nb_loops (gb)
5532 && new_loop_pos < gbb_nb_loops (gb));
5534 /* Update LOOPS vector. */
5535 tmp_loop_p = VEC_index (loop_p, GBB_LOOPS (gb), loop);
5536 VEC_ordered_remove (loop_p, GBB_LOOPS (gb), loop);
5537 VEC_safe_insert (loop_p, heap, GBB_LOOPS (gb), new_loop_pos, tmp_loop_p);
5539 /* Move the domain columns. */
5540 if (loop < new_loop_pos)
5541 for (row = 0; row < domain->NbRows; row++)
5545 value_assign (tmp, domain->p[row][loop + 1]);
5547 for (j = loop ; j < new_loop_pos - 1; j++)
5548 value_assign (domain->p[row][j + 1], domain->p[row][j + 2]);
5550 value_assign (domain->p[row][new_loop_pos], tmp);
5554 for (row = 0; row < domain->NbRows; row++)
5558 value_assign (tmp, domain->p[row][loop + 1]);
5560 for (j = loop ; j > new_loop_pos; j--)
5561 value_assign (domain->p[row][j + 1], domain->p[row][j]);
5563 value_assign (domain->p[row][new_loop_pos + 1], tmp);
5568 /* Get the index of the column representing constants in the DOMAIN
5572 const_column_index (CloogMatrix *domain)
5574 return domain->NbColumns - 1;
5578 /* Get the first index that is positive or negative, determined
5579 following the value of POSITIVE, in matrix DOMAIN in COLUMN. */
5582 get_first_matching_sign_row_index (CloogMatrix *domain, int column,
5587 for (row = 0; row < domain->NbRows; row++)
5589 int val = value_get_si (domain->p[row][column]);
5591 if (val > 0 && positive)
5594 else if (val < 0 && !positive)
5601 /* Get the lower bound of COLUMN in matrix DOMAIN. */
5604 get_lower_bound_row (CloogMatrix *domain, int column)
5606 return get_first_matching_sign_row_index (domain, column, true);
5609 /* Get the upper bound of COLUMN in matrix DOMAIN. */
5612 get_upper_bound_row (CloogMatrix *domain, int column)
5614 return get_first_matching_sign_row_index (domain, column, false);
5617 /* Copies the OLD_ROW constraint from OLD_DOMAIN to the NEW_DOMAIN at
5621 copy_constraint (CloogMatrix *old_domain, CloogMatrix *new_domain,
5622 int old_row, int new_row)
5626 gcc_assert (old_domain->NbColumns == new_domain->NbColumns
5627 && old_row < old_domain->NbRows
5628 && new_row < new_domain->NbRows);
5630 for (i = 0; i < old_domain->NbColumns; i++)
5631 value_assign (new_domain->p[new_row][i], old_domain->p[old_row][i]);
5634 /* Swap coefficients of variables X and Y on row R. */
5637 swap_constraint_variables (CloogMatrix *domain,
5638 int r, int x, int y)
5640 value_swap (domain->p[r][x], domain->p[r][y]);
5643 /* Scale by X the coefficient C of constraint at row R in DOMAIN. */
5646 scale_constraint_variable (CloogMatrix *domain,
5647 int r, int c, int x)
5649 Value strip_size_value;
5650 value_init (strip_size_value);
5651 value_set_si (strip_size_value, x);
5652 value_multiply (domain->p[r][c], domain->p[r][c], strip_size_value);
5653 value_clear (strip_size_value);
5656 /* Strip mines the loop of BB at the position LOOP_DEPTH with STRIDE.
5657 Always valid, but not always a performance improvement. */
5660 graphite_trans_bb_strip_mine (graphite_bb_p gb, int loop_depth, int stride)
5664 CloogMatrix *domain = GBB_DOMAIN (gb);
5665 CloogMatrix *new_domain = cloog_matrix_alloc (domain->NbRows + 3,
5666 domain->NbColumns + 1);
5668 int col_loop_old = loop_depth + 2;
5669 int col_loop_strip = col_loop_old - 1;
5671 gcc_assert (loop_depth <= gbb_nb_loops (gb) - 1);
5673 VEC_safe_insert (loop_p, heap, GBB_LOOPS (gb), loop_depth, NULL);
5675 GBB_DOMAIN (gb) = new_domain;
5677 for (row = 0; row < domain->NbRows; row++)
5678 for (col = 0; col < domain->NbColumns; col++)
5679 if (col <= loop_depth)
5680 value_assign (new_domain->p[row][col], domain->p[row][col]);
5682 value_assign (new_domain->p[row][col + 1], domain->p[row][col]);
5684 row = domain->NbRows;
5686 /* Lower bound of the outer stripped loop. */
5687 copy_constraint (new_domain, new_domain,
5688 get_lower_bound_row (new_domain, col_loop_old), row);
5689 swap_constraint_variables (new_domain, row, col_loop_old, col_loop_strip);
5692 /* Upper bound of the outer stripped loop. */
5693 copy_constraint (new_domain, new_domain,
5694 get_upper_bound_row (new_domain, col_loop_old), row);
5695 swap_constraint_variables (new_domain, row, col_loop_old, col_loop_strip);
5696 scale_constraint_variable (new_domain, row, col_loop_strip, stride);
5699 /* Lower bound of a tile starts at "stride * outer_iv". */
5700 row = get_lower_bound_row (new_domain, col_loop_old);
5701 value_set_si (new_domain->p[row][0], 1);
5702 value_set_si (new_domain->p[row][const_column_index (new_domain)], 0);
5703 value_set_si (new_domain->p[row][col_loop_old], 1);
5704 value_set_si (new_domain->p[row][col_loop_strip], -1 * stride);
5706 /* Upper bound of a tile stops at "stride * outer_iv + stride - 1",
5707 or at the old upper bound that is not modified. */
5708 row = new_domain->NbRows - 1;
5709 value_set_si (new_domain->p[row][0], 1);
5710 value_set_si (new_domain->p[row][col_loop_old], -1);
5711 value_set_si (new_domain->p[row][col_loop_strip], stride);
5712 value_set_si (new_domain->p[row][const_column_index (new_domain)],
5715 cloog_matrix_free (domain);
5717 /* Update static schedule. */
5720 int nb_loops = gbb_nb_loops (gb);
5721 lambda_vector new_schedule = lambda_vector_new (nb_loops + 1);
5723 for (i = 0; i <= loop_depth; i++)
5724 new_schedule[i] = GBB_STATIC_SCHEDULE (gb)[i];
5726 for (i = loop_depth + 1; i <= nb_loops - 2; i++)
5727 new_schedule[i + 2] = GBB_STATIC_SCHEDULE (gb)[i];
5729 GBB_STATIC_SCHEDULE (gb) = new_schedule;
5733 /* Returns true when the strip mining of LOOP_INDEX by STRIDE is
5734 profitable or undecidable. GB is the statement around which the
5735 loops will be strip mined. */
5738 strip_mine_profitable_p (graphite_bb_p gb, int stride,
5747 loop = VEC_index (loop_p, GBB_LOOPS (gb), loop_index);
5748 exit = single_exit (loop);
5750 niter = find_loop_niter (loop, &exit);
5751 if (niter == chrec_dont_know
5752 || TREE_CODE (niter) != INTEGER_CST)
5755 niter_val = int_cst_value (niter);
5757 if (niter_val < stride)
5760 if (dump_file && (dump_flags & TDF_DETAILS))
5762 fprintf (dump_file, "\nStrip Mining is not profitable for loop %d:",
5764 fprintf (dump_file, "number of iterations is too low.\n");
5771 /* Determines when the interchange of LOOP_A and LOOP_B belonging to
5772 SCOP is legal. DEPTH is the number of loops around. */
5775 is_interchange_valid (scop_p scop, int loop_a, int loop_b, int depth)
5778 VEC (ddr_p, heap) *dependence_relations;
5779 VEC (data_reference_p, heap) *datarefs;
5781 struct loop *nest = VEC_index (loop_p, SCOP_LOOP_NEST (scop), loop_a);
5782 lambda_trans_matrix trans;
5784 gcc_assert (loop_a < loop_b);
5786 dependence_relations = VEC_alloc (ddr_p, heap, 10 * 10);
5787 datarefs = VEC_alloc (data_reference_p, heap, 10);
5789 if (!compute_data_dependences_for_loop (nest, true, &datarefs,
5790 &dependence_relations))
5793 if (dump_file && (dump_flags & TDF_DETAILS))
5794 dump_ddrs (dump_file, dependence_relations);
5796 trans = lambda_trans_matrix_new (depth, depth);
5797 lambda_matrix_id (LTM_MATRIX (trans), depth);
5799 lambda_matrix_row_exchange (LTM_MATRIX (trans), 0, loop_b - loop_a);
5801 if (!lambda_transform_legal_p (trans, depth, dependence_relations))
5803 lambda_matrix_row_exchange (LTM_MATRIX (trans), 0, loop_b - loop_a);
5809 free_dependence_relations (dependence_relations);
5810 free_data_refs (datarefs);
5814 /* Loop block the LOOPS innermost loops of GB with stride size STRIDE.
5818 for (i = 0; i <= 50; i++=4)
5819 for (k = 0; k <= 100; k++=4)
5820 for (l = 0; l <= 200; l++=4)
5823 To strip mine the two inner most loops with stride = 4 call:
5825 graphite_trans_bb_block (A, 4, 2)
5827 for (i = 0; i <= 50; i++)
5828 for (kk = 0; kk <= 100; kk+=4)
5829 for (ll = 0; ll <= 200; ll+=4)
5830 for (k = kk; k <= min (100, kk + 3); k++)
5831 for (l = ll; l <= min (200, ll + 3); l++)
5836 graphite_trans_bb_block (graphite_bb_p gb, int stride, int loops)
5839 int nb_loops = gbb_nb_loops (gb);
5840 int start = nb_loops - loops;
5841 scop_p scop = GBB_SCOP (gb);
5843 gcc_assert (scop_contains_loop (scop, gbb_loop (gb)));
5845 for (i = start ; i < nb_loops; i++)
5846 for (j = i + 1; j < nb_loops; j++)
5847 if (!is_interchange_valid (scop, i, j, nb_loops))
5849 if (dump_file && (dump_flags & TDF_DETAILS))
5851 "\nInterchange not valid for loops %d and %d:\n", i, j);
5854 else if (dump_file && (dump_flags & TDF_DETAILS))
5856 "\nInterchange valid for loops %d and %d:\n", i, j);
5858 /* Check if strip mining is profitable for every loop. */
5859 for (i = 0; i < nb_loops - start; i++)
5860 if (!strip_mine_profitable_p (gb, stride, start + i))
5863 /* Strip mine loops. */
5864 for (i = 0; i < nb_loops - start; i++)
5865 graphite_trans_bb_strip_mine (gb, start + 2 * i, stride);
5867 /* Interchange loops. */
5868 for (i = 1; i < nb_loops - start; i++)
5869 graphite_trans_bb_move_loop (gb, start + 2 * i, start + i);
5871 if (dump_file && (dump_flags & TDF_DETAILS))
5872 fprintf (dump_file, "\nLoops containing BB %d will be loop blocked.\n",
5873 GBB_BB (gb)->index);
5878 /* Loop block LOOPS innermost loops of a loop nest. BBS represent the
5879 basic blocks that belong to the loop nest to be blocked. */
5882 graphite_trans_loop_block (VEC (graphite_bb_p, heap) *bbs, int loops)
5886 bool transform_done = false;
5888 /* TODO: - Calculate the stride size automatically. */
5889 int stride_size = 51;
5891 for (i = 0; VEC_iterate (graphite_bb_p, bbs, i, gb); i++)
5892 transform_done |= graphite_trans_bb_block (gb, stride_size, loops);
5894 return transform_done;
5897 /* Loop block all basic blocks of SCOP. Return false when the
5898 transform is not performed. */
5901 graphite_trans_scop_block (scop_p scop)
5907 bool perfect = true;
5908 bool transform_done = false;
5910 VEC (graphite_bb_p, heap) *bbs = VEC_alloc (graphite_bb_p, heap, 3);
5911 int max_schedule = scop_max_loop_depth (scop) + 1;
5912 lambda_vector last_schedule = lambda_vector_new (max_schedule);
5914 if (VEC_length (graphite_bb_p, SCOP_BBS (scop)) == 0)
5917 /* Get the data of the first bb. */
5918 gb = VEC_index (graphite_bb_p, SCOP_BBS (scop), 0);
5919 last_nb_loops = gbb_nb_loops (gb);
5920 lambda_vector_copy (GBB_STATIC_SCHEDULE (gb), last_schedule,
5922 VEC_safe_push (graphite_bb_p, heap, bbs, gb);
5924 for (i = 0; VEC_iterate (graphite_bb_p, SCOP_BBS (scop), i, gb); i++)
5926 /* We did the first bb before. */
5930 nb_loops = gbb_nb_loops (gb);
5932 /* If the number of loops is unchanged and only the last element of the
5933 schedule changes, we stay in the loop nest. */
5934 if (nb_loops == last_nb_loops
5935 && (last_schedule [nb_loops + 1]
5936 != GBB_STATIC_SCHEDULE (gb)[nb_loops + 1]))
5938 VEC_safe_push (graphite_bb_p, heap, bbs, gb);
5942 /* Otherwise, we left the innermost loop. So check, if the last bb was in
5943 a perfect loop nest and how many loops are contained in this perfect
5946 Count the number of zeros from the end of the schedule. They are the
5947 number of surrounding loops.
5950 last_bb 2 3 2 0 0 0 0 3
5954 last_bb 2 3 2 0 0 0 0 3
5958 If there is no zero, there were other bbs in outer loops and the loop
5959 nest is not perfect. */
5960 for (j = last_nb_loops - 1; j >= 0; j--)
5962 if (last_schedule [j] != 0
5963 || (j <= nb_loops && GBB_STATIC_SCHEDULE (gb)[j] == 1))
5972 /* Found perfect loop nest. */
5973 if (perfect && last_nb_loops - j >= 2)
5974 transform_done |= graphite_trans_loop_block (bbs, last_nb_loops - j);
5976 /* Check if we start with a new loop.
5980 last_bb 2 3 2 0 0 0 0 3
5983 Here we start with the loop "2 3 2 0 0 1"
5985 last_bb 2 3 2 0 0 0 0 3
5988 But here not, so the loop nest can never be perfect. */
5990 perfect = (GBB_STATIC_SCHEDULE (gb)[nb_loops] == 0);
5992 /* Update the last_bb infos. We do not do that for the bbs in the same
5993 loop, as the data we use is not changed. */
5994 last_nb_loops = nb_loops;
5995 lambda_vector_copy (GBB_STATIC_SCHEDULE (gb), last_schedule,
5997 VEC_truncate (graphite_bb_p, bbs, 0);
5998 VEC_safe_push (graphite_bb_p, heap, bbs, gb);
6001 /* Check if the last loop nest was perfect. It is the same check as above,
6002 but the comparison with the next bb is missing. */
6003 for (j = last_nb_loops - 1; j >= 0; j--)
6004 if (last_schedule [j] != 0)
6012 /* Found perfect loop nest. */
6013 if (last_nb_loops - j >= 2)
6014 transform_done |= graphite_trans_loop_block (bbs, last_nb_loops - j);
6015 VEC_free (graphite_bb_p, heap, bbs);
6017 return transform_done;
6020 /* Apply graphite transformations to all the basic blocks of SCOP. */
6023 graphite_apply_transformations (scop_p scop)
6025 bool transform_done = false;
6027 /* Sort the list of bbs. Keep them always sorted. */
6028 graphite_sort_gbbs (scop);
6030 if (flag_loop_block)
6031 transform_done = graphite_trans_scop_block (scop);
6033 /* Generate code even if we did not apply any real transformation.
6034 This also allows to check the performance for the identity
6035 transformation: GIMPLE -> GRAPHITE -> GIMPLE
6036 Keep in mind that CLooG optimizes in control, so the loop structure
6037 may change, even if we only use -fgraphite-identity. */
6038 if (flag_graphite_identity)
6039 transform_done = true;
6041 return transform_done;
6044 /* We limit all SCoPs to SCoPs, that are completely surrounded by a loop.
6054 * SCoP frontier, as this line is not surrounded by any loop. *
6058 This is necessary as scalar evolution and parameter detection need a
6059 outermost loop to initialize parameters correctly.
6061 TODO: FIX scalar evolution and parameter detection to allow more flexible
6067 VEC (sd_region, heap) *tmp_scops = VEC_alloc (sd_region, heap, 3);
6072 for (i = 0; VEC_iterate (scop_p, current_scops, i, scop); i++)
6076 build_scop_bbs (scop);
6078 if (!build_scop_loop_nests (scop))
6081 for (j = 0; VEC_iterate (loop_p, SCOP_LOOP_NEST (scop), j, loop); j++)
6082 if (!loop_in_sese_p (loop_outer (loop), SCOP_REGION (scop)))
6084 sd_region open_scop;
6085 open_scop.entry = loop->header;
6086 open_scop.exit = single_exit (loop)->dest;
6087 VEC_safe_push (sd_region, heap, tmp_scops, &open_scop);
6091 free_scops (current_scops);
6092 current_scops = VEC_alloc (scop_p, heap, 3);
6094 create_sese_edges (tmp_scops);
6095 build_graphite_scops (tmp_scops);
6096 VEC_free (sd_region, heap, tmp_scops);
6099 /* Perform a set of linear transforms on the loops of the current
6103 graphite_transform_loops (void)
6107 bool transform_done = false;
6109 if (number_of_loops () <= 1)
6112 current_scops = VEC_alloc (scop_p, heap, 3);
6113 recompute_all_dominators ();
6115 if (dump_file && (dump_flags & TDF_DETAILS))
6116 fprintf (dump_file, "Graphite loop transformations \n");
6118 initialize_original_copy_tables ();
6119 cloog_initialize ();
6123 if (dump_file && (dump_flags & TDF_DETAILS))
6124 fprintf (dump_file, "\nnumber of SCoPs: %d\n",
6125 VEC_length (scop_p, current_scops));
6127 for (i = 0; VEC_iterate (scop_p, current_scops, i, scop); i++)
6129 build_scop_bbs (scop);
6130 if (!build_scop_loop_nests (scop))
6133 build_bb_loops (scop);
6135 if (!build_scop_conditions (scop))
6138 find_scop_parameters (scop);
6139 build_scop_context (scop);
6141 if (dump_file && (dump_flags & TDF_DETAILS))
6143 fprintf (dump_file, "\n(In SCoP %d:\n", i);
6144 fprintf (dump_file, "\nnumber of bbs: %d\n",
6145 VEC_length (graphite_bb_p, SCOP_BBS (scop)));
6146 fprintf (dump_file, "\nnumber of loops: %d)\n",
6147 VEC_length (loop_p, SCOP_LOOP_NEST (scop)));
6150 if (!build_scop_iteration_domain (scop))
6153 add_conditions_to_constraints (scop);
6154 build_scop_canonical_schedules (scop);
6156 build_scop_data_accesses (scop);
6157 build_scop_dynamic_schedules (scop);
6159 if (dump_file && (dump_flags & TDF_DETAILS))
6161 int nbrefs = nb_data_refs_in_scop (scop);
6162 fprintf (dump_file, "\nnumber of data refs: %d\n", nbrefs);
6165 if (graphite_apply_transformations (scop))
6166 transform_done = gloog (scop, find_transform (scop));
6167 #ifdef ENABLE_CHECKING
6170 struct clast_stmt *stmt = find_transform (scop);
6171 cloog_clast_free (stmt);
6178 cleanup_tree_cfg ();
6180 free_scops (current_scops);
6182 free_original_copy_tables ();
6185 #else /* If Cloog is not available: #ifndef HAVE_cloog. */
6188 graphite_transform_loops (void)
6190 sorry ("Graphite loop optimizations cannot be used");