/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "splay-tree.h"
/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */
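/* For illustration only (a simplified sketch, not the literal output of
   this pass), a directive such as:

	#pragma omp parallel shared (a)
	  body;

   ends up expanded along the lines of:

	.omp_data_o.a = a;
	__builtin_GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
	foo._omp_fn.0 (&.omp_data_o);
	__builtin_GOMP_parallel_end ();

   where foo._omp_fn.0 is the outlined child function holding BODY and
   .omp_data_o is the data-sharing structure built below.  */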
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
  /* This field must be at the beginning, as we do "inheritance":  Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */

  /* True if this parallel directive is nested within another.  */
struct omp_for_data_loop
  tree v, n1, n2, step;
  enum tree_code cond_code;

/* A structure describing the main elements of a parallel loop.  */

  struct omp_for_data_loop loop;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
static splay_tree all_contexts;
static int taskreg_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS \
  case GIMPLE_EH_FILTER: \
    /* The sub-statements for these should be walked.  */ \
    *handled_ops_p = false; \

/* Convenience function for calling scan_omp_1_op on tree operands.  */

scan_omp_op (tree *tp, omp_context *ctx)
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);

static void lower_omp (gimple_seq, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
/* Find an OpenMP clause of type KIND within CLAUSES.  */

find_omp_clause (tree clauses, enum omp_clause_code kind)
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)

/* Return true if CTX is for an omp parallel.  */

is_parallel_ctx (omp_context *ctx)
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;

/* Return true if CTX is for an omp task.  */

is_task_ctx (omp_context *ctx)
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;

/* Return true if CTX is for an omp parallel or omp task.  */

is_taskreg_ctx (omp_context *ctx)
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;

/* Return true if REGION is a combined parallel+workshare region.  */

is_combined_parallel (struct omp_region *region)
  return region->is_combined_parallel;
/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  struct omp_for_data_loop dummy_loop;

  fd->for_stmt = for_stmt;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;
  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
      case OMP_CLAUSE_SCHEDULE:
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	  collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
    gcc_assert (fd->chunk_size == NULL);
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    /* We only need to compute a default chunk size for ordered
       static loops and dynamic loops.  */
    if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
      fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
		       ? integer_zero_node : integer_one_node;
  for (i = 0; i < fd->collapse; i++)
      if (fd->collapse == 1)
      else if (loops != NULL)

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
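      /* Canonicalize the comparison so that only LT_EXPR and GT_EXPR
	 survive below: "v <= n2" becomes "v < n2 + 1" and "v >= n2"
	 becomes "v > n2 - 1", using size_one_node / size_int (-1) for
	 the pointer variants.  */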
      switch (loop->cond_code)
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
				    loop->n2, size_one_node);
	    loop->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
				    build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
				    loop->n2, size_int (-1));
	    loop->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
				    build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	case POINTER_PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (loop->step),

      if (iter_type != long_long_unsigned_type_node)
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	      if (loop->cond_code == LT_EXPR)
		n = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
				 loop->n2, loop->step);
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	      if (loop->cond_code == LT_EXPR)
		  n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
				    loop->n2, loop->step);
		n1 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->v),
				  loop->n2, loop->step);
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
      if (collapse_count && *collapse_count == NULL)
	  if ((i == 0 || count != NULL_TREE)
	      && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
	      && TREE_CONSTANT (loop->n1)
	      && TREE_CONSTANT (loop->n2)
	      && TREE_CODE (loop->step) == INTEGER_CST)
	      tree itype = TREE_TYPE (loop->v);
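	      /* A sketch of the arithmetic built below: the count is
		 (n2 - n1 + step + (cond_code == LT_EXPR ? -1 : 1)) / step,
		 e.g. for "for (i = 0; i < 10; i += 3)" that gives
		 (10 - 0 + 3 - 1) / 3 == 4 iterations.  */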
	      if (POINTER_TYPE_P (itype))
		  = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2 (PLUS_EXPR, itype,
			       fold_convert (itype, loop->step), t);
	      t = fold_build2 (PLUS_EXPR, itype, t,
			       fold_convert (itype, loop->n2));
	      t = fold_build2 (MINUS_EXPR, itype, t,
			       fold_convert (itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2 (TRUNC_DIV_EXPR, itype,
				 fold_build1 (NEGATE_EXPR, itype, t),
				 fold_build1 (NEGATE_EXPR, itype,
		t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
				 fold_convert (itype, loop->step));
	      t = fold_convert (long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2 (MULT_EXPR, long_long_unsigned_type_node,
	      if (TREE_CODE (count) != INTEGER_CST)
	  if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	    iter_type = long_long_unsigned_type_node;
	    iter_type = long_integer_type_node;
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
      *collapse_count = fold_convert (iter_type, count);
      *collapse_count = create_tmp_var (iter_type, ".count");

  if (fd->collapse > 1)
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)

	# BLOCK 2 (PAR_ENTRY_BB)
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of
   any statement in WS_ENTRY_BB.  If so, then we cannot emit the
   combined parallel+workshare directive.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */
workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb)
  struct omp_for_data fd;
  gimple par_stmt, ws_stmt;

  par_stmt = last_stmt (par_entry_bb);
  ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
  if (fd.iter_type != long_integer_type_node)

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

get_ws_args_for (gimple ws_stmt)
  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
      struct omp_for_data fd;

      extract_omp_for_data (ws_stmt, &fd, NULL);

	  t = fold_convert (long_integer_type_node, fd.chunk_size);
	  ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert (long_integer_type_node, fd.loop.step);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert (long_integer_type_node, fd.loop.n2);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert (long_integer_type_node, fd.loop.n1);
      ws_args = tree_cons (NULL, t, ws_args);
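      /* Because each tree_cons above prepends to WS_ARGS, the final
	 list order is (n1, n2, step[, chunk_size]) -- the order in
	 which the combined GOMP_parallel_loop_*_start entry points in
	 libgomp expect their extra arguments.  */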
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      t = tree_cons (NULL, t, NULL);
/* Discover whether REGION is a combined parallel+workshare region.  */

determine_parallel_type (struct omp_region *region)
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
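	  /* (A "combined call" here means a single libgomp entry point
	     such as GOMP_parallel_loop_dynamic_start that takes the
	     loop bounds directly, instead of GOMP_parallel_start
	     followed by a separate GOMP_loop_*_start call made from
	     inside the child function; the names are illustrative of
	     the entry points involved.)  */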
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
/* Return true if EXPR is variable sized.  */

is_variable_sized (const_tree expr)
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));

/* Return true if DECL is a reference type.  */

is_reference (tree decl)
  return lang_hooks.decls.omp_privatize_by_reference (decl);

/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows the variable not to have been entered; the plain form asserts
   that the variable must already have been entered.  */
lookup_decl (tree var, omp_context *ctx)
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);

maybe_lookup_decl (const_tree var, omp_context *ctx)
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;

lookup_field (tree var, omp_context *ctx)
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;

lookup_sfield (tree var, omp_context *ctx)
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;

maybe_lookup_field (tree var, omp_context *ctx)
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */
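/* (Roughly: returning false selects copy-in/copy-out, where the value
   of DECL is stored into its .omp_data_s field and copied back after
   the region; returning true makes the field a pointer, so every
   thread accesses the original variable's storage directly.)  */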
use_pointer_for_field (tree decl, omp_context *shared_ctx)
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */

      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (!TREE_READONLY (decl) && shared_ctx->is_nested)

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)

      /* For tasks avoid using copy-in/out, unless they are readonly
	 (in which case just copy-in is used).  As tasks can be
	 deferred or executed in a different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
	  tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
/* Create a new VAR_DECL and copy information from VAR to it.  */

copy_var_decl (tree var, tree name, tree type)
  tree copy = build_decl (VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_NO_TBAA_P (copy) = DECL_NO_TBAA_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  DECL_SOURCE_LOCATION (copy) = DECL_SOURCE_LOCATION (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

/* Construct a new automatic decl similar to VAR.  */

omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  TREE_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

omp_copy_decl_1 (tree var, omp_context *ctx)
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
/* Build tree nodes to access the field for VAR on the receiver side.  */

build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);

  x = build_fold_indirect_ref (ctx->receiver_decl);
  x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
    x = build_fold_indirect_ref (x);

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

build_outer_var_ref (tree var, omp_context *ctx)
  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
  else if (is_variable_sized (var))
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_fold_indirect_ref (x);
  else if (is_taskreg_ctx (ctx))
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If VAR is a reference,
       it is possible it is shared and as such valid.  */

  if (is_reference (var))
    x = build_fold_indirect_ref (x);
/* Build tree nodes to access the field for VAR on the sender side.  */

build_sender_ref (tree var, omp_context *ctx)
  tree field = lookup_sfield (var, ctx);
  return build3 (COMPONENT_REF, TREE_TYPE (field),
		 ctx->sender_decl, field, NULL);

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */
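/* (MASK is a bitmask: bit 0 requests a field in CTX->RECORD_TYPE,
   recorded in FIELD_MAP; bit 1 a field in CTX->SRECORD_TYPE, recorded
   in SFIELD_MAP.  Callers pass 3 to install the variable in both
   records, as the asserts below assume.)  */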
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    DECL_ALIGN (field) = TYPE_ALIGN (type);

      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	  sfield = build_decl (FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);

      if (ctx->srecord_type == NULL_TREE)
	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	      sfield = build_decl (FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);

    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
install_var_local (tree var, omp_context *ctx)
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

omp_copy_decl (tree var, copy_body_data *cb)
  omp_context *ctx = (omp_context *) cb;

  if (TREE_CODE (var) == LABEL_DECL)
      new_var = create_artificial_label ();
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);

  while (!is_taskreg_ctx (ctx))
      new_var = maybe_lookup_decl (var, ctx);

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)

  return error_mark_node;
/* Return the parallel region associated with STMT.  */

/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

dump_omp_region (FILE *file, struct omp_region *region, int indent)
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

    dump_omp_region (file, region->inner, indent + 4);

    fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	     region->cont->index);

    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
    fprintf (file, "%*s[no exit marker]\n", indent, "");

    dump_omp_region (file, region->next, indent);

debug_omp_region (struct omp_region *region)
  dump_omp_region (stderr, region, 0);

debug_all_omp_regions (void)
  dump_omp_region (stderr, root_omp_region, 0);
/* Create a new parallel region starting at BB inside region PARENT.  */

new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->type = type;

      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;

      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;

/* Release the memory associated with the region tree rooted at REGION.  */

free_omp_region_1 (struct omp_region *region)
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
      free_omp_region_1 (i);

/* Release the memory for the entire omp region tree.  */

free_omp_regions (void)
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
      free_omp_region_1 (r);
  root_omp_region = NULL;
/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);

      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;

      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node (current_function_decl);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_region = -1;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;

  ctx->cb.decl_map = pointer_map_create ();

static gimple_seq maybe_catch_exception (gimple_seq);
/* Finalize task copyfn.  */

finalize_task_copyfn (gimple task_stmt)
  struct function *child_cfun;
  tree child_fn, old_fn;
  gimple_seq seq, new_seq;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  /* Inform the callgraph about the new function.  */
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties
    = cfun->curr_properties;

  old_fn = current_function_decl;
  push_cfun (child_cfun);
  current_function_decl = child_fn;
  bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
  seq = gimple_seq_alloc ();
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = gimple_seq_alloc ();
      gimple_seq_add_stmt (&seq, bind);
  gimple_set_body (child_fn, seq);

  current_function_decl = old_fn;
  cgraph_add_new_function (child_fn, false);
/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

delete_omp_context (splay_tree_value value)
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
  if (ctx->srecord_type)
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = TREE_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);
/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   function.  */

fixup_child_record_type (omp_context *ctx)
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))

      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  TREE_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
      TYPE_FIELDS (type) = nreverse (new_fields);

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

scan_sharing_clauses (tree clauses, omp_context *ctx)
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      switch (OMP_CLAUSE_CODE (c))
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);

	case OMP_CLAUSE_SHARED:
	  gcc_assert (is_taskreg_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	  by_ref = use_pointer_for_field (decl, ctx);
	  if (! TREE_READONLY (decl)
	      || TREE_ADDRESSABLE (decl)
	      || is_reference (decl))
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	  else if (is_taskreg_ctx (ctx))
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || is_reference (decl)))
		  install_var_field (decl, false, 1, ctx);
		    install_var_field (decl, by_ref, 2, ctx);
		install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);

	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      switch (OMP_CLAUSE_CODE (c))
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    fixup_remapped_decl (decl, ctx, false);

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	  && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	  scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	  scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
/* Create a new name for the omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

create_omp_child_function_name (bool task_copy)
  tree name = DECL_ASSEMBLER_NAME (current_function_decl);
  size_t len = IDENTIFIER_LENGTH (name);
  char *tmp_name, *prefix;

  suffix = task_copy ? "_omp_cpyfn" : "_omp_fn";
  prefix = XALLOCAVEC (char, len + strlen (suffix) + 1);
  memcpy (prefix, IDENTIFIER_POINTER (name), len);
  strcpy (prefix + len, suffix);
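  /* For a function "foo" this yields a private name along the lines of
     "foo._omp_fn.0" (or a '$'-separated variant on targets where '.'
     is not valid in labels), one fresh name per outlined body.  */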
#ifndef NO_DOT_IN_LABEL
#elif !defined NO_DOLLAR_IN_LABEL
  ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
  return get_identifier (tmp_name);
/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

create_omp_child_function (omp_context *ctx, bool task_copy)
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
    type = build_function_type_list (void_type_node, ptr_type_node,
				     ptr_type_node, NULL_TREE);
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (FUNCTION_DECL, name, type);
  decl = lang_hooks.decls.pushdecl (decl);

    ctx->cb.dst_fn = decl;
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_RESULT (decl) = t;

  t = build_decl (PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  DECL_ARGUMENTS (decl) = t;
    ctx->receiver_decl = t;

      t = build_decl (PARM_DECL, get_identifier (".omp_data_o"),
      DECL_ARTIFICIAL (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  DECL_SOURCE_LOCATION (decl) = gimple_location (ctx->stmt);
  cfun->function_end_locus = gimple_location (ctx->stmt);
/* Scan an OpenMP parallel directive.  */

scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
  gimple stmt = gsi_stmt (*gsi);

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
      && empty_body_p (gimple_omp_body (stmt))
      && find_omp_clause (gimple_omp_parallel_clauses (stmt),
			  OMP_CLAUSE_COPYIN) == NULL)
      gsi_replace (gsi, gimple_build_nop (), false);

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
/* Scan an OpenMP task directive.  */

scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
  gimple stmt = gsi_stmt (*gsi);

  /* Ignore task directives with empty bodies.  */
      && empty_body_p (gimple_omp_body (stmt)))
      gsi_replace (gsi, gimple_build_nop (), false);

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);

  if (ctx->srecord_type)
      name = create_tmp_var_name (".omp_data_a");
      name = build_decl (TYPE_DECL, name, ctx->srecord_type);
      TYPE_NAME (ctx->srecord_type) = name;
      create_omp_child_function (ctx, true);

  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
      ctx->record_type = ctx->receiver_decl = NULL;
      t = build_int_cst (long_integer_type_node, 0);
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node, 1);
      gimple_omp_task_set_arg_align (stmt, t);

      tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
      /* Move VLA fields to the end.  */
      p = &TYPE_FIELDS (ctx->record_type);
	if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
	    || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
	    *p = TREE_CHAIN (*p);
	    TREE_CHAIN (*q) = NULL_TREE;
	    q = &TREE_CHAIN (*q);
	  p = &TREE_CHAIN (*p);
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
      if (ctx->srecord_type)
	layout_type (ctx->srecord_type);
      t = fold_convert (long_integer_type_node,
			TYPE_SIZE_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node,
			 TYPE_ALIGN_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_align (stmt, t);
/* Scan an OpenMP loop directive.  */

scan_omp_for (gimple stmt, omp_context *outer_ctx)
  ctx = new_omp_context (stmt, outer_ctx);

  scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);

  scan_omp (gimple_omp_for_pre_body (stmt), ctx);
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
      scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

/* Scan an OpenMP sections directive.  */

scan_omp_sections (gimple stmt, omp_context *outer_ctx)
  ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

/* Scan an OpenMP single directive.  */

scan_omp_single (gimple stmt, omp_context *outer_ctx)
  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_copy_s");
  name = build_decl (TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;

  scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = NULL;
    layout_type (ctx->record_type);
/* Check OpenMP nesting restrictions.  */

check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
  switch (gimple_code (stmt))
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  case GIMPLE_OMP_FOR:
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_ORDERED:
	  case GIMPLE_OMP_MASTER:
	  case GIMPLE_OMP_TASK:
	    if (is_gimple_call (stmt))
		warning (0, "barrier region may not be closely nested inside "
			    "of work-sharing, critical, ordered, master or "
			    "explicit task region");
	    warning (0, "work-sharing region may not be closely nested inside "
			"of work-sharing, critical, ordered, master or explicit "
	  case GIMPLE_OMP_PARALLEL:

    case GIMPLE_OMP_MASTER:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  case GIMPLE_OMP_FOR:
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_TASK:
	    warning (0, "master region may not be closely nested inside "
			"of work-sharing or explicit task region");
	  case GIMPLE_OMP_PARALLEL:

    case GIMPLE_OMP_ORDERED:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  case GIMPLE_OMP_CRITICAL:
	  case GIMPLE_OMP_TASK:
	    warning (0, "ordered region may not be closely nested inside "
			"of critical or explicit task region");
	  case GIMPLE_OMP_FOR:
	    if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
				 OMP_CLAUSE_ORDERED) == NULL)
	      warning (0, "ordered region must be closely nested inside "
			  "a loop region with an ordered clause");
	  case GIMPLE_OMP_PARALLEL:

    case GIMPLE_OMP_CRITICAL:
      for (; ctx != NULL; ctx = ctx->outer)
	if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
	    && (gimple_omp_critical_name (stmt)
		== gimple_omp_critical_name (ctx->stmt)))
	    warning (0, "critical region may not be nested inside a critical "
			"region with the same name");
/* Helper function for scan_omp.

   Callback for walk_tree or operators in walk_gimple_stmt used to
   scan for OpenMP directives in TP.  */

scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  omp_context *ctx = (omp_context *) wi->info;

  switch (TREE_CODE (t))
	*tp = remap_decl (t, &ctx->cb);

      if (ctx && TYPE_P (t))
	*tp = remap_type (t, &ctx->cb);
      else if (!DECL_P (t))

/* Helper function for scan_omp.

   Callback for walk_gimple_stmt used to scan for OpenMP directives in
   the current statement in GSI.  */
scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		 struct walk_stmt_info *wi)
  gimple stmt = gsi_stmt (*gsi);
  omp_context *ctx = (omp_context *) wi->info;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  /* Check the OpenMP nesting restrictions.  */
    if (is_gimple_omp (stmt))
      check_omp_nesting_restrictions (stmt, ctx);
    else if (is_gimple_call (stmt))
	tree fndecl = gimple_call_fndecl (stmt);
	if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	    && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
	  check_omp_nesting_restrictions (stmt, ctx);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    case GIMPLE_OMP_PARALLEL:
      taskreg_nesting_level++;
      scan_omp_parallel (gsi, ctx);
      taskreg_nesting_level--;

    case GIMPLE_OMP_TASK:
      taskreg_nesting_level++;
      scan_omp_task (gsi, ctx);
      taskreg_nesting_level--;

    case GIMPLE_OMP_FOR:
      scan_omp_for (stmt, ctx);

    case GIMPLE_OMP_SECTIONS:
      scan_omp_sections (stmt, ctx);

    case GIMPLE_OMP_SINGLE:
      scan_omp_single (stmt, ctx);

    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      ctx = new_omp_context (stmt, ctx);
      scan_omp (gimple_omp_body (stmt), ctx);

      *handled_ops_p = false;
      for (var = gimple_bind_vars (stmt); var ; var = TREE_CHAIN (var))
	insert_decl_map (&ctx->cb, var, var);

      *handled_ops_p = false;
/* Scan all the statements starting at the current statement.  CTX
   contains context information about the OpenMP directives and
   clauses found during the scan.  */

scan_omp (gimple_seq body, omp_context *ctx)
  location_t saved_location;
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;

  saved_location = input_location;
  walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
  input_location = saved_location;

/* Re-gimplification and code generation routines.  */

/* Build a call to GOMP_barrier.  */

build_omp_barrier (void)
  return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);

/* If a context was created for STMT when it was scanned, return it.  */

static omp_context *
maybe_lookup_ctx (gimple stmt)
  n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
  return n ? (omp_context *) n->value : NULL;
/* Find the mapping for DECL in CTX or the immediately enclosing
   context that has a mapping for DECL.

   If CTX is a nested parallel directive, we may have to use the decl
   mappings created in CTX's parent context.  Suppose that we have the
   following parallel nesting (variable UIDs shown for clarity):

	#omp parallel shared(iD.1562)		-> outer parallel
	  iD.1562 = iD.1562 + 1;

	  #omp parallel shared (iD.1562)	-> inner parallel
	    iD.1562 = iD.1562 - 1;

   Each parallel structure will create a distinct .omp_data_s structure
   for copying iD.1562 in/out of the directive:

	outer parallel	.omp_data_s.1.i -> iD.1562
	inner parallel	.omp_data_s.2.i -> iD.1562

   A shared variable mapping will produce a copy-out operation before
   the parallel directive and a copy-in operation after it.  So, in
   this case we would have:

	.omp_data_o.1.i = iD.1562;
	#omp parallel shared(iD.1562)		-> outer parallel
	  .omp_data_i.1 = &.omp_data_o.1
	  .omp_data_i.1->i = .omp_data_i.1->i + 1;

	  .omp_data_o.2.i = iD.1562;		-> **
	  #omp parallel shared(iD.1562)		-> inner parallel
	    .omp_data_i.2 = &.omp_data_o.2
	    .omp_data_i.2->i = .omp_data_i.2->i - 1;

   ** This is a problem.  The symbol iD.1562 cannot be referenced
   inside the body of the outer parallel region.  But since we are
   emitting this copy operation while expanding the inner parallel
   directive, we need to access the CTX structure of the outer
   parallel directive to get the correct mapping:

	.omp_data_o.2.i = .omp_data_i.1->i

   Since there may be other workshare or parallel directives enclosing
   the parallel directive, it may be necessary to walk up the context
   parent chain.  This is not a problem in general because nested
   parallelism happens only rarely.  */

lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  gcc_assert (!ctx->is_nested || t || is_global_var (decl));

  return t ? t : decl;

/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
   in outer contexts.  */

maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  return t ? t : decl;
/* Construct the initialization value for reduction CLAUSE.  */
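/* (For instance: reduction(+:x), (|:x) and (^:x) initialize the private
   copy to 0; (*:x) and (&&:x) to 1; (&:x) to ~0; min and max start from
   the type's maximum/minimum value, or +/-Inf for floats when
   infinities are honored -- exactly the cases enumerated below.)  */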
omp_reduction_init (tree clause, tree type)
  switch (OMP_CLAUSE_REDUCTION_CODE (clause))
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
      return fold_convert (type, integer_zero_node);

    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
      return fold_convert (type, integer_one_node);

      return fold_convert (type, integer_minus_one_node);

      if (SCALAR_FLOAT_TYPE_P (type))
	  REAL_VALUE_TYPE max, min;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	      real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
	    real_maxval (&min, 1, TYPE_MODE (type));
	  return build_real (type, min);
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MIN_VALUE (type);

      if (SCALAR_FLOAT_TYPE_P (type))
	  REAL_VALUE_TYPE max;
	  if (HONOR_INFINITIES (TYPE_MODE (type)))
	    real_maxval (&max, 0, TYPE_MODE (type));
	  return build_real (type, max);
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MAX_VALUE (type);
/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
   from the receiver (aka child) side and initializers for REFERENCE_TYPE
   private variables.  Initialization statements go in ILIST, while calls
   to destructors go in DLIST.  */
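/* (A sketch of the output, under the simplifying assumption of a scalar
   "firstprivate (x)" on a parallel: ILIST receives roughly
   "x.1 = .omp_data_i->x;", while a C++ private with a non-trivial
   destructor would also have the destructor call appended to DLIST.)  */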
lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
  gimple_stmt_iterator diter;
  tree c, dtor, copyin_seq, x, ptr;
  bool copyin_by_ref = false;
  bool lastprivate_firstprivate = false;

  *dlist = gimple_seq_alloc ();
  diter = gsi_start (*dlist);

  /* Do all the fixed sized types in the first pass, and the variable sized
     types in the second pass.  This makes sure that the scalar arguments to
     the variable sized types are processed before we use them in the
     variable sized operations.  */
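  /* (E.g. for "private (n, a)" where A is a VLA "int a[n]": the scalar
     N must be set up in pass 0 so that the alloca of A in pass 1 can
     use the privatized size.)  */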
2210 for (pass = 0; pass < 2; ++pass)
2212 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2214 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2220 case OMP_CLAUSE_PRIVATE:
2221 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2224 case OMP_CLAUSE_SHARED:
2225 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2227 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2230 case OMP_CLAUSE_FIRSTPRIVATE:
2231 case OMP_CLAUSE_COPYIN:
2232 case OMP_CLAUSE_REDUCTION:
2234 case OMP_CLAUSE_LASTPRIVATE:
2235 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2237 lastprivate_firstprivate = true;
2246 new_var = var = OMP_CLAUSE_DECL (c);
2247 if (c_kind != OMP_CLAUSE_COPYIN)
2248 new_var = lookup_decl (var, ctx);
2250 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2255 else if (is_variable_sized (var))
2257 /* For variable sized types, we need to allocate the
2258 actual storage here. Call alloca and store the
2259 result in the pointer decl that we created elsewhere. */
2263 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2268 ptr = DECL_VALUE_EXPR (new_var);
2269 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2270 ptr = TREE_OPERAND (ptr, 0);
2271 gcc_assert (DECL_P (ptr));
2272 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2274 /* void *tmp = __builtin_alloca */
2275 stmt
2276 = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2277 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2278 gimple_add_tmp_var (tmp);
2279 gimple_call_set_lhs (stmt, tmp);
2281 gimple_seq_add_stmt (ilist, stmt);
2283 x = fold_convert (TREE_TYPE (ptr), tmp);
2284 gimplify_assign (ptr, x, ilist);
2287 else if (is_reference (var))
2289 /* For references that are being privatized for Fortran,
2290 allocate new backing storage for the new pointer
2291 variable. This allows us to avoid changing all the
2292 code that expects a pointer to something that expects
2293 a direct variable. Note that this doesn't apply to
2294 C++, since reference types are disallowed in data
2295 sharing clauses there, except for NRV optimized
2296 return values. */
2300 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2301 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2303 x = build_receiver_ref (var, false, ctx);
2304 x = build_fold_addr_expr (x);
2306 else if (TREE_CONSTANT (x))
2308 const char *name = NULL;
2309 if (DECL_NAME (var))
2310 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2312 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2313 name);
2314 gimple_add_tmp_var (x);
2315 x = build_fold_addr_expr_with_type (x, TREE_TYPE (new_var));
2319 x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2320 x = fold_convert (TREE_TYPE (new_var), x);
2323 gimplify_assign (new_var, x, ilist);
2325 new_var = build_fold_indirect_ref (new_var);
2327 else if (c_kind == OMP_CLAUSE_REDUCTION
2328 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2336 switch (OMP_CLAUSE_CODE (c))
2338 case OMP_CLAUSE_SHARED:
2339 /* Shared global vars are just accessed directly. */
2340 if (is_global_var (new_var))
2342 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2343 needs to be delayed until after fixup_child_record_type so
2344 that we get the correct type during the dereference. */
2345 by_ref = use_pointer_for_field (var, ctx);
2346 x = build_receiver_ref (var, by_ref, ctx);
2347 SET_DECL_VALUE_EXPR (new_var, x);
2348 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2350 /* ??? If VAR is not passed by reference, and the variable
2351 hasn't been initialized yet, then we'll get a warning for
2352 the store into the omp_data_s structure. Ideally, we'd be
2353 able to notice this and not store anything at all, but
2354 we're generating code too early. Suppress the warning. */
2356 TREE_NO_WARNING (var) = 1;
2359 case OMP_CLAUSE_LASTPRIVATE:
2360 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2364 case OMP_CLAUSE_PRIVATE:
2365 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2366 x = build_outer_var_ref (var, ctx);
2367 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2369 if (is_task_ctx (ctx))
2370 x = build_receiver_ref (var, false, ctx);
2372 x = build_outer_var_ref (var, ctx);
2376 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2378 gimplify_and_add (x, ilist);
2382 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2385 gimple_seq tseq = NULL;
2387 dtor = x;
2388 gimplify_stmt (&dtor, &tseq);
2389 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2393 case OMP_CLAUSE_FIRSTPRIVATE:
2394 if (is_task_ctx (ctx))
2396 if (is_reference (var) || is_variable_sized (var))
2398 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2400 || use_pointer_for_field (var, NULL))
2402 x = build_receiver_ref (var, false, ctx);
2403 SET_DECL_VALUE_EXPR (new_var, x);
2404 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2408 x = build_outer_var_ref (var, ctx);
2409 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2410 gimplify_and_add (x, ilist);
2414 case OMP_CLAUSE_COPYIN:
2415 by_ref = use_pointer_for_field (var, NULL);
2416 x = build_receiver_ref (var, by_ref, ctx);
2417 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2418 append_to_statement_list (x, &copyin_seq);
2419 copyin_by_ref |= by_ref;
2422 case OMP_CLAUSE_REDUCTION:
2423 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2425 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2426 x = build_outer_var_ref (var, ctx);
2428 if (is_reference (var))
2429 x = build_fold_addr_expr (x);
2430 SET_DECL_VALUE_EXPR (placeholder, x);
2431 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2432 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2433 gimple_seq_add_seq (ilist,
2434 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2435 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2436 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2440 x = omp_reduction_init (c, TREE_TYPE (new_var));
2441 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2442 gimplify_assign (new_var, x, ilist);
2452 /* The copyin sequence is not to be executed by the main thread, since
2453 that would result in self-copies. Perhaps not visible to scalars,
2454 but it certainly is to C++ operator=. */
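/* A sketch of the guard built below (conceptual source form, not the
   exact gimple):

     if (__builtin_omp_get_thread_num () != 0)
       copyin_seq;

   so only the non-master threads perform the COPYIN assignments. */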
2457 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2458 x = build2 (NE_EXPR, boolean_type_node, x,
2459 build_int_cst (TREE_TYPE (x), 0));
2460 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2461 gimplify_and_add (x, ilist);
2464 /* If any copyin variable is passed by reference, we must ensure the
2465 master thread doesn't modify it before it is copied over in all
2466 threads. Similarly for variables in both firstprivate and
2467 lastprivate clauses we need to ensure the lastprivate copying
2468 happens after firstprivate copying in all threads. */
2469 if (copyin_by_ref || lastprivate_firstprivate)
2470 gimplify_and_add (build_omp_barrier (), ilist);
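/* As a conceptual example of the receiver-side code this function
   produces (field and temporary names are illustrative): for
   #pragma omp parallel firstprivate (a), the child body begins with
   something like

     a.1 = .omp_data_i->a;

   i.e. the private copy is copy-constructed from the field the sender
   stored, while destructor calls, if any, are queued on DLIST. */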
2474 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2475 both parallel and workshare constructs. PREDICATE may be NULL if it's
2476 always true. */
2478 static void
2479 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2480 omp_context *ctx)
2482 tree x, c, label = NULL;
2483 bool par_clauses = false;
2485 /* Early exit if there are no lastprivate clauses. */
2486 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2487 if (clauses == NULL)
2489 /* If this was a workshare clause, see if it had been combined
2490 with its parallel. In that case, look for the clauses on the
2491 parallel statement itself. */
2492 if (is_parallel_ctx (ctx))
2493 return;
2495 ctx = ctx->outer;
2496 if (ctx == NULL || !is_parallel_ctx (ctx))
2497 return;
2499 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2500 OMP_CLAUSE_LASTPRIVATE);
2501 if (clauses == NULL)
2509 tree label_true, arm1, arm2;
2511 label = create_artificial_label ();
2512 label_true = create_artificial_label ();
2513 arm1 = TREE_OPERAND (predicate, 0);
2514 arm2 = TREE_OPERAND (predicate, 1);
2515 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2516 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2517 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2518 label_true, label);
2519 gimple_seq_add_stmt (stmt_list, stmt);
2520 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2523 for (c = clauses; c ;)
2527 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2529 var = OMP_CLAUSE_DECL (c);
2530 new_var = lookup_decl (var, ctx);
2532 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2534 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2535 gimple_seq_add_seq (stmt_list,
2536 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2538 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2540 x = build_outer_var_ref (var, ctx);
2541 if (is_reference (var))
2542 new_var = build_fold_indirect_ref (new_var);
2543 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2544 gimplify_and_add (x, stmt_list);
2546 c = OMP_CLAUSE_CHAIN (c);
2547 if (c == NULL && !par_clauses)
2549 /* If this was a workshare clause, see if it had been combined
2550 with its parallel. In that case, continue looking for the
2551 clauses also on the parallel statement itself. */
2552 if (is_parallel_ctx (ctx))
2553 break;
2555 ctx = ctx->outer;
2556 if (ctx == NULL || !is_parallel_ctx (ctx))
2557 break;
2559 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2560 OMP_CLAUSE_LASTPRIVATE);
2566 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
2570 /* Generate code to implement the REDUCTION clauses. */
2572 static void
2573 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2575 gimple_seq sub_seq = NULL;
2580 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2581 update in that case, otherwise use a lock. */
2582 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2583 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2585 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2587 /* Never use OMP_ATOMIC for array reductions. */
2597 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2599 tree var, ref, new_var;
2600 enum tree_code code;
2602 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2605 var = OMP_CLAUSE_DECL (c);
2606 new_var = lookup_decl (var, ctx);
2607 if (is_reference (var))
2608 new_var = build_fold_indirect_ref (new_var);
2609 ref = build_outer_var_ref (var, ctx);
2610 code = OMP_CLAUSE_REDUCTION_CODE (c);
2612 /* reduction(-:var) sums up the partial results, so it acts
2613 identically to reduction(+:var). */
2614 if (code == MINUS_EXPR)
2619 tree addr = build_fold_addr_expr (ref);
2621 addr = save_expr (addr);
2622 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2623 x = fold_build2 (code, TREE_TYPE (ref), ref, new_var);
2624 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2625 gimplify_and_add (x, stmt_seqp);
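/* The statement built above is conceptually an atomic update,
   i.e. roughly  #pragma omp atomic  *addr = *addr OP new_var;  */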
2629 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2631 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2633 if (is_reference (var))
2634 ref = build_fold_addr_expr (ref);
2635 SET_DECL_VALUE_EXPR (placeholder, ref);
2636 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2637 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2638 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2639 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2640 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2644 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2645 ref = build_outer_var_ref (var, ctx);
2646 gimplify_assign (ref, x, &sub_seq);
2650 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
2651 gimple_seq_add_stmt (stmt_seqp, stmt);
2653 gimple_seq_add_seq (stmt_seqp, sub_seq);
2655 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
2656 gimple_seq_add_stmt (stmt_seqp, stmt);
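/* The fallback emitted here is a critical-section style merge,
   conceptually:

     GOMP_atomic_start ();
     <merge each private copy into the outer variable>
     GOMP_atomic_end ();

   which libgomp implements by serializing the merges across threads. */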
2660 /* Generate code to implement the COPYPRIVATE clauses. */
2662 static void
2663 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2664 omp_context *ctx)
2668 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2670 tree var, new_var, ref, x;
2673 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2676 var = OMP_CLAUSE_DECL (c);
2677 by_ref = use_pointer_for_field (var, NULL);
2679 ref = build_sender_ref (var, ctx);
2680 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
2683 x = build_fold_addr_expr (new_var);
2684 x = fold_convert (TREE_TYPE (ref), x);
2686 gimplify_assign (ref, x, slist);
2688 ref = build_receiver_ref (var, false, ctx);
2691 ref = fold_convert (build_pointer_type (TREE_TYPE (new_var)), ref);
2692 ref = build_fold_indirect_ref (ref);
2694 if (is_reference (var))
2696 ref = fold_convert (TREE_TYPE (new_var), ref);
2697 ref = build_fold_indirect_ref (ref);
2698 new_var = build_fold_indirect_ref (new_var);
2700 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
2701 gimplify_and_add (x, rlist);
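/* Conceptually, for #pragma omp single copyprivate (x), the thread
   that executed the single region stores &x (or x itself, when it is
   not passed by reference) into the communication record via SLIST,
   and the other threads copy the broadcast value back out through
   RLIST. */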
2706 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2707 and REDUCTION from the sender (aka parent) side. */
2709 static void
2710 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2711 omp_context *ctx)
2715 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2717 tree val, ref, x, var;
2718 bool by_ref, do_in = false, do_out = false;
2720 switch (OMP_CLAUSE_CODE (c))
2722 case OMP_CLAUSE_PRIVATE:
2723 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2726 case OMP_CLAUSE_FIRSTPRIVATE:
2727 case OMP_CLAUSE_COPYIN:
2728 case OMP_CLAUSE_LASTPRIVATE:
2729 case OMP_CLAUSE_REDUCTION:
2735 val = OMP_CLAUSE_DECL (c);
2736 var = lookup_decl_in_outer_ctx (val, ctx);
2738 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2739 && is_global_var (var))
2741 if (is_variable_sized (val))
2743 by_ref = use_pointer_for_field (val, NULL);
2745 switch (OMP_CLAUSE_CODE (c))
2747 case OMP_CLAUSE_PRIVATE:
2748 case OMP_CLAUSE_FIRSTPRIVATE:
2749 case OMP_CLAUSE_COPYIN:
2753 case OMP_CLAUSE_LASTPRIVATE:
2754 if (by_ref || is_reference (val))
2756 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2763 if (lang_hooks.decls.omp_private_outer_ref (val))
2768 case OMP_CLAUSE_REDUCTION:
2770 do_out = !(by_ref || is_reference (val));
2779 ref = build_sender_ref (val, ctx);
2780 x = by_ref ? build_fold_addr_expr (var) : var;
2781 gimplify_assign (ref, x, ilist);
2782 if (is_task_ctx (ctx))
2783 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2788 ref = build_sender_ref (val, ctx);
2789 gimplify_assign (var, ref, olist);
2794 /* Generate code to implement SHARED from the sender (aka parent)
2795 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2796 list things that got automatically shared. */
2798 static void
2799 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2801 tree var, ovar, nvar, f, x, record_type;
2803 if (ctx->record_type == NULL)
2806 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2807 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
2809 ovar = DECL_ABSTRACT_ORIGIN (f);
2810 nvar = maybe_lookup_decl (ovar, ctx);
2811 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2814 /* If CTX is a nested parallel directive, find the immediately
2815 enclosing parallel or workshare construct that contains a
2816 mapping for OVAR. */
2817 var = lookup_decl_in_outer_ctx (ovar, ctx);
2819 if (use_pointer_for_field (ovar, ctx))
2821 x = build_sender_ref (ovar, ctx);
2822 var = build_fold_addr_expr (var);
2823 gimplify_assign (x, var, ilist);
2827 x = build_sender_ref (ovar, ctx);
2828 gimplify_assign (x, var, ilist);
2830 if (!TREE_READONLY (var)
2831 /* We don't need to receive a new reference to a result
2832 or parm decl. In fact we may not store to it as we will
2833 invalidate any pending RSO and generate wrong gimple
2834 during inlining. */
2835 && !((TREE_CODE (var) == RESULT_DECL
2836 || TREE_CODE (var) == PARM_DECL)
2837 && DECL_BY_REFERENCE (var)))
2839 x = build_sender_ref (ovar, ctx);
2840 gimplify_assign (var, x, olist);
2847 /* A convenience function to build an empty GIMPLE_COND with just the
2848 condition. */
2850 static gimple
2851 gimple_build_cond_empty (tree cond)
2853 enum tree_code pred_code;
2856 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2857 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2861 /* Build the function calls to GOMP_parallel_start etc to actually
2862 generate the parallel operation. REGION is the parallel region
2863 being expanded. BB is the block where to insert the code. WS_ARGS
2864 will be set if this is a call to a combined parallel+workshare
2865 construct; it contains the list of additional arguments needed by
2866 the workshare construct. */
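/* As a sketch, for the plain (non-combined) case the sequence emitted
   into BB looks like:

     GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
     child_fn (&.omp_data_o);
     GOMP_parallel_end ();

   where num_threads is 0 when the runtime should pick the team size,
   and the data argument is NULL when nothing needs to be marshalled. */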
2868 static void
2869 expand_parallel_call (struct omp_region *region, basic_block bb,
2870 gimple entry_stmt, tree ws_args)
2872 tree t, t1, t2, val, cond, c, clauses;
2873 gimple_stmt_iterator gsi;
2877 clauses = gimple_omp_parallel_clauses (entry_stmt);
2879 /* Determine what flavor of GOMP_parallel_start we will be
2880 emitting. */
2881 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2882 if (is_combined_parallel (region))
2884 switch (region->inner->type)
2886 case GIMPLE_OMP_FOR:
2887 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2888 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2889 + (region->inner->sched_kind
2890 == OMP_CLAUSE_SCHEDULE_RUNTIME
2891 ? 3 : region->inner->sched_kind);
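/* This arithmetic assumes the BUILT_IN_GOMP_PARALLEL_LOOP_*_START
   enumerators follow the order of the schedule kinds (static,
   dynamic, guided), with the runtime variant three slots past the
   static one -- hence the "? 3 :" mapping above. */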
2893 case GIMPLE_OMP_SECTIONS:
2894 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2901 /* By default, the value of NUM_THREADS is zero (selected at run time)
2902 and there is no conditional. */
2903 cond = NULL_TREE;
2904 val = build_int_cst (unsigned_type_node, 0);
2906 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2907 if (c)
2908 cond = OMP_CLAUSE_IF_EXPR (c);
2910 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2911 if (c)
2912 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2914 /* Ensure 'val' is of the correct type. */
2915 val = fold_convert (unsigned_type_node, val);
2917 /* If we found the clause 'if (cond)', build either
2918 (cond != 0) or (cond ? val : 1u). */
2921 gimple_stmt_iterator gsi;
2923 cond = gimple_boolify (cond);
2925 if (integer_zerop (val))
2926 val = fold_build2 (EQ_EXPR, unsigned_type_node, cond,
2927 build_int_cst (TREE_TYPE (cond), 0));
2930 basic_block cond_bb, then_bb, else_bb;
2931 edge e, e_then, e_else;
2932 tree tmp_then, tmp_else, tmp_join, tmp_var;
2934 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2935 if (gimple_in_ssa_p (cfun))
2937 tmp_then = make_ssa_name (tmp_var, NULL);
2938 tmp_else = make_ssa_name (tmp_var, NULL);
2939 tmp_join = make_ssa_name (tmp_var, NULL);
2948 e = split_block (bb, NULL);
2953 then_bb = create_empty_bb (cond_bb);
2954 else_bb = create_empty_bb (then_bb);
2955 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
2956 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
2958 stmt = gimple_build_cond_empty (cond);
2959 gsi = gsi_start_bb (cond_bb);
2960 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2962 gsi = gsi_start_bb (then_bb);
2963 stmt = gimple_build_assign (tmp_then, val);
2964 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2966 gsi = gsi_start_bb (else_bb);
2967 stmt = gimple_build_assign
2968 (tmp_else, build_int_cst (unsigned_type_node, 1));
2969 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2971 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
2972 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
2973 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
2974 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
2976 if (gimple_in_ssa_p (cfun))
2978 gimple phi = create_phi_node (tmp_join, bb);
2979 SSA_NAME_DEF_STMT (tmp_join) = phi;
2980 add_phi_arg (phi, tmp_then, e_then);
2981 add_phi_arg (phi, tmp_else, e_else);
2987 gsi = gsi_start_bb (bb);
2988 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
2989 false, GSI_CONTINUE_LINKING);
2992 gsi = gsi_last_bb (bb);
2993 t = gimple_omp_parallel_data_arg (entry_stmt);
2994 if (t == NULL)
2995 t1 = null_pointer_node;
2996 else
2997 t1 = build_fold_addr_expr (t);
2998 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3002 tree args = tree_cons (NULL, t2,
3003 tree_cons (NULL, t1,
3004 tree_cons (NULL, val, ws_args)));
3005 t = build_function_call_expr (built_in_decls[start_ix], args);
3008 t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
3010 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3011 false, GSI_CONTINUE_LINKING);
3013 t = gimple_omp_parallel_data_arg (entry_stmt);
3014 if (t == NULL)
3015 t = null_pointer_node;
3016 else
3017 t = build_fold_addr_expr (t);
3018 t = build_call_expr (gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3019 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3020 false, GSI_CONTINUE_LINKING);
3022 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
3023 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3024 false, GSI_CONTINUE_LINKING);
3028 /* Build the function call to GOMP_task to actually
3029 generate the task operation. BB is the block where to insert the code. */
3031 static void
3032 expand_task_call (basic_block bb, gimple entry_stmt)
3034 tree t, t1, t2, t3, flags, cond, c, clauses;
3035 gimple_stmt_iterator gsi;
3037 clauses = gimple_omp_task_clauses (entry_stmt);
3039 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3040 if (c)
3041 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3042 else
3043 cond = boolean_true_node;
3045 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3046 flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
3048 gsi = gsi_last_bb (bb);
3049 t = gimple_omp_task_data_arg (entry_stmt);
3050 if (t == NULL)
3051 t2 = null_pointer_node;
3052 else
3053 t2 = build_fold_addr_expr (t);
3054 t1 = build_fold_addr_expr (gimple_omp_task_child_fn (entry_stmt));
3055 t = gimple_omp_task_copy_fn (entry_stmt);
3056 if (t == NULL)
3057 t3 = null_pointer_node;
3058 else
3059 t3 = build_fold_addr_expr (t);
3061 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
3062 gimple_omp_task_arg_size (entry_stmt),
3063 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3065 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3066 false, GSI_CONTINUE_LINKING);
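/* The call built above has the shape

     GOMP_task (child_fn, &.omp_data_o, cpyfn, arg_size, arg_align,
                if_cond, flags);

   where cpyfn is NULL when a bitwise copy of the argument block
   suffices, and bit zero of flags encodes the untied clause. */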
3070 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3071 catch handler and return it. This prevents programs from violating the
3072 structured block semantics with throws. */
3074 static gimple_seq
3075 maybe_catch_exception (gimple_seq body)
3079 if (!flag_exceptions)
3080 return body;
3082 if (lang_protect_cleanup_actions)
3083 t = lang_protect_cleanup_actions ();
3084 else
3085 t = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
3087 f = gimple_build_eh_filter (NULL, gimple_seq_alloc_with_stmt (t));
3088 gimple_eh_filter_set_must_not_throw (f, true);
3090 t = gimple_build_try (body, gimple_seq_alloc_with_stmt (f),
3093 return gimple_seq_alloc_with_stmt (t);
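/* Conceptually the wrapping corresponds to

     try { BODY; } catch (...) { <protect_cleanup_actions>; }

   where the handler is the language's cleanup hook (for C++, a call
   to std::terminate) or __builtin_trap as the fallback, and the
   MUST_NOT_THROW filter keeps exceptions from escaping the
   structured block. */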
3096 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
3098 static tree
3099 list2chain (tree list)
3103 for (t = list; t; t = TREE_CHAIN (t))
3105 tree var = TREE_VALUE (t);
3106 if (TREE_CHAIN (t))
3107 TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
3108 else
3109 TREE_CHAIN (var) = NULL_TREE;
3112 return list ? TREE_VALUE (list) : NULL_TREE;
3116 /* Remove barriers in REGION->EXIT's block. Note that this is only
3117 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3118 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
3119 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
3120 removed. */
3122 static void
3123 remove_exit_barrier (struct omp_region *region)
3125 gimple_stmt_iterator gsi;
3126 basic_block exit_bb;
3130 int any_addressable_vars = -1;
3132 exit_bb = region->exit;
3134 /* If the parallel region doesn't return, we don't have REGION->EXIT
3135 block at all. */
3136 if (exit_bb == NULL)
3137 return;
3139 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3140 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3141 statements that can appear in between are extremely limited -- no
3142 memory operations at all. Here, we allow nothing at all, so the
3143 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3144 gsi = gsi_last_bb (exit_bb);
3145 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3146 gsi_prev (&gsi);
3147 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3150 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3152 gsi = gsi_last_bb (e->src);
3153 if (gsi_end_p (gsi))
3155 stmt = gsi_stmt (gsi);
3156 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3157 && !gimple_omp_return_nowait_p (stmt))
3159 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3160 in many cases. If there could be tasks queued, the barrier
3161 might be needed to let the tasks run before some local
3162 variable of the parallel that the task uses as shared
3163 runs out of scope. The task can be spawned either
3164 from within current function (this would be easy to check)
3165 or from some function it calls and gets passed an address
3166 of such a variable. */
3167 if (any_addressable_vars < 0)
3169 gimple parallel_stmt = last_stmt (region->entry);
3170 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3171 tree local_decls = DECL_STRUCT_FUNCTION (child_fun)->local_decls;
3174 any_addressable_vars = 0;
3175 for (; local_decls; local_decls = TREE_CHAIN (local_decls))
3176 if (TREE_ADDRESSABLE (TREE_VALUE (local_decls)))
3178 any_addressable_vars = 1;
3181 for (block = gimple_block (stmt);
3182 !any_addressable_vars
3184 && TREE_CODE (block) == BLOCK;
3185 block = BLOCK_SUPERCONTEXT (block))
3187 for (local_decls = BLOCK_VARS (block);
3189 local_decls = TREE_CHAIN (local_decls))
3190 if (TREE_ADDRESSABLE (local_decls))
3192 any_addressable_vars = 1;
3195 if (block == gimple_block (parallel_stmt))
3199 if (!any_addressable_vars)
3200 gimple_omp_return_set_nowait (stmt);
3205 static void
3206 remove_exit_barriers (struct omp_region *region)
3208 if (region->type == GIMPLE_OMP_PARALLEL)
3209 remove_exit_barrier (region);
3213 region = region->inner;
3214 remove_exit_barriers (region);
3215 while (region->next)
3217 region = region->next;
3218 remove_exit_barriers (region);
3223 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3224 calls. These can't be declared as const functions, but
3225 within one parallel body they are constant, so they can be
3226 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3227 which are declared const. Similarly for task body, except
3228 that in untied task omp_get_thread_num () can change at any task
3229 scheduling point. */
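/* For example, within a single parallel body

     i = omp_get_thread_num ();
     ...
     j = omp_get_thread_num ();

   both calls may be redirected to the const builtin so later passes
   can CSE them into one read. */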
3231 static void
3232 optimize_omp_library_calls (gimple entry_stmt)
3235 gimple_stmt_iterator gsi;
3236 tree thr_num_id
3237 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
3238 tree num_thr_id
3239 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
3240 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3241 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3242 OMP_CLAUSE_UNTIED) != NULL);
3245 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3247 gimple call = gsi_stmt (gsi);
3250 if (is_gimple_call (call)
3251 && (decl = gimple_call_fndecl (call))
3252 && DECL_EXTERNAL (decl)
3253 && TREE_PUBLIC (decl)
3254 && DECL_INITIAL (decl) == NULL)
3258 if (DECL_NAME (decl) == thr_num_id)
3260 /* In #pragma omp task untied omp_get_thread_num () can change
3261 during the execution of the task region. */
3264 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
3266 else if (DECL_NAME (decl) == num_thr_id)
3267 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
3271 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3272 || gimple_call_num_args (call) != 0)
3275 if (flag_exceptions && !TREE_NOTHROW (decl))
3278 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3279 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl)))
3280 != TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (built_in))))
3283 gimple_call_set_fndecl (call, built_in);
3288 /* Expand the OpenMP parallel or task directive starting at REGION. */
3290 static void
3291 expand_omp_taskreg (struct omp_region *region)
3293 basic_block entry_bb, exit_bb, new_bb;
3294 struct function *child_cfun;
3295 tree child_fn, block, t, ws_args, *tp;
3296 gimple_stmt_iterator gsi;
3297 gimple entry_stmt, stmt;
3300 entry_stmt = last_stmt (region->entry);
3301 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3302 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3303 /* If this function has been already instrumented, make sure
3304 the child function isn't instrumented again. */
3305 child_cfun->after_tree_profile = cfun->after_tree_profile;
3307 entry_bb = region->entry;
3308 exit_bb = region->exit;
3310 if (is_combined_parallel (region))
3311 ws_args = region->ws_args;
3313 ws_args = NULL_TREE;
3315 if (child_cfun->cfg)
3317 /* Due to inlining, it may happen that we have already outlined
3318 the region, in which case all we need to do is make the
3319 sub-graph unreachable and emit the parallel call. */
3320 edge entry_succ_e, exit_succ_e;
3321 gimple_stmt_iterator gsi;
3323 entry_succ_e = single_succ_edge (entry_bb);
3325 gsi = gsi_last_bb (entry_bb);
3326 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3327 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3328 gsi_remove (&gsi, true);
3333 exit_succ_e = single_succ_edge (exit_bb);
3334 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3336 remove_edge_and_dominated_blocks (entry_succ_e);
3340 /* If the parallel region needs data sent from the parent
3341 function, then the very first statement (except possible
3342 tree profile counter updates) of the parallel body
3343 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3344 &.OMP_DATA_O is passed as an argument to the child function,
3345 we need to replace it with the argument as seen by the child
3348 In most cases, this will end up being the identity assignment
3349 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3350 a function call that has been inlined, the original PARM_DECL
3351 .OMP_DATA_I may have been converted into a different local
3352 variable, in which case we need to keep the assignment. */
3353 if (gimple_omp_taskreg_data_arg (entry_stmt))
3355 basic_block entry_succ_bb = single_succ (entry_bb);
3356 gimple_stmt_iterator gsi;
3358 gimple parcopy_stmt = NULL;
3360 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3364 gcc_assert (!gsi_end_p (gsi));
3365 stmt = gsi_stmt (gsi);
3366 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3369 if (gimple_num_ops (stmt) == 2)
3371 tree arg = gimple_assign_rhs1 (stmt);
3373 /* We ignore the subcode because we're
3374 effectively doing a STRIP_NOPS. */
3376 if (TREE_CODE (arg) == ADDR_EXPR
3377 && TREE_OPERAND (arg, 0)
3378 == gimple_omp_taskreg_data_arg (entry_stmt))
3380 parcopy_stmt = stmt;
3386 gcc_assert (parcopy_stmt != NULL);
3387 arg = DECL_ARGUMENTS (child_fn);
3389 if (!gimple_in_ssa_p (cfun))
3391 if (gimple_assign_lhs (parcopy_stmt) == arg)
3392 gsi_remove (&gsi, true);
3395 /* ?? Is setting the subcode really necessary ?? */
3396 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3397 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3402 /* If we are in ssa form, we must load the value from the default
3403 definition of the argument. That should not be defined now,
3404 since the argument is not used uninitialized. */
3405 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3406 narg = make_ssa_name (arg, gimple_build_nop ());
3407 set_default_def (arg, narg);
3408 /* ?? Is setting the subcode really necessary ?? */
3409 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3410 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3411 update_stmt (parcopy_stmt);
3415 /* Declare local variables needed in CHILD_CFUN. */
3416 block = DECL_INITIAL (child_fn);
3417 BLOCK_VARS (block) = list2chain (child_cfun->local_decls);
3418 /* The gimplifier could record temporaries in parallel/task block
3419 rather than in containing function's local_decls chain,
3420 which would mean cgraph missed finalizing them. Do it now. */
3421 for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
3422 if (TREE_CODE (t) == VAR_DECL
3423 && TREE_STATIC (t)
3424 && !DECL_EXTERNAL (t))
3425 varpool_finalize_decl (t);
3426 DECL_SAVED_TREE (child_fn) = NULL;
3427 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3428 TREE_USED (block) = 1;
3430 /* Reset DECL_CONTEXT on function arguments. */
3431 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
3432 DECL_CONTEXT (t) = child_fn;
3434 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3435 so that it can be moved to the child function. */
3436 gsi = gsi_last_bb (entry_bb);
3437 stmt = gsi_stmt (gsi);
3438 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3439 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3440 gsi_remove (&gsi, true);
3441 e = split_block (entry_bb, stmt);
3443 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3445 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3448 gsi = gsi_last_bb (exit_bb);
3449 gcc_assert (!gsi_end_p (gsi)
3450 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3451 stmt = gimple_build_return (NULL);
3452 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3453 gsi_remove (&gsi, true);
3456 /* Move the parallel region into CHILD_CFUN. */
3458 if (gimple_in_ssa_p (cfun))
3460 push_cfun (child_cfun);
3461 init_tree_ssa (child_cfun);
3462 init_ssa_operands ();
3463 cfun->gimple_df->in_ssa_p = true;
3468 block = gimple_block (entry_stmt);
3470 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3472 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3474 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3475 for (tp = &child_cfun->local_decls; *tp; )
3476 if (DECL_CONTEXT (TREE_VALUE (*tp)) != cfun->decl)
3477 tp = &TREE_CHAIN (*tp);
3478 else
3479 *tp = TREE_CHAIN (*tp);
3481 /* Inform the callgraph about the new function. */
3482 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3483 = cfun->curr_properties;
3484 cgraph_add_new_function (child_fn, true);
3486 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3487 fixed in a following pass. */
3488 push_cfun (child_cfun);
3490 optimize_omp_library_calls (entry_stmt);
3491 rebuild_cgraph_edges ();
3493 /* Some EH regions might become dead, see PR34608. If
3494 pass_cleanup_cfg isn't the first pass to happen with the
3495 new child, these dead EH edges might cause problems.
3496 Clean them up now. */
3497 if (flag_exceptions)
3500 tree save_current = current_function_decl;
3501 bool changed = false;
3503 current_function_decl = child_fn;
3505 changed |= gimple_purge_dead_eh_edges (bb);
3507 cleanup_tree_cfg ();
3508 current_function_decl = save_current;
3513 /* Emit a library call to launch the children threads. */
3514 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3515 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3517 expand_task_call (new_bb, entry_stmt);
3518 update_ssa (TODO_update_ssa_only_virtuals);
3522 /* A subroutine of expand_omp_for. Generate code for a parallel
3523 loop with any schedule. Given parameters:
3525 for (V = N1; V cond N2; V += STEP) BODY;
3527 where COND is "<" or ">", we generate pseudocode
3529 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3530 if (more) goto L0; else goto L3;
3531 L0:
3532 V = istart0;
3533 iend = iend0;
3534 L1:
3535 BODY;
3536 V += STEP;
3537 if (V cond iend) goto L1; else goto L2;
3538 L2:
3539 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3540 L3:
3542 If this is a combined omp parallel loop, instead of the call to
3543 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3545 For collapsed loops, given parameters:
3547 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3548 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3549 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3550 BODY;
3552 we generate pseudocode
3554 if (cond3 is <)
3555 adj = STEP3 - 1;
3556 else
3557 adj = STEP3 + 1;
3558 count3 = (adj + N32 - N31) / STEP3;
3559 if (cond2 is <)
3560 adj = STEP2 - 1;
3561 else
3562 adj = STEP2 + 1;
3563 count2 = (adj + N22 - N21) / STEP2;
3564 if (cond1 is <)
3565 adj = STEP1 - 1;
3566 else
3567 adj = STEP1 + 1;
3568 count1 = (adj + N12 - N11) / STEP1;
3569 count = count1 * count2 * count3;
3570 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3571 if (more) goto L0; else goto L3;
3572 L0:
3573 V = istart0;
3574 T = V;
3575 V3 = N31 + (T % count3) * STEP3;
3576 T = T / count3;
3577 V2 = N21 + (T % count2) * STEP2;
3578 T = T / count2;
3579 V1 = N11 + T * STEP1;
3580 iend = iend0;
3581 L1:
3582 BODY;
3583 V += 1;
3584 if (V < iend) goto L10; else goto L2;
3585 L10:
3586 V3 += STEP3;
3587 if (V3 cond3 N32) goto L1; else goto L11;
3588 L11:
3589 V3 = N31;
3590 V2 += STEP2;
3591 if (V2 cond2 N22) goto L1; else goto L12;
3592 L12:
3593 V2 = N21;
3594 V1 += STEP1;
3595 goto L1;
3596 L2:
3597 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3598 L3:
3600 */
3602 static void
3603 expand_omp_for_generic (struct omp_region *region,
3604 struct omp_for_data *fd,
3605 enum built_in_function start_fn,
3606 enum built_in_function next_fn)
3608 tree type, istart0, iend0, iend;
3609 tree t, vmain, vback, bias = NULL_TREE;
3610 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3611 basic_block l2_bb = NULL, l3_bb = NULL;
3612 gimple_stmt_iterator gsi;
3614 bool in_combined_parallel = is_combined_parallel (region);
3615 bool broken_loop = region->cont == NULL;
3617 tree *counts = NULL;
3620 gcc_assert (!broken_loop || !in_combined_parallel);
3621 gcc_assert (fd->iter_type == long_integer_type_node
3622 || !in_combined_parallel);
3624 type = TREE_TYPE (fd->loop.v);
3625 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3626 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3627 TREE_ADDRESSABLE (istart0) = 1;
3628 TREE_ADDRESSABLE (iend0) = 1;
3629 if (gimple_in_ssa_p (cfun))
3631 add_referenced_var (istart0);
3632 add_referenced_var (iend0);
3635 /* See if we need to bias by LLONG_MIN. */
3636 if (fd->iter_type == long_long_unsigned_type_node
3637 && TREE_CODE (type) == INTEGER_TYPE
3638 && !TYPE_UNSIGNED (type))
3642 if (fd->loop.cond_code == LT_EXPR)
3645 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3649 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3652 if (TREE_CODE (n1) != INTEGER_CST
3653 || TREE_CODE (n2) != INTEGER_CST
3654 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3655 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
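/* Illustrative example (assuming 64-bit long long): a loop such as
   for (v = -10LL; v < 10LL; v++) has bounds of mixed sign, so both
   endpoints are biased by LLONG_MIN into the unsigned range that the
   GOMP_loop_ull_* functions expect; the bias is subtracted again when
   istart0/iend0 are converted back to V's type. */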
3658 entry_bb = region->entry;
3659 cont_bb = region->cont;
3661 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3662 gcc_assert (broken_loop
3663 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3664 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3665 l1_bb = single_succ (l0_bb);
3668 l2_bb = create_empty_bb (cont_bb);
3669 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3670 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3674 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3675 exit_bb = region->exit;
3677 gsi = gsi_last_bb (entry_bb);
3679 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3680 if (fd->collapse > 1)
3682 /* collapsed loops need work for expansion in SSA form. */
3683 gcc_assert (!gimple_in_ssa_p (cfun));
3684 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3685 for (i = 0; i < fd->collapse; i++)
3687 tree itype = TREE_TYPE (fd->loops[i].v);
3689 if (POINTER_TYPE_P (itype))
3690 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3691 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3693 t = fold_build2 (PLUS_EXPR, itype,
3694 fold_convert (itype, fd->loops[i].step), t);
3695 t = fold_build2 (PLUS_EXPR, itype, t,
3696 fold_convert (itype, fd->loops[i].n2));
3697 t = fold_build2 (MINUS_EXPR, itype, t,
3698 fold_convert (itype, fd->loops[i].n1));
3699 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3700 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3701 fold_build1 (NEGATE_EXPR, itype, t),
3702 fold_build1 (NEGATE_EXPR, itype,
3703 fold_convert (itype,
3704 fd->loops[i].step)));
3706 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3707 fold_convert (itype, fd->loops[i].step));
3708 t = fold_convert (type, t);
3709 if (TREE_CODE (t) == INTEGER_CST)
3713 counts[i] = create_tmp_var (type, ".count");
3714 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3715 true, GSI_SAME_STMT);
3716 stmt = gimple_build_assign (counts[i], t);
3717 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3719 if (SSA_VAR_P (fd->loop.n2))
3725 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3726 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3727 true, GSI_SAME_STMT);
3729 stmt = gimple_build_assign (fd->loop.n2, t);
3730 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3734 if (in_combined_parallel)
3736 /* In a combined parallel loop, emit a call to
3737 GOMP_loop_foo_next. */
3738 t = build_call_expr (built_in_decls[next_fn], 2,
3739 build_fold_addr_expr (istart0),
3740 build_fold_addr_expr (iend0));
3744 tree t0, t1, t2, t3, t4;
3745 /* If this is not a combined parallel loop, emit a call to
3746 GOMP_loop_foo_start in ENTRY_BB. */
3747 t4 = build_fold_addr_expr (iend0);
3748 t3 = build_fold_addr_expr (istart0);
3749 t2 = fold_convert (fd->iter_type, fd->loop.step);
3750 if (POINTER_TYPE_P (type)
3751 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3753 /* Avoid casting pointers to integer of a different size. */
3755 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3756 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3757 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3761 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3762 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3766 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3767 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3769 if (fd->iter_type == long_integer_type_node)
3773 t = fold_convert (fd->iter_type, fd->chunk_size);
3774 t = build_call_expr (built_in_decls[start_fn], 6,
3775 t0, t1, t2, t, t3, t4);
3778 t = build_call_expr (built_in_decls[start_fn], 5,
3779 t0, t1, t2, t3, t4);
3786 /* The GOMP_loop_ull_*start functions have additional boolean
3787 argument, true for < loops and false for > loops.
3788 In Fortran, the C bool type can be different from
3789 boolean_type_node. */
3790 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3791 t5 = build_int_cst (c_bool_type,
3792 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3795 t = fold_convert (fd->iter_type, fd->chunk_size);
3796 t = build_call_expr (built_in_decls[start_fn], 7,
3797 t5, t0, t1, t2, t, t3, t4);
3800 t = build_call_expr (built_in_decls[start_fn], 6,
3801 t5, t0, t1, t2, t3, t4);
3804 if (TREE_TYPE (t) != boolean_type_node)
3805 t = fold_build2 (NE_EXPR, boolean_type_node,
3806 t, build_int_cst (TREE_TYPE (t), 0));
3807 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3808 true, GSI_SAME_STMT);
3809 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3811 /* Remove the GIMPLE_OMP_FOR statement. */
3812 gsi_remove (&gsi, true);
3814 /* Iteration setup for sequential loop goes in L0_BB. */
3815 gsi = gsi_start_bb (l0_bb);
3817 t = fold_convert (type, fold_build2 (MINUS_EXPR, fd->iter_type,
3820 t = fold_convert (type, istart0);
3821 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3822 false, GSI_CONTINUE_LINKING);
3823 stmt = gimple_build_assign (fd->loop.v, t);
3824 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3827 t = fold_convert (type, fold_build2 (MINUS_EXPR, fd->iter_type,
3830 t = fold_convert (type, iend0);
3831 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3832 false, GSI_CONTINUE_LINKING);
3833 if (fd->collapse > 1)
3835 tree tem = create_tmp_var (type, ".tem");
3837 stmt = gimple_build_assign (tem, fd->loop.v);
3838 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3839 for (i = fd->collapse - 1; i >= 0; i--)
3841 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3843 if (POINTER_TYPE_P (vtype))
3844 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3845 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3846 t = fold_convert (itype, t);
3847 t = fold_build2 (MULT_EXPR, itype, t, fd->loops[i].step);
3848 if (POINTER_TYPE_P (vtype))
3849 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3850 fd->loops[i].n1, fold_convert (sizetype, t));
3852 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3853 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3854 false, GSI_CONTINUE_LINKING);
3855 stmt = gimple_build_assign (fd->loops[i].v, t);
3856 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3859 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3860 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3861 false, GSI_CONTINUE_LINKING);
3862 stmt = gimple_build_assign (tem, t);
3863 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3870 /* Code to control the increment and predicate for the sequential
3871 loop goes in the CONT_BB. */
3872 gsi = gsi_last_bb (cont_bb);
3873 stmt = gsi_stmt (gsi);
3874 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3875 vmain = gimple_omp_continue_control_use (stmt);
3876 vback = gimple_omp_continue_control_def (stmt);
3878 if (POINTER_TYPE_P (type))
3879 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
3880 fold_convert (sizetype, fd->loop.step));
3882 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3883 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3884 true, GSI_SAME_STMT);
3885 stmt = gimple_build_assign (vback, t);
3886 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3888 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3889 stmt = gimple_build_cond_empty (t);
3890 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3892 /* Remove GIMPLE_OMP_CONTINUE. */
3893 gsi_remove (&gsi, true);
3895 if (fd->collapse > 1)
3897 basic_block last_bb, bb;
3900 for (i = fd->collapse - 1; i >= 0; i--)
3902 tree vtype = TREE_TYPE (fd->loops[i].v);
3904 bb = create_empty_bb (last_bb);
3905 gsi = gsi_start_bb (bb);
3907 if (i < fd->collapse - 1)
3909 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3910 e->probability = REG_BR_PROB_BASE / 8;
3912 t = fd->loops[i + 1].n1;
3913 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3914 false, GSI_CONTINUE_LINKING);
3915 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
3916 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3921 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
3923 if (POINTER_TYPE_P (vtype))
3924 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3926 fold_convert (sizetype, fd->loops[i].step));
3928 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
3930 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3931 false, GSI_CONTINUE_LINKING);
3932 stmt = gimple_build_assign (fd->loops[i].v, t);
3933 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3937 t = fd->loops[i].n2;
3938 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3939 false, GSI_CONTINUE_LINKING);
3940 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
3942 stmt = gimple_build_cond_empty (t);
3943 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3944 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
3945 e->probability = REG_BR_PROB_BASE * 7 / 8;
3948 make_edge (bb, l1_bb, EDGE_FALLTHRU);
3953 /* Emit code to get the next parallel iteration in L2_BB. */
3954 gsi = gsi_start_bb (l2_bb);
3956 t = build_call_expr (built_in_decls[next_fn], 2,
3957 build_fold_addr_expr (istart0),
3958 build_fold_addr_expr (iend0));
3959 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3960 false, GSI_CONTINUE_LINKING);
3961 if (TREE_TYPE (t) != boolean_type_node)
3962 t = fold_build2 (NE_EXPR, boolean_type_node,
3963 t, build_int_cst (TREE_TYPE (t), 0));
3964 stmt = gimple_build_cond_empty (t);
3965 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3968 /* Add the loop cleanup function. */
3969 gsi = gsi_last_bb (exit_bb);
3970 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
3971 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
3973 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
3974 stmt = gimple_build_call (t, 0);
3975 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3976 gsi_remove (&gsi, true);
3978 /* Connect the new blocks. */
3979 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
3980 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
3986 e = find_edge (cont_bb, l3_bb);
3987 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
3989 phis = phi_nodes (l3_bb);
3990 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
3992 gimple phi = gsi_stmt (gsi);
3993 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
3994 PHI_ARG_DEF_FROM_EDGE (phi, e));
3998 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
3999 if (fd->collapse > 1)
4001 e = find_edge (cont_bb, l1_bb);
4003 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4007 e = find_edge (cont_bb, l1_bb);
4008 e->flags = EDGE_TRUE_VALUE;
4010 e->probability = REG_BR_PROB_BASE * 7 / 8;
4011 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4012 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4014 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4015 recompute_dominator (CDI_DOMINATORS, l2_bb));
4016 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4017 recompute_dominator (CDI_DOMINATORS, l3_bb));
4018 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4019 recompute_dominator (CDI_DOMINATORS, l0_bb));
4020 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4021 recompute_dominator (CDI_DOMINATORS, l1_bb));
4026 /* A subroutine of expand_omp_for. Generate code for a parallel
4027 loop with static schedule and no specified chunk size. Given
4028 parameters:
4030 for (V = N1; V cond N2; V += STEP) BODY;
4032 where COND is "<" or ">", we generate pseudocode
4034 if (cond is <)
4035 adj = STEP - 1;
4036 else
4037 adj = STEP + 1;
4038 if ((__typeof (V)) -1 > 0 && cond is >)
4039 n = -(adj + N2 - N1) / -STEP;
4040 else
4041 n = (adj + N2 - N1) / STEP;
4042 q = n / nthreads;
4043 q += (q * nthreads != n);
4044 s0 = q * threadid;
4045 e0 = min(s0 + q, n);
4046 V = s0 * STEP + N1;
4047 if (s0 >= e0) goto L2; else goto L0;
4048 L0:
4049 e = e0 * STEP + N1;
4050 L1:
4051 BODY;
4052 V += STEP;
4053 if (V cond e) goto L1;
4054 L2:
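/* A worked instance of the partitioning above (illustrative numbers):
   n = 103 iterations over nthreads = 4 gives q = 25, bumped to 26
   because 25 * 4 != 103; the threads then cover [0,26), [26,52),
   [52,78) and [78,103), the last range clipped by the MIN against n. */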
4055 */
4057 static void
4058 expand_omp_for_static_nochunk (struct omp_region *region,
4059 struct omp_for_data *fd)
4061 tree n, q, s0, e0, e, t, nthreads, threadid;
4062 tree type, itype, vmain, vback;
4063 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
4065 gimple_stmt_iterator gsi;
4068 itype = type = TREE_TYPE (fd->loop.v);
4069 if (POINTER_TYPE_P (type))
4070 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4072 entry_bb = region->entry;
4073 cont_bb = region->cont;
4074 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4075 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4076 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4077 body_bb = single_succ (seq_start_bb);
4078 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4079 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4080 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4081 exit_bb = region->exit;
4083 /* Iteration space partitioning goes in ENTRY_BB. */
4084 gsi = gsi_last_bb (entry_bb);
4085 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4087 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4088 t = fold_convert (itype, t);
4089 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4090 true, GSI_SAME_STMT);
4092 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4093 t = fold_convert (itype, t);
4094 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4095 true, GSI_SAME_STMT);
4097 fd->loop.n1
4098 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4099 true, NULL_TREE, true, GSI_SAME_STMT);
4100 fd->loop.n2
4101 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4102 true, NULL_TREE, true, GSI_SAME_STMT);
4103 fd->loop.step
4104 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4105 true, NULL_TREE, true, GSI_SAME_STMT);
4107 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4108 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4109 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4110 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4111 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4112 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4113 fold_build1 (NEGATE_EXPR, itype, t),
4114 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4116 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4117 t = fold_convert (itype, t);
4118 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4120 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4121 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4123 t = fold_build2 (MULT_EXPR, itype, q, nthreads);
4124 t = fold_build2 (NE_EXPR, itype, t, n);
4125 t = fold_build2 (PLUS_EXPR, itype, q, t);
4126 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4128 t = build2 (MULT_EXPR, itype, q, threadid);
4129 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4131 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4132 t = fold_build2 (MIN_EXPR, itype, t, n);
4133 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4135 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4136 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4138 /* Remove the GIMPLE_OMP_FOR statement. */
4139 gsi_remove (&gsi, true);
4141 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4142 gsi = gsi_start_bb (seq_start_bb);
4144 t = fold_convert (itype, s0);
4145 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4146 if (POINTER_TYPE_P (type))
4147 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4148 fold_convert (sizetype, t));
4150 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4151 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4152 false, GSI_CONTINUE_LINKING);
4153 stmt = gimple_build_assign (fd->loop.v, t);
4154 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4156 t = fold_convert (itype, e0);
4157 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4158 if (POINTER_TYPE_P (type))
4159 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4160 fold_convert (sizetype, t));
4162 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4163 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4164 false, GSI_CONTINUE_LINKING);
4166 /* The code controlling the sequential loop replaces the
4167 GIMPLE_OMP_CONTINUE. */
4168 gsi = gsi_last_bb (cont_bb);
4169 stmt = gsi_stmt (gsi);
4170 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4171 vmain = gimple_omp_continue_control_use (stmt);
4172 vback = gimple_omp_continue_control_def (stmt);
4174 if (POINTER_TYPE_P (type))
4175 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
4176 fold_convert (sizetype, fd->loop.step));
4178 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4179 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4180 true, GSI_SAME_STMT);
4181 stmt = gimple_build_assign (vback, t);
4182 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4184 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4185 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4187 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4188 gsi_remove (&gsi, true);
4190 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4191 gsi = gsi_last_bb (exit_bb);
4192 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4193 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4194 false, GSI_SAME_STMT);
4195 gsi_remove (&gsi, true);
4197 /* Connect all the blocks. */
4198 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4199 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4201 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4202 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4204 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
4205 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4206 recompute_dominator (CDI_DOMINATORS, body_bb));
4207 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4208 recompute_dominator (CDI_DOMINATORS, fin_bb));
4212 /* A subroutine of expand_omp_for. Generate code for a parallel
4213 loop with static schedule and a specified chunk size. Given
4214 parameters:
4216 for (V = N1; V cond N2; V += STEP) BODY;
4218 where COND is "<" or ">", we generate pseudocode
4220 if (cond is <)
4221 adj = STEP - 1;
4222 else
4223 adj = STEP + 1;
4224 if ((__typeof (V)) -1 > 0 && cond is >)
4225 n = -(adj + N2 - N1) / -STEP;
4226 else
4227 n = (adj + N2 - N1) / STEP;
4228 trip = 0;
4229 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4230 here so that V is defined
4231 if the loop is not entered
4232 L0:
4233 s0 = (trip * nthreads + threadid) * CHUNK;
4234 e0 = min(s0 + CHUNK, n);
4235 if (s0 < n) goto L1; else goto L4;
4236 L1:
4237 V = s0 * STEP + N1;
4238 e = e0 * STEP + N1;
4239 L2:
4240 BODY;
4241 V += STEP;
4242 if (V cond e) goto L2; else goto L3;
4243 L3:
4244 trip += 1;
4245 goto L0;
4246 L4:
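/* A worked instance of the chunk assignment above (illustrative
   numbers): with CHUNK = 3 and nthreads = 2, thread 0 runs iterations
   [0,3), [6,9), ... and thread 1 runs [3,6), [9,12), ..., each
   increment of trip advancing a thread by nthreads * CHUNK
   iterations. */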
4248 */
4249 static void
4250 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4252 tree n, s0, e0, e, t;
4253 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4254 tree type, itype, v_main, v_back, v_extra;
4255 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4256 basic_block trip_update_bb, cont_bb, fin_bb;
4257 gimple_stmt_iterator si;
4261 itype = type = TREE_TYPE (fd->loop.v);
4262 if (POINTER_TYPE_P (type))
4263 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4265 entry_bb = region->entry;
4266 se = split_block (entry_bb, last_stmt (entry_bb));
4268 iter_part_bb = se->dest;
4269 cont_bb = region->cont;
4270 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4271 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4272 == FALLTHRU_EDGE (cont_bb)->dest);
4273 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4274 body_bb = single_succ (seq_start_bb);
4275 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4276 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4277 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4278 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4279 exit_bb = region->exit;
4281 /* Trip and adjustment setup goes in ENTRY_BB. */
4282 si = gsi_last_bb (entry_bb);
4283 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4285 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4286 t = fold_convert (itype, t);
4287 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4288 true, GSI_SAME_STMT);
4290 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4291 t = fold_convert (itype, t);
4292 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4293 true, GSI_SAME_STMT);
4295 fd->loop.n1
4296 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4297 true, NULL_TREE, true, GSI_SAME_STMT);
4298 fd->loop.n2
4299 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4300 true, NULL_TREE, true, GSI_SAME_STMT);
4301 fd->loop.step
4302 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4303 true, NULL_TREE, true, GSI_SAME_STMT);
4304 fd->chunk_size
4305 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4306 true, NULL_TREE, true, GSI_SAME_STMT);
4308 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4309 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4310 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4311 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4312 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4313 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4314 fold_build1 (NEGATE_EXPR, itype, t),
4315 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4316 else
4317 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4318 t = fold_convert (itype, t);
4319 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4320 true, GSI_SAME_STMT);
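/* Editorial note, not from the original source: a worked instance of the
   iteration-count formula above.  For "for (V = 0; V < 10; V += 3)" we have
   adj = STEP - 1 = 2, so n = (2 + 10 - 0) / 3 = 4, matching the four
   iterations V = 0, 3, 6, 9.  The negated form is used for unsigned types
   counting downward, where adj + N2 - N1 would otherwise wrap around.  */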
4322 trip_var = create_tmp_var (itype, ".trip");
4323 if (gimple_in_ssa_p (cfun))
4324 {
4325 add_referenced_var (trip_var);
4326 trip_init = make_ssa_name (trip_var, NULL);
4327 trip_main = make_ssa_name (trip_var, NULL);
4328 trip_back = make_ssa_name (trip_var, NULL);
4329 }
4330 else
4331 {
4332 trip_init = trip_var;
4333 trip_main = trip_var;
4334 trip_back = trip_var;
4335 }
4337 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4338 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4340 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4341 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4342 if (POINTER_TYPE_P (type))
4343 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4344 fold_convert (sizetype, t));
4345 else
4346 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4347 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4348 true, GSI_SAME_STMT);
4350 /* Remove the GIMPLE_OMP_FOR. */
4351 gsi_remove (&si, true);
4353 /* Iteration space partitioning goes in ITER_PART_BB. */
4354 si = gsi_last_bb (iter_part_bb);
4356 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4357 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4358 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4359 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4360 false, GSI_CONTINUE_LINKING);
4362 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4363 t = fold_build2 (MIN_EXPR, itype, t, n);
4364 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4365 false, GSI_CONTINUE_LINKING);
4367 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4368 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4370 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4371 si = gsi_start_bb (seq_start_bb);
4373 t = fold_convert (itype, s0);
4374 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4375 if (POINTER_TYPE_P (type))
4376 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4377 fold_convert (sizetype, t));
4378 else
4379 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4380 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4381 false, GSI_CONTINUE_LINKING);
4382 stmt = gimple_build_assign (fd->loop.v, t);
4383 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4385 t = fold_convert (itype, e0);
4386 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4387 if (POINTER_TYPE_P (type))
4388 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4389 fold_convert (sizetype, t));
4390 else
4391 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4392 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4393 false, GSI_CONTINUE_LINKING);
4395 /* The code controlling the sequential loop goes in CONT_BB,
4396 replacing the GIMPLE_OMP_CONTINUE. */
4397 si = gsi_last_bb (cont_bb);
4398 stmt = gsi_stmt (si);
4399 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4400 v_main = gimple_omp_continue_control_use (stmt);
4401 v_back = gimple_omp_continue_control_def (stmt);
4403 if (POINTER_TYPE_P (type))
4404 t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
4405 fold_convert (sizetype, fd->loop.step));
4406 else
4407 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4408 stmt = gimple_build_assign (v_back, t);
4409 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4411 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4412 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4414 /* Remove GIMPLE_OMP_CONTINUE. */
4415 gsi_remove (&si, true);
4417 /* Trip update code goes into TRIP_UPDATE_BB. */
4418 si = gsi_start_bb (trip_update_bb);
4420 t = build_int_cst (itype, 1);
4421 t = build2 (PLUS_EXPR, itype, trip_main, t);
4422 stmt = gimple_build_assign (trip_back, t);
4423 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4425 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4426 si = gsi_last_bb (exit_bb);
4427 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4428 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4429 false, GSI_SAME_STMT);
4430 gsi_remove (&si, true);
4432 /* Connect the new blocks. */
4433 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4434 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4436 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4437 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4439 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4441 if (gimple_in_ssa_p (cfun))
4442 {
4443 gimple_stmt_iterator psi;
4444 gimple phi, nphi;
4445 edge re, ene;
4446 edge_var_map_vector head;
4447 edge_var_map *vm;
4448 size_t i;
4450 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4451 remove arguments of the phi nodes in fin_bb. We need to create
4452 appropriate phi nodes in iter_part_bb instead. */
4453 se = single_pred_edge (fin_bb);
4454 re = single_succ_edge (trip_update_bb);
4455 head = redirect_edge_var_map_vector (re);
4456 ene = single_succ_edge (entry_bb);
4458 psi = gsi_start_phis (fin_bb);
4459 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4460 gsi_next (&psi), ++i)
4461 {
4464 phi = gsi_stmt (psi);
4465 t = gimple_phi_result (phi);
4466 gcc_assert (t == redirect_edge_var_map_result (vm));
4467 nphi = create_phi_node (t, iter_part_bb);
4468 SSA_NAME_DEF_STMT (t) = nphi;
4470 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4471 /* A special case -- fd->loop.v is not yet computed in
4472 iter_part_bb, we need to use v_extra instead. */
4473 if (t == fd->loop.v)
4474 t = v_extra;
4475 add_phi_arg (nphi, t, ene);
4476 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re);
4477 }
4478 gcc_assert (gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4479 redirect_edge_var_map_clear (re);
4480 while (1)
4481 {
4482 psi = gsi_start_phis (fin_bb);
4483 if (gsi_end_p (psi))
4484 break;
4485 remove_phi_node (&psi, false);
4486 }
4488 /* Make phi node for trip. */
4489 phi = create_phi_node (trip_main, iter_part_bb);
4490 SSA_NAME_DEF_STMT (trip_main) = phi;
4491 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb));
4492 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb));
4493 }
4495 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4496 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4497 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4498 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4499 recompute_dominator (CDI_DOMINATORS, fin_bb));
4500 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4501 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4502 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4503 recompute_dominator (CDI_DOMINATORS, body_bb));
4504 }
4507 /* Expand the OpenMP loop defined by REGION. */
4509 static void
4510 expand_omp_for (struct omp_region *region)
4511 {
4512 struct omp_for_data fd;
4513 struct omp_for_data_loop *loops;
4515 loops
4516 = (struct omp_for_data_loop *)
4517 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4518 * sizeof (struct omp_for_data_loop));
4519 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4520 region->sched_kind = fd.sched_kind;
4522 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4523 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4524 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4525 if (region->cont)
4526 {
4527 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4528 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4529 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4530 }
4532 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4533 && !fd.have_ordered
4534 && fd.collapse == 1
4535 && region->cont != NULL)
4536 {
4537 if (fd.chunk_size == NULL)
4538 expand_omp_for_static_nochunk (region, &fd);
4539 else
4540 expand_omp_for_static_chunk (region, &fd);
4541 }
4542 else
4543 {
4544 int fn_index, start_ix, next_ix;
4546 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4547 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4548 ? 3 : fd.sched_kind;
4549 fn_index += fd.have_ordered * 4;
4550 start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
4551 next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
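/* Editorial sketch, not from the original source: with the schedule kinds
   numbered static/dynamic/guided and runtime mapped to 3, a loop such as

   #pragma omp for schedule(dynamic, 8)

   yields fn_index == 1, so start_ix/next_ix select the libgomp entry points
   GOMP_loop_dynamic_start/GOMP_loop_dynamic_next; adding an 'ordered' clause
   shifts the pair by 4 to the GOMP_loop_ordered_dynamic_* variants.  */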
4552 if (fd.iter_type == long_long_unsigned_type_node)
4553 {
4554 start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4555 - BUILT_IN_GOMP_LOOP_STATIC_START;
4556 next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4557 - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
4558 }
4559 expand_omp_for_generic (region, &fd, start_ix, next_ix);
4560 }
4562 update_ssa (TODO_update_ssa_only_virtuals);
4563 }
4566 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
4568 v = GOMP_sections_start (n);
4569 L0:
4570 switch (v)
4571 {
4572 case 0:
4573 goto L2;
4574 case 1:
4575 section 1;
4576 goto L1;
4577 case 2:
4578 ...
4579 case n:
4580 ...
4581 default:
4582 abort ();
4583 }
4584 L1:
4585 v = GOMP_sections_next ();
4586 goto L0;
4587 L2:
4588 reduction;
4590 If this is a combined parallel sections, replace the call to
4591 GOMP_sections_start with a call to GOMP_sections_next. */
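/* Editorial sketch, not from the original source: for user code such as

   #pragma omp sections
   {
   #pragma omp section
   foo ();
   #pragma omp section
   bar ();
   }

   each thread repeatedly fetches a section number from the runtime and
   dispatches through the switch above (case 1 runs foo, case 2 runs bar)
   until GOMP_sections_next returns 0.  */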
4593 static void
4594 expand_omp_sections (struct omp_region *region)
4595 {
4596 tree t, u, vin = NULL, vmain, vnext, l1, l2;
4597 VEC (tree,heap) *label_vec;
4598 unsigned len;
4599 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4600 gimple_stmt_iterator si, switch_si;
4601 gimple sections_stmt, stmt, cont;
4602 edge_iterator ei;
4603 edge e;
4604 struct omp_region *inner;
4605 unsigned i, casei;
4606 bool exit_reachable = region->cont != NULL;
4608 gcc_assert (exit_reachable == (region->exit != NULL));
4609 entry_bb = region->entry;
4610 l0_bb = single_succ (entry_bb);
4611 l1_bb = region->cont;
4612 l2_bb = region->exit;
4613 if (exit_reachable)
4614 {
4615 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
4616 l2 = gimple_block_label (l2_bb);
4617 else
4618 {
4619 /* This can happen if there are reductions. */
4620 len = EDGE_COUNT (l0_bb->succs);
4621 gcc_assert (len > 0);
4622 e = EDGE_SUCC (l0_bb, len - 1);
4623 si = gsi_last_bb (e->dest);
4625 if (gsi_end_p (si)
4626 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4627 l2 = gimple_block_label (e->dest);
4628 else
4629 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4630 {
4631 si = gsi_last_bb (e->dest);
4632 if (gsi_end_p (si)
4633 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4634 {
4635 l2 = gimple_block_label (e->dest);
4636 break;
4637 }
4638 }
4639 }
4640 default_bb = create_empty_bb (l1_bb->prev_bb);
4641 l1 = gimple_block_label (l1_bb);
4642 }
4643 else
4644 {
4645 default_bb = create_empty_bb (l0_bb);
4646 l1 = NULL_TREE;
4647 l2 = gimple_block_label (default_bb);
4648 }
4650 /* We will build a switch() with enough cases for all the
4651 GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
4652 and a default case to abort if something goes wrong. */
4653 len = EDGE_COUNT (l0_bb->succs);
4655 /* Use VEC_quick_push on label_vec throughout, since we know the size
4656 in advance. */
4657 label_vec = VEC_alloc (tree, heap, len);
4659 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4660 GIMPLE_OMP_SECTIONS statement. */
4661 si = gsi_last_bb (entry_bb);
4662 sections_stmt = gsi_stmt (si);
4663 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4664 vin = gimple_omp_sections_control (sections_stmt);
4665 if (!is_combined_parallel (region))
4666 {
4667 /* If we are not inside a combined parallel+sections region,
4668 call GOMP_sections_start. */
4669 t = build_int_cst (unsigned_type_node,
4670 exit_reachable ? len - 1 : len);
4671 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
4672 stmt = gimple_build_call (u, 1, t);
4673 }
4674 else
4675 {
4676 /* Otherwise, call GOMP_sections_next. */
4677 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
4678 stmt = gimple_build_call (u, 0);
4679 }
4680 gimple_call_set_lhs (stmt, vin);
4681 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4682 gsi_remove (&si, true);
4684 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4685 L0_BB. */
4686 switch_si = gsi_last_bb (l0_bb);
4687 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4688 if (exit_reachable)
4689 {
4690 cont = last_stmt (l1_bb);
4691 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4692 vmain = gimple_omp_continue_control_use (cont);
4693 vnext = gimple_omp_continue_control_def (cont);
4694 }
4695 else
4696 {
4697 vmain = vin;
4698 vnext = NULL_TREE;
4699 }
4701 i = 0;
4702 if (exit_reachable)
4703 {
4704 t = build3 (CASE_LABEL_EXPR, void_type_node,
4705 build_int_cst (unsigned_type_node, 0), NULL, l2);
4706 VEC_quick_push (tree, label_vec, t);
4707 i++;
4708 }
4710 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4711 for (inner = region->inner, casei = 1;
4712 inner;
4713 inner = inner->next, i++, casei++)
4714 {
4715 basic_block s_entry_bb, s_exit_bb;
4717 /* Skip optional reduction region. */
4718 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4719 {
4720 --i;
4721 --casei;
4722 exit_reachable = false;
4723 continue;
4724 }
4725 s_entry_bb = inner->entry;
4726 s_exit_bb = inner->exit;
4728 t = gimple_block_label (s_entry_bb);
4729 u = build_int_cst (unsigned_type_node, casei);
4730 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
4731 VEC_quick_push (tree, label_vec, u);
4733 si = gsi_last_bb (s_entry_bb);
4734 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4735 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4736 gsi_remove (&si, true);
4737 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4739 if (s_exit_bb == NULL)
4740 continue;
4742 si = gsi_last_bb (s_exit_bb);
4743 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4744 gsi_remove (&si, true);
4746 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4747 }
4749 /* Error handling code goes in DEFAULT_BB. */
4750 t = gimple_block_label (default_bb);
4751 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
4752 make_edge (l0_bb, default_bb, 0);
4754 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4755 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4756 gsi_remove (&switch_si, true);
4757 VEC_free (tree, heap, label_vec);
4759 si = gsi_start_bb (default_bb);
4760 stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
4761 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4763 if (exit_reachable)
4764 {
4765 /* Code to get the next section goes in L1_BB. */
4766 si = gsi_last_bb (l1_bb);
4767 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4769 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
4770 gimple_call_set_lhs (stmt, vnext);
4771 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4772 gsi_remove (&si, true);
4774 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4776 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4777 si = gsi_last_bb (l2_bb);
4778 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4779 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
4781 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
4782 stmt = gimple_build_call (t, 0);
4783 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4784 gsi_remove (&si, true);
4785 }
4787 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4788 }
4791 /* Expand code for an OpenMP single directive. We've already expanded
4792 much of the code, here we simply place the GOMP_barrier call. */
4794 static void
4795 expand_omp_single (struct omp_region *region)
4796 {
4797 basic_block entry_bb, exit_bb;
4798 gimple_stmt_iterator si;
4799 bool need_barrier = false;
4801 entry_bb = region->entry;
4802 exit_bb = region->exit;
4804 si = gsi_last_bb (entry_bb);
4805 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4806 be removed. We need to ensure that the thread that entered the single
4807 does not exit before the data is copied out by the other threads. */
4808 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4809 OMP_CLAUSE_COPYPRIVATE))
4810 need_barrier = true;
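/* Editorial note, illustrative only: with "#pragma omp single copyprivate (x)"
   the thread that executed the single body broadcasts x to its siblings, so
   even under 'nowait' semantics it must not leave the region -- and possibly
   invalidate x -- before the other threads have copied the value in.  */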
4811 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4812 gsi_remove (&si, true);
4813 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4815 si = gsi_last_bb (exit_bb);
4816 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4817 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4818 false, GSI_SAME_STMT);
4819 gsi_remove (&si, true);
4820 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4821 }
4824 /* Generic expansion for OpenMP synchronization directives: master,
4825 ordered and critical. All we need to do here is remove the entry
4826 and exit markers for REGION. */
4828 static void
4829 expand_omp_synch (struct omp_region *region)
4830 {
4831 basic_block entry_bb, exit_bb;
4832 gimple_stmt_iterator si;
4834 entry_bb = region->entry;
4835 exit_bb = region->exit;
4837 si = gsi_last_bb (entry_bb);
4838 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4839 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4840 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4841 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4842 gsi_remove (&si, true);
4843 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4845 if (exit_bb)
4846 {
4847 si = gsi_last_bb (exit_bb);
4848 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4849 gsi_remove (&si, true);
4850 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4851 }
4852 }
4854 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4855 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
4856 size of the data type, and thus usable to find the index of the builtin
4857 decl. Returns false if the expression is not of the proper form. */
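/* Editorial sketch, not from the original source: for

   #pragma omp atomic
   x += 3;

   the load/store pair matches the "tmp OP something" shape described below
   and is collapsed into a single builtin call, e.g. __sync_fetch_and_add
   (&x, 3), with the fetched result discarded.  */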
4859 static bool
4860 expand_omp_atomic_fetch_op (basic_block load_bb,
4861 tree addr, tree loaded_val,
4862 tree stored_val, int index)
4863 {
4864 enum built_in_function base;
4865 tree decl, itype, call;
4866 enum insn_code *optab;
4867 tree rhs;
4868 basic_block store_bb = single_succ (load_bb);
4869 gimple_stmt_iterator gsi;
4870 gimple stmt;
4872 /* We expect to find the following sequences:
4874 load_bb:
4875 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
4877 store_bb:
4878 val = tmp OP something; (or: something OP tmp)
4879 GIMPLE_OMP_ATOMIC_STORE (val)
4881 ??? FIXME: Allow a more flexible sequence.
4882 Perhaps use data flow to pick the statements.
4883 */
4886 gsi = gsi_after_labels (store_bb);
4887 stmt = gsi_stmt (gsi);
4888 if (!is_gimple_assign (stmt))
4889 return false;
4890 gsi_next (&gsi);
4891 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
4892 return false;
4894 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
4895 return false;
4897 /* Check for one of the supported fetch-op operations. */
4898 switch (gimple_assign_rhs_code (stmt))
4899 {
4900 case PLUS_EXPR:
4901 case POINTER_PLUS_EXPR:
4902 base = BUILT_IN_FETCH_AND_ADD_N;
4903 optab = sync_add_optab;
4904 break;
4905 case MINUS_EXPR:
4906 base = BUILT_IN_FETCH_AND_SUB_N;
4907 optab = sync_add_optab;
4908 break;
4909 case BIT_AND_EXPR:
4910 base = BUILT_IN_FETCH_AND_AND_N;
4911 optab = sync_and_optab;
4912 break;
4913 case BIT_IOR_EXPR:
4914 base = BUILT_IN_FETCH_AND_OR_N;
4915 optab = sync_ior_optab;
4916 break;
4917 case BIT_XOR_EXPR:
4918 base = BUILT_IN_FETCH_AND_XOR_N;
4919 optab = sync_xor_optab;
4920 break;
4921 default:
4922 return false;
4923 }
4924 /* Make sure the expression is of the proper form. */
4925 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
4926 rhs = gimple_assign_rhs2 (stmt);
4927 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
4928 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
4929 rhs = gimple_assign_rhs1 (stmt);
4930 else
4931 return false;
4933 decl = built_in_decls[base + index + 1];
4934 itype = TREE_TYPE (TREE_TYPE (decl));
4936 if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
4937 return false;
4939 gsi = gsi_last_bb (load_bb);
4940 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
4941 call = build_call_expr (decl, 2, addr, fold_convert (itype, rhs));
4942 call = fold_convert (void_type_node, call);
4943 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
4944 gsi_remove (&gsi, true);
4946 gsi = gsi_last_bb (store_bb);
4947 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
4948 gsi_remove (&gsi, true);
4949 gsi = gsi_last_bb (store_bb);
4950 gsi_remove (&gsi, true);
4952 if (gimple_in_ssa_p (cfun))
4953 update_ssa (TODO_update_ssa_no_phi);
4954 return true;
4955 }
4958 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
4960 oldval = *addr;
4961 repeat:
4962 newval = rhs; // with oldval replacing *addr in rhs
4963 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
4964 if (oldval != newval)
4965 goto repeat;
4967 INDEX is log2 of the size of the data type, and thus usable to find the
4968 index of the builtin decl. */
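/* Editorial sketch, not from the original source: the loop built below,
   spelled out in C for a 32-bit float, with a union standing in for the
   VIEW_CONVERT_EXPRs:

   union vc { float f; unsigned int i; } old, new_, ret;
   old.f = *addr;                        // GIMPLE_OMP_ATOMIC_LOAD
   for (;;)
     {
       new_.f = old.f + rhs;             // recompute RHS with latest value
       ret.i = __sync_val_compare_and_swap ((unsigned int *) addr,
                                            old.i, new_.i);
       if (ret.i == old.i)
         break;                          // the swap succeeded
       old.i = ret.i;                    // lost a race; retry
     }
*/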
4970 static bool
4971 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
4972 tree addr, tree loaded_val, tree stored_val,
4973 int index)
4974 {
4975 tree loadedi, storedi, initial, new_storedi, old_vali;
4976 tree type, itype, cmpxchg, iaddr;
4977 gimple_stmt_iterator si;
4978 basic_block loop_header = single_succ (load_bb);
4979 gimple phi, stmt;
4980 edge e;
4982 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
4983 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
4984 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
4986 if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
4987 return false;
4989 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
4990 si = gsi_last_bb (load_bb);
4991 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
4993 /* For floating-point values, we'll need to view-convert them to integers
4994 so that we can perform the atomic compare and swap. Simplify the
4995 following code by always setting up the "i"ntegral variables. */
4996 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
4997 {
4998 tree iaddr_val;
5000 iaddr = create_tmp_var (build_pointer_type (itype), NULL);
5001 iaddr_val
5002 = force_gimple_operand_gsi (&si,
5003 fold_convert (TREE_TYPE (iaddr), addr),
5004 false, NULL_TREE, true, GSI_SAME_STMT);
5005 stmt = gimple_build_assign (iaddr, iaddr_val);
5006 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5007 DECL_NO_TBAA_P (iaddr) = 1;
5008 DECL_POINTER_ALIAS_SET (iaddr) = 0;
5009 loadedi = create_tmp_var (itype, NULL);
5010 if (gimple_in_ssa_p (cfun))
5012 add_referenced_var (iaddr);
5013 add_referenced_var (loadedi);
5014 loadedi = make_ssa_name (loadedi, NULL);
5015 }
5016 }
5017 else
5018 {
5019 iaddr = addr;
5020 loadedi = loaded_val;
5021 }
5023 initial = force_gimple_operand_gsi (&si, build_fold_indirect_ref (iaddr),
5024 true, NULL_TREE, true, GSI_SAME_STMT);
5026 /* Move the value to the LOADEDI temporary. */
5027 if (gimple_in_ssa_p (cfun))
5028 {
5029 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5030 phi = create_phi_node (loadedi, loop_header);
5031 SSA_NAME_DEF_STMT (loadedi) = phi;
5032 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5033 initial);
5034 }
5035 else
5036 gsi_insert_before (&si,
5037 gimple_build_assign (loadedi, initial),
5038 GSI_SAME_STMT);
5039 if (loadedi != loaded_val)
5040 {
5041 gimple_stmt_iterator gsi2;
5042 tree x;
5044 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5045 gsi2 = gsi_start_bb (loop_header);
5046 if (gimple_in_ssa_p (cfun))
5047 {
5049 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5050 true, GSI_SAME_STMT);
5051 stmt = gimple_build_assign (loaded_val, x);
5052 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5053 }
5054 else
5055 {
5056 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5057 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5058 true, GSI_SAME_STMT);
5059 }
5060 }
5061 gsi_remove (&si, true);
5063 si = gsi_last_bb (store_bb);
5064 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5066 if (iaddr == addr)
5067 storedi = stored_val;
5068 else
5069 storedi =
5070 force_gimple_operand_gsi (&si,
5071 build1 (VIEW_CONVERT_EXPR, itype,
5072 stored_val), true, NULL_TREE, true,
5073 GSI_SAME_STMT);
5075 /* Build the compare&swap statement. */
5076 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5077 new_storedi = force_gimple_operand_gsi (&si,
5078 fold_convert (itype, new_storedi),
5079 true, NULL_TREE,
5080 true, GSI_SAME_STMT);
5082 if (gimple_in_ssa_p (cfun))
5083 old_vali = loadedi;
5084 else
5085 {
5086 old_vali = create_tmp_var (itype, NULL);
5087 if (gimple_in_ssa_p (cfun))
5088 add_referenced_var (old_vali);
5089 stmt = gimple_build_assign (old_vali, loadedi);
5090 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5092 stmt = gimple_build_assign (loadedi, new_storedi);
5093 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5094 }
5096 /* Note that we always perform the comparison as an integer, even for
5097 floating point. This allows the atomic operation to properly
5098 succeed even with NaNs and -0.0. */
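/* Editorial example: had the compare been done in floating point, a stored
   -0.0 against an expected +0.0 would compare equal and end the loop even
   though the (bitwise) compare-and-swap had failed, and a NaN result would
   never compare equal and loop forever; comparing the integer images avoids
   both.  */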
5099 stmt = gimple_build_cond_empty
5100 (build2 (NE_EXPR, boolean_type_node,
5101 new_storedi, old_vali));
5102 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5105 e = single_succ_edge (store_bb);
5106 e->flags &= ~EDGE_FALLTHRU;
5107 e->flags |= EDGE_FALSE_VALUE;
5109 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5111 /* Copy the new value to loadedi (we already did that before the condition
5112 if we are not in SSA). */
5113 if (gimple_in_ssa_p (cfun))
5115 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5116 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5117 }
5119 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5120 gsi_remove (&si, true);
5122 if (gimple_in_ssa_p (cfun))
5123 update_ssa (TODO_update_ssa_no_phi);
5125 return true;
5126 }
5128 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5130 GOMP_atomic_start ();
5131 *addr = rhs;
5132 GOMP_atomic_end ();
5134 The result is not globally atomic, but works so long as all parallel
5135 references are within #pragma omp atomic directives. According to
5136 responses received from omp@openmp.org, this appears to be within spec.
5137 Which makes sense, since that's how several other compilers handle
5138 this situation as well.
5139 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5140 expanding. STORED_VAL is the operand of the matching
5141 GIMPLE_OMP_ATOMIC_STORE.
5143 We replace
5144 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5145 loaded_val = *addr;
5147 and replace
5148 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
5149 *addr = stored_val;
5150 */
5152 static void
5153 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5154 tree addr, tree loaded_val, tree stored_val)
5155 {
5156 gimple_stmt_iterator si;
5157 gimple stmt;
5158 tree t;
5160 si = gsi_last_bb (load_bb);
5161 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5163 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
5164 t = build_function_call_expr (t, 0);
5165 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5167 stmt = gimple_build_assign (loaded_val, build_fold_indirect_ref (addr));
5168 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5169 gsi_remove (&si, true);
5171 si = gsi_last_bb (store_bb);
5172 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5174 stmt = gimple_build_assign (build_fold_indirect_ref (unshare_expr (addr)),
5175 stored_val);
5176 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5178 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
5179 t = build_function_call_expr (t, 0);
5180 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5181 gsi_remove (&si, true);
5183 if (gimple_in_ssa_p (cfun))
5184 update_ssa (TODO_update_ssa_no_phi);
5185 }
5188 /* Expand a GIMPLE_OMP_ATOMIC statement. We try to expand it
5189 using expand_omp_atomic_fetch_op. If that fails, we try to
5190 call expand_omp_atomic_pipeline, and if that fails too, the
5191 ultimate fallback is wrapping the operation in a mutex
5192 (expand_omp_atomic_mutex). REGION is the atomic region built
5193 by build_omp_regions_1(). */
5195 static void
5196 expand_omp_atomic (struct omp_region *region)
5197 {
5198 basic_block load_bb = region->entry, store_bb = region->exit;
5199 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5200 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5201 tree addr = gimple_omp_atomic_load_rhs (load);
5202 tree stored_val = gimple_omp_atomic_store_val (store);
5203 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5204 HOST_WIDE_INT index;
5206 /* Make sure the type is one of the supported sizes. */
5207 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5208 index = exact_log2 (index);
5209 if (index >= 0 && index <= 4)
5210 {
5211 unsigned int align = TYPE_ALIGN_UNIT (type);
5213 /* __sync builtins require strict data alignment. */
5214 if (exact_log2 (align) >= index)
5215 {
5216 /* When possible, use specialized atomic update functions. */
5217 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5218 && store_bb == single_succ (load_bb))
5219 {
5220 if (expand_omp_atomic_fetch_op (load_bb, addr,
5221 loaded_val, stored_val, index))
5222 return;
5223 }
5225 /* If we don't have specialized __sync builtins, try and implement
5226 as a compare and swap loop. */
5227 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5228 loaded_val, stored_val, index))
5229 return;
5230 }
5231 }
5233 /* The ultimate fallback is wrapping the operation in a mutex. */
5234 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5235 }
5238 /* Expand the parallel region tree rooted at REGION. Expansion
5239 proceeds in depth-first order. Innermost regions are expanded
5240 first. This way, parallel regions that require a new function to
5241 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5242 internal dependencies in their body. */
5244 static void
5245 expand_omp (struct omp_region *region)
5246 {
5247 while (region)
5248 {
5249 location_t saved_location;
5251 /* First, determine whether this is a combined parallel+workshare
5253 if (region->type == GIMPLE_OMP_PARALLEL)
5254 determine_parallel_type (region);
5256 if (region->inner)
5257 expand_omp (region->inner);
5259 saved_location = input_location;
5260 if (gimple_has_location (last_stmt (region->entry)))
5261 input_location = gimple_location (last_stmt (region->entry));
5263 switch (region->type)
5264 {
5265 case GIMPLE_OMP_PARALLEL:
5266 case GIMPLE_OMP_TASK:
5267 expand_omp_taskreg (region);
5268 break;
5270 case GIMPLE_OMP_FOR:
5271 expand_omp_for (region);
5272 break;
5274 case GIMPLE_OMP_SECTIONS:
5275 expand_omp_sections (region);
5276 break;
5278 case GIMPLE_OMP_SECTION:
5279 /* Individual omp sections are handled together with their
5280 parent GIMPLE_OMP_SECTIONS region. */
5281 break;
5283 case GIMPLE_OMP_SINGLE:
5284 expand_omp_single (region);
5285 break;
5287 case GIMPLE_OMP_MASTER:
5288 case GIMPLE_OMP_ORDERED:
5289 case GIMPLE_OMP_CRITICAL:
5290 expand_omp_synch (region);
5291 break;
5293 case GIMPLE_OMP_ATOMIC_LOAD:
5294 expand_omp_atomic (region);
5295 break;
5297 default:
5298 gcc_unreachable ();
5299 }
5301 input_location = saved_location;
5302 region = region->next;
5303 }
5304 }
5307 /* Helper for build_omp_regions. Scan the dominator tree starting at
5308 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5309 true, the function ends once a single tree is built (otherwise, whole
5310 forest of OMP constructs may be built). */
5312 static void
5313 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5314 bool single_tree)
5315 {
5316 gimple_stmt_iterator gsi;
5317 gimple stmt;
5318 basic_block son;
5320 gsi = gsi_last_bb (bb);
5321 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5322 {
5323 struct omp_region *region;
5324 enum gimple_code code;
5326 stmt = gsi_stmt (gsi);
5327 code = gimple_code (stmt);
5328 if (code == GIMPLE_OMP_RETURN)
5329 {
5330 /* STMT is the return point out of region PARENT. Mark it
5331 as the exit point and make PARENT the immediately
5332 enclosing region. */
5333 gcc_assert (parent);
5334 parent->exit = bb;
5336 parent = parent->outer;
5337 }
5338 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5339 {
5340 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5341 GIMPLE_OMP_RETURN, but matches with
5342 GIMPLE_OMP_ATOMIC_LOAD. */
5343 gcc_assert (parent);
5344 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5345 parent->exit = bb;
5347 parent = parent->outer;
5348 }
5350 else if (code == GIMPLE_OMP_CONTINUE)
5351 {
5352 gcc_assert (parent);
5353 parent->cont = bb;
5354 }
5355 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5356 {
5357 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5358 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5359 }
5360 else
5361 {
5363 /* Otherwise, this directive becomes the parent for a new
5364 region. */
5365 region = new_omp_region (bb, code, parent);
5366 parent = region;
5367 }
5368 }
5370 if (single_tree && !parent)
5371 return;
5373 for (son = first_dom_son (CDI_DOMINATORS, bb);
5374 son;
5375 son = next_dom_son (CDI_DOMINATORS, son))
5376 build_omp_regions_1 (son, parent, single_tree);
5377 }
5379 /* Builds the tree of OMP regions rooted at ROOT, storing it to
5380 root_omp_region. */
5382 static void
5383 build_omp_regions_root (basic_block root)
5384 {
5385 gcc_assert (root_omp_region == NULL);
5386 build_omp_regions_1 (root, NULL, true);
5387 gcc_assert (root_omp_region != NULL);
5388 }
5390 /* Expands omp construct (and its subconstructs) starting in HEAD. */
5392 void
5393 omp_expand_local (basic_block head)
5394 {
5395 build_omp_regions_root (head);
5396 if (dump_file && (dump_flags & TDF_DETAILS))
5397 {
5398 fprintf (dump_file, "\nOMP region tree\n\n");
5399 dump_omp_region (dump_file, root_omp_region, 0);
5400 fprintf (dump_file, "\n");
5401 }
5403 remove_exit_barriers (root_omp_region);
5404 expand_omp (root_omp_region);
5406 free_omp_regions ();
5407 }
5409 /* Scan the CFG and build a tree of OMP regions. Return the root of
5410 the OMP region tree. */
5412 static void
5413 build_omp_regions (void)
5414 {
5415 gcc_assert (root_omp_region == NULL);
5416 calculate_dominance_info (CDI_DOMINATORS);
5417 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5418 }
5420 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5422 static unsigned int
5423 execute_expand_omp (void)
5424 {
5425 build_omp_regions ();
5427 if (!root_omp_region)
5428 return 0;
5430 if (dump_file)
5431 {
5432 fprintf (dump_file, "\nOMP region tree\n\n");
5433 dump_omp_region (dump_file, root_omp_region, 0);
5434 fprintf (dump_file, "\n");
5435 }
5437 remove_exit_barriers (root_omp_region);
5439 expand_omp (root_omp_region);
5441 cleanup_tree_cfg ();
5443 free_omp_regions ();
5445 return 0;
5446 }
5448 /* OMP expansion -- the default pass, run before creation of SSA form. */
5450 static bool
5451 gate_expand_omp (void)
5452 {
5453 return (flag_openmp != 0 && errorcount == 0);
5454 }
5456 struct gimple_opt_pass pass_expand_omp =
5457 {
5458 {
5459 GIMPLE_PASS,
5460 "ompexp", /* name */
5461 gate_expand_omp, /* gate */
5462 execute_expand_omp, /* execute */
5463 NULL, /* sub */
5464 NULL, /* next */
5465 0, /* static_pass_number */
5466 0, /* tv_id */
5467 PROP_gimple_any, /* properties_required */
5468 PROP_gimple_lomp, /* properties_provided */
5469 0, /* properties_destroyed */
5470 0, /* todo_flags_start */
5471 TODO_dump_func /* todo_flags_finish */
5472 }
5473 };
5475 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5477 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5478 CTX is the enclosing OMP context for the current statement. */
5480 static void
5481 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5482 {
5483 tree block, control;
5484 gimple_stmt_iterator tgsi;
5485 unsigned i, len;
5486 gimple stmt, new_stmt, bind, t;
5487 gimple_seq ilist, dlist, olist, new_body, body;
5488 struct gimplify_ctx gctx;
5490 stmt = gsi_stmt (*gsi_p);
5492 push_gimplify_context (&gctx);
5494 dlist = NULL;
5495 ilist = NULL;
5496 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5497 &ilist, &dlist, ctx);
5499 tgsi = gsi_start (gimple_omp_body (stmt));
5500 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
5501 continue;
5503 tgsi = gsi_start (gimple_omp_body (stmt));
5504 body = NULL;
5505 for (i = 0; i < len; i++, gsi_next (&tgsi))
5506 {
5507 omp_context *sctx;
5508 gimple sec_start;
5510 sec_start = gsi_stmt (tgsi);
5511 sctx = maybe_lookup_ctx (sec_start);
5512 gcc_assert (sctx);
5514 gimple_seq_add_stmt (&body, sec_start);
5516 lower_omp (gimple_omp_body (sec_start), sctx);
5517 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5518 gimple_omp_set_body (sec_start, NULL);
5520 if (i == len - 1)
5521 {
5522 gimple_seq l = NULL;
5523 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5524 &l, ctx);
5525 gimple_seq_add_seq (&body, l);
5526 gimple_omp_section_set_last (sec_start);
5527 }
5529 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
5530 }
5532 block = make_node (BLOCK);
5533 bind = gimple_build_bind (NULL, body, block);
5535 olist = NULL;
5536 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5538 block = make_node (BLOCK);
5539 new_stmt = gimple_build_bind (NULL, NULL, block);
5541 pop_gimplify_context (new_stmt);
5542 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5543 BLOCK_VARS (block) = gimple_bind_vars (bind);
5544 if (BLOCK_VARS (block))
5545 TREE_USED (block) = 1;
5547 new_body = NULL;
5548 gimple_seq_add_seq (&new_body, ilist);
5549 gimple_seq_add_stmt (&new_body, stmt);
5550 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5551 gimple_seq_add_stmt (&new_body, bind);
5553 control = create_tmp_var (unsigned_type_node, ".section");
5554 t = gimple_build_omp_continue (control, control);
5555 gimple_omp_sections_set_control (stmt, control);
5556 gimple_seq_add_stmt (&new_body, t);
5558 gimple_seq_add_seq (&new_body, olist);
5559 gimple_seq_add_seq (&new_body, dlist);
5561 new_body = maybe_catch_exception (new_body);
5563 t = gimple_build_omp_return
5564 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5565 OMP_CLAUSE_NOWAIT));
5566 gimple_seq_add_stmt (&new_body, t);
5568 gimple_bind_set_body (new_stmt, new_body);
5569 gimple_omp_set_body (stmt, NULL);
5571 gsi_replace (gsi_p, new_stmt, true);
5572 }
5575 /* A subroutine of lower_omp_single. Expand the simple form of
5576 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5578 if (GOMP_single_start ())
5579 BODY;
5580 [ GOMP_barrier (); ] -> unless 'nowait' is present.
5582 FIXME. It may be better to delay expanding the logic of this until
5583 pass_expand_omp. The expanded logic may make the job more difficult
5584 to a synchronization analysis pass. */
5586 static void
5587 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5588 {
5589 tree tlabel = create_artificial_label ();
5590 tree flabel = create_artificial_label ();
5591 gimple call, cond;
5592 tree lhs, decl;
5594 decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
5595 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5596 call = gimple_build_call (decl, 0);
5597 gimple_call_set_lhs (call, lhs);
5598 gimple_seq_add_stmt (pre_p, call);
5600 cond = gimple_build_cond (EQ_EXPR, lhs,
5601 fold_convert (TREE_TYPE (lhs), boolean_true_node),
5602 tlabel, flabel);
5603 gimple_seq_add_stmt (pre_p, cond);
5604 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5605 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5606 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
5607 }
5610 /* A subroutine of lower_omp_single. Expand the simple form of
5611 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
5613 #pragma omp single copyprivate (a, b, c)
5615 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5617 {
5618 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5619 {
5620 BODY;
5621 copyout.a = a;
5622 copyout.b = b;
5623 copyout.c = c;
5624 GOMP_single_copy_end (&copyout);
5625 }
5626 else
5627 {
5628 a = copyout_p->a;
5629 b = copyout_p->b;
5630 c = copyout_p->c;
5631 }
5632 GOMP_barrier ();
5633 }
5635 FIXME. It may be better to delay expanding the logic of this until
5636 pass_expand_omp. The expanded logic may make the job more difficult
5637 for a synchronization analysis pass. */
5639 static void
5640 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5641 {
5642 tree ptr_type, t, l0, l1, l2;
5643 gimple_seq copyin_seq;
5645 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5647 ptr_type = build_pointer_type (ctx->record_type);
5648 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5650 l0 = create_artificial_label ();
5651 l1 = create_artificial_label ();
5652 l2 = create_artificial_label ();
5654 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
5655 t = fold_convert (ptr_type, t);
5656 gimplify_assign (ctx->receiver_decl, t, pre_p);
5658 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5659 build_int_cst (ptr_type, 0));
5660 t = build3 (COND_EXPR, void_type_node, t,
5661 build_and_jump (&l0), build_and_jump (&l1));
5662 gimplify_and_add (t, pre_p);
5664 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5666 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5668 copyin_seq = NULL;
5669 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
5670 &copyin_seq, ctx);
5672 t = build_fold_addr_expr (ctx->sender_decl);
5673 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END], 1, t);
5674 gimplify_and_add (t, pre_p);
5676 t = build_and_jump (&l2);
5677 gimplify_and_add (t, pre_p);
5679 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5681 gimple_seq_add_seq (pre_p, copyin_seq);
5683 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
5684 }
5687 /* Expand code for an OpenMP single directive. */
5689 static void
5690 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5691 {
5692 tree block;
5693 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5694 gimple_seq bind_body, dlist;
5695 struct gimplify_ctx gctx;
5697 push_gimplify_context (&gctx);
5700 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5701 &bind_body, &dlist, ctx);
5702 lower_omp (gimple_omp_body (single_stmt), ctx);
5704 gimple_seq_add_stmt (&bind_body, single_stmt);
5706 if (ctx->record_type)
5707 lower_omp_single_copy (single_stmt, &bind_body, ctx);
5708 else
5709 lower_omp_single_simple (single_stmt, &bind_body);
5711 gimple_omp_set_body (single_stmt, NULL);
5713 gimple_seq_add_seq (&bind_body, dlist);
5715 bind_body = maybe_catch_exception (bind_body);
5717 t = gimple_build_omp_return
5718 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5719 OMP_CLAUSE_NOWAIT));
5720 gimple_seq_add_stmt (&bind_body, t);
5722 block = make_node (BLOCK);
5723 bind = gimple_build_bind (NULL, bind_body, block);
5725 pop_gimplify_context (bind);
5727 gimple_bind_append_vars (bind, ctx->block_vars);
5728 BLOCK_VARS (block) = ctx->block_vars;
5729 gsi_replace (gsi_p, bind, true);
5730 if (BLOCK_VARS (block))
5731 TREE_USED (block) = 1;
5732 }
5735 /* Expand code for an OpenMP master directive. */
5737 static void
5738 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5739 {
5740 tree block, lab = NULL, x;
5741 gimple stmt = gsi_stmt (*gsi_p), bind;
5742 gimple_seq tseq;
5743 struct gimplify_ctx gctx;
5745 push_gimplify_context (&gctx);
5747 block = make_node (BLOCK);
5748 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5749 block);
5751 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
5752 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5753 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
5754 tseq = NULL;
5755 gimplify_and_add (x, &tseq);
5756 gimple_bind_add_seq (bind, tseq);
5758 lower_omp (gimple_omp_body (stmt), ctx);
5759 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5760 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5761 gimple_omp_set_body (stmt, NULL);
5763 gimple_bind_add_stmt (bind, gimple_build_label (lab));
5765 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5767 pop_gimplify_context (bind);
5769 gimple_bind_append_vars (bind, ctx->block_vars);
5770 BLOCK_VARS (block) = ctx->block_vars;
5771 gsi_replace (gsi_p, bind, true);
5772 }
5775 /* Expand code for an OpenMP ordered directive. */
5777 static void
5778 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5779 {
5780 tree block;
5781 gimple stmt = gsi_stmt (*gsi_p), bind, x;
5782 struct gimplify_ctx gctx;
5784 push_gimplify_context (&gctx);
5786 block = make_node (BLOCK);
5787 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5788 block);
5790 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
5791 gimple_bind_add_stmt (bind, x);
5793 lower_omp (gimple_omp_body (stmt), ctx);
5794 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5795 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5796 gimple_omp_set_body (stmt, NULL);
5798 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
5799 gimple_bind_add_stmt (bind, x);
5801 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5803 pop_gimplify_context (bind);
5805 gimple_bind_append_vars (bind, ctx->block_vars);
5806 BLOCK_VARS (block) = gimple_bind_vars (bind);
5807 gsi_replace (gsi_p, bind, true);
5808 }
5811 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
5812 substitution of a couple of function calls. But in the NAMED case,
5813 it requires that languages coordinate a symbol name. It is therefore
5814 best put here in common code. */
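/* Editorial sketch, not from the original source: for

   #pragma omp critical (io)

   the code below emits a common symbol named ".gomp_critical_user_io", so
   every use of the name "io" anywhere in the program locks the same mutex
   via GOMP_critical_name_start; an unnamed critical falls back to the
   single global GOMP_critical_start lock.  */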
5816 static GTY((param1_is (tree), param2_is (tree)))
5817 splay_tree critical_name_mutexes;
5819 static void
5820 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5821 {
5822 tree block;
5823 tree name, lock, unlock;
5824 gimple stmt = gsi_stmt (*gsi_p), bind;
5825 gimple_seq tbody;
5826 struct gimplify_ctx gctx;
5828 name = gimple_omp_critical_name (stmt);
5830 if (name)
5831 {
5832 tree decl;
5833 splay_tree_node n;
5834 if (!critical_name_mutexes)
5835 critical_name_mutexes
5836 = splay_tree_new_ggc (splay_tree_compare_pointers);
5838 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
5840 if (n == NULL)
5841 {
5842 char *new_str;
5843 decl = create_tmp_var_raw (ptr_type_node, NULL);
5845 new_str = ACONCAT ((".gomp_critical_user_",
5846 IDENTIFIER_POINTER (name), NULL));
5847 DECL_NAME (decl) = get_identifier (new_str);
5848 TREE_PUBLIC (decl) = 1;
5849 TREE_STATIC (decl) = 1;
5850 DECL_COMMON (decl) = 1;
5851 DECL_ARTIFICIAL (decl) = 1;
5852 DECL_IGNORED_P (decl) = 1;
5853 varpool_finalize_decl (decl);
5855 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
5856 (splay_tree_value) decl);
5857 }
5858 else
5859 decl = (tree) n->value;
5861 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
5862 lock = build_call_expr (lock, 1, build_fold_addr_expr (decl));
5864 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
5865 unlock = build_call_expr (unlock, 1, build_fold_addr_expr (decl));
5866 }
5867 else
5868 {
5869 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
5870 lock = build_call_expr (lock, 0);
5872 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
5873 unlock = build_call_expr (unlock, 0);
5874 }
5876 push_gimplify_context (&gctx);
5878 block = make_node (BLOCK);
5879 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
5881 tbody = gimple_bind_body (bind);
5882 gimplify_and_add (lock, &tbody);
5883 gimple_bind_set_body (bind, tbody);
5885 lower_omp (gimple_omp_body (stmt), ctx);
5886 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5887 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5888 gimple_omp_set_body (stmt, NULL);
5890 tbody = gimple_bind_body (bind);
5891 gimplify_and_add (unlock, &tbody);
5892 gimple_bind_set_body (bind, tbody);
5894 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5896 pop_gimplify_context (bind);
5897 gimple_bind_append_vars (bind, ctx->block_vars);
5898 BLOCK_VARS (block) = gimple_bind_vars (bind);
5899 gsi_replace (gsi_p, bind, true);
5900 }
5903 /* A subroutine of lower_omp_for. Generate code to emit the predicate
5904 for a lastprivate clause. Given a loop control predicate of (V
5905 cond N2), we gate the clause on (!(V cond N2)). The lowered form
5906 is appended to *DLIST, iterator initialization is appended to
5907 *BODY_P. */
5909 static void
5910 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
5911 gimple_seq *dlist, struct omp_context *ctx)
5912 {
5913 tree clauses, cond, vinit;
5914 enum tree_code cond_code;
5915 gimple_seq stmts;
5917 cond_code = fd->loop.cond_code;
5918 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
5920 /* When possible, use a strict equality expression. This can let VRP
5921 type optimizations deduce the value and remove a copy. */
5922 if (host_integerp (fd->loop.step, 0))
5923 {
5924 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
5925 if (step == 1 || step == -1)
5926 cond_code = EQ_EXPR;
5927 }
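/* Editorial example: for "for (i = 0; i < n; i++)" the thread that ran the
   final iteration ends with i == n exactly, so gating the lastprivate copy
   on i == n (rather than i >= n) lets value-range propagation know the
   value of i inside the guarded block.  */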
5929 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
5931 clauses = gimple_omp_for_clauses (fd->for_stmt);
5932 stmts = NULL;
5933 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
5934 if (!gimple_seq_empty_p (stmts))
5935 {
5936 gimple_seq_add_seq (&stmts, *dlist);
5937 *dlist = stmts;
5939 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
5940 vinit = fd->loop.n1;
5941 if (cond_code == EQ_EXPR
5942 && host_integerp (fd->loop.n2, 0)
5943 && ! integer_zerop (fd->loop.n2))
5944 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
5946 /* Initialize the iterator variable, so that threads that don't execute
5947 any iterations don't execute the lastprivate clauses by accident. */
5948 gimplify_assign (fd->loop.v, vinit, body_p);
5949 }
5950 }
5953 /* Lower code for an OpenMP loop directive. */
5955 static void
5956 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5957 {
5958 tree *rhs_p, block;
5959 struct omp_for_data fd;
5960 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
5961 gimple_seq omp_for_body, body, dlist, ilist;
5962 size_t i;
5963 struct gimplify_ctx gctx;
5965 push_gimplify_context (&gctx);
5967 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
5968 lower_omp (gimple_omp_body (stmt), ctx);
5970 block = make_node (BLOCK);
5971 new_stmt = gimple_build_bind (NULL, NULL, block);
5973 /* Move declaration of temporaries in the loop body before we make
5974 it go before the lowered GIMPLE_OMP_FOR. */
5975 omp_for_body = gimple_omp_body (stmt);
5976 if (!gimple_seq_empty_p (omp_for_body)
5977 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
5979 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
5980 gimple_bind_append_vars (new_stmt, vars);
5983 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
5985 dlist = NULL;
5986 body = NULL;
5987 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
5988 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
5990 /* Lower the header expressions. At this point, we can assume that
5991 the header is of the form:
5993 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
5995 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
5996 using the .omp_data_s mapping, if needed. */
5997 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
5998 {
5999 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
6000 if (!is_gimple_min_invariant (*rhs_p))
6001 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6003 rhs_p = gimple_omp_for_final_ptr (stmt, i);
6004 if (!is_gimple_min_invariant (*rhs_p))
6005 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6007 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6008 if (!is_gimple_min_invariant (*rhs_p))
6009 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6010 }
6012 /* Once lowered, extract the bounds and clauses. */
6013 extract_omp_for_data (stmt, &fd, NULL);
6015 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6017 gimple_seq_add_stmt (&body, stmt);
6018 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6020 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6021 fd.loop.v));
6023 /* After the loop, add exit clauses. */
6024 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6025 gimple_seq_add_seq (&body, dlist);
6027 body = maybe_catch_exception (body);
6029 /* Region exit marker goes at the end of the loop body. */
6030 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6032 pop_gimplify_context (new_stmt);
6034 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6035 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6036 if (BLOCK_VARS (block))
6037 TREE_USED (block) = 1;
6039 gimple_bind_set_body (new_stmt, body);
6040 gimple_omp_set_body (stmt, NULL);
6041 gimple_omp_for_set_pre_body (stmt, NULL);
6042 gsi_replace (gsi_p, new_stmt, true);
6043 }
6045 /* Callback for walk_stmts. Check if the current statement only contains
6046 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
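/* Editorial sketch, not from the original source: walking the body of

   #pragma omp parallel
   #pragma omp for
   for (i = 0; i < n; i++) ...

   finds exactly one workshare statement (*info goes from 0 to 1), so the
   parallel is marked combined; any second workshare or other statement
   flips *info to -1 and the optimization is skipped.  */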
6048 static tree
6049 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6050 bool *handled_ops_p,
6051 struct walk_stmt_info *wi)
6052 {
6053 int *info = (int *) wi->info;
6054 gimple stmt = gsi_stmt (*gsi_p);
6056 *handled_ops_p = true;
6057 switch (gimple_code (stmt))
6058 {
6059 WALK_SUBSTMTS;
6061 case GIMPLE_OMP_FOR:
6062 case GIMPLE_OMP_SECTIONS:
6063 *info = *info == 0 ? 1 : -1;
6064 break;
6065 default:
6066 *info = -1;
6067 break;
6068 }
6069 return NULL;
6070 }
6072 struct omp_taskcopy_context
6074 /* This field must be at the beginning, as we do "inheritance": Some
6075 callback functions for tree-inline.c (e.g., omp_copy_decl)
6076 receive a copy_body_data pointer that is up-casted to an
6077 omp_context pointer. */
6078 copy_body_data cb;
6079 omp_context *ctx;
6080 };
6082 static tree
6083 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6084 {
6085 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6087 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6088 return create_tmp_var (TREE_TYPE (var), NULL);
6090 return var;
6091 }
6093 static tree
6094 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6095 {
6096 tree name, new_fields = NULL, type, f;
6098 type = lang_hooks.types.make_type (RECORD_TYPE);
6099 name = DECL_NAME (TYPE_NAME (orig_type));
6100 name = build_decl (TYPE_DECL, name, type);
6101 TYPE_NAME (type) = name;
6103 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6104 {
6105 tree new_f = copy_node (f);
6106 DECL_CONTEXT (new_f) = type;
6107 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6108 TREE_CHAIN (new_f) = new_fields;
6109 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6110 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6111 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6112 &tcctx->cb, NULL);
6114 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6115 }
6116 TYPE_FIELDS (type) = nreverse (new_fields);
6117 layout_type (type);
6118 return type;
6119 }
6121 /* Create task copyfn. */
6123 static void
6124 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6125 {
6126 struct function *child_cfun;
6127 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6128 tree record_type, srecord_type, bind, list;
6129 bool record_needs_remap = false, srecord_needs_remap = false;
6130 splay_tree_node n;
6131 struct omp_taskcopy_context tcctx;
6132 struct gimplify_ctx gctx;
6134 child_fn = gimple_omp_task_copy_fn (task_stmt);
6135 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6136 gcc_assert (child_cfun->cfg == NULL);
6137 child_cfun->dont_save_pending_sizes_p = 1;
6138 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6140 /* Reset DECL_CONTEXT on function arguments. */
6141 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
6142 DECL_CONTEXT (t) = child_fn;
6144 /* Populate the function. */
6145 push_gimplify_context (&gctx);
6146 current_function_decl = child_fn;
6148 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6149 TREE_SIDE_EFFECTS (bind) = 1;
6150 list = NULL;
6151 DECL_SAVED_TREE (child_fn) = bind;
6152 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6154 /* Remap src and dst argument types if needed. */
6155 record_type = ctx->record_type;
6156 srecord_type = ctx->srecord_type;
6157 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
6158 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6159 {
6160 record_needs_remap = true;
6161 break;
6162 }
6163 for (f = TYPE_FIELDS (srecord_type); f ; f = TREE_CHAIN (f))
6164 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6165 {
6166 srecord_needs_remap = true;
6167 break;
6168 }
6170 if (record_needs_remap || srecord_needs_remap)
6171 {
6172 memset (&tcctx, '\0', sizeof (tcctx));
6173 tcctx.cb.src_fn = ctx->cb.src_fn;
6174 tcctx.cb.dst_fn = child_fn;
6175 tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
6176 tcctx.cb.dst_node = tcctx.cb.src_node;
6177 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6178 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6179 tcctx.cb.eh_region = -1;
6180 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6181 tcctx.cb.decl_map = pointer_map_create ();
6184 if (record_needs_remap)
6185 record_type = task_copyfn_remap_type (&tcctx, record_type);
6186 if (srecord_needs_remap)
6187 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6188 }
6189 else
6190 tcctx.cb.decl_map = NULL;
6192 push_cfun (child_cfun);
6194 arg = DECL_ARGUMENTS (child_fn);
6195 TREE_TYPE (arg) = build_pointer_type (record_type);
6196 sarg = TREE_CHAIN (arg);
6197 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6199 /* First pass: initialize temporaries used in record_type and srecord_type
6200 sizes and field offsets. */
6201 if (tcctx.cb.decl_map)
6202 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6203 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6204 {
6205 tree *p;
6207 decl = OMP_CLAUSE_DECL (c);
6208 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6209 if (p == NULL)
6210 continue;
6211 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6212 sf = (tree) n->value;
6213 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6214 src = build_fold_indirect_ref (sarg);
6215 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6216 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6217 append_to_statement_list (t, &list);
6218 }
6220 /* Second pass: copy shared var pointers and copy construct non-VLA
6221 firstprivate vars. */
6222 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6223 switch (OMP_CLAUSE_CODE (c))
6224 {
6225 case OMP_CLAUSE_SHARED:
6226 decl = OMP_CLAUSE_DECL (c);
6227 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6228 if (n == NULL)
6229 break;
6230 f = (tree) n->value;
6231 if (tcctx.cb.decl_map)
6232 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6233 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6234 sf = (tree) n->value;
6235 if (tcctx.cb.decl_map)
6236 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6237 src = build_fold_indirect_ref (sarg);
6238 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6239 dst = build_fold_indirect_ref (arg);
6240 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6241 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6242 append_to_statement_list (t, &list);
6243 break;
6244 case OMP_CLAUSE_FIRSTPRIVATE:
6245 decl = OMP_CLAUSE_DECL (c);
6246 if (is_variable_sized (decl))
6247 break;
6248 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6249 if (n == NULL)
6250 break;
6251 f = (tree) n->value;
6252 if (tcctx.cb.decl_map)
6253 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6254 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6255 if (n != NULL)
6256 {
6257 sf = (tree) n->value;
6258 if (tcctx.cb.decl_map)
6259 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6260 src = build_fold_indirect_ref (sarg);
6261 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6262 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6263 src = build_fold_indirect_ref (src);
6264 }
6265 else
6266 src = decl;
6267 dst = build_fold_indirect_ref (arg);
6268 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6269 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6270 append_to_statement_list (t, &list);
6271 break;
6272 case OMP_CLAUSE_PRIVATE:
6273 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6274 break;
6275 decl = OMP_CLAUSE_DECL (c);
6276 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6277 f = (tree) n->value;
6278 if (tcctx.cb.decl_map)
6279 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6280 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6281 if (n != NULL)
6282 {
6283 sf = (tree) n->value;
6284 if (tcctx.cb.decl_map)
6285 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6286 src = build_fold_indirect_ref (sarg);
6287 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6288 if (use_pointer_for_field (decl, NULL))
6289 src = build_fold_indirect_ref (src);
6290 }
6291 else
6292 src = decl;
6293 dst = build_fold_indirect_ref (arg);
6294 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6295 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6296 append_to_statement_list (t, &list);
6297 break;
6298 default:
6299 break;
6300 }
6302 /* Last pass: handle VLA firstprivates. */
6303 if (tcctx.cb.decl_map)
6304 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6305 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6306 {
6307 tree ind, ptr, df;
6309 decl = OMP_CLAUSE_DECL (c);
6310 if (!is_variable_sized (decl))
6311 continue;
6312 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6313 if (n == NULL)
6314 continue;
6315 f = (tree) n->value;
6316 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6317 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6318 ind = DECL_VALUE_EXPR (decl);
6319 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6320 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6321 n = splay_tree_lookup (ctx->sfield_map,
6322 (splay_tree_key) TREE_OPERAND (ind, 0));
6323 sf = (tree) n->value;
6324 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6325 src = build_fold_indirect_ref (sarg);
6326 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6327 src = build_fold_indirect_ref (src);
6328 dst = build_fold_indirect_ref (arg);
6329 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6330 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6331 append_to_statement_list (t, &list);
6332 n = splay_tree_lookup (ctx->field_map,
6333 (splay_tree_key) TREE_OPERAND (ind, 0));
6334 df = (tree) n->value;
6335 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6336 ptr = build_fold_indirect_ref (arg);
6337 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
6338 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6339 build_fold_addr_expr (dst));
6340 append_to_statement_list (t, &list);
  t = build1 (RETURN_EXPR, void_type_node, NULL);
  append_to_statement_list (t, &list);

  if (tcctx.cb.decl_map)
    pointer_map_destroy (tcctx.cb.decl_map);
  pop_gimplify_context (NULL);
  BIND_EXPR_BODY (bind) = list;
  pop_cfun ();
  current_function_decl = ctx->cb.src_fn;
}

/* Lower the OpenMP parallel or task directive in the current statement
   in GSI_P.  CTX holds context information for the directive.  */
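/* As a rough sketch (the exact GIMPLE is omitted), a directive such as

     #pragma omp parallel shared (a)
       body;

   is rewritten so that communication with the eventual child function
   goes through a sender record:

     .omp_data_o.a = &a;
     #pragma omp parallel [data_arg: .omp_data_o]
       {
	 .omp_data_i = &.omp_data_o;
	 body;
       }

   The sender variable .omp_data_o is created below, and the receiver
   assignment is the gimple_build_assign of receiver_decl.  Outlining
   the region into an actual child function is left to
   pass_expand_omp.  */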

static void
lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree clauses;
  tree child_fn, t;
  gimple stmt = gsi_stmt (*gsi_p);
  gimple par_bind, bind;
  gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
  struct gimplify_ctx gctx;

  clauses = gimple_omp_taskreg_clauses (stmt);
  par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
  par_body = gimple_bind_body (par_bind);
  child_fn = ctx->cb.dst_fn;
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
      && !gimple_omp_parallel_combined_p (stmt))
    {
      struct walk_stmt_info wi;
      int ws_num = 0;

      memset (&wi, 0, sizeof (wi));
      wi.info = &ws_num;
      wi.val_only = true;
      walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
      if (ws_num == 1)
	gimple_omp_parallel_set_combined_p (stmt, true);
    }
  if (ctx->srecord_type)
    create_task_copyfn (stmt, ctx);

  push_gimplify_context (&gctx);

  par_olist = NULL;
  par_ilist = NULL;
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
  lower_omp (par_body, ctx);
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
    lower_reduction_clauses (clauses, &par_olist, ctx);

  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  record_vars_into (gimple_bind_vars (par_bind), child_fn);

  if (ctx->record_type)
    {
      ctx->sender_decl
	= create_tmp_var (ctx->srecord_type ? ctx->srecord_type
			  : ctx->record_type, ".omp_data_o");
      gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
    }

  olist = NULL;
  ilist = NULL;
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);

  /* Once all the expansions are done, sequence all the different
     fragments inside gimple_omp_body.  */

  new_body = NULL;

  if (ctx->record_type)
    {
      t = build_fold_addr_expr (ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert (TREE_TYPE (ctx->receiver_decl), t);
      gimple_seq_add_stmt (&new_body,
			   gimple_build_assign (ctx->receiver_decl, t));
    }

  gimple_seq_add_seq (&new_body, par_ilist);
  gimple_seq_add_seq (&new_body, par_body);
  gimple_seq_add_seq (&new_body, par_olist);
  new_body = maybe_catch_exception (new_body);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
  gimple_omp_set_body (stmt, new_body);

  bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
  gimple_bind_add_stmt (bind, stmt);
  if (ilist || olist)
    {
      gimple_seq_add_stmt (&ilist, bind);
      gimple_seq_add_seq (&ilist, olist);
      bind = gimple_build_bind (NULL, ilist, NULL);
    }

  gsi_replace (gsi_p, bind, true);

  pop_gimplify_context (NULL);
}

/* Callback for lower_omp_1.  Return non-NULL if *tp needs to be
   regimplified.  If DATA is non-NULL, lower_omp_1 is outside
   of OpenMP context, but with task_shared_vars set.  */
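/* For example, once a global G has been privatized, an ADDR_EXPR &G may
   carry a stale TREE_CONSTANT flag, and a variable remapped via
   DECL_VALUE_EXPR to a field reference is no longer a valid GIMPLE
   operand where it appears; both situations are caught below.  */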

static tree
lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
			void *data)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  if (task_shared_vars
      && DECL_P (t)
      && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
    return t;

  /* If a global variable has been privatized, TREE_CONSTANT on
     ADDR_EXPR might be wrong.  */
  if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}

static void
lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple stmt = gsi_stmt (*gsi_p);
  struct walk_stmt_info wi;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  if (task_shared_vars)
    memset (&wi, '\0', sizeof (wi));

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OpenMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (errorcount && is_gimple_omp (stmt))
    {
      gsi_replace (gsi_p, gimple_build_nop (), true);
      return;
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      if ((ctx || task_shared_vars)
	  && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
			 ctx ? NULL : &wi, NULL)
	      || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
			    ctx ? NULL : &wi, NULL)))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    case GIMPLE_CATCH:
      lower_omp (gimple_catch_handler (stmt), ctx);
      break;
    case GIMPLE_EH_FILTER:
      lower_omp (gimple_eh_filter_failure (stmt), ctx);
      break;
    case GIMPLE_TRY:
      lower_omp (gimple_try_eval (stmt), ctx);
      lower_omp (gimple_try_cleanup (stmt), ctx);
      break;
    case GIMPLE_BIND:
      lower_omp (gimple_bind_body (stmt), ctx);
      break;
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
      ctx = maybe_lookup_ctx (stmt);
      lower_omp_taskreg (gsi_p, ctx);
      break;
    case GIMPLE_OMP_FOR:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_for (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SECTIONS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_sections (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SINGLE:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_single (gsi_p, ctx);
      break;
    case GIMPLE_OMP_MASTER:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_master (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ORDERED:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_ordered (gsi_p, ctx);
      break;
    case GIMPLE_OMP_CRITICAL:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_critical (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ATOMIC_LOAD:
      if ((ctx || task_shared_vars)
	  && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
			lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    default:
      if ((ctx || task_shared_vars)
	  && walk_gimple_op (stmt, lower_omp_regimplify_p,
			     ctx ? NULL : &wi))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    }
}

static void
lower_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location = input_location;
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
    lower_omp_1 (&gsi, ctx);
  input_location = saved_location;
}

/* Main entry point.  */

static unsigned int
execute_lower_omp (void)
{
  gimple_seq body;

  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
				 delete_omp_context);

  body = gimple_body (current_function_decl);
  scan_omp (body, NULL);
  gcc_assert (taskreg_nesting_level == 0);

  if (all_contexts->root)
    {
      struct gimplify_ctx gctx;

      if (task_shared_vars)
	push_gimplify_context (&gctx);
      lower_omp (body, NULL);
      if (task_shared_vars)
	pop_gimplify_context (NULL);
    }

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
  BITMAP_FREE (task_shared_vars);
  return 0;
}

static bool
gate_lower_omp (void)
{
  return flag_openmp != 0;
}

struct gimple_opt_pass pass_lower_omp =
{
 {
  GIMPLE_PASS,
  "omplower",				/* name */
  gate_lower_omp,			/* gate */
  execute_lower_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  0,					/* tv_id */
  PROP_gimple_any,			/* properties_required */
  PROP_gimple_lomp,			/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  TODO_dump_func			/* todo_flags_finish */
 }
};

/* The following is a utility to diagnose OpenMP structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */
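/* For instance, the walks below reject code along the lines of

     #pragma omp parallel
       {
	 goto fail;	<-- "invalid exit from OpenMP structured block"
       }
     fail:;

   (illustrative source; the checks actually run on the gimplified form
   supplied by the front end).  */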

static splay_tree all_labels;

/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  */

static bool
diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
	       gimple branch_ctx, gimple label_ctx)
{
  if (label_ctx == branch_ctx)
    return false;

  /*
     Previously we kept track of the label's entire context in diagnose_sb_[12]
     so we could traverse it and issue a correct "exit" or "enter" error
     message upon a structured block violation.

     We built the context by building a list with tree_cons'ing, but there is
     no easy counterpart in gimple tuples.  It seems like far too much work
     for issuing exit/enter error messages.  If someone really misses the
     distinct error message... patches welcome.
   */

#if 0
  /* Try to avoid confusing the user by producing an error message
     with correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  bool exit_p = true;

  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
	{
	  if (TREE_VALUE (label_ctx) == branch_ctx)
	    {
	      exit_p = false;
	      break;
	    }
	  label_ctx = TREE_CHAIN (label_ctx);
	}
    }

  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");
#endif

  /* If it's obvious we have an invalid entry, be specific about the error.  */
  if (branch_ctx == NULL)
    error ("invalid entry to OpenMP structured block");
  else
    /* Otherwise, be vague and lazy, but efficient.  */
    error ("invalid branch to/from an OpenMP structured block");

  gsi_replace (gsi_p, gimple_build_nop (), false);
  return true;
}

/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where each label is found.  */
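/* E.g., for a label declared inside "#pragma omp single" nested in
   "#pragma omp parallel", the context recorded in ALL_LABELS is just
   the inner GIMPLE_OMP_SINGLE statement; pass 2 then compares each
   branch's context pointer against the label's (a sketch of the
   intent, not of any particular dump).  */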

static tree
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  gimple inner_context;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      /* The minimal context here is just the current OMP construct.  */
      inner_context = stmt;
      wi->info = inner_context;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      inner_context = stmt;
      wi->info = inner_context;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_1, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_LABEL:
      splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
			 (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.  */

static tree
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  splay_tree_node n;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      wi->info = stmt;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      wi->info = stmt;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_2, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_COND:
      {
	tree lab = gimple_cond_true_label (stmt);
	if (lab)
	  {
	    n = splay_tree_lookup (all_labels,
				   (splay_tree_key) lab);
	    diagnose_sb_0 (gsi_p, context,
			   n ? (gimple) n->value : NULL);
	  }
	lab = gimple_cond_false_label (stmt);
	if (lab)
	  {
	    n = splay_tree_lookup (all_labels,
				   (splay_tree_key) lab);
	    diagnose_sb_0 (gsi_p, context,
			   n ? (gimple) n->value : NULL);
	  }
      }
      break;

    case GIMPLE_GOTO:
      {
	tree lab = gimple_goto_dest (stmt);
	if (TREE_CODE (lab) != LABEL_DECL)
	  break;

	n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
      }
      break;

    case GIMPLE_SWITCH:
      {
	unsigned int i;
	for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
	  {
	    tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
	    n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	    if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
	      break;
	  }
      }
      break;

    case GIMPLE_RETURN:
      diagnose_sb_0 (gsi_p, context, NULL);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

void
diagnose_omp_structured_block_errors (tree fndecl)
{
  tree save_current = current_function_decl;
  struct walk_stmt_info wi;
  struct function *old_cfun = cfun;
  gimple_seq body = gimple_body (fndecl);

  current_function_decl = fndecl;
  set_cfun (DECL_STRUCT_FUNCTION (fndecl));

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);

  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;
  walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);

  splay_tree_delete (all_labels);
  all_labels = NULL;

  set_cfun (old_cfun);
  current_function_decl = save_current;
}

#include "gt-omp-low.h"