/* Lowering pass for OMP directives.  Converts OMP directives into explicit
   calls to the runtime library (libgomp), data marshalling to implement data
   sharing and copying clauses, offloading to accelerators, and more.

   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "rtl.h"
#include "predict.h"
#include "hard-reg-set.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "cfganal.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "gimple-ssa.h"
#include "hash-map.h"
#include "plugin-api.h"
#include "ipa-ref.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "hashtab.h"
#include "flags.h"
#include "statistics.h"
#include "real.h"
#include "fixed-value.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "tree-pass.h"
#include "except.h"
#include "splay-tree.h"
#include "insn-codes.h"
#include "optabs.h"
#include "cfgloop.h"
#include "target.h"
#include "common/common-target.h"
#include "omp-low.h"
#include "gimple-low.h"
#include "tree-cfgcleanup.h"
#include "pretty-print.h"
#include "alloc-pool.h"
#include "symbol-summary.h"
#include "ipa-prop.h"
#include "tree-nested.h"
#include "tree-eh.h"
#include "cilk.h"
#include "context.h"
#include "lto-section-names.h"
#include "gomp-constants.h"


/* Lowering of OMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for regions which are then moved to a new
   function, to be invoked by the thread library, or offloaded.  */
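
/* As a purely illustrative sketch (the exact temporaries, flags and
   library entry points depend on the construct and on the libgomp
   version), a region such as

	#pragma omp parallel shared (n)
	  body;

   is outlined into a child function and the directive is replaced by
   something like

	.omp_data_o.n = n;
	GOMP_parallel (foo._omp_fn.0, &.omp_data_o, 0, 0);

   with the child function reading n back through its .omp_data_i
   argument.  */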

/* OMP region information.  Every parallel and workshare
   directive is enclosed between two markers, the OMP_* directive
   and a corresponding OMP_RETURN statement.  */

struct omp_region
{
  /* The enclosing region.  */
  struct omp_region *outer;

  /* First child region.  */
  struct omp_region *inner;

  /* Next peer region.  */
  struct omp_region *next;

  /* Block containing the omp directive as its last stmt.  */
  basic_block entry;

  /* Block containing the OMP_RETURN as its last stmt.  */
  basic_block exit;

  /* Block containing the OMP_CONTINUE as its last stmt.  */
  basic_block cont;

  /* If this is a combined parallel+workshare region, this is a list
     of additional arguments needed by the combined parallel+workshare
     library call.  */
  vec<tree, va_gc> *ws_args;

  /* The code for the omp directive of this region.  */
  enum gimple_code type;

  /* Schedule kind, only used for OMP_FOR type regions.  */
  enum omp_clause_schedule_kind sched_kind;

  /* True if this is a combined parallel+workshare region.  */
  bool is_combined_parallel;
};

/* Levels of parallelism as defined by OpenACC.  Increasing numbers
   correspond to deeper loop nesting levels.  */
#define MASK_GANG 1
#define MASK_WORKER 2
#define MASK_VECTOR 4
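
/* For example (an illustration, not code used by this pass): a loop nest
   that uses gang parallelism on the outer loop and vector parallelism on
   the inner one would be described by the mask
   (MASK_GANG | MASK_VECTOR), i.e. 5.  */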

/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* A map of reduction pointer variables.  For accelerators, each
     reduction variable is replaced with an array.  Each thread, in turn,
     is assigned to a slot on that array.  */
  splay_tree reduction_map;

  /* Label to which GOMP_cancel{,lation_point} and explicit and implicit
     barriers should jump during the omplower pass.  */
  tree cancel_label;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;

  /* True if this construct can be cancelled.  */
  bool cancellable;

  /* For OpenACC loops, a mask of gang, worker and vector used at
     levels below this one.  */
  int gwv_below;
  /* For OpenACC loops, a mask of gang, worker and vector used at
     this level and above.  For parallel and kernels clauses, a mask
     indicating which of num_gangs/num_workers/num_vectors was used.  */
  int gwv_this;
} omp_context;

/* A structure holding the elements of:
   for (V = N1; V cond N2; V += STEP) [...]  */

struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};
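
/* For instance (with hypothetical values), the loop
   for (i = 0; i < n; i += 4) would be recorded as v = i, n1 = 0,
   n2 = n, step = 4 and cond_code = LT_EXPR.  */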

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gomp_for *for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};


static splay_tree all_contexts;
static int taskreg_nesting_level;
static int target_nesting_level;
static struct omp_region *root_omp_region;
static bitmap task_shared_vars;
static vec<omp_context *> taskreg_contexts;

static void scan_omp (gimple_seq *, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;

/* Helper function to get the name of the array containing the partial
   reductions for OpenACC reductions.  */
static const char *
oacc_get_reduction_array_id (tree node)
{
  const char *id = IDENTIFIER_POINTER (DECL_NAME (node));
  int len = strlen ("OACC") + strlen (id);
  char *temp_name = XALLOCAVEC (char, len + 1);
  snprintf (temp_name, len + 1, "OACC%s", id);
  return IDENTIFIER_POINTER (get_identifier (temp_name));
}
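
/* E.g., for a reduction variable named "sum", the function above yields
   the identifier "OACCsum" (an illustrative value; the mangling is
   simply the fixed "OACC" prefix).  */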

/* Determine the number of OpenACC threads, which is used to determine
   the size of the array of partial reductions.  Currently, this is
   num_gangs * vector_length.  This value may be different from
   GOACC_GET_NUM_THREADS, because it is independent of the device used.  */

static tree
oacc_max_threads (omp_context *ctx)
{
  tree nthreads, vector_length, gangs, clauses;

  gangs = fold_convert (sizetype, integer_one_node);
  vector_length = gangs;

  /* The reduction clause may be nested inside a loop directive.
     Scan for the innermost vector_length clause.  */
  for (omp_context *oc = ctx; oc; oc = oc->outer)
    {
      if (gimple_code (oc->stmt) != GIMPLE_OMP_TARGET
	  || (gimple_omp_target_kind (oc->stmt)
	      != GF_OMP_TARGET_KIND_OACC_PARALLEL))
	continue;

      clauses = gimple_omp_target_clauses (oc->stmt);

      vector_length = find_omp_clause (clauses, OMP_CLAUSE_VECTOR_LENGTH);
      if (vector_length)
	vector_length = fold_convert_loc (OMP_CLAUSE_LOCATION (vector_length),
					  sizetype,
					  OMP_CLAUSE_VECTOR_LENGTH_EXPR
					  (vector_length));
      else
	vector_length = fold_convert (sizetype, integer_one_node);

      gangs = find_omp_clause (clauses, OMP_CLAUSE_NUM_GANGS);
      if (gangs)
	gangs = fold_convert_loc (OMP_CLAUSE_LOCATION (gangs), sizetype,
				  OMP_CLAUSE_NUM_GANGS_EXPR (gangs));
      else
	gangs = fold_convert (sizetype, integer_one_node);

      break;
    }

  nthreads = fold_build2 (MULT_EXPR, sizetype, gangs, vector_length);

  return nthreads;
}
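
/* As a hypothetical example: inside a parallel region annotated with
   num_gangs (32) vector_length (128), the function above returns the
   constant 32 * 128 = 4096, independently of what the device actually
   provides at run time.  */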

/* Holds offload tables with decls.  */
vec<tree, va_gc> *offload_funcs, *offload_vars;

/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}
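
/* Typical usage (an illustrative sketch):

	tree c = find_omp_clause (gimple_omp_for_clauses (stmt),
				  OMP_CLAUSE_SCHEDULE);
	if (c)
	  ... use OMP_CLAUSE_SCHEDULE_KIND (c) ...  */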

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
	 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}


/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gomp_for *for_stmt, struct omp_for_data *fd,
		      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);
  bool simd = gimple_omp_for_kind (for_stmt) & GF_OMP_FOR_SIMD;
  bool distribute = gimple_omp_for_kind (for_stmt)
		    == GF_OMP_FOR_KIND_DISTRIBUTE;

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = distribute || simd;
  fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
    fd->sched_kind = OMP_CLAUSE_SCHEDULE_CILKFOR;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
	fd->have_nowait = true;
	break;
      case OMP_CLAUSE_ORDERED:
	fd->have_ordered = true;
	break;
      case OMP_CLAUSE_SCHEDULE:
	gcc_assert (!distribute);
	fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
	fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_DIST_SCHEDULE:
	gcc_assert (distribute);
	fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
	break;
      case OMP_CLAUSE_COLLAPSE:
	if (fd->collapse > 1)
	  {
	    collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
	    collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
	  }
	break;
      default:
	break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
	 static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered)
	fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
			 ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
	loop = &fd->loop;
      else if (loops != NULL)
	loop = loops + i;
      else
	loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
	{
	case LT_EXPR:
	case GT_EXPR:
	  break;
	case NE_EXPR:
	  gcc_assert (gimple_omp_for_kind (for_stmt)
		      == GF_OMP_FOR_KIND_CILKSIMD
		      || (gimple_omp_for_kind (for_stmt)
			  == GF_OMP_FOR_KIND_CILKFOR));
	  break;
	case LE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->n2),
					loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = LT_EXPR;
	  break;
	case GE_EXPR:
	  if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
	    loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
	  else
	    loop->n2 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->n2),
					loop->n2,
					build_int_cst (TREE_TYPE (loop->n2), 1));
	  loop->cond_code = GT_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  break;
	case POINTER_PLUS_EXPR:
	  loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
	  break;
	case MINUS_EXPR:
	  loop->step = TREE_OPERAND (t, 1);
	  loop->step = fold_build1_loc (loc,
					NEGATE_EXPR, TREE_TYPE (loop->step),
					loop->step);
	  break;
	default:
	  gcc_unreachable ();
	}

      if (simd
	  || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
	      && !fd->have_ordered))
	{
	  if (fd->collapse == 1)
	    iter_type = TREE_TYPE (loop->v);
	  else if (i == 0
		   || TYPE_PRECISION (iter_type)
		      < TYPE_PRECISION (TREE_TYPE (loop->v)))
	    iter_type
	      = build_nonstandard_integer_type
		  (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
	}
      else if (iter_type != long_long_unsigned_type_node)
	{
	  if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
	    iter_type = long_long_unsigned_type_node;
	  else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
		   && TYPE_PRECISION (TREE_TYPE (loop->v))
		      >= TYPE_PRECISION (iter_type))
	    {
	      tree n;

	      if (loop->cond_code == LT_EXPR)
		n = fold_build2_loc (loc,
				     PLUS_EXPR, TREE_TYPE (loop->v),
				     loop->n2, loop->step);
	      else
		n = loop->n1;
	      if (TREE_CODE (n) != INTEGER_CST
		  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
		iter_type = long_long_unsigned_type_node;
	    }
	  else if (TYPE_PRECISION (TREE_TYPE (loop->v))
		   > TYPE_PRECISION (iter_type))
	    {
	      tree n1, n2;

	      if (loop->cond_code == LT_EXPR)
		{
		  n1 = loop->n1;
		  n2 = fold_build2_loc (loc,
					PLUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		}
	      else
		{
		  n1 = fold_build2_loc (loc,
					MINUS_EXPR, TREE_TYPE (loop->v),
					loop->n2, loop->step);
		  n2 = loop->n1;
		}
	      if (TREE_CODE (n1) != INTEGER_CST
		  || TREE_CODE (n2) != INTEGER_CST
		  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
		  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
		iter_type = long_long_unsigned_type_node;
	    }
	}

      if (collapse_count && *collapse_count == NULL)
	{
	  t = fold_binary (loop->cond_code, boolean_type_node,
			   fold_convert (TREE_TYPE (loop->v), loop->n1),
			   fold_convert (TREE_TYPE (loop->v), loop->n2));
	  if (t && integer_zerop (t))
	    count = build_zero_cst (long_long_unsigned_type_node);
	  else if ((i == 0 || count != NULL_TREE)
		   && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
		   && TREE_CONSTANT (loop->n1)
		   && TREE_CONSTANT (loop->n2)
		   && TREE_CODE (loop->step) == INTEGER_CST)
	    {
	      tree itype = TREE_TYPE (loop->v);

	      if (POINTER_TYPE_P (itype))
		itype = signed_type_for (itype);
	      t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
	      t = fold_build2_loc (loc,
				   PLUS_EXPR, itype,
				   fold_convert_loc (loc, itype, loop->step), t);
	      t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n2));
	      t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
				   fold_convert_loc (loc, itype, loop->n1));
	      if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
				     fold_build1_loc (loc, NEGATE_EXPR, itype, t),
				     fold_build1_loc (loc, NEGATE_EXPR, itype,
						      fold_convert_loc (loc, itype,
									loop->step)));
	      else
		t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
				     fold_convert_loc (loc, itype, loop->step));
	      t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
	      if (count != NULL_TREE)
		count = fold_build2_loc (loc,
					 MULT_EXPR, long_long_unsigned_type_node,
					 count, t);
	      else
		count = t;
	      if (TREE_CODE (count) != INTEGER_CST)
		count = NULL_TREE;
	    }
	  else if (count && !integer_zerop (count))
	    count = NULL_TREE;
	}
    }

  if (count
      && !simd
      && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
	  || fd->have_ordered))
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
	iter_type = long_long_unsigned_type_node;
      else
	iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
	*collapse_count = fold_convert_loc (loc, iter_type, count);
      else
	*collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }

  /* For OpenACC loops, force a chunk size of one, as this avoids the default
     scheduling where several subsequent iterations are executed by the
     same thread.  */
  if (gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_OACC_LOOP)
    {
      gcc_assert (fd->chunk_size == NULL_TREE);
      fd->chunk_size = build_int_cst (TREE_TYPE (fd->loop.v), 1);
    }
}
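
/* To illustrate the normalization performed above (values hypothetical):
   for (i = 0; i <= n; i++) is recorded with n2 = n + 1 and
   cond_code = LT_EXPR, and a MINUS_EXPR increment by 2 becomes
   step = -2, so every loop reaches the later phases in the canonical
   form documented for struct omp_for_data_loop.  */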


/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (as_a <gomp_for *> (ws_stmt), &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}


/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static vec<tree, va_gc> *
get_ws_args_for (gimple par_stmt, gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  vec<tree, va_gc> *ws_args;

  if (gomp_for *for_stmt = dyn_cast <gomp_for *> (ws_stmt))
    {
      struct omp_for_data fd;
      tree n1, n2;

      extract_omp_for_data (for_stmt, &fd, NULL);
      n1 = fd.loop.n1;
      n2 = fd.loop.n2;

      if (gimple_omp_for_combined_into_p (for_stmt))
	{
	  tree innerc
	    = find_omp_clause (gimple_omp_parallel_clauses (par_stmt),
			       OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n1 = OMP_CLAUSE_DECL (innerc);
	  innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n2 = OMP_CLAUSE_DECL (innerc);
	}

      vec_alloc (ws_args, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, n1);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, n2);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      ws_args->quick_push (t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  ws_args->quick_push (t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      vec_alloc (ws_args, 1);
      ws_args->quick_push (t);
      return ws_args;
    }

  gcc_unreachable ();
}
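
/* For a combined parallel loop with schedule (dynamic, 16) (an
   illustrative case), the vector built above holds the bounds, step and
   chunk size converted to long, i.e. { n1, n2, step, 16 }, which become
   the extra arguments of the combined parallel+loop library call.  */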


/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple par_stmt = last_stmt (par_entry_bb);
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
	      || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
    }
}
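
/* E.g. (illustrative): a perfectly nested

	#pragma omp parallel
	#pragma omp for schedule (dynamic)

   pair is marked is_combined_parallel in both regions, allowing the
   expander to emit one combined library call instead of a parallel
   call followed by a separate workshare setup.  */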


/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Return the type of a decl.  If the decl is reference type,
   return its base type.  */
static inline tree
get_base_type (tree decl)
{
  tree type = TREE_TYPE (decl);
  if (is_reference (decl))
    type = TREE_TYPE (type);
  return type;
}

/* Lookup variables.  The "maybe" form allows the variable to not have
   been entered; otherwise we assert that it must have been.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n = ctx->cb.decl_map->get (var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n = ctx->cb.decl_map->get (const_cast<tree> (var));
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map,
			 (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

static inline tree
lookup_oacc_reduction (const char *id, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->reduction_map, (splay_tree_key) id);
  return (tree) n->value;
}

static inline tree
maybe_lookup_oacc_reduction (tree var, omp_context *ctx)
{
  splay_tree_node n = NULL;
  if (ctx->reduction_map)
    n = splay_tree_lookup (ctx->reduction_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      gcc_assert (!is_gimple_omp_oacc (shared_ctx->stmt));

      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* lower_send_shared_vars only uses copy-in, but not copy-out
	 for these.  */
      if (TREE_READONLY (decl)
	  || ((TREE_CODE (decl) == RESULT_DECL
	       || TREE_CODE (decl) == PARM_DECL)
	      && DECL_BY_REFERENCE (decl)))
	return false;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		goto maybe_mark_addressable_and_ret;
	    }
	}

      /* For tasks avoid using copy-in/out.  As tasks can be
	 deferred or executed in a different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (is_task_ctx (shared_ctx))
	{
	  tree outer;
	maybe_mark_addressable_and_ret:
	  outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}

/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
   as appropriate.  */
static tree
omp_build_component_ref (tree obj, tree field)
{
  tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
  if (TREE_THIS_VOLATILE (field))
    TREE_THIS_VOLATILE (ret) |= 1;
  if (TREE_READONLY (field))
    TREE_READONLY (ret) |= 1;
  return ret;
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = omp_build_component_ref (x, field);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	   && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
    {
      /* #pragma omp simd isn't a worksharing construct, and can reference
	 even private vars in its linear etc. clauses.  */
      x = NULL_TREE;
      if (ctx->outer && is_taskreg_ctx (ctx))
	x = lookup_decl (var, ctx->outer);
      else if (ctx->outer)
	x = maybe_lookup_decl_in_outer_ctx (var, ctx);
      if (x == NULL_TREE)
	x = var;
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return omp_build_component_ref (ctx->sender_decl, field);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
  gcc_assert ((mask & 3) == 3
	      || !is_gimple_omp_oacc (ctx->stmt));

  type = TREE_TYPE (var);
  if (mask & 4)
    {
      gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
      type = build_pointer_type (build_pointer_type (type));
    }
  else if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  DECL_ALIGN (sfield) = DECL_ALIGN (field);
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (var),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
		       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
		       (splay_tree_value) sfield);
}
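
/* Summarizing the MASK bits used above (derived from the function body,
   not a separate interface): bit 1 requests the field in
   ctx->record_type, bit 2 in ctx->srecord_type, so mask 3 installs it in
   both, and bit 4 additionally wraps an array type in a double pointer.  */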

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}


/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	       region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}
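
/* These DEBUG_FUNCTION helpers are intended to be called by hand from
   the debugger, e.g. "(gdb) call debug_all_omp_regions ()".  */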


/* Create a new parallel region starting at STMT inside region PARENT.  */

static struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}


/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
      ctx->reduction_map = outer_ctx->reduction_map;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node::get (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = new hash_map<tree, tree>;

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gomp_task *task_stmt)
{
  struct function *child_cfun;
  tree child_fn;
  gimple_seq seq = NULL, new_seq;
  gbind *bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;

  push_cfun (child_cfun);
  bind = gimplify_body (child_fn, false);
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = NULL;
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();

  /* Inform the callgraph about the new function.  */
  cgraph_node::add_new_function (child_fn, false);
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  delete ctx->cb.decl_map;

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);
  if (ctx->reduction_map
      /* Shared over several omp_contexts.  */
      && (ctx->outer == NULL
	  || ctx->reduction_map != ctx->outer->reduction_map))
    splay_tree_delete (ctx->reduction_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (as_a <gomp_task *> (ctx->stmt));

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  DECL_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl)
    = build_qualified_type (build_reference_type (type), TYPE_QUAL_RESTRICT);
}
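
/* E.g. (hypothetical): a field of variably modified type int[n] in the
   receiver record is remapped above so that its size refers to the child
   function's copy of n rather than the parent's.  */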
1660
1661/* Instantiate decls as necessary in CTX to satisfy the data sharing
1662 specified by CLAUSES. */
1663
1664static void
1665scan_sharing_clauses (tree clauses, omp_context *ctx)
1666{
1667 tree c, decl;
1668 bool scan_array_reductions = false;
1669
1670 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1671 {
1672 bool by_ref;
1673
1674 switch (OMP_CLAUSE_CODE (c))
1675 {
1676 case OMP_CLAUSE_PRIVATE:
1677 decl = OMP_CLAUSE_DECL (c);
1678 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1679 goto do_private;
1680 else if (!is_variable_sized (decl))
1681 install_var_local (decl, ctx);
1682 break;
1683
1684 case OMP_CLAUSE_SHARED:
1685 decl = OMP_CLAUSE_DECL (c);
1686 /* Ignore shared directives in teams construct. */
1687 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
1688 {
1689 /* Global variables don't need to be copied,
1690 the receiver side will use them directly. */
1691 tree odecl = maybe_lookup_decl_in_outer_ctx (decl, ctx);
1692 if (is_global_var (odecl))
1693 break;
1694 insert_decl_map (&ctx->cb, decl, odecl);
1695 break;
1696 }
1697 gcc_assert (is_taskreg_ctx (ctx));
1698 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1699 || !is_variable_sized (decl));
1700 /* Global variables don't need to be copied,
1701 the receiver side will use them directly. */
1702 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1703 break;
1704 by_ref = use_pointer_for_field (decl, ctx);
1705 if (! TREE_READONLY (decl)
1706 || TREE_ADDRESSABLE (decl)
1707 || by_ref
1708 || is_reference (decl))
1709 {
1710 install_var_field (decl, by_ref, 3, ctx);
1711 install_var_local (decl, ctx);
1712 break;
1713 }
1714 /* We don't need to copy const scalar vars back. */
1715 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1716 goto do_private;
1717
1718 case OMP_CLAUSE_LASTPRIVATE:
1719 /* Let the corresponding firstprivate clause create
1720 the variable. */
1721 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1722 break;
1723 /* FALLTHRU */
1724
1725 case OMP_CLAUSE_FIRSTPRIVATE:
1726 if (is_gimple_omp_oacc (ctx->stmt))
1727 {
1728 sorry ("clause not supported yet");
1729 break;
1730 }
1731 /* FALLTHRU */
1732 case OMP_CLAUSE_REDUCTION:
1733 case OMP_CLAUSE_LINEAR:
1734 decl = OMP_CLAUSE_DECL (c);
1735 do_private:
1736 if (is_variable_sized (decl))
1737 {
1738 if (is_task_ctx (ctx))
1739 install_var_field (decl, false, 1, ctx);
1740 break;
1741 }
1742 else if (is_taskreg_ctx (ctx))
1743 {
1744 bool global
1745 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1746 by_ref = use_pointer_for_field (decl, NULL);
1747
1748 if (is_task_ctx (ctx)
1749 && (global || by_ref || is_reference (decl)))
1750 {
1751 install_var_field (decl, false, 1, ctx);
1752 if (!global)
1753 install_var_field (decl, by_ref, 2, ctx);
1754 }
1755 else if (!global)
1756 install_var_field (decl, by_ref, 3, ctx);
1757 }
1758 install_var_local (decl, ctx);
1759 if (is_gimple_omp_oacc (ctx->stmt)
1760 && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
1761 {
1762 /* Create a decl for the reduction array. */
1763 tree var = OMP_CLAUSE_DECL (c);
1764 tree type = get_base_type (var);
1765 tree ptype = build_pointer_type (type);
1766 tree array = create_tmp_var (ptype,
1767 oacc_get_reduction_array_id (var));
1768 omp_context *c = (ctx->field_map ? ctx : ctx->outer);
1769 install_var_field (array, true, 3, c);
1770 install_var_local (array, c);
1771
1772 /* Insert it into the current context. */
1773 splay_tree_insert (ctx->reduction_map, (splay_tree_key)
1774 oacc_get_reduction_array_id (var),
1775 (splay_tree_value) array);
1776 splay_tree_insert (ctx->reduction_map,
1777 (splay_tree_key) array,
1778 (splay_tree_value) array);
1779 }
1780 break;
1781
1782 case OMP_CLAUSE__LOOPTEMP_:
1783 gcc_assert (is_parallel_ctx (ctx));
1784 decl = OMP_CLAUSE_DECL (c);
1785 install_var_field (decl, false, 3, ctx);
1786 install_var_local (decl, ctx);
1787 break;
1788
1789 case OMP_CLAUSE_COPYPRIVATE:
1790 case OMP_CLAUSE_COPYIN:
1791 decl = OMP_CLAUSE_DECL (c);
1792 by_ref = use_pointer_for_field (decl, NULL);
1793 install_var_field (decl, by_ref, 3, ctx);
1794 break;
1795
1796 case OMP_CLAUSE_DEFAULT:
1797 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1798 break;
1799
1800 case OMP_CLAUSE_FINAL:
1801 case OMP_CLAUSE_IF:
1802 case OMP_CLAUSE_NUM_THREADS:
1803 case OMP_CLAUSE_NUM_TEAMS:
1804 case OMP_CLAUSE_THREAD_LIMIT:
1805 case OMP_CLAUSE_DEVICE:
1806 case OMP_CLAUSE_SCHEDULE:
1807 case OMP_CLAUSE_DIST_SCHEDULE:
1808 case OMP_CLAUSE_DEPEND:
1809 case OMP_CLAUSE__CILK_FOR_COUNT_:
1810 case OMP_CLAUSE_NUM_GANGS:
1811 case OMP_CLAUSE_NUM_WORKERS:
1812 case OMP_CLAUSE_VECTOR_LENGTH:
1813 if (ctx->outer)
1814 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1815 break;
1816
1817 case OMP_CLAUSE_TO:
1818 case OMP_CLAUSE_FROM:
1819 case OMP_CLAUSE_MAP:
1820 if (ctx->outer)
1821 scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer);
1822 decl = OMP_CLAUSE_DECL (c);
1823 /* Global variables with "omp declare target" attribute
1824 don't need to be copied, the receiver side will use them
1825 directly. */
1826 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
1827 && DECL_P (decl)
1828 && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
1829 && varpool_node::get_create (decl)->offloadable)
1830 break;
1831 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
1832 && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER)
1833 {
1834 /* Ignore GOMP_MAP_POINTER kind for arrays in regions that are
1835 not offloaded; there is nothing to map for those. */
1836 if (!is_gimple_omp_offloaded (ctx->stmt)
1837 && !POINTER_TYPE_P (TREE_TYPE (decl))
1838 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
1839 break;
1840 }
1841 if (DECL_P (decl))
1842 {
1843 if (DECL_SIZE (decl)
1844 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
1845 {
1846 tree decl2 = DECL_VALUE_EXPR (decl);
1847 gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
1848 decl2 = TREE_OPERAND (decl2, 0);
1849 gcc_assert (DECL_P (decl2));
1850 install_var_field (decl2, true, 3, ctx);
1851 install_var_local (decl2, ctx);
1852 install_var_local (decl, ctx);
1853 }
1854 else
1855 {
1856 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
1857 && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
1858 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
1859 && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
1860 install_var_field (decl, true, 7, ctx);
1861 else
1862 install_var_field (decl, true, 3, ctx);
1863 if (is_gimple_omp_offloaded (ctx->stmt))
1864 install_var_local (decl, ctx);
1865 }
1866 }
1867 else
1868 {
1869 tree base = get_base_address (decl);
1870 tree nc = OMP_CLAUSE_CHAIN (c);
1871 if (DECL_P (base)
1872 && nc != NULL_TREE
1873 && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
1874 && OMP_CLAUSE_DECL (nc) == base
1875 && OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_POINTER
1876 && integer_zerop (OMP_CLAUSE_SIZE (nc)))
1877 {
1878 OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1;
1879 OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1;
1880 }
1881 else
1882 {
1883 if (ctx->outer)
1884 {
1885 scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
1886 decl = OMP_CLAUSE_DECL (c);
1887 }
1888 gcc_assert (!splay_tree_lookup (ctx->field_map,
1889 (splay_tree_key) decl));
1890 tree field
1891 = build_decl (OMP_CLAUSE_LOCATION (c),
1892 FIELD_DECL, NULL_TREE, ptr_type_node);
1893 DECL_ALIGN (field) = TYPE_ALIGN (ptr_type_node);
1894 insert_field_into_struct (ctx->record_type, field);
1895 splay_tree_insert (ctx->field_map, (splay_tree_key) decl,
1896 (splay_tree_value) field);
1897 }
1898 }
1899 break;
1900
1901 case OMP_CLAUSE_NOWAIT:
1902 case OMP_CLAUSE_ORDERED:
1903 case OMP_CLAUSE_COLLAPSE:
1904 case OMP_CLAUSE_UNTIED:
1905 case OMP_CLAUSE_MERGEABLE:
1906 case OMP_CLAUSE_PROC_BIND:
1907 case OMP_CLAUSE_SAFELEN:
1908 case OMP_CLAUSE_ASYNC:
1909 case OMP_CLAUSE_WAIT:
1910 case OMP_CLAUSE_GANG:
1911 case OMP_CLAUSE_WORKER:
1912 case OMP_CLAUSE_VECTOR:
1913 break;
1914
1915 case OMP_CLAUSE_ALIGNED:
1916 decl = OMP_CLAUSE_DECL (c);
1917 if (is_global_var (decl)
1918 && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
1919 install_var_local (decl, ctx);
1920 break;
1921
1922 case OMP_CLAUSE_DEVICE_RESIDENT:
1923 case OMP_CLAUSE_USE_DEVICE:
1924 case OMP_CLAUSE__CACHE_:
1925 case OMP_CLAUSE_INDEPENDENT:
1926 case OMP_CLAUSE_AUTO:
1927 case OMP_CLAUSE_SEQ:
1928 sorry ("clause not supported yet");
1929 break;
1930
1931 default:
1932 gcc_unreachable ();
1933 }
1934 }
1935
1936 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1937 {
1938 switch (OMP_CLAUSE_CODE (c))
1939 {
1940 case OMP_CLAUSE_LASTPRIVATE:
1941 /* Let the corresponding firstprivate clause create
1942 the variable. */
1943 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1944 scan_array_reductions = true;
1945 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1946 break;
1947 /* FALLTHRU */
1948
1949 case OMP_CLAUSE_FIRSTPRIVATE:
1950 if (is_gimple_omp_oacc (ctx->stmt))
1951 {
1952 sorry ("clause not supported yet");
1953 break;
1954 }
1955 /* FALLTHRU */
1956 case OMP_CLAUSE_PRIVATE:
1957 case OMP_CLAUSE_REDUCTION:
1958 case OMP_CLAUSE_LINEAR:
1959 decl = OMP_CLAUSE_DECL (c);
1960 if (is_variable_sized (decl))
1961 install_var_local (decl, ctx);
1962 fixup_remapped_decl (decl, ctx,
1963 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1964 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1965 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1966 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1967 scan_array_reductions = true;
1968 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
1969 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
1970 scan_array_reductions = true;
1971 break;
1972
1973 case OMP_CLAUSE_SHARED:
1974 /* Ignore shared directives in teams construct. */
1975 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
1976 break;
1977 decl = OMP_CLAUSE_DECL (c);
1978 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1979 fixup_remapped_decl (decl, ctx, false);
1980 break;
1981
1982 case OMP_CLAUSE_MAP:
1983 if (!is_gimple_omp_offloaded (ctx->stmt))
1984 break;
1985 decl = OMP_CLAUSE_DECL (c);
1986 if (DECL_P (decl)
1987 && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
1988 && varpool_node::get_create (decl)->offloadable)
1989 break;
1990 if (DECL_P (decl))
1991 {
1992 if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
1993 && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
1994 && !COMPLETE_TYPE_P (TREE_TYPE (decl)))
1995 {
1996 tree new_decl = lookup_decl (decl, ctx);
1997 TREE_TYPE (new_decl)
1998 = remap_type (TREE_TYPE (decl), &ctx->cb);
1999 }
2000 else if (DECL_SIZE (decl)
2001 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
2002 {
2003 tree decl2 = DECL_VALUE_EXPR (decl);
2004 gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
2005 decl2 = TREE_OPERAND (decl2, 0);
2006 gcc_assert (DECL_P (decl2));
2007 fixup_remapped_decl (decl2, ctx, false);
2008 fixup_remapped_decl (decl, ctx, true);
2009 }
2010 else
2011 fixup_remapped_decl (decl, ctx, false);
2012 }
2013 break;
2014
2015 case OMP_CLAUSE_COPYPRIVATE:
2016 case OMP_CLAUSE_COPYIN:
2017 case OMP_CLAUSE_DEFAULT:
2018 case OMP_CLAUSE_IF:
2019 case OMP_CLAUSE_NUM_THREADS:
2020 case OMP_CLAUSE_NUM_TEAMS:
2021 case OMP_CLAUSE_THREAD_LIMIT:
2022 case OMP_CLAUSE_DEVICE:
2023 case OMP_CLAUSE_SCHEDULE:
2024 case OMP_CLAUSE_DIST_SCHEDULE:
2025 case OMP_CLAUSE_NOWAIT:
2026 case OMP_CLAUSE_ORDERED:
2027 case OMP_CLAUSE_COLLAPSE:
2028 case OMP_CLAUSE_UNTIED:
2029 case OMP_CLAUSE_FINAL:
2030 case OMP_CLAUSE_MERGEABLE:
2031 case OMP_CLAUSE_PROC_BIND:
2032 case OMP_CLAUSE_SAFELEN:
2033 case OMP_CLAUSE_ALIGNED:
2034 case OMP_CLAUSE_DEPEND:
2035 case OMP_CLAUSE__LOOPTEMP_:
2036 case OMP_CLAUSE_TO:
2037 case OMP_CLAUSE_FROM:
2038 case OMP_CLAUSE__CILK_FOR_COUNT_:
2039 case OMP_CLAUSE_ASYNC:
2040 case OMP_CLAUSE_WAIT:
2041 case OMP_CLAUSE_NUM_GANGS:
2042 case OMP_CLAUSE_NUM_WORKERS:
2043 case OMP_CLAUSE_VECTOR_LENGTH:
2044 case OMP_CLAUSE_GANG:
2045 case OMP_CLAUSE_WORKER:
2046 case OMP_CLAUSE_VECTOR:
2047 break;
2048
2049 case OMP_CLAUSE_DEVICE_RESIDENT:
2050 case OMP_CLAUSE_USE_DEVICE:
2051 case OMP_CLAUSE__CACHE_:
2052 case OMP_CLAUSE_INDEPENDENT:
2053 case OMP_CLAUSE_AUTO:
2054 case OMP_CLAUSE_SEQ:
2055 sorry ("clause not supported yet");
2056 break;
2057
2058 default:
2059 gcc_unreachable ();
2060 }
2061 }
2062
2063 gcc_checking_assert (!scan_array_reductions
2064 || !is_gimple_omp_oacc (ctx->stmt));
2065 if (scan_array_reductions)
2066 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
2067 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
2068 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2069 {
2070 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2071 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2072 }
2073 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
2074 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2075 scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2076 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
2077 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
2078 scan_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
2079}
2080
2081/* Create a new name for the omp child function. Returns an identifier. If
2082 IS_CILK_FOR is true, the suffix for the child function is
2083 "_cilk_for_fn". */
2084
2085static tree
2086create_omp_child_function_name (bool task_copy, bool is_cilk_for)
2087{
2088 if (is_cilk_for)
2089 return clone_function_name (current_function_decl, "_cilk_for_fn");
2090 return clone_function_name (current_function_decl,
2091 task_copy ? "_omp_cpyfn" : "_omp_fn");
2092}
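/* Illustrative note (editorial, not in the original source): under the
   assumption that clone_function_name appends the given suffix plus a
   unique counter to the enclosing function's assembler name, outlining
   regions found in foo() would yield child functions named along the
   lines of:

     foo._omp_fn.0       (parallel/task/target body)
     foo._omp_cpyfn.1    (task copy function)
     foo._cilk_for_fn.2  (_Cilk_for body)

   The exact counter values depend on how many clones precede these.  */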
2093
2094/* Returns the type of the induction variable for the child function for
2095 _Cilk_for and the types for _high and _low variables based on TYPE. */
2096
2097static tree
2098cilk_for_check_loop_diff_type (tree type)
2099{
2100 if (TYPE_PRECISION (type) <= TYPE_PRECISION (uint32_type_node))
2101 {
2102 if (TYPE_UNSIGNED (type))
2103 return uint32_type_node;
2104 else
2105 return integer_type_node;
2106 }
2107 else
2108 {
2109 if (TYPE_UNSIGNED (type))
2110 return uint64_type_node;
2111 else
2112 return long_long_integer_type_node;
2113 }
2114}
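/* A sketch of the mapping above (illustrative examples only): the
   induction variable type is widened to the smallest canonical type
   that holds it, preserving signedness:

     short            -> int          (<= 32 bits, signed)
     unsigned char    -> uint32       (<= 32 bits, unsigned)
     long long        -> long long    (>  32 bits, signed)
     64-bit size_t    -> uint64       (>  32 bits, unsigned)  */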
2115
2116/* Build a decl for the omp child function. It won't contain a body
2117 yet, just the bare decl. */
2118
2119static void
2120create_omp_child_function (omp_context *ctx, bool task_copy)
2121{
2122 tree decl, type, name, t;
2123
2124 tree cilk_for_count
2125 = (flag_cilkplus && gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
2126 ? find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2127 OMP_CLAUSE__CILK_FOR_COUNT_) : NULL_TREE;
2128 tree cilk_var_type = NULL_TREE;
2129
2130 name = create_omp_child_function_name (task_copy,
2131 cilk_for_count != NULL_TREE);
2132 if (task_copy)
2133 type = build_function_type_list (void_type_node, ptr_type_node,
2134 ptr_type_node, NULL_TREE);
2135 else if (cilk_for_count)
2136 {
2137 type = TREE_TYPE (OMP_CLAUSE_OPERAND (cilk_for_count, 0));
2138 cilk_var_type = cilk_for_check_loop_diff_type (type);
2139 type = build_function_type_list (void_type_node, ptr_type_node,
2140 cilk_var_type, cilk_var_type, NULL_TREE);
2141 }
2142 else
2143 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
2144
2145 decl = build_decl (gimple_location (ctx->stmt), FUNCTION_DECL, name, type);
2146
2147 gcc_checking_assert (!is_gimple_omp_oacc (ctx->stmt)
2148 || !task_copy);
2149 if (!task_copy)
2150 ctx->cb.dst_fn = decl;
2151 else
2152 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
2153
2154 TREE_STATIC (decl) = 1;
2155 TREE_USED (decl) = 1;
2156 DECL_ARTIFICIAL (decl) = 1;
2157 DECL_IGNORED_P (decl) = 0;
2158 TREE_PUBLIC (decl) = 0;
2159 DECL_UNINLINABLE (decl) = 1;
2160 DECL_EXTERNAL (decl) = 0;
2161 DECL_CONTEXT (decl) = NULL_TREE;
2162 DECL_INITIAL (decl) = make_node (BLOCK);
2163 if (cgraph_node::get (current_function_decl)->offloadable)
2164 cgraph_node::get_create (decl)->offloadable = 1;
2165 else
2166 {
2167 omp_context *octx;
2168 for (octx = ctx; octx; octx = octx->outer)
2169 if (is_gimple_omp_offloaded (octx->stmt))
2170 {
2171 cgraph_node::get_create (decl)->offloadable = 1;
2172#ifdef ENABLE_OFFLOADING
2173 g->have_offload = true;
2174#endif
2175 break;
2176 }
2177 }
2178
2179 if (cgraph_node::get_create (decl)->offloadable
2180 && !lookup_attribute ("omp declare target",
2181 DECL_ATTRIBUTES (current_function_decl)))
2182 DECL_ATTRIBUTES (decl)
2183 = tree_cons (get_identifier ("omp target entrypoint"),
2184 NULL_TREE, DECL_ATTRIBUTES (decl));
2185
2186 t = build_decl (DECL_SOURCE_LOCATION (decl),
2187 RESULT_DECL, NULL_TREE, void_type_node);
2188 DECL_ARTIFICIAL (t) = 1;
2189 DECL_IGNORED_P (t) = 1;
2190 DECL_CONTEXT (t) = decl;
2191 DECL_RESULT (decl) = t;
2192
2193 /* _Cilk_for's child function requires two extra parameters called
2194 __low and __high that are set by the Cilk runtime when it calls this
2195 function. */
2196 if (cilk_for_count)
2197 {
2198 t = build_decl (DECL_SOURCE_LOCATION (decl),
2199 PARM_DECL, get_identifier ("__high"), cilk_var_type);
2200 DECL_ARTIFICIAL (t) = 1;
2201 DECL_NAMELESS (t) = 1;
2202 DECL_ARG_TYPE (t) = ptr_type_node;
2203 DECL_CONTEXT (t) = current_function_decl;
2204 TREE_USED (t) = 1;
2205 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2206 DECL_ARGUMENTS (decl) = t;
2207
2208 t = build_decl (DECL_SOURCE_LOCATION (decl),
2209 PARM_DECL, get_identifier ("__low"), cilk_var_type);
2210 DECL_ARTIFICIAL (t) = 1;
2211 DECL_NAMELESS (t) = 1;
2212 DECL_ARG_TYPE (t) = ptr_type_node;
2213 DECL_CONTEXT (t) = current_function_decl;
2214 TREE_USED (t) = 1;
2215 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2216 DECL_ARGUMENTS (decl) = t;
2217 }
2218
2219 tree data_name = get_identifier (".omp_data_i");
2220 t = build_decl (DECL_SOURCE_LOCATION (decl), PARM_DECL, data_name,
2221 ptr_type_node);
2222 DECL_ARTIFICIAL (t) = 1;
2223 DECL_NAMELESS (t) = 1;
2224 DECL_ARG_TYPE (t) = ptr_type_node;
2225 DECL_CONTEXT (t) = current_function_decl;
2226 TREE_USED (t) = 1;
2227 if (cilk_for_count)
2228 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2229 DECL_ARGUMENTS (decl) = t;
2230 if (!task_copy)
2231 ctx->receiver_decl = t;
2232 else
2233 {
2234 t = build_decl (DECL_SOURCE_LOCATION (decl),
2235 PARM_DECL, get_identifier (".omp_data_o"),
2236 ptr_type_node);
2237 DECL_ARTIFICIAL (t) = 1;
2238 DECL_NAMELESS (t) = 1;
2239 DECL_ARG_TYPE (t) = ptr_type_node;
2240 DECL_CONTEXT (t) = current_function_decl;
2241 TREE_USED (t) = 1;
2242 TREE_ADDRESSABLE (t) = 1;
2243 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2244 DECL_ARGUMENTS (decl) = t;
2245 }
2246
2247 /* Allocate memory for the function structure. The call to
2248 allocate_struct_function clobbers CFUN, so we need to restore
2249 it afterward. */
2250 push_struct_function (decl);
2251 cfun->function_end_locus = gimple_location (ctx->stmt);
2252 pop_cfun ();
2253}
2254
2255/* Callback for walk_gimple_seq. Check whether a combined parallel
2256 contains an OMP_FOR that has gimple_omp_for_combined_into_p set. */
2257
2258static tree
2259find_combined_for (gimple_stmt_iterator *gsi_p,
2260 bool *handled_ops_p,
2261 struct walk_stmt_info *wi)
2262{
2263 gimple stmt = gsi_stmt (*gsi_p);
2264
2265 *handled_ops_p = true;
2266 switch (gimple_code (stmt))
2267 {
2268 WALK_SUBSTMTS;
2269
2270 case GIMPLE_OMP_FOR:
2271 if (gimple_omp_for_combined_into_p (stmt)
2272 && gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
2273 {
2274 wi->info = stmt;
2275 return integer_zero_node;
2276 }
2277 break;
2278 default:
2279 break;
2280 }
2281 return NULL;
2282}
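/* For illustration (an assumed example, not from the original source):
   for a combined construct such as

     #pragma omp parallel for schedule(static)
     for (i = 0; i < n; i++) ...

   the front end produces a GIMPLE_OMP_PARALLEL whose body contains a
   GIMPLE_OMP_FOR with gimple_omp_for_combined_into_p set; the walk
   above stops at that inner loop and records it in wi->info.  */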
2283
2284/* Scan an OpenMP parallel directive. */
2285
2286static void
2287scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2288{
2289 omp_context *ctx;
2290 tree name;
2291 gomp_parallel *stmt = as_a <gomp_parallel *> (gsi_stmt (*gsi));
2292
2293 /* Ignore parallel directives with empty bodies, unless there
2294 are copyin clauses. */
2295 if (optimize > 0
2296 && empty_body_p (gimple_omp_body (stmt))
2297 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
2298 OMP_CLAUSE_COPYIN) == NULL)
2299 {
2300 gsi_replace (gsi, gimple_build_nop (), false);
2301 return;
2302 }
2303
2304 if (gimple_omp_parallel_combined_p (stmt))
2305 {
2306 struct walk_stmt_info wi;
2307
2308 memset (&wi, 0, sizeof (wi));
2309 wi.val_only = true;
2310 walk_gimple_seq (gimple_omp_body (stmt),
2311 find_combined_for, NULL, &wi);
2312 if (wi.info)
2313 {
2314 gomp_for *for_stmt = as_a <gomp_for *> ((gimple) wi.info);
2315 struct omp_for_data fd;
2316 extract_omp_for_data (for_stmt, &fd, NULL);
2317 /* We need two temporaries with fd.loop.v type (istart/iend)
2318 and then (fd.collapse - 1) temporaries with the same
2319 type for count2 ... countN-1 vars if not constant. */
2320 size_t count = 2, i;
2321 tree type = fd.iter_type;
2322 if (fd.collapse > 1
2323 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
2324 count += fd.collapse - 1;
2325 for (i = 0; i < count; i++)
2326 {
2327 tree temp = create_tmp_var (type);
2328 tree c = build_omp_clause (UNKNOWN_LOCATION,
2329 OMP_CLAUSE__LOOPTEMP_);
2330 insert_decl_map (&outer_ctx->cb, temp, temp);
2331 OMP_CLAUSE_DECL (c) = temp;
2332 OMP_CLAUSE_CHAIN (c) = gimple_omp_parallel_clauses (stmt);
2333 gimple_omp_parallel_set_clauses (stmt, c);
2334 }
2335 }
2336 }
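  /* Worked example (illustrative): for a combined

       #pragma omp parallel for collapse(2)
       for (i = 0; i < n; i++)
	 for (j = 0; j < m; j++) ...

     with a non-constant bound, count = 2 + (2 - 1) = 3, so three
     _LOOPTEMP_ clauses are prepended: the istart/iend pair plus one
     temporary for the inner loop's iteration count.  */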
2337
2338 ctx = new_omp_context (stmt, outer_ctx);
2339 taskreg_contexts.safe_push (ctx);
2340 if (taskreg_nesting_level > 1)
2341 ctx->is_nested = true;
2342 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2343 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2344 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2345 name = create_tmp_var_name (".omp_data_s");
2346 name = build_decl (gimple_location (stmt),
2347 TYPE_DECL, name, ctx->record_type);
2348 DECL_ARTIFICIAL (name) = 1;
2349 DECL_NAMELESS (name) = 1;
2350 TYPE_NAME (ctx->record_type) = name;
2351 create_omp_child_function (ctx, false);
2352 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
2353
2354 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
2355 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2356
2357 if (TYPE_FIELDS (ctx->record_type) == NULL)
2358 ctx->record_type = ctx->receiver_decl = NULL;
2359}
2360
2361/* Scan an OpenMP task directive. */
2362
2363static void
2364scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2365{
2366 omp_context *ctx;
2367 tree name, t;
2368 gomp_task *stmt = as_a <gomp_task *> (gsi_stmt (*gsi));
2369
2370 /* Ignore task directives with empty bodies. */
2371 if (optimize > 0
2372 && empty_body_p (gimple_omp_body (stmt)))
2373 {
2374 gsi_replace (gsi, gimple_build_nop (), false);
2375 return;
2376 }
2377
2378 ctx = new_omp_context (stmt, outer_ctx);
2379 taskreg_contexts.safe_push (ctx);
2380 if (taskreg_nesting_level > 1)
2381 ctx->is_nested = true;
2382 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2383 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2384 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2385 name = create_tmp_var_name (".omp_data_s");
2386 name = build_decl (gimple_location (stmt),
2387 TYPE_DECL, name, ctx->record_type);
2388 DECL_ARTIFICIAL (name) = 1;
2389 DECL_NAMELESS (name) = 1;
2390 TYPE_NAME (ctx->record_type) = name;
2391 create_omp_child_function (ctx, false);
2392 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
2393
2394 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
2395
2396 if (ctx->srecord_type)
2397 {
2398 name = create_tmp_var_name (".omp_data_a");
2399 name = build_decl (gimple_location (stmt),
2400 TYPE_DECL, name, ctx->srecord_type);
2401 DECL_ARTIFICIAL (name) = 1;
2402 DECL_NAMELESS (name) = 1;
2403 TYPE_NAME (ctx->srecord_type) = name;
2404 create_omp_child_function (ctx, true);
2405 }
2406
2407 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2408
2409 if (TYPE_FIELDS (ctx->record_type) == NULL)
2410 {
2411 ctx->record_type = ctx->receiver_decl = NULL;
2412 t = build_int_cst (long_integer_type_node, 0);
2413 gimple_omp_task_set_arg_size (stmt, t);
2414 t = build_int_cst (long_integer_type_node, 1);
2415 gimple_omp_task_set_arg_align (stmt, t);
2416 }
2417}
2418
2419
2420/* If any decls have been made addressable during scan_omp,
2421 adjust their fields if needed, and lay out the record types
2422 of parallel/task constructs. */
2423
2424static void
2425finish_taskreg_scan (omp_context *ctx)
2426{
2427 if (ctx->record_type == NULL_TREE)
2428 return;
2429
2430 /* If any task_shared_vars were needed, verify for all
2431 OMP_CLAUSE_SHARED clauses on GIMPLE_OMP_{PARALLEL,TASK}
2432 statements that use_pointer_for_field hasn't changed
2433 because of that. If it did, update the field types now. */
2434 if (task_shared_vars)
2435 {
2436 tree c;
2437
2438 for (c = gimple_omp_taskreg_clauses (ctx->stmt);
2439 c; c = OMP_CLAUSE_CHAIN (c))
2440 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED)
2441 {
2442 tree decl = OMP_CLAUSE_DECL (c);
2443
2444 /* Global variables don't need to be copied,
2445 the receiver side will use them directly. */
2446 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
2447 continue;
2448 if (!bitmap_bit_p (task_shared_vars, DECL_UID (decl))
2449 || !use_pointer_for_field (decl, ctx))
2450 continue;
2451 tree field = lookup_field (decl, ctx);
2452 if (TREE_CODE (TREE_TYPE (field)) == POINTER_TYPE
2453 && TREE_TYPE (TREE_TYPE (field)) == TREE_TYPE (decl))
2454 continue;
2455 TREE_TYPE (field) = build_pointer_type (TREE_TYPE (decl));
2456 TREE_THIS_VOLATILE (field) = 0;
2457 DECL_USER_ALIGN (field) = 0;
2458 DECL_ALIGN (field) = TYPE_ALIGN (TREE_TYPE (field));
2459 if (TYPE_ALIGN (ctx->record_type) < DECL_ALIGN (field))
2460 TYPE_ALIGN (ctx->record_type) = DECL_ALIGN (field);
2461 if (ctx->srecord_type)
2462 {
2463 tree sfield = lookup_sfield (decl, ctx);
2464 TREE_TYPE (sfield) = TREE_TYPE (field);
2465 TREE_THIS_VOLATILE (sfield) = 0;
2466 DECL_USER_ALIGN (sfield) = 0;
2467 DECL_ALIGN (sfield) = DECL_ALIGN (field);
2468 if (TYPE_ALIGN (ctx->srecord_type) < DECL_ALIGN (sfield))
2469 TYPE_ALIGN (ctx->srecord_type) = DECL_ALIGN (sfield);
2470 }
2471 }
2472 }
2473
2474 if (gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
2475 {
2476 layout_type (ctx->record_type);
2477 fixup_child_record_type (ctx);
2478 }
2479 else
2480 {
2481 location_t loc = gimple_location (ctx->stmt);
2482 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
2483 /* Move VLA fields to the end. */
2484 p = &TYPE_FIELDS (ctx->record_type);
2485 while (*p)
2486 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
2487 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
2488 {
2489 *q = *p;
2490 *p = TREE_CHAIN (*p);
2491 TREE_CHAIN (*q) = NULL_TREE;
2492 q = &TREE_CHAIN (*q);
2493 }
2494 else
2495 p = &DECL_CHAIN (*p);
2496 *p = vla_fields;
2497 layout_type (ctx->record_type);
2498 fixup_child_record_type (ctx);
2499 if (ctx->srecord_type)
2500 layout_type (ctx->srecord_type);
2501 tree t = fold_convert_loc (loc, long_integer_type_node,
2502 TYPE_SIZE_UNIT (ctx->record_type));
2503 gimple_omp_task_set_arg_size (ctx->stmt, t);
2504 t = build_int_cst (long_integer_type_node,
2505 TYPE_ALIGN_UNIT (ctx->record_type));
2506 gimple_omp_task_set_arg_align (ctx->stmt, t);
2507 }
2508}
2509
2510
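/* Return the innermost GIMPLE_OMP_TARGET context enclosing CTX,
   which must exist.  */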
2511static omp_context *
2512enclosing_target_ctx (omp_context *ctx)
2513{
2514 while (ctx != NULL
2515 && gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET)
2516 ctx = ctx->outer;
2517 gcc_assert (ctx != NULL);
2518 return ctx;
2519}
2520
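/* Return true if STMT is an OpenACC parallel or kernels target region,
   or an OpenACC loop.  */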
2521static bool
2522oacc_loop_or_target_p (gimple stmt)
2523{
2524 enum gimple_code outer_type = gimple_code (stmt);
2525 return ((outer_type == GIMPLE_OMP_TARGET
2526 && ((gimple_omp_target_kind (stmt)
2527 == GF_OMP_TARGET_KIND_OACC_PARALLEL)
2528 || (gimple_omp_target_kind (stmt)
2529 == GF_OMP_TARGET_KIND_OACC_KERNELS)))
2530 || (outer_type == GIMPLE_OMP_FOR
2531 && gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_OACC_LOOP));
2532}
2533
2534/* Scan a GIMPLE_OMP_FOR. */
2535
2536static void
2537scan_omp_for (gomp_for *stmt, omp_context *outer_ctx)
2538{
2539 enum gimple_code outer_type = GIMPLE_ERROR_MARK;
2540 omp_context *ctx;
2541 size_t i;
2542 tree clauses = gimple_omp_for_clauses (stmt);
2543
2544 if (outer_ctx)
2545 outer_type = gimple_code (outer_ctx->stmt);
2546
2547 ctx = new_omp_context (stmt, outer_ctx);
2548
2549 if (is_gimple_omp_oacc (stmt))
2550 {
2551 if (outer_ctx && outer_type == GIMPLE_OMP_FOR)
2552 ctx->gwv_this = outer_ctx->gwv_this;
2553 for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
2554 {
2555 int val;
2556 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_GANG)
2557 val = MASK_GANG;
2558 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_WORKER)
2559 val = MASK_WORKER;
2560 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_VECTOR)
2561 val = MASK_VECTOR;
2562 else
2563 continue;
2564 ctx->gwv_this |= val;
2565 if (!outer_ctx)
2566 {
2567 /* Skip; not nested inside a region. */
2568 continue;
2569 }
2570 if (!oacc_loop_or_target_p (outer_ctx->stmt))
2571 {
2572 /* Skip; not nested inside an OpenACC region. */
2573 continue;
2574 }
2575 if (outer_type == GIMPLE_OMP_FOR)
2576 outer_ctx->gwv_below |= val;
2577 if (OMP_CLAUSE_OPERAND (c, 0) != NULL_TREE)
2578 {
2579 omp_context *enclosing = enclosing_target_ctx (outer_ctx);
2580 if (gimple_omp_target_kind (enclosing->stmt)
2581 == GF_OMP_TARGET_KIND_OACC_PARALLEL)
2582 error_at (gimple_location (stmt),
2583 "no arguments allowed to gang, worker and vector clauses inside parallel");
2584 }
2585 }
2586 }
2587
2588 scan_sharing_clauses (clauses, ctx);
2589
2590 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
2591 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
2592 {
2593 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
2594 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
2595 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
2596 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
2597 }
2598 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2599
2600 if (is_gimple_omp_oacc (stmt))
2601 {
2602 if (ctx->gwv_this & ctx->gwv_below)
2603 error_at (gimple_location (stmt),
2604 "gang, worker and vector may occur only once in a loop nest");
2605 else if (ctx->gwv_below != 0
2606 && ctx->gwv_this > ctx->gwv_below)
2607 error_at (gimple_location (stmt),
2608 "gang, worker and vector must occur in this order in a loop nest");
2609 if (outer_ctx && outer_type == GIMPLE_OMP_FOR)
2610 outer_ctx->gwv_below |= ctx->gwv_below;
2611 }
2612}
2613
2614/* Scan an OpenMP sections directive. */
2615
2616static void
2617scan_omp_sections (gomp_sections *stmt, omp_context *outer_ctx)
2618{
2619 omp_context *ctx;
2620
2621 ctx = new_omp_context (stmt, outer_ctx);
2622 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
2623 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2624}
2625
2626/* Scan an OpenMP single directive. */
2627
2628static void
2629scan_omp_single (gomp_single *stmt, omp_context *outer_ctx)
2630{
2631 omp_context *ctx;
2632 tree name;
2633
2634 ctx = new_omp_context (stmt, outer_ctx);
2635 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2636 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2637 name = create_tmp_var_name (".omp_copy_s");
2638 name = build_decl (gimple_location (stmt),
2639 TYPE_DECL, name, ctx->record_type);
2640 TYPE_NAME (ctx->record_type) = name;
2641
2642 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
2643 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2644
2645 if (TYPE_FIELDS (ctx->record_type) == NULL)
2646 ctx->record_type = NULL;
2647 else
2648 layout_type (ctx->record_type);
2649}
2650
2651/* Scan a GIMPLE_OMP_TARGET. */
2652
2653static void
2654scan_omp_target (gomp_target *stmt, omp_context *outer_ctx)
2655{
2656 omp_context *ctx;
2657 tree name;
2658 bool offloaded = is_gimple_omp_offloaded (stmt);
2659 tree clauses = gimple_omp_target_clauses (stmt);
2660
2661 ctx = new_omp_context (stmt, outer_ctx);
2662 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2663 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2664 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2665 name = create_tmp_var_name (".omp_data_t");
2666 name = build_decl (gimple_location (stmt),
2667 TYPE_DECL, name, ctx->record_type);
2668 DECL_ARTIFICIAL (name) = 1;
2669 DECL_NAMELESS (name) = 1;
2670 TYPE_NAME (ctx->record_type) = name;
2671 if (offloaded)
2672 {
2673 if (is_gimple_omp_oacc (stmt))
2674 ctx->reduction_map = splay_tree_new (splay_tree_compare_pointers,
2675 0, 0);
2676
2677 create_omp_child_function (ctx, false);
2678 gimple_omp_target_set_child_fn (stmt, ctx->cb.dst_fn);
2679 }
2680
2681 if (is_gimple_omp_oacc (stmt))
2682 {
2683 for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
2684 {
2685 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_NUM_GANGS)
2686 ctx->gwv_this |= MASK_GANG;
2687 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_NUM_WORKERS)
2688 ctx->gwv_this |= MASK_WORKER;
2689 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_VECTOR_LENGTH)
2690 ctx->gwv_this |= MASK_VECTOR;
2691 }
2692 }
2693
2694 scan_sharing_clauses (clauses, ctx);
2695 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2696
2697 if (TYPE_FIELDS (ctx->record_type) == NULL)
2698 ctx->record_type = ctx->receiver_decl = NULL;
2699 else
2700 {
2701 TYPE_FIELDS (ctx->record_type)
2702 = nreverse (TYPE_FIELDS (ctx->record_type));
2703#ifdef ENABLE_CHECKING
2704 tree field;
2705 unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
2706 for (field = TYPE_FIELDS (ctx->record_type);
2707 field;
2708 field = DECL_CHAIN (field))
2709 gcc_assert (DECL_ALIGN (field) == align);
2710#endif
2711 layout_type (ctx->record_type);
2712 if (offloaded)
2713 fixup_child_record_type (ctx);
2714 }
2715}
2716
2717/* Scan an OpenMP teams directive. */
2718
2719static void
2720scan_omp_teams (gomp_teams *stmt, omp_context *outer_ctx)
2721{
2722 omp_context *ctx = new_omp_context (stmt, outer_ctx);
2723 scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
2724 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2725}
2726
2727/* Check nesting restrictions. */
2728static bool
2729check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
2730{
2731 /* No nesting of non-OpenACC STMT (that is, an OpenMP one, or a GOMP builtin)
2732 inside an OpenACC CTX. */
2733 if (!(is_gimple_omp (stmt)
2734 && is_gimple_omp_oacc (stmt)))
2735 {
2736 for (omp_context *ctx_ = ctx; ctx_ != NULL; ctx_ = ctx_->outer)
2737 if (is_gimple_omp (ctx_->stmt)
2738 && is_gimple_omp_oacc (ctx_->stmt))
2739 {
2740 error_at (gimple_location (stmt),
2741 "non-OpenACC construct inside of OpenACC region");
2742 return false;
2743 }
2744 }
2745
2746 if (ctx != NULL)
2747 {
2748 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2749 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
2750 {
2751 error_at (gimple_location (stmt),
2752 "OpenMP constructs may not be nested inside simd region");
2753 return false;
2754 }
2755 else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
2756 {
2757 if ((gimple_code (stmt) != GIMPLE_OMP_FOR
2758 || (gimple_omp_for_kind (stmt)
2759 != GF_OMP_FOR_KIND_DISTRIBUTE))
2760 && gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
2761 {
2762 error_at (gimple_location (stmt),
2763 "only distribute or parallel constructs are allowed to "
2764 "be closely nested inside teams construct");
2765 return false;
2766 }
2767 }
2768 }
2769 switch (gimple_code (stmt))
2770 {
2771 case GIMPLE_OMP_FOR:
2772 if (gimple_omp_for_kind (stmt) & GF_OMP_FOR_SIMD)
2773 return true;
2774 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
2775 {
2776 if (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TEAMS)
2777 {
2778 error_at (gimple_location (stmt),
2779 "distribute construct must be closely nested inside "
2780 "teams construct");
2781 return false;
2782 }
2783 return true;
2784 }
2785 /* FALLTHRU */
2786 case GIMPLE_CALL:
2787 if (is_gimple_call (stmt)
2788 && (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2789 == BUILT_IN_GOMP_CANCEL
2790 || DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2791 == BUILT_IN_GOMP_CANCELLATION_POINT))
2792 {
2793 const char *bad = NULL;
2794 const char *kind = NULL;
2795 if (ctx == NULL)
2796 {
2797 error_at (gimple_location (stmt), "orphaned %qs construct",
2798 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2799 == BUILT_IN_GOMP_CANCEL
2800 ? "#pragma omp cancel"
2801 : "#pragma omp cancellation point");
2802 return false;
2803 }
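	  /* Editorial note: the first argument of GOMP_cancel and
	     GOMP_cancellation_point is a mask selecting the construct
	     being cancelled, as decoded by the cases below: 1 parallel,
	     2 for, 4 sections, 8 taskgroup.  E.g. "#pragma omp cancel
	     for" is lowered to roughly GOMP_cancel (2, if-cond).  */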
2804 switch (tree_fits_shwi_p (gimple_call_arg (stmt, 0))
2805 ? tree_to_shwi (gimple_call_arg (stmt, 0))
2806 : 0)
2807 {
2808 case 1:
2809 if (gimple_code (ctx->stmt) != GIMPLE_OMP_PARALLEL)
2810 bad = "#pragma omp parallel";
2811 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2812 == BUILT_IN_GOMP_CANCEL
2813 && !integer_zerop (gimple_call_arg (stmt, 1)))
2814 ctx->cancellable = true;
2815 kind = "parallel";
2816 break;
2817 case 2:
2818 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
2819 || gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR)
2820 bad = "#pragma omp for";
2821 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2822 == BUILT_IN_GOMP_CANCEL
2823 && !integer_zerop (gimple_call_arg (stmt, 1)))
2824 {
2825 ctx->cancellable = true;
2826 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2827 OMP_CLAUSE_NOWAIT))
2828 warning_at (gimple_location (stmt), 0,
2829 "%<#pragma omp cancel for%> inside "
2830 "%<nowait%> for construct");
2831 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2832 OMP_CLAUSE_ORDERED))
2833 warning_at (gimple_location (stmt), 0,
2834 "%<#pragma omp cancel for%> inside "
2835 "%<ordered%> for construct");
2836 }
2837 kind = "for";
2838 break;
2839 case 4:
2840 if (gimple_code (ctx->stmt) != GIMPLE_OMP_SECTIONS
2841 && gimple_code (ctx->stmt) != GIMPLE_OMP_SECTION)
2842 bad = "#pragma omp sections";
2843 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2844 == BUILT_IN_GOMP_CANCEL
2845 && !integer_zerop (gimple_call_arg (stmt, 1)))
2846 {
2847 if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
2848 {
2849 ctx->cancellable = true;
2850 if (find_omp_clause (gimple_omp_sections_clauses
2851 (ctx->stmt),
2852 OMP_CLAUSE_NOWAIT))
2853 warning_at (gimple_location (stmt), 0,
2854 "%<#pragma omp cancel sections%> inside "
2855 "%<nowait%> sections construct");
2856 }
2857 else
2858 {
2859 gcc_assert (ctx->outer
2860 && gimple_code (ctx->outer->stmt)
2861 == GIMPLE_OMP_SECTIONS);
2862 ctx->outer->cancellable = true;
2863 if (find_omp_clause (gimple_omp_sections_clauses
2864 (ctx->outer->stmt),
2865 OMP_CLAUSE_NOWAIT))
2866 warning_at (gimple_location (stmt), 0,
2867 "%<#pragma omp cancel sections%> inside "
2868 "%<nowait%> sections construct");
2869 }
2870 }
2871 kind = "sections";
2872 break;
2873 case 8:
2874 if (gimple_code (ctx->stmt) != GIMPLE_OMP_TASK)
2875 bad = "#pragma omp task";
2876 else
2877 ctx->cancellable = true;
2878 kind = "taskgroup";
2879 break;
2880 default:
2881 error_at (gimple_location (stmt), "invalid arguments");
2882 return false;
2883 }
2884 if (bad)
2885 {
2886 error_at (gimple_location (stmt),
2887 "%<%s %s%> construct not closely nested inside of %qs",
2888 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2889 == BUILT_IN_GOMP_CANCEL
2890 ? "#pragma omp cancel"
2891 : "#pragma omp cancellation point", kind, bad);
2892 return false;
2893 }
2894 }
2895 /* FALLTHRU */
2896 case GIMPLE_OMP_SECTIONS:
2897 case GIMPLE_OMP_SINGLE:
2898 for (; ctx != NULL; ctx = ctx->outer)
2899 switch (gimple_code (ctx->stmt))
2900 {
2901 case GIMPLE_OMP_FOR:
2902 case GIMPLE_OMP_SECTIONS:
2903 case GIMPLE_OMP_SINGLE:
2904 case GIMPLE_OMP_ORDERED:
2905 case GIMPLE_OMP_MASTER:
2906 case GIMPLE_OMP_TASK:
2907 case GIMPLE_OMP_CRITICAL:
2908 if (is_gimple_call (stmt))
2909 {
2910 if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2911 != BUILT_IN_GOMP_BARRIER)
2912 return true;
2913 error_at (gimple_location (stmt),
2914 "barrier region may not be closely nested inside "
2915 "of work-sharing, critical, ordered, master or "
2916 "explicit task region");
2917 return false;
2918 }
2919 error_at (gimple_location (stmt),
2920 "work-sharing region may not be closely nested inside "
2921 "of work-sharing, critical, ordered, master or explicit "
2922 "task region");
2923 return false;
2924 case GIMPLE_OMP_PARALLEL:
2925 return true;
2926 default:
2927 break;
2928 }
2929 break;
2930 case GIMPLE_OMP_MASTER:
2931 for (; ctx != NULL; ctx = ctx->outer)
2932 switch (gimple_code (ctx->stmt))
2933 {
2934 case GIMPLE_OMP_FOR:
2935 case GIMPLE_OMP_SECTIONS:
2936 case GIMPLE_OMP_SINGLE:
2937 case GIMPLE_OMP_TASK:
2938 error_at (gimple_location (stmt),
2939 "master region may not be closely nested inside "
2940 "of work-sharing or explicit task region");
2941 return false;
2942 case GIMPLE_OMP_PARALLEL:
2943 return true;
2944 default:
2945 break;
2946 }
2947 break;
2948 case GIMPLE_OMP_ORDERED:
2949 for (; ctx != NULL; ctx = ctx->outer)
2950 switch (gimple_code (ctx->stmt))
2951 {
2952 case GIMPLE_OMP_CRITICAL:
2953 case GIMPLE_OMP_TASK:
2954 error_at (gimple_location (stmt),
2955 "ordered region may not be closely nested inside "
2956 "of critical or explicit task region");
2957 return false;
2958 case GIMPLE_OMP_FOR:
2959 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2960 OMP_CLAUSE_ORDERED) == NULL)
2961 {
2962 error_at (gimple_location (stmt),
2963 "ordered region must be closely nested inside "
2964 "a loop region with an ordered clause");
2965 return false;
2966 }
2967 return true;
2968 case GIMPLE_OMP_PARALLEL:
2969 error_at (gimple_location (stmt),
2970 "ordered region must be closely nested inside "
2971 "a loop region with an ordered clause");
2972 return false;
2973 default:
2974 break;
2975 }
2976 break;
2977 case GIMPLE_OMP_CRITICAL:
2978 {
2979 tree this_stmt_name
2980 = gimple_omp_critical_name (as_a <gomp_critical *> (stmt));
2981 for (; ctx != NULL; ctx = ctx->outer)
2982 if (gomp_critical *other_crit
2983 = dyn_cast <gomp_critical *> (ctx->stmt))
2984 if (this_stmt_name == gimple_omp_critical_name (other_crit))
2985 {
2986 error_at (gimple_location (stmt),
2987 "critical region may not be nested inside a critical "
2988 "region with the same name");
2989 return false;
2990 }
2991 }
2992 break;
2993 case GIMPLE_OMP_TEAMS:
2994 if (ctx == NULL
2995 || gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET
2996 || gimple_omp_target_kind (ctx->stmt) != GF_OMP_TARGET_KIND_REGION)
2997 {
2998 error_at (gimple_location (stmt),
2999 "teams construct not closely nested inside of target "
3000 "region");
3001 return false;
3002 }
3003 break;
3004 case GIMPLE_OMP_TARGET:
3005 for (; ctx != NULL; ctx = ctx->outer)
3006 {
3007 if (gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET)
3008 {
3009 if (is_gimple_omp (stmt)
3010 && is_gimple_omp_oacc (stmt)
3011 && is_gimple_omp (ctx->stmt))
3012 {
3013 error_at (gimple_location (stmt),
3014 "OpenACC construct inside of non-OpenACC region");
3015 return false;
3016 }
3017 continue;
3018 }
3019
3020 const char *stmt_name, *ctx_stmt_name;
3021 switch (gimple_omp_target_kind (stmt))
3022 {
3023 case GF_OMP_TARGET_KIND_REGION: stmt_name = "target"; break;
3024 case GF_OMP_TARGET_KIND_DATA: stmt_name = "target data"; break;
3025 case GF_OMP_TARGET_KIND_UPDATE: stmt_name = "target update"; break;
3026 case GF_OMP_TARGET_KIND_OACC_PARALLEL: stmt_name = "parallel"; break;
3027 case GF_OMP_TARGET_KIND_OACC_KERNELS: stmt_name = "kernels"; break;
3028 case GF_OMP_TARGET_KIND_OACC_DATA: stmt_name = "data"; break;
3029 case GF_OMP_TARGET_KIND_OACC_UPDATE: stmt_name = "update"; break;
3030 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA: stmt_name = "enter/exit data"; break;
3031 default: gcc_unreachable ();
3032 }
3033 switch (gimple_omp_target_kind (ctx->stmt))
3034 {
3035 case GF_OMP_TARGET_KIND_REGION: ctx_stmt_name = "target"; break;
3036 case GF_OMP_TARGET_KIND_DATA: ctx_stmt_name = "target data"; break;
3037 case GF_OMP_TARGET_KIND_OACC_PARALLEL: ctx_stmt_name = "parallel"; break;
3038 case GF_OMP_TARGET_KIND_OACC_KERNELS: ctx_stmt_name = "kernels"; break;
3039 case GF_OMP_TARGET_KIND_OACC_DATA: ctx_stmt_name = "data"; break;
3040 default: gcc_unreachable ();
3041 }
3042
3043 /* OpenACC/OpenMP mismatch? */
3044 if (is_gimple_omp_oacc (stmt)
3045 != is_gimple_omp_oacc (ctx->stmt))
3046 {
3047 error_at (gimple_location (stmt),
3048 "%s %s construct inside of %s %s region",
3049 (is_gimple_omp_oacc (stmt)
3050 ? "OpenACC" : "OpenMP"), stmt_name,
3051 (is_gimple_omp_oacc (ctx->stmt)
3052 ? "OpenACC" : "OpenMP"), ctx_stmt_name);
3053 return false;
3054 }
3055 if (is_gimple_omp_offloaded (ctx->stmt))
3056 {
3057 /* No GIMPLE_OMP_TARGET inside offloaded OpenACC CTX. */
3058 if (is_gimple_omp_oacc (ctx->stmt))
3059 {
3060 error_at (gimple_location (stmt),
3061 "%s construct inside of %s region",
3062 stmt_name, ctx_stmt_name);
3063 return false;
3064 }
3065 else
3066 {
3067 gcc_checking_assert (!is_gimple_omp_oacc (stmt));
3068 warning_at (gimple_location (stmt), 0,
3069 "%s construct inside of %s region",
3070 stmt_name, ctx_stmt_name);
3071 }
3072 }
3073 }
3074 break;
3075 default:
3076 break;
3077 }
3078 return true;
3079}
3080
3081
3082/* Helper function for scan_omp.
3083
3084 Callback for walk_tree or operators in walk_gimple_stmt used to
3085 scan for OMP directives in TP. */
3086
3087static tree
3088scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
3089{
3090 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
3091 omp_context *ctx = (omp_context *) wi->info;
3092 tree t = *tp;
3093
3094 switch (TREE_CODE (t))
3095 {
3096 case VAR_DECL:
3097 case PARM_DECL:
3098 case LABEL_DECL:
3099 case RESULT_DECL:
3100 if (ctx)
3101 *tp = remap_decl (t, &ctx->cb);
3102 break;
3103
3104 default:
3105 if (ctx && TYPE_P (t))
3106 *tp = remap_type (t, &ctx->cb);
3107 else if (!DECL_P (t))
3108 {
3109 *walk_subtrees = 1;
3110 if (ctx)
3111 {
3112 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
3113 if (tem != TREE_TYPE (t))
3114 {
3115 if (TREE_CODE (t) == INTEGER_CST)
3116 *tp = wide_int_to_tree (tem, t);
3117 else
3118 TREE_TYPE (t) = tem;
3119 }
3120 }
3121 }
3122 break;
3123 }
3124
3125 return NULL_TREE;
3126}
3127
3128/* Return true if FNDECL is a setjmp or a longjmp. */
3129
3130static bool
3131setjmp_or_longjmp_p (const_tree fndecl)
3132{
3133 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
3134 && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_SETJMP
3135 || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_LONGJMP))
3136 return true;
3137
3138 tree declname = DECL_NAME (fndecl);
3139 if (!declname)
3140 return false;
3141 const char *name = IDENTIFIER_POINTER (declname);
3142 return !strcmp (name, "setjmp") || !strcmp (name, "longjmp");
3143}
3144
3145
3146/* Helper function for scan_omp.
3147
3148 Callback for walk_gimple_stmt used to scan for OMP directives in
3149 the current statement in GSI. */
3150
3151static tree
3152scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
3153 struct walk_stmt_info *wi)
3154{
3155 gimple stmt = gsi_stmt (*gsi);
3156 omp_context *ctx = (omp_context *) wi->info;
3157
3158 if (gimple_has_location (stmt))
3159 input_location = gimple_location (stmt);
3160
3161 /* Check the nesting restrictions. */
3162 bool remove = false;
3163 if (is_gimple_omp (stmt))
3164 remove = !check_omp_nesting_restrictions (stmt, ctx);
3165 else if (is_gimple_call (stmt))
3166 {
3167 tree fndecl = gimple_call_fndecl (stmt);
3168 if (fndecl)
3169 {
3170 if (setjmp_or_longjmp_p (fndecl)
3171 && ctx
3172 && gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3173 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
3174 {
3175 remove = true;
3176 error_at (gimple_location (stmt),
3177 "setjmp/longjmp inside simd construct");
3178 }
3179 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
3180 switch (DECL_FUNCTION_CODE (fndecl))
3181 {
3182 case BUILT_IN_GOMP_BARRIER:
3183 case BUILT_IN_GOMP_CANCEL:
3184 case BUILT_IN_GOMP_CANCELLATION_POINT:
3185 case BUILT_IN_GOMP_TASKYIELD:
3186 case BUILT_IN_GOMP_TASKWAIT:
3187 case BUILT_IN_GOMP_TASKGROUP_START:
3188 case BUILT_IN_GOMP_TASKGROUP_END:
3189 remove = !check_omp_nesting_restrictions (stmt, ctx);
3190 break;
3191 default:
3192 break;
3193 }
3194 }
3195 }
3196 if (remove)
3197 {
3198 stmt = gimple_build_nop ();
3199 gsi_replace (gsi, stmt, false);
3200 }
3201
3202 *handled_ops_p = true;
3203
3204 switch (gimple_code (stmt))
3205 {
3206 case GIMPLE_OMP_PARALLEL:
3207 taskreg_nesting_level++;
3208 scan_omp_parallel (gsi, ctx);
3209 taskreg_nesting_level--;
3210 break;
3211
3212 case GIMPLE_OMP_TASK:
3213 taskreg_nesting_level++;
3214 scan_omp_task (gsi, ctx);
3215 taskreg_nesting_level--;
3216 break;
3217
3218 case GIMPLE_OMP_FOR:
3219 scan_omp_for (as_a <gomp_for *> (stmt), ctx);
3220 break;
3221
3222 case GIMPLE_OMP_SECTIONS:
3223 scan_omp_sections (as_a <gomp_sections *> (stmt), ctx);
3224 break;
3225
3226 case GIMPLE_OMP_SINGLE:
3227 scan_omp_single (as_a <gomp_single *> (stmt), ctx);
3228 break;
3229
3230 case GIMPLE_OMP_SECTION:
3231 case GIMPLE_OMP_MASTER:
3232 case GIMPLE_OMP_TASKGROUP:
3233 case GIMPLE_OMP_ORDERED:
3234 case GIMPLE_OMP_CRITICAL:
3235 ctx = new_omp_context (stmt, ctx);
3236 scan_omp (gimple_omp_body_ptr (stmt), ctx);
3237 break;
3238
3239 case GIMPLE_OMP_TARGET:
3240 scan_omp_target (as_a <gomp_target *> (stmt), ctx);
3241 break;
3242
3243 case GIMPLE_OMP_TEAMS:
3244 scan_omp_teams (as_a <gomp_teams *> (stmt), ctx);
3245 break;
3246
3247 case GIMPLE_BIND:
3248 {
3249 tree var;
3250
3251 *handled_ops_p = false;
3252 if (ctx)
3253 for (var = gimple_bind_vars (as_a <gbind *> (stmt));
3254 var ;
3255 var = DECL_CHAIN (var))
3256 insert_decl_map (&ctx->cb, var, var);
3257 }
3258 break;
3259 default:
3260 *handled_ops_p = false;
3261 break;
3262 }
3263
3264 return NULL_TREE;
3265}
3266
3267
3268/* Scan all the statements starting at the current statement. CTX
3269 contains context information about the OMP directives and
3270 clauses found during the scan. */
3271
3272static void
3273scan_omp (gimple_seq *body_p, omp_context *ctx)
3274{
3275 location_t saved_location;
3276 struct walk_stmt_info wi;
3277
3278 memset (&wi, 0, sizeof (wi));
3279 wi.info = ctx;
3280 wi.want_locations = true;
3281
3282 saved_location = input_location;
3283 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
3284 input_location = saved_location;
3285}
3286\f
3287/* Re-gimplification and code generation routines. */
3288
3289/* Build a call to GOMP_barrier. */
3290
3291static gimple
3292build_omp_barrier (tree lhs)
3293{
3294 tree fndecl = builtin_decl_explicit (lhs ? BUILT_IN_GOMP_BARRIER_CANCEL
3295 : BUILT_IN_GOMP_BARRIER);
3296 gcall *g = gimple_build_call (fndecl, 0);
3297 if (lhs)
3298 gimple_call_set_lhs (g, lhs);
3299 return g;
3300}
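/* Usage sketch (assumed, not from the original source): callers pass a
   NULL lhs for a plain barrier, or a boolean temporary inside a
   cancellable region, yielding GIMPLE along the lines of:

     GOMP_barrier ();                     (lhs == NULL_TREE)
     cancelled = GOMP_barrier_cancel ();  (lhs given; a test of
					   "cancelled" follows)  */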
3301
3302/* If a context was created for STMT when it was scanned, return it. */
3303
3304static omp_context *
3305maybe_lookup_ctx (gimple stmt)
3306{
3307 splay_tree_node n;
3308 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
3309 return n ? (omp_context *) n->value : NULL;
3310}
3311
3312
3313/* Find the mapping for DECL in CTX or the immediately enclosing
3314 context that has a mapping for DECL.
3315
3316 If CTX is a nested parallel directive, we may have to use the decl
3317 mappings created in CTX's parent context. Suppose that we have the
3318 following parallel nesting (variable UIDs shown for clarity):
3319
3320 iD.1562 = 0;
3321 #omp parallel shared(iD.1562) -> outer parallel
3322 iD.1562 = iD.1562 + 1;
3323
3324 #omp parallel shared (iD.1562) -> inner parallel
3325 iD.1562 = iD.1562 - 1;
3326
3327 Each parallel structure will create a distinct .omp_data_s structure
3328 for copying iD.1562 in/out of the directive:
3329
3330 outer parallel .omp_data_s.1.i -> iD.1562
3331 inner parallel .omp_data_s.2.i -> iD.1562
3332
3333 A shared variable mapping will produce a copy-out operation before
3334 the parallel directive and a copy-in operation after it. So, in
3335 this case we would have:
3336
3337 iD.1562 = 0;
3338 .omp_data_o.1.i = iD.1562;
3339 #omp parallel shared(iD.1562) -> outer parallel
3340 .omp_data_i.1 = &.omp_data_o.1
3341 .omp_data_i.1->i = .omp_data_i.1->i + 1;
3342
3343 .omp_data_o.2.i = iD.1562; -> **
3344 #omp parallel shared(iD.1562) -> inner parallel
3345 .omp_data_i.2 = &.omp_data_o.2
3346 .omp_data_i.2->i = .omp_data_i.2->i - 1;
3347
3348
3349 ** This is a problem. The symbol iD.1562 cannot be referenced
3350 inside the body of the outer parallel region. But since we are
3351 emitting this copy operation while expanding the inner parallel
3352 directive, we need to access the CTX structure of the outer
3353 parallel directive to get the correct mapping:
3354
3355 .omp_data_o.2.i = .omp_data_i.1->i
3356
3357 Since there may be other workshare or parallel directives enclosing
3358 the parallel directive, it may be necessary to walk up the context
3359 parent chain. This is not a problem in general because nested
3360 parallelism happens only rarely. */
3361
3362static tree
3363lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
3364{
3365 tree t;
3366 omp_context *up;
3367
3368 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
3369 t = maybe_lookup_decl (decl, up);
3370
3371 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
3372
3373 return t ? t : decl;
3374}
3375
3376
3377/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
3378 in outer contexts. */
3379
3380static tree
3381maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
3382{
3383 tree t = NULL;
3384 omp_context *up;
3385
3386 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
3387 t = maybe_lookup_decl (decl, up);
3388
3389 return t ? t : decl;
3390}
3391
3392
3393/* Construct the initialization value for reduction CLAUSE. */
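/* Illustrative examples (editorial addition) of the identity values
   returned below for a few reduction operators:

     reduction (+:x)   ->  0
     reduction (*:x)   ->  1
     reduction (&:x)   ->  ~0 (all bits set)
     reduction (max:x) ->  TYPE_MIN_VALUE, or -Inf for floats with
			   infinities
     reduction (min:x) ->  TYPE_MAX_VALUE, or +Inf likewise  */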
3394
3395tree
3396omp_reduction_init (tree clause, tree type)
3397{
3398 location_t loc = OMP_CLAUSE_LOCATION (clause);
3399 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
3400 {
3401 case PLUS_EXPR:
3402 case MINUS_EXPR:
3403 case BIT_IOR_EXPR:
3404 case BIT_XOR_EXPR:
3405 case TRUTH_OR_EXPR:
3406 case TRUTH_ORIF_EXPR:
3407 case TRUTH_XOR_EXPR:
3408 case NE_EXPR:
3409 return build_zero_cst (type);
3410
3411 case MULT_EXPR:
3412 case TRUTH_AND_EXPR:
3413 case TRUTH_ANDIF_EXPR:
3414 case EQ_EXPR:
3415 return fold_convert_loc (loc, type, integer_one_node);
3416
3417 case BIT_AND_EXPR:
3418 return fold_convert_loc (loc, type, integer_minus_one_node);
3419
3420 case MAX_EXPR:
3421 if (SCALAR_FLOAT_TYPE_P (type))
3422 {
3423 REAL_VALUE_TYPE max, min;
3424 if (HONOR_INFINITIES (type))
3425 {
3426 real_inf (&max);
3427 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
3428 }
3429 else
3430 real_maxval (&min, 1, TYPE_MODE (type));
3431 return build_real (type, min);
3432 }
3433 else
3434 {
3435 gcc_assert (INTEGRAL_TYPE_P (type));
3436 return TYPE_MIN_VALUE (type);
3437 }
3438
3439 case MIN_EXPR:
3440 if (SCALAR_FLOAT_TYPE_P (type))
3441 {
3442 REAL_VALUE_TYPE max;
3443 if (HONOR_INFINITIES (type))
3444 real_inf (&max);
3445 else
3446 real_maxval (&max, 0, TYPE_MODE (type));
3447 return build_real (type, max);
3448 }
3449 else
3450 {
3451 gcc_assert (INTEGRAL_TYPE_P (type));
3452 return TYPE_MAX_VALUE (type);
3453 }
3454
3455 default:
3456 gcc_unreachable ();
3457 }
3458}
3459
3460/* Return alignment to be assumed for var in CLAUSE, which should be
3461 OMP_CLAUSE_ALIGNED. */
3462
3463static tree
3464omp_clause_aligned_alignment (tree clause)
3465{
3466 if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
3467 return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause);
3468
3469 /* Otherwise return the implementation-defined alignment. */
3470 unsigned int al = 1;
3471 machine_mode mode, vmode;
3472 int vs = targetm.vectorize.autovectorize_vector_sizes ();
3473 if (vs)
3474 vs = 1 << floor_log2 (vs);
3475 static enum mode_class classes[]
3476 = { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
3477 for (int i = 0; i < 4; i += 2)
3478 for (mode = GET_CLASS_NARROWEST_MODE (classes[i]);
3479 mode != VOIDmode;
3480 mode = GET_MODE_WIDER_MODE (mode))
3481 {
3482 vmode = targetm.vectorize.preferred_simd_mode (mode);
3483 if (GET_MODE_CLASS (vmode) != classes[i + 1])
3484 continue;
3485 while (vs
3486 && GET_MODE_SIZE (vmode) < vs
3487 && GET_MODE_2XWIDER_MODE (vmode) != VOIDmode)
3488 vmode = GET_MODE_2XWIDER_MODE (vmode);
3489
3490 tree type = lang_hooks.types.type_for_mode (mode, 1);
3491 if (type == NULL_TREE || TYPE_MODE (type) != mode)
3492 continue;
3493 type = build_vector_type (type, GET_MODE_SIZE (vmode)
3494 / GET_MODE_SIZE (mode));
3495 if (TYPE_MODE (type) != vmode)
3496 continue;
3497 if (TYPE_ALIGN_UNIT (type) > al)
3498 al = TYPE_ALIGN_UNIT (type);
3499 }
3500 return build_int_cst (integer_type_node, al);
3501}
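/* For example (illustrative and target-dependent): on an x86_64 target
   with AVX2, where the preferred SIMD mode for SImode is V8SImode, the
   loop above settles on a 32-byte vector type, so a bare "aligned (p)"
   clause with no explicit alignment behaves like "aligned (p:32)".  */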
3502
3503/* Return maximum possible vectorization factor for the target. */
3504
3505static int
3506omp_max_vf (void)
3507{
3508 if (!optimize
3509 || optimize_debug
3510 || !flag_tree_loop_optimize
3511 || (!flag_tree_loop_vectorize
3512 && (global_options_set.x_flag_tree_loop_vectorize
3513 || global_options_set.x_flag_tree_vectorize)))
3514 return 1;
3515
3516 int vs = targetm.vectorize.autovectorize_vector_sizes ();
3517 if (vs)
3518 {
3519 vs = 1 << floor_log2 (vs);
3520 return vs;
3521 }
3522 machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
3523 if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
3524 return GET_MODE_NUNITS (vqimode);
3525 return 1;
3526}
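/* Example (illustrative): if autovectorize_vector_sizes returns the
   mask 32|16 (e.g. an AVX2 target), vs is rounded down to a power of
   two, giving a maximum VF of 32; failing that, the number of QImode
   units in the preferred SIMD mode is used.  */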
3527
3528/* Helper function of lower_rec_input_clauses, used for #pragma omp simd
3529 privatization. */
3530
3531static bool
3532lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
3533 tree &idx, tree &lane, tree &ivar, tree &lvar)
3534{
3535 if (max_vf == 0)
3536 {
3537 max_vf = omp_max_vf ();
3538 if (max_vf > 1)
3539 {
3540 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3541 OMP_CLAUSE_SAFELEN);
3542 if (c && TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) != INTEGER_CST)
3543 max_vf = 1;
3544 else if (c && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
3545 max_vf) == -1)
3546 max_vf = tree_to_shwi (OMP_CLAUSE_SAFELEN_EXPR (c));
3547 }
3548 if (max_vf > 1)
3549 {
3550 idx = create_tmp_var (unsigned_type_node);
3551 lane = create_tmp_var (unsigned_type_node);
3552 }
3553 }
3554 if (max_vf == 1)
3555 return false;
3556
3557 tree atype = build_array_type_nelts (TREE_TYPE (new_var), max_vf);
3558 tree avar = create_tmp_var_raw (atype);
3559 if (TREE_ADDRESSABLE (new_var))
3560 TREE_ADDRESSABLE (avar) = 1;
3561 DECL_ATTRIBUTES (avar)
3562 = tree_cons (get_identifier ("omp simd array"), NULL,
3563 DECL_ATTRIBUTES (avar));
3564 gimple_add_tmp_var (avar);
3565 ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, idx,
3566 NULL_TREE, NULL_TREE);
3567 lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, lane,
3568 NULL_TREE, NULL_TREE);
3569 if (DECL_P (new_var))
3570 {
3571 SET_DECL_VALUE_EXPR (new_var, lvar);
3572 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3573 }
3574 return true;
3575}
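/* Conceptually (an illustrative sketch, not quoted from the source), a
   privatized scalar D in a simd loop is replaced by an element of a
   stack array carrying the "omp simd array" attribute:

     T D.simdarr[max_vf];
     ... D.simdarr[idx]  ...   (per-lane use in the vectorized body)
     ... D.simdarr[lane] ...   (scalar access via DECL_VALUE_EXPR)

   letting the vectorizer later turn those accesses into vector lanes.  */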
3576
3577/* Helper function of lower_rec_input_clauses. For a reference
3578 in a simd reduction, add an underlying variable for it to reference. */
3579
3580static void
3581handle_simd_reference (location_t loc, tree new_vard, gimple_seq *ilist)
3582{
3583 tree z = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_vard)));
3584 if (TREE_CONSTANT (z))
3585 {
3586 const char *name = NULL;
3587 if (DECL_NAME (new_vard))
3588 name = IDENTIFIER_POINTER (DECL_NAME (new_vard));
3589
3590 z = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_vard)), name);
3591 gimple_add_tmp_var (z);
3592 TREE_ADDRESSABLE (z) = 1;
3593 z = build_fold_addr_expr_loc (loc, z);
3594 gimplify_assign (new_vard, z, ilist);
3595 }
3596}
3597
3598/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
3599 from the receiver (aka child) side and initializers for REFERENCE_TYPE
3600 private variables. Initialization statements go in ILIST, while calls
3601 to destructors go in DLIST. */
3602
3603static void
3604lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
3605 omp_context *ctx, struct omp_for_data *fd)
3606{
3607 tree c, dtor, copyin_seq, x, ptr;
3608 bool copyin_by_ref = false;
3609 bool lastprivate_firstprivate = false;
3610 bool reduction_omp_orig_ref = false;
3611 int pass;
3612 bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3613 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD);
3614 int max_vf = 0;
3615 tree lane = NULL_TREE, idx = NULL_TREE;
3616 tree ivar = NULL_TREE, lvar = NULL_TREE;
3617 gimple_seq llist[2] = { NULL, NULL };
3618
3619 copyin_seq = NULL;
3620
3621 /* Set max_vf=1 (which will later enforce safelen=1) in simd loops
3622 with data sharing clauses referencing variable sized vars. That
3623 is unnecessarily hard to support and very unlikely to result in
3624 vectorized code anyway. */
3625 if (is_simd)
3626 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3627 switch (OMP_CLAUSE_CODE (c))
3628 {
3629 case OMP_CLAUSE_LINEAR:
3630 if (OMP_CLAUSE_LINEAR_ARRAY (c))
3631 max_vf = 1;
3632 /* FALLTHRU */
3633 case OMP_CLAUSE_REDUCTION:
3634 case OMP_CLAUSE_PRIVATE:
3635 case OMP_CLAUSE_FIRSTPRIVATE:
3636 case OMP_CLAUSE_LASTPRIVATE:
3637 if (is_variable_sized (OMP_CLAUSE_DECL (c)))
3638 max_vf = 1;
3639 break;
3640 default:
3641 continue;
3642 }
3643
3644 /* Do all the fixed sized types in the first pass, and the variable sized
3645 types in the second pass. This makes sure that the scalar arguments to
3646 the variable sized types are processed before we use them in the
3647 variable sized operations. */
3648 for (pass = 0; pass < 2; ++pass)
3649 {
3650 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3651 {
3652 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
3653 tree var, new_var;
3654 bool by_ref;
3655 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3656
3657 switch (c_kind)
3658 {
3659 case OMP_CLAUSE_PRIVATE:
3660 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
3661 continue;
3662 break;
3663 case OMP_CLAUSE_SHARED:
3664 /* Ignore shared directives in teams construct. */
3665 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3666 continue;
3667 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
3668 {
3669 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
3670 continue;
3671 }
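	      /* FALLTHRU */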
3672 case OMP_CLAUSE_FIRSTPRIVATE:
            case OMP_CLAUSE_COPYIN:
            case OMP_CLAUSE_LINEAR:
              break;
            case OMP_CLAUSE_REDUCTION:
              if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
                reduction_omp_orig_ref = true;
              break;
            case OMP_CLAUSE__LOOPTEMP_:
              /* Handle _looptemp_ clauses only on parallel.  */
              if (fd)
                continue;
              break;
            case OMP_CLAUSE_LASTPRIVATE:
              if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
                {
                  lastprivate_firstprivate = true;
                  if (pass != 0)
                    continue;
                }
              /* Even without a corresponding firstprivate, if the decl
                 is Fortran allocatable, it needs an outer var
                 reference.  */
              else if (pass == 0
                       && lang_hooks.decls.omp_private_outer_ref
                            (OMP_CLAUSE_DECL (c)))
                lastprivate_firstprivate = true;
              break;
            case OMP_CLAUSE_ALIGNED:
              if (pass == 0)
                continue;
              var = OMP_CLAUSE_DECL (c);
              if (TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE
                  && !is_global_var (var))
                {
                  new_var = maybe_lookup_decl (var, ctx);
                  if (new_var == NULL_TREE)
                    new_var = maybe_lookup_decl_in_outer_ctx (var, ctx);
                  x = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
                  x = build_call_expr_loc (clause_loc, x, 2, new_var,
                                           omp_clause_aligned_alignment (c));
                  x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
                  x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
                  gimplify_and_add (x, ilist);
                }
              else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
                       && is_global_var (var))
                {
                  tree ptype = build_pointer_type (TREE_TYPE (var)), t, t2;
                  new_var = lookup_decl (var, ctx);
                  t = maybe_lookup_decl_in_outer_ctx (var, ctx);
                  t = build_fold_addr_expr_loc (clause_loc, t);
                  t2 = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
                  t = build_call_expr_loc (clause_loc, t2, 2, t,
                                           omp_clause_aligned_alignment (c));
                  t = fold_convert_loc (clause_loc, ptype, t);
                  x = create_tmp_var (ptype);
                  t = build2 (MODIFY_EXPR, ptype, x, t);
                  gimplify_and_add (t, ilist);
                  t = build_simple_mem_ref_loc (clause_loc, x);
                  SET_DECL_VALUE_EXPR (new_var, t);
                  DECL_HAS_VALUE_EXPR_P (new_var) = 1;
                }
              continue;
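              /* The ALIGNED handling above amounts, schematically, to
                 lowering

                     #pragma omp simd aligned (p : 32)

                 into an assignment such as

                     p = (T *) __builtin_assume_aligned (p, 32);

                 (illustration only; for global arrays the address is
                 taken first and the privatized decl is rewritten through
                 a DECL_VALUE_EXPR instead).  */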
            default:
              continue;
            }

          new_var = var = OMP_CLAUSE_DECL (c);
          if (c_kind != OMP_CLAUSE_COPYIN)
            new_var = lookup_decl (var, ctx);

          if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
            {
              if (pass != 0)
                continue;
            }
          else if (is_variable_sized (var))
            {
              /* For variable sized types, we need to allocate the
                 actual storage here.  Call alloca and store the
                 result in the pointer decl that we created elsewhere.  */
              if (pass == 0)
                continue;

              if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
                {
                  gcall *stmt;
                  tree tmp, atmp;

                  ptr = DECL_VALUE_EXPR (new_var);
                  gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
                  ptr = TREE_OPERAND (ptr, 0);
                  gcc_assert (DECL_P (ptr));
                  x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));

                  /* void *tmp = __builtin_alloca (x);  */
                  atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
                  stmt = gimple_build_call (atmp, 1, x);
                  tmp = create_tmp_var_raw (ptr_type_node);
                  gimple_add_tmp_var (tmp);
                  gimple_call_set_lhs (stmt, tmp);

                  gimple_seq_add_stmt (ilist, stmt);

                  x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
                  gimplify_assign (ptr, x, ilist);
                }
            }
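          /* Illustration only: for a privatized "int a[n];" the block
             just above emits into ILIST roughly

                 tmp = __builtin_alloca (n * sizeof (int));
                 a.ptr = (int (*)[n]) tmp;

             where a.ptr stands for the pointer decl whose dereference
             was earlier installed as A's DECL_VALUE_EXPR; the name a.ptr
             is schematic.  */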
          else if (is_reference (var))
            {
              /* For references that are being privatized for Fortran,
                 allocate new backing storage for the new pointer
                 variable.  This allows us to avoid changing all the
                 code that expects a pointer to something that expects
                 a direct variable.  */
              if (pass == 0)
                continue;

              x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
              if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
                {
                  x = build_receiver_ref (var, false, ctx);
                  x = build_fold_addr_expr_loc (clause_loc, x);
                }
              else if (TREE_CONSTANT (x))
                {
                  /* For a reduction in a SIMD loop, defer adding the
                     initialization of the reference, because if we decide
                     to use a SIMD array for it, the initialization could
                     cause an expansion ICE.  */
                  if (c_kind == OMP_CLAUSE_REDUCTION && is_simd)
                    x = NULL_TREE;
                  else
                    {
                      const char *name = NULL;
                      if (DECL_NAME (new_var))
                        name = IDENTIFIER_POINTER (DECL_NAME (new_var));

                      x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
                                              name);
                      gimple_add_tmp_var (x);
                      TREE_ADDRESSABLE (x) = 1;
                      x = build_fold_addr_expr_loc (clause_loc, x);
                    }
                }
              else
                {
                  tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
                  x = build_call_expr_loc (clause_loc, atmp, 1, x);
                }

              if (x)
                {
                  x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
                  gimplify_assign (new_var, x, ilist);
                }

              new_var = build_simple_mem_ref_loc (clause_loc, new_var);
            }
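          /* Illustration only: for a variable privatized by reference
             (e.g. a Fortran dummy argument), the branch above emits
             roughly

                 ref.priv = &tmp;                       // constant size
             or
                 ref.priv = __builtin_alloca (size);    // variable size

             and then rebinds NEW_VAR to *ref.priv, so the remaining
             clause handling can treat it like a plain variable; ref.priv
             is a schematic name.  */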
          else if (c_kind == OMP_CLAUSE_REDUCTION
                   && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
            {
              if (pass == 0)
                continue;
            }
          else if (pass != 0)