Update gcc-50 to SVN version 220871
[dragonfly.git] / contrib / gcc-5.0 / gcc / omp-low.c
/* Lowering pass for OMP directives.  Converts OMP directives into explicit
   calls to the runtime library (libgomp), data marshalling to implement data
   sharing and copying clauses, offloading to accelerators, and more.

   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "rtl.h"
#include "predict.h"
#include "hard-reg-set.h"
#include "function.h"
#include "dominance.h"
#include "cfg.h"
#include "cfganal.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "gimple-ssa.h"
#include "hash-map.h"
#include "plugin-api.h"
#include "ipa-ref.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "tree-ssanames.h"
#include "tree-into-ssa.h"
#include "hashtab.h"
#include "flags.h"
#include "statistics.h"
#include "real.h"
#include "fixed-value.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "tree-pass.h"
#include "except.h"
#include "splay-tree.h"
#include "insn-codes.h"
#include "optabs.h"
#include "cfgloop.h"
#include "target.h"
#include "common/common-target.h"
#include "omp-low.h"
#include "gimple-low.h"
#include "tree-cfgcleanup.h"
#include "pretty-print.h"
#include "alloc-pool.h"
#include "symbol-summary.h"
#include "ipa-prop.h"
#include "tree-nested.h"
#include "tree-eh.h"
#include "cilk.h"
#include "context.h"
#include "lto-section-names.h"
#include "gomp-constants.h"


/* Lowering of OMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for regions which are then moved to a new
   function, to be invoked by the thread library, or offloaded.  */
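
/* As a rough sketch of the overall transformation (made-up names; the
   real dump differs in detail), a construct such as

     #pragma omp parallel
       body;

   is outlined into a child function plus a libgomp call:

     foo._omp_fn.0 (struct .omp_data_s *.omp_data_i) { body; }
     ...
     GOMP_parallel (foo._omp_fn.0, &.omp_data_o, 0, 0);

   with shared variables marshalled through the .omp_data_o object on
   the sender side and the .omp_data_i pointer on the receiver side.  */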

/* OMP region information.  Every parallel and workshare
   directive is enclosed between two markers, the OMP_* directive
   and a corresponding OMP_RETURN statement.  */

struct omp_region
{
  /* The enclosing region.  */
  struct omp_region *outer;

  /* First child region.  */
  struct omp_region *inner;

  /* Next peer region.  */
  struct omp_region *next;

  /* Block containing the omp directive as its last stmt.  */
  basic_block entry;

  /* Block containing the OMP_RETURN as its last stmt.  */
  basic_block exit;

  /* Block containing the OMP_CONTINUE as its last stmt.  */
  basic_block cont;

  /* If this is a combined parallel+workshare region, this is a list
     of additional arguments needed by the combined parallel+workshare
     library call.  */
  vec<tree, va_gc> *ws_args;

  /* The code for the omp directive of this region.  */
  enum gimple_code type;

  /* Schedule kind, only used for OMP_FOR type regions.  */
  enum omp_clause_schedule_kind sched_kind;

  /* True if this is a combined parallel+workshare region.  */
  bool is_combined_parallel;
};

/* Levels of parallelism as defined by OpenACC.  Increasing numbers
   correspond to deeper loop nesting levels.  */
#define MASK_GANG 1
#define MASK_WORKER 2
#define MASK_VECTOR 4
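
/* E.g. a loop using gang and vector parallelism, but not worker, is
   described by the mask MASK_GANG | MASK_VECTOR == 5.  */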

/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance":  Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* A map of reduction pointer variables.  For accelerators, each
     reduction variable is replaced with an array.  Each thread, in turn,
     is assigned to a slot on that array.  */
  splay_tree reduction_map;

  /* Label to which GOMP_cancel{,lation_point} and explicit and implicit
     barriers should jump during the omplower pass.  */
  tree cancel_label;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;

  /* True if this construct can be cancelled.  */
  bool cancellable;

  /* For OpenACC loops, a mask of gang, worker and vector used at
     levels below this one.  */
  int gwv_below;
  /* For OpenACC loops, a mask of gang, worker and vector used at
     this level and above.  For parallel and kernels clauses, a mask
     indicating which of num_gangs/num_workers/num_vectors was used.  */
  int gwv_this;
} omp_context;

/* A structure holding the elements of:
   for (V = N1; V cond N2; V += STEP) [...]  */

struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gomp_for *for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};


static splay_tree all_contexts;
static int taskreg_nesting_level;
static int target_nesting_level;
static struct omp_region *root_omp_region;
static bitmap task_shared_vars;
static vec<omp_context *> taskreg_contexts;

static void scan_omp (gimple_seq *, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS \
  case GIMPLE_BIND: \
  case GIMPLE_TRY: \
  case GIMPLE_CATCH: \
  case GIMPLE_EH_FILTER: \
  case GIMPLE_TRANSACTION: \
    /* The sub-statements for these should be walked.  */ \
    *handled_ops_p = false; \
    break;

/* Helper function to get the name of the array containing the partial
   reductions for OpenACC reductions.  */
static const char *
oacc_get_reduction_array_id (tree node)
{
  const char *id = IDENTIFIER_POINTER (DECL_NAME (node));
  int len = strlen ("OACC") + strlen (id);
  char *temp_name = XALLOCAVEC (char, len + 1);
  snprintf (temp_name, len + 1, "OACC%s", id);
  return IDENTIFIER_POINTER (get_identifier (temp_name));
}
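
/* For example: for a reduction variable named "sum", this returns the
   identifier "OACCsum", which names the array holding the per-thread
   partial results.  */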

/* Determine the number of OpenACC threads, used to size the array of
   partial reductions.  Currently this is num_gangs * vector_length.
   This value may differ from GOACC_GET_NUM_THREADS because it is
   independent of the device used.  */

static tree
oacc_max_threads (omp_context *ctx)
{
  tree nthreads, vector_length, gangs, clauses;

  gangs = fold_convert (sizetype, integer_one_node);
  vector_length = gangs;

  /* The reduction clause may be nested inside a loop directive.
     Scan for the innermost vector_length clause.  */
  for (omp_context *oc = ctx; oc; oc = oc->outer)
    {
      if (gimple_code (oc->stmt) != GIMPLE_OMP_TARGET
          || (gimple_omp_target_kind (oc->stmt)
              != GF_OMP_TARGET_KIND_OACC_PARALLEL))
        continue;

      clauses = gimple_omp_target_clauses (oc->stmt);

      vector_length = find_omp_clause (clauses, OMP_CLAUSE_VECTOR_LENGTH);
      if (vector_length)
        vector_length = fold_convert_loc (OMP_CLAUSE_LOCATION (vector_length),
                                          sizetype,
                                          OMP_CLAUSE_VECTOR_LENGTH_EXPR
                                          (vector_length));
      else
        vector_length = fold_convert (sizetype, integer_one_node);

      gangs = find_omp_clause (clauses, OMP_CLAUSE_NUM_GANGS);
      if (gangs)
        gangs = fold_convert_loc (OMP_CLAUSE_LOCATION (gangs), sizetype,
                                  OMP_CLAUSE_NUM_GANGS_EXPR (gangs));
      else
        gangs = fold_convert (sizetype, integer_one_node);

      break;
    }

  nthreads = fold_build2 (MULT_EXPR, sizetype, gangs, vector_length);

  return nthreads;
}
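
/* For instance (a sketch): within

     #pragma acc parallel num_gangs (32) vector_length (128)

   this returns 32 * 128 = 4096, the number of slots needed in the
   partial-reduction array.  */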

/* Holds offload tables with decls.  */
vec<tree, va_gc> *offload_funcs, *offload_vars;

/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
         || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}


/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gomp_for *for_stmt, struct omp_for_data *fd,
                      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);
  bool simd = gimple_omp_for_kind (for_stmt) & GF_OMP_FOR_SIMD;
  bool distribute = gimple_omp_for_kind (for_stmt)
                    == GF_OMP_FOR_KIND_DISTRIBUTE;

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = distribute || simd;
  fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_CILKFOR)
    fd->sched_kind = OMP_CLAUSE_SCHEDULE_CILKFOR;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
        fd->have_nowait = true;
        break;
      case OMP_CLAUSE_ORDERED:
        fd->have_ordered = true;
        break;
      case OMP_CLAUSE_SCHEDULE:
        gcc_assert (!distribute);
        fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
        fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
        break;
      case OMP_CLAUSE_DIST_SCHEDULE:
        gcc_assert (distribute);
        fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
        break;
      case OMP_CLAUSE_COLLAPSE:
        if (fd->collapse > 1)
          {
            collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
            collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
          }
        break;
      default:
        break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
         static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
          || fd->have_ordered)
        fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
                         ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
        loop = &fd->loop;
      else if (loops != NULL)
        loop = loops + i;
      else
        loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
                  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
        {
        case LT_EXPR:
        case GT_EXPR:
          break;
        case NE_EXPR:
          gcc_assert (gimple_omp_for_kind (for_stmt)
                      == GF_OMP_FOR_KIND_CILKSIMD
                      || (gimple_omp_for_kind (for_stmt)
                          == GF_OMP_FOR_KIND_CILKFOR));
          break;
        case LE_EXPR:
          if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
            loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
          else
            loop->n2 = fold_build2_loc (loc,
                                        PLUS_EXPR, TREE_TYPE (loop->n2),
                                        loop->n2,
                                        build_int_cst (TREE_TYPE (loop->n2), 1));
          loop->cond_code = LT_EXPR;
          break;
        case GE_EXPR:
          if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
            loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
          else
            loop->n2 = fold_build2_loc (loc,
                                        MINUS_EXPR, TREE_TYPE (loop->n2),
                                        loop->n2,
                                        build_int_cst (TREE_TYPE (loop->n2), 1));
          loop->cond_code = GT_EXPR;
          break;
        default:
          gcc_unreachable ();
        }

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
        {
        case PLUS_EXPR:
          loop->step = TREE_OPERAND (t, 1);
          break;
        case POINTER_PLUS_EXPR:
          loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
          break;
        case MINUS_EXPR:
          loop->step = TREE_OPERAND (t, 1);
          loop->step = fold_build1_loc (loc,
                                        NEGATE_EXPR, TREE_TYPE (loop->step),
                                        loop->step);
          break;
        default:
          gcc_unreachable ();
        }

      if (simd
          || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
              && !fd->have_ordered))
        {
          if (fd->collapse == 1)
            iter_type = TREE_TYPE (loop->v);
          else if (i == 0
                   || TYPE_PRECISION (iter_type)
                      < TYPE_PRECISION (TREE_TYPE (loop->v)))
            iter_type
              = build_nonstandard_integer_type
                  (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
        }
      else if (iter_type != long_long_unsigned_type_node)
        {
          if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
            iter_type = long_long_unsigned_type_node;
          else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
                   && TYPE_PRECISION (TREE_TYPE (loop->v))
                      >= TYPE_PRECISION (iter_type))
            {
              tree n;

              if (loop->cond_code == LT_EXPR)
                n = fold_build2_loc (loc,
                                     PLUS_EXPR, TREE_TYPE (loop->v),
                                     loop->n2, loop->step);
              else
                n = loop->n1;
              if (TREE_CODE (n) != INTEGER_CST
                  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
                iter_type = long_long_unsigned_type_node;
            }
          else if (TYPE_PRECISION (TREE_TYPE (loop->v))
                   > TYPE_PRECISION (iter_type))
            {
              tree n1, n2;

              if (loop->cond_code == LT_EXPR)
                {
                  n1 = loop->n1;
                  n2 = fold_build2_loc (loc,
                                        PLUS_EXPR, TREE_TYPE (loop->v),
                                        loop->n2, loop->step);
                }
              else
                {
                  n1 = fold_build2_loc (loc,
                                        MINUS_EXPR, TREE_TYPE (loop->v),
                                        loop->n2, loop->step);
                  n2 = loop->n1;
                }
              if (TREE_CODE (n1) != INTEGER_CST
                  || TREE_CODE (n2) != INTEGER_CST
                  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
                  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
                iter_type = long_long_unsigned_type_node;
            }
        }

      if (collapse_count && *collapse_count == NULL)
        {
          t = fold_binary (loop->cond_code, boolean_type_node,
                           fold_convert (TREE_TYPE (loop->v), loop->n1),
                           fold_convert (TREE_TYPE (loop->v), loop->n2));
          if (t && integer_zerop (t))
            count = build_zero_cst (long_long_unsigned_type_node);
          else if ((i == 0 || count != NULL_TREE)
                   && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
                   && TREE_CONSTANT (loop->n1)
                   && TREE_CONSTANT (loop->n2)
                   && TREE_CODE (loop->step) == INTEGER_CST)
            {
              tree itype = TREE_TYPE (loop->v);

              if (POINTER_TYPE_P (itype))
                itype = signed_type_for (itype);
              t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
              t = fold_build2_loc (loc,
                                   PLUS_EXPR, itype,
                                   fold_convert_loc (loc, itype, loop->step), t);
              t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
                                   fold_convert_loc (loc, itype, loop->n2));
              t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
                                   fold_convert_loc (loc, itype, loop->n1));
              if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
                t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
                                     fold_build1_loc (loc, NEGATE_EXPR, itype, t),
                                     fold_build1_loc (loc, NEGATE_EXPR, itype,
                                                      fold_convert_loc (loc, itype,
                                                                        loop->step)));
              else
                t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
                                     fold_convert_loc (loc, itype, loop->step));
              t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
              if (count != NULL_TREE)
                count = fold_build2_loc (loc,
                                         MULT_EXPR, long_long_unsigned_type_node,
                                         count, t);
              else
                count = t;
              if (TREE_CODE (count) != INTEGER_CST)
                count = NULL_TREE;
            }
          else if (count && !integer_zerop (count))
            count = NULL_TREE;
        }
    }
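
  /* As a worked instance of the count computation above (a sketch with
     made-up numbers): for "for (i = 0; i < 10; i += 3)" we have
     cond_code == LT_EXPR, so count = (step + -1 + n2 - n1) / step
     = (3 - 1 + 10 - 0) / 3 = 4, matching the iterations i = 0, 3, 6, 9.  */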

  if (count
      && !simd
      && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
          || fd->have_ordered))
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
        iter_type = long_long_unsigned_type_node;
      else
        iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
        *collapse_count = fold_convert_loc (loc, iter_type, count);
      else
        *collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }

  /* For OpenACC loops, force a chunk size of one, as this avoids the default
     scheduling where several subsequent iterations are being executed by the
     same thread.  */
  if (gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_OACC_LOOP)
    {
      gcc_assert (fd->chunk_size == NULL_TREE);
      fd->chunk_size = build_int_cst (TREE_TYPE (fd->loop.v), 1);
    }
}


/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

        #pragma omp parallel for schedule (guided, i * 4)
        for (j ...)

   Is lowered into:

        # BLOCK 2 (PAR_ENTRY_BB)
        .omp_data_o.i = i;
        #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

        # BLOCK 3 (WS_ENTRY_BB)
        .omp_data_i = &.omp_data_o;
        D.1667 = .omp_data_i->i;
        D.1598 = D.1667 * 4;
        #pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of
   any statement in WS_ENTRY_BB.  If so, then we cannot emit the
   combined call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (as_a <gomp_for *> (ws_stmt), &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}


/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static vec<tree, va_gc> *
get_ws_args_for (gimple par_stmt, gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  vec<tree, va_gc> *ws_args;

  if (gomp_for *for_stmt = dyn_cast <gomp_for *> (ws_stmt))
    {
      struct omp_for_data fd;
      tree n1, n2;

      extract_omp_for_data (for_stmt, &fd, NULL);
      n1 = fd.loop.n1;
      n2 = fd.loop.n2;

      if (gimple_omp_for_combined_into_p (for_stmt))
        {
          tree innerc
            = find_omp_clause (gimple_omp_parallel_clauses (par_stmt),
                               OMP_CLAUSE__LOOPTEMP_);
          gcc_assert (innerc);
          n1 = OMP_CLAUSE_DECL (innerc);
          innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
                                    OMP_CLAUSE__LOOPTEMP_);
          gcc_assert (innerc);
          n2 = OMP_CLAUSE_DECL (innerc);
        }

      vec_alloc (ws_args, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, n1);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, n2);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      ws_args->quick_push (t);

      if (fd.chunk_size)
        {
          t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
          ws_args->quick_push (t);
        }

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
         GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
         the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      vec_alloc (ws_args, 1);
      ws_args->quick_push (t);
      return ws_args;
    }

  gcc_unreachable ();
}
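
/* A note on where these land (an assumption based on libgomp's combined
   entry points, not spelled out here): the pushed values line up with
   the trailing arguments of calls such as

     GOMP_parallel_loop_dynamic (fn, data, num_threads,
                                 start, end, incr, chunk_size, flags);
     GOMP_parallel_sections (fn, data, num_threads, count, flags);

   so ws_args is simply appended to the usual parallel arguments.  */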


/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
          && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
          || (last_and_only_stmt (ws_entry_bb)
              && last_and_only_stmt (par_exit_bb))))
    {
      gimple par_stmt = last_stmt (par_entry_bb);
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
        {
          /* If this is a combined parallel loop, we need to determine
             whether or not to use the combined library calls.  There
             are two cases where we do not apply the transformation:
             static loops and any kind of ordered loop.  In the first
             case, we already open code the loop so there is no need
             to do anything else.  In the latter case, the combined
             parallel loop call would still need extra synchronization
             to implement ordered semantics, so there would not be any
             gain in using the combined call.  */
          tree clauses = gimple_omp_for_clauses (ws_stmt);
          tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
          if (c == NULL
              || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
              || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
            {
              region->is_combined_parallel = false;
              region->inner->is_combined_parallel = false;
              return;
            }
        }

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
    }
}
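
/* For instance (a sketch):

     #pragma omp parallel
     #pragma omp for schedule (dynamic)
     for (i = 0; i < n; i++) ...

   qualifies as a combined region, while schedule (static) or an
   ordered clause keeps the parallel and the workshare expanded
   separately, per the conditions above.  */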


/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Return the type of a decl.  If the decl is reference type,
   return its base type.  */
static inline tree
get_base_type (tree decl)
{
  tree type = TREE_TYPE (decl);
  if (is_reference (decl))
    type = TREE_TYPE (type);
  return type;
}

/* Lookup variables.  The "maybe" form allows the variable not to have
   been entered; otherwise we assert that the variable must have been
   entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n = ctx->cb.decl_map->get (var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n = ctx->cb.decl_map->get (const_cast<tree> (var));
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
                         ? ctx->sfield_map : ctx->field_map,
                         (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

static inline tree
lookup_oacc_reduction (const char *id, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->reduction_map, (splay_tree_key) id);
  return (tree) n->value;
}

static inline tree
maybe_lookup_oacc_reduction (tree var, omp_context *ctx)
{
  splay_tree_node n = NULL;
  if (ctx->reduction_map)
    n = splay_tree_lookup (ctx->reduction_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      gcc_assert (!is_gimple_omp_oacc (shared_ctx->stmt));

      /* ??? Trivially accessible from anywhere.  But why would we even
         be passing an address in this case?  Should we simply assert
         this to be false, or should we have a cleanup pass that removes
         these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
        return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
         without analyzing the expression whether or not its location
         is accessible to anyone else.  In the case of nested parallel
         regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
        return true;

      /* Do not use copy-in/copy-out for variables that have their
         address taken.  */
      if (TREE_ADDRESSABLE (decl))
        return true;

      /* lower_send_shared_vars only uses copy-in, but not copy-out
         for these.  */
      if (TREE_READONLY (decl)
          || ((TREE_CODE (decl) == RESULT_DECL
               || TREE_CODE (decl) == PARM_DECL)
              && DECL_BY_REFERENCE (decl)))
        return false;

      /* Disallow copy-in/out in nested parallel if
         decl is shared in outer parallel, otherwise
         each thread could store the shared variable
         in its own copy-in location, making the
         variable no longer really shared.  */
      if (shared_ctx->is_nested)
        {
          omp_context *up;

          for (up = shared_ctx->outer; up; up = up->outer)
            if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
              break;

          if (up)
            {
              tree c;

              for (c = gimple_omp_taskreg_clauses (up->stmt);
                   c; c = OMP_CLAUSE_CHAIN (c))
                if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
                    && OMP_CLAUSE_DECL (c) == decl)
                  break;

              if (c)
                goto maybe_mark_addressable_and_ret;
            }
        }

      /* For tasks avoid using copy-in/out.  As tasks can be
         deferred or executed in a different thread, when GOMP_task
         returns, the task hasn't necessarily terminated.  */
      if (is_task_ctx (shared_ctx))
        {
          tree outer;
        maybe_mark_addressable_and_ret:
          outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
          if (is_gimple_reg (outer))
            {
              /* Taking address of OUTER in lower_send_shared_vars
                 might need regimplification of everything that uses the
                 variable.  */
              if (!task_shared_vars)
                task_shared_vars = BITMAP_ALLOC (NULL);
              bitmap_set_bit (task_shared_vars, DECL_UID (outer));
              TREE_ADDRESSABLE (outer) = 1;
            }
          return true;
        }
    }

  return false;
}
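
/* A couple of concrete cases for the predicate above (a sketch): a
   plain local "int n" shared in a parallel region may use
   copy-in/copy-out, whereas "int a[10]" (an aggregate) or a scalar
   whose address is taken is always passed by pointer.  */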

/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
   as appropriate.  */
static tree
omp_build_component_ref (tree obj, tree field)
{
  tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
  if (TREE_THIS_VOLATILE (field))
    TREE_THIS_VOLATILE (ret) |= 1;
  if (TREE_READONLY (field))
    TREE_READONLY (ret) |= 1;
  return ret;
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = omp_build_component_ref (x, field);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
           && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
    {
      /* #pragma omp simd isn't a worksharing construct, and can reference
         even private vars in its linear etc. clauses.  */
      x = NULL_TREE;
      if (ctx->outer && is_taskreg_ctx (ctx))
        x = lookup_decl (var, ctx->outer);
      else if (ctx->outer)
        x = maybe_lookup_decl_in_outer_ctx (var, ctx);
      if (x == NULL_TREE)
        x = var;
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return omp_build_component_ref (ctx->sender_decl, field);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  Bit 1
   of MASK requests a field in CTX->RECORD_TYPE, bit 2 one in
   CTX->SRECORD_TYPE, and bit 4 wraps an ARRAY_TYPE in a pointer to a
   pointer.  BY_REF selects a pointer field instead of a copy.  */

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
              || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
              || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
  gcc_assert ((mask & 3) == 3
              || !is_gimple_omp_oacc (ctx->stmt));

  type = TREE_TYPE (var);
  if (mask & 4)
    {
      gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
      type = build_pointer_type (build_pointer_type (type));
    }
  else if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
                      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
        {
          sfield = build_decl (DECL_SOURCE_LOCATION (var),
                               FIELD_DECL, DECL_NAME (var), type);
          DECL_ABSTRACT_ORIGIN (sfield) = var;
          DECL_ALIGN (sfield) = DECL_ALIGN (field);
          DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
          TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
          insert_field_into_struct (ctx->srecord_type, sfield);
        }
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
        {
          tree t;

          ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
          ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
          for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
            {
              sfield = build_decl (DECL_SOURCE_LOCATION (var),
                                   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
              DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
              insert_field_into_struct (ctx->srecord_type, sfield);
              splay_tree_insert (ctx->sfield_map,
                                 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
                                 (splay_tree_value) sfield);
            }
        }
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
                                : ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
                       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
                       (splay_tree_value) sfield);
}

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
        size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
        size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
        return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
        return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}


/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
           gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
               region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
             region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}


/* Create a new parallel region starting at STMT inside region PARENT.  */

static struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
                struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
         regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
         regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}


/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
                     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
      ctx->reduction_map = outer_ctx->reduction_map;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node::get (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = new hash_map<tree, tree>;

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gomp_task *task_stmt)
{
  struct function *child_cfun;
  tree child_fn;
  gimple_seq seq = NULL, new_seq;
  gbind *bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;

  push_cfun (child_cfun);
  bind = gimplify_body (child_fn, false);
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = NULL;
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();

  /* Inform the callgraph about the new function.  */
  cgraph_node::add_new_function (child_fn, false);
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  delete ctx->cb.decl_map;

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);
  if (ctx->reduction_map
      /* Shared over several omp_contexts.  */
      && (ctx->outer == NULL
          || ctx->reduction_map != ctx->outer->reduction_map))
    splay_tree_delete (ctx->reduction_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
        DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
        DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (as_a <gomp_task *> (ctx->stmt));

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
                         TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
        {
          tree new_f = copy_node (f);
          DECL_CONTEXT (new_f) = type;
          TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
          DECL_CHAIN (new_f) = new_fields;
          walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
          walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
                     &ctx->cb, NULL);
          walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
                     &ctx->cb, NULL);
          new_fields = new_f;

          /* Arrange to be able to look up the receiver field
             given the sender field.  */
          splay_tree_insert (ctx->field_map, (splay_tree_key) f,
                             (splay_tree_value) new_f);
        }
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl)
    = build_qualified_type (build_reference_type (type), TYPE_QUAL_RESTRICT);
}

/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_PRIVATE:
          decl = OMP_CLAUSE_DECL (c);
          if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
            goto do_private;
          else if (!is_variable_sized (decl))
            install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_SHARED:
          decl = OMP_CLAUSE_DECL (c);
          /* Ignore shared directives in teams construct.  */
          if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
            {
              /* Global variables don't need to be copied,
                 the receiver side will use them directly.  */
              tree odecl = maybe_lookup_decl_in_outer_ctx (decl, ctx);
              if (is_global_var (odecl))
                break;
              insert_decl_map (&ctx->cb, decl, odecl);
              break;
            }
          gcc_assert (is_taskreg_ctx (ctx));
          gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
                      || !is_variable_sized (decl));
          /* Global variables don't need to be copied,
             the receiver side will use them directly.  */
          if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
            break;
          by_ref = use_pointer_for_field (decl, ctx);
          if (! TREE_READONLY (decl)
              || TREE_ADDRESSABLE (decl)
              || by_ref
              || is_reference (decl))
            {
              install_var_field (decl, by_ref, 3, ctx);
              install_var_local (decl, ctx);
              break;
            }
          /* We don't need to copy const scalar vars back.  */
          OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
          goto do_private;

        case OMP_CLAUSE_LASTPRIVATE:
          /* Let the corresponding firstprivate clause create
             the variable.  */
          if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
            break;
          /* FALLTHRU */

        case OMP_CLAUSE_FIRSTPRIVATE:
          if (is_gimple_omp_oacc (ctx->stmt))
            {
              sorry ("clause not supported yet");
              break;
            }
          /* FALLTHRU */
        case OMP_CLAUSE_REDUCTION:
        case OMP_CLAUSE_LINEAR:
          decl = OMP_CLAUSE_DECL (c);
        do_private:
          if (is_variable_sized (decl))
            {
              if (is_task_ctx (ctx))
                install_var_field (decl, false, 1, ctx);
              break;
            }
          else if (is_taskreg_ctx (ctx))
            {
              bool global
                = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
              by_ref = use_pointer_for_field (decl, NULL);

              if (is_task_ctx (ctx)
                  && (global || by_ref || is_reference (decl)))
                {
                  install_var_field (decl, false, 1, ctx);
                  if (!global)
                    install_var_field (decl, by_ref, 2, ctx);
                }
              else if (!global)
                install_var_field (decl, by_ref, 3, ctx);
            }
          install_var_local (decl, ctx);
          if (is_gimple_omp_oacc (ctx->stmt)
              && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
            {
              /* Create a decl for the reduction array.  */
              tree var = OMP_CLAUSE_DECL (c);
              tree type = get_base_type (var);
              tree ptype = build_pointer_type (type);
              tree array = create_tmp_var (ptype,
                                           oacc_get_reduction_array_id (var));
              omp_context *c = (ctx->field_map ? ctx : ctx->outer);
              install_var_field (array, true, 3, c);
              install_var_local (array, c);

              /* Insert it into the current context.  */
              splay_tree_insert (ctx->reduction_map, (splay_tree_key)
                                 oacc_get_reduction_array_id (var),
                                 (splay_tree_value) array);
              splay_tree_insert (ctx->reduction_map,
                                 (splay_tree_key) array,
                                 (splay_tree_value) array);
            }
          break;

        case OMP_CLAUSE__LOOPTEMP_:
          gcc_assert (is_parallel_ctx (ctx));
          decl = OMP_CLAUSE_DECL (c);
          install_var_field (decl, false, 3, ctx);
          install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_COPYPRIVATE:
        case OMP_CLAUSE_COPYIN:
          decl = OMP_CLAUSE_DECL (c);
          by_ref = use_pointer_for_field (decl, NULL);
          install_var_field (decl, by_ref, 3, ctx);
          break;

        case OMP_CLAUSE_DEFAULT:
          ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
          break;

        case OMP_CLAUSE_FINAL:
        case OMP_CLAUSE_IF:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_NUM_TEAMS:
        case OMP_CLAUSE_THREAD_LIMIT:
        case OMP_CLAUSE_DEVICE:
        case OMP_CLAUSE_SCHEDULE:
        case OMP_CLAUSE_DIST_SCHEDULE:
        case OMP_CLAUSE_DEPEND:
        case OMP_CLAUSE__CILK_FOR_COUNT_:
        case OMP_CLAUSE_NUM_GANGS:
        case OMP_CLAUSE_NUM_WORKERS:
        case OMP_CLAUSE_VECTOR_LENGTH:
          if (ctx->outer)
            scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
          break;

        case OMP_CLAUSE_TO:
        case OMP_CLAUSE_FROM:
        case OMP_CLAUSE_MAP:
          if (ctx->outer)
            scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer);
          decl = OMP_CLAUSE_DECL (c);
          /* Global variables with "omp declare target" attribute
             don't need to be copied, the receiver side will use them
             directly.  */
          if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
              && DECL_P (decl)
              && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
              && varpool_node::get_create (decl)->offloadable)
            break;
          if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
              && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER)
            {
              /* Ignore GOMP_MAP_POINTER kind for arrays in regions that are
                 not offloaded; there is nothing to map for those.  */
              if (!is_gimple_omp_offloaded (ctx->stmt)
                  && !POINTER_TYPE_P (TREE_TYPE (decl))
                  && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
                break;
            }
          if (DECL_P (decl))
            {
              if (DECL_SIZE (decl)
                  && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
                {
                  tree decl2 = DECL_VALUE_EXPR (decl);
                  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
                  decl2 = TREE_OPERAND (decl2, 0);
                  gcc_assert (DECL_P (decl2));
                  install_var_field (decl2, true, 3, ctx);
                  install_var_local (decl2, ctx);
                  install_var_local (decl, ctx);
                }
              else
                {
                  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
                      && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
                      && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
                      && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
                    install_var_field (decl, true, 7, ctx);
                  else
                    install_var_field (decl, true, 3, ctx);
                  if (is_gimple_omp_offloaded (ctx->stmt))
                    install_var_local (decl, ctx);
                }
            }
          else
            {
              tree base = get_base_address (decl);
              tree nc = OMP_CLAUSE_CHAIN (c);
              if (DECL_P (base)
                  && nc != NULL_TREE
                  && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
                  && OMP_CLAUSE_DECL (nc) == base
                  && OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_POINTER
                  && integer_zerop (OMP_CLAUSE_SIZE (nc)))
                {
                  OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1;
                  OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1;
                }
              else
                {
                  if (ctx->outer)
                    {
                      scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
                      decl = OMP_CLAUSE_DECL (c);
                    }
                  gcc_assert (!splay_tree_lookup (ctx->field_map,
                                                  (splay_tree_key) decl));
                  tree field
                    = build_decl (OMP_CLAUSE_LOCATION (c),
                                  FIELD_DECL, NULL_TREE, ptr_type_node);
                  DECL_ALIGN (field) = TYPE_ALIGN (ptr_type_node);
                  insert_field_into_struct (ctx->record_type, field);
                  splay_tree_insert (ctx->field_map, (splay_tree_key) decl,
                                     (splay_tree_value) field);
                }
            }
          break;

        case OMP_CLAUSE_NOWAIT:
        case OMP_CLAUSE_ORDERED:
        case OMP_CLAUSE_COLLAPSE:
        case OMP_CLAUSE_UNTIED:
        case OMP_CLAUSE_MERGEABLE:
        case OMP_CLAUSE_PROC_BIND:
        case OMP_CLAUSE_SAFELEN:
        case OMP_CLAUSE_ASYNC:
        case OMP_CLAUSE_WAIT:
        case OMP_CLAUSE_GANG:
        case OMP_CLAUSE_WORKER:
        case OMP_CLAUSE_VECTOR:
          break;

        case OMP_CLAUSE_ALIGNED:
          decl = OMP_CLAUSE_DECL (c);
          if (is_global_var (decl)
              && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
            install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_DEVICE_RESIDENT:
        case OMP_CLAUSE_USE_DEVICE:
        case OMP_CLAUSE__CACHE_:
        case OMP_CLAUSE_INDEPENDENT:
        case OMP_CLAUSE_AUTO:
        case OMP_CLAUSE_SEQ:
1928 sorry ("clause not supported yet");
1929 break;
1930
1931 default:
1932 gcc_unreachable ();
1933 }
1934 }
1935
1936 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1937 {
1938 switch (OMP_CLAUSE_CODE (c))
1939 {
1940 case OMP_CLAUSE_LASTPRIVATE:
1941 /* Let the corresponding firstprivate clause create
1942 the variable. */
1943 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1944 scan_array_reductions = true;
1945 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1946 break;
1947 /* FALLTHRU */
1948
1949 case OMP_CLAUSE_FIRSTPRIVATE:
1950 if (is_gimple_omp_oacc (ctx->stmt))
1951 {
1952 sorry ("clause not supported yet");
1953 break;
1954 }
1955 /* FALLTHRU */
1956 case OMP_CLAUSE_PRIVATE:
1957 case OMP_CLAUSE_REDUCTION:
1958 case OMP_CLAUSE_LINEAR:
1959 decl = OMP_CLAUSE_DECL (c);
1960 if (is_variable_sized (decl))
1961 install_var_local (decl, ctx);
1962 fixup_remapped_decl (decl, ctx,
1963 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1964 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1965 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1966 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1967 scan_array_reductions = true;
1968 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
1969 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
1970 scan_array_reductions = true;
1971 break;
1972
1973 case OMP_CLAUSE_SHARED:
1974 /* Ignore shared directives in teams construct. */
1975 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
1976 break;
1977 decl = OMP_CLAUSE_DECL (c);
1978 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1979 fixup_remapped_decl (decl, ctx, false);
1980 break;
1981
1982 case OMP_CLAUSE_MAP:
1983 if (!is_gimple_omp_offloaded (ctx->stmt))
1984 break;
1985 decl = OMP_CLAUSE_DECL (c);
1986 if (DECL_P (decl)
1987 && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
1988 && varpool_node::get_create (decl)->offloadable)
1989 break;
1990 if (DECL_P (decl))
1991 {
1992 if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
1993 && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
1994 && !COMPLETE_TYPE_P (TREE_TYPE (decl)))
1995 {
1996 tree new_decl = lookup_decl (decl, ctx);
1997 TREE_TYPE (new_decl)
1998 = remap_type (TREE_TYPE (decl), &ctx->cb);
1999 }
2000 else if (DECL_SIZE (decl)
2001 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
2002 {
2003 tree decl2 = DECL_VALUE_EXPR (decl);
2004 gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
2005 decl2 = TREE_OPERAND (decl2, 0);
2006 gcc_assert (DECL_P (decl2));
2007 fixup_remapped_decl (decl2, ctx, false);
2008 fixup_remapped_decl (decl, ctx, true);
2009 }
2010 else
2011 fixup_remapped_decl (decl, ctx, false);
2012 }
2013 break;
2014
2015 case OMP_CLAUSE_COPYPRIVATE:
2016 case OMP_CLAUSE_COPYIN:
2017 case OMP_CLAUSE_DEFAULT:
2018 case OMP_CLAUSE_IF:
2019 case OMP_CLAUSE_NUM_THREADS:
2020 case OMP_CLAUSE_NUM_TEAMS:
2021 case OMP_CLAUSE_THREAD_LIMIT:
2022 case OMP_CLAUSE_DEVICE:
2023 case OMP_CLAUSE_SCHEDULE:
2024 case OMP_CLAUSE_DIST_SCHEDULE:
2025 case OMP_CLAUSE_NOWAIT:
2026 case OMP_CLAUSE_ORDERED:
2027 case OMP_CLAUSE_COLLAPSE:
2028 case OMP_CLAUSE_UNTIED:
2029 case OMP_CLAUSE_FINAL:
2030 case OMP_CLAUSE_MERGEABLE:
2031 case OMP_CLAUSE_PROC_BIND:
2032 case OMP_CLAUSE_SAFELEN:
2033 case OMP_CLAUSE_ALIGNED:
2034 case OMP_CLAUSE_DEPEND:
2035 case OMP_CLAUSE__LOOPTEMP_:
2036 case OMP_CLAUSE_TO:
2037 case OMP_CLAUSE_FROM:
2038 case OMP_CLAUSE__CILK_FOR_COUNT_:
2039 case OMP_CLAUSE_ASYNC:
2040 case OMP_CLAUSE_WAIT:
2041 case OMP_CLAUSE_NUM_GANGS:
2042 case OMP_CLAUSE_NUM_WORKERS:
2043 case OMP_CLAUSE_VECTOR_LENGTH:
2044 case OMP_CLAUSE_GANG:
2045 case OMP_CLAUSE_WORKER:
2046 case OMP_CLAUSE_VECTOR:
2047 break;
2048
2049 case OMP_CLAUSE_DEVICE_RESIDENT:
2050 case OMP_CLAUSE_USE_DEVICE:
2051 case OMP_CLAUSE__CACHE_:
2052 case OMP_CLAUSE_INDEPENDENT:
2053 case OMP_CLAUSE_AUTO:
2054 case OMP_CLAUSE_SEQ:
2055 sorry ("clause not supported yet");
2056 break;
2057
2058 default:
2059 gcc_unreachable ();
2060 }
2061 }
2062
2063 gcc_checking_assert (!scan_array_reductions
2064 || !is_gimple_omp_oacc (ctx->stmt));
2065 if (scan_array_reductions)
2066 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
2067 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
2068 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2069 {
2070 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2071 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2072 }
2073 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
2074 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2075 scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2076 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
2077 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
2078 scan_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
2079}
2080
2081/* Create a new name for the omp child function. Returns an identifier. If
2082 IS_CILK_FOR is true, the suffix for the child function is
2083 "_cilk_for_fn". */
2084
2085static tree
2086create_omp_child_function_name (bool task_copy, bool is_cilk_for)
2087{
2088 if (is_cilk_for)
2089 return clone_function_name (current_function_decl, "_cilk_for_fn");
2090 return clone_function_name (current_function_decl,
2091 task_copy ? "_omp_cpyfn" : "_omp_fn");
2092}
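
/* For instance, when lowering constructs found in a function foo, the
   decls built from these names typically come out as foo._omp_fn.0,
   foo._omp_cpyfn.1 or foo._cilk_for_fn.2; the numeric suffix is appended
   by clone_function_name.  (Illustrative names only; they are internal
   identifiers, not part of any ABI.)  */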
2093
2094/* Return the type to be used for the induction variable of the _Cilk_for
2095 child function, and for its __high and __low variables, based on TYPE. */
2096
2097static tree
2098cilk_for_check_loop_diff_type (tree type)
2099{
2100 if (TYPE_PRECISION (type) <= TYPE_PRECISION (uint32_type_node))
2101 {
2102 if (TYPE_UNSIGNED (type))
2103 return uint32_type_node;
2104 else
2105 return integer_type_node;
2106 }
2107 else
2108 {
2109 if (TYPE_UNSIGNED (type))
2110 return uint64_type_node;
2111 else
2112 return long_long_integer_type_node;
2113 }
2114}
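
/* Concretely, per the precision check above: an unsigned TYPE of at most
   32 bits maps to uint32_type_node and a signed one to integer_type_node,
   while anything wider maps to uint64_type_node or
   long_long_integer_type_node respectively.  */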
2115
2116/* Build a decl for the omp child function. It will not contain a body
2117 yet, just the bare decl. */
2118
2119static void
2120create_omp_child_function (omp_context *ctx, bool task_copy)
2121{
2122 tree decl, type, name, t;
2123
2124 tree cilk_for_count
2125 = (flag_cilkplus && gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
2126 ? find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2127 OMP_CLAUSE__CILK_FOR_COUNT_) : NULL_TREE;
2128 tree cilk_var_type = NULL_TREE;
2129
2130 name = create_omp_child_function_name (task_copy,
2131 cilk_for_count != NULL_TREE);
2132 if (task_copy)
2133 type = build_function_type_list (void_type_node, ptr_type_node,
2134 ptr_type_node, NULL_TREE);
2135 else if (cilk_for_count)
2136 {
2137 type = TREE_TYPE (OMP_CLAUSE_OPERAND (cilk_for_count, 0));
2138 cilk_var_type = cilk_for_check_loop_diff_type (type);
2139 type = build_function_type_list (void_type_node, ptr_type_node,
2140 cilk_var_type, cilk_var_type, NULL_TREE);
2141 }
2142 else
2143 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
2144
2145 decl = build_decl (gimple_location (ctx->stmt), FUNCTION_DECL, name, type);
2146
2147 gcc_checking_assert (!is_gimple_omp_oacc (ctx->stmt)
2148 || !task_copy);
2149 if (!task_copy)
2150 ctx->cb.dst_fn = decl;
2151 else
2152 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
2153
2154 TREE_STATIC (decl) = 1;
2155 TREE_USED (decl) = 1;
2156 DECL_ARTIFICIAL (decl) = 1;
2157 DECL_IGNORED_P (decl) = 0;
2158 TREE_PUBLIC (decl) = 0;
2159 DECL_UNINLINABLE (decl) = 1;
2160 DECL_EXTERNAL (decl) = 0;
2161 DECL_CONTEXT (decl) = NULL_TREE;
2162 DECL_INITIAL (decl) = make_node (BLOCK);
2163 if (cgraph_node::get (current_function_decl)->offloadable)
2164 cgraph_node::get_create (decl)->offloadable = 1;
2165 else
2166 {
2167 omp_context *octx;
2168 for (octx = ctx; octx; octx = octx->outer)
2169 if (is_gimple_omp_offloaded (octx->stmt))
2170 {
2171 cgraph_node::get_create (decl)->offloadable = 1;
2172#ifdef ENABLE_OFFLOADING
2173 g->have_offload = true;
2174#endif
2175 break;
2176 }
2177 }
2178
2179 t = build_decl (DECL_SOURCE_LOCATION (decl),
2180 RESULT_DECL, NULL_TREE, void_type_node);
2181 DECL_ARTIFICIAL (t) = 1;
2182 DECL_IGNORED_P (t) = 1;
2183 DECL_CONTEXT (t) = decl;
2184 DECL_RESULT (decl) = t;
2185
2186 /* _Cilk_for's child function requires two extra parameters called
2187 __low and __high that are set by the Cilk runtime when it calls this
2188 function. */
2189 if (cilk_for_count)
2190 {
2191 t = build_decl (DECL_SOURCE_LOCATION (decl),
2192 PARM_DECL, get_identifier ("__high"), cilk_var_type);
2193 DECL_ARTIFICIAL (t) = 1;
2194 DECL_NAMELESS (t) = 1;
2195 DECL_ARG_TYPE (t) = ptr_type_node;
2196 DECL_CONTEXT (t) = current_function_decl;
2197 TREE_USED (t) = 1;
2198 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2199 DECL_ARGUMENTS (decl) = t;
2200
2201 t = build_decl (DECL_SOURCE_LOCATION (decl),
2202 PARM_DECL, get_identifier ("__low"), cilk_var_type);
2203 DECL_ARTIFICIAL (t) = 1;
2204 DECL_NAMELESS (t) = 1;
2205 DECL_ARG_TYPE (t) = ptr_type_node;
2206 DECL_CONTEXT (t) = current_function_decl;
2207 TREE_USED (t) = 1;
2208 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2209 DECL_ARGUMENTS (decl) = t;
2210 }
2211
2212 tree data_name = get_identifier (".omp_data_i");
2213 t = build_decl (DECL_SOURCE_LOCATION (decl), PARM_DECL, data_name,
2214 ptr_type_node);
2215 DECL_ARTIFICIAL (t) = 1;
2216 DECL_NAMELESS (t) = 1;
2217 DECL_ARG_TYPE (t) = ptr_type_node;
2218 DECL_CONTEXT (t) = current_function_decl;
2219 TREE_USED (t) = 1;
2220 if (cilk_for_count)
2221 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2222 DECL_ARGUMENTS (decl) = t;
2223 if (!task_copy)
2224 ctx->receiver_decl = t;
2225 else
2226 {
2227 t = build_decl (DECL_SOURCE_LOCATION (decl),
2228 PARM_DECL, get_identifier (".omp_data_o"),
2229 ptr_type_node);
2230 DECL_ARTIFICIAL (t) = 1;
2231 DECL_NAMELESS (t) = 1;
2232 DECL_ARG_TYPE (t) = ptr_type_node;
2233 DECL_CONTEXT (t) = current_function_decl;
2234 TREE_USED (t) = 1;
2235 TREE_ADDRESSABLE (t) = 1;
2236 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
2237 DECL_ARGUMENTS (decl) = t;
2238 }
2239
2240 /* Allocate memory for the function structure. push_struct_function
2241 clobbers CFUN (via allocate_struct_function), so pop_cfun restores
2242 it afterward. */
2243 push_struct_function (decl);
2244 cfun->function_end_locus = gimple_location (ctx->stmt);
2245 pop_cfun ();
2246}
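
/* Taken together, the decls built above have one of the following shapes,
   sketched here as C prototypes (the names are illustrative, see
   create_omp_child_function_name):

     void foo._omp_fn.N (void *.omp_data_i);
     void foo._omp_cpyfn.N (void *.omp_data_o, void *.omp_data_i);
     void foo._cilk_for_fn.N (void *.omp_data_i, T __low, T __high);

   where T is the type chosen by cilk_for_check_loop_diff_type.  */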
2247
2248/* Callback for walk_gimple_seq. Check whether a combined parallel
2249 contains an OMP_FOR for which gimple_omp_for_combined_into_p is true. */
2250
2251static tree
2252find_combined_for (gimple_stmt_iterator *gsi_p,
2253 bool *handled_ops_p,
2254 struct walk_stmt_info *wi)
2255{
2256 gimple stmt = gsi_stmt (*gsi_p);
2257
2258 *handled_ops_p = true;
2259 switch (gimple_code (stmt))
2260 {
2261 WALK_SUBSTMTS;
2262
2263 case GIMPLE_OMP_FOR:
2264 if (gimple_omp_for_combined_into_p (stmt)
2265 && gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
2266 {
2267 wi->info = stmt;
2268 return integer_zero_node;
2269 }
2270 break;
2271 default:
2272 break;
2273 }
2274 return NULL;
2275}
2276
2277/* Scan an OpenMP parallel directive. */
2278
2279static void
2280scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2281{
2282 omp_context *ctx;
2283 tree name;
2284 gomp_parallel *stmt = as_a <gomp_parallel *> (gsi_stmt (*gsi));
2285
2286 /* Ignore parallel directives with empty bodies, unless there
2287 are copyin clauses. */
2288 if (optimize > 0
2289 && empty_body_p (gimple_omp_body (stmt))
2290 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
2291 OMP_CLAUSE_COPYIN) == NULL)
2292 {
2293 gsi_replace (gsi, gimple_build_nop (), false);
2294 return;
2295 }
2296
2297 if (gimple_omp_parallel_combined_p (stmt))
2298 {
2299 struct walk_stmt_info wi;
2300
2301 memset (&wi, 0, sizeof (wi));
2302 wi.val_only = true;
2303 walk_gimple_seq (gimple_omp_body (stmt),
2304 find_combined_for, NULL, &wi);
2305 if (wi.info)
2306 {
2307 gomp_for *for_stmt = as_a <gomp_for *> ((gimple) wi.info);
2308 struct omp_for_data fd;
2309 extract_omp_for_data (for_stmt, &fd, NULL);
2310 /* We need two temporaries with fd.loop.v type (istart/iend)
2311 and then (fd.collapse - 1) temporaries with the same
2312 type for count2 ... countN-1 vars if not constant. */
2313 size_t count = 2, i;
2314 tree type = fd.iter_type;
2315 if (fd.collapse > 1
2316 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
2317 count += fd.collapse - 1;
2318 for (i = 0; i < count; i++)
2319 {
2320 tree temp = create_tmp_var (type);
2321 tree c = build_omp_clause (UNKNOWN_LOCATION,
2322 OMP_CLAUSE__LOOPTEMP_);
2323 insert_decl_map (&outer_ctx->cb, temp, temp);
2324 OMP_CLAUSE_DECL (c) = temp;
2325 OMP_CLAUSE_CHAIN (c) = gimple_omp_parallel_clauses (stmt);
2326 gimple_omp_parallel_set_clauses (stmt, c);
2327 }
2328 }
2329 }
2330
2331 ctx = new_omp_context (stmt, outer_ctx);
2332 taskreg_contexts.safe_push (ctx);
2333 if (taskreg_nesting_level > 1)
2334 ctx->is_nested = true;
2335 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2336 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2337 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2338 name = create_tmp_var_name (".omp_data_s");
2339 name = build_decl (gimple_location (stmt),
2340 TYPE_DECL, name, ctx->record_type);
2341 DECL_ARTIFICIAL (name) = 1;
2342 DECL_NAMELESS (name) = 1;
2343 TYPE_NAME (ctx->record_type) = name;
2344 create_omp_child_function (ctx, false);
2345 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
2346
2347 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
2348 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2349
2350 if (TYPE_FIELDS (ctx->record_type) == NULL)
2351 ctx->record_type = ctx->receiver_decl = NULL;
2352}
2353
2354/* Scan an OpenMP task directive. */
2355
2356static void
2357scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2358{
2359 omp_context *ctx;
2360 tree name, t;
2361 gomp_task *stmt = as_a <gomp_task *> (gsi_stmt (*gsi));
2362
2363 /* Ignore task directives with empty bodies. */
2364 if (optimize > 0
2365 && empty_body_p (gimple_omp_body (stmt)))
2366 {
2367 gsi_replace (gsi, gimple_build_nop (), false);
2368 return;
2369 }
2370
2371 ctx = new_omp_context (stmt, outer_ctx);
2372 taskreg_contexts.safe_push (ctx);
2373 if (taskreg_nesting_level > 1)
2374 ctx->is_nested = true;
2375 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2376 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2377 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2378 name = create_tmp_var_name (".omp_data_s");
2379 name = build_decl (gimple_location (stmt),
2380 TYPE_DECL, name, ctx->record_type);
2381 DECL_ARTIFICIAL (name) = 1;
2382 DECL_NAMELESS (name) = 1;
2383 TYPE_NAME (ctx->record_type) = name;
2384 create_omp_child_function (ctx, false);
2385 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
2386
2387 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
2388
2389 if (ctx->srecord_type)
2390 {
2391 name = create_tmp_var_name (".omp_data_a");
2392 name = build_decl (gimple_location (stmt),
2393 TYPE_DECL, name, ctx->srecord_type);
2394 DECL_ARTIFICIAL (name) = 1;
2395 DECL_NAMELESS (name) = 1;
2396 TYPE_NAME (ctx->srecord_type) = name;
2397 create_omp_child_function (ctx, true);
2398 }
2399
2400 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2401
2402 if (TYPE_FIELDS (ctx->record_type) == NULL)
2403 {
2404 ctx->record_type = ctx->receiver_decl = NULL;
2405 t = build_int_cst (long_integer_type_node, 0);
2406 gimple_omp_task_set_arg_size (stmt, t);
2407 t = build_int_cst (long_integer_type_node, 1);
2408 gimple_omp_task_set_arg_align (stmt, t);
2409 }
2410}
2411
2412
2413/* If any decls have been made addressable during scan_omp,
2414 adjust their fields if needed, and lay out the record types
2415 of parallel/task constructs. */
2416
2417static void
2418finish_taskreg_scan (omp_context *ctx)
2419{
2420 if (ctx->record_type == NULL_TREE)
2421 return;
2422
2423 /* If any task_shared_vars were needed, check for all
2424 OMP_CLAUSE_SHARED clauses on GIMPLE_OMP_{PARALLEL,TASK}
2425 statements whether use_pointer_for_field has changed
2426 because of that, and if it has, update the field types now. */
2427 if (task_shared_vars)
2428 {
2429 tree c;
2430
2431 for (c = gimple_omp_taskreg_clauses (ctx->stmt);
2432 c; c = OMP_CLAUSE_CHAIN (c))
2433 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED)
2434 {
2435 tree decl = OMP_CLAUSE_DECL (c);
2436
2437 /* Global variables don't need to be copied,
2438 the receiver side will use them directly. */
2439 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
2440 continue;
2441 if (!bitmap_bit_p (task_shared_vars, DECL_UID (decl))
2442 || !use_pointer_for_field (decl, ctx))
2443 continue;
2444 tree field = lookup_field (decl, ctx);
2445 if (TREE_CODE (TREE_TYPE (field)) == POINTER_TYPE
2446 && TREE_TYPE (TREE_TYPE (field)) == TREE_TYPE (decl))
2447 continue;
2448 TREE_TYPE (field) = build_pointer_type (TREE_TYPE (decl));
2449 TREE_THIS_VOLATILE (field) = 0;
2450 DECL_USER_ALIGN (field) = 0;
2451 DECL_ALIGN (field) = TYPE_ALIGN (TREE_TYPE (field));
2452 if (TYPE_ALIGN (ctx->record_type) < DECL_ALIGN (field))
2453 TYPE_ALIGN (ctx->record_type) = DECL_ALIGN (field);
2454 if (ctx->srecord_type)
2455 {
2456 tree sfield = lookup_sfield (decl, ctx);
2457 TREE_TYPE (sfield) = TREE_TYPE (field);
2458 TREE_THIS_VOLATILE (sfield) = 0;
2459 DECL_USER_ALIGN (sfield) = 0;
2460 DECL_ALIGN (sfield) = DECL_ALIGN (field);
2461 if (TYPE_ALIGN (ctx->srecord_type) < DECL_ALIGN (sfield))
2462 TYPE_ALIGN (ctx->srecord_type) = DECL_ALIGN (sfield);
2463 }
2464 }
2465 }
2466
2467 if (gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
2468 {
2469 layout_type (ctx->record_type);
2470 fixup_child_record_type (ctx);
2471 }
2472 else
2473 {
2474 location_t loc = gimple_location (ctx->stmt);
2475 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
2476 /* Move VLA fields to the end. */
2477 p = &TYPE_FIELDS (ctx->record_type);
2478 while (*p)
2479 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
2480 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
2481 {
2482 *q = *p;
2483 *p = TREE_CHAIN (*p);
2484 TREE_CHAIN (*q) = NULL_TREE;
2485 q = &TREE_CHAIN (*q);
2486 }
2487 else
2488 p = &DECL_CHAIN (*p);
2489 *p = vla_fields;
2490 layout_type (ctx->record_type);
2491 fixup_child_record_type (ctx);
2492 if (ctx->srecord_type)
2493 layout_type (ctx->srecord_type);
2494 tree t = fold_convert_loc (loc, long_integer_type_node,
2495 TYPE_SIZE_UNIT (ctx->record_type));
2496 gimple_omp_task_set_arg_size (ctx->stmt, t);
2497 t = build_int_cst (long_integer_type_node,
2498 TYPE_ALIGN_UNIT (ctx->record_type));
2499 gimple_omp_task_set_arg_align (ctx->stmt, t);
2500 }
2501}
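
/* As a sketch of the reordering done above for task constructs: given a
   hypothetical record
     struct .omp_data_s { int n; char vla[n]; int *p; };
   the variable-length field is moved last, yielding
     struct .omp_data_s { int n; int *p; char vla[n]; };
   so that all fixed-size fields keep constant offsets; the record's total
   size and alignment are then recorded with gimple_omp_task_set_arg_size
   and gimple_omp_task_set_arg_align as above.  */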
2502
2503
2504static omp_context *
2505enclosing_target_ctx (omp_context *ctx)
2506{
2507 while (ctx != NULL
2508 && gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET)
2509 ctx = ctx->outer;
2510 gcc_assert (ctx != NULL);
2511 return ctx;
2512}
2513
2514static bool
2515oacc_loop_or_target_p (gimple stmt)
2516{
2517 enum gimple_code outer_type = gimple_code (stmt);
2518 return ((outer_type == GIMPLE_OMP_TARGET
2519 && ((gimple_omp_target_kind (stmt)
2520 == GF_OMP_TARGET_KIND_OACC_PARALLEL)
2521 || (gimple_omp_target_kind (stmt)
2522 == GF_OMP_TARGET_KIND_OACC_KERNELS)))
2523 || (outer_type == GIMPLE_OMP_FOR
2524 && gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_OACC_LOOP));
2525}
2526
2527/* Scan a GIMPLE_OMP_FOR. */
2528
2529static void
2530scan_omp_for (gomp_for *stmt, omp_context *outer_ctx)
2531{
2532 enum gimple_code outer_type = GIMPLE_ERROR_MARK;
2533 omp_context *ctx;
2534 size_t i;
2535 tree clauses = gimple_omp_for_clauses (stmt);
2536
2537 if (outer_ctx)
2538 outer_type = gimple_code (outer_ctx->stmt);
2539
2540 ctx = new_omp_context (stmt, outer_ctx);
2541
2542 if (is_gimple_omp_oacc (stmt))
2543 {
2544 if (outer_ctx && outer_type == GIMPLE_OMP_FOR)
2545 ctx->gwv_this = outer_ctx->gwv_this;
2546 for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
2547 {
2548 int val;
2549 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_GANG)
2550 val = MASK_GANG;
2551 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_WORKER)
2552 val = MASK_WORKER;
2553 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_VECTOR)
2554 val = MASK_VECTOR;
2555 else
2556 continue;
2557 ctx->gwv_this |= val;
2558 if (!outer_ctx)
2559 {
2560 /* Skip; not nested inside a region. */
2561 continue;
2562 }
2563 if (!oacc_loop_or_target_p (outer_ctx->stmt))
2564 {
2565 /* Skip; not nested inside an OpenACC region. */
2566 continue;
2567 }
2568 if (outer_type == GIMPLE_OMP_FOR)
2569 outer_ctx->gwv_below |= val;
2570 if (OMP_CLAUSE_OPERAND (c, 0) != NULL_TREE)
2571 {
2572 omp_context *enclosing = enclosing_target_ctx (outer_ctx);
2573 if (gimple_omp_target_kind (enclosing->stmt)
2574 == GF_OMP_TARGET_KIND_OACC_PARALLEL)
2575 error_at (gimple_location (stmt),
2576 "no arguments allowed to gang, worker and vector clauses inside parallel");
2577 }
2578 }
2579 }
2580
2581 scan_sharing_clauses (clauses, ctx);
2582
2583 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
2584 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
2585 {
2586 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
2587 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
2588 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
2589 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
2590 }
2591 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2592
2593 if (is_gimple_omp_oacc (stmt))
2594 {
2595 if (ctx->gwv_this & ctx->gwv_below)
2596 error_at (gimple_location (stmt),
2597 "gang, worker and vector may occur only once in a loop nest");
2598 else if (ctx->gwv_below != 0
2599 && ctx->gwv_this > ctx->gwv_below)
2600 error_at (gimple_location (stmt),
2601 "gang, worker and vector must occur in this order in a loop nest");
2602 if (outer_ctx && outer_type == GIMPLE_OMP_FOR)
2603 outer_ctx->gwv_below |= ctx->gwv_below;
2604 }
2605}
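
/* Two OpenACC fragments that the gang/worker/vector bookkeeping above
   rejects (illustrative user source, not testsuite material):

     #pragma acc loop gang
       #pragma acc loop gang        // gang occurs twice in the loop nest

     #pragma acc loop vector
       #pragma acc loop worker      // worker below vector: wrong order  */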
2606
2607/* Scan an OpenMP sections directive. */
2608
2609static void
2610scan_omp_sections (gomp_sections *stmt, omp_context *outer_ctx)
2611{
2612 omp_context *ctx;
2613
2614 ctx = new_omp_context (stmt, outer_ctx);
2615 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
2616 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2617}
2618
2619/* Scan an OpenMP single directive. */
2620
2621static void
2622scan_omp_single (gomp_single *stmt, omp_context *outer_ctx)
2623{
2624 omp_context *ctx;
2625 tree name;
2626
2627 ctx = new_omp_context (stmt, outer_ctx);
2628 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2629 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2630 name = create_tmp_var_name (".omp_copy_s");
2631 name = build_decl (gimple_location (stmt),
2632 TYPE_DECL, name, ctx->record_type);
2633 TYPE_NAME (ctx->record_type) = name;
2634
2635 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
2636 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2637
2638 if (TYPE_FIELDS (ctx->record_type) == NULL)
2639 ctx->record_type = NULL;
2640 else
2641 layout_type (ctx->record_type);
2642}
2643
2644/* Scan a GIMPLE_OMP_TARGET. */
2645
2646static void
2647scan_omp_target (gomp_target *stmt, omp_context *outer_ctx)
2648{
2649 omp_context *ctx;
2650 tree name;
2651 bool offloaded = is_gimple_omp_offloaded (stmt);
2652 tree clauses = gimple_omp_target_clauses (stmt);
2653
2654 ctx = new_omp_context (stmt, outer_ctx);
2655 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2656 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2657 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2658 name = create_tmp_var_name (".omp_data_t");
2659 name = build_decl (gimple_location (stmt),
2660 TYPE_DECL, name, ctx->record_type);
2661 DECL_ARTIFICIAL (name) = 1;
2662 DECL_NAMELESS (name) = 1;
2663 TYPE_NAME (ctx->record_type) = name;
2664 if (offloaded)
2665 {
2666 if (is_gimple_omp_oacc (stmt))
2667 ctx->reduction_map = splay_tree_new (splay_tree_compare_pointers,
2668 0, 0);
2669
2670 create_omp_child_function (ctx, false);
2671 gimple_omp_target_set_child_fn (stmt, ctx->cb.dst_fn);
2672 }
2673
2674 if (is_gimple_omp_oacc (stmt))
2675 {
2676 for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
2677 {
2678 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_NUM_GANGS)
2679 ctx->gwv_this |= MASK_GANG;
2680 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_NUM_WORKERS)
2681 ctx->gwv_this |= MASK_WORKER;
2682 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_VECTOR_LENGTH)
2683 ctx->gwv_this |= MASK_VECTOR;
2684 }
2685 }
2686
2687 scan_sharing_clauses (clauses, ctx);
2688 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2689
2690 if (TYPE_FIELDS (ctx->record_type) == NULL)
2691 ctx->record_type = ctx->receiver_decl = NULL;
2692 else
2693 {
2694 TYPE_FIELDS (ctx->record_type)
2695 = nreverse (TYPE_FIELDS (ctx->record_type));
2696#ifdef ENABLE_CHECKING
2697 tree field;
2698 unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
2699 for (field = TYPE_FIELDS (ctx->record_type);
2700 field;
2701 field = DECL_CHAIN (field))
2702 gcc_assert (DECL_ALIGN (field) == align);
2703#endif
2704 layout_type (ctx->record_type);
2705 if (offloaded)
2706 fixup_child_record_type (ctx);
2707 }
2708}
2709
2710/* Scan an OpenMP teams directive. */
2711
2712static void
2713scan_omp_teams (gomp_teams *stmt, omp_context *outer_ctx)
2714{
2715 omp_context *ctx = new_omp_context (stmt, outer_ctx);
2716 scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
2717 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2718}
2719
2720/* Check nesting restrictions. */
2721static bool
2722check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
2723{
2724 /* No nesting of non-OpenACC STMT (that is, an OpenMP one, or a GOMP builtin)
2725 inside an OpenACC CTX. */
2726 if (!(is_gimple_omp (stmt)
2727 && is_gimple_omp_oacc (stmt)))
2728 {
2729 for (omp_context *ctx_ = ctx; ctx_ != NULL; ctx_ = ctx_->outer)
2730 if (is_gimple_omp (ctx_->stmt)
2731 && is_gimple_omp_oacc (ctx_->stmt))
2732 {
2733 error_at (gimple_location (stmt),
2734 "non-OpenACC construct inside of OpenACC region");
2735 return false;
2736 }
2737 }
2738
2739 if (ctx != NULL)
2740 {
2741 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2742 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
2743 {
2744 error_at (gimple_location (stmt),
2745 "OpenMP constructs may not be nested inside simd region");
2746 return false;
2747 }
2748 else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
2749 {
2750 if ((gimple_code (stmt) != GIMPLE_OMP_FOR
2751 || (gimple_omp_for_kind (stmt)
2752 != GF_OMP_FOR_KIND_DISTRIBUTE))
2753 && gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
2754 {
2755 error_at (gimple_location (stmt),
2756 "only distribute or parallel constructs are allowed to "
2757 "be closely nested inside teams construct");
2758 return false;
2759 }
2760 }
2761 }
2762 switch (gimple_code (stmt))
2763 {
2764 case GIMPLE_OMP_FOR:
2765 if (gimple_omp_for_kind (stmt) & GF_OMP_FOR_SIMD)
2766 return true;
2767 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
2768 {
2769 if (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TEAMS)
2770 {
2771 error_at (gimple_location (stmt),
2772 "distribute construct must be closely nested inside "
2773 "teams construct");
2774 return false;
2775 }
2776 return true;
2777 }
2778 /* FALLTHRU */
2779 case GIMPLE_CALL:
2780 if (is_gimple_call (stmt)
2781 && (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2782 == BUILT_IN_GOMP_CANCEL
2783 || DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2784 == BUILT_IN_GOMP_CANCELLATION_POINT))
2785 {
2786 const char *bad = NULL;
2787 const char *kind = NULL;
2788 if (ctx == NULL)
2789 {
2790 error_at (gimple_location (stmt), "orphaned %qs construct",
2791 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2792 == BUILT_IN_GOMP_CANCEL
2793 ? "#pragma omp cancel"
2794 : "#pragma omp cancellation point");
2795 return false;
2796 }
2797 switch (tree_fits_shwi_p (gimple_call_arg (stmt, 0))
2798 ? tree_to_shwi (gimple_call_arg (stmt, 0))
2799 : 0)
2800 {
2801 case 1:
2802 if (gimple_code (ctx->stmt) != GIMPLE_OMP_PARALLEL)
2803 bad = "#pragma omp parallel";
2804 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2805 == BUILT_IN_GOMP_CANCEL
2806 && !integer_zerop (gimple_call_arg (stmt, 1)))
2807 ctx->cancellable = true;
2808 kind = "parallel";
2809 break;
2810 case 2:
2811 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
2812 || gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR)
2813 bad = "#pragma omp for";
2814 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2815 == BUILT_IN_GOMP_CANCEL
2816 && !integer_zerop (gimple_call_arg (stmt, 1)))
2817 {
2818 ctx->cancellable = true;
2819 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2820 OMP_CLAUSE_NOWAIT))
2821 warning_at (gimple_location (stmt), 0,
2822 "%<#pragma omp cancel for%> inside "
2823 "%<nowait%> for construct");
2824 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2825 OMP_CLAUSE_ORDERED))
2826 warning_at (gimple_location (stmt), 0,
2827 "%<#pragma omp cancel for%> inside "
2828 "%<ordered%> for construct");
2829 }
2830 kind = "for";
2831 break;
2832 case 4:
2833 if (gimple_code (ctx->stmt) != GIMPLE_OMP_SECTIONS
2834 && gimple_code (ctx->stmt) != GIMPLE_OMP_SECTION)
2835 bad = "#pragma omp sections";
2836 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2837 == BUILT_IN_GOMP_CANCEL
2838 && !integer_zerop (gimple_call_arg (stmt, 1)))
2839 {
2840 if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
2841 {
2842 ctx->cancellable = true;
2843 if (find_omp_clause (gimple_omp_sections_clauses
2844 (ctx->stmt),
2845 OMP_CLAUSE_NOWAIT))
2846 warning_at (gimple_location (stmt), 0,
2847 "%<#pragma omp cancel sections%> inside "
2848 "%<nowait%> sections construct");
2849 }
2850 else
2851 {
2852 gcc_assert (ctx->outer
2853 && gimple_code (ctx->outer->stmt)
2854 == GIMPLE_OMP_SECTIONS);
2855 ctx->outer->cancellable = true;
2856 if (find_omp_clause (gimple_omp_sections_clauses
2857 (ctx->outer->stmt),
2858 OMP_CLAUSE_NOWAIT))
2859 warning_at (gimple_location (stmt), 0,
2860 "%<#pragma omp cancel sections%> inside "
2861 "%<nowait%> sections construct");
2862 }
2863 }
2864 kind = "sections";
2865 break;
2866 case 8:
2867 if (gimple_code (ctx->stmt) != GIMPLE_OMP_TASK)
2868 bad = "#pragma omp task";
2869 else
2870 ctx->cancellable = true;
2871 kind = "taskgroup";
2872 break;
2873 default:
2874 error_at (gimple_location (stmt), "invalid arguments");
2875 return false;
2876 }
2877 if (bad)
2878 {
2879 error_at (gimple_location (stmt),
2880 "%<%s %s%> construct not closely nested inside of %qs",
2881 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2882 == BUILT_IN_GOMP_CANCEL
2883 ? "#pragma omp cancel"
2884 : "#pragma omp cancellation point", kind, bad);
2885 return false;
2886 }
2887 }
2888 /* FALLTHRU */
2889 case GIMPLE_OMP_SECTIONS:
2890 case GIMPLE_OMP_SINGLE:
2891 for (; ctx != NULL; ctx = ctx->outer)
2892 switch (gimple_code (ctx->stmt))
2893 {
2894 case GIMPLE_OMP_FOR:
2895 case GIMPLE_OMP_SECTIONS:
2896 case GIMPLE_OMP_SINGLE:
2897 case GIMPLE_OMP_ORDERED:
2898 case GIMPLE_OMP_MASTER:
2899 case GIMPLE_OMP_TASK:
2900 case GIMPLE_OMP_CRITICAL:
2901 if (is_gimple_call (stmt))
2902 {
2903 if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2904 != BUILT_IN_GOMP_BARRIER)
2905 return true;
2906 error_at (gimple_location (stmt),
2907 "barrier region may not be closely nested inside "
2908 "of work-sharing, critical, ordered, master or "
2909 "explicit task region");
2910 return false;
2911 }
2912 error_at (gimple_location (stmt),
2913 "work-sharing region may not be closely nested inside "
2914 "of work-sharing, critical, ordered, master or explicit "
2915 "task region");
2916 return false;
2917 case GIMPLE_OMP_PARALLEL:
2918 return true;
2919 default:
2920 break;
2921 }
2922 break;
2923 case GIMPLE_OMP_MASTER:
2924 for (; ctx != NULL; ctx = ctx->outer)
2925 switch (gimple_code (ctx->stmt))
2926 {
2927 case GIMPLE_OMP_FOR:
2928 case GIMPLE_OMP_SECTIONS:
2929 case GIMPLE_OMP_SINGLE:
2930 case GIMPLE_OMP_TASK:
2931 error_at (gimple_location (stmt),
2932 "master region may not be closely nested inside "
2933 "of work-sharing or explicit task region");
2934 return false;
2935 case GIMPLE_OMP_PARALLEL:
2936 return true;
2937 default:
2938 break;
2939 }
2940 break;
2941 case GIMPLE_OMP_ORDERED:
2942 for (; ctx != NULL; ctx = ctx->outer)
2943 switch (gimple_code (ctx->stmt))
2944 {
2945 case GIMPLE_OMP_CRITICAL:
2946 case GIMPLE_OMP_TASK:
2947 error_at (gimple_location (stmt),
2948 "ordered region may not be closely nested inside "
2949 "of critical or explicit task region");
2950 return false;
2951 case GIMPLE_OMP_FOR:
2952 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2953 OMP_CLAUSE_ORDERED) == NULL)
2954 {
2955 error_at (gimple_location (stmt),
2956 "ordered region must be closely nested inside "
2957 "a loop region with an ordered clause");
2958 return false;
2959 }
2960 return true;
2961 case GIMPLE_OMP_PARALLEL:
2962 error_at (gimple_location (stmt),
2963 "ordered region must be closely nested inside "
2964 "a loop region with an ordered clause");
2965 return false;
2966 default:
2967 break;
2968 }
2969 break;
2970 case GIMPLE_OMP_CRITICAL:
2971 {
2972 tree this_stmt_name
2973 = gimple_omp_critical_name (as_a <gomp_critical *> (stmt));
2974 for (; ctx != NULL; ctx = ctx->outer)
2975 if (gomp_critical *other_crit
2976 = dyn_cast <gomp_critical *> (ctx->stmt))
2977 if (this_stmt_name == gimple_omp_critical_name (other_crit))
2978 {
2979 error_at (gimple_location (stmt),
2980 "critical region may not be nested inside a critical "
2981 "region with the same name");
2982 return false;
2983 }
2984 }
2985 break;
2986 case GIMPLE_OMP_TEAMS:
2987 if (ctx == NULL
2988 || gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET
2989 || gimple_omp_target_kind (ctx->stmt) != GF_OMP_TARGET_KIND_REGION)
2990 {
2991 error_at (gimple_location (stmt),
2992 "teams construct not closely nested inside of target "
2993 "region");
2994 return false;
2995 }
2996 break;
2997 case GIMPLE_OMP_TARGET:
2998 for (; ctx != NULL; ctx = ctx->outer)
2999 {
3000 if (gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET)
3001 {
3002 if (is_gimple_omp (stmt)
3003 && is_gimple_omp_oacc (stmt)
3004 && is_gimple_omp (ctx->stmt))
3005 {
3006 error_at (gimple_location (stmt),
3007 "OpenACC construct inside of non-OpenACC region");
3008 return false;
3009 }
3010 continue;
3011 }
3012
3013 const char *stmt_name, *ctx_stmt_name;
3014 switch (gimple_omp_target_kind (stmt))
3015 {
3016 case GF_OMP_TARGET_KIND_REGION: stmt_name = "target"; break;
3017 case GF_OMP_TARGET_KIND_DATA: stmt_name = "target data"; break;
3018 case GF_OMP_TARGET_KIND_UPDATE: stmt_name = "target update"; break;
3019 case GF_OMP_TARGET_KIND_OACC_PARALLEL: stmt_name = "parallel"; break;
3020 case GF_OMP_TARGET_KIND_OACC_KERNELS: stmt_name = "kernels"; break;
3021 case GF_OMP_TARGET_KIND_OACC_DATA: stmt_name = "data"; break;
3022 case GF_OMP_TARGET_KIND_OACC_UPDATE: stmt_name = "update"; break;
3023 case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA: stmt_name = "enter/exit data"; break;
3024 default: gcc_unreachable ();
3025 }
3026 switch (gimple_omp_target_kind (ctx->stmt))
3027 {
3028 case GF_OMP_TARGET_KIND_REGION: ctx_stmt_name = "target"; break;
3029 case GF_OMP_TARGET_KIND_DATA: ctx_stmt_name = "target data"; break;
3030 case GF_OMP_TARGET_KIND_OACC_PARALLEL: ctx_stmt_name = "parallel"; break;
3031 case GF_OMP_TARGET_KIND_OACC_KERNELS: ctx_stmt_name = "kernels"; break;
3032 case GF_OMP_TARGET_KIND_OACC_DATA: ctx_stmt_name = "data"; break;
3033 default: gcc_unreachable ();
3034 }
3035
3036 /* OpenACC/OpenMP mismatch? */
3037 if (is_gimple_omp_oacc (stmt)
3038 != is_gimple_omp_oacc (ctx->stmt))
3039 {
3040 error_at (gimple_location (stmt),
3041 "%s %s construct inside of %s %s region",
3042 (is_gimple_omp_oacc (stmt)
3043 ? "OpenACC" : "OpenMP"), stmt_name,
3044 (is_gimple_omp_oacc (ctx->stmt)
3045 ? "OpenACC" : "OpenMP"), ctx_stmt_name);
3046 return false;
3047 }
3048 if (is_gimple_omp_offloaded (ctx->stmt))
3049 {
3050 /* No GIMPLE_OMP_TARGET inside offloaded OpenACC CTX. */
3051 if (is_gimple_omp_oacc (ctx->stmt))
3052 {
3053 error_at (gimple_location (stmt),
3054 "%s construct inside of %s region",
3055 stmt_name, ctx_stmt_name);
3056 return false;
3057 }
3058 else
3059 {
3060 gcc_checking_assert (!is_gimple_omp_oacc (stmt));
3061 warning_at (gimple_location (stmt), 0,
3062 "%s construct inside of %s region",
3063 stmt_name, ctx_stmt_name);
3064 }
3065 }
3066 }
3067 break;
3068 default:
3069 break;
3070 }
3071 return true;
3072}
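
/* Two examples of OpenMP nestings that the checks above reject
   (illustrative user source; the exact diagnostic wording is in the
   switch):

     #pragma omp teams
     #pragma omp single            // only distribute or parallel may be
     { }                           // closely nested in a teams construct

     #pragma omp for
     for (i = 0; i < n; i++)
       {
         #pragma omp barrier       // barrier closely nested inside a
       }                           // work-sharing region
   */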
3073
3074
3075/* Helper function for scan_omp.
3076
3077 Callback for walk_tree or operators in walk_gimple_stmt used to
3078 scan for OMP directives in TP. */
3079
3080static tree
3081scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
3082{
3083 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
3084 omp_context *ctx = (omp_context *) wi->info;
3085 tree t = *tp;
3086
3087 switch (TREE_CODE (t))
3088 {
3089 case VAR_DECL:
3090 case PARM_DECL:
3091 case LABEL_DECL:
3092 case RESULT_DECL:
3093 if (ctx)
3094 *tp = remap_decl (t, &ctx->cb);
3095 break;
3096
3097 default:
3098 if (ctx && TYPE_P (t))
3099 *tp = remap_type (t, &ctx->cb);
3100 else if (!DECL_P (t))
3101 {
3102 *walk_subtrees = 1;
3103 if (ctx)
3104 {
3105 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
3106 if (tem != TREE_TYPE (t))
3107 {
3108 if (TREE_CODE (t) == INTEGER_CST)
3109 *tp = wide_int_to_tree (tem, t);
3110 else
3111 TREE_TYPE (t) = tem;
3112 }
3113 }
3114 }
3115 break;
3116 }
3117
3118 return NULL_TREE;
3119}
3120
3121/* Return true if FNDECL is a setjmp or a longjmp. */
3122
3123static bool
3124setjmp_or_longjmp_p (const_tree fndecl)
3125{
3126 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
3127 && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_SETJMP
3128 || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_LONGJMP))
3129 return true;
3130
3131 tree declname = DECL_NAME (fndecl);
3132 if (!declname)
3133 return false;
3134 const char *name = IDENTIFIER_POINTER (declname);
3135 return !strcmp (name, "setjmp") || !strcmp (name, "longjmp");
3136}
3137
3138
3139/* Helper function for scan_omp.
3140
3141 Callback for walk_gimple_stmt used to scan for OMP directives in
3142 the current statement in GSI. */
3143
3144static tree
3145scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
3146 struct walk_stmt_info *wi)
3147{
3148 gimple stmt = gsi_stmt (*gsi);
3149 omp_context *ctx = (omp_context *) wi->info;
3150
3151 if (gimple_has_location (stmt))
3152 input_location = gimple_location (stmt);
3153
3154 /* Check the nesting restrictions. */
3155 bool remove = false;
3156 if (is_gimple_omp (stmt))
3157 remove = !check_omp_nesting_restrictions (stmt, ctx);
3158 else if (is_gimple_call (stmt))
3159 {
3160 tree fndecl = gimple_call_fndecl (stmt);
3161 if (fndecl)
3162 {
3163 if (setjmp_or_longjmp_p (fndecl)
3164 && ctx
3165 && gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3166 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
3167 {
3168 remove = true;
3169 error_at (gimple_location (stmt),
3170 "setjmp/longjmp inside simd construct");
3171 }
3172 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
3173 switch (DECL_FUNCTION_CODE (fndecl))
3174 {
3175 case BUILT_IN_GOMP_BARRIER:
3176 case BUILT_IN_GOMP_CANCEL:
3177 case BUILT_IN_GOMP_CANCELLATION_POINT:
3178 case BUILT_IN_GOMP_TASKYIELD:
3179 case BUILT_IN_GOMP_TASKWAIT:
3180 case BUILT_IN_GOMP_TASKGROUP_START:
3181 case BUILT_IN_GOMP_TASKGROUP_END:
3182 remove = !check_omp_nesting_restrictions (stmt, ctx);
3183 break;
3184 default:
3185 break;
3186 }
3187 }
3188 }
3189 if (remove)
3190 {
3191 stmt = gimple_build_nop ();
3192 gsi_replace (gsi, stmt, false);
3193 }
3194
3195 *handled_ops_p = true;
3196
3197 switch (gimple_code (stmt))
3198 {
3199 case GIMPLE_OMP_PARALLEL:
3200 taskreg_nesting_level++;
3201 scan_omp_parallel (gsi, ctx);
3202 taskreg_nesting_level--;
3203 break;
3204
3205 case GIMPLE_OMP_TASK:
3206 taskreg_nesting_level++;
3207 scan_omp_task (gsi, ctx);
3208 taskreg_nesting_level--;
3209 break;
3210
3211 case GIMPLE_OMP_FOR:
3212 scan_omp_for (as_a <gomp_for *> (stmt), ctx);
3213 break;
3214
3215 case GIMPLE_OMP_SECTIONS:
3216 scan_omp_sections (as_a <gomp_sections *> (stmt), ctx);
3217 break;
3218
3219 case GIMPLE_OMP_SINGLE:
3220 scan_omp_single (as_a <gomp_single *> (stmt), ctx);
3221 break;
3222
3223 case GIMPLE_OMP_SECTION:
3224 case GIMPLE_OMP_MASTER:
3225 case GIMPLE_OMP_TASKGROUP:
3226 case GIMPLE_OMP_ORDERED:
3227 case GIMPLE_OMP_CRITICAL:
3228 ctx = new_omp_context (stmt, ctx);
3229 scan_omp (gimple_omp_body_ptr (stmt), ctx);
3230 break;
3231
3232 case GIMPLE_OMP_TARGET:
3233 scan_omp_target (as_a <gomp_target *> (stmt), ctx);
3234 break;
3235
3236 case GIMPLE_OMP_TEAMS:
3237 scan_omp_teams (as_a <gomp_teams *> (stmt), ctx);
3238 break;
3239
3240 case GIMPLE_BIND:
3241 {
3242 tree var;
3243
3244 *handled_ops_p = false;
3245 if (ctx)
3246 for (var = gimple_bind_vars (as_a <gbind *> (stmt));
3247 var ;
3248 var = DECL_CHAIN (var))
3249 insert_decl_map (&ctx->cb, var, var);
3250 }
3251 break;
3252 default:
3253 *handled_ops_p = false;
3254 break;
3255 }
3256
3257 return NULL_TREE;
3258}
3259
3260
3261/* Scan all the statements starting at the current statement. CTX
3262 contains context information about the OMP directives and
3263 clauses found during the scan. */
3264
3265static void
3266scan_omp (gimple_seq *body_p, omp_context *ctx)
3267{
3268 location_t saved_location;
3269 struct walk_stmt_info wi;
3270
3271 memset (&wi, 0, sizeof (wi));
3272 wi.info = ctx;
3273 wi.want_locations = true;
3274
3275 saved_location = input_location;
3276 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
3277 input_location = saved_location;
3278}
3279\f
3280/* Re-gimplification and code generation routines. */
3281
3282/* Build a call to GOMP_barrier. */
3283
3284static gimple
3285build_omp_barrier (tree lhs)
3286{
3287 tree fndecl = builtin_decl_explicit (lhs ? BUILT_IN_GOMP_BARRIER_CANCEL
3288 : BUILT_IN_GOMP_BARRIER);
3289 gcall *g = gimple_build_call (fndecl, 0);
3290 if (lhs)
3291 gimple_call_set_lhs (g, lhs);
3292 return g;
3293}
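
/* The emitted call is thus either
     GOMP_barrier ();
   or, when the caller passes a non-null LHS to receive the cancellation
   status,
     lhs = GOMP_barrier_cancel ();
   with LHS a boolean-valued temporary supplied by the caller.  */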
3294
3295/* If a context was created for STMT when it was scanned, return it. */
3296
3297static omp_context *
3298maybe_lookup_ctx (gimple stmt)
3299{
3300 splay_tree_node n;
3301 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
3302 return n ? (omp_context *) n->value : NULL;
3303}
3304
3305
3306/* Find the mapping for DECL in CTX or the immediately enclosing
3307 context that has a mapping for DECL.
3308
3309 If CTX is a nested parallel directive, we may have to use the decl
3310 mappings created in CTX's parent context. Suppose that we have the
3311 following parallel nesting (variable UIDs shown for clarity):
3312
3313 iD.1562 = 0;
3314 #omp parallel shared(iD.1562) -> outer parallel
3315 iD.1562 = iD.1562 + 1;
3316
3317 #omp parallel shared (iD.1562) -> inner parallel
3318 iD.1562 = iD.1562 - 1;
3319
3320 Each parallel structure will create a distinct .omp_data_s structure
3321 for copying iD.1562 in/out of the directive:
3322
3323 outer parallel .omp_data_s.1.i -> iD.1562
3324 inner parallel .omp_data_s.2.i -> iD.1562
3325
3326 A shared variable mapping will produce a copy-out operation before
3327 the parallel directive and a copy-in operation after it. So, in
3328 this case we would have:
3329
3330 iD.1562 = 0;
3331 .omp_data_o.1.i = iD.1562;
3332 #omp parallel shared(iD.1562) -> outer parallel
3333 .omp_data_i.1 = &.omp_data_o.1
3334 .omp_data_i.1->i = .omp_data_i.1->i + 1;
3335
3336 .omp_data_o.2.i = iD.1562; -> **
3337 #omp parallel shared(iD.1562) -> inner parallel
3338 .omp_data_i.2 = &.omp_data_o.2
3339 .omp_data_i.2->i = .omp_data_i.2->i - 1;
3340
3341
3342 ** This is a problem. The symbol iD.1562 cannot be referenced
3343 inside the body of the outer parallel region. But since we are
3344 emitting this copy operation while expanding the inner parallel
3345 directive, we need to access the CTX structure of the outer
3346 parallel directive to get the correct mapping:
3347
3348 .omp_data_o.2.i = .omp_data_i.1->i
3349
3350 Since there may be other workshare or parallel directives enclosing
3351 the parallel directive, it may be necessary to walk up the context
3352 parent chain. This is not a problem in general because nested
3353 parallelism happens only rarely. */
3354
3355static tree
3356lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
3357{
3358 tree t;
3359 omp_context *up;
3360
3361 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
3362 t = maybe_lookup_decl (decl, up);
3363
3364 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
3365
3366 return t ? t : decl;
3367}
3368
3369
3370/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
3371 in outer contexts. */
3372
3373static tree
3374maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
3375{
3376 tree t = NULL;
3377 omp_context *up;
3378
3379 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
3380 t = maybe_lookup_decl (decl, up);
3381
3382 return t ? t : decl;
3383}
3384
3385
3386/* Construct the initialization value for reduction CLAUSE. */
3387
3388tree
3389omp_reduction_init (tree clause, tree type)
3390{
3391 location_t loc = OMP_CLAUSE_LOCATION (clause);
3392 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
3393 {
3394 case PLUS_EXPR:
3395 case MINUS_EXPR:
3396 case BIT_IOR_EXPR:
3397 case BIT_XOR_EXPR:
3398 case TRUTH_OR_EXPR:
3399 case TRUTH_ORIF_EXPR:
3400 case TRUTH_XOR_EXPR:
3401 case NE_EXPR:
3402 return build_zero_cst (type);
3403
3404 case MULT_EXPR:
3405 case TRUTH_AND_EXPR:
3406 case TRUTH_ANDIF_EXPR:
3407 case EQ_EXPR:
3408 return fold_convert_loc (loc, type, integer_one_node);
3409
3410 case BIT_AND_EXPR:
3411 return fold_convert_loc (loc, type, integer_minus_one_node);
3412
3413 case MAX_EXPR:
3414 if (SCALAR_FLOAT_TYPE_P (type))
3415 {
3416 REAL_VALUE_TYPE max, min;
3417 if (HONOR_INFINITIES (type))
3418 {
3419 real_inf (&max);
3420 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
3421 }
3422 else
3423 real_maxval (&min, 1, TYPE_MODE (type));
3424 return build_real (type, min);
3425 }
3426 else
3427 {
3428 gcc_assert (INTEGRAL_TYPE_P (type));
3429 return TYPE_MIN_VALUE (type);
3430 }
3431
3432 case MIN_EXPR:
3433 if (SCALAR_FLOAT_TYPE_P (type))
3434 {
3435 REAL_VALUE_TYPE max;
3436 if (HONOR_INFINITIES (type))
3437 real_inf (&max);
3438 else
3439 real_maxval (&max, 0, TYPE_MODE (type));
3440 return build_real (type, max);
3441 }
3442 else
3443 {
3444 gcc_assert (INTEGRAL_TYPE_P (type));
3445 return TYPE_MAX_VALUE (type);
3446 }
3447
3448 default:
3449 gcc_unreachable ();
3450 }
3451}
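
/* For instance: with reduction(+:s) a private double S starts at 0.0;
   with reduction(max:m) a private double starts at -inf (or at -DBL_MAX
   if the type does not honor infinities) and a private int at INT_MIN;
   with reduction(&:x) a private int starts at ~0, i.e. all bits set.  */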
3452
3453/* Return alignment to be assumed for var in CLAUSE, which should be
3454 OMP_CLAUSE_ALIGNED. */
3455
3456static tree
3457omp_clause_aligned_alignment (tree clause)
3458{
3459 if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
3460 return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause);
3461
3462 /* Otherwise return the implementation-defined alignment. */
3463 unsigned int al = 1;
3464 machine_mode mode, vmode;
3465 int vs = targetm.vectorize.autovectorize_vector_sizes ();
3466 if (vs)
3467 vs = 1 << floor_log2 (vs);
3468 static enum mode_class classes[]
3469 = { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
3470 for (int i = 0; i < 4; i += 2)
3471 for (mode = GET_CLASS_NARROWEST_MODE (classes[i]);
3472 mode != VOIDmode;
3473 mode = GET_MODE_WIDER_MODE (mode))
3474 {
3475 vmode = targetm.vectorize.preferred_simd_mode (mode);
3476 if (GET_MODE_CLASS (vmode) != classes[i + 1])
3477 continue;
3478 while (vs
3479 && GET_MODE_SIZE (vmode) < vs
3480 && GET_MODE_2XWIDER_MODE (vmode) != VOIDmode)
3481 vmode = GET_MODE_2XWIDER_MODE (vmode);
3482
3483 tree type = lang_hooks.types.type_for_mode (mode, 1);
3484 if (type == NULL_TREE || TYPE_MODE (type) != mode)
3485 continue;
3486 type = build_vector_type (type, GET_MODE_SIZE (vmode)
3487 / GET_MODE_SIZE (mode));
3488 if (TYPE_MODE (type) != vmode)
3489 continue;
3490 if (TYPE_ALIGN_UNIT (type) > al)
3491 al = TYPE_ALIGN_UNIT (type);
3492 }
3493 return build_int_cst (integer_type_node, al);
3494}
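
/* A worked instance with hypothetical target numbers: if the preferred
   SIMD mode for SImode is V8SImode and for DFmode is V4DFmode, both
   32-byte vectors, the loop above ends with al = 32, so an aligned
   clause without an explicit alignment assumes 32 bytes.  */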
3495
3496/* Return maximum possible vectorization factor for the target. */
3497
3498static int
3499omp_max_vf (void)
3500{
3501 if (!optimize
3502 || optimize_debug
3503 || !flag_tree_loop_optimize
3504 || (!flag_tree_loop_vectorize
3505 && (global_options_set.x_flag_tree_loop_vectorize
3506 || global_options_set.x_flag_tree_vectorize)))
3507 return 1;
3508
3509 int vs = targetm.vectorize.autovectorize_vector_sizes ();
3510 if (vs)
3511 {
3512 vs = 1 << floor_log2 (vs);
3513 return vs;
3514 }
3515 machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
3516 if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
3517 return GET_MODE_NUNITS (vqimode);
3518 return 1;
3519}
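
/* Worked example of the arithmetic above, for a hypothetical target
   whose autovectorize_vector_sizes hook returns 0x30 (32- and 16-byte
   vectors): 1 << floor_log2 (0x30) is 32, so the maximum vectorization
   factor reported is 32.  */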
3520
3521/* Helper function of lower_rec_input_clauses, used for #pragma omp simd
3522 privatization. */
3523
3524static bool
3525lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
3526 tree &idx, tree &lane, tree &ivar, tree &lvar)
3527{
3528 if (max_vf == 0)
3529 {
3530 max_vf = omp_max_vf ();
3531 if (max_vf > 1)
3532 {
3533 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3534 OMP_CLAUSE_SAFELEN);
3535 if (c && TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) != INTEGER_CST)
3536 max_vf = 1;
3537 else if (c && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
3538 max_vf) == -1)
3539 max_vf = tree_to_shwi (OMP_CLAUSE_SAFELEN_EXPR (c));
3540 }
3541 if (max_vf > 1)
3542 {
3543 idx = create_tmp_var (unsigned_type_node);
3544 lane = create_tmp_var (unsigned_type_node);
3545 }
3546 }
3547 if (max_vf == 1)
3548 return false;
3549
3550 tree atype = build_array_type_nelts (TREE_TYPE (new_var), max_vf);
3551 tree avar = create_tmp_var_raw (atype);
3552 if (TREE_ADDRESSABLE (new_var))
3553 TREE_ADDRESSABLE (avar) = 1;
3554 DECL_ATTRIBUTES (avar)
3555 = tree_cons (get_identifier ("omp simd array"), NULL,
3556 DECL_ATTRIBUTES (avar));
3557 gimple_add_tmp_var (avar);
3558 ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, idx,
3559 NULL_TREE, NULL_TREE);
3560 lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, lane,
3561 NULL_TREE, NULL_TREE);
3562 if (DECL_P (new_var))
3563 {
3564 SET_DECL_VALUE_EXPR (new_var, lvar);
3565 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3566 }
3567 return true;
3568}
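
/* Shape of the privatization done above, as a sketch rather than literal
   dump output: a privatized scalar X of type T in a simd loop is backed
   by a new array
     T x_simd[max_vf];             (carries the "omp simd array" attribute)
   uses of X in the loop body go through x_simd[idx] (IVAR), and X itself
   is rewritten via DECL_VALUE_EXPR to x_simd[lane] (LVAR), which the
   vectorizer can later turn into a vector register.  */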
3569
3570/* Helper function of lower_rec_input_clauses. For a reference used
3571 in a simd reduction, create the underlying variable it will reference. */
3572
3573static void
3574handle_simd_reference (location_t loc, tree new_vard, gimple_seq *ilist)
3575{
3576 tree z = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_vard)));
3577 if (TREE_CONSTANT (z))
3578 {
3579 const char *name = NULL;
3580 if (DECL_NAME (new_vard))
3581 name = IDENTIFIER_POINTER (DECL_NAME (new_vard));
3582
3583 z = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_vard)), name);
3584 gimple_add_tmp_var (z);
3585 TREE_ADDRESSABLE (z) = 1;
3586 z = build_fold_addr_expr_loc (loc, z);
3587 gimplify_assign (new_vard, z, ilist);
3588 }
3589}
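
/* In effect, for a reference NEW_VARD of type T * where T has constant
   size, the code above emits
     new_vard = &D.tmp;            // D.tmp: a fresh addressable T
   so that later dereferences of NEW_VARD have storage to point at.
   (D.tmp stands in for the anonymous temporary; sketch only.)  */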
3590
3591/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
3592 from the receiver (aka child) side and initializers for REFERENCE_TYPE
3593 private variables. Initialization statements go in ILIST, while calls
3594 to destructors go in DLIST. */
3595
3596static void
3597lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
3598 omp_context *ctx, struct omp_for_data *fd)
3599{
3600 tree c, dtor, copyin_seq, x, ptr;
3601 bool copyin_by_ref = false;
3602 bool lastprivate_firstprivate = false;
3603 bool reduction_omp_orig_ref = false;
3604 int pass;
3605 bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3606 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD);
3607 int max_vf = 0;
3608 tree lane = NULL_TREE, idx = NULL_TREE;
3609 tree ivar = NULL_TREE, lvar = NULL_TREE;
3610 gimple_seq llist[2] = { NULL, NULL };
3611
3612 copyin_seq = NULL;
3613
3614 /* Set max_vf=1 (which will later enforce safelen=1) in simd loops
3615 with data sharing clauses referencing variable sized vars. That
3616 is unnecessarily hard to support and very unlikely to result in
3617 vectorized code anyway. */
3618 if (is_simd)
3619 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3620 switch (OMP_CLAUSE_CODE (c))
3621 {
3622 case OMP_CLAUSE_LINEAR:
3623 if (OMP_CLAUSE_LINEAR_ARRAY (c))
3624 max_vf = 1;
3625 /* FALLTHRU */
3626 case OMP_CLAUSE_REDUCTION:
3627 case OMP_CLAUSE_PRIVATE:
3628 case OMP_CLAUSE_FIRSTPRIVATE:
3629 case OMP_CLAUSE_LASTPRIVATE:
3630 if (is_variable_sized (OMP_CLAUSE_DECL (c)))
3631 max_vf = 1;
3632 break;
3633 default:
3634 continue;
3635 }
3636
3637 /* Do all the fixed-sized types in the first pass, and the variable-sized
3638 types in the second pass. This makes sure that the scalar arguments to
3639 the variable-sized types are processed before we use them in the
3640 variable-sized operations. */
3641 for (pass = 0; pass < 2; ++pass)
3642 {
3643 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3644 {
3645 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
3646 tree var, new_var;
3647 bool by_ref;
3648 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3649
3650 switch (c_kind)
3651 {
3652 case OMP_CLAUSE_PRIVATE:
3653 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
3654 continue;
3655 break;
3656 case OMP_CLAUSE_SHARED:
3657 /* Ignore shared directives in teams construct. */
3658 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3659 continue;
3660 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
3661 {
3662 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
3663 continue;
3664 }
3665 case OMP_CLAUSE_FIRSTPRIVATE:
3666 case OMP_CLAUSE_COPYIN:
3667 case OMP_CLAUSE_LINEAR:
3668 break;
3669 case OMP_CLAUSE_REDUCTION:
3670 if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
3671 reduction_omp_orig_ref = true;
3672 break;
3673 case OMP_CLAUSE__LOOPTEMP_:
3674 /* Handle _looptemp_ clauses only on parallel. */
3675 if (fd)
3676 continue;
3677 break;
3678 case OMP_CLAUSE_LASTPRIVATE:
3679 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3680 {
3681 lastprivate_firstprivate = true;
3682 if (pass != 0)
3683 continue;
3684 }
3685 /* Even without corresponding firstprivate, if
3686 decl is Fortran allocatable, it needs outer var
3687 reference. */
3688 else if (pass == 0
3689 && lang_hooks.decls.omp_private_outer_ref
3690 (OMP_CLAUSE_DECL (c)))
3691 lastprivate_firstprivate = true;
3692 break;
            case OMP_CLAUSE_ALIGNED:
              if (pass == 0)
                continue;
              var = OMP_CLAUSE_DECL (c);
              if (TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE
                  && !is_global_var (var))
                {
                  new_var = maybe_lookup_decl (var, ctx);
                  if (new_var == NULL_TREE)
                    new_var = maybe_lookup_decl_in_outer_ctx (var, ctx);
                  x = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
                  x = build_call_expr_loc (clause_loc, x, 2, new_var,
                                           omp_clause_aligned_alignment (c));
                  x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
                  x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
                  gimplify_and_add (x, ilist);
                }
              else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
                       && is_global_var (var))
                {
                  tree ptype = build_pointer_type (TREE_TYPE (var)), t, t2;
                  new_var = lookup_decl (var, ctx);
                  t = maybe_lookup_decl_in_outer_ctx (var, ctx);
                  t = build_fold_addr_expr_loc (clause_loc, t);
                  t2 = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
                  t = build_call_expr_loc (clause_loc, t2, 2, t,
                                           omp_clause_aligned_alignment (c));
                  t = fold_convert_loc (clause_loc, ptype, t);
                  x = create_tmp_var (ptype);
                  t = build2 (MODIFY_EXPR, ptype, x, t);
                  gimplify_and_add (t, ilist);
                  t = build_simple_mem_ref_loc (clause_loc, x);
                  SET_DECL_VALUE_EXPR (new_var, t);
                  DECL_HAS_VALUE_EXPR_P (new_var) = 1;
                }
              continue;
            default:
              continue;
            }

          new_var = var = OMP_CLAUSE_DECL (c);
          if (c_kind != OMP_CLAUSE_COPYIN)
            new_var = lookup_decl (var, ctx);

          if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
            {
              if (pass != 0)
                continue;
            }
          else if (is_variable_sized (var))
            {
              /* For variable sized types, we need to allocate the
                 actual storage here.  Call alloca and store the
                 result in the pointer decl that we created elsewhere.  */
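              /* NEW_VAR's DECL_VALUE_EXPR is an INDIRECT_REF of a pointer
                 decl created during scanning; the code below fills that
                 pointer in, roughly (sketch only)

                   ptr = (typeof (ptr)) __builtin_alloca (size);  */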
              if (pass == 0)
                continue;

              if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
                {
                  gcall *stmt;
                  tree tmp, atmp;

                  ptr = DECL_VALUE_EXPR (new_var);
                  gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
                  ptr = TREE_OPERAND (ptr, 0);
                  gcc_assert (DECL_P (ptr));
                  x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));

                  /* void *tmp = __builtin_alloca (size);  */
                  atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
                  stmt = gimple_build_call (atmp, 1, x);
                  tmp = create_tmp_var_raw (ptr_type_node);
                  gimple_add_tmp_var (tmp);
                  gimple_call_set_lhs (stmt, tmp);

                  gimple_seq_add_stmt (ilist, stmt);

                  x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
                  gimplify_assign (ptr, x, ilist);
                }
            }
          else if (is_reference (var))
            {
              /* For references that are being privatized for Fortran,
                 allocate new backing storage for the new pointer
                 variable.  This allows us to avoid changing all the
                 code that expects a pointer to something that expects
                 a direct variable.  */
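              /* Conceptually (illustrative only): for a privatized
                 reference of type T, build

                   new_var = <address of fresh T-sized storage>;

                 (alloca for variable sizes, a named temporary otherwise)
                 and then rewrite NEW_VAR to *NEW_VAR for the code below.  */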
              if (pass == 0)
                continue;

              x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
              if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
                {
                  x = build_receiver_ref (var, false, ctx);
                  x = build_fold_addr_expr_loc (clause_loc, x);
                }
              else if (TREE_CONSTANT (x))
                {
                  /* For a reduction in a SIMD loop, defer adding the
                     initialization of the reference, because if we decide
                     to use a SIMD array for it, the initialization could
                     cause an expansion ICE.  */
                  if (c_kind == OMP_CLAUSE_REDUCTION && is_simd)
                    x = NULL_TREE;
                  else
                    {
                      const char *name = NULL;
                      if (DECL_NAME (var))
                        name = IDENTIFIER_POINTER (DECL_NAME (new_var));

                      x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
                                              name);
                      gimple_add_tmp_var (x);
                      TREE_ADDRESSABLE (x) = 1;
                      x = build_fold_addr_expr_loc (clause_loc, x);
                    }
                }
              else
                {
                  tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
                  x = build_call_expr_loc (clause_loc, atmp, 1, x);
                }

              if (x)
                {
                  x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
                  gimplify_assign (new_var, x, ilist);
                }

              new_var = build_simple_mem_ref_loc (clause_loc, new_var);
            }
          else if (c_kind == OMP_CLAUSE_REDUCTION
                   && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
            {
              if (pass == 0)
                continue;
            }
          else if (pass != 0)
            continue;

          switch (OMP_CLAUSE_CODE (c))
            {
            case OMP_CLAUSE_SHARED:
              /* Ignore shared directives in teams construct.  */
              if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
                continue;
              /* Shared global vars are just accessed directly.  */