/* This file contains routines to construct OpenACC and OpenMP constructs,
   called from parsing in the C and C++ front ends.

   Copyright (C) 2005-2015 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>,
   Diego Novillo <dnovillo@redhat.com>.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
26 #include "coretypes.h"
31 #include "double-int.h"
40 #include "gimple-expr.h"
41 #include "langhooks.h"
43 #include "gomp-constants.h"
46 /* Complete a #pragma oacc wait construct. LOC is the location of
50 c_finish_oacc_wait (location_t loc, tree parms, tree clauses)
52 const int nparms = list_length (parms);
54 vec<tree, va_gc> *args;
56 vec_alloc (args, nparms + 2);
57 stmt = builtin_decl_explicit (BUILT_IN_GOACC_WAIT);
59 if (find_omp_clause (clauses, OMP_CLAUSE_ASYNC))
60 t = OMP_CLAUSE_ASYNC_EXPR (clauses);
62 t = build_int_cst (integer_type_node, GOMP_ASYNC_SYNC);
65 args->quick_push (build_int_cst (integer_type_node, nparms));
67 for (t = parms; t; t = TREE_CHAIN (t))
69 if (TREE_CODE (OMP_CLAUSE_WAIT_EXPR (t)) == INTEGER_CST)
70 args->quick_push (build_int_cst (integer_type_node,
71 TREE_INT_CST_LOW (OMP_CLAUSE_WAIT_EXPR (t))));
73 args->quick_push (OMP_CLAUSE_WAIT_EXPR (t));
76 stmt = build_call_expr_loc_vec (loc, stmt, args);
84 /* Complete a #pragma omp master construct. STMT is the structured-block
85 that follows the pragma. LOC is the l*/
88 c_finish_omp_master (location_t loc, tree stmt)
90 tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt));
91 SET_EXPR_LOCATION (t, loc);
95 /* Complete a #pragma omp taskgroup construct. STMT is the structured-block
96 that follows the pragma. LOC is the l*/
99 c_finish_omp_taskgroup (location_t loc, tree stmt)
101 tree t = add_stmt (build1 (OMP_TASKGROUP, void_type_node, stmt));
102 SET_EXPR_LOCATION (t, loc);
106 /* Complete a #pragma omp critical construct. STMT is the structured-block
107 that follows the pragma, NAME is the identifier in the pragma, or null
108 if it was omitted. LOC is the location of the #pragma. */
111 c_finish_omp_critical (location_t loc, tree body, tree name)
113 tree stmt = make_node (OMP_CRITICAL);
114 TREE_TYPE (stmt) = void_type_node;
115 OMP_CRITICAL_BODY (stmt) = body;
116 OMP_CRITICAL_NAME (stmt) = name;
117 SET_EXPR_LOCATION (stmt, loc);
118 return add_stmt (stmt);
121 /* Complete a #pragma omp ordered construct. STMT is the structured-block
122 that follows the pragma. LOC is the location of the #pragma. */
125 c_finish_omp_ordered (location_t loc, tree stmt)
127 tree t = build1 (OMP_ORDERED, void_type_node, stmt);
128 SET_EXPR_LOCATION (t, loc);
133 /* Complete a #pragma omp barrier construct. LOC is the location of
137 c_finish_omp_barrier (location_t loc)
141 x = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
142 x = build_call_expr_loc (loc, x, 0);
147 /* Complete a #pragma omp taskwait construct. LOC is the location of the
151 c_finish_omp_taskwait (location_t loc)
155 x = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
156 x = build_call_expr_loc (loc, x, 0);
161 /* Complete a #pragma omp taskyield construct. LOC is the location of the
165 c_finish_omp_taskyield (location_t loc)
169 x = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
170 x = build_call_expr_loc (loc, x, 0);
175 /* Complete a #pragma omp atomic construct. For CODE OMP_ATOMIC
176 the expression to be implemented atomically is LHS opcode= RHS.
177 For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
178 opcode= RHS with the new or old content of LHS returned.
179 LOC is the location of the atomic statement. The value returned
180 is either error_mark_node (if the construct was erroneous) or an
181 OMP_ATOMIC* node which should be added to the current statement
182 tree with add_stmt. */
185 c_finish_omp_atomic (location_t loc, enum tree_code code,
186 enum tree_code opcode, tree lhs, tree rhs,
187 tree v, tree lhs1, tree rhs1, bool swapped, bool seq_cst)
189 tree x, type, addr, pre = NULL_TREE;
191 if (lhs == error_mark_node || rhs == error_mark_node
192 || v == error_mark_node || lhs1 == error_mark_node
193 || rhs1 == error_mark_node)
194 return error_mark_node;
196 /* ??? According to one reading of the OpenMP spec, complex type are
197 supported, but there are no atomic stores for any architecture.
198 But at least icc 9.0 doesn't support complex types here either.
199 And lets not even talk about vector types... */
200 type = TREE_TYPE (lhs);
201 if (!INTEGRAL_TYPE_P (type)
202 && !POINTER_TYPE_P (type)
203 && !SCALAR_FLOAT_TYPE_P (type))
205 error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
206 return error_mark_node;
209 if (opcode == RDIV_EXPR)
210 opcode = TRUNC_DIV_EXPR;
212 /* ??? Validate that rhs does not overlap lhs. */
214 /* Take and save the address of the lhs. From then on we'll reference it
216 addr = build_unary_op (loc, ADDR_EXPR, lhs, 0);
217 if (addr == error_mark_node)
218 return error_mark_node;
219 addr = save_expr (addr);
220 if (TREE_CODE (addr) != SAVE_EXPR
221 && (TREE_CODE (addr) != ADDR_EXPR
222 || TREE_CODE (TREE_OPERAND (addr, 0)) != VAR_DECL))
224 /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
225 it even after unsharing function body. */
226 tree var = create_tmp_var_raw (TREE_TYPE (addr));
227 DECL_CONTEXT (var) = current_function_decl;
228 addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
230 lhs = build_indirect_ref (loc, addr, RO_NULL);
232 if (code == OMP_ATOMIC_READ)
234 x = build1 (OMP_ATOMIC_READ, type, addr);
235 SET_EXPR_LOCATION (x, loc);
236 OMP_ATOMIC_SEQ_CST (x) = seq_cst;
237 return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
241 /* There are lots of warnings, errors, and conversions that need to happen
242 in the course of interpreting a statement. Use the normal mechanisms
243 to do this, and then take it apart again. */
246 rhs = build_binary_op (loc, opcode, rhs, lhs, 1);
249 bool save = in_late_binary_op;
250 in_late_binary_op = true;
251 x = build_modify_expr (loc, lhs, NULL_TREE, opcode, loc, rhs, NULL_TREE);
252 in_late_binary_op = save;
253 if (x == error_mark_node)
254 return error_mark_node;
255 if (TREE_CODE (x) == COMPOUND_EXPR)
257 pre = TREE_OPERAND (x, 0);
258 gcc_assert (TREE_CODE (pre) == SAVE_EXPR);
259 x = TREE_OPERAND (x, 1);
261 gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
262 rhs = TREE_OPERAND (x, 1);
264 /* Punt the actual generation of atomic operations to common code. */
265 if (code == OMP_ATOMIC)
266 type = void_type_node;
267 x = build2 (code, type, addr, rhs);
268 SET_EXPR_LOCATION (x, loc);
269 OMP_ATOMIC_SEQ_CST (x) = seq_cst;
271 /* Generally it is hard to prove lhs1 and lhs are the same memory
272 location, just diagnose different variables. */
274 && TREE_CODE (rhs1) == VAR_DECL
275 && TREE_CODE (lhs) == VAR_DECL
278 if (code == OMP_ATOMIC)
279 error_at (loc, "%<#pragma omp atomic update%> uses two different variables for memory");
281 error_at (loc, "%<#pragma omp atomic capture%> uses two different variables for memory");
282 return error_mark_node;
285 if (code != OMP_ATOMIC)
287 /* Generally it is hard to prove lhs1 and lhs are the same memory
288 location, just diagnose different variables. */
289 if (lhs1 && TREE_CODE (lhs1) == VAR_DECL && TREE_CODE (lhs) == VAR_DECL)
293 error_at (loc, "%<#pragma omp atomic capture%> uses two different variables for memory");
294 return error_mark_node;
297 x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
299 if (rhs1 && rhs1 != lhs)
301 tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, 0);
302 if (rhs1addr == error_mark_node)
303 return error_mark_node;
304 x = omit_one_operand_loc (loc, type, x, rhs1addr);
306 if (lhs1 && lhs1 != lhs)
308 tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, 0);
309 if (lhs1addr == error_mark_node)
310 return error_mark_node;
311 if (code == OMP_ATOMIC_CAPTURE_OLD)
312 x = omit_one_operand_loc (loc, type, x, lhs1addr);
316 x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
320 else if (rhs1 && rhs1 != lhs)
322 tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, 0);
323 if (rhs1addr == error_mark_node)
324 return error_mark_node;
325 x = omit_one_operand_loc (loc, type, x, rhs1addr);
329 x = omit_one_operand_loc (loc, type, x, pre);
334 /* Complete a #pragma omp flush construct. We don't do anything with
335 the variable list that the syntax allows. LOC is the location of
339 c_finish_omp_flush (location_t loc)
343 x = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
344 x = build_call_expr_loc (loc, x, 0);
349 /* Check and canonicalize OMP_FOR increment expression.
350 Helper function for c_finish_omp_for. */
353 check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
357 if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
358 || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
359 return error_mark_node;
362 return build_int_cst (TREE_TYPE (exp), 0);
364 switch (TREE_CODE (exp))
367 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
368 if (t != error_mark_node)
369 return fold_convert_loc (loc, TREE_TYPE (exp), t);
372 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
373 if (t != error_mark_node)
374 return fold_build2_loc (loc, MINUS_EXPR,
375 TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
378 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
379 if (t != error_mark_node)
380 return fold_build2_loc (loc, PLUS_EXPR,
381 TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
382 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
383 if (t != error_mark_node)
384 return fold_build2_loc (loc, PLUS_EXPR,
385 TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
389 /* cp_build_modify_expr forces preevaluation of the RHS to make
390 sure that it is evaluated before the lvalue-rvalue conversion
391 is applied to the LHS. Reconstruct the original expression. */
392 tree op0 = TREE_OPERAND (exp, 0);
393 if (TREE_CODE (op0) == TARGET_EXPR
394 && !VOID_TYPE_P (TREE_TYPE (op0)))
396 tree op1 = TREE_OPERAND (exp, 1);
397 tree temp = TARGET_EXPR_SLOT (op0);
398 if (TREE_CODE_CLASS (TREE_CODE (op1)) == tcc_binary
399 && TREE_OPERAND (op1, 1) == temp)
401 op1 = copy_node (op1);
402 TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0);
403 return check_omp_for_incr_expr (loc, op1, decl);
412 return error_mark_node;
415 /* If the OMP_FOR increment expression in INCR is of pointer type,
416 canonicalize it into an expression handled by gimplify_omp_for()
417 and return it. DECL is the iteration variable. */
420 c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr)
422 if (POINTER_TYPE_P (TREE_TYPE (decl))
423 && TREE_OPERAND (incr, 1))
425 tree t = fold_convert_loc (loc,
426 sizetype, TREE_OPERAND (incr, 1));
428 if (TREE_CODE (incr) == POSTDECREMENT_EXPR
429 || TREE_CODE (incr) == PREDECREMENT_EXPR)
430 t = fold_build1_loc (loc, NEGATE_EXPR, sizetype, t);
431 t = fold_build_pointer_plus (decl, t);
432 incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
437 /* Validate and generate OMP_FOR.
438 DECLV is a vector of iteration variables, for each collapsed loop.
439 INITV, CONDV and INCRV are vectors containing initialization
440 expressions, controlling predicates and increment expressions.
441 BODY is the body of the loop and PRE_BODY statements that go before
445 c_finish_omp_for (location_t locus, enum tree_code code, tree declv,
446 tree initv, tree condv, tree incrv, tree body, tree pre_body)
452 if ((code == CILK_SIMD || code == CILK_FOR)
453 && !c_check_cilk_loop (locus, TREE_VEC_ELT (declv, 0)))
456 gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
457 gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
458 gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
459 for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
461 tree decl = TREE_VEC_ELT (declv, i);
462 tree init = TREE_VEC_ELT (initv, i);
463 tree cond = TREE_VEC_ELT (condv, i);
464 tree incr = TREE_VEC_ELT (incrv, i);
467 if (EXPR_HAS_LOCATION (init))
468 elocus = EXPR_LOCATION (init);
470 /* Validate the iteration variable. */
471 if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
472 && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
474 error_at (elocus, "invalid type for iteration variable %qE", decl);
478 /* In the case of "for (int i = 0...)", init will be a decl. It should
479 have a DECL_INITIAL that we can turn into an assignment. */
482 elocus = DECL_SOURCE_LOCATION (decl);
484 init = DECL_INITIAL (decl);
487 error_at (elocus, "%qE is not initialized", decl);
488 init = integer_zero_node;
492 init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
493 /* FIXME diagnostics: This should
494 be the location of the INIT. */
499 if (init != error_mark_node)
501 gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
502 gcc_assert (TREE_OPERAND (init, 0) == decl);
505 if (cond == NULL_TREE)
507 error_at (elocus, "missing controlling predicate");
512 bool cond_ok = false;
514 if (EXPR_HAS_LOCATION (cond))
515 elocus = EXPR_LOCATION (cond);
517 if (TREE_CODE (cond) == LT_EXPR
518 || TREE_CODE (cond) == LE_EXPR
519 || TREE_CODE (cond) == GT_EXPR
520 || TREE_CODE (cond) == GE_EXPR
521 || TREE_CODE (cond) == NE_EXPR
522 || TREE_CODE (cond) == EQ_EXPR)
524 tree op0 = TREE_OPERAND (cond, 0);
525 tree op1 = TREE_OPERAND (cond, 1);
527 /* 2.5.1. The comparison in the condition is computed in
528 the type of DECL, otherwise the behavior is undefined.
534 according to ISO will be evaluated as:
539 if (TREE_CODE (op0) == NOP_EXPR
540 && decl == TREE_OPERAND (op0, 0))
542 TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
543 TREE_OPERAND (cond, 1)
544 = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
545 TREE_OPERAND (cond, 1));
547 else if (TREE_CODE (op1) == NOP_EXPR
548 && decl == TREE_OPERAND (op1, 0))
550 TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
551 TREE_OPERAND (cond, 0)
552 = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
553 TREE_OPERAND (cond, 0));
556 if (decl == TREE_OPERAND (cond, 0))
558 else if (decl == TREE_OPERAND (cond, 1))
561 swap_tree_comparison (TREE_CODE (cond)));
562 TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
563 TREE_OPERAND (cond, 0) = decl;
567 if (TREE_CODE (cond) == NE_EXPR
568 || TREE_CODE (cond) == EQ_EXPR)
570 if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
572 if (code != CILK_SIMD && code != CILK_FOR)
575 else if (operand_equal_p (TREE_OPERAND (cond, 1),
576 TYPE_MIN_VALUE (TREE_TYPE (decl)),
578 TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
579 ? GT_EXPR : LE_EXPR);
580 else if (operand_equal_p (TREE_OPERAND (cond, 1),
581 TYPE_MAX_VALUE (TREE_TYPE (decl)),
583 TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
584 ? LT_EXPR : GE_EXPR);
585 else if (code != CILK_SIMD && code != CILK_FOR)
592 error_at (elocus, "invalid controlling predicate");
597 if (incr == NULL_TREE)
599 error_at (elocus, "missing increment expression");
604 bool incr_ok = false;
606 if (EXPR_HAS_LOCATION (incr))
607 elocus = EXPR_LOCATION (incr);
609 /* Check all the valid increment expressions: v++, v--, ++v, --v,
610 v = v + incr, v = incr + v and v = v - incr. */
611 switch (TREE_CODE (incr))
613 case POSTINCREMENT_EXPR:
614 case PREINCREMENT_EXPR:
615 case POSTDECREMENT_EXPR:
616 case PREDECREMENT_EXPR:
617 if (TREE_OPERAND (incr, 0) != decl)
621 incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr);
625 if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR
626 || TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR)
628 incr = TREE_OPERAND (incr, 1);
631 if (TREE_OPERAND (incr, 0) != decl)
633 if (TREE_OPERAND (incr, 1) == decl)
635 if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
636 && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
637 || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
639 else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
640 || (TREE_CODE (TREE_OPERAND (incr, 1))
641 == POINTER_PLUS_EXPR))
642 && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
646 tree t = check_omp_for_incr_expr (elocus,
647 TREE_OPERAND (incr, 1),
649 if (t != error_mark_node)
652 t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
653 incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
663 error_at (elocus, "invalid increment expression");
668 TREE_VEC_ELT (initv, i) = init;
669 TREE_VEC_ELT (incrv, i) = incr;
676 tree t = make_node (code);
678 TREE_TYPE (t) = void_type_node;
679 OMP_FOR_INIT (t) = initv;
680 OMP_FOR_COND (t) = condv;
681 OMP_FOR_INCR (t) = incrv;
682 OMP_FOR_BODY (t) = body;
683 OMP_FOR_PRE_BODY (t) = pre_body;
685 SET_EXPR_LOCATION (t, locus);
690 /* Right now we have 14 different combined constructs, this
691 function attempts to split or duplicate clauses for combined
692 constructs. CODE is the innermost construct in the combined construct,
693 and MASK allows to determine which constructs are combined together,
694 as every construct has at least one clause that no other construct
695 has (except for OMP_SECTIONS, but that can be only combined with parallel).
696 Combined constructs are:
697 #pragma omp parallel for
698 #pragma omp parallel sections
699 #pragma omp parallel for simd
701 #pragma omp distribute simd
702 #pragma omp distribute parallel for
703 #pragma omp distribute parallel for simd
704 #pragma omp teams distribute
705 #pragma omp teams distribute parallel for
706 #pragma omp teams distribute parallel for simd
707 #pragma omp target teams
708 #pragma omp target teams distribute
709 #pragma omp target teams distribute parallel for
710 #pragma omp target teams distribute parallel for simd */
713 c_omp_split_clauses (location_t loc, enum tree_code code,
714 omp_clause_mask mask, tree clauses, tree *cclauses)
717 enum c_omp_clause_split s;
720 for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
722 /* Add implicit nowait clause on
723 #pragma omp parallel {for,for simd,sections}. */
724 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
729 cclauses[C_OMP_CLAUSE_SPLIT_FOR]
730 = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
733 cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]
734 = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
740 for (; clauses ; clauses = next)
742 next = OMP_CLAUSE_CHAIN (clauses);
744 switch (OMP_CLAUSE_CODE (clauses))
746 /* First the clauses that are unique to some constructs. */
747 case OMP_CLAUSE_DEVICE:
749 s = C_OMP_CLAUSE_SPLIT_TARGET;
751 case OMP_CLAUSE_NUM_TEAMS:
752 case OMP_CLAUSE_THREAD_LIMIT:
753 s = C_OMP_CLAUSE_SPLIT_TEAMS;
755 case OMP_CLAUSE_DIST_SCHEDULE:
756 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
758 case OMP_CLAUSE_COPYIN:
759 case OMP_CLAUSE_NUM_THREADS:
760 case OMP_CLAUSE_PROC_BIND:
761 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
763 case OMP_CLAUSE_ORDERED:
764 case OMP_CLAUSE_SCHEDULE:
765 case OMP_CLAUSE_NOWAIT:
766 s = C_OMP_CLAUSE_SPLIT_FOR;
768 case OMP_CLAUSE_SAFELEN:
769 case OMP_CLAUSE_LINEAR:
770 case OMP_CLAUSE_ALIGNED:
771 s = C_OMP_CLAUSE_SPLIT_SIMD;
773 /* Duplicate this to all of distribute, for and simd. */
774 case OMP_CLAUSE_COLLAPSE:
775 if (code == OMP_SIMD)
777 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
778 OMP_CLAUSE_COLLAPSE);
779 OMP_CLAUSE_COLLAPSE_EXPR (c)
780 = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
781 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
782 cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
784 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
786 if ((mask & (OMP_CLAUSE_MASK_1
787 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
789 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
790 OMP_CLAUSE_COLLAPSE);
791 OMP_CLAUSE_COLLAPSE_EXPR (c)
792 = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
793 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
794 cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
795 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
798 s = C_OMP_CLAUSE_SPLIT_FOR;
801 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
803 /* Private clause is supported on all constructs but target,
804 it is enough to put it on the innermost one. For
805 #pragma omp {for,sections} put it on parallel though,
806 as that's what we did for OpenMP 3.1. */
807 case OMP_CLAUSE_PRIVATE:
810 case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break;
811 case OMP_FOR: case OMP_SECTIONS:
812 case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
813 case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break;
814 case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break;
815 default: gcc_unreachable ();
818 /* Firstprivate clause is supported on all constructs but
819 target and simd. Put it on the outermost of those and
820 duplicate on parallel. */
821 case OMP_CLAUSE_FIRSTPRIVATE:
822 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
825 if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)
827 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0)
829 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
830 OMP_CLAUSE_FIRSTPRIVATE);
831 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
832 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
833 cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
834 if ((mask & (OMP_CLAUSE_MASK_1
835 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
836 s = C_OMP_CLAUSE_SPLIT_TEAMS;
838 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
842 #pragma omp parallel{, for{, simd}, sections}. */
843 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
845 else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
848 /* This must be one of
849 #pragma omp {,target }teams distribute
850 #pragma omp target teams
851 #pragma omp {,target }teams distribute simd. */
852 gcc_assert (code == OMP_DISTRIBUTE
854 || code == OMP_SIMD);
855 s = C_OMP_CLAUSE_SPLIT_TEAMS;
857 else if ((mask & (OMP_CLAUSE_MASK_1
858 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
860 /* This must be #pragma omp distribute simd. */
861 gcc_assert (code == OMP_SIMD);
862 s = C_OMP_CLAUSE_SPLIT_TEAMS;
866 /* This must be #pragma omp for simd. */
867 gcc_assert (code == OMP_SIMD);
868 s = C_OMP_CLAUSE_SPLIT_FOR;
871 /* Lastprivate is allowed on for, sections and simd. In
872 parallel {for{, simd},sections} we actually want to put it on
873 parallel rather than for or sections. */
874 case OMP_CLAUSE_LASTPRIVATE:
875 if (code == OMP_FOR || code == OMP_SECTIONS)
877 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
879 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
881 s = C_OMP_CLAUSE_SPLIT_FOR;
884 gcc_assert (code == OMP_SIMD);
885 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
887 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
888 OMP_CLAUSE_LASTPRIVATE);
889 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
890 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
892 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
894 s = C_OMP_CLAUSE_SPLIT_FOR;
895 OMP_CLAUSE_CHAIN (c) = cclauses[s];
898 s = C_OMP_CLAUSE_SPLIT_SIMD;
900 /* Shared and default clauses are allowed on private and teams. */
901 case OMP_CLAUSE_SHARED:
902 case OMP_CLAUSE_DEFAULT:
903 if (code == OMP_TEAMS)
905 s = C_OMP_CLAUSE_SPLIT_TEAMS;
908 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
911 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
912 OMP_CLAUSE_CODE (clauses));
913 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
914 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
916 OMP_CLAUSE_DEFAULT_KIND (c)
917 = OMP_CLAUSE_DEFAULT_KIND (clauses);
918 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
919 cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
922 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
924 /* Reduction is allowed on simd, for, parallel, sections and teams.
925 Duplicate it on all of them, but omit on for or sections if
926 parallel is present. */
927 case OMP_CLAUSE_REDUCTION:
928 if (code == OMP_SIMD)
930 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
931 OMP_CLAUSE_REDUCTION);
932 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
933 OMP_CLAUSE_REDUCTION_CODE (c)
934 = OMP_CLAUSE_REDUCTION_CODE (clauses);
935 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
936 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
937 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
938 cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
940 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
942 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
945 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
946 OMP_CLAUSE_REDUCTION);
947 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
948 OMP_CLAUSE_REDUCTION_CODE (c)
949 = OMP_CLAUSE_REDUCTION_CODE (clauses);
950 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
951 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
952 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
953 cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
954 s = C_OMP_CLAUSE_SPLIT_TEAMS;
956 else if ((mask & (OMP_CLAUSE_MASK_1
957 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
958 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
960 s = C_OMP_CLAUSE_SPLIT_FOR;
962 else if (code == OMP_SECTIONS)
963 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
965 s = C_OMP_CLAUSE_SPLIT_TEAMS;
968 /* FIXME: This is currently being discussed. */
969 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
971 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
973 s = C_OMP_CLAUSE_SPLIT_TARGET;
978 OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
979 cclauses[s] = clauses;
984 /* qsort callback to compare #pragma omp declare simd clauses. */
987 c_omp_declare_simd_clause_cmp (const void *p, const void *q)
989 tree a = *(const tree *) p;
990 tree b = *(const tree *) q;
991 if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_CODE (b))
993 if (OMP_CLAUSE_CODE (a) > OMP_CLAUSE_CODE (b))
997 if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_SIMDLEN
998 && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_INBRANCH
999 && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_NOTINBRANCH)
1001 int c = tree_to_shwi (OMP_CLAUSE_DECL (a));
1002 int d = tree_to_shwi (OMP_CLAUSE_DECL (b));
1011 /* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd
1012 CLAUSES on FNDECL into argument indexes and sort them. */
1015 c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses)
1018 vec<tree> clvec = vNULL;
1020 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1022 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
1023 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
1024 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
1026 tree decl = OMP_CLAUSE_DECL (c);
1029 for (arg = parms, idx = 0; arg;
1030 arg = TREE_CHAIN (arg), idx++)
1033 if (arg == NULL_TREE)
1035 error_at (OMP_CLAUSE_LOCATION (c),
1036 "%qD is not an function argument", decl);
1039 OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, idx);
1041 clvec.safe_push (c);
1043 if (!clvec.is_empty ())
1045 unsigned int len = clvec.length (), i;
1046 clvec.qsort (c_omp_declare_simd_clause_cmp);
1048 for (i = 0; i < len; i++)
1049 OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? clvec[i + 1] : NULL_TREE;
1055 /* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs. */
1058 c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses)
1062 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1063 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
1064 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
1065 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
1067 int idx = tree_to_shwi (OMP_CLAUSE_DECL (c)), i;
1069 for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
1070 arg = TREE_CHAIN (arg), i++)
1074 OMP_CLAUSE_DECL (c) = arg;
1078 /* True if OpenMP sharing attribute of DECL is predetermined. */
1080 enum omp_clause_default_kind
1081 c_omp_predetermined_sharing (tree decl)
1083 /* Variables with const-qualified type having no mutable member
1084 are predetermined shared. */
1085 if (TREE_READONLY (decl))
1086 return OMP_CLAUSE_DEFAULT_SHARED;
1088 return OMP_CLAUSE_DEFAULT_UNSPECIFIED;