From: John Marino Date: Sun, 22 Mar 2015 19:10:23 +0000 (+0100) Subject: Update gcc-50 to SVN version 221572 X-Git-Tag: v4.2.0rc~479^2 X-Git-Url: https://gitweb.dragonflybsd.org/~tuxillo/dragonfly.git/commitdiff_plain/38c0c85b8fb728acd360e59f89e8fd3efee99de1 Update gcc-50 to SVN version 221572 Last Changed Date: 2015-03-22 19:42:52 +0100 (Sun, 22 Mar 2015) --- diff --git a/contrib/gcc-5.0/LAST_UPDATED b/contrib/gcc-5.0/LAST_UPDATED index d8c9337b10..86bb22cd6f 100644 --- a/contrib/gcc-5.0/LAST_UPDATED +++ b/contrib/gcc-5.0/LAST_UPDATED @@ -1,2 +1,2 @@ -221423 -Last Changed Date: 2015-03-13 14:48:21 +0100 (Fri, 13 Mar 2015) +221572 +Last Changed Date: 2015-03-22 19:42:52 +0100 (Sun, 22 Mar 2015) diff --git a/contrib/gcc-5.0/gcc/DATESTAMP b/contrib/gcc-5.0/gcc/DATESTAMP index ea0dd137c0..84fc6e526e 100644 --- a/contrib/gcc-5.0/gcc/DATESTAMP +++ b/contrib/gcc-5.0/gcc/DATESTAMP @@ -1 +1 @@ -20150313 +20150322 diff --git a/contrib/gcc-5.0/gcc/c/c-decl.c b/contrib/gcc-5.0/gcc/c/c-decl.c index c140837f13..e1741f1301 100644 --- a/contrib/gcc-5.0/gcc/c/c-decl.c +++ b/contrib/gcc-5.0/gcc/c/c-decl.c @@ -4407,7 +4407,8 @@ c_decl_attributes (tree *node, tree attributes, int flags) { /* Add implicit "omp declare target" attribute if requested. */ if (current_omp_declare_target_attribute - && ((TREE_CODE (*node) == VAR_DECL && TREE_STATIC (*node)) + && ((TREE_CODE (*node) == VAR_DECL + && (TREE_STATIC (*node) || DECL_EXTERNAL (*node))) || TREE_CODE (*node) == FUNCTION_DECL)) { if (TREE_CODE (*node) == VAR_DECL diff --git a/contrib/gcc-5.0/gcc/cfgexpand.c b/contrib/gcc-5.0/gcc/cfgexpand.c index 67be09fc7e..97e7a2583e 100644 --- a/contrib/gcc-5.0/gcc/cfgexpand.c +++ b/contrib/gcc-5.0/gcc/cfgexpand.c @@ -282,6 +282,15 @@ align_local_variable (tree decl) return align / BITS_PER_UNIT; } +/* Align given offset BASE with ALIGN. Truncate up if ALIGN_UP is true, + down otherwise. Return truncated BASE value. */ + +static inline unsigned HOST_WIDE_INT +align_base (HOST_WIDE_INT base, unsigned HOST_WIDE_INT align, bool align_up) +{ + return align_up ? (base + align - 1) & -align : base & -align; +} + /* Allocate SIZE bytes at byte alignment ALIGN from the stack frame. Return the frame offset. 
*/ @@ -290,20 +299,17 @@ alloc_stack_frame_space (HOST_WIDE_INT size, unsigned HOST_WIDE_INT align) { HOST_WIDE_INT offset, new_frame_offset; - new_frame_offset = frame_offset; if (FRAME_GROWS_DOWNWARD) { - new_frame_offset -= size + frame_phase; - new_frame_offset &= -align; - new_frame_offset += frame_phase; + new_frame_offset + = align_base (frame_offset - frame_phase - size, + align, false) + frame_phase; offset = new_frame_offset; } else { - new_frame_offset -= frame_phase; - new_frame_offset += align - 1; - new_frame_offset &= -align; - new_frame_offset += frame_phase; + new_frame_offset + = align_base (frame_offset - frame_phase, align, true) + frame_phase; offset = new_frame_offset; new_frame_offset += size; } @@ -1038,13 +1044,16 @@ expand_stack_vars (bool (*pred) (size_t), struct stack_vars_data *data) base = virtual_stack_vars_rtx; if ((flag_sanitize & SANITIZE_ADDRESS) && ASAN_STACK && pred) { - HOST_WIDE_INT prev_offset = frame_offset; + HOST_WIDE_INT prev_offset + = align_base (frame_offset, + MAX (alignb, ASAN_RED_ZONE_SIZE), + FRAME_GROWS_DOWNWARD); tree repr_decl = NULL_TREE; - offset = alloc_stack_frame_space (stack_vars[i].size + ASAN_RED_ZONE_SIZE, MAX (alignb, ASAN_RED_ZONE_SIZE)); + data->asan_vec.safe_push (prev_offset); data->asan_vec.safe_push (offset + stack_vars[i].size); /* Find best representative of the partition. diff --git a/contrib/gcc-5.0/gcc/cgraph.c b/contrib/gcc-5.0/gcc/cgraph.c index ede58bf5aa..35164f9f40 100644 --- a/contrib/gcc-5.0/gcc/cgraph.c +++ b/contrib/gcc-5.0/gcc/cgraph.c @@ -553,12 +553,11 @@ cgraph_node::get_create (tree decl) if (dump_file) fprintf (dump_file, "Introduced new external node " "(%s/%i) and turned into root of the clone tree.\n", - xstrdup_for_dump (node->name ()), node->order); + node->name (), node->order); } else if (dump_file) fprintf (dump_file, "Introduced new external node " - "(%s/%i).\n", xstrdup_for_dump (node->name ()), - node->order); + "(%s/%i).\n", node->name (), node->order); return node; } @@ -2009,6 +2008,8 @@ cgraph_node::dump (FILE *f) fprintf (f, " only_called_at_exit"); if (opt_for_fn (decl, optimize_size)) fprintf (f, " optimize_size"); + if (parallelized_function) + fprintf (f, " parallelized_function"); fprintf (f, "\n"); diff --git a/contrib/gcc-5.0/gcc/cgraph.h b/contrib/gcc-5.0/gcc/cgraph.h index 99af026d2d..650e68921f 100644 --- a/contrib/gcc-5.0/gcc/cgraph.h +++ b/contrib/gcc-5.0/gcc/cgraph.h @@ -1317,6 +1317,8 @@ public: unsigned nonfreeing_fn : 1; /* True if there was multiple COMDAT bodies merged by lto-symtab. */ unsigned merged : 1; + /* True if function was created to be executed in parallel. */ + unsigned parallelized_function : 1; private: /* Worker for call_for_symbol_and_aliases. */ @@ -2717,9 +2719,6 @@ cgraph_node::has_gimple_body_p (void) for ((node) = symtab->first_function_with_gimple_body (); (node); \ (node) = symtab->next_function_with_gimple_body (node)) -/* Create a new static variable of type TYPE. */ -tree add_new_static_var (tree type); - /* Uniquize all constants that appear in memory. Each constant in memory thus far output is recorded in `const_desc_table'. */ diff --git a/contrib/gcc-5.0/gcc/cgraphunit.c b/contrib/gcc-5.0/gcc/cgraphunit.c index e640907550..8ac92e1c0c 100644 --- a/contrib/gcc-5.0/gcc/cgraphunit.c +++ b/contrib/gcc-5.0/gcc/cgraphunit.c @@ -2484,8 +2484,9 @@ cgraph_node::create_wrapper (cgraph_node *target) /* Turn alias into thunk and expand it into GIMPLE representation. 
*/ definition = true; + + memset (&thunk, 0, sizeof (cgraph_thunk_info)); thunk.thunk_p = true; - thunk.this_adjusting = false; create_edge (target, NULL, count, CGRAPH_FREQ_BASE); tree arguments = DECL_ARGUMENTS (decl); diff --git a/contrib/gcc-5.0/gcc/cif-code.def b/contrib/gcc-5.0/gcc/cif-code.def index 467cfcc655..202fddf75e 100644 --- a/contrib/gcc-5.0/gcc/cif-code.def +++ b/contrib/gcc-5.0/gcc/cif-code.def @@ -39,7 +39,7 @@ DEFCIFCODE(FUNCTION_NOT_CONSIDERED, CIF_FINAL_NORMAL, N_("function not considered for inlining")) /* Caller is compiled with optimizations disabled. */ -DEFCIFCODE(FUNCTION_NOT_OPTIMIZED, CIF_FINAL_NORMAL, +DEFCIFCODE(FUNCTION_NOT_OPTIMIZED, CIF_FINAL_ERROR, N_("caller is not optimized")) /* Inlining failed owing to unavailable function body. */ @@ -47,7 +47,7 @@ DEFCIFCODE(BODY_NOT_AVAILABLE, CIF_FINAL_ERROR, N_("function body not available")) /* Extern inline function that has been redefined. */ -DEFCIFCODE(REDEFINED_EXTERN_INLINE, CIF_FINAL_NORMAL, +DEFCIFCODE(REDEFINED_EXTERN_INLINE, CIF_FINAL_ERROR, N_("redefined extern inline functions are not considered for " "inlining")) @@ -87,10 +87,6 @@ DEFCIFCODE(UNLIKELY_CALL, CIF_FINAL_NORMAL, DEFCIFCODE(NOT_DECLARED_INLINED, CIF_FINAL_NORMAL, N_("function not declared inline and code size would grow")) -/* Inlining suppressed due to size optimization. */ -DEFCIFCODE(OPTIMIZING_FOR_SIZE, CIF_FINAL_NORMAL, - N_("optimizing for size and code size would grow")) - /* Caller and callee disagree on the arguments. */ DEFCIFCODE(MISMATCHED_ARGUMENTS, CIF_FINAL_ERROR, N_("mismatched arguments")) @@ -121,13 +117,13 @@ DEFCIFCODE(OPTIMIZATION_MISMATCH, CIF_FINAL_ERROR, N_("optimization level attribute mismatch")) /* We can't inline because the callee refers to comdat-local symbols. */ -DEFCIFCODE(USES_COMDAT_LOCAL, CIF_FINAL_NORMAL, +DEFCIFCODE(USES_COMDAT_LOCAL, CIF_FINAL_ERROR, N_("callee refers to comdat-local symbols")) /* We can't inline because of mismatched caller/callee attributes. */ -DEFCIFCODE(ATTRIBUTE_MISMATCH, CIF_FINAL_NORMAL, +DEFCIFCODE(ATTRIBUTE_MISMATCH, CIF_FINAL_ERROR, N_("function attribute mismatch")) /* We proved that the call is unreachable. */ -DEFCIFCODE(UNREACHABLE, CIF_FINAL_NORMAL, +DEFCIFCODE(UNREACHABLE, CIF_FINAL_ERROR, N_("unreachable")) diff --git a/contrib/gcc-5.0/gcc/combine.c b/contrib/gcc-5.0/gcc/combine.c index f779117cd7..71e5690459 100644 --- a/contrib/gcc-5.0/gcc/combine.c +++ b/contrib/gcc-5.0/gcc/combine.c @@ -475,7 +475,7 @@ static rtx force_to_mode (rtx, machine_mode, unsigned HOST_WIDE_INT, int); static rtx if_then_else_cond (rtx, rtx *, rtx *); static rtx known_cond (rtx, enum rtx_code, rtx, rtx); -static int rtx_equal_for_field_assignment_p (rtx, rtx); +static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false); static rtx make_field_assignment (rtx); static rtx apply_distributive_law (rtx); static rtx distribute_and_simplify_rtx (rtx, int); @@ -9184,8 +9184,23 @@ known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val) assignment as a field assignment. */ static int -rtx_equal_for_field_assignment_p (rtx x, rtx y) +rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x) { + if (widen_x && GET_MODE (x) != GET_MODE (y)) + { + if (GET_MODE_SIZE (GET_MODE (x)) > GET_MODE_SIZE (GET_MODE (y))) + return 0; + if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN) + return 0; + /* For big endian, adjust the memory offset. 
*/ + if (BYTES_BIG_ENDIAN) + x = adjust_address_nv (x, GET_MODE (y), + -subreg_lowpart_offset (GET_MODE (x), + GET_MODE (y))); + else + x = adjust_address_nv (x, GET_MODE (y), 0); + } + if (x == y || rtx_equal_p (x, y)) return 1; @@ -9339,16 +9354,15 @@ make_field_assignment (rtx x) /* The second SUBREG that might get in the way is a paradoxical SUBREG around the first operand of the AND. We want to pretend the operand is as wide as the destination here. We - do this by creating a new MEM in the wider mode for the sole + do this by adjusting the MEM to wider mode for the sole purpose of the call to rtx_equal_for_field_assignment_p. Also note this trick only works for MEMs. */ else if (GET_CODE (rhs) == AND && paradoxical_subreg_p (XEXP (rhs, 0)) - && GET_CODE (SUBREG_REG (XEXP (rhs, 0))) == MEM + && MEM_P (SUBREG_REG (XEXP (rhs, 0))) && CONST_INT_P (XEXP (rhs, 1)) - && rtx_equal_for_field_assignment_p (gen_rtx_MEM (GET_MODE (dest), - XEXP (SUBREG_REG (XEXP (rhs, 0)), 0)), - dest)) + && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)), + dest, true)) c1 = INTVAL (XEXP (rhs, 1)), other = lhs; else if (GET_CODE (lhs) == AND && CONST_INT_P (XEXP (lhs, 1)) @@ -9357,16 +9371,15 @@ make_field_assignment (rtx x) /* The second SUBREG that might get in the way is a paradoxical SUBREG around the first operand of the AND. We want to pretend the operand is as wide as the destination here. We - do this by creating a new MEM in the wider mode for the sole + do this by adjusting the MEM to wider mode for the sole purpose of the call to rtx_equal_for_field_assignment_p. Also note this trick only works for MEMs. */ else if (GET_CODE (lhs) == AND && paradoxical_subreg_p (XEXP (lhs, 0)) - && GET_CODE (SUBREG_REG (XEXP (lhs, 0))) == MEM + && MEM_P (SUBREG_REG (XEXP (lhs, 0))) && CONST_INT_P (XEXP (lhs, 1)) - && rtx_equal_for_field_assignment_p (gen_rtx_MEM (GET_MODE (dest), - XEXP (SUBREG_REG (XEXP (lhs, 0)), 0)), - dest)) + && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)), + dest, true)) c1 = INTVAL (XEXP (lhs, 1)), other = rhs; else return x; diff --git a/contrib/gcc-5.0/gcc/config/i386/sse.md b/contrib/gcc-5.0/gcc/config/i386/sse.md index af74b35d5c..5800a3eebf 100644 --- a/contrib/gcc-5.0/gcc/config/i386/sse.md +++ b/contrib/gcc-5.0/gcc/config/i386/sse.md @@ -12805,6 +12805,65 @@ operands[1] = adjust_address (operands[1], mode, offs); }) +;; Turn SImode or DImode extraction from arbitrary SSE/AVX/AVX512F +;; vector modes into vec_extract*. 
+(define_split + [(set (match_operand:SWI48x 0 "nonimmediate_operand") + (match_operand:SWI48x 1 "register_operand"))] + "can_create_pseudo_p () + && GET_CODE (operands[1]) == SUBREG + && REG_P (SUBREG_REG (operands[1])) + && (GET_MODE_CLASS (GET_MODE (SUBREG_REG (operands[1]))) == MODE_VECTOR_INT + || (GET_MODE_CLASS (GET_MODE (SUBREG_REG (operands[1]))) + == MODE_VECTOR_FLOAT)) + && SUBREG_BYTE (operands[1]) == 0 + && TARGET_SSE + && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (operands[1]))) == 16 + || (GET_MODE_SIZE (GET_MODE (SUBREG_REG (operands[1]))) == 32 + && TARGET_AVX) + || (GET_MODE_SIZE (GET_MODE (SUBREG_REG (operands[1]))) == 64 + && TARGET_AVX512F)) + && (mode == SImode || TARGET_64BIT || MEM_P (operands[0]))" + [(set (match_dup 0) (vec_select:SWI48x (match_dup 1) + (parallel [(const_int 0)])))] +{ + rtx tmp; + operands[1] = SUBREG_REG (operands[1]); + switch (GET_MODE_SIZE (GET_MODE (operands[1]))) + { + case 64: + if (mode == SImode) + { + tmp = gen_reg_rtx (V8SImode); + emit_insn (gen_vec_extract_lo_v16si (tmp, + gen_lowpart (V16SImode, + operands[1]))); + } + else + { + tmp = gen_reg_rtx (V4DImode); + emit_insn (gen_vec_extract_lo_v8di (tmp, + gen_lowpart (V8DImode, + operands[1]))); + } + operands[1] = tmp; + /* FALLTHRU */ + case 32: + tmp = gen_reg_rtx (mode); + if (mode == SImode) + emit_insn (gen_vec_extract_lo_v8si (tmp, gen_lowpart (V8SImode, + operands[1]))); + else + emit_insn (gen_vec_extract_lo_v4di (tmp, gen_lowpart (V4DImode, + operands[1]))); + operands[1] = tmp; + break; + case 16: + operands[1] = gen_lowpart (mode, operands[1]); + break; + } +}) + (define_insn "*vec_concatv2si_sse4_1" [(set (match_operand:V2SI 0 "register_operand" "=Yr,*x,x, Yr,*x,x, x, *y,*y") (vec_concat:V2SI diff --git a/contrib/gcc-5.0/gcc/cp/call.c b/contrib/gcc-5.0/gcc/cp/call.c index fdd8436d53..31d2b9c2c9 100644 --- a/contrib/gcc-5.0/gcc/cp/call.c +++ b/contrib/gcc-5.0/gcc/cp/call.c @@ -7486,7 +7486,7 @@ build_over_call (struct z_candidate *cand, int flags, tsubst_flags_t complain) } if (!already_used - && !mark_used (fn)) + && !mark_used (fn, complain)) return error_mark_node; if (DECL_VINDEX (fn) && (flags & LOOKUP_NONVIRTUAL) == 0 diff --git a/contrib/gcc-5.0/gcc/cp/class.c b/contrib/gcc-5.0/gcc/cp/class.c index 8612163711..0518320d6b 100644 --- a/contrib/gcc-5.0/gcc/cp/class.c +++ b/contrib/gcc-5.0/gcc/cp/class.c @@ -1382,44 +1382,53 @@ struct abi_tag_data a tag NAMESPACE_DECL) or a STRING_CST (a tag attribute). */ static void -check_tag (tree tag, tree *tp, abi_tag_data *p) +check_tag (tree tag, tree id, tree *tp, abi_tag_data *p) { - tree id; - - if (TREE_CODE (tag) == STRING_CST) - id = get_identifier (TREE_STRING_POINTER (tag)); - else - { - id = tag; - tag = NULL_TREE; - } - if (!IDENTIFIER_MARKED (id)) { - if (!tag) - tag = build_string (IDENTIFIER_LENGTH (id) + 1, - IDENTIFIER_POINTER (id)); if (p->tags != error_mark_node) { - /* We're collecting tags from template arguments. */ + /* We're collecting tags from template arguments or from + the type of a variable or function return type. */ p->tags = tree_cons (NULL_TREE, tag, p->tags); - ABI_TAG_IMPLICIT (p->tags) = true; /* Don't inherit this tag multiple times. */ IDENTIFIER_MARKED (id) = true; + + if (TYPE_P (p->t)) + { + /* Tags inherited from type template arguments are only used + to avoid warnings. */ + ABI_TAG_IMPLICIT (p->tags) = true; + return; + } + /* For functions and variables we want to warn, too. */ } /* Otherwise we're diagnosing missing tags. 
*/ + if (TREE_CODE (p->t) == FUNCTION_DECL) + { + if (warning (OPT_Wabi_tag, "%qD inherits the %E ABI tag " + "that %qT (used in its return type) has", + p->t, tag, *tp)) + inform (location_of (*tp), "%qT declared here", *tp); + } + else if (TREE_CODE (p->t) == VAR_DECL) + { + if (warning (OPT_Wabi_tag, "%qD inherits the %E ABI tag " + "that %qT (used in its type) has", p->t, tag, *tp)) + inform (location_of (*tp), "%qT declared here", *tp); + } else if (TYPE_P (p->subob)) { - if (warning (OPT_Wabi_tag, "%qT does not have the %E abi tag " + if (warning (OPT_Wabi_tag, "%qT does not have the %E ABI tag " "that base %qT has", p->t, tag, p->subob)) inform (location_of (p->subob), "%qT declared here", p->subob); } else { - if (warning (OPT_Wabi_tag, "%qT does not have the %E abi tag " + if (warning (OPT_Wabi_tag, "%qT does not have the %E ABI tag " "that %qT (used in the type of %qD) has", p->t, tag, *tp, p->subob)) { @@ -1431,8 +1440,53 @@ check_tag (tree tag, tree *tp, abi_tag_data *p) } } +/* Find all the ABI tags in the attribute list ATTR and either call + check_tag (if TP is non-null) or set IDENTIFIER_MARKED to val. */ + +static void +mark_or_check_attr_tags (tree attr, tree *tp, abi_tag_data *p, bool val) +{ + if (!attr) + return; + for (; (attr = lookup_attribute ("abi_tag", attr)); + attr = TREE_CHAIN (attr)) + for (tree list = TREE_VALUE (attr); list; + list = TREE_CHAIN (list)) + { + tree tag = TREE_VALUE (list); + tree id = get_identifier (TREE_STRING_POINTER (tag)); + if (tp) + check_tag (tag, id, tp, p); + else + IDENTIFIER_MARKED (id) = val; + } +} + +/* Find all the ABI tags on T and its enclosing scopes and either call + check_tag (if TP is non-null) or set IDENTIFIER_MARKED to val. */ + +static void +mark_or_check_tags (tree t, tree *tp, abi_tag_data *p, bool val) +{ + while (t != global_namespace) + { + tree attr; + if (TYPE_P (t)) + { + attr = TYPE_ATTRIBUTES (t); + t = CP_TYPE_CONTEXT (t); + } + else + { + attr = DECL_ATTRIBUTES (t); + t = CP_DECL_CONTEXT (t); + } + mark_or_check_attr_tags (attr, tp, p, val); + } +} + /* walk_tree callback for check_abi_tags: if the type at *TP involves any - types with abi tags, add the corresponding identifiers to the VEC in + types with ABI tags, add the corresponding identifiers to the VEC in *DATA and set IDENTIFIER_MARKED. */ static tree @@ -1447,63 +1501,112 @@ find_abi_tags_r (tree *tp, int *walk_subtrees, void *data) abi_tag_data *p = static_cast(data); - for (tree ns = decl_namespace_context (*tp); - ns != global_namespace; - ns = CP_DECL_CONTEXT (ns)) - if (NAMESPACE_ABI_TAG (ns)) - check_tag (DECL_NAME (ns), tp, p); + mark_or_check_tags (*tp, tp, p, false); - if (tree attributes = lookup_attribute ("abi_tag", TYPE_ATTRIBUTES (*tp))) - { - for (tree list = TREE_VALUE (attributes); list; - list = TREE_CHAIN (list)) - { - tree tag = TREE_VALUE (list); - check_tag (tag, tp, p); - } - } return NULL_TREE; } -/* Set IDENTIFIER_MARKED on all the ABI tags on T and its (transitively - complete) template arguments. */ +/* walk_tree callback for mark_abi_tags: if *TP is a class, set + IDENTIFIER_MARKED on its ABI tags. 
*/ -static void -mark_type_abi_tags (tree t, bool val) +static tree +mark_abi_tags_r (tree *tp, int *walk_subtrees, void *data) { - for (tree ns = decl_namespace_context (t); - ns != global_namespace; - ns = CP_DECL_CONTEXT (ns)) - if (NAMESPACE_ABI_TAG (ns)) - IDENTIFIER_MARKED (DECL_NAME (ns)) = val; + if (!OVERLOAD_TYPE_P (*tp)) + return NULL_TREE; + + /* walk_tree shouldn't be walking into any subtrees of a RECORD_TYPE + anyway, but let's make sure of it. */ + *walk_subtrees = false; + + bool *valp = static_cast(data); - tree attributes = lookup_attribute ("abi_tag", TYPE_ATTRIBUTES (t)); - if (attributes) + mark_or_check_tags (*tp, NULL, NULL, *valp); + + return NULL_TREE; +} + +/* Set IDENTIFIER_MARKED on all the ABI tags on T and its enclosing + scopes. */ + +static void +mark_abi_tags (tree t, bool val) +{ + mark_or_check_tags (t, NULL, NULL, val); + if (DECL_P (t)) { - for (tree list = TREE_VALUE (attributes); list; - list = TREE_CHAIN (list)) + if (DECL_LANG_SPECIFIC (t) && DECL_USE_TEMPLATE (t) + && PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (t))) { - tree tag = TREE_VALUE (list); - tree id = get_identifier (TREE_STRING_POINTER (tag)); - IDENTIFIER_MARKED (id) = val; + /* Template arguments are part of the signature. */ + tree level = INNERMOST_TEMPLATE_ARGS (DECL_TI_ARGS (t)); + for (int j = 0; j < TREE_VEC_LENGTH (level); ++j) + { + tree arg = TREE_VEC_ELT (level, j); + cp_walk_tree_without_duplicates (&arg, mark_abi_tags_r, &val); + } } + if (TREE_CODE (t) == FUNCTION_DECL) + /* A function's parameter types are part of the signature, so + we don't need to inherit any tags that are also in them. */ + for (tree arg = FUNCTION_FIRST_USER_PARMTYPE (t); arg; + arg = TREE_CHAIN (arg)) + cp_walk_tree_without_duplicates (&TREE_VALUE (arg), + mark_abi_tags_r, &val); } } -/* Check that class T has all the abi tags that subobject SUBOB has, or - warn if not. */ +/* Check that T has all the ABI tags that subobject SUBOB has, or + warn if not. If T is a (variable or function) declaration, also + add any missing tags. */ static void check_abi_tags (tree t, tree subob) { - mark_type_abi_tags (t, true); + bool inherit = DECL_P (t); + + if (!inherit && !warn_abi_tag) + return; + + tree decl = TYPE_P (t) ? TYPE_NAME (t) : t; + if (!TREE_PUBLIC (decl)) + /* No need to worry about things local to this TU. */ + return; + + mark_abi_tags (t, true); tree subtype = TYPE_P (subob) ? subob : TREE_TYPE (subob); struct abi_tag_data data = { t, subob, error_mark_node }; + if (inherit) + data.tags = NULL_TREE; cp_walk_tree_without_duplicates (&subtype, find_abi_tags_r, &data); - mark_type_abi_tags (t, false); + if (inherit && data.tags) + { + tree attr = lookup_attribute ("abi_tag", DECL_ATTRIBUTES (t)); + if (attr) + TREE_VALUE (attr) = chainon (data.tags, TREE_VALUE (attr)); + else + DECL_ATTRIBUTES (t) + = tree_cons (get_identifier ("abi_tag"), data.tags, + DECL_ATTRIBUTES (t)); + } + + mark_abi_tags (t, false); +} + +/* Check that DECL has all the ABI tags that are used in parts of its type + that are not reflected in its mangled name. 
*/ + +void +check_abi_tags (tree decl) +{ + if (TREE_CODE (decl) == VAR_DECL) + check_abi_tags (decl, TREE_TYPE (decl)); + else if (TREE_CODE (decl) == FUNCTION_DECL + && !mangle_return_type_p (decl)) + check_abi_tags (decl, TREE_TYPE (TREE_TYPE (decl))); } void @@ -1513,7 +1616,7 @@ inherit_targ_abi_tags (tree t) || CLASSTYPE_TEMPLATE_INFO (t) == NULL_TREE) return; - mark_type_abi_tags (t, true); + mark_abi_tags (t, true); tree args = CLASSTYPE_TI_ARGS (t); struct abi_tag_data data = { t, NULL_TREE, NULL_TREE }; @@ -1541,7 +1644,7 @@ inherit_targ_abi_tags (tree t) TYPE_ATTRIBUTES (t)); } - mark_type_abi_tags (t, false); + mark_abi_tags (t, false); } /* Return true, iff class T has a non-virtual destructor that is diff --git a/contrib/gcc-5.0/gcc/cp/constexpr.c b/contrib/gcc-5.0/gcc/cp/constexpr.c index 1b5f50cb49..37b619d56f 100644 --- a/contrib/gcc-5.0/gcc/cp/constexpr.c +++ b/contrib/gcc-5.0/gcc/cp/constexpr.c @@ -2427,6 +2427,27 @@ cxx_fold_indirect_ref (location_t loc, tree type, tree op0, bool *empty_base) break; } } + /* *(&A[i] p+ j) => A[i + j] */ + else if (TREE_CODE (op00) == ARRAY_REF + && TREE_CODE (TREE_OPERAND (op00, 1)) == INTEGER_CST + && TREE_CODE (op01) == INTEGER_CST) + { + tree t = fold_convert_loc (loc, ssizetype, + TREE_OPERAND (op00, 1)); + tree nelts + = array_type_nelts_top (TREE_TYPE (TREE_OPERAND (op00, 0))); + /* Don't fold an out-of-bound access. */ + if (!tree_int_cst_le (t, nelts)) + return NULL_TREE; + /* Make sure to treat the second operand of POINTER_PLUS_EXPR + as signed. */ + op01 = fold_build2_loc (loc, EXACT_DIV_EXPR, ssizetype, + cp_fold_convert (ssizetype, op01), + TYPE_SIZE_UNIT (type)); + t = size_binop_loc (loc, PLUS_EXPR, op01, t); + return build4_loc (loc, ARRAY_REF, type, TREE_OPERAND (op00, 0), + t, NULL_TREE, NULL_TREE); + } } } /* *(foo *)fooarrptr => (*fooarrptr)[0] */ diff --git a/contrib/gcc-5.0/gcc/cp/cp-tree.h b/contrib/gcc-5.0/gcc/cp/cp-tree.h index 65219f159e..7111449da4 100644 --- a/contrib/gcc-5.0/gcc/cp/cp-tree.h +++ b/contrib/gcc-5.0/gcc/cp/cp-tree.h @@ -152,7 +152,7 @@ c-common.h, not after. DECL_MUTABLE_P (in FIELD_DECL) DECL_DEPENDENT_P (in USING_DECL) LABEL_DECL_BREAK (in LABEL_DECL) - NAMESPACE_ABI_TAG (in NAMESPACE_DECL) + NAMESPACE_IS_INLINE (in NAMESPACE_DECL) 1: C_TYPEDEF_EXPLICITLY_SIGNED (in TYPE_DECL). DECL_TEMPLATE_INSTANTIATED (in a VAR_DECL or a FUNCTION_DECL) DECL_MEMBER_TEMPLATE_P (in TEMPLATE_DECL) @@ -2657,9 +2657,8 @@ struct GTY(()) lang_decl { #define LOCAL_CLASS_P(NODE) \ (decl_function_context (TYPE_MAIN_DECL (NODE)) != NULL_TREE) -/* 1 iff this NAMESPACE_DECL should also be treated as an ABI tag for - -Wabi-tag. */ -#define NAMESPACE_ABI_TAG(NODE) \ +/* 1 iff this NAMESPACE_DECL is an inline namespace. 
*/ +#define NAMESPACE_IS_INLINE(NODE) \ DECL_LANG_FLAG_0 (NAMESPACE_DECL_CHECK (NODE)) /* For a NAMESPACE_DECL: the list of using namespace directives @@ -5311,6 +5310,7 @@ extern void explain_non_literal_class (tree); extern void inherit_targ_abi_tags (tree); extern void defaulted_late_check (tree); extern bool defaultable_fn_check (tree); +extern void check_abi_tags (tree); extern void fixup_type_variants (tree); extern void fixup_attribute_variants (tree); extern tree* decl_cloned_function_p (const_tree, bool); @@ -6069,6 +6069,7 @@ extern bool type_has_nontrivial_copy_init (const_tree); extern bool class_tmpl_impl_spec_p (const_tree); extern int zero_init_p (const_tree); extern bool check_abi_tag_redeclaration (const_tree, const_tree, const_tree); +extern bool check_abi_tag_args (tree, tree); extern tree strip_typedefs (tree); extern tree strip_typedefs_expr (tree); extern tree copy_binfo (tree, tree, tree, @@ -6345,6 +6346,7 @@ extern tree mangle_tls_wrapper_fn (tree); extern bool decl_tls_wrapper_p (tree); extern tree mangle_ref_init_variable (tree); extern char * get_mangled_vtable_map_var_name (tree); +extern bool mangle_return_type_p (tree); /* in dump.c */ extern bool cp_dump_tree (void *, tree); diff --git a/contrib/gcc-5.0/gcc/cp/decl.c b/contrib/gcc-5.0/gcc/cp/decl.c index e35e48436d..cb0f11f574 100644 --- a/contrib/gcc-5.0/gcc/cp/decl.c +++ b/contrib/gcc-5.0/gcc/cp/decl.c @@ -10134,8 +10134,9 @@ grokdeclarator (const cp_declarator *declarator, the object as `const'. */ if (constexpr_p && innermost_code != cdk_function) { - if (type_quals & TYPE_QUAL_VOLATILE) - error ("both % and % cannot be used here"); + /* DR1688 says that a `constexpr' specifier in combination with + `volatile' is valid. */ + if (TREE_CODE (type) != REFERENCE_TYPE) { type_quals |= TYPE_QUAL_CONST; diff --git a/contrib/gcc-5.0/gcc/cp/decl2.c b/contrib/gcc-5.0/gcc/cp/decl2.c index a4a5ebf58e..989a0303ba 100644 --- a/contrib/gcc-5.0/gcc/cp/decl2.c +++ b/contrib/gcc-5.0/gcc/cp/decl2.c @@ -1440,7 +1440,8 @@ cplus_decl_attributes (tree *decl, tree attributes, int flags) /* Add implicit "omp declare target" attribute if requested. */ if (scope_chain->omp_declare_target_attribute - && ((TREE_CODE (*decl) == VAR_DECL && TREE_STATIC (*decl)) + && ((TREE_CODE (*decl) == VAR_DECL + && (TREE_STATIC (*decl) || DECL_EXTERNAL (*decl))) || TREE_CODE (*decl) == FUNCTION_DECL)) { if (TREE_CODE (*decl) == VAR_DECL @@ -5037,7 +5038,12 @@ mark_used (tree decl, tsubst_flags_t complain) && uses_template_parms (DECL_TI_ARGS (decl))) return true; - require_deduced_type (decl); + if (undeduced_auto_decl (decl)) + { + if (complain & tf_error) + error ("use of %qD before deduction of %", decl); + return false; + } /* If we don't need a value, then we don't need to synthesize DECL. */ if (cp_unevaluated_operand != 0) diff --git a/contrib/gcc-5.0/gcc/cp/mangle.c b/contrib/gcc-5.0/gcc/cp/mangle.c index fbf4bf27c0..b0f72d1ff1 100644 --- a/contrib/gcc-5.0/gcc/cp/mangle.c +++ b/contrib/gcc-5.0/gcc/cp/mangle.c @@ -648,6 +648,48 @@ find_substitution (tree node) return 1; } +/* Returns whether DECL's symbol name should be the plain unqualified-id + rather than a more complicated mangled name. */ + +static bool +unmangled_name_p (const tree decl) +{ + if (TREE_CODE (decl) == FUNCTION_DECL) + { + /* The names of `extern "C"' functions are not mangled. */ + return (DECL_EXTERN_C_FUNCTION_P (decl) + /* But overloaded operator names *are* mangled. 
*/ + && !DECL_OVERLOADED_OPERATOR_P (decl)); + } + else if (VAR_P (decl)) + { + /* static variables are mangled. */ + if (!DECL_EXTERNAL_LINKAGE_P (decl)) + return false; + + /* extern "C" declarations aren't mangled. */ + if (DECL_EXTERN_C_P (decl)) + return true; + + /* Other variables at non-global scope are mangled. */ + if (CP_DECL_CONTEXT (decl) != global_namespace) + return false; + + /* Variable template instantiations are mangled. */ + if (DECL_LANG_SPECIFIC (decl) && DECL_TEMPLATE_INFO (decl) + && variable_template_p (DECL_TI_TEMPLATE (decl))) + return false; + + /* Declarations with ABI tags are mangled. */ + if (lookup_attribute ("abi_tag", DECL_ATTRIBUTES (decl))) + return false; + + /* The names of non-static global variables aren't mangled. */ + return true; + } + + return false; +} /* TOP_LEVEL is true, if this is being called at outermost level of mangling. It should be false when mangling a decl appearing in an @@ -660,13 +702,10 @@ write_mangled_name (const tree decl, bool top_level) { MANGLE_TRACE_TREE ("mangled-name", decl); - if (/* The names of `extern "C"' functions are not mangled. */ - DECL_EXTERN_C_FUNCTION_P (decl) - /* But overloaded operator names *are* mangled. */ - && !DECL_OVERLOADED_OPERATOR_P (decl)) - { - unmangled_name:; + check_abi_tags (decl); + if (unmangled_name_p (decl)) + { if (top_level) write_string (IDENTIFIER_POINTER (DECL_NAME (decl))); else @@ -680,18 +719,6 @@ write_mangled_name (const tree decl, bool top_level) write_source_name (DECL_NAME (decl)); } } - else if (VAR_P (decl) - /* Variable template instantiations are mangled. */ - && !(DECL_LANG_SPECIFIC (decl) && DECL_TEMPLATE_INFO (decl) - && variable_template_p (DECL_TI_TEMPLATE (decl))) - /* The names of non-static global variables aren't mangled. */ - && DECL_EXTERNAL_LINKAGE_P (decl) - && (CP_DECL_CONTEXT (decl) == global_namespace - /* And neither are `extern "C"' variables. */ - || DECL_EXTERN_C_P (decl))) - { - goto unmangled_name; - } else { write_string ("_Z"); @@ -699,6 +726,18 @@ write_mangled_name (const tree decl, bool top_level) } } +/* Returns true if the return type of DECL is part of its signature, and + therefore its mangling. */ + +bool +mangle_return_type_p (tree decl) +{ + return (!DECL_CONSTRUCTOR_P (decl) + && !DECL_DESTRUCTOR_P (decl) + && !DECL_CONV_FN_P (decl) + && decl_is_template_id (decl, NULL)); +} + /* ::= ::= */ @@ -740,10 +779,7 @@ write_encoding (const tree decl) } write_bare_function_type (fn_type, - (!DECL_CONSTRUCTOR_P (decl) - && !DECL_DESTRUCTOR_P (decl) - && !DECL_CONV_FN_P (decl) - && decl_is_template_id (decl, NULL)), + mangle_return_type_p (decl), d); } } @@ -1290,7 +1326,7 @@ write_unqualified_name (tree decl) if (tree tmpl = most_general_template (decl)) decl = DECL_TEMPLATE_RESULT (tmpl); /* Don't crash on an unbound class template. */ - if (decl) + if (decl && TREE_CODE (decl) != NAMESPACE_DECL) { tree attrs = (TREE_CODE (decl) == TYPE_DECL ? 
TYPE_ATTRIBUTES (TREE_TYPE (decl)) diff --git a/contrib/gcc-5.0/gcc/cp/name-lookup.c b/contrib/gcc-5.0/gcc/cp/name-lookup.c index ba16befa74..c845d521a5 100644 --- a/contrib/gcc-5.0/gcc/cp/name-lookup.c +++ b/contrib/gcc-5.0/gcc/cp/name-lookup.c @@ -3657,7 +3657,24 @@ handle_namespace_attrs (tree ns, tree attributes) } else if (is_attribute_p ("abi_tag", name)) { - NAMESPACE_ABI_TAG (ns) = true; + if (!NAMESPACE_IS_INLINE (ns)) + { + warning (OPT_Wattributes, "ignoring %qD attribute on non-inline " + "namespace", name); + continue; + } + if (!args) + { + tree dn = DECL_NAME (ns); + args = build_string (IDENTIFIER_LENGTH (dn) + 1, + IDENTIFIER_POINTER (dn)); + TREE_TYPE (args) = char_array_type_node; + args = fix_string_type (args); + args = build_tree_list (NULL_TREE, args); + } + if (check_abi_tag_args (args, name)) + DECL_ATTRIBUTES (ns) = tree_cons (name, args, + DECL_ATTRIBUTES (ns)); } else { diff --git a/contrib/gcc-5.0/gcc/cp/parser.c b/contrib/gcc-5.0/gcc/cp/parser.c index a209ee608f..98d741f440 100644 --- a/contrib/gcc-5.0/gcc/cp/parser.c +++ b/contrib/gcc-5.0/gcc/cp/parser.c @@ -14051,6 +14051,8 @@ cp_parser_template_name (cp_parser* parser, /*ambiguous_decls=*/NULL, token->location); + decl = strip_using_decl (decl); + /* If DECL is a template, then the name was a template-name. */ if (TREE_CODE (decl) == TEMPLATE_DECL) { @@ -16231,6 +16233,7 @@ cp_parser_namespace_definition (cp_parser* parser) if (is_inline) { tree name_space = current_namespace; + NAMESPACE_IS_INLINE (name_space) = true; /* Set up namespace association. */ DECL_NAMESPACE_ASSOCIATIONS (name_space) = tree_cons (CP_DECL_CONTEXT (name_space), NULL_TREE, diff --git a/contrib/gcc-5.0/gcc/cp/tree.c b/contrib/gcc-5.0/gcc/cp/tree.c index c8e6f0c796..ef53aff87f 100644 --- a/contrib/gcc-5.0/gcc/cp/tree.c +++ b/contrib/gcc-5.0/gcc/cp/tree.c @@ -3485,13 +3485,17 @@ check_abi_tag_redeclaration (const_tree decl, const_tree old, const_tree new_) return true; } -/* Handle an "abi_tag" attribute; arguments as in - struct attribute_spec.handler. */ +/* The abi_tag attribute with the name NAME was given ARGS. If they are + ill-formed, give an error and return false; otherwise, return true. */ -static tree -handle_abi_tag_attribute (tree* node, tree name, tree args, - int flags, bool* no_add_attrs) +bool +check_abi_tag_args (tree args, tree name) { + if (!args) + { + error ("the %qE attribute requires arguments", name); + return false; + } for (tree arg = args; arg; arg = TREE_CHAIN (arg)) { tree elt = TREE_VALUE (arg); @@ -3502,7 +3506,7 @@ handle_abi_tag_attribute (tree* node, tree name, tree args, { error ("arguments to the %qE attribute must be narrow string " "literals", name); - goto fail; + return false; } const char *begin = TREE_STRING_POINTER (elt); const char *end = begin + TREE_STRING_LENGTH (elt); @@ -3517,7 +3521,7 @@ handle_abi_tag_attribute (tree* node, tree name, tree args, "identifiers", name); inform (input_location, "%<%c%> is not a valid first " "character for an identifier", c); - goto fail; + return false; } } else if (p == end - 1) @@ -3530,11 +3534,23 @@ handle_abi_tag_attribute (tree* node, tree name, tree args, "identifiers", name); inform (input_location, "%<%c%> is not a valid character " "in an identifier", c); - goto fail; + return false; } } } } + return true; +} + +/* Handle an "abi_tag" attribute; arguments as in + struct attribute_spec.handler. 
*/ + +static tree +handle_abi_tag_attribute (tree* node, tree name, tree args, + int flags, bool* no_add_attrs) +{ + if (!check_abi_tag_args (args, name)) + goto fail; if (TYPE_P (*node)) { @@ -3578,14 +3594,16 @@ handle_abi_tag_attribute (tree* node, tree name, tree args, } else { - if (TREE_CODE (*node) != FUNCTION_DECL) + if (TREE_CODE (*node) != FUNCTION_DECL + && TREE_CODE (*node) != VAR_DECL) { - error ("%qE attribute applied to non-function %qD", name, *node); + error ("%qE attribute applied to non-function, non-variable %qD", + name, *node); goto fail; } else if (DECL_LANGUAGE (*node) == lang_c) { - error ("%qE attribute applied to extern \"C\" function %qD", + error ("%qE attribute applied to extern \"C\" declaration %qD", name, *node); goto fail; } diff --git a/contrib/gcc-5.0/gcc/cp/typeck.c b/contrib/gcc-5.0/gcc/cp/typeck.c index 4c128b7ebe..e9d4cae570 100644 --- a/contrib/gcc-5.0/gcc/cp/typeck.c +++ b/contrib/gcc-5.0/gcc/cp/typeck.c @@ -2213,6 +2213,8 @@ lookup_anon_field (tree t, tree type) { tree field; + t = TYPE_MAIN_VARIANT (t); + for (field = TYPE_FIELDS (t); field; field = DECL_CHAIN (field)) { if (TREE_STATIC (field)) diff --git a/contrib/gcc-5.0/gcc/doc/contrib.texi b/contrib/gcc-5.0/gcc/doc/contrib.texi index f1214fdb02..9935ac7d6c 100644 --- a/contrib/gcc-5.0/gcc/doc/contrib.texi +++ b/contrib/gcc-5.0/gcc/doc/contrib.texi @@ -881,6 +881,8 @@ Christopher Smith did the port for Convex machines. @item Danny Smith for his major efforts on the Mingw (and Cygwin) ports. +Retired from GCC maintainership August 2010, having mentored two +new maintainers into the role. @item Randy Smith finished the Sun FPA support. diff --git a/contrib/gcc-5.0/gcc/doc/extend.texi b/contrib/gcc-5.0/gcc/doc/extend.texi index 91b94f7b61..e115ca8f2c 100644 --- a/contrib/gcc-5.0/gcc/doc/extend.texi +++ b/contrib/gcc-5.0/gcc/doc/extend.texi @@ -3688,15 +3688,16 @@ in the function when compiling with the @option{-fsanitize=undefined} option. @item bnd_legacy @cindex @code{bnd_legacy} function attribute -The @code{bnd_legacy} attribute on functions is used to inform -compiler that function should not be instrumented when compiled -with @option{-fcheck-pointer-bounds} option. +@cindex Pointer Bounds Checker attributes +The @code{bnd_legacy} attribute on functions is used to inform the +compiler that the function should not be instrumented when compiled +with the @option{-fcheck-pointer-bounds} option. @item bnd_instrument @cindex @code{bnd_instrument} function attribute -The @code{bnd_instrument} attribute on functions is used to inform -compiler that function should be instrumented when compiled -with @option{-fchkp-instrument-marked-only} option. +The @code{bnd_instrument} attribute on functions is used to inform the +compiler that the function should be instrumented when compiled +with the @option{-fchkp-instrument-marked-only} option. @item regparm (@var{number}) @cindex @code{regparm} attribute @@ -5943,10 +5944,12 @@ GCC emits warnings based on this attribute by default; use @option{-Wno-designated-init} to suppress them. @item bnd_variable_size +@cindex @code{bnd_variable_size} attribute +@cindex Pointer Bounds Checker attributes When applied to a structure field, this attribute tells Pointer Bounds Checker that the size of this field should not be computed -using static type information. It may be used to mark variable -sized static array fields placed at the end of a structure. +using static type information. 
It may be used to mark variably-sized +static array fields placed at the end of a structure. @smallexample struct S @@ -5958,8 +5961,9 @@ S *p = (S *)malloc (sizeof(S) + 100); p->data[10] = 0; //Bounds violation @end smallexample -By using an attribute for a field we may avoid bound violation -we most probably do not want to see: +@noindent +By using an attribute for the field we may avoid unwanted bound +violation checks: @smallexample struct S @@ -8731,6 +8735,7 @@ is called and the @var{flag} argument passed to it. @node Pointer Bounds Checker builtins @section Pointer Bounds Checker Built-in Functions +@cindex Pointer Bounds Checker builtins @findex __builtin___bnd_set_ptr_bounds @findex __builtin___bnd_narrow_ptr_bounds @findex __builtin___bnd_copy_ptr_bounds @@ -8744,15 +8749,16 @@ is called and the @var{flag} argument passed to it. @findex __builtin___bnd_get_ptr_ubound GCC provides a set of built-in functions to control Pointer Bounds Checker -instrumentation. Note that all Pointer Bounds Checker builtins are allowed -to use even if you compile with Pointer Bounds Checker off. The builtins -behavior may differ in such case as documented below. +instrumentation. Note that all Pointer Bounds Checker builtins can be used +even if you compile with Pointer Bounds Checker off +(@option{-fno-check-pointer-bounds}). +The behavior may differ in such case as documented below. -@deftypefn {Built-in Function} void * __builtin___bnd_set_ptr_bounds (const void * @var{q}, size_t @var{size}) +@deftypefn {Built-in Function} {void *} __builtin___bnd_set_ptr_bounds (const void *@var{q}, size_t @var{size}) This built-in function returns a new pointer with the value of @var{q}, and associate it with the bounds [@var{q}, @var{q}+@var{size}-1]. With Pointer -Bounds Checker off built-in function just returns the first argument. +Bounds Checker off, the built-in function just returns the first argument. @smallexample extern void *__wrap_malloc (size_t n) @@ -8765,72 +8771,75 @@ extern void *__wrap_malloc (size_t n) @end deftypefn -@deftypefn {Built-in Function} void * __builtin___bnd_narrow_ptr_bounds (const void * @var{p}, const void * @var{q}, size_t @var{size}) +@deftypefn {Built-in Function} {void *} __builtin___bnd_narrow_ptr_bounds (const void *@var{p}, const void *@var{q}, size_t @var{size}) This built-in function returns a new pointer with the value of @var{p} -and associate it with the narrowed bounds formed by the intersection -of bounds associated with @var{q} and the [@var{p}, @var{p} + @var{size} - 1]. -With Pointer Bounds Checker off built-in function just returns the first +and associates it with the narrowed bounds formed by the intersection +of bounds associated with @var{q} and the bounds +[@var{p}, @var{p} + @var{size} - 1]. +With Pointer Bounds Checker off, the built-in function just returns the first argument. @smallexample void init_objects (object *objs, size_t size) @{ size_t i; - /* Initialize objects one-by-one passing pointers with bounds of an object, - not the full array of objects. */ + /* Initialize objects one-by-one passing pointers with bounds of + an object, not the full array of objects. 
*/ for (i = 0; i < size; i++) - init_object (__builtin___bnd_narrow_ptr_bounds (objs + i, objs, sizeof(object))); + init_object (__builtin___bnd_narrow_ptr_bounds (objs + i, objs, + sizeof(object))); @} @end smallexample @end deftypefn -@deftypefn {Built-in Function} void * __builtin___bnd_copy_ptr_bounds (const void * @var{q}, const void * @var{r}) +@deftypefn {Built-in Function} {void *} __builtin___bnd_copy_ptr_bounds (const void *@var{q}, const void *@var{r}) This built-in function returns a new pointer with the value of @var{q}, -and associate it with the bounds already associated with pointer @var{r}. -With Pointer Bounds Checker off built-in function just returns the first +and associates it with the bounds already associated with pointer @var{r}. +With Pointer Bounds Checker off, the built-in function just returns the first argument. @smallexample /* Here is a way to get pointer to object's field but still with the full object's bounds. */ -int *field_ptr = __builtin___bnd_copy_ptr_bounds (&objptr->int_filed, objptr); +int *field_ptr = __builtin___bnd_copy_ptr_bounds (&objptr->int_field, + objptr); @end smallexample @end deftypefn -@deftypefn {Built-in Function} void * __builtin___bnd_init_ptr_bounds (const void * @var{q}) +@deftypefn {Built-in Function} {void *} __builtin___bnd_init_ptr_bounds (const void *@var{q}) This built-in function returns a new pointer with the value of @var{q}, and -associate it with INIT (allowing full memory access) bounds. With Pointer -Bounds Checker off built-in function just returns the first argument. +associates it with INIT (allowing full memory access) bounds. With Pointer +Bounds Checker off, the built-in function just returns the first argument. @end deftypefn -@deftypefn {Built-in Function} void * __builtin___bnd_null_ptr_bounds (const void * @var{q}) +@deftypefn {Built-in Function} {void *} __builtin___bnd_null_ptr_bounds (const void *@var{q}) This built-in function returns a new pointer with the value of @var{q}, and -associate it with NULL (allowing no memory access) bounds. With Pointer -Bounds Checker off built-in function just returns the first argument. +associates it with NULL (allowing no memory access) bounds. With Pointer +Bounds Checker off, the built-in function just returns the first argument. @end deftypefn -@deftypefn {Built-in Function} void __builtin___bnd_store_ptr_bounds (const void ** @var{ptr_addr}, const void * @var{ptr_val}) +@deftypefn {Built-in Function} void __builtin___bnd_store_ptr_bounds (const void **@var{ptr_addr}, const void *@var{ptr_val}) This built-in function stores the bounds associated with pointer @var{ptr_val} and location @var{ptr_addr} into Bounds Table. This can be useful to propagate bounds from legacy code without touching the associated pointer's memory when -pointers were copied as integers. With Pointer Bounds Checker off built-in +pointers are copied as integers. With Pointer Bounds Checker off, the built-in function call is ignored. @end deftypefn -@deftypefn {Built-in Function} void __builtin___bnd_chk_ptr_lbounds (const void * @var{q}) +@deftypefn {Built-in Function} void __builtin___bnd_chk_ptr_lbounds (const void *@var{q}) This built-in function checks if the pointer @var{q} is within the lower -bound of its associated bounds. With Pointer Bounds Checker off built-in +bound of its associated bounds. With Pointer Bounds Checker off, the built-in function call is ignored. 
@smallexample @@ -8848,19 +8857,19 @@ extern void *__wrap_memset (void *dst, int c, size_t len) @end deftypefn -@deftypefn {Built-in Function} void __builtin___bnd_chk_ptr_ubounds (const void * @var{q}) +@deftypefn {Built-in Function} void __builtin___bnd_chk_ptr_ubounds (const void *@var{q}) This built-in function checks if the pointer @var{q} is within the upper -bound of its associated bounds. With Pointer Bounds Checker off built-in +bound of its associated bounds. With Pointer Bounds Checker off, the built-in function call is ignored. @end deftypefn -@deftypefn {Built-in Function} void __builtin___bnd_chk_ptr_bounds (const void * @var{q}, size_t @var{size}) +@deftypefn {Built-in Function} void __builtin___bnd_chk_ptr_bounds (const void *@var{q}, size_t @var{size}) This built-in function checks if [@var{q}, @var{q} + @var{size} - 1] is within the lower and upper bounds associated with @var{q}. With Pointer Bounds Checker -off built-in function call is ignored. +off, the built-in function call is ignored. @smallexample extern void *__wrap_memcpy (void *dst, const void *src, size_t n) @@ -8877,11 +8886,12 @@ extern void *__wrap_memcpy (void *dst, const void *src, size_t n) @end deftypefn -@deftypefn {Built-in Function} const void * __builtin___bnd_get_ptr_lbound (const void * @var{q}) +@deftypefn {Built-in Function} {const void *} __builtin___bnd_get_ptr_lbound (const void *@var{q}) -This built-in function returns the lower bound (which is a pointer) associated -with the pointer @var{q}. This is at least useful for debugging using printf. -With Pointer Bounds Checker off built-in function returns 0. +This built-in function returns the lower bound associated +with the pointer @var{q}, as a pointer value. +This is useful for debugging using @code{printf}. +With Pointer Bounds Checker off, the built-in function returns 0. @smallexample void *lb = __builtin___bnd_get_ptr_lbound (q); @@ -8891,11 +8901,11 @@ printf ("q = %p lb(q) = %p ub(q) = %p", q, lb, ub); @end deftypefn -@deftypefn {Built-in Function} const void * __builtin___bnd_get_ptr_ubound (const void * @var{q}) +@deftypefn {Built-in Function} {const void *} __builtin___bnd_get_ptr_ubound (const void *@var{q}) This built-in function returns the upper bound (which is a pointer) associated -with the pointer @var{q}. With Pointer Bounds Checker off built-in function -returns -1. +with the pointer @var{q}. With Pointer Bounds Checker off, +the built-in function returns -1. @end deftypefn @@ -8906,19 +8916,19 @@ GCC provides support for the following built-in reduction functions if Cilk Plus is enabled. Cilk Plus can be enabled using the @option{-fcilkplus} flag. 
@itemize @bullet -@item __sec_implicit_index -@item __sec_reduce -@item __sec_reduce_add -@item __sec_reduce_all_nonzero -@item __sec_reduce_all_zero -@item __sec_reduce_any_nonzero -@item __sec_reduce_any_zero -@item __sec_reduce_max -@item __sec_reduce_min -@item __sec_reduce_max_ind -@item __sec_reduce_min_ind -@item __sec_reduce_mul -@item __sec_reduce_mutating +@item @code{__sec_implicit_index} +@item @code{__sec_reduce} +@item @code{__sec_reduce_add} +@item @code{__sec_reduce_all_nonzero} +@item @code{__sec_reduce_all_zero} +@item @code{__sec_reduce_any_nonzero} +@item @code{__sec_reduce_any_zero} +@item @code{__sec_reduce_max} +@item @code{__sec_reduce_min} +@item @code{__sec_reduce_max_ind} +@item @code{__sec_reduce_min_ind} +@item @code{__sec_reduce_mul} +@item @code{__sec_reduce_mutating} @end itemize Further details and examples about these built-in functions are described @@ -18722,18 +18732,26 @@ Some attributes only make sense for C++ programs. @table @code @item abi_tag ("@var{tag}", ...) @cindex @code{abi_tag} attribute -The @code{abi_tag} attribute can be applied to a function or class -declaration. It modifies the mangled name of the function or class to +The @code{abi_tag} attribute can be applied to a function, variable, or class +declaration. It modifies the mangled name of the entity to incorporate the tag name, in order to distinguish the function or class from an earlier version with a different ABI; perhaps the class has changed size, or the function has a different return type that is not encoded in the mangled name. +The attribute can also be applied to an inline namespace, but does not +affect the mangled name of the namespace; in this case it is only used +for @option{-Wabi-tag} warnings and automatic tagging of functions and +variables. Tagging inline namespaces is generally preferable to +tagging individual declarations, but the latter is sometimes +necessary, such as when only certain members of a class need to be +tagged. + The argument can be a list of strings of arbitrary length. The strings are sorted on output, so the order of the list is unimportant. -A redeclaration of a function or class must not add new ABI tags, +A redeclaration of an entity must not add new ABI tags, since doing so would change the mangled name. The ABI tags apply to a name, so all instantiations and @@ -18745,6 +18763,13 @@ not have all the ABI tags used by its subobjects and virtual functions; for user that needs to coexist with an earlier ABI, using this option can help to find all affected types that need to be tagged. +When a type involving an ABI tag is used as the type of a variable or +return type of a function where that tag is not already present in the +signature of the function, the tag is automatically applied to the +variable or function. @option{-Wabi-tag} also warns about this +situation; this warning can be avoided by explicitly tagging the +variable or function or moving it into a tagged inline namespace. + @item init_priority (@var{priority}) @cindex @code{init_priority} attribute diff --git a/contrib/gcc-5.0/gcc/doc/invoke.texi b/contrib/gcc-5.0/gcc/doc/invoke.texi index 08ce074c26..5c6e410aa4 100644 --- a/contrib/gcc-5.0/gcc/doc/invoke.texi +++ b/contrib/gcc-5.0/gcc/doc/invoke.texi @@ -197,7 +197,7 @@ in the following sections. 
-fvtv-counts -fvtv-debug @gol -fvisibility-ms-compat @gol -fext-numeric-literals @gol --Wabi=@var{n} -Wconversion-null -Wctor-dtor-privacy @gol +-Wabi=@var{n} -Wabi-tag -Wconversion-null -Wctor-dtor-privacy @gol -Wdelete-non-virtual-dtor -Wliteral-suffix -Wnarrowing @gol -Wnoexcept -Wnon-virtual-dtor -Wreorder @gol -Weffc++ -Wstrict-null-sentinel @gol @@ -572,7 +572,7 @@ Objective-C and Objective-C++ Dialects}. @emph{AVR Options} @gccoptlist{-mmcu=@var{mcu} -maccumulate-args -mbranch-cost=@var{cost} @gol -mcall-prologues -mint8 -mn_flash=@var{size} -mno-interrupts @gol --mrelax -mrmw -mstrict-X -mtiny-stack -Waddr-space-convert} +-mrelax -mrmw -mstrict-X -mtiny-stack -nodevicelib -Waddr-space-convert} @emph{Blackfin Options} @gccoptlist{-mcpu=@var{cpu}@r{[}-@var{sirevision}@r{]} @gol @@ -2641,6 +2641,13 @@ union U @{ @end itemize +@item -Wabi-tag @r{(C++ and Objective-C++ only)} +@opindex Wabi-tag +@opindex -Wabi-tag +Warn when a type with an ABI tag is used in a context that does not +have that ABI tag. See @ref{C++ Attributes} for more information +about ABI tags. + @item -Wctor-dtor-privacy @r{(C++ and Objective-C++ only)} @opindex Wctor-dtor-privacy @opindex Wno-ctor-dtor-privacy @@ -3246,7 +3253,7 @@ option is known to the diagnostic machinery). Specifying the By default, each diagnostic emitted includes the original source line and a caret '^' indicating the column. This option suppresses this information. The source line is truncated to @var{n} characters, if -the @option{-fmessage-length=n} is given. When the output is done +the @option{-fmessage-length=n} option is given. When the output is done to the terminal, the width is limited to the width given by the @env{COLUMNS} environment variable or, if not set, to the terminal width. @@ -5150,8 +5157,8 @@ Requires @option{-flto-odr-type-merging} to be enabled. Enabled by default. @item -Wopenmp-simd @opindex Wopenm-simd Warn if the vectorizer cost model overrides the OpenMP or the Cilk Plus -simd directive set by user. The @option{-fsimd-cost-model=unlimited} can -be used to relax the cost model. +simd directive set by user. The @option{-fsimd-cost-model=unlimited} +option can be used to relax the cost model. @item -Woverride-init @r{(C and Objective-C only)} @opindex Woverride-init @@ -5803,7 +5810,8 @@ for a sanitizer component causes it to attempt to continue running the program as if no error happened. This means multiple runtime errors can be reported in a single program run, and the exit code of the program may indicate success even when errors -have been reported. The @option{-fno-sanitize-recover=} can be used to alter +have been reported. The @option{-fno-sanitize-recover=} option +can be used to alter this behavior: only the first detected error is reported and program then exits with a non-zero exit code. @@ -5827,7 +5835,7 @@ Similarly @option{-fno-sanitize-recover} is equivalent to @item -fsanitize-undefined-trap-on-error @opindex fsanitize-undefined-trap-on-error -The @option{-fsanitize-undefined-trap-on-error} instructs the compiler to +The @option{-fsanitize-undefined-trap-on-error} option instructs the compiler to report undefined behavior using @code{__builtin_trap} rather than a @code{libubsan} library routine. The advantage of this is that the @code{libubsan} library is not needed and is not linked in, so this @@ -5836,31 +5844,42 @@ is usable even in freestanding environments. 
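[Editor's illustrative note, not part of the upstream diff: a minimal sketch of the -fsanitize-undefined-trap-on-error behaviour described in the hunk just above. The file name and the choice of the signed-integer-overflow sanitizer are assumptions for illustration only; only -fsanitize-undefined-trap-on-error itself is taken from the documentation above.]

/* trap.c -- hypothetical example.  Building with
       gcc -fsanitize=signed-integer-overflow -fsanitize-undefined-trap-on-error trap.c
   should make the signed overflow below execute a trap instruction
   (__builtin_trap) instead of calling a libubsan diagnostic routine,
   so the libubsan runtime is not linked in.  */
#include <limits.h>

int
main (int argc, char **argv)
{
  (void) argv;
  int x = INT_MAX;
  /* argc is at least 1 at run time, so x + argc overflows:
     undefined behaviour, reported here by trapping.  */
  return (x + argc) != 0;
}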
@item -fcheck-pointer-bounds @opindex fcheck-pointer-bounds @opindex fno-check-pointer-bounds +@cindex Pointer Bounds Checker options Enable Pointer Bounds Checker instrumentation. Each memory reference -is instrumented with checks of pointer used for memory access against -bounds associated with that pointer. Generated instrumentation may -be controlled by various @option{-fchkp-*} options. Currently there -is only Intel MPX based implementation available, thus i386 target -and @option{-mmpx} are required. MPX based instrumentation requires -a runtime library to enable MPX in a hardware and handle bounds +is instrumented with checks of the pointer used for memory access against +bounds associated with that pointer. + +Currently there +is only an implementation for Intel MPX available, thus x86 target +and @option{-mmpx} are required to enable this feature. +MPX-based instrumentation requires +a runtime library to enable MPX in hardware and handle bounds violation signals. By default when @option{-fcheck-pointer-bounds} and @option{-mmpx} options are used to link a program, the GCC driver -links against @option{libmpx} runtime library. MPX based instrumentation -may be used for a debugging and also it may be included into a release -version to increase program security. Depending on usage you may -put different requirements to runtime library. Current version - of MPX runtime library is more oriented to be used as a debugging +links against the @file{libmpx} runtime library. MPX-based instrumentation +may be used for debugging and also may be included in production code +to increase program security. Depending on usage, you may +have different requirements for the runtime library. The current version +of the MPX runtime library is more oriented for use as a debugging tool. MPX runtime library usage implies @option{-lpthread}. See also @option{-static-libmpx}. The runtime library behavior can be influenced using various @env{CHKP_RT_*} environment variables. See @uref{https://gcc.gnu.org/wiki/Intel%20MPX%20support%20in%20the%20GCC%20compiler} for more details. +Generated instrumentation may be controlled by various +@option{-fchkp-*} options and by the @code{bnd_variable_size} +structure field attribute (@pxref{Type Attributes}) and +@code{bnd_legacy}, and @code{bnd_instrument} function attributes +(@pxref{Function Attributes}). GCC also provides a number of built-in +functions for controlling the Pointer Bounds Checker. @xref{Pointer +Bounds Checker builtins}, for more information. + @item -fchkp-check-incomplete-type @opindex fchkp-check-incomplete-type @opindex fno-chkp-check-incomplete-type Generate pointer bounds checks for variables with incomplete type. -Enabled by default +Enabled by default. @item -fchkp-narrow-bounds @opindex fchkp-narrow-bounds @@ -5873,15 +5892,15 @@ and @option{-fchkp-first-field-has-own-bounds}. Enabled by default. @item -fchkp-first-field-has-own-bounds @opindex fchkp-first-field-has-own-bounds @opindex fno-chkp-first-field-has-own-bounds -Forces Pointer Bounds Checker to use narrowed bounds for address of the -first field in the structure. By default pointer to the first field has -the same bounds as pointer to the whole structure. +Forces Pointer Bounds Checker to use narrowed bounds for the address of the +first field in the structure. By default a pointer to the first field has +the same bounds as a pointer to the whole structure. 
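[Editor's illustrative note, not part of the upstream diff: a minimal sketch of the -fcheck-pointer-bounds instrumentation documented in this hunk. It assumes an MPX-capable CPU and the libmpx runtime mentioned above; the file name and the use of malloc are illustrative only.]

/* bounds.c -- hypothetical example.  Built with
       gcc -fcheck-pointer-bounds -mmpx bounds.c
   each access through p is checked against the bounds associated with p,
   so the off-by-one store below should be reported as a bounds violation
   at run time instead of silently corrupting memory.  */
#include <stdlib.h>

int
main (void)
{
  int *p = malloc (10 * sizeof (int));
  if (!p)
    return 1;
  p[10] = 42;   /* one element past the end of the allocation */
  free (p);
  return 0;
}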
@item -fchkp-narrow-to-innermost-array @opindex fchkp-narrow-to-innermost-array @opindex fno-chkp-narrow-to-innermost-array Forces Pointer Bounds Checker to use bounds of the innermost arrays in -case of nested static arryas access. By default it is disabled and +case of nested static array access. By default this option is disabled and bounds of the outermost array are used. @item -fchkp-optimize @@ -5893,13 +5912,13 @@ optimization levels @option{-O}, @option{-O2}, @option{-O3}. @item -fchkp-use-fast-string-functions @opindex fchkp-use-fast-string-functions @opindex fno-chkp-use-fast-string-functions -Allow to use @code{*_nobnd} versions of string functions (not copying bounds) +Enables use of @code{*_nobnd} versions of string functions (not copying bounds) by Pointer Bounds Checker. Disabled by default. @item -fchkp-use-nochk-string-functions @opindex fchkp-use-nochk-string-functions @opindex fno-chkp-use-nochk-string-functions -Allow to use @code{*_nochk} versions of string functions (not checking bounds) +Enables use of @code{*_nochk} versions of string functions (not checking bounds) by Pointer Bounds Checker. Disabled by default. @item -fchkp-use-static-bounds @@ -5911,16 +5930,17 @@ bounds of static variables. Enabled by default. @item -fchkp-use-static-const-bounds @opindex fchkp-use-static-const-bounds @opindex fno-chkp-use-static-const-bounds -Use statically initialized bounds for constant bounds instead of -generating them each time it is required. By default enabled when +Use statically-initialized bounds for constant bounds instead of +generating them each time they are required. By default enabled when @option{-fchkp-use-static-bounds} is enabled. @item -fchkp-treat-zero-dynamic-size-as-infinite @opindex fchkp-treat-zero-dynamic-size-as-infinite @opindex fno-chkp-treat-zero-dynamic-size-as-infinite -With this option zero size obtained dynamically for objects with -incomplete type will be treated as infinite by Pointer Bounds -Checker. It may be helpful if program is linked with a library +With this option, objects with incomplete type whose +dynamically-obtained size is zero are treated as having infinite size +instead by Pointer Bounds +Checker. This option may be helpful if a program is linked with a library missing size information for some symbols. Disabled by default. @item -fchkp-check-read @@ -5951,15 +5971,16 @@ Enabled by default. @opindex fchkp-instrument-marked-only @opindex fno-chkp-instrument-marked-only Instructs Pointer Bounds Checker to instrument only functions -marked with @code{bnd_instrument} attribute. Disabled by default. +marked with the @code{bnd_instrument} attribute +(@pxref{Function Attributes}). Disabled by default. @item -fchkp-use-wrappers @opindex fchkp-use-wrappers @opindex fno-chkp-use-wrappers -Allows Pointer Bounds Checker to replace calls to built-in function -with calls to wrapper functions. When the @option{-fchkp-use-wrappers} +Allows Pointer Bounds Checker to replace calls to built-in functions +with calls to wrapper functions. When @option{-fchkp-use-wrappers} is used to link a program, the GCC driver automatically links -agains @option{libmpxwrappers}. See also @option{-static-libmpxwrappers}. +against @file{libmpxwrappers}. See also @option{-static-libmpxwrappers}. Enabled by default. @item -fdump-final-insns@r{[}=@var{file}@r{]} @@ -9239,7 +9260,8 @@ them as usual to produce @file{myprog}. The only important thing to keep in mind is that to enable link-time optimizations you need to use the GCC driver to perform the link-step. 
GCC then automatically performs link-time optimization if any of the -objects involved were compiled with the @option{-flto}. You generally +objects involved were compiled with the @option{-flto} command-line option. +You generally should specify the optimization options to be used for link-time optimization though GCC tries to be clever at guessing an optimization level to use from the options used at compile-time @@ -10426,7 +10448,8 @@ This value is used to limit superblock formation once the given percentage of executed instructions is covered. This limits unnecessary code size expansion. -The @option{tracer-dynamic-coverage-feedback} is used only when profile +The @option{tracer-dynamic-coverage-feedback} parameter +is used only when profile feedback is available. The real profiles (as opposed to statically estimated ones) are much less balanced allowing the threshold to be larger value. @@ -10514,7 +10537,8 @@ branch or duplicate the code on its destination. Code is duplicated when its estimated size is smaller than this value multiplied by the estimated size of unconditional jump in the hot spots of the program. -The @option{reorder-block-duplicate-feedback} is used only when profile +The @option{reorder-block-duplicate-feedback} parameter +is used only when profile feedback is available. It may be set to higher values than @option{reorder-block-duplicate} since information about the hot spots is more accurate. @@ -10791,7 +10815,7 @@ length can be changed using the @option{loop-block-tile-size} parameter. The default value is 51 iterations. @item loop-unroll-jam-size -Specify the unroll factor for the @option{-floop-unroll-and-jam}. The +Specify the unroll factor for the @option{-floop-unroll-and-jam} option. The default value is 4. @item loop-unroll-jam-depth @@ -11271,9 +11295,9 @@ other libraries statically. @item -static-libmpx @opindex static-libmpx -When @option{-fcheck-pointer bounds} and @option{-mmpx} options are +When the @option{-fcheck-pointer bounds} and @option{-mmpx} options are used to link a program, the GCC driver automatically links against -@option{libmpx}. If @file{libmpx} is available as a shared library, +@file{libmpx}. If @file{libmpx} is available as a shared library, and the @option{-static} option is not used, then this links against the shared version of @file{libmpx}. The @option{-static-libmpx} option directs the GCC driver to link @file{libmpx} statically, @@ -11281,9 +11305,9 @@ without necessarily linking other libraries statically. @item -static-libmpxwrappers @opindex static-libmpxwrappers -When @option{-fcheck-pointer bounds}, @option{-mmpx} options are used and -@option{-fno-chkp-use-wrappers} option is not used to link a program, the -GCC driver automatically links against @option{libmpxwrappers}. If +When the @option{-fcheck-pointer bounds} and @option{-mmpx} options are used +to link a program without also using @option{-fno-chkp-use-wrappers}, the +GCC driver automatically links against @file{libmpxwrappers}. If @file{libmpxwrappers} is available as a shared library, and the @option{-static} option is not used, then this links against the shared version of @file{libmpxwrappers}. The @option{-static-libmpxwrappers} @@ -11426,7 +11450,7 @@ those results in a file name that is found, the unmodified program name is searched for using the directories specified in your @env{PATH} environment variable. 
-The compiler checks to see if the path provided by the @option{-B} +The compiler checks to see if the path provided by @option{-B} refers to a directory, and if necessary it adds a directory separator character at the end of the path. @@ -11488,13 +11512,14 @@ such a suffix. @item -I- @opindex I- This option has been deprecated. Please use @option{-iquote} instead for -@option{-I} directories before the @option{-I-} and remove the @option{-I-}. +@option{-I} directories before the @option{-I-} and remove the @option{-I-} +option. Any directories you specify with @option{-I} options before the @option{-I-} option are searched only for the case of @code{#include "@var{file}"}; they are not searched for @code{#include <@var{file}>}. If additional directories are specified with @option{-I} options after -the @option{-I-}, these directories are searched for all @code{#include} +the @option{-I-} option, these directories are searched for all @code{#include} directives. (Ordinarily @emph{all} @option{-I} directories are used this way.) @@ -13572,6 +13597,10 @@ sbiw r26, const ; X -= const @opindex mtiny-stack Only change the lower 8@tie{}bits of the stack pointer. +@item -nodevicelib +@opindex nodevicelib +Don't link against AVR-LibC's device specific library @code{libdev.a}. + @item -Waddr-space-convert @opindex Waddr-space-convert Warn about conversions between address spaces in the case where the @@ -17148,7 +17177,7 @@ These options control the treatment of the special not-a-number (NaN) IEEE 754 floating-point data with the @code{abs.@i{fmt}} and @code{neg.@i{fmt}} machine instructions. -By default or when the @option{-mabs=legacy} is used the legacy +By default or when @option{-mabs=legacy} is used the legacy treatment is selected. In this case these instructions are considered arithmetic and avoided where correct operation is required and the input operand might be a NaN. A longer sequence of instructions that @@ -19024,8 +19053,8 @@ instructions. The @option{-mquad-memory-atomic} option requires use of Generate code that uses (does not use) the scalar double precision instructions that target all 64 registers in the vector/scalar floating point register set that were added in version 2.06 of the -PowerPC ISA. The @option{-mupper-regs-df} turned on by default if you -use either of the @option{-mcpu=power7}, @option{-mcpu=power8}, or +PowerPC ISA. @option{-mupper-regs-df} is turned on by default if you +use any of the @option{-mcpu=power7}, @option{-mcpu=power8}, or @option{-mvsx} options. @item -mupper-regs-sf @@ -19035,8 +19064,8 @@ use either of the @option{-mcpu=power7}, @option{-mcpu=power8}, or Generate code that uses (does not use) the scalar single precision instructions that target all 64 registers in the vector/scalar floating point register set that were added in version 2.07 of the -PowerPC ISA. The @option{-mupper-regs-sf} turned on by default if you -use either of the @option{-mcpu=power8}, or @option{-mpower8-vector} +PowerPC ISA. @option{-mupper-regs-sf} is turned on by default if you +use either of the @option{-mcpu=power8} or @option{-mpower8-vector} options. @item -mupper-regs @@ -19805,7 +19834,7 @@ static chain value to be loaded in register @code{r11}. The @option{-mpointers-to-nested-functions} is on by default. You cannot call through pointers to nested functions or pointers to functions compiled in other languages that use the static chain if -you use the @option{-mno-pointers-to-nested-functions}. +you use @option{-mno-pointers-to-nested-functions}. 
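For illustration only (not part of this patch), the GNU C sketch below shows the kind of code the -mpointers-to-nested-functions text above is about: a nested function whose address is used for an indirect call, which depends on the static chain value being loaded in r11 as described.

/* nested-ptr.c -- hypothetical example, not taken from this patch.
   Nested functions are a GNU C extension.  */

#include <stdio.h>

static void
apply (void (*fn) (int), int v)
{
  fn (v);   /* indirect call through a function pointer */
}

int
main (void)
{
  int total = 0;

  /* The nested function reads and writes a local of the enclosing
     frame, so calling it indirectly needs the static chain.  */
  void add (int v) { total += v; }

  /* Under -mno-pointers-to-nested-functions such indirect calls to
     nested functions are not supported, as the documentation above
     states.  */
  apply (add, 5);
  apply (add, 7);

  printf ("%d\n", total);
  return 0;
}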
@item -msave-toc-indirect @itemx -mno-save-toc-indirect @@ -21987,6 +22016,12 @@ instruction set support. Intel Silvermont CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2, POPCNT, AES, PCLMUL and RDRND instruction set support. +@item knl +Intel Knight's Landing CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3, +SSSE3, SSE4.1, SSE4.2, POPCNT, AVX, AVX2, AES, PCLMUL, FSGSBASE, RDRND, FMA, +BMI, BMI2, F16C, RDSEED, ADCX, PREFETCHW, AVX512F, AVX512PF, AVX512ER and +AVX512CD instruction set support. + @item k6 AMD K6 CPU with MMX instruction set support. diff --git a/contrib/gcc-5.0/gcc/expmed.c b/contrib/gcc-5.0/gcc/expmed.c index 0034203c2f..8d4f964093 100644 --- a/contrib/gcc-5.0/gcc/expmed.c +++ b/contrib/gcc-5.0/gcc/expmed.c @@ -2551,9 +2551,8 @@ synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t, alg_in->cost.latency += op_cost; if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost)) { - struct algorithm *x; best_cost = alg_in->cost; - x = alg_in, alg_in = best_alg, best_alg = x; + std::swap (alg_in, best_alg); best_alg->log[best_alg->ops] = m; best_alg->op[best_alg->ops] = alg_shift; } @@ -2582,9 +2581,8 @@ synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t, alg_in->cost.latency += op_cost; if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost)) { - struct algorithm *x; best_cost = alg_in->cost; - x = alg_in, alg_in = best_alg, best_alg = x; + std::swap (alg_in, best_alg); best_alg->log[best_alg->ops] = m; best_alg->op[best_alg->ops] = alg_shift; } @@ -2624,9 +2622,8 @@ synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t, alg_in->cost.latency += op_cost; if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost)) { - struct algorithm *x; best_cost = alg_in->cost; - x = alg_in, alg_in = best_alg, best_alg = x; + std::swap (alg_in, best_alg); best_alg->log[best_alg->ops] = 0; best_alg->op[best_alg->ops] = alg_sub_t_m2; } @@ -2644,9 +2641,8 @@ synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t, alg_in->cost.latency += op_cost; if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost)) { - struct algorithm *x; best_cost = alg_in->cost; - x = alg_in, alg_in = best_alg, best_alg = x; + std::swap (alg_in, best_alg); best_alg->log[best_alg->ops] = 0; best_alg->op[best_alg->ops] = alg_add_t_m2; } @@ -2667,9 +2663,8 @@ synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t, alg_in->cost.latency += op_cost; if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost)) { - struct algorithm *x; best_cost = alg_in->cost; - x = alg_in, alg_in = best_alg, best_alg = x; + std::swap (alg_in, best_alg); best_alg->log[best_alg->ops] = m; best_alg->op[best_alg->ops] = alg_sub_t_m2; } @@ -2723,9 +2718,8 @@ synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t, alg_in->cost.latency = op_cost; if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost)) { - struct algorithm *x; best_cost = alg_in->cost; - x = alg_in, alg_in = best_alg, best_alg = x; + std::swap (alg_in, best_alg); best_alg->log[best_alg->ops] = m; best_alg->op[best_alg->ops] = alg_add_factor; } @@ -2762,9 +2756,8 @@ synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t, alg_in->cost.latency = op_cost; if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost)) { - struct algorithm *x; best_cost = alg_in->cost; - x = alg_in, alg_in = best_alg, best_alg = x; + std::swap (alg_in, best_alg); best_alg->log[best_alg->ops] = m; best_alg->op[best_alg->ops] = alg_sub_factor; } @@ -2793,9 +2786,8 @@ synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t, alg_in->cost.latency += 
op_cost; if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost)) { - struct algorithm *x; best_cost = alg_in->cost; - x = alg_in, alg_in = best_alg, best_alg = x; + std::swap (alg_in, best_alg); best_alg->log[best_alg->ops] = m; best_alg->op[best_alg->ops] = alg_add_t2_m; } @@ -2818,9 +2810,8 @@ synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t, alg_in->cost.latency += op_cost; if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost)) { - struct algorithm *x; best_cost = alg_in->cost; - x = alg_in, alg_in = best_alg, best_alg = x; + std::swap (alg_in, best_alg); best_alg->log[best_alg->ops] = m; best_alg->op[best_alg->ops] = alg_sub_t2_m; } diff --git a/contrib/gcc-5.0/gcc/expr.c b/contrib/gcc-5.0/gcc/expr.c index 89ca12945f..dc13a14f4e 100644 --- a/contrib/gcc-5.0/gcc/expr.c +++ b/contrib/gcc-5.0/gcc/expr.c @@ -6645,11 +6645,12 @@ store_field (rtx target, HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos, && mode != TYPE_MODE (TREE_TYPE (exp))) temp = convert_modes (mode, TYPE_MODE (TREE_TYPE (exp)), temp, 1); - /* If the modes of TEMP and TARGET are both BLKmode, both - must be in memory and BITPOS must be aligned on a byte - boundary. If so, we simply do a block copy. Likewise - for a BLKmode-like TARGET. */ - if (GET_MODE (temp) == BLKmode + /* If TEMP is not a PARALLEL (see below) and its mode and that of TARGET + are both BLKmode, both must be in memory and BITPOS must be aligned + on a byte boundary. If so, we simply do a block copy. Likewise for + a BLKmode-like TARGET. */ + if (GET_CODE (temp) != PARALLEL + && GET_MODE (temp) == BLKmode && (GET_MODE (target) == BLKmode || (MEM_P (target) && GET_MODE_CLASS (GET_MODE (target)) == MODE_INT diff --git a/contrib/gcc-5.0/gcc/gimplify.c b/contrib/gcc-5.0/gcc/gimplify.c index d822913acc..ff0a225c92 100644 --- a/contrib/gcc-5.0/gcc/gimplify.c +++ b/contrib/gcc-5.0/gcc/gimplify.c @@ -8524,23 +8524,6 @@ gimplify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p, post_p, is_gimple_val, fb_rvalue); recalculate_side_effects (*expr_p); ret = MIN (r0, r1); - /* Convert &X + CST to invariant &MEM[&X, CST]. Do this - after gimplifying operands - this is similar to how - it would be folding all gimplified stmts on creation - to have them canonicalized, which is what we eventually - should do anyway. */ - if (TREE_CODE (TREE_OPERAND (*expr_p, 1)) == INTEGER_CST - && is_gimple_min_invariant (TREE_OPERAND (*expr_p, 0))) - { - *expr_p = build_fold_addr_expr_with_type_loc - (input_location, - fold_build2 (MEM_REF, TREE_TYPE (TREE_TYPE (*expr_p)), - TREE_OPERAND (*expr_p, 0), - fold_convert (ptr_type_node, - TREE_OPERAND (*expr_p, 1))), - TREE_TYPE (*expr_p)); - ret = MIN (ret, GS_OK); - } break; } diff --git a/contrib/gcc-5.0/gcc/ipa-chkp.c b/contrib/gcc-5.0/gcc/ipa-chkp.c index 3bea06ab75..a9933e237c 100644 --- a/contrib/gcc-5.0/gcc/ipa-chkp.c +++ b/contrib/gcc-5.0/gcc/ipa-chkp.c @@ -592,7 +592,8 @@ chkp_maybe_create_clone (tree fndecl) /* Clone all thunks. 
*/ for (e = node->callers; e; e = e->next_caller) if (e->caller->thunk.thunk_p - && !e->caller->thunk.add_pointer_bounds_args) + && !e->caller->thunk.add_pointer_bounds_args + && !e->caller->instrumentation_clone) { struct cgraph_node *thunk = chkp_maybe_create_clone (e->caller->decl); diff --git a/contrib/gcc-5.0/gcc/ipa-devirt.c b/contrib/gcc-5.0/gcc/ipa-devirt.c index c9d153c1b9..dd4397bbf2 100644 --- a/contrib/gcc-5.0/gcc/ipa-devirt.c +++ b/contrib/gcc-5.0/gcc/ipa-devirt.c @@ -1412,9 +1412,18 @@ add_type_duplicate (odr_type val, tree type) if (!val->types_set) val->types_set = new hash_set; + /* Chose polymorphic type as leader (this happens only in case of ODR + violations. */ + if ((TREE_CODE (type) == RECORD_TYPE && TYPE_BINFO (type) + && polymorphic_type_binfo_p (TYPE_BINFO (type))) + && (TREE_CODE (val->type) != RECORD_TYPE || !TYPE_BINFO (val->type) + || !polymorphic_type_binfo_p (TYPE_BINFO (val->type)))) + { + prevail = true; + build_bases = true; + } /* Always prefer complete type to be the leader. */ - - if (!COMPLETE_TYPE_P (val->type) && COMPLETE_TYPE_P (type)) + else if (!COMPLETE_TYPE_P (val->type) && COMPLETE_TYPE_P (type)) { prevail = true; build_bases = TYPE_BINFO (type); @@ -1563,7 +1572,8 @@ add_type_duplicate (odr_type val, tree type) Be sure this does not happen. */ gcc_assert (TYPE_BINFO (type2) || !polymorphic_type_binfo_p (TYPE_BINFO (type1)) - || build_bases); + || build_bases + || val->odr_violated); break; } /* One base is polymorphic and the other not. @@ -1865,9 +1875,9 @@ dump_odr_type (FILE *f, odr_type t, int indent=0) fprintf (f, "%s\n", t->all_derivations_known ? " (derivations known)":""); if (TYPE_NAME (t->type)) { - fprintf (f, "%*s defined at: %s:%i\n", indent * 2, "", + /*fprintf (f, "%*s defined at: %s:%i\n", indent * 2, "", DECL_SOURCE_FILE (TYPE_NAME (t->type)), - DECL_SOURCE_LINE (TYPE_NAME (t->type))); + DECL_SOURCE_LINE (TYPE_NAME (t->type)));*/ if (DECL_ASSEMBLER_NAME_SET_P (TYPE_NAME (t->type))) fprintf (f, "%*s mangled name: %s\n", indent * 2, "", IDENTIFIER_POINTER diff --git a/contrib/gcc-5.0/gcc/ipa-icf.c b/contrib/gcc-5.0/gcc/ipa-icf.c index 25b83062d3..360cf17199 100644 --- a/contrib/gcc-5.0/gcc/ipa-icf.c +++ b/contrib/gcc-5.0/gcc/ipa-icf.c @@ -239,12 +239,12 @@ sem_item::dump (void) if (dump_file) { fprintf (dump_file, "[%s] %s (%u) (tree:%p)\n", type == FUNC ? "func" : "var", - name(), node->order, (void *) node->decl); + node->name(), node->order, (void *) node->decl); fprintf (dump_file, " hash: %u\n", get_hash ()); fprintf (dump_file, " references: "); for (unsigned i = 0; i < refs.length (); i++) - fprintf (dump_file, "%s%s ", refs[i]->name (), + fprintf (dump_file, "%s%s ", refs[i]->node->name (), i < refs.length() - 1 ? "," : ""); fprintf (dump_file, "\n"); @@ -575,8 +575,13 @@ sem_function::equals (sem_item *item, if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Equals called for:%s:%s (%u:%u) (%s:%s) with result: %s\n\n", - name(), item->name (), node->order, item->node->order, asm_name (), - item->asm_name (), eq ? "true" : "false"); + xstrdup_for_dump (node->name()), + xstrdup_for_dump (item->node->name ()), + node->order, + item->node->order, + xstrdup_for_dump (node->asm_name ()), + xstrdup_for_dump (item->node->asm_name ()), + eq ? 
"true" : "false"); return eq; } @@ -809,6 +814,13 @@ sem_function::merge (sem_item *alias_item) bool original_address_matters = original->address_matters_p (); bool alias_address_matters = alias->address_matters_p (); + if (DECL_EXTERNAL (alias->decl)) + { + if (dump_file) + fprintf (dump_file, "Not unifying; alias is external.\n\n"); + return false; + } + if (DECL_NO_INLINE_WARNING_P (original->decl) != DECL_NO_INLINE_WARNING_P (alias->decl)) { @@ -1522,8 +1534,11 @@ sem_variable::equals (sem_item *item, if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Equals called for vars:%s:%s (%u:%u) (%s:%s) with result: %s\n\n", - name(), item->name (), node->order, item->node->order, asm_name (), - item->asm_name (), ret ? "true" : "false"); + xstrdup_for_dump (node->name()), + xstrdup_for_dump (item->node->name ()), + node->order, item->node->order, + xstrdup_for_dump (node->asm_name ()), + xstrdup_for_dump (item->node->asm_name ()), ret ? "true" : "false"); return ret; } @@ -1768,6 +1783,13 @@ sem_variable::merge (sem_item *alias_item) return false; } + if (DECL_EXTERNAL (alias_item->decl)) + { + if (dump_file) + fprintf (dump_file, "Not unifying; alias is external.\n\n"); + return false; + } + sem_variable *alias_var = static_cast (alias_item); varpool_node *original = get_node (); @@ -1995,8 +2017,8 @@ sem_item_optimizer::read_section (lto_file_decl_data *file_data, gcc_assert (node->definition); if (dump_file) - fprintf (dump_file, "Symbol added:%s (tree: %p, uid:%u)\n", node->asm_name (), - (void *) node->decl, node->order); + fprintf (dump_file, "Symbol added:%s (tree: %p, uid:%u)\n", + node->asm_name (), (void *) node->decl, node->order); if (is_a (node)) { @@ -2259,7 +2281,7 @@ sem_item_optimizer::parse_funcs_and_vars (void) m_symtab_node_map.put (cnode, f); if (dump_file) - fprintf (dump_file, "Parsed function:%s\n", f->asm_name ()); + fprintf (dump_file, "Parsed function:%s\n", f->node->asm_name ()); if (dump_file && (dump_flags & TDF_DETAILS)) f->dump_to_file (dump_file); @@ -2955,9 +2977,11 @@ sem_item_optimizer::merge_classes (unsigned int prev_class_count) if (dump_file) { fprintf (dump_file, "Semantic equality hit:%s->%s\n", - source->name (), alias->name ()); + xstrdup_for_dump (source->node->name ()), + xstrdup_for_dump (alias->node->name ())); fprintf (dump_file, "Assembler symbol names:%s->%s\n", - source->asm_name (), alias->asm_name ()); + xstrdup_for_dump (source->node->asm_name ()), + xstrdup_for_dump (alias->node->asm_name ())); } if (lookup_attribute ("no_icf", DECL_ATTRIBUTES (alias->decl))) @@ -2993,7 +3017,8 @@ congruence_class::dump (FILE *file, unsigned int indent) const FPUTS_SPACES (file, indent + 2, ""); for (unsigned i = 0; i < members.length (); i++) - fprintf (file, "%s(%p/%u) ", members[i]->asm_name (), (void *) members[i]->decl, + fprintf (file, "%s(%p/%u) ", members[i]->node->asm_name (), + (void *) members[i]->decl, members[i]->node->order); fprintf (file, "\n"); diff --git a/contrib/gcc-5.0/gcc/ipa-icf.h b/contrib/gcc-5.0/gcc/ipa-icf.h index c51bb4a429..8245b5445c 100644 --- a/contrib/gcc-5.0/gcc/ipa-icf.h +++ b/contrib/gcc-5.0/gcc/ipa-icf.h @@ -171,18 +171,6 @@ public: /* Add reference to a semantic TARGET. */ void add_reference (sem_item *target); - /* Gets symbol name of the item. */ - const char *name (void) - { - return node->name (); - } - - /* Gets assembler name of the item. */ - const char *asm_name (void) - { - return node->asm_name (); - } - /* Fast equality function based on knowledge known in WPA. 
*/ virtual bool equals_wpa (sem_item *item, hash_map &ignored_nodes) = 0; diff --git a/contrib/gcc-5.0/gcc/ipa-inline.c b/contrib/gcc-5.0/gcc/ipa-inline.c index dd2e64ce28..851ef3fa8e 100644 --- a/contrib/gcc-5.0/gcc/ipa-inline.c +++ b/contrib/gcc-5.0/gcc/ipa-inline.c @@ -312,6 +312,15 @@ static bool can_inline_edge_p (struct cgraph_edge *e, bool report, bool disregard_limits = false, bool early = false) { + gcc_checking_assert (e->inline_failed); + + if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR) + { + if (report) + report_inline_failed_reason (e); + return false; + } + bool inlinable = true; enum availability avail; cgraph_node *callee = e->callee->ultimate_alias_target (&avail); @@ -323,9 +332,7 @@ can_inline_edge_p (struct cgraph_edge *e, bool report, struct function *caller_fun = caller->get_fun (); struct function *callee_fun = callee ? callee->get_fun () : NULL; - gcc_assert (e->inline_failed); - - if (!callee || !callee->definition) + if (!callee->definition) { e->inline_failed = CIF_BODY_NOT_AVAILABLE; inlinable = false; @@ -363,8 +370,7 @@ can_inline_edge_p (struct cgraph_edge *e, bool report, } /* TM pure functions should not be inlined into non-TM_pure functions. */ - else if (is_tm_pure (callee->decl) - && !is_tm_pure (caller->decl)) + else if (is_tm_pure (callee->decl) && !is_tm_pure (caller->decl)) { e->inline_failed = CIF_UNSPECIFIED; inlinable = false; @@ -2289,7 +2295,22 @@ ipa_inline (void) nnodes = ipa_reverse_postorder (order); FOR_EACH_FUNCTION (node) - node->aux = 0; + { + node->aux = 0; + + /* Recompute the default reasons for inlining because they may have + changed during merging. */ + if (in_lto_p) + { + for (cgraph_edge *e = node->callees; e; e = e->next_callee) + { + gcc_assert (e->inline_failed); + initialize_inline_failed (e); + } + for (cgraph_edge *e = node->indirect_calls; e; e = e->next_callee) + initialize_inline_failed (e); + } + } if (dump_file) fprintf (dump_file, "\nFlattening functions:\n"); diff --git a/contrib/gcc-5.0/gcc/ipa-split.c b/contrib/gcc-5.0/gcc/ipa-split.c index 7e68a87607..5d5db0e4ee 100644 --- a/contrib/gcc-5.0/gcc/ipa-split.c +++ b/contrib/gcc-5.0/gcc/ipa-split.c @@ -598,6 +598,31 @@ consider_split (struct split_point *current, bitmap non_ssa_vars, return; } + /* Splitting functions brings the target out of comdat group; this will + lead to code duplication if the function is reused by other unit. + Limit this duplication. This is consistent with limit in tree-sra.c + FIXME: with LTO we ought to be able to do better! */ + if (DECL_ONE_ONLY (current_function_decl) + && current->split_size >= (unsigned int) MAX_INLINE_INSNS_AUTO) + { + if (dump_file && (dump_flags & TDF_DETAILS)) + fprintf (dump_file, + " Refused: function is COMDAT and tail is too large\n"); + return; + } + /* For comdat functions also reject very small tails; those will likely get + inlined back and we do not want to risk the duplication overhead. + FIXME: with LTO we ought to be able to do better! */ + if (DECL_ONE_ONLY (current_function_decl) + && current->split_size + <= (unsigned int) PARAM_VALUE (PARAM_EARLY_INLINING_INSNS) / 2) + { + if (dump_file && (dump_flags & TDF_DETAILS)) + fprintf (dump_file, + " Refused: function is COMDAT and tail is too small\n"); + return; + } + /* FIXME: we currently can pass only SSA function parameters to the split arguments. Once parm_adjustment infrastructure is supported by cloning, we can pass more than that. 
*/ @@ -744,7 +769,8 @@ consider_split (struct split_point *current, bitmap non_ssa_vars, of the form: = tmp_var; return - but return_bb can not be more complex than this. + but return_bb can not be more complex than this (except for + -fsanitize=thread we allow TSAN_FUNC_EXIT () internal call in there). If nothing is found, return the exit block. When there are multiple RETURN statement, chose one with return value, @@ -789,6 +815,13 @@ find_return_bb (void) found_return = true; retval = gimple_return_retval (return_stmt); } + /* For -fsanitize=thread, allow also TSAN_FUNC_EXIT () in the return + bb. */ + else if ((flag_sanitize & SANITIZE_THREAD) + && is_gimple_call (stmt) + && gimple_call_internal_p (stmt) + && gimple_call_internal_fn (stmt) == IFN_TSAN_FUNC_EXIT) + ; else break; } @@ -1049,12 +1082,11 @@ typedef struct the component used by consider_split. */ static void -find_split_points (int overall_time, int overall_size) +find_split_points (basic_block return_bb, int overall_time, int overall_size) { stack_entry first; vec stack = vNULL; basic_block bb; - basic_block return_bb = find_return_bb (); struct split_point current; current.header_time = overall_time; @@ -1211,19 +1243,20 @@ insert_bndret_call_after (tree retbnd, tree retval, gimple_stmt_iterator *gsi) gimple_call_set_lhs (bndret, retbnd); gsi_insert_after (gsi, bndret, GSI_CONTINUE_LINKING); } + /* Split function at SPLIT_POINT. */ static void -split_function (struct split_point *split_point) +split_function (basic_block return_bb, struct split_point *split_point, + bool add_tsan_func_exit) { vec args_to_pass = vNULL; bitmap args_to_skip; tree parm; int num = 0; cgraph_node *node, *cur_node = cgraph_node::get (current_function_decl); - basic_block return_bb = find_return_bb (); basic_block call_bb; - gcall *call; + gcall *call, *tsan_func_exit_call = NULL; edge e; edge_iterator ei; tree retval = NULL, real_retval = NULL, retbnd = NULL; @@ -1509,11 +1542,18 @@ split_function (struct split_point *split_point) || DECL_BY_REFERENCE (DECL_RESULT (current_function_decl)))) gimple_call_set_return_slot_opt (call, true); + if (add_tsan_func_exit) + tsan_func_exit_call = gimple_build_call_internal (IFN_TSAN_FUNC_EXIT, 0); + /* Update return value. This is bit tricky. When we do not return, do nothing. When we return we might need to update return_bb or produce a new return statement. */ if (!split_part_return_p) - gsi_insert_after (&gsi, call, GSI_NEW_STMT); + { + gsi_insert_after (&gsi, call, GSI_NEW_STMT); + if (tsan_func_exit_call) + gsi_insert_after (&gsi, tsan_func_exit_call, GSI_NEW_STMT); + } else { e = make_edge (call_bb, return_bb, @@ -1617,6 +1657,8 @@ split_function (struct split_point *split_point) } else gsi_insert_after (&gsi, call, GSI_NEW_STMT); + if (tsan_func_exit_call) + gsi_insert_after (&gsi, tsan_func_exit_call, GSI_NEW_STMT); } /* We don't use return block (there is either no return in function or multiple of them). So create new basic block with return statement. @@ -1659,6 +1701,8 @@ split_function (struct split_point *split_point) /* Build bndret call to obtain returned bounds. */ if (retbnd) insert_bndret_call_after (retbnd, retval, &gsi); + if (tsan_func_exit_call) + gsi_insert_after (&gsi, tsan_func_exit_call, GSI_NEW_STMT); ret = gimple_build_return (retval); gsi_insert_after (&gsi, ret, GSI_NEW_STMT); } @@ -1767,6 +1811,8 @@ execute_split_functions (void) /* Compute local info about basic blocks and determine function size/time. 
*/ bb_info_vec.safe_grow_cleared (last_basic_block_for_fn (cfun) + 1); memset (&best_split_point, 0, sizeof (best_split_point)); + basic_block return_bb = find_return_bb (); + int tsan_exit_found = -1; FOR_EACH_BB_FN (bb, cfun) { int time = 0; @@ -1793,16 +1839,37 @@ execute_split_functions (void) freq, this_size, this_time); print_gimple_stmt (dump_file, stmt, 0, 0); } + + if ((flag_sanitize & SANITIZE_THREAD) + && is_gimple_call (stmt) + && gimple_call_internal_p (stmt) + && gimple_call_internal_fn (stmt) == IFN_TSAN_FUNC_EXIT) + { + /* We handle TSAN_FUNC_EXIT for splitting either in the + return_bb, or in its immediate predecessors. */ + if ((bb != return_bb && !find_edge (bb, return_bb)) + || (tsan_exit_found != -1 + && tsan_exit_found != (bb != return_bb))) + { + if (dump_file) + fprintf (dump_file, "Not splitting: TSAN_FUNC_EXIT" + " in unexpected basic block.\n"); + BITMAP_FREE (forbidden_dominators); + bb_info_vec.release (); + return 0; + } + tsan_exit_found = bb != return_bb; + } } overall_time += time; overall_size += size; bb_info_vec[bb->index].time = time; bb_info_vec[bb->index].size = size; } - find_split_points (overall_time, overall_size); + find_split_points (return_bb, overall_time, overall_size); if (best_split_point.split_bbs) { - split_function (&best_split_point); + split_function (return_bb, &best_split_point, tsan_exit_found == 1); BITMAP_FREE (best_split_point.ssa_names_to_pass); BITMAP_FREE (best_split_point.split_bbs); todo = TODO_update_ssa | TODO_cleanup_cfg; diff --git a/contrib/gcc-5.0/gcc/lra-constraints.c b/contrib/gcc-5.0/gcc/lra-constraints.c index 0ddd842dee..57d731a556 100644 --- a/contrib/gcc-5.0/gcc/lra-constraints.c +++ b/contrib/gcc-5.0/gcc/lra-constraints.c @@ -1074,10 +1074,9 @@ static bool check_and_process_move (bool *change_p, bool *sec_mem_p ATTRIBUTE_UNUSED) { int sregno, dregno; - rtx dest, src, dreg, sreg, old_sreg, new_reg, scratch_reg; + rtx dest, src, dreg, sreg, new_reg, scratch_reg; rtx_insn *before; enum reg_class dclass, sclass, secondary_class; - machine_mode sreg_mode; secondary_reload_info sri; lra_assert (curr_insn_set != NULL_RTX); @@ -1101,8 +1100,6 @@ check_and_process_move (bool *change_p, bool *sec_mem_p ATTRIBUTE_UNUSED) were a right class for the pseudo, secondary_... hooks usually are not define for ALL_REGS. */ return false; - sreg_mode = GET_MODE (sreg); - old_sreg = sreg; if (REG_P (sreg)) sclass = get_reg_class (REGNO (sreg)); if (sclass == ALL_REGS) @@ -1161,9 +1158,9 @@ check_and_process_move (bool *change_p, bool *sec_mem_p ATTRIBUTE_UNUSED) sri.icode = CODE_FOR_nothing; sri.extra_cost = 0; secondary_class - = (enum reg_class) targetm.secondary_reload (true, sreg, + = (enum reg_class) targetm.secondary_reload (true, src, (reg_class_t) dclass, - sreg_mode, &sri); + GET_MODE (src), &sri); /* Check the target hook consistency. 
*/ lra_assert ((secondary_class == NO_REGS && sri.icode == CODE_FOR_nothing) @@ -1179,14 +1176,12 @@ check_and_process_move (bool *change_p, bool *sec_mem_p ATTRIBUTE_UNUSED) *change_p = true; new_reg = NULL_RTX; if (secondary_class != NO_REGS) - new_reg = lra_create_new_reg_with_unique_value (sreg_mode, NULL_RTX, + new_reg = lra_create_new_reg_with_unique_value (GET_MODE (src), NULL_RTX, secondary_class, "secondary"); start_sequence (); - if (old_sreg != sreg) - sreg = copy_rtx (sreg); if (sri.icode == CODE_FOR_nothing) - lra_emit_move (new_reg, sreg); + lra_emit_move (new_reg, src); else { enum reg_class scratch_class; @@ -1197,18 +1192,13 @@ check_and_process_move (bool *change_p, bool *sec_mem_p ATTRIBUTE_UNUSED) (insn_data[sri.icode].operand[2].mode, NULL_RTX, scratch_class, "scratch")); emit_insn (GEN_FCN (sri.icode) (new_reg != NULL_RTX ? new_reg : dest, - sreg, scratch_reg)); + src, scratch_reg)); } before = get_insns (); end_sequence (); lra_process_new_insns (curr_insn, before, NULL, "Inserting the move"); if (new_reg != NULL_RTX) - { - if (GET_CODE (src) == SUBREG) - SUBREG_REG (src) = new_reg; - else - SET_SRC (curr_insn_set) = new_reg; - } + SET_SRC (curr_insn_set) = new_reg; else { if (lra_dump_file != NULL) diff --git a/contrib/gcc-5.0/gcc/lra-lives.c b/contrib/gcc-5.0/gcc/lra-lives.c index 5d759ca73d..9dfffb6f28 100644 --- a/contrib/gcc-5.0/gcc/lra-lives.c +++ b/contrib/gcc-5.0/gcc/lra-lives.c @@ -636,12 +636,8 @@ check_pseudos_live_through_calls (int regno) if (! sparseset_bit_p (pseudos_live_through_calls, regno)) return; sparseset_clear_bit (pseudos_live_through_calls, regno); - bool actual_call_used_reg_set_available_p - = !hard_reg_set_empty_p (lra_reg_info[regno].actual_call_used_reg_set); IOR_HARD_REG_SET (lra_reg_info[regno].conflict_hard_regs, - (actual_call_used_reg_set_available_p - ? lra_reg_info[regno].actual_call_used_reg_set - : call_used_reg_set)); + call_used_reg_set); for (hr = 0; hr < FIRST_PSEUDO_REGISTER; hr++) if (HARD_REGNO_CALL_PART_CLOBBERED (hr, PSEUDO_REGNO_MODE (regno))) diff --git a/contrib/gcc-5.0/gcc/lra.c b/contrib/gcc-5.0/gcc/lra.c index 727a70e063..269a0f14f7 100644 --- a/contrib/gcc-5.0/gcc/lra.c +++ b/contrib/gcc-5.0/gcc/lra.c @@ -1633,7 +1633,8 @@ lra_update_insn_regno_info (rtx_insn *insn) lra_insn_recog_data_t data; struct lra_static_insn_data *static_data; enum rtx_code code; - + rtx link; + if (! INSN_P (insn)) return; data = lra_get_insn_recog_data (insn); @@ -1648,6 +1649,18 @@ lra_update_insn_regno_info (rtx_insn *insn) if ((code = GET_CODE (PATTERN (insn))) == CLOBBER || code == USE) add_regs_to_insn_regno_info (data, XEXP (PATTERN (insn), 0), uid, code == USE ? OP_IN : OP_OUT, false); + if (CALL_P (insn)) + /* On some targets call insns can refer to pseudos in memory in + CALL_INSN_FUNCTION_USAGE list. Process them in order to + consider their occurrences in calls for different + transformations (e.g. inheritance) with given pseudos. */ + for (link = CALL_INSN_FUNCTION_USAGE (insn); + link != NULL_RTX; + link = XEXP (link, 1)) + if (((code = GET_CODE (XEXP (link, 0))) == USE || code == CLOBBER) + && MEM_P (XEXP (XEXP (link, 0), 0))) + add_regs_to_insn_regno_info (data, XEXP (XEXP (link, 0), 0), uid, + code == USE ? 
OP_IN : OP_OUT, false); if (NONDEBUG_INSN_P (insn)) setup_insn_reg_info (data, freq); } diff --git a/contrib/gcc-5.0/gcc/lto-cgraph.c b/contrib/gcc-5.0/gcc/lto-cgraph.c index c875fed1db..088de86064 100644 --- a/contrib/gcc-5.0/gcc/lto-cgraph.c +++ b/contrib/gcc-5.0/gcc/lto-cgraph.c @@ -574,6 +574,7 @@ lto_output_node (struct lto_simple_output_block *ob, struct cgraph_node *node, bp_pack_value (&bp, node->icf_merged, 1); bp_pack_value (&bp, node->nonfreeing_fn, 1); bp_pack_value (&bp, node->thunk.thunk_p, 1); + bp_pack_value (&bp, node->parallelized_function, 1); bp_pack_enum (&bp, ld_plugin_symbol_resolution, LDPR_NUM_KNOWN, node->resolution); bp_pack_value (&bp, node->instrumentation_clone, 1); @@ -1209,6 +1210,7 @@ input_overwrite_node (struct lto_file_decl_data *file_data, node->icf_merged = bp_unpack_value (bp, 1); node->nonfreeing_fn = bp_unpack_value (bp, 1); node->thunk.thunk_p = bp_unpack_value (bp, 1); + node->parallelized_function = bp_unpack_value (bp, 1); node->resolution = bp_unpack_enum (bp, ld_plugin_symbol_resolution, LDPR_NUM_KNOWN); node->instrumentation_clone = bp_unpack_value (bp, 1); diff --git a/contrib/gcc-5.0/gcc/omp-low.c b/contrib/gcc-5.0/gcc/omp-low.c index 2d64a74e3e..80bddf059b 100644 --- a/contrib/gcc-5.0/gcc/omp-low.c +++ b/contrib/gcc-5.0/gcc/omp-low.c @@ -1564,6 +1564,7 @@ finalize_task_copyfn (gomp_task *task_stmt) /* Inform the callgraph about the new function. */ cgraph_node::add_new_function (child_fn, false); + cgraph_node::get (child_fn)->parallelized_function = 1; } /* Destroy a omp_context data structures. Called through the splay tree @@ -1580,10 +1581,12 @@ delete_omp_context (splay_tree_value value) splay_tree_delete (ctx->field_map); if (ctx->sfield_map) splay_tree_delete (ctx->sfield_map); + /* Reduction map is copied to nested contexts, so only delete it in the + owner. */ if (ctx->reduction_map - /* Shared over several omp_contexts. */ - && (ctx->outer == NULL - || ctx->reduction_map != ctx->outer->reduction_map)) + && gimple_code (ctx->stmt) == GIMPLE_OMP_TARGET + && is_gimple_omp_offloaded (ctx->stmt) + && is_gimple_omp_oacc (ctx->stmt)) splay_tree_delete (ctx->reduction_map); /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before @@ -5567,6 +5570,7 @@ expand_omp_taskreg (struct omp_region *region) /* Inform the callgraph about the new function. */ DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties; cgraph_node::add_new_function (child_fn, true); + cgraph_node::get (child_fn)->parallelized_function = 1; /* Fix the callgraph edges for child_cfun. Those for cfun will be fixed in a following pass. */ @@ -9103,14 +9107,11 @@ expand_omp_target (struct omp_region *region) } gimple g; - vec *args; /* The maximum number used by any start_ix, without varargs. */ - unsigned int argcnt = 11; - - vec_alloc (args, argcnt); - args->quick_push (device); + auto_vec args; + args.quick_push (device); if (offloaded) - args->quick_push (build_fold_addr_expr (child_fn)); + args.quick_push (build_fold_addr_expr (child_fn)); switch (start_ix) { case BUILT_IN_GOMP_TARGET: @@ -9118,7 +9119,7 @@ expand_omp_target (struct omp_region *region) case BUILT_IN_GOMP_TARGET_UPDATE: /* This const void * is part of the current ABI, but we're not actually using it. 
*/ - args->quick_push (build_zero_cst (ptr_type_node)); + args.quick_push (build_zero_cst (ptr_type_node)); break; case BUILT_IN_GOACC_DATA_START: case BUILT_IN_GOACC_ENTER_EXIT_DATA: @@ -9128,10 +9129,10 @@ expand_omp_target (struct omp_region *region) default: gcc_unreachable (); } - args->quick_push (t1); - args->quick_push (t2); - args->quick_push (t3); - args->quick_push (t4); + args.quick_push (t1); + args.quick_push (t2); + args.quick_push (t3); + args.quick_push (t4); switch (start_ix) { case BUILT_IN_GOACC_DATA_START: @@ -9164,9 +9165,9 @@ expand_omp_target (struct omp_region *region) t_vector_length = fold_convert_loc (OMP_CLAUSE_LOCATION (c), integer_type_node, OMP_CLAUSE_VECTOR_LENGTH_EXPR (c)); - args->quick_push (t_num_gangs); - args->quick_push (t_num_workers); - args->quick_push (t_vector_length); + args.quick_push (t_num_gangs); + args.quick_push (t_num_workers); + args.quick_push (t_vector_length); } /* FALLTHRU */ case BUILT_IN_GOACC_ENTER_EXIT_DATA: @@ -9188,13 +9189,13 @@ expand_omp_target (struct omp_region *region) integer_type_node, OMP_CLAUSE_ASYNC_EXPR (c)); - args->quick_push (t_async); + args.quick_push (t_async); /* Save the index, and... */ - t_wait_idx = args->length (); + t_wait_idx = args.length (); /* ... push a default value. */ - args->quick_push (fold_convert_loc (gimple_location (entry_stmt), - integer_type_node, - integer_zero_node)); + args.quick_push (fold_convert_loc (gimple_location (entry_stmt), + integer_type_node, + integer_zero_node)); c = find_omp_clause (clauses, OMP_CLAUSE_WAIT); if (c) { @@ -9204,19 +9205,19 @@ expand_omp_target (struct omp_region *region) { if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_WAIT) { - args->safe_push (fold_convert_loc (OMP_CLAUSE_LOCATION (c), - integer_type_node, - OMP_CLAUSE_WAIT_EXPR (c))); + args.safe_push (fold_convert_loc (OMP_CLAUSE_LOCATION (c), + integer_type_node, + OMP_CLAUSE_WAIT_EXPR (c))); n++; } } /* Now that we know the number, replace the default value. */ - args->ordered_remove (t_wait_idx); - args->quick_insert (t_wait_idx, - fold_convert_loc (gimple_location (entry_stmt), - integer_type_node, - build_int_cst (integer_type_node, n))); + args.ordered_remove (t_wait_idx); + args.quick_insert (t_wait_idx, + fold_convert_loc (gimple_location (entry_stmt), + integer_type_node, + build_int_cst (integer_type_node, n))); } } break; @@ -9224,8 +9225,7 @@ expand_omp_target (struct omp_region *region) gcc_unreachable (); } - g = gimple_build_call_vec (builtin_decl_explicit (start_ix), *args); - args->release (); + g = gimple_build_call_vec (builtin_decl_explicit (start_ix), args); gimple_set_location (g, gimple_location (entry_stmt)); gsi_insert_before (&gsi, g, GSI_SAME_STMT); if (!offloaded) diff --git a/contrib/gcc-5.0/gcc/recog.c b/contrib/gcc-5.0/gcc/recog.c index 7b5ca8b9f1..a9d3b1f779 100644 --- a/contrib/gcc-5.0/gcc/recog.c +++ b/contrib/gcc-5.0/gcc/recog.c @@ -2773,8 +2773,12 @@ constrain_operands (int strict, alternative_mask alternatives) /* Every memory operand can be reloaded to fit. */ && ((strict < 0 && MEM_P (op)) /* Before reload, accept what reload can turn - into mem. */ + into a mem. */ || (strict < 0 && CONSTANT_P (op)) + /* Before reload, accept a pseudo, + since LRA can turn it into a mem. 
*/ + || (strict < 0 && targetm.lra_p () && REG_P (op) + && REGNO (op) >= FIRST_PSEUDO_REGISTER) /* During reload, accept a pseudo */ || (reload_in_progress && REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER))) diff --git a/contrib/gcc-5.0/gcc/tree-chrec.c b/contrib/gcc-5.0/gcc/tree-chrec.c index 84ba30e4e9..b599c2c3e5 100644 --- a/contrib/gcc-5.0/gcc/tree-chrec.c +++ b/contrib/gcc-5.0/gcc/tree-chrec.c @@ -78,8 +78,8 @@ chrec_fold_poly_cst (enum tree_code code, gcc_assert (poly); gcc_assert (cst); gcc_assert (TREE_CODE (poly) == POLYNOMIAL_CHREC); - gcc_assert (!is_not_constant_evolution (cst)); - gcc_assert (type == chrec_type (poly)); + gcc_checking_assert (!is_not_constant_evolution (cst)); + gcc_checking_assert (useless_type_conversion_p (type, chrec_type (poly))); switch (code) { @@ -124,10 +124,11 @@ chrec_fold_plus_poly_poly (enum tree_code code, gcc_assert (TREE_CODE (poly0) == POLYNOMIAL_CHREC); gcc_assert (TREE_CODE (poly1) == POLYNOMIAL_CHREC); if (POINTER_TYPE_P (chrec_type (poly0))) - gcc_assert (ptrofftype_p (chrec_type (poly1))); + gcc_checking_assert (ptrofftype_p (chrec_type (poly1)) + && useless_type_conversion_p (type, chrec_type (poly0))); else - gcc_assert (chrec_type (poly0) == chrec_type (poly1)); - gcc_assert (type == chrec_type (poly0)); + gcc_checking_assert (useless_type_conversion_p (type, chrec_type (poly0)) + && useless_type_conversion_p (type, chrec_type (poly1))); /* {a, +, b}_1 + {c, +, d}_2 -> {{a, +, b}_1 + c, +, d}_2, @@ -208,8 +209,8 @@ chrec_fold_multiply_poly_poly (tree type, gcc_assert (poly1); gcc_assert (TREE_CODE (poly0) == POLYNOMIAL_CHREC); gcc_assert (TREE_CODE (poly1) == POLYNOMIAL_CHREC); - gcc_assert (chrec_type (poly0) == chrec_type (poly1)); - gcc_assert (type == chrec_type (poly0)); + gcc_checking_assert (useless_type_conversion_p (type, chrec_type (poly0)) + && useless_type_conversion_p (type, chrec_type (poly1))); /* {a, +, b}_1 * {c, +, d}_2 -> {c*{a, +, b}_1, +, d}_2, {a, +, b}_2 * {c, +, d}_1 -> {a*{c, +, d}_1, +, b}_2, @@ -1352,7 +1353,7 @@ chrec_convert_1 (tree type, tree chrec, gimple at_stmt, return chrec; ct = chrec_type (chrec); - if (ct == type) + if (useless_type_conversion_p (type, ct)) return chrec; if (!evolution_function_is_affine_p (chrec)) diff --git a/contrib/gcc-5.0/gcc/tree-data-ref.h b/contrib/gcc-5.0/gcc/tree-data-ref.h index 3c4569072b..edb3b562b8 100644 --- a/contrib/gcc-5.0/gcc/tree-data-ref.h +++ b/contrib/gcc-5.0/gcc/tree-data-ref.h @@ -100,66 +100,7 @@ typedef int *lambda_vector; all vectors are the same length). */ typedef lambda_vector *lambda_matrix; -/* Each vector of the access matrix represents a linear access - function for a subscript. First elements correspond to the - leftmost indices, ie. for a[i][j] the first vector corresponds to - the subscript in "i". The elements of a vector are relative to - the loop nests in which the data reference is considered, - i.e. the vector is relative to the SCoP that provides the context - in which this data reference occurs. 
- For example, in - - | loop_1 - | loop_2 - | a[i+3][2*j+n-1] - - if "i" varies in loop_1 and "j" varies in loop_2, the access - matrix with respect to the loop nest {loop_1, loop_2} is: - - | loop_1 loop_2 param_n cst - | 1 0 0 3 - | 0 2 1 -1 - - whereas the access matrix with respect to loop_2 considers "i" as - a parameter: - - | loop_2 param_i param_n cst - | 0 1 0 3 - | 2 0 1 -1 -*/ -struct access_matrix -{ - vec loop_nest; - int nb_induction_vars; - vec parameters; - vec *matrix; -}; - -#define AM_LOOP_NEST(M) (M)->loop_nest -#define AM_NB_INDUCTION_VARS(M) (M)->nb_induction_vars -#define AM_PARAMETERS(M) (M)->parameters -#define AM_MATRIX(M) (M)->matrix -#define AM_NB_PARAMETERS(M) (AM_PARAMETERS (M)).length () -#define AM_CONST_COLUMN_INDEX(M) (AM_NB_INDUCTION_VARS (M) + AM_NB_PARAMETERS (M)) -#define AM_NB_COLUMNS(M) (AM_NB_INDUCTION_VARS (M) + AM_NB_PARAMETERS (M) + 1) -#define AM_GET_SUBSCRIPT_ACCESS_VECTOR(M, I) AM_MATRIX (M)[I] -#define AM_GET_ACCESS_MATRIX_ELEMENT(M, I, J) AM_GET_SUBSCRIPT_ACCESS_VECTOR (M, I)[J] - -/* Return the column in the access matrix of LOOP_NUM. */ - -static inline int -am_vector_index_for_loop (struct access_matrix *access_matrix, int loop_num) -{ - int i; - loop_p l; - - for (i = 0; AM_LOOP_NEST (access_matrix).iterate (i, &l); i++) - if (l->num == loop_num) - return i; - - gcc_unreachable (); -} struct data_reference { @@ -183,9 +124,6 @@ struct data_reference /* Alias information for the data reference. */ struct dr_alias alias; - - /* Matrix representation for the data access functions. */ - struct access_matrix *access_matrix; }; #define DR_STMT(DR) (DR)->stmt @@ -202,7 +140,6 @@ struct data_reference #define DR_STEP(DR) (DR)->innermost.step #define DR_PTR_INFO(DR) (DR)->alias.ptr_info #define DR_ALIGNED_TO(DR) (DR)->innermost.aligned_to -#define DR_ACCESS_MATRIX(DR) (DR)->access_matrix typedef struct data_reference *data_reference_p; @@ -560,6 +497,7 @@ lambda_vector_gcd (lambda_vector vector, int size) static inline lambda_vector lambda_vector_new (int size) { + /* ??? We shouldn't abuse the GC allocator here. */ return ggc_cleared_vec_alloc (size); } @@ -611,11 +549,10 @@ lambda_matrix_new (int m, int n, struct obstack *lambda_obstack) lambda_matrix mat; int i; - mat = (lambda_matrix) obstack_alloc (lambda_obstack, - sizeof (lambda_vector *) * m); + mat = XOBNEWVEC (lambda_obstack, lambda_vector, m); for (i = 0; i < m; i++) - mat[i] = lambda_vector_new (n); + mat[i] = XOBNEWVEC (lambda_obstack, int, n); return mat; } diff --git a/contrib/gcc-5.0/gcc/tree-parloops.c b/contrib/gcc-5.0/gcc/tree-parloops.c index fbb9eebddc..62a6444e66 100644 --- a/contrib/gcc-5.0/gcc/tree-parloops.c +++ b/contrib/gcc-5.0/gcc/tree-parloops.c @@ -75,6 +75,9 @@ along with GCC; see the file COPYING3. If not see #include "tree-parloops.h" #include "omp-low.h" #include "tree-nested.h" +#include "plugin-api.h" +#include "ipa-ref.h" +#include "cgraph.h" /* This pass tries to distribute iterations of loops into several threads. The implementation is straightforward -- for each loop we test whether its @@ -1422,21 +1425,14 @@ separate_decls_in_region (edge entry, edge exit, } } -/* Bitmap containing uids of functions created by parallelization. We cannot - allocate it from the default obstack, as it must live across compilation - of several functions; we make it gc allocated instead. */ - -static GTY(()) bitmap parallelized_functions; - -/* Returns true if FN was created by create_loop_fn. */ +/* Returns true if FN was created to run in parallel. 
*/ bool -parallelized_function_p (tree fn) +parallelized_function_p (tree fndecl) { - if (!parallelized_functions || !DECL_ARTIFICIAL (fn)) - return false; - - return bitmap_bit_p (parallelized_functions, DECL_UID (fn)); + cgraph_node *node = cgraph_node::get (fndecl); + gcc_assert (node != NULL); + return node->parallelized_function; } /* Creates and returns an empty function that will receive the body of @@ -1459,10 +1455,6 @@ create_loop_fn (location_t loc) type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE); decl = build_decl (loc, FUNCTION_DECL, name, type); - if (!parallelized_functions) - parallelized_functions = BITMAP_GGC_ALLOC (); - bitmap_set_bit (parallelized_functions, DECL_UID (decl)); - TREE_STATIC (decl) = 1; TREE_USED (decl) = 1; DECL_ARTIFICIAL (decl) = 1; @@ -2153,7 +2145,7 @@ try_create_reduction_list (loop_p loop, primitives. Returns true if some loop was parallelized, false otherwise. */ -bool +static bool parallelize_loops (void) { unsigned n_threads = flag_tree_parallelize_loops; @@ -2314,6 +2306,3 @@ make_pass_parallelize_loops (gcc::context *ctxt) { return new pass_parallelize_loops (ctxt); } - - -#include "gt-tree-parloops.h" diff --git a/contrib/gcc-5.0/gcc/tree-parloops.h b/contrib/gcc-5.0/gcc/tree-parloops.h index 3256446942..d71f0a4e55 100644 --- a/contrib/gcc-5.0/gcc/tree-parloops.h +++ b/contrib/gcc-5.0/gcc/tree-parloops.h @@ -21,6 +21,5 @@ along with GCC; see the file COPYING3. If not see #define GCC_TREE_PARLOOPS_H extern bool parallelized_function_p (tree); -extern bool parallelize_loops (void); #endif /* GCC_TREE_PARLOOPS_H */ diff --git a/contrib/gcc-5.0/gcc/tree-sra.c b/contrib/gcc-5.0/gcc/tree-sra.c index a49e9504e1..91b72fbdc8 100644 --- a/contrib/gcc-5.0/gcc/tree-sra.c +++ b/contrib/gcc-5.0/gcc/tree-sra.c @@ -5109,7 +5109,7 @@ ipa_sra_preliminary_function_checks (struct cgraph_node *node) return false; } - if ((DECL_COMDAT (node->decl) || DECL_EXTERNAL (node->decl)) + if ((DECL_ONE_ONLY (node->decl) || DECL_EXTERNAL (node->decl)) && inline_summaries->get (node)->size >= MAX_INLINE_INSNS_AUTO) { if (dump_file) diff --git a/contrib/gcc-5.0/gcc/tree-ssa-coalesce.c b/contrib/gcc-5.0/gcc/tree-ssa-coalesce.c index dd6b9c04f8..1afeefef2e 100644 --- a/contrib/gcc-5.0/gcc/tree-ssa-coalesce.c +++ b/contrib/gcc-5.0/gcc/tree-ssa-coalesce.c @@ -59,7 +59,6 @@ along with GCC; see the file COPYING3. If not see #include "tree-ssa-live.h" #include "tree-ssa-coalesce.h" #include "diagnostic-core.h" -#include "timevar.h" /* This set of routines implements a coalesce_list. This is an object which @@ -1122,8 +1121,8 @@ create_outofssa_var_map (coalesce_list_p cl, bitmap used_in_copy) /* Attempt to coalesce ssa versions X and Y together using the partition - mapping in MAP and checking conflicts in GRAPH if not NULL. - Output any debug info to DEBUG, if it is nun-NULL. */ + mapping in MAP and checking conflicts in GRAPH. Output any debug info to + DEBUG, if it is nun-NULL. */ static inline bool attempt_coalesce (var_map map, ssa_conflicts_p graph, int x, int y, @@ -1155,8 +1154,7 @@ attempt_coalesce (var_map map, ssa_conflicts_p graph, int x, int y, fprintf (debug, " [map: %d, %d] ", p1, p2); - if (!graph - || !ssa_conflicts_test_p (graph, p1, p2)) + if (!ssa_conflicts_test_p (graph, p1, p2)) { var1 = partition_to_var (map, p1); var2 = partition_to_var (map, p2); @@ -1170,13 +1168,10 @@ attempt_coalesce (var_map map, ssa_conflicts_p graph, int x, int y, /* z is the new combined partition. 
Remove the other partition from the list, and merge the conflicts. */ - if (graph) - { - if (z == p1) - ssa_conflicts_merge (graph, p1, p2); - else - ssa_conflicts_merge (graph, p2, p1); - } + if (z == p1) + ssa_conflicts_merge (graph, p1, p2); + else + ssa_conflicts_merge (graph, p2, p1); if (debug) fprintf (debug, ": Success -> %d\n", z); @@ -1190,16 +1185,24 @@ attempt_coalesce (var_map map, ssa_conflicts_p graph, int x, int y, } -/* Perform all abnormal coalescing on MAP. - Debug output is sent to DEBUG if it is non-NULL. */ +/* Attempt to Coalesce partitions in MAP which occur in the list CL using + GRAPH. Debug output is sent to DEBUG if it is non-NULL. */ static void -perform_abnormal_coalescing (var_map map, FILE *debug) +coalesce_partitions (var_map map, ssa_conflicts_p graph, coalesce_list_p cl, + FILE *debug) { + int x = 0, y = 0; + tree var1, var2; + int cost; basic_block bb; edge e; edge_iterator ei; + /* First, coalesce all the copies across abnormal edges. These are not placed + in the coalesce list because they do not need to be sorted, and simply + consume extra memory/compilation time in large programs. */ + FOR_EACH_BB_FN (bb, cfun) { FOR_EACH_EDGE (e, ei, bb->preds) @@ -1223,23 +1226,11 @@ perform_abnormal_coalescing (var_map map, FILE *debug) if (debug) fprintf (debug, "Abnormal coalesce: "); - if (!attempt_coalesce (map, NULL, v1, v2, debug)) + if (!attempt_coalesce (map, graph, v1, v2, debug)) fail_abnormal_edge_coalesce (v1, v2); } } } -} - -/* Attempt to Coalesce partitions in MAP which occur in the list CL using - GRAPH. Debug output is sent to DEBUG if it is non-NULL. */ - -static void -coalesce_partitions (var_map map, ssa_conflicts_p graph, coalesce_list_p cl, - FILE *debug) -{ - int x = 0, y = 0; - tree var1, var2; - int cost; /* Now process the items in the coalesce list. */ @@ -1294,11 +1285,6 @@ coalesce_ssa_name (void) var_map map; unsigned int i; -#ifdef ENABLE_CHECKING - /* Verify we can perform all must coalesces. */ - verify_ssa_coalescing (); -#endif - cl = create_coalesce_list (); map = create_outofssa_var_map (cl, used_in_copies); @@ -1355,15 +1341,6 @@ coalesce_ssa_name (void) return map; } - /* First, coalesce all the copies across abnormal edges. These are not placed - in the coalesce list because they do not need to be sorted, and simply - consume extra memory/compilation time in large programs. - Performing abnormal coalescing also needs no live/conflict computation - because it must succeed (but we lose checking that it indeed does). - Still for PR63155 this reduces memory usage from 10GB to zero. */ - perform_abnormal_coalescing (map, - ((dump_flags & TDF_DETAILS) ? dump_file : NULL)); - if (dump_file && (dump_flags & TDF_DETAILS)) dump_var_map (dump_file, map); @@ -1394,100 +1371,11 @@ coalesce_ssa_name (void) /* Now coalesce everything in the list. */ coalesce_partitions (map, graph, cl, - ((dump_flags & TDF_DETAILS) ? dump_file : NULL)); + ((dump_flags & TDF_DETAILS) ? dump_file + : NULL)); delete_coalesce_list (cl); ssa_conflicts_delete (graph); return map; } - - -/* Helper for verify_ssa_coalescing. Operates in two modes: - 1) scan the function for coalesces we must perform and store the - SSA names participating in USED_IN_COPIES - 2) scan the function for coalesces and verify they can be performed - under the constraints of GRAPH updating MAP in the process - FIXME: This can be extended to verify that the virtual operands - form a factored use-def chain (coalescing the active virtual use - with the virtual def at virtual def point). 
*/ - -static void -verify_ssa_coalescing_worker (bitmap used_in_copies, - var_map map, ssa_conflicts_p graph) -{ - basic_block bb; - - FOR_EACH_BB_FN (bb, cfun) - { - edge e; - edge_iterator ei; - - FOR_EACH_EDGE (e, ei, bb->preds) - if (e->flags & EDGE_ABNORMAL) - { - gphi_iterator gsi; - for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); - gsi_next (&gsi)) - { - gphi *phi = gsi.phi (); - tree arg = PHI_ARG_DEF (phi, e->dest_idx); - if (SSA_NAME_IS_DEFAULT_DEF (arg) - && (!SSA_NAME_VAR (arg) - || TREE_CODE (SSA_NAME_VAR (arg)) != PARM_DECL)) - continue; - - tree res = PHI_RESULT (phi); - - int v1 = SSA_NAME_VERSION (res); - int v2 = SSA_NAME_VERSION (arg); - if (used_in_copies) - { - bitmap_set_bit (used_in_copies, v1); - bitmap_set_bit (used_in_copies, v2); - } - else - { - int p1 = var_to_partition (map, res); - int p2 = var_to_partition (map, arg); - if (p1 != p2) - { - if (ssa_conflicts_test_p (graph, p1, p2)) - fail_abnormal_edge_coalesce (v1, v2); - int z = var_union (map, - partition_to_var (map, p1), - partition_to_var (map, p2)); - if (z == p1) - ssa_conflicts_merge (graph, p1, p2); - else - ssa_conflicts_merge (graph, p2, p1); - } - } - } - } - } -} - -/* Verify that we can coalesce SSA names we must coalesce. */ - -DEBUG_FUNCTION void -verify_ssa_coalescing (void) -{ - auto_timevar tv (TV_TREE_SSA_VERIFY); - bitmap used_in_copies = BITMAP_ALLOC (NULL); - verify_ssa_coalescing_worker (used_in_copies, NULL, NULL); - if (bitmap_empty_p (used_in_copies)) - { - BITMAP_FREE (used_in_copies); - return; - } - var_map map = init_var_map (num_ssa_names); - partition_view_bitmap (map, used_in_copies, true); - BITMAP_FREE (used_in_copies); - tree_live_info_p liveinfo = calculate_live_ranges (map, false); - ssa_conflicts_p graph = build_ssa_conflict_graph (liveinfo); - delete_tree_live_info (liveinfo); - verify_ssa_coalescing_worker (NULL, map, graph); - ssa_conflicts_delete (graph); - delete_var_map (map); -} diff --git a/contrib/gcc-5.0/gcc/tree-ssa-coalesce.h b/contrib/gcc-5.0/gcc/tree-ssa-coalesce.h index 06c33bfc7e..99b188a393 100644 --- a/contrib/gcc-5.0/gcc/tree-ssa-coalesce.h +++ b/contrib/gcc-5.0/gcc/tree-ssa-coalesce.h @@ -21,6 +21,5 @@ along with GCC; see the file COPYING3. If not see #define GCC_TREE_SSA_COALESCE_H extern var_map coalesce_ssa_name (void); -extern void verify_ssa_coalescing (void); #endif /* GCC_TREE_SSA_COALESCE_H */ diff --git a/contrib/gcc-5.0/gcc/tree-ssa-loop-ch.c b/contrib/gcc-5.0/gcc/tree-ssa-loop-ch.c index d759de7952..c6441b87d6 100644 --- a/contrib/gcc-5.0/gcc/tree-ssa-loop-ch.c +++ b/contrib/gcc-5.0/gcc/tree-ssa-loop-ch.c @@ -159,7 +159,7 @@ const pass_data pass_data_ch = 0, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ - TODO_cleanup_cfg, /* todo_flags_finish */ + 0, /* todo_flags_finish */ }; class pass_ch : public gimple_opt_pass @@ -184,6 +184,7 @@ pass_ch::execute (function *fun) basic_block *bbs, *copied_bbs; unsigned n_bbs; unsigned bbs_size; + bool changed = false; loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES); @@ -291,6 +292,8 @@ pass_ch::execute (function *fun) are not now, since there was the loop exit condition. */ split_edge (loop_preheader_edge (loop)); split_edge (loop_latch_edge (loop)); + + changed = true; } update_ssa (TODO_update_ssa); @@ -298,7 +301,7 @@ pass_ch::execute (function *fun) free (copied_bbs); loop_optimizer_finalize (); - return 0; + return changed ? 
TODO_cleanup_cfg : 0; } } // anon namespace diff --git a/contrib/gcc-5.0/gcc/tree-ssa-loop-niter.c b/contrib/gcc-5.0/gcc/tree-ssa-loop-niter.c index 7f6c451c0c..fc63825904 100644 --- a/contrib/gcc-5.0/gcc/tree-ssa-loop-niter.c +++ b/contrib/gcc-5.0/gcc/tree-ssa-loop-niter.c @@ -3329,7 +3329,6 @@ maybe_lower_iteration_bound (struct loop *loop) struct nb_iter_bound *elt; bool found_exit = false; vec queue = vNULL; - vec problem_stmts = vNULL; bitmap visited; /* Collect all statements with interesting (i.e. lower than @@ -3375,7 +3374,6 @@ maybe_lower_iteration_bound (struct loop *loop) if (not_executed_last_iteration->contains (stmt)) { stmt_found = true; - problem_stmts.safe_push (stmt); break; } if (gimple_has_side_effects (stmt)) @@ -3419,53 +3417,10 @@ maybe_lower_iteration_bound (struct loop *loop) "undefined statement must be executed at the last iteration.\n"); record_niter_bound (loop, loop->nb_iterations_upper_bound - 1, false, true); - - if (warn_aggressive_loop_optimizations) - { - bool exit_warned = false; - for (elt = loop->bounds; elt; elt = elt->next) - { - if (elt->is_exit - && wi::gtu_p (elt->bound, loop->nb_iterations_upper_bound)) - { - basic_block bb = gimple_bb (elt->stmt); - edge exit_edge = EDGE_SUCC (bb, 0); - struct tree_niter_desc niter; - - if (!loop_exit_edge_p (loop, exit_edge)) - exit_edge = EDGE_SUCC (bb, 1); - - if(number_of_iterations_exit (loop, exit_edge, - &niter, false, false) - && integer_onep (niter.assumptions) - && integer_zerop (niter.may_be_zero) - && niter.niter - && TREE_CODE (niter.niter) == INTEGER_CST - && wi::ltu_p (loop->nb_iterations_upper_bound, - wi::to_widest (niter.niter))) - { - if (warning_at (gimple_location (elt->stmt), - OPT_Waggressive_loop_optimizations, - "loop exit may only be reached after undefined behavior")) - exit_warned = true; - } - } - } - - if (exit_warned && !problem_stmts.is_empty ()) - { - gimple stmt; - int index; - FOR_EACH_VEC_ELT (problem_stmts, index, stmt) - inform (gimple_location (stmt), - "possible undefined statement is here"); - } - } } BITMAP_FREE (visited); queue.release (); - problem_stmts.release (); delete not_executed_last_iteration; } diff --git a/contrib/gcc-5.0/gcc/tree-ssa-reassoc.c b/contrib/gcc-5.0/gcc/tree-ssa-reassoc.c index 2e933e769f..77640e5dd5 100644 --- a/contrib/gcc-5.0/gcc/tree-ssa-reassoc.c +++ b/contrib/gcc-5.0/gcc/tree-ssa-reassoc.c @@ -2439,26 +2439,25 @@ extract_bit_test_mask (tree exp, int prec, tree totallow, tree low, tree high, && TREE_CODE (exp) == PLUS_EXPR && TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST) { + tree ret = TREE_OPERAND (exp, 0); + STRIP_NOPS (ret); widest_int bias = wi::neg (wi::sext (wi::to_widest (TREE_OPERAND (exp, 1)), TYPE_PRECISION (TREE_TYPE (low)))); - tree tbias = wide_int_to_tree (TREE_TYPE (low), bias); + tree tbias = wide_int_to_tree (TREE_TYPE (ret), bias); if (totallowp) { *totallowp = tbias; - exp = TREE_OPERAND (exp, 0); - STRIP_NOPS (exp); - return exp; + return ret; } else if (!tree_int_cst_lt (totallow, tbias)) return NULL_TREE; + bias = wi::to_widest (tbias); bias -= wi::to_widest (totallow); if (wi::ges_p (bias, 0) && wi::lts_p (bias, prec - max)) { *mask = wi::lshift (*mask, bias); - exp = TREE_OPERAND (exp, 0); - STRIP_NOPS (exp); - return exp; + return ret; } } } diff --git a/contrib/gcc-5.0/gcc/tree-vect-data-refs.c b/contrib/gcc-5.0/gcc/tree-vect-data-refs.c index b308ac7ac6..094275e843 100644 --- a/contrib/gcc-5.0/gcc/tree-vect-data-refs.c +++ b/contrib/gcc-5.0/gcc/tree-vect-data-refs.c @@ -3845,6 +3845,20 @@ vect_get_new_vect_var 
diff --git a/contrib/gcc-5.0/gcc/tree-vect-data-refs.c b/contrib/gcc-5.0/gcc/tree-vect-data-refs.c
index b308ac7ac6..094275e843 100644
--- a/contrib/gcc-5.0/gcc/tree-vect-data-refs.c
+++ b/contrib/gcc-5.0/gcc/tree-vect-data-refs.c
@@ -3845,6 +3845,20 @@ vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name)

   return new_vect_var;
 }

+/* Duplicate ptr info and set alignment/misaligment on NAME from DR.  */
+
+static void
+vect_duplicate_ssa_name_ptr_info (tree name, data_reference *dr,
+				  stmt_vec_info stmt_info)
+{
+  duplicate_ssa_name_ptr_info (name, DR_PTR_INFO (dr));
+  unsigned int align = TYPE_ALIGN_UNIT (STMT_VINFO_VECTYPE (stmt_info));
+  int misalign = DR_MISALIGNMENT (dr);
+  if (misalign == -1)
+    mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (name));
+  else
+    set_ptr_info_alignment (SSA_NAME_PTR_INFO (name), align, misalign);
+}

 /* Function vect_create_addr_base_for_vector_ref.

@@ -3964,13 +3978,9 @@ vect_create_addr_base_for_vector_ref (gimple stmt,
   if (DR_PTR_INFO (dr)
       && TREE_CODE (addr_base) == SSA_NAME)
     {
-      duplicate_ssa_name_ptr_info (addr_base, DR_PTR_INFO (dr));
-      unsigned int align = TYPE_ALIGN_UNIT (STMT_VINFO_VECTYPE (stmt_info));
-      int misalign = DR_MISALIGNMENT (dr);
-      if (offset || byte_offset || (misalign == -1))
+      vect_duplicate_ssa_name_ptr_info (addr_base, dr, stmt_info);
+      if (offset || byte_offset)
	mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr_base));
-      else
-	set_ptr_info_alignment (SSA_NAME_PTR_INFO (addr_base), align, misalign);
     }

   if (dump_enabled_p ())
@@ -4210,7 +4220,7 @@ vect_create_data_ref_ptr (gimple stmt, tree aggr_type, struct loop *at_loop,
       aggr_ptr_init = make_ssa_name (aggr_ptr, vec_stmt);
       /* Copy the points-to information if it exists. */
       if (DR_PTR_INFO (dr))
-	duplicate_ssa_name_ptr_info (aggr_ptr_init, DR_PTR_INFO (dr));
+	vect_duplicate_ssa_name_ptr_info (aggr_ptr_init, dr, stmt_info);
       gimple_assign_set_lhs (vec_stmt, aggr_ptr_init);
       if (pe)
	{
@@ -4253,8 +4263,8 @@ vect_create_data_ref_ptr (gimple stmt, tree aggr_type, struct loop *at_loop,
       /* Copy the points-to information if it exists. */
       if (DR_PTR_INFO (dr))
	{
-	  duplicate_ssa_name_ptr_info (indx_before_incr, DR_PTR_INFO (dr));
-	  duplicate_ssa_name_ptr_info (indx_after_incr, DR_PTR_INFO (dr));
+	  vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr, stmt_info);
+	  vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr, stmt_info);
	}
       if (ptr_incr)
	*ptr_incr = incr;
@@ -4283,8 +4293,8 @@ vect_create_data_ref_ptr (gimple stmt, tree aggr_type, struct loop *at_loop,
       /* Copy the points-to information if it exists. */
       if (DR_PTR_INFO (dr))
	{
-	  duplicate_ssa_name_ptr_info (indx_before_incr, DR_PTR_INFO (dr));
-	  duplicate_ssa_name_ptr_info (indx_after_incr, DR_PTR_INFO (dr));
+	  vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr, stmt_info);
+	  vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr, stmt_info);
	}
       if (ptr_incr)
	*ptr_incr = incr;
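The new vect_duplicate_ssa_name_ptr_info helper above centralizes one convention: a DR_MISALIGNMENT of -1 means the misalignment of the data reference is unknown, while any other value is the known offset modulo the vector type's alignment. The sketch below is a standalone analogue of that bookkeeping, not GCC internals; every name in it is hypothetical.

// Standalone sketch (hypothetical names, not GCC API): the "-1 means
// unknown" misalignment convention used by the vectorizer hunks above.
#include <cassert>

struct ptr_align_info
{
  unsigned align;     // known alignment in bytes (1 when unknown)
  unsigned misalign;  // offset modulo align, only meaningful when known
  bool known;
};

static ptr_align_info
make_align_info (unsigned align, int misalign)
{
  ptr_align_info info = {1, 0, false};
  if (misalign != -1)        // mirrors the DR_MISALIGNMENT (dr) == -1 test
    {
      info.align = align;
      info.misalign = static_cast<unsigned> (misalign);
      info.known = true;
    }
  return info;
}

int
main ()
{
  assert (!make_align_info (16, -1).known);        // unknown stays unknown
  assert (make_align_info (16, 8).misalign == 8);  // known offset is kept
  return 0;
}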
diff --git a/contrib/gcc-5.0/gcc/tree-vect-generic.c b/contrib/gcc-5.0/gcc/tree-vect-generic.c
index 8233c65a86..a88b22f659 100644
--- a/contrib/gcc-5.0/gcc/tree-vect-generic.c
+++ b/contrib/gcc-5.0/gcc/tree-vect-generic.c
@@ -1417,6 +1417,57 @@ count_type_subparts (tree type)
   return VECTOR_TYPE_P (type) ? TYPE_VECTOR_SUBPARTS (type) : 1;
 }

+static tree
+do_cond (gimple_stmt_iterator *gsi, tree inner_type, tree a, tree b,
+	 tree bitpos, tree bitsize, enum tree_code code)
+{
+  if (TREE_CODE (TREE_TYPE (a)) == VECTOR_TYPE)
+    a = tree_vec_extract (gsi, inner_type, a, bitsize, bitpos);
+  if (TREE_CODE (TREE_TYPE (b)) == VECTOR_TYPE)
+    b = tree_vec_extract (gsi, inner_type, b, bitsize, bitpos);
+  tree cond = gimple_assign_rhs1 (gsi_stmt (*gsi));
+  return gimplify_build3 (gsi, code, inner_type, cond, a, b);
+}
+
+/* Expand a vector COND_EXPR to scalars, piecewise.  */
+static void
+expand_vector_scalar_condition (gimple_stmt_iterator *gsi)
+{
+  gassign *stmt = as_a <gassign *> (gsi_stmt (*gsi));
+  tree type = gimple_expr_type (stmt);
+  tree compute_type = get_compute_type (COND_EXPR, mov_optab, type);
+  machine_mode compute_mode = TYPE_MODE (compute_type);
+  gcc_assert (compute_mode != BLKmode);
+  tree lhs = gimple_assign_lhs (stmt);
+  tree rhs2 = gimple_assign_rhs2 (stmt);
+  tree rhs3 = gimple_assign_rhs3 (stmt);
+  tree new_rhs;
+
+  /* If the compute mode is not a vector mode (hence we are not decomposing
+     a BLKmode vector to smaller, hardware-supported vectors), we may want
+     to expand the operations in parallel.  */
+  if (GET_MODE_CLASS (compute_mode) != MODE_VECTOR_INT
+      && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_FLOAT
+      && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_FRACT
+      && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_UFRACT
+      && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_ACCUM
+      && GET_MODE_CLASS (compute_mode) != MODE_VECTOR_UACCUM)
+    new_rhs = expand_vector_parallel (gsi, do_cond, type, rhs2, rhs3,
+				      COND_EXPR);
+  else
+    new_rhs = expand_vector_piecewise (gsi, do_cond, type, compute_type,
+				       rhs2, rhs3, COND_EXPR);
+  if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (new_rhs)))
+    new_rhs = gimplify_build1 (gsi, VIEW_CONVERT_EXPR, TREE_TYPE (lhs),
+			       new_rhs);
+
+  /* NOTE:  We should avoid using gimple_assign_set_rhs_from_tree. One
+     way to do it is change expand_vector_operation and its callees to
+     return a tree_code, RHS1 and RHS2 instead of a tree. */
+  gimple_assign_set_rhs_from_tree (gsi, new_rhs);
+  update_stmt (gsi_stmt (*gsi));
+}
+
 /* Process one statement.  If we identify a vector operation, expand it.  */

 static void
@@ -1449,6 +1500,14 @@ expand_vector_operations_1 (gimple_stmt_iterator *gsi)
       return;
     }

+  if (code == COND_EXPR
+      && TREE_CODE (TREE_TYPE (gimple_assign_lhs (stmt))) == VECTOR_TYPE
+      && TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt))) == BLKmode)
+    {
+      expand_vector_scalar_condition (gsi);
+      return;
+    }
+
   if (code == CONSTRUCTOR
       && TREE_CODE (lhs) == SSA_NAME
       && VECTOR_MODE_P (TYPE_MODE (TREE_TYPE (lhs)))
@@ -1681,8 +1740,7 @@ const pass_data pass_data_lower_vector =
   PROP_gimple_lvec,			/* properties_provided */
   0,					/* properties_destroyed */
   0,					/* todo_flags_start */
-  ( TODO_update_ssa
-    | TODO_cleanup_cfg ),		/* todo_flags_finish */
+  TODO_update_ssa,			/* todo_flags_finish */
 };

 class pass_lower_vector : public gimple_opt_pass
diff --git a/contrib/gcc-5.0/gcc/tree-vect-stmts.c b/contrib/gcc-5.0/gcc/tree-vect-stmts.c
index 41ff80245e..e0cebc685b 100644
--- a/contrib/gcc-5.0/gcc/tree-vect-stmts.c
+++ b/contrib/gcc-5.0/gcc/tree-vect-stmts.c
@@ -6468,9 +6468,8 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
	case dr_explicit_realign:
	  {
	    tree ptr, bump;
-	    tree vs_minus_1;

-	    vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
+	    tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));

	    if (compute_in_loop)
	      msq = vect_setup_realignment (first_stmt, gsi,
@@ -6499,8 +6498,9 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
	    vect_finish_stmt_generation (stmt, new_stmt, gsi);
	    msq = new_temp;

-	    bump = size_binop (MULT_EXPR, vs_minus_1,
+	    bump = size_binop (MULT_EXPR, vs,
			       TYPE_SIZE_UNIT (elem_type));
+	    bump = size_binop (MINUS_EXPR, bump, size_one_node);
	    ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
	    new_stmt = gimple_build_assign (NULL_TREE, BIT_AND_EXPR, ptr,
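Conceptually, the new expand_vector_scalar_condition lowering above rewrites a vector COND_EXPR whose type has no vector machine mode into one scalar select per element. The self-contained program below is illustrative only, not generated code; it shows the element-wise semantics that the piecewise expansion has to preserve.

// Illustration of the element-wise select denoted by a vector COND_EXPR.
#include <array>
#include <cstdio>

template <typename T, std::size_t N>
std::array<T, N>
select_piecewise (const std::array<bool, N> &cond,
                  const std::array<T, N> &a,
                  const std::array<T, N> &b)
{
  std::array<T, N> r{};
  for (std::size_t i = 0; i < N; ++i)
    r[i] = cond[i] ? a[i] : b[i];   // one scalar COND_EXPR per element
  return r;
}

int
main ()
{
  std::array<bool, 4> m = {true, false, true, false};
  std::array<int, 4> a = {1, 2, 3, 4};
  std::array<int, 4> b = {10, 20, 30, 40};
  std::array<int, 4> r = select_piecewise (m, a, b);
  std::printf ("%d %d %d %d\n", r[0], r[1], r[2], r[3]);  // prints: 1 20 3 40
  return 0;
}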
diff --git a/contrib/gcc-5.0/gcc/tsan.c b/contrib/gcc-5.0/gcc/tsan.c
index ae89d5fdaf..ebafbb03cb 100644
--- a/contrib/gcc-5.0/gcc/tsan.c
+++ b/contrib/gcc-5.0/gcc/tsan.c
@@ -680,6 +680,10 @@ instrument_gimple (gimple_stmt_iterator *gsi)
	  && (gimple_call_fndecl (stmt)
	      != builtin_decl_implicit (BUILT_IN_TSAN_INIT)))
	{
+	  /* All functions with function call will have exit instrumented,
+	     therefore no function calls other than __tsan_func_exit
+	     shall appear in the functions.  */
+	  gimple_call_set_tail (as_a <gcall *> (stmt), false);
	  if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
	    instrument_builtin_call (gsi);
	  return true;
diff --git a/contrib/gcc-5.0/gcc/varpool.c b/contrib/gcc-5.0/gcc/varpool.c
index ce6427956d..f1439ca6ee 100644
--- a/contrib/gcc-5.0/gcc/varpool.c
+++ b/contrib/gcc-5.0/gcc/varpool.c
@@ -760,27 +760,6 @@ symbol_table::output_variables (void)
   return changed;
 }

-/* Create a new global variable of type TYPE.  */
-tree
-add_new_static_var (tree type)
-{
-  tree new_decl;
-  varpool_node *new_node;
-
-  new_decl = create_tmp_var_raw (type);
-  DECL_NAME (new_decl) = create_tmp_var_name (NULL);
-  TREE_READONLY (new_decl) = 0;
-  TREE_STATIC (new_decl) = 1;
-  TREE_USED (new_decl) = 1;
-  DECL_CONTEXT (new_decl) = NULL_TREE;
-  DECL_ABSTRACT_P (new_decl) = false;
-  lang_hooks.dup_lang_specific_decl (new_decl);
-  new_node = varpool_node::get_create (new_decl);
-  varpool_node::finalize_decl (new_decl);
-
-  return new_node->decl;
-}
-
 /* Attempt to mark ALIAS as an alias to DECL.  Return TRUE if successful.
    Extra name aliases are output whenever DECL is output.  */
diff --git a/contrib/gcc-5.0/include/partition.h b/contrib/gcc-5.0/include/partition.h
index d8b554f8f9..c39873b601 100644
--- a/contrib/gcc-5.0/include/partition.h
+++ b/contrib/gcc-5.0/include/partition.h
@@ -45,12 +45,12 @@ extern "C" {

 struct partition_elem
 {
-  /* The canonical element that represents the class containing this
-     element.  */
-  int class_element;
   /* The next element in this class.  Elements in each class form a
      circular list.  */
   struct partition_elem* next;
+  /* The canonical element that represents the class containing this
+     element.  */
+  int class_element;
   /* The number of elements in this class.  Valid only if this is the
      canonical element for its class.  */
   unsigned class_count;
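The partition.h change above only reorders fields, but on common 64-bit ABIs that reordering also removes padding: placing the pointer first lets the two 4-byte members share one 8-byte slot. The program below illustrates this under assumed sizes (8-byte pointers, 4-byte int/unsigned); the struct names are invented and only the field order mirrors the header.

// Layout illustration only; sizes assume a typical LP64 ABI.
#include <cstdio>

struct elem_old            // int, pointer, unsigned: padding after the int
{
  int class_element;
  void *next;
  unsigned class_count;
};

struct elem_new            // pointer first: the 4-byte fields pack together
{
  void *next;
  int class_element;
  unsigned class_count;
};

int
main ()
{
  std::printf ("old layout: %zu bytes, new layout: %zu bytes\n",
               sizeof (elem_old), sizeof (elem_new));   // typically 24 vs 16
  return 0;
}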
diff --git a/contrib/gcc-5.0/libcpp/lex.c b/contrib/gcc-5.0/libcpp/lex.c
index 0dc4737118..bca5629913 100644
--- a/contrib/gcc-5.0/libcpp/lex.c
+++ b/contrib/gcc-5.0/libcpp/lex.c
@@ -1400,6 +1400,9 @@ lex_number (cpp_reader *pfile, cpp_string *number,
	  NORMALIZE_STATE_UPDATE_IDNUM (nst, *cur);
	  cur++;
	}
+      /* A number can't end with a digit separator.  */
+      while (cur > pfile->buffer->cur && DIGIT_SEP (cur[-1]))
+	--cur;

       pfile->buffer->cur = cur;
     }
diff --git a/contrib/gcc-5.0/libstdc++-v3/include/bits/c++config b/contrib/gcc-5.0/libstdc++-v3/include/bits/c++config
index 46ffa1f5d0..ae3065feaa 100644
--- a/contrib/gcc-5.0/libstdc++-v3/include/bits/c++config
+++ b/contrib/gcc-5.0/libstdc++-v3/include/bits/c++config
@@ -215,7 +215,11 @@ namespace std
 #if _GLIBCXX_USE_CXX11_ABI
 namespace std
 {
-  inline namespace __cxx11 __attribute__((__abi_tag__)) { }
+  inline namespace __cxx11 __attribute__((__abi_tag__ ("cxx11"))) { }
+}
+namespace __gnu_cxx
+{
+  inline namespace __cxx11 __attribute__((__abi_tag__ ("cxx11"))) { }
 }
 # define _GLIBCXX_NAMESPACE_CXX11 __cxx11::
 # define _GLIBCXX_BEGIN_NAMESPACE_CXX11 namespace __cxx11 {
diff --git a/contrib/gcc-5.0/libstdc++-v3/include/ext/codecvt_specializations.h b/contrib/gcc-5.0/libstdc++-v3/include/ext/codecvt_specializations.h
index a24adfcfd1..34e90bdf3f 100644
--- a/contrib/gcc-5.0/libstdc++-v3/include/ext/codecvt_specializations.h
+++ b/contrib/gcc-5.0/libstdc++-v3/include/ext/codecvt_specializations.h
@@ -41,6 +41,7 @@ namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
 {
+_GLIBCXX_BEGIN_NAMESPACE_CXX11
 _GLIBCXX_BEGIN_NAMESPACE_VERSION

   /// Extension to use iconv for dealing with character encodings.
@@ -207,13 +208,15 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
   // associated fpos for the position type, all other
   // bits equivalent to the required char_traits instantiations.
   template<typename _CharT>
-    struct encoding_char_traits : public std::char_traits<_CharT>
+    struct encoding_char_traits
+    : public std::char_traits<_CharT>
     {
       typedef encoding_state			state_type;
       typedef typename std::fpos<state_type>	pos_type;
     };

 _GLIBCXX_END_NAMESPACE_VERSION
+_GLIBCXX_END_NAMESPACE_CXX11
 } // namespace
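The bits/c++config and codecvt_specializations.h hunks above both revolve around the __cxx11 inline namespace that carries the new ABI, now spelled with an explicit "cxx11" abi_tag and mirrored in __gnu_cxx. A minimal sketch of the mechanism follows, using an invented namespace rather than the library's own: the tag becomes part of the mangled name of anything declared inside the inline namespace (demanglers usually show it as [abi:cxx11]), so old-ABI and new-ABI symbols cannot be confused at link time, while ordinary code keeps naming demo::tagged_type without mentioning the inline namespace at all.

// Sketch only; "demo" and "tagged_type" are invented names.
namespace demo
{
  inline namespace __cxx11 __attribute__((__abi_tag__ ("cxx11")))
  {
    struct tagged_type { int value; };
  }
}

// Callers use the outer name; the inline namespace and its ABI tag are
// applied transparently and end up in the mangled symbol.
demo::tagged_type
make_one ()
{
  return demo::tagged_type{42};
}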
diff --git a/contrib/gcc-5.0/libstdc++-v3/include/std/shared_mutex b/contrib/gcc-5.0/libstdc++-v3/include/std/shared_mutex
index 5dcc295746..ab1b45b87a 100644
--- a/contrib/gcc-5.0/libstdc++-v3/include/std/shared_mutex
+++ b/contrib/gcc-5.0/libstdc++-v3/include/std/shared_mutex
@@ -57,10 +57,17 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
   /// shared_timed_mutex
   class shared_timed_mutex
   {
-#if defined(__GTHREADS_CXX0X)
+#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_T
     typedef chrono::system_clock	__clock_t;

-    pthread_rwlock_t	_M_rwlock;
+#ifdef PTHREAD_RWLOCK_INITIALIZER
+    pthread_rwlock_t	_M_rwlock = PTHREAD_RWLOCK_INITIALIZER;
+
+  public:
+    shared_timed_mutex() = default;
+    ~shared_timed_mutex() = default;
+#else
+    pthread_rwlock_t	_M_rwlock;

   public:
     shared_timed_mutex()
@@ -82,6 +89,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
       // Errors not handled: EBUSY, EINVAL
       _GLIBCXX_DEBUG_ASSERT(__ret == 0);
     }
+#endif

     shared_timed_mutex(const shared_timed_mutex&) = delete;
     shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;
@@ -165,12 +173,16 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     void
     lock_shared()
     {
-      int __ret = pthread_rwlock_rdlock(&_M_rwlock);
+      int __ret;
+      // We retry if we exceeded the maximum number of read locks supported by
+      // the POSIX implementation; this can result in busy-waiting, but this
+      // is okay based on the current specification of forward progress
+      // guarantees by the standard.
+      do
+	__ret = pthread_rwlock_rdlock(&_M_rwlock);
+      while (__ret == EAGAIN);
       if (__ret == EDEADLK)
	__throw_system_error(int(errc::resource_deadlock_would_occur));
-      if (__ret == EAGAIN)
-	// Maximum number of read locks has been exceeded.
-	__throw_system_error(int(errc::device_or_resource_busy));
       // Errors not handled: EINVAL
       _GLIBCXX_DEBUG_ASSERT(__ret == 0);
     }
@@ -210,11 +222,24 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
	    static_cast<long>(__ns.count())
	  };

-      int __ret = pthread_rwlock_timedrdlock(&_M_rwlock, &__ts);
-      // If the maximum number of read locks has been exceeded, or we would
-      // deadlock, we just fail to acquire the lock.  Unlike for lock(),
-      // we are not allowed to throw an exception.
-      if (__ret == ETIMEDOUT || __ret == EAGAIN || __ret == EDEADLK)
+      int __ret;
+      // Unlike for lock(), we are not allowed to throw an exception so if
+      // the maximum number of read locks has been exceeded, or we would
+      // deadlock, we just try to acquire the lock again (and will time out
+      // eventually).
+      // In cases where we would exceed the maximum number of read locks
+      // throughout the whole time until the timeout, we will fail to
+      // acquire the lock even if it would be logically free; however, this
+      // is allowed by the standard, and we made a "strong effort"
+      // (see C++14 30.4.1.4p26).
+      // For cases where the implementation detects a deadlock we
+      // intentionally block and timeout so that an early return isn't
+      // mistaken for a spurious failure, which might help users realise
+      // there is a deadlock.
+      do
+	__ret = pthread_rwlock_timedrdlock(&_M_rwlock, &__ts);
+      while (__ret == EAGAIN || __ret == EDEADLK);
+      if (__ret == ETIMEDOUT)
	return false;
       // Errors not handled: EINVAL
       _GLIBCXX_DEBUG_ASSERT(__ret == 0);
@@ -241,7 +266,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
	unlock();
     }

-#else // defined(__GTHREADS_CXX0X)
+#else // ! _GLIBCXX_USE_PTHREAD_RWLOCK_T

 #if _GTHREAD_USE_MUTEX_TIMEDLOCK
     struct _Mutex : mutex, __timed_mutex_impl<_Mutex>
@@ -438,7 +463,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
	    _M_gate1.notify_one();
	  }
       }
-#endif // !defined(__GTHREADS_CXX0X)
+#endif // ! _GLIBCXX_USE_PTHREAD_RWLOCK_T
   };

 #endif // _GLIBCXX_HAS_GTHREADS
diff --git a/contrib/gcc-5.0/libstdc++-v3/libsupc++/nested_exception.h b/contrib/gcc-5.0/libstdc++-v3/libsupc++/nested_exception.h
index 7f7e14e32f..a716f75adf 100644
--- a/contrib/gcc-5.0/libstdc++-v3/libsupc++/nested_exception.h
+++ b/contrib/gcc-5.0/libstdc++-v3/libsupc++/nested_exception.h
@@ -108,7 +108,7 @@ namespace std
     {
       throw static_cast<_Up&&>(__t);
     }
   };

-  template
+  template
   struct _Throw_with_nested_helper : _Throw_with_nested_impl<_Tp>
   { };
diff --git a/contrib/gcc-5.0/libstdc++-v3/src/c++11/cxx11-ios_failure.cc b/contrib/gcc-5.0/libstdc++-v3/src/c++11/cxx11-ios_failure.cc
index e1c8d4e013..b0a7c46ab9 100644
--- a/contrib/gcc-5.0/libstdc++-v3/src/c++11/cxx11-ios_failure.cc
+++ b/contrib/gcc-5.0/libstdc++-v3/src/c++11/cxx11-ios_failure.cc
@@ -41,6 +41,7 @@ namespace
     name() const noexcept
     { return "iostream"; }

+    _GLIBCXX_DEFAULT_ABI_TAG
     virtual std::string
     message(int __ec) const
     {
       std::string __msg;
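For context, the class being patched above is std::shared_timed_mutex from C++14; with this change, lock_shared() retries internally on EAGAIN instead of throwing device_or_resource_busy. The example below is ordinary, illustrative usage rather than part of the commit, and assumes a C++14 compiler with a threads library (e.g. g++ -std=c++14 -pthread).

// Plain usage example of std::shared_timed_mutex: several readers, one writer.
#include <cstdio>
#include <mutex>
#include <shared_mutex>
#include <thread>
#include <vector>

static std::shared_timed_mutex rw_mutex;
static int shared_value = 0;

static void
reader ()
{
  std::shared_lock<std::shared_timed_mutex> lk (rw_mutex);  // lock_shared()
  std::printf ("read %d\n", shared_value);
}

static void
writer ()
{
  std::lock_guard<std::shared_timed_mutex> lk (rw_mutex);   // exclusive lock()
  ++shared_value;
}

int
main ()
{
  std::vector<std::thread> threads;
  threads.emplace_back (writer);
  for (int i = 0; i < 4; ++i)
    threads.emplace_back (reader);
  for (std::thread &t : threads)
    t.join ();
  return 0;
}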
diff --git a/contrib/gcc-5.0/libstdc++-v3/src/c++11/cxx11-shim_facets.cc b/contrib/gcc-5.0/libstdc++-v3/src/c++11/cxx11-shim_facets.cc
index 82bdf6f2ce..4e30088bac 100644
--- a/contrib/gcc-5.0/libstdc++-v3/src/c++11/cxx11-shim_facets.cc
+++ b/contrib/gcc-5.0/libstdc++-v3/src/c++11/cxx11-shim_facets.cc
@@ -147,6 +147,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
     // The returned object will match the caller's string ABI, even when the
     // stored string doesn't.
     template
+      _GLIBCXX_DEFAULT_ABI_TAG
       operator basic_string() const
       {
	if (!_M_dtor)
diff --git a/contrib/gcc-5.0/libstdc++-v3/src/c++11/future.cc b/contrib/gcc-5.0/libstdc++-v3/src/c++11/future.cc
index 3cf503b6b3..21dbd8a685 100644
--- a/contrib/gcc-5.0/libstdc++-v3/src/c++11/future.cc
+++ b/contrib/gcc-5.0/libstdc++-v3/src/c++11/future.cc
@@ -32,6 +32,7 @@ namespace
     name() const noexcept
     { return "future"; }

+    _GLIBCXX_DEFAULT_ABI_TAG
     virtual std::string
     message(int __ec) const
     {
       std::string __msg;
diff --git a/contrib/gcc-5.0/libstdc++-v3/src/c++11/system_error.cc b/contrib/gcc-5.0/libstdc++-v3/src/c++11/system_error.cc
index 71f5c8b652..b7ac1f869d 100644
--- a/contrib/gcc-5.0/libstdc++-v3/src/c++11/system_error.cc
+++ b/contrib/gcc-5.0/libstdc++-v3/src/c++11/system_error.cc
@@ -41,6 +41,7 @@ namespace
     name() const noexcept
     { return "generic"; }

+    _GLIBCXX_DEFAULT_ABI_TAG
     virtual string
     message(int i) const
     {
@@ -56,6 +57,7 @@ namespace
     name() const noexcept
     { return "system"; }

+    _GLIBCXX_DEFAULT_ABI_TAG
     virtual string
     message(int i) const
     {
@@ -111,6 +113,8 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
 #endif

 #if _GLIBCXX_USE_DUAL_ABI
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wabi-tag"
 // Redefine __sso_string so that we can define and export its members
 // in terms of the SSO std::string.
 struct __sso_string
@@ -137,6 +141,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
   __sso_string(__sso_string&&) noexcept;
   __sso_string& operator=(__sso_string&&) noexcept;
 };
+#pragma GCC diagnostic pop

 __sso_string::__sso_string() : _M_str() { }
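The _GLIBCXX_DEFAULT_ABI_TAG annotations above all land on virtual message() overrides because they return std::string, the type in these interfaces whose layout differs between the old and the __cxx11 ABI. A short usage example (illustrative only) of the code path involved:

// error_category::message() returns std::string; callers like this one are
// what the ABI-tagged definitions above ultimately serve.
#include <cerrno>
#include <cstdio>
#include <string>
#include <system_error>

int
main ()
{
  std::error_code ec (EINVAL, std::generic_category ());
  std::string what = ec.message ();     // dispatches to the generic category
  std::printf ("%s\n", what.c_str ());  // e.g. "Invalid argument"
  return 0;
}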