From d8f1d01e64e3c6aa23c0904c0b23310a33e418ea Mon Sep 17 00:00:00 2001
From: John Marino
Date: Fri, 28 Oct 2011 00:03:08 +0200
Subject: [PATCH] Upgrade GCC from 4.4.6-RELEASE to 4.4.7 snapshot 2011-10-25

---
 contrib/gcc-4.4/LAST_UPDATED                 |   2 +-
 contrib/gcc-4.4/gcc/BASE-VER                 |   2 +-
 contrib/gcc-4.4/gcc/DATESTAMP                |   2 +-
 contrib/gcc-4.4/gcc/c-common.c               |  19 +--
 contrib/gcc-4.4/gcc/combine.c                |   2 +-
 contrib/gcc-4.4/gcc/config/i386/i386.c       |   6 +-
 contrib/gcc-4.4/gcc/config/i386/i386.md      | 102 ++++++++++------
 contrib/gcc-4.4/gcc/config/i386/sse.md       |  36 ++++--
 contrib/gcc-4.4/gcc/cp/call.c                |   3 +-
 contrib/gcc-4.4/gcc/cp/cp-tree.h             |   1 +
 contrib/gcc-4.4/gcc/cp/decl2.c               |  38 +++---
 contrib/gcc-4.4/gcc/cp/init.c                |  63 ++++++++--
 contrib/gcc-4.4/gcc/cp/parser.c              |   6 +
 contrib/gcc-4.4/gcc/cp/tree.c                |   9 +-
 contrib/gcc-4.4/gcc/expr.c                   |  35 ++++--
 contrib/gcc-4.4/gcc/final.c                  |   5 +
 contrib/gcc-4.4/gcc/gimplify.c               |  31 ++++-
 contrib/gcc-4.4/gcc/haifa-sched.c            |   4 +-
 contrib/gcc-4.4/gcc/reload.c                 |   9 ++
 contrib/gcc-4.4/gcc/reload1.c                |   7 ++
 contrib/gcc-4.4/gcc/reorg.c                  |  45 +++----
 contrib/gcc-4.4/gcc/sel-sched-ir.c           | 116 ++++++++++---------
 contrib/gcc-4.4/gcc/sel-sched-ir.h           |   2 -
 contrib/gcc-4.4/gcc/tree-cfgcleanup.c        |   2 +-
 contrib/gcc-4.4/gcc/tree-inline.c            |   6 +-
 contrib/gcc-4.4/gcc/tree-ssa-dom.c           |   3 +-
 contrib/gcc-4.4/gcc/tree-switch-conversion.c |  57 ++++-----
 contrib/gcc-4.4/gcc/tree-tailcall.c          |   8 ++
 contrib/gcc-4.4/gcc/tree-vrp.c               |  28 +++--
 29 files changed, 415 insertions(+), 234 deletions(-)

diff --git a/contrib/gcc-4.4/LAST_UPDATED b/contrib/gcc-4.4/LAST_UPDATED
index c625976f00..ccf07bfa22 100644
--- a/contrib/gcc-4.4/LAST_UPDATED
+++ b/contrib/gcc-4.4/LAST_UPDATED
@@ -1 +1 @@
-Obtained from SVN: tags/gcc_4_4_6_release revision 172579
+Obtained from SVN: branches/gcc-4_4-branch revision 180457
diff --git a/contrib/gcc-4.4/gcc/BASE-VER b/contrib/gcc-4.4/gcc/BASE-VER
index b98ff4c483..c966188e11 100644
--- a/contrib/gcc-4.4/gcc/BASE-VER
+++ b/contrib/gcc-4.4/gcc/BASE-VER
@@ -1 +1 @@
-4.4.6
+4.4.7
diff --git a/contrib/gcc-4.4/gcc/DATESTAMP b/contrib/gcc-4.4/gcc/DATESTAMP
index cbd0df9c76..5392057132 100644
--- a/contrib/gcc-4.4/gcc/DATESTAMP
+++ b/contrib/gcc-4.4/gcc/DATESTAMP
@@ -1 +1 @@
-20110416
+20111025
diff --git a/contrib/gcc-4.4/gcc/c-common.c b/contrib/gcc-4.4/gcc/c-common.c
index 5f4079054a..2d62a1cc1b 100644
--- a/contrib/gcc-4.4/gcc/c-common.c
+++ b/contrib/gcc-4.4/gcc/c-common.c
@@ -3430,13 +3430,18 @@ c_common_truthvalue_conversion (location_t location, tree expr)
 	 TREE_OPERAND (expr, 0));
 
     case COND_EXPR:
-      /* Distribute the conversion into the arms of a COND_EXPR.  */
-      return fold_build3 (COND_EXPR, truthvalue_type_node,
-		TREE_OPERAND (expr, 0),
-		c_common_truthvalue_conversion (location,
-						TREE_OPERAND (expr, 1)),
-		c_common_truthvalue_conversion (location,
-						TREE_OPERAND (expr, 2)));
+      {
+	tree op1 = TREE_OPERAND (expr, 1);
+	tree op2 = TREE_OPERAND (expr, 2);
+	/* In C++ one of the arms might have void type if it is throw.  */
+	if (!VOID_TYPE_P (TREE_TYPE (op1)))
+	  op1 = c_common_truthvalue_conversion (location, op1);
+	if (!VOID_TYPE_P (TREE_TYPE (op2)))
+	  op2 = c_common_truthvalue_conversion (location, op2);
+	/* Distribute the conversion into the arms of a COND_EXPR.  */
+	return fold_build3 (COND_EXPR, truthvalue_type_node,
+			    TREE_OPERAND (expr, 0), op1, op2);
+      }
 
     CASE_CONVERT:
       /* Don't cancel the effect of a CONVERT_EXPR from a REFERENCE_TYPE,
diff --git a/contrib/gcc-4.4/gcc/combine.c b/contrib/gcc-4.4/gcc/combine.c
index 42a465ecc5..d1ee51c848 100644
--- a/contrib/gcc-4.4/gcc/combine.c
+++ b/contrib/gcc-4.4/gcc/combine.c
@@ -4983,7 +4983,7 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest)
 	{
 	  /* Try to simplify the expression further.  */
 	  rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
-	  temp = combine_simplify_rtx (tor, mode, in_dest);
+	  temp = combine_simplify_rtx (tor, VOIDmode, in_dest);
 
 	  /* If we could, great.  If not, do not go ahead with the IOR
 	     replacement, since PLUS appears in many special purpose
diff --git a/contrib/gcc-4.4/gcc/config/i386/i386.c b/contrib/gcc-4.4/gcc/config/i386/i386.c
index f0ab2968e2..101374934a 100644
--- a/contrib/gcc-4.4/gcc/config/i386/i386.c
+++ b/contrib/gcc-4.4/gcc/config/i386/i386.c
@@ -13645,11 +13645,15 @@ ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
       if (req_mode == CCZmode)
 	return 0;
       /* FALLTHRU */
+    case CCZmode:
+      break;
+
     case CCAmode:
     case CCCmode:
     case CCOmode:
     case CCSmode:
-    case CCZmode:
+      if (set_mode != req_mode)
+	return 0;
       break;
 
     default:
diff --git a/contrib/gcc-4.4/gcc/config/i386/i386.md b/contrib/gcc-4.4/gcc/config/i386/i386.md
index 82f7ecbb5a..1de7b3a3a5 100644
--- a/contrib/gcc-4.4/gcc/config/i386/i386.md
+++ b/contrib/gcc-4.4/gcc/config/i386/i386.md
@@ -19018,7 +19018,8 @@
    (set (match_operand:DI 1 "register_operand" "=S")
 	(plus:DI (match_dup 3)
 		 (const_int 8)))]
-  "TARGET_64BIT"
+  "TARGET_64BIT
+   && !(fixed_regs[SI_REG] || fixed_regs[DI_REG])"
   "movsq"
   [(set_attr "type" "str")
    (set_attr "mode" "DI")
@@ -19033,7 +19034,8 @@
    (set (match_operand:SI 1 "register_operand" "=S")
 	(plus:SI (match_dup 3)
 		 (const_int 4)))]
-  "!TARGET_64BIT"
+  "!TARGET_64BIT
+   && !(fixed_regs[SI_REG] || fixed_regs[DI_REG])"
  "movs{l|d}"
  [(set_attr "type" "str")
   (set_attr "mode" "SI")
@@ -19048,7 +19050,8 @@
    (set (match_operand:DI 1 "register_operand" "=S")
 	(plus:DI (match_dup 3)
 		 (const_int 4)))]
-  "TARGET_64BIT"
+  "TARGET_64BIT
+   && !(fixed_regs[SI_REG] || fixed_regs[DI_REG])"
  "movs{l|d}"
  [(set_attr "type" "str")
   (set_attr "mode" "SI")
@@ -19063,7 +19066,8 @@
    (set (match_operand:SI 1 "register_operand" "=S")
 	(plus:SI (match_dup 3)
 		 (const_int 2)))]
-  "!TARGET_64BIT"
+  "!TARGET_64BIT
+   && !(fixed_regs[SI_REG] || fixed_regs[DI_REG])"
  "movsw"
  [(set_attr "type" "str")
   (set_attr "memory" "both")
@@ -19078,7 +19082,8 @@
    (set (match_operand:DI 1 "register_operand" "=S")
 	(plus:DI (match_dup 3)
 		 (const_int 2)))]
-  "TARGET_64BIT"
+  "TARGET_64BIT
+   && !(fixed_regs[SI_REG] || fixed_regs[DI_REG])"
  "movsw"
  [(set_attr "type" "str")
   (set_attr "memory" "both")
@@ -19093,7 +19098,8 @@
    (set (match_operand:SI 1 "register_operand" "=S")
 	(plus:SI (match_dup 3)
 		 (const_int 1)))]
-  "!TARGET_64BIT"
+  "!TARGET_64BIT
+   && !(fixed_regs[SI_REG] || fixed_regs[DI_REG])"
  "movsb"
  [(set_attr "type" "str")
   (set_attr "memory" "both")
@@ -19108,7 +19114,8 @@
    (set (match_operand:DI 1 "register_operand" "=S")
 	(plus:DI (match_dup 3)
 		 (const_int 1)))]
-  "TARGET_64BIT"
+  "TARGET_64BIT
+   && !(fixed_regs[SI_REG] || fixed_regs[DI_REG])"
  "movsb"
  [(set_attr "type" "str")
   (set_attr "memory" "both")
@@ -19138,7 +19145,8 @@
    (set (mem:BLK (match_dup 3))
 	(mem:BLK (match_dup 4)))
    (use (match_dup 5))]
-  "TARGET_64BIT"
+  "TARGET_64BIT
+   && !(fixed_regs[CX_REG] || fixed_regs[SI_REG] || fixed_regs[DI_REG])"
   "rep{%;} movsq"
[(set_attr "type" "str") (set_attr "prefix_rep" "1") @@ -19157,7 +19165,8 @@ (set (mem:BLK (match_dup 3)) (mem:BLK (match_dup 4))) (use (match_dup 5))] - "!TARGET_64BIT" + "!TARGET_64BIT + && !(fixed_regs[CX_REG] || fixed_regs[SI_REG] || fixed_regs[DI_REG])" "rep{%;} movs{l|d}" [(set_attr "type" "str") (set_attr "prefix_rep" "1") @@ -19176,7 +19185,8 @@ (set (mem:BLK (match_dup 3)) (mem:BLK (match_dup 4))) (use (match_dup 5))] - "TARGET_64BIT" + "TARGET_64BIT + && !(fixed_regs[CX_REG] || fixed_regs[SI_REG] || fixed_regs[DI_REG])" "rep{%;} movs{l|d}" [(set_attr "type" "str") (set_attr "prefix_rep" "1") @@ -19193,7 +19203,8 @@ (set (mem:BLK (match_dup 3)) (mem:BLK (match_dup 4))) (use (match_dup 5))] - "!TARGET_64BIT" + "!TARGET_64BIT + && !(fixed_regs[CX_REG] || fixed_regs[SI_REG] || fixed_regs[DI_REG])" "rep{%;} movsb" [(set_attr "type" "str") (set_attr "prefix_rep" "1") @@ -19210,7 +19221,8 @@ (set (mem:BLK (match_dup 3)) (mem:BLK (match_dup 4))) (use (match_dup 5))] - "TARGET_64BIT" + "TARGET_64BIT + && !(fixed_regs[CX_REG] || fixed_regs[SI_REG] || fixed_regs[DI_REG])" "rep{%;} movsb" [(set_attr "type" "str") (set_attr "prefix_rep" "1") @@ -19270,7 +19282,9 @@ operands[3] = gen_rtx_PLUS (Pmode, operands[0], GEN_INT (GET_MODE_SIZE (GET_MODE (operands[2])))); - if (TARGET_SINGLE_STRINGOP || optimize_insn_for_size_p ()) + /* Can't use this if the user has appropriated eax or edi. */ + if ((TARGET_SINGLE_STRINGOP || optimize_insn_for_size_p ()) + && !(fixed_regs[AX_REG] || fixed_regs[DI_REG])) { emit_insn (gen_strset_singleop (operands[0], operands[1], operands[2], operands[3])); @@ -19292,7 +19306,8 @@ (set (match_operand:DI 0 "register_operand" "=D") (plus:DI (match_dup 1) (const_int 8)))] - "TARGET_64BIT" + "TARGET_64BIT + && !(fixed_regs[AX_REG] || fixed_regs[DI_REG])" "stosq" [(set_attr "type" "str") (set_attr "memory" "store") @@ -19304,7 +19319,8 @@ (set (match_operand:SI 0 "register_operand" "=D") (plus:SI (match_dup 1) (const_int 4)))] - "!TARGET_64BIT" + "!TARGET_64BIT + && !(fixed_regs[AX_REG] || fixed_regs[DI_REG])" "stos{l|d}" [(set_attr "type" "str") (set_attr "memory" "store") @@ -19316,7 +19332,8 @@ (set (match_operand:DI 0 "register_operand" "=D") (plus:DI (match_dup 1) (const_int 4)))] - "TARGET_64BIT" + "TARGET_64BIT + && !(fixed_regs[AX_REG] || fixed_regs[DI_REG])" "stos{l|d}" [(set_attr "type" "str") (set_attr "memory" "store") @@ -19328,7 +19345,8 @@ (set (match_operand:SI 0 "register_operand" "=D") (plus:SI (match_dup 1) (const_int 2)))] - "!TARGET_64BIT" + "!TARGET_64BIT + && !(fixed_regs[AX_REG] || fixed_regs[DI_REG])" "stosw" [(set_attr "type" "str") (set_attr "memory" "store") @@ -19340,7 +19358,8 @@ (set (match_operand:DI 0 "register_operand" "=D") (plus:DI (match_dup 1) (const_int 2)))] - "TARGET_64BIT" + "TARGET_64BIT + && !(fixed_regs[AX_REG] || fixed_regs[DI_REG])" "stosw" [(set_attr "type" "str") (set_attr "memory" "store") @@ -19352,7 +19371,8 @@ (set (match_operand:SI 0 "register_operand" "=D") (plus:SI (match_dup 1) (const_int 1)))] - "!TARGET_64BIT" + "!TARGET_64BIT + && !(fixed_regs[AX_REG] || fixed_regs[DI_REG])" "stosb" [(set_attr "type" "str") (set_attr "memory" "store") @@ -19364,7 +19384,8 @@ (set (match_operand:DI 0 "register_operand" "=D") (plus:DI (match_dup 1) (const_int 1)))] - "TARGET_64BIT" + "TARGET_64BIT + && !(fixed_regs[AX_REG] || fixed_regs[DI_REG])" "stosb" [(set_attr "type" "str") (set_attr "memory" "store") @@ -19390,7 +19411,8 @@ (const_int 0)) (use (match_operand:DI 2 "register_operand" "a")) (use (match_dup 4))] - "TARGET_64BIT" + 
"TARGET_64BIT + && !(fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])" "rep{%;} stosq" [(set_attr "type" "str") (set_attr "prefix_rep" "1") @@ -19407,7 +19429,8 @@ (const_int 0)) (use (match_operand:SI 2 "register_operand" "a")) (use (match_dup 4))] - "!TARGET_64BIT" + "!TARGET_64BIT + && !(fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])" "rep{%;} stos{l|d}" [(set_attr "type" "str") (set_attr "prefix_rep" "1") @@ -19424,7 +19447,8 @@ (const_int 0)) (use (match_operand:SI 2 "register_operand" "a")) (use (match_dup 4))] - "TARGET_64BIT" + "TARGET_64BIT + && !(fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])" "rep{%;} stos{l|d}" [(set_attr "type" "str") (set_attr "prefix_rep" "1") @@ -19440,7 +19464,8 @@ (const_int 0)) (use (match_operand:QI 2 "register_operand" "a")) (use (match_dup 4))] - "!TARGET_64BIT" + "!TARGET_64BIT + && !(fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])" "rep{%;} stosb" [(set_attr "type" "str") (set_attr "prefix_rep" "1") @@ -19456,7 +19481,8 @@ (const_int 0)) (use (match_operand:QI 2 "register_operand" "a")) (use (match_dup 4))] - "TARGET_64BIT" + "TARGET_64BIT + && !(fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])" "rep{%;} stosb" [(set_attr "type" "str") (set_attr "prefix_rep" "1") @@ -19476,8 +19502,8 @@ if (optimize_insn_for_size_p () && !TARGET_INLINE_ALL_STRINGOPS) FAIL; - /* Can't use this if the user has appropriated esi or edi. */ - if (fixed_regs[SI_REG] || fixed_regs[DI_REG]) + /* Can't use this if the user has appropriated ecx, esi or edi. */ + if (fixed_regs[CX_REG] || fixed_regs[SI_REG] || fixed_regs[DI_REG]) FAIL; out = operands[0]; @@ -19568,7 +19594,8 @@ (clobber (match_operand:SI 0 "register_operand" "=S")) (clobber (match_operand:SI 1 "register_operand" "=D")) (clobber (match_operand:SI 2 "register_operand" "=c"))] - "!TARGET_64BIT" + "!TARGET_64BIT + && !(fixed_regs[CX_REG] || fixed_regs[SI_REG] || fixed_regs[DI_REG])" "repz{%;} cmpsb" [(set_attr "type" "str") (set_attr "mode" "QI") @@ -19583,7 +19610,8 @@ (clobber (match_operand:DI 0 "register_operand" "=S")) (clobber (match_operand:DI 1 "register_operand" "=D")) (clobber (match_operand:DI 2 "register_operand" "=c"))] - "TARGET_64BIT" + "TARGET_64BIT + && !(fixed_regs[CX_REG] || fixed_regs[SI_REG] || fixed_regs[DI_REG])" "repz{%;} cmpsb" [(set_attr "type" "str") (set_attr "mode" "QI") @@ -19618,7 +19646,8 @@ (clobber (match_operand:SI 0 "register_operand" "=S")) (clobber (match_operand:SI 1 "register_operand" "=D")) (clobber (match_operand:SI 2 "register_operand" "=c"))] - "!TARGET_64BIT" + "!TARGET_64BIT + && !(fixed_regs[CX_REG] || fixed_regs[SI_REG] || fixed_regs[DI_REG])" "repz{%;} cmpsb" [(set_attr "type" "str") (set_attr "mode" "QI") @@ -19636,7 +19665,8 @@ (clobber (match_operand:DI 0 "register_operand" "=S")) (clobber (match_operand:DI 1 "register_operand" "=D")) (clobber (match_operand:DI 2 "register_operand" "=c"))] - "TARGET_64BIT" + "TARGET_64BIT + && !(fixed_regs[CX_REG] || fixed_regs[SI_REG] || fixed_regs[DI_REG])" "repz{%;} cmpsb" [(set_attr "type" "str") (set_attr "mode" "QI") @@ -19647,7 +19677,7 @@ (unspec:SI [(match_operand:BLK 1 "general_operand" "") (match_operand:QI 2 "immediate_operand" "") (match_operand 3 "immediate_operand" "")] UNSPEC_SCAS))] - "" + "!TARGET_64BIT" { if (ix86_expand_strlen (operands[0], operands[1], operands[2], operands[3])) DONE; @@ -19660,7 +19690,7 @@ (unspec:DI [(match_operand:BLK 1 "general_operand" "") (match_operand:QI 2 "immediate_operand" "") (match_operand 3 
"immediate_operand" "")] UNSPEC_SCAS))] - "" + "TARGET_64BIT" { if (ix86_expand_strlen (operands[0], operands[1], operands[2], operands[3])) DONE; @@ -19683,7 +19713,8 @@ (match_operand:SI 4 "register_operand" "0")] UNSPEC_SCAS)) (clobber (match_operand:SI 1 "register_operand" "=D")) (clobber (reg:CC FLAGS_REG))] - "!TARGET_64BIT" + "!TARGET_64BIT + && !(fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])" "repnz{%;} scasb" [(set_attr "type" "str") (set_attr "mode" "QI") @@ -19697,7 +19728,8 @@ (match_operand:DI 4 "register_operand" "0")] UNSPEC_SCAS)) (clobber (match_operand:DI 1 "register_operand" "=D")) (clobber (reg:CC FLAGS_REG))] - "TARGET_64BIT" + "TARGET_64BIT + && !(fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])" "repnz{%;} scasb" [(set_attr "type" "str") (set_attr "mode" "QI") diff --git a/contrib/gcc-4.4/gcc/config/i386/sse.md b/contrib/gcc-4.4/gcc/config/i386/sse.md index 83be3cc671..afe2b700a1 100644 --- a/contrib/gcc-4.4/gcc/config/i386/sse.md +++ b/contrib/gcc-4.4/gcc/config/i386/sse.md @@ -3655,7 +3655,16 @@ (match_operand:SI 3 "const_0_to_255_operand" "n")] UNSPEC_INSERTPS))] "TARGET_AVX" - "vinsertps\t{%3, %2, %1, %0|%0, %1, %2, %3}"; +{ + if (MEM_P (operands[2])) + { + unsigned count_s = INTVAL (operands[3]) >> 6; + if (count_s) + operands[3] = GEN_INT (INTVAL (operands[3]) & 0x3f); + operands[2] = adjust_address_nv (operands[2], SFmode, count_s * 4); + } + return "vinsertps\t{%3, %2, %1, %0|%0, %1, %2, %3}"; +} [(set_attr "type" "sselog") (set_attr "prefix" "vex") (set_attr "mode" "V4SF")]) @@ -3667,7 +3676,16 @@ (match_operand:SI 3 "const_0_to_255_operand" "n")] UNSPEC_INSERTPS))] "TARGET_SSE4_1" - "insertps\t{%3, %2, %0|%0, %2, %3}"; +{ + if (MEM_P (operands[2])) + { + unsigned count_s = INTVAL (operands[3]) >> 6; + if (count_s) + operands[3] = GEN_INT (INTVAL (operands[3]) & 0x3f); + operands[2] = adjust_address_nv (operands[2], SFmode, count_s * 4); + } + return "insertps\t{%3, %2, %0|%0, %2, %3}"; +} [(set_attr "type" "sselog") (set_attr "prefix_extra" "1") (set_attr "mode" "V4SF")]) @@ -4384,22 +4402,21 @@ (set_attr "mode" "V1DF,V2DF,DF,DF,DF")]) (define_insn "sse2_loadhpd" - [(set (match_operand:V2DF 0 "nonimmediate_operand" "=x,x,x,o,o,o") + [(set (match_operand:V2DF 0 "nonimmediate_operand" "=x,x,o,o,o") (vec_concat:V2DF (vec_select:DF - (match_operand:V2DF 1 "nonimmediate_operand" " 0,0,x,0,0,0") + (match_operand:V2DF 1 "nonimmediate_operand" " 0,0,0,0,0") (parallel [(const_int 0)])) - (match_operand:DF 2 "nonimmediate_operand" " m,x,0,x,*f,r")))] + (match_operand:DF 2 "nonimmediate_operand" " m,x,x,*f,r")))] "TARGET_SSE2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "@ movhpd\t{%2, %0|%0, %2} unpcklpd\t{%2, %0|%0, %2} - shufpd\t{$1, %1, %0|%0, %1, 1} # # #" - [(set_attr "type" "ssemov,sselog,sselog,ssemov,fmov,imov") - (set_attr "mode" "V1DF,V2DF,V2DF,DF,DF,DF")]) + [(set_attr "type" "ssemov,sselog,ssemov,fmov,imov") + (set_attr "mode" "V1DF,V2DF,DF,DF,DF")]) (define_split [(set (match_operand:V2DF 0 "memory_operand" "") @@ -11657,8 +11674,7 @@ [(set (match_operand:AVXMODEF2P 0 "register_operand" "=x") (unspec:AVXMODEF2P [(match_operand:AVXMODEF2P 1 "memory_operand" "m") - (match_operand: 2 "register_operand" "x") - (match_dup 0)] + (match_operand: 2 "register_operand" "x")] UNSPEC_MASKLOAD))] "TARGET_AVX" "vmaskmovp\t{%1, %2, %0|%0, %2, %1}" diff --git a/contrib/gcc-4.4/gcc/cp/call.c b/contrib/gcc-4.4/gcc/cp/call.c index bc45868401..7bfd59d736 100644 --- a/contrib/gcc-4.4/gcc/cp/call.c +++ b/contrib/gcc-4.4/gcc/cp/call.c @@ 
-static tree build_object_call (tree, tree, tsubst_flags_t);
 static tree resolve_args (tree);
 static struct z_candidate *build_user_type_conversion_1 (tree, tree, int);
 static void print_z_candidate (const char *, struct z_candidate *);
@@ -3194,7 +3193,7 @@ build_operator_new_call (tree fnname, tree args,
   return build_over_call (cand, LOOKUP_NORMAL, tf_warning_or_error);
 }
 
-static tree
+tree
 build_object_call (tree obj, tree args, tsubst_flags_t complain)
 {
   struct z_candidate *candidates = 0, *cand;
diff --git a/contrib/gcc-4.4/gcc/cp/cp-tree.h b/contrib/gcc-4.4/gcc/cp/cp-tree.h
index 3a3f5f5cab..7b244026a5 100644
--- a/contrib/gcc-4.4/gcc/cp/cp-tree.h
+++ b/contrib/gcc-4.4/gcc/cp/cp-tree.h
@@ -4193,6 +4193,7 @@ extern tree build_new_function_call		(tree, tree, bool,
 						 tsubst_flags_t);
 extern tree build_operator_new_call		(tree, tree, tree *, tree *,
 						 tree *);
+extern tree build_object_call			(tree, tree, tsubst_flags_t);
 extern tree build_new_method_call		(tree, tree, tree, tree, int,
 						 tree *, tsubst_flags_t);
 extern tree build_special_member_call		(tree, tree, tree, tree, int,
diff --git a/contrib/gcc-4.4/gcc/cp/decl2.c b/contrib/gcc-4.4/gcc/cp/decl2.c
index ddd219533d..7bfe773701 100644
--- a/contrib/gcc-4.4/gcc/cp/decl2.c
+++ b/contrib/gcc-4.4/gcc/cp/decl2.c
@@ -3725,9 +3725,12 @@ build_offset_ref_call_from_tree (tree fn, tree args)
 	 because we depend on the form of FN.  */
       args = build_non_dependent_args (args);
       object = build_non_dependent_expr (object);
-      if (TREE_CODE (fn) == DOTSTAR_EXPR)
-	object = cp_build_unary_op (ADDR_EXPR, object, 0, tf_warning_or_error);
-      args = tree_cons (NULL_TREE, object, args);
+      if (TREE_CODE (TREE_TYPE (fn)) == METHOD_TYPE)
+	{
+	  if (TREE_CODE (fn) == DOTSTAR_EXPR)
+	    object = cp_build_unary_op (ADDR_EXPR, object, 0, tf_warning_or_error);
+	  args = tree_cons (NULL_TREE, object, args);
+	}
 
       /* Now that the arguments are done, transform FN.  */
       fn = build_non_dependent_expr (fn);
@@ -3747,7 +3750,10 @@ build_offset_ref_call_from_tree (tree fn, tree args)
       args = tree_cons (NULL_TREE, object_addr, args);
     }
 
-  expr = cp_build_function_call (fn, args, tf_warning_or_error);
+  if (CLASS_TYPE_P (TREE_TYPE (fn)))
+    expr = build_object_call (fn, args, tf_warning_or_error);
+  else
+    expr = cp_build_function_call (fn, args, tf_warning_or_error);
   if (processing_template_decl && expr != error_mark_node)
     return build_min_non_dep_call_list (expr, orig_fn, orig_args);
   return expr;
@@ -3795,8 +3801,6 @@ possibly_inlined_p (tree decl)
 void
 mark_used (tree decl)
 {
-  HOST_WIDE_INT saved_processing_template_decl = 0;
-
   /* If DECL is a BASELINK for a single function, then treat it just
      like the DECL for the function.  Otherwise, if the BASELINK is
      for an overloaded function, we don't know which function was
@@ -3819,9 +3823,6 @@ mark_used (tree decl)
 	  error ("used here");
 	  return;
 	}
-      /* If we don't need a value, then we don't need to synthesize DECL.  */
-      if (skip_evaluation)
-	return;
 
       /* If within finish_function, defer the rest until that function
 	 finishes, otherwise it might recurse.  */
@@ -3835,9 +3836,10 @@ mark_used (tree decl)
      DECL.  However, if DECL is a static data member initialized with a
      constant, we need the value right now because a reference to such a
      data member is not value-dependent.  */
-  if (TREE_CODE (decl) == VAR_DECL
-      && DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl)
-      && DECL_CLASS_SCOPE_P (decl))
+  if (DECL_INTEGRAL_CONSTANT_VAR_P (decl)
+      && !DECL_INITIAL (decl)
+      && DECL_LANG_SPECIFIC (decl)
+      && DECL_TEMPLATE_INSTANTIATION (decl))
     {
       /* Don't try to instantiate members of dependent types.  We
	 cannot just use dependent_type_p here because this function
@@ -3847,12 +3849,14 @@ mark_used (tree decl)
       if (CLASSTYPE_TEMPLATE_INFO ((DECL_CONTEXT (decl)))
	  && uses_template_parms (CLASSTYPE_TI_ARGS (DECL_CONTEXT (decl))))
	return;
-      /* Pretend that we are not in a template, even if we are, so
-	 that the static data member initializer will be processed.  */
-      saved_processing_template_decl = processing_template_decl;
-      processing_template_decl = 0;
+      instantiate_decl (decl, /*defer_ok=*/false,
+			/*expl_inst_class_mem_p=*/false);
     }
 
+  /* If we don't need a value, then we don't need to synthesize DECL.  */
+  if (skip_evaluation)
+    return;
+
   if (processing_template_decl)
     return;
@@ -3912,8 +3916,6 @@ mark_used (tree decl)
      need.  Therefore, we always try to defer instantiation.  */
   instantiate_decl (decl, /*defer_ok=*/true,
		     /*expl_inst_class_mem_p=*/false);
-
-  processing_template_decl = saved_processing_template_decl;
 }
 
 /* Given function PARM_DECL PARM, return its index in the function's list
diff --git a/contrib/gcc-4.4/gcc/cp/init.c b/contrib/gcc-4.4/gcc/cp/init.c
index a06309b3f5..d219a4f176 100644
--- a/contrib/gcc-4.4/gcc/cp/init.c
+++ b/contrib/gcc-4.4/gcc/cp/init.c
@@ -143,10 +143,13 @@ initialize_vtbl_ptrs (tree addr)
    is the number of elements in the array.  If STATIC_STORAGE_P is
    TRUE, initializers are only generated for entities for which
    zero-initialization does not simply mean filling the storage with
-   zero bytes.  */
+   zero bytes.  FIELD_SIZE, if non-NULL, is the bit size of the field,
+   subfields with bit positions at or above that bit size shouldn't
+   be added.  */
 
-tree
-build_zero_init (tree type, tree nelts, bool static_storage_p)
+static tree
+build_zero_init_1 (tree type, tree nelts, bool static_storage_p,
+		   tree field_size)
 {
   tree init = NULL_TREE;
@@ -191,15 +194,32 @@ build_zero_init (tree type, tree nelts, bool static_storage_p)
 	  if (TREE_CODE (field) != FIELD_DECL)
 	    continue;
 
+	  /* Don't add virtual bases for base classes if they are beyond
+	     the size of the current field, that means it is present
+	     somewhere else in the object.  */
+	  if (field_size)
+	    {
+	      tree bitpos = bit_position (field);
+	      if (TREE_CODE (bitpos) == INTEGER_CST
+		  && !tree_int_cst_lt (bitpos, field_size))
+		continue;
+	    }
+
 	  /* Note that for class types there will be FIELD_DECLs
 	     corresponding to base classes as well.  Thus, iterating
 	     over TYPE_FIELDs will result in correct initialization of
 	     all of the subobjects.  */
 	  if (!static_storage_p || !zero_init_p (TREE_TYPE (field)))
 	    {
-	      tree value = build_zero_init (TREE_TYPE (field),
-					    /*nelts=*/NULL_TREE,
-					    static_storage_p);
+	      tree new_field_size
+		= (DECL_FIELD_IS_BASE (field)
+		   && DECL_SIZE (field)
+		   && TREE_CODE (DECL_SIZE (field)) == INTEGER_CST)
+		  ? DECL_SIZE (field) : NULL_TREE;
+	      tree value = build_zero_init_1 (TREE_TYPE (field),
+					      /*nelts=*/NULL_TREE,
+					      static_storage_p,
+					      new_field_size);
 	      if (value)
 		CONSTRUCTOR_APPEND_ELT(v, field, value);
 	    }
@@ -246,9 +266,9 @@ build_zero_init (tree type, tree nelts, bool static_storage_p)
 	  ce->index = build2 (RANGE_EXPR, sizetype, size_zero_node,
 			      max_index);
 
-	  ce->value = build_zero_init (TREE_TYPE (type),
-				       /*nelts=*/NULL_TREE,
-				       static_storage_p);
+	  ce->value = build_zero_init_1 (TREE_TYPE (type),
+					 /*nelts=*/NULL_TREE,
+					 static_storage_p, NULL_TREE);
 	}
 
       /* Build a constructor to contain the initializations.  */
@@ -266,6 +286,24 @@ build_zero_init (tree type, tree nelts, bool static_storage_p)
   return init;
 }
 
+/* Return an expression for the zero-initialization of an object with
+   type T.  This expression will either be a constant (in the case
+   that T is a scalar), or a CONSTRUCTOR (in the case that T is an
+   aggregate), or NULL (in the case that T does not require
+   initialization).  In either case, the value can be used as
+   DECL_INITIAL for a decl of the indicated TYPE; it is a valid static
+   initializer.  If NELTS is non-NULL, and TYPE is an ARRAY_TYPE, NELTS
+   is the number of elements in the array.  If STATIC_STORAGE_P is
+   TRUE, initializers are only generated for entities for which
+   zero-initialization does not simply mean filling the storage with
+   zero bytes.  */
+
+tree
+build_zero_init (tree type, tree nelts, bool static_storage_p)
+{
+  return build_zero_init_1 (type, nelts, static_storage_p, NULL_TREE);
+}
+
 /* Return a suitable initializer for value-initializing an object of type
    TYPE, as described in [dcl.init].  */
@@ -1405,7 +1443,12 @@ expand_aggr_init_1 (tree binfo, tree true_exp, tree exp, tree init, int flags,
 	 zero out the object first.  */
       else if (TYPE_NEEDS_CONSTRUCTING (type))
 	{
-	  init = build_zero_init (type, NULL_TREE, /*static_storage_p=*/false);
+	  tree field_size = NULL_TREE;
+	  if (exp != true_exp && CLASSTYPE_AS_BASE (type) != type)
+	    /* Don't clobber already initialized virtual bases.  */
+	    field_size = TYPE_SIZE (CLASSTYPE_AS_BASE (type));
+	  init = build_zero_init_1 (type, NULL_TREE, /*static_storage_p=*/false,
+				    field_size);
 	  init = build2 (INIT_EXPR, type, exp, init);
 	  finish_expr_stmt (init);
 	  /* And then call the constructor.  */
diff --git a/contrib/gcc-4.4/gcc/cp/parser.c b/contrib/gcc-4.4/gcc/cp/parser.c
index 64e7329641..f219b1b592 100644
--- a/contrib/gcc-4.4/gcc/cp/parser.c
+++ b/contrib/gcc-4.4/gcc/cp/parser.c
@@ -3274,6 +3274,12 @@ cp_parser_primary_expression (cp_parser *parser,
 	       `&A::B' might be a pointer-to-member, but `&(A::B)' is not.  */
	    finish_parenthesized_expr (expr);
+	    /* DR 705: Wrapping an unqualified name in parentheses
+	       suppresses arg-dependent lookup.  We want to pass back
+	       CP_ID_KIND_QUALIFIED for suppressing vtable lookup
+	       (c++/37862), but none of the others.  */
+	    if (*idk != CP_ID_KIND_QUALIFIED)
+	      *idk = CP_ID_KIND_NONE;
	  }
	/* The `>' token might be the end of a template-id or
	   template-parameter-list now.  */
diff --git a/contrib/gcc-4.4/gcc/cp/tree.c b/contrib/gcc-4.4/gcc/cp/tree.c
index ec069b0ca2..01f4b6a031 100644
--- a/contrib/gcc-4.4/gcc/cp/tree.c
+++ b/contrib/gcc-4.4/gcc/cp/tree.c
@@ -1536,7 +1536,11 @@ bot_manip (tree* tp, int* walk_subtrees, void* data)
       tree u;
 
       if (TREE_CODE (TREE_OPERAND (t, 1)) == AGGR_INIT_EXPR)
-	u = build_cplus_new (TREE_TYPE (t), TREE_OPERAND (t, 1));
+	{
+	  u = build_cplus_new (TREE_TYPE (t), TREE_OPERAND (t, 1));
+	  if (AGGR_INIT_ZERO_FIRST (TREE_OPERAND (t, 1)))
+	    AGGR_INIT_ZERO_FIRST (TREE_OPERAND (u, 1)) = true;
+	}
       else
	u = build_target_expr_with_type (TREE_OPERAND (t, 1),
					 TREE_TYPE (t));
@@ -2663,7 +2667,8 @@ stabilize_expr (tree exp, tree* initp)
   if (!TREE_SIDE_EFFECTS (exp))
     init_expr = NULL_TREE;
   else if (!real_lvalue_p (exp)
-	   || !TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (exp)))
+	   || (!TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (exp))
+	       && !TYPE_HAS_NONTRIVIAL_DESTRUCTOR (TREE_TYPE (exp))))
     {
       init_expr = get_target_expr (exp);
       exp = TARGET_EXPR_SLOT (init_expr);
diff --git a/contrib/gcc-4.4/gcc/expr.c b/contrib/gcc-4.4/gcc/expr.c
index fd67889056..8cc28fb887 100644
--- a/contrib/gcc-4.4/gcc/expr.c
+++ b/contrib/gcc-4.4/gcc/expr.c
@@ -9113,10 +9113,12 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode,
       /* If temp is constant, we can just compute the result.  */
       if (GET_CODE (temp) == CONST_INT)
	{
-	  if (INTVAL (temp) != 0)
-	    emit_move_insn (target, const1_rtx);
-	  else
+	  if (INTVAL (temp) == 0)
	    emit_move_insn (target, const0_rtx);
+	  else if (TYPE_PRECISION (type) == 1 && !TYPE_UNSIGNED (type))
+	    emit_move_insn (target, constm1_rtx);
+	  else
+	    emit_move_insn (target, const1_rtx);
 
	  return target;
	}
@@ -9133,7 +9135,9 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode,
	  op1 = gen_label_rtx ();
	  emit_cmp_and_jump_insns (temp, const0_rtx, EQ, NULL_RTX,
				   GET_MODE (temp), unsignedp, op1);
-	  emit_move_insn (temp, const1_rtx);
+	  emit_move_insn (temp,
+			  TYPE_PRECISION (type) == 1 && !TYPE_UNSIGNED (type)
+			  ? constm1_rtx : const1_rtx);
	  emit_label (op1);
	  return temp;
	}
@@ -9162,7 +9166,9 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode,
	jumpifnot (exp, op1, -1);
 
	if (target)
-	  emit_move_insn (target, const1_rtx);
+	  emit_move_insn (target,
+			  TYPE_PRECISION (type) == 1 && !TYPE_UNSIGNED (type)
+			  ? constm1_rtx : const1_rtx);
 
	emit_label (op1);
	return ignore ? const0_rtx : target;
@@ -9757,7 +9763,7 @@ do_store_flag (tree exp, rtx target, enum machine_mode mode, int only_cheap)
   rtx op0, op1;
   enum insn_code icode;
   rtx subtarget = target;
-  rtx result, label;
+  rtx result, label, trueval = const1_rtx;
 
   /* If this is a TRUTH_NOT_EXPR, set a flag indicating we must invert the
      result at the end.  We can't simply invert the test since it would
@@ -9887,7 +9893,9 @@ do_store_flag (tree exp, rtx target, enum machine_mode mode, int only_cheap)
   if ((code == NE || code == EQ)
       && TREE_CODE (arg0) == BIT_AND_EXPR
       && integer_zerop (arg1)
-      && integer_pow2p (TREE_OPERAND (arg0, 1)))
+      && integer_pow2p (TREE_OPERAND (arg0, 1))
+      && (TYPE_PRECISION (TREE_TYPE (exp)) != 1
+	  || TYPE_UNSIGNED (TREE_TYPE (exp))))
     {
       tree type = lang_hooks.types.type_for_mode (mode, unsignedp);
       return expand_expr (fold_single_bit_test (code == NE ? NE_EXPR : EQ_EXPR,
@@ -9939,13 +9947,18 @@ do_store_flag (tree exp, rtx target, enum machine_mode mode, int only_cheap)
   if (target == 0)
     target = gen_reg_rtx (mode);
 
+  if (TYPE_PRECISION (TREE_TYPE (exp)) == 1
+      && !TYPE_UNSIGNED (TREE_TYPE (exp)))
+    trueval = constm1_rtx;
+
   result = emit_store_flag (target, code, op0, op1,
-			    operand_mode, unsignedp, 1);
+			    operand_mode, unsignedp,
+			    trueval == const1_rtx ? 1 : -1);
 
   if (result)
     {
       if (invert)
-	result = expand_binop (mode, xor_optab, result, const1_rtx,
+	result = expand_binop (mode, xor_optab, result, trueval,
			       result, 0, OPTAB_LIB_WIDEN);
       return result;
     }
@@ -9955,12 +9968,12 @@ do_store_flag (tree exp, rtx target, enum machine_mode mode, int only_cheap)
       || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
     target = gen_reg_rtx (GET_MODE (target));
 
-  emit_move_insn (target, invert ? const0_rtx : const1_rtx);
+  emit_move_insn (target, invert ? const0_rtx : trueval);
   label = gen_label_rtx ();
   do_compare_rtx_and_jump (op0, op1, code, unsignedp, operand_mode, NULL_RTX,
			   NULL_RTX, label, -1);
-  emit_move_insn (target, invert ? const1_rtx : const0_rtx);
+  emit_move_insn (target, invert ? trueval : const0_rtx);
   emit_label (label);
 
   return target;
diff --git a/contrib/gcc-4.4/gcc/final.c b/contrib/gcc-4.4/gcc/final.c
index 1735a73207..c4812eac09 100644
--- a/contrib/gcc-4.4/gcc/final.c
+++ b/contrib/gcc-4.4/gcc/final.c
@@ -2207,6 +2207,11 @@ final_scan_insn (rtx insn, FILE *file, int optimize ATTRIBUTE_UNUSED,
	    location_t loc;
	    expanded_location expanded;
 
+	    /* Make sure we flush any queued register saves in case this
+	       clobbers affected registers.  */
+	    if (dwarf2out_do_frame ())
+	      dwarf2out_frame_debug (insn, false);
+
	    /* There's no telling what that did to the condition codes.  */
	    CC_STATUS_INIT;
diff --git a/contrib/gcc-4.4/gcc/gimplify.c b/contrib/gcc-4.4/gcc/gimplify.c
index 94bae9c83f..19b985fb40 100644
--- a/contrib/gcc-4.4/gcc/gimplify.c
+++ b/contrib/gcc-4.4/gcc/gimplify.c
@@ -2053,8 +2053,14 @@ gimplify_compound_lval (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
		  ret = MIN (ret, tret);
		}
	    }
+	  else
+	    {
+	      tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
+				    is_gimple_reg, fb_rvalue);
+	      ret = MIN (ret, tret);
+	    }
 
-	  if (!TREE_OPERAND (t, 3))
+	  if (TREE_OPERAND (t, 3) == NULL_TREE)
	    {
	      tree elmt_type = TREE_TYPE (TREE_TYPE (TREE_OPERAND (t, 0)));
	      tree elmt_size = unshare_expr (array_ref_element_size (t));
@@ -2073,11 +2079,17 @@ gimplify_compound_lval (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
		  ret = MIN (ret, tret);
		}
	    }
+	  else
+	    {
+	      tret = gimplify_expr (&TREE_OPERAND (t, 3), pre_p, post_p,
+				    is_gimple_reg, fb_rvalue);
+	      ret = MIN (ret, tret);
+	    }
	}
       else if (TREE_CODE (t) == COMPONENT_REF)
	{
	  /* Set the field offset into T and gimplify it.  */
-	  if (!TREE_OPERAND (t, 2))
+	  if (TREE_OPERAND (t, 2) == NULL_TREE)
	    {
	      tree offset = unshare_expr (component_ref_field_offset (t));
	      tree field = TREE_OPERAND (t, 1);
@@ -2096,6 +2108,12 @@ gimplify_compound_lval (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
		  ret = MIN (ret, tret);
		}
	    }
+	  else
+	    {
+	      tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
+				    is_gimple_reg, fb_rvalue);
+	      ret = MIN (ret, tret);
+	    }
	}
     }
@@ -2570,13 +2588,18 @@ shortcut_cond_r (tree pred, tree *true_label_p, tree *false_label_p)
				 false_label_p);
      append_to_statement_list (t, &expr);
    }
-  else if (TREE_CODE (pred) == COND_EXPR)
+  else if (TREE_CODE (pred) == COND_EXPR
+	   && !VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (pred, 1)))
+	   && !VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (pred, 2))))
    {
      /* As long as we're messing with gotos, turn if (a ? b : c) into
	 if (a)
	   if (b) goto yes; else goto no;
	 else
-	   if (c) goto yes; else goto no;  */
+	   if (c) goto yes; else goto no;
+
+	 Don't do this if one of the arms has void type, which can happen
+	 in C++ when the arm is throw.  */
      expr = build3 (COND_EXPR, void_type_node, TREE_OPERAND (pred, 0),
		     shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p,
				      false_label_p),
diff --git a/contrib/gcc-4.4/gcc/haifa-sched.c b/contrib/gcc-4.4/gcc/haifa-sched.c
index 09dc233c25..a3931a591b 100644
--- a/contrib/gcc-4.4/gcc/haifa-sched.c
+++ b/contrib/gcc-4.4/gcc/haifa-sched.c
@@ -3918,7 +3918,9 @@ sched_create_recovery_edges (basic_block first_bb, basic_block rec,
   else
     edge_flags = 0;
 
-  make_single_succ_edge (rec, second_bb, edge_flags);
+  make_single_succ_edge (rec, second_bb, edge_flags);
+  if (dom_info_available_p (CDI_DOMINATORS))
+    set_immediate_dominator (CDI_DOMINATORS, rec, first_bb);
 }
 
 /* This function creates recovery code for INSN.  If MUTATE_P is nonzero,
diff --git a/contrib/gcc-4.4/gcc/reload.c b/contrib/gcc-4.4/gcc/reload.c
index d188233e50..eadaa50b33 100644
--- a/contrib/gcc-4.4/gcc/reload.c
+++ b/contrib/gcc-4.4/gcc/reload.c
@@ -6731,6 +6731,15 @@ find_equiv_reg (rtx goal, rtx insn, enum reg_class rclass, int other,
	  || num > PARAM_VALUE (PARAM_MAX_RELOAD_SEARCH_INSNS))
	return 0;
 
+      /* Don't reuse register contents from before a setjmp-type
+	 function call; on the second return (from the longjmp) it
+	 might have been clobbered by a later reuse.  It doesn't
+	 seem worthwhile to actually go and see if it is actually
+	 reused even if that information would be readily available;
+	 just don't reuse it across the setjmp call.  */
+      if (CALL_P (p) && find_reg_note (p, REG_SETJMP, NULL_RTX))
+	return 0;
+
      if (NONJUMP_INSN_P (p)
	  /* If we don't want spill regs ...  */
	  && (! (reload_reg_p != 0
diff --git a/contrib/gcc-4.4/gcc/reload1.c b/contrib/gcc-4.4/gcc/reload1.c
index d579dc5188..070ffeee20 100644
--- a/contrib/gcc-4.4/gcc/reload1.c
+++ b/contrib/gcc-4.4/gcc/reload1.c
@@ -4428,6 +4428,13 @@ reload_as_needed (int live_known)
	{
	  AND_COMPL_HARD_REG_SET (reg_reloaded_valid, call_used_reg_set);
	  AND_COMPL_HARD_REG_SET (reg_reloaded_valid, reg_reloaded_call_part_clobbered);
+
+	  /* If this is a call to a setjmp-type function, we must not
+	     reuse any reload reg contents across the call; that will
+	     just be clobbered by other uses of the register in later
+	     code, before the longjmp.  */
+	  if (find_reg_note (insn, REG_SETJMP, NULL_RTX))
+	    CLEAR_HARD_REG_SET (reg_reloaded_valid);
	}
     }
diff --git a/contrib/gcc-4.4/gcc/reorg.c b/contrib/gcc-4.4/gcc/reorg.c
index 0bbe031f8c..2f2b0bac8d 100644
--- a/contrib/gcc-4.4/gcc/reorg.c
+++ b/contrib/gcc-4.4/gcc/reorg.c
@@ -3453,9 +3453,13 @@ relax_delay_slots (rtx first)
	 We do this by deleting the INSN containing the SEQUENCE, then
	 re-emitting the insns separately, and then deleting the RETURN.
	 This allows the count of the jump target to be properly
-	 decremented.  */
+	 decremented.
 
-      /* Clear the from target bit, since these insns are no longer
+	 Note that we need to change the INSN_UID of the re-emitted insns
+	 since it is used to hash the insns for mark_target_live_regs and
+	 the re-emitted insns will no longer be wrapped up in a SEQUENCE.
+
+	 Clear the from target bit, since these insns are no longer
	 in delay slots.  */
      for (i = 0; i < XVECLEN (pat, 0); i++)
	INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)) = 0;
@@ -3463,13 +3467,10 @@ relax_delay_slots (rtx first)
      trial = PREV_INSN (insn);
      delete_related_insns (insn);
      gcc_assert (GET_CODE (pat) == SEQUENCE);
-      after = trial;
-      for (i = 0; i < XVECLEN (pat, 0); i++)
-	{
-	  rtx this_insn = XVECEXP (pat, 0, i);
-	  add_insn_after (this_insn, after, NULL);
-	  after = this_insn;
-	}
+      add_insn_after (delay_insn, trial, NULL);
+      after = delay_insn;
+      for (i = 1; i < XVECLEN (pat, 0); i++)
+	after = emit_copy_of_insn_after (XVECEXP (pat, 0, i), after);
      delete_scheduled_jump (delay_insn);
      continue;
    }
@@ -3498,8 +3499,11 @@ relax_delay_slots (rtx first)
	}
 
      /* If the first insn at TARGET_LABEL is redundant with a previous
-	 insn, redirect the jump to the following insn process again.  */
-      trial = next_active_insn (target_label);
+	 insn, redirect the jump to the following insn and process again.
+
+	 We use next_real_insn instead of next_active_insn so we
+	 don't skip USE-markers, or we'll end up with incorrect
+	 liveness info.  */
+      trial = next_real_insn (target_label);
      if (trial && GET_CODE (PATTERN (trial)) != SEQUENCE
	  && redundant_insn (trial, insn, 0)
	  && ! can_throw_internal (trial))
@@ -3571,9 +3575,13 @@ relax_delay_slots (rtx first)
	     We do this by deleting the INSN containing the SEQUENCE, then
	     re-emitting the insns separately, and then deleting the jump.
	     This allows the count of the jump target to be properly
-	     decremented.  */
+	     decremented.
 
-	  /* Clear the from target bit, since these insns are no longer
+	     Note that we need to change the INSN_UID of the re-emitted insns
+	     since it is used to hash the insns for mark_target_live_regs and
+	     the re-emitted insns will no longer be wrapped up in a SEQUENCE.
+
+	     Clear the from target bit, since these insns are no longer
	     in delay slots.  */
	  for (i = 0; i < XVECLEN (pat, 0); i++)
	    INSN_FROM_TARGET_P (XVECEXP (pat, 0, i)) = 0;
@@ -3581,13 +3589,10 @@ relax_delay_slots (rtx first)
	  trial = PREV_INSN (insn);
	  delete_related_insns (insn);
	  gcc_assert (GET_CODE (pat) == SEQUENCE);
-	  after = trial;
-	  for (i = 0; i < XVECLEN (pat, 0); i++)
-	    {
-	      rtx this_insn = XVECEXP (pat, 0, i);
-	      add_insn_after (this_insn, after, NULL);
-	      after = this_insn;
-	    }
+	  add_insn_after (delay_insn, trial, NULL);
+	  after = delay_insn;
+	  for (i = 1; i < XVECLEN (pat, 0); i++)
+	    after = emit_copy_of_insn_after (XVECEXP (pat, 0, i), after);
	  delete_scheduled_jump (delay_insn);
	  continue;
	}
diff --git a/contrib/gcc-4.4/gcc/sel-sched-ir.c b/contrib/gcc-4.4/gcc/sel-sched-ir.c
index a6b73b997d..ab2742fd87 100644
--- a/contrib/gcc-4.4/gcc/sel-sched-ir.c
+++ b/contrib/gcc-4.4/gcc/sel-sched-ir.c
@@ -152,6 +152,7 @@ static void free_history_vect (VEC (expr_history_def, heap) **);
 
 static void move_bb_info (basic_block, basic_block);
 static void remove_empty_bb (basic_block, bool);
+static void sel_merge_blocks (basic_block, basic_block);
 static void sel_remove_loop_preheader (void);
 
 static bool insn_is_the_only_one_in_bb_p (insn_t);
@@ -3539,6 +3540,7 @@ static bool
 maybe_tidy_empty_bb (basic_block bb, bool recompute_toporder_p)
 {
   basic_block succ_bb, pred_bb;
+  VEC (basic_block, heap) *dom_bbs;
   edge e;
   edge_iterator ei;
   bool rescan_p;
@@ -3574,6 +3576,7 @@ maybe_tidy_empty_bb (basic_block bb, bool recompute_toporder_p)
   succ_bb = single_succ (bb);
   rescan_p = true;
   pred_bb = NULL;
+  dom_bbs = NULL;
 
   /* Redirect all non-fallthru edges to the next bb.  */
   while (rescan_p)
@@ -3586,6 +3589,12 @@ maybe_tidy_empty_bb (basic_block bb, bool recompute_toporder_p)
 
	  if (!(e->flags & EDGE_FALLTHRU))
	    {
+	      /* We will update dominators here only when we'll get
+		 an unreachable block when redirecting, otherwise
+		 sel_redirect_edge_and_branch will take care of it.  */
+	      if (e->dest != bb
+		  && single_pred_p (e->dest))
+		VEC_safe_push (basic_block, heap, dom_bbs, e->dest);
	      recompute_toporder_p |= sel_redirect_edge_and_branch (e, succ_bb);
	      rescan_p = true;
	      break;
	    }
	}
     }
 
-  /* If it is possible - merge BB with its predecessor.  */
   if (can_merge_blocks_p (bb->prev_bb, bb))
     sel_merge_blocks (bb->prev_bb, bb);
   else
-    /* Otherwise this is a block without fallthru predecessor.
-       Just delete it.  */
     {
+      /* This is a block without fallthru predecessor.  Just delete it.  */
       gcc_assert (pred_bb != NULL);
 
       if (in_current_region_p (pred_bb))
	 remove_empty_bb (bb, true);
     }
 
+  if (!VEC_empty (basic_block, dom_bbs))
+    {
+      VEC_safe_push (basic_block, heap, dom_bbs, succ_bb);
+      iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);
+      VEC_free (basic_block, heap, dom_bbs);
+    }
+
   if (recompute_toporder_p)
     sel_recompute_toporder ();
 
 #ifdef ENABLE_CHECKING
   verify_backedges ();
+  verify_dominators (CDI_DOMINATORS);
 #endif
 
   return true;
@@ -4985,16 +5000,23 @@ sel_add_bb (basic_block bb)
 static void
 sel_remove_bb (basic_block bb, bool remove_from_cfg_p)
 {
+  unsigned idx = bb->index;
+
   gcc_assert (bb != NULL && BB_NOTE_LIST (bb) == NULL_RTX);
 
   remove_bb_from_region (bb);
   return_bb_to_pool (bb);
-  bitmap_clear_bit (blocks_to_reschedule, bb->index);
+  bitmap_clear_bit (blocks_to_reschedule, idx);
 
   if (remove_from_cfg_p)
-    delete_and_free_basic_block (bb);
+    {
+      basic_block succ = single_succ (bb);
+      delete_and_free_basic_block (bb);
+      set_immediate_dominator (CDI_DOMINATORS, succ,
+			       recompute_dominator (CDI_DOMINATORS, succ));
+    }
 
-  rgn_setup_region (CONTAINING_RGN (bb->index));
+  rgn_setup_region (CONTAINING_RGN (idx));
 }
 
 /* Concatenate info of EMPTY_BB to info of MERGE_BB.  */
 static void
@@ -5009,50 +5031,6 @@ move_bb_info (basic_block merge_bb, basic_block empty_bb)
     }
 }
 
-/* Remove an empty basic block EMPTY_BB.  When MERGE_UP_P is true, we put
-   EMPTY_BB's note lists into its predecessor instead of putting them
-   into the successor.  When REMOVE_FROM_CFG_P is true, also remove
-   the empty block.  */
-void
-sel_remove_empty_bb (basic_block empty_bb, bool merge_up_p,
-		     bool remove_from_cfg_p)
-{
-  basic_block merge_bb;
-
-  gcc_assert (sel_bb_empty_p (empty_bb));
-
-  if (merge_up_p)
-    {
-      merge_bb = empty_bb->prev_bb;
-      gcc_assert (EDGE_COUNT (empty_bb->preds) == 1
-		  && EDGE_PRED (empty_bb, 0)->src == merge_bb);
-    }
-  else
-    {
-      edge e;
-      edge_iterator ei;
-
-      merge_bb = bb_next_bb (empty_bb);
-
-      /* Redirect incoming edges (except fallthrough one) of EMPTY_BB to its
-	 successor block.  */
-      for (ei = ei_start (empty_bb->preds);
-	   (e = ei_safe_edge (ei)); )
-	{
-	  if (! (e->flags & EDGE_FALLTHRU))
-	    sel_redirect_edge_and_branch (e, merge_bb);
-	  else
-	    ei_next (&ei);
-	}
-
-      gcc_assert (EDGE_COUNT (empty_bb->succs) == 1
-		  && EDGE_SUCC (empty_bb, 0)->dest == merge_bb);
-    }
-
-  move_bb_info (merge_bb, empty_bb);
-  remove_empty_bb (empty_bb, remove_from_cfg_p);
-}
-
 /* Remove EMPTY_BB.  If REMOVE_FROM_CFG_P is false, remove EMPTY_BB from
    region, but keep it in CFG.  */
 static void
@@ -5352,12 +5330,16 @@ sel_create_recovery_block (insn_t orig_insn)
 }
 
 /* Merge basic block B into basic block A.  */
-void
+static void
 sel_merge_blocks (basic_block a, basic_block b)
 {
-  sel_remove_empty_bb (b, true, false);
-  merge_blocks (a, b);
+  gcc_assert (sel_bb_empty_p (b)
+	      && EDGE_COUNT (b->preds) == 1
+	      && EDGE_PRED (b, 0)->src == b->prev_bb);
 
+  move_bb_info (b->prev_bb, b);
+  remove_empty_bb (b, false);
+  merge_blocks (a, b);
   change_loops_latches (b, a);
 }
@@ -5367,11 +5349,15 @@ sel_merge_blocks (basic_block a, basic_block b)
 void
 sel_redirect_edge_and_branch_force (edge e, basic_block to)
 {
-  basic_block jump_bb, src;
+  basic_block jump_bb, src, orig_dest = e->dest;
   int prev_max_uid;
   rtx jump;
 
-  gcc_assert (!sel_bb_empty_p (e->src));
+  /* This function is now used only for bookkeeping code creation, where
+     we'll never get the single pred of orig_dest block and thus will not
+     hit unreachable blocks when updating dominator info.  */
+  gcc_assert (!sel_bb_empty_p (e->src)
+	      && !single_pred_p (orig_dest));
 
   src = e->src;
   prev_max_uid = get_max_uid ();
@@ -5389,6 +5375,10 @@ sel_redirect_edge_and_branch_force (edge e, basic_block to)
   jump = find_new_jump (src, jump_bb, prev_max_uid);
   if (jump)
     sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
+  set_immediate_dominator (CDI_DOMINATORS, to,
+			   recompute_dominator (CDI_DOMINATORS, to));
+  set_immediate_dominator (CDI_DOMINATORS, orig_dest,
+			   recompute_dominator (CDI_DOMINATORS, orig_dest));
 }
 
 /* A wrapper for redirect_edge_and_branch.  Return TRUE if blocks connected by
@@ -5397,11 +5387,12 @@ bool
 sel_redirect_edge_and_branch (edge e, basic_block to)
 {
   bool latch_edge_p;
-  basic_block src;
+  basic_block src, orig_dest = e->dest;
   int prev_max_uid;
   rtx jump;
   edge redirected;
   bool recompute_toporder_p = false;
+  bool maybe_unreachable = single_pred_p (orig_dest);
 
   latch_edge_p = (pipelining_p
		  && current_loop_nest
@@ -5432,6 +5423,15 @@ sel_redirect_edge_and_branch (edge e, basic_block to)
   if (jump)
     sel_init_new_insn (jump, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SIMPLEJUMP);
 
+  /* Only update dominator info when we don't have unreachable blocks.
+     Otherwise we'll update in maybe_tidy_empty_bb.  */
+  if (!maybe_unreachable)
+    {
+      set_immediate_dominator (CDI_DOMINATORS, to,
+			       recompute_dominator (CDI_DOMINATORS, to));
+      set_immediate_dominator (CDI_DOMINATORS, orig_dest,
+			       recompute_dominator (CDI_DOMINATORS, orig_dest));
+    }
   return recompute_toporder_p;
 }
@@ -6156,6 +6156,10 @@ sel_remove_loop_preheader (void)
	  if (BB_END (prev_bb) == bb_note (prev_bb))
	    free_data_sets (prev_bb);
	}
+
+      set_immediate_dominator (CDI_DOMINATORS, next_bb,
+			       recompute_dominator (CDI_DOMINATORS,
+						    next_bb));
     }
 
   VEC_free (basic_block, heap, preheader_blocks);
diff --git a/contrib/gcc-4.4/gcc/sel-sched-ir.h b/contrib/gcc-4.4/gcc/sel-sched-ir.h
index 8a67003df4..e5e905b820 100644
--- a/contrib/gcc-4.4/gcc/sel-sched-ir.h
+++ b/contrib/gcc-4.4/gcc/sel-sched-ir.h
@@ -1579,11 +1579,9 @@ extern bool in_same_ebb_p (insn_t, insn_t);
 
 extern bool tidy_control_flow (basic_block, bool);
 extern void free_bb_note_pool (void);
 
-extern void sel_remove_empty_bb (basic_block, bool, bool);
 extern void purge_empty_blocks (void);
 extern basic_block sel_split_edge (edge);
 extern basic_block sel_create_recovery_block (insn_t);
-extern void sel_merge_blocks (basic_block, basic_block);
 extern bool sel_redirect_edge_and_branch (edge, basic_block);
 extern void sel_redirect_edge_and_branch_force (edge, basic_block);
 extern void sel_init_pipelining (void);
diff --git a/contrib/gcc-4.4/gcc/tree-cfgcleanup.c b/contrib/gcc-4.4/gcc/tree-cfgcleanup.c
index a68dc5c590..f0270af49a 100644
--- a/contrib/gcc-4.4/gcc/tree-cfgcleanup.c
+++ b/contrib/gcc-4.4/gcc/tree-cfgcleanup.c
@@ -496,7 +496,7 @@ split_bbs_on_noreturn_calls (void)
	 BB is present in the cfg.  */
      if (bb == NULL
	  || bb->index < NUM_FIXED_BLOCKS
-	  || bb->index >= n_basic_blocks
+	  || bb->index >= last_basic_block
	  || BASIC_BLOCK (bb->index) != bb
	  || last_stmt (bb) == stmt
	  || !gimple_call_noreturn_p (stmt))
diff --git a/contrib/gcc-4.4/gcc/tree-inline.c b/contrib/gcc-4.4/gcc/tree-inline.c
index 29fef96f77..a581e5c8c7 100644
--- a/contrib/gcc-4.4/gcc/tree-inline.c
+++ b/contrib/gcc-4.4/gcc/tree-inline.c
@@ -3722,14 +3722,16 @@ copy_tree_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
					 CONSTRUCTOR_ELTS (*tp));
	  *tp = new_tree;
	}
+      else if (code == STATEMENT_LIST)
+	/* We used to just abort on STATEMENT_LIST, but we can run into them
+	   with statement-expressions (c++/40975).  */
+	copy_statement_list (tp);
       else if (TREE_CODE_CLASS (code) == tcc_type)
	*walk_subtrees = 0;
       else if (TREE_CODE_CLASS (code) == tcc_declaration)
	*walk_subtrees = 0;
       else if (TREE_CODE_CLASS (code) == tcc_constant)
	*walk_subtrees = 0;
-      else
-	gcc_assert (code != STATEMENT_LIST);
 
   return NULL_TREE;
 }
diff --git a/contrib/gcc-4.4/gcc/tree-ssa-dom.c b/contrib/gcc-4.4/gcc/tree-ssa-dom.c
index a9fddbe97d..6a34bda9ff 100644
--- a/contrib/gcc-4.4/gcc/tree-ssa-dom.c
+++ b/contrib/gcc-4.4/gcc/tree-ssa-dom.c
@@ -225,12 +225,11 @@ initialize_hash_element (gimple stmt, tree lhs,
     {
      enum tree_code subcode = gimple_assign_rhs_code (stmt);
 
-      expr->type = NULL_TREE;
-
      switch (get_gimple_rhs_class (subcode))
        {
	case GIMPLE_SINGLE_RHS:
	  expr->kind = EXPR_SINGLE;
+	  expr->type = TREE_TYPE (gimple_assign_rhs1 (stmt));
	  expr->ops.single.rhs = gimple_assign_rhs1 (stmt);
	  break;
	case GIMPLE_UNARY_RHS:
diff --git a/contrib/gcc-4.4/gcc/tree-switch-conversion.c b/contrib/gcc-4.4/gcc/tree-switch-conversion.c
index b6c9af9ba0..2b89ff6479 100644
--- a/contrib/gcc-4.4/gcc/tree-switch-conversion.c
+++ b/contrib/gcc-4.4/gcc/tree-switch-conversion.c
@@ -519,7 +519,7 @@ static void
 build_arrays (gimple swtch)
 {
   tree arr_index_type;
-  tree tidx, sub;
+  tree tidx, sub, utype;
   gimple stmt;
   gimple_stmt_iterator gsi;
   int i;
@@ -527,12 +527,20 @@ build_arrays (gimple swtch)
   gsi = gsi_for_stmt (swtch);
 
   arr_index_type = build_index_type (info.range_size);
-  tidx = make_rename_temp (arr_index_type, "csti");
-  sub = fold_build2 (MINUS_EXPR, TREE_TYPE (info.index_expr), info.index_expr,
-		     fold_convert (TREE_TYPE (info.index_expr),
-				   info.range_min));
-  sub = force_gimple_operand_gsi (&gsi, fold_convert (arr_index_type, sub),
-				  false, NULL, true, GSI_SAME_STMT);
+
+  /* Make sure we do not generate arithmetics in a subrange.  */
+  if (TREE_TYPE (TREE_TYPE (info.index_expr)))
+    utype = lang_hooks.types.type_for_mode
+      (TYPE_MODE (TREE_TYPE (TREE_TYPE (info.index_expr))), 1);
+  else
+    utype = lang_hooks.types.type_for_mode
+      (TYPE_MODE (TREE_TYPE (info.index_expr)), 1);
+
+  tidx = make_rename_temp (utype, "csui");
+  sub = fold_build2 (MINUS_EXPR, utype,
+		     fold_convert (utype, info.index_expr),
+		     fold_convert (utype, info.range_min));
+  sub = force_gimple_operand_gsi (&gsi, sub, false, NULL, true, GSI_SAME_STMT);
   stmt = gimple_build_assign (tidx, sub);
 
   gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
@@ -641,10 +649,7 @@ gen_inbound_check (gimple swtch)
   gimple label1, label2, label3;
 
   tree utype;
-  tree tmp_u;
-  tree cast;
-  gimple cast_assign, minus_assign;
-  tree ulb, minus;
+  tree tidx;
   tree bound;
 
   gimple cond_stmt;
@@ -657,49 +662,27 @@ gen_inbound_check (gimple swtch)
   gcc_assert (info.default_values);
   bb0 = gimple_bb (swtch);
 
-  /* Make sure we do not generate arithmetics in a subrange.  */
-  if (TREE_TYPE (TREE_TYPE (info.index_expr)))
-    utype = lang_hooks.types.type_for_mode
-      (TYPE_MODE (TREE_TYPE (TREE_TYPE (info.index_expr))), 1);
-  else
-    utype = lang_hooks.types.type_for_mode
-      (TYPE_MODE (TREE_TYPE (info.index_expr)), 1);
+  tidx = gimple_assign_lhs (info.arr_ref_first);
+  utype = TREE_TYPE (tidx);
 
   /* (end of) block 0 */
   gsi = gsi_for_stmt (info.arr_ref_first);
-  tmp_u = make_rename_temp (utype, "csui");
-
-  cast = fold_convert (utype, info.index_expr);
-  cast_assign = gimple_build_assign (tmp_u, cast);
-
-  find_new_referenced_vars (cast_assign);
-  gsi_insert_before (&gsi, cast_assign, GSI_SAME_STMT);
-  mark_symbols_for_renaming (cast_assign);
-
-  ulb = fold_convert (utype, info.range_min);
-  minus = fold_build2 (MINUS_EXPR, utype, tmp_u, ulb);
-  minus = force_gimple_operand_gsi (&gsi, minus, false, NULL, true,
-				    GSI_SAME_STMT);
-  minus_assign = gimple_build_assign (tmp_u, minus);
-
-  find_new_referenced_vars (minus_assign);
-  gsi_insert_before (&gsi, minus_assign, GSI_SAME_STMT);
-  mark_symbols_for_renaming (minus_assign);
+  gsi_next (&gsi);
 
   bound = fold_convert (utype, info.range_size);
-  cond_stmt = gimple_build_cond (LE_EXPR, tmp_u, bound, NULL_TREE, NULL_TREE);
+  cond_stmt = gimple_build_cond (LE_EXPR, tidx, bound, NULL_TREE, NULL_TREE);
 
   find_new_referenced_vars (cond_stmt);
   gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
   mark_symbols_for_renaming (cond_stmt);
 
   /* block 2 */
-  gsi = gsi_for_stmt (info.arr_ref_first);
   label2 = gimple_build_label (label_decl2);
   gsi_insert_before (&gsi, label2, GSI_SAME_STMT);
   last_assign = gen_def_assigns (&gsi);
 
   /* block 1 */
-  gsi = gsi_for_stmt (info.arr_ref_first);
   label1 = gimple_build_label (label_decl1);
   gsi_insert_before (&gsi, label1, GSI_SAME_STMT);
diff --git a/contrib/gcc-4.4/gcc/tree-tailcall.c b/contrib/gcc-4.4/gcc/tree-tailcall.c
index 9d2513d271..55cdc51042 100644
--- a/contrib/gcc-4.4/gcc/tree-tailcall.c
+++ b/contrib/gcc-4.4/gcc/tree-tailcall.c
@@ -953,6 +953,14 @@ tree_optimize_tail_calls_1 (bool opt_tailcalls)
						integer_one_node);
	}
 
+      if (a_acc || m_acc)
+	{
+	  /* When the tail call elimination using accumulators is performed,
+	     statements adding the accumulated value are inserted at all exits.
+	     This turns all other tail calls to non-tail ones.  */
+	  opt_tailcalls = false;
+	}
+
      for (; tailcalls; tailcalls = next)
	{
	  next = tailcalls->next;
diff --git a/contrib/gcc-4.4/gcc/tree-vrp.c b/contrib/gcc-4.4/gcc/tree-vrp.c
index 797fa427c5..ed31a9892d 100644
--- a/contrib/gcc-4.4/gcc/tree-vrp.c
+++ b/contrib/gcc-4.4/gcc/tree-vrp.c
@@ -2259,17 +2259,27 @@ extract_range_from_binary_expr (value_range_t *vr,
	 op0 + op1 == 0, so we cannot claim that the sum is in ~[0,0].
	 Note that we are guaranteed to have vr0.type == vr1.type at
	 this point.  */
-      if (code == PLUS_EXPR && vr0.type == VR_ANTI_RANGE)
+      if (vr0.type == VR_ANTI_RANGE)
	{
-	  set_value_range_to_varying (vr);
-	  return;
+	  if (code == PLUS_EXPR)
+	    {
+	      set_value_range_to_varying (vr);
+	      return;
+	    }
+	  /* For MIN_EXPR and MAX_EXPR with two VR_ANTI_RANGEs,
+	     the resulting VR_ANTI_RANGE is the same - intersection
+	     of the two ranges.  */
+	  min = vrp_int_const_binop (MAX_EXPR, vr0.min, vr1.min);
+	  max = vrp_int_const_binop (MIN_EXPR, vr0.max, vr1.max);
+	}
+      else
+	{
+	  /* For operations that make the resulting range directly
+	     proportional to the original ranges, apply the operation to
+	     the same end of each range.  */
+	  min = vrp_int_const_binop (code, vr0.min, vr1.min);
+	  max = vrp_int_const_binop (code, vr0.max, vr1.max);
+	}
-
-      /* For operations that make the resulting range directly
-	 proportional to the original ranges, apply the operation to
-	 the same end of each range.  */
-      min = vrp_int_const_binop (code, vr0.min, vr1.min);
-      max = vrp_int_const_binop (code, vr0.max, vr1.max);
     }
   else if (code == MULT_EXPR
	   || code == TRUNC_DIV_EXPR
-- 
2.41.0