-223641
-Last Changed Date: 2015-05-25 02:16:10 +0200 (Mon, 25 May 2015)
+225979
+Last Changed Date: 2015-07-18 02:16:08 +0200 (Sat, 18 Jul 2015)
acceleration device (ACCEL_COMPILER conditional). */
static rtx
-expand_builtin_acc_on_device (tree exp, rtx target)
+expand_builtin_acc_on_device (tree exp ATTRIBUTE_UNUSED,
+ rtx target ATTRIBUTE_UNUSED)
{
+#ifdef ACCEL_COMPILER
if (!validate_arglist (exp, INTEGER_TYPE, VOID_TYPE))
return NULL_RTX;
/* Return (arg == v1 || arg == v2) ? 1 : 0. */
machine_mode v_mode = TYPE_MODE (TREE_TYPE (arg));
rtx v = expand_normal (arg), v1, v2;
-#ifdef ACCEL_COMPILER
v1 = GEN_INT (GOMP_DEVICE_NOT_HOST);
v2 = GEN_INT (ACCEL_COMPILER_acc_device);
-#else
- v1 = GEN_INT (GOMP_DEVICE_NONE);
- v2 = GEN_INT (GOMP_DEVICE_HOST);
-#endif
machine_mode target_mode = TYPE_MODE (integer_type_node);
if (!target || !register_operand (target, target_mode))
target = gen_reg_rtx (target_mode);
emit_label (done_label);
return target;
+#else
+ return NULL;
+#endif
}
for (i = 0; i < len; i++)
OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? clvec[i + 1] : NULL_TREE;
}
+ else
+ clauses = NULL_TREE;
clvec.release ();
return clauses;
}
/* Change flag_abi_version to be the actual current ABI level for the
benefit of c_cpp_builtins. */
if (flag_abi_version == 0)
- flag_abi_version = 8;
+ flag_abi_version = 9;
if (cxx_dialect >= cxx11)
{
{
tree file_decl = build_translation_unit_decl (NULL_TREE);
context = file_decl;
+ debug_hooks->register_main_translation_unit (file_decl);
}
else
context = block;
tree lhs = gimple_call_lhs (e->call_stmt);
gcall *new_stmt;
gimple_stmt_iterator gsi;
+ bool skip_bounds = false;
#ifdef ENABLE_CHECKING
cgraph_node *node;
#endif
}
}
+ /* We might propagate an instrumented function pointer into
+ a non-instrumented function and vice versa. In such a
+ case we need to either fix the function declaration or
+ remove bounds from the call statement. */
+ if (flag_check_pointer_bounds && e->callee)
+ skip_bounds = chkp_redirect_edge (e);
+
if (e->indirect_unknown_callee
- || decl == e->callee->decl)
+ || (decl == e->callee->decl
+ && !skip_bounds))
return e->call_stmt;
#ifdef ENABLE_CHECKING
}
}
- if (e->callee->clone.combined_args_to_skip)
+ if (e->callee->clone.combined_args_to_skip
+ || skip_bounds)
{
int lp_nr;
- new_stmt
- = gimple_call_copy_skip_args (e->call_stmt,
- e->callee->clone.combined_args_to_skip);
+ new_stmt = e->call_stmt;
+ if (e->callee->clone.combined_args_to_skip)
+ new_stmt
+ = gimple_call_copy_skip_args (new_stmt,
+ e->callee->clone.combined_args_to_skip);
+ if (skip_bounds)
+ new_stmt = chkp_copy_call_skip_bounds (new_stmt);
+
gimple_call_set_fndecl (new_stmt, e->callee->decl);
gimple_call_set_fntype (new_stmt, gimple_call_fntype (e->call_stmt));
}
}
+ if (instrumentation_clone
+ && DECL_BUILT_IN_CLASS (decl) == NOT_BUILT_IN)
+ {
+ tree name = DECL_ASSEMBLER_NAME (decl);
+ tree orig_name = DECL_ASSEMBLER_NAME (orig_decl);
+
+ if (!IDENTIFIER_TRANSPARENT_ALIAS (name)
+ || TREE_CHAIN (name) != orig_name)
+ {
+ error ("Alias chain for instrumented node is broken");
+ error_found = true;
+ }
+ }
+
if (analyzed && thunk.thunk_p)
{
if (!callees)
; 8: The version of the ABI that corrects the substitution behavior of
; function types with function-cv-qualifiers.
; First selectable in G++ 4.9 and default in G++ 5
+;
+; 9: The version of the ABI that corrects the alignment of nullptr_t.
+; First selectable and default in G++ 5.2.
; (set in c_common_post_options).
;
; Additional positive integers will be assigned as new versions of
This switch is deprecated; use -fsanitize-recover= instead
fsanitize-undefined-trap-on-error
-Common Report Var(flag_sanitize_undefined_trap_on_error) Init(0)
+Common Driver Report Var(flag_sanitize_undefined_trap_on_error) Init(0)
Use trap instead of a library function for undefined behavior sanitization
fasynchronous-unwind-tables
#define OPTION_MASK_ISA_RDRND_SET OPTION_MASK_ISA_RDRND
#define OPTION_MASK_ISA_F16C_SET \
(OPTION_MASK_ISA_F16C | OPTION_MASK_ISA_AVX_SET)
+#define OPTION_MASK_ISA_MWAITX_SET OPTION_MASK_ISA_MWAITX
/* Define a set of ISAs which aren't available when a given ISA is
disabled. MMX and SSE ISAs are handled separately. */
#define OPTION_MASK_ISA_XSAVES_UNSET OPTION_MASK_ISA_XSAVES
#define OPTION_MASK_ISA_PCOMMIT_UNSET OPTION_MASK_ISA_PCOMMIT
#define OPTION_MASK_ISA_CLWB_UNSET OPTION_MASK_ISA_CLWB
+#define OPTION_MASK_ISA_MWAITX_UNSET OPTION_MASK_ISA_MWAITX
/* SSE4 includes both SSE4.1 and SSE4.2. -mno-sse4 should be the same
as -mno-sse4.1. */
}
return true;
+ case OPT_mmwaitx:
+ if (value)
+ {
+ opts->x_ix86_isa_flags |= OPTION_MASK_ISA_MWAITX_SET;
+ opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_MWAITX_SET;
+ }
+ else
+ {
+ opts->x_ix86_isa_flags &= ~OPTION_MASK_ISA_MWAITX_UNSET;
+ opts->x_ix86_isa_flags_explicit |= OPTION_MASK_ISA_MWAITX_UNSET;
+ }
+ return true;
+
/* Comes from final.c -- no real reason to change it. */
#define MAX_CODE_ALIGN 16
#define bit_LWP (1 << 15)
#define bit_FMA4 (1 << 16)
#define bit_TBM (1 << 21)
+#define bit_MWAITX (1 << 29)
/* %edx */
#define bit_MMXEXT (1 << 22)
unsigned int has_clflushopt = 0, has_xsavec = 0, has_xsaves = 0;
unsigned int has_avx512dq = 0, has_avx512bw = 0, has_avx512vl = 0;
unsigned int has_avx512vbmi = 0, has_avx512ifma = 0, has_clwb = 0;
- unsigned int has_pcommit = 0;
+ unsigned int has_pcommit = 0, has_mwaitx = 0;
bool arch;
has_longmode = edx & bit_LM;
has_3dnowp = edx & bit_3DNOWP;
has_3dnow = edx & bit_3DNOW;
+ has_mwaitx = ecx & bit_MWAITX;
}
/* Get XCR_XFEATURE_ENABLED_MASK register with xgetbv. */
const char *avx512vbmi = has_avx512vbmi ? " -mavx512vbmi" : " -mno-avx512vbmi";
const char *clwb = has_clwb ? " -mclwb" : " -mno-clwb";
const char *pcommit = has_pcommit ? " -mpcommit" : " -mno-pcommit";
+ const char *mwaitx = has_mwaitx ? " -mmwaitx" : " -mno-mwaitx";
options = concat (options, mmx, mmx3dnow, sse, sse2, sse3, ssse3,
sse4a, cx16, sahf, movbe, aes, sha, pclmul,
fxsr, xsave, xsaveopt, avx512f, avx512er,
avx512cd, avx512pf, prefetchwt1, clflushopt,
xsavec, xsaves, avx512dq, avx512bw, avx512vl,
- avx512ifma, avx512vbmi, clwb, pcommit, NULL);
+ avx512ifma, avx512vbmi, clwb, pcommit, mwaitx, NULL);
}
done:
DEF_FUNCTION_TYPE (VOID, PV4SF, V4SF)
DEF_FUNCTION_TYPE (VOID, PV8SF, V8SF)
DEF_FUNCTION_TYPE (VOID, UNSIGNED, UNSIGNED)
+DEF_FUNCTION_TYPE (VOID, UNSIGNED, UNSIGNED, UNSIGNED)
DEF_FUNCTION_TYPE (VOID, PV8DI, V8DI)
# Instructions returning mask
def_or_undef (parse_in, "__PCOMMIT__");
if (isa_flag & OPTION_MASK_ISA_CLWB)
def_or_undef (parse_in, "__CLWB__");
+ if (isa_flag & OPTION_MASK_ISA_MWAITX)
+ def_or_undef (parse_in, "__MWAITX__");
}
\f
static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx, rtx);
static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
+static rtx (*ix86_gen_monitorx) (rtx, rtx, rtx);
static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
static rtx (*ix86_gen_allocate_stack_worker) (rtx, rtx);
static rtx (*ix86_gen_adjust_stack_and_probe) (rtx, rtx, rtx);
{ "-mmpx", OPTION_MASK_ISA_MPX },
{ "-mclwb", OPTION_MASK_ISA_CLWB },
{ "-mpcommit", OPTION_MASK_ISA_PCOMMIT },
+ { "-mmwaitx", OPTION_MASK_ISA_MWAITX },
};
/* Flag options. */
#define PTA_AVX512VBMI (HOST_WIDE_INT_1 << 54)
#define PTA_CLWB (HOST_WIDE_INT_1 << 55)
#define PTA_PCOMMIT (HOST_WIDE_INT_1 << 56)
+#define PTA_MWAITX (HOST_WIDE_INT_1 << 57)
#define PTA_CORE2 \
(PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_SSSE3 \
| PTA_FMA4 | PTA_XOP | PTA_LWP | PTA_BMI | PTA_BMI2
| PTA_TBM | PTA_F16C | PTA_FMA | PTA_PRFCHW | PTA_FXSR
| PTA_XSAVE | PTA_XSAVEOPT | PTA_FSGSBASE | PTA_RDRND
- | PTA_MOVBE},
+ | PTA_MOVBE | PTA_MWAITX},
{"btver1", PROCESSOR_BTVER1, CPU_GENERIC,
PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
| PTA_SSSE3 | PTA_SSE4A |PTA_ABM | PTA_CX16 | PTA_PRFCHW
opts->x_ix86_isa_flags |= OPTION_MASK_ISA_AVX512IFMA;
if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
x86_prefetch_sse = true;
+ if (processor_alias_table[i].flags & PTA_MWAITX
+ && !(opts->x_ix86_isa_flags_explicit & OPTION_MASK_ISA_MWAITX))
+ opts->x_ix86_isa_flags |= OPTION_MASK_ISA_MWAITX;
break;
}
ix86_gen_adjust_stack_and_probe = gen_adjust_stack_and_probedi;
ix86_gen_probe_stack_range = gen_probe_stack_rangedi;
ix86_gen_monitor = gen_sse3_monitor_di;
+ ix86_gen_monitorx = gen_monitorx_di;
}
else
{
ix86_gen_adjust_stack_and_probe = gen_adjust_stack_and_probesi;
ix86_gen_probe_stack_range = gen_probe_stack_rangesi;
ix86_gen_monitor = gen_sse3_monitor_si;
+ ix86_gen_monitorx = gen_monitorx_si;
}
#ifdef USE_IX86_CLD
IX86_ATTR_ISA ("avx512ifma", OPT_mavx512ifma),
IX86_ATTR_ISA ("clwb", OPT_mclwb),
IX86_ATTR_ISA ("pcommit", OPT_mpcommit),
+ IX86_ATTR_ISA ("mwaitx", OPT_mmwaitx),
/* enum options */
IX86_ATTR_ENUM ("fpmath=", OPT_mfpmath_),
ix86_function_arg_regno_p (int regno)
{
int i;
+ enum calling_abi call_abi;
const int *parm_regs;
if (TARGET_MPX && BND_REGNO_P (regno))
/* TODO: The function should depend on current function ABI but
builtins.c would need updating then. Therefore we use the
default ABI. */
+ call_abi = ix86_cfun_abi ();
/* RAX is used as hidden argument to va_arg functions. */
- if (ix86_abi == SYSV_ABI && regno == AX_REG)
+ if (call_abi == SYSV_ABI && regno == AX_REG)
return true;
- if (ix86_abi == MS_ABI)
+ if (call_abi == MS_ABI)
parm_regs = x86_64_ms_abi_int_parameter_registers;
else
parm_regs = x86_64_int_parameter_registers;
- for (i = 0; i < (ix86_abi == MS_ABI
+
+ for (i = 0; i < (call_abi == MS_ABI
? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
if (regno == parm_regs[i])
return true;
case AX_REG:
return true;
case DX_REG:
- return (!TARGET_64BIT || ix86_abi != MS_ABI);
+ return (!TARGET_64BIT || ix86_cfun_abi () != MS_ABI);
case DI_REG:
case SI_REG:
- return TARGET_64BIT && ix86_abi != MS_ABI;
+ return TARGET_64BIT && ix86_cfun_abi () != MS_ABI;
case FIRST_BND_REG:
return chkp_function_instrumented_p (current_function_decl);
/* TODO: The function should depend on current function ABI but
builtins.c would need updating then. Therefore we use the
default ABI. */
- if (TARGET_64BIT && ix86_abi == MS_ABI)
+ if (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)
return false;
return TARGET_FLOAT_RETURNS_IN_80387;
Do an lea to the last part and use only one colliding move. */
else if (collisions > 1)
{
- rtx base;
+ rtx base, addr, tls_base = NULL_RTX;
collisions = 1;
if (GET_MODE (base) != Pmode)
base = gen_rtx_REG (Pmode, REGNO (base));
- emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
+ addr = XEXP (part[1][0], 0);
+ if (TARGET_TLS_DIRECT_SEG_REFS)
+ {
+ struct ix86_address parts;
+ int ok = ix86_decompose_address (addr, &parts);
+ gcc_assert (ok);
+ if (parts.seg == DEFAULT_TLS_SEG_REG)
+ {
+ /* It is not valid to use %gs: or %fs: in
+ lea though, so we need to remove it from the
+ address used for lea and add it to each individual
+ memory load instead. */
+ addr = copy_rtx (addr);
+ rtx *x = &addr;
+ while (GET_CODE (*x) == PLUS)
+ {
+ for (i = 0; i < 2; i++)
+ {
+ rtx u = XEXP (*x, i);
+ if (GET_CODE (u) == ZERO_EXTEND)
+ u = XEXP (u, 0);
+ if (GET_CODE (u) == UNSPEC
+ && XINT (u, 1) == UNSPEC_TP)
+ {
+ tls_base = XEXP (*x, i);
+ *x = XEXP (*x, 1 - i);
+ break;
+ }
+ }
+ if (tls_base)
+ break;
+ x = &XEXP (*x, 0);
+ }
+ gcc_assert (tls_base);
+ }
+ }
+ emit_insn (gen_rtx_SET (VOIDmode, base, addr));
+ if (tls_base)
+ base = gen_rtx_PLUS (GET_MODE (base), base, tls_base);
part[1][0] = replace_equiv_address (part[1][0], base);
for (i = 1; i < nparts; i++)
{
+ if (tls_base)
+ base = copy_rtx (base);
tmp = plus_constant (Pmode, base, UNITS_PER_WORD * i);
part[1][i] = replace_equiv_address (part[1][i], tmp);
}
IX86_BUILTIN_CVTPS2PH,
IX86_BUILTIN_CVTPS2PH256,
+ /* MONITORX and MWAITX instructions. */
+ IX86_BUILTIN_MONITORX,
+ IX86_BUILTIN_MWAITX,
+
/* CFString built-in for darwin */
IX86_BUILTIN_CFSTRING,
def_builtin (OPTION_MASK_ISA_CLWB, "__builtin_ia32_clwb",
VOID_FTYPE_PCVOID, IX86_BUILTIN_CLWB);
+ /* MONITORX and MWAITX. */
+ def_builtin (OPTION_MASK_ISA_MWAITX, "__builtin_ia32_monitorx",
+ VOID_FTYPE_PCVOID_UNSIGNED_UNSIGNED, IX86_BUILTIN_MONITORX);
+ def_builtin (OPTION_MASK_ISA_MWAITX, "__builtin_ia32_mwaitx",
+ VOID_FTYPE_UNSIGNED_UNSIGNED_UNSIGNED, IX86_BUILTIN_MWAITX);
+
/* Add FMA4 multi-arg argument instructions */
for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
{
return 0;
case IX86_BUILTIN_MONITOR:
+ case IX86_BUILTIN_MONITORX:
arg0 = CALL_EXPR_ARG (exp, 0);
arg1 = CALL_EXPR_ARG (exp, 1);
arg2 = CALL_EXPR_ARG (exp, 2);
op1 = copy_to_mode_reg (SImode, op1);
if (!REG_P (op2))
op2 = copy_to_mode_reg (SImode, op2);
- emit_insn (ix86_gen_monitor (op0, op1, op2));
+
+ emit_insn (fcode == IX86_BUILTIN_MONITOR
+ ? ix86_gen_monitor (op0, op1, op2)
+ : ix86_gen_monitorx (op0, op1, op2));
return 0;
case IX86_BUILTIN_MWAIT:
emit_insn (gen_sse3_mwait (op0, op1));
return 0;
+ case IX86_BUILTIN_MWAITX:
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ arg1 = CALL_EXPR_ARG (exp, 1);
+ arg2 = CALL_EXPR_ARG (exp, 2);
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ op2 = expand_normal (arg2);
+ if (!REG_P (op0))
+ op0 = copy_to_mode_reg (SImode, op0);
+ if (!REG_P (op1))
+ op1 = copy_to_mode_reg (SImode, op1);
+ if (!REG_P (op2))
+ op2 = copy_to_mode_reg (SImode, op2);
+ emit_insn (gen_mwaitx (op0, op1, op2));
+ return 0;
+
case IX86_BUILTIN_VEC_INIT_V2SI:
case IX86_BUILTIN_VEC_INIT_V4HI:
case IX86_BUILTIN_VEC_INIT_V8QI:
{ gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
};
int i, j, n;
+ machine_mode mmode = VOIDmode;
+ rtx (*gen_blendm) (rtx, rtx, rtx, rtx);
switch (mode)
{
case V8DFmode:
if (TARGET_AVX512F)
{
- tmp = gen_reg_rtx (mode);
- emit_insn (gen_rtx_SET (VOIDmode, tmp,
- gen_rtx_VEC_DUPLICATE (mode, val)));
- emit_insn (gen_avx512f_blendmv8df (target, tmp, target,
- force_reg (QImode, GEN_INT (1 << elt))));
- return;
+ mmode = QImode;
+ gen_blendm = gen_avx512f_blendmv8df;
}
- else
- break;
+ break;
+
case V8DImode:
if (TARGET_AVX512F)
{
- tmp = gen_reg_rtx (mode);
- emit_insn (gen_rtx_SET (VOIDmode, tmp,
- gen_rtx_VEC_DUPLICATE (mode, val)));
- emit_insn (gen_avx512f_blendmv8di (target, tmp, target,
- force_reg (QImode, GEN_INT (1 << elt))));
- return;
+ mmode = QImode;
+ gen_blendm = gen_avx512f_blendmv8di;
}
- else
- break;
+ break;
+
case V16SFmode:
if (TARGET_AVX512F)
{
- tmp = gen_reg_rtx (mode);
- emit_insn (gen_rtx_SET (VOIDmode, tmp,
- gen_rtx_VEC_DUPLICATE (mode, val)));
- emit_insn (gen_avx512f_blendmv16sf (target, tmp, target,
- force_reg (HImode, GEN_INT (1 << elt))));
- return;
+ mmode = HImode;
+ gen_blendm = gen_avx512f_blendmv16sf;
}
- else
- break;
+ break;
+
case V16SImode:
if (TARGET_AVX512F)
{
- tmp = gen_reg_rtx (mode);
- emit_insn (gen_rtx_SET (VOIDmode, tmp,
- gen_rtx_VEC_DUPLICATE (mode, val)));
- emit_insn (gen_avx512f_blendmv16si (target, tmp, target,
- force_reg (HImode, GEN_INT (1 << elt))));
- return;
+ mmode = HImode;
+ gen_blendm = gen_avx512f_blendmv16si;
}
- else
- break;
+ break;
+
case V32HImode:
if (TARGET_AVX512F && TARGET_AVX512BW)
{
- tmp = gen_reg_rtx (mode);
- emit_insn (gen_rtx_SET (VOIDmode, tmp,
- gen_rtx_VEC_DUPLICATE (mode, val)));
- emit_insn (gen_avx512bw_blendmv32hi (target, tmp, target,
- force_reg (SImode, GEN_INT (1 << elt))));
- return;
+ mmode = SImode;
+ gen_blendm = gen_avx512bw_blendmv32hi;
}
- else
- break;
+ break;
+
case V64QImode:
if (TARGET_AVX512F && TARGET_AVX512BW)
{
- tmp = gen_reg_rtx (mode);
- emit_insn (gen_rtx_SET (VOIDmode, tmp,
- gen_rtx_VEC_DUPLICATE (mode, val)));
- emit_insn (gen_avx512bw_blendmv64qi (target, tmp, target,
- force_reg (DImode, GEN_INT (1 << elt))));
- return;
+ mmode = DImode;
+ gen_blendm = gen_avx512bw_blendmv64qi;
}
- else
- break;
+ break;
default:
break;
}
- if (use_vec_merge)
+ if (mmode != VOIDmode)
+ {
+ tmp = gen_reg_rtx (mode);
+ emit_insn (gen_rtx_SET (VOIDmode, tmp,
+ gen_rtx_VEC_DUPLICATE (mode, val)));
+ emit_insn (gen_blendm (target, tmp, target,
+ force_reg (mmode,
+ gen_int_mode (1 << elt, mmode))));
+ }
+ else if (use_vec_merge)
{
tmp = gen_rtx_VEC_DUPLICATE (mode, val);
tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
unsigned int size = INTVAL (operands[1]);
unsigned int pos = INTVAL (operands[2]);
+ if (GET_CODE (src) == SUBREG)
+ {
+ /* Reject non-lowpart subregs. */
+ if (SUBREG_BYTE (src) != 0)
+ return false;
+ src = SUBREG_REG (src);
+ }
+
if (GET_CODE (dst) == SUBREG)
{
pos += SUBREG_BYTE (dst) * BITS_PER_UNIT;
dst = SUBREG_REG (dst);
}
- if (GET_CODE (src) == SUBREG)
- src = SUBREG_REG (src);
-
switch (GET_MODE (dst))
{
case V16QImode:
#define TARGET_PCOMMIT_P(x) TARGET_ISA_PCOMMIT_P(x)
#define TARGET_CLWB TARGET_ISA_CLWB
#define TARGET_CLWB_P(x) TARGET_ISA_CLWB_P(x)
+#define TARGET_MWAITX TARGET_ISA_MWAITX
+#define TARGET_MWAITX_P(x) TARGET_ISA_MWAITX_P(x)
#define TARGET_LP64 TARGET_ABI_64
#define TARGET_LP64_P(x) TARGET_ABI_64_P(x)
;; For CLFLUSHOPT support
UNSPECV_CLFLUSHOPT
+
+ ;; For MONITORX and MWAITX support
+ UNSPECV_MONITORX
+ UNSPECV_MWAITX
+
])
;; Constants to represent rounding modes in the ROUND instruction
/* The DImode arrived in a pair of integral registers (e.g. %edx:%eax).
Assemble the 64-bit DImode value in an xmm register. */
emit_insn (gen_sse2_loadld (operands[3], CONST0_RTX (V4SImode),
- gen_rtx_SUBREG (SImode, operands[1], 0)));
+ gen_lowpart (SImode, operands[1])));
emit_insn (gen_sse2_loadld (operands[4], CONST0_RTX (V4SImode),
- gen_rtx_SUBREG (SImode, operands[1], 4)));
+ gen_highpart (SImode, operands[1])));
emit_insn (gen_vec_interleave_lowv4si (operands[3], operands[3],
- operands[4]));
+ operands[4]));
operands[3] = gen_rtx_REG (DImode, REGNO (operands[3]));
})
[(set (match_dup 2) (match_dup 1))
(set (match_dup 0) (zero_extend:DI (match_dup 2)))]
{
+ operands[1] = shallow_copy_rtx (operands[1]);
PUT_MODE (operands[1], QImode);
operands[2] = gen_lowpart (QImode, operands[0]);
})
(parallel [(set (match_dup 0) (zero_extend:SI (match_dup 2)))
(clobber (reg:CC FLAGS_REG))])]
{
+ operands[1] = shallow_copy_rtx (operands[1]);
PUT_MODE (operands[1], QImode);
operands[2] = gen_lowpart (QImode, operands[0]);
})
[(set (match_dup 2) (match_dup 1))
(set (match_dup 0) (zero_extend:SI (match_dup 2)))]
{
+ operands[1] = shallow_copy_rtx (operands[1]);
PUT_MODE (operands[1], QImode);
operands[2] = gen_lowpart (QImode, operands[0]);
})
(const_int 0)))]
""
[(set (match_dup 0) (match_dup 1))]
- "PUT_MODE (operands[1], QImode);")
+{
+ operands[1] = shallow_copy_rtx (operands[1]);
+ PUT_MODE (operands[1], QImode);
+})
(define_split
[(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand"))
(const_int 0)))]
""
[(set (match_dup 0) (match_dup 1))]
- "PUT_MODE (operands[1], QImode);")
+{
+ operands[1] = shallow_copy_rtx (operands[1]);
+ PUT_MODE (operands[1], QImode);
+})
(define_split
[(set (match_operand:QI 0 "nonimmediate_operand")
""
[(set (match_dup 0) (match_dup 1))]
{
- rtx new_op1 = copy_rtx (operands[1]);
- operands[1] = new_op1;
- PUT_MODE (new_op1, QImode);
- PUT_CODE (new_op1, ix86_reverse_condition (GET_CODE (new_op1),
- GET_MODE (XEXP (new_op1, 0))));
+ operands[1] = shallow_copy_rtx (operands[1]);
+ PUT_MODE (operands[1], QImode);
+ PUT_CODE (operands[1],
+ ix86_reverse_condition (GET_CODE (operands[1]),
+ GET_MODE (XEXP (operands[1], 0))));
/* Make sure that (a) the CCmode we have for the flags is strong
enough for the reversed compare or (b) we have a valid FP compare. */
- if (! ix86_comparison_operator (new_op1, VOIDmode))
+ if (! ix86_comparison_operator (operands[1], VOIDmode))
FAIL;
})
""
[(set (match_dup 0) (match_dup 1))]
{
- rtx new_op1 = copy_rtx (operands[1]);
- operands[1] = new_op1;
- PUT_MODE (new_op1, QImode);
- PUT_CODE (new_op1, ix86_reverse_condition (GET_CODE (new_op1),
- GET_MODE (XEXP (new_op1, 0))));
+ operands[1] = shallow_copy_rtx (operands[1]);
+ PUT_MODE (operands[1], QImode);
+ PUT_CODE (operands[1],
+ ix86_reverse_condition (GET_CODE (operands[1]),
+ GET_MODE (XEXP (operands[1], 0))));
/* Make sure that (a) the CCmode we have for the flags is strong
enough for the reversed compare or (b) we have a valid FP compare. */
- if (! ix86_comparison_operator (new_op1, VOIDmode))
+ if (! ix86_comparison_operator (operands[1], VOIDmode))
FAIL;
})
(if_then_else (match_dup 0)
(label_ref (match_dup 1))
(pc)))]
- "PUT_MODE (operands[0], VOIDmode);")
+{
+ operands[0] = shallow_copy_rtx (operands[0]);
+ PUT_MODE (operands[0], VOIDmode);
+})
(define_split
[(set (pc)
(label_ref (match_dup 1))
(pc)))]
{
- rtx new_op0 = copy_rtx (operands[0]);
- operands[0] = new_op0;
- PUT_MODE (new_op0, VOIDmode);
- PUT_CODE (new_op0, ix86_reverse_condition (GET_CODE (new_op0),
- GET_MODE (XEXP (new_op0, 0))));
+ operands[0] = shallow_copy_rtx (operands[0]);
+ PUT_MODE (operands[0], VOIDmode);
+ PUT_CODE (operands[0],
+ ix86_reverse_condition (GET_CODE (operands[0]),
+ GET_MODE (XEXP (operands[0], 0))));
/* Make sure that (a) the CCmode we have for the flags is strong
enough for the reversed compare or (b) we have a valid FP compare. */
- if (! ix86_comparison_operator (new_op0, VOIDmode))
+ if (! ix86_comparison_operator (operands[0], VOIDmode))
FAIL;
})
(pc)))]
{
operands[2] = simplify_gen_subreg (<MODE>mode, operands[2], QImode, 0);
-
+ operands[0] = shallow_copy_rtx (operands[0]);
PUT_CODE (operands[0], reverse_condition (GET_CODE (operands[0])));
})
(pc)))]
{
operands[2] = simplify_gen_subreg (<MODE>mode, operands[2], SImode, 0);
-
+ operands[0] = shallow_copy_rtx (operands[0]);
PUT_CODE (operands[0], reverse_condition (GET_CODE (operands[0])));
})
(pc)))]
{
operands[2] = simplify_gen_subreg (<MODE>mode, operands[2], SImode, 0);
-
+ operands[0] = shallow_copy_rtx (operands[0]);
PUT_CODE (operands[0], reverse_condition (GET_CODE (operands[0])));
})
(pc)))]
{
operands[2] = simplify_gen_subreg (SImode, operands[2], QImode, 0);
-
+ operands[0] = shallow_copy_rtx (operands[0]);
PUT_CODE (operands[0], reverse_condition (GET_CODE (operands[0])));
})
(if_then_else (match_op_dup 0 [(reg:CCC FLAGS_REG) (const_int 0)])
(label_ref (match_dup 4))
(pc)))]
- "PUT_CODE (operands[0], reverse_condition (GET_CODE (operands[0])));")
+{
+ operands[0] = shallow_copy_rtx (operands[0]);
+ PUT_CODE (operands[0], reverse_condition (GET_CODE (operands[0])));
+})
;; Define combination compare-and-branch fp compare instructions to help
;; combine.
operands[1] = gen_lowpart (SImode, operands[1]);
if (GET_CODE (operands[3]) != ASHIFT)
operands[2] = gen_lowpart (SImode, operands[2]);
+ operands[3] = shallow_copy_rtx (operands[3]);
PUT_MODE (operands[3], SImode);
})
;; lifetime information then.
(define_peephole2
- [(set (match_operand:SWI124 0 "nonimmediate_operand")
- (not:SWI124 (match_operand:SWI124 1 "nonimmediate_operand")))]
+ [(set (match_operand:SWI124 0 "nonimmediate_gr_operand")
+ (not:SWI124 (match_operand:SWI124 1 "nonimmediate_gr_operand")))]
"optimize_insn_for_speed_p ()
&& ((TARGET_NOT_UNPAIRABLE
&& (!MEM_P (operands[0])
[(match_dup 0)
(match_operand 2 "memory_operand")]))]
"REGNO (operands[0]) != REGNO (operands[1])
- && ((MMX_REG_P (operands[0]) && MMX_REG_P (operands[1]))
- || (SSE_REG_P (operands[0]) && SSE_REG_P (operands[1])))"
+ && ((MMX_REGNO_P (REGNO (operands[0]))
+ && MMX_REGNO_P (REGNO (operands[1])))
+ || (SSE_REGNO_P (REGNO (operands[0]))
+ && SSE_REGNO_P (REGNO (operands[1]))))"
[(set (match_dup 0) (match_dup 2))
(set (match_dup 0)
(match_op_dup 3 [(match_dup 0) (match_dup 1)]))])
(match_operand 1 "const0_operand"))]
"GET_MODE_SIZE (GET_MODE (operands[0])) <= UNITS_PER_WORD
&& (! TARGET_USE_MOV0 || optimize_insn_for_size_p ())
- && GENERAL_REG_P (operands[0])
+ && GENERAL_REGNO_P (REGNO (operands[0]))
&& peep2_regno_dead_p (0, FLAGS_REG)"
[(parallel [(set (match_dup 0) (const_int 0))
(clobber (reg:CC FLAGS_REG))])]
[(set (match_operand:SWI248 0 "register_operand")
(const_int -1))]
"(optimize_insn_for_size_p () || TARGET_MOVE_M1_VIA_OR)
+ && GENERAL_REGNO_P (REGNO (operands[0]))
&& peep2_regno_dead_p (0, FLAGS_REG)"
[(parallel [(set (match_dup 0) (const_int -1))
(clobber (reg:CC FLAGS_REG))])]
operands[1] = gen_rtx_PLUS (word_mode, base,
gen_rtx_MULT (word_mode, index, GEN_INT (scale)));
- operands[5] = base;
if (mode != word_mode)
operands[1] = gen_rtx_SUBREG (mode, operands[1], 0);
+
+ operands[5] = base;
if (op1mode != word_mode)
- operands[5] = gen_rtx_SUBREG (op1mode, operands[5], 0);
+ operands[5] = gen_lowpart (op1mode, operands[5]);
+
operands[0] = dest;
})
\f
(set_attr "atom_sse_attr" "fence")
(set_attr "memory" "unknown")])
+;; MONITORX and MWAITX
+(define_insn "mwaitx"
+ [(unspec_volatile [(match_operand:SI 0 "register_operand" "c")
+ (match_operand:SI 1 "register_operand" "a")
+ (match_operand:SI 2 "register_operand" "b")]
+ UNSPECV_MWAITX)]
+ "TARGET_MWAITX"
+;; 64bit version is "mwaitx %rax,%rcx,%rbx". But only lower 32bits are used.
+;; Since 32bit register operands are implicitly zero extended to 64bit,
+;; we only need to set up 32bit registers.
+ "mwaitx"
+ [(set_attr "length" "3")])
+
+(define_insn "monitorx_<mode>"
+ [(unspec_volatile [(match_operand:P 0 "register_operand" "a")
+ (match_operand:SI 1 "register_operand" "c")
+ (match_operand:SI 2 "register_operand" "d")]
+ UNSPECV_MONITORX)]
+ "TARGET_MWAITX"
+;; 64bit version is "monitorx %rax,%rcx,%rdx". But only lower 32bits in
+;; RCX and RDX are used. Since 32bit register operands are implicitly
+;; zero extended to 64bit, we only need to set up 32bit registers.
+ "%^monitorx"
+ [(set (attr "length")
+ (symbol_ref ("(Pmode != word_mode) + 3")))])
+
;; MPX instructions
(define_expand "<mode>_mk"
Target Report Mask(ISA_MPX) Var(ix86_isa_flags) Save
Support MPX code generation
+mmwaitx
+Target Report Mask(ISA_MWAITX) Var(ix86_isa_flags) Save
+Support MWAITX and MONITORX built-in functions and code generation
+
mstack-protector-guard=
Target RejectNegative Joined Enum(stack_protector_guard) Var(ix86_stack_protector_guard) Init(SSP_TLS)
Use given stack-protector guard
-/* Copyright (C) 2008-2015 Free Software Foundation, Inc.
+/* Copyright (C) 2012-2015 Free Software Foundation, Inc.
This file is part of GCC.
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
-#ifndef _X86INTRIN_H_INCLUDED
-#define _X86INTRIN_H_INCLUDED
-
-#include <ia32intrin.h>
-
-#include <mmintrin.h>
-
-#include <xmmintrin.h>
-
-#include <emmintrin.h>
-
-#include <pmmintrin.h>
-
-#include <tmmintrin.h>
-
-#include <ammintrin.h>
-
-#include <smmintrin.h>
-
-#include <wmmintrin.h>
-
-/* For including AVX instructions */
-#include <immintrin.h>
-
-#include <mm3dnow.h>
-
-#include <fma4intrin.h>
-
-#include <xopintrin.h>
-
-#include <lwpintrin.h>
-
-#include <bmiintrin.h>
-
-#include <bmi2intrin.h>
-
-#include <tbmintrin.h>
-
-#include <lzcntintrin.h>
-
-#include <popcntintrin.h>
-
-#include <rdseedintrin.h>
-
-#include <prfchwintrin.h>
-
-#include <fxsrintrin.h>
-
-#include <xsaveintrin.h>
-
-#include <xsaveoptintrin.h>
-
-#include <adxintrin.h>
-
-#include <clwbintrin.h>
-
-#include <pcommitintrin.h>
-
-#include <clflushoptintrin.h>
-
-#include <xsavesintrin.h>
-
-#include <xsavecintrin.h>
-
-#endif /* _X86INTRIN_H_INCLUDED */
+#ifndef _MWAITXINTRIN_H_INCLUDED
+#define _MWAITXINTRIN_H_INCLUDED
+
+#ifndef __MWAITX__
+#pragma GCC push_options
+#pragma GCC target("mwaitx")
+#define __DISABLE_MWAITX__
+#endif /* __MWAITX__ */
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_monitorx (void const * __P, unsigned int __E, unsigned int __H)
+{
+ __builtin_ia32_monitorx (__P, __E, __H);
+}
+
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mwaitx (unsigned int __E, unsigned int __H, unsigned int __C)
+{
+ __builtin_ia32_mwaitx (__E, __H, __C);
+}
+
+#ifdef __DISABLE_MWAITX__
+#undef __DISABLE_MWAITX__
+#pragma GCC pop_options
+#endif /* __DISABLE_MWAITX__ */
+
+#endif /* _MWAITXINTRIN_H_INCLUDED */
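For context, a minimal usage sketch of the new intrinsics (my own illustration, not part of the patch). It assumes a CPU with MONITORX/MWAITX support and compilation with -mmwaitx; the zero extensions, hints and timer arguments are illustrative defaults only.

#include <x86intrin.h>

static volatile int wake_flag;

/* Spin until wake_flag becomes nonzero, parking the core with MWAITX
   between checks.  */
static void
wait_for_flag (void)
{
  while (!wake_flag)
    {
      _mm_monitorx ((const void *) &wake_flag, 0, 0); /* address, extensions, hints */
      if (!wake_flag)
        _mm_mwaitx (0, 0, 0);                         /* extensions, hints, timer */
    }
}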
(and (match_code "reg")
(match_test "GENERAL_REG_P (op)")))
+;; True if the operand is a nonimmediate operand with a GENERAL class register.
+(define_predicate "nonimmediate_gr_operand"
+ (if_then_else (match_code "reg")
+ (match_test "GENERAL_REGNO_P (REGNO (op))")
+ (match_operand 0 "nonimmediate_operand")))
+
;; Return true if OP is a register operand other than an i387 fp register.
(define_predicate "register_and_not_fp_reg_operand"
(and (match_code "reg")
/* The DImode arrived in a pair of integral registers (e.g. %edx:%eax).
Assemble the 64-bit DImode value in an xmm register. */
emit_insn (gen_sse2_loadld (operands[0], CONST0_RTX (V4SImode),
- gen_rtx_SUBREG (SImode, operands[1], 0)));
+ gen_lowpart (SImode, operands[1])));
emit_insn (gen_sse2_loadld (operands[2], CONST0_RTX (V4SImode),
- gen_rtx_SUBREG (SImode, operands[1], 4)));
+ gen_highpart (SImode, operands[1])));
emit_insn (gen_vec_interleave_lowv4si (operands[0], operands[0],
operands[2]));
}
(set_attr "atom_sse_attr" "fence")
(set_attr "memory" "unknown")])
-
+;; As per the AMD and Intel ISA manuals, the first operand is the extensions
+;; value and goes in %ecx; the second operand is the hints value and goes
+;; in %eax.
(define_insn "sse3_mwait"
- [(unspec_volatile [(match_operand:SI 0 "register_operand" "a")
- (match_operand:SI 1 "register_operand" "c")]
+ [(unspec_volatile [(match_operand:SI 0 "register_operand" "c")
+ (match_operand:SI 1 "register_operand" "a")]
UNSPECV_MWAIT)]
"TARGET_SSE3"
;; 64bit version is "mwait %rax,%rcx". But only lower 32bits are used.
#include <xsavecintrin.h>
+#include <mwaitxintrin.h>
#endif /* _X86INTRIN_H_INCLUDED */
tree field = next_initializable_field (TYPE_FIELDS (type));
tree empty_ctor = NULL_TREE;
- ctor = reshape_init (type, ctor, tf_none);
- if (ctor == error_mark_node)
- return NULL;
+ /* We already called reshape_init in implicit_conversion. */
/* The conversions within the init-list aren't affected by the enclosing
context; they're always simple copy-initialization. */
to that conversion. */
complain &= ~tf_error;
+ /* Call reshape_init early to remove redundant braces. */
+ if (expr && BRACE_ENCLOSED_INITIALIZER_P (expr)
+ && CLASS_TYPE_P (to)
+ && COMPLETE_TYPE_P (complete_type (to))
+ && !CLASSTYPE_NON_AGGREGATE (to))
+ {
+ expr = reshape_init (to, expr, complain);
+ if (expr == error_mark_node)
+ return NULL;
+ from = TREE_TYPE (expr);
+ }
+
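As a rough illustration of the brace normalization reshape_init performs here (example is mine, not from the patch): for an aggregate class, it rewrites a brace-elided or redundantly braced initializer into its fully braced form before the element-wise conversions are considered.

struct A { int i, j; };
struct B { A a; int k; };

B b = { 1, 2, 3 };   // reshaped to { { 1, 2 }, 3 } before conversion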
if (TREE_CODE (to) == REFERENCE_TYPE)
conv = reference_binding (to, from, expr, c_cast_p, flags, complain);
else
case TRUTH_ORIF_EXPR:
case TRUTH_AND_EXPR:
case TRUTH_OR_EXPR:
- warn_logical_operator (loc, code, boolean_type_node,
- code_orig_arg1, arg1, code_orig_arg2, arg2);
+ if (complain & tf_warning)
+ warn_logical_operator (loc, code, boolean_type_node,
+ code_orig_arg1, arg1, code_orig_arg2, arg2);
/* Fall through. */
case GT_EXPR:
case LT_EXPR:
case LE_EXPR:
case EQ_EXPR:
case NE_EXPR:
- if ((code_orig_arg1 == BOOLEAN_TYPE)
- ^ (code_orig_arg2 == BOOLEAN_TYPE))
+ if ((complain & tf_warning)
+ && ((code_orig_arg1 == BOOLEAN_TYPE)
+ ^ (code_orig_arg2 == BOOLEAN_TYPE)))
maybe_warn_bool_compare (loc, code, arg1, arg2);
/* Fall through. */
case PLUS_EXPR:
: TYPE_ALIGN (type)));
normalize_rli (rli);
}
+ else if (TREE_CODE (type) == NULLPTR_TYPE
+ && warn_abi && abi_version_crosses (9))
+ {
+ /* Before ABI v9, we were giving nullptr_t alignment of 1; if
+ the offset wasn't aligned like a pointer when we started to
+ lay out this field, that affects its position. */
+ tree pos = rli_size_unit_so_far (&old_rli);
+ if (int_cst_value (pos) % TYPE_ALIGN_UNIT (ptr_type_node) != 0)
+ {
+ if (abi_version_at_least (9))
+ warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wabi,
+ "alignment of %qD increased in -fabi-version=9 "
+ "(GCC 5.2)", decl);
+ else
+ warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wabi, "alignment "
+ "of %qD will increase in -fabi-version=9", decl);
+ }
+ break;
+ }
else
/* There was no conflict. We're done laying out this field. */
break;
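An illustrative layout change (example is mine, not part of the patch): before ABI version 9, nullptr_t had alignment 1, so the member below could land at offset 1; from -fabi-version=9 on it is aligned like a pointer, moving it to a pointer-aligned offset and changing the struct's size.

struct S
{
  char c;
  decltype (nullptr) n;   // std::nullptr_t: offset 1 under -fabi-version=8,
                          // pointer-aligned (offset 4 or 8) under -fabi-version=9
};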
unreverse_member_declarations (t);
cplus_decl_attributes (&t, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE);
+ fixup_attribute_variants (t);
/* Nadger the current location so that diagnostics point to the start of
the struct, not the end. */
|| TREE_CODE (body) == EH_SPEC_BLOCK)
body = TREE_OPERAND (body, 0);
if (TREE_CODE (body) == STATEMENT_LIST)
- body = STATEMENT_LIST_HEAD (body)->stmt;
- body = BIND_EXPR_BODY (body);
+ {
+ for (tree_stmt_iterator i = tsi_start (body);
+ !tsi_end_p (i); tsi_next (&i))
+ {
+ body = tsi_stmt (i);
+ if (TREE_CODE (body) == BIND_EXPR)
+ break;
+ }
+ }
+ if (TREE_CODE (body) == BIND_EXPR)
+ body = BIND_EXPR_BODY (body);
if (TREE_CODE (body) == CLEANUP_POINT_EXPR)
{
body = TREE_OPERAND (body, 0);
return build_zero_init (DECL_CONTEXT (fun), NULL_TREE, false);
}
+ /* We can't defer instantiating the function any longer. */
+ if (!DECL_INITIAL (fun)
+ && DECL_TEMPLOID_INSTANTIATION (fun))
+ {
+ ++function_depth;
+ instantiate_decl (fun, /*defer_ok*/false, /*expl_inst*/false);
+ --function_depth;
+ }
+
/* If in direct recursive call, optimize definition search. */
if (ctx && ctx->call && ctx->call->fundef->decl == fun)
new_call.fundef = ctx->call->fundef;
}
release_tree_vector (refs);
- if ((AGGREGATE_TYPE_P (TREE_TYPE (t)) || VECTOR_TYPE_P (TREE_TYPE (t))))
+ if (AGGREGATE_TYPE_P (type) || VECTOR_TYPE_P (type))
{
/* Create a new CONSTRUCTOR in case evaluation of the initializer
wants to modify it. */
- *valp = new_ctx.ctor = build_constructor (TREE_TYPE (t), NULL);
+ new_ctx.ctor = build_constructor (type, NULL);
+ if (*valp == NULL_TREE)
+ *valp = new_ctx.ctor;
CONSTRUCTOR_NO_IMPLICIT_ZERO (new_ctx.ctor) = true;
new_ctx.object = target;
}
init = cxx_eval_constant_expression (&new_ctx, init, false,
non_constant_p, overflow_p);
if (target == object)
- /* The hash table might have moved since the get earlier. */
- ctx->values->put (object, init);
+ {
+ /* The hash table might have moved since the get earlier. */
+ valp = ctx->values->get (object);
+ if (TREE_CODE (init) == CONSTRUCTOR)
+ /* An outer ctx->ctor might be pointing to *valp, so just replace
+ its contents. */
+ CONSTRUCTOR_ELTS (*valp) = CONSTRUCTOR_ELTS (init);
+ else
+ *valp = init;
+ }
else
*valp = init;
case NON_LVALUE_EXPR:
case TRY_CATCH_EXPR:
+ case TRY_BLOCK:
case CLEANUP_POINT_EXPR:
case MUST_NOT_THROW_EXPR:
case EXPR_STMT:
jump_target);
break;
+ case TRY_FINALLY_EXPR:
+ r = cxx_eval_constant_expression (ctx, TREE_OPERAND (t, 0), lval,
+ non_constant_p, overflow_p,
+ jump_target);
+ if (!*non_constant_p)
+ /* Also evaluate the cleanup. */
+ cxx_eval_constant_expression (ctx, TREE_OPERAND (t, 1), true,
+ non_constant_p, overflow_p,
+ jump_target);
+ break;
+
/* These differ from cxx_eval_unary_expression in that this doesn't
check for a constant operand or result; an address can be
constant without its operand being, and vice versa. */
break;
case PLACEHOLDER_EXPR:
- if (!ctx || !ctx->ctor || (lval && !ctx->object))
+ if (!ctx || !ctx->ctor || (lval && !ctx->object)
+ || !(same_type_ignoring_top_level_qualifiers_p
+ (TREE_TYPE (t), TREE_TYPE (ctx->ctor))))
{
/* A placeholder without a referent. We can get here when
checking whether NSDMIs are noexcept, or in massage_init_elt;
use ctx->object unconditionally, but using ctx->ctor when we
can is a minor optimization. */
tree ctor = lval ? ctx->object : ctx->ctor;
- gcc_assert (same_type_ignoring_top_level_qualifiers_p
- (TREE_TYPE (t), TREE_TYPE (ctor)));
return cxx_eval_constant_expression
(ctx, ctor, lval,
non_constant_p, overflow_p);
case CLEANUP_POINT_EXPR:
case MUST_NOT_THROW_EXPR:
case TRY_CATCH_EXPR:
+ case TRY_BLOCK:
case EH_SPEC_BLOCK:
case EXPR_STMT:
case PAREN_EXPR:
case RETURN_EXPR:
return RECUR (TREE_OPERAND (t, 0), want_rval);
+ case TRY_FINALLY_EXPR:
+ return (RECUR (TREE_OPERAND (t, 0), want_rval)
+ && RECUR (TREE_OPERAND (t, 1), any));
+
case SCOPE_REF:
return RECUR (TREE_OPERAND (t, 1), want_rval);
}
return false;
+ case TYPE_DECL:
+ case TAG_DEFN:
+ /* We can see these in statement-expressions. */
+ return true;
+
default:
if (objc_is_property_ref (t))
return false;
hash_set<tree> *p_set;
vec<tree> bind_expr_stack;
struct cp_genericize_omp_taskreg *omp_ctx;
+ bool no_sanitize_p;
};
/* Perform any pre-gimplification lowering of C++ front end trees to
: OMP_CLAUSE_DEFAULT_PRIVATE);
}
}
+ if (flag_sanitize
+ & (SANITIZE_NULL | SANITIZE_ALIGNMENT | SANITIZE_VPTR))
+ {
+ /* The point here is to not sanitize static initializers. */
+ bool no_sanitize_p = wtd->no_sanitize_p;
+ wtd->no_sanitize_p = true;
+ for (tree decl = BIND_EXPR_VARS (stmt);
+ decl;
+ decl = DECL_CHAIN (decl))
+ if (VAR_P (decl)
+ && TREE_STATIC (decl)
+ && DECL_INITIAL (decl))
+ cp_walk_tree (&DECL_INITIAL (decl), cp_genericize_r, data, NULL);
+ wtd->no_sanitize_p = no_sanitize_p;
+ }
wtd->bind_expr_stack.safe_push (stmt);
cp_walk_tree (&BIND_EXPR_BODY (stmt),
cp_genericize_r, data, NULL);
if (*stmt_p == error_mark_node)
*stmt_p = size_one_node;
return NULL;
- }
- else if (flag_sanitize
- & (SANITIZE_NULL | SANITIZE_ALIGNMENT | SANITIZE_VPTR))
+ }
+ else if ((flag_sanitize
+ & (SANITIZE_NULL | SANITIZE_ALIGNMENT | SANITIZE_VPTR))
+ && !wtd->no_sanitize_p)
{
if ((flag_sanitize & (SANITIZE_NULL | SANITIZE_ALIGNMENT))
&& TREE_CODE (stmt) == NOP_EXPR
wtd.p_set = new hash_set<tree>;
wtd.bind_expr_stack.create (0);
wtd.omp_ctx = NULL;
+ wtd.no_sanitize_p = false;
cp_walk_tree (t_p, cp_genericize_r, &wtd, NULL);
delete wtd.p_set;
wtd.bind_expr_stack.release ();
LABEL_DECL_CONTINUE (in LABEL_DECL)
2: DECL_THIS_EXTERN (in VAR_DECL or FUNCTION_DECL).
DECL_IMPLICIT_TYPEDEF_P (in a TYPE_DECL)
+ TEMPLATE_DECL_COMPLEX_ALIAS_P (in TEMPLATE_DECL)
3: DECL_IN_AGGR_P.
4: DECL_C_BIT_FIELD (in a FIELD_DECL)
DECL_ANON_UNION_VAR_P (in a VAR_DECL)
#define TYPE_DECL_ALIAS_P(NODE) \
DECL_LANG_FLAG_6 (TYPE_DECL_CHECK (NODE))
+/* Nonzero for TEMPLATE_DECL means that it is a 'complex' alias template. */
+#define TEMPLATE_DECL_COMPLEX_ALIAS_P(NODE) \
+ DECL_LANG_FLAG_2 (TEMPLATE_DECL_CHECK (NODE))
+
/* Nonzero for a type which is an alias for another type; i.e, a type
which declaration was written 'using name-of-type =
another-type'. */
(DECL_LANG_SPECIFIC (NODE)->u.base.not_really_extern)
#define DECL_REALLY_EXTERN(NODE) \
- (DECL_EXTERNAL (NODE) && ! DECL_NOT_REALLY_EXTERN (NODE))
+ (DECL_EXTERNAL (NODE) \
+ && (!DECL_LANG_SPECIFIC (NODE) || !DECL_NOT_REALLY_EXTERN (NODE)))
/* A thunk is a stub function.
global_namespace = build_lang_decl (NAMESPACE_DECL, global_scope_name,
void_type_node);
DECL_CONTEXT (global_namespace) = build_translation_unit_decl (NULL_TREE);
+ debug_hooks->register_main_translation_unit
+ (DECL_CONTEXT (global_namespace));
TREE_PUBLIC (global_namespace) = 1;
begin_scope (sk_namespace, global_namespace);
TYPE_SIZE_UNIT (nullptr_type_node) = size_int (GET_MODE_SIZE (ptr_mode));
TYPE_UNSIGNED (nullptr_type_node) = 1;
TYPE_PRECISION (nullptr_type_node) = GET_MODE_BITSIZE (ptr_mode);
+ if (abi_version_at_least (9))
+ TYPE_ALIGN (nullptr_type_node) = GET_MODE_ALIGNMENT (ptr_mode);
SET_TYPE_MODE (nullptr_type_node, ptr_mode);
record_builtin_type (RID_MAX, "decltype(nullptr)", nullptr_type_node);
nullptr_node = build_int_cst (nullptr_type_node, 0);
if (TYPE_NOTHROW_P (type) || nothrow_libfn_p (decl))
TREE_NOTHROW (decl) = 1;
- if (flag_openmp || flag_cilkplus)
+ if (flag_openmp || flag_openmp_simd || flag_cilkplus)
{
/* Adjust "omp declare simd" attributes. */
tree ods = lookup_attribute ("omp declare simd", *attrlist);
if (type == error_mark_node)
return type;
- /* If a canonical type already exists for this type, use it. We use
- this method instead of type_hash_canon, because it only does a
- simple equality check on the list of field members. */
-
- if ((t = TYPE_GET_PTRMEMFUNC_TYPE (type)))
- return t;
-
/* Make sure that we always have the unqualified pointer-to-member
type first. */
if (cp_cv_quals quals = cp_type_quals (type))
return cp_build_qualified_type (unqual, quals);
}
+ /* If a canonical type already exists for this type, use it. We use
+ this method instead of type_hash_canon, because it only does a
+ simple equality check on the list of field members. */
+
+ if ((t = TYPE_GET_PTRMEMFUNC_TYPE (type)))
+ return t;
+
t = make_node (RECORD_TYPE);
/* Let the front end know this is a pointer to member function. */
&& DECL_TEMPLATE_INFO (decl)
&& (decl_maybe_constant_var_p (decl)
|| (TREE_CODE (decl) == FUNCTION_DECL
- && (DECL_DECLARED_CONSTEXPR_P (decl)
- || DECL_OMP_DECLARE_REDUCTION_P (decl)))
+ && DECL_OMP_DECLARE_REDUCTION_P (decl))
|| undeduced_auto_decl (decl))
&& !uses_template_parms (DECL_TI_ARGS (decl)))
{
return decl;
}
+/* Subroutine of build_vec_init. Returns true if assigning to an array of
+ INNER_ELT_TYPE from INIT is trivial. */
+
+static bool
+vec_copy_assign_is_trivial (tree inner_elt_type, tree init)
+{
+ tree fromtype = inner_elt_type;
+ if (real_lvalue_p (init))
+ fromtype = cp_build_reference_type (fromtype, /*rval*/false);
+ return is_trivially_xible (MODIFY_EXPR, inner_elt_type, fromtype);
+}
+
/* `build_vec_init' returns tree structure that performs
initialization of a vector of aggregate types.
&& TREE_CODE (atype) == ARRAY_TYPE
&& TREE_CONSTANT (maxindex)
&& (from_array == 2
- ? (!CLASS_TYPE_P (inner_elt_type)
- || !TYPE_HAS_COMPLEX_COPY_ASSIGN (inner_elt_type))
+ ? vec_copy_assign_is_trivial (inner_elt_type, init)
: !TYPE_NEEDS_CONSTRUCTING (type))
&& ((TREE_CODE (init) == CONSTRUCTOR
/* Don't do this if the CONSTRUCTOR might contain something
if (args)
length = TREE_VEC_LENGTH (args);
- if (args && TREE_CODE (TREE_VEC_ELT (args, 0)) == TREE_VEC)
+ if (args && length && TREE_CODE (TREE_VEC_ELT (args, 0)) == TREE_VEC)
{
/* We have nested template args. We want the innermost template
argument list. */
return targetm.mangle_decl_assembler_name (decl, id);
}
-/* If DECL is a mangling alias, remove it from the symbol table and return
- true; otherwise return false. */
+/* If DECL is an implicit mangling alias, return its symtab node; otherwise
+ return NULL. */
-bool
-maybe_remove_implicit_alias (tree decl)
+static symtab_node *
+decl_implicit_alias_p (tree decl)
{
if (DECL_P (decl) && DECL_ARTIFICIAL (decl)
&& DECL_IGNORED_P (decl)
{
symtab_node *n = symtab_node::get (decl);
if (n && n->cpp_implicit_alias)
- {
- n->remove();
- return true;
- }
+ return n;
+ }
+ return NULL;
+}
+
+/* If DECL is a mangling alias, remove it from the symbol table and return
+ true; otherwise return false. */
+
+bool
+maybe_remove_implicit_alias (tree decl)
+{
+ if (symtab_node *n = decl_implicit_alias_p (decl))
+ {
+ n->remove();
+ return true;
}
return false;
}
id = get_mangled_id (decl);
SET_DECL_ASSEMBLER_NAME (decl, id);
- if (G.need_abi_warning
+ if (id != DECL_NAME (decl)
+ && !DECL_REALLY_EXTERN (decl)
/* Don't do this for a fake symbol we aren't going to emit anyway. */
&& TREE_CODE (decl) != TYPE_DECL
&& !DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (decl)
&& !DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P (decl))
{
+ bool set = false;
+
+ /* Check IDENTIFIER_GLOBAL_VALUE before setting to avoid redundant
+ errors from multiple definitions. */
+ tree d = IDENTIFIER_GLOBAL_VALUE (id);
+ if (!d || decl_implicit_alias_p (d))
+ {
+ set = true;
+ SET_IDENTIFIER_GLOBAL_VALUE (id, decl);
+ }
+
+ if (!G.need_abi_warning)
+ return;
+
/* If the mangling will change in the future, emit an alias with the
future mangled name for forward-compatibility. */
int save_ver;
tree id2;
- SET_IDENTIFIER_GLOBAL_VALUE (id, decl);
- if (IDENTIFIER_GLOBAL_VALUE (id) != decl)
- inform (DECL_SOURCE_LOCATION (decl), "a later -fabi-version= (or =0) "
- "avoids this error with a change in mangling");
+ if (!set)
+ {
+ SET_IDENTIFIER_GLOBAL_VALUE (id, decl);
+ inform (DECL_SOURCE_LOCATION (decl), "a later -fabi-version= (or "
+ "=0) avoids this error with a change in mangling");
+ }
save_ver = flag_abi_version;
flag_abi_version = flag_abi_compat_version;
maybe_warn_cpp0x (CPP0X_AUTO);
if (parser->auto_is_implicit_function_template_parm_p)
{
+ /* The 'auto' might be the placeholder return type for a function decl
+ with trailing return type. */
+ bool have_trailing_return_fn_decl = false;
+ if (cp_lexer_peek_nth_token (parser->lexer, 2)->type
+ == CPP_OPEN_PAREN)
+ {
+ cp_parser_parse_tentatively (parser);
+ cp_lexer_consume_token (parser->lexer);
+ cp_lexer_consume_token (parser->lexer);
+ if (cp_parser_skip_to_closing_parenthesis (parser,
+ /*recovering*/false,
+ /*or_comma*/false,
+ /*consume_paren*/true))
+ have_trailing_return_fn_decl
+ = cp_lexer_next_token_is (parser->lexer, CPP_DEREF);
+ cp_parser_abort_tentative_parse (parser);
+ }
+
+ if (have_trailing_return_fn_decl)
+ {
+ type = make_auto ();
+ break;
+ }
+
if (cxx_dialect >= cxx14)
type = synthesize_implicit_template_parm (parser);
else
static tree current_template_args (void);
static tree tsubst_template_parm (tree, tree, tsubst_flags_t);
static tree instantiate_alias_template (tree, tree, tsubst_flags_t);
+static bool complex_alias_template_p (const_tree tmpl);
/* Make the current scope suitable for access checking when we are
processing T. T can be FUNCTION_DECL for instantiated function
|| TREE_CODE (parm) == TEMPLATE_DECL)
parm = TREE_TYPE (parm);
if (TREE_CODE (parm) == TEMPLATE_TYPE_PARM
+ || TREE_CODE (parm) == BOUND_TEMPLATE_TEMPLATE_PARM
|| TREE_CODE (parm) == TEMPLATE_TEMPLATE_PARM)
parm = TEMPLATE_TYPE_PARM_INDEX (parm);
gcc_assert (TREE_CODE (parm) == TEMPLATE_PARM_INDEX);
if (TREE_CODE (parm) == TEMPLATE_DECL)
DECL_CONTEXT (parm) = tmpl;
}
+
+ if (TREE_CODE (decl) == TYPE_DECL
+ && TYPE_DECL_ALIAS_P (decl)
+ && complex_alias_template_p (tmpl))
+ TEMPLATE_DECL_COMPLEX_ALIAS_P (tmpl) = true;
}
/* The DECL_TI_ARGS of DECL contains full set of arguments referring
return false;
}
-/* Return TRUE iff T is a specialization of an alias template with
+/* An alias template is complex from a SFINAE perspective if a template-id
+ using that alias can be ill-formed when the expansion is not, as with
+ the void_t template. We determine this by checking whether the
+ expansion for the alias template uses all its template parameters. */
+
+struct uses_all_template_parms_data
+{
+ int level;
+ bool *seen;
+};
+
+static int
+uses_all_template_parms_r (tree t, void *data_)
+{
+ struct uses_all_template_parms_data &data
+ = *(struct uses_all_template_parms_data*)data_;
+ tree idx = get_template_parm_index (t);
+
+ if (TEMPLATE_PARM_LEVEL (idx) == data.level)
+ data.seen[TEMPLATE_PARM_IDX (idx)] = true;
+ return 0;
+}
+
+static bool
+complex_alias_template_p (const_tree tmpl)
+{
+ struct uses_all_template_parms_data data;
+ tree pat = DECL_ORIGINAL_TYPE (DECL_TEMPLATE_RESULT (tmpl));
+ tree parms = DECL_TEMPLATE_PARMS (tmpl);
+ data.level = TMPL_PARMS_DEPTH (parms);
+ int len = TREE_VEC_LENGTH (INNERMOST_TEMPLATE_PARMS (parms));
+ data.seen = XALLOCAVEC (bool, len);
+ for (int i = 0; i < len; ++i)
+ data.seen[i] = false;
+
+ for_each_template_parm (pat, uses_all_template_parms_r, &data, NULL, true);
+ for (int i = 0; i < len; ++i)
+ if (!data.seen[i])
+ return true;
+ return false;
+}
+
+/* Return TRUE iff T is a specialization of a complex alias template with
dependent template-arguments. */
bool
dependent_alias_template_spec_p (const_tree t)
{
return (alias_template_specialization_p (t)
+ && TEMPLATE_DECL_COMPLEX_ALIAS_P (DECL_TI_TEMPLATE (TYPE_NAME (t)))
&& (any_dependent_template_arguments_p
(INNERMOST_TEMPLATE_ARGS (TYPE_TI_ARGS (t)))));
}
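For reference, the kind of alias the new check classifies as "complex" (my own sketch, not from the patch): void_t discards its arguments, so its expansion does not use all of its template parameters, and a substitution failure inside a template-id that uses it must still be treated as a SFINAE-relevant dependency.

#include <type_traits>

template<class...> using void_t = void;    // expansion ignores every parameter

template<class, class = void>
struct has_type : std::false_type { };

template<class T>                          // chosen only if T::type is well-formed
struct has_type<T, void_t<typename T::type>> : std::true_type { };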
case TYPEOF_TYPE:
case UNDERLYING_TYPE:
if (pfd->include_nondeduced_p
- && for_each_template_parm (TYPE_FIELDS (t), fn, data,
+ && for_each_template_parm (TYPE_VALUES_RAW (t), fn, data,
pfd->visited,
pfd->include_nondeduced_p))
return error_mark_node;
{
*p = TREE_CHAIN (t);
TREE_CHAIN (t) = NULL_TREE;
- if ((flag_openmp || flag_cilkplus)
+ if ((flag_openmp || flag_openmp_simd || flag_cilkplus)
&& is_attribute_p ("omp declare simd",
get_attribute_name (t))
&& TREE_VALUE (t))
it now. */
push_deferring_access_checks (dk_no_deferred);
+ int saved_unevaluated_operand = cp_unevaluated_operand;
+ int saved_inhibit_evaluation_warnings = c_inhibit_evaluation_warnings;
+
fn_context = decl_function_context (TYPE_MAIN_DECL (type));
/* Also avoid push_to_top_level for a lambda in an NSDMI. */
if (!fn_context && LAMBDA_TYPE_P (type) && TYPE_CLASS_SCOPE_P (type))
fn_context = error_mark_node;
if (!fn_context)
push_to_top_level ();
+ else
+ {
+ cp_unevaluated_operand = 0;
+ c_inhibit_evaluation_warnings = 0;
+ }
/* Use #pragma pack from the template context. */
saved_maximum_field_alignment = maximum_field_alignment;
maximum_field_alignment = TYPE_PRECISION (pattern);
}
}
+ if (fn_context)
+ {
+ /* Restore these before substituting into the lambda capture
+ initializers. */
+ cp_unevaluated_operand = saved_unevaluated_operand;
+ c_inhibit_evaluation_warnings = saved_inhibit_evaluation_warnings;
+ }
+
if (tree expr = CLASSTYPE_LAMBDA_EXPR (type))
{
tree decl = lambda_function (type);
return extract_fnparm_pack (NULL_TREE, &spec_parm);
}
-/* Return true iff the Ith element of the argument pack ARG_PACK is a
- pack expansion. */
+/* Return 1 if the Ith element of the argument pack ARG_PACK is a
+ pack expansion with no extra args, 2 if it has extra args, or 0
+ if it is not a pack expansion. */
-static bool
+static int
argument_pack_element_is_expansion_p (tree arg_pack, int i)
{
tree vec = ARGUMENT_PACK_ARGS (arg_pack);
if (i >= TREE_VEC_LENGTH (vec))
- return false;
- return PACK_EXPANSION_P (TREE_VEC_ELT (vec, i));
+ return 0;
+ tree elt = TREE_VEC_ELT (vec, i);
+ if (!PACK_EXPANSION_P (elt))
+ return 0;
+ if (PACK_EXPANSION_EXTRA_ARGS (elt))
+ return 2;
+ return 1;
}
{
tree arg = TREE_VALUE (parm_pack);
- if (argument_pack_element_is_expansion_p (arg, i))
+ int exp = argument_pack_element_is_expansion_p (arg, i);
+ if (exp == 2)
+ /* We can't substitute a pack expansion with extra args into
+ our pattern. */
+ return true;
+ else if (exp)
has_expansion_arg = true;
else
has_non_expansion_arg = true;
{
/* T is a static data member or namespace-scope entity.
We have to substitute into namespace-scope variables
- (even though such entities are never templates) because
- of cases like:
+ (not just variable templates) because of cases like:
template <class T> void f() { extern T t; }
initializer is present. We mimic the non-template
processing here. */
DECL_EXTERNAL (r) = 1;
+ if (DECL_NAMESPACE_SCOPE_P (t))
+ DECL_NOT_REALLY_EXTERN (r) = 1;
- register_specialization (r, gen_tmpl, argvec, false, hash);
DECL_TEMPLATE_INFO (r) = build_template_info (tmpl, argvec);
SET_DECL_IMPLICIT_INSTANTIATION (r);
+ register_specialization (r, gen_tmpl, argvec, false, hash);
}
else if (!cp_unevaluated_operand)
register_local_specialization (r, t);
if (TREE_STATIC (r))
rest_of_decl_compilation (r, toplevel_bindings_p (),
at_eof);
- else if (decl_constant_var_p (r))
- /* A use of a local constant decays to its value.
- FIXME update for core DR 696. */
- r = scalar_constant_value (r);
+ else
+ r = process_outer_var_ref (r, complain);
}
}
/* Remember this for subsequent uses. */
}
}
+/* Helper function for tsubst_omp_clauses, used for instantiation of
+ OMP_CLAUSE_DECL of clauses; it also handles OpenMP array sections
+ represented as a TREE_LIST. */
+
+static tree
+tsubst_omp_clause_decl (tree decl, tree args, tsubst_flags_t complain,
+ tree in_decl)
+{
+ if (TREE_CODE (decl) == TREE_LIST)
+ {
+ tree low_bound
+ = tsubst_expr (TREE_PURPOSE (decl), args, complain, in_decl,
+ /*integral_constant_expression_p=*/false);
+ tree length = tsubst_expr (TREE_VALUE (decl), args, complain, in_decl,
+ /*integral_constant_expression_p=*/false);
+ tree chain = tsubst_omp_clause_decl (TREE_CHAIN (decl), args, complain,
+ in_decl);
+ if (TREE_PURPOSE (decl) == low_bound
+ && TREE_VALUE (decl) == length
+ && TREE_CHAIN (decl) == chain)
+ return decl;
+ return tree_cons (low_bound, length, chain);
+ }
+ return tsubst_copy (decl, args, complain, in_decl);
+}
+
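A small example of the array sections this helper substitutes (my own illustration, not part of the patch; requires -fopenmp): in a template, the low bound and length of a section such as a[0:N/2] are dependent expressions handled by tsubst_expr, while the base is handled by tsubst_copy.

template <typename T, int N>
void
scale_lower_half (T (&a)[N])
{
  /* The map clause carries an array section whose low bound and length
     are instantiation-dependent.  */
#pragma omp target map(tofrom: a[0:N/2])
  for (int i = 0; i < N / 2; ++i)
    a[i] *= 2;
}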
/* Like tsubst_copy, but specifically for OpenMP clauses. */
static tree
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_COPYPRIVATE:
+ case OMP_CLAUSE_UNIFORM:
+ OMP_CLAUSE_DECL (nc) = tsubst_copy (OMP_CLAUSE_DECL (oc), args,
+ complain, in_decl);
+ break;
+ case OMP_CLAUSE_DEPEND:
+ case OMP_CLAUSE_FROM:
+ case OMP_CLAUSE_TO:
+ case OMP_CLAUSE_MAP:
+ OMP_CLAUSE_DECL (nc)
+ = tsubst_omp_clause_decl (OMP_CLAUSE_DECL (oc), args, complain,
+ in_decl);
+ break;
case OMP_CLAUSE_IF:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_SCHEDULE:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_FINAL:
- case OMP_CLAUSE_DEPEND:
- case OMP_CLAUSE_FROM:
- case OMP_CLAUSE_TO:
- case OMP_CLAUSE_UNIFORM:
- case OMP_CLAUSE_MAP:
case OMP_CLAUSE_DEVICE:
case OMP_CLAUSE_DIST_SCHEDULE:
case OMP_CLAUSE_NUM_TEAMS:
else
gcc_assert (identifier_p (placeholder));
}
- OMP_CLAUSE_OPERAND (nc, 0)
- = tsubst_expr (OMP_CLAUSE_OPERAND (oc, 0), args, complain,
- in_decl, /*integral_constant_expression_p=*/false);
+ OMP_CLAUSE_DECL (nc) = tsubst_copy (OMP_CLAUSE_DECL (oc), args,
+ complain, in_decl);
break;
case OMP_CLAUSE_LINEAR:
case OMP_CLAUSE_ALIGNED:
- OMP_CLAUSE_OPERAND (nc, 0)
- = tsubst_expr (OMP_CLAUSE_OPERAND (oc, 0), args, complain,
- in_decl, /*integral_constant_expression_p=*/false);
+ OMP_CLAUSE_DECL (nc) = tsubst_copy (OMP_CLAUSE_DECL (oc), args,
+ complain, in_decl);
OMP_CLAUSE_OPERAND (nc, 1)
= tsubst_expr (OMP_CLAUSE_OPERAND (oc, 1), args, complain,
in_decl, /*integral_constant_expression_p=*/false);
break;
-
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_DEFAULT:
r = build_cxx_call (wrap, 0, NULL, tf_warning_or_error);
}
else if (outer_automatic_var_p (r))
- r = process_outer_var_ref (r, complain);
+ {
+ r = process_outer_var_ref (r, complain);
+ if (is_capture_proxy (r))
+ register_local_specialization (r, t);
+ }
if (TREE_CODE (TREE_TYPE (t)) != REFERENCE_TYPE)
/* If the original type was a reference, we'll be wrapped in
names a dependent type. */
if (TREE_CODE (type) == TYPENAME_TYPE)
return true;
+
+ /* An alias template specialization can be dependent even if the
+ resulting type is not. */
+ if (dependent_alias_template_spec_p (type))
+ return true;
+
/* -- a cv-qualified type where the cv-unqualified type is
dependent.
No code is necessary for this bullet; the code below handles
&& (any_dependent_template_arguments_p
(INNERMOST_TEMPLATE_ARGS (CLASSTYPE_TI_ARGS (type)))))
return true;
- /* For an alias template specialization, check the arguments both to the
- class template and the alias template. */
- else if (alias_template_specialization_p (type)
- && (any_dependent_template_arguments_p
- (INNERMOST_TEMPLATE_ARGS (TYPE_TI_ARGS (type)))))
- return true;
/* All TYPEOF_TYPEs, DECLTYPE_TYPEs, and UNDERLYING_TYPEs are
dependent; if the argument of the `typeof' expression is not
&& variable_template_p (DECL_TI_TEMPLATE (expression)))
return any_dependent_template_arguments_p (DECL_TI_ARGS (expression));
+ /* Always dependent, on the number of arguments if nothing else. */
+ if (TREE_CODE (expression) == EXPR_PACK_EXPANSION)
+ return true;
+
if (TREE_TYPE (expression) == unknown_type_node)
{
if (TREE_CODE (expression) == ADDR_EXPR)
if (TREE_CODE (expression) == SCOPE_REF)
return false;
- /* Always dependent, on the number of arguments if nothing else. */
- if (TREE_CODE (expression) == EXPR_PACK_EXPANSION)
- return true;
-
if (BASELINK_P (expression))
{
if (BASELINK_OPTYPE (expression)
if (cp_unevaluated_operand)
/* It's not a use (3.2) if we're in an unevaluated context. */
return decl;
+ if (decl == error_mark_node)
+ return decl;
tree context = DECL_CONTEXT (decl);
tree containing_function = current_function_decl;
form, so wait until instantiation time. */
return decl;
else if (decl_constant_var_p (decl))
- return scalar_constant_value (decl);
+ {
+ tree t = maybe_constant_value (convert_from_reference (decl));
+ if (TREE_CONSTANT (t))
+ return t;
+ }
}
if (parsing_nsdmi ())
return no_linkage_check (TYPE_PTRMEM_CLASS_TYPE (t), relaxed_p);
case METHOD_TYPE:
- r = no_linkage_check (TYPE_METHOD_BASETYPE (t), relaxed_p);
- if (r)
- return r;
- /* Fall through. */
case FUNCTION_TYPE:
{
- tree parm;
- for (parm = TYPE_ARG_TYPES (t);
+ tree parm = TYPE_ARG_TYPES (t);
+ if (TREE_CODE (t) == METHOD_TYPE)
+ /* The 'this' pointer isn't interesting; a method has the same
+ linkage (or lack thereof) as its enclosing class. */
+ parm = TREE_CHAIN (parm);
+ for (;
parm && parm != void_list_node;
parm = TREE_CHAIN (parm))
{
*walk_subtrees = 0;
return NULL_TREE;
}
+ if (TREE_CODE (*tp) == SAVE_EXPR)
+ {
+ t = *tp;
+ splay_tree_node n = splay_tree_lookup (target_remap,
+ (splay_tree_key) t);
+ if (n)
+ {
+ *tp = (tree)n->value;
+ *walk_subtrees = 0;
+ }
+ else
+ {
+ copy_tree_r (tp, walk_subtrees, NULL);
+ splay_tree_insert (target_remap,
+ (splay_tree_key)t,
+ (splay_tree_value)*tp);
+ /* Make sure we don't remap an already-remapped SAVE_EXPR. */
+ splay_tree_insert (target_remap,
+ (splay_tree_key)*tp,
+ (splay_tree_value)*tp);
+ }
+ return NULL_TREE;
+ }
/* Make a copy of this node. */
t = copy_tree_r (tp, walk_subtrees, NULL);
switch (TREE_CODE (*t))
{
case PLACEHOLDER_EXPR:
- gcc_assert (same_type_ignoring_top_level_qualifiers_p
- (TREE_TYPE (*t), TREE_TYPE (obj)));
- *t = obj;
- *walk_subtrees = false;
- break;
-
- case TARGET_EXPR:
- /* Don't mess with placeholders in an unrelated object. */
- *walk_subtrees = false;
+ {
+ tree x = obj;
+ for (; !(same_type_ignoring_top_level_qualifiers_p
+ (TREE_TYPE (*t), TREE_TYPE (x)));
+ x = TREE_OPERAND (x, 0))
+ gcc_assert (TREE_CODE (x) == COMPONENT_REF);
+ *t = x;
+ *walk_subtrees = false;
+ }
break;
case CONSTRUCTOR:
if (TREE_CODE (*valp) == CONSTRUCTOR
&& AGGREGATE_TYPE_P (type))
{
- subob = build_ctor_subob_ref (ce->index, type, obj);
+ /* If we're looking at the initializer for OBJ, then build
+ a sub-object reference. If we're looking at an
+ initializer for another object, just pass OBJ down. */
+ if (same_type_ignoring_top_level_qualifiers_p
+ (TREE_TYPE (*t), TREE_TYPE (obj)))
+ subob = build_ctor_subob_ref (ce->index, type, obj);
if (TREE_CODE (*valp) == TARGET_EXPR)
valp = &TARGET_EXPR_INITIAL (*valp);
}
name, *node);
goto fail;
}
- else if (CLASSTYPE_TEMPLATE_INSTANTIATION (*node))
+ else if (CLASS_TYPE_P (*node)
+ && CLASSTYPE_TEMPLATE_INSTANTIATION (*node))
{
warning (OPT_Wattributes, "ignoring %qE attribute applied to "
"template instantiation %qT", name, *node);
goto fail;
}
- else if (CLASSTYPE_TEMPLATE_SPECIALIZATION (*node))
+ else if (CLASS_TYPE_P (*node)
+ && CLASSTYPE_TEMPLATE_SPECIALIZATION (*node))
{
warning (OPT_Wattributes, "ignoring %qE attribute applied to "
"template specialization %qT", name, *node);
return error_mark_node;
}
- if (!lvalue_p (array))
- {
- if (complain & tf_error)
- pedwarn (loc, OPT_Wpedantic,
- "ISO C++ forbids subscripting non-lvalue array");
- else
- return error_mark_node;
- }
-
/* Note in C++ it is valid to subscript a `register' array, since
it is valid to take the address of something with that
storage specification. */
TREE_CONSTANT (decl) = const_init && decl_maybe_constant_var_p (decl);
}
- if (cxx_dialect >= cxx14)
+ if (cxx_dialect >= cxx14 && CLASS_TYPE_P (strip_array_types (type)))
/* Handle aggregate NSDMI in non-constant initializers, too. */
value = replace_placeholders (value, decl);
debug_nothing_tree, /* begin_function */
#endif
debug_nothing_int, /* end_function */
+ debug_nothing_tree, /* register_main_translation_unit */
dbxout_function_decl,
dbxout_global_decl, /* global_decl */
dbxout_type_decl, /* type_decl */
xcoffout_end_epilogue,
debug_nothing_tree, /* begin_function */
xcoffout_end_function,
+ debug_nothing_tree, /* register_main_translation_unit */
debug_nothing_tree, /* function_decl */
dbxout_global_decl, /* global_decl */
dbxout_type_decl, /* type_decl */
debug_nothing_int_charstar, /* end_epilogue */
debug_nothing_tree, /* begin_function */
debug_nothing_int, /* end_function */
+ debug_nothing_tree, /* register_main_translation_unit */
debug_nothing_tree, /* function_decl */
debug_nothing_tree, /* global_decl */
debug_nothing_tree_int, /* type_decl */
/* Record end of function. LINE is highest line number in function. */
void (* end_function) (unsigned int line);
+ /* Register UNIT as the main translation unit. Called from front-ends when
+ they create their main translation unit. */
+ void (* register_main_translation_unit) (tree);
+
/* Debug information for a function DECL. This might include the
function name (a symbol), its parameters, and the block that
makes up the function's body, and the local variables of the
static void dwarf2out_var_location (rtx_insn *);
static void dwarf2out_begin_function (tree);
static void dwarf2out_end_function (unsigned int);
+static void dwarf2out_register_main_translation_unit (tree unit);
static void dwarf2out_set_name (tree, tree);
/* The debug hooks structure. */
dwarf2out_end_epilogue,
dwarf2out_begin_function,
dwarf2out_end_function, /* end_function */
+ dwarf2out_register_main_translation_unit,
dwarf2out_function_decl, /* function_decl */
dwarf2out_global_decl,
dwarf2out_type_decl, /* type_decl */
!= TYPE_NAME (TREE_TYPE (decl))));
}
+/* Looks up the DIE for a context. */
+
+static inline dw_die_ref
+lookup_context_die (tree context)
+{
+ if (context)
+ {
+ /* Find the DIE that represents this context. */
+ if (TYPE_P (context))
+ {
+ context = TYPE_MAIN_VARIANT (context);
+ dw_die_ref ctx = lookup_type_die (context);
+ if (!ctx)
+ return NULL;
+ return strip_naming_typedef (context, ctx);
+ }
+ else
+ return lookup_decl_die (context);
+ }
+ return comp_unit_die ();
+}
+
/* Returns the DIE for a context. */
static inline dw_die_ref
maybe_at_text_label_p = false;
}
+/* Temporary holder for dwarf2out_register_main_translation_unit. Used to let
+ front-ends register a translation unit even before dwarf2out_init is
+ called. */
+static tree main_translation_unit = NULL_TREE;
+
+/* Hook called by front-ends after they have built their main translation
+ unit. Associate comp_unit_die with UNIT. */
+
+static void
+dwarf2out_register_main_translation_unit (tree unit)
+{
+ gcc_assert (TREE_CODE (unit) == TRANSLATION_UNIT_DECL
+ && main_translation_unit == NULL_TREE);
+ main_translation_unit = unit;
+ /* If dwarf2out_init has not been called yet, it will perform the association
+ itself looking at main_translation_unit. */
+ if (decl_die_table != NULL)
+ equate_decl_number_to_die (unit, comp_unit_die ());
+}
+
/* Add OPCODE+VAL as an entry at the end of the opcode array in TABLE. */
static void
/* Make sure the line number table for .text always exists. */
text_section_line_info = new_line_info_table ();
text_section_line_info->end_label = text_end_label;
+
+ /* If front-ends already registered a main translation unit but we were not
+ ready to perform the association, do this now. */
+ if (main_translation_unit != NULL_TREE)
+ equate_decl_number_to_die (main_translation_unit, comp_unit_die ());
}
/* Called before compile () starts outputting functions, variables
{
tree tdecl = SYMBOL_REF_DECL (a->dw_attr_val.v.val_addr);
dw_die_ref tdie = lookup_decl_die (tdecl);
+ dw_die_ref cdie;
if (tdie == NULL
&& DECL_EXTERNAL (tdecl)
- && DECL_ABSTRACT_ORIGIN (tdecl) == NULL_TREE)
+ && DECL_ABSTRACT_ORIGIN (tdecl) == NULL_TREE
+ && (cdie = lookup_context_die (DECL_CONTEXT (tdecl))))
{
- force_decl_die (tdecl);
- tdie = lookup_decl_die (tdecl);
+ /* Creating a full DIE for tdecl is overly expensive and, during
+ the LTO phase, even wrong at this point: it can end up
+ generating new type DIEs that we never output, which makes
+ optimize_external_refs crash. */
+ tdie = new_die (DW_TAG_subprogram, cdie, NULL_TREE);
+ add_AT_flag (tdie, DW_AT_external, 1);
+ add_AT_flag (tdie, DW_AT_declaration, 1);
+ add_linkage_attr (tdie, tdecl);
+ add_name_and_src_coords_attributes (tdie, tdecl);
+ equate_decl_number_to_die (tdecl, tdie);
}
if (tdie)
{
/* Stores, for each insn code, a bitmap that has bits on for each possible
alternative. */
-static int *insn_alternatives;
+static uint64_t *insn_alternatives;
/* Used to simplify expressions. */
ATTRIBUTE_PRINTF_2;
static rtx make_numeric_value (int);
static struct attr_desc *find_attr (const char **, int);
-static rtx mk_attr_alt (int);
+static rtx mk_attr_alt (uint64_t);
static char *next_comma_elt (const char **);
static rtx insert_right_side (enum rtx_code, rtx, rtx, int, int);
static rtx copy_boolean (rtx);
if (attr == NULL)
{
if (! strcmp (XSTR (exp, 0), "alternative"))
- return mk_attr_alt (1 << atoi (XSTR (exp, 1)));
+ return mk_attr_alt (((uint64_t) 1) << atoi (XSTR (exp, 1)));
else
fatal ("unknown attribute `%s' in EQ_ATTR", XSTR (exp, 0));
}
name_ptr = XSTR (exp, 1);
while ((p = next_comma_elt (&name_ptr)) != NULL)
- set |= 1 << atoi (p);
+ set |= ((uint64_t) 1) << atoi (p);
return mk_attr_alt (set);
}
get_attr_value (rtx value, struct attr_desc *attr, int insn_code)
{
struct attr_value *av;
- int num_alt = 0;
+ uint64_t num_alt = 0;
value = make_canonical (attr, value);
if (compares_alternatives_p (value))
This routine is passed an expression and either AND or IOR. It returns a
bitmask indicating which alternatives are mentioned within EXP. */
-static int
+static uint64_t
compute_alternative_mask (rtx exp, enum rtx_code code)
{
const char *string;
return 0;
if (string[1] == 0)
- return 1 << (string[0] - '0');
- return 1 << atoi (string);
+ return ((uint64_t) 1) << (string[0] - '0');
+ return ((uint64_t) 1) << atoi (string);
}
/* Given I, a single-bit mask, return RTX to compare the `alternative'
attribute with the value represented by that bit. */
static rtx
-make_alternative_compare (int mask)
+make_alternative_compare (uint64_t mask)
{
return mk_attr_alt (mask);
}
in E. */
static rtx
-mk_attr_alt (int e)
+mk_attr_alt (uint64_t e)
{
rtx result = rtx_alloc (EQ_ATTR_ALT);
struct attr_value *av;
struct insn_ent *ie;
struct attr_value_list *iv;
- int i;
+ uint64_t i;
rtx newexp = exp;
bool left_alt, right_alt;
case EQ_ATTR:
if (XSTR (exp, 0) == alternative_name)
{
- newexp = mk_attr_alt (1 << atoi (XSTR (exp, 1)));
+ newexp = mk_attr_alt (((uint64_t) 1) << atoi (XSTR (exp, 1)));
break;
}
expand_delays ();
/* Make `insn_alternatives'. */
- insn_alternatives = oballocvec (int, insn_code_number);
+ insn_alternatives = oballocvec (uint64_t, insn_code_number);
for (id = defs; id; id = id->next)
if (id->insn_code >= 0)
- insn_alternatives[id->insn_code] = (1 << id->num_alternatives) - 1;
+ insn_alternatives[id->insn_code]
+ = (((uint64_t) 1) << id->num_alternatives) - 1;
/* Make `insn_n_alternatives'. */
insn_n_alternatives = oballocvec (int, insn_code_number);
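The genattrtab hunks above, together with the recog.h change further below that raises MAX_RECOG_ALTERNATIVES to 35, only work if alternative masks are built in a 64-bit type: with a 32-bit int, shifting 1 left by 31 or more is undefined. A minimal standalone sketch (not GCC code) of the difference:

/* Standalone illustration: why alternative masks are built in uint64_t
   once more than 31 alternatives are allowed.  */
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  int alt = 35;                              /* an alternative number > 31 */
  /* With a 32-bit int, 1 << alt would be undefined behavior.  */
  uint64_t bit = (uint64_t) 1 << alt;        /* well-defined: bit 35 set */
  uint64_t all = ((uint64_t) 1 << 36) - 1;   /* mask covering 36 alternatives */
  printf ("bit = %#llx\n", (unsigned long long) bit);
  printf ("all = %#llx\n", (unsigned long long) all);
  return 0;
}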
return false;
tree orig_len = get_maxval_strlen (orig, 0);
- if (!orig_len)
+ if (!orig_len || TREE_CODE (orig_len) != INTEGER_CST)
return false;
/* We could expand this as
/* We scramble the CFG and loop structures a bit, clean up
appropriately. We really should incrementally update the
loop structures, in theory it shouldn't be that hard. */
+ free_dominance_info (CDI_POST_DOMINATORS);
if (cfg_altered)
{
free_dominance_info (CDI_DOMINATORS);
- free_dominance_info (CDI_POST_DOMINATORS);
loops_state_set (LOOPS_NEED_FIXUP);
return TODO_cleanup_cfg | TODO_update_ssa;
}
return gimplify_expr (arg_p, pre_p, NULL, test, fb);
}
-/* Don't fold inside offloading regions: it can break code by adding decl
- references that weren't in the source. We'll do it during omplower pass
- instead. */
+/* Don't fold inside offloading or taskreg regions: it can break code by
+ adding decl references that weren't in the source. We'll do it during
+ omplower pass instead. */
static bool
maybe_fold_stmt (gimple_stmt_iterator *gsi)
{
struct gimplify_omp_ctx *ctx;
for (ctx = gimplify_omp_ctxp; ctx; ctx = ctx->outer_context)
- if (ctx->region_type == ORT_TARGET)
+ if (ctx->region_type == ORT_TARGET
+ || (ctx->region_type & (ORT_PARALLEL | ORT_TASK)) != 0)
return false;
return fold_stmt (gsi);
}
ctx.offset_by (dst->value.ancestor.offset);
if (!ctx.useless_p ())
{
- vec_safe_grow_cleared (args->polymorphic_call_contexts,
- count);
- dst_ctx = ipa_get_ith_polymorhic_call_context (args, i);
+ if (!dst_ctx)
+ {
+ vec_safe_grow_cleared (args->polymorphic_call_contexts,
+ count);
+ dst_ctx = ipa_get_ith_polymorhic_call_context (args, i);
+ }
+
+ dst_ctx->combine_with (ctx);
}
- dst_ctx->combine_with (ctx);
}
if (src->agg.items
if (cnode->global.inlined_to)
body_needed_for_clonning.add (cnode->decl);
+ /* For instrumentation clones we always need the original
+ function node for proper LTO privatization. */
+ if (cnode->instrumentation_clone
+ && cnode->definition)
+ {
+ gcc_assert (cnode->instrumented_version || in_lto_p);
+ if (cnode->instrumented_version)
+ {
+ enqueue_node (cnode->instrumented_version, &first,
+ &reachable);
+ reachable.add (cnode->instrumented_version);
+ }
+ }
+
/* For non-inline clones, force their origins to the boundary and ensure
that body is not removed. */
while (cnode->clone_of)
}
else if (cnode->thunk.thunk_p)
enqueue_node (cnode->callees->callee, &first, &reachable);
-
+
/* If any reachable function has simd clones, mark them as
reachable as well. */
if (cnode->simd_clones)
for (reg = curr_static_id->hard_regs; reg != NULL; reg = reg->next)
if (reg->type == OP_OUT && ! reg->subreg_p)
bitmap_clear_bit (&live_regs, reg->regno);
+ if (curr_id->arg_hard_regs != NULL)
+ /* Make clobbered argument hard registers die. */
+ for (i = 0; (regno = curr_id->arg_hard_regs[i]) >= 0; i++)
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ bitmap_clear_bit (&live_regs, regno - FIRST_PSEUDO_REGISTER);
/* Mark each used value as live. */
for (reg = curr_id->regs; reg != NULL; reg = reg->next)
if (reg->type != OP_OUT
&& bitmap_bit_p (&check_only_regs, reg->regno))
bitmap_set_bit (&live_regs, reg->regno);
if (curr_id->arg_hard_regs != NULL)
- /* Make argument hard registers live. */
+ /* Make used argument hard registers live. */
for (i = 0; (regno = curr_id->arg_hard_regs[i]) >= 0; i++)
- if (bitmap_bit_p (&check_only_regs, regno))
+ if (regno < FIRST_PSEUDO_REGISTER
+ && bitmap_bit_p (&check_only_regs, regno))
bitmap_set_bit (&live_regs, regno);
/* It is quite important to remove dead move insns because it
means removing dead store. We don't need to process them for
}
}
}
+ /* Process clobbered call regs. */
+ if (curr_id->arg_hard_regs != NULL)
+ for (i = 0; (dst_regno = curr_id->arg_hard_regs[i]) >= 0; i++)
+ if (dst_regno >= FIRST_PSEUDO_REGISTER)
+ usage_insns[dst_regno - FIRST_PSEUDO_REGISTER].check
+ = -(int) INSN_UID (curr_insn);
if (! JUMP_P (curr_insn))
for (i = 0; i < to_inherit_num; i++)
if (inherit_reload_reg (true, to_inherit[i].regno,
add_next_usage_insn (src_regno, use_insn, reloads_num);
}
}
- /* Process call args. */
+ /* Process used call regs. */
if (curr_id->arg_hard_regs != NULL)
for (i = 0; (src_regno = curr_id->arg_hard_regs[i]) >= 0; i++)
if (src_regno < FIRST_PSEUDO_REGISTER)
duplication numbers: */
rtx **operand_loc; /* The operand locations, NULL if no operands. */
rtx **dup_loc; /* The dup locations, NULL if no dups. */
- /* Number of hard registers implicitly used in given call insn. The
- value can be NULL or points to array of the hard register numbers
- ending with a negative value. */
+ /* Number of hard registers implicitly used/clobbered in the given
+ call insn. The value can be NULL or point to an array of the
+ hard register numbers ending with a negative value. To
+ distinguish clobbered from used hard regs, clobbered hard regs
+ are incremented by FIRST_PSEUDO_REGISTER. */
int *arg_hard_regs;
/* Cached value of get_preferred_alternatives. */
alternative_mask preferred_alternatives;
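A standalone sketch (not LRA code) of the arg_hard_regs encoding just described: used hard regs are stored as-is, clobbered ones are stored offset by FIRST_PSEUDO_REGISTER, and the array ends with a negative value. The register numbers and the value 76 for FIRST_PSEUDO_REGISTER are illustrative stand-ins only; the real constant is target-specific.

#include <stdio.h>

#define FIRST_PSEUDO_REGISTER 76   /* illustrative stand-in */

int
main (void)
{
  /* Terminated by a negative value, as in lra_insn_recog_data.  */
  int arg_hard_regs[] = { 5, 4, FIRST_PSEUDO_REGISTER + 0, -1 };

  for (int i = 0; arg_hard_regs[i] >= 0; i++)
    {
      int regno = arg_hard_regs[i];
      if (regno >= FIRST_PSEUDO_REGISTER)
        printf ("hard reg %d is clobbered by the call\n",
                regno - FIRST_PSEUDO_REGISTER);
      else
        printf ("hard reg %d is used by the call\n", regno);
    }
  return 0;
}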
dst_regno = REGNO (SET_DEST (set));
if (dst_regno >= lra_constraint_new_regno_start
&& src_regno >= lra_constraint_new_regno_start)
- lra_create_copy (dst_regno, src_regno, freq);
+ {
+ /* It might still be an original (non-reload) insn with one
+ unused output and a constraint requiring the same reg to be
+ used for the input and output operands. In that case
+ dst_regno and src_regno have the same value, and we don't
+ need a misleading copy for it. */
+ if (dst_regno != src_regno)
+ lra_create_copy (dst_regno, src_regno, freq);
+ }
else if (dst_regno >= lra_constraint_new_regno_start)
{
if ((hard_regno = src_regno) >= FIRST_PSEUDO_REGISTER)
if (reg->type != OP_IN)
make_hard_regno_born (reg->regno, false);
+ if (curr_id->arg_hard_regs != NULL)
+ for (i = 0; (regno = curr_id->arg_hard_regs[i]) >= 0; i++)
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ /* It is a clobber. */
+ make_hard_regno_born (regno - FIRST_PSEUDO_REGISTER, false);
+
sparseset_copy (unused_set, start_living);
sparseset_clear (start_dying);
if (reg->type == OP_OUT && ! reg->early_clobber && ! reg->subreg_p)
make_hard_regno_dead (reg->regno);
+ if (curr_id->arg_hard_regs != NULL)
+ for (i = 0; (regno = curr_id->arg_hard_regs[i]) >= 0; i++)
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ /* It is a clobber. */
+ make_hard_regno_dead (regno - FIRST_PSEUDO_REGISTER);
+
if (call_p)
{
if (flag_ipa_ra)
/* Make argument hard registers live. Don't create conflict
of used REAL_PIC_OFFSET_TABLE_REGNUM and the pic pseudo. */
for (i = 0; (regno = curr_id->arg_hard_regs[i]) >= 0; i++)
- make_hard_regno_born (regno, true);
+ if (regno < FIRST_PSEUDO_REGISTER)
+ make_hard_regno_born (regno, true);
sparseset_and_compl (dead_set, start_living, start_dying);
data->arg_hard_regs = NULL;
if (CALL_P (insn))
{
+ bool use_p;
rtx link;
int n_hard_regs, regno, arg_hard_regs[FIRST_PSEUDO_REGISTER];
for (link = CALL_INSN_FUNCTION_USAGE (insn);
link != NULL_RTX;
link = XEXP (link, 1))
- if (GET_CODE (XEXP (link, 0)) == USE
+ if (((use_p = GET_CODE (XEXP (link, 0)) == USE)
+ || GET_CODE (XEXP (link, 0)) == CLOBBER)
&& REG_P (XEXP (XEXP (link, 0), 0)))
{
regno = REGNO (XEXP (XEXP (link, 0), 0));
[regno][GET_MODE (XEXP (XEXP (link, 0), 0))]) - 1;
i >= 0;
i--)
- arg_hard_regs[n_hard_regs++] = regno + i;
+ arg_hard_regs[n_hard_regs++]
+ = regno + i + (use_p ? 0 : FIRST_PSEUDO_REGISTER);
}
if (n_hard_regs != 0)
{
{
symtab_node *node = lto_symtab_encoder_deref (encoder, i);
+ /* IPA_REF_ALIAS and IPA_REF_CHKP references are always preserved
+ in the boundary. An alias node can't have other references and
+ can always be handled as if it were not in the boundary. */
if (!node->alias && !lto_symtab_encoder_in_partition_p (encoder, node))
- continue;
+ {
+ cgraph_node *cnode = dyn_cast <cgraph_node *> (node);
+ /* Output IPA_REF_CHKP reference. */
+ if (cnode
+ && cnode->instrumented_version
+ && !cnode->instrumentation_clone)
+ {
+ for (int i = 0; node->iterate_reference (i, ref); i++)
+ if (ref->use == IPA_REF_CHKP)
+ {
+ if (lto_symtab_encoder_lookup (encoder, ref->referred)
+ != LCC_NOT_FOUND)
+ {
+ int nref = lto_symtab_encoder_lookup (encoder, node);
+ streamer_write_gcov_count_stream (ob->main_stream, 1);
+ streamer_write_uhwi_stream (ob->main_stream, nref);
+ lto_output_ref (ob, ref, encoder);
+ }
+ break;
+ }
+ }
+ continue;
+ }
count = node->ref_list.nreferences ();
if (count)
case OPT_fwrapv:
case OPT_fopenmp:
case OPT_fopenacc:
+ case OPT_fcheck_pointer_bounds:
/* For selected options we can merge conservatively. */
for (j = 0; j < *decoded_options_count; ++j)
if ((*decoded_options)[j].opt_index == foption->opt_index)
case OPT_Ofast:
case OPT_Og:
case OPT_Os:
+ case OPT_fcheck_pointer_bounds:
break;
default:
}
}
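For context, a user-level test case of the kind -fcheck-pointer-bounds targets (illustrative only, not from the patch; it assumes an MPX-enabled toolchain and runtime, e.g. compiling with -fcheck-pointer-bounds -mmpx):

#include <stdlib.h>

int
main (void)
{
  int *p = malloc (10 * sizeof (int));
  p[10] = 1;   /* one element past the end: a bounds violation the
                  checker should flag at run time */
  free (p);
  return 0;
}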
-/* Mangle NODE symbol name into a local name.
- This is necessary to do
- 1) if two or more static vars of same assembler name
- are merged into single ltrans unit.
- 2) if previously static var was promoted hidden to avoid possible conflict
- with symbols defined out of the LTO world. */
+/* Helper for privatize_symbol_name. Mangle the symbol name of NODE
+ that is represented by DECL. */
static bool
-privatize_symbol_name (symtab_node *node)
+privatize_symbol_name_1 (symtab_node *node, tree decl)
{
- tree decl = node->decl;
- const char *name;
- cgraph_node *cnode = dyn_cast <cgraph_node *> (node);
-
- /* If we want to privatize instrumentation clone
- then we need to change original function name
- which is used via transparent alias chain. */
- if (cnode && cnode->instrumentation_clone)
- decl = cnode->orig_decl;
-
- name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
+ const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
if (must_not_rename (node, name))
return false;
symtab->change_decl_assembler_name (decl,
clone_function_name_1 (name,
"lto_priv"));
+
if (node->lto_file_data)
lto_record_renamed_decl (node->lto_file_data, name,
IDENTIFIER_POINTER
(DECL_ASSEMBLER_NAME (decl)));
+
+ if (symtab->dump_file)
+ fprintf (symtab->dump_file,
+ "Privatizing symbol name: %s -> %s\n",
+ name, IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)));
+
+ return true;
+}
+
+/* Mangle NODE symbol name into a local name.
+ This is necessary to do
+ 1) if two or more static vars of same assembler name
+ are merged into single ltrans unit.
+ 2) if previously static var was promoted hidden to avoid possible conflict
+ with symbols defined out of the LTO world. */
+
+static bool
+privatize_symbol_name (symtab_node *node)
+{
+ if (!privatize_symbol_name_1 (node, node->decl))
+ return false;
+
+ /* We could have changed a name which is the target of the
+ transparent alias chain of an instrumented function name.
+ Fix the alias chain if so. */
- if (cnode)
+ if (cgraph_node *cnode = dyn_cast <cgraph_node *> (node))
{
tree iname = NULL_TREE;
if (cnode->instrumentation_clone)
- iname = DECL_ASSEMBLER_NAME (cnode->decl);
+ {
+ /* If we want to privatize an instrumentation clone,
+ then we also need to privatize the original function. */
+ if (cnode->instrumented_version)
+ privatize_symbol_name (cnode->instrumented_version);
+ else
+ privatize_symbol_name_1 (cnode, cnode->orig_decl);
+ iname = DECL_ASSEMBLER_NAME (cnode->decl);
+ TREE_CHAIN (iname) = DECL_ASSEMBLER_NAME (cnode->orig_decl);
+ }
else if (cnode->instrumented_version
- && cnode->instrumented_version->orig_decl == decl)
- iname = DECL_ASSEMBLER_NAME (cnode->instrumented_version->decl);
-
- if (iname)
+ && cnode->instrumented_version->orig_decl == cnode->decl)
{
- gcc_assert (IDENTIFIER_TRANSPARENT_ALIAS (iname));
- TREE_CHAIN (iname) = DECL_ASSEMBLER_NAME (decl);
+ iname = DECL_ASSEMBLER_NAME (cnode->instrumented_version->decl);
+ TREE_CHAIN (iname) = DECL_ASSEMBLER_NAME (cnode->decl);
}
}
- if (symtab->dump_file)
- fprintf (symtab->dump_file,
- "Privatizing symbol name: %s -> %s\n",
- name, IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)));
+
return true;
}
/* If we are converting an integer to a floating-point that can
represent it exactly and back to an integer, we can skip the
floating-point conversion. */
- (if (inside_int && inter_float && final_int &&
+ (if (GIMPLE /* PR66211 */
+ && inside_int && inter_float && final_int &&
(unsigned) significand_size (TYPE_MODE (inter_type))
>= inside_prec - !inside_unsignedp)
(convert @0))))))
vec_safe_truncate (child_cfun->local_decls, dstidx);
/* Inform the callgraph about the new function. */
- DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
+ child_cfun->curr_properties = cfun->curr_properties;
+ child_cfun->has_simduid_loops |= cfun->has_simduid_loops;
+ child_cfun->has_force_vectorize_loops |= cfun->has_force_vectorize_loops;
cgraph_node::add_new_function (child_fn, true);
cgraph_node::get (child_fn)->parallelized_function = 1;
cfun->has_force_vectorize_loops = true;
}
}
+ else if (simduid)
+ cfun->has_simduid_loops = true;
}
vec_safe_truncate (child_cfun->local_decls, dstidx);
/* Inform the callgraph about the new function. */
- DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
+ child_cfun->curr_properties = cfun->curr_properties;
+ child_cfun->has_simduid_loops |= cfun->has_simduid_loops;
+ child_cfun->has_force_vectorize_loops |= cfun->has_force_vectorize_loops;
cgraph_node::add_new_function (child_fn, true);
#ifdef ENABLE_OFFLOADING
for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
lower_omp_1 (&gsi, ctx);
/* During gimplification, we haven't folded statements inside offloading
- regions (gimplify.c:maybe_fold_stmt); do that now. */
- if (target_nesting_level)
+ or taskreg regions (gimplify.c:maybe_fold_stmt); do that now. */
+ if (target_nesting_level || taskreg_nesting_level)
for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
fold_stmt (&gsi);
input_location = saved_location;
uniform args with __builtin_assume_aligned (arg_N(D), alignment)
lhs. Handle linear by adding PHIs. */
for (unsigned i = 0; i < node->simdclone->nargs; i++)
- if (node->simdclone->args[i].alignment
- && node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
- && (node->simdclone->args[i].alignment
- & (node->simdclone->args[i].alignment - 1)) == 0
- && TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
- == POINTER_TYPE)
+ if (node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
+ && (TREE_ADDRESSABLE (node->simdclone->args[i].orig_arg)
+ || !is_gimple_reg_type
+ (TREE_TYPE (node->simdclone->args[i].orig_arg))))
+ {
+ tree orig_arg = node->simdclone->args[i].orig_arg;
+ if (is_gimple_reg_type (TREE_TYPE (orig_arg)))
+ iter1 = make_ssa_name (TREE_TYPE (orig_arg));
+ else
+ {
+ iter1 = create_tmp_var_raw (TREE_TYPE (orig_arg));
+ gimple_add_tmp_var (iter1);
+ }
+ gsi = gsi_after_labels (entry_bb);
+ g = gimple_build_assign (iter1, orig_arg);
+ gsi_insert_before (&gsi, g, GSI_NEW_STMT);
+ gsi = gsi_after_labels (body_bb);
+ g = gimple_build_assign (orig_arg, iter1);
+ gsi_insert_before (&gsi, g, GSI_NEW_STMT);
+ }
+ else if (node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
+ && DECL_BY_REFERENCE (node->simdclone->args[i].orig_arg)
+ && TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
+ == REFERENCE_TYPE
+ && TREE_ADDRESSABLE
+ (TREE_TYPE (TREE_TYPE (node->simdclone->args[i].orig_arg))))
+ {
+ tree orig_arg = node->simdclone->args[i].orig_arg;
+ tree def = ssa_default_def (cfun, orig_arg);
+ if (def && !has_zero_uses (def))
+ {
+ iter1 = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (orig_arg)));
+ gimple_add_tmp_var (iter1);
+ gsi = gsi_after_labels (entry_bb);
+ g = gimple_build_assign (iter1, build_simple_mem_ref (def));
+ gsi_insert_before (&gsi, g, GSI_NEW_STMT);
+ gsi = gsi_after_labels (body_bb);
+ g = gimple_build_assign (build_simple_mem_ref (def), iter1);
+ gsi_insert_before (&gsi, g, GSI_NEW_STMT);
+ }
+ }
+ else if (node->simdclone->args[i].alignment
+ && node->simdclone->args[i].arg_type
+ == SIMD_CLONE_ARG_TYPE_UNIFORM
+ && (node->simdclone->args[i].alignment
+ & (node->simdclone->args[i].alignment - 1)) == 0
+ && TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
+ == POINTER_TYPE)
{
unsigned int alignment = node->simdclone->args[i].alignment;
tree orig_arg = node->simdclone->args[i].orig_arg;
== SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
{
tree orig_arg = node->simdclone->args[i].orig_arg;
- tree def = ssa_default_def (cfun, orig_arg);
gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
|| POINTER_TYPE_P (TREE_TYPE (orig_arg)));
- if (def && !has_zero_uses (def))
+ tree def = NULL_TREE;
+ if (TREE_ADDRESSABLE (orig_arg))
+ {
+ def = make_ssa_name (TREE_TYPE (orig_arg));
+ iter1 = make_ssa_name (TREE_TYPE (orig_arg));
+ iter2 = make_ssa_name (TREE_TYPE (orig_arg));
+ gsi = gsi_after_labels (entry_bb);
+ g = gimple_build_assign (def, orig_arg);
+ gsi_insert_before (&gsi, g, GSI_NEW_STMT);
+ }
+ else
+ {
+ def = ssa_default_def (cfun, orig_arg);
+ if (!def || has_zero_uses (def))
+ def = NULL_TREE;
+ else
+ {
+ iter1 = make_ssa_name (orig_arg);
+ iter2 = make_ssa_name (orig_arg);
+ }
+ }
+ if (def)
{
- iter1 = make_ssa_name (orig_arg);
- iter2 = make_ssa_name (orig_arg);
phi = create_phi_node (iter1, body_bb);
add_phi_arg (phi, def, preheader_edge, UNKNOWN_LOCATION);
add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
imm_use_iterator iter;
use_operand_p use_p;
gimple use_stmt;
- FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
- if (use_stmt == phi)
- continue;
- else
- FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
- SET_USE (use_p, iter1);
+ if (TREE_ADDRESSABLE (orig_arg))
+ {
+ gsi = gsi_after_labels (body_bb);
+ g = gimple_build_assign (orig_arg, iter1);
+ gsi_insert_before (&gsi, g, GSI_NEW_STMT);
+ }
+ else
+ FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
+ if (use_stmt == phi)
+ continue;
+ else
+ FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
+ SET_USE (use_p, iter1);
}
}
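An illustrative user-level input (not from the patch) for the case the hunk above handles when building a simd clone body: a uniform argument that is made addressable inside the function. Compile with -fopenmp or -fopenmp-simd.

#pragma omp declare simd uniform(scale) notinbranch
float
apply (float x, float scale)
{
  float *p = &scale;   /* taking the address makes 'scale' addressable */
  return x * *p;
}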
machine_mode selmode = GET_MODE (sel);
if (u == 2)
sel = expand_simple_binop (selmode, PLUS, sel, sel,
- sel, 0, OPTAB_DIRECT);
+ NULL, 0, OPTAB_DIRECT);
else
sel = expand_simple_binop (selmode, ASHIFT, sel,
GEN_INT (exact_log2 (u)),
- sel, 0, OPTAB_DIRECT);
+ NULL, 0, OPTAB_DIRECT);
gcc_assert (sel != NULL);
/* Broadcast the low byte each element into each of its bytes. */
PUSH_INSERT_PASSES_WITHIN (pass_tree_no_loop)
NEXT_PASS (pass_slp_vectorize);
POP_INSERT_PASSES ()
+ NEXT_PASS (pass_simduid_cleanup);
NEXT_PASS (pass_lower_vector_ssa);
NEXT_PASS (pass_cse_reciprocals);
NEXT_PASS (pass_reassoc);
if (CALL_P (insn))
{
rtx link;
+ HARD_REG_SET used_regs;
+
+ get_call_reg_set_usage (insn, &used_regs, call_used_reg_set);
for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
- if (call_used_regs[r])
+ if (TEST_HARD_REG_BIT (used_regs, r))
{
reg_state[r].use_index = RELOAD_COMBINE_MAX_USES;
reg_state[r].store_ruid = reload_combine_ruid;
unknown values. */
if (CALL_P (insn))
{
+ rtx link;
+
for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--)
{
if (call_used_regs[i])
/* Reset the information about this register. */
reg_mode[i] = VOIDmode;
}
+
+ for (link = CALL_INSN_FUNCTION_USAGE (insn); link;
+ link = XEXP (link, 1))
+ {
+ rtx setuse = XEXP (link, 0);
+ rtx usage_rtx = XEXP (setuse, 0);
+ if (GET_CODE (setuse) == CLOBBER
+ && REG_P (usage_rtx))
+ {
+ unsigned int end_regno = END_REGNO (usage_rtx);
+ for (unsigned int r = REGNO (usage_rtx); r < end_regno; ++r)
+ /* Reset the information about this register. */
+ reg_mode[r] = VOIDmode;
+ }
+ }
}
}
return changed;
/* Random number that should be large enough for all purposes. Also define
a type that has at least MAX_RECOG_ALTERNATIVES + 1 bits, with the extra
bit giving an invalid value that can be used to mean "uninitialized". */
-#define MAX_RECOG_ALTERNATIVES 30
-typedef unsigned int alternative_mask;
+#define MAX_RECOG_ALTERNATIVES 35
+typedef uint64_t alternative_mask;
/* A mask of all alternatives. */
#define ALL_ALTERNATIVES ((alternative_mask) -1)
sdbout_end_epilogue, /* end_epilogue */
sdbout_begin_function, /* begin_function */
sdbout_end_function, /* end_function */
+ debug_nothing_tree, /* register_main_translation_unit */
debug_nothing_tree, /* function_decl */
sdbout_global_decl, /* global_decl */
sdbout_symbol, /* type_decl */
/* Optimize comparisons with upper and lower bounds. */
if (HWI_COMPUTABLE_MODE_P (mode)
- && CONST_INT_P (trueop1))
+ && CONST_INT_P (trueop1)
+ && !side_effects_p (trueop0))
{
int sign;
unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
}
/* Optimize integer comparisons with zero. */
- if (trueop1 == const0_rtx)
+ if (trueop1 == const0_rtx && !side_effects_p (trueop0))
{
/* Some addresses are known to be nonzero. We don't know
their sign, but equality comparisons are known. */
}
/* Optimize comparison of ABS with zero. */
- if (trueop1 == CONST0_RTX (mode)
+ if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
&& (GET_CODE (trueop0) == ABS
|| (GET_CODE (trueop0) == FLOAT_EXTEND
&& GET_CODE (XEXP (trueop0, 0)) == ABS)))
if (flag_syntax_only || flag_wpa)
return;
+
+ /* Reset maximum_field_alignment: it can be adjusted by #pragma pack,
+ and it shouldn't influence any types built by the middle-end
+ from now on (like gcov_info_type). */
+ maximum_field_alignment = initial_max_fld_align * BITS_PER_UNIT;
ggc_protect_identifiers = false;
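A standalone illustration (not GCC code) of why maximum_field_alignment has to be reset here: any struct laid out while a #pragma pack is in effect, including compiler-generated ones such as gcov_info_type, gets the packed layout.

#include <stdio.h>

struct normal { char c; double d; };

#pragma pack(1)
struct packed { char c; double d; };
#pragma pack()

int
main (void)
{
  printf ("sizeof (struct normal) = %zu\n", sizeof (struct normal)); /* typically 16 */
  printf ("sizeof (struct packed) = %zu\n", sizeof (struct packed)); /* 9 */
  return 0;
}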
so we can correctly initialize debug output. */
no_backend = lang_hooks.post_options (&main_input_filename);
- /* Set default values for parameters relation to the Scalar Reduction
- of Aggregates passes (SRA and IP-SRA). We must do this here, rather
- than in opts.c:default_options_optimization as historically these
- tuning heuristics have been based on MOVE_RATIO, which on some
- targets requires other symbols from the backend. */
- maybe_set_param_value
- (PARAM_SRA_MAX_SCALARIZATION_SIZE_SPEED,
- get_move_ratio (true) * UNITS_PER_WORD,
- global_options.x_param_values, global_options_set.x_param_values);
- maybe_set_param_value
- (PARAM_SRA_MAX_SCALARIZATION_SIZE_SIZE,
- get_move_ratio (false) * UNITS_PER_WORD,
- global_options.x_param_values, global_options_set.x_param_values);
-
/* Some machines may reject certain combinations of options. */
targetm.target_option.override ();
return bndval;
}
+/* Build a GIMPLE_CALL identical to CALL but skipping bounds
+ arguments. */
+
+gcall *
+chkp_copy_call_skip_bounds (gcall *call)
+{
+ bitmap bounds;
+ unsigned i;
+
+ bitmap_obstack_initialize (NULL);
+ bounds = BITMAP_ALLOC (NULL);
+
+ for (i = 0; i < gimple_call_num_args (call); i++)
+ if (POINTER_BOUNDS_P (gimple_call_arg (call, i)))
+ bitmap_set_bit (bounds, i);
+
+ if (!bitmap_empty_p (bounds))
+ call = gimple_call_copy_skip_args (call, bounds);
+ gimple_call_set_with_bounds (call, false);
+
+ BITMAP_FREE (bounds);
+ bitmap_obstack_release (NULL);
+
+ return call;
+}
+
+/* Redirect edge E to the correct node according to call_stmt.
+ Return true if bounds removal from call_stmt should be done
+ instead of redirection. */
+
+bool
+chkp_redirect_edge (cgraph_edge *e)
+{
+ bool instrumented = false;
+ tree decl = e->callee->decl;
+
+ if (e->callee->instrumentation_clone
+ || chkp_function_instrumented_p (decl))
+ instrumented = true;
+
+ if (instrumented
+ && !gimple_call_with_bounds_p (e->call_stmt))
+ e->redirect_callee (cgraph_node::get_create (e->callee->orig_decl));
+ else if (!instrumented
+ && gimple_call_with_bounds_p (e->call_stmt)
+ && !chkp_gimple_call_builtin_p (e->call_stmt, BUILT_IN_CHKP_BNDCL)
+ && !chkp_gimple_call_builtin_p (e->call_stmt, BUILT_IN_CHKP_BNDCU)
+ && !chkp_gimple_call_builtin_p (e->call_stmt, BUILT_IN_CHKP_BNDSTX))
+ {
+ if (e->callee->instrumented_version)
+ e->redirect_callee (e->callee->instrumented_version);
+ else
+ {
+ tree args = TYPE_ARG_TYPES (TREE_TYPE (decl));
+ /* Avoid bounds removal if all args will be removed. */
+ if (!args || TREE_VALUE (args) != void_type_node)
+ return true;
+ else
+ gimple_call_set_with_bounds (e->call_stmt, false);
+ }
+ }
+
+ return false;
+}
+
/* Mark statement S to not be instrumented. */
static void
chkp_mark_stmt (gimple s)
extern void chkp_expand_bounds_reset_for_mem (tree mem, tree ptr);
extern tree chkp_insert_retbnd_call (tree bndval, tree retval,
gimple_stmt_iterator *gsi);
+extern gcall *chkp_copy_call_skip_bounds (gcall *call);
+extern bool chkp_redirect_edge (cgraph_edge *e);
#endif /* GCC_TREE_CHKP_H */
base, memoff);
MR_DEPENDENCE_CLIQUE (ref) = MR_DEPENDENCE_CLIQUE (old);
MR_DEPENDENCE_BASE (ref) = MR_DEPENDENCE_BASE (old);
+ DR_UNCONSTRAINED_BASE (dr) = true;
access_fns.safe_push (access_fn);
}
}
offset/overlap based analysis but have to rely on points-to
information only. */
if (TREE_CODE (addr_a) == MEM_REF
- && TREE_CODE (TREE_OPERAND (addr_a, 0)) == SSA_NAME)
+ && (DR_UNCONSTRAINED_BASE (a)
+ || TREE_CODE (TREE_OPERAND (addr_a, 0)) == SSA_NAME))
{
/* For true dependences we can apply TBAA. */
if (flag_strict_aliasing
build_fold_addr_expr (addr_b));
}
else if (TREE_CODE (addr_b) == MEM_REF
- && TREE_CODE (TREE_OPERAND (addr_b, 0)) == SSA_NAME)
+ && (DR_UNCONSTRAINED_BASE (b)
+ || TREE_CODE (TREE_OPERAND (addr_b, 0)) == SSA_NAME))
{
/* For true dependences we can apply TBAA. */
if (flag_strict_aliasing
/* A list of chrecs. Access functions of the indices. */
vec<tree> access_fns;
+
+ /* Whether BASE_OBJECT is an access representing the whole object
+ or whether the access could not be constrained. */
+ bool unconstrained_base;
};
struct dr_alias
#define DR_STMT(DR) (DR)->stmt
#define DR_REF(DR) (DR)->ref
#define DR_BASE_OBJECT(DR) (DR)->indices.base_object
+#define DR_UNCONSTRAINED_BASE(DR) (DR)->indices.unconstrained_base
#define DR_ACCESS_FNS(DR) (DR)->indices.access_fns
#define DR_ACCESS_FN(DR, I) DR_ACCESS_FNS (DR)[I]
#define DR_NUM_DIMENSIONS(DR) DR_ACCESS_FNS (DR).length ()
|| TREE_CODE (ref_base_b) == REALPART_EXPR)
ref_base_b = TREE_OPERAND (ref_base_b, 0);
- if (!operand_equal_p (ref_base_a, ref_base_b, 0))
+ if (operand_equal_p (ref_base_a, ref_base_b, 0))
{
tree cb = bb_predicate (gimple_bb (DR_STMT (b)));
base_stmt = gsi_stmt (gsi);
}
- note = gimple_build_debug_bind (tracked_var, value, base_stmt);
+ note = gimple_build_debug_bind (tracked_var, unshare_expr (value), base_stmt);
if (bb)
{
if (info->context == target_context)
{
x = build_addr (info->frame_decl, target_context);
+ info->static_chain_added |= 1;
}
else
{
x = get_chain_decl (info);
+ info->static_chain_added |= 2;
for (i = info->outer; i->context != target_context; i = i->outer)
{
/* Make sure frame_decl gets created. */
(void) get_frame_type (info);
x = info->frame_decl;
+ info->static_chain_added |= 1;
}
else
{
x = get_chain_decl (info);
+ info->static_chain_added |= 2;
for (i = info->outer; i->context != target_context; i = i->outer)
{
(void) get_frame_type (info);
x = info->frame_decl;
i = info;
+ info->static_chain_added |= 1;
}
else
{
x = get_chain_decl (info);
+ info->static_chain_added |= 2;
for (i = info->outer; i->context != target_context; i = i->outer)
{
field = get_chain_field (i);
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TASK:
{
- tree save_local_var_chain;
+ tree save_local_var_chain = info->new_local_var_chain;
walk_gimple_op (stmt, convert_tramp_reference_op, wi);
- save_local_var_chain = info->new_local_var_chain;
info->new_local_var_chain = NULL;
+ char save_static_chain_added = info->static_chain_added;
+ info->static_chain_added = 0;
walk_body (convert_tramp_reference_stmt, convert_tramp_reference_op,
info, gimple_omp_body_ptr (stmt));
if (info->new_local_var_chain)
declare_vars (info->new_local_var_chain,
gimple_seq_first_stmt (gimple_omp_body (stmt)),
false);
+ for (int i = 0; i < 2; i++)
+ {
+ tree c, decl;
+ if ((info->static_chain_added & (1 << i)) == 0)
+ continue;
+ decl = i ? get_chain_decl (info) : info->frame_decl;
+ /* Don't add CHAIN.* or FRAME.* twice. */
+ for (c = gimple_omp_taskreg_clauses (stmt);
+ c;
+ c = OMP_CLAUSE_CHAIN (c))
+ if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
+ || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED)
+ && OMP_CLAUSE_DECL (c) == decl)
+ break;
+ if (c == NULL && gimple_code (stmt) != GIMPLE_OMP_TARGET)
+ {
+ c = build_omp_clause (gimple_location (stmt),
+ i ? OMP_CLAUSE_FIRSTPRIVATE
+ : OMP_CLAUSE_SHARED);
+ OMP_CLAUSE_DECL (c) = decl;
+ OMP_CLAUSE_CHAIN (c) = gimple_omp_taskreg_clauses (stmt);
+ gimple_omp_taskreg_set_clauses (stmt, c);
+ }
+ else if (c == NULL)
+ {
+ c = build_omp_clause (gimple_location (stmt),
+ OMP_CLAUSE_MAP);
+ OMP_CLAUSE_DECL (c) = decl;
+ OMP_CLAUSE_SET_MAP_KIND (c,
+ i ? GOMP_MAP_TO : GOMP_MAP_TOFROM);
+ OMP_CLAUSE_SIZE (c) = DECL_SIZE_UNIT (decl);
+ OMP_CLAUSE_CHAIN (c) = gimple_omp_target_clauses (stmt);
+ gimple_omp_target_set_clauses (as_a <gomp_target *> (stmt),
+ c);
+ }
+ }
info->new_local_var_chain = save_local_var_chain;
+ info->static_chain_added |= save_static_chain_added;
}
break;
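An illustrative GNU C input (not from the patch) for the situation the tree-nested.c hunk above handles: a nested function that needs the enclosing frame or static chain while being called from inside an OpenMP parallel region, which is why shared(FRAME.*) / firstprivate(CHAIN.*) clauses (or map clauses for target) are added. Compile with -fopenmp; nested functions are a GNU C extension.

#include <stdio.h>

void
outer (void)
{
  int x = 42;
  void inner (void) { printf ("%d\n", x); }   /* needs the static chain */

#pragma omp parallel num_threads (2)
  inner ();
}

int
main (void)
{
  outer ();
  return 0;
}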
extern gimple_opt_pass *make_pass_if_conversion (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_loop_distribution (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_vectorize (gcc::context *ctxt);
+extern gimple_opt_pass *make_pass_simduid_cleanup (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_slp_vectorize (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_complete_unroll (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_complete_unrolli (gcc::context *ctxt);
limit++;
evol = *evolution_of_loop;
- res = follow_ssa_edge
- (loop, SSA_NAME_DEF_STMT (rhs0), halting_phi, &evol, limit);
-
- if (res == t_true)
- *evolution_of_loop = add_to_evolution
+ evol = add_to_evolution
(loop->num,
chrec_convert (type, evol, at_stmt),
code, rhs1, at_stmt);
-
+ res = follow_ssa_edge
+ (loop, SSA_NAME_DEF_STMT (rhs0), halting_phi, &evol, limit);
+ if (res == t_true)
+ *evolution_of_loop = evol;
else if (res == t_false)
{
+ *evolution_of_loop = add_to_evolution
+ (loop->num,
+ chrec_convert (type, *evolution_of_loop, at_stmt),
+ code, rhs0, at_stmt);
res = follow_ssa_edge
(loop, SSA_NAME_DEF_STMT (rhs1), halting_phi,
evolution_of_loop, limit);
-
if (res == t_true)
- *evolution_of_loop = add_to_evolution
- (loop->num,
- chrec_convert (type, *evolution_of_loop, at_stmt),
- code, rhs0, at_stmt);
-
+ ;
else if (res == t_dont_know)
*evolution_of_loop = chrec_dont_know;
}
{
/* Match an assignment under the form:
"a = b + ...". */
+ *evolution_of_loop = add_to_evolution
+ (loop->num, chrec_convert (type, *evolution_of_loop,
+ at_stmt),
+ code, rhs1, at_stmt);
res = follow_ssa_edge
(loop, SSA_NAME_DEF_STMT (rhs0), halting_phi,
evolution_of_loop, limit);
if (res == t_true)
- *evolution_of_loop = add_to_evolution
- (loop->num, chrec_convert (type, *evolution_of_loop,
- at_stmt),
- code, rhs1, at_stmt);
-
+ ;
else if (res == t_dont_know)
*evolution_of_loop = chrec_dont_know;
}
{
/* Match an assignment under the form:
"a = ... + c". */
+ *evolution_of_loop = add_to_evolution
+ (loop->num, chrec_convert (type, *evolution_of_loop,
+ at_stmt),
+ code, rhs0, at_stmt);
res = follow_ssa_edge
(loop, SSA_NAME_DEF_STMT (rhs1), halting_phi,
evolution_of_loop, limit);
if (res == t_true)
- *evolution_of_loop = add_to_evolution
- (loop->num, chrec_convert (type, *evolution_of_loop,
- at_stmt),
- code, rhs0, at_stmt);
-
+ ;
else if (res == t_dont_know)
*evolution_of_loop = chrec_dont_know;
}
if (TREE_CODE (rhs1) == SSA_NAME)
limit++;
+ *evolution_of_loop = add_to_evolution
+ (loop->num, chrec_convert (type, *evolution_of_loop, at_stmt),
+ MINUS_EXPR, rhs1, at_stmt);
res = follow_ssa_edge (loop, SSA_NAME_DEF_STMT (rhs0), halting_phi,
evolution_of_loop, limit);
if (res == t_true)
- *evolution_of_loop = add_to_evolution
- (loop->num, chrec_convert (type, *evolution_of_loop, at_stmt),
- MINUS_EXPR, rhs1, at_stmt);
-
+ ;
else if (res == t_dont_know)
*evolution_of_loop = chrec_dont_know;
}
bitmap tmp = BITMAP_ALLOC (NULL);
bitmap_iterator bi;
unsigned i;
- unsigned max_scalarization_size
- = (optimize_function_for_size_p (cfun)
- ? PARAM_VALUE (PARAM_SRA_MAX_SCALARIZATION_SIZE_SIZE)
- : PARAM_VALUE (PARAM_SRA_MAX_SCALARIZATION_SIZE_SPEED))
- * BITS_PER_UNIT;
+ bool optimize_speed_p = !optimize_function_for_size_p (cfun);
+
+ enum compiler_param param = optimize_speed_p
+ ? PARAM_SRA_MAX_SCALARIZATION_SIZE_SPEED
+ : PARAM_SRA_MAX_SCALARIZATION_SIZE_SIZE;
+
+ /* If the user didn't set PARAM_SRA_MAX_SCALARIZATION_SIZE_<...>,
+ fall back to a target default. */
+ unsigned HOST_WIDE_INT max_scalarization_size
+ = global_options_set.x_param_values[param]
+ ? PARAM_VALUE (param)
+ : get_move_ratio (optimize_speed_p) * UNITS_PER_WORD;
+
+ max_scalarization_size *= BITS_PER_UNIT;
EXECUTE_IF_SET_IN_BITMAP (candidate_bitmap, 0, i, bi)
if (bitmap_bit_p (should_scalarize_away_bitmap, i)
if (e != e2)
{
cfg_altered = true;
- remove_edge (e2);
+ /* If we made a BB unconditionally exit a loop then this
+ transform alters the set of BBs in the loop. Schedule
+ a fixup. */
+ if (loop_exit_edge_p (bb->loop_father, e))
+ loops_state_set (LOOPS_NEED_FIXUP);
+ remove_edge (e2);
}
else
ei_next (&ei);
{
basic_block bb = gimple_bb (use_stmt);
edge te = find_taken_edge (bb, val);
+ if (!te)
+ continue;
+
edge_iterator ei;
edge e;
gimple_stmt_iterator gsi;
gimple_stmt_iterator gsi = gsi_for_stmt (elt->stmt);
gcall *stmt = gimple_build_call
(builtin_decl_implicit (BUILT_IN_UNREACHABLE), 0);
-
gimple_set_location (stmt, gimple_location (elt->stmt));
gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
+ split_block (gimple_bb (stmt), stmt);
changed = true;
if (dump_file && (dump_flags & TDF_DETAILS))
{
(OBJ_TYPE_REF_TOKEN (fn)),
context,
&final);
- if (dump_enabled_p ())
+ if (dump_file)
dump_possible_polymorphic_call_targets (dump_file,
obj_type_ref_class (fn),
tree_to_uhwi
{
/* Check if a pure SLP stmt has uses in non-SLP stmts. */
gcc_checking_assert (PURE_SLP_STMT (stmt_vinfo));
+ /* We always get the pattern stmt here, but for immediate
+ uses we have to use the LHS of the original stmt. */
+ gcc_checking_assert (!STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
+ if (STMT_VINFO_RELATED_STMT (stmt_vinfo))
+ stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
if (TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, gimple_op (stmt, 0))
- if (gimple_bb (use_stmt)
- && flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
- && (use_vinfo = vinfo_for_stmt (use_stmt))
- && !STMT_SLP_TYPE (use_vinfo)
- && (STMT_VINFO_RELEVANT (use_vinfo)
- || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (use_vinfo))
- || (STMT_VINFO_IN_PATTERN_P (use_vinfo)
- && STMT_VINFO_RELATED_STMT (use_vinfo)
- && !STMT_SLP_TYPE (vinfo_for_stmt
- (STMT_VINFO_RELATED_STMT (use_vinfo)))))
- && !(gimple_code (use_stmt) == GIMPLE_PHI
- && STMT_VINFO_DEF_TYPE (use_vinfo) == vect_reduction_def))
- stype = hybrid;
+ {
+ if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
+ continue;
+ use_vinfo = vinfo_for_stmt (use_stmt);
+ if (STMT_VINFO_IN_PATTERN_P (use_vinfo)
+ && STMT_VINFO_RELATED_STMT (use_vinfo))
+ use_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (use_vinfo));
+ if (!STMT_SLP_TYPE (use_vinfo)
+ && (STMT_VINFO_RELEVANT (use_vinfo)
+ || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (use_vinfo)))
+ && !(gimple_code (use_stmt) == GIMPLE_PHI
+ && STMT_VINFO_DEF_TYPE (use_vinfo) == vect_reduction_def))
+ stype = hybrid;
+ }
}
if (stype == hybrid)
into their corresponding constants. */
static void
-adjust_simduid_builtins (hash_table<simduid_to_vf> **htab)
+adjust_simduid_builtins (hash_table<simduid_to_vf> *htab)
{
basic_block bb;
gcc_assert (TREE_CODE (arg) == SSA_NAME);
simduid_to_vf *p = NULL, data;
data.simduid = DECL_UID (SSA_NAME_VAR (arg));
- if (*htab)
- p = (*htab)->find (&data);
- if (p)
- vf = p->vf;
+ if (htab)
+ {
+ p = htab->find (&data);
+ if (p)
+ vf = p->vf;
+ }
switch (ifn)
{
case IFN_GOMP_SIMD_VF:
walk_gimple_op (use_stmt, note_simd_array_uses_cb, &wi);
}
}
+
+/* Shrink arrays with "omp simd array" attribute to the corresponding
+ vectorization factor. */
+
+static void
+shrink_simd_arrays
+ (hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab,
+ hash_table<simduid_to_vf> *simduid_to_vf_htab)
+{
+ for (hash_table<simd_array_to_simduid>::iterator iter
+ = simd_array_to_simduid_htab->begin ();
+ iter != simd_array_to_simduid_htab->end (); ++iter)
+ if ((*iter)->simduid != -1U)
+ {
+ tree decl = (*iter)->decl;
+ int vf = 1;
+ if (simduid_to_vf_htab)
+ {
+ simduid_to_vf *p = NULL, data;
+ data.simduid = (*iter)->simduid;
+ p = simduid_to_vf_htab->find (&data);
+ if (p)
+ vf = p->vf;
+ }
+ tree atype
+ = build_array_type_nelts (TREE_TYPE (TREE_TYPE (decl)), vf);
+ TREE_TYPE (decl) = atype;
+ relayout_decl (decl);
+ }
+
+ delete simd_array_to_simduid_htab;
+}
\f
/* A helper function to free data refs. */
/* Bail out if there are no loops. */
if (vect_loops_num <= 1)
- {
- if (cfun->has_simduid_loops)
- adjust_simduid_builtins (&simduid_to_vf_htab);
- return 0;
- }
+ return 0;
if (cfun->has_simduid_loops)
note_simd_array_uses (&simd_array_to_simduid_htab);
/* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE} builtins. */
if (cfun->has_simduid_loops)
- adjust_simduid_builtins (&simduid_to_vf_htab);
+ adjust_simduid_builtins (simduid_to_vf_htab);
/* Shrink any "omp array simd" temporary arrays to the
actual vectorization factors. */
if (simd_array_to_simduid_htab)
- {
- for (hash_table<simd_array_to_simduid>::iterator iter
- = simd_array_to_simduid_htab->begin ();
- iter != simd_array_to_simduid_htab->end (); ++iter)
- if ((*iter)->simduid != -1U)
- {
- tree decl = (*iter)->decl;
- int vf = 1;
- if (simduid_to_vf_htab)
- {
- simduid_to_vf *p = NULL, data;
- data.simduid = (*iter)->simduid;
- p = simduid_to_vf_htab->find (&data);
- if (p)
- vf = p->vf;
- }
- tree atype
- = build_array_type_nelts (TREE_TYPE (TREE_TYPE (decl)), vf);
- TREE_TYPE (decl) = atype;
- relayout_decl (decl);
- }
-
- delete simd_array_to_simduid_htab;
- }
- delete simduid_to_vf_htab;
- simduid_to_vf_htab = NULL;
+ shrink_simd_arrays (simd_array_to_simduid_htab, simduid_to_vf_htab);
+ delete simduid_to_vf_htab;
+ cfun->has_simduid_loops = false;
if (num_vectorized_loops > 0)
{
}
+/* Entry point to the simduid cleanup pass. */
+
+namespace {
+
+const pass_data pass_data_simduid_cleanup =
+{
+ GIMPLE_PASS, /* type */
+ "simduid", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ TV_NONE, /* tv_id */
+ ( PROP_ssa | PROP_cfg ), /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
+};
+
+class pass_simduid_cleanup : public gimple_opt_pass
+{
+public:
+ pass_simduid_cleanup (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_simduid_cleanup, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ opt_pass * clone () { return new pass_simduid_cleanup (m_ctxt); }
+ virtual bool gate (function *fun) { return fun->has_simduid_loops; }
+ virtual unsigned int execute (function *);
+
+}; // class pass_simduid_cleanup
+
+unsigned int
+pass_simduid_cleanup::execute (function *fun)
+{
+ hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
+
+ note_simd_array_uses (&simd_array_to_simduid_htab);
+
+ /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE} builtins. */
+ adjust_simduid_builtins (NULL);
+
+ /* Shrink any "omp array simd" temporary arrays to the
+ actual vectorization factors. */
+ if (simd_array_to_simduid_htab)
+ shrink_simd_arrays (simd_array_to_simduid_htab, NULL);
+ fun->has_simduid_loops = false;
+ return 0;
+}
+
+} // anon namespace
+
+gimple_opt_pass *
+make_pass_simduid_cleanup (gcc::context *ctxt)
+{
+ return new pass_simduid_cleanup (ctxt);
+}
+
+
/* Entry point to basic block SLP phase. */
namespace {
--- /dev/null
+/* A typesafe wrapper around libiberty's splay-tree.h.
+ Copyright (C) 2015 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_TYPED_SPLAY_TREE_H
+#define GCC_TYPED_SPLAY_TREE_H
+
+#include "splay-tree.h"
+
+/* Typesafe wrapper around libiberty's splay-tree.h. */
+template <typename KEY_TYPE, typename VALUE_TYPE>
+class typed_splay_tree
+{
+ public:
+ typedef KEY_TYPE key_type;
+ typedef VALUE_TYPE value_type;
+
+ typedef int (*compare_fn) (key_type, key_type);
+ typedef void (*delete_key_fn) (key_type);
+ typedef void (*delete_value_fn) (value_type);
+
+ typed_splay_tree (compare_fn,
+ delete_key_fn,
+ delete_value_fn);
+ ~typed_splay_tree ();
+
+ value_type lookup (key_type k);
+ value_type predecessor (key_type k);
+ value_type successor (key_type k);
+ void insert (key_type k, value_type v);
+
+ private:
+ static value_type node_to_value (splay_tree_node node);
+
+ private:
+ ::splay_tree m_inner;
+};
+
+/* Constructor for typed_splay_tree <K, V>. */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+inline typed_splay_tree<KEY_TYPE, VALUE_TYPE>::
+ typed_splay_tree (compare_fn compare_fn,
+ delete_key_fn delete_key_fn,
+ delete_value_fn delete_value_fn)
+{
+ m_inner = splay_tree_new ((splay_tree_compare_fn)compare_fn,
+ (splay_tree_delete_key_fn)delete_key_fn,
+ (splay_tree_delete_value_fn)delete_value_fn);
+}
+
+/* Destructor for typed_splay_tree <K, V>. */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+inline typed_splay_tree<KEY_TYPE, VALUE_TYPE>::
+ ~typed_splay_tree ()
+{
+ splay_tree_delete (m_inner);
+}
+
+/* Look up KEY, returning its value if present, and NULL
+ otherwise. */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+inline VALUE_TYPE
+typed_splay_tree<KEY_TYPE, VALUE_TYPE>::lookup (key_type key)
+{
+ splay_tree_node node = splay_tree_lookup (m_inner, (splay_tree_key)key);
+ return node_to_value (node);
+}
+
+/* Return the immediate predecessor of KEY, or NULL if there is no
+ predecessor. KEY need not be present in the tree. */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+inline VALUE_TYPE
+typed_splay_tree<KEY_TYPE, VALUE_TYPE>::predecessor (key_type key)
+{
+ splay_tree_node node = splay_tree_predecessor (m_inner, (splay_tree_key)key);
+ return node_to_value (node);
+}
+
+/* Return the immediate successor of KEY, or NULL if there is no
+ successor. KEY need not be present in the tree. */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+inline VALUE_TYPE
+typed_splay_tree<KEY_TYPE, VALUE_TYPE>::successor (key_type k)
+{
+ splay_tree_node node = splay_tree_successor (m_inner, (splay_tree_key)k);
+ return node_to_value (node);
+}
+
+/* Insert a new node (associating KEY with VALUE). If a
+ previous node with the indicated KEY exists, its data is replaced
+ with the new value. */
+
+template <typename KEY_TYPE, typename VALUE_TYPE>
+inline void
+typed_splay_tree<KEY_TYPE, VALUE_TYPE>::insert (key_type key,
+ value_type value)
+{
+ splay_tree_insert (m_inner,
+ (splay_tree_key)key,
+ (splay_tree_value)value);
+}
+
+/* Internal function for converting from splay_tree_node to
+ VALUE_TYPE. */
+template <typename KEY_TYPE, typename VALUE_TYPE>
+inline VALUE_TYPE
+typed_splay_tree<KEY_TYPE, VALUE_TYPE>::node_to_value (splay_tree_node node)
+{
+ if (node)
+ return (value_type)node->value;
+ else
+ return 0;
+}
+
+#endif /* GCC_TYPED_SPLAY_TREE_H */
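A usage sketch for the new wrapper (illustrative only; it assumes the header lands as typed-splay-tree.h and that libiberty's splay-tree.h is on the include path, as it is inside the GCC tree). Keys and values are cast to splay_tree_key / splay_tree_value internally, so integer keys and pointer values work directly.

#include "typed-splay-tree.h"
#include <stdio.h>

static int
compare_ints (int a, int b)
{
  return a - b;
}

int
main (void)
{
  typed_splay_tree<int, const char *> names (compare_ints, NULL, NULL);

  names.insert (1, "one");
  names.insert (2, "two");
  names.insert (3, "three");

  printf ("lookup (2)      -> %s\n", names.lookup (2));       /* "two" */
  printf ("predecessor (3) -> %s\n", names.predecessor (3));  /* "two" */
  printf ("successor (1)   -> %s\n", names.successor (1));    /* "two" */
  printf ("lookup (42)     -> %p\n", (void *) names.lookup (42)); /* NULL */
  return 0;
}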
unsigned HOST_WIDE_INT rounded ATTRIBUTE_UNUSED)
{
#if defined ASM_OUTPUT_ALIGNED_DECL_LOCAL
- int align = symtab_node::get (decl)->definition_alignment ();
+ unsigned int align = symtab_node::get (decl)->definition_alignment ();
ASM_OUTPUT_ALIGNED_DECL_LOCAL (asm_out_file, decl, name,
size, align);
return true;
#elif defined ASM_OUTPUT_ALIGNED_LOCAL
- int align = symtab_node::get (decl)->definition_alignment ();
+ unsigned int align = symtab_node::get (decl)->definition_alignment ();
ASM_OUTPUT_ALIGNED_LOCAL (asm_out_file, name, size, align);
return true;
#else
vmsdbgout_end_epilogue,
vmsdbgout_begin_function,
vmsdbgout_end_function,
+ debug_nothing_tree, /* register_main_translation_unit */
vmsdbgout_decl,
vmsdbgout_global_decl,
vmsdbgout_type_decl, /* type_decl */
;;
i[34567]86-*-freebsd*)
tmake_file="${tmake_file} i386/t-freebsd i386/t-crtstuff"
+ md_unwind_header=i386/freebsd-unwind.h
;;
x86_64-*-freebsd*)
tmake_file="${tmake_file} i386/t-freebsd i386/t-crtstuff"
+ md_unwind_header=i386/freebsd-unwind.h
;;
i[34567]86-*-netbsdelf*)
;;
#include "libgomp.h"
#include "oacc-int.h"
#include "openacc.h"
+#include "plugin/plugin-host.h"
#include <assert.h>
#include <stdlib.h>
#include <strings.h>
int
acc_on_device (acc_device_t dev)
{
- if (acc_get_device_type () == acc_device_host_nonshm)
+ struct goacc_thread *thr = goacc_thread ();
+
+ /* We only want to appear to be the "host_nonshm" plugin from "offloaded"
+ code -- i.e. within a parallel region. Test a flag set by the
+ openacc_parallel hook of the host_nonshm plugin to determine that. */
+ if (acc_get_device_type () == acc_device_host_nonshm
+ && thr && thr->target_tls
+ && ((struct nonshm_thread *)thr->target_tls)->nonshm_exec)
return dev == acc_device_host_nonshm || dev == acc_device_not_host;
- /* Just rely on the compiler builtin. */
- return __builtin_acc_on_device (dev);
+ /* For OpenACC, libgomp is only built for the host, so this is sufficient. */
+ return dev == acc_device_host || dev == acc_device_none;
}
ialias (acc_on_device)
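A user-level OpenACC illustration (not part of the patch) of what the revised acc_on_device reports; with the host_nonshm device the answer now differs between code outside and inside a parallel region, as described in the comment above.

#include <openacc.h>
#include <stdio.h>

int
main (void)
{
  int on_host = acc_on_device (acc_device_host);
  printf ("outside parallel region: on host? %d\n", on_host);

#pragma acc parallel copyout(on_host)
  {
    on_host = acc_on_device (acc_device_host);
  }
  printf ("inside parallel region:  on host? %d\n", on_host);
  return 0;
}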
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
+#include <stdbool.h>
#ifdef HOST_NONSHM_PLUGIN
#define STATIC
#define SELF "host: "
#endif
+#ifdef HOST_NONSHM_PLUGIN
+#include "plugin-host.h"
+#endif
+
STATIC const char *
GOMP_OFFLOAD_get_name (void)
{
void *targ_mem_desc __attribute__ ((unused)))
{
#ifdef HOST_NONSHM_PLUGIN
+ struct nonshm_thread *thd = GOMP_PLUGIN_acc_thread ();
+ thd->nonshm_exec = true;
fn (devaddrs);
+ thd->nonshm_exec = false;
#else
fn (hostaddrs);
#endif
GOMP_OFFLOAD_openacc_create_thread_data (int ord
__attribute__ ((unused)))
{
+#ifdef HOST_NONSHM_PLUGIN
+ struct nonshm_thread *thd
+ = GOMP_PLUGIN_malloc (sizeof (struct nonshm_thread));
+ thd->nonshm_exec = false;
+ return thd;
+#else
return NULL;
+#endif
}
STATIC void
-GOMP_OFFLOAD_openacc_destroy_thread_data (void *tls_data
- __attribute__ ((unused)))
+GOMP_OFFLOAD_openacc_destroy_thread_data (void *tls_data)
{
+#ifdef HOST_NONSHM_PLUGIN
+ free (tls_data);
+#endif
}
--- /dev/null
+/* OpenACC Runtime Library: acc_device_host, acc_device_host_nonshm.
+
+ Copyright (C) 2015 Free Software Foundation, Inc.
+
+ Contributed by Mentor Embedded.
+
+ This file is part of the GNU Offloading and Multi Processing Library
+ (libgomp).
+
+ Libgomp is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef PLUGIN_HOST_H
+#define PLUGIN_HOST_H
+
+struct nonshm_thread
+{
+ bool nonshm_exec;
+};
+
+#endif
int n;
CUresult r;
+ /* PR libgomp/65099: Currently, we only support offloading in 64-bit
+ configurations. */
+ if (sizeof (void *) != 8)
+ return 0;
+
/* This function will be called before the plugin has been initialized in
order to enumerate available devices, but CUDA API routines can't be used
until cuInit has been called. Just call it now (but don't yet do any
+2015-07-16 Release Manager
+
+ * GCC 5.2.0 released.
+
+2015-07-03 Carlos Sánchez de La Lama <csanchezdll@gmail.com>
+
+ PR target/52482
+ * config/powerpc/sjlj.S: Port to Xcode 2.5.
+
2015-04-22 Release Manager
* GCC 5.1.0 released.
bl \name
.endm
#elif defined(_CALL_DARWIN)
-.macro FUNC name
+.macro FUNC
.globl _$0
_$0:
.endmacro
-.macro END name
+.macro END
.endmacro
-.macro HIDDEN name
+.macro HIDDEN
.private_extern _$0
.endmacro
-.macro CALL name
+.macro CALL
bl _$0
.endmacro
# ifdef __ppc64__
# std::codecvt_byname
_ZNSt14codecvt_bynameI[cw]c11__mbstate_tEC[12]EPKc[jmy];
_ZNSt14codecvt_bynameI[cw]c11__mbstate_tED*;
+#if defined (_WIN32) && !defined (__CYGWIN__)
+ _ZNSt14codecvt_bynameI[cw]ciE[CD]*;
+#endif
# std::collate
_ZNSt7collateI[cw]*;
_ZNKSt8time_getI[cw]St19istreambuf_iteratorI[cw]St11char_traitsI[cw]EEE6do_getES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tmcc;
# codecvt<char16_t, char, mbstate_t>, codecvt<char32_t, char, mbstate_t>
- _ZNKSt7codecvtID[is]c11__mbstate_t*;
- _ZNSt7codecvtID[is]c11__mbstate_t*;
- _ZT[ISV]St7codecvtID[is]c11__mbstate_tE;
+ _ZNKSt7codecvtID[is]c*;
+ _ZNSt7codecvtID[is]c*;
+ _ZT[ISV]St7codecvtID[is]c*E;
extern "C++"
{
typedef _Alloc<_Tp, _Args...> __type;
};
- template<typename _Ptr, typename _Tp>
- using __alloc_rebind = typename __alloctr_rebind<_Ptr, _Tp>::__type;
+ template<typename _Alloc, typename _Tp>
+ using __alloc_rebind = typename __alloctr_rebind<_Alloc, _Tp>::__type;
/**
* @brief Uniform interface to all allocator types.
/**
* @brief Default constructor creates an empty string.
*/
- basic_string() _GLIBCXX_NOEXCEPT
+ basic_string()
+#if __cplusplus >= 201103L
+ noexcept(is_nothrow_default_constructible<_Alloc>::value)
+#endif
: _M_dataplus(_M_local_data())
{ _M_set_length(0); }
// wstring_convert implementation -*- C++ -*-
-// Copyright (C) 2012 Free Software Foundation, Inc.
+// Copyright (C) 2015 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
* @{
*/
+_GLIBCXX_BEGIN_NAMESPACE_CXX11
/// String conversions
template<typename _Codecvt, typename _Elem = wchar_t,
typename _Wide_alloc = allocator<_Elem>,
_M_conv(const _InChar* __first, const _InChar* __last,
const _OutStr* __err, _MemFn __memfn)
{
+ auto __outstr = __err ? _OutStr(__err->get_allocator()) : _OutStr();
+
+ if (__first == __last)
+ {
+ _M_count = 0;
+ return __outstr;
+ }
+
if (!_M_with_cvtstate)
_M_state = state_type();
- auto __outstr = __err ? _OutStr(__err->get_allocator()) : _OutStr();
size_t __outchars = 0;
auto __next = __first;
- const auto __maxlen = _M_cvt->max_length();
+ const auto __maxlen = _M_cvt->max_length() + 1;
codecvt_base::result __result;
do
{
- __outstr.resize(__outstr.size() + (__last - __next) + __maxlen);
+ __outstr.resize(__outstr.size() + (__last - __next) * __maxlen);
auto __outnext = &__outstr.front() + __outchars;
auto const __outlast = &__outstr.back() + 1;
__result = ((*_M_cvt).*__memfn)(_M_state, __next, __last, __next,
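
The _M_conv changes make wstring_convert short-circuit empty input (no facet call, converted() == 0) and grow the output buffer by max_length() + 1 units per remaining input element instead of a single max_length() pad. A small sketch of the user-visible behaviour, assuming C++11 and the <codecvt> header:

#include <codecvt>
#include <locale>
#include <string>
#include <cassert>

int main()
{
  std::wstring_convert<std::codecvt_utf8<char32_t>, char32_t> conv;

  // Empty input now short-circuits: the facet is never invoked and
  // converted() reports zero.
  std::u32string empty = conv.from_bytes(std::string());
  assert(empty.empty() && conv.converted() == 0);

  // Non-empty conversions behave as before; internally the output is
  // resized by (remaining input) * (max_length() + 1) per iteration.
  std::u32string s = conv.from_bytes(u8"\u00e9");   // U+00E9, two UTF-8 bytes
  assert(s.size() == 1 && s[0] == U'\u00e9');
  return 0;
}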
bool _M_with_cvtstate = false;
bool _M_with_strings = false;
};
+_GLIBCXX_END_NAMESPACE_CXX11
/// Buffer conversions
template<typename _Codecvt, typename _Elem = wchar_t,
: _M_buf(__bytebuf), _M_cvt(__pcvt), _M_state(__state)
{
if (!_M_cvt)
- __throw_logic_error("wstring_convert");
+ __throw_logic_error("wbuffer_convert");
_M_always_noconv = _M_cvt->always_noconv();
__fill_a(_Tp* __first, _Tp* __last, const _Tp& __c)
{
const _Tp __tmp = __c;
- __builtin_memset(__first, static_cast<unsigned char>(__tmp),
- __last - __first);
+ if (const size_t __len = __last - __first)
+ __builtin_memset(__first, static_cast<unsigned char>(__tmp), __len);
}
/**
static bool
equal(const _Tp* __first1, const _Tp* __last1, const _Tp* __first2)
{
- return !__builtin_memcmp(__first1, __first2, sizeof(_Tp)
- * (__last1 - __first1));
+ if (const size_t __len = (__last1 - __first1))
+ return !__builtin_memcmp(__first1, __first2, sizeof(_Tp) * __len);
+ return true;
}
};
{
const size_t __len1 = __last1 - __first1;
const size_t __len2 = __last2 - __first2;
- const int __result = __builtin_memcmp(__first1, __first2,
- std::min(__len1, __len2));
- return __result != 0 ? __result < 0 : __len1 < __len2;
+ if (const size_t __len = std::min(__len1, __len2))
+ if (int __result = __builtin_memcmp(__first1, __first2, __len))
+ return __result < 0;
+ return __len1 < __len2;
}
};
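
The byte-wise fill/equal/lexicographical_compare helpers now skip the memset/memcmp builtins entirely for empty ranges, so the (possibly null) pointers of empty sequences are never handed to them. A brief sketch of calls that reach these specializations:

#include <algorithm>
#include <vector>
#include <cassert>

int main()
{
  std::vector<unsigned char> a, b;   // empty: data() may well be null

  // These dispatch to the memset/memcmp paths patched above; the new
  // zero-length guards keep the builtins from being called with null.
  std::fill(a.begin(), a.end(), static_cast<unsigned char>('x'));
  assert(std::equal(a.begin(), a.end(), b.begin()));
  assert(!std::lexicographical_compare(a.begin(), a.end(),
                                       b.begin(), b.end()));
  return 0;
}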
public:
vector()
+#if __cplusplus >= 201103L
+ noexcept(is_nothrow_default_constructible<allocator_type>::value)
+#endif
: _Base() { }
explicit
* @brief Default constructor creates no elements.
*/
map()
+#if __cplusplus >= 201103L
+ noexcept(is_nothrow_default_constructible<allocator_type>::value)
+#endif
: _M_t() { }
/**
* @brief Default constructor creates no elements.
*/
multimap()
+#if __cplusplus >= 201103L
+ noexcept(is_nothrow_default_constructible<allocator_type>::value)
+#endif
: _M_t() { }
/**
* @brief Default constructor creates no elements.
*/
multiset()
+#if __cplusplus >= 201103L
+ noexcept(is_nothrow_default_constructible<allocator_type>::value)
+#endif
: _M_t() { }
/**
* @brief Default constructor creates no elements.
*/
set()
+#if __cplusplus >= 201103L
+ noexcept(is_nothrow_default_constructible<allocator_type>::value)
+#endif
: _M_t() { }
/**
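
With these guards (and the matching ones for basic_string and vector earlier in the patch), default construction of the standard containers is noexcept whenever the allocator's default constructor is. A compile-time check, assuming C++11 and GCC 5.2's default configuration:

#include <map>
#include <set>
#include <string>
#include <vector>
#include <type_traits>

// With std::allocator (nothrow-default-constructible) the container
// default constructors are now noexcept.
static_assert(std::is_nothrow_default_constructible<std::string>::value, "");
static_assert(std::is_nothrow_default_constructible<std::vector<int>>::value, "");
static_assert(std::is_nothrow_default_constructible<std::map<int, int>>::value, "");
static_assert(std::is_nothrow_default_constructible<std::set<int>>::value, "");

int main() { return 0; }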
_M_valptr() const
{ return std::__addressof(_M_value_field); }
#else
- __gnu_cxx::__aligned_buffer<_Val> _M_storage;
+ __gnu_cxx::__aligned_membuf<_Val> _M_storage;
_Val*
_M_valptr()
: _M_node() { }
explicit
- _Rb_tree_iterator(_Link_type __x) _GLIBCXX_NOEXCEPT
+ _Rb_tree_iterator(_Base_ptr __x) _GLIBCXX_NOEXCEPT
: _M_node(__x) { }
reference
: _M_node() { }
explicit
- _Rb_tree_const_iterator(_Link_type __x) _GLIBCXX_NOEXCEPT
+ _Rb_tree_const_iterator(_Base_ptr __x) _GLIBCXX_NOEXCEPT
: _M_node(__x) { }
_Rb_tree_const_iterator(const iterator& __it) _GLIBCXX_NOEXCEPT
iterator
_M_const_cast() const _GLIBCXX_NOEXCEPT
- { return iterator(static_cast<typename iterator::_Link_type>
- (const_cast<typename iterator::_Base_ptr>(_M_node))); }
+ { return iterator(const_cast<typename iterator::_Base_ptr>(_M_node)); }
reference
operator*() const _GLIBCXX_NOEXCEPT
iterator
begin() _GLIBCXX_NOEXCEPT
- {
- return iterator(static_cast<_Link_type>
- (this->_M_impl._M_header._M_left));
- }
+ { return iterator(this->_M_impl._M_header._M_left); }
const_iterator
begin() const _GLIBCXX_NOEXCEPT
- {
- return const_iterator(static_cast<_Const_Link_type>
- (this->_M_impl._M_header._M_left));
- }
+ { return const_iterator(this->_M_impl._M_header._M_left); }
iterator
end() _GLIBCXX_NOEXCEPT
- { return iterator(static_cast<_Link_type>(&this->_M_impl._M_header)); }
+ { return iterator(&this->_M_impl._M_header); }
const_iterator
end() const _GLIBCXX_NOEXCEPT
- {
- return const_iterator(static_cast<_Const_Link_type>
- (&this->_M_impl._M_header));
- }
+ { return const_iterator(&this->_M_impl._M_header); }
reverse_iterator
rbegin() _GLIBCXX_NOEXCEPT
*/
unordered_map(const unordered_map& __umap,
const allocator_type& __a)
- : _M_h(__umap._M_h, __a)
+ : _M_h(__umap._M_h, __a)
{ }
/*
*/
unordered_map(unordered_map&& __umap,
const allocator_type& __a)
- : _M_h(std::move(__umap._M_h), __a)
+ : _M_h(std::move(__umap._M_h), __a)
{ }
/**
const hasher& __hf = hasher(),
const key_equal& __eql = key_equal(),
const allocator_type& __a = allocator_type())
- : _M_h(__l, __n, __hf, __eql, __a)
+ : _M_h(__l, __n, __hf, __eql, __a)
+ { }
+
+ unordered_map(size_type __n, const allocator_type& __a)
+ : unordered_map(__n, hasher(), key_equal(), __a)
+ { }
+
+ unordered_map(size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_map(__n, __hf, key_equal(), __a)
+ { }
+
+ template<typename _InputIterator>
+ unordered_map(_InputIterator __first, _InputIterator __last,
+ size_type __n,
+ const allocator_type& __a)
+ : unordered_map(__first, __last, __n, hasher(), key_equal(), __a)
+ { }
+
+ template<typename _InputIterator>
+ unordered_map(_InputIterator __first, _InputIterator __last,
+ size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_map(__first, __last, __n, __hf, key_equal(), __a)
+ { }
+
+ unordered_map(initializer_list<value_type> __l,
+ size_type __n,
+ const allocator_type& __a)
+ : unordered_map(__l, __n, hasher(), key_equal(), __a)
+ { }
+
+ unordered_map(initializer_list<value_type> __l,
+ size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_map(__l, __n, __hf, key_equal(), __a)
{ }
/// Copy assignment operator.
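
These delegating overloads supply the unordered_map constructor forms that take an allocator but omit the hasher and/or key_equal; the same set is added to unordered_multimap, unordered_set and unordered_multiset below, and mirrored in the <debug/...> and <profile/...> wrappers further down. A short usage sketch, assuming C++11:

#include <unordered_map>
#include <string>

int main()
{
  using Map = std::unordered_map<int, std::string>;
  Map::allocator_type alloc;
  Map::hasher hash;

  // Newly delegating forms: bucket count + allocator, with or without hasher.
  Map m1(16, alloc);
  Map m2(16, hash, alloc);

  // Initializer list + bucket count + allocator.
  Map m3({{1, "one"}, {2, "two"}}, 4, alloc);

  return (m1.bucket_count() >= 16
          && m2.bucket_count() >= 16
          && m3.size() == 2) ? 0 : 1;
}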
// modifiers.
/**
- * @brief Attempts to build and insert a std::pair into the %unordered_map.
+ * @brief Attempts to build and insert a std::pair into the
+ * %unordered_map.
*
* @param __args Arguments used to generate a new pair instance (see
 * std::piecewise_construct for passing arguments to each
{ return _M_h.emplace(std::forward<_Args>(__args)...); }
/**
- * @brief Attempts to build and insert a std::pair into the %unordered_map.
+ * @brief Attempts to build and insert a std::pair into the
+ * %unordered_map.
*
* @param __pos An iterator that serves as a hint as to where the pair
* should be inserted.
* @param __x An %unordered_map of the same element and allocator
* types.
*
- * This exchanges the elements between two %unordered_map in constant time.
+ * This exchanges the elements between two %unordered_map in constant
+ * time.
* Note that the global std::swap() function is specialized such that
* std::swap(m1,m2) will feed to this function.
*/
*/
explicit
unordered_multimap(const allocator_type& __a)
- : _M_h(__a)
+ : _M_h(__a)
{ }
/*
*/
unordered_multimap(const unordered_multimap& __ummap,
const allocator_type& __a)
- : _M_h(__ummap._M_h, __a)
+ : _M_h(__ummap._M_h, __a)
{ }
/*
*/
unordered_multimap(unordered_multimap&& __ummap,
const allocator_type& __a)
- : _M_h(std::move(__ummap._M_h), __a)
+ : _M_h(std::move(__ummap._M_h), __a)
{ }
/**
const hasher& __hf = hasher(),
const key_equal& __eql = key_equal(),
const allocator_type& __a = allocator_type())
- : _M_h(__l, __n, __hf, __eql, __a)
+ : _M_h(__l, __n, __hf, __eql, __a)
+ { }
+
+ unordered_multimap(size_type __n, const allocator_type& __a)
+ : unordered_multimap(__n, hasher(), key_equal(), __a)
+ { }
+
+ unordered_multimap(size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_multimap(__n, __hf, key_equal(), __a)
+ { }
+
+ template<typename _InputIterator>
+ unordered_multimap(_InputIterator __first, _InputIterator __last,
+ size_type __n,
+ const allocator_type& __a)
+ : unordered_multimap(__first, __last, __n, hasher(), key_equal(), __a)
+ { }
+
+ template<typename _InputIterator>
+ unordered_multimap(_InputIterator __first, _InputIterator __last,
+ size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_multimap(__first, __last, __n, __hf, key_equal(), __a)
+ { }
+
+ unordered_multimap(initializer_list<value_type> __l,
+ size_type __n,
+ const allocator_type& __a)
+ : unordered_multimap(__l, __n, hasher(), key_equal(), __a)
+ { }
+
+ unordered_multimap(initializer_list<value_type> __l,
+ size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_multimap(__l, __n, __hf, key_equal(), __a)
{ }
/// Copy assignment operator.
{ return _M_h.emplace(std::forward<_Args>(__args)...); }
/**
- * @brief Attempts to build and insert a std::pair into the %unordered_multimap.
+ * @brief Attempts to build and insert a std::pair into the
+ * %unordered_multimap.
*
* @param __pos An iterator that serves as a hint as to where the pair
* should be inserted.
*/
explicit
unordered_set(const allocator_type& __a)
- : _M_h(__a)
+ : _M_h(__a)
{ }
/*
*/
unordered_set(const unordered_set& __uset,
const allocator_type& __a)
- : _M_h(__uset._M_h, __a)
+ : _M_h(__uset._M_h, __a)
{ }
/*
*/
unordered_set(unordered_set&& __uset,
const allocator_type& __a)
- : _M_h(std::move(__uset._M_h), __a)
+ : _M_h(std::move(__uset._M_h), __a)
{ }
/**
const hasher& __hf = hasher(),
const key_equal& __eql = key_equal(),
const allocator_type& __a = allocator_type())
- : _M_h(__l, __n, __hf, __eql, __a)
+ : _M_h(__l, __n, __hf, __eql, __a)
+ { }
+
+ unordered_set(size_type __n, const allocator_type& __a)
+ : unordered_set(__n, hasher(), key_equal(), __a)
+ { }
+
+ unordered_set(size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_set(__n, __hf, key_equal(), __a)
+ { }
+
+ template<typename _InputIterator>
+ unordered_set(_InputIterator __first, _InputIterator __last,
+ size_type __n,
+ const allocator_type& __a)
+ : unordered_set(__first, __last, __n, hasher(), key_equal(), __a)
+ { }
+
+ template<typename _InputIterator>
+ unordered_set(_InputIterator __first, _InputIterator __last,
+ size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_set(__first, __last, __n, __hf, key_equal(), __a)
+ { }
+
+ unordered_set(initializer_list<value_type> __l,
+ size_type __n,
+ const allocator_type& __a)
+ : unordered_set(__l, __n, hasher(), key_equal(), __a)
+ { }
+
+ unordered_set(initializer_list<value_type> __l,
+ size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_set(__l, __n, __hf, key_equal(), __a)
{ }
/// Copy assignment operator.
template<typename _Value1, typename _Hash1, typename _Pred1,
typename _Alloc1>
friend bool
- operator==(const unordered_set<_Value1, _Hash1, _Pred1, _Alloc1>&,
- const unordered_set<_Value1, _Hash1, _Pred1, _Alloc1>&);
+ operator==(const unordered_set<_Value1, _Hash1, _Pred1, _Alloc1>&,
+ const unordered_set<_Value1, _Hash1, _Pred1, _Alloc1>&);
};
/**
const hasher& __hf = hasher(),
const key_equal& __eql = key_equal(),
const allocator_type& __a = allocator_type())
- : _M_h(__l, __n, __hf, __eql, __a)
+ : _M_h(__l, __n, __hf, __eql, __a)
{ }
/// Copy assignment operator.
*/
explicit
unordered_multiset(const allocator_type& __a)
- : _M_h(__a)
+ : _M_h(__a)
{ }
/*
*/
unordered_multiset(const unordered_multiset& __umset,
const allocator_type& __a)
- : _M_h(__umset._M_h, __a)
+ : _M_h(__umset._M_h, __a)
{ }
/*
*/
unordered_multiset(unordered_multiset&& __umset,
const allocator_type& __a)
- : _M_h(std::move(__umset._M_h), __a)
+ : _M_h(std::move(__umset._M_h), __a)
+ { }
+
+ unordered_multiset(size_type __n, const allocator_type& __a)
+ : unordered_multiset(__n, hasher(), key_equal(), __a)
+ { }
+
+ unordered_multiset(size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_multiset(__n, __hf, key_equal(), __a)
+ { }
+
+ template<typename _InputIterator>
+ unordered_multiset(_InputIterator __first, _InputIterator __last,
+ size_type __n,
+ const allocator_type& __a)
+ : unordered_multiset(__first, __last, __n, hasher(), key_equal(), __a)
+ { }
+
+ template<typename _InputIterator>
+ unordered_multiset(_InputIterator __first, _InputIterator __last,
+ size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_multiset(__first, __last, __n, __hf, key_equal(), __a)
+ { }
+
+ unordered_multiset(initializer_list<value_type> __l,
+ size_type __n,
+ const allocator_type& __a)
+ : unordered_multiset(__l, __n, hasher(), key_equal(), __a)
+ { }
+
+ unordered_multiset(initializer_list<value_type> __l,
+ size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_multiset(__l, __n, __hf, key_equal(), __a)
{ }
/**
* in the initializer list @a __l.
*
* Note that the assignment completely changes the %unordered_multiset
- * and that the resulting %unordered_set's size is the same as the number
- * of elements assigned. Old data may be lost.
+ * and that the resulting %unordered_multiset's size is the same as the
+ * number of elements assigned. Old data may be lost.
*/
unordered_multiset&
operator=(initializer_list<value_type> __l)
pointer
data() noexcept
- { return std::__addressof(_AT_Type::_S_ref(_M_elems, 0)); }
+ { return _AT_Type::_S_ptr(_M_elems); }
const_pointer
data() const noexcept
- { return std::__addressof(_AT_Type::_S_ref(_M_elems, 0)); }
+ { return _AT_Type::_S_ptr(_M_elems); }
};
// Array comparisons.
const allocator_type& __a = allocator_type())
: _Base(__l, __n, __hf, __eql, __a) { }
+ unordered_map(size_type __n, const allocator_type& __a)
+ : unordered_map(__n, hasher(), key_equal(), __a)
+ { }
+
+ unordered_map(size_type __n,
+ const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_map(__n, __hf, key_equal(), __a)
+ { }
+
+ template<typename _InputIterator>
+ unordered_map(_InputIterator __first, _InputIterator __last,
+ size_type __n,
+ const allocator_type& __a)
+ : unordered_map(__first, __last, __n, hasher(), key_equal(), __a)
+ { }
+
+ template<typename _InputIterator>
+ unordered_map(_InputIterator __first, _InputIterator __last,
+ size_type __n,
+ const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_map(__first, __last, __n, __hf, key_equal(), __a)
+ { }
+
+ unordered_map(initializer_list<value_type> __l,
+ size_type __n,
+ const allocator_type& __a)
+ : unordered_map(__l, __n, hasher(), key_equal(), __a)
+ { }
+
+ unordered_map(initializer_list<value_type> __l,
+ size_type __n,
+ const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_map(__l, __n, __hf, key_equal(), __a)
+ { }
+
~unordered_map() = default;
unordered_map&
const allocator_type& __a = allocator_type())
: _Base(__l, __n, __hf, __eql, __a) { }
+ unordered_multimap(size_type __n, const allocator_type& __a)
+ : unordered_multimap(__n, hasher(), key_equal(), __a)
+ { }
+
+ unordered_multimap(size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_multimap(__n, __hf, key_equal(), __a)
+ { }
+
+ template<typename _InputIterator>
+ unordered_multimap(_InputIterator __first, _InputIterator __last,
+ size_type __n,
+ const allocator_type& __a)
+ : unordered_multimap(__first, __last, __n, hasher(), key_equal(), __a)
+ { }
+
+ template<typename _InputIterator>
+ unordered_multimap(_InputIterator __first, _InputIterator __last,
+ size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_multimap(__first, __last, __n, __hf, key_equal(), __a)
+ { }
+
+ unordered_multimap(initializer_list<value_type> __l,
+ size_type __n,
+ const allocator_type& __a)
+ : unordered_multimap(__l, __n, hasher(), key_equal(), __a)
+ { }
+
+ unordered_multimap(initializer_list<value_type> __l,
+ size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_multimap(__l, __n, __hf, key_equal(), __a)
+ { }
+
~unordered_multimap() = default;
unordered_multimap&
const allocator_type& __a = allocator_type())
: _Base(__l, __n, __hf, __eql, __a) { }
+ unordered_set(size_type __n, const allocator_type& __a)
+ : unordered_set(__n, hasher(), key_equal(), __a)
+ { }
+
+ unordered_set(size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_set(__n, __hf, key_equal(), __a)
+ { }
+
+ template<typename _InputIterator>
+ unordered_set(_InputIterator __first, _InputIterator __last,
+ size_type __n,
+ const allocator_type& __a)
+ : unordered_set(__first, __last, __n, hasher(), key_equal(), __a)
+ { }
+
+ template<typename _InputIterator>
+ unordered_set(_InputIterator __first, _InputIterator __last,
+ size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_set(__first, __last, __n, __hf, key_equal(), __a)
+ { }
+
+ unordered_set(initializer_list<value_type> __l,
+ size_type __n,
+ const allocator_type& __a)
+ : unordered_set(__l, __n, hasher(), key_equal(), __a)
+ { }
+
+ unordered_set(initializer_list<value_type> __l,
+ size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_set(__l, __n, __hf, key_equal(), __a)
+ { }
+
~unordered_set() = default;
unordered_set&
const allocator_type& __a = allocator_type())
: _Base(__l, __n, __hf, __eql, __a) { }
+ unordered_multiset(size_type __n, const allocator_type& __a)
+ : unordered_multiset(__n, hasher(), key_equal(), __a)
+ { }
+
+ unordered_multiset(size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_multiset(__n, __hf, key_equal(), __a)
+ { }
+
+ template<typename _InputIterator>
+ unordered_multiset(_InputIterator __first, _InputIterator __last,
+ size_type __n,
+ const allocator_type& __a)
+ : unordered_multiset(__first, __last, __n, hasher(), key_equal(), __a)
+ { }
+
+ template<typename _InputIterator>
+ unordered_multiset(_InputIterator __first, _InputIterator __last,
+ size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_multiset(__first, __last, __n, __hf, key_equal(), __a)
+ { }
+
+ unordered_multiset(initializer_list<value_type> __l,
+ size_type __n,
+ const allocator_type& __a)
+ : unordered_multiset(__l, __n, hasher(), key_equal(), __a)
+ { }
+
+ unordered_multiset(initializer_list<value_type> __l,
+ size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_multiset(__l, __n, __hf, key_equal(), __a)
+ { }
+
~unordered_multiset() = default;
unordered_multiset&
namespace __gnu_cxx
{
+ // A utility type containing a POD object that can hold an object of type
+ // _Tp initialized via placement new or allocator_traits::construct.
+ // Intended for use as a data member subobject; use __aligned_buffer for
+ // complete objects.
+ template<typename _Tp>
+ struct __aligned_membuf
+ {
+ // Target macro ADJUST_FIELD_ALIGN can produce different alignment for
+ // types when used as class members. __aligned_membuf is intended
+ // for use as a class member, so align the buffer as for a class member.
+ struct _Tp2 { _Tp _M_t; };
+
+ alignas(__alignof__(_Tp2::_M_t)) unsigned char _M_storage[sizeof(_Tp)];
+
+ __aligned_membuf() = default;
+
+ // Can be used to avoid value-initialization zeroing _M_storage.
+ __aligned_membuf(std::nullptr_t) { }
+
+ void*
+ _M_addr() noexcept
+ { return static_cast<void*>(&_M_storage); }
+
+ const void*
+ _M_addr() const noexcept
+ { return static_cast<const void*>(&_M_storage); }
+
+ _Tp*
+ _M_ptr() noexcept
+ { return static_cast<_Tp*>(_M_addr()); }
+
+ const _Tp*
+ _M_ptr() const noexcept
+ { return static_cast<const _Tp*>(_M_addr()); }
+ };
+
+ // Similar to __aligned_membuf but aligned for complete objects, not members.
+ // This type is used in <forward_list>, <future>, <bits/shared_ptr_base.h>
+ // and <bits/hashtable_policy.h>, but ideally they would use __aligned_membuf
+ // instead, as it has smaller size for some types on some targets.
+ // This type is still used to avoid an ABI change.
template<typename _Tp>
struct __aligned_buffer
: std::aligned_storage<sizeof(_Tp), std::alignment_of<_Tp>::value>
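
__aligned_membuf, defined above, gives node types raw, member-aligned storage for a _Tp that is constructed later via placement new or allocator_traits::construct. A stand-alone sketch of the same idea under a hypothetical name (it is not the libstdc++ class, and it uses plain alignof(T) where the real code queries the alignment of a wrapped member to honour ADJUST_FIELD_ALIGN):

#include <new>
#include <string>

// Hypothetical stand-in for the __aligned_membuf pattern.
template<typename T>
struct aligned_membuf_sketch
{
  alignas(alignof(T)) unsigned char storage[sizeof(T)];

  T*       ptr()       { return reinterpret_cast<T*>(storage); }
  const T* ptr() const { return reinterpret_cast<const T*>(storage); }
};

int main()
{
  aligned_membuf_sketch<std::string> buf;                          // raw storage only
  std::string* s = ::new (buf.ptr()) std::string("node value");    // construct in place
  bool ok = (*s == "node value");
  s->~basic_string();                                              // destroy explicitly
  return ok ? 0 : 1;
}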
pointer
data() noexcept
- { return std::__addressof(_AT_Type::_S_ref(_M_elems, 0)); }
+ { return _AT_Type::_S_ptr(_M_elems); }
const_pointer
data() const noexcept
- { return std::__addressof(_AT_Type::_S_ref(_M_elems, 0)); }
+ { return _AT_Type::_S_ptr(_M_elems); }
};
// Array comparisons.
const allocator_type& __a = allocator_type())
: _Base(__l, __n, __hf, __eql, __a) { }
+ unordered_map(size_type __n, const allocator_type& __a)
+ : unordered_map(__n, hasher(), key_equal(), __a)
+ { }
+
+ unordered_map(size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_map(__n, __hf, key_equal(), __a)
+ { }
+
+ template<typename _InputIterator>
+ unordered_map(_InputIterator __first, _InputIterator __last,
+ size_type __n,
+ const allocator_type& __a)
+ : unordered_map(__first, __last, __n, hasher(), key_equal(), __a)
+ { }
+
+ template<typename _InputIterator>
+ unordered_map(_InputIterator __first, _InputIterator __last,
+ size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_map(__first, __last, __n, __hf, key_equal(), __a)
+ { }
+
+ unordered_map(initializer_list<value_type> __l,
+ size_type __n,
+ const allocator_type& __a)
+ : unordered_map(__l, __n, hasher(), key_equal(), __a)
+ { }
+
+ unordered_map(initializer_list<value_type> __l,
+ size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_map(__l, __n, __hf, key_equal(), __a)
+ { }
+
unordered_map&
operator=(const unordered_map&) = default;
const allocator_type& __a = allocator_type())
: _Base(__l, __n, __hf, __eql, __a) { }
+ unordered_multimap(size_type __n, const allocator_type& __a)
+ : unordered_multimap(__n, hasher(), key_equal(), __a)
+ { }
+
+ unordered_multimap(size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_multimap(__n, __hf, key_equal(), __a)
+ { }
+
+ template<typename _InputIterator>
+ unordered_multimap(_InputIterator __first, _InputIterator __last,
+ size_type __n,
+ const allocator_type& __a)
+ : unordered_multimap(__first, __last, __n, hasher(), key_equal(), __a)
+ { }
+
+ template<typename _InputIterator>
+ unordered_multimap(_InputIterator __first, _InputIterator __last,
+ size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_multimap(__first, __last, __n, __hf, key_equal(), __a)
+ { }
+
+ unordered_multimap(initializer_list<value_type> __l,
+ size_type __n,
+ const allocator_type& __a)
+ : unordered_multimap(__l, __n, hasher(), key_equal(), __a)
+ { }
+
+ unordered_multimap(initializer_list<value_type> __l,
+ size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_multimap(__l, __n, __hf, key_equal(), __a)
+ { }
+
unordered_multimap&
operator=(const unordered_multimap&) = default;
: _Base(__l, __n, __hf, __eql, __a)
{ }
+ unordered_set(size_type __n, const allocator_type& __a)
+ : unordered_set(__n, hasher(), key_equal(), __a)
+ { }
+
+ unordered_set(size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_set(__n, __hf, key_equal(), __a)
+ { }
+
+ template<typename _InputIterator>
+ unordered_set(_InputIterator __first, _InputIterator __last,
+ size_type __n,
+ const allocator_type& __a)
+ : unordered_set(__first, __last, __n, hasher(), key_equal(), __a)
+ { }
+
+ template<typename _InputIterator>
+ unordered_set(_InputIterator __first, _InputIterator __last,
+ size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_set(__first, __last, __n, __hf, key_equal(), __a)
+ { }
+
+ unordered_set(initializer_list<value_type> __l,
+ size_type __n,
+ const allocator_type& __a)
+ : unordered_set(__l, __n, hasher(), key_equal(), __a)
+ { }
+
+ unordered_set(initializer_list<value_type> __l,
+ size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_set(__l, __n, __hf, key_equal(), __a)
+ { }
+
unordered_set&
operator=(const unordered_set&) = default;
: _Base(__l, __n, __hf, __eql, __a)
{ }
+ unordered_multiset(size_type __n, const allocator_type& __a)
+ : unordered_multiset(__n, hasher(), key_equal(), __a)
+ { }
+
+ unordered_multiset(size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_multiset(__n, __hf, key_equal(), __a)
+ { }
+
+ template<typename _InputIterator>
+ unordered_multiset(_InputIterator __first, _InputIterator __last,
+ size_type __n,
+ const allocator_type& __a)
+ : unordered_multiset(__first, __last, __n, hasher(), key_equal(), __a)
+ { }
+
+ template<typename _InputIterator>
+ unordered_multiset(_InputIterator __first, _InputIterator __last,
+ size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_multiset(__first, __last, __n, __hf, key_equal(), __a)
+ { }
+
+ unordered_multiset(initializer_list<value_type> __l,
+ size_type __n,
+ const allocator_type& __a)
+ : unordered_multiset(__l, __n, hasher(), key_equal(), __a)
+ { }
+
+ unordered_multiset(initializer_list<value_type> __l,
+ size_type __n, const hasher& __hf,
+ const allocator_type& __a)
+ : unordered_multiset(__l, __n, __hf, key_equal(), __a)
+ { }
+
unordered_multiset&
operator=(const unordered_multiset&) = default;
static constexpr _Tp&
_S_ref(const _Type& __t, std::size_t __n) noexcept
{ return const_cast<_Tp&>(__t[__n]); }
+
+ static constexpr _Tp*
+ _S_ptr(const _Type& __t) noexcept
+ { return const_cast<_Tp*>(__t); }
};
template<typename _Tp>
static constexpr _Tp&
_S_ref(const _Type&, std::size_t) noexcept
{ return *static_cast<_Tp*>(nullptr); }
+
+ static constexpr _Tp*
+ _S_ptr(const _Type&) noexcept
+ { return nullptr; }
};
/**
pointer
data() noexcept
- { return std::__addressof(_AT_Type::_S_ref(_M_elems, 0)); }
+ { return _AT_Type::_S_ptr(_M_elems); }
const_pointer
data() const noexcept
- { return std::__addressof(_AT_Type::_S_ref(_M_elems, 0)); }
+ { return _AT_Type::_S_ptr(_M_elems); }
};
// Array comparisons.
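
Routing data() through _S_ptr means a zero-sized std::array no longer forms a reference through the null _Type of its empty specialization; in this implementation the result is simply a null pointer (the standard leaves the value unspecified). A quick check, assuming C++11:

#include <array>
#include <cassert>

int main()
{
  std::array<int, 0> empty;
  std::array<int, 3> a{{1, 2, 3}};

  // The zero-size specialization's _S_ptr hands back nullptr instead of
  // dereferencing through _S_ref.
  assert(empty.data() == nullptr);
  assert(a.data() == &a[0]);
  return 0;
}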
{
if (to.size() > 0)
{
- *to.next = codepoint;
+ *to.next = adjust_byte_order(codepoint, mode);
++to.next;
return true;
}
int
__codecvt_utf16_base<char32_t>::do_max_length() const throw()
-{ return 3; }
+{ return 4; }
#ifdef _GLIBCXX_USE_WCHAR_T
// Define members of codecvt_utf16<wchar_t> base class implementation.
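
max_length() for codecvt_utf16<char32_t> is corrected from 3 to 4: a code point outside the BMP encodes as a UTF-16 surrogate pair, i.e. two 16-bit code units or four external bytes (the hunk above likewise fixes the written code units to respect the requested byte order). A small check, assuming the <codecvt> header is available:

#include <codecvt>
#include <cassert>

int main()
{
  std::codecvt_utf16<char32_t> cvt;
  // Worst case per char32_t: one surrogate pair = 2 * 2 bytes.
  assert(cvt.max_length() == 4);
  return 0;
}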
std::terminate();
}
- return 0;
+ return nullptr;
}
}
__throw_system_error(int(errc::operation_not_permitted));
#endif
- _M_start_thread(__b, nullptr);
+ _M_start_thread(std::move(__b), nullptr);
}
void
thread::_M_start_thread(__shared_base_type __b, void (*)())
{
- __b->_M_this_ptr = __b;
+ auto ptr = __b.get();
+ ptr->_M_this_ptr = std::move(__b);
int __e = __gthread_create(&_M_id._M_thread,
- &execute_native_thread_routine, __b.get());
+ &execute_native_thread_routine, ptr);
if (__e)
{
- __b->_M_this_ptr.reset();
+ ptr->_M_this_ptr.reset();
__throw_system_error(__e);
}
}
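
The start routine now receives a raw pointer taken before the shared state is moved into its own _M_this_ptr, so no use of the moved-from shared_ptr remains and the failure path resets through that same raw pointer. A generic sketch of the ordering with hypothetical names (state/start stand in for the libstdc++ internals):

#include <memory>

// Hedged sketch of the "object keeps itself alive via a self-owned
// shared_ptr" hand-off pattern used above; names are illustrative.
struct state
{
  std::shared_ptr<state> self;   // keeps *this alive until the new thread releases it
};

bool start(std::shared_ptr<state> sp, bool creation_fails)
{
  state* raw = sp.get();         // take the raw pointer first...
  raw->self = std::move(sp);     // ...then move ownership into the object itself
  if (creation_fails)            // stands in for a failing __gthread_create
    {
      raw->self.reset();         // undo the self-reference so the state is freed
      return false;
    }
  raw->self.reset();             // on success the new thread would do this release
  return true;
}

int main()
{
  bool ok = start(std::make_shared<state>(), false)
            && !start(std::make_shared<state>(), true);
  return ok ? 0 : 1;
}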