1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
57 optab optab_table[OTI_MAX];
59 rtx libfunc_table[LTI_MAX];
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table[COI_MAX];
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab[NUM_RTX_CODE + 1];
67 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
68 gives the gen_function to make a branch to test that condition. */
70 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
72 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
76 enum insn_code setcc_gen_code[NUM_RTX_CODE];
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (eg: for the ARM). */
84 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
87 /* Indexed by the machine mode, gives the insn code for vector conditional
90 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
91 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
93 /* The insn generating function can not take an rtx_code argument.
94 TRAP_RTX is used as an rtx argument. Its code is replaced with
95 the code to be used in the trap insn and all other fields are ignored. */
96 static GTY(()) rtx trap_rtx;
98 static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
99 static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
101 static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
102 enum machine_mode *, int *,
103 enum can_compare_purpose);
104 static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
106 static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
107 static optab new_optab (void);
108 static convert_optab new_convert_optab (void);
109 static inline optab init_optab (enum rtx_code);
110 static inline optab init_optabv (enum rtx_code);
111 static inline convert_optab init_convert_optab (enum rtx_code);
112 static void init_libfuncs (optab, int, int, const char *, int);
113 static void init_integral_libfuncs (optab, const char *, int);
114 static void init_floating_libfuncs (optab, const char *, int);
115 static void init_interclass_conv_libfuncs (convert_optab, const char *,
116 enum mode_class, enum mode_class);
117 static void init_intraclass_conv_libfuncs (convert_optab, const char *,
118 enum mode_class, bool);
119 static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
120 enum rtx_code, int, rtx);
121 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
122 enum machine_mode *, int *);
123 static rtx widen_clz (enum machine_mode, rtx, rtx);
124 static rtx expand_parity (enum machine_mode, rtx, rtx);
125 static enum rtx_code get_rtx_code (enum tree_code, bool);
126 static rtx vector_compare_rtx (tree, bool, enum insn_code);
128 #ifndef HAVE_conditional_trap
129 #define HAVE_conditional_trap 0
130 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
133 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
134 the result of operation CODE applied to OP0 (and OP1 if it is a binary
137 If the last insn does not set TARGET, don't do anything, but return 1.
139 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
140 don't add the REG_EQUAL note but return 0. Our caller can then try
141 again, ensuring that TARGET is not one of the operands. */
144 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
146 rtx last_insn, insn, set;
149 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
151 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
152 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
153 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
154 && GET_RTX_CLASS (code) != RTX_COMPARE
155 && GET_RTX_CLASS (code) != RTX_UNARY)
158 if (GET_CODE (target) == ZERO_EXTRACT)
161 for (last_insn = insns;
162 NEXT_INSN (last_insn) != NULL_RTX;
163 last_insn = NEXT_INSN (last_insn))
166 set = single_set (last_insn);
170 if (! rtx_equal_p (SET_DEST (set), target)
171 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
172 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
173 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
176 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
177 besides the last insn. */
178 if (reg_overlap_mentioned_p (target, op0)
179 || (op1 && reg_overlap_mentioned_p (target, op1)))
181 insn = PREV_INSN (last_insn);
182 while (insn != NULL_RTX)
184 if (reg_set_p (target, insn))
187 insn = PREV_INSN (insn);
191 if (GET_RTX_CLASS (code) == RTX_UNARY)
192 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
194 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
196 set_unique_reg_note (last_insn, REG_EQUAL, note);
201 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
202 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
203 not actually do a sign-extend or zero-extend, but can leave the
204 higher-order bits of the result rtx undefined, for example, in the case
205 of logical operations, but not right shifts. */
208 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
209 int unsignedp, int no_extend)
213 /* If we don't have to extend and this is a constant, return it. */
214 if (no_extend && GET_MODE (op) == VOIDmode)
217 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
218 extend since it will be more efficient to do so unless the signedness of
219 a promoted object differs from our extension. */
221 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
222 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
223 return convert_modes (mode, oldmode, op, unsignedp);
225 /* If MODE is no wider than a single word, we return a paradoxical
227 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
228 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
230 /* Otherwise, get an object of MODE, clobber it, and set the low-order
233 result = gen_reg_rtx (mode);
234 emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
235 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
239 /* Return the optab used for computing the operation given by
240 the tree code, CODE. This function is not always usable (for
241 example, it cannot give complete results for multiplication
242 or division) but probably ought to be relied on more widely
243 throughout the expander. */
245 optab_for_tree_code (enum tree_code code, tree type)
257 return one_cmpl_optab;
266 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
274 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
280 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
289 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
292 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
294 case REALIGN_LOAD_EXPR:
295 return vec_realign_load_optab;
298 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
301 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
303 case REDUC_PLUS_EXPR:
304 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
306 case VEC_LSHIFT_EXPR:
307 return vec_shl_optab;
309 case VEC_RSHIFT_EXPR:
310 return vec_shr_optab;
316 trapv = flag_trapv && INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type);
320 return trapv ? addv_optab : add_optab;
323 return trapv ? subv_optab : sub_optab;
326 return trapv ? smulv_optab : smul_optab;
329 return trapv ? negv_optab : neg_optab;
332 return trapv ? absv_optab : abs_optab;
340 /* Generate code to perform an operation specified by TERNARY_OPTAB
341 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
343 UNSIGNEDP is for the case where we have to widen the operands
344 to perform the operation. It says to use zero-extension.
346 If TARGET is nonzero, the value
347 is generated there, if it is convenient to do so.
348 In all cases an rtx is returned for the locus of the value;
349 this may or may not be TARGET. */
352 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
353 rtx op1, rtx op2, rtx target, int unsignedp)
355 int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
356 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
357 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
358 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
361 rtx xop0 = op0, xop1 = op1, xop2 = op2;
363 gcc_assert (ternary_optab->handlers[(int) mode].insn_code
364 != CODE_FOR_nothing);
366 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
367 temp = gen_reg_rtx (mode);
371 /* In case the insn wants input operands in modes different from
372 those of the actual operands, convert the operands. It would
373 seem that we don't need to convert CONST_INTs, but we do, so
374 that they're properly zero-extended, sign-extended or truncated
377 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
378 xop0 = convert_modes (mode0,
379 GET_MODE (op0) != VOIDmode
384 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
385 xop1 = convert_modes (mode1,
386 GET_MODE (op1) != VOIDmode
391 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
392 xop2 = convert_modes (mode2,
393 GET_MODE (op2) != VOIDmode
398 /* Now, if insn's predicates don't allow our operands, put them into
401 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
402 && mode0 != VOIDmode)
403 xop0 = copy_to_mode_reg (mode0, xop0);
405 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
406 && mode1 != VOIDmode)
407 xop1 = copy_to_mode_reg (mode1, xop1);
409 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
410 && mode2 != VOIDmode)
411 xop2 = copy_to_mode_reg (mode2, xop2);
413 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
420 /* Like expand_binop, but return a constant rtx if the result can be
421 calculated at compile time. The arguments and return value are
422 otherwise the same as for expand_binop. */
425 simplify_expand_binop (enum machine_mode mode, optab binoptab,
426 rtx op0, rtx op1, rtx target, int unsignedp,
427 enum optab_methods methods)
429 if (CONSTANT_P (op0) && CONSTANT_P (op1))
431 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
437 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
440 /* Like simplify_expand_binop, but always put the result in TARGET.
441 Return true if the expansion succeeded. */
444 force_expand_binop (enum machine_mode mode, optab binoptab,
445 rtx op0, rtx op1, rtx target, int unsignedp,
446 enum optab_methods methods)
448 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
449 target, unsignedp, methods);
453 emit_move_insn (target, x);
457 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
460 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
462 enum insn_code icode;
463 rtx rtx_op1, rtx_op2;
464 enum machine_mode mode1;
465 enum machine_mode mode2;
466 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
467 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
468 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
472 switch (TREE_CODE (vec_shift_expr))
474 case VEC_RSHIFT_EXPR:
475 shift_optab = vec_shr_optab;
477 case VEC_LSHIFT_EXPR:
478 shift_optab = vec_shl_optab;
484 icode = (int) shift_optab->handlers[(int) mode].insn_code;
485 gcc_assert (icode != CODE_FOR_nothing);
487 mode1 = insn_data[icode].operand[1].mode;
488 mode2 = insn_data[icode].operand[2].mode;
490 rtx_op1 = expand_expr (vec_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
491 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
492 && mode1 != VOIDmode)
493 rtx_op1 = force_reg (mode1, rtx_op1);
495 rtx_op2 = expand_expr (shift_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
496 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
497 && mode2 != VOIDmode)
498 rtx_op2 = force_reg (mode2, rtx_op2);
501 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
502 target = gen_reg_rtx (mode);
504 /* Emit instruction */
505 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
512 /* This subroutine of expand_doubleword_shift handles the cases in which
513 the effective shift value is >= BITS_PER_WORD. The arguments and return
514 value are the same as for the parent routine, except that SUPERWORD_OP1
515 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
516 INTO_TARGET may be null if the caller has decided to calculate it. */
519 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
520 rtx outof_target, rtx into_target,
521 int unsignedp, enum optab_methods methods)
523 if (into_target != 0)
524 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
525 into_target, unsignedp, methods))
528 if (outof_target != 0)
530 /* For a signed right shift, we must fill OUTOF_TARGET with copies
531 of the sign bit, otherwise we must fill it with zeros. */
532 if (binoptab != ashr_optab)
533 emit_move_insn (outof_target, CONST0_RTX (word_mode));
535 if (!force_expand_binop (word_mode, binoptab,
536 outof_input, GEN_INT (BITS_PER_WORD - 1),
537 outof_target, unsignedp, methods))
543 /* This subroutine of expand_doubleword_shift handles the cases in which
544 the effective shift value is < BITS_PER_WORD. The arguments and return
545 value are the same as for the parent routine. */
548 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
549 rtx outof_input, rtx into_input, rtx op1,
550 rtx outof_target, rtx into_target,
551 int unsignedp, enum optab_methods methods,
552 unsigned HOST_WIDE_INT shift_mask)
554 optab reverse_unsigned_shift, unsigned_shift;
557 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
558 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
560 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
561 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
562 the opposite direction to BINOPTAB. */
563 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
565 carries = outof_input;
566 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
567 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
572 /* We must avoid shifting by BITS_PER_WORD bits since that is either
573 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
574 has unknown behavior. Do a single shift first, then shift by the
575 remainder. It's OK to use ~OP1 as the remainder if shift counts
576 are truncated to the mode size. */
577 carries = expand_binop (word_mode, reverse_unsigned_shift,
578 outof_input, const1_rtx, 0, unsignedp, methods);
579 if (shift_mask == BITS_PER_WORD - 1)
581 tmp = immed_double_const (-1, -1, op1_mode);
582 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
587 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
588 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
592 if (tmp == 0 || carries == 0)
594 carries = expand_binop (word_mode, reverse_unsigned_shift,
595 carries, tmp, 0, unsignedp, methods);
599 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
600 so the result can go directly into INTO_TARGET if convenient. */
601 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
602 into_target, unsignedp, methods);
606 /* Now OR in the bits carried over from OUTOF_INPUT. */
607 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
608 into_target, unsignedp, methods))
611 /* Use a standard word_mode shift for the out-of half. */
612 if (outof_target != 0)
613 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
614 outof_target, unsignedp, methods))
621 #ifdef HAVE_conditional_move
622 /* Try implementing expand_doubleword_shift using conditional moves.
623 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
624 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
625 are the shift counts to use in the former and latter case. All other
626 arguments are the same as the parent routine. */
629 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
630 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
631 rtx outof_input, rtx into_input,
632 rtx subword_op1, rtx superword_op1,
633 rtx outof_target, rtx into_target,
634 int unsignedp, enum optab_methods methods,
635 unsigned HOST_WIDE_INT shift_mask)
637 rtx outof_superword, into_superword;
639 /* Put the superword version of the output into OUTOF_SUPERWORD and
641 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
642 if (outof_target != 0 && subword_op1 == superword_op1)
644 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
645 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
646 into_superword = outof_target;
647 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
648 outof_superword, 0, unsignedp, methods))
653 into_superword = gen_reg_rtx (word_mode);
654 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
655 outof_superword, into_superword,
660 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
661 if (!expand_subword_shift (op1_mode, binoptab,
662 outof_input, into_input, subword_op1,
663 outof_target, into_target,
664 unsignedp, methods, shift_mask))
667 /* Select between them. Do the INTO half first because INTO_SUPERWORD
668 might be the current value of OUTOF_TARGET. */
669 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
670 into_target, into_superword, word_mode, false))
673 if (outof_target != 0)
674 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
675 outof_target, outof_superword,
683 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
684 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
685 input operand; the shift moves bits in the direction OUTOF_INPUT->
686 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
687 of the target. OP1 is the shift count and OP1_MODE is its mode.
688 If OP1 is constant, it will have been truncated as appropriate
689 and is known to be nonzero.
691 If SHIFT_MASK is zero, the result of word shifts is undefined when the
692 shift count is outside the range [0, BITS_PER_WORD). This routine must
693 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
695 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
696 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
697 fill with zeros or sign bits as appropriate.
699 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
700 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
701 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
702 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
705 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
706 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
707 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
708 function wants to calculate it itself.
710 Return true if the shift could be successfully synthesized. */
713 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
714 rtx outof_input, rtx into_input, rtx op1,
715 rtx outof_target, rtx into_target,
716 int unsignedp, enum optab_methods methods,
717 unsigned HOST_WIDE_INT shift_mask)
719 rtx superword_op1, tmp, cmp1, cmp2;
720 rtx subword_label, done_label;
721 enum rtx_code cmp_code;
723 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
724 fill the result with sign or zero bits as appropriate. If so, the value
725 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
726 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
727 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
729 This isn't worthwhile for constant shifts since the optimizers will
730 cope better with in-range shift counts. */
731 if (shift_mask >= BITS_PER_WORD
733 && !CONSTANT_P (op1))
735 if (!expand_doubleword_shift (op1_mode, binoptab,
736 outof_input, into_input, op1,
738 unsignedp, methods, shift_mask))
740 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
741 outof_target, unsignedp, methods))
746 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
747 is true when the effective shift value is less than BITS_PER_WORD.
748 Set SUPERWORD_OP1 to the shift count that should be used to shift
749 OUTOF_INPUT into INTO_TARGET when the condition is false. */
750 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
751 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
753 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
754 is a subword shift count. */
755 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
757 cmp2 = CONST0_RTX (op1_mode);
763 /* Set CMP1 to OP1 - BITS_PER_WORD. */
764 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
766 cmp2 = CONST0_RTX (op1_mode);
768 superword_op1 = cmp1;
773 /* If we can compute the condition at compile time, pick the
774 appropriate subroutine. */
775 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
776 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
778 if (tmp == const0_rtx)
779 return expand_superword_shift (binoptab, outof_input, superword_op1,
780 outof_target, into_target,
783 return expand_subword_shift (op1_mode, binoptab,
784 outof_input, into_input, op1,
785 outof_target, into_target,
786 unsignedp, methods, shift_mask);
789 #ifdef HAVE_conditional_move
790 /* Try using conditional moves to generate straight-line code. */
792 rtx start = get_last_insn ();
793 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
794 cmp_code, cmp1, cmp2,
795 outof_input, into_input,
797 outof_target, into_target,
798 unsignedp, methods, shift_mask))
800 delete_insns_since (start);
804 /* As a last resort, use branches to select the correct alternative. */
805 subword_label = gen_label_rtx ();
806 done_label = gen_label_rtx ();
808 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
809 0, 0, subword_label);
811 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
812 outof_target, into_target,
816 emit_jump_insn (gen_jump (done_label));
818 emit_label (subword_label);
820 if (!expand_subword_shift (op1_mode, binoptab,
821 outof_input, into_input, op1,
822 outof_target, into_target,
823 unsignedp, methods, shift_mask))
826 emit_label (done_label);
830 /* Subroutine of expand_binop. Perform a double word multiplication of
831 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
832 as the target's word_mode. This function return NULL_RTX if anything
833 goes wrong, in which case it may have already emitted instructions
834 which need to be deleted.
836 If we want to multiply two two-word values and have normal and widening
837 multiplies of single-word values, we can do this with three smaller
838 multiplications. Note that we do not make a REG_NO_CONFLICT block here
839 because we are not operating on one word at a time.
841 The multiplication proceeds as follows:
842 _______________________
843 [__op0_high_|__op0_low__]
844 _______________________
845 * [__op1_high_|__op1_low__]
846 _______________________________________________
847 _______________________
848 (1) [__op0_low__*__op1_low__]
849 _______________________
850 (2a) [__op0_low__*__op1_high_]
851 _______________________
852 (2b) [__op0_high_*__op1_low__]
853 _______________________
854 (3) [__op0_high_*__op1_high_]
857 This gives a 4-word result. Since we are only interested in the
858 lower 2 words, partial result (3) and the upper words of (2a) and
859 (2b) don't need to be calculated. Hence (2a) and (2b) can be
860 calculated using non-widening multiplication.
862 (1), however, needs to be calculated with an unsigned widening
863 multiplication. If this operation is not directly supported we
864 try using a signed widening multiplication and adjust the result.
865 This adjustment works as follows:
867 If both operands are positive then no adjustment is needed.
869 If the operands have different signs, for example op0_low < 0 and
870 op1_low >= 0, the instruction treats the most significant bit of
871 op0_low as a sign bit instead of a bit with significance
872 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
873 with 2**BITS_PER_WORD - op0_low, and two's complements the
874 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
877 Similarly, if both operands are negative, we need to add
878 (op0_low + op1_low) * 2**BITS_PER_WORD.
880 We use a trick to adjust quickly. We logically shift op0_low right
881 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
882 op0_high (op1_high) before it is used to calculate 2b (2a). If no
883 logical shift exists, we do an arithmetic right shift and subtract
887 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
888 bool umulp, enum optab_methods methods)
890 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
891 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
892 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
893 rtx product, adjust, product_high, temp;
895 rtx op0_high = operand_subword_force (op0, high, mode);
896 rtx op0_low = operand_subword_force (op0, low, mode);
897 rtx op1_high = operand_subword_force (op1, high, mode);
898 rtx op1_low = operand_subword_force (op1, low, mode);
900 /* If we're using an unsigned multiply to directly compute the product
901 of the low-order words of the operands and perform any required
902 adjustments of the operands, we begin by trying two more multiplications
903 and then computing the appropriate sum.
905 We have checked above that the required addition is provided.
906 Full-word addition will normally always succeed, especially if
907 it is provided at all, so we don't worry about its failure. The
908 multiplication may well fail, however, so we do handle that. */
912 /* ??? This could be done with emit_store_flag where available. */
913 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
914 NULL_RTX, 1, methods);
916 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
917 NULL_RTX, 0, OPTAB_DIRECT);
920 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
921 NULL_RTX, 0, methods);
924 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
925 NULL_RTX, 0, OPTAB_DIRECT);
932 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
933 NULL_RTX, 0, OPTAB_DIRECT);
937 /* OP0_HIGH should now be dead. */
941 /* ??? This could be done with emit_store_flag where available. */
942 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
943 NULL_RTX, 1, methods);
945 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
946 NULL_RTX, 0, OPTAB_DIRECT);
949 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
950 NULL_RTX, 0, methods);
953 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
954 NULL_RTX, 0, OPTAB_DIRECT);
961 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
962 NULL_RTX, 0, OPTAB_DIRECT);
966 /* OP1_HIGH should now be dead. */
968 adjust = expand_binop (word_mode, add_optab, adjust, temp,
969 adjust, 0, OPTAB_DIRECT);
971 if (target && !REG_P (target))
975 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
976 target, 1, OPTAB_DIRECT);
978 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
979 target, 1, OPTAB_DIRECT);
984 product_high = operand_subword (product, high, 1, mode);
985 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
986 REG_P (product_high) ? product_high : adjust,
988 emit_move_insn (product_high, adjust);
992 /* Wrapper around expand_binop which takes an rtx code to specify
993 the operation to perform, not an optab pointer. All other
994 arguments are the same. */
996 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
997 rtx op1, rtx target, int unsignedp,
998 enum optab_methods methods)
1000 optab binop = code_to_optab[(int) code];
1003 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1006 /* Generate code to perform an operation specified by BINOPTAB
1007 on operands OP0 and OP1, with result having machine-mode MODE.
1009 UNSIGNEDP is for the case where we have to widen the operands
1010 to perform the operation. It says to use zero-extension.
1012 If TARGET is nonzero, the value
1013 is generated there, if it is convenient to do so.
1014 In all cases an rtx is returned for the locus of the value;
1015 this may or may not be TARGET. */
1018 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1019 rtx target, int unsignedp, enum optab_methods methods)
1021 enum optab_methods next_methods
1022 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1023 ? OPTAB_WIDEN : methods);
1024 enum mode_class class;
1025 enum machine_mode wider_mode;
1027 int commutative_op = 0;
1028 int shift_op = (binoptab->code == ASHIFT
1029 || binoptab->code == ASHIFTRT
1030 || binoptab->code == LSHIFTRT
1031 || binoptab->code == ROTATE
1032 || binoptab->code == ROTATERT);
1033 rtx entry_last = get_last_insn ();
1035 bool first_pass_p = true;
1037 class = GET_MODE_CLASS (mode);
1039 /* If subtracting an integer constant, convert this into an addition of
1040 the negated constant. */
1042 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1044 op1 = negate_rtx (mode, op1);
1045 binoptab = add_optab;
1048 /* If we are inside an appropriately-short loop and we are optimizing,
1049 force expensive constants into a register. */
1050 if (CONSTANT_P (op0) && optimize
1051 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1053 if (GET_MODE (op0) != VOIDmode)
1054 op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
1055 op0 = force_reg (mode, op0);
1058 if (CONSTANT_P (op1) && optimize
1059 && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1061 if (GET_MODE (op1) != VOIDmode)
1062 op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
1063 op1 = force_reg (mode, op1);
1066 /* Record where to delete back to if we backtrack. */
1067 last = get_last_insn ();
1069 /* If operation is commutative,
1070 try to make the first operand a register.
1071 Even better, try to make it the same as the target.
1072 Also try to make the last operand a constant. */
1073 if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1074 || binoptab == smul_widen_optab
1075 || binoptab == umul_widen_optab
1076 || binoptab == smul_highpart_optab
1077 || binoptab == umul_highpart_optab)
1081 if (((target == 0 || REG_P (target))
1085 : rtx_equal_p (op1, target))
1086 || GET_CODE (op0) == CONST_INT)
1096 /* If we can do it with a three-operand insn, do so. */
1098 if (methods != OPTAB_MUST_WIDEN
1099 && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1101 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1102 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1103 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1105 rtx xop0 = op0, xop1 = op1;
1110 temp = gen_reg_rtx (mode);
1112 /* If it is a commutative operator and the modes would match
1113 if we would swap the operands, we can save the conversions. */
1116 if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
1117 && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
1121 tmp = op0; op0 = op1; op1 = tmp;
1122 tmp = xop0; xop0 = xop1; xop1 = tmp;
1126 /* In case the insn wants input operands in modes different from
1127 those of the actual operands, convert the operands. It would
1128 seem that we don't need to convert CONST_INTs, but we do, so
1129 that they're properly zero-extended, sign-extended or truncated
1132 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
1133 xop0 = convert_modes (mode0,
1134 GET_MODE (op0) != VOIDmode
1139 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
1140 xop1 = convert_modes (mode1,
1141 GET_MODE (op1) != VOIDmode
1146 /* Now, if insn's predicates don't allow our operands, put them into
1149 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1150 && mode0 != VOIDmode)
1151 xop0 = copy_to_mode_reg (mode0, xop0);
1153 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1154 && mode1 != VOIDmode)
1155 xop1 = copy_to_mode_reg (mode1, xop1);
1157 if (!insn_data[icode].operand[0].predicate (temp, mode))
1158 temp = gen_reg_rtx (mode);
1160 pat = GEN_FCN (icode) (temp, xop0, xop1);
1163 /* If PAT is composed of more than one insn, try to add an appropriate
1164 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1165 operand, call ourselves again, this time without a target. */
1166 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1167 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1169 delete_insns_since (last);
1170 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1171 unsignedp, methods);
1178 delete_insns_since (last);
1181 /* If we were trying to rotate by a constant value, and that didn't
1182 work, try rotating the other direction before falling back to
1183 shifts and bitwise-or. */
1185 && (binoptab == rotl_optab || binoptab == rotr_optab)
1186 && class == MODE_INT
1187 && GET_CODE (op1) == CONST_INT
1189 && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
1191 first_pass_p = false;
1192 op1 = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
1193 binoptab = binoptab == rotl_optab ? rotr_optab : rotl_optab;
1197 /* If this is a multiply, see if we can do a widening operation that
1198 takes operands of this mode and makes a wider mode. */
1200 if (binoptab == smul_optab && GET_MODE_WIDER_MODE (mode) != VOIDmode
1201 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1202 ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
1203 != CODE_FOR_nothing))
1205 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1206 unsignedp ? umul_widen_optab : smul_widen_optab,
1207 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1211 if (GET_MODE_CLASS (mode) == MODE_INT
1212 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1213 GET_MODE_BITSIZE (GET_MODE (temp))))
1214 return gen_lowpart (mode, temp);
1216 return convert_to_mode (mode, temp, unsignedp);
1220 /* Look for a wider mode of the same class for which we think we
1221 can open-code the operation. Check for a widening multiply at the
1222 wider mode as well. */
1224 if ((class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1225 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1226 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1227 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1229 if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
1230 || (binoptab == smul_optab
1231 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1232 && (((unsignedp ? umul_widen_optab : smul_widen_optab)
1233 ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
1234 != CODE_FOR_nothing)))
1236 rtx xop0 = op0, xop1 = op1;
1239 /* For certain integer operations, we need not actually extend
1240 the narrow operands, as long as we will truncate
1241 the results to the same narrowness. */
1243 if ((binoptab == ior_optab || binoptab == and_optab
1244 || binoptab == xor_optab
1245 || binoptab == add_optab || binoptab == sub_optab
1246 || binoptab == smul_optab || binoptab == ashl_optab)
1247 && class == MODE_INT)
1250 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1252 /* The second operand of a shift must always be extended. */
1253 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1254 no_extend && binoptab != ashl_optab);
1256 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1257 unsignedp, OPTAB_DIRECT);
1260 if (class != MODE_INT
1261 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1262 GET_MODE_BITSIZE (wider_mode)))
1265 target = gen_reg_rtx (mode);
1266 convert_move (target, temp, 0);
1270 return gen_lowpart (mode, temp);
1273 delete_insns_since (last);
1277 /* These can be done a word at a time. */
1278 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1279 && class == MODE_INT
1280 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1281 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1287 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1288 won't be accurate, so use a new target. */
1289 if (target == 0 || target == op0 || target == op1)
1290 target = gen_reg_rtx (mode);
1294 /* Do the actual arithmetic. */
1295 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1297 rtx target_piece = operand_subword (target, i, 1, mode);
1298 rtx x = expand_binop (word_mode, binoptab,
1299 operand_subword_force (op0, i, mode),
1300 operand_subword_force (op1, i, mode),
1301 target_piece, unsignedp, next_methods);
1306 if (target_piece != x)
1307 emit_move_insn (target_piece, x);
1310 insns = get_insns ();
1313 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1315 if (binoptab->code != UNKNOWN)
1317 = gen_rtx_fmt_ee (binoptab->code, mode,
1318 copy_rtx (op0), copy_rtx (op1));
1322 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1327 /* Synthesize double word shifts from single word shifts. */
1328 if ((binoptab == lshr_optab || binoptab == ashl_optab
1329 || binoptab == ashr_optab)
1330 && class == MODE_INT
1331 && (GET_CODE (op1) == CONST_INT || !optimize_size)
1332 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1333 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1334 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1335 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1337 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1338 enum machine_mode op1_mode;
1340 double_shift_mask = targetm.shift_truncation_mask (mode);
1341 shift_mask = targetm.shift_truncation_mask (word_mode);
1342 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1344 /* Apply the truncation to constant shifts. */
1345 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1346 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1348 if (op1 == CONST0_RTX (op1_mode))
1351 /* Make sure that this is a combination that expand_doubleword_shift
1352 can handle. See the comments there for details. */
1353 if (double_shift_mask == 0
1354 || (shift_mask == BITS_PER_WORD - 1
1355 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1357 rtx insns, equiv_value;
1358 rtx into_target, outof_target;
1359 rtx into_input, outof_input;
1360 int left_shift, outof_word;
1362 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1363 won't be accurate, so use a new target. */
1364 if (target == 0 || target == op0 || target == op1)
1365 target = gen_reg_rtx (mode);
1369 /* OUTOF_* is the word we are shifting bits away from, and
1370 INTO_* is the word that we are shifting bits towards, thus
1371 they differ depending on the direction of the shift and
1372 WORDS_BIG_ENDIAN. */
1374 left_shift = binoptab == ashl_optab;
1375 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1377 outof_target = operand_subword (target, outof_word, 1, mode);
1378 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1380 outof_input = operand_subword_force (op0, outof_word, mode);
1381 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1383 if (expand_doubleword_shift (op1_mode, binoptab,
1384 outof_input, into_input, op1,
1385 outof_target, into_target,
1386 unsignedp, methods, shift_mask))
1388 insns = get_insns ();
1391 equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
1392 emit_no_conflict_block (insns, target, op0, op1, equiv_value);
1399 /* Synthesize double word rotates from single word shifts. */
1400 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1401 && class == MODE_INT
1402 && GET_CODE (op1) == CONST_INT
1403 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1404 && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1405 && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1408 rtx into_target, outof_target;
1409 rtx into_input, outof_input;
1411 int shift_count, left_shift, outof_word;
1413 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1414 won't be accurate, so use a new target. Do this also if target is not
1415 a REG, first because having a register instead may open optimization
1416 opportunities, and second because if target and op0 happen to be MEMs
1417 designating the same location, we would risk clobbering it too early
1418 in the code sequence we generate below. */
1419 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1420 target = gen_reg_rtx (mode);
1424 shift_count = INTVAL (op1);
1426 /* OUTOF_* is the word we are shifting bits away from, and
1427 INTO_* is the word that we are shifting bits towards, thus
1428 they differ depending on the direction of the shift and
1429 WORDS_BIG_ENDIAN. */
1431 left_shift = (binoptab == rotl_optab);
1432 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1434 outof_target = operand_subword (target, outof_word, 1, mode);
1435 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1437 outof_input = operand_subword_force (op0, outof_word, mode);
1438 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1440 if (shift_count == BITS_PER_WORD)
1442 /* This is just a word swap. */
1443 emit_move_insn (outof_target, into_input);
1444 emit_move_insn (into_target, outof_input);
1449 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1450 rtx first_shift_count, second_shift_count;
1451 optab reverse_unsigned_shift, unsigned_shift;
1453 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1454 ? lshr_optab : ashl_optab);
1456 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1457 ? ashl_optab : lshr_optab);
1459 if (shift_count > BITS_PER_WORD)
1461 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1462 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1466 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1467 second_shift_count = GEN_INT (shift_count);
1470 into_temp1 = expand_binop (word_mode, unsigned_shift,
1471 outof_input, first_shift_count,
1472 NULL_RTX, unsignedp, next_methods);
1473 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1474 into_input, second_shift_count,
1475 NULL_RTX, unsignedp, next_methods);
1477 if (into_temp1 != 0 && into_temp2 != 0)
1478 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1479 into_target, unsignedp, next_methods);
1483 if (inter != 0 && inter != into_target)
1484 emit_move_insn (into_target, inter);
1486 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1487 into_input, first_shift_count,
1488 NULL_RTX, unsignedp, next_methods);
1489 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1490 outof_input, second_shift_count,
1491 NULL_RTX, unsignedp, next_methods);
1493 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1494 inter = expand_binop (word_mode, ior_optab,
1495 outof_temp1, outof_temp2,
1496 outof_target, unsignedp, next_methods);
1498 if (inter != 0 && inter != outof_target)
1499 emit_move_insn (outof_target, inter);
1502 insns = get_insns ();
1512 /* These can be done a word at a time by propagating carries. */
1513 if ((binoptab == add_optab || binoptab == sub_optab)
1514 && class == MODE_INT
1515 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1516 && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1519 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1520 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1521 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1522 rtx xop0, xop1, xtarget;
1524 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1525 value is one of those, use it. Otherwise, use 1 since it is the
1526 one easiest to get. */
1527 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1528 int normalizep = STORE_FLAG_VALUE;
1533 /* Prepare the operands. */
1534 xop0 = force_reg (mode, op0);
1535 xop1 = force_reg (mode, op1);
1537 xtarget = gen_reg_rtx (mode);
1539 if (target == 0 || !REG_P (target))
1542 /* Indicate for flow that the entire target reg is being set. */
1544 emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));
1546 /* Do the actual arithmetic. */
1547 for (i = 0; i < nwords; i++)
1549 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
1550 rtx target_piece = operand_subword (xtarget, index, 1, mode);
1551 rtx op0_piece = operand_subword_force (xop0, index, mode);
1552 rtx op1_piece = operand_subword_force (xop1, index, mode);
1555 /* Main add/subtract of the input operands. */
1556 x = expand_binop (word_mode, binoptab,
1557 op0_piece, op1_piece,
1558 target_piece, unsignedp, next_methods);
1564 /* Store carry from main add/subtract. */
1565 carry_out = gen_reg_rtx (word_mode);
1566 carry_out = emit_store_flag_force (carry_out,
1567 (binoptab == add_optab
1570 word_mode, 1, normalizep);
1577 /* Add/subtract previous carry to main result. */
1578 newx = expand_binop (word_mode,
1579 normalizep == 1 ? binoptab : otheroptab,
1581 NULL_RTX, 1, next_methods);
1585 /* Get out carry from adding/subtracting carry in. */
1586 rtx carry_tmp = gen_reg_rtx (word_mode);
1587 carry_tmp = emit_store_flag_force (carry_tmp,
1588 (binoptab == add_optab
1591 word_mode, 1, normalizep);
1593 /* Logical-ior the two poss. carry together. */
1594 carry_out = expand_binop (word_mode, ior_optab,
1595 carry_out, carry_tmp,
1596 carry_out, 0, next_methods);
1600 emit_move_insn (target_piece, newx);
1604 if (x != target_piece)
1605 emit_move_insn (target_piece, x);
1608 carry_in = carry_out;
1611 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
1613 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
1614 || ! rtx_equal_p (target, xtarget))
1616 rtx temp = emit_move_insn (target, xtarget);
1618 set_unique_reg_note (temp,
1620 gen_rtx_fmt_ee (binoptab->code, mode,
1631 delete_insns_since (last);
1634 /* Attempt to synthesize double word multiplies using a sequence of word
1635 mode multiplications. We first attempt to generate a sequence using a
1636 more efficient unsigned widening multiply, and if that fails we then
1637 try using a signed widening multiply. */
1639 if (binoptab == smul_optab
1640 && class == MODE_INT
1641 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1642 && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
1643 && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
1645 rtx product = NULL_RTX;
1647 if (umul_widen_optab->handlers[(int) mode].insn_code
1648 != CODE_FOR_nothing)
1650 product = expand_doubleword_mult (mode, op0, op1, target,
1653 delete_insns_since (last);
1656 if (product == NULL_RTX
1657 && smul_widen_optab->handlers[(int) mode].insn_code
1658 != CODE_FOR_nothing)
1660 product = expand_doubleword_mult (mode, op0, op1, target,
1663 delete_insns_since (last);
1666 if (product != NULL_RTX)
1668 if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1670 temp = emit_move_insn (target ? target : product, product);
1671 set_unique_reg_note (temp,
1673 gen_rtx_fmt_ee (MULT, mode,
1681 /* It can't be open-coded in this mode.
1682 Use a library call if one is available and caller says that's ok. */
1684 if (binoptab->handlers[(int) mode].libfunc
1685 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
1689 enum machine_mode op1_mode = mode;
1696 op1_mode = word_mode;
1697 /* Specify unsigned here,
1698 since negative shift counts are meaningless. */
1699 op1x = convert_to_mode (word_mode, op1, 1);
1702 if (GET_MODE (op0) != VOIDmode
1703 && GET_MODE (op0) != mode)
1704 op0 = convert_to_mode (mode, op0, unsignedp);
1706 /* Pass 1 for NO_QUEUE so we don't lose any increments
1707 if the libcall is cse'd or moved. */
1708 value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
1709 NULL_RTX, LCT_CONST, mode, 2,
1710 op0, mode, op1x, op1_mode);
1712 insns = get_insns ();
1715 target = gen_reg_rtx (mode);
1716 emit_libcall_block (insns, target, value,
1717 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
1722 delete_insns_since (last);
1724 /* It can't be done in this mode. Can we do it in a wider mode? */
1726 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
1727 || methods == OPTAB_MUST_WIDEN))
1729 /* Caller says, don't even try. */
1730 delete_insns_since (entry_last);
1734 /* Compute the value of METHODS to pass to recursive calls.
1735 Don't allow widening to be tried recursively. */
1737 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
1739 /* Look for a wider mode of the same class for which it appears we can do
1742 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1744 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1745 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1747 if ((binoptab->handlers[(int) wider_mode].insn_code
1748 != CODE_FOR_nothing)
1749 || (methods == OPTAB_LIB
1750 && binoptab->handlers[(int) wider_mode].libfunc))
1752 rtx xop0 = op0, xop1 = op1;
1755 /* For certain integer operations, we need not actually extend
1756 the narrow operands, as long as we will truncate
1757 the results to the same narrowness. */
1759 if ((binoptab == ior_optab || binoptab == and_optab
1760 || binoptab == xor_optab
1761 || binoptab == add_optab || binoptab == sub_optab
1762 || binoptab == smul_optab || binoptab == ashl_optab)
1763 && class == MODE_INT)
1766 xop0 = widen_operand (xop0, wider_mode, mode,
1767 unsignedp, no_extend);
1769 /* The second operand of a shift must always be extended. */
1770 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1771 no_extend && binoptab != ashl_optab);
1773 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1774 unsignedp, methods);
1777 if (class != MODE_INT
1778 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1779 GET_MODE_BITSIZE (wider_mode)))
1782 target = gen_reg_rtx (mode);
1783 convert_move (target, temp, 0);
1787 return gen_lowpart (mode, temp);
1790 delete_insns_since (last);
1795 delete_insns_since (entry_last);
1799 /* Expand a binary operator which has both signed and unsigned forms.
1800 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1803 If we widen unsigned operands, we may use a signed wider operation instead
1804 of an unsigned wider operation, since the result would be the same. */
1807 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
1808 rtx op0, rtx op1, rtx target, int unsignedp,
1809 enum optab_methods methods)
1812 optab direct_optab = unsignedp ? uoptab : soptab;
1813 struct optab wide_soptab;
1815 /* Do it without widening, if possible. */
1816 temp = expand_binop (mode, direct_optab, op0, op1, target,
1817 unsignedp, OPTAB_DIRECT);
1818 if (temp || methods == OPTAB_DIRECT)
1821 /* Try widening to a signed int. Make a fake signed optab that
1822 hides any signed insn for direct use. */
1823 wide_soptab = *soptab;
1824 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing;
1825 wide_soptab.handlers[(int) mode].libfunc = 0;
1827 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
1828 unsignedp, OPTAB_WIDEN);
1830 /* For unsigned operands, try widening to an unsigned int. */
1831 if (temp == 0 && unsignedp)
1832 temp = expand_binop (mode, uoptab, op0, op1, target,
1833 unsignedp, OPTAB_WIDEN);
1834 if (temp || methods == OPTAB_WIDEN)
1837 /* Use the right width lib call if that exists. */
1838 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
1839 if (temp || methods == OPTAB_LIB)
1842 /* Must widen and use a lib call, use either signed or unsigned. */
1843 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
1844 unsignedp, methods);
1848 return expand_binop (mode, uoptab, op0, op1, target,
1849 unsignedp, methods);
1853 /* Generate code to perform an operation specified by UNOPPTAB
1854 on operand OP0, with two results to TARG0 and TARG1.
1855 We assume that the order of the operands for the instruction
1856 is TARG0, TARG1, OP0.
1858 Either TARG0 or TARG1 may be zero, but what that means is that
1859 the result is not actually wanted. We will generate it into
1860 a dummy pseudo-reg and discard it. They may not both be zero.
1862 Returns 1 if this operation can be performed; 0 if not. */
1865 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
1868 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1869 enum mode_class class;
1870 enum machine_mode wider_mode;
1871 rtx entry_last = get_last_insn ();
1874 class = GET_MODE_CLASS (mode);
1877 targ0 = gen_reg_rtx (mode);
1879 targ1 = gen_reg_rtx (mode);
1881 /* Record where to go back to if we fail. */
1882 last = get_last_insn ();
1884 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1886 int icode = (int) unoptab->handlers[(int) mode].insn_code;
1887 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
1891 if (GET_MODE (xop0) != VOIDmode
1892 && GET_MODE (xop0) != mode0)
1893 xop0 = convert_to_mode (mode0, xop0, unsignedp);
1895 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1896 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
1897 xop0 = copy_to_mode_reg (mode0, xop0);
1899 /* We could handle this, but we should always be called with a pseudo
1900 for our targets and all insns should take them as outputs. */
1901 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
1902 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
1904 pat = GEN_FCN (icode) (targ0, targ1, xop0);
1911 delete_insns_since (last);
1914 /* It can't be done in this mode. Can we do it in a wider mode? */
1916 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
1918 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
1919 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1921 if (unoptab->handlers[(int) wider_mode].insn_code
1922 != CODE_FOR_nothing)
1924 rtx t0 = gen_reg_rtx (wider_mode);
1925 rtx t1 = gen_reg_rtx (wider_mode);
1926 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
1928 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
1930 convert_move (targ0, t0, unsignedp);
1931 convert_move (targ1, t1, unsignedp);
1935 delete_insns_since (last);
1940 delete_insns_since (entry_last);
1944 /* Generate code to perform an operation specified by BINOPTAB
1945 on operands OP0 and OP1, with two results to TARG1 and TARG2.
1946 We assume that the order of the operands for the instruction
1947 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1948 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1950 Either TARG0 or TARG1 may be zero, but what that means is that
1951 the result is not actually wanted. We will generate it into
1952 a dummy pseudo-reg and discard it. They may not both be zero.
1954 Returns 1 if this operation can be performed; 0 if not. */
1957 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
1960 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
1961 enum mode_class class;
1962 enum machine_mode wider_mode;
1963 rtx entry_last = get_last_insn ();
1966 class = GET_MODE_CLASS (mode);
1968 /* If we are inside an appropriately-short loop and we are optimizing,
1969 force expensive constants into a register. */
1970 if (CONSTANT_P (op0) && optimize
1971 && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
1972 op0 = force_reg (mode, op0);
1974 if (CONSTANT_P (op1) && optimize
1975 && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
1976 op1 = force_reg (mode, op1);
1979 targ0 = gen_reg_rtx (mode);
1981 targ1 = gen_reg_rtx (mode);
1983 /* Record where to go back to if we fail. */
1984 last = get_last_insn ();
1986 if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
1988 int icode = (int) binoptab->handlers[(int) mode].insn_code;
1989 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1990 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1992 rtx xop0 = op0, xop1 = op1;
1994 /* In case the insn wants input operands in modes different from
1995 those of the actual operands, convert the operands. It would
1996 seem that we don't need to convert CONST_INTs, but we do, so
1997 that they're properly zero-extended, sign-extended or truncated
2000 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2001 xop0 = convert_modes (mode0,
2002 GET_MODE (op0) != VOIDmode
2007 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2008 xop1 = convert_modes (mode1,
2009 GET_MODE (op1) != VOIDmode
2014 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2015 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2016 xop0 = copy_to_mode_reg (mode0, xop0);
2018 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2019 xop1 = copy_to_mode_reg (mode1, xop1);
2021 /* We could handle this, but we should always be called with a pseudo
2022 for our targets and all insns should take them as outputs. */
2023 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2024 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2026 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2033 delete_insns_since (last);
2036 /* It can't be done in this mode. Can we do it in a wider mode? */
2038 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2040 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2041 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2043 if (binoptab->handlers[(int) wider_mode].insn_code
2044 != CODE_FOR_nothing)
2046 rtx t0 = gen_reg_rtx (wider_mode);
2047 rtx t1 = gen_reg_rtx (wider_mode);
2048 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2049 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2051 if (expand_twoval_binop (binoptab, cop0, cop1,
2054 convert_move (targ0, t0, unsignedp);
2055 convert_move (targ1, t1, unsignedp);
2059 delete_insns_since (last);
2064 delete_insns_since (entry_last);
2068 /* Expand the two-valued library call indicated by BINOPTAB, but
2069 preserve only one of the values. If TARG0 is non-NULL, the first
2070 value is placed into TARG0; otherwise the second value is placed
2071 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2072 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2073 This routine assumes that the value returned by the library call is
2074 as if the return value was of an integral mode twice as wide as the
2075 mode of OP0. Returns 1 if the call was successful. */
2078 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2079 rtx targ0, rtx targ1, enum rtx_code code)
2081 enum machine_mode mode;
2082 enum machine_mode libval_mode;
2086 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2087 gcc_assert (!targ0 != !targ1);
2089 mode = GET_MODE (op0);
2090 if (!binoptab->handlers[(int) mode].libfunc)
2093 /* The value returned by the library function will have twice as
2094 many bits as the nominal MODE. */
2095 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2098 libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
2099 NULL_RTX, LCT_CONST,
2103 /* Get the part of VAL containing the value that we want. */
2104 libval = simplify_gen_subreg (mode, libval, libval_mode,
2105 targ0 ? 0 : GET_MODE_SIZE (mode));
2106 insns = get_insns ();
2108 /* Move the into the desired location. */
2109 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2110 gen_rtx_fmt_ee (code, mode, op0, op1));
2116 /* Wrapper around expand_unop which takes an rtx code to specify
2117 the operation to perform, not an optab pointer. All other
2118 arguments are the same. */
2120 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2121 rtx target, int unsignedp)
2123 optab unop = code_to_optab[(int) code];
2126 return expand_unop (mode, unop, op0, target, unsignedp);
2132 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2134 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2136 enum mode_class class = GET_MODE_CLASS (mode);
2137 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2139 enum machine_mode wider_mode;
2140 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2141 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2143 if (clz_optab->handlers[(int) wider_mode].insn_code
2144 != CODE_FOR_nothing)
2146 rtx xop0, temp, last;
2148 last = get_last_insn ();
2151 target = gen_reg_rtx (mode);
2152 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2153 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2155 temp = expand_binop (wider_mode, sub_optab, temp,
2156 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2157 - GET_MODE_BITSIZE (mode)),
2158 target, true, OPTAB_DIRECT);
2160 delete_insns_since (last);
2169 /* Try calculating (parity x) as (and (popcount x) 1), where
2170 popcount can also be done in a wider mode. */
2172 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2174 enum mode_class class = GET_MODE_CLASS (mode);
2175 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2177 enum machine_mode wider_mode;
2178 for (wider_mode = mode; wider_mode != VOIDmode;
2179 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2181 if (popcount_optab->handlers[(int) wider_mode].insn_code
2182 != CODE_FOR_nothing)
2184 rtx xop0, temp, last;
2186 last = get_last_insn ();
2189 target = gen_reg_rtx (mode);
2190 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2191 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2194 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2195 target, true, OPTAB_DIRECT);
2197 delete_insns_since (last);
2206 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2207 conditions, VAL may already be a SUBREG against which we cannot generate
2208 a further SUBREG. In this case, we expect forcing the value into a
2209 register will work around the situation. */
2212 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2213 enum machine_mode imode)
2216 ret = lowpart_subreg (omode, val, imode);
2219 val = force_reg (imode, val);
2220 ret = lowpart_subreg (omode, val, imode);
2221 gcc_assert (ret != NULL);
2226 /* Expand a floating point absolute value or negation operation via a
2227 logical operation on the sign bit. */
2230 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2231 rtx op0, rtx target)
2233 const struct real_format *fmt;
2234 int bitpos, word, nwords, i;
2235 enum machine_mode imode;
2236 HOST_WIDE_INT hi, lo;
2239 /* The format has to have a simple sign bit. */
2240 fmt = REAL_MODE_FORMAT (mode);
2244 bitpos = fmt->signbit_rw;
2248 /* Don't create negative zeros if the format doesn't support them. */
2249 if (code == NEG && !fmt->has_signed_zero)
2252 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2254 imode = int_mode_for_mode (mode);
2255 if (imode == BLKmode)
2264 if (FLOAT_WORDS_BIG_ENDIAN)
2265 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2267 word = bitpos / BITS_PER_WORD;
2268 bitpos = bitpos % BITS_PER_WORD;
2269 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2272 if (bitpos < HOST_BITS_PER_WIDE_INT)
2275 lo = (HOST_WIDE_INT) 1 << bitpos;
2279 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2285 if (target == 0 || target == op0)
2286 target = gen_reg_rtx (mode);
2292 for (i = 0; i < nwords; ++i)
2294 rtx targ_piece = operand_subword (target, i, 1, mode);
2295 rtx op0_piece = operand_subword_force (op0, i, mode);
2299 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2301 immed_double_const (lo, hi, imode),
2302 targ_piece, 1, OPTAB_LIB_WIDEN);
2303 if (temp != targ_piece)
2304 emit_move_insn (targ_piece, temp);
2307 emit_move_insn (targ_piece, op0_piece);
2310 insns = get_insns ();
2313 temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
2314 emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
2318 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
2319 gen_lowpart (imode, op0),
2320 immed_double_const (lo, hi, imode),
2321 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2322 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2324 set_unique_reg_note (get_last_insn (), REG_EQUAL,
2325 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
2331 /* Generate code to perform an operation specified by UNOPTAB
2332 on operand OP0, with result having machine-mode MODE.
2334 UNSIGNEDP is for the case where we have to widen the operands
2335 to perform the operation. It says to use zero-extension.
2337 If TARGET is nonzero, the value
2338 is generated there, if it is convenient to do so.
2339 In all cases an rtx is returned for the locus of the value;
2340 this may or may not be TARGET. */
2343 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
2346 enum mode_class class;
2347 enum machine_mode wider_mode;
2349 rtx last = get_last_insn ();
2352 class = GET_MODE_CLASS (mode);
2354 if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
2356 int icode = (int) unoptab->handlers[(int) mode].insn_code;
2357 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2363 temp = gen_reg_rtx (mode);
2365 if (GET_MODE (xop0) != VOIDmode
2366 && GET_MODE (xop0) != mode0)
2367 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2369 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2371 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2372 xop0 = copy_to_mode_reg (mode0, xop0);
2374 if (!insn_data[icode].operand[0].predicate (temp, mode))
2375 temp = gen_reg_rtx (mode);
2377 pat = GEN_FCN (icode) (temp, xop0);
2380 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
2381 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
2383 delete_insns_since (last);
2384 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
2392 delete_insns_since (last);
2395 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2397 /* Widening clz needs special treatment. */
2398 if (unoptab == clz_optab)
2400 temp = widen_clz (mode, op0, target);
2407 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2408 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2409 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2411 if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
2415 /* For certain operations, we need not actually extend
2416 the narrow operand, as long as we will truncate the
2417 results to the same narrowness. */
2419 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2420 (unoptab == neg_optab
2421 || unoptab == one_cmpl_optab)
2422 && class == MODE_INT);
2424 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2429 if (class != MODE_INT
2430 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2431 GET_MODE_BITSIZE (wider_mode)))
2434 target = gen_reg_rtx (mode);
2435 convert_move (target, temp, 0);
2439 return gen_lowpart (mode, temp);
2442 delete_insns_since (last);
2446 /* These can be done a word at a time. */
2447 if (unoptab == one_cmpl_optab
2448 && class == MODE_INT
2449 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
2450 && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
2455 if (target == 0 || target == op0)
2456 target = gen_reg_rtx (mode);
2460 /* Do the actual arithmetic. */
2461 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
2463 rtx target_piece = operand_subword (target, i, 1, mode);
2464 rtx x = expand_unop (word_mode, unoptab,
2465 operand_subword_force (op0, i, mode),
2466 target_piece, unsignedp);
2468 if (target_piece != x)
2469 emit_move_insn (target_piece, x);
2472 insns = get_insns ();
2475 emit_no_conflict_block (insns, target, op0, NULL_RTX,
2476 gen_rtx_fmt_e (unoptab->code, mode,
2481 if (unoptab->code == NEG)
2483 /* Try negating floating point values by flipping the sign bit. */
2484 if (class == MODE_FLOAT)
2486 temp = expand_absneg_bit (NEG, mode, op0, target);
2491 /* If there is no negation pattern, and we have no negative zero,
2492 try subtracting from zero. */
2493 if (!HONOR_SIGNED_ZEROS (mode))
2495 temp = expand_binop (mode, (unoptab == negv_optab
2496 ? subv_optab : sub_optab),
2497 CONST0_RTX (mode), op0, target,
2498 unsignedp, OPTAB_DIRECT);
2504 /* Try calculating parity (x) as popcount (x) % 2. */
2505 if (unoptab == parity_optab)
2507 temp = expand_parity (mode, op0, target);
2513 /* Now try a library call in this mode. */
2514 if (unoptab->handlers[(int) mode].libfunc)
2518 enum machine_mode outmode = mode;
2520 /* All of these functions return small values. Thus we choose to
2521 have them return something that isn't a double-word. */
2522 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
2523 || unoptab == popcount_optab || unoptab == parity_optab)
2525 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
2529 /* Pass 1 for NO_QUEUE so we don't lose any increments
2530 if the libcall is cse'd or moved. */
2531 value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
2532 NULL_RTX, LCT_CONST, outmode,
2534 insns = get_insns ();
2537 target = gen_reg_rtx (outmode);
2538 emit_libcall_block (insns, target, value,
2539 gen_rtx_fmt_e (unoptab->code, mode, op0));
2544 /* It can't be done in this mode. Can we do it in a wider mode? */
2546 if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
2548 for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode;
2549 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2551 if ((unoptab->handlers[(int) wider_mode].insn_code
2552 != CODE_FOR_nothing)
2553 || unoptab->handlers[(int) wider_mode].libfunc)
2557 /* For certain operations, we need not actually extend
2558 the narrow operand, as long as we will truncate the
2559 results to the same narrowness. */
2561 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
2562 (unoptab == neg_optab
2563 || unoptab == one_cmpl_optab)
2564 && class == MODE_INT);
2566 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
2569 /* If we are generating clz using wider mode, adjust the
2571 if (unoptab == clz_optab && temp != 0)
2572 temp = expand_binop (wider_mode, sub_optab, temp,
2573 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2574 - GET_MODE_BITSIZE (mode)),
2575 target, true, OPTAB_DIRECT);
2579 if (class != MODE_INT)
2582 target = gen_reg_rtx (mode);
2583 convert_move (target, temp, 0);
2587 return gen_lowpart (mode, temp);
2590 delete_insns_since (last);
2595 /* One final attempt at implementing negation via subtraction,
2596 this time allowing widening of the operand. */
2597 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
2600 temp = expand_binop (mode,
2601 unoptab == negv_optab ? subv_optab : sub_optab,
2602 CONST0_RTX (mode), op0,
2603 target, unsignedp, OPTAB_LIB_WIDEN);
2611 /* Emit code to compute the absolute value of OP0, with result to
2612 TARGET if convenient. (TARGET may be 0.) The return value says
2613 where the result actually is to be found.
2615 MODE is the mode of the operand; the mode of the result is
2616 different but can be deduced from MODE.
2621 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
2622 int result_unsignedp)
2627 result_unsignedp = 1;
2629 /* First try to do it with a special abs instruction. */
2630 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
2635 /* For floating point modes, try clearing the sign bit. */
2636 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2638 temp = expand_absneg_bit (ABS, mode, op0, target);
2643 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2644 if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
2645 && !HONOR_SIGNED_ZEROS (mode))
2647 rtx last = get_last_insn ();
2649 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
2651 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
2657 delete_insns_since (last);
2660 /* If this machine has expensive jumps, we can do integer absolute
2661 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2662 where W is the width of MODE. */
2664 if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
2666 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
2667 size_int (GET_MODE_BITSIZE (mode) - 1),
2670 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
2673 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
2674 temp, extended, target, 0, OPTAB_LIB_WIDEN);
2684 expand_abs (enum machine_mode mode, rtx op0, rtx target,
2685 int result_unsignedp, int safe)
2690 result_unsignedp = 1;
2692 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
2696 /* If that does not win, use conditional jump and negate. */
2698 /* It is safe to use the target if it is the same
2699 as the source if this is also a pseudo register */
2700 if (op0 == target && REG_P (op0)
2701 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
2704 op1 = gen_label_rtx ();
2705 if (target == 0 || ! safe
2706 || GET_MODE (target) != mode
2707 || (MEM_P (target) && MEM_VOLATILE_P (target))
2709 && REGNO (target) < FIRST_PSEUDO_REGISTER))
2710 target = gen_reg_rtx (mode);
2712 emit_move_insn (target, op0);
2715 /* If this mode is an integer too wide to compare properly,
2716 compare word by word. Rely on CSE to optimize constant cases. */
2717 if (GET_MODE_CLASS (mode) == MODE_INT
2718 && ! can_compare_p (GE, mode, ccp_jump))
2719 do_jump_by_parts_greater_rtx (mode, 0, target, const0_rtx,
2722 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
2723 NULL_RTX, NULL_RTX, op1);
2725 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
2728 emit_move_insn (target, op0);
2734 /* A subroutine of expand_copysign, perform the copysign operation using the
2735 abs and neg primitives advertised to exist on the target. The assumption
2736 is that we have a split register file, and leaving op0 in fp registers,
2737 and not playing with subregs so much, will help the register allocator. */
2740 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2741 int bitpos, bool op0_is_abs)
2743 enum machine_mode imode;
2744 HOST_WIDE_INT hi, lo;
2753 op0 = expand_unop (mode, abs_optab, op0, target, 0);
2760 if (target == NULL_RTX)
2761 target = copy_to_reg (op0);
2763 emit_move_insn (target, op0);
2766 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2768 imode = int_mode_for_mode (mode);
2769 if (imode == BLKmode)
2771 op1 = gen_lowpart (imode, op1);
2776 if (FLOAT_WORDS_BIG_ENDIAN)
2777 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2779 word = bitpos / BITS_PER_WORD;
2780 bitpos = bitpos % BITS_PER_WORD;
2781 op1 = operand_subword_force (op1, word, mode);
2784 if (bitpos < HOST_BITS_PER_WIDE_INT)
2787 lo = (HOST_WIDE_INT) 1 << bitpos;
2791 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2795 op1 = expand_binop (imode, and_optab, op1,
2796 immed_double_const (lo, hi, imode),
2797 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2799 label = gen_label_rtx ();
2800 emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);
2802 if (GET_CODE (op0) == CONST_DOUBLE)
2803 op0 = simplify_unary_operation (NEG, mode, op0, mode);
2805 op0 = expand_unop (mode, neg_optab, op0, target, 0);
2807 emit_move_insn (target, op0);
2815 /* A subroutine of expand_copysign, perform the entire copysign operation
2816 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
2817 is true if op0 is known to have its sign bit clear. */
2820 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
2821 int bitpos, bool op0_is_abs)
2823 enum machine_mode imode;
2824 HOST_WIDE_INT hi, lo;
2825 int word, nwords, i;
2828 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2830 imode = int_mode_for_mode (mode);
2831 if (imode == BLKmode)
2840 if (FLOAT_WORDS_BIG_ENDIAN)
2841 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2843 word = bitpos / BITS_PER_WORD;
2844 bitpos = bitpos % BITS_PER_WORD;
2845 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2848 if (bitpos < HOST_BITS_PER_WIDE_INT)
2851 lo = (HOST_WIDE_INT) 1 << bitpos;
2855 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
2859 if (target == 0 || target == op0 || target == op1)
2860 target = gen_reg_rtx (mode);
2866 for (i = 0; i < nwords; ++i)
2868 rtx targ_piece = operand_subword (target, i, 1, mode);
2869 rtx op0_piece = operand_subword_force (op0, i, mode);
2874 op0_piece = expand_binop (imode, and_optab, op0_piece,
2875 immed_double_const (~lo, ~hi, imode),
2876 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2878 op1 = expand_binop (imode, and_optab,
2879 operand_subword_force (op1, i, mode),
2880 immed_double_const (lo, hi, imode),
2881 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2883 temp = expand_binop (imode, ior_optab, op0_piece, op1,
2884 targ_piece, 1, OPTAB_LIB_WIDEN);
2885 if (temp != targ_piece)
2886 emit_move_insn (targ_piece, temp);
2889 emit_move_insn (targ_piece, op0_piece);
2892 insns = get_insns ();
2895 emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
2899 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
2900 immed_double_const (lo, hi, imode),
2901 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2903 op0 = gen_lowpart (imode, op0);
2905 op0 = expand_binop (imode, and_optab, op0,
2906 immed_double_const (~lo, ~hi, imode),
2907 NULL_RTX, 1, OPTAB_LIB_WIDEN);
2909 temp = expand_binop (imode, ior_optab, op0, op1,
2910 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
2911 target = lowpart_subreg_maybe_copy (mode, temp, imode);
2917 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
2918 scalar floating point mode. Return NULL if we do not know how to
2919 expand the operation inline. */
2922 expand_copysign (rtx op0, rtx op1, rtx target)
2924 enum machine_mode mode = GET_MODE (op0);
2925 const struct real_format *fmt;
2929 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
2930 gcc_assert (GET_MODE (op1) == mode);
2932 /* First try to do it with a special instruction. */
2933 temp = expand_binop (mode, copysign_optab, op0, op1,
2934 target, 0, OPTAB_DIRECT);
2938 fmt = REAL_MODE_FORMAT (mode);
2939 if (fmt == NULL || !fmt->has_signed_zero)
2943 if (GET_CODE (op0) == CONST_DOUBLE)
2945 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
2946 op0 = simplify_unary_operation (ABS, mode, op0, mode);
2950 if (fmt->signbit_ro >= 0
2951 && (GET_CODE (op0) == CONST_DOUBLE
2952 || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
2953 && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
2955 temp = expand_copysign_absneg (mode, op0, op1, target,
2956 fmt->signbit_ro, op0_is_abs);
2961 if (fmt->signbit_rw < 0)
2963 return expand_copysign_bit (mode, op0, op1, target,
2964 fmt->signbit_rw, op0_is_abs);
2967 /* Generate an instruction whose insn-code is INSN_CODE,
2968 with two operands: an output TARGET and an input OP0.
2969 TARGET *must* be nonzero, and the output is always stored there.
2970 CODE is an rtx code such that (CODE OP0) is an rtx that describes
2971 the value that is stored into TARGET. */
2974 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
2977 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2982 /* Now, if insn does not accept our operands, put them into pseudos. */
2984 if (!insn_data[icode].operand[1].predicate (op0, mode0))
2985 op0 = copy_to_mode_reg (mode0, op0);
2987 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
2988 temp = gen_reg_rtx (GET_MODE (temp));
2990 pat = GEN_FCN (icode) (temp, op0);
2992 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
2993 add_equal_note (pat, temp, code, op0, NULL_RTX);
2998 emit_move_insn (target, temp);
3001 struct no_conflict_data
3003 rtx target, first, insn;
3007 /* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
3008 Set P->must_stay if the currently examined clobber / store has to stay
3009 in the list of insns that constitute the actual no_conflict block /
3012 no_conflict_move_test (rtx dest, rtx set, void *p0)
3014 struct no_conflict_data *p= p0;
3016 /* If this inns directly contributes to setting the target, it must stay. */
3017 if (reg_overlap_mentioned_p (p->target, dest))
3018 p->must_stay = true;
3019 /* If we haven't committed to keeping any other insns in the list yet,
3020 there is nothing more to check. */
3021 else if (p->insn == p->first)
3023 /* If this insn sets / clobbers a register that feeds one of the insns
3024 already in the list, this insn has to stay too. */
3025 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3026 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3027 || reg_used_between_p (dest, p->first, p->insn)
3028 /* Likewise if this insn depends on a register set by a previous
3029 insn in the list, or if it sets a result (presumably a hard
3030 register) that is set or clobbered by a previous insn.
3031 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3032 SET_DEST perform the former check on the address, and the latter
3033 check on the MEM. */
3034 || (GET_CODE (set) == SET
3035 && (modified_in_p (SET_SRC (set), p->first)
3036 || modified_in_p (SET_DEST (set), p->first)
3037 || modified_between_p (SET_SRC (set), p->first, p->insn)
3038 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3039 p->must_stay = true;
3042 /* Emit code to perform a series of operations on a multi-word quantity, one
3045 Such a block is preceded by a CLOBBER of the output, consists of multiple
3046 insns, each setting one word of the output, and followed by a SET copying
3047 the output to itself.
3049 Each of the insns setting words of the output receives a REG_NO_CONFLICT
3050 note indicating that it doesn't conflict with the (also multi-word)
3051 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
3054 INSNS is a block of code generated to perform the operation, not including
3055 the CLOBBER and final copy. All insns that compute intermediate values
3056 are first emitted, followed by the block as described above.
3058 TARGET, OP0, and OP1 are the output and inputs of the operations,
3059 respectively. OP1 may be zero for a unary operation.
3061 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
3064 If TARGET is not a register, INSNS is simply emitted with no special
3065 processing. Likewise if anything in INSNS is not an INSN or if
3066 there is a libcall block inside INSNS.
3068 The final insn emitted is returned. */
3071 emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
3073 rtx prev, next, first, last, insn;
3075 if (!REG_P (target) || reload_in_progress)
3076 return emit_insn (insns);
3078 for (insn = insns; insn; insn = NEXT_INSN (insn))
3079 if (!NONJUMP_INSN_P (insn)
3080 || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
3081 return emit_insn (insns);
3083 /* First emit all insns that do not store into words of the output and remove
3084 these from the list. */
3085 for (insn = insns; insn; insn = next)
3088 struct no_conflict_data data;
3090 next = NEXT_INSN (insn);
3092 /* Some ports (cris) create a libcall regions at their own. We must
3093 avoid any potential nesting of LIBCALLs. */
3094 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3095 remove_note (insn, note);
3096 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3097 remove_note (insn, note);
3099 data.target = target;
3103 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3104 if (! data.must_stay)
3106 if (PREV_INSN (insn))
3107 NEXT_INSN (PREV_INSN (insn)) = next;
3112 PREV_INSN (next) = PREV_INSN (insn);
3118 prev = get_last_insn ();
3120 /* Now write the CLOBBER of the output, followed by the setting of each
3121 of the words, followed by the final copy. */
3122 if (target != op0 && target != op1)
3123 emit_insn (gen_rtx_CLOBBER (VOIDmode, target));
3125 for (insn = insns; insn; insn = next)
3127 next = NEXT_INSN (insn);
3130 if (op1 && REG_P (op1))
3131 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
3134 if (op0 && REG_P (op0))
3135 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
3139 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3140 != CODE_FOR_nothing)
3142 last = emit_move_insn (target, target);
3144 set_unique_reg_note (last, REG_EQUAL, equiv);
3148 last = get_last_insn ();
3150 /* Remove any existing REG_EQUAL note from "last", or else it will
3151 be mistaken for a note referring to the full contents of the
3152 alleged libcall value when found together with the REG_RETVAL
3153 note added below. An existing note can come from an insn
3154 expansion at "last". */
3155 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3159 first = get_insns ();
3161 first = NEXT_INSN (prev);
3163 /* Encapsulate the block so it gets manipulated as a unit. */
3164 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3166 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first, REG_NOTES (last));
3171 /* Emit code to make a call to a constant function or a library call.
3173 INSNS is a list containing all insns emitted in the call.
3174 These insns leave the result in RESULT. Our block is to copy RESULT
3175 to TARGET, which is logically equivalent to EQUIV.
3177 We first emit any insns that set a pseudo on the assumption that these are
3178 loading constants into registers; doing so allows them to be safely cse'ed
3179 between blocks. Then we emit all the other insns in the block, followed by
3180 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3181 note with an operand of EQUIV.
3183 Moving assignments to pseudos outside of the block is done to improve
3184 the generated code, but is not required to generate correct code,
3185 hence being unable to move an assignment is not grounds for not making
3186 a libcall block. There are two reasons why it is safe to leave these
3187 insns inside the block: First, we know that these pseudos cannot be
3188 used in generated RTL outside the block since they are created for
3189 temporary purposes within the block. Second, CSE will not record the
3190 values of anything set inside a libcall block, so we know they must
3191 be dead at the end of the block.
3193 Except for the first group of insns (the ones setting pseudos), the
3194 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3197 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3199 rtx final_dest = target;
3200 rtx prev, next, first, last, insn;
3202 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3203 into a MEM later. Protect the libcall block from this change. */
3204 if (! REG_P (target) || REG_USERVAR_P (target))
3205 target = gen_reg_rtx (GET_MODE (target));
3207 /* If we're using non-call exceptions, a libcall corresponding to an
3208 operation that may trap may also trap. */
3209 if (flag_non_call_exceptions && may_trap_p (equiv))
3211 for (insn = insns; insn; insn = NEXT_INSN (insn))
3214 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3216 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3217 remove_note (insn, note);
3221 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3222 reg note to indicate that this call cannot throw or execute a nonlocal
3223 goto (unless there is already a REG_EH_REGION note, in which case
3225 for (insn = insns; insn; insn = NEXT_INSN (insn))
3228 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3231 XEXP (note, 0) = constm1_rtx;
3233 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
3237 /* First emit all insns that set pseudos. Remove them from the list as
3238 we go. Avoid insns that set pseudos which were referenced in previous
3239 insns. These can be generated by move_by_pieces, for example,
3240 to update an address. Similarly, avoid insns that reference things
3241 set in previous insns. */
3243 for (insn = insns; insn; insn = next)
3245 rtx set = single_set (insn);
3248 /* Some ports (cris) create a libcall regions at their own. We must
3249 avoid any potential nesting of LIBCALLs. */
3250 if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
3251 remove_note (insn, note);
3252 if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
3253 remove_note (insn, note);
3255 next = NEXT_INSN (insn);
3257 if (set != 0 && REG_P (SET_DEST (set))
3258 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3260 struct no_conflict_data data;
3262 data.target = const0_rtx;
3266 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3267 if (! data.must_stay)
3269 if (PREV_INSN (insn))
3270 NEXT_INSN (PREV_INSN (insn)) = next;
3275 PREV_INSN (next) = PREV_INSN (insn);
3281 /* Some ports use a loop to copy large arguments onto the stack.
3282 Don't move anything outside such a loop. */
3287 prev = get_last_insn ();
3289 /* Write the remaining insns followed by the final copy. */
3291 for (insn = insns; insn; insn = next)
3293 next = NEXT_INSN (insn);
3298 last = emit_move_insn (target, result);
3299 if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
3300 != CODE_FOR_nothing)
3301 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3304 /* Remove any existing REG_EQUAL note from "last", or else it will
3305 be mistaken for a note referring to the full contents of the
3306 libcall value when found together with the REG_RETVAL note added
3307 below. An existing note can come from an insn expansion at
3309 remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
3312 if (final_dest != target)
3313 emit_move_insn (final_dest, target);
3316 first = get_insns ();
3318 first = NEXT_INSN (prev);
3320 /* Encapsulate the block so it gets manipulated as a unit. */
3321 if (!flag_non_call_exceptions || !may_trap_p (equiv))
3323 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3324 when the encapsulated region would not be in one basic block,
3325 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
3327 bool attach_libcall_retval_notes = true;
3328 next = NEXT_INSN (last);
3329 for (insn = first; insn != next; insn = NEXT_INSN (insn))
3330 if (control_flow_insn_p (insn))
3332 attach_libcall_retval_notes = false;
3336 if (attach_libcall_retval_notes)
3338 REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
3340 REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
3346 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3347 PURPOSE describes how this comparison will be used. CODE is the rtx
3348 comparison code we will be using.
3350 ??? Actually, CODE is slightly weaker than that. A target is still
3351 required to implement all of the normal bcc operations, but not
3352 required to implement all (or any) of the unordered bcc operations. */
3355 can_compare_p (enum rtx_code code, enum machine_mode mode,
3356 enum can_compare_purpose purpose)
3360 if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3362 if (purpose == ccp_jump)
3363 return bcc_gen_fctn[(int) code] != NULL;
3364 else if (purpose == ccp_store_flag)
3365 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3367 /* There's only one cmov entry point, and it's allowed to fail. */
3370 if (purpose == ccp_jump
3371 && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3373 if (purpose == ccp_cmov
3374 && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3376 if (purpose == ccp_store_flag
3377 && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
3379 mode = GET_MODE_WIDER_MODE (mode);
3381 while (mode != VOIDmode);
3386 /* This function is called when we are going to emit a compare instruction that
3387 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3389 *PMODE is the mode of the inputs (in case they are const_int).
3390 *PUNSIGNEDP nonzero says that the operands are unsigned;
3391 this matters if they need to be widened.
3393 If they have mode BLKmode, then SIZE specifies the size of both operands.
3395 This function performs all the setup necessary so that the caller only has
3396 to emit a single comparison insn. This setup can involve doing a BLKmode
3397 comparison or emitting a library call to perform the comparison if no insn
3398 is available to handle it.
3399 The values which are passed in through pointers can be modified; the caller
3400 should perform the comparison on the modified values. Constant
3401 comparisons must have already been folded. */
3404 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
3405 enum machine_mode *pmode, int *punsignedp,
3406 enum can_compare_purpose purpose)
3408 enum machine_mode mode = *pmode;
3409 rtx x = *px, y = *py;
3410 int unsignedp = *punsignedp;
3411 enum mode_class class;
3413 class = GET_MODE_CLASS (mode);
3415 /* If we are inside an appropriately-short loop and we are optimizing,
3416 force expensive constants into a register. */
3417 if (CONSTANT_P (x) && optimize
3418 && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
3419 x = force_reg (mode, x);
3421 if (CONSTANT_P (y) && optimize
3422 && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
3423 y = force_reg (mode, y);
3426 /* Make sure if we have a canonical comparison. The RTL
3427 documentation states that canonical comparisons are required only
3428 for targets which have cc0. */
3429 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
3432 /* Don't let both operands fail to indicate the mode. */
3433 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
3434 x = force_reg (mode, x);
3436 /* Handle all BLKmode compares. */
3438 if (mode == BLKmode)
3440 enum machine_mode cmp_mode, result_mode;
3441 enum insn_code cmp_code;
3446 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
3450 /* Try to use a memory block compare insn - either cmpstr
3451 or cmpmem will do. */
3452 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
3453 cmp_mode != VOIDmode;
3454 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
3456 cmp_code = cmpmem_optab[cmp_mode];
3457 if (cmp_code == CODE_FOR_nothing)
3458 cmp_code = cmpstr_optab[cmp_mode];
3459 if (cmp_code == CODE_FOR_nothing)
3460 cmp_code = cmpstrn_optab[cmp_mode];
3461 if (cmp_code == CODE_FOR_nothing)
3464 /* Must make sure the size fits the insn's mode. */
3465 if ((GET_CODE (size) == CONST_INT
3466 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
3467 || (GET_MODE_BITSIZE (GET_MODE (size))
3468 > GET_MODE_BITSIZE (cmp_mode)))
3471 result_mode = insn_data[cmp_code].operand[0].mode;
3472 result = gen_reg_rtx (result_mode);
3473 size = convert_to_mode (cmp_mode, size, 1);
3474 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
3478 *pmode = result_mode;
3482 /* Otherwise call a library function, memcmp. */
3483 libfunc = memcmp_libfunc;
3484 length_type = sizetype;
3485 result_mode = TYPE_MODE (integer_type_node);
3486 cmp_mode = TYPE_MODE (length_type);
3487 size = convert_to_mode (TYPE_MODE (length_type), size,
3488 TYPE_UNSIGNED (length_type));
3490 result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
3497 *pmode = result_mode;
3501 /* Don't allow operands to the compare to trap, as that can put the
3502 compare and branch in different basic blocks. */
3503 if (flag_non_call_exceptions)
3506 x = force_reg (mode, x);
3508 y = force_reg (mode, y);
3513 if (can_compare_p (*pcomparison, mode, purpose))
3516 /* Handle a lib call just for the mode we are using. */
3518 if (cmp_optab->handlers[(int) mode].libfunc && class != MODE_FLOAT)
3520 rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
3523 /* If we want unsigned, and this mode has a distinct unsigned
3524 comparison routine, use that. */
3525 if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
3526 libfunc = ucmp_optab->handlers[(int) mode].libfunc;
3528 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
3529 word_mode, 2, x, mode, y, mode);
3531 /* There are two kinds of comparison routines. Biased routines
3532 return 0/1/2, and unbiased routines return -1/0/1. Other parts
3533 of gcc expect that the comparison operation is equivalent
3534 to the modified comparison. For signed comparisons compare the
3535 result against 1 in the biased case, and zero in the unbiased
3536 case. For unsigned comparisons always compare against 1 after
3537 biasing the unbiased result by adding 1. This gives us a way to
3543 if (!TARGET_LIB_INT_CMP_BIASED)
3546 *px = plus_constant (result, 1);
3553 gcc_assert (class == MODE_FLOAT);
3554 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
3557 /* Before emitting an insn with code ICODE, make sure that X, which is going
3558 to be used for operand OPNUM of the insn, is converted from mode MODE to
3559 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3560 that it is accepted by the operand predicate. Return the new value. */
3563 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
3564 enum machine_mode wider_mode, int unsignedp)
3566 if (mode != wider_mode)
3567 x = convert_modes (wider_mode, mode, x, unsignedp);
3569 if (!insn_data[icode].operand[opnum].predicate
3570 (x, insn_data[icode].operand[opnum].mode))
3574 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
3580 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3581 we can do the comparison.
3582 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3583 be NULL_RTX which indicates that only a comparison is to be generated. */
3586 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
3587 enum rtx_code comparison, int unsignedp, rtx label)
3589 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
3590 enum mode_class class = GET_MODE_CLASS (mode);
3591 enum machine_mode wider_mode = mode;
3593 /* Try combined insns first. */
3596 enum insn_code icode;
3597 PUT_MODE (test, wider_mode);
3601 icode = cbranch_optab->handlers[(int) wider_mode].insn_code;
3603 if (icode != CODE_FOR_nothing
3604 && insn_data[icode].operand[0].predicate (test, wider_mode))
3606 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
3607 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
3608 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
3613 /* Handle some compares against zero. */
3614 icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
3615 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
3617 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3618 emit_insn (GEN_FCN (icode) (x));
3620 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3624 /* Handle compares for which there is a directly suitable insn. */
3626 icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
3627 if (icode != CODE_FOR_nothing)
3629 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
3630 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
3631 emit_insn (GEN_FCN (icode) (x, y));
3633 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
3637 if (class != MODE_INT && class != MODE_FLOAT
3638 && class != MODE_COMPLEX_FLOAT)
3641 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
3643 while (wider_mode != VOIDmode);
3648 /* Generate code to compare X with Y so that the condition codes are
3649 set and to jump to LABEL if the condition is true. If X is a
3650 constant and Y is not a constant, then the comparison is swapped to
3651 ensure that the comparison RTL has the canonical form.
3653 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3654 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3655 the proper branch condition code.
3657 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3659 MODE is the mode of the inputs (in case they are const_int).
3661 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3662 be passed unchanged to emit_cmp_insn, then potentially converted into an
3663 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3666 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
3667 enum machine_mode mode, int unsignedp, rtx label)
3669 rtx op0 = x, op1 = y;
3671 /* Swap operands and condition to ensure canonical RTL. */
3672 if (swap_commutative_operands_p (x, y))
3674 /* If we're not emitting a branch, this means some caller
3679 comparison = swap_condition (comparison);
3683 /* If OP0 is still a constant, then both X and Y must be constants.
3684 Force X into a register to create canonical RTL. */
3685 if (CONSTANT_P (op0))
3686 op0 = force_reg (mode, op0);
3690 comparison = unsigned_condition (comparison);
3692 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
3694 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
3697 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3700 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
3701 enum machine_mode mode, int unsignedp)
3703 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
3706 /* Emit a library call comparison between floating point X and Y.
3707 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3710 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
3711 enum machine_mode *pmode, int *punsignedp)
3713 enum rtx_code comparison = *pcomparison;
3714 enum rtx_code swapped = swap_condition (comparison);
3715 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
3718 enum machine_mode orig_mode = GET_MODE (x);
3719 enum machine_mode mode;
3720 rtx value, target, insns, equiv;
3722 bool reversed_p = false;
3724 for (mode = orig_mode; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
3726 if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
3729 if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
3732 tmp = x; x = y; y = tmp;
3733 comparison = swapped;
3737 if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
3738 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
3740 comparison = reversed;
3746 gcc_assert (mode != VOIDmode);
3748 if (mode != orig_mode)
3750 x = convert_to_mode (mode, x, 0);
3751 y = convert_to_mode (mode, y, 0);
3754 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3755 the RTL. The allows the RTL optimizers to delete the libcall if the
3756 condition can be determined at compile-time. */
3757 if (comparison == UNORDERED)
3759 rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
3760 equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
3761 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3762 temp, const_true_rtx, equiv);
3766 equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
3767 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3769 rtx true_rtx, false_rtx;
3774 true_rtx = const0_rtx;
3775 false_rtx = const_true_rtx;
3779 true_rtx = const_true_rtx;
3780 false_rtx = const0_rtx;
3784 true_rtx = const1_rtx;
3785 false_rtx = const0_rtx;
3789 true_rtx = const0_rtx;
3790 false_rtx = constm1_rtx;
3794 true_rtx = constm1_rtx;
3795 false_rtx = const0_rtx;
3799 true_rtx = const0_rtx;
3800 false_rtx = const1_rtx;
3806 equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
3807 equiv, true_rtx, false_rtx);
3812 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
3813 word_mode, 2, x, mode, y, mode);
3814 insns = get_insns ();
3817 target = gen_reg_rtx (word_mode);
3818 emit_libcall_block (insns, target, value, equiv);
3820 if (comparison == UNORDERED
3821 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
3822 comparison = reversed_p ? EQ : NE;
3827 *pcomparison = comparison;
3831 /* Generate code to indirectly jump to a location given in the rtx LOC. */
3834 emit_indirect_jump (rtx loc)
3836 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
3838 loc = copy_to_mode_reg (Pmode, loc);
3840 emit_jump_insn (gen_indirect_jump (loc));
3844 #ifdef HAVE_conditional_move
3846 /* Emit a conditional move instruction if the machine supports one for that
3847 condition and machine mode.
3849 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3850 the mode to use should they be constants. If it is VOIDmode, they cannot
3853 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3854 should be stored there. MODE is the mode to use should they be constants.
3855 If it is VOIDmode, they cannot both be constants.
3857 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3858 is not supported. */
3861 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
3862 enum machine_mode cmode, rtx op2, rtx op3,
3863 enum machine_mode mode, int unsignedp)
3865 rtx tem, subtarget, comparison, insn;
3866 enum insn_code icode;
3867 enum rtx_code reversed;
3869 /* If one operand is constant, make it the second one. Only do this
3870 if the other operand is not constant as well. */
3872 if (swap_commutative_operands_p (op0, op1))
3877 code = swap_condition (code);
3880 /* get_condition will prefer to generate LT and GT even if the old
3881 comparison was against zero, so undo that canonicalization here since
3882 comparisons against zero are cheaper. */
3883 if (code == LT && op1 == const1_rtx)
3884 code = LE, op1 = const0_rtx;
3885 else if (code == GT && op1 == constm1_rtx)
3886 code = GE, op1 = const0_rtx;
3888 if (cmode == VOIDmode)
3889 cmode = GET_MODE (op0);
3891 if (swap_commutative_operands_p (op2, op3)
3892 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
3901 if (mode == VOIDmode)
3902 mode = GET_MODE (op2);
3904 icode = movcc_gen_code[mode];
3906 if (icode == CODE_FOR_nothing)
3910 target = gen_reg_rtx (mode);
3914 /* If the insn doesn't accept these operands, put them in pseudos. */
3916 if (!insn_data[icode].operand[0].predicate
3917 (subtarget, insn_data[icode].operand[0].mode))
3918 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
3920 if (!insn_data[icode].operand[2].predicate
3921 (op2, insn_data[icode].operand[2].mode))
3922 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
3924 if (!insn_data[icode].operand[3].predicate
3925 (op3, insn_data[icode].operand[3].mode))
3926 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
3928 /* Everything should now be in the suitable form, so emit the compare insn
3929 and then the conditional move. */
3932 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
3934 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3935 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3936 return NULL and let the caller figure out how best to deal with this
3938 if (GET_CODE (comparison) != code)
3941 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
3943 /* If that failed, then give up. */
3949 if (subtarget != target)
3950 convert_move (target, subtarget, 0);
3955 /* Return nonzero if a conditional move of mode MODE is supported.
3957 This function is for combine so it can tell whether an insn that looks
3958 like a conditional move is actually supported by the hardware. If we
3959 guess wrong we lose a bit on optimization, but that's it. */
3960 /* ??? sparc64 supports conditionally moving integers values based on fp
3961 comparisons, and vice versa. How do we handle them? */
3964 can_conditionally_move_p (enum machine_mode mode)
3966 if (movcc_gen_code[mode] != CODE_FOR_nothing)
3972 #endif /* HAVE_conditional_move */
3974 /* Emit a conditional addition instruction if the machine supports one for that
3975 condition and machine mode.
3977 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3978 the mode to use should they be constants. If it is VOIDmode, they cannot
3981 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
3982 should be stored there. MODE is the mode to use should they be constants.
3983 If it is VOIDmode, they cannot both be constants.
3985 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3986 is not supported. */
3989 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
3990 enum machine_mode cmode, rtx op2, rtx op3,
3991 enum machine_mode mode, int unsignedp)
3993 rtx tem, subtarget, comparison, insn;
3994 enum insn_code icode;
3995 enum rtx_code reversed;
3997 /* If one operand is constant, make it the second one. Only do this
3998 if the other operand is not constant as well. */
4000 if (swap_commutative_operands_p (op0, op1))
4005 code = swap_condition (code);
4008 /* get_condition will prefer to generate LT and GT even if the old
4009 comparison was against zero, so undo that canonicalization here since
4010 comparisons against zero are cheaper. */
4011 if (code == LT && op1 == const1_rtx)
4012 code = LE, op1 = const0_rtx;
4013 else if (code == GT && op1 == constm1_rtx)
4014 code = GE, op1 = const0_rtx;
4016 if (cmode == VOIDmode)
4017 cmode = GET_MODE (op0);
4019 if (swap_commutative_operands_p (op2, op3)
4020 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4029 if (mode == VOIDmode)
4030 mode = GET_MODE (op2);
4032 icode = addcc_optab->handlers[(int) mode].insn_code;
4034 if (icode == CODE_FOR_nothing)
4038 target = gen_reg_rtx (mode);
4040 /* If the insn doesn't accept these operands, put them in pseudos. */
4042 if (!insn_data[icode].operand[0].predicate
4043 (target, insn_data[icode].operand[0].mode))
4044 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4048 if (!insn_data[icode].operand[2].predicate
4049 (op2, insn_data[icode].operand[2].mode))
4050 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4052 if (!insn_data[icode].operand[3].predicate
4053 (op3, insn_data[icode].operand[3].mode))
4054 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4056 /* Everything should now be in the suitable form, so emit the compare insn
4057 and then the conditional move. */
4060 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4062 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4063 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4064 return NULL and let the caller figure out how best to deal with this
4066 if (GET_CODE (comparison) != code)
4069 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4071 /* If that failed, then give up. */
4077 if (subtarget != target)
4078 convert_move (target, subtarget, 0);
4083 /* These functions attempt to generate an insn body, rather than
4084 emitting the insn, but if the gen function already emits them, we
4085 make no attempt to turn them back into naked patterns. */
4087 /* Generate and return an insn body to add Y to X. */
4090 gen_add2_insn (rtx x, rtx y)
4092 int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4094 gcc_assert (insn_data[icode].operand[0].predicate
4095 (x, insn_data[icode].operand[0].mode));
4096 gcc_assert (insn_data[icode].operand[1].predicate
4097 (x, insn_data[icode].operand[1].mode));
4098 gcc_assert (insn_data[icode].operand[2].predicate
4099 (y, insn_data[icode].operand[2].mode));
4101 return GEN_FCN (icode) (x, x, y);
4104 /* Generate and return an insn body to add r1 and c,
4105 storing the result in r0. */
4107 gen_add3_insn (rtx r0, rtx r1, rtx c)
4109 int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;
4111 if (icode == CODE_FOR_nothing
4112 || !(insn_data[icode].operand[0].predicate
4113 (r0, insn_data[icode].operand[0].mode))
4114 || !(insn_data[icode].operand[1].predicate
4115 (r1, insn_data[icode].operand[1].mode))
4116 || !(insn_data[icode].operand[2].predicate
4117 (c, insn_data[icode].operand[2].mode)))
4120 return GEN_FCN (icode) (r0, r1, c);
4124 have_add2_insn (rtx x, rtx y)
4128 gcc_assert (GET_MODE (x) != VOIDmode);
4130 icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;
4132 if (icode == CODE_FOR_nothing)
4135 if (!(insn_data[icode].operand[0].predicate
4136 (x, insn_data[icode].operand[0].mode))
4137 || !(insn_data[icode].operand[1].predicate
4138 (x, insn_data[icode].operand[1].mode))
4139 || !(insn_data[icode].operand[2].predicate
4140 (y, insn_data[icode].operand[2].mode)))
4146 /* Generate and return an insn body to subtract Y from X. */
4149 gen_sub2_insn (rtx x, rtx y)
4151 int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4153 gcc_assert (insn_data[icode].operand[0].predicate
4154 (x, insn_data[icode].operand[0].mode));
4155 gcc_assert (insn_data[icode].operand[1].predicate
4156 (x, insn_data[icode].operand[1].mode));
4157 gcc_assert (insn_data[icode].operand[2].predicate
4158 (y, insn_data[icode].operand[2].mode));
4160 return GEN_FCN (icode) (x, x, y);
4163 /* Generate and return an insn body to subtract r1 and c,
4164 storing the result in r0. */
4166 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4168 int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;
4170 if (icode == CODE_FOR_nothing
4171 || !(insn_data[icode].operand[0].predicate
4172 (r0, insn_data[icode].operand[0].mode))
4173 || !(insn_data[icode].operand[1].predicate
4174 (r1, insn_data[icode].operand[1].mode))
4175 || !(insn_data[icode].operand[2].predicate
4176 (c, insn_data[icode].operand[2].mode)))
4179 return GEN_FCN (icode) (r0, r1, c);
4183 have_sub2_insn (rtx x, rtx y)
4187 gcc_assert (GET_MODE (x) != VOIDmode);
4189 icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;
4191 if (icode == CODE_FOR_nothing)
4194 if (!(insn_data[icode].operand[0].predicate
4195 (x, insn_data[icode].operand[0].mode))
4196 || !(insn_data[icode].operand[1].predicate
4197 (x, insn_data[icode].operand[1].mode))
4198 || !(insn_data[icode].operand[2].predicate
4199 (y, insn_data[icode].operand[2].mode)))
4205 /* Generate the body of an instruction to copy Y into X.
4206 It may be a list of insns, if one insn isn't enough. */
4209 gen_move_insn (rtx x, rtx y)
4214 emit_move_insn_1 (x, y);
4220 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4221 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4222 no such operation exists, CODE_FOR_nothing will be returned. */
4225 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4229 #ifdef HAVE_ptr_extend
4231 return CODE_FOR_ptr_extend;
4234 tab = unsignedp ? zext_optab : sext_optab;
4235 return tab->handlers[to_mode][from_mode].insn_code;
4238 /* Generate the body of an insn to extend Y (with mode MFROM)
4239 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4242 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4243 enum machine_mode mfrom, int unsignedp)
4245 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4246 return GEN_FCN (icode) (x, y);
4249 /* can_fix_p and can_float_p say whether the target machine
4250 can directly convert a given fixed point type to
4251 a given floating point type, or vice versa.
4252 The returned value is the CODE_FOR_... value to use,
4253 or CODE_FOR_nothing if these modes cannot be directly converted.
4255 *TRUNCP_PTR is set to 1 if it is necessary to output
4256 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4258 static enum insn_code
4259 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4260 int unsignedp, int *truncp_ptr)
4263 enum insn_code icode;
4265 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4266 icode = tab->handlers[fixmode][fltmode].insn_code;
4267 if (icode != CODE_FOR_nothing)
4273 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4274 for this to work. We need to rework the fix* and ftrunc* patterns
4275 and documentation. */
4276 tab = unsignedp ? ufix_optab : sfix_optab;
4277 icode = tab->handlers[fixmode][fltmode].insn_code;
4278 if (icode != CODE_FOR_nothing
4279 && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
4286 return CODE_FOR_nothing;
4289 static enum insn_code
4290 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4295 tab = unsignedp ? ufloat_optab : sfloat_optab;
4296 return tab->handlers[fltmode][fixmode].insn_code;
4299 /* Generate code to convert FROM to floating point
4300 and store in TO. FROM must be fixed point and not VOIDmode.
4301 UNSIGNEDP nonzero means regard FROM as unsigned.
4302 Normally this is done by correcting the final value
4303 if it is negative. */
4306 expand_float (rtx to, rtx from, int unsignedp)
4308 enum insn_code icode;
4310 enum machine_mode fmode, imode;
4312 /* Crash now, because we won't be able to decide which mode to use. */
4313 gcc_assert (GET_MODE (from) != VOIDmode);
4315 /* Look for an insn to do the conversion. Do it in the specified
4316 modes if possible; otherwise convert either input, output or both to
4317 wider mode. If the integer mode is wider than the mode of FROM,
4318 we can do the conversion signed even if the input is unsigned. */
4320 for (fmode = GET_MODE (to); fmode != VOIDmode;
4321 fmode = GET_MODE_WIDER_MODE (fmode))
4322 for (imode = GET_MODE (from); imode != VOIDmode;
4323 imode = GET_MODE_WIDER_MODE (imode))
4325 int doing_unsigned = unsignedp;
4327 if (fmode != GET_MODE (to)
4328 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4331 icode = can_float_p (fmode, imode, unsignedp);
4332 if (icode == CODE_FOR_nothing && imode != GET_MODE (from) && unsignedp)
4333 icode = can_float_p (fmode, imode, 0), doing_unsigned = 0;
4335 if (icode != CODE_FOR_nothing)
4337 if (imode != GET_MODE (from))
4338 from = convert_to_mode (imode, from, unsignedp);
4340 if (fmode != GET_MODE (to))
4341 target = gen_reg_rtx (fmode);
4343 emit_unop_insn (icode, target, from,
4344 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
4347 convert_move (to, target, 0);
4352 /* Unsigned integer, and no way to convert directly.
4353 Convert as signed, then conditionally adjust the result. */
4356 rtx label = gen_label_rtx ();
4358 REAL_VALUE_TYPE offset;
4360 /* Look for a usable floating mode FMODE wider than the source and at
4361 least as wide as the target. Using FMODE will avoid rounding woes
4362 with unsigned values greater than the signed maximum value. */
4364 for (fmode = GET_MODE (to); fmode != VOIDmode;
4365 fmode = GET_MODE_WIDER_MODE (fmode))
4366 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
4367 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
4370 if (fmode == VOIDmode)
4372 /* There is no such mode. Pretend the target is wide enough. */
4373 fmode = GET_MODE (to);
4375 /* Avoid double-rounding when TO is narrower than FROM. */
4376 if ((significand_size (fmode) + 1)
4377 < GET_MODE_BITSIZE (GET_MODE (from)))
4380 rtx neglabel = gen_label_rtx ();
4382 /* Don't use TARGET if it isn't a register, is a hard register,
4383 or is the wrong mode. */
4385 || REGNO (target) < FIRST_PSEUDO_REGISTER
4386 || GET_MODE (target) != fmode)
4387 target = gen_reg_rtx (fmode);
4389 imode = GET_MODE (from);
4390 do_pending_stack_adjust ();
4392 /* Test whether the sign bit is set. */
4393 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
4396 /* The sign bit is not set. Convert as signed. */
4397 expand_float (target, from, 0);
4398 emit_jump_insn (gen_jump (label));
4401 /* The sign bit is set.
4402 Convert to a usable (positive signed) value by shifting right
4403 one bit, while remembering if a nonzero bit was shifted
4404 out; i.e., compute (from & 1) | (from >> 1). */
4406 emit_label (neglabel);
4407 temp = expand_binop (imode, and_optab, from, const1_rtx,
4408 NULL_RTX, 1, OPTAB_LIB_WIDEN);
4409 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
4411 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
4413 expand_float (target, temp, 0);
4415 /* Multiply by 2 to undo the shift above. */
4416 temp = expand_binop (fmode, add_optab, target, target,
4417 target, 0, OPTAB_LIB_WIDEN);
4419 emit_move_insn (target, temp);
4421 do_pending_stack_adjust ();
4427 /* If we are about to do some arithmetic to correct for an
4428 unsigned operand, do it in a pseudo-register. */
4430 if (GET_MODE (to) != fmode
4431 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
4432 target = gen_reg_rtx (fmode);
4434 /* Convert as signed integer to floating. */
4435 expand_float (target, from, 0);
4437 /* If FROM is negative (and therefore TO is negative),
4438 correct its value by 2**bitwidth. */
4440 do_pending_stack_adjust ();
4441 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
4445 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
4446 temp = expand_binop (fmode, add_optab, target,
4447 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
4448 target, 0, OPTAB_LIB_WIDEN);
4450 emit_move_insn (target, temp);
4452 do_pending_stack_adjust ();
4457 /* No hardware instruction available; call a library routine. */
4462 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
4464 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
4465 from = convert_to_mode (SImode, from, unsignedp);
4467 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4468 gcc_assert (libfunc);
4472 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4473 GET_MODE (to), 1, from,
4475 insns = get_insns ();
4478 emit_libcall_block (insns, target, value,
4479 gen_rtx_FLOAT (GET_MODE (to), from));
4484 /* Copy result to requested destination
4485 if we have been computing in a temp location. */
4489 if (GET_MODE (target) == GET_MODE (to))
4490 emit_move_insn (to, target);
4492 convert_move (to, target, 0);
4496 /* Generate code to convert FROM to fixed point and store in TO. FROM
4497 must be floating point. */
4500 expand_fix (rtx to, rtx from, int unsignedp)
4502 enum insn_code icode;
4504 enum machine_mode fmode, imode;
4507 /* We first try to find a pair of modes, one real and one integer, at
4508 least as wide as FROM and TO, respectively, in which we can open-code
4509 this conversion. If the integer mode is wider than the mode of TO,
4510 we can do the conversion either signed or unsigned. */
4512 for (fmode = GET_MODE (from); fmode != VOIDmode;
4513 fmode = GET_MODE_WIDER_MODE (fmode))
4514 for (imode = GET_MODE (to); imode != VOIDmode;
4515 imode = GET_MODE_WIDER_MODE (imode))
4517 int doing_unsigned = unsignedp;
4519 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
4520 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
4521 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
4523 if (icode != CODE_FOR_nothing)
4525 if (fmode != GET_MODE (from))
4526 from = convert_to_mode (fmode, from, 0);
4530 rtx temp = gen_reg_rtx (GET_MODE (from));
4531 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
4535 if (imode != GET_MODE (to))
4536 target = gen_reg_rtx (imode);
4538 emit_unop_insn (icode, target, from,
4539 doing_unsigned ? UNSIGNED_FIX : FIX);
4541 convert_move (to, target, unsignedp);
4546 /* For an unsigned conversion, there is one more way to do it.
4547 If we have a signed conversion, we generate code that compares
4548 the real value to the largest representable positive number. If if
4549 is smaller, the conversion is done normally. Otherwise, subtract
4550 one plus the highest signed number, convert, and add it back.
4552 We only need to check all real modes, since we know we didn't find
4553 anything with a wider integer mode.
4555 This code used to extend FP value into mode wider than the destination.
4556 This is not needed. Consider, for instance conversion from SFmode
4559 The hot path trought the code is dealing with inputs smaller than 2^63
4560 and doing just the conversion, so there is no bits to lose.
4562 In the other path we know the value is positive in the range 2^63..2^64-1
4563 inclusive. (as for other imput overflow happens and result is undefined)
4564 So we know that the most important bit set in mantissa corresponds to
4565 2^63. The subtraction of 2^63 should not generate any rounding as it
4566 simply clears out that bit. The rest is trivial. */
4568 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
4569 for (fmode = GET_MODE (from); fmode != VOIDmode;
4570 fmode = GET_MODE_WIDER_MODE (fmode))
4571 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
4575 REAL_VALUE_TYPE offset;
4576 rtx limit, lab1, lab2, insn;
4578 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
4579 real_2expN (&offset, bitsize - 1);
4580 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
4581 lab1 = gen_label_rtx ();
4582 lab2 = gen_label_rtx ();
4584 if (fmode != GET_MODE (from))
4585 from = convert_to_mode (fmode, from, 0);
4587 /* See if we need to do the subtraction. */
4588 do_pending_stack_adjust ();
4589 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
4592 /* If not, do the signed "fix" and branch around fixup code. */
4593 expand_fix (to, from, 0);
4594 emit_jump_insn (gen_jump (lab2));
4597 /* Otherwise, subtract 2**(N-1), convert to signed number,
4598 then add 2**(N-1). Do the addition using XOR since this
4599 will often generate better code. */
4601 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
4602 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4603 expand_fix (to, target, 0);
4604 target = expand_binop (GET_MODE (to), xor_optab, to,
4606 ((HOST_WIDE_INT) 1 << (bitsize - 1),
4608 to, 1, OPTAB_LIB_WIDEN);
4611 emit_move_insn (to, target);
4615 if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
4616 != CODE_FOR_nothing)
4618 /* Make a place for a REG_NOTE and add it. */
4619 insn = emit_move_insn (to, to);
4620 set_unique_reg_note (insn,
4622 gen_rtx_fmt_e (UNSIGNED_FIX,
4630 /* We can't do it with an insn, so use a library call. But first ensure
4631 that the mode of TO is at least as wide as SImode, since those are the
4632 only library calls we know about. */
4634 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
4636 target = gen_reg_rtx (SImode);
4638 expand_fix (target, from, unsignedp);
4646 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
4647 libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
4648 gcc_assert (libfunc);
4652 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4653 GET_MODE (to), 1, from,
4655 insns = get_insns ();
4658 emit_libcall_block (insns, target, value,
4659 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
4660 GET_MODE (to), from));
4665 if (GET_MODE (to) == GET_MODE (target))
4666 emit_move_insn (to, target);
4668 convert_move (to, target, 0);
4672 /* Report whether we have an instruction to perform the operation
4673 specified by CODE on operands of mode MODE. */
4675 have_insn_for (enum rtx_code code, enum machine_mode mode)
4677 return (code_to_optab[(int) code] != 0
4678 && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
4679 != CODE_FOR_nothing));
4682 /* Create a blank optab. */
4687 optab op = ggc_alloc (sizeof (struct optab));
4688 for (i = 0; i < NUM_MACHINE_MODES; i++)
4690 op->handlers[i].insn_code = CODE_FOR_nothing;
4691 op->handlers[i].libfunc = 0;
4697 static convert_optab
4698 new_convert_optab (void)
4701 convert_optab op = ggc_alloc (sizeof (struct convert_optab));
4702 for (i = 0; i < NUM_MACHINE_MODES; i++)
4703 for (j = 0; j < NUM_MACHINE_MODES; j++)
4705 op->handlers[i][j].insn_code = CODE_FOR_nothing;
4706 op->handlers[i][j].libfunc = 0;
4711 /* Same, but fill in its code as CODE, and write it into the
4712 code_to_optab table. */
4714 init_optab (enum rtx_code code)
4716 optab op = new_optab ();
4718 code_to_optab[(int) code] = op;
4722 /* Same, but fill in its code as CODE, and do _not_ write it into
4723 the code_to_optab table. */
4725 init_optabv (enum rtx_code code)
4727 optab op = new_optab ();
4732 /* Conversion optabs never go in the code_to_optab table. */
4733 static inline convert_optab
4734 init_convert_optab (enum rtx_code code)
4736 convert_optab op = new_convert_optab ();
4741 /* Initialize the libfunc fields of an entire group of entries in some
4742 optab. Each entry is set equal to a string consisting of a leading
4743 pair of underscores followed by a generic operation name followed by
4744 a mode name (downshifted to lowercase) followed by a single character
4745 representing the number of operands for the given operation (which is
4746 usually one of the characters '2', '3', or '4').
4748 OPTABLE is the table in which libfunc fields are to be initialized.
4749 FIRST_MODE is the first machine mode index in the given optab to
4751 LAST_MODE is the last machine mode index in the given optab to
4753 OPNAME is the generic (string) name of the operation.
4754 SUFFIX is the character which specifies the number of operands for
4755 the given generic operation.
4759 init_libfuncs (optab optable, int first_mode, int last_mode,
4760 const char *opname, int suffix)
4763 unsigned opname_len = strlen (opname);
4765 for (mode = first_mode; (int) mode <= (int) last_mode;
4766 mode = (enum machine_mode) ((int) mode + 1))
4768 const char *mname = GET_MODE_NAME (mode);
4769 unsigned mname_len = strlen (mname);
4770 char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
4777 for (q = opname; *q; )
4779 for (q = mname; *q; q++)
4780 *p++ = TOLOWER (*q);
4784 optable->handlers[(int) mode].libfunc
4785 = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
4789 /* Initialize the libfunc fields of an entire group of entries in some
4790 optab which correspond to all integer mode operations. The parameters
4791 have the same meaning as similarly named ones for the `init_libfuncs'
4792 routine. (See above). */
4795 init_integral_libfuncs (optab optable, const char *opname, int suffix)
4797 int maxsize = 2*BITS_PER_WORD;
4798 if (maxsize < LONG_LONG_TYPE_SIZE)
4799 maxsize = LONG_LONG_TYPE_SIZE;
4800 init_libfuncs (optable, word_mode,
4801 mode_for_size (maxsize, MODE_INT, 0),
4805 /* Initialize the libfunc fields of an entire group of entries in some
4806 optab which correspond to all real mode operations. The parameters
4807 have the same meaning as similarly named ones for the `init_libfuncs'
4808 routine. (See above). */
4811 init_floating_libfuncs (optab optable, const char *opname, int suffix)
4813 init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
4816 /* Initialize the libfunc fields of an entire group of entries of an
4817 inter-mode-class conversion optab. The string formation rules are
4818 similar to the ones for init_libfuncs, above, but instead of having
4819 a mode name and an operand count these functions have two mode names
4820 and no operand count. */
4822 init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
4823 enum mode_class from_class,
4824 enum mode_class to_class)
4826 enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
4827 enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
4828 size_t opname_len = strlen (opname);
4829 size_t max_mname_len = 0;
4831 enum machine_mode fmode, tmode;
4832 const char *fname, *tname;
4834 char *libfunc_name, *suffix;
4837 for (fmode = first_from_mode;
4839 fmode = GET_MODE_WIDER_MODE (fmode))
4840 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));
4842 for (tmode = first_to_mode;
4844 tmode = GET_MODE_WIDER_MODE (tmode))
4845 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));
4847 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
4848 libfunc_name[0] = '_';
4849 libfunc_name[1] = '_';
4850 memcpy (&libfunc_name[2], opname, opname_len);
4851 suffix = libfunc_name + opname_len + 2;
4853 for (fmode = first_from_mode; fmode != VOIDmode;
4854 fmode = GET_MODE_WIDER_MODE (fmode))
4855 for (tmode = first_to_mode; tmode != VOIDmode;
4856 tmode = GET_MODE_WIDER_MODE (tmode))
4858 fname = GET_MODE_NAME (fmode);
4859 tname = GET_MODE_NAME (tmode);
4862 for (q = fname; *q; p++, q++)
4864 for (q = tname; *q; p++, q++)
4869 tab->handlers[tmode][fmode].libfunc
4870 = init_one_libfunc (ggc_alloc_string (libfunc_name,
4875 /* Initialize the libfunc fields of an entire group of entries of an
4876 intra-mode-class conversion optab. The string formation rules are
4877 similar to the ones for init_libfunc, above. WIDENING says whether
4878 the optab goes from narrow to wide modes or vice versa. These functions
4879 have two mode names _and_ an operand count. */
4881 init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
4882 enum mode_class class, bool widening)
4884 enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
4885 size_t opname_len = strlen (opname);
4886 size_t max_mname_len = 0;
4888 enum machine_mode nmode, wmode;
4889 const char *nname, *wname;
4891 char *libfunc_name, *suffix;
4894 for (nmode = first_mode; nmode != VOIDmode;
4895 nmode = GET_MODE_WIDER_MODE (nmode))
4896 max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));
4898 libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
4899 libfunc_name[0] = '_';
4900 libfunc_name[1] = '_';
4901 memcpy (&libfunc_name[2], opname, opname_len);
4902 suffix = libfunc_name + opname_len + 2;
4904 for (nmode = first_mode; nmode != VOIDmode;
4905 nmode = GET_MODE_WIDER_MODE (nmode))
4906 for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
4907 wmode = GET_MODE_WIDER_MODE (wmode))
4909 nname = GET_MODE_NAME (nmode);
4910 wname = GET_MODE_NAME (wmode);
4913 for (q = widening ? nname : wname; *q; p++, q++)
4915 for (q = widening ? wname : nname; *q; p++, q++)
4921 tab->handlers[widening ? wmode : nmode]
4922 [widening ? nmode : wmode].libfunc
4923 = init_one_libfunc (ggc_alloc_string (libfunc_name,
4930 init_one_libfunc (const char *name)
4934 /* Create a FUNCTION_DECL that can be passed to
4935 targetm.encode_section_info. */
4936 /* ??? We don't have any type information except for this is
4937 a function. Pretend this is "int foo()". */
4938 tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
4939 build_function_type (integer_type_node, NULL_TREE));
4940 DECL_ARTIFICIAL (decl) = 1;
4941 DECL_EXTERNAL (decl) = 1;
4942 TREE_PUBLIC (decl) = 1;
4944 symbol = XEXP (DECL_RTL (decl), 0);
4946 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
4947 are the flags assigned by targetm.encode_section_info. */
4948 SYMBOL_REF_DECL (symbol) = 0;
4953 /* Call this to reset the function entry for one optab (OPTABLE) in mode
4954 MODE to NAME, which should be either 0 or a string constant. */
4956 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
4959 optable->handlers[mode].libfunc = init_one_libfunc (name);
4961 optable->handlers[mode].libfunc = 0;
4964 /* Call this to reset the function entry for one conversion optab
4965 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
4966 either 0 or a string constant. */
4968 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
4969 enum machine_mode fmode, const char *name)
4972 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name);
4974 optable->handlers[tmode][fmode].libfunc = 0;
4977 /* Call this once to initialize the contents of the optabs
4978 appropriately for the current target machine. */
4985 /* Start by initializing all tables to contain CODE_FOR_nothing. */
4987 for (i = 0; i < NUM_RTX_CODE; i++)
4988 setcc_gen_code[i] = CODE_FOR_nothing;
4990 #ifdef HAVE_conditional_move
4991 for (i = 0; i < NUM_MACHINE_MODES; i++)
4992 movcc_gen_code[i] = CODE_FOR_nothing;
4995 for (i = 0; i < NUM_MACHINE_MODES; i++)
4997 vcond_gen_code[i] = CODE_FOR_nothing;
4998 vcondu_gen_code[i] = CODE_FOR_nothing;
5001 add_optab = init_optab (PLUS);
5002 addv_optab = init_optabv (PLUS);
5003 sub_optab = init_optab (MINUS);
5004 subv_optab = init_optabv (MINUS);
5005 smul_optab = init_optab (MULT);
5006 smulv_optab = init_optabv (MULT);
5007 smul_highpart_optab = init_optab (UNKNOWN);
5008 umul_highpart_optab = init_optab (UNKNOWN);
5009 smul_widen_optab = init_optab (UNKNOWN);
5010 umul_widen_optab = init_optab (UNKNOWN);
5011 sdiv_optab = init_optab (DIV);
5012 sdivv_optab = init_optabv (DIV);
5013 sdivmod_optab = init_optab (UNKNOWN);
5014 udiv_optab = init_optab (UDIV);
5015 udivmod_optab = init_optab (UNKNOWN);
5016 smod_optab = init_optab (MOD);
5017 umod_optab = init_optab (UMOD);
5018 fmod_optab = init_optab (UNKNOWN);
5019 drem_optab = init_optab (UNKNOWN);
5020 ftrunc_optab = init_optab (UNKNOWN);
5021 and_optab = init_optab (AND);
5022 ior_optab = init_optab (IOR);
5023 xor_optab = init_optab (XOR);
5024 ashl_optab = init_optab (ASHIFT);
5025 ashr_optab = init_optab (ASHIFTRT);
5026 lshr_optab = init_optab (LSHIFTRT);
5027 rotl_optab = init_optab (ROTATE);
5028 rotr_optab = init_optab (ROTATERT);
5029 smin_optab = init_optab (SMIN);
5030 smax_optab = init_optab (SMAX);
5031 umin_optab = init_optab (UMIN);
5032 umax_optab = init_optab (UMAX);
5033 pow_optab = init_optab (UNKNOWN);
5034 atan2_optab = init_optab (UNKNOWN);
5036 /* These three have codes assigned exclusively for the sake of
5038 mov_optab = init_optab (SET);
5039 movstrict_optab = init_optab (STRICT_LOW_PART);
5040 cmp_optab = init_optab (COMPARE);
5042 ucmp_optab = init_optab (UNKNOWN);
5043 tst_optab = init_optab (UNKNOWN);
5045 eq_optab = init_optab (EQ);
5046 ne_optab = init_optab (NE);
5047 gt_optab = init_optab (GT);
5048 ge_optab = init_optab (GE);
5049 lt_optab = init_optab (LT);
5050 le_optab = init_optab (LE);
5051 unord_optab = init_optab (UNORDERED);
5053 neg_optab = init_optab (NEG);
5054 negv_optab = init_optabv (NEG);
5055 abs_optab = init_optab (ABS);
5056 absv_optab = init_optabv (ABS);
5057 addcc_optab = init_optab (UNKNOWN);
5058 one_cmpl_optab = init_optab (NOT);
5059 ffs_optab = init_optab (FFS);
5060 clz_optab = init_optab (CLZ);
5061 ctz_optab = init_optab (CTZ);
5062 popcount_optab = init_optab (POPCOUNT);
5063 parity_optab = init_optab (PARITY);
5064 sqrt_optab = init_optab (SQRT);
5065 floor_optab = init_optab (UNKNOWN);
5066 lfloor_optab = init_optab (UNKNOWN);
5067 ceil_optab = init_optab (UNKNOWN);
5068 lceil_optab = init_optab (UNKNOWN);
5069 round_optab = init_optab (UNKNOWN);
5070 btrunc_optab = init_optab (UNKNOWN);
5071 nearbyint_optab = init_optab (UNKNOWN);
5072 rint_optab = init_optab (UNKNOWN);
5073 lrint_optab = init_optab (UNKNOWN);
5074 sincos_optab = init_optab (UNKNOWN);
5075 sin_optab = init_optab (UNKNOWN);
5076 asin_optab = init_optab (UNKNOWN);
5077 cos_optab = init_optab (UNKNOWN);
5078 acos_optab = init_optab (UNKNOWN);
5079 exp_optab = init_optab (UNKNOWN);
5080 exp10_optab = init_optab (UNKNOWN);
5081 exp2_optab = init_optab (UNKNOWN);
5082 expm1_optab = init_optab (UNKNOWN);
5083 ldexp_optab = init_optab (UNKNOWN);
5084 logb_optab = init_optab (UNKNOWN);
5085 ilogb_optab = init_optab (UNKNOWN);
5086 log_optab = init_optab (UNKNOWN);
5087 log10_optab = init_optab (UNKNOWN);
5088 log2_optab = init_optab (UNKNOWN);
5089 log1p_optab = init_optab (UNKNOWN);
5090 tan_optab = init_optab (UNKNOWN);
5091 atan_optab = init_optab (UNKNOWN);
5092 copysign_optab = init_optab (UNKNOWN);
5094 strlen_optab = init_optab (UNKNOWN);
5095 cbranch_optab = init_optab (UNKNOWN);
5096 cmov_optab = init_optab (UNKNOWN);
5097 cstore_optab = init_optab (UNKNOWN);
5098 push_optab = init_optab (UNKNOWN);
5100 reduc_smax_optab = init_optab (UNKNOWN);
5101 reduc_umax_optab = init_optab (UNKNOWN);
5102 reduc_smin_optab = init_optab (UNKNOWN);
5103 reduc_umin_optab = init_optab (UNKNOWN);
5104 reduc_splus_optab = init_optab (UNKNOWN);
5105 reduc_uplus_optab = init_optab (UNKNOWN);
5107 vec_extract_optab = init_optab (UNKNOWN);
5108 vec_set_optab = init_optab (UNKNOWN);
5109 vec_init_optab = init_optab (UNKNOWN);
5110 vec_shl_optab = init_optab (UNKNOWN);
5111 vec_shr_optab = init_optab (UNKNOWN);
5112 vec_realign_load_optab = init_optab (UNKNOWN);
5113 movmisalign_optab = init_optab (UNKNOWN);
5115 powi_optab = init_optab (UNKNOWN);
5118 sext_optab = init_convert_optab (SIGN_EXTEND);
5119 zext_optab = init_convert_optab (ZERO_EXTEND);
5120 trunc_optab = init_convert_optab (TRUNCATE);
5121 sfix_optab = init_convert_optab (FIX);
5122 ufix_optab = init_convert_optab (UNSIGNED_FIX);
5123 sfixtrunc_optab = init_convert_optab (UNKNOWN);
5124 ufixtrunc_optab = init_convert_optab (UNKNOWN);
5125 sfloat_optab = init_convert_optab (FLOAT);
5126 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT);
5128 for (i = 0; i < NUM_MACHINE_MODES; i++)
5130 movmem_optab[i] = CODE_FOR_nothing;
5131 cmpstr_optab[i] = CODE_FOR_nothing;
5132 cmpstrn_optab[i] = CODE_FOR_nothing;
5133 cmpmem_optab[i] = CODE_FOR_nothing;
5134 setmem_optab[i] = CODE_FOR_nothing;
5136 sync_add_optab[i] = CODE_FOR_nothing;
5137 sync_sub_optab[i] = CODE_FOR_nothing;
5138 sync_ior_optab[i] = CODE_FOR_nothing;
5139 sync_and_optab[i] = CODE_FOR_nothing;
5140 sync_xor_optab[i] = CODE_FOR_nothing;
5141 sync_nand_optab[i] = CODE_FOR_nothing;
5142 sync_old_add_optab[i] = CODE_FOR_nothing;
5143 sync_old_sub_optab[i] = CODE_FOR_nothing;
5144 sync_old_ior_optab[i] = CODE_FOR_nothing;
5145 sync_old_and_optab[i] = CODE_FOR_nothing;
5146 sync_old_xor_optab[i] = CODE_FOR_nothing;
5147 sync_old_nand_optab[i] = CODE_FOR_nothing;
5148 sync_new_add_optab[i] = CODE_FOR_nothing;
5149 sync_new_sub_optab[i] = CODE_FOR_nothing;
5150 sync_new_ior_optab[i] = CODE_FOR_nothing;
5151 sync_new_and_optab[i] = CODE_FOR_nothing;
5152 sync_new_xor_optab[i] = CODE_FOR_nothing;
5153 sync_new_nand_optab[i] = CODE_FOR_nothing;
5154 sync_compare_and_swap[i] = CODE_FOR_nothing;
5155 sync_compare_and_swap_cc[i] = CODE_FOR_nothing;
5156 sync_lock_test_and_set[i] = CODE_FOR_nothing;
5157 sync_lock_release[i] = CODE_FOR_nothing;
5159 #ifdef HAVE_SECONDARY_RELOADS
5160 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
5164 /* Fill in the optabs with the insns we support. */
5167 /* Initialize the optabs with the names of the library functions. */
5168 init_integral_libfuncs (add_optab, "add", '3');
5169 init_floating_libfuncs (add_optab, "add", '3');
5170 init_integral_libfuncs (addv_optab, "addv", '3');
5171 init_floating_libfuncs (addv_optab, "add", '3');
5172 init_integral_libfuncs (sub_optab, "sub", '3');
5173 init_floating_libfuncs (sub_optab, "sub", '3');
5174 init_integral_libfuncs (subv_optab, "subv", '3');
5175 init_floating_libfuncs (subv_optab, "sub", '3');
5176 init_integral_libfuncs (smul_optab, "mul", '3');
5177 init_floating_libfuncs (smul_optab, "mul", '3');
5178 init_integral_libfuncs (smulv_optab, "mulv", '3');
5179 init_floating_libfuncs (smulv_optab, "mul", '3');
5180 init_integral_libfuncs (sdiv_optab, "div", '3');
5181 init_floating_libfuncs (sdiv_optab, "div", '3');
5182 init_integral_libfuncs (sdivv_optab, "divv", '3');
5183 init_integral_libfuncs (udiv_optab, "udiv", '3');
5184 init_integral_libfuncs (sdivmod_optab, "divmod", '4');
5185 init_integral_libfuncs (udivmod_optab, "udivmod", '4');
5186 init_integral_libfuncs (smod_optab, "mod", '3');
5187 init_integral_libfuncs (umod_optab, "umod", '3');
5188 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2');
5189 init_integral_libfuncs (and_optab, "and", '3');
5190 init_integral_libfuncs (ior_optab, "ior", '3');
5191 init_integral_libfuncs (xor_optab, "xor", '3');
5192 init_integral_libfuncs (ashl_optab, "ashl", '3');
5193 init_integral_libfuncs (ashr_optab, "ashr", '3');
5194 init_integral_libfuncs (lshr_optab, "lshr", '3');
5195 init_integral_libfuncs (smin_optab, "min", '3');
5196 init_floating_libfuncs (smin_optab, "min", '3');
5197 init_integral_libfuncs (smax_optab, "max", '3');
5198 init_floating_libfuncs (smax_optab, "max", '3');
5199 init_integral_libfuncs (umin_optab, "umin", '3');
5200 init_integral_libfuncs (umax_optab, "umax", '3');
5201 init_integral_libfuncs (neg_optab, "neg", '2');
5202 init_floating_libfuncs (neg_optab, "neg", '2');
5203 init_integral_libfuncs (negv_optab, "negv", '2');
5204 init_floating_libfuncs (negv_optab, "neg", '2');
5205 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2');
5206 init_integral_libfuncs (ffs_optab, "ffs", '2');
5207 init_integral_libfuncs (clz_optab, "clz", '2');
5208 init_integral_libfuncs (ctz_optab, "ctz", '2');
5209 init_integral_libfuncs (popcount_optab, "popcount", '2');
5210 init_integral_libfuncs (parity_optab, "parity", '2');
5212 /* Comparison libcalls for integers MUST come in pairs,
5214 init_integral_libfuncs (cmp_optab, "cmp", '2');
5215 init_integral_libfuncs (ucmp_optab, "ucmp", '2');
5216 init_floating_libfuncs (cmp_optab, "cmp", '2');
5218 /* EQ etc are floating point only. */
5219 init_floating_libfuncs (eq_optab, "eq", '2');
5220 init_floating_libfuncs (ne_optab, "ne", '2');
5221 init_floating_libfuncs (gt_optab, "gt", '2');
5222 init_floating_libfuncs (ge_optab, "ge", '2');
5223 init_floating_libfuncs (lt_optab, "lt", '2');
5224 init_floating_libfuncs (le_optab, "le", '2');
5225 init_floating_libfuncs (unord_optab, "unord", '2');
5227 init_floating_libfuncs (powi_optab, "powi", '2');
5230 init_interclass_conv_libfuncs (sfloat_optab, "float",
5231 MODE_INT, MODE_FLOAT);
5232 init_interclass_conv_libfuncs (sfix_optab, "fix",
5233 MODE_FLOAT, MODE_INT);
5234 init_interclass_conv_libfuncs (ufix_optab, "fixuns",
5235 MODE_FLOAT, MODE_INT);
5237 /* sext_optab is also used for FLOAT_EXTEND. */
5238 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true);
5239 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false);
5241 /* Use cabs for double complex abs, since systems generally have cabs.
5242 Don't define any libcall for float complex, so that cabs will be used. */
5243 if (complex_double_type_node)
5244 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc
5245 = init_one_libfunc ("cabs");
5247 /* The ffs function operates on `int'. */
5248 ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc
5249 = init_one_libfunc ("ffs");
5251 abort_libfunc = init_one_libfunc ("abort");
5252 memcpy_libfunc = init_one_libfunc ("memcpy");
5253 memmove_libfunc = init_one_libfunc ("memmove");
5254 memcmp_libfunc = init_one_libfunc ("memcmp");
5255 memset_libfunc = init_one_libfunc ("memset");
5256 setbits_libfunc = init_one_libfunc ("__setbits");
5258 #ifndef DONT_USE_BUILTIN_SETJMP
5259 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
5260 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
5262 setjmp_libfunc = init_one_libfunc ("setjmp");
5263 longjmp_libfunc = init_one_libfunc ("longjmp");
5265 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
5266 unwind_sjlj_unregister_libfunc
5267 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5269 /* For function entry/exit instrumentation. */
5270 profile_function_entry_libfunc
5271 = init_one_libfunc ("__cyg_profile_func_enter");
5272 profile_function_exit_libfunc
5273 = init_one_libfunc ("__cyg_profile_func_exit");
5275 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
5277 if (HAVE_conditional_trap)
5278 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
5280 /* Allow the target to add more libcalls or rename some, etc. */
5281 targetm.init_libfuncs ();
5286 /* Print information about the current contents of the optabs on
5290 debug_optab_libfuncs (void)
5296 /* Dump the arithmetic optabs. */
5297 for (i = 0; i != (int) OTI_MAX; i++)
5298 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5301 struct optab_handlers *h;
5304 h = &o->handlers[j];
5307 gcc_assert (GET_CODE (h->libfunc) = SYMBOL_REF);
5308 fprintf (stderr, "%s\t%s:\t%s\n",
5309 GET_RTX_NAME (o->code),
5311 XSTR (h->libfunc, 0));
5315 /* Dump the conversion optabs. */
5316 for (i = 0; i < (int) COI_MAX; ++i)
5317 for (j = 0; j < NUM_MACHINE_MODES; ++j)
5318 for (k = 0; k < NUM_MACHINE_MODES; ++k)
5321 struct optab_handlers *h;
5323 o = &convert_optab_table[i];
5324 h = &o->handlers[j][k];
5327 gcc_assert (GET_CODE (h->libfunc) = SYMBOL_REF);
5328 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
5329 GET_RTX_NAME (o->code),
5332 XSTR (h->libfunc, 0));
5340 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5341 CODE. Return 0 on failure. */
5344 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
5345 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
5347 enum machine_mode mode = GET_MODE (op1);
5348 enum insn_code icode;
5351 if (!HAVE_conditional_trap)
5354 if (mode == VOIDmode)
5357 icode = cmp_optab->handlers[(int) mode].insn_code;
5358 if (icode == CODE_FOR_nothing)
5362 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
5363 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
5369 emit_insn (GEN_FCN (icode) (op1, op2));
5371 PUT_CODE (trap_rtx, code);
5372 gcc_assert (HAVE_conditional_trap);
5373 insn = gen_conditional_trap (trap_rtx, tcode);
5377 insn = get_insns ();
5384 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5385 or unsigned operation code. */
5387 static enum rtx_code
5388 get_rtx_code (enum tree_code tcode, bool unsignedp)
5400 code = unsignedp ? LTU : LT;
5403 code = unsignedp ? LEU : LE;
5406 code = unsignedp ? GTU : GT;
5409 code = unsignedp ? GEU : GE;
5412 case UNORDERED_EXPR:
5443 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5444 unsigned operators. Do not generate compare instruction. */
5447 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
5449 enum rtx_code rcode;
5451 rtx rtx_op0, rtx_op1;
5453 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
5454 ensures that condition is a relational operation. */
5455 gcc_assert (COMPARISON_CLASS_P (cond));
5457 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
5458 t_op0 = TREE_OPERAND (cond, 0);
5459 t_op1 = TREE_OPERAND (cond, 1);
5461 /* Expand operands. */
5462 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 1);
5463 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 1);
5465 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
5466 && GET_MODE (rtx_op0) != VOIDmode)
5467 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
5469 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
5470 && GET_MODE (rtx_op1) != VOIDmode)
5471 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
5473 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
5476 /* Return insn code for VEC_COND_EXPR EXPR. */
5478 static inline enum insn_code
5479 get_vcond_icode (tree expr, enum machine_mode mode)
5481 enum insn_code icode = CODE_FOR_nothing;
5483 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
5484 icode = vcondu_gen_code[mode];
5486 icode = vcond_gen_code[mode];
5490 /* Return TRUE iff, appropriate vector insns are available
5491 for vector cond expr expr in VMODE mode. */
5494 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
5496 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
5501 /* Generate insns for VEC_COND_EXPR. */
5504 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
5506 enum insn_code icode;
5507 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
5508 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
5509 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
5511 icode = get_vcond_icode (vec_cond_expr, mode);
5512 if (icode == CODE_FOR_nothing)
5515 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5516 target = gen_reg_rtx (mode);
5518 /* Get comparison rtx. First expand both cond expr operands. */
5519 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
5521 cc_op0 = XEXP (comparison, 0);
5522 cc_op1 = XEXP (comparison, 1);
5523 /* Expand both operands and force them in reg, if required. */
5524 rtx_op1 = expand_expr (TREE_OPERAND (vec_cond_expr, 1),
5525 NULL_RTX, VOIDmode, EXPAND_NORMAL);
5526 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
5527 && mode != VOIDmode)
5528 rtx_op1 = force_reg (mode, rtx_op1);
5530 rtx_op2 = expand_expr (TREE_OPERAND (vec_cond_expr, 2),
5531 NULL_RTX, VOIDmode, EXPAND_NORMAL);
5532 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
5533 && mode != VOIDmode)
5534 rtx_op2 = force_reg (mode, rtx_op2);
5536 /* Emit instruction! */
5537 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
5538 comparison, cc_op0, cc_op1));
5544 /* This is an internal subroutine of the other compare_and_swap expanders.
5545 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
5546 operation. TARGET is an optional place to store the value result of
5547 the operation. ICODE is the particular instruction to expand. Return
5548 the result of the operation. */
5551 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
5552 rtx target, enum insn_code icode)
5554 enum machine_mode mode = GET_MODE (mem);
5557 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5558 target = gen_reg_rtx (mode);
5560 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
5561 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
5562 if (!insn_data[icode].operand[2].predicate (old_val, mode))
5563 old_val = force_reg (mode, old_val);
5565 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
5566 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
5567 if (!insn_data[icode].operand[3].predicate (new_val, mode))
5568 new_val = force_reg (mode, new_val);
5570 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
5571 if (insn == NULL_RTX)
5578 /* Expand a compare-and-swap operation and return its value. */
5581 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5583 enum machine_mode mode = GET_MODE (mem);
5584 enum insn_code icode = sync_compare_and_swap[mode];
5586 if (icode == CODE_FOR_nothing)
5589 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
5592 /* Expand a compare-and-swap operation and store true into the result if
5593 the operation was successful and false otherwise. Return the result.
5594 Unlike other routines, TARGET is not optional. */
5597 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
5599 enum machine_mode mode = GET_MODE (mem);
5600 enum insn_code icode;
5601 rtx subtarget, label0, label1;
5603 /* If the target supports a compare-and-swap pattern that simultaneously
5604 sets some flag for success, then use it. Otherwise use the regular
5605 compare-and-swap and follow that immediately with a compare insn. */
5606 icode = sync_compare_and_swap_cc[mode];
5610 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5612 if (subtarget != NULL_RTX)
5616 case CODE_FOR_nothing:
5617 icode = sync_compare_and_swap[mode];
5618 if (icode == CODE_FOR_nothing)
5621 /* Ensure that if old_val == mem, that we're not comparing
5622 against an old value. */
5623 if (MEM_P (old_val))
5624 old_val = force_reg (mode, old_val);
5626 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
5628 if (subtarget == NULL_RTX)
5631 emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
5634 /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
5635 setcc instruction from the beginning. We don't work too hard here,
5636 but it's nice to not be stupid about initial code gen either. */
5637 if (STORE_FLAG_VALUE == 1)
5639 icode = setcc_gen_code[EQ];
5640 if (icode != CODE_FOR_nothing)
5642 enum machine_mode cmode = insn_data[icode].operand[0].mode;
5646 if (!insn_data[icode].operand[0].predicate (target, cmode))
5647 subtarget = gen_reg_rtx (cmode);
5649 insn = GEN_FCN (icode) (subtarget);
5653 if (GET_MODE (target) != GET_MODE (subtarget))
5655 convert_move (target, subtarget, 1);
5663 /* Without an appropriate setcc instruction, use a set of branches to
5664 get 1 and 0 stored into target. Presumably if the target has a
5665 STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt. */
5667 label0 = gen_label_rtx ();
5668 label1 = gen_label_rtx ();
5670 emit_jump_insn (bcc_gen_fctn[EQ] (label0));
5671 emit_move_insn (target, const0_rtx);
5672 emit_jump_insn (gen_jump (label1));
5674 emit_label (label0);
5675 emit_move_insn (target, const1_rtx);
5676 emit_label (label1);
5681 /* This is a helper function for the other atomic operations. This function
5682 emits a loop that contains SEQ that iterates until a compare-and-swap
5683 operation at the end succeeds. MEM is the memory to be modified. SEQ is
5684 a set of instructions that takes a value from OLD_REG as an input and
5685 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
5686 set to the current contents of MEM. After SEQ, a compare-and-swap will
5687 attempt to update MEM with NEW_REG. The function returns true when the
5688 loop was generated successfully. */
5691 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
5693 enum machine_mode mode = GET_MODE (mem);
5694 enum insn_code icode;
5695 rtx label, cmp_reg, subtarget;
5697 /* The loop we want to generate looks like
5703 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
5704 if (cmp_reg != old_reg)
5707 Note that we only do the plain load from memory once. Subsequent
5708 iterations use the value loaded by the compare-and-swap pattern. */
5710 label = gen_label_rtx ();
5711 cmp_reg = gen_reg_rtx (mode);
5713 emit_move_insn (cmp_reg, mem);
5715 emit_move_insn (old_reg, cmp_reg);
5719 /* If the target supports a compare-and-swap pattern that simultaneously
5720 sets some flag for success, then use it. Otherwise use the regular
5721 compare-and-swap and follow that immediately with a compare insn. */
5722 icode = sync_compare_and_swap_cc[mode];
5726 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5728 if (subtarget != NULL_RTX)
5730 gcc_assert (subtarget == cmp_reg);
5735 case CODE_FOR_nothing:
5736 icode = sync_compare_and_swap[mode];
5737 if (icode == CODE_FOR_nothing)
5740 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
5742 if (subtarget == NULL_RTX)
5744 if (subtarget != cmp_reg)
5745 emit_move_insn (cmp_reg, subtarget);
5747 emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
5750 /* ??? Mark this jump predicted not taken? */
5751 emit_jump_insn (bcc_gen_fctn[NE] (label));
5756 /* This function generates the atomic operation MEM CODE= VAL. In this
5757 case, we do not care about any resulting value. Returns NULL if we
5758 cannot generate the operation. */
5761 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
5763 enum machine_mode mode = GET_MODE (mem);
5764 enum insn_code icode;
5767 /* Look to see if the target supports the operation directly. */
5771 icode = sync_add_optab[mode];
5774 icode = sync_ior_optab[mode];
5777 icode = sync_xor_optab[mode];
5780 icode = sync_and_optab[mode];
5783 icode = sync_nand_optab[mode];
5787 icode = sync_sub_optab[mode];
5788 if (icode == CODE_FOR_nothing)
5790 icode = sync_add_optab[mode];
5791 if (icode != CODE_FOR_nothing)
5793 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
5803 /* Generate the direct operation, if present. */
5804 if (icode != CODE_FOR_nothing)
5806 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5807 val = convert_modes (mode, GET_MODE (val), val, 1);
5808 if (!insn_data[icode].operand[1].predicate (val, mode))
5809 val = force_reg (mode, val);
5811 insn = GEN_FCN (icode) (mem, val);
5819 /* Failing that, generate a compare-and-swap loop in which we perform the
5820 operation with normal arithmetic instructions. */
5821 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
5823 rtx t0 = gen_reg_rtx (mode), t1;
5830 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
5833 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
5834 true, OPTAB_LIB_WIDEN);
5836 insn = get_insns ();
5839 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
5846 /* This function generates the atomic operation MEM CODE= VAL. In this
5847 case, we do care about the resulting value: if AFTER is true then
5848 return the value MEM holds after the operation, if AFTER is false
5849 then return the value MEM holds before the operation. TARGET is an
5850 optional place for the result value to be stored. */
5853 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
5854 bool after, rtx target)
5856 enum machine_mode mode = GET_MODE (mem);
5857 enum insn_code old_code, new_code, icode;
5861 /* Look to see if the target supports the operation directly. */
5865 old_code = sync_old_add_optab[mode];
5866 new_code = sync_new_add_optab[mode];
5869 old_code = sync_old_ior_optab[mode];
5870 new_code = sync_new_ior_optab[mode];
5873 old_code = sync_old_xor_optab[mode];
5874 new_code = sync_new_xor_optab[mode];
5877 old_code = sync_old_and_optab[mode];
5878 new_code = sync_new_and_optab[mode];
5881 old_code = sync_old_nand_optab[mode];
5882 new_code = sync_new_nand_optab[mode];
5886 old_code = sync_old_sub_optab[mode];
5887 new_code = sync_new_sub_optab[mode];
5888 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
5890 old_code = sync_old_add_optab[mode];
5891 new_code = sync_new_add_optab[mode];
5892 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
5894 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
5904 /* If the target does supports the proper new/old operation, great. But
5905 if we only support the opposite old/new operation, check to see if we
5906 can compensate. In the case in which the old value is supported, then
5907 we can always perform the operation again with normal arithmetic. In
5908 the case in which the new value is supported, then we can only handle
5909 this in the case the operation is reversible. */
5914 if (icode == CODE_FOR_nothing)
5917 if (icode != CODE_FOR_nothing)
5924 if (icode == CODE_FOR_nothing
5925 && (code == PLUS || code == MINUS || code == XOR))
5928 if (icode != CODE_FOR_nothing)
5933 /* If we found something supported, great. */
5934 if (icode != CODE_FOR_nothing)
5936 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
5937 target = gen_reg_rtx (mode);
5939 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
5940 val = convert_modes (mode, GET_MODE (val), val, 1);
5941 if (!insn_data[icode].operand[2].predicate (val, mode))
5942 val = force_reg (mode, val);
5944 insn = GEN_FCN (icode) (target, mem, val);
5949 /* If we need to compensate for using an operation with the
5950 wrong return value, do so now. */
5957 else if (code == MINUS)
5962 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true);
5963 target = expand_simple_binop (mode, code, target, val, NULL_RTX,
5964 true, OPTAB_LIB_WIDEN);
5971 /* Failing that, generate a compare-and-swap loop in which we perform the
5972 operation with normal arithmetic instructions. */
5973 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
5975 rtx t0 = gen_reg_rtx (mode), t1;
5977 if (!target || !register_operand (target, mode))
5978 target = gen_reg_rtx (mode);
5983 emit_move_insn (target, t0);
5987 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
5990 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
5991 true, OPTAB_LIB_WIDEN);
5993 emit_move_insn (target, t1);
5995 insn = get_insns ();
5998 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
6005 /* This function expands a test-and-set operation. Ideally we atomically
6006 store VAL in MEM and return the previous value in MEM. Some targets
6007 may not support this operation and only support VAL with the constant 1;
6008 in this case while the return value will be 0/1, but the exact value
6009 stored in MEM is target defined. TARGET is an option place to stick
6010 the return value. */
6013 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
6015 enum machine_mode mode = GET_MODE (mem);
6016 enum insn_code icode;
6019 /* If the target supports the test-and-set directly, great. */
6020 icode = sync_lock_test_and_set[mode];
6021 if (icode != CODE_FOR_nothing)
6023 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6024 target = gen_reg_rtx (mode);
6026 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6027 val = convert_modes (mode, GET_MODE (val), val, 1);
6028 if (!insn_data[icode].operand[2].predicate (val, mode))
6029 val = force_reg (mode, val);
6031 insn = GEN_FCN (icode) (target, mem, val);
6039 /* Otherwise, use a compare-and-swap loop for the exchange. */
6040 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
6042 if (!target || !register_operand (target, mode))
6043 target = gen_reg_rtx (mode);
6044 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
6045 val = convert_modes (mode, GET_MODE (val), val, 1);
6046 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
6053 #include "gt-optabs.h"