1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
25 #include "coretypes.h"
31 #include "hard-reg-set.h"
34 #include "insn-config.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
/* Produce the high half of a (low, high) double-word pair from LOW:
   all-ones HOST_WIDE_INT when LOW's sign bit is set, zero otherwise,
   i.e. sign-extend LOW as if it were a signed wide int.  */
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
58 static bool associative_constant_p (rtx);
59 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
62 /* Negate a CONST_INT rtx, truncating (because a conversion from a
63 maximally negative number can overflow). */
/* NOTE(review): the `static rtx` return-type line and the enclosing braces
   were lost in extraction; the surviving body negates I's INTVAL and lets
   gen_int_mode re-canonicalize the result for MODE (which performs the
   truncation the head comment refers to).  */
65 neg_const_int (enum machine_mode mode, rtx i)
67 return gen_int_mode (- INTVAL (i), mode);
71 /* Make a binary operation by properly ordering the operands and
72 seeing if the expression folds. */
/* NOTE(review): interior lines (return type, braces, the early return after
   the fold attempt) are missing from this extraction; text kept verbatim.  */
75 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
80 /* Put complex operands first and constants second if commutative. */
81 if (GET_RTX_CLASS (code) == 'c'
82 && swap_commutative_operands_p (op0, op1))
83 tem = op0, op0 = op1, op1 = tem;
85 /* If this simplifies, do it. */
86 tem = simplify_binary_operation (code, mode, op0, op1);
90 /* Handle addition and subtraction specially. Otherwise, just form
93 if (code == PLUS || code == MINUS)
95 tem = simplify_plus_minus (code, mode, op0, op1, 1);
/* Fallback: no simplification applied, build the raw rtx.  */
100 return gen_rtx_fmt_ee (code, mode, op0, op1);
103 /* If X is a MEM referencing the constant pool, return the real value.
104 Otherwise return X. */
/* NOTE(review): the switch's case labels, braces and several early returns
   were dropped in extraction; only the surviving statements appear below.
   Presumably the visible fragments belong to the FLOAT_EXTEND and MEM
   arms of the switch — confirm against the full source.  */
106 avoid_constant_pool_reference (rtx x)
109 enum machine_mode cmode;
111 switch (GET_CODE (x))
117 /* Handle float extensions of constant pool references. */
119 c = avoid_constant_pool_reference (tmp);
120 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
124 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
125 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
135 /* Call target hook to avoid the effects of -fpic etc.... */
136 addr = (*targetm.delegitimize_address) (addr);
/* LO_SUM wraps the pool symbol in its second operand; unwrap it.  */
138 if (GET_CODE (addr) == LO_SUM)
139 addr = XEXP (addr, 1);
141 if (GET_CODE (addr) != SYMBOL_REF
142 || ! CONSTANT_POOL_ADDRESS_P (addr))
145 c = get_pool_constant (addr);
146 cmode = get_pool_mode (addr);
148 /* If we're accessing the constant in a different mode than it was
149 originally stored, attempt to fix that up via subreg simplifications.
150 If that fails we have no choice but to return the original memory. */
151 if (cmode != GET_MODE (x))
153 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
160 /* Make a unary operation by first seeing if it folds and otherwise making
161 the specified operation. */
/* NOTE(review): return type, braces and the `return tem;` after the fold
   test are missing from this extraction; text preserved verbatim.  */
164 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
165 enum machine_mode op_mode)
169 /* If this simplifies, use it. */
170 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
/* Otherwise build the un-simplified unary rtx.  */
173 return gen_rtx_fmt_e (code, mode, op);
176 /* Likewise for ternary operations. */
/* NOTE(review): the continuation of the `if` condition (operands op0..op2)
   and the `return tem;` were lost in extraction; text kept verbatim.  */
179 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
180 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
184 /* If this simplifies, use it. */
185 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
189 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
192 /* Likewise, for relational operations.
193 CMP_MODE specifies mode comparison is done in.
/* NOTE(review): braces and several returns are missing from this
   extraction; text preserved verbatim with review comments only.  */
197 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
198 enum machine_mode cmp_mode, rtx op0, rtx op1)
/* Infer the comparison mode from either operand when unspecified.  */
202 if (cmp_mode == VOIDmode)
203 cmp_mode = GET_MODE (op0);
204 if (cmp_mode == VOIDmode)
205 cmp_mode = GET_MODE (op1);
207 if (cmp_mode != VOIDmode
208 && ! VECTOR_MODE_P (mode))
210 tem = simplify_relational_operation (code, cmp_mode, op0, op1);
214 #ifdef FLOAT_STORE_FLAG_VALUE
/* A float-mode result cannot use const0_rtx/const_true_rtx directly;
   translate to the target's FLOAT_STORE_FLAG_VALUE representation.  */
215 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
218 if (tem == const0_rtx)
219 return CONST0_RTX (mode);
220 if (tem != const_true_rtx)
222 val = FLOAT_STORE_FLAG_VALUE (mode);
223 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
230 /* For the following tests, ensure const0_rtx is op1. */
231 if (swap_commutative_operands_p (op0, op1)
232 || (op0 == const0_rtx && op1 != const0_rtx))
233 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
235 /* If op0 is a compare, extract the comparison arguments from it. */
236 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
237 return simplify_gen_relational (code, mode, VOIDmode,
238 XEXP (op0, 0), XEXP (op0, 1));
240 /* If op0 is a comparison, extract the comparison arguments form it. */
241 if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && op1 == const0_rtx)
245 if (GET_MODE (op0) == mode)
247 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
248 XEXP (op0, 0), XEXP (op0, 1));
/* Presumably this branch handles (ne/eq comparison 0) by reversing the
   inner comparison — the guarding condition was lost in extraction.  */
252 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
254 return simplify_gen_relational (new, mode, VOIDmode,
255 XEXP (op0, 0), XEXP (op0, 1));
259 return gen_rtx_fmt_ee (code, mode, op0, op1);
262 /* Replace all occurrences of OLD in X with NEW and try to simplify the
263 resulting RTX. Return a new RTX which is as simplified as possible. */
/* NOTE(review): the rtx_equal_p(x, old) fast path, switch case labels and
   several returns are missing from this extraction; text kept verbatim.
   The overall shape — dispatch on GET_RTX_CLASS, recurse on operands,
   return X unchanged when nothing was substituted — survives.  */
266 simplify_replace_rtx (rtx x, rtx old, rtx new)
268 enum rtx_code code = GET_CODE (x);
269 enum machine_mode mode = GET_MODE (x);
270 enum machine_mode op_mode;
273 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
274 to build a new expression substituting recursively. If we can't do
275 anything, return our input. */
280 switch (GET_RTX_CLASS (code))
/* Unary class: recurse into the single operand.  */
284 op_mode = GET_MODE (op0);
285 op0 = simplify_replace_rtx (op0, old, new);
286 if (op0 == XEXP (x, 0))
288 return simplify_gen_unary (code, mode, op0, op_mode);
/* Binary/commutative class: recurse into both operands.  */
292 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
293 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
294 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
296 return simplify_gen_binary (code, mode, op0, op1);
/* Relational class: the comparison mode comes from whichever operand
   has a non-void mode.  */
301 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
302 op0 = simplify_replace_rtx (op0, old, new);
303 op1 = simplify_replace_rtx (op1, old, new);
304 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
306 return simplify_gen_relational (code, mode, op_mode, op0, op1);
/* Ternary class: recurse into all three operands.  */
311 op_mode = GET_MODE (op0);
312 op0 = simplify_replace_rtx (op0, old, new);
313 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
314 op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
315 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
317 if (op_mode == VOIDmode)
318 op_mode = GET_MODE (op0);
319 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
322 /* The only case we try to handle is a SUBREG. */
325 op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
326 if (op0 == SUBREG_REG (x))
328 op0 = simplify_gen_subreg (GET_MODE (x), op0,
329 GET_MODE (SUBREG_REG (x)),
331 return op0 ? op0 : x;
/* MEM: substitute inside the address and rebuild without validation.  */
338 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
339 if (op0 == XEXP (x, 0))
341 return replace_equiv_address_nv (x, op0);
343 else if (code == LO_SUM)
345 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
346 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
348 /* (lo_sum (high x) x) -> x */
349 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
352 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
354 return gen_rtx_LO_SUM (mode, op0, op1);
356 else if (code == REG)
/* REG: replace only when the register numbers match.  */
358 if (REG_P (old) && REGNO (x) == REGNO (old))
369 /* Try to simplify a unary operation CODE whose output mode is to be
370 MODE with input operand OP whose mode was originally OP_MODE.
371 Return zero if no simplification can be made. */
/* NOTE(review): this function is heavily truncated by extraction — the
   return type, braces, switch case labels (ABS, FFS, CLZ, CTZ, POPCOUNT,
   PARITY, TRUNCATE, ZERO_EXTEND, SIGN_EXTEND, SQRT, NEG, NOT, ...) and
   many returns/breaks are gone.  Surviving text is preserved verbatim;
   the comments below only mark the recognizable sections and hedge where
   the dispatching case label is no longer visible.  */
373 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
374 rtx op, enum machine_mode op_mode)
376 unsigned int width = GET_MODE_BITSIZE (mode);
377 rtx trueop = avoid_constant_pool_reference (op);
/* --- VEC_DUPLICATE: replicate a scalar or smaller vector constant
   across all N_ELTS lanes of the result vector.  --- */
379 if (code == VEC_DUPLICATE)
381 if (!VECTOR_MODE_P (mode))
383 if (GET_MODE (trueop) != VOIDmode
384 && !VECTOR_MODE_P (GET_MODE (trueop))
385 && GET_MODE_INNER (mode) != GET_MODE (trueop))
387 if (GET_MODE (trueop) != VOIDmode
388 && VECTOR_MODE_P (GET_MODE (trueop))
389 && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
391 if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
392 || GET_CODE (trueop) == CONST_VECTOR)
394 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
395 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
396 rtvec v = rtvec_alloc (n_elts);
399 if (GET_CODE (trueop) != CONST_VECTOR)
400 for (i = 0; i < n_elts; i++)
401 RTVEC_ELT (v, i) = trueop;
404 enum machine_mode inmode = GET_MODE (trueop);
405 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
406 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
408 if (in_n_elts >= n_elts || n_elts % in_n_elts)
410 for (i = 0; i < n_elts; i++)
411 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
413 return gen_rtx_CONST_VECTOR (mode, v);
416 else if (GET_CODE (op) == CONST)
417 return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);
/* --- Element-wise fold of a unary op over a CONST_VECTOR.  --- */
419 if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
421 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
422 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
423 enum machine_mode opmode = GET_MODE (trueop);
424 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
425 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
426 rtvec v = rtvec_alloc (n_elts);
429 if (op_n_elts != n_elts)
432 for (i = 0; i < n_elts; i++)
434 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
435 CONST_VECTOR_ELT (trueop, i),
436 GET_MODE_INNER (opmode));
439 RTVEC_ELT (v, i) = x;
441 return gen_rtx_CONST_VECTOR (mode, v);
444 /* The order of these tests is critical so that, for example, we don't
445 check the wrong mode (input vs. output) for a conversion operation,
446 such as FIX. At some point, this should be simplified. */
/* --- FLOAT of an integer constant: build the REAL_VALUE directly.  --- */
448 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
449 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
451 HOST_WIDE_INT hv, lv;
454 if (GET_CODE (trueop) == CONST_INT)
455 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
457 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
459 REAL_VALUE_FROM_INT (d, lv, hv, mode);
460 d = real_value_truncate (mode, d);
461 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
463 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
464 && (GET_CODE (trueop) == CONST_DOUBLE
465 || GET_CODE (trueop) == CONST_INT))
467 HOST_WIDE_INT hv, lv;
470 if (GET_CODE (trueop) == CONST_INT)
471 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
473 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
475 if (op_mode == VOIDmode)
477 /* We don't know how to interpret negative-looking numbers in
478 this case, so don't try to fold those. */
482 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
485 hv = 0, lv &= GET_MODE_MASK (op_mode);
487 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
488 d = real_value_truncate (mode, d);
489 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* --- Single-word integer constant folding.  The switch's case labels
   (NOT, NEG, ABS, FFS, CLZ, CTZ, POPCOUNT, PARITY, TRUNCATE,
   ZERO_EXTEND, SIGN_EXTEND, ...) were lost in extraction; the arms
   below are identifiable mainly by their surviving comments.  --- */
492 if (GET_CODE (trueop) == CONST_INT
493 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
495 HOST_WIDE_INT arg0 = INTVAL (trueop);
509 val = (arg0 >= 0 ? arg0 : - arg0);
513 /* Don't use ffs here. Instead, get low order bit and then its
514 number. If arg0 is zero, this will return 0, as desired. */
515 arg0 &= GET_MODE_MASK (mode);
516 val = exact_log2 (arg0 & (- arg0)) + 1;
520 arg0 &= GET_MODE_MASK (mode);
521 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
524 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
528 arg0 &= GET_MODE_MASK (mode);
531 /* Even if the value at zero is undefined, we have to come
532 up with some replacement. Seems good enough. */
533 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
534 val = GET_MODE_BITSIZE (mode);
537 val = exact_log2 (arg0 & -arg0);
541 arg0 &= GET_MODE_MASK (mode);
544 val++, arg0 &= arg0 - 1;
548 arg0 &= GET_MODE_MASK (mode);
551 val++, arg0 &= arg0 - 1;
560 /* When zero-extending a CONST_INT, we need to know its
562 if (op_mode == VOIDmode)
564 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
566 /* If we were really extending the mode,
567 we would have to distinguish between zero-extension
568 and sign-extension. */
569 if (width != GET_MODE_BITSIZE (op_mode))
573 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
574 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
/* Presumably the SIGN_EXTEND arm: mask to op_mode then subtract the
   power of two when the old sign bit is set — confirm against full
   source, the case label is missing here.  */
580 if (op_mode == VOIDmode)
582 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
584 /* If we were really extending the mode,
585 we would have to distinguish between zero-extension
586 and sign-extension. */
587 if (width != GET_MODE_BITSIZE (op_mode))
591 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
594 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
596 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
597 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
614 val = trunc_int_for_mode (val, mode);
616 return GEN_INT (val);
619 /* We can do some operations on integer CONST_DOUBLEs. Also allow
620 for a DImode operation on a CONST_INT. */
621 else if (GET_MODE (trueop) == VOIDmode
622 && width <= HOST_BITS_PER_WIDE_INT * 2
623 && (GET_CODE (trueop) == CONST_DOUBLE
624 || GET_CODE (trueop) == CONST_INT))
626 unsigned HOST_WIDE_INT l1, lv;
627 HOST_WIDE_INT h1, hv;
629 if (GET_CODE (trueop) == CONST_DOUBLE)
630 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
632 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
/* Double-word arms (case labels missing): the (l1,h1) pair is folded
   into (lv,hv) via the double-word helpers below.  */
642 neg_double (l1, h1, &lv, &hv);
647 neg_double (l1, h1, &lv, &hv);
659 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
662 lv = exact_log2 (l1 & -l1) + 1;
668 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
669 - HOST_BITS_PER_WIDE_INT;
671 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
672 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
673 lv = GET_MODE_BITSIZE (mode);
679 lv = exact_log2 (l1 & -l1);
681 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
682 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
683 lv = GET_MODE_BITSIZE (mode);
706 /* This is just a change-of-mode, so do nothing. */
711 if (op_mode == VOIDmode)
714 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
718 lv = l1 & GET_MODE_MASK (op_mode);
722 if (op_mode == VOIDmode
723 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
727 lv = l1 & GET_MODE_MASK (op_mode);
728 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
729 && (lv & ((HOST_WIDE_INT) 1
730 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
731 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
733 hv = HWI_SIGN_EXTEND (lv);
744 return immed_double_const (lv, hv, mode);
/* --- Floating-point constant folding (SQRT/ABS/NEG/FLOAT_TRUNCATE/
   FIX_TRUNC/...; case labels lost).  --- */
747 else if (GET_CODE (trueop) == CONST_DOUBLE
748 && GET_MODE_CLASS (mode) == MODE_FLOAT)
750 REAL_VALUE_TYPE d, t;
751 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
756 if (HONOR_SNANS (mode) && real_isnan (&d))
758 real_sqrt (&t, mode, &d);
762 d = REAL_VALUE_ABS (d);
765 d = REAL_VALUE_NEGATE (d);
768 d = real_value_truncate (mode, d);
771 /* All this does is change the mode. */
774 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
781 real_to_target (tmp, &d, GET_MODE (trueop));
782 for (i = 0; i < 4; i++)
784 real_from_target (&d, tmp, mode);
790 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* --- FIX / UNSIGNED_FIX of a float constant, with the middle-end's
   saturation-style overflow handling.  --- */
793 else if (GET_CODE (trueop) == CONST_DOUBLE
794 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
795 && GET_MODE_CLASS (mode) == MODE_INT
796 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
798 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
799 operators are intentionally left unspecified (to ease implementation
800 by target backends), for consistency, this routine implements the
801 same semantics for constant folding as used by the middle-end. */
803 HOST_WIDE_INT xh, xl, th, tl;
804 REAL_VALUE_TYPE x, t;
805 REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
809 if (REAL_VALUE_ISNAN (x))
812 /* Test against the signed upper bound. */
813 if (width > HOST_BITS_PER_WIDE_INT)
815 th = ((unsigned HOST_WIDE_INT) 1
816 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
822 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
824 real_from_integer (&t, VOIDmode, tl, th, 0);
825 if (REAL_VALUES_LESS (t, x))
832 /* Test against the signed lower bound. */
833 if (width > HOST_BITS_PER_WIDE_INT)
835 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
841 tl = (HOST_WIDE_INT) -1 << (width - 1);
843 real_from_integer (&t, VOIDmode, tl, th, 0);
844 if (REAL_VALUES_LESS (x, t))
850 REAL_VALUE_TO_INT (&xl, &xh, x);
854 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
857 /* Test against the unsigned upper bound. */
858 if (width == 2*HOST_BITS_PER_WIDE_INT)
863 else if (width >= HOST_BITS_PER_WIDE_INT)
865 th = ((unsigned HOST_WIDE_INT) 1
866 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
872 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
874 real_from_integer (&t, VOIDmode, tl, th, 1);
875 if (REAL_VALUES_LESS (t, x))
882 REAL_VALUE_TO_INT (&xl, &xh, x);
888 return immed_double_const (xl, xh, mode);
891 /* This was formerly used only for non-IEEE float.
892 eggert@twinsun.com says it is safe for IEEE also. */
/* --- Non-constant structural simplifications; switch on CODE with the
   case labels (NOT, NEG, SIGN_EXTEND, ZERO_EXTEND) lost in extraction,
   but each arm is identified by its surviving comments.  --- */
895 enum rtx_code reversed;
898 /* There are some simplifications we can do even if the operands
903 /* (not (not X)) == X. */
904 if (GET_CODE (op) == NOT)
907 /* (not (eq X Y)) == (ne X Y), etc. */
908 if (GET_RTX_CLASS (GET_CODE (op)) == '<'
909 && (mode == BImode || STORE_FLAG_VALUE == -1)
910 && ((reversed = reversed_comparison_code (op, NULL_RTX))
912 return simplify_gen_relational (reversed, mode, VOIDmode,
913 XEXP (op, 0), XEXP (op, 1));
915 /* (not (plus X -1)) can become (neg X). */
916 if (GET_CODE (op) == PLUS
917 && XEXP (op, 1) == constm1_rtx)
918 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
920 /* Similarly, (not (neg X)) is (plus X -1). */
921 if (GET_CODE (op) == NEG)
922 return plus_constant (XEXP (op, 0), -1);
924 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
925 if (GET_CODE (op) == XOR
926 && GET_CODE (XEXP (op, 1)) == CONST_INT
927 && (temp = simplify_unary_operation (NOT, mode,
930 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
933 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
934 operands other than 1, but that is not valid. We could do a
935 similar simplification for (not (lshiftrt C X)) where C is
936 just the sign bit, but this doesn't seem common enough to
938 if (GET_CODE (op) == ASHIFT
939 && XEXP (op, 0) == const1_rtx)
941 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
942 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
945 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
946 by reversing the comparison code if valid. */
947 if (STORE_FLAG_VALUE == -1
948 && GET_RTX_CLASS (GET_CODE (op)) == '<'
949 && (reversed = reversed_comparison_code (op, NULL_RTX))
951 return simplify_gen_relational (reversed, mode, VOIDmode,
952 XEXP (op, 0), XEXP (op, 1));
954 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
955 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
956 so we can perform the above simplification. */
958 if (STORE_FLAG_VALUE == -1
959 && GET_CODE (op) == ASHIFTRT
960 && GET_CODE (XEXP (op, 1)) == CONST_INT
961 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
962 return simplify_gen_relational (GE, mode, VOIDmode,
963 XEXP (op, 0), const0_rtx);
968 /* (neg (neg X)) == X. */
969 if (GET_CODE (op) == NEG)
972 /* (neg (plus X 1)) can become (not X). */
973 if (GET_CODE (op) == PLUS
974 && XEXP (op, 1) == const1_rtx)
975 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
977 /* Similarly, (neg (not X)) is (plus X 1). */
978 if (GET_CODE (op) == NOT)
979 return plus_constant (XEXP (op, 0), 1);
981 /* (neg (minus X Y)) can become (minus Y X). This transformation
982 isn't safe for modes with signed zeros, since if X and Y are
983 both +0, (minus Y X) is the same as (minus X Y). If the
984 rounding mode is towards +infinity (or -infinity) then the two
985 expressions will be rounded differently. */
986 if (GET_CODE (op) == MINUS
987 && !HONOR_SIGNED_ZEROS (mode)
988 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
989 return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
992 if (GET_CODE (op) == PLUS
993 && !HONOR_SIGNED_ZEROS (mode)
994 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
996 /* (neg (plus A C)) is simplified to (minus -C A). */
997 if (GET_CODE (XEXP (op, 1)) == CONST_INT
998 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
1000 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
1003 return simplify_gen_binary (MINUS, mode, temp,
1007 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1008 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1009 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1012 /* (neg (mult A B)) becomes (mult (neg A) B).
1013 This works even for floating-point values. */
1014 if (GET_CODE (op) == MULT
1015 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1017 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1018 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
1021 /* NEG commutes with ASHIFT since it is multiplication. Only do
1022 this if we can then eliminate the NEG (e.g., if the operand
1024 if (GET_CODE (op) == ASHIFT)
1026 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
1029 return simplify_gen_binary (ASHIFT, mode, temp,
1036 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1037 becomes just the MINUS if its mode is MODE. This allows
1038 folding switch statements on machines using casesi (such as
1040 if (GET_CODE (op) == TRUNCATE
1041 && GET_MODE (XEXP (op, 0)) == mode
1042 && GET_CODE (XEXP (op, 0)) == MINUS
1043 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1044 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1045 return XEXP (op, 0);
1047 /* Check for a sign extension of a subreg of a promoted
1048 variable, where the promotion is sign-extended, and the
1049 target mode is the same as the variable's promotion. */
1050 if (GET_CODE (op) == SUBREG
1051 && SUBREG_PROMOTED_VAR_P (op)
1052 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1053 && GET_MODE (XEXP (op, 0)) == mode)
1054 return XEXP (op, 0);
1056 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1057 if (! POINTERS_EXTEND_UNSIGNED
1058 && mode == Pmode && GET_MODE (op) == ptr_mode
1060 || (GET_CODE (op) == SUBREG
1061 && GET_CODE (SUBREG_REG (op)) == REG
1062 && REG_POINTER (SUBREG_REG (op))
1063 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1064 return convert_memory_address (Pmode, op);
1069 /* Check for a zero extension of a subreg of a promoted
1070 variable, where the promotion is zero-extended, and the
1071 target mode is the same as the variable's promotion. */
1072 if (GET_CODE (op) == SUBREG
1073 && SUBREG_PROMOTED_VAR_P (op)
1074 && SUBREG_PROMOTED_UNSIGNED_P (op)
1075 && GET_MODE (XEXP (op, 0)) == mode)
1076 return XEXP (op, 0);
1078 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
1079 if (POINTERS_EXTEND_UNSIGNED > 0
1080 && mode == Pmode && GET_MODE (op) == ptr_mode
1082 || (GET_CODE (op) == SUBREG
1083 && GET_CODE (SUBREG_REG (op)) == REG
1084 && REG_POINTER (SUBREG_REG (op))
1085 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1086 return convert_memory_address (Pmode, op);
1098 /* Subroutine of simplify_associative_operation. Return true if rtx OP
1099 is a suitable integer or floating point immediate constant. */
/* NOTE(review): return type, braces and the `return true;` after the
   first test were lost in extraction.  The logic that survives: accept
   CONST_INT/CONST_DOUBLE directly, else look through a constant-pool
   reference and re-test.  */
1101 associative_constant_p (rtx op)
1103 if (GET_CODE (op) == CONST_INT
1104 || GET_CODE (op) == CONST_DOUBLE)
1106 op = avoid_constant_pool_reference (op);
1107 return GET_CODE (op) == CONST_INT
1108 || GET_CODE (op) == CONST_DOUBLE;
1111 /* Subroutine of simplify_binary_operation to simplify an associative
1112 binary operation CODE with result mode MODE, operating on OP0 and OP1.
1113 Return 0 if no simplification is possible. */
/* NOTE(review): braces, the second parameter line (op0/op1) and the
   `if (tem)` guards around the recursive rebuilds are missing from this
   extraction; text preserved verbatim.  Four reassociation patterns are
   attempted in order, constants always migrating to the right.  */
1115 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1120 /* Simplify (x op c1) op c2 as x op (c1 op c2). */
1121 if (GET_CODE (op0) == code
1122 && associative_constant_p (op1)
1123 && associative_constant_p (XEXP (op0, 1)))
1125 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1128 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1131 /* Simplify (x op c1) op (y op c2) as (x op y) op (c1 op c2). */
1132 if (GET_CODE (op0) == code
1133 && GET_CODE (op1) == code
1134 && associative_constant_p (XEXP (op0, 1))
1135 && associative_constant_p (XEXP (op1, 1)))
1137 rtx c = simplify_binary_operation (code, mode,
1138 XEXP (op0, 1), XEXP (op1, 1));
1141 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1142 return simplify_gen_binary (code, mode, tem, c);
1145 /* Canonicalize (x op c) op y as (x op y) op c. */
1146 if (GET_CODE (op0) == code
1147 && associative_constant_p (XEXP (op0, 1)))
1149 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1150 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1153 /* Canonicalize x op (y op c) as (x op y) op c. */
1154 if (GET_CODE (op1) == code
1155 && associative_constant_p (XEXP (op1, 1)))
1157 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1158 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1164 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1165 and OP1. Return 0 if no simplification is possible.
1167 Don't use this for relational operations such as EQ or LT.
1168 Use simplify_relational_operation instead. */
1170 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1173 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
1175 unsigned int width = GET_MODE_BITSIZE (mode);
1177 rtx trueop0 = avoid_constant_pool_reference (op0);
1178 rtx trueop1 = avoid_constant_pool_reference (op1);
1180 /* Relational operations don't work here. We must know the mode
1181 of the operands in order to do the comparison correctly.
1182 Assuming a full word can give incorrect results.
1183 Consider comparing 128 with -128 in QImode. */
1185 if (GET_RTX_CLASS (code) == '<')
1188 /* Make sure the constant is second. */
1189 if (GET_RTX_CLASS (code) == 'c'
1190 && swap_commutative_operands_p (trueop0, trueop1))
1192 tem = op0, op0 = op1, op1 = tem;
1193 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
1196 if (VECTOR_MODE_P (mode)
1197 && code != VEC_CONCAT
1198 && GET_CODE (trueop0) == CONST_VECTOR
1199 && GET_CODE (trueop1) == CONST_VECTOR)
1201 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1202 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1203 enum machine_mode op0mode = GET_MODE (trueop0);
1204 int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
1205 unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
1206 enum machine_mode op1mode = GET_MODE (trueop1);
1207 int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
1208 unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
1209 rtvec v = rtvec_alloc (n_elts);
1212 if (op0_n_elts != n_elts || op1_n_elts != n_elts)
1215 for (i = 0; i < n_elts; i++)
1217 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
1218 CONST_VECTOR_ELT (trueop0, i),
1219 CONST_VECTOR_ELT (trueop1, i));
1222 RTVEC_ELT (v, i) = x;
1225 return gen_rtx_CONST_VECTOR (mode, v);
1228 if (GET_MODE_CLASS (mode) == MODE_FLOAT
1229 && GET_CODE (trueop0) == CONST_DOUBLE
1230 && GET_CODE (trueop1) == CONST_DOUBLE
1231 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
1242 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
1244 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
1246 for (i = 0; i < 4; i++)
1250 else if (code == IOR)
1252 else if (code == XOR)
1257 real_from_target (&r, tmp0, mode);
1258 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
1262 REAL_VALUE_TYPE f0, f1, value;
1264 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
1265 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
1266 f0 = real_value_truncate (mode, f0);
1267 f1 = real_value_truncate (mode, f1);
1269 if (HONOR_SNANS (mode)
1270 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
1274 && REAL_VALUES_EQUAL (f1, dconst0)
1275 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
1278 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
1279 && flag_trapping_math
1280 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
1282 int s0 = REAL_VALUE_NEGATIVE (f0);
1283 int s1 = REAL_VALUE_NEGATIVE (f1);
1288 /* Inf + -Inf = NaN plus exception. */
1293 /* Inf - Inf = NaN plus exception. */
1298 /* Inf / Inf = NaN plus exception. */
1305 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
1306 && flag_trapping_math
1307 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
1308 || (REAL_VALUE_ISINF (f1)
1309 && REAL_VALUES_EQUAL (f0, dconst0))))
1310 /* Inf * 0 = NaN plus exception. */
1313 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
1315 value = real_value_truncate (mode, value);
1316 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
1320 /* We can fold some multi-word operations. */
1321 if (GET_MODE_CLASS (mode) == MODE_INT
1322 && width == HOST_BITS_PER_WIDE_INT * 2
1323 && (GET_CODE (trueop0) == CONST_DOUBLE
1324 || GET_CODE (trueop0) == CONST_INT)
1325 && (GET_CODE (trueop1) == CONST_DOUBLE
1326 || GET_CODE (trueop1) == CONST_INT))
1328 unsigned HOST_WIDE_INT l1, l2, lv;
1329 HOST_WIDE_INT h1, h2, hv;
1331 if (GET_CODE (trueop0) == CONST_DOUBLE)
1332 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
1334 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
1336 if (GET_CODE (trueop1) == CONST_DOUBLE)
1337 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
1339 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
1344 /* A - B == A + (-B). */
1345 neg_double (l2, h2, &lv, &hv);
1348 /* Fall through.... */
1351 add_double (l1, h1, l2, h2, &lv, &hv);
1355 mul_double (l1, h1, l2, h2, &lv, &hv);
1358 case DIV: case MOD: case UDIV: case UMOD:
1359 /* We'd need to include tree.h to do this and it doesn't seem worth
1364 lv = l1 & l2, hv = h1 & h2;
1368 lv = l1 | l2, hv = h1 | h2;
1372 lv = l1 ^ l2, hv = h1 ^ h2;
1378 && ((unsigned HOST_WIDE_INT) l1
1379 < (unsigned HOST_WIDE_INT) l2)))
1388 && ((unsigned HOST_WIDE_INT) l1
1389 > (unsigned HOST_WIDE_INT) l2)))
1396 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
1398 && ((unsigned HOST_WIDE_INT) l1
1399 < (unsigned HOST_WIDE_INT) l2)))
1406 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1408 && ((unsigned HOST_WIDE_INT) l1
1409 > (unsigned HOST_WIDE_INT) l2)))
1415 case LSHIFTRT: case ASHIFTRT:
1417 case ROTATE: case ROTATERT:
1418 #ifdef SHIFT_COUNT_TRUNCATED
1419 if (SHIFT_COUNT_TRUNCATED)
1420 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1423 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1426 if (code == LSHIFTRT || code == ASHIFTRT)
1427 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1429 else if (code == ASHIFT)
1430 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1431 else if (code == ROTATE)
1432 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1433 else /* code == ROTATERT */
1434 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1441 return immed_double_const (lv, hv, mode);
1444 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1445 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1447 /* Even if we can't compute a constant result,
1448 there are some cases worth simplifying. */
1453 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1454 when x is NaN, infinite, or finite and nonzero. They aren't
1455 when x is -0 and the rounding mode is not towards -infinity,
1456 since (-0) + 0 is then 0. */
1457 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1460 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1461 transformations are safe even for IEEE. */
1462 if (GET_CODE (op0) == NEG)
1463 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1464 else if (GET_CODE (op1) == NEG)
1465 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1467 /* (~a) + 1 -> -a */
1468 if (INTEGRAL_MODE_P (mode)
1469 && GET_CODE (op0) == NOT
1470 && trueop1 == const1_rtx)
1471 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1473 /* Handle both-operands-constant cases. We can only add
1474 CONST_INTs to constants since the sum of relocatable symbols
1475 can't be handled by most assemblers. Don't add CONST_INT
1476 to CONST_INT since overflow won't be computed properly if wider
1477 than HOST_BITS_PER_WIDE_INT. */
1479 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1480 && GET_CODE (op1) == CONST_INT)
1481 return plus_constant (op0, INTVAL (op1));
1482 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1483 && GET_CODE (op0) == CONST_INT)
1484 return plus_constant (op1, INTVAL (op0));
1486 /* See if this is something like X * C - X or vice versa or
1487 if the multiplication is written as a shift. If so, we can
1488 distribute and make a new multiply, shift, or maybe just
1489 have X (if C is 2 in the example above). But don't make
1490 real multiply if we didn't have one before. */
1492 if (! FLOAT_MODE_P (mode))
1494 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1495 rtx lhs = op0, rhs = op1;
1498 if (GET_CODE (lhs) == NEG)
1499 coeff0 = -1, lhs = XEXP (lhs, 0);
1500 else if (GET_CODE (lhs) == MULT
1501 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1503 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1506 else if (GET_CODE (lhs) == ASHIFT
1507 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1508 && INTVAL (XEXP (lhs, 1)) >= 0
1509 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1511 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1512 lhs = XEXP (lhs, 0);
1515 if (GET_CODE (rhs) == NEG)
1516 coeff1 = -1, rhs = XEXP (rhs, 0);
1517 else if (GET_CODE (rhs) == MULT
1518 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1520 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1523 else if (GET_CODE (rhs) == ASHIFT
1524 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1525 && INTVAL (XEXP (rhs, 1)) >= 0
1526 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1528 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1529 rhs = XEXP (rhs, 0);
1532 if (rtx_equal_p (lhs, rhs))
1534 tem = simplify_gen_binary (MULT, mode, lhs,
1535 GEN_INT (coeff0 + coeff1));
1536 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1540 /* If one of the operands is a PLUS or a MINUS, see if we can
1541 simplify this by the associative law.
1542 Don't use the associative law for floating point.
1543 The inaccuracy makes it nonassociative,
1544 and subtle programs can break if operations are associated. */
1546 if (INTEGRAL_MODE_P (mode)
1547 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1548 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1549 || (GET_CODE (op0) == CONST
1550 && GET_CODE (XEXP (op0, 0)) == PLUS)
1551 || (GET_CODE (op1) == CONST
1552 && GET_CODE (XEXP (op1, 0)) == PLUS))
1553 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1556 /* Reassociate floating point addition only when the user
1557 specifies unsafe math optimizations. */
1558 if (FLOAT_MODE_P (mode)
1559 && flag_unsafe_math_optimizations)
1561 tem = simplify_associative_operation (code, mode, op0, op1);
1569 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1570 using cc0, in which case we want to leave it as a COMPARE
1571 so we can distinguish it from a register-register-copy.
1573 In IEEE floating point, x-0 is not the same as x. */
1575 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1576 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1577 && trueop1 == CONST0_RTX (mode))
1581 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1582 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1583 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1584 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1586 rtx xop00 = XEXP (op0, 0);
1587 rtx xop10 = XEXP (op1, 0);
1590 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1592 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1593 && GET_MODE (xop00) == GET_MODE (xop10)
1594 && REGNO (xop00) == REGNO (xop10)
1595 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1596 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1603 /* We can't assume x-x is 0 even with non-IEEE floating point,
1604 but since it is zero except in very strange circumstances, we
1605 will treat it as zero with -funsafe-math-optimizations. */
1606 if (rtx_equal_p (trueop0, trueop1)
1607 && ! side_effects_p (op0)
1608 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1609 return CONST0_RTX (mode);
1611 /* Change subtraction from zero into negation. (0 - x) is the
1612 same as -x when x is NaN, infinite, or finite and nonzero.
1613 But if the mode has signed zeros, and does not round towards
1614 -infinity, then 0 - 0 is 0, not -0. */
1615 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1616 return simplify_gen_unary (NEG, mode, op1, mode);
1618 /* (-1 - a) is ~a. */
1619 if (trueop0 == constm1_rtx)
1620 return simplify_gen_unary (NOT, mode, op1, mode);
1622 /* Subtracting 0 has no effect unless the mode has signed zeros
1623 and supports rounding towards -infinity. In such a case,
1625 if (!(HONOR_SIGNED_ZEROS (mode)
1626 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1627 && trueop1 == CONST0_RTX (mode))
1630 /* See if this is something like X * C - X or vice versa or
1631 if the multiplication is written as a shift. If so, we can
1632 distribute and make a new multiply, shift, or maybe just
1633 have X (if C is 2 in the example above). But don't make
1634 real multiply if we didn't have one before. */
1636 if (! FLOAT_MODE_P (mode))
1638 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1639 rtx lhs = op0, rhs = op1;
1642 if (GET_CODE (lhs) == NEG)
1643 coeff0 = -1, lhs = XEXP (lhs, 0);
1644 else if (GET_CODE (lhs) == MULT
1645 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1647 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1650 else if (GET_CODE (lhs) == ASHIFT
1651 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1652 && INTVAL (XEXP (lhs, 1)) >= 0
1653 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1655 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1656 lhs = XEXP (lhs, 0);
1659 if (GET_CODE (rhs) == NEG)
1660 coeff1 = - 1, rhs = XEXP (rhs, 0);
1661 else if (GET_CODE (rhs) == MULT
1662 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1664 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1667 else if (GET_CODE (rhs) == ASHIFT
1668 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1669 && INTVAL (XEXP (rhs, 1)) >= 0
1670 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1672 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1673 rhs = XEXP (rhs, 0);
1676 if (rtx_equal_p (lhs, rhs))
1678 tem = simplify_gen_binary (MULT, mode, lhs,
1679 GEN_INT (coeff0 - coeff1));
1680 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1684 /* (a - (-b)) -> (a + b). True even for IEEE. */
1685 if (GET_CODE (op1) == NEG)
1686 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1688 /* (-x - c) may be simplified as (-c - x). */
1689 if (GET_CODE (op0) == NEG
1690 && (GET_CODE (op1) == CONST_INT
1691 || GET_CODE (op1) == CONST_DOUBLE))
1693 tem = simplify_unary_operation (NEG, mode, op1, mode);
1695 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1698 /* If one of the operands is a PLUS or a MINUS, see if we can
1699 simplify this by the associative law.
1700 Don't use the associative law for floating point.
1701 The inaccuracy makes it nonassociative,
1702 and subtle programs can break if operations are associated. */
1704 if (INTEGRAL_MODE_P (mode)
1705 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1706 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1707 || (GET_CODE (op0) == CONST
1708 && GET_CODE (XEXP (op0, 0)) == PLUS)
1709 || (GET_CODE (op1) == CONST
1710 && GET_CODE (XEXP (op1, 0)) == PLUS))
1711 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1714 /* Don't let a relocatable value get a negative coeff. */
1715 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1716 return simplify_gen_binary (PLUS, mode,
1718 neg_const_int (mode, op1));
1720 /* (x - (x & y)) -> (x & ~y) */
1721 if (GET_CODE (op1) == AND)
1723 if (rtx_equal_p (op0, XEXP (op1, 0)))
1725 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1726 GET_MODE (XEXP (op1, 1)));
1727 return simplify_gen_binary (AND, mode, op0, tem);
1729 if (rtx_equal_p (op0, XEXP (op1, 1)))
1731 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1732 GET_MODE (XEXP (op1, 0)));
1733 return simplify_gen_binary (AND, mode, op0, tem);
1739 if (trueop1 == constm1_rtx)
1740 return simplify_gen_unary (NEG, mode, op0, mode);
1742 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1743 x is NaN, since x * 0 is then also NaN. Nor is it valid
1744 when the mode has signed zeros, since multiplying a negative
1745 number by 0 will give -0, not 0. */
1746 if (!HONOR_NANS (mode)
1747 && !HONOR_SIGNED_ZEROS (mode)
1748 && trueop1 == CONST0_RTX (mode)
1749 && ! side_effects_p (op0))
1752 /* In IEEE floating point, x*1 is not equivalent to x for
1754 if (!HONOR_SNANS (mode)
1755 && trueop1 == CONST1_RTX (mode))
1758 /* Convert multiply by constant power of two into shift unless
1759 we are still generating RTL. This test is a kludge. */
1760 if (GET_CODE (trueop1) == CONST_INT
1761 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1762 /* If the mode is larger than the host word size, and the
1763 uppermost bit is set, then this isn't a power of two due
1764 to implicit sign extension. */
1765 && (width <= HOST_BITS_PER_WIDE_INT
1766 || val != HOST_BITS_PER_WIDE_INT - 1)
1767 && ! rtx_equal_function_value_matters)
1768 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1770 /* x*2 is x+x and x*(-1) is -x */
1771 if (GET_CODE (trueop1) == CONST_DOUBLE
1772 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1773 && GET_MODE (op0) == mode)
1776 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1778 if (REAL_VALUES_EQUAL (d, dconst2))
1779 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1781 if (REAL_VALUES_EQUAL (d, dconstm1))
1782 return simplify_gen_unary (NEG, mode, op0, mode);
1785 /* Reassociate multiplication, but for floating point MULTs
1786 only when the user specifies unsafe math optimizations. */
1787 if (! FLOAT_MODE_P (mode)
1788 || flag_unsafe_math_optimizations)
1790 tem = simplify_associative_operation (code, mode, op0, op1);
1797 if (trueop1 == const0_rtx)
1799 if (GET_CODE (trueop1) == CONST_INT
1800 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1801 == GET_MODE_MASK (mode)))
1803 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1805 /* A | (~A) -> -1 */
1806 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1807 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1808 && ! side_effects_p (op0)
1809 && GET_MODE_CLASS (mode) != MODE_CC)
1811 tem = simplify_associative_operation (code, mode, op0, op1);
1817 if (trueop1 == const0_rtx)
1819 if (GET_CODE (trueop1) == CONST_INT
1820 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1821 == GET_MODE_MASK (mode)))
1822 return simplify_gen_unary (NOT, mode, op0, mode);
1823 if (trueop0 == trueop1 && ! side_effects_p (op0)
1824 && GET_MODE_CLASS (mode) != MODE_CC)
1826 tem = simplify_associative_operation (code, mode, op0, op1);
1832 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1834 if (GET_CODE (trueop1) == CONST_INT
1835 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1836 == GET_MODE_MASK (mode)))
1838 if (trueop0 == trueop1 && ! side_effects_p (op0)
1839 && GET_MODE_CLASS (mode) != MODE_CC)
1842 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1843 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1844 && ! side_effects_p (op0)
1845 && GET_MODE_CLASS (mode) != MODE_CC)
1847 tem = simplify_associative_operation (code, mode, op0, op1);
1853 /* Convert divide by power of two into shift (divide by 1 handled
1855 if (GET_CODE (trueop1) == CONST_INT
1856 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1857 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
1859 /* Fall through.... */
1862 if (trueop1 == CONST1_RTX (mode))
1864 /* On some platforms DIV uses narrower mode than its
1866 rtx x = gen_lowpart_common (mode, op0);
1869 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1870 return gen_lowpart_SUBREG (mode, op0);
1875 /* Maybe change 0 / x to 0. This transformation isn't safe for
1876 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1877 Nor is it safe for modes with signed zeros, since dividing
1878 0 by a negative number gives -0, not 0. */
1879 if (!HONOR_NANS (mode)
1880 && !HONOR_SIGNED_ZEROS (mode)
1881 && trueop0 == CONST0_RTX (mode)
1882 && ! side_effects_p (op1))
1885 /* Change division by a constant into multiplication. Only do
1886 this with -funsafe-math-optimizations. */
1887 else if (GET_CODE (trueop1) == CONST_DOUBLE
1888 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1889 && trueop1 != CONST0_RTX (mode)
1890 && flag_unsafe_math_optimizations)
1893 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1895 if (! REAL_VALUES_EQUAL (d, dconst0))
1897 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1898 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1899 return simplify_gen_binary (MULT, mode, op0, tem);
1905 /* Handle modulus by power of two (mod with 1 handled below). */
1906 if (GET_CODE (trueop1) == CONST_INT
1907 && exact_log2 (INTVAL (trueop1)) > 0)
1908 return simplify_gen_binary (AND, mode, op0,
1909 GEN_INT (INTVAL (op1) - 1));
1911 /* Fall through.... */
1914 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1915 && ! side_effects_p (op0) && ! side_effects_p (op1))
1922 /* Rotating ~0 always results in ~0. */
1923 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1924 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1925 && ! side_effects_p (op1))
1928 /* Fall through.... */
1932 if (trueop1 == const0_rtx)
1934 if (trueop0 == const0_rtx && ! side_effects_p (op1))
1939 if (width <= HOST_BITS_PER_WIDE_INT
1940 && GET_CODE (trueop1) == CONST_INT
1941 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
1942 && ! side_effects_p (op0))
1944 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1946 tem = simplify_associative_operation (code, mode, op0, op1);
1952 if (width <= HOST_BITS_PER_WIDE_INT
1953 && GET_CODE (trueop1) == CONST_INT
1954 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1955 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1956 && ! side_effects_p (op0))
1958 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1960 tem = simplify_associative_operation (code, mode, op0, op1);
1966 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1968 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1970 tem = simplify_associative_operation (code, mode, op0, op1);
1976 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1978 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1980 tem = simplify_associative_operation (code, mode, op0, op1);
1989 /* ??? There are simplifications that can be done. */
1993 if (!VECTOR_MODE_P (mode))
1995 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1997 != GET_MODE_INNER (GET_MODE (trueop0)))
1998 || GET_CODE (trueop1) != PARALLEL
1999 || XVECLEN (trueop1, 0) != 1
2000 || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
2003 if (GET_CODE (trueop0) == CONST_VECTOR)
2004 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
2008 if (!VECTOR_MODE_P (GET_MODE (trueop0))
2009 || (GET_MODE_INNER (mode)
2010 != GET_MODE_INNER (GET_MODE (trueop0)))
2011 || GET_CODE (trueop1) != PARALLEL)
2014 if (GET_CODE (trueop0) == CONST_VECTOR)
2016 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2017 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2018 rtvec v = rtvec_alloc (n_elts);
2021 if (XVECLEN (trueop1, 0) != (int) n_elts)
2023 for (i = 0; i < n_elts; i++)
2025 rtx x = XVECEXP (trueop1, 0, i);
2027 if (GET_CODE (x) != CONST_INT)
2029 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
2032 return gen_rtx_CONST_VECTOR (mode, v);
2038 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2039 ? GET_MODE (trueop0)
2040 : GET_MODE_INNER (mode));
2041 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2042 ? GET_MODE (trueop1)
2043 : GET_MODE_INNER (mode));
2045 if (!VECTOR_MODE_P (mode)
2046 || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2047 != GET_MODE_SIZE (mode)))
2050 if ((VECTOR_MODE_P (op0_mode)
2051 && (GET_MODE_INNER (mode)
2052 != GET_MODE_INNER (op0_mode)))
2053 || (!VECTOR_MODE_P (op0_mode)
2054 && GET_MODE_INNER (mode) != op0_mode))
2057 if ((VECTOR_MODE_P (op1_mode)
2058 && (GET_MODE_INNER (mode)
2059 != GET_MODE_INNER (op1_mode)))
2060 || (!VECTOR_MODE_P (op1_mode)
2061 && GET_MODE_INNER (mode) != op1_mode))
2064 if ((GET_CODE (trueop0) == CONST_VECTOR
2065 || GET_CODE (trueop0) == CONST_INT
2066 || GET_CODE (trueop0) == CONST_DOUBLE)
2067 && (GET_CODE (trueop1) == CONST_VECTOR
2068 || GET_CODE (trueop1) == CONST_INT
2069 || GET_CODE (trueop1) == CONST_DOUBLE))
2071 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2072 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2073 rtvec v = rtvec_alloc (n_elts);
2075 unsigned in_n_elts = 1;
2077 if (VECTOR_MODE_P (op0_mode))
2078 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2079 for (i = 0; i < n_elts; i++)
2083 if (!VECTOR_MODE_P (op0_mode))
2084 RTVEC_ELT (v, i) = trueop0;
2086 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2090 if (!VECTOR_MODE_P (op1_mode))
2091 RTVEC_ELT (v, i) = trueop1;
2093 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2098 return gen_rtx_CONST_VECTOR (mode, v);
2110 /* Get the integer argument values in two forms:
2111 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2113 arg0 = INTVAL (trueop0);
2114 arg1 = INTVAL (trueop1);
2116 if (width < HOST_BITS_PER_WIDE_INT)
2118 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2119 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2122 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2123 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2126 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2127 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2135 /* Compute the value of the arithmetic. */
2140 val = arg0s + arg1s;
2144 val = arg0s - arg1s;
2148 val = arg0s * arg1s;
2153 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2156 val = arg0s / arg1s;
2161 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2164 val = arg0s % arg1s;
2169 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2172 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2177 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2180 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2196 /* If shift count is undefined, don't fold it; let the machine do
2197 what it wants. But truncate it if the machine will do that. */
2201 #ifdef SHIFT_COUNT_TRUNCATED
2202 if (SHIFT_COUNT_TRUNCATED)
2206 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
2213 #ifdef SHIFT_COUNT_TRUNCATED
2214 if (SHIFT_COUNT_TRUNCATED)
2218 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
2225 #ifdef SHIFT_COUNT_TRUNCATED
2226 if (SHIFT_COUNT_TRUNCATED)
2230 val = arg0s >> arg1;
2232 /* Bootstrap compiler may not have sign extended the right shift.
2233 Manually extend the sign to ensure bootstrap cc matches gcc. */
2234 if (arg0s < 0 && arg1 > 0)
2235 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
2244 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2245 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2253 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2254 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2258 /* Do nothing here. */
2262 val = arg0s <= arg1s ? arg0s : arg1s;
2266 val = ((unsigned HOST_WIDE_INT) arg0
2267 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2271 val = arg0s > arg1s ? arg0s : arg1s;
2275 val = ((unsigned HOST_WIDE_INT) arg0
2276 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2283 /* ??? There are simplifications that can be done. */
2290 val = trunc_int_for_mode (val, mode);
2292 return GEN_INT (val);
2295 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2298 Rather than test for specific cases, we do this by a brute-force method
2299 and do all possible simplifications until no more changes occur. Then
2300 we rebuild the operation.
2302 If FORCE is true, then always generate the rtx. This is used to
2303 canonicalize stuff emitted from simplify_gen_binary. Note that this
2304 can still fail if the rtx is too complex. It won't fail just because
2305 the result is not 'simpler' than the input, however. */
2307 struct simplify_plus_minus_op_data
/* qsort comparison callback used by simplify_plus_minus: orders the
   operand array by decreasing commutative_operand_precedence (d2 is
   queried before d1, so higher-precedence operands sort first).  */
2314 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2316 const struct simplify_plus_minus_op_data *d1 = p1;
2317 const struct simplify_plus_minus_op_data *d2 = p2;
2319 return (commutative_operand_precedence (d2->op)
2320 - commutative_operand_precedence (d1->op));
/* Flatten a PLUS or MINUS of OP0 and OP1 (CODE is PLUS or MINUS) into an
   array of up to 8 (operand, negated) pairs, simplify pairs of operands
   against each other, and rebuild the expression.  Returns the rebuilt
   rtx, or 0 when no improvement was found and FORCE is not set.
   NOTE(review): several structural lines (braces, case labels) are not
   visible in this extraction; comments below describe only what the
   surviving statements demonstrably do.  */
2324 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2327 struct simplify_plus_minus_op_data ops[8];
2329 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2333 memset (ops, 0, sizeof ops);
2335 /* Set up the two operands and then expand them until nothing has been
2336 changed. If we run out of room in our array, give up; this should
2337 almost never happen. */
/* For MINUS, record the second operand as negated so the whole
   expansion can be treated as one big sum.  */
2342 ops[1].neg = (code == MINUS);
2348 for (i = 0; i < n_ops; i++)
2350 rtx this_op = ops[i].op;
2351 int this_neg = ops[i].neg;
2352 enum rtx_code this_code = GET_CODE (this_op);
/* Nested PLUS/MINUS: append the second sub-operand (negation flipped
   when the inner code is MINUS) and keep the first in this slot.  */
2361 ops[n_ops].op = XEXP (this_op, 1);
2362 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2365 ops[i].op = XEXP (this_op, 0);
/* Strip a negation wrapper and invert this slot's sign flag.  */
2371 ops[i].op = XEXP (this_op, 0);
2372 ops[i].neg = ! this_neg;
/* (const (plus C1 C2)) with both halves constant: split the wrapped
   pair into two separate array entries.  */
2378 && GET_CODE (XEXP (this_op, 0)) == PLUS
2379 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2380 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2382 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2383 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2384 ops[n_ops].neg = this_neg;
2392 /* ~a -> (-a - 1) */
2395 ops[n_ops].op = constm1_rtx;
2396 ops[n_ops++].neg = this_neg;
2397 ops[i].op = XEXP (this_op, 0);
2398 ops[i].neg = !this_neg;
/* Fold the sign flag into a constant operand: replace it by its
   negation, truncated to MODE (see neg_const_int above).  */
2406 ops[i].op = neg_const_int (mode, this_op);
2419 /* If we only have two operands, we can't do anything. */
2420 if (n_ops <= 2 && !force)
2423 /* Count the number of CONSTs we didn't split above. */
2424 for (i = 0; i < n_ops; i++)
2425 if (GET_CODE (ops[i].op) == CONST)
2428 /* Now simplify each pair of operands until nothing changes. The first
2429 time through just simplify constants against each other. */
/* O(n_ops^2) pairwise pass; repeated (outer control elided here)
   until a fixed point is reached.  */
2436 for (i = 0; i < n_ops - 1; i++)
2437 for (j = i + 1; j < n_ops; j++)
2439 rtx lhs = ops[i].op, rhs = ops[j].op;
2440 int lneg = ops[i].neg, rneg = ops[j].neg;
2442 if (lhs != 0 && rhs != 0
2443 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2445 enum rtx_code ncode = PLUS;
2451 tem = lhs, lhs = rhs, rhs = tem;
2453 else if (swap_commutative_operands_p (lhs, rhs))
2454 tem = lhs, lhs = rhs, rhs = tem;
2456 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2458 /* Reject "simplifications" that just wrap the two
2459 arguments in a CONST. Failure to do so can result
2460 in infinite recursion with simplify_binary_operation
2461 when it calls us to simplify CONST operations. */
2463 && ! (GET_CODE (tem) == CONST
2464 && GET_CODE (XEXP (tem, 0)) == ncode
2465 && XEXP (XEXP (tem, 0), 0) == lhs
2466 && XEXP (XEXP (tem, 0), 1) == rhs)
2467 /* Don't allow -x + -1 -> ~x simplifications in the
2468 first pass. This allows us the chance to combine
2469 the -1 with other constants. */
2471 && GET_CODE (tem) == NOT
2472 && XEXP (tem, 0) == rhs))
/* Normalize the combined result: strip NEG into the sign flag, and
   fold the flag into a CONST_INT via neg_const_int.  */
2475 if (GET_CODE (tem) == NEG)
2476 tem = XEXP (tem, 0), lneg = !lneg;
2477 if (GET_CODE (tem) == CONST_INT && lneg)
2478 tem = neg_const_int (mode, tem), lneg = 0;
/* Slot j was merged into slot i; mark it empty.  */
2482 ops[j].op = NULL_RTX;
2492 /* Pack all the operands to the lower-numbered entries. */
2493 for (i = 0, j = 0; j < n_ops; j++)
2498 /* Sort the operations based on swap_commutative_operands_p. */
2499 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2501 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2503 && GET_CODE (ops[1].op) == CONST_INT
2504 && CONSTANT_P (ops[0].op)
2506 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2508 /* We suppressed creation of trivial CONST expressions in the
2509 combination loop to avoid recursion. Create one manually now.
2510 The combination loop should have ensured that there is exactly
2511 one CONST_INT, and the sort will have ensured that it is last
2512 in the array and that any other constant will be next-to-last. */
2515 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2516 && CONSTANT_P (ops[n_ops - 2].op))
/* Merge the trailing CONST_INT into the preceding constant, negating
   it first when the two entries carry opposite signs.  */
2518 rtx value = ops[n_ops - 1].op;
2519 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2520 value = neg_const_int (mode, value);
2521 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2525 /* Count the number of CONSTs that we generated. */
2527 for (i = 0; i < n_ops; i++)
2528 if (GET_CODE (ops[i].op) == CONST)
2531 /* Give up if we didn't reduce the number of operands we had. Make
2532 sure we count a CONST as two operands. If we have the same
2533 number of operands, but have made more CONSTs than before, this
2534 is also an improvement, so accept it. */
2536 && (n_ops + n_consts > input_ops
2537 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2540 /* Put a non-negated operand first, if possible. */
2542 for (i = 0; i < n_ops && ops[i].neg; i++)
/* All operands were negated: materialize an explicit NEG on the
   first one so the chain below can use PLUS/MINUS.  */
2545 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2554 /* Now make the result by performing the requested operations. */
2556 for (i = 1; i < n_ops; i++)
2557 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2558 mode, result, ops[i].op);
2563 /* Like simplify_binary_operation except used for relational operators.
2564 MODE is the mode of the operands, not that of the result. If MODE
2565 is VOIDmode, both operands must also be VOIDmode and we compare the
2566 operands in "infinite precision".
2568 If no simplification is possible, this function returns zero. Otherwise,
2569 it returns either const_true_rtx or const0_rtx. */
2572 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2575 int equal, op0lt, op0ltu, op1lt, op1ltu;
2580 if (mode == VOIDmode
2581 && (GET_MODE (op0) != VOIDmode
2582 || GET_MODE (op1) != VOIDmode))
2585 /* If op0 is a compare, extract the comparison arguments from it. */
2586 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2587 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2589 trueop0 = avoid_constant_pool_reference (op0);
2590 trueop1 = avoid_constant_pool_reference (op1);
2592 /* We can't simplify MODE_CC values since we don't know what the
2593 actual comparison is. */
2594 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2597 /* Make sure the constant is second. */
2598 if (swap_commutative_operands_p (trueop0, trueop1))
2600 tem = op0, op0 = op1, op1 = tem;
2601 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2602 code = swap_condition (code);
2605 /* For integer comparisons of A and B maybe we can simplify A - B and can
2606 then simplify a comparison of that with zero. If A and B are both either
2607 a register or a CONST_INT, this can't help; testing for these cases will
2608 prevent infinite recursion here and speed things up.
2610 If CODE is an unsigned comparison, then we can never do this optimization,
2611 because it gives an incorrect result if the subtraction wraps around zero.
2612 ANSI C defines unsigned operations such that they never overflow, and
2613 thus such cases can not be ignored. */
2615 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2616 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2617 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2618 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2619 /* We cannot do this for == or != if tem is a nonzero address. */
2620 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2621 && code != GTU && code != GEU && code != LTU && code != LEU)
2622 return simplify_relational_operation (signed_condition (code),
2623 mode, tem, const0_rtx);
2625 if (flag_unsafe_math_optimizations && code == ORDERED)
2626 return const_true_rtx;
2628 if (flag_unsafe_math_optimizations && code == UNORDERED)
2631 /* For modes without NaNs, if the two operands are equal, we know the
2632 result except if they have side-effects. */
2633 if (! HONOR_NANS (GET_MODE (trueop0))
2634 && rtx_equal_p (trueop0, trueop1)
2635 && ! side_effects_p (trueop0))
2636 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2638 /* If the operands are floating-point constants, see if we can fold
2640 else if (GET_CODE (trueop0) == CONST_DOUBLE
2641 && GET_CODE (trueop1) == CONST_DOUBLE
2642 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2644 REAL_VALUE_TYPE d0, d1;
2646 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2647 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2649 /* Comparisons are unordered iff at least one of the values is NaN. */
2650 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2660 return const_true_rtx;
2673 equal = REAL_VALUES_EQUAL (d0, d1);
2674 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2675 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2678 /* Otherwise, see if the operands are both integers. */
2679 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2680 && (GET_CODE (trueop0) == CONST_DOUBLE
2681 || GET_CODE (trueop0) == CONST_INT)
2682 && (GET_CODE (trueop1) == CONST_DOUBLE
2683 || GET_CODE (trueop1) == CONST_INT))
2685 int width = GET_MODE_BITSIZE (mode);
2686 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2687 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2689 /* Get the two words comprising each integer constant. */
2690 if (GET_CODE (trueop0) == CONST_DOUBLE)
2692 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2693 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2697 l0u = l0s = INTVAL (trueop0);
2698 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2701 if (GET_CODE (trueop1) == CONST_DOUBLE)
2703 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2704 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2708 l1u = l1s = INTVAL (trueop1);
2709 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2712 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2713 we have to sign or zero-extend the values. */
2714 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2716 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2717 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2719 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2720 l0s |= ((HOST_WIDE_INT) (-1) << width);
2722 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2723 l1s |= ((HOST_WIDE_INT) (-1) << width);
2725 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2726 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2728 equal = (h0u == h1u && l0u == l1u);
2729 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2730 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2731 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2732 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2735 /* Otherwise, there are some code-specific tests we can make. */
2741 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2746 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2747 return const_true_rtx;
2751 /* Unsigned values are never negative. */
2752 if (trueop1 == const0_rtx)
2753 return const_true_rtx;
2757 if (trueop1 == const0_rtx)
2762 /* Unsigned values are never greater than the largest
2764 if (GET_CODE (trueop1) == CONST_INT
2765 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2766 && INTEGRAL_MODE_P (mode))
2767 return const_true_rtx;
2771 if (GET_CODE (trueop1) == CONST_INT
2772 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2773 && INTEGRAL_MODE_P (mode))
2778 /* Optimize abs(x) < 0.0. */
2779 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2781 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2783 if (GET_CODE (tem) == ABS)
2789 /* Optimize abs(x) >= 0.0. */
2790 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2792 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2794 if (GET_CODE (tem) == ABS)
2795 return const_true_rtx;
2800 /* Optimize ! (abs(x) < 0.0). */
2801 if (trueop1 == CONST0_RTX (mode))
2803 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2805 if (GET_CODE (tem) == ABS)
2806 return const_true_rtx;
2817 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2823 return equal ? const_true_rtx : const0_rtx;
2826 return ! equal ? const_true_rtx : const0_rtx;
2829 return op0lt ? const_true_rtx : const0_rtx;
2832 return op1lt ? const_true_rtx : const0_rtx;
2834 return op0ltu ? const_true_rtx : const0_rtx;
2836 return op1ltu ? const_true_rtx : const0_rtx;
2839 return equal || op0lt ? const_true_rtx : const0_rtx;
2842 return equal || op1lt ? const_true_rtx : const0_rtx;
2844 return equal || op0ltu ? const_true_rtx : const0_rtx;
2846 return equal || op1ltu ? const_true_rtx : const0_rtx;
2848 return const_true_rtx;
2856 /* Simplify CODE, an operation with result mode MODE and three operands,
2857 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2858 a constant. Return 0 if no simplifications is possible. */
/* NOTE(review): this excerpt elides lines of the original file (the switch
   on CODE and its case labels are not visible); comments below describe
   only the code that is present.  */
2861 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2862 enum machine_mode op0_mode, rtx op0, rtx op1,
2865 unsigned int width = GET_MODE_BITSIZE (mode);
2867 /* VOIDmode means "infinite" precision. */
2869 width = HOST_BITS_PER_WIDE_INT;
/* Bit-field extraction from a constant: all three operands must be
   CONST_INTs, the field (OP1 = width in bits, OP2 = starting position)
   must lie within WIDTH, and WIDTH must fit in a HOST_WIDE_INT so the
   arithmetic below is exact.  Presumably the SIGN_EXTRACT/ZERO_EXTRACT
   cases — the case labels fall in an elided region; confirm against the
   full source.  */
2875 if (GET_CODE (op0) == CONST_INT
2876 && GET_CODE (op1) == CONST_INT
2877 && GET_CODE (op2) == CONST_INT
2878 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2879 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2881 /* Extracting a bit-field from a constant */
2882 HOST_WIDE_INT val = INTVAL (op0);
/* On BITS_BIG_ENDIAN targets OP2 counts from the most-significant end,
   so convert it to a shift from the least-significant end.  */
2884 if (BITS_BIG_ENDIAN)
2885 val >>= (GET_MODE_BITSIZE (op0_mode)
2886 - INTVAL (op2) - INTVAL (op1));
2888 val >>= INTVAL (op2);
/* Skip masking when the field is the full HOST_WIDE_INT: the shift
   below would be by the type's width, which is undefined.  */
2890 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2892 /* First zero-extend. */
2893 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2894 /* If desired, propagate sign bit. */
2895 if (code == SIGN_EXTRACT
2896 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2897 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2900 /* Clear the bits that don't belong in our mode,
2901 unless they and our sign bit are all one.
2902 So we get either a reasonable negative value or a reasonable
2903 unsigned value for this mode. */
2904 if (width < HOST_BITS_PER_WIDE_INT
2905 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2906 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2907 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2909 return GEN_INT (val);
/* Constant condition: a nonzero OP0 selects OP1, zero selects OP2.
   Presumably the IF_THEN_ELSE case — label elided; confirm.  */
2914 if (GET_CODE (op0) == CONST_INT)
2915 return op0 != const0_rtx ? op1 : op2;
2917 /* Convert c ? a : a into "a". */
2918 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
2921 /* Convert a != b ? a : b into "a". */
/* Only valid when NaNs and signed zeros can be ignored: with either,
   choosing a over b (or vice versa) is not value-preserving.  */
2922 if (GET_CODE (op0) == NE
2923 && ! side_effects_p (op0)
2924 && ! HONOR_NANS (mode)
2925 && ! HONOR_SIGNED_ZEROS (mode)
2926 && ((rtx_equal_p (XEXP (op0, 0), op1)
2927 && rtx_equal_p (XEXP (op0, 1), op2))
2928 || (rtx_equal_p (XEXP (op0, 0), op2)
2929 && rtx_equal_p (XEXP (op0, 1), op1))))
2932 /* Convert a == b ? a : b into "b". */
2933 if (GET_CODE (op0) == EQ
2934 && ! side_effects_p (op0)
2935 && ! HONOR_NANS (mode)
2936 && ! HONOR_SIGNED_ZEROS (mode)
2937 && ((rtx_equal_p (XEXP (op0, 0), op1)
2938 && rtx_equal_p (XEXP (op0, 1), op2))
2939 || (rtx_equal_p (XEXP (op0, 0), op2)
2940 && rtx_equal_p (XEXP (op0, 1), op1))))
/* Condition is a comparison (RTX class '<'): try to fold the comparison
   itself; if it folds to a known truth value, the whole conditional
   collapses to one arm.  */
2943 if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2945 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2946 ? GET_MODE (XEXP (op0, 1))
2947 : GET_MODE (XEXP (op0, 0)));
2949 if (cmp_mode == VOIDmode)
2950 cmp_mode = op0_mode;
2951 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2952 XEXP (op0, 0), XEXP (op0, 1));
2954 /* See if any simplifications were possible. */
2955 if (temp == const0_rtx)
2957 else if (temp == const_true_rtx)
2962 /* Look for happy constants in op1 and op2. */
/* (cond ? STORE_FLAG_VALUE : 0) is just the comparison itself;
   (cond ? 0 : STORE_FLAG_VALUE) is its reversal, when reversible.  */
2963 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2965 HOST_WIDE_INT t = INTVAL (op1);
2966 HOST_WIDE_INT f = INTVAL (op2);
2968 if (t == STORE_FLAG_VALUE && f == 0)
2969 code = GET_CODE (op0);
2970 else if (t == 0 && f == STORE_FLAG_VALUE)
2973 tmp = reversed_comparison_code (op0, NULL_RTX);
2981 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
/* Vector merge (presumably the VEC_MERGE case — label elided): OP2 is a
   bitmask selecting, per element, from OP0 (bit set) or OP1 (bit clear).  */
2987 if (GET_MODE (op0) != mode
2988 || GET_MODE (op1) != mode
2989 || !VECTOR_MODE_P (mode))
2991 op2 = avoid_constant_pool_reference (op2);
2992 if (GET_CODE (op2) == CONST_INT)
2994 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2995 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2996 int mask = (1 << n_elts) - 1;
/* All-zero mask selects OP1 entirely; all-ones selects OP0 entirely.  */
2998 if (!(INTVAL (op2) & mask))
3000 if ((INTVAL (op2) & mask) == mask)
3003 op0 = avoid_constant_pool_reference (op0);
3004 op1 = avoid_constant_pool_reference (op1);
3005 if (GET_CODE (op0) == CONST_VECTOR
3006 && GET_CODE (op1) == CONST_VECTOR)
/* Both inputs constant: build the merged CONST_VECTOR element by
   element according to the mask bits.  */
3008 rtvec v = rtvec_alloc (n_elts);
3011 for (i = 0; i < n_elts; i++)
3012 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3013 ? CONST_VECTOR_ELT (op0, i)
3014 : CONST_VECTOR_ELT (op1, i));
3015 return gen_rtx_CONST_VECTOR (mode, v);
3027 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3028 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3030 Works by unpacking OP into a collection of 8-bit values
3031 represented as a little-endian array of 'unsigned char', selecting by BYTE,
3032 and then repacking them again for OUTERMODE. */
/* NOTE(review): this excerpt elides lines of the original file (several
   declarations, case labels, and closing braces are missing); comments
   describe only the visible code.  */
3035 simplify_immed_subreg (enum machine_mode outermode, rtx op,
3036 enum machine_mode innermode, unsigned int byte)
3038 /* We support up to 512-bit values (for V8DFmode). */
/* VALUE holds the constant decomposed into value_bit-sized chunks in
   little-endian order; VALUE_MASK isolates one chunk.  */
3042 value_mask = (1 << value_bit) - 1
3044 unsigned char value[max_bitsize / value_bit];
3053 rtvec result_v = NULL;
3054 enum mode_class outer_class;
3055 enum machine_mode outer_submode;
3057 /* Some ports misuse CCmode. */
3058 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3061 /* Unpack the value. */
/* A CONST_VECTOR is treated as NUM_ELEM elements of the inner mode;
   anything else as a single element of up to max_bitsize bits.  */
3063 if (GET_CODE (op) == CONST_VECTOR)
3065 num_elem = CONST_VECTOR_NUNITS (op);
3066 elems = &CONST_VECTOR_ELT (op, 0);
3067 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3073 elem_bitsize = max_bitsize;
3076 if (BITS_PER_UNIT % value_bit != 0)
3077 abort (); /* Too complicated; reducing value_bit may help. */
3078 if (elem_bitsize % BITS_PER_UNIT != 0)
3079 abort (); /* I don't know how to handle endianness of sub-units. */
3081 for (elem = 0; elem < num_elem; elem++)
3084 rtx el = elems[elem];
3086 /* Vectors are kept in target memory order. (This is probably
3089 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3090 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
/* Map the element's logical position to its little-endian position in
   VALUE, accounting for word and byte endianness separately.  */
3092 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3093 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3094 unsigned bytele = (subword_byte % UNITS_PER_WORD
3095 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3096 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3099 switch (GET_CODE (el))
/* CONST_INT case (label elided): emit the low HOST_WIDE_INT's bits,
   then sign-fill the remainder.  */
3103 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3105 *vp++ = INTVAL (el) >> i;
3106 /* CONST_INTs are always logically sign-extended. */
3107 for (; i < elem_bitsize; i += value_bit)
3108 *vp++ = INTVAL (el) < 0 ? -1 : 0;
/* CONST_DOUBLE case (label elided).  VOIDmode means an integer pair
   (low, high); a float mode means a REAL_VALUE.  */
3112 if (GET_MODE (el) == VOIDmode)
3114 /* If this triggers, someone should have generated a
3115 CONST_INT instead. */
3116 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3119 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3120 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3121 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3124 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3127 /* It shouldn't matter what's done here, so fill it with
3129 for (; i < max_bitsize; i += value_bit)
3132 else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
3134 long tmp[max_bitsize / 32];
3135 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3137 if (bitsize > elem_bitsize)
3139 if (bitsize % value_bit != 0)
3142 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3145 /* real_to_target produces its result in words affected by
3146 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3147 and use WORDS_BIG_ENDIAN instead; see the documentation
3148 of SUBREG in rtl.texi. */
3149 for (i = 0; i < bitsize; i += value_bit)
3152 if (WORDS_BIG_ENDIAN)
3153 ibase = bitsize - 1 - i;
3156 *vp++ = tmp[ibase / 32] >> i % 32;
3159 /* It shouldn't matter what's done here, so fill it with
3161 for (; i < elem_bitsize; i += value_bit)
3173 /* Now, pick the right byte to start with. */
3174 /* Renumber BYTE so that the least-significant byte is byte 0. A special
3175 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3176 will already have offset 0. */
3177 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3179 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3181 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3182 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3183 byte = (subword_byte % UNITS_PER_WORD
3184 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3187 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3188 so if it's become negative it will instead be very large.) */
3189 if (byte >= GET_MODE_SIZE (innermode))
3192 /* Convert from bytes to chunks of size value_bit. */
3193 value_start = byte * (BITS_PER_UNIT / value_bit);
3195 /* Re-pack the value. */
/* For a vector result, build each element of the inner mode; otherwise
   build one value of OUTERMODE directly.  */
3197 if (VECTOR_MODE_P (outermode))
3199 num_elem = GET_MODE_NUNITS (outermode);
3200 result_v = rtvec_alloc (num_elem);
3201 elems = &RTVEC_ELT (result_v, 0);
3202 outer_submode = GET_MODE_INNER (outermode);
3208 outer_submode = outermode;
3211 outer_class = GET_MODE_CLASS (outer_submode);
3212 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3214 if (elem_bitsize % value_bit != 0)
3216 if (elem_bitsize + value_start * value_bit > max_bitsize)
3219 for (elem = 0; elem < num_elem; elem++)
3223 /* Vectors are stored in target memory order. (This is probably
/* Same logical-to-little-endian position mapping as in the unpack loop,
   but starting at VALUE_START within VALUE.  */
3226 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3227 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3229 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3230 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3231 unsigned bytele = (subword_byte % UNITS_PER_WORD
3232 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3233 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3236 switch (outer_class)
/* Integer classes: accumulate the chunks into a (lo, hi) pair.  */
3239 case MODE_PARTIAL_INT:
3241 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3244 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3246 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3247 for (; i < elem_bitsize; i += value_bit)
3248 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3249 << (i - HOST_BITS_PER_WIDE_INT));
3251 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3253 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3254 elems[elem] = gen_int_mode (lo, outer_submode);
3256 elems[elem] = immed_double_const (lo, hi, outer_submode);
/* Float class (case label elided): rebuild the REAL_VALUE from the raw
   target words via real_from_target.  */
3263 long tmp[max_bitsize / 32];
3265 /* real_from_target wants its input in words affected by
3266 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3267 and use WORDS_BIG_ENDIAN instead; see the documentation
3268 of SUBREG in rtl.texi. */
3269 for (i = 0; i < max_bitsize / 32; i++)
3271 for (i = 0; i < elem_bitsize; i += value_bit)
3274 if (WORDS_BIG_ENDIAN)
3275 ibase = elem_bitsize - 1 - i;
3278 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3281 real_from_target (&r, tmp, outer_submode);
3282 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3290 if (VECTOR_MODE_P (outermode))
3291 return gen_rtx_CONST_VECTOR (outermode, result_v);
3296 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3297 Return 0 if no simplifications are possible. */
/* NOTE(review): this excerpt elides lines of the original file; comments
   describe only the visible code.  */
3299 simplify_subreg (enum machine_mode outermode, rtx op,
3300 enum machine_mode innermode, unsigned int byte)
3302 /* Little bit of sanity checking. */
3303 if (innermode == VOIDmode || outermode == VOIDmode
3304 || innermode == BLKmode || outermode == BLKmode)
3307 if (GET_MODE (op) != innermode
3308 && GET_MODE (op) != VOIDmode)
/* BYTE must be a multiple of the outer mode's size and lie within OP.  */
3311 if (byte % GET_MODE_SIZE (outermode)
3312 || byte >= GET_MODE_SIZE (innermode))
3315 if (outermode == innermode && !byte)
/* Constants are folded by computing the subreg's bytes directly.  */
3318 if (GET_CODE (op) == CONST_INT
3319 || GET_CODE (op) == CONST_DOUBLE
3320 || GET_CODE (op) == CONST_VECTOR)
3321 return simplify_immed_subreg (outermode, op, innermode, byte);
3323 /* Changing mode twice with SUBREG => just change it once,
3324 or not at all if changing back op starting mode. */
3325 if (GET_CODE (op) == SUBREG)
3327 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3328 int final_offset = byte + SUBREG_BYTE (op);
3331 if (outermode == innermostmode
3332 && byte == 0 && SUBREG_BYTE (op) == 0)
3333 return SUBREG_REG (op);
3335 /* The SUBREG_BYTE represents offset, as if the value were stored
3336 in memory. Irritating exception is paradoxical subreg, where
3337 we define SUBREG_BYTE to be 0. On big endian machines, this
3338 value should be negative. For a moment, undo this exception. */
3339 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3341 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3342 if (WORDS_BIG_ENDIAN)
3343 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3344 if (BYTES_BIG_ENDIAN)
3345 final_offset += difference % UNITS_PER_WORD;
/* Same un-doing for the inner SUBREG if it is itself paradoxical.  */
3347 if (SUBREG_BYTE (op) == 0
3348 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3350 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3351 if (WORDS_BIG_ENDIAN)
3352 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3353 if (BYTES_BIG_ENDIAN)
3354 final_offset += difference % UNITS_PER_WORD;
3357 /* See whether resulting subreg will be paradoxical. */
3358 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3360 /* In nonparadoxical subregs we can't handle negative offsets. */
3361 if (final_offset < 0)
3363 /* Bail out in case resulting subreg would be incorrect. */
3364 if (final_offset % GET_MODE_SIZE (outermode)
3365 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3371 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3373 /* In paradoxical subreg, see if we are still looking on lower part.
3374 If so, our SUBREG_BYTE will be 0. */
3375 if (WORDS_BIG_ENDIAN)
3376 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3377 if (BYTES_BIG_ENDIAN)
3378 offset += difference % UNITS_PER_WORD;
3379 if (offset == final_offset)
3385 /* Recurse for further possible simplifications. */
3386 new = simplify_subreg (outermode, SUBREG_REG (op),
3387 GET_MODE (SUBREG_REG (op)),
3391 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3394 /* SUBREG of a hard register => just change the register number
3395 and/or mode. If the hard register is not valid in that mode,
3396 suppress this simplification. If the hard register is the stack,
3397 frame, or argument pointer, leave this as a SUBREG. */
3400 && (! REG_FUNCTION_VALUE_P (op)
3401 || ! rtx_equal_function_value_matters)
3402 && REGNO (op) < FIRST_PSEUDO_REGISTER
3403 #ifdef CANNOT_CHANGE_MODE_CLASS
3404 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3405 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3406 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
/* Frame/arg/stack pointers are excluded (unless reload eliminated the
   frame pointer) — eliminating them later requires the SUBREG form.  */
3408 && ((reload_completed && !frame_pointer_needed)
3409 || (REGNO (op) != FRAME_POINTER_REGNUM
3410 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3411 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3414 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3415 && REGNO (op) != ARG_POINTER_REGNUM
3417 && REGNO (op) != STACK_POINTER_REGNUM
3418 && subreg_offset_representable_p (REGNO (op), innermode,
3421 rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3422 int final_regno = subreg_hard_regno (tem, 0);
3424 /* ??? We do allow it if the current REG is not valid for
3425 its mode. This is a kludge to work around how float/complex
3426 arguments are passed on 32-bit SPARC and should be fixed. */
3427 if (HARD_REGNO_MODE_OK (final_regno, outermode)
3428 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3430 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3432 /* Propagate original regno. We don't have any way to specify
3433 the offset inside original regno, so do so only for lowpart.
3434 The information is used only by alias analysis that can not
3435 grog partial register anyway. */
3437 if (subreg_lowpart_offset (outermode, innermode) == byte)
3438 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3443 /* If we have a SUBREG of a register that we are replacing and we are
3444 replacing it with a MEM, make a new MEM and try replacing the
3445 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3446 or if we would be widening it. */
3448 if (GET_CODE (op) == MEM
3449 && ! mode_dependent_address_p (XEXP (op, 0))
3450 /* Allow splitting of volatile memory references in case we don't
3451 have instruction to move the whole thing. */
3452 && (! MEM_VOLATILE_P (op)
3453 || ! have_insn_for (SET, innermode))
3454 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3455 return adjust_address_nv (op, outermode, byte);
3457 /* Handle complex values represented as CONCAT
3458 of real and imaginary part. */
3459 if (GET_CODE (op) == CONCAT)
/* BYTE below the unit size selects the real part; otherwise the
   imaginary part.  The offset is then taken within that part.  */
3461 int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
3462 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3463 unsigned int final_offset;
3466 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3467 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3470 /* We can at least simplify it by referring directly to the relevant part. */
3471 return gen_rtx_SUBREG (outermode, part, final_offset);
3477 /* Make a SUBREG operation or equivalent if it folds. */
/* Wrapper around simplify_subreg: validate the request, try to fold it,
   and otherwise return an explicit SUBREG rtx.  NOTE(review): some lines
   of this function are elided in this excerpt.  */
3480 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3481 enum machine_mode innermode, unsigned int byte)
3484 /* Little bit of sanity checking. */
/* Same preconditions as simplify_subreg: no VOIDmode/BLKmode operands.  */
3485 if (innermode == VOIDmode || outermode == VOIDmode
3486 || innermode == BLKmode || outermode == BLKmode)
3489 if (GET_MODE (op) != innermode
3490 && GET_MODE (op) != VOIDmode)
/* BYTE must be outer-mode aligned and within the inner value.  */
3493 if (byte % GET_MODE_SIZE (outermode)
3494 || byte >= GET_MODE_SIZE (innermode))
3497 if (GET_CODE (op) == QUEUED)
/* Prefer a folded form when simplify_subreg finds one.  */
3500 new = simplify_subreg (outermode, op, innermode, byte);
/* A SUBREG of a SUBREG, or of a VOIDmode value, cannot be represented
   directly; presumably the elided line bails out here — confirm.  */
3504 if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3507 return gen_rtx_SUBREG (outermode, op, byte);
3509 /* Simplify X, an rtx expression.
3511 Return the simplified expression or NULL if no simplifications
3514 This is the preferred entry point into the simplification routines;
3515 however, we still allow passes to call the more specific routines.
3517 Right now GCC has three (yes, three) major bodies of RTL simplification
3518 code that need to be unified.
3520 1. fold_rtx in cse.c. This code uses various CSE specific
3521 information to aid in RTL simplification.
3523 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3524 it uses combine specific information to aid in RTL
3527 3. The routines in this file.
3530 Long term we want to only have one body of simplification code; to
3531 get to that state I recommend the following steps:
3533 1. Pour over fold_rtx & simplify_rtx and move any simplifications
3534 which are not pass dependent state into these routines.
3536 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3537 use this routine whenever possible.
3539 3. Allow for pass dependent state to be provided to these
3540 routines and add simplifications based on the pass dependent
3541 state. Remove code from cse.c & combine.c that becomes
3544 It will take time, but ultimately the compiler will be easier to
3545 maintain and improve. It's totally silly that when we add a
3546 simplification that it needs to be added to 4 places (3 for RTL
3547 simplification and 1 for tree simplification. */
3550 simplify_rtx (rtx x)
3552 enum rtx_code code = GET_CODE (x);
3553 enum machine_mode mode = GET_MODE (x);
3556 switch (GET_RTX_CLASS (code))
3559 return simplify_unary_operation (code, mode,
3560 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3562 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3563 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3565 /* Fall through.... */
3568 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3572 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3573 XEXP (x, 0), XEXP (x, 1),
3577 if (VECTOR_MODE_P (mode))
3579 temp = simplify_relational_operation (code,
3580 ((GET_MODE (XEXP (x, 0))
3582 ? GET_MODE (XEXP (x, 0))
3583 : GET_MODE (XEXP (x, 1))),
3584 XEXP (x, 0), XEXP (x, 1));
3585 #ifdef FLOAT_STORE_FLAG_VALUE
3586 if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3588 if (temp == const0_rtx)
3589 temp = CONST0_RTX (mode);
3591 temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
3599 return simplify_gen_subreg (mode, SUBREG_REG (x),
3600 GET_MODE (SUBREG_REG (x)),
3602 if (code == CONSTANT_P_RTX)
3604 if (CONSTANT_P (XEXP (x, 0)))
3612 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3613 if (GET_CODE (XEXP (x, 0)) == HIGH
3614 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))