1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
25 #include "coretypes.h"
31 #include "hard-reg-set.h"
34 #include "insn-config.h"
43 /* Simplification and canonicalization of RTL. */
45 /* Much code operates on (low, high) pairs; the low value is an
46 unsigned wide int, the high value a signed wide int. We
47 occasionally need to sign extend from low to high as if low were a
49 #define HWI_SIGN_EXTEND(low) \
50 ((((HOST_WIDE_INT) (low)) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
/* Forward declarations of file-local helpers.  NOTE(review): this is an
   elided listing; several prototypes below are cut off mid-declaration
   (their continuation lines are missing).  */
52 static rtx neg_const_int (enum machine_mode, rtx);
53 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
54 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
56 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
58 static bool associative_constant_p (rtx);
59 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
62 /* Negate a CONST_INT rtx, truncating (because a conversion from a
63 maximally negative number can overflow). */
/* Negate CONST_INT I and return the result as a CONST_INT in MODE.
   gen_int_mode truncates the result to MODE, so negating the maximally
   negative value wraps instead of overflowing (see the comment above).
   NOTE(review): the "static rtx" return-type line and the braces of this
   definition are elided in this listing.  */
65 neg_const_int (enum machine_mode mode, rtx i)
67 return gen_int_mode (- INTVAL (i), mode);
71 /* Make a binary operation by properly ordering the operands and
72 seeing if the expression folds. */
/* Build (and, where possible, fold) the binary operation CODE in MODE on
   OP0 and OP1.  Commutative operations are canonicalized with the
   constant second; PLUS and MINUS additionally go through
   simplify_plus_minus.  Falls back to constructing the raw rtx.
   NOTE(review): this listing elides the tail of the signature, the local
   declarations, and the early-return lines after each fold attempt.  */
75 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
80 /* Put complex operands first and constants second if commutative. */
81 if (GET_RTX_CLASS (code) == 'c'
82 && swap_commutative_operands_p (op0, op1))
83 tem = op0, op0 = op1, op1 = tem;
85 /* If this simplifies, do it. */
86 tem = simplify_binary_operation (code, mode, op0, op1);
90 /* Handle addition and subtraction specially. Otherwise, just form
93 if (code == PLUS || code == MINUS)
95 tem = simplify_plus_minus (code, mode, op0, op1, 1);
/* No simplification found: form the plain rtx.  */
100 return gen_rtx_fmt_ee (code, mode, op0, op1);
103 /* If X is a MEM referencing the constant pool, return the real value.
104 Otherwise return X. */
/* If X is a MEM that references the constant pool, return the constant
   it holds (converted to X's mode via simplify_subreg if the pool entry
   was stored in a different mode); otherwise return X unchanged.
   NOTE(review): this listing elides the switch's case labels and several
   early returns — presumably the visible float-extension code sits under
   a FLOAT_EXTEND-like case; confirm against the full source.  */
106 avoid_constant_pool_reference (rtx x)
109 enum machine_mode cmode;
111 switch (GET_CODE (x))
117 /* Handle float extensions of constant pool references. */
119 c = avoid_constant_pool_reference (tmp);
120 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
124 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
125 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
135 /* Call target hook to avoid the effects of -fpic etc.... */
136 addr = (*targetm.delegitimize_address) (addr);
/* Strip a LO_SUM wrapper so we can inspect the symbol itself.  */
138 if (GET_CODE (addr) == LO_SUM)
139 addr = XEXP (addr, 1);
/* Only constant-pool SYMBOL_REFs can be resolved to a constant.  */
141 if (GET_CODE (addr) != SYMBOL_REF
142 || ! CONSTANT_POOL_ADDRESS_P (addr))
145 c = get_pool_constant (addr);
146 cmode = get_pool_mode (addr);
148 /* If we're accessing the constant in a different mode than it was
149 originally stored, attempt to fix that up via subreg simplifications.
150 If that fails we have no choice but to return the original memory. */
151 if (cmode != GET_MODE (x))
153 c = simplify_subreg (GET_MODE (x), c, cmode, 0);
160 /* Make a unary operation by first seeing if it folds and otherwise making
161 the specified operation. */
/* Build the unary operation CODE in MODE on OP (whose original mode was
   OP_MODE), folding to a simpler rtx when simplify_unary_operation can;
   otherwise form the raw rtx.  NOTE(review): the return-type line,
   braces, and the "return tem;" after the fold test are elided here.  */
164 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
165 enum machine_mode op_mode)
169 /* If this simplifies, use it. */
170 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
173 return gen_rtx_fmt_e (code, mode, op);
176 /* Likewise for ternary operations. */
/* Likewise for ternary operations: fold CODE in MODE on OP0/OP1/OP2 via
   simplify_ternary_operation when possible, else build the raw rtx.
   OP0_MODE is the mode of OP0 (needed when OP0 is VOIDmode constant).  */
179 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
180 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
184 /* If this simplifies, use it. */
185 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
189 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
192 /* Likewise, for relational operations.
193 CMP_MODE specifies mode comparison is done in.
/* Build (and, where possible, fold) the relational operation CODE with
   result mode MODE; the comparison itself is done in CMP_MODE (inferred
   from the operands when passed as VOIDmode).  Canonicalizes the
   constant into op1, unwraps COMPARE, and collapses nested comparisons
   against zero.  NOTE(review): several early-return and declaration
   lines are elided from this listing.  */
197 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
198 enum machine_mode cmp_mode, rtx op0, rtx op1)
/* Infer the comparison mode from whichever operand has one.  */
202 if (cmp_mode == VOIDmode)
203 cmp_mode = GET_MODE (op0);
204 if (cmp_mode == VOIDmode)
205 cmp_mode = GET_MODE (op1);
207 if (cmp_mode != VOIDmode
208 && ! VECTOR_MODE_P (mode))
210 tem = simplify_relational_operation (code, cmp_mode, op0, op1);
214 #ifdef FLOAT_STORE_FLAG_VALUE
/* Map a folded 0/1 result into the float store-flag representation.  */
215 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
218 if (tem == const0_rtx)
219 return CONST0_RTX (mode);
220 if (tem != const_true_rtx)
222 val = FLOAT_STORE_FLAG_VALUE (mode);
223 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
230 /* For the following tests, ensure const0_rtx is op1. */
231 if (swap_commutative_operands_p (op0, op1)
232 || (op0 == const0_rtx && op1 != const0_rtx))
233 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
235 /* If op0 is a compare, extract the comparison arguments from it. */
236 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
237 return simplify_gen_relational (code, mode, VOIDmode,
238 XEXP (op0, 0), XEXP (op0, 1));
240 /* If op0 is a comparison, extract the comparison arguments from it. */
241 if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && op1 == const0_rtx)
245 if (GET_MODE (op0) == mode)
247 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
248 XEXP (op0, 0), XEXP (op0, 1));
/* (ne/eq (cmp a b) 0): re-emit, reversing the inner code when the
   outer test asks for the opposite sense.  */
252 enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
254 return simplify_gen_relational (new, mode, VOIDmode,
255 XEXP (op0, 0), XEXP (op0, 1));
259 return gen_rtx_fmt_ee (code, mode, op0, op1);
262 /* Replace all occurrences of OLD in X with NEW and try to simplify the
263 resulting RTX. Return a new RTX which is as simplified as possible. */
/* Replace every occurrence of OLD in X with NEW, recursively, and try to
   simplify each rebuilt node via the simplify_gen_* entry points.
   Returns X itself when nothing changed.  NOTE(review): the case labels
   of the class switch ('1', '2'/'c', '<', '3'/'b', 'x') and several
   fallback returns are elided from this listing.  */
266 simplify_replace_rtx (rtx x, rtx old, rtx new)
268 enum rtx_code code = GET_CODE (x);
269 enum machine_mode mode = GET_MODE (x);
270 enum machine_mode op_mode;
273 /* If X is OLD, return NEW. Otherwise, if this is an expression, try
274 to build a new expression substituting recursively. If we can't do
275 anything, return our input. */
280 switch (GET_RTX_CLASS (code))
/* Unary: capture the operand's mode before substitution may change it.  */
284 op_mode = GET_MODE (op0);
285 op0 = simplify_replace_rtx (op0, old, new);
286 if (op0 == XEXP (x, 0))
288 return simplify_gen_unary (code, mode, op0, op_mode);
/* Binary / commutative.  */
292 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
293 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
294 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
296 return simplify_gen_binary (code, mode, op0, op1);
/* Relational: take cmp_mode from whichever original operand has one.  */
301 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
302 op0 = simplify_replace_rtx (op0, old, new);
303 op1 = simplify_replace_rtx (op1, old, new);
304 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
306 return simplify_gen_relational (code, mode, op_mode, op0, op1);
/* Ternary.  */
311 op_mode = GET_MODE (op0);
312 op0 = simplify_replace_rtx (op0, old, new);
313 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
314 op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
315 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
317 if (op_mode == VOIDmode)
318 op_mode = GET_MODE (op0);
319 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
322 /* The only case we try to handle is a SUBREG. */
325 op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
326 if (op0 == SUBREG_REG (x))
328 op0 = simplify_gen_subreg (GET_MODE (x), op0,
329 GET_MODE (SUBREG_REG (x)),
331 return op0 ? op0 : x;
/* MEM: substitute inside the address, keeping MEM attributes.  */
338 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
339 if (op0 == XEXP (x, 0))
341 return replace_equiv_address_nv (x, op0);
343 else if (code == LO_SUM)
345 op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
346 op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
348 /* (lo_sum (high x) x) -> x */
349 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
352 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
354 return gen_rtx_LO_SUM (mode, op0, op1);
/* REG: match by register number, not pointer identity.  */
356 else if (code == REG)
358 if (REG_P (old) && REGNO (x) == REGNO (old))
369 /* Try to simplify a unary operation CODE whose output mode is to be
370 MODE with input operand OP whose mode was originally OP_MODE.
371 Return zero if no simplification can be made. */
/* Try to fold the unary operation CODE with result mode MODE applied to
   OP (original mode OP_MODE).  Returns the simplified rtx, or zero when
   no simplification is known.  The function first handles VEC_DUPLICATE
   and element-wise CONST_VECTOR folding, then constant folding for
   CONST_INT/CONST_DOUBLE operands, and finally symbolic identities
   (NOT, NEG, SIGN_EXTEND, ZERO_EXTEND ...).  NOTE(review): this is an
   elided listing — switch/case labels, braces, and many early-return
   lines are missing throughout; do not infer control flow from
   adjacency alone.  */
373 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
374 rtx op, enum machine_mode op_mode)
376 unsigned int width = GET_MODE_BITSIZE (mode);
377 rtx trueop = avoid_constant_pool_reference (op);
/* (vec_duplicate x): validate mode agreement, then, for constant
   operands, materialize the broadcast as a CONST_VECTOR.  */
379 if (code == VEC_DUPLICATE)
381 if (!VECTOR_MODE_P (mode))
383 if (GET_MODE (trueop) != VOIDmode
384 && !VECTOR_MODE_P (GET_MODE (trueop))
385 && GET_MODE_INNER (mode) != GET_MODE (trueop))
387 if (GET_MODE (trueop) != VOIDmode
388 && VECTOR_MODE_P (GET_MODE (trueop))
389 && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
391 if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
392 || GET_CODE (trueop) == CONST_VECTOR)
394 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
395 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
396 rtvec v = rtvec_alloc (n_elts);
/* Scalar constant: replicate it into every lane.  */
399 if (GET_CODE (trueop) != CONST_VECTOR)
400 for (i = 0; i < n_elts; i++)
401 RTVEC_ELT (v, i) = trueop;
/* Vector constant: tile the input lanes cyclically.  */
404 enum machine_mode inmode = GET_MODE (trueop);
405 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
406 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
408 if (in_n_elts >= n_elts || n_elts % in_n_elts)
410 for (i = 0; i < n_elts; i++)
411 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
413 return gen_rtx_CONST_VECTOR (mode, v);
/* Look through a CONST wrapper.  */
416 else if (GET_CODE (op) == CONST)
417 return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);
/* Element-wise fold of a unary op over a CONST_VECTOR.  */
419 if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
421 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
422 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
423 enum machine_mode opmode = GET_MODE (trueop);
424 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
425 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
426 rtvec v = rtvec_alloc (n_elts);
429 if (op_n_elts != n_elts)
432 for (i = 0; i < n_elts; i++)
434 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
435 CONST_VECTOR_ELT (trueop, i),
436 GET_MODE_INNER (opmode));
439 RTVEC_ELT (v, i) = x;
441 return gen_rtx_CONST_VECTOR (mode, v);
444 /* The order of these tests is critical so that, for example, we don't
445 check the wrong mode (input vs. output) for a conversion operation,
446 such as FIX. At some point, this should be simplified. */
/* FLOAT of an integer constant: build the REAL_VALUE and truncate.  */
448 if (code == FLOAT && GET_MODE (trueop) == VOIDmode
449 && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
451 HOST_WIDE_INT hv, lv;
454 if (GET_CODE (trueop) == CONST_INT)
455 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
457 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
459 REAL_VALUE_FROM_INT (d, lv, hv, mode);
460 d = real_value_truncate (mode, d);
461 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* UNSIGNED_FLOAT: same, but the (lv,hv) pair is read as unsigned, so
   the operand's mode must be known to mask off phantom sign bits.  */
463 else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
464 && (GET_CODE (trueop) == CONST_DOUBLE
465 || GET_CODE (trueop) == CONST_INT))
467 HOST_WIDE_INT hv, lv;
470 if (GET_CODE (trueop) == CONST_INT)
471 lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
473 lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
475 if (op_mode == VOIDmode)
477 /* We don't know how to interpret negative-looking numbers in
478 this case, so don't try to fold those. */
482 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
485 hv = 0, lv &= GET_MODE_MASK (op_mode);
487 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
488 d = real_value_truncate (mode, d);
489 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* Fold unary ops whose CONST_INT operand fits one HOST_WIDE_INT.  The
   elided switch dispatches on CODE; visible arms appear to be ABS, FFS,
   CLZ, CTZ, POPCOUNT, PARITY, ZERO_EXTEND and SIGN_EXTEND — confirm
   against the full source.  */
492 if (GET_CODE (trueop) == CONST_INT
493 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
495 HOST_WIDE_INT arg0 = INTVAL (trueop);
509 val = (arg0 >= 0 ? arg0 : - arg0);
513 /* Don't use ffs here. Instead, get low order bit and then its
514 number. If arg0 is zero, this will return 0, as desired. */
515 arg0 &= GET_MODE_MASK (mode);
516 val = exact_log2 (arg0 & (- arg0)) + 1;
520 arg0 &= GET_MODE_MASK (mode);
521 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
524 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
528 arg0 &= GET_MODE_MASK (mode);
531 /* Even if the value at zero is undefined, we have to come
532 up with some replacement. Seems good enough. */
533 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
534 val = GET_MODE_BITSIZE (mode);
537 val = exact_log2 (arg0 & -arg0);
/* Popcount: classic clear-lowest-set-bit loop.  */
541 arg0 &= GET_MODE_MASK (mode);
544 val++, arg0 &= arg0 - 1;
548 arg0 &= GET_MODE_MASK (mode);
551 val++, arg0 &= arg0 - 1;
560 /* When zero-extending a CONST_INT, we need to know its
562 if (op_mode == VOIDmode)
564 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
566 /* If we were really extending the mode,
567 we would have to distinguish between zero-extension
568 and sign-extension. */
569 if (width != GET_MODE_BITSIZE (op_mode))
573 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
574 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
/* Sign extension: mask to op_mode, then subtract 2^bits when the
   operand's sign bit is set.  */
580 if (op_mode == VOIDmode)
582 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
584 /* If we were really extending the mode,
585 we would have to distinguish between zero-extension
586 and sign-extension. */
587 if (width != GET_MODE_BITSIZE (op_mode))
591 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
594 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
596 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
597 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
614 val = trunc_int_for_mode (val, mode);
616 return GEN_INT (val);
619 /* We can do some operations on integer CONST_DOUBLEs. Also allow
620 for a DImode operation on a CONST_INT. */
621 else if (GET_MODE (trueop) == VOIDmode
622 && width <= HOST_BITS_PER_WIDE_INT * 2
623 && (GET_CODE (trueop) == CONST_DOUBLE
624 || GET_CODE (trueop) == CONST_INT))
626 unsigned HOST_WIDE_INT l1, lv;
627 HOST_WIDE_INT h1, hv;
629 if (GET_CODE (trueop) == CONST_DOUBLE)
630 l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
632 l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
/* Two-word NOT/NEG via neg_double; bit-scan ops handle the high and
   low halves separately.  */
642 neg_double (l1, h1, &lv, &hv);
647 neg_double (l1, h1, &lv, &hv);
659 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
662 lv = exact_log2 (l1 & -l1) + 1;
668 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
669 - HOST_BITS_PER_WIDE_INT;
671 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
672 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
673 lv = GET_MODE_BITSIZE (mode);
679 lv = exact_log2 (l1 & -l1);
681 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
682 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
683 lv = GET_MODE_BITSIZE (mode);
706 /* This is just a change-of-mode, so do nothing. */
/* Two-word ZERO_EXTEND / SIGN_EXTEND of the low word.  */
711 if (op_mode == VOIDmode)
714 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
718 lv = l1 & GET_MODE_MASK (op_mode);
722 if (op_mode == VOIDmode
723 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
727 lv = l1 & GET_MODE_MASK (op_mode);
728 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
729 && (lv & ((HOST_WIDE_INT) 1
730 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
731 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
733 hv = HWI_SIGN_EXTEND (lv);
744 return immed_double_const (lv, hv, mode);
/* Floating-point constant folding: SQRT/ABS/NEG/FLOAT_TRUNCATE etc. on
   a CONST_DOUBLE with float result mode.  */
747 else if (GET_CODE (trueop) == CONST_DOUBLE
748 && GET_MODE_CLASS (mode) == MODE_FLOAT)
750 REAL_VALUE_TYPE d, t;
751 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
/* Don't fold sqrt of a signalling NaN — that must trap at runtime.  */
756 if (HONOR_SNANS (mode) && real_isnan (&d))
758 real_sqrt (&t, mode, &d);
762 d = REAL_VALUE_ABS (d);
765 d = REAL_VALUE_NEGATE (d);
768 d = real_value_truncate (mode, d);
771 /* All this does is change the mode. */
774 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
780 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
/* FIX / UNSIGNED_FIX of a float constant to an integer mode.  */
783 else if (GET_CODE (trueop) == CONST_DOUBLE
784 && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
785 && GET_MODE_CLASS (mode) == MODE_INT
786 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
788 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
789 operators are intentionally left unspecified (to ease implementation
790 by target backends), for consistency, this routine implements the
791 same semantics for constant folding as used by the middle-end. */
793 HOST_WIDE_INT xh, xl, th, tl;
794 REAL_VALUE_TYPE x, t;
795 REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
799 if (REAL_VALUE_ISNAN (x))
802 /* Test against the signed upper bound. */
803 if (width > HOST_BITS_PER_WIDE_INT)
805 th = ((unsigned HOST_WIDE_INT) 1
806 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
812 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
814 real_from_integer (&t, VOIDmode, tl, th, 0);
815 if (REAL_VALUES_LESS (t, x))
822 /* Test against the signed lower bound. */
823 if (width > HOST_BITS_PER_WIDE_INT)
825 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
831 tl = (HOST_WIDE_INT) -1 << (width - 1);
833 real_from_integer (&t, VOIDmode, tl, th, 0);
834 if (REAL_VALUES_LESS (x, t))
840 REAL_VALUE_TO_INT (&xl, &xh, x);
/* UNSIGNED_FIX branch: NaN and negatives don't fold.  */
844 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
847 /* Test against the unsigned upper bound. */
848 if (width == 2*HOST_BITS_PER_WIDE_INT)
853 else if (width >= HOST_BITS_PER_WIDE_INT)
855 th = ((unsigned HOST_WIDE_INT) 1
856 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
862 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
864 real_from_integer (&t, VOIDmode, tl, th, 1);
865 if (REAL_VALUES_LESS (t, x))
872 REAL_VALUE_TO_INT (&xl, &xh, x);
878 return immed_double_const (xl, xh, mode);
881 /* This was formerly used only for non-IEEE float.
882 eggert@twinsun.com says it is safe for IEEE also. */
/* Symbolic (non-constant) identities, dispatched by CODE.  The case
   labels are elided; the groups below correspond to NOT, NEG,
   SIGN_EXTEND and ZERO_EXTEND.  */
885 enum rtx_code reversed;
888 /* There are some simplifications we can do even if the operands
893 /* (not (not X)) == X. */
894 if (GET_CODE (op) == NOT)
897 /* (not (eq X Y)) == (ne X Y), etc. */
898 if (GET_RTX_CLASS (GET_CODE (op)) == '<'
899 && (mode == BImode || STORE_FLAG_VALUE == -1)
900 && ((reversed = reversed_comparison_code (op, NULL_RTX))
902 return simplify_gen_relational (reversed, mode, VOIDmode,
903 XEXP (op, 0), XEXP (op, 1));
905 /* (not (plus X -1)) can become (neg X). */
906 if (GET_CODE (op) == PLUS
907 && XEXP (op, 1) == constm1_rtx)
908 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
910 /* Similarly, (not (neg X)) is (plus X -1). */
911 if (GET_CODE (op) == NEG)
912 return plus_constant (XEXP (op, 0), -1);
914 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
915 if (GET_CODE (op) == XOR
916 && GET_CODE (XEXP (op, 1)) == CONST_INT
917 && (temp = simplify_unary_operation (NOT, mode,
920 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
923 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
924 operands other than 1, but that is not valid. We could do a
925 similar simplification for (not (lshiftrt C X)) where C is
926 just the sign bit, but this doesn't seem common enough to
928 if (GET_CODE (op) == ASHIFT
929 && XEXP (op, 0) == const1_rtx)
931 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
932 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
935 /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
936 by reversing the comparison code if valid. */
937 if (STORE_FLAG_VALUE == -1
938 && GET_RTX_CLASS (GET_CODE (op)) == '<'
939 && (reversed = reversed_comparison_code (op, NULL_RTX))
941 return simplify_gen_relational (reversed, mode, VOIDmode,
942 XEXP (op, 0), XEXP (op, 1));
944 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
945 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
946 so we can perform the above simplification. */
948 if (STORE_FLAG_VALUE == -1
949 && GET_CODE (op) == ASHIFTRT
950 && GET_CODE (XEXP (op, 1)) == CONST_INT
951 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
952 return simplify_gen_relational (GE, mode, VOIDmode,
953 XEXP (op, 0), const0_rtx);
958 /* (neg (neg X)) == X. */
959 if (GET_CODE (op) == NEG)
962 /* (neg (plus X 1)) can become (not X). */
963 if (GET_CODE (op) == PLUS
964 && XEXP (op, 1) == const1_rtx)
965 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
967 /* Similarly, (neg (not X)) is (plus X 1). */
968 if (GET_CODE (op) == NOT)
969 return plus_constant (XEXP (op, 0), 1);
971 /* (neg (minus X Y)) can become (minus Y X). This transformation
972 isn't safe for modes with signed zeros, since if X and Y are
973 both +0, (minus Y X) is the same as (minus X Y). If the
974 rounding mode is towards +infinity (or -infinity) then the two
975 expressions will be rounded differently. */
976 if (GET_CODE (op) == MINUS
977 && !HONOR_SIGNED_ZEROS (mode)
978 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
979 return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
982 if (GET_CODE (op) == PLUS
983 && !HONOR_SIGNED_ZEROS (mode)
984 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
986 /* (neg (plus A C)) is simplified to (minus -C A). */
987 if (GET_CODE (XEXP (op, 1)) == CONST_INT
988 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
990 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
993 return simplify_gen_binary (MINUS, mode, temp,
997 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
998 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
999 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1002 /* (neg (mult A B)) becomes (mult (neg A) B).
1003 This works even for floating-point values. */
1004 if (GET_CODE (op) == MULT
1005 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1007 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1008 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
1011 /* NEG commutes with ASHIFT since it is multiplication. Only do
1012 this if we can then eliminate the NEG (e.g., if the operand
1014 if (GET_CODE (op) == ASHIFT)
1016 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
1019 return simplify_gen_binary (ASHIFT, mode, temp,
1026 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1027 becomes just the MINUS if its mode is MODE. This allows
1028 folding switch statements on machines using casesi (such as
1030 if (GET_CODE (op) == TRUNCATE
1031 && GET_MODE (XEXP (op, 0)) == mode
1032 && GET_CODE (XEXP (op, 0)) == MINUS
1033 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1034 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1035 return XEXP (op, 0);
1037 /* Check for a sign extension of a subreg of a promoted
1038 variable, where the promotion is sign-extended, and the
1039 target mode is the same as the variable's promotion. */
1040 if (GET_CODE (op) == SUBREG
1041 && SUBREG_PROMOTED_VAR_P (op)
1042 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
1043 && GET_MODE (XEXP (op, 0)) == mode)
1044 return XEXP (op, 0);
1046 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
/* Sign-extending a ptr_mode pointer value to Pmode on targets where
   pointers extend signed.  */
1047 if (! POINTERS_EXTEND_UNSIGNED
1048 && mode == Pmode && GET_MODE (op) == ptr_mode
1050 || (GET_CODE (op) == SUBREG
1051 && GET_CODE (SUBREG_REG (op)) == REG
1052 && REG_POINTER (SUBREG_REG (op))
1053 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1054 return convert_memory_address (Pmode, op);
1059 /* Check for a zero extension of a subreg of a promoted
1060 variable, where the promotion is zero-extended, and the
1061 target mode is the same as the variable's promotion. */
1062 if (GET_CODE (op) == SUBREG
1063 && SUBREG_PROMOTED_VAR_P (op)
1064 && SUBREG_PROMOTED_UNSIGNED_P (op)
1065 && GET_MODE (XEXP (op, 0)) == mode)
1066 return XEXP (op, 0);
1068 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
/* Same as above for unsigned pointer extension.  */
1069 if (POINTERS_EXTEND_UNSIGNED > 0
1070 && mode == Pmode && GET_MODE (op) == ptr_mode
1072 || (GET_CODE (op) == SUBREG
1073 && GET_CODE (SUBREG_REG (op)) == REG
1074 && REG_POINTER (SUBREG_REG (op))
1075 && GET_MODE (SUBREG_REG (op)) == Pmode)))
1076 return convert_memory_address (Pmode, op);
1088 /* Subroutine of simplify_associative_operation. Return true if rtx OP
1089 is a suitable integer or floating point immediate constant. */
/* Return true if OP is a CONST_INT or CONST_DOUBLE, either directly or
   after looking through a constant-pool reference.  Used by
   simplify_associative_operation to decide when reassociation can fold.
   NOTE(review): the "static bool" line, braces, and the "return true;"
   of the first test are elided in this listing.  */
1091 associative_constant_p (rtx op)
1093 if (GET_CODE (op) == CONST_INT
1094 || GET_CODE (op) == CONST_DOUBLE)
1096 op = avoid_constant_pool_reference (op);
1097 return GET_CODE (op) == CONST_INT
1098 || GET_CODE (op) == CONST_DOUBLE;
1101 /* Subroutine of simplify_binary_operation to simplify an associative
1102 binary operation CODE with result mode MODE, operating on OP0 and OP1.
1103 Return 0 if no simplification is possible. */
/* Reassociate the associative binary operation CODE in MODE on OP0/OP1
   so that constants migrate outward and can be folded together.
   Returns the simplified rtx or 0 when no rule applies.
   NOTE(review): the signature tail, local declarations, null-checks on
   the fold results, and the final "return 0;" are elided here.  */
1105 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1110 /* Simplify (x op c1) op c2 as x op (c1 op c2). */
1111 if (GET_CODE (op0) == code
1112 && associative_constant_p (op1)
1113 && associative_constant_p (XEXP (op0, 1)))
1115 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1118 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1121 /* Simplify (x op c1) op (y op c2) as (x op y) op (c1 op c2). */
1122 if (GET_CODE (op0) == code
1123 && GET_CODE (op1) == code
1124 && associative_constant_p (XEXP (op0, 1))
1125 && associative_constant_p (XEXP (op1, 1)))
1127 rtx c = simplify_binary_operation (code, mode,
1128 XEXP (op0, 1), XEXP (op1, 1));
1131 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
1132 return simplify_gen_binary (code, mode, tem, c);
1135 /* Canonicalize (x op c) op y as (x op y) op c. */
1136 if (GET_CODE (op0) == code
1137 && associative_constant_p (XEXP (op0, 1)))
1139 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1140 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1143 /* Canonicalize x op (y op c) as (x op y) op c. */
1144 if (GET_CODE (op1) == code
1145 && associative_constant_p (XEXP (op1, 1)))
1147 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1148 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1154 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1155 and OP1. Return 0 if no simplification is possible.
1157 Don't use this for relational operations such as EQ or LT.
1158 Use simplify_relational_operation instead. */
1160 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1163 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
1165 unsigned int width = GET_MODE_BITSIZE (mode);
1167 rtx trueop0 = avoid_constant_pool_reference (op0);
1168 rtx trueop1 = avoid_constant_pool_reference (op1);
1170 /* Relational operations don't work here. We must know the mode
1171 of the operands in order to do the comparison correctly.
1172 Assuming a full word can give incorrect results.
1173 Consider comparing 128 with -128 in QImode. */
1175 if (GET_RTX_CLASS (code) == '<')
1178 /* Make sure the constant is second. */
1179 if (GET_RTX_CLASS (code) == 'c'
1180 && swap_commutative_operands_p (trueop0, trueop1))
1182 tem = op0, op0 = op1, op1 = tem;
1183 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
1186 if (VECTOR_MODE_P (mode)
1187 && GET_CODE (trueop0) == CONST_VECTOR
1188 && GET_CODE (trueop1) == CONST_VECTOR)
1190 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1191 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1192 enum machine_mode op0mode = GET_MODE (trueop0);
1193 int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
1194 unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
1195 enum machine_mode op1mode = GET_MODE (trueop1);
1196 int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
1197 unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
1198 rtvec v = rtvec_alloc (n_elts);
1201 if (op0_n_elts != n_elts || op1_n_elts != n_elts)
1204 for (i = 0; i < n_elts; i++)
1206 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
1207 CONST_VECTOR_ELT (trueop0, i),
1208 CONST_VECTOR_ELT (trueop1, i));
1211 RTVEC_ELT (v, i) = x;
1214 return gen_rtx_CONST_VECTOR (mode, v);
1217 if (GET_MODE_CLASS (mode) == MODE_FLOAT
1218 && GET_CODE (trueop0) == CONST_DOUBLE
1219 && GET_CODE (trueop1) == CONST_DOUBLE
1220 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
1222 REAL_VALUE_TYPE f0, f1, value;
1224 REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
1225 REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
1226 f0 = real_value_truncate (mode, f0);
1227 f1 = real_value_truncate (mode, f1);
1229 if (HONOR_SNANS (mode)
1230 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
1234 && REAL_VALUES_EQUAL (f1, dconst0)
1235 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
1238 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
1239 && flag_trapping_math
1240 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
1242 int s0 = REAL_VALUE_NEGATIVE (f0);
1243 int s1 = REAL_VALUE_NEGATIVE (f1);
1248 /* Inf + -Inf = NaN plus exception. */
1253 /* Inf - Inf = NaN plus exception. */
1258 /* Inf / Inf = NaN plus exception. */
1265 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
1266 && flag_trapping_math
1267 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
1268 || (REAL_VALUE_ISINF (f1) && REAL_VALUES_EQUAL (f0, dconst0))))
1269 /* Inf * 0 = NaN plus exception. */
1272 REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);
1274 value = real_value_truncate (mode, value);
1275 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
1278 /* We can fold some multi-word operations. */
1279 if (GET_MODE_CLASS (mode) == MODE_INT
1280 && width == HOST_BITS_PER_WIDE_INT * 2
1281 && (GET_CODE (trueop0) == CONST_DOUBLE
1282 || GET_CODE (trueop0) == CONST_INT)
1283 && (GET_CODE (trueop1) == CONST_DOUBLE
1284 || GET_CODE (trueop1) == CONST_INT))
1286 unsigned HOST_WIDE_INT l1, l2, lv;
1287 HOST_WIDE_INT h1, h2, hv;
1289 if (GET_CODE (trueop0) == CONST_DOUBLE)
1290 l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
1292 l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
1294 if (GET_CODE (trueop1) == CONST_DOUBLE)
1295 l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
1297 l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
1302 /* A - B == A + (-B). */
1303 neg_double (l2, h2, &lv, &hv);
1306 /* Fall through.... */
1309 add_double (l1, h1, l2, h2, &lv, &hv);
1313 mul_double (l1, h1, l2, h2, &lv, &hv);
1316 case DIV: case MOD: case UDIV: case UMOD:
1317 /* We'd need to include tree.h to do this and it doesn't seem worth
1322 lv = l1 & l2, hv = h1 & h2;
1326 lv = l1 | l2, hv = h1 | h2;
1330 lv = l1 ^ l2, hv = h1 ^ h2;
1336 && ((unsigned HOST_WIDE_INT) l1
1337 < (unsigned HOST_WIDE_INT) l2)))
1346 && ((unsigned HOST_WIDE_INT) l1
1347 > (unsigned HOST_WIDE_INT) l2)))
1354 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
1356 && ((unsigned HOST_WIDE_INT) l1
1357 < (unsigned HOST_WIDE_INT) l2)))
1364 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
1366 && ((unsigned HOST_WIDE_INT) l1
1367 > (unsigned HOST_WIDE_INT) l2)))
1373 case LSHIFTRT: case ASHIFTRT:
1375 case ROTATE: case ROTATERT:
1376 #ifdef SHIFT_COUNT_TRUNCATED
1377 if (SHIFT_COUNT_TRUNCATED)
1378 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
1381 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
1384 if (code == LSHIFTRT || code == ASHIFTRT)
1385 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
1387 else if (code == ASHIFT)
1388 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
1389 else if (code == ROTATE)
1390 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1391 else /* code == ROTATERT */
1392 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
1399 return immed_double_const (lv, hv, mode);
/* Fragment of simplify_binary_operation (elided listing; code untouched):
   operands are not both narrow CONST_INTs, so apply algebraic identities
   instead of constant folding.  This section handles the PLUS case.  */
1402 if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1403 || width > HOST_BITS_PER_WIDE_INT || width == 0)
1405 /* Even if we can't compute a constant result,
1406 there are some cases worth simplifying. */
1411 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1412 when x is NaN, infinite, or finite and nonzero. They aren't
1413 when x is -0 and the rounding mode is not towards -infinity,
1414 since (-0) + 0 is then 0. */
1415 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1418 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1419 transformations are safe even for IEEE. */
1420 if (GET_CODE (op0) == NEG)
1421 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1422 else if (GET_CODE (op1) == NEG)
1423 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1425 /* (~a) + 1 -> -a */
1426 if (INTEGRAL_MODE_P (mode)
1427 && GET_CODE (op0) == NOT
1428 && trueop1 == const1_rtx)
1429 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1431 /* Handle both-operands-constant cases. We can only add
1432 CONST_INTs to constants since the sum of relocatable symbols
1433 can't be handled by most assemblers. Don't add CONST_INT
1434 to CONST_INT since overflow won't be computed properly if wider
1435 than HOST_BITS_PER_WIDE_INT. */
1437 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1438 && GET_CODE (op1) == CONST_INT)
1439 return plus_constant (op0, INTVAL (op1));
1440 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1441 && GET_CODE (op0) == CONST_INT)
1442 return plus_constant (op1, INTVAL (op0));
1444 /* See if this is something like X * C - X or vice versa or
1445 if the multiplication is written as a shift. If so, we can
1446 distribute and make a new multiply, shift, or maybe just
1447 have X (if C is 2 in the example above). But don't make
1448 real multiply if we didn't have one before. */
1450 if (! FLOAT_MODE_P (mode))
/* Normalize each side to (coefficient, base-term) form: NEG gives
   coefficient -1, MULT-by-constant gives the constant, ASHIFT-by-
   constant gives a power of two.  */
1452 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1453 rtx lhs = op0, rhs = op1;
1456 if (GET_CODE (lhs) == NEG)
1457 coeff0 = -1, lhs = XEXP (lhs, 0);
1458 else if (GET_CODE (lhs) == MULT
1459 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1461 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1464 else if (GET_CODE (lhs) == ASHIFT
1465 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1466 && INTVAL (XEXP (lhs, 1)) >= 0
1467 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1469 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1470 lhs = XEXP (lhs, 0);
1473 if (GET_CODE (rhs) == NEG)
1474 coeff1 = -1, rhs = XEXP (rhs, 0);
1475 else if (GET_CODE (rhs) == MULT
1476 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1478 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1481 else if (GET_CODE (rhs) == ASHIFT
1482 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1483 && INTVAL (XEXP (rhs, 1)) >= 0
1484 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1486 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1487 rhs = XEXP (rhs, 0);
/* Same base term on both sides: combine the coefficients, but drop the
   result if it would introduce a real MULT that wasn't there before.  */
1490 if (rtx_equal_p (lhs, rhs))
1492 tem = simplify_gen_binary (MULT, mode, lhs,
1493 GEN_INT (coeff0 + coeff1));
1494 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1498 /* If one of the operands is a PLUS or a MINUS, see if we can
1499 simplify this by the associative law.
1500 Don't use the associative law for floating point.
1501 The inaccuracy makes it nonassociative,
1502 and subtle programs can break if operations are associated. */
1504 if (INTEGRAL_MODE_P (mode)
1505 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1506 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1507 || (GET_CODE (op0) == CONST
1508 && GET_CODE (XEXP (op0, 0)) == PLUS)
1509 || (GET_CODE (op1) == CONST
1510 && GET_CODE (XEXP (op1, 0)) == PLUS))
1511 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1514 /* Reassociate floating point addition only when the user
1515 specifies unsafe math optimizations. */
1516 if (FLOAT_MODE_P (mode)
1517 && flag_unsafe_math_optimizations)
1519 tem = simplify_associative_operation (code, mode, op0, op1);
/* Fragment (elided listing; code untouched): the COMPARE case.  A COMPARE
   against zero can collapse to its first operand, and a GT/LT (or
   GTU/LTU) pair over the same flags value collapses to the flags rtx.  */
1527 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1528 using cc0, in which case we want to leave it as a COMPARE
1529 so we can distinguish it from a register-register-copy.
1531 In IEEE floating point, x-0 is not the same as x. */
1533 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1534 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1535 && trueop1 == CONST0_RTX (mode))
1539 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1540 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1541 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1542 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1544 rtx xop00 = XEXP (op0, 0);
1545 rtx xop10 = XEXP (op1, 0);
/* Both comparisons must test the same flags source: either cc0, or the
   identical condition-code register (same regno, same MODE_CC mode).  */
1548 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1550 if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1551 && GET_MODE (xop00) == GET_MODE (xop10)
1552 && REGNO (xop00) == REGNO (xop10)
1553 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1554 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
/* Fragment (elided listing; code untouched): the MINUS case.  */
1561 /* We can't assume x-x is 0 even with non-IEEE floating point,
1562 but since it is zero except in very strange circumstances, we
1563 will treat it as zero with -funsafe-math-optimizations. */
1564 if (rtx_equal_p (trueop0, trueop1)
1565 && ! side_effects_p (op0)
1566 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1567 return CONST0_RTX (mode);
1569 /* Change subtraction from zero into negation. (0 - x) is the
1570 same as -x when x is NaN, infinite, or finite and nonzero.
1571 But if the mode has signed zeros, and does not round towards
1572 -infinity, then 0 - 0 is 0, not -0. */
1573 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1574 return simplify_gen_unary (NEG, mode, op1, mode);
1576 /* (-1 - a) is ~a. */
1577 if (trueop0 == constm1_rtx)
1578 return simplify_gen_unary (NOT, mode, op1, mode);
1580 /* Subtracting 0 has no effect unless the mode has signed zeros
1581 and supports rounding towards -infinity. In such a case,
1583 if (!(HONOR_SIGNED_ZEROS (mode)
1584 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1585 && trueop1 == CONST0_RTX (mode))
1588 /* See if this is something like X * C - X or vice versa or
1589 if the multiplication is written as a shift. If so, we can
1590 distribute and make a new multiply, shift, or maybe just
1591 have X (if C is 2 in the example above). But don't make
1592 real multiply if we didn't have one before. */
1594 if (! FLOAT_MODE_P (mode))
/* Coefficient extraction mirrors the PLUS case above, but the combined
   coefficient is coeff0 - coeff1.  */
1596 HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1597 rtx lhs = op0, rhs = op1;
1600 if (GET_CODE (lhs) == NEG)
1601 coeff0 = -1, lhs = XEXP (lhs, 0);
1602 else if (GET_CODE (lhs) == MULT
1603 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1605 coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1608 else if (GET_CODE (lhs) == ASHIFT
1609 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1610 && INTVAL (XEXP (lhs, 1)) >= 0
1611 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1613 coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1614 lhs = XEXP (lhs, 0);
1617 if (GET_CODE (rhs) == NEG)
1618 coeff1 = - 1, rhs = XEXP (rhs, 0);
1619 else if (GET_CODE (rhs) == MULT
1620 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1622 coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1625 else if (GET_CODE (rhs) == ASHIFT
1626 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1627 && INTVAL (XEXP (rhs, 1)) >= 0
1628 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1630 coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1631 rhs = XEXP (rhs, 0);
1634 if (rtx_equal_p (lhs, rhs))
1636 tem = simplify_gen_binary (MULT, mode, lhs,
1637 GEN_INT (coeff0 - coeff1));
1638 return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1642 /* (a - (-b)) -> (a + b). True even for IEEE. */
1643 if (GET_CODE (op1) == NEG)
1644 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1646 /* (-x - c) may be simplified as (-c - x). */
1647 if (GET_CODE (op0) == NEG
1648 && (GET_CODE (op1) == CONST_INT
1649 || GET_CODE (op1) == CONST_DOUBLE))
1651 tem = simplify_unary_operation (NEG, mode, op1, mode);
1653 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1656 /* If one of the operands is a PLUS or a MINUS, see if we can
1657 simplify this by the associative law.
1658 Don't use the associative law for floating point.
1659 The inaccuracy makes it nonassociative,
1660 and subtle programs can break if operations are associated. */
1662 if (INTEGRAL_MODE_P (mode)
1663 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1664 || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1665 || (GET_CODE (op0) == CONST
1666 && GET_CODE (XEXP (op0, 0)) == PLUS)
1667 || (GET_CODE (op1) == CONST
1668 && GET_CODE (XEXP (op1, 0)) == PLUS))
1669 && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
/* Rewrite X - C as X + (-C) so a relocatable X never carries a
   negative coefficient.  */
1672 /* Don't let a relocatable value get a negative coeff. */
1673 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1674 return simplify_gen_binary (PLUS, mode,
1676 neg_const_int (mode, op1));
1678 /* (x - (x & y)) -> (x & ~y) */
1679 if (GET_CODE (op1) == AND)
1681 if (rtx_equal_p (op0, XEXP (op1, 0)))
1683 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1684 GET_MODE (XEXP (op1, 1)));
1685 return simplify_gen_binary (AND, mode, op0, tem);
1687 if (rtx_equal_p (op0, XEXP (op1, 1)))
1689 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1690 GET_MODE (XEXP (op1, 0)));
1691 return simplify_gen_binary (AND, mode, op0, tem);
/* Fragment (elided listing; code untouched): the MULT case.  */
/* x * -1 is -x.  */
1697 if (trueop1 == constm1_rtx)
1698 return simplify_gen_unary (NEG, mode, op0, mode);
1700 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1701 x is NaN, since x * 0 is then also NaN. Nor is it valid
1702 when the mode has signed zeros, since multiplying a negative
1703 number by 0 will give -0, not 0. */
1704 if (!HONOR_NANS (mode)
1705 && !HONOR_SIGNED_ZEROS (mode)
1706 && trueop1 == CONST0_RTX (mode)
1707 && ! side_effects_p (op0))
1710 /* In IEEE floating point, x*1 is not equivalent to x for
1712 if (!HONOR_SNANS (mode)
1713 && trueop1 == CONST1_RTX (mode))
1716 /* Convert multiply by constant power of two into shift unless
1717 we are still generating RTL. This test is a kludge. */
1718 if (GET_CODE (trueop1) == CONST_INT
1719 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1720 /* If the mode is larger than the host word size, and the
1721 uppermost bit is set, then this isn't a power of two due
1722 to implicit sign extension. */
1723 && (width <= HOST_BITS_PER_WIDE_INT
1724 || val != HOST_BITS_PER_WIDE_INT - 1)
1725 && ! rtx_equal_function_value_matters)
1726 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1728 /* x*2 is x+x and x*(-1) is -x */
1729 if (GET_CODE (trueop1) == CONST_DOUBLE
1730 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1731 && GET_MODE (op0) == mode)
1734 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1736 if (REAL_VALUES_EQUAL (d, dconst2))
1737 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1739 if (REAL_VALUES_EQUAL (d, dconstm1))
1740 return simplify_gen_unary (NEG, mode, op0, mode);
1743 /* Reassociate multiplication, but for floating point MULTs
1744 only when the user specifies unsafe math optimizations. */
1745 if (! FLOAT_MODE_P (mode)
1746 || flag_unsafe_math_optimizations)
1748 tem = simplify_associative_operation (code, mode, op0, op1);
/* Fragment (elided listing; most case labels are missing, so the case
   boundaries noted below are inferred from the original line numbers and
   the identities being applied -- confirm against the full file).
   Covers the bitwise, division/modulus, rotate/shift and min/max cases.
   Code bytes are untouched.  */
/* IOR: x|0 = x; x|~0 = ~0; x|x = x; x | ~x = -1 (non-CC modes only).  */
1755 if (trueop1 == const0_rtx)
1757 if (GET_CODE (trueop1) == CONST_INT
1758 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1759 == GET_MODE_MASK (mode)))
1761 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1763 /* A | (~A) -> -1 */
1764 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1765 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1766 && ! side_effects_p (op0)
1767 && GET_MODE_CLASS (mode) != MODE_CC)
1769 tem = simplify_associative_operation (code, mode, op0, op1);
/* XOR: x^0 = x; x^~0 = ~x; x^x = 0 (non-CC modes only).  */
1775 if (trueop1 == const0_rtx)
1777 if (GET_CODE (trueop1) == CONST_INT
1778 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1779 == GET_MODE_MASK (mode)))
1780 return simplify_gen_unary (NOT, mode, op0, mode);
1781 if (trueop0 == trueop1 && ! side_effects_p (op0)
1782 && GET_MODE_CLASS (mode) != MODE_CC)
1784 tem = simplify_associative_operation (code, mode, op0, op1);
/* AND: x&0 = 0; x&~0 = x; x&x = x; x & ~x = 0.  */
1790 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1792 if (GET_CODE (trueop1) == CONST_INT
1793 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1794 == GET_MODE_MASK (mode)))
1796 if (trueop0 == trueop1 && ! side_effects_p (op0)
1797 && GET_MODE_CLASS (mode) != MODE_CC)
1800 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1801 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1802 && ! side_effects_p (op0)
1803 && GET_MODE_CLASS (mode) != MODE_CC)
1805 tem = simplify_associative_operation (code, mode, op0, op1);
/* UDIV: unsigned divide by a power of two becomes a logical right
   shift; then shares the divide-by-one logic with DIV below.  */
1811 /* Convert divide by power of two into shift (divide by 1 handled
1813 if (GET_CODE (trueop1) == CONST_INT
1814 && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1815 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
1817 /* Fall through.... */
/* DIV: x/1 = x (adjusting for a possibly narrower DIV mode).  */
1820 if (trueop1 == CONST1_RTX (mode))
1822 /* On some platforms DIV uses narrower mode than its
1824 rtx x = gen_lowpart_common (mode, op0);
1827 else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1828 return gen_lowpart_SUBREG (mode, op0);
1833 /* Maybe change 0 / x to 0. This transformation isn't safe for
1834 modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1835 Nor is it safe for modes with signed zeros, since dividing
1836 0 by a negative number gives -0, not 0. */
1837 if (!HONOR_NANS (mode)
1838 && !HONOR_SIGNED_ZEROS (mode)
1839 && trueop0 == CONST0_RTX (mode)
1840 && ! side_effects_p (op1))
1843 /* Change division by a constant into multiplication. Only do
1844 this with -funsafe-math-optimizations. */
1845 else if (GET_CODE (trueop1) == CONST_DOUBLE
1846 && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1847 && trueop1 != CONST0_RTX (mode)
1848 && flag_unsafe_math_optimizations)
1851 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1853 if (! REAL_VALUES_EQUAL (d, dconst0))
1855 REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1856 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1857 return simplify_gen_binary (MULT, mode, op0, tem);
/* UMOD: unsigned modulus by a power of two becomes a mask.  */
1863 /* Handle modulus by power of two (mod with 1 handled below). */
1864 if (GET_CODE (trueop1) == CONST_INT
1865 && exact_log2 (INTVAL (trueop1)) > 0)
1866 return simplify_gen_binary (AND, mode, op0,
1867 GEN_INT (INTVAL (op1) - 1));
1869 /* Fall through.... */
/* MOD: 0 % x and x % 1 are 0 when side effects allow.  */
1872 if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1873 && ! side_effects_p (op0) && ! side_effects_p (op1))
/* ROTATERT/ROTATE: rotating an all-ones value changes nothing.  */
1880 /* Rotating ~0 always results in ~0. */
1881 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1882 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1883 && ! side_effects_p (op1))
1886 /* Fall through.... */
/* Shifts/rotates by zero, and shifts of zero, are trivial.  */
1890 if (trueop1 == const0_rtx)
1892 if (trueop0 == const0_rtx && ! side_effects_p (op1))
/* SMIN: min(x, INT_MIN) is INT_MIN; min(x, x) is x.  */
1897 if (width <= HOST_BITS_PER_WIDE_INT
1898 && GET_CODE (trueop1) == CONST_INT
1899 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
1900 && ! side_effects_p (op0))
1902 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1904 tem = simplify_associative_operation (code, mode, op0, op1);
/* SMAX: max(x, INT_MAX) is INT_MAX; max(x, x) is x.  */
1910 if (width <= HOST_BITS_PER_WIDE_INT
1911 && GET_CODE (trueop1) == CONST_INT
1912 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1913 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1914 && ! side_effects_p (op0))
1916 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1918 tem = simplify_associative_operation (code, mode, op0, op1);
/* UMIN: umin(x, 0) is 0; umin(x, x) is x.  */
1924 if (trueop1 == const0_rtx && ! side_effects_p (op0))
1926 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1928 tem = simplify_associative_operation (code, mode, op0, op1);
/* UMAX: umax(x, ~0) is ~0; umax(x, x) is x.  */
1934 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1936 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1938 tem = simplify_associative_operation (code, mode, op0, op1);
/* Fragment (elided listing; code untouched): saturating arithmetic is
   left unsimplified, then the vector cases VEC_SELECT and VEC_CONCAT.  */
1947 /* ??? There are simplifications that can be done. */
/* VEC_SELECT yielding a scalar: operand 1 must be a one-element PARALLEL
   of CONST_INTs selecting an element whose mode matches the result.  */
1951 if (!VECTOR_MODE_P (mode))
1953 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1955 != GET_MODE_INNER (GET_MODE (trueop0)))
1956 || GET_CODE (trueop1) != PARALLEL
1957 || XVECLEN (trueop1, 0) != 1
1958 || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT
1961 if (GET_CODE (trueop0) == CONST_VECTOR)
1962 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
/* VEC_SELECT yielding a vector: fold a selection out of a constant
   vector by gathering the indexed elements.  */
1966 if (!VECTOR_MODE_P (GET_MODE (trueop0))
1967 || (GET_MODE_INNER (mode)
1968 != GET_MODE_INNER (GET_MODE (trueop0)))
1969 || GET_CODE (trueop1) != PARALLEL)
1972 if (GET_CODE (trueop0) == CONST_VECTOR)
1974 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1975 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1976 rtvec v = rtvec_alloc (n_elts);
1979 if (XVECLEN (trueop1, 0) != (int) n_elts)
1981 for (i = 0; i < n_elts; i++)
1983 rtx x = XVECEXP (trueop1, 0, i);
1985 if (GET_CODE (x) != CONST_INT)
1987 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
1990 return gen_rtx_CONST_VECTOR (mode, v);
/* VEC_CONCAT: each operand may itself be a vector or a scalar; a
   VOIDmode constant operand takes the result's inner mode.  */
1996 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
1997 ? GET_MODE (trueop0)
1998 : GET_MODE_INNER (mode));
1999 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2000 ? GET_MODE (trueop1)
2001 : GET_MODE_INNER (mode));
/* Sanity checks: operand sizes must tile the result exactly, and inner
   (element) modes must agree with the result's inner mode.  */
2003 if (!VECTOR_MODE_P (mode)
2004 || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2005 != GET_MODE_SIZE (mode)))
2008 if ((VECTOR_MODE_P (op0_mode)
2009 && (GET_MODE_INNER (mode)
2010 != GET_MODE_INNER (op0_mode)))
2011 || (!VECTOR_MODE_P (op0_mode)
2012 && GET_MODE_INNER (mode) != op0_mode))
2015 if ((VECTOR_MODE_P (op1_mode)
2016 && (GET_MODE_INNER (mode)
2017 != GET_MODE_INNER (op1_mode)))
2018 || (!VECTOR_MODE_P (op1_mode)
2019 && GET_MODE_INNER (mode) != op1_mode))
/* Both operands constant: build the concatenated constant vector,
   copying elements from op0 first and then op1.  */
2022 if ((GET_CODE (trueop0) == CONST_VECTOR
2023 || GET_CODE (trueop0) == CONST_INT
2024 || GET_CODE (trueop0) == CONST_DOUBLE)
2025 && (GET_CODE (trueop1) == CONST_VECTOR
2026 || GET_CODE (trueop1) == CONST_INT
2027 || GET_CODE (trueop1) == CONST_DOUBLE))
2029 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2030 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2031 rtvec v = rtvec_alloc (n_elts);
2033 unsigned in_n_elts = 1;
2035 if (VECTOR_MODE_P (op0_mode))
2036 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2037 for (i = 0; i < n_elts; i++)
2041 if (!VECTOR_MODE_P (op0_mode))
2042 RTVEC_ELT (v, i) = trueop0;
2044 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2048 if (!VECTOR_MODE_P (op1_mode))
2049 RTVEC_ELT (v, i) = trueop1;
2051 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2056 return gen_rtx_CONST_VECTOR (mode, v);
/* Fragment (elided listing; code untouched): both operands are CONST_INTs
   that fit in a host wide int, so compute the result directly.  ARG0/ARG1
   hold zero-extended values, ARG0S/ARG1S sign-extended ones.  */
2068 /* Get the integer argument values in two forms:
2069 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
2071 arg0 = INTVAL (trueop0);
2072 arg1 = INTVAL (trueop1);
2074 if (width < HOST_BITS_PER_WIDE_INT)
2076 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2077 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2080 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2081 arg0s |= ((HOST_WIDE_INT) (-1) << width);
2084 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2085 arg1s |= ((HOST_WIDE_INT) (-1) << width);
2093 /* Compute the value of the arithmetic. */
2098 val = arg0s + arg1s;
2102 val = arg0s - arg1s;
2106 val = arg0s * arg1s;
/* Signed and unsigned divide/modulus; the elided guards reject division
   by zero and the INT_MIN / -1 overflow case.  */
2111 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2114 val = arg0s / arg1s;
2119 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2122 val = arg0s % arg1s;
2127 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2130 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2135 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2138 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
/* Shifts: an out-of-range count is left for the target to decide,
   unless SHIFT_COUNT_TRUNCATED says to mask it first.  */
2154 /* If shift count is undefined, don't fold it; let the machine do
2155 what it wants. But truncate it if the machine will do that. */
2159 #ifdef SHIFT_COUNT_TRUNCATED
2160 if (SHIFT_COUNT_TRUNCATED)
2164 val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
2171 #ifdef SHIFT_COUNT_TRUNCATED
2172 if (SHIFT_COUNT_TRUNCATED)
2176 val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
2183 #ifdef SHIFT_COUNT_TRUNCATED
2184 if (SHIFT_COUNT_TRUNCATED)
2188 val = arg0s >> arg1;
2190 /* Bootstrap compiler may not have sign extended the right shift.
2191 Manually extend the sign to insure bootstrap cc matches gcc. */
2192 if (arg0s < 0 && arg1 > 0)
2193 val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
/* Rotates combine two shifts within the mode width.  */
2202 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2203 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2211 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2212 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2216 /* Do nothing here. */
/* Min/max pick between the sign- or zero-extended values.  */
2220 val = arg0s <= arg1s ? arg0s : arg1s;
2224 val = ((unsigned HOST_WIDE_INT) arg0
2225 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2229 val = arg0s > arg1s ? arg0s : arg1s;
2233 val = ((unsigned HOST_WIDE_INT) arg0
2234 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2241 /* ??? There are simplifications that can be done. */
/* Truncate to the target mode and wrap the result in a CONST_INT.  */
2248 val = trunc_int_for_mode (val, mode);
2250 return GEN_INT (val);
2253 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
2256 Rather than test for specific case, we do this by a brute-force method
2257 and do all possible simplifications until no more changes occur. Then
2258 we rebuild the operation.
2260 If FORCE is true, then always generate the rtx. This is used to
2261 canonicalize stuff emitted from simplify_gen_binary. Note that this
2262 can still fail if the rtx is too complex. It won't fail just because
2263 the result is not 'simpler' than the input, however. */
/* Operand record used by simplify_plus_minus: an rtx plus a negation
   flag (struct body elided from this listing).  */
2265 struct simplify_plus_minus_op_data
/* qsort comparator: order operands by descending
   commutative_operand_precedence, so higher-precedence operands sort
   first.  */
2272 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2274 const struct simplify_plus_minus_op_data *d1 = p1;
2275 const struct simplify_plus_minus_op_data *d2 = p2;
2277 return (commutative_operand_precedence (d2->op)
2278 - commutative_operand_precedence (d1->op));
/* Flatten a PLUS/MINUS tree into an array of up to 8 (operand, neg)
   records, simplify operand pairs until no change, then rebuild the
   expression.  (Elided listing: several lines, including the function's
   closing lines, are missing; code bytes are untouched.)  */
2282 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2285 struct simplify_plus_minus_op_data ops[8];
2287 int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2291 memset (ops, 0, sizeof ops);
2293 /* Set up the two operands and then expand them until nothing has been
2294 changed. If we run out of room in our array, give up; this should
2295 almost never happen. */
2300 ops[1].neg = (code == MINUS);
2306 for (i = 0; i < n_ops; i++)
2308 rtx this_op = ops[i].op;
2309 int this_neg = ops[i].neg;
2310 enum rtx_code this_code = GET_CODE (this_op);
/* PLUS/MINUS: split into two entries, flipping neg for the second
   operand of a MINUS.  */
2319 ops[n_ops].op = XEXP (this_op, 1);
2320 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2323 ops[i].op = XEXP (this_op, 0);
/* NEG: strip it and invert the entry's negation flag.  */
2329 ops[i].op = XEXP (this_op, 0);
2330 ops[i].neg = ! this_neg;
/* CONST wrapping (plus A B) with both halves constant: split it.  */
2336 && GET_CODE (XEXP (this_op, 0)) == PLUS
2337 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2338 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2340 ops[i].op = XEXP (XEXP (this_op, 0), 0);
2341 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2342 ops[n_ops].neg = this_neg;
2350 /* ~a -> (-a - 1) */
2353 ops[n_ops].op = constm1_rtx;
2354 ops[n_ops++].neg = this_neg;
2355 ops[i].op = XEXP (this_op, 0);
2356 ops[i].neg = !this_neg;
/* Negated CONST_INT: fold the negation into the constant itself.  */
2364 ops[i].op = neg_const_int (mode, this_op);
2377 /* If we only have two operands, we can't do anything. */
2378 if (n_ops <= 2 && !force)
2381 /* Count the number of CONSTs we didn't split above. */
2382 for (i = 0; i < n_ops; i++)
2383 if (GET_CODE (ops[i].op) == CONST)
2386 /* Now simplify each pair of operands until nothing changes. The first
2387 time through just simplify constants against each other. */
2394 for (i = 0; i < n_ops - 1; i++)
2395 for (j = i + 1; j < n_ops; j++)
2397 rtx lhs = ops[i].op, rhs = ops[j].op;
2398 int lneg = ops[i].neg, rneg = ops[j].neg;
2400 if (lhs != 0 && rhs != 0
2401 && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2403 enum rtx_code ncode = PLUS;
2409 tem = lhs, lhs = rhs, rhs = tem;
2411 else if (swap_commutative_operands_p (lhs, rhs))
2412 tem = lhs, lhs = rhs, rhs = tem;
2414 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2416 /* Reject "simplifications" that just wrap the two
2417 arguments in a CONST. Failure to do so can result
2418 in infinite recursion with simplify_binary_operation
2419 when it calls us to simplify CONST operations. */
2421 && ! (GET_CODE (tem) == CONST
2422 && GET_CODE (XEXP (tem, 0)) == ncode
2423 && XEXP (XEXP (tem, 0), 0) == lhs
2424 && XEXP (XEXP (tem, 0), 1) == rhs)
2425 /* Don't allow -x + -1 -> ~x simplifications in the
2426 first pass. This allows us the chance to combine
2427 the -1 with other constants. */
2429 && GET_CODE (tem) == NOT
2430 && XEXP (tem, 0) == rhs))
/* Normalize the combined result: pull a NEG into the flag, and fold
   a negated CONST_INT into the constant.  */
2433 if (GET_CODE (tem) == NEG)
2434 tem = XEXP (tem, 0), lneg = !lneg;
2435 if (GET_CODE (tem) == CONST_INT && lneg)
2436 tem = neg_const_int (mode, tem), lneg = 0;
2440 ops[j].op = NULL_RTX;
2450 /* Pack all the operands to the lower-numbered entries. */
2451 for (i = 0, j = 0; j < n_ops; j++)
2456 /* Sort the operations based on swap_commutative_operands_p. */
2457 qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2459 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2461 && GET_CODE (ops[1].op) == CONST_INT
2462 && CONSTANT_P (ops[0].op)
2464 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2466 /* We suppressed creation of trivial CONST expressions in the
2467 combination loop to avoid recursion. Create one manually now.
2468 The combination loop should have ensured that there is exactly
2469 one CONST_INT, and the sort will have ensured that it is last
2470 in the array and that any other constant will be next-to-last. */
2473 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2474 && CONSTANT_P (ops[n_ops - 2].op))
2476 rtx value = ops[n_ops - 1].op;
2477 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2478 value = neg_const_int (mode, value);
2479 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2483 /* Count the number of CONSTs that we generated. */
2485 for (i = 0; i < n_ops; i++)
2486 if (GET_CODE (ops[i].op) == CONST)
2489 /* Give up if we didn't reduce the number of operands we had. Make
2490 sure we count a CONST as two operands. If we have the same
2491 number of operands, but have made more CONSTs than before, this
2492 is also an improvement, so accept it. */
2494 && (n_ops + n_consts > input_ops
2495 || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2498 /* Put a non-negated operand first, if possible. */
2500 for (i = 0; i < n_ops && ops[i].neg; i++)
2503 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2512 /* Now make the result by performing the requested operations. */
2514 for (i = 1; i < n_ops; i++)
2515 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2516 mode, result, ops[i].op);
2521 /* Like simplify_binary_operation except used for relational operators.
2522 MODE is the mode of the operands, not that of the result. If MODE
2523 is VOIDmode, both operands must also be VOIDmode and we compare the
2524 operands in "infinite precision".
2526 If no simplification is possible, this function returns zero. Otherwise,
2527 it returns either const_true_rtx or const0_rtx. */
/* Simplify a relational operation (EQ, LT, GEU, ...) on OP0/OP1 in MODE;
   returns const_true_rtx, const0_rtx, or 0 if nothing is known.  It
   establishes the flags EQUAL, OP0LT, OP0LTU, OP1LT, OP1LTU and decodes
   them at the end per CODE.  (Elided listing: several lines, including
   the closing lines, are missing; code bytes are untouched.)  */
2530 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2533 int equal, op0lt, op0ltu, op1lt, op1ltu;
/* VOIDmode comparison requires both operands VOIDmode ("infinite
   precision" constants).  */
2538 if (mode == VOIDmode
2539 && (GET_MODE (op0) != VOIDmode
2540 || GET_MODE (op1) != VOIDmode))
2543 /* If op0 is a compare, extract the comparison arguments from it. */
2544 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2545 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2547 trueop0 = avoid_constant_pool_reference (op0);
2548 trueop1 = avoid_constant_pool_reference (op1);
2550 /* We can't simplify MODE_CC values since we don't know what the
2551 actual comparison is. */
2552 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2555 /* Make sure the constant is second. */
2556 if (swap_commutative_operands_p (trueop0, trueop1))
2558 tem = op0, op0 = op1, op1 = tem;
2559 tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2560 code = swap_condition (code);
2563 /* For integer comparisons of A and B maybe we can simplify A - B and can
2564 then simplify a comparison of that with zero. If A and B are both either
2565 a register or a CONST_INT, this can't help; testing for these cases will
2566 prevent infinite recursion here and speed things up.
2568 If CODE is an unsigned comparison, then we can never do this optimization,
2569 because it gives an incorrect result if the subtraction wraps around zero.
2570 ANSI C defines unsigned operations such that they never overflow, and
2571 thus such cases can not be ignored. */
2573 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2574 && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2575 && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2576 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2577 /* We cannot do this for == or != if tem is a nonzero address. */
2578 && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2579 && code != GTU && code != GEU && code != LTU && code != LEU)
2580 return simplify_relational_operation (signed_condition (code),
2581 mode, tem, const0_rtx);
/* With -funsafe-math-optimizations, NaNs are assumed away, so ORDERED
   is always true and UNORDERED always false.  */
2583 if (flag_unsafe_math_optimizations && code == ORDERED)
2584 return const_true_rtx;
2586 if (flag_unsafe_math_optimizations && code == UNORDERED)
2589 /* For modes without NaNs, if the two operands are equal, we know the
2590 result except if they have side-effects. */
2591 if (! HONOR_NANS (GET_MODE (trueop0))
2592 && rtx_equal_p (trueop0, trueop1)
2593 && ! side_effects_p (trueop0))
2594 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2596 /* If the operands are floating-point constants, see if we can fold
2598 else if (GET_CODE (trueop0) == CONST_DOUBLE
2599 && GET_CODE (trueop1) == CONST_DOUBLE
2600 && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2602 REAL_VALUE_TYPE d0, d1;
2604 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2605 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2607 /* Comparisons are unordered iff at least one of the values is NaN. */
2608 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2618 return const_true_rtx;
2631 equal = REAL_VALUES_EQUAL (d0, d1);
2632 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2633 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2636 /* Otherwise, see if the operands are both integers. */
2637 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2638 && (GET_CODE (trueop0) == CONST_DOUBLE
2639 || GET_CODE (trueop0) == CONST_INT)
2640 && (GET_CODE (trueop1) == CONST_DOUBLE
2641 || GET_CODE (trueop1) == CONST_INT))
2643 int width = GET_MODE_BITSIZE (mode);
2644 HOST_WIDE_INT l0s, h0s, l1s, h1s;
2645 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2647 /* Get the two words comprising each integer constant. */
2648 if (GET_CODE (trueop0) == CONST_DOUBLE)
2650 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2651 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2655 l0u = l0s = INTVAL (trueop0);
2656 h0u = h0s = HWI_SIGN_EXTEND (l0s);
2659 if (GET_CODE (trueop1) == CONST_DOUBLE)
2661 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2662 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2666 l1u = l1s = INTVAL (trueop1);
2667 h1u = h1s = HWI_SIGN_EXTEND (l1s);
2670 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2671 we have to sign or zero-extend the values. */
2672 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2674 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2675 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2677 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2678 l0s |= ((HOST_WIDE_INT) (-1) << width);
2680 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2681 l1s |= ((HOST_WIDE_INT) (-1) << width);
2683 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2684 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
/* Double-word comparison: high words decide; low words break ties
   (always compared unsigned).  */
2686 equal = (h0u == h1u && l0u == l1u);
2687 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2688 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2689 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2690 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2693 /* Otherwise, there are some code-specific tests we can make. */
/* EQ/NE against 0 when op0 is a known-nonzero address.  */
2699 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2704 if (trueop1 == const0_rtx && nonzero_address_p (op0))
2705 return const_true_rtx;
2709 /* Unsigned values are never negative. */
2710 if (trueop1 == const0_rtx)
2711 return const_true_rtx;
2715 if (trueop1 == const0_rtx)
2720 /* Unsigned values are never greater than the largest
2722 if (GET_CODE (trueop1) == CONST_INT
2723 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2724 && INTEGRAL_MODE_P (mode))
2725 return const_true_rtx;
2729 if (GET_CODE (trueop1) == CONST_INT
2730 && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2731 && INTEGRAL_MODE_P (mode))
2736 /* Optimize abs(x) < 0.0. */
2737 if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2739 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2741 if (GET_CODE (tem) == ABS)
2747 /* Optimize abs(x) >= 0.0. */
2748 if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2750 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2752 if (GET_CODE (tem) == ABS)
2753 return const_true_rtx;
2758 /* Optimize ! (abs(x) < 0.0). */
2759 if (trueop1 == CONST0_RTX (mode))
2761 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2763 if (GET_CODE (tem) == ABS)
2764 return const_true_rtx;
2775 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
/* Decode the five ordering flags into the final truth value per CODE.  */
2781 return equal ? const_true_rtx : const0_rtx;
2784 return ! equal ? const_true_rtx : const0_rtx;
2787 return op0lt ? const_true_rtx : const0_rtx;
2790 return op1lt ? const_true_rtx : const0_rtx;
2792 return op0ltu ? const_true_rtx : const0_rtx;
2794 return op1ltu ? const_true_rtx : const0_rtx;
2797 return equal || op0lt ? const_true_rtx : const0_rtx;
2800 return equal || op1lt ? const_true_rtx : const0_rtx;
2802 return equal || op0ltu ? const_true_rtx : const0_rtx;
2804 return equal || op1ltu ? const_true_rtx : const0_rtx;
2806 return const_true_rtx;
2814 /* Simplify CODE, an operation with result mode MODE and three operands,
2815 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
2816 a constant. Return 0 if no simplification is possible. */
2819 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2820 enum machine_mode op0_mode, rtx op0, rtx op1,
2823   unsigned int width = GET_MODE_BITSIZE (mode);
2825   /* VOIDmode means "infinite" precision. */
2827     width = HOST_BITS_PER_WIDE_INT;
      /* SIGN_EXTRACT / ZERO_EXTRACT of a constant: OP0 is the source,
	 OP1 the field width in bits, OP2 the starting bit position.  */
2833       if (GET_CODE (op0) == CONST_INT
2834 && GET_CODE (op1) == CONST_INT
2835 && GET_CODE (op2) == CONST_INT
2836 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2837 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2839 /* Extracting a bit-field from a constant */
2840 HOST_WIDE_INT val = INTVAL (op0);
	  /* With big-endian bit numbering OP2 counts from the most
	     significant end, so convert it into a right-shift amount.  */
2842 if (BITS_BIG_ENDIAN)
2843 val >>= (GET_MODE_BITSIZE (op0_mode)
2844 - INTVAL (op2) - INTVAL (op1));
2846 val >>= INTVAL (op2);
2848 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2850 /* First zero-extend. */
2851 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2852 /* If desired, propagate sign bit. */
2853 if (code == SIGN_EXTRACT
2854 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2855 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2858 /* Clear the bits that don't belong in our mode,
2859 unless they and our sign bit are all one.
2860 So we get either a reasonable negative value or a reasonable
2861 unsigned value for this mode. */
2862 if (width < HOST_BITS_PER_WIDE_INT
2863 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2864 != ((HOST_WIDE_INT) (-1) << (width - 1))))
2865 val &= ((HOST_WIDE_INT) 1 << width) - 1;
2867 return GEN_INT (val);
      /* IF_THEN_ELSE with a constant condition folds to one arm.  */
2872       if (GET_CODE (op0) == CONST_INT)
2873 return op0 != const0_rtx ? op1 : op2;
2875       /* Convert c ? a : a into "a". */
2876       if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
2879       /* Convert a != b ? a : b into "a". */
      /* Only valid when NaNs and signed zeros cannot distinguish the
	 two arms.  */
2880       if (GET_CODE (op0) == NE
2881 && ! side_effects_p (op0)
2882 && ! HONOR_NANS (mode)
2883 && ! HONOR_SIGNED_ZEROS (mode)
2884 && ((rtx_equal_p (XEXP (op0, 0), op1)
2885 && rtx_equal_p (XEXP (op0, 1), op2))
2886 || (rtx_equal_p (XEXP (op0, 0), op2)
2887 && rtx_equal_p (XEXP (op0, 1), op1))))
2890       /* Convert a == b ? a : b into "b". */
2891       if (GET_CODE (op0) == EQ
2892 && ! side_effects_p (op0)
2893 && ! HONOR_NANS (mode)
2894 && ! HONOR_SIGNED_ZEROS (mode)
2895 && ((rtx_equal_p (XEXP (op0, 0), op1)
2896 && rtx_equal_p (XEXP (op0, 1), op2))
2897 || (rtx_equal_p (XEXP (op0, 0), op2)
2898 && rtx_equal_p (XEXP (op0, 1), op1))))
      /* If the condition is itself a comparison, try folding it first.  */
2901       if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2903 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2904 ? GET_MODE (XEXP (op0, 1))
2905 : GET_MODE (XEXP (op0, 0)));
2907 if (cmp_mode == VOIDmode)
2908 cmp_mode = op0_mode;
2909 temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2910 XEXP (op0, 0), XEXP (op0, 1));
2912 /* See if any simplifications were possible. */
2913 if (temp == const0_rtx)
2915 else if (temp == const_true_rtx)
2920 /* Look for happy constants in op1 and op2. */
2921 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2923 HOST_WIDE_INT t = INTVAL (op1);
2924 HOST_WIDE_INT f = INTVAL (op2);
	      /* cond ? STORE_FLAG_VALUE : 0 is just the comparison itself;
		 the swapped pattern requires the reversed comparison code.  */
2926 if (t == STORE_FLAG_VALUE && f == 0)
2927 code = GET_CODE (op0);
2928 else if (t == 0 && f == STORE_FLAG_VALUE)
2931 tmp = reversed_comparison_code (op0, NULL_RTX);
2939 return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
      /* VEC_MERGE: both value operands must already be in MODE.  */
2945       if (GET_MODE (op0) != mode
2946 || GET_MODE (op1) != mode
2947 || !VECTOR_MODE_P (mode))
2949       op2 = avoid_constant_pool_reference (op2);
2950       if (GET_CODE (op2) == CONST_INT)
2952 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2953 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2954 int mask = (1 << n_elts) - 1;
	  /* An all-zero or all-one element selector picks one operand
	     outright.  */
2956 if (!(INTVAL (op2) & mask))
2958 if ((INTVAL (op2) & mask) == mask)
2961 op0 = avoid_constant_pool_reference (op0);
2962 op1 = avoid_constant_pool_reference (op1);
2963 if (GET_CODE (op0) == CONST_VECTOR
2964 && GET_CODE (op1) == CONST_VECTOR)
2966 rtvec v = rtvec_alloc (n_elts);
2969 for (i = 0; i < n_elts; i++)
		/* Bit I of the selector set means take element I from OP0,
		   clear means take it from OP1.  */
2970 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
2971 ? CONST_VECTOR_ELT (op0, i)
2972 : CONST_VECTOR_ELT (op1, i));
2973 return gen_rtx_CONST_VECTOR (mode, v);
2985 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
2986 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
2988 Works by unpacking OP into a collection of 8-bit values
2989 represented as a little-endian array of 'unsigned char', selecting by BYTE,
2990 and then repacking them again for OUTERMODE. */
2993 simplify_immed_subreg (enum machine_mode outermode, rtx op,
2994 enum machine_mode innermode, unsigned int byte)
2996   /* We support up to 512-bit values (for V8DFmode). */
3000 value_mask = (1 << value_bit) - 1
3002   unsigned char value[max_bitsize / value_bit];
3011   rtvec result_v = NULL;
3012   enum mode_class outer_class;
3013   enum machine_mode outer_submode;
3015   /* Some ports misuse CCmode. */
3016   if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3019   /* Unpack the value. */
  /* A CONST_VECTOR is decomposed element by element; a scalar constant
     is treated as a single element of the maximum supported size.  */
3021   if (GET_CODE (op) == CONST_VECTOR)
3023 num_elem = CONST_VECTOR_NUNITS (op);
3024 elems = &CONST_VECTOR_ELT (op, 0);
3025 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3031 elem_bitsize = max_bitsize;
3034   if (BITS_PER_UNIT % value_bit != 0)
3035 abort (); /* Too complicated; reducing value_bit may help. */
3036   if (elem_bitsize % BITS_PER_UNIT != 0)
3037 abort (); /* I don't know how to handle endianness of sub-units. */
3039   for (elem = 0; elem < num_elem; elem++)
3042 rtx el = elems[elem];
3044 /* Vectors are kept in target memory order. (This is probably
3047 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3048 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3050 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3051 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      /* Map the element's position through word and sub-word endianness
	 to its location in the little-endian VALUE array.  */
3052 unsigned bytele = (subword_byte % UNITS_PER_WORD
3053 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3054 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3057       switch (GET_CODE (el))
3061 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3063 *vp++ = INTVAL (el) >> i;
3064 /* CONST_INTs are always logically sign-extended. */
3065 for (; i < elem_bitsize; i += value_bit)
3066 *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  /* A VOIDmode CONST_DOUBLE holds a two-word integer
	     (low word, high word).  */
3070 if (GET_MODE (el) == VOIDmode)
3072 /* If this triggers, someone should have generated a
3073 CONST_INT instead. */
3074 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3077 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3078 *vp++ = CONST_DOUBLE_LOW (el) >> i;
3079 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3082 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3085 /* It shouldn't matter what's done here, so fill it with
3087 for (; i < max_bitsize; i += value_bit)
3090 else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
3092 long tmp[max_bitsize / 32];
3093 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3095 if (bitsize > elem_bitsize)
3097 if (bitsize % value_bit != 0)
3100 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3103 /* real_to_target produces its result in words affected by
3104 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3105 and use WORDS_BIG_ENDIAN instead; see the documentation
3106 of SUBREG in rtl.texi. */
3107 for (i = 0; i < bitsize; i += value_bit)
3110 if (WORDS_BIG_ENDIAN)
3111 ibase = bitsize - 1 - i;
3114 *vp++ = tmp[ibase / 32] >> i % 32;
3117 /* It shouldn't matter what's done here, so fill it with
3119 for (; i < elem_bitsize; i += value_bit)
3131   /* Now, pick the right byte to start with. */
3132   /* Renumber BYTE so that the least-significant byte is byte 0. A special
3133 case is paradoxical SUBREGs, which shouldn't be adjusted since they
3134 will already have offset 0. */
3135   if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3137 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3139 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3140 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3141 byte = (subword_byte % UNITS_PER_WORD
3142 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3145   /* BYTE should still be inside OP. (Note that BYTE is unsigned,
3146 so if it's become negative it will instead be very large.) */
3147   if (byte >= GET_MODE_SIZE (innermode))
3150   /* Convert from bytes to chunks of size value_bit. */
3151   value_start = byte * (BITS_PER_UNIT / value_bit);
3153   /* Re-pack the value. */
  /* For a vector result, build one element per inner mode; otherwise the
     whole output is a single "element" in OUTERMODE itself.  */
3155   if (VECTOR_MODE_P (outermode))
3157 num_elem = GET_MODE_NUNITS (outermode);
3158 result_v = rtvec_alloc (num_elem);
3159 elems = &RTVEC_ELT (result_v, 0);
3160 outer_submode = GET_MODE_INNER (outermode);
3166 outer_submode = outermode;
3169   outer_class = GET_MODE_CLASS (outer_submode);
3170   elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3172   if (elem_bitsize % value_bit != 0)
3174   if (elem_bitsize + value_start * value_bit > max_bitsize)
3177   for (elem = 0; elem < num_elem; elem++)
3181 /* Vectors are stored in target memory order. (This is probably
3184 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3185 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3187 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3188 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3189 unsigned bytele = (subword_byte % UNITS_PER_WORD
3190 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3191 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3194       switch (outer_class)
3197 case MODE_PARTIAL_INT:
	  /* Assemble an integer from the value chunks, low word first.  */
3199 unsigned HOST_WIDE_INT hi = 0, lo = 0;
3202 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3204 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3205 for (; i < elem_bitsize; i += value_bit)
3206 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3207 << (i - HOST_BITS_PER_WIDE_INT));
3209 /* immed_double_const doesn't call trunc_int_for_mode. I don't
3211 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3212 elems[elem] = gen_int_mode (lo, outer_submode);
3214 elems[elem] = immed_double_const (lo, hi, outer_submode);
3221 long tmp[max_bitsize / 32];
3223 /* real_from_target wants its input in words affected by
3224 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
3225 and use WORDS_BIG_ENDIAN instead; see the documentation
3226 of SUBREG in rtl.texi. */
3227 for (i = 0; i < max_bitsize / 32; i++)
3229 for (i = 0; i < elem_bitsize; i += value_bit)
3232 if (WORDS_BIG_ENDIAN)
3233 ibase = elem_bitsize - 1 - i;
3236 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3239 real_from_target (&r, tmp, outer_submode);
3240 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3248   if (VECTOR_MODE_P (outermode))
3249     return gen_rtx_CONST_VECTOR (outermode, result_v);
3254 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3255 Return 0 if no simplifications are possible. */
3257 simplify_subreg (enum machine_mode outermode, rtx op,
3258 enum machine_mode innermode, unsigned int byte)
3260   /* Little bit of sanity checking. */
3261   if (innermode == VOIDmode || outermode == VOIDmode
3262 || innermode == BLKmode || outermode == BLKmode)
3265   if (GET_MODE (op) != innermode
3266 && GET_MODE (op) != VOIDmode)
  /* BYTE must be an OUTERMODE-aligned offset that lies within OP.  */
3269   if (byte % GET_MODE_SIZE (outermode)
3270 || byte >= GET_MODE_SIZE (innermode))
3273   if (outermode == innermode && !byte)
  /* Constants can always be evaluated bit-for-bit.  */
3276   if (GET_CODE (op) == CONST_INT
3277 || GET_CODE (op) == CONST_DOUBLE
3278 || GET_CODE (op) == CONST_VECTOR)
3279     return simplify_immed_subreg (outermode, op, innermode, byte);
3281   /* Changing mode twice with SUBREG => just change it once,
3282 or not at all if changing back op starting mode. */
3283   if (GET_CODE (op) == SUBREG)
3285 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3286 int final_offset = byte + SUBREG_BYTE (op);
3289 if (outermode == innermostmode
3290 && byte == 0 && SUBREG_BYTE (op) == 0)
3291 return SUBREG_REG (op);
3293 /* The SUBREG_BYTE represents offset, as if the value were stored
3294 in memory. Irritating exception is paradoxical subreg, where
3295 we define SUBREG_BYTE to be 0. On big endian machines, this
3296 value should be negative. For a moment, undo this exception. */
3297 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3299 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3300 if (WORDS_BIG_ENDIAN)
3301 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3302 if (BYTES_BIG_ENDIAN)
3303 final_offset += difference % UNITS_PER_WORD;
      /* Likewise undo the paradoxical-subreg exception for the inner
	 SUBREG, if it is paradoxical as well.  */
3305 if (SUBREG_BYTE (op) == 0
3306 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3308 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3309 if (WORDS_BIG_ENDIAN)
3310 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3311 if (BYTES_BIG_ENDIAN)
3312 final_offset += difference % UNITS_PER_WORD;
3315 /* See whether resulting subreg will be paradoxical. */
3316 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3318 /* In nonparadoxical subregs we can't handle negative offsets. */
3319 if (final_offset < 0)
3321 /* Bail out in case resulting subreg would be incorrect. */
3322 if (final_offset % GET_MODE_SIZE (outermode)
3323 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3329 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3331 /* In paradoxical subreg, see if we are still looking on lower part.
3332 If so, our SUBREG_BYTE will be 0. */
3333 if (WORDS_BIG_ENDIAN)
3334 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3335 if (BYTES_BIG_ENDIAN)
3336 offset += difference % UNITS_PER_WORD;
3337 if (offset == final_offset)
3343 /* Recurse for further possible simplifications. */
3344 new = simplify_subreg (outermode, SUBREG_REG (op),
3345 GET_MODE (SUBREG_REG (op)),
3349 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3352   /* SUBREG of a hard register => just change the register number
3353 and/or mode. If the hard register is not valid in that mode,
3354 suppress this simplification. If the hard register is the stack,
3355 frame, or argument pointer, leave this as a SUBREG. */
3358       && (! REG_FUNCTION_VALUE_P (op)
3359 || ! rtx_equal_function_value_matters)
3360       && REGNO (op) < FIRST_PSEUDO_REGISTER
3361 #ifdef CANNOT_CHANGE_MODE_CLASS
3362       && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3363 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3364 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3366       && ((reload_completed && !frame_pointer_needed)
3367 || (REGNO (op) != FRAME_POINTER_REGNUM
3368 #if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3369 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3372 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3373       && REGNO (op) != ARG_POINTER_REGNUM
3375       && REGNO (op) != STACK_POINTER_REGNUM
3376       && subreg_offset_representable_p (REGNO (op), innermode,
3379       rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3380       int final_regno = subreg_hard_regno (tem, 0);
3382       /* ??? We do allow it if the current REG is not valid for
3383 its mode. This is a kludge to work around how float/complex
3384 arguments are passed on 32-bit SPARC and should be fixed. */
3385       if (HARD_REGNO_MODE_OK (final_regno, outermode)
3386 || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3388 rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3390 /* Propagate original regno. We don't have any way to specify
3391 the offset inside original regno, so do so only for lowpart.
3392 The information is used only by alias analysis that can not
3393 grok partial register anyway. */
3395 if (subreg_lowpart_offset (outermode, innermode) == byte)
3396 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3401   /* If we have a SUBREG of a register that we are replacing and we are
3402 replacing it with a MEM, make a new MEM and try replacing the
3403 SUBREG with it. Don't do this if the MEM has a mode-dependent address
3404 or if we would be widening it. */
3406   if (GET_CODE (op) == MEM
3407       && ! mode_dependent_address_p (XEXP (op, 0))
3408       /* Allow splitting of volatile memory references in case we don't
3409 have instruction to move the whole thing. */
3410       && (! MEM_VOLATILE_P (op)
3411 || ! have_insn_for (SET, innermode))
3412       && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3413     return adjust_address_nv (op, outermode, byte);
3415   /* Handle complex values represented as CONCAT
3416 of real and imaginary part. */
3417   if (GET_CODE (op) == CONCAT)
      /* An offset below the unit size selects the real part (XEXP 0),
	 otherwise the imaginary part (XEXP 1).  */
3419 int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
3420 rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3421 unsigned int final_offset;
3424 final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3425 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3428 /* We can at least simplify it by referring directly to the relevant part. */
3429 return gen_rtx_SUBREG (outermode, part, final_offset);
3435 /* Make a SUBREG operation or equivalent if it folds. */
/* Like simplify_subreg, but when no simplification applies an explicit
   SUBREG rtx is generated instead (see the final return below).
   NOTE(review): the bodies of the early sanity-check branches are not
   visible in this copy — confirm their failure behavior against the
   full source.  */
3438 simplify_gen_subreg (enum machine_mode outermode, rtx op,
3439 enum machine_mode innermode, unsigned int byte)
3442   /* Little bit of sanity checking. */
3443   if (innermode == VOIDmode || outermode == VOIDmode
3444 || innermode == BLKmode || outermode == BLKmode)
3447   if (GET_MODE (op) != innermode
3448 && GET_MODE (op) != VOIDmode)
  /* BYTE must be an OUTERMODE-aligned offset that lies within OP.  */
3451   if (byte % GET_MODE_SIZE (outermode)
3452 || byte >= GET_MODE_SIZE (innermode))
3455   if (GET_CODE (op) == QUEUED)
  /* First try the full simplifier.  */
3458   new = simplify_subreg (outermode, op, innermode, byte);
  /* A SUBREG or a mode-less value cannot be wrapped in another SUBREG.  */
3462   if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3465   return gen_rtx_SUBREG (outermode, op, byte);
3467 /* Simplify X, an rtx expression.
3469 Return the simplified expression or NULL if no simplifications
3472 This is the preferred entry point into the simplification routines;
3473 however, we still allow passes to call the more specific routines.
3475 Right now GCC has three (yes, three) major bodies of RTL simplification
3476 code that need to be unified.
3478 1. fold_rtx in cse.c. This code uses various CSE specific
3479 information to aid in RTL simplification.
3481 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
3482 it uses combine specific information to aid in RTL
3485 3. The routines in this file.
3488 Long term we want to only have one body of simplification code; to
3489 get to that state I recommend the following steps:
3491    1. Pore over fold_rtx & simplify_rtx and move any simplifications
3492 which are not pass dependent state into these routines.
3494 2. As code is moved by #1, change fold_rtx & simplify_rtx to
3495 use this routine whenever possible.
3497 3. Allow for pass dependent state to be provided to these
3498 routines and add simplifications based on the pass dependent
3499 state. Remove code from cse.c & combine.c that becomes
3502 It will take time, but ultimately the compiler will be easier to
3503 maintain and improve. It's totally silly that when we add a
3504 simplification that it needs to be added to 4 places (3 for RTL
3505    simplification and 1 for tree simplification).  */
3508 simplify_rtx (rtx x)
3510 enum rtx_code code = GET_CODE (x);
3511 enum machine_mode mode = GET_MODE (x);
3514 switch (GET_RTX_CLASS (code))
3517 return simplify_unary_operation (code, mode,
3518 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3520 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3521 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3523 /* Fall through.... */
3526 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3530 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3531 XEXP (x, 0), XEXP (x, 1),
3535 if (VECTOR_MODE_P (mode))
3537 temp = simplify_relational_operation (code,
3538 ((GET_MODE (XEXP (x, 0))
3540 ? GET_MODE (XEXP (x, 0))
3541 : GET_MODE (XEXP (x, 1))),
3542 XEXP (x, 0), XEXP (x, 1));
3543 #ifdef FLOAT_STORE_FLAG_VALUE
3544 if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3546 if (temp == const0_rtx)
3547 temp = CONST0_RTX (mode);
3549 temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
3557 return simplify_gen_subreg (mode, SUBREG_REG (x),
3558 GET_MODE (SUBREG_REG (x)),
3560 if (code == CONSTANT_P_RTX)
3562 if (CONSTANT_P (XEXP (x, 0)))
3570 /* Convert (lo_sum (high FOO) FOO) to FOO. */
3571 if (GET_CODE (XEXP (x, 0)) == HIGH
3572 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))