1 /* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
21 02110-1301, USA. */
26 #include "coretypes.h"
32 #include "hard-reg-set.h"
35 #include "insn-config.h"
44 /* Simplification and canonicalization of RTL. */
46 /* Much code operates on (low, high) pairs; the low value is an
47 unsigned wide int, the high value a signed wide int. We
48 occasionally need to sign extend from low to high as if low were a
49 signed wide int. */
50 #define HWI_SIGN_EXTEND(low) \
51 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
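/* Illustrative note (not part of GCC): HWI_SIGN_EXTEND computes the high
   half that a (low, high) pair needs when LOW is reinterpreted as a
   signed wide int.  A minimal standalone analogue, assuming a 64-bit
   long long as the stand-in for HOST_WIDE_INT:

     long long high_of (long long low)
     {
       return low < 0 ? -1LL : 0LL;    -- all-ones iff LOW's sign bit is set
     }

   so the pair for -2 is (-2, -1) and the pair for 2 is (2, 0).  */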
53 static rtx neg_const_int (enum machine_mode, rtx);
54 static bool plus_minus_operand_p (rtx);
55 static int simplify_plus_minus_op_data_cmp (const void *, const void *);
56 static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
57 static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
58 unsigned int);
59 static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60 rtx, rtx);
61 static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
62 enum machine_mode, rtx, rtx);
63 static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
64 static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
65 rtx, rtx, rtx, rtx);
67 /* Negate a CONST_INT rtx, truncating (because a conversion from a
68 maximally negative number can overflow). */
69 static rtx
70 neg_const_int (enum machine_mode mode, rtx i)
71 {
72 return gen_int_mode (- INTVAL (i), mode);
73 }
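/* Example (a sketch, not used by the compiler): the truncation matters
   when INTVAL (i) is the most negative value of the mode.  With 32-bit
   unsigned arithmetic standing in for a 32-bit MODE:

     unsigned int neg_trunc32 (unsigned int x)
     {
       return 0u - x;    -- two's complement negation, wraps modulo 2^32
     }

   neg_trunc32 (0x80000000u) is 0x80000000u again: negating INT_MIN
   overflows, and gen_int_mode's truncation gives the wrapped result.  */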
75 /* Test whether expression, X, is an immediate constant that represents
76 the most significant bit of machine mode MODE. */
78 bool
79 mode_signbit_p (enum machine_mode mode, rtx x)
80 {
81 unsigned HOST_WIDE_INT val;
82 unsigned int width;
84 if (GET_MODE_CLASS (mode) != MODE_INT)
85 return false;
87 width = GET_MODE_BITSIZE (mode);
88 if (width == 0)
89 return false;
91 if (width <= HOST_BITS_PER_WIDE_INT
92 && GET_CODE (x) == CONST_INT)
93 val = INTVAL (x);
94 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
95 && GET_CODE (x) == CONST_DOUBLE
96 && CONST_DOUBLE_LOW (x) == 0)
97 {
98 val = CONST_DOUBLE_HIGH (x);
99 width -= HOST_BITS_PER_WIDE_INT;
100 }
101 else
102 return false;
104 if (width < HOST_BITS_PER_WIDE_INT)
105 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
106 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
107 }
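/* Example (illustrative only): for a 16-bit integer mode the function
   above accepts exactly 0x8000.  A standalone analogue, assuming WIDTH
   does not exceed the width of unsigned long long:

     int is_signbit (unsigned long long val, unsigned int width)
     {
       if (width < 64)
         val &= (1ULL << width) - 1;       -- keep only WIDTH bits
       return val == 1ULL << (width - 1);  -- exactly the top bit set
     }
*/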
109 /* Make a binary operation by properly ordering the operands and
110 seeing if the expression folds. */
112 rtx
113 simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
114 rtx op1)
115 {
116 rtx tem;
118 /* If this simplifies, do it. */
119 tem = simplify_binary_operation (code, mode, op0, op1);
120 if (tem)
121 return tem;
123 /* Put complex operands first and constants second if commutative. */
124 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
125 && swap_commutative_operands_p (op0, op1))
126 tem = op0, op0 = op1, op1 = tem;
128 return gen_rtx_fmt_ee (code, mode, op0, op1);
129 }
131 /* If X is a MEM referencing the constant pool, return the real value.
132 Otherwise return X. */
133 rtx
134 avoid_constant_pool_reference (rtx x)
135 {
136 rtx c, tmp, addr;
137 enum machine_mode cmode;
138 HOST_WIDE_INT offset = 0;
140 switch (GET_CODE (x))
141 {
142 case MEM:
143 break;
145 case FLOAT_EXTEND:
146 /* Handle float extensions of constant pool references. */
147 tmp = XEXP (x, 0);
148 c = avoid_constant_pool_reference (tmp);
149 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
153 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
154 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
158 default:
159 return x;
160 }
162 addr = XEXP (x, 0);
164 /* Call target hook to avoid the effects of -fpic etc.... */
165 addr = targetm.delegitimize_address (addr);
167 /* Split the address into a base and integer offset. */
168 if (GET_CODE (addr) == CONST
169 && GET_CODE (XEXP (addr, 0)) == PLUS
170 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
172 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
173 addr = XEXP (XEXP (addr, 0), 0);
176 if (GET_CODE (addr) == LO_SUM)
177 addr = XEXP (addr, 1);
179 /* If this is a constant pool reference, we can turn it into its
180 constant and hope that simplifications happen. */
181 if (GET_CODE (addr) == SYMBOL_REF
182 && CONSTANT_POOL_ADDRESS_P (addr))
184 c = get_pool_constant (addr);
185 cmode = get_pool_mode (addr);
187 /* If we're accessing the constant in a different mode than it was
188 originally stored, attempt to fix that up via subreg simplifications.
189 If that fails we have no choice but to return the original memory. */
190 if (offset != 0 || cmode != GET_MODE (x))
192 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
193 if (tem && CONSTANT_P (tem))
203 /* Return true if X is a MEM referencing the constant pool. */
205 bool
206 constant_pool_reference_p (rtx x)
207 {
208 return avoid_constant_pool_reference (x) != x;
209 }
211 /* Make a unary operation by first seeing if it folds and otherwise making
212 the specified operation. */
214 rtx
215 simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
216 enum machine_mode op_mode)
217 {
218 rtx tem;
220 /* If this simplifies, use it. */
221 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
222 return tem;
224 return gen_rtx_fmt_e (code, mode, op);
225 }
227 /* Likewise for ternary operations. */
229 rtx
230 simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
231 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
232 {
233 rtx tem;
235 /* If this simplifies, use it. */
236 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
237 op0, op1, op2)))
238 return tem;
240 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
241 }
243 /* Likewise, for relational operations.
244 CMP_MODE specifies mode comparison is done in. */
246 rtx
247 simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
248 enum machine_mode cmp_mode, rtx op0, rtx op1)
249 {
250 rtx tem;
252 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
253 op0, op1)))
254 return tem;
256 return gen_rtx_fmt_ee (code, mode, op0, op1);
257 }
259 /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
260 resulting RTX. Return a new RTX which is as simplified as possible. */
262 rtx
263 simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
264 {
265 enum rtx_code code = GET_CODE (x);
266 enum machine_mode mode = GET_MODE (x);
267 enum machine_mode op_mode;
268 rtx op0, op1, op2;
270 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
271 to build a new expression substituting recursively. If we can't do
272 anything, return our input. */
274 if (x == old_rtx)
275 return new_rtx;
277 switch (GET_RTX_CLASS (code))
278 {
279 case RTX_UNARY:
280 op0 = XEXP (x, 0);
281 op_mode = GET_MODE (op0);
282 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
283 if (op0 == XEXP (x, 0))
284 return x;
285 return simplify_gen_unary (code, mode, op0, op_mode);
287 case RTX_BIN_ARITH:
288 case RTX_COMM_ARITH:
289 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
290 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
291 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
292 return x;
293 return simplify_gen_binary (code, mode, op0, op1);
295 case RTX_COMPARE:
296 case RTX_COMM_COMPARE:
297 op0 = XEXP (x, 0);
298 op1 = XEXP (x, 1);
299 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
300 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
301 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
302 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
303 return x;
304 return simplify_gen_relational (code, mode, op_mode, op0, op1);
306 case RTX_TERNARY:
307 case RTX_BITFIELD_OPS:
308 op0 = XEXP (x, 0);
309 op_mode = GET_MODE (op0);
310 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
311 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
312 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
313 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
314 return x;
315 if (op_mode == VOIDmode)
316 op_mode = GET_MODE (op0);
317 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
319 case RTX_EXTRA:
320 /* The only case we try to handle is a SUBREG. */
321 if (code == SUBREG)
322 {
323 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
324 if (op0 == SUBREG_REG (x))
325 return x;
326 op0 = simplify_gen_subreg (GET_MODE (x), op0,
327 GET_MODE (SUBREG_REG (x)),
328 SUBREG_BYTE (x));
329 return op0 ? op0 : x;
330 }
331 break;
333 case RTX_OBJ:
334 if (code == MEM)
335 {
336 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
337 if (op0 == XEXP (x, 0))
338 return x;
339 return replace_equiv_address_nv (x, op0);
340 }
341 else if (code == LO_SUM)
342 {
343 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
344 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
346 /* (lo_sum (high x) x) -> x */
347 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
348 return op1;
350 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
351 return x;
352 return gen_rtx_LO_SUM (mode, op0, op1);
353 }
354 else if (code == REG)
355 {
356 if (rtx_equal_p (x, old_rtx))
357 return new_rtx;
358 }
359 break;
361 default:
362 break;
363 }
364 return x;
365 }
367 /* Try to simplify a unary operation CODE whose output mode is to be
368 MODE with input operand OP whose mode was originally OP_MODE.
369 Return zero if no simplification can be made. */
370 rtx
371 simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
372 rtx op, enum machine_mode op_mode)
373 {
374 rtx trueop, tem;
376 if (GET_CODE (op) == CONST)
377 op = XEXP (op, 0);
379 trueop = avoid_constant_pool_reference (op);
381 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
382 if (tem)
383 return tem;
385 return simplify_unary_operation_1 (code, mode, op);
386 }
388 /* Perform some simplifications we can do even if the operands
389 aren't constant. */
390 static rtx
391 simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
392 {
393 enum rtx_code reversed;
394 rtx temp;
396 switch (code)
397 {
398 case NOT:
399 /* (not (not X)) == X. */
400 if (GET_CODE (op) == NOT)
401 return XEXP (op, 0);
403 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
404 comparison is all ones. */
405 if (COMPARISON_P (op)
406 && (mode == BImode || STORE_FLAG_VALUE == -1)
407 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
408 return simplify_gen_relational (reversed, mode, VOIDmode,
409 XEXP (op, 0), XEXP (op, 1));
411 /* (not (plus X -1)) can become (neg X). */
412 if (GET_CODE (op) == PLUS
413 && XEXP (op, 1) == constm1_rtx)
414 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
416 /* Similarly, (not (neg X)) is (plus X -1). */
417 if (GET_CODE (op) == NEG)
418 return plus_constant (XEXP (op, 0), -1);
420 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
421 if (GET_CODE (op) == XOR
422 && GET_CODE (XEXP (op, 1)) == CONST_INT
423 && (temp = simplify_unary_operation (NOT, mode,
424 XEXP (op, 1), mode)) != 0)
425 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
427 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
428 if (GET_CODE (op) == PLUS
429 && GET_CODE (XEXP (op, 1)) == CONST_INT
430 && mode_signbit_p (mode, XEXP (op, 1))
431 && (temp = simplify_unary_operation (NOT, mode,
432 XEXP (op, 1), mode)) != 0)
433 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
436 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
437 operands other than 1, but that is not valid. We could do a
438 similar simplification for (not (lshiftrt C X)) where C is
439 just the sign bit, but this doesn't seem common enough to
440 bother with. */
441 if (GET_CODE (op) == ASHIFT
442 && XEXP (op, 0) == const1_rtx)
443 {
444 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
445 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
446 }
448 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
449 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
450 so we can perform the above simplification. */
452 if (STORE_FLAG_VALUE == -1
453 && GET_CODE (op) == ASHIFTRT
454 && GET_CODE (XEXP (op, 1)) == CONST_INT
455 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
456 return simplify_gen_relational (GE, mode, VOIDmode,
457 XEXP (op, 0), const0_rtx);
460 if (GET_CODE (op) == SUBREG
461 && subreg_lowpart_p (op)
462 && (GET_MODE_SIZE (GET_MODE (op))
463 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
464 && GET_CODE (SUBREG_REG (op)) == ASHIFT
465 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
466 {
467 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
468 rtx x;
470 x = gen_rtx_ROTATE (inner_mode,
471 simplify_gen_unary (NOT, inner_mode, const1_rtx,
472 inner_mode),
473 XEXP (SUBREG_REG (op), 1));
474 return rtl_hooks.gen_lowpart_no_emit (mode, x);
475 }
477 /* Apply De Morgan's laws to reduce number of patterns for machines
478 with negating logical insns (and-not, nand, etc.). If result has
479 only one NOT, put it first, since that is how the patterns are
480 coded. */
482 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
483 {
484 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
485 enum machine_mode op_mode;
487 op_mode = GET_MODE (in1);
488 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
490 op_mode = GET_MODE (in2);
491 if (op_mode == VOIDmode)
492 op_mode = mode;
493 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
495 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
496 {
497 rtx tem = in2;
498 in2 = in1; in1 = tem;
499 }
501 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
502 mode, in1, in2);
503 }
504 break;
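/* The rewrite above relies on De Morgan's laws, which are easy to check
   on ordinary integers (a sketch, not part of the compiler):

     int demorgan_holds (unsigned int a, unsigned int b)
     {
       return ~(a | b) == (~a & ~b)      -- the IOR case
              && ~(a & b) == (~a | ~b);  -- the AND case, always 1
     }
*/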
506 case NEG:
507 /* (neg (neg X)) == X. */
508 if (GET_CODE (op) == NEG)
509 return XEXP (op, 0);
511 /* (neg (plus X 1)) can become (not X). */
512 if (GET_CODE (op) == PLUS
513 && XEXP (op, 1) == const1_rtx)
514 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
516 /* Similarly, (neg (not X)) is (plus X 1). */
517 if (GET_CODE (op) == NOT)
518 return plus_constant (XEXP (op, 0), 1);
520 /* (neg (minus X Y)) can become (minus Y X). This transformation
521 isn't safe for modes with signed zeros, since if X and Y are
522 both +0, (minus Y X) is the same as (minus X Y). If the
523 rounding mode is towards +infinity (or -infinity) then the two
524 expressions will be rounded differently. */
525 if (GET_CODE (op) == MINUS
526 && !HONOR_SIGNED_ZEROS (mode)
527 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
528 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
530 if (GET_CODE (op) == PLUS
531 && !HONOR_SIGNED_ZEROS (mode)
532 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
534 /* (neg (plus A C)) is simplified to (minus -C A). */
535 if (GET_CODE (XEXP (op, 1)) == CONST_INT
536 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
538 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
540 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
543 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
544 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
545 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
548 /* (neg (mult A B)) becomes (mult (neg A) B).
549 This works even for floating-point values. */
550 if (GET_CODE (op) == MULT
551 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
552 {
553 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
554 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
555 }
557 /* NEG commutes with ASHIFT since it is multiplication. Only do
558 this if we can then eliminate the NEG (e.g., if the operand
559 is a constant). */
560 if (GET_CODE (op) == ASHIFT)
561 {
562 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
563 if (temp)
564 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
565 }
567 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
568 C is equal to the width of MODE minus 1. */
569 if (GET_CODE (op) == ASHIFTRT
570 && GET_CODE (XEXP (op, 1)) == CONST_INT
571 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
572 return simplify_gen_binary (LSHIFTRT, mode,
573 XEXP (op, 0), XEXP (op, 1));
575 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
576 C is equal to the width of MODE minus 1. */
577 if (GET_CODE (op) == LSHIFTRT
578 && GET_CODE (XEXP (op, 1)) == CONST_INT
579 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
580 return simplify_gen_binary (ASHIFTRT, mode,
581 XEXP (op, 0), XEXP (op, 1));
583 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
584 if (GET_CODE (op) == XOR
585 && XEXP (op, 1) == const1_rtx
586 && nonzero_bits (XEXP (op, 0), mode) == 1)
587 return plus_constant (XEXP (op, 0), -1);
589 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
590 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
591 if (GET_CODE (op) == LT
592 && XEXP (op, 1) == const0_rtx
593 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
594 {
595 enum machine_mode inner = GET_MODE (XEXP (op, 0));
596 int isize = GET_MODE_BITSIZE (inner);
597 if (STORE_FLAG_VALUE == 1)
598 {
599 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
600 GEN_INT (isize - 1));
601 if (mode == inner)
602 return temp;
603 if (GET_MODE_BITSIZE (mode) > isize)
604 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
605 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
606 }
607 else if (STORE_FLAG_VALUE == -1)
608 {
609 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
610 GEN_INT (isize - 1));
611 if (mode == inner)
612 return temp;
613 if (GET_MODE_BITSIZE (mode) > isize)
614 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
615 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
616 }
617 }
618 break;
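/* Example (illustrative sketch): for STORE_FLAG_VALUE == 1 the rewrite
   turns -(x < 0) into an arithmetic shift that smears the sign bit
   across the word.  On a host where >> of a negative int is arithmetic:

     int neg_lt0 (int x)
     {
       return x >> (8 * sizeof (int) - 1);   -- 0 if x >= 0, -1 if x < 0
     }

   The LSHIFTRT form instead produces 0 or 1, which matches the
   STORE_FLAG_VALUE == -1 convention after negation.  */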
620 case TRUNCATE:
621 /* We can't handle truncation to a partial integer mode here
622 because we don't know the real bitsize of the partial
623 integer mode. */
624 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
625 break;
627 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
628 if ((GET_CODE (op) == SIGN_EXTEND
629 || GET_CODE (op) == ZERO_EXTEND)
630 && GET_MODE (XEXP (op, 0)) == mode)
633 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
634 (OP:SI foo:SI) if OP is NEG or ABS. */
635 if ((GET_CODE (op) == ABS
636 || GET_CODE (op) == NEG)
637 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
638 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
639 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
640 return simplify_gen_unary (GET_CODE (op), mode,
641 XEXP (XEXP (op, 0), 0), mode);
643 /* (truncate:A (subreg:B (truncate:C X) 0)) is
644 (truncate:A X). */
645 if (GET_CODE (op) == SUBREG
646 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
647 && subreg_lowpart_p (op))
648 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
649 GET_MODE (XEXP (SUBREG_REG (op), 0)));
651 /* If we know that the value is already truncated, we can
652 replace the TRUNCATE with a SUBREG. Note that this is also
653 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
654 modes we just have to apply a different definition for
655 truncation. But don't do this for an (LSHIFTRT (MULT ...))
656 since this will cause problems with the umulXi3_highpart
657 patterns. */
658 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
659 GET_MODE_BITSIZE (GET_MODE (op)))
660 ? (num_sign_bit_copies (op, GET_MODE (op))
661 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
662 - GET_MODE_BITSIZE (mode)))
663 : truncated_to_mode (mode, op))
664 && ! (GET_CODE (op) == LSHIFTRT
665 && GET_CODE (XEXP (op, 0)) == MULT))
666 return rtl_hooks.gen_lowpart_no_emit (mode, op);
668 /* A truncate of a comparison can be replaced with a subreg if
669 STORE_FLAG_VALUE permits. This is like the previous test,
670 but it works even if the comparison is done in a mode larger
671 than HOST_BITS_PER_WIDE_INT. */
672 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
674 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
675 return rtl_hooks.gen_lowpart_no_emit (mode, op);
678 case FLOAT_TRUNCATE:
679 if (DECIMAL_FLOAT_MODE_P (mode))
680 break;
682 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
683 if (GET_CODE (op) == FLOAT_EXTEND
684 && GET_MODE (XEXP (op, 0)) == mode)
687 /* (float_truncate:SF (float_truncate:DF foo:XF))
688 = (float_truncate:SF foo:XF).
689 This may eliminate double rounding, so it is unsafe.
691 (float_truncate:SF (float_extend:XF foo:DF))
692 = (float_truncate:SF foo:DF).
694 (float_truncate:DF (float_extend:XF foo:SF))
695 = (float_extend:DF foo:SF). */
696 if ((GET_CODE (op) == FLOAT_TRUNCATE
697 && flag_unsafe_math_optimizations)
698 || GET_CODE (op) == FLOAT_EXTEND)
699 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
701 > GET_MODE_SIZE (mode)
702 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
703 mode,
704 XEXP (op, 0), mode);
706 /* (float_truncate (float x)) is (float x) */
707 if (GET_CODE (op) == FLOAT
708 && (flag_unsafe_math_optimizations
709 || ((unsigned)significand_size (GET_MODE (op))
710 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
711 - num_sign_bit_copies (XEXP (op, 0),
712 GET_MODE (XEXP (op, 0)))))))
713 return simplify_gen_unary (FLOAT, mode,
714 XEXP (op, 0),
715 GET_MODE (XEXP (op, 0)));
717 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
718 (OP:SF foo:SF) if OP is NEG or ABS. */
719 if ((GET_CODE (op) == ABS
720 || GET_CODE (op) == NEG)
721 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
722 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
723 return simplify_gen_unary (GET_CODE (op), mode,
724 XEXP (XEXP (op, 0), 0), mode);
726 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
727 is (float_truncate:SF x). */
728 if (GET_CODE (op) == SUBREG
729 && subreg_lowpart_p (op)
730 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
731 return SUBREG_REG (op);
734 case FLOAT_EXTEND:
735 if (DECIMAL_FLOAT_MODE_P (mode))
736 break;
738 /* (float_extend (float_extend x)) is (float_extend x)
740 (float_extend (float x)) is (float x) assuming that double
741 rounding can't happen.
743 if (GET_CODE (op) == FLOAT_EXTEND
744 || (GET_CODE (op) == FLOAT
745 && ((unsigned)significand_size (GET_MODE (op))
746 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
747 - num_sign_bit_copies (XEXP (op, 0),
748 GET_MODE (XEXP (op, 0)))))))
749 return simplify_gen_unary (GET_CODE (op), mode,
750 XEXP (op, 0),
751 GET_MODE (XEXP (op, 0)));
753 break;
756 /* (abs (neg <foo>)) -> (abs <foo>) */
757 if (GET_CODE (op) == NEG)
758 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
759 GET_MODE (XEXP (op, 0)));
761 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
762 do nothing. */
763 if (GET_MODE (op) == VOIDmode)
764 break;
766 /* If operand is something known to be positive, ignore the ABS. */
767 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
768 || ((GET_MODE_BITSIZE (GET_MODE (op))
769 <= HOST_BITS_PER_WIDE_INT)
770 && ((nonzero_bits (op, GET_MODE (op))
772 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
776 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
777 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
778 return gen_rtx_NEG (mode, op);
780 break;
783 /* (ffs (*_extend <X>)) = (ffs <X>) */
784 if (GET_CODE (op) == SIGN_EXTEND
785 || GET_CODE (op) == ZERO_EXTEND)
786 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
787 GET_MODE (XEXP (op, 0)));
788 break;
792 /* (pop* (zero_extend <X>)) = (pop* <X>) */
793 if (GET_CODE (op) == ZERO_EXTEND)
794 return simplify_gen_unary (code, mode, XEXP (op, 0),
795 GET_MODE (XEXP (op, 0)));
796 break;
799 /* (float (sign_extend <X>)) = (float <X>). */
800 if (GET_CODE (op) == SIGN_EXTEND)
801 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
802 GET_MODE (XEXP (op, 0)));
803 break;
805 case SIGN_EXTEND:
806 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
807 becomes just the MINUS if its mode is MODE. This allows
808 folding switch statements on machines using casesi (such as
809 the VAX). */
810 if (GET_CODE (op) == TRUNCATE
811 && GET_MODE (XEXP (op, 0)) == mode
812 && GET_CODE (XEXP (op, 0)) == MINUS
813 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
814 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
817 /* Check for a sign extension of a subreg of a promoted
818 variable, where the promotion is sign-extended, and the
819 target mode is the same as the variable's promotion. */
820 if (GET_CODE (op) == SUBREG
821 && SUBREG_PROMOTED_VAR_P (op)
822 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
823 && GET_MODE (XEXP (op, 0)) == mode)
826 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
827 if (! POINTERS_EXTEND_UNSIGNED
828 && mode == Pmode && GET_MODE (op) == ptr_mode
829 && (CONSTANT_P (op)
830 || (GET_CODE (op) == SUBREG
831 && REG_P (SUBREG_REG (op))
832 && REG_POINTER (SUBREG_REG (op))
833 && GET_MODE (SUBREG_REG (op)) == Pmode)))
834 return convert_memory_address (Pmode, op);
835 #endif
836 break;
838 case ZERO_EXTEND:
839 /* Check for a zero extension of a subreg of a promoted
840 variable, where the promotion is zero-extended, and the
841 target mode is the same as the variable's promotion. */
842 if (GET_CODE (op) == SUBREG
843 && SUBREG_PROMOTED_VAR_P (op)
844 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
845 && GET_MODE (XEXP (op, 0)) == mode)
848 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
849 if (POINTERS_EXTEND_UNSIGNED > 0
850 && mode == Pmode && GET_MODE (op) == ptr_mode
851 && (CONSTANT_P (op)
852 || (GET_CODE (op) == SUBREG
853 && REG_P (SUBREG_REG (op))
854 && REG_POINTER (SUBREG_REG (op))
855 && GET_MODE (SUBREG_REG (op)) == Pmode)))
856 return convert_memory_address (Pmode, op);
857 #endif
858 break;
860 default:
861 break;
862 }
864 return 0;
865 }
867 /* Try to compute the value of a unary operation CODE whose output mode is to
868 be MODE with input operand OP whose mode was originally OP_MODE.
869 Return zero if the value cannot be computed. */
870 rtx
871 simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
872 rtx op, enum machine_mode op_mode)
873 {
874 unsigned int width = GET_MODE_BITSIZE (mode);
876 if (code == VEC_DUPLICATE)
877 {
878 gcc_assert (VECTOR_MODE_P (mode));
879 if (GET_MODE (op) != VOIDmode)
880 {
881 if (!VECTOR_MODE_P (GET_MODE (op)))
882 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
883 else
884 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
885 (GET_MODE (op)));
886 }
887 if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
888 || GET_CODE (op) == CONST_VECTOR)
889 {
890 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
891 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
892 rtvec v = rtvec_alloc (n_elts);
893 unsigned int i;
895 if (GET_CODE (op) != CONST_VECTOR)
896 for (i = 0; i < n_elts; i++)
897 RTVEC_ELT (v, i) = op;
898 else
899 {
900 enum machine_mode inmode = GET_MODE (op);
901 int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
902 unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
904 gcc_assert (in_n_elts < n_elts);
905 gcc_assert ((n_elts % in_n_elts) == 0);
906 for (i = 0; i < n_elts; i++)
907 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
908 }
909 return gen_rtx_CONST_VECTOR (mode, v);
910 }
911 }
913 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
915 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
916 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
917 enum machine_mode opmode = GET_MODE (op);
918 int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
919 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
920 rtvec v = rtvec_alloc (n_elts);
921 unsigned int i;
923 gcc_assert (op_n_elts == n_elts);
924 for (i = 0; i < n_elts; i++)
925 {
926 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
927 CONST_VECTOR_ELT (op, i),
928 GET_MODE_INNER (opmode));
929 if (!x)
930 return 0;
931 RTVEC_ELT (v, i) = x;
932 }
933 return gen_rtx_CONST_VECTOR (mode, v);
934 }
936 /* The order of these tests is critical so that, for example, we don't
937 check the wrong mode (input vs. output) for a conversion operation,
938 such as FIX. At some point, this should be simplified. */
940 if (code == FLOAT && GET_MODE (op) == VOIDmode
941 && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
942 {
943 HOST_WIDE_INT hv, lv;
944 REAL_VALUE_TYPE d;
946 if (GET_CODE (op) == CONST_INT)
947 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
948 else
949 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
951 REAL_VALUE_FROM_INT (d, lv, hv, mode);
952 d = real_value_truncate (mode, d);
953 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
954 }
955 else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
956 && (GET_CODE (op) == CONST_DOUBLE
957 || GET_CODE (op) == CONST_INT))
958 {
959 HOST_WIDE_INT hv, lv;
960 REAL_VALUE_TYPE d;
962 if (GET_CODE (op) == CONST_INT)
963 lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
964 else
965 lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
967 if (op_mode == VOIDmode)
968 {
969 /* We don't know how to interpret negative-looking numbers in
970 this case, so don't try to fold those. */
971 if (hv < 0)
972 return 0;
973 }
974 else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
975 ;
976 else
977 hv = 0, lv &= GET_MODE_MASK (op_mode);
979 REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
980 d = real_value_truncate (mode, d);
981 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
982 }
984 if (GET_CODE (op) == CONST_INT
985 && width <= HOST_BITS_PER_WIDE_INT && width > 0)
986 {
987 HOST_WIDE_INT arg0 = INTVAL (op);
988 HOST_WIDE_INT val;
990 switch (code)
991 {
992 case NOT:
993 val = ~ arg0;
994 break;
996 case NEG:
997 val = - arg0;
998 break;
1000 case ABS:
1001 val = (arg0 >= 0 ? arg0 : - arg0);
1002 break;
1004 case FFS:
1005 /* Don't use ffs here. Instead, get low order bit and then its
1006 number. If arg0 is zero, this will return 0, as desired. */
1007 arg0 &= GET_MODE_MASK (mode);
1008 val = exact_log2 (arg0 & (- arg0)) + 1;
1012 arg0 &= GET_MODE_MASK (mode);
1013 if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
1016 val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
1020 arg0 &= GET_MODE_MASK (mode);
1023 /* Even if the value at zero is undefined, we have to come
1024 up with some replacement. Seems good enough. */
1025 if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
1026 val = GET_MODE_BITSIZE (mode);
1027 }
1028 else
1029 val = exact_log2 (arg0 & -arg0);
1030 break;
1032 case POPCOUNT:
1033 arg0 &= GET_MODE_MASK (mode);
1034 val = 0;
1035 while (arg0)
1036 val++, arg0 &= arg0 - 1;
1037 break;
1039 case PARITY:
1040 arg0 &= GET_MODE_MASK (mode);
1041 val = 0;
1042 while (arg0)
1043 val++, arg0 &= arg0 - 1;
1044 val &= 1;
1045 break;
1050 case TRUNCATE:
1051 val = arg0;
1052 break;
1054 case ZERO_EXTEND:
1055 /* When zero-extending a CONST_INT, we need to know its
1056 original mode. */
1057 gcc_assert (op_mode != VOIDmode);
1058 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1059 {
1060 /* If we were really extending the mode,
1061 we would have to distinguish between zero-extension
1062 and sign-extension. */
1063 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1064 val = arg0;
1065 }
1066 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1067 val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1068 else
1069 return 0;
1070 break;
1072 case SIGN_EXTEND:
1073 if (op_mode == VOIDmode)
1074 op_mode = mode;
1075 if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
1076 {
1077 /* If we were really extending the mode,
1078 we would have to distinguish between zero-extension
1079 and sign-extension. */
1080 gcc_assert (width == GET_MODE_BITSIZE (op_mode));
1081 val = arg0;
1082 }
1083 else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
1084 {
1085 val
1086 = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
1087 if (val
1088 & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
1089 val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1090 }
1091 else
1092 return 0;
1093 break;
1095 case SQRT:
1096 case FLOAT_EXTEND:
1097 case FLOAT_TRUNCATE:
1098 return 0;
1100 default:
1101 gcc_unreachable ();
1102 }
1107 return gen_int_mode (val, mode);
1108 }
1110 /* We can do some operations on integer CONST_DOUBLEs. Also allow
1111 for a DImode operation on a CONST_INT. */
1112 else if (GET_MODE (op) == VOIDmode
1113 && width <= HOST_BITS_PER_WIDE_INT * 2
1114 && (GET_CODE (op) == CONST_DOUBLE
1115 || GET_CODE (op) == CONST_INT))
1116 {
1117 unsigned HOST_WIDE_INT l1, lv;
1118 HOST_WIDE_INT h1, hv;
1120 if (GET_CODE (op) == CONST_DOUBLE)
1121 l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
1122 else
1123 l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
1133 neg_double (l1, h1, &lv, &hv);
1138 neg_double (l1, h1, &lv, &hv);
1150 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
1153 lv = exact_log2 (l1 & -l1) + 1;
1156 case CLZ:
1157 hv = 0;
1158 if (h1 != 0)
1159 lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
1160 - HOST_BITS_PER_WIDE_INT;
1161 else if (l1 != 0)
1162 lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
1163 else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1164 lv = GET_MODE_BITSIZE (mode);
1165 break;
1167 case CTZ:
1168 hv = 0;
1169 if (l1 != 0)
1170 lv = exact_log2 (l1 & -l1);
1171 else if (h1 != 0)
1172 lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
1173 else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
1174 lv = GET_MODE_BITSIZE (mode);
1175 break;
1177 case POPCOUNT:
1178 hv = 0;
1179 lv = 0;
1180 while (l1)
1181 lv++, l1 &= l1 - 1;
1182 while (h1)
1183 lv++, h1 &= h1 - 1;
1184 break;
1186 case PARITY:
1187 hv = 0;
1188 lv = 0;
1189 while (l1)
1190 lv++, l1 &= l1 - 1;
1191 while (h1)
1192 lv++, h1 &= h1 - 1;
1193 lv &= 1;
1194 break;
1196 case TRUNCATE:
1197 /* This is just a change-of-mode, so do nothing. */
1198 lv = l1, hv = h1;
1199 break;
1201 case ZERO_EXTEND:
1202 gcc_assert (op_mode != VOIDmode);
1204 if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1208 lv = l1 & GET_MODE_MASK (op_mode);
1211 case SIGN_EXTEND:
1212 if (op_mode == VOIDmode
1213 || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
1214 return 0;
1215 else
1216 {
1217 lv = l1 & GET_MODE_MASK (op_mode);
1218 if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
1219 && (lv & ((HOST_WIDE_INT) 1
1220 << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
1221 lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
1223 hv = HWI_SIGN_EXTEND (lv);
1224 }
1225 break;
1227 case SQRT:
1228 return 0;
1230 default:
1231 return 0;
1232 }
1234 return immed_double_const (lv, hv, mode);
1235 }
1237 else if (GET_CODE (op) == CONST_DOUBLE
1238 && SCALAR_FLOAT_MODE_P (mode))
1239 {
1240 REAL_VALUE_TYPE d, t;
1241 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
1243 switch (code)
1244 {
1245 case SQRT:
1246 if (HONOR_SNANS (mode) && real_isnan (&d))
1247 return 0;
1248 real_sqrt (&t, mode, &d);
1249 d = t;
1250 break;
1251 case ABS:
1252 d = REAL_VALUE_ABS (d);
1253 break;
1254 case NEG:
1255 d = REAL_VALUE_NEGATE (d);
1256 break;
1257 case FLOAT_TRUNCATE:
1258 d = real_value_truncate (mode, d);
1259 break;
1260 case FLOAT_EXTEND:
1261 /* All this does is change the mode. */
1262 break;
1263 case FIX:
1264 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1265 break;
1266 case NOT:
1267 {
1268 long tmp[4];
1269 int i;
1271 real_to_target (tmp, &d, GET_MODE (op));
1272 for (i = 0; i < 4; i++)
1273 tmp[i] = ~tmp[i];
1274 real_from_target (&d, tmp, mode);
1275 break;
1276 }
1277 default:
1278 gcc_unreachable ();
1279 }
1280 return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1281 }
1283 else if (GET_CODE (op) == CONST_DOUBLE
1284 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1285 && GET_MODE_CLASS (mode) == MODE_INT
1286 && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
1288 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1289 operators are intentionally left unspecified (to ease implementation
1290 by target backends), for consistency, this routine implements the
1291 same semantics for constant folding as used by the middle-end. */
1293 /* This was formerly used only for non-IEEE float.
1294 eggert@twinsun.com says it is safe for IEEE also. */
1295 HOST_WIDE_INT xh, xl, th, tl;
1296 REAL_VALUE_TYPE x, t;
1297 REAL_VALUE_FROM_CONST_DOUBLE (x, op);
1298 switch (code)
1299 {
1300 case FIX:
1301 if (REAL_VALUE_ISNAN (x))
1302 return const0_rtx;
1304 /* Test against the signed upper bound. */
1305 if (width > HOST_BITS_PER_WIDE_INT)
1306 {
1307 th = ((unsigned HOST_WIDE_INT) 1
1308 << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
1309 tl = -1;
1310 }
1311 else
1312 {
1313 th = 0;
1314 tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
1315 }
1316 real_from_integer (&t, VOIDmode, tl, th, 0);
1317 if (REAL_VALUES_LESS (t, x))
1318 {
1319 xh = th;
1320 xl = tl;
1321 break;
1322 }
1324 /* Test against the signed lower bound. */
1325 if (width > HOST_BITS_PER_WIDE_INT)
1326 {
1327 th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
1328 tl = 0;
1329 }
1330 else
1331 {
1332 th = -1;
1333 tl = (HOST_WIDE_INT) -1 << (width - 1);
1334 }
1335 real_from_integer (&t, VOIDmode, tl, th, 0);
1336 if (REAL_VALUES_LESS (x, t))
1337 {
1338 xh = th;
1339 xl = tl;
1340 break;
1341 }
1342 REAL_VALUE_TO_INT (&xl, &xh, x);
1343 break;
1345 case UNSIGNED_FIX:
1346 if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
1347 return const0_rtx;
1349 /* Test against the unsigned upper bound. */
1350 if (width == 2*HOST_BITS_PER_WIDE_INT)
1351 {
1352 th = -1;
1353 tl = -1;
1354 }
1355 else if (width >= HOST_BITS_PER_WIDE_INT)
1356 {
1357 th = ((unsigned HOST_WIDE_INT) 1
1358 << (width - HOST_BITS_PER_WIDE_INT)) - 1;
1359 tl = -1;
1360 }
1361 else
1362 {
1363 th = 0;
1364 tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
1365 }
1366 real_from_integer (&t, VOIDmode, tl, th, 1);
1367 if (REAL_VALUES_LESS (t, x))
1368 {
1369 xh = th;
1370 xl = tl;
1371 break;
1372 }
1374 REAL_VALUE_TO_INT (&xl, &xh, x);
1375 break;
1377 default:
1378 gcc_unreachable ();
1379 }
1380 return immed_double_const (xl, xh, mode);
1381 }
1383 return 0;
1384 }
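/* Example (a sketch of the same saturating semantics, assuming a 32-bit
   signed target and a host with IEEE doubles; not the middle-end code
   itself):

     #include <math.h>

     long long fix_trunc32 (double x)
     {
       if (isnan (x))
         return 0;                  -- NaN folds to zero
       if (x >= 2147483647.0)
         return 2147483647;         -- clamp to the upper bound
       if (x <= -2147483648.0)
         return -2147483647 - 1;    -- clamp to the lower bound
       return (long long) x;        -- otherwise truncate toward zero
     }
*/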
1386 /* Subroutine of simplify_binary_operation to simplify a commutative,
1387 associative binary operation CODE with result mode MODE, operating
1388 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1389 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1390 canonicalization is possible. */
1392 static rtx
1393 simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1394 rtx op0, rtx op1)
1395 {
1396 rtx tem;
1398 /* Linearize the operator to the left. */
1399 if (GET_CODE (op1) == code)
1400 {
1401 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1402 if (GET_CODE (op0) == code)
1403 {
1404 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1405 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1406 }
1408 /* "a op (b op c)" becomes "(b op c) op a". */
1409 if (! swap_commutative_operands_p (op1, op0))
1410 return simplify_gen_binary (code, mode, op1, op0);
1412 tem = op0;
1413 op0 = op1;
1414 op1 = tem;
1415 }
1417 if (GET_CODE (op0) == code)
1418 {
1419 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1420 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1421 {
1422 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1423 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1424 }
1426 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1427 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1428 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1429 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1430 if (tem != 0)
1431 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1433 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1434 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1435 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1436 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1437 if (tem != 0)
1438 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1439 }
1441 return 0;
1442 }
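/* Example (illustrative only): these canonicalizations let constants
   meet and fold.  Given (plus (plus x (const_int 1)) (const_int 2)),
   the "(a op b) op c" -> "a op (b op c)" attempt folds 1 + 2 to 3, so
   the whole tree becomes (plus x (const_int 3)) -- the RTL analogue of
   rewriting (x + 1) + 2 as x + 3.  This is only unconditionally valid
   for integer modes, which is why the floating-point callers guard the
   call with flag_unsafe_math_optimizations.  */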
1445 /* Simplify a binary operation CODE with result mode MODE, operating on OP0
1446 and OP1. Return 0 if no simplification is possible.
1448 Don't use this for relational operations such as EQ or LT.
1449 Use simplify_relational_operation instead. */
1450 rtx
1451 simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1452 rtx op0, rtx op1)
1453 {
1454 rtx trueop0, trueop1;
1455 rtx tem;
1457 /* Relational operations don't work here. We must know the mode
1458 of the operands in order to do the comparison correctly.
1459 Assuming a full word can give incorrect results.
1460 Consider comparing 128 with -128 in QImode. */
1461 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1462 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1464 /* Make sure the constant is second. */
1465 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1466 && swap_commutative_operands_p (op0, op1))
1467 {
1468 tem = op0, op0 = op1, op1 = tem;
1469 }
1471 trueop0 = avoid_constant_pool_reference (op0);
1472 trueop1 = avoid_constant_pool_reference (op1);
1474 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1475 if (tem)
1476 return tem;
1477 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1478 }
1480 /* Subroutine of simplify_binary_operation. Simplify a binary operation
1481 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1482 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1483 actual constants. */
1485 static rtx
1486 simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1487 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1488 {
1489 rtx tem, reversed, opleft, opright;
1490 HOST_WIDE_INT val;
1491 unsigned int width = GET_MODE_BITSIZE (mode);
1493 /* Even if we can't compute a constant result,
1494 there are some cases worth simplifying. */
1496 switch (code)
1497 {
1498 case PLUS:
1499 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1500 when x is NaN, infinite, or finite and nonzero. They aren't
1501 when x is -0 and the rounding mode is not towards -infinity,
1502 since (-0) + 0 is then 0. */
1503 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1506 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1507 transformations are safe even for IEEE. */
1508 if (GET_CODE (op0) == NEG)
1509 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1510 else if (GET_CODE (op1) == NEG)
1511 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1513 /* (~a) + 1 -> -a */
1514 if (INTEGRAL_MODE_P (mode)
1515 && GET_CODE (op0) == NOT
1516 && trueop1 == const1_rtx)
1517 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1519 /* Handle both-operands-constant cases. We can only add
1520 CONST_INTs to constants since the sum of relocatable symbols
1521 can't be handled by most assemblers. Don't add CONST_INT
1522 to CONST_INT since overflow won't be computed properly if wider
1523 than HOST_BITS_PER_WIDE_INT. */
1525 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1526 && GET_CODE (op1) == CONST_INT)
1527 return plus_constant (op0, INTVAL (op1));
1528 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1529 && GET_CODE (op0) == CONST_INT)
1530 return plus_constant (op1, INTVAL (op0));
1532 /* See if this is something like X * C - X or vice versa or
1533 if the multiplication is written as a shift. If so, we can
1534 distribute and make a new multiply, shift, or maybe just
1535 have X (if C is 2 in the example above). But don't make
1536 something more expensive than we had before. */
1538 if (SCALAR_INT_MODE_P (mode))
1539 {
1540 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1541 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1542 rtx lhs = op0, rhs = op1;
1544 if (GET_CODE (lhs) == NEG)
1545 {
1546 coeff0l = -1;
1547 coeff0h = -1;
1548 lhs = XEXP (lhs, 0);
1549 }
1550 else if (GET_CODE (lhs) == MULT
1551 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1552 {
1553 coeff0l = INTVAL (XEXP (lhs, 1));
1554 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1555 lhs = XEXP (lhs, 0);
1556 }
1557 else if (GET_CODE (lhs) == ASHIFT
1558 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1559 && INTVAL (XEXP (lhs, 1)) >= 0
1560 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1562 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1564 lhs = XEXP (lhs, 0);
1567 if (GET_CODE (rhs) == NEG)
1568 {
1569 coeff1l = -1;
1570 coeff1h = -1;
1571 rhs = XEXP (rhs, 0);
1572 }
1573 else if (GET_CODE (rhs) == MULT
1574 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1575 {
1576 coeff1l = INTVAL (XEXP (rhs, 1));
1577 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1578 rhs = XEXP (rhs, 0);
1579 }
1580 else if (GET_CODE (rhs) == ASHIFT
1581 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1582 && INTVAL (XEXP (rhs, 1)) >= 0
1583 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1585 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1587 rhs = XEXP (rhs, 0);
1590 if (rtx_equal_p (lhs, rhs))
1591 {
1592 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1593 rtx coeff;
1594 unsigned HOST_WIDE_INT l;
1595 HOST_WIDE_INT h;
1597 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1598 coeff = immed_double_const (l, h, mode);
1600 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1601 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1602 ? tem : 0;
1603 }
1604 }
1606 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1607 if ((GET_CODE (op1) == CONST_INT
1608 || GET_CODE (op1) == CONST_DOUBLE)
1609 && GET_CODE (op0) == XOR
1610 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1611 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1612 && mode_signbit_p (mode, op1))
1613 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1614 simplify_gen_binary (XOR, mode, op1,
1615 XEXP (op0, 1)));
1617 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1618 if (GET_CODE (op0) == MULT
1619 && GET_CODE (XEXP (op0, 0)) == NEG)
1623 in1 = XEXP (XEXP (op0, 0), 0);
1624 in2 = XEXP (op0, 1);
1625 return simplify_gen_binary (MINUS, mode, op1,
1626 simplify_gen_binary (MULT, mode,
1630 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1631 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1632 is 1. */
1633 if (COMPARISON_P (op0)
1634 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1635 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1636 && (reversed = reversed_comparison (op0, mode)))
1638 simplify_gen_unary (NEG, mode, reversed, mode);
1640 /* If one of the operands is a PLUS or a MINUS, see if we can
1641 simplify this by the associative law.
1642 Don't use the associative law for floating point.
1643 The inaccuracy makes it nonassociative,
1644 and subtle programs can break if operations are associated. */
1646 if (INTEGRAL_MODE_P (mode)
1647 && (plus_minus_operand_p (op0)
1648 || plus_minus_operand_p (op1))
1649 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1652 /* Reassociate floating point addition only when the user
1653 specifies unsafe math optimizations. */
1654 if (FLOAT_MODE_P (mode)
1655 && flag_unsafe_math_optimizations)
1657 tem = simplify_associative_operation (code, mode, op0, op1);
1664 case COMPARE:
1665 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1666 using cc0, in which case we want to leave it as a COMPARE
1667 so we can distinguish it from a register-register-copy.
1669 In IEEE floating point, x-0 is not the same as x. */
1671 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1672 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1673 && trueop1 == CONST0_RTX (mode))
1677 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1678 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1679 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1680 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1681 {
1682 rtx xop00 = XEXP (op0, 0);
1683 rtx xop10 = XEXP (op1, 0);
1685 #ifdef HAVE_cc0
1686 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1687 #else
1688 if (REG_P (xop00) && REG_P (xop10)
1689 && GET_MODE (xop00) == GET_MODE (xop10)
1690 && REGNO (xop00) == REGNO (xop10)
1691 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1692 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1693 #endif
1694 return xop00;
1695 }
1696 break;
1698 case MINUS:
1699 /* We can't assume x-x is 0 even with non-IEEE floating point,
1700 but since it is zero except in very strange circumstances, we
1701 will treat it as zero with -funsafe-math-optimizations. */
1702 if (rtx_equal_p (trueop0, trueop1)
1703 && ! side_effects_p (op0)
1704 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1705 return CONST0_RTX (mode);
1707 /* Change subtraction from zero into negation. (0 - x) is the
1708 same as -x when x is NaN, infinite, or finite and nonzero.
1709 But if the mode has signed zeros, and does not round towards
1710 -infinity, then 0 - 0 is 0, not -0. */
1711 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1712 return simplify_gen_unary (NEG, mode, op1, mode);
1714 /* (-1 - a) is ~a. */
1715 if (trueop0 == constm1_rtx)
1716 return simplify_gen_unary (NOT, mode, op1, mode);
1718 /* Subtracting 0 has no effect unless the mode has signed zeros
1719 and supports rounding towards -infinity. In such a case,
1720 0 - 0 is -0. */
1721 if (!(HONOR_SIGNED_ZEROS (mode)
1722 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1723 && trueop1 == CONST0_RTX (mode))
1724 return op0;
1726 /* See if this is something like X * C - X or vice versa or
1727 if the multiplication is written as a shift. If so, we can
1728 distribute and make a new multiply, shift, or maybe just
1729 have X (if C is 2 in the example above). But don't make
1730 something more expensive than we had before. */
1732 if (SCALAR_INT_MODE_P (mode))
1733 {
1734 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1735 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1736 rtx lhs = op0, rhs = op1;
1738 if (GET_CODE (lhs) == NEG)
1739 {
1740 coeff0l = -1;
1741 coeff0h = -1;
1742 lhs = XEXP (lhs, 0);
1743 }
1744 else if (GET_CODE (lhs) == MULT
1745 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1746 {
1747 coeff0l = INTVAL (XEXP (lhs, 1));
1748 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1749 lhs = XEXP (lhs, 0);
1750 }
1751 else if (GET_CODE (lhs) == ASHIFT
1752 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1753 && INTVAL (XEXP (lhs, 1)) >= 0
1754 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1756 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1758 lhs = XEXP (lhs, 0);
1761 if (GET_CODE (rhs) == NEG)
1762 {
1763 negcoeff1l = 1;
1764 negcoeff1h = 0;
1765 rhs = XEXP (rhs, 0);
1766 }
1767 else if (GET_CODE (rhs) == MULT
1768 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1769 {
1770 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1771 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1772 rhs = XEXP (rhs, 0);
1773 }
1774 else if (GET_CODE (rhs) == ASHIFT
1775 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1776 && INTVAL (XEXP (rhs, 1)) >= 0
1777 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1779 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1781 rhs = XEXP (rhs, 0);
1784 if (rtx_equal_p (lhs, rhs))
1785 {
1786 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1787 rtx coeff;
1788 unsigned HOST_WIDE_INT l;
1789 HOST_WIDE_INT h;
1791 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1792 coeff = immed_double_const (l, h, mode);
1794 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1795 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1796 ? tem : 0;
1797 }
1798 }
1800 /* (a - (-b)) -> (a + b). True even for IEEE. */
1801 if (GET_CODE (op1) == NEG)
1802 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1804 /* (-x - c) may be simplified as (-c - x). */
1805 if (GET_CODE (op0) == NEG
1806 && (GET_CODE (op1) == CONST_INT
1807 || GET_CODE (op1) == CONST_DOUBLE))
1808 {
1809 tem = simplify_unary_operation (NEG, mode, op1, mode);
1810 if (tem)
1811 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1812 }
1814 /* Don't let a relocatable value get a negative coeff. */
1815 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1816 return simplify_gen_binary (PLUS, mode,
1817 op0,
1818 neg_const_int (mode, op1));
1820 /* (x - (x & y)) -> (x & ~y) */
1821 if (GET_CODE (op1) == AND)
1822 {
1823 if (rtx_equal_p (op0, XEXP (op1, 0)))
1824 {
1825 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1826 GET_MODE (XEXP (op1, 1)));
1827 return simplify_gen_binary (AND, mode, op0, tem);
1828 }
1829 if (rtx_equal_p (op0, XEXP (op1, 1)))
1830 {
1831 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1832 GET_MODE (XEXP (op1, 0)));
1833 return simplify_gen_binary (AND, mode, op0, tem);
1834 }
1835 }
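/* The identity above never borrows: every bit of x & y is also set in
   x, so the subtraction just clears those bits.  Quick standalone check
   (a sketch, not part of the compiler):

     int minus_and_holds (unsigned int x, unsigned int y)
     {
       return x - (x & y) == (x & ~y);    -- always 1
     }
*/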
1837 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1838 by reversing the comparison code if valid. */
1839 if (STORE_FLAG_VALUE == 1
1840 && trueop0 == const1_rtx
1841 && COMPARISON_P (op1)
1842 && (reversed = reversed_comparison (op1, mode)))
1845 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1846 if (GET_CODE (op1) == MULT
1847 && GET_CODE (XEXP (op1, 0)) == NEG)
1851 in1 = XEXP (XEXP (op1, 0), 0);
1852 in2 = XEXP (op1, 1);
1853 return simplify_gen_binary (PLUS, mode,
1854 simplify_gen_binary (MULT, mode,
1859 /* Canonicalize (minus (neg A) (mult B C)) to
1860 (minus (mult (neg B) C) A). */
1861 if (GET_CODE (op1) == MULT
1862 && GET_CODE (op0) == NEG)
1866 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1867 in2 = XEXP (op1, 1);
1868 return simplify_gen_binary (MINUS, mode,
1869 simplify_gen_binary (MULT, mode,
1874 /* If one of the operands is a PLUS or a MINUS, see if we can
1875 simplify this by the associative law. This will, for example,
1876 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1877 Don't use the associative law for floating point.
1878 The inaccuracy makes it nonassociative,
1879 and subtle programs can break if operations are associated. */
1881 if (INTEGRAL_MODE_P (mode)
1882 && (plus_minus_operand_p (op0)
1883 || plus_minus_operand_p (op1))
1884 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1888 case MULT:
1889 if (trueop1 == constm1_rtx)
1890 return simplify_gen_unary (NEG, mode, op0, mode);
1892 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1893 x is NaN, since x * 0 is then also NaN. Nor is it valid
1894 when the mode has signed zeros, since multiplying a negative
1895 number by 0 will give -0, not 0. */
1896 if (!HONOR_NANS (mode)
1897 && !HONOR_SIGNED_ZEROS (mode)
1898 && trueop1 == CONST0_RTX (mode)
1899 && ! side_effects_p (op0))
1902 /* In IEEE floating point, x*1 is not equivalent to x for
1903 signalling NaNs. */
1904 if (!HONOR_SNANS (mode)
1905 && trueop1 == CONST1_RTX (mode))
1906 return op0;
1908 /* Convert multiply by constant power of two into shift unless
1909 we are still generating RTL. This test is a kludge. */
1910 if (GET_CODE (trueop1) == CONST_INT
1911 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1912 /* If the mode is larger than the host word size, and the
1913 uppermost bit is set, then this isn't a power of two due
1914 to implicit sign extension. */
1915 && (width <= HOST_BITS_PER_WIDE_INT
1916 || val != HOST_BITS_PER_WIDE_INT - 1))
1917 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1919 /* Likewise for multipliers wider than a word. */
1920 if (GET_CODE (trueop1) == CONST_DOUBLE
1921 && (GET_MODE (trueop1) == VOIDmode
1922 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1923 && GET_MODE (op0) == mode
1924 && CONST_DOUBLE_LOW (trueop1) == 0
1925 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1926 return simplify_gen_binary (ASHIFT, mode, op0,
1927 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
1929 /* x*2 is x+x and x*(-1) is -x */
1930 if (GET_CODE (trueop1) == CONST_DOUBLE
1931 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
1932 && GET_MODE (op0) == mode)
1935 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1937 if (REAL_VALUES_EQUAL (d, dconst2))
1938 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1940 if (!HONOR_SNANS (mode)
1941 && REAL_VALUES_EQUAL (d, dconstm1))
1942 return simplify_gen_unary (NEG, mode, op0, mode);
1943 }
1945 /* Optimize -x * -x as x * x. */
1946 if (FLOAT_MODE_P (mode)
1947 && GET_CODE (op0) == NEG
1948 && GET_CODE (op1) == NEG
1949 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
1950 && !side_effects_p (XEXP (op0, 0)))
1951 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
1953 /* Likewise, optimize abs(x) * abs(x) as x * x. */
1954 if (SCALAR_FLOAT_MODE_P (mode)
1955 && GET_CODE (op0) == ABS
1956 && GET_CODE (op1) == ABS
1957 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
1958 && !side_effects_p (XEXP (op0, 0)))
1959 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
1961 /* Reassociate multiplication, but for floating point MULTs
1962 only when the user specifies unsafe math optimizations. */
1963 if (! FLOAT_MODE_P (mode)
1964 || flag_unsafe_math_optimizations)
1965 {
1966 tem = simplify_associative_operation (code, mode, op0, op1);
1967 if (tem)
1968 return tem;
1969 }
1970 break;
1972 case IOR:
1973 if (trueop1 == const0_rtx)
1974 return op0;
1975 if (GET_CODE (trueop1) == CONST_INT
1976 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1977 == GET_MODE_MASK (mode)))
1978 return op1;
1979 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1981 /* A | (~A) -> -1 */
1982 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1983 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1984 && ! side_effects_p (op0)
1985 && SCALAR_INT_MODE_P (mode))
1988 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
1989 if (GET_CODE (op1) == CONST_INT
1990 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1991 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
1994 /* Convert (A & B) | A to A. */
1995 if (GET_CODE (op0) == AND
1996 && (rtx_equal_p (XEXP (op0, 0), op1)
1997 || rtx_equal_p (XEXP (op0, 1), op1))
1998 && ! side_effects_p (XEXP (op0, 0))
1999 && ! side_effects_p (XEXP (op0, 1)))
2002 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2003 mode size to (rotate A CX). */
2005 if (GET_CODE (op1) == ASHIFT
2006 || GET_CODE (op1) == SUBREG)
2007 {
2008 opleft = op1;
2009 opright = op0;
2010 }
2011 else
2012 {
2013 opright = op1;
2014 opleft = op0;
2015 }
2017 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2018 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2019 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
2020 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2021 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2022 == GET_MODE_BITSIZE (mode)))
2023 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
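/* Example (illustrative sketch): the pair of shifts recognized above is
   the portable rotate idiom.  For a 32-bit value and 0 < n < 32:

     unsigned int rotl32 (unsigned int x, unsigned int n)
     {
       return (x << n) | (x >> (32 - n));   -- becomes (rotate x n)
     }

   The two shift counts sum to the mode width, which is exactly the
   INTVAL test against GET_MODE_BITSIZE (mode) above.  */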
2025 /* Same, but for ashift that has been "simplified" to a wider mode
2026 by simplify_shift_const. */
2028 if (GET_CODE (opleft) == SUBREG
2029 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2030 && GET_CODE (opright) == LSHIFTRT
2031 && GET_CODE (XEXP (opright, 0)) == SUBREG
2032 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2033 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2034 && (GET_MODE_SIZE (GET_MODE (opleft))
2035 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2036 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2037 SUBREG_REG (XEXP (opright, 0)))
2038 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2039 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2040 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2041 == GET_MODE_BITSIZE (mode)))
2042 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2043 XEXP (SUBREG_REG (opleft), 1));
2045 /* If we have (ior (and X C1) C2), simplify this by making
2046 C1 as small as possible if C1 actually changes. */
2047 if (GET_CODE (op1) == CONST_INT
2048 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2049 || INTVAL (op1) > 0)
2050 && GET_CODE (op0) == AND
2051 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2052 && GET_CODE (op1) == CONST_INT
2053 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2054 return simplify_gen_binary (IOR, mode,
2055 simplify_gen_binary
2056 (AND, mode, XEXP (op0, 0),
2057 GEN_INT (INTVAL (XEXP (op0, 1))
2058 & ~INTVAL (op1))),
2059 op1);
2061 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2062 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2063 the PLUS does not affect any of the bits in OP1: then we can do
2064 the IOR as a PLUS and we can associate. This is valid if OP1
2065 can be safely shifted left C bits. */
2066 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2067 && GET_CODE (XEXP (op0, 0)) == PLUS
2068 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2069 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2070 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2072 int count = INTVAL (XEXP (op0, 1));
2073 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2075 if (mask >> count == INTVAL (trueop1)
2076 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2077 return simplify_gen_binary (ASHIFTRT, mode,
2078 plus_constant (XEXP (op0, 0), mask),
2082 tem = simplify_associative_operation (code, mode, op0, op1);
2083 if (tem)
2084 return tem;
2085 break;
2087 case XOR:
2088 if (trueop1 == const0_rtx)
2089 return op0;
2090 if (GET_CODE (trueop1) == CONST_INT
2091 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2092 == GET_MODE_MASK (mode)))
2093 return simplify_gen_unary (NOT, mode, op0, mode);
2094 if (rtx_equal_p (trueop0, trueop1)
2095 && ! side_effects_p (op0)
2096 && GET_MODE_CLASS (mode) != MODE_CC)
2097 return CONST0_RTX (mode);
2099 /* Canonicalize XOR of the most significant bit to PLUS. */
2100 if ((GET_CODE (op1) == CONST_INT
2101 || GET_CODE (op1) == CONST_DOUBLE)
2102 && mode_signbit_p (mode, op1))
2103 return simplify_gen_binary (PLUS, mode, op0, op1);
2104 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2105 if ((GET_CODE (op1) == CONST_INT
2106 || GET_CODE (op1) == CONST_DOUBLE)
2107 && GET_CODE (op0) == PLUS
2108 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2109 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2110 && mode_signbit_p (mode, XEXP (op0, 1)))
2111 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2112 simplify_gen_binary (XOR, mode, op1,
2113 XEXP (op0, 1)));
2115 /* If we are XORing two things that have no bits in common,
2116 convert them into an IOR. This helps to detect rotation encoded
2117 using those methods and possibly other simplifications. */
2119 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2120 && (nonzero_bits (op0, mode)
2121 & nonzero_bits (op1, mode)) == 0)
2122 return (simplify_gen_binary (IOR, mode, op0, op1));
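/* For example, if nonzero_bits shows op0 can only set bits 0xf0 and
   op1 only 0x0f, no bit position is ever set in both, so XOR and IOR
   agree: 0x30 ^ 0x05 == 0x35 == 0x30 | 0x05.  The IOR form is the one
   the rotate detection above recognizes.  */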
2124 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2125 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2126 (NOT y).  */
2127 {
2128 int num_negated = 0;
2130 if (GET_CODE (op0) == NOT)
2131 num_negated++, op0 = XEXP (op0, 0);
2132 if (GET_CODE (op1) == NOT)
2133 num_negated++, op1 = XEXP (op1, 0);
2135 if (num_negated == 2)
2136 return simplify_gen_binary (XOR, mode, op0, op1);
2137 else if (num_negated == 1)
2138 return simplify_gen_unary (NOT, mode,
2139 simplify_gen_binary (XOR, mode, op0, op1),
2140 mode);
2141 }
2143 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2144 correspond to a machine insn or result in further simplifications
2145 if B is a constant. */
2147 if (GET_CODE (op0) == AND
2148 && rtx_equal_p (XEXP (op0, 1), op1)
2149 && ! side_effects_p (op1))
2150 return simplify_gen_binary (AND, mode,
2151 simplify_gen_unary (NOT, mode,
2152 XEXP (op0, 0), mode),
2153 op1);
2155 else if (GET_CODE (op0) == AND
2156 && rtx_equal_p (XEXP (op0, 0), op1)
2157 && ! side_effects_p (op1))
2158 return simplify_gen_binary (AND, mode,
2159 simplify_gen_unary (NOT, mode,
2160 XEXP (op0, 1), mode),
2161 op1);
2163 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2164 comparison if STORE_FLAG_VALUE is 1. */
2165 if (STORE_FLAG_VALUE == 1
2166 && trueop1 == const1_rtx
2167 && COMPARISON_P (op0)
2168 && (reversed = reversed_comparison (op0, mode)))
2169 return reversed;
2171 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2172 is (lt foo (const_int 0)), so we can perform the above
2173 simplification if STORE_FLAG_VALUE is 1. */
2175 if (STORE_FLAG_VALUE == 1
2176 && trueop1 == const1_rtx
2177 && GET_CODE (op0) == LSHIFTRT
2178 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2179 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2180 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
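/* For example, in SImode (lshiftrt x 31) is 1 exactly when x < 0
   (on a target with STORE_FLAG_VALUE == 1), so XORing that with 1
   yields the reversed test (ge x (const_int 0)).  */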
2182 /* (xor (comparison foo bar) (const_int sign-bit))
2183 when STORE_FLAG_VALUE is the sign bit. */
2184 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2185 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2186 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2187 && trueop1 == const_true_rtx
2188 && COMPARISON_P (op0)
2189 && (reversed = reversed_comparison (op0, mode)))
2190 return reversed;
2194 tem = simplify_associative_operation (code, mode, op0, op1);
2195 if (tem)
2196 return tem;
2197 break;
2199 case AND:
2200 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2201 return trueop1;
2202 /* If we are turning off bits already known off in OP0, we need
2203 not do an AND.  */
2204 if (GET_CODE (trueop1) == CONST_INT
2205 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2206 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2207 return op0;
2208 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2209 && GET_MODE_CLASS (mode) != MODE_CC)
2210 return op0;
2212 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2213 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2214 && ! side_effects_p (op0)
2215 && GET_MODE_CLASS (mode) != MODE_CC)
2216 return CONST0_RTX (mode);
2218 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2219 there are no nonzero bits of C outside of X's mode. */
2220 if ((GET_CODE (op0) == SIGN_EXTEND
2221 || GET_CODE (op0) == ZERO_EXTEND)
2222 && GET_CODE (trueop1) == CONST_INT
2223 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2224 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2225 & INTVAL (trueop1)) == 0)
2226 {
2227 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2228 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2229 gen_int_mode (INTVAL (trueop1),
2230 imode));
2231 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
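/* For example, (and:SI (sign_extend:SI x:QI) (const_int 0xff))
   keeps only the low byte, so it becomes
   (zero_extend:SI (and:QI x (const_int 0xff))): all bits of C lie
   inside QImode, and the extended copies of the sign bit are
   masked away.  */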
2234 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2235 insn (and may simplify more). */
2236 if (GET_CODE (op0) == XOR
2237 && rtx_equal_p (XEXP (op0, 0), op1)
2238 && ! side_effects_p (op1))
2239 return simplify_gen_binary (AND, mode,
2240 simplify_gen_unary (NOT, mode,
2241 XEXP (op0, 1), mode),
2242 op1);
2244 if (GET_CODE (op0) == XOR
2245 && rtx_equal_p (XEXP (op0, 1), op1)
2246 && ! side_effects_p (op1))
2247 return simplify_gen_binary (AND, mode,
2248 simplify_gen_unary (NOT, mode,
2249 XEXP (op0, 0), mode),
2250 op1);
2252 /* Similarly for (~(A ^ B)) & A. */
2253 if (GET_CODE (op0) == NOT
2254 && GET_CODE (XEXP (op0, 0)) == XOR
2255 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2256 && ! side_effects_p (op1))
2257 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2259 if (GET_CODE (op0) == NOT
2260 && GET_CODE (XEXP (op0, 0)) == XOR
2261 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2262 && ! side_effects_p (op1))
2263 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2265 /* Convert (A | B) & A to A. */
2266 if (GET_CODE (op0) == IOR
2267 && (rtx_equal_p (XEXP (op0, 0), op1)
2268 || rtx_equal_p (XEXP (op0, 1), op1))
2269 && ! side_effects_p (XEXP (op0, 0))
2270 && ! side_effects_p (XEXP (op0, 1)))
2271 return op1;
2273 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2274 ((A & N) + B) & M -> (A + B) & M
2275 Similarly if (N & M) == 0,
2276 ((A | N) + B) & M -> (A + B) & M
2277 and for - instead of + and/or ^ instead of |. */
2278 if (GET_CODE (trueop1) == CONST_INT
2279 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2280 && ~INTVAL (trueop1)
2281 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2282 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2283 {
2284 rtx pmop[2];
2285 int which;
2287 pmop[0] = XEXP (op0, 0);
2288 pmop[1] = XEXP (op0, 1);
2290 for (which = 0; which < 2; which++)
2291 {
2292 tem = pmop[which];
2293 switch (GET_CODE (tem))
2294 {
2295 case AND:
2296 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2297 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2298 == INTVAL (trueop1))
2299 pmop[which] = XEXP (tem, 0);
2300 break;
2301 case IOR:
2302 case XOR:
2303 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2304 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2305 pmop[which] = XEXP (tem, 0);
2306 break;
2307 default:
2308 break;
2309 }
2310 }
2312 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2313 {
2314 tem = simplify_gen_binary (GET_CODE (op0), mode,
2315 pmop[0], pmop[1]);
2316 return simplify_gen_binary (code, mode, tem, op1);
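/* For example, with M == 7 and N == 0xff we have (N & M) == M, so
   ((A & 0xff) + B) & 7 becomes (A + B) & 7: carries propagate only
   upward, so the bits of A above the mask cannot influence the low
   three bits of the sum.  */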
2317 }
2318 }
2319 tem = simplify_associative_operation (code, mode, op0, op1);
2320 if (tem)
2321 return tem;
2322 break;
2324 case UDIV:
2325 /* 0/x is 0 (or x&0 if x has side-effects). */
2326 if (trueop0 == CONST0_RTX (mode))
2327 {
2328 if (side_effects_p (op1))
2329 return simplify_gen_binary (AND, mode, op1, trueop0);
2330 return trueop0;
2331 }
2332 /* x/1 is x.  */
2333 if (trueop1 == CONST1_RTX (mode))
2334 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2335 /* Convert divide by power of two into shift. */
2336 if (GET_CODE (trueop1) == CONST_INT
2337 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2338 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
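/* For example, (udiv x (const_int 8)) becomes
   (lshiftrt x (const_int 3)): 100 / 8 == 12 == 100 >> 3.  This is
   only valid for unsigned division, hence the UDIV case.  */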
2342 /* Handle floating point and integers separately. */
2343 if (SCALAR_FLOAT_MODE_P (mode))
2345 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2346 safe for modes with NaNs, since 0.0 / 0.0 will then be
2347 NaN rather than 0.0. Nor is it safe for modes with signed
2348 zeros, since dividing 0 by a negative number gives -0.0 */
2349 if (trueop0 == CONST0_RTX (mode)
2350 && !HONOR_NANS (mode)
2351 && !HONOR_SIGNED_ZEROS (mode)
2352 && ! side_effects_p (op1))
2355 if (trueop1 == CONST1_RTX (mode)
2356 && !HONOR_SNANS (mode))
2359 if (GET_CODE (trueop1) == CONST_DOUBLE
2360 && trueop1 != CONST0_RTX (mode))
2363 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2366 if (REAL_VALUES_EQUAL (d, dconstm1)
2367 && !HONOR_SNANS (mode))
2368 return simplify_gen_unary (NEG, mode, op0, mode);
2370 /* Change FP division by a constant into multiplication.
2371 Only do this with -funsafe-math-optimizations. */
2372 if (flag_unsafe_math_optimizations
2373 && !REAL_VALUES_EQUAL (d, dconst0))
2375 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2376 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2377 return simplify_gen_binary (MULT, mode, op0, tem);
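/* For example, x / 4.0 becomes x * 0.25.  For divisors such as 3.0
   the reciprocal is not exactly representable, which is why the
   general case is guarded by flag_unsafe_math_optimizations.  */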
2383 /* 0/x is 0 (or x&0 if x has side-effects). */
2384 if (trueop0 == CONST0_RTX (mode))
2385 {
2386 if (side_effects_p (op1))
2387 return simplify_gen_binary (AND, mode, op1, trueop0);
2388 return trueop0;
2389 }
2390 /* x/1 is x.  */
2391 if (trueop1 == CONST1_RTX (mode))
2392 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2393 /* x/-1 is -x.  */
2394 if (trueop1 == constm1_rtx)
2395 {
2396 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2397 return simplify_gen_unary (NEG, mode, x, mode);
2398 }
2399 }
2400 break;
2402 case UMOD:
2403 /* 0%x is 0 (or x&0 if x has side-effects).  */
2404 if (trueop0 == CONST0_RTX (mode))
2405 {
2406 if (side_effects_p (op1))
2407 return simplify_gen_binary (AND, mode, op1, trueop0);
2408 return trueop0;
2409 }
2410 /* x%1 is 0 (or x&0 if x has side-effects).  */
2411 if (trueop1 == CONST1_RTX (mode))
2412 {
2413 if (side_effects_p (op0))
2414 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2415 return CONST0_RTX (mode);
2416 }
2417 /* Implement modulus by power of two as AND. */
2418 if (GET_CODE (trueop1) == CONST_INT
2419 && exact_log2 (INTVAL (trueop1)) > 0)
2420 return simplify_gen_binary (AND, mode, op0,
2421 GEN_INT (INTVAL (op1) - 1));
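/* For example, (umod x (const_int 8)) becomes
   (and x (const_int 7)): 100 % 8 == 4 == (100 & 7).  Like the UDIV
   case above, this relies on the operands being unsigned.  */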
2422 break;
2424 case MOD:
2425 /* 0%x is 0 (or x&0 if x has side-effects).  */
2426 if (trueop0 == CONST0_RTX (mode))
2427 {
2428 if (side_effects_p (op1))
2429 return simplify_gen_binary (AND, mode, op1, trueop0);
2430 return trueop0;
2431 }
2432 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
2433 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2434 {
2435 if (side_effects_p (op0))
2436 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2437 return CONST0_RTX (mode);
2438 }
2439 break;
2441 case ROTATERT:
2442 case ROTATE:
2443 case ASHIFTRT:
2444 if (trueop1 == CONST0_RTX (mode))
2445 return op0;
2446 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2447 return op0;
2448 /* Rotating ~0 always results in ~0. */
2449 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2450 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2451 && ! side_effects_p (op1))
2452 return op0;
2453 break;
2455 case ASHIFT:
2456 case SS_ASHIFT:
2457 if (trueop1 == CONST0_RTX (mode))
2458 return op0;
2459 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2460 return op0;
2461 break;
2463 case LSHIFTRT:
2464 if (trueop1 == CONST0_RTX (mode))
2465 return op0;
2466 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2467 return op0;
2468 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2469 if (GET_CODE (op0) == CLZ
2470 && GET_CODE (trueop1) == CONST_INT
2471 && STORE_FLAG_VALUE == 1
2472 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2474 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2475 unsigned HOST_WIDE_INT zero_val = 0;
2477 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2478 && zero_val == GET_MODE_BITSIZE (imode)
2479 && INTVAL (trueop1) == exact_log2 (zero_val))
2480 return simplify_gen_relational (EQ, mode, imode,
2481 XEXP (op0, 0), const0_rtx);
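/* For example, on a target where CLZ of zero is defined as 32 in
   SImode, (lshiftrt (clz:SI x) (const_int 5)) is 1 exactly when the
   count is 32, i.e. when x == 0; every nonzero x has a count of at
   most 31, which shifts down to 0.  */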
2486 if (width <= HOST_BITS_PER_WIDE_INT
2487 && GET_CODE (trueop1) == CONST_INT
2488 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2489 && ! side_effects_p (op0))
2490 return op1;
2491 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2492 return op0;
2493 tem = simplify_associative_operation (code, mode, op0, op1);
2494 if (tem)
2495 return tem;
2496 break;
2498 case SMAX:
2499 if (width <= HOST_BITS_PER_WIDE_INT
2500 && GET_CODE (trueop1) == CONST_INT
2501 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2502 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2503 && ! side_effects_p (op0))
2504 return op1;
2505 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2506 return op0;
2507 tem = simplify_associative_operation (code, mode, op0, op1);
2508 if (tem)
2509 return tem;
2510 break;
2512 case UMIN:
2513 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2514 return trueop1;
2515 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2516 return op0;
2517 tem = simplify_associative_operation (code, mode, op0, op1);
2518 if (tem)
2519 return tem;
2520 break;
2522 case UMAX:
2523 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2524 return op1;
2525 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2526 return op0;
2527 tem = simplify_associative_operation (code, mode, op0, op1);
2528 if (tem)
2529 return tem;
2530 break;
2532 case SS_PLUS:
2533 case US_PLUS:
2534 case SS_MINUS:
2535 case US_MINUS:
2536 /* ??? There are simplifications that can be done.  */
2537 return 0;
2539 case VEC_SELECT:
2540 if (!VECTOR_MODE_P (mode))
2542 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2543 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2544 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2545 gcc_assert (XVECLEN (trueop1, 0) == 1);
2546 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2548 if (GET_CODE (trueop0) == CONST_VECTOR)
2549 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2550 (trueop1, 0, 0)));
2554 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2555 gcc_assert (GET_MODE_INNER (mode)
2556 == GET_MODE_INNER (GET_MODE (trueop0)));
2557 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2559 if (GET_CODE (trueop0) == CONST_VECTOR)
2561 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2562 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2563 rtvec v = rtvec_alloc (n_elts);
2566 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2567 for (i = 0; i < n_elts; i++)
2569 rtx x = XVECEXP (trueop1, 0, i);
2571 gcc_assert (GET_CODE (x) == CONST_INT);
2572 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2576 return gen_rtx_CONST_VECTOR (mode, v);
2580 if (XVECLEN (trueop1, 0) == 1
2581 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2582 && GET_CODE (trueop0) == VEC_CONCAT)
2585 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2587 /* Try to find the element in the VEC_CONCAT. */
2588 while (GET_MODE (vec) != mode
2589 && GET_CODE (vec) == VEC_CONCAT)
2591 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2592 if (offset < vec_size)
2593 vec = XEXP (vec, 0);
2597 vec = XEXP (vec, 1);
2599 vec = avoid_constant_pool_reference (vec);
2602 if (GET_MODE (vec) == mode)
2603 return vec;
2609 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2610 ? GET_MODE (trueop0)
2611 : GET_MODE_INNER (mode));
2612 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2613 ? GET_MODE (trueop1)
2614 : GET_MODE_INNER (mode));
2616 gcc_assert (VECTOR_MODE_P (mode));
2617 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2618 == GET_MODE_SIZE (mode));
2620 if (VECTOR_MODE_P (op0_mode))
2621 gcc_assert (GET_MODE_INNER (mode)
2622 == GET_MODE_INNER (op0_mode));
2624 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2626 if (VECTOR_MODE_P (op1_mode))
2627 gcc_assert (GET_MODE_INNER (mode)
2628 == GET_MODE_INNER (op1_mode));
2630 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2632 if ((GET_CODE (trueop0) == CONST_VECTOR
2633 || GET_CODE (trueop0) == CONST_INT
2634 || GET_CODE (trueop0) == CONST_DOUBLE)
2635 && (GET_CODE (trueop1) == CONST_VECTOR
2636 || GET_CODE (trueop1) == CONST_INT
2637 || GET_CODE (trueop1) == CONST_DOUBLE))
2639 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2640 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2641 rtvec v = rtvec_alloc (n_elts);
2643 unsigned in_n_elts = 1;
2645 if (VECTOR_MODE_P (op0_mode))
2646 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2647 for (i = 0; i < n_elts; i++)
2651 if (!VECTOR_MODE_P (op0_mode))
2652 RTVEC_ELT (v, i) = trueop0;
2654 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2658 if (!VECTOR_MODE_P (op1_mode))
2659 RTVEC_ELT (v, i) = trueop1;
2661 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2666 return gen_rtx_CONST_VECTOR (mode, v);
2679 simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2682 HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2684 unsigned int width = GET_MODE_BITSIZE (mode);
2686 if (VECTOR_MODE_P (mode)
2687 && code != VEC_CONCAT
2688 && GET_CODE (op0) == CONST_VECTOR
2689 && GET_CODE (op1) == CONST_VECTOR)
2691 unsigned n_elts = GET_MODE_NUNITS (mode);
2692 enum machine_mode op0mode = GET_MODE (op0);
2693 unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2694 enum machine_mode op1mode = GET_MODE (op1);
2695 unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2696 rtvec v = rtvec_alloc (n_elts);
2699 gcc_assert (op0_n_elts == n_elts);
2700 gcc_assert (op1_n_elts == n_elts);
2701 for (i = 0; i < n_elts; i++)
2703 rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2704 CONST_VECTOR_ELT (op0, i),
2705 CONST_VECTOR_ELT (op1, i));
2708 RTVEC_ELT (v, i) = x;
2711 return gen_rtx_CONST_VECTOR (mode, v);
2714 if (VECTOR_MODE_P (mode)
2715 && code == VEC_CONCAT
2716 && CONSTANT_P (op0) && CONSTANT_P (op1))
2718 unsigned n_elts = GET_MODE_NUNITS (mode);
2719 rtvec v = rtvec_alloc (n_elts);
2721 gcc_assert (n_elts >= 2);
2724 gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2725 gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2727 RTVEC_ELT (v, 0) = op0;
2728 RTVEC_ELT (v, 1) = op1;
2732 unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2733 unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2736 gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2737 gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2738 gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2740 for (i = 0; i < op0_n_elts; ++i)
2741 RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2742 for (i = 0; i < op1_n_elts; ++i)
2743 RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2746 return gen_rtx_CONST_VECTOR (mode, v);
2749 if (SCALAR_FLOAT_MODE_P (mode)
2750 && GET_CODE (op0) == CONST_DOUBLE
2751 && GET_CODE (op1) == CONST_DOUBLE
2752 && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2763 real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2765 real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2767 for (i = 0; i < 4; i++)
2784 real_from_target (&r, tmp0, mode);
2785 return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2789 REAL_VALUE_TYPE f0, f1, value, result;
2792 REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2793 REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2794 real_convert (&f0, mode, &f0);
2795 real_convert (&f1, mode, &f1);
2797 if (HONOR_SNANS (mode)
2798 && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2802 && REAL_VALUES_EQUAL (f1, dconst0)
2803 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2806 if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2807 && flag_trapping_math
2808 && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2810 int s0 = REAL_VALUE_NEGATIVE (f0);
2811 int s1 = REAL_VALUE_NEGATIVE (f1);
2816 /* Inf + -Inf = NaN plus exception. */
2821 /* Inf - Inf = NaN plus exception. */
2826 /* Inf / Inf = NaN plus exception. */
2833 if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2834 && flag_trapping_math
2835 && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2836 || (REAL_VALUE_ISINF (f1)
2837 && REAL_VALUES_EQUAL (f0, dconst0))))
2838 /* Inf * 0 = NaN plus exception. */
2841 inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2843 real_convert (&result, mode, &value);
2845 /* Don't constant fold this floating point operation if
2846 the result has overflowed and flag_trapping_math is set.  */
2848 if (flag_trapping_math
2849 && MODE_HAS_INFINITIES (mode)
2850 && REAL_VALUE_ISINF (result)
2851 && !REAL_VALUE_ISINF (f0)
2852 && !REAL_VALUE_ISINF (f1))
2853 /* Overflow plus exception. */
2856 /* Don't constant fold this floating point operation if the
2857 result may depend upon the run-time rounding mode and
2858 flag_rounding_math is set, or if GCC's software emulation
2859 is unable to accurately represent the result. */
2861 if ((flag_rounding_math
2862 || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2863 && !flag_unsafe_math_optimizations))
2864 && (inexact || !real_identical (&result, &value)))
2867 return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2871 /* We can fold some multi-word operations. */
2872 if (GET_MODE_CLASS (mode) == MODE_INT
2873 && width == HOST_BITS_PER_WIDE_INT * 2
2874 && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2875 && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2877 unsigned HOST_WIDE_INT l1, l2, lv, lt;
2878 HOST_WIDE_INT h1, h2, hv, ht;
2880 if (GET_CODE (op0) == CONST_DOUBLE)
2881 l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2883 l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2885 if (GET_CODE (op1) == CONST_DOUBLE)
2886 l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2888 l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2893 /* A - B == A + (-B). */
2894 neg_double (l2, h2, &lv, &hv);
2897 /* Fall through.... */
2900 add_double (l1, h1, l2, h2, &lv, &hv);
2904 mul_double (l1, h1, l2, h2, &lv, &hv);
2908 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2909 &lv, &hv, <, &ht))
2914 if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2915 <, &ht, &lv, &hv))
2920 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2921 &lv, &hv, <, &ht))
2926 if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2927 <, &ht, &lv, &hv))
2932 lv = l1 & l2, hv = h1 & h2;
2936 lv = l1 | l2, hv = h1 | h2;
2940 lv = l1 ^ l2, hv = h1 ^ h2;
2946 && ((unsigned HOST_WIDE_INT) l1
2947 < (unsigned HOST_WIDE_INT) l2)))
2956 && ((unsigned HOST_WIDE_INT) l1
2957 > (unsigned HOST_WIDE_INT) l2)))
2964 if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2966 && ((unsigned HOST_WIDE_INT) l1
2967 < (unsigned HOST_WIDE_INT) l2)))
2974 if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2976 && ((unsigned HOST_WIDE_INT) l1
2977 > (unsigned HOST_WIDE_INT) l2)))
2983 case LSHIFTRT: case ASHIFTRT:
2985 case ROTATE: case ROTATERT:
2986 if (SHIFT_COUNT_TRUNCATED)
2987 l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2989 if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2992 if (code == LSHIFTRT || code == ASHIFTRT)
2993 rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2995 else if (code == ASHIFT)
2996 lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2997 else if (code == ROTATE)
2998 lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2999 else /* code == ROTATERT */
3000 rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
3007 return immed_double_const (lv, hv, mode);
3010 if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3011 && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3013 /* Get the integer argument values in two forms:
3014 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
3016 arg0 = INTVAL (op0);
3017 arg1 = INTVAL (op1);
3019 if (width < HOST_BITS_PER_WIDE_INT)
3021 arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3022 arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3024 arg0s = arg0;
3025 if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3026 arg0s |= ((HOST_WIDE_INT) (-1) << width);
3028 arg1s = arg1;
3029 if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3030 arg1s |= ((HOST_WIDE_INT) (-1) << width);
3031 }
3032 else
3033 {
3034 arg0s = arg0;
3035 arg1s = arg1;
3036 }
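/* For example, with width == 8 and op0 == (const_int -1), ARG0
   holds the zero-extended value 0xff == 255 while ARG0S holds the
   sign-extended value -1; the signed cases below (DIV, MOD, SMIN,
   ...) use the S forms and the unsigned ones (UDIV, UMOD, UMIN,
   ...) the plain forms.  */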
3038 /* Compute the value of the arithmetic.  */
3040 switch (code)
3041 {
3042 case PLUS:
3043 val = arg0s + arg1s;
3044 break;
3046 case MINUS:
3047 val = arg0s - arg1s;
3048 break;
3050 case MULT:
3051 val = arg0s * arg1s;
3052 break;
3054 case DIV:
3055 if (arg1s == 0
3056 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3057 && arg1s == -1))
3058 return 0;
3059 val = arg0s / arg1s;
3060 break;
3062 case MOD:
3063 if (arg1s == 0
3064 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3065 && arg1s == -1))
3066 return 0;
3067 val = arg0s % arg1s;
3068 break;
3070 case UDIV:
3071 if (arg1 == 0
3072 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3073 && arg1s == -1))
3074 return 0;
3075 val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3076 break;
3078 case UMOD:
3079 if (arg1 == 0
3080 || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3081 && arg1s == -1))
3082 return 0;
3083 val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3084 break;
3086 case AND:
3087 val = arg0 & arg1;
3088 break;
3090 case IOR:
3091 val = arg0 | arg1;
3092 break;
3094 case XOR:
3095 val = arg0 ^ arg1;
3096 break;
3098 case LSHIFTRT:
3099 case ASHIFT:
3100 case ASHIFTRT:
3101 /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3102 the value is in range. We can't return any old value for
3103 out-of-range arguments because either the middle-end (via
3104 shift_truncation_mask) or the back-end might be relying on
3105 target-specific knowledge. Nor can we rely on
3106 shift_truncation_mask, since the shift might not be part of an
3107 ashlM3, lshrM3 or ashrM3 instruction. */
3108 if (SHIFT_COUNT_TRUNCATED)
3109 arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3110 else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3111 return 0;
3113 val = (code == ASHIFT
3114 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3115 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3117 /* Sign-extend the result for arithmetic right shifts. */
3118 if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3119 val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3120 break;
3122 case ROTATERT:
3123 if (arg1 < 0)
3124 return 0;
3125 arg1 %= width;
3127 val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3128 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3129 break;
3131 case ROTATE:
3132 if (arg1 < 0)
3133 return 0;
3134 arg1 %= width;
3136 val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3137 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
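/* For example, rotating the 8-bit value 0xb1 left by 4 computes
   (0xb1 << 4) | (0xb1 >> 4) == 0xb10 | 0x0b; gen_int_mode below
   truncates the result to the mode, giving 0x1b.  */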
3140 case COMPARE:
3141 /* Do nothing here.  */
3142 return 0;
3144 case SMIN:
3145 val = arg0s <= arg1s ? arg0s : arg1s;
3146 break;
3148 case UMIN:
3149 val = ((unsigned HOST_WIDE_INT) arg0
3150 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3151 break;
3153 case SMAX:
3154 val = arg0s > arg1s ? arg0s : arg1s;
3155 break;
3157 case UMAX:
3158 val = ((unsigned HOST_WIDE_INT) arg0
3159 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3160 break;
3162 case SS_PLUS:
3163 case US_PLUS:
3164 case SS_MINUS:
3165 case US_MINUS:
3167 /* ??? There are simplifications that can be done.  */
3168 return 0;
3170 default:
3171 gcc_unreachable ();
3172 }
3174 return gen_int_mode (val, mode);
3182 /* Simplify a PLUS or MINUS, at least one of whose operands may be another
3183 PLUS or MINUS.
3185 Rather than test for specific cases, we do this by a brute-force method
3186 and do all possible simplifications until no more changes occur.  Then
3187 we rebuild the operation.  */
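/* For example, (minus (plus x 5) (plus x 3)) expands into the
   operand list {x, +5, -x, -3}; the pairwise combination below
   cancels x against -x and folds the constants, leaving
   (const_int 2).  */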
3189 struct simplify_plus_minus_op_data
3196 simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3198 const struct simplify_plus_minus_op_data *d1 = p1;
3199 const struct simplify_plus_minus_op_data *d2 = p2;
3202 result = (commutative_operand_precedence (d2->op)
3203 - commutative_operand_precedence (d1->op));
3207 /* Group together equal REGs to do more simplification. */
3208 if (REG_P (d1->op) && REG_P (d2->op))
3209 return REGNO (d1->op) - REGNO (d2->op);
3215 simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3218 struct simplify_plus_minus_op_data ops[8];
3220 int n_ops = 2, input_ops = 2;
3221 int changed, n_constants = 0, canonicalized = 0;
3224 memset (ops, 0, sizeof ops);
3226 /* Set up the two operands and then expand them until nothing has been
3227 changed. If we run out of room in our array, give up; this should
3228 almost never happen. */
3230 ops[0].op = op0;
3231 ops[0].neg = 0;
3232 ops[1].op = op1;
3233 ops[1].neg = (code == MINUS);
3239 for (i = 0; i < n_ops; i++)
3241 rtx this_op = ops[i].op;
3242 int this_neg = ops[i].neg;
3243 enum rtx_code this_code = GET_CODE (this_op);
3252 ops[n_ops].op = XEXP (this_op, 1);
3253 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3256 ops[i].op = XEXP (this_op, 0);
3259 canonicalized |= this_neg;
3263 ops[i].op = XEXP (this_op, 0);
3264 ops[i].neg = ! this_neg;
3271 && GET_CODE (XEXP (this_op, 0)) == PLUS
3272 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3273 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3275 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3276 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3277 ops[n_ops].neg = this_neg;
3285 /* ~a -> (-a - 1) */
3288 ops[n_ops].op = constm1_rtx;
3289 ops[n_ops++].neg = this_neg;
3290 ops[i].op = XEXP (this_op, 0);
3291 ops[i].neg = !this_neg;
3301 ops[i].op = neg_const_int (mode, this_op);
3315 if (n_constants > 1)
3318 gcc_assert (n_ops >= 2);
3320 /* If we only have two operands, we can avoid the loops. */
3323 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3326 /* Get the two operands. Be careful with the order, especially for
3327 the cases where code == MINUS. */
3328 if (ops[0].neg && ops[1].neg)
3330 lhs = gen_rtx_NEG (mode, ops[0].op);
3333 else if (ops[0].neg)
3344 return simplify_const_binary_operation (code, mode, lhs, rhs);
3347 /* Now simplify each pair of operands until nothing changes. */
3350 /* Insertion sort is good enough for an eight-element array. */
3351 for (i = 1; i < n_ops; i++)
3353 struct simplify_plus_minus_op_data save;
3355 if (simplify_plus_minus_op_data_cmp (&ops[j], &ops[i]) < 0)
3361 ops[j + 1] = ops[j];
3362 while (j-- && simplify_plus_minus_op_data_cmp (&ops[j], &save) > 0);
3366 /* This is only useful the first time through. */
3371 for (i = n_ops - 1; i > 0; i--)
3372 for (j = i - 1; j >= 0; j--)
3374 rtx lhs = ops[j].op, rhs = ops[i].op;
3375 int lneg = ops[j].neg, rneg = ops[i].neg;
3377 if (lhs != 0 && rhs != 0)
3379 enum rtx_code ncode = PLUS;
3385 tem = lhs, lhs = rhs, rhs = tem;
3387 else if (swap_commutative_operands_p (lhs, rhs))
3388 tem = lhs, lhs = rhs, rhs = tem;
3390 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3391 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3393 rtx tem_lhs, tem_rhs;
3395 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3396 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3397 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3399 if (tem && !CONSTANT_P (tem))
3400 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3403 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3405 /* Reject "simplifications" that just wrap the two
3406 arguments in a CONST. Failure to do so can result
3407 in infinite recursion with simplify_binary_operation
3408 when it calls us to simplify CONST operations. */
3410 && ! (GET_CODE (tem) == CONST
3411 && GET_CODE (XEXP (tem, 0)) == ncode
3412 && XEXP (XEXP (tem, 0), 0) == lhs
3413 && XEXP (XEXP (tem, 0), 1) == rhs))
3416 if (GET_CODE (tem) == NEG)
3417 tem = XEXP (tem, 0), lneg = !lneg;
3418 if (GET_CODE (tem) == CONST_INT && lneg)
3419 tem = neg_const_int (mode, tem), lneg = 0;
3423 ops[j].op = NULL_RTX;
3429 /* Pack all the operands to the lower-numbered entries. */
3430 for (i = 0, j = 0; j < n_ops; j++)
3440 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3442 && GET_CODE (ops[1].op) == CONST_INT
3443 && CONSTANT_P (ops[0].op)
3445 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3447 /* We suppressed creation of trivial CONST expressions in the
3448 combination loop to avoid recursion. Create one manually now.
3449 The combination loop should have ensured that there is exactly
3450 one CONST_INT, and the sort will have ensured that it is last
3451 in the array and that any other constant will be next-to-last. */
3454 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3455 && CONSTANT_P (ops[n_ops - 2].op))
3457 rtx value = ops[n_ops - 1].op;
3458 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3459 value = neg_const_int (mode, value);
3460 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3464 /* Put a non-negated operand first, if possible. */
3466 for (i = 0; i < n_ops && ops[i].neg; i++)
3469 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3478 /* Now make the result by performing the requested operations. */
3480 for (i = 1; i < n_ops; i++)
3481 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3482 mode, result, ops[i].op);
3487 /* Check whether an operand is suitable for calling simplify_plus_minus. */
3489 plus_minus_operand_p (rtx x)
3491 return GET_CODE (x) == PLUS
3492 || GET_CODE (x) == MINUS
3493 || (GET_CODE (x) == CONST
3494 && GET_CODE (XEXP (x, 0)) == PLUS
3495 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3496 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3499 /* Like simplify_binary_operation except used for relational operators.
3500 MODE is the mode of the result.  If MODE is VOIDmode, the operands
3501 must not both be VOIDmode.
3503 CMP_MODE specifies the mode in which the comparison is done, so it is
3504 the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
3505 the operands or, if both are VOIDmode, the operands are compared in
3506 "infinite precision".  */
3508 simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3509 enum machine_mode cmp_mode, rtx op0, rtx op1)
3511 rtx tem, trueop0, trueop1;
3513 if (cmp_mode == VOIDmode)
3514 cmp_mode = GET_MODE (op0);
3515 if (cmp_mode == VOIDmode)
3516 cmp_mode = GET_MODE (op1);
3518 tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3519 if (tem)
3520 {
3521 if (SCALAR_FLOAT_MODE_P (mode))
3522 {
3523 if (tem == const0_rtx)
3524 return CONST0_RTX (mode);
3525 #ifdef FLOAT_STORE_FLAG_VALUE
3527 REAL_VALUE_TYPE val;
3528 val = FLOAT_STORE_FLAG_VALUE (mode);
3529 return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3535 if (VECTOR_MODE_P (mode))
3537 if (tem == const0_rtx)
3538 return CONST0_RTX (mode);
3539 #ifdef VECTOR_STORE_FLAG_VALUE
3544 rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3545 if (val == NULL_RTX)
3546 return NULL_RTX;
3547 if (val == const1_rtx)
3548 return CONST1_RTX (mode);
3550 units = GET_MODE_NUNITS (mode);
3551 v = rtvec_alloc (units);
3552 for (i = 0; i < units; i++)
3553 RTVEC_ELT (v, i) = val;
3554 return gen_rtx_raw_CONST_VECTOR (mode, v);
3564 /* For the following tests, ensure const0_rtx is op1. */
3565 if (swap_commutative_operands_p (op0, op1)
3566 || (op0 == const0_rtx && op1 != const0_rtx))
3567 tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3569 /* If op0 is a compare, extract the comparison arguments from it. */
3570 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3571 return simplify_relational_operation (code, mode, VOIDmode,
3572 XEXP (op0, 0), XEXP (op0, 1));
3574 if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3578 trueop0 = avoid_constant_pool_reference (op0);
3579 trueop1 = avoid_constant_pool_reference (op1);
3580 return simplify_relational_operation_1 (code, mode, cmp_mode,
3584 /* This part of simplify_relational_operation is only used when CMP_MODE
3585 is not in class MODE_CC (i.e. it is a real comparison).
3587 MODE is the mode of the result, while CMP_MODE specifies the mode
3588 in which the comparison is done, so it is the mode of the operands.  */
3591 simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3592 enum machine_mode cmp_mode, rtx op0, rtx op1)
3594 enum rtx_code op0code = GET_CODE (op0);
3596 if (GET_CODE (op1) == CONST_INT)
3598 if (INTVAL (op1) == 0 && COMPARISON_P (op0))
3600 /* If op0 is a comparison, extract the comparison arguments
3601 from it.  */
3602 if (code == NE)
3603 {
3604 if (GET_MODE (op0) == mode)
3605 return simplify_rtx (op0);
3607 return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3608 XEXP (op0, 0), XEXP (op0, 1));
3610 else if (code == EQ)
3612 enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3613 if (new_code != UNKNOWN)
3614 return simplify_gen_relational (new_code, mode, VOIDmode,
3615 XEXP (op0, 0), XEXP (op0, 1));
3620 /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
3621 if ((code == EQ || code == NE)
3622 && (op0code == PLUS || op0code == MINUS)
3623 && CONSTANT_P (op1)
3624 && CONSTANT_P (XEXP (op0, 1))
3625 && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3626 {
3627 rtx x = XEXP (op0, 0);
3628 rtx c = XEXP (op0, 1);
3630 c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3631 cmp_mode, op1, c);
3632 return simplify_gen_relational (code, mode, cmp_mode, x, c);
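/* For example, (eq (plus x (const_int 3)) (const_int 7)) becomes
   (eq x (const_int 4)), folding 7 - 3 on the constant side; for
   MINUS the constant is added back instead.  */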
3635 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
3636 the same as (zero_extract:SI FOO (const_int 1) BAR). */
3637 if (code == NE
3638 && op1 == const0_rtx
3639 && GET_MODE_CLASS (mode) == MODE_INT
3640 && cmp_mode != VOIDmode
3641 /* ??? Work-around BImode bugs in the ia64 backend. */
3643 && cmp_mode != BImode
3644 && nonzero_bits (op0, cmp_mode) == 1
3645 && STORE_FLAG_VALUE == 1)
3646 return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3647 ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3648 : lowpart_subreg (mode, op0, cmp_mode);
3650 /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
3651 if ((code == EQ || code == NE)
3652 && op1 == const0_rtx
3653 && op0code == XOR)
3654 return simplify_gen_relational (code, mode, cmp_mode,
3655 XEXP (op0, 0), XEXP (op0, 1));
3657 /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
3658 if ((code == EQ || code == NE)
3659 && op0code == XOR
3660 && rtx_equal_p (XEXP (op0, 0), op1)
3661 && !side_effects_p (XEXP (op0, 0)))
3662 return simplify_gen_relational (code, mode, cmp_mode,
3663 XEXP (op0, 1), const0_rtx);
3665 /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
3666 if ((code == EQ || code == NE)
3667 && op0code == XOR
3668 && rtx_equal_p (XEXP (op0, 1), op1)
3669 && !side_effects_p (XEXP (op0, 1)))
3670 return simplify_gen_relational (code, mode, cmp_mode,
3671 XEXP (op0, 0), const0_rtx);
3673 /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
3674 if ((code == EQ || code == NE)
3675 && op0code == XOR
3676 && (GET_CODE (op1) == CONST_INT
3677 || GET_CODE (op1) == CONST_DOUBLE)
3678 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3679 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3680 return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3681 simplify_gen_binary (XOR, cmp_mode,
3682 XEXP (op0, 1), op1));
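/* For example, (eq (xor x (const_int 5)) (const_int 3)) becomes
   (eq x (const_int 6)), since x ^ 5 == 3 holds exactly when
   x == 5 ^ 3.  */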
3687 /* Check if the given comparison (done in the given MODE) is actually a
3688 tautology or a contradiction.
3689 If no simplification is possible, this function returns zero.
3690 Otherwise, it returns either const_true_rtx or const0_rtx. */
3693 simplify_const_relational_operation (enum rtx_code code,
3694 enum machine_mode mode,
3697 int equal, op0lt, op0ltu, op1lt, op1ltu;
3702 gcc_assert (mode != VOIDmode
3703 || (GET_MODE (op0) == VOIDmode
3704 && GET_MODE (op1) == VOIDmode));
3706 /* If op0 is a compare, extract the comparison arguments from it. */
3707 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3709 op1 = XEXP (op0, 1);
3710 op0 = XEXP (op0, 0);
3712 if (GET_MODE (op0) != VOIDmode)
3713 mode = GET_MODE (op0);
3714 else if (GET_MODE (op1) != VOIDmode)
3715 mode = GET_MODE (op1);
3720 /* We can't simplify MODE_CC values since we don't know what the
3721 actual comparison is. */
3722 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3723 return 0;
3725 /* Make sure the constant is second. */
3726 if (swap_commutative_operands_p (op0, op1))
3728 tem = op0, op0 = op1, op1 = tem;
3729 code = swap_condition (code);
3732 trueop0 = avoid_constant_pool_reference (op0);
3733 trueop1 = avoid_constant_pool_reference (op1);
3735 /* For integer comparisons of A and B maybe we can simplify A - B and can
3736 then simplify a comparison of that with zero. If A and B are both either
3737 a register or a CONST_INT, this can't help; testing for these cases will
3738 prevent infinite recursion here and speed things up.
3740 We can only do this for EQ and NE comparisons, as otherwise we may
3741 lose or introduce overflow that we cannot disregard as undefined,
3742 since we do not know the signedness of the operation on either the
3743 left or the right hand side of the comparison.  */
3745 if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3746 && (code == EQ || code == NE)
3747 && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3748 && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3749 && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3750 /* We cannot do this if tem is a nonzero address. */
3751 && ! nonzero_address_p (tem))
3752 return simplify_const_relational_operation (signed_condition (code),
3753 mode, tem, const0_rtx);
3755 if (! HONOR_NANS (mode) && code == ORDERED)
3756 return const_true_rtx;
3758 if (! HONOR_NANS (mode) && code == UNORDERED)
3759 return const0_rtx;
3761 /* For modes without NaNs, if the two operands are equal, we know the
3762 result except if they have side-effects. */
3763 if (! HONOR_NANS (GET_MODE (trueop0))
3764 && rtx_equal_p (trueop0, trueop1)
3765 && ! side_effects_p (trueop0))
3766 equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3768 /* If the operands are floating-point constants, see if we can fold
3769 the result.  */
3770 else if (GET_CODE (trueop0) == CONST_DOUBLE
3771 && GET_CODE (trueop1) == CONST_DOUBLE
3772 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3774 REAL_VALUE_TYPE d0, d1;
3776 REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3777 REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3779 /* Comparisons are unordered iff at least one of the values is NaN. */
3780 if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3790 return const_true_rtx;
3803 equal = REAL_VALUES_EQUAL (d0, d1);
3804 op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3805 op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3808 /* Otherwise, see if the operands are both integers. */
3809 else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3810 && (GET_CODE (trueop0) == CONST_DOUBLE
3811 || GET_CODE (trueop0) == CONST_INT)
3812 && (GET_CODE (trueop1) == CONST_DOUBLE
3813 || GET_CODE (trueop1) == CONST_INT))
3815 int width = GET_MODE_BITSIZE (mode);
3816 HOST_WIDE_INT l0s, h0s, l1s, h1s;
3817 unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3819 /* Get the two words comprising each integer constant. */
3820 if (GET_CODE (trueop0) == CONST_DOUBLE)
3822 l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3823 h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3827 l0u = l0s = INTVAL (trueop0);
3828 h0u = h0s = HWI_SIGN_EXTEND (l0s);
3831 if (GET_CODE (trueop1) == CONST_DOUBLE)
3833 l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3834 h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3838 l1u = l1s = INTVAL (trueop1);
3839 h1u = h1s = HWI_SIGN_EXTEND (l1s);
3842 /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3843 we have to sign or zero-extend the values. */
3844 if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3846 l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3847 l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3849 if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3850 l0s |= ((HOST_WIDE_INT) (-1) << width);
3852 if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3853 l1s |= ((HOST_WIDE_INT) (-1) << width);
3855 if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3856 h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3858 equal = (h0u == h1u && l0u == l1u);
3859 op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3860 op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3861 op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3862 op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3865 /* Otherwise, there are some code-specific tests we can make. */
3868 /* Optimize comparisons with upper and lower bounds. */
3869 if (SCALAR_INT_MODE_P (mode)
3870 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3883 get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3885 tem = NULL_RTX;
3886 switch (code)
3887 {
3888 case GEU:
3889 case GE:
3890 /* x >= min is always true.  */
3891 if (rtx_equal_p (trueop1, mmin))
3892 tem = const_true_rtx;
3893 break;
3896 case LEU:
3897 case LE:
3898 /* x <= max is always true.  */
3899 if (rtx_equal_p (trueop1, mmax))
3900 tem = const_true_rtx;
3901 break;
3903 case GTU:
3904 case GT:
3905 /* x > max is always false.  */
3906 if (rtx_equal_p (trueop1, mmax))
3907 tem = const0_rtx;
3908 break;
3910 case LTU:
3911 case LT:
3912 /* x < min is always false.  */
3913 if (rtx_equal_p (trueop1, mmin))
3914 tem = const0_rtx;
3915 break;
3917 default:
3918 break;
3919 }
3920 if (tem == const0_rtx
3921 || tem == const_true_rtx)
3922 return tem;
3923 }
3925 switch (code)
3926 {
3927 case EQ:
3928 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3929 return const0_rtx;
3930 break;
3932 case NE:
3933 if (trueop1 == const0_rtx && nonzero_address_p (op0))
3934 return const_true_rtx;
3935 break;
3937 case LT:
3938 /* Optimize abs(x) < 0.0.  */
3939 if (trueop1 == CONST0_RTX (mode)
3940 && !HONOR_SNANS (mode)
3941 && (!INTEGRAL_MODE_P (mode)
3942 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
3943 {
3944 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3945 : trueop0;
3946 if (GET_CODE (tem) == ABS)
3947 {
3948 if (INTEGRAL_MODE_P (mode)
3949 && (issue_strict_overflow_warning
3950 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
3951 warning (OPT_Wstrict_overflow,
3952 ("assuming signed overflow does not occur when "
3953 "assuming abs (x) < 0 is false"));
3954 return const0_rtx;
3955 }
3956 }
3957 break;
3959 case GE:
3960 /* Optimize abs(x) >= 0.0.  */
3961 if (trueop1 == CONST0_RTX (mode)
3962 && !HONOR_NANS (mode)
3963 && (!INTEGRAL_MODE_P (mode)
3964 || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
3965 {
3966 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3967 : trueop0;
3968 if (GET_CODE (tem) == ABS)
3969 {
3970 if (INTEGRAL_MODE_P (mode)
3971 && (issue_strict_overflow_warning
3972 (WARN_STRICT_OVERFLOW_CONDITIONAL)))
3973 warning (OPT_Wstrict_overflow,
3974 ("assuming signed overflow does not occur when "
3975 "assuming abs (x) >= 0 is true"));
3976 return const_true_rtx;
3977 }
3978 }
3979 break;
3981 case UNGE:
3982 /* Optimize ! (abs(x) < 0.0).  */
3983 if (trueop1 == CONST0_RTX (mode))
3984 {
3985 tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3986 : trueop0;
3987 if (GET_CODE (tem) == ABS)
3988 return const_true_rtx;
3989 }
3990 break;
3992 default:
3993 break;
3994 }
3995 }
3999 /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
4000 as appropriate.  */
4001 switch (code)
4002 {
4003 case EQ:
4004 case UNEQ:
4005 return equal ? const_true_rtx : const0_rtx;
4006 case NE:
4007 case LTGT:
4008 return ! equal ? const_true_rtx : const0_rtx;
4009 case LT:
4010 case UNLT:
4011 return op0lt ? const_true_rtx : const0_rtx;
4012 case GT:
4013 case UNGT:
4014 return op1lt ? const_true_rtx : const0_rtx;
4015 case LTU:
4016 return op0ltu ? const_true_rtx : const0_rtx;
4017 case GTU:
4018 return op1ltu ? const_true_rtx : const0_rtx;
4019 case LE:
4020 case UNLE:
4021 return equal || op0lt ? const_true_rtx : const0_rtx;
4022 case GE:
4023 case UNGE:
4024 return equal || op1lt ? const_true_rtx : const0_rtx;
4025 case LEU:
4026 return equal || op0ltu ? const_true_rtx : const0_rtx;
4027 case GEU:
4028 return equal || op1ltu ? const_true_rtx : const0_rtx;
4029 case ORDERED:
4030 return const_true_rtx;
4031 case UNORDERED:
4032 return const0_rtx;
4033 default:
4034 gcc_unreachable ();
4035 }
4036 }
4038 /* Simplify CODE, an operation with result mode MODE and three operands,
4039 OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
4040 a constant.  Return 0 if no simplification is possible.  */
4043 simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4044 enum machine_mode op0_mode, rtx op0, rtx op1,
4047 unsigned int width = GET_MODE_BITSIZE (mode);
4049 /* VOIDmode means "infinite" precision. */
4051 width = HOST_BITS_PER_WIDE_INT;
4057 if (GET_CODE (op0) == CONST_INT
4058 && GET_CODE (op1) == CONST_INT
4059 && GET_CODE (op2) == CONST_INT
4060 && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4061 && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4063 /* Extracting a bit-field from a constant */
4064 HOST_WIDE_INT val = INTVAL (op0);
4066 if (BITS_BIG_ENDIAN)
4067 val >>= (GET_MODE_BITSIZE (op0_mode)
4068 - INTVAL (op2) - INTVAL (op1));
4070 val >>= INTVAL (op2);
4072 if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4074 /* First zero-extend. */
4075 val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4076 /* If desired, propagate sign bit. */
4077 if (code == SIGN_EXTRACT
4078 && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4079 val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4082 /* Clear the bits that don't belong in our mode,
4083 unless they and our sign bit are all one.
4084 So we get either a reasonable negative value or a reasonable
4085 unsigned value for this mode. */
4086 if (width < HOST_BITS_PER_WIDE_INT
4087 && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4088 != ((HOST_WIDE_INT) (-1) << (width - 1))))
4089 val &= ((HOST_WIDE_INT) 1 << width) - 1;
4091 return gen_int_mode (val, mode);
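/* For example (with !BITS_BIG_ENDIAN), extracting 4 bits at
   position 4 from op0 == 0xa5 shifts down to 0x0a and masks:
   ZERO_EXTRACT yields 10, while SIGN_EXTRACT sees bit 3 set and
   sign-extends to -6.  */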
4096 if (GET_CODE (op0) == CONST_INT)
4097 return op0 != const0_rtx ? op1 : op2;
4099 /* Convert c ? a : a into "a". */
4100 if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4101 return op1;
4103 /* Convert a != b ? a : b into "a". */
4104 if (GET_CODE (op0) == NE
4105 && ! side_effects_p (op0)
4106 && ! HONOR_NANS (mode)
4107 && ! HONOR_SIGNED_ZEROS (mode)
4108 && ((rtx_equal_p (XEXP (op0, 0), op1)
4109 && rtx_equal_p (XEXP (op0, 1), op2))
4110 || (rtx_equal_p (XEXP (op0, 0), op2)
4111 && rtx_equal_p (XEXP (op0, 1), op1))))
4112 return op1;
4114 /* Convert a == b ? a : b into "b". */
4115 if (GET_CODE (op0) == EQ
4116 && ! side_effects_p (op0)
4117 && ! HONOR_NANS (mode)
4118 && ! HONOR_SIGNED_ZEROS (mode)
4119 && ((rtx_equal_p (XEXP (op0, 0), op1)
4120 && rtx_equal_p (XEXP (op0, 1), op2))
4121 || (rtx_equal_p (XEXP (op0, 0), op2)
4122 && rtx_equal_p (XEXP (op0, 1), op1))))
4123 return op2;
4125 if (COMPARISON_P (op0) && ! side_effects_p (op0))
4127 enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4128 ? GET_MODE (XEXP (op0, 1))
4129 : GET_MODE (XEXP (op0, 0)));
4132 /* Look for happy constants in op1 and op2. */
4133 if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4135 HOST_WIDE_INT t = INTVAL (op1);
4136 HOST_WIDE_INT f = INTVAL (op2);
4138 if (t == STORE_FLAG_VALUE && f == 0)
4139 code = GET_CODE (op0);
4140 else if (t == 0 && f == STORE_FLAG_VALUE)
4141 {
4142 enum rtx_code tmp;
4143 tmp = reversed_comparison_code (op0, NULL_RTX);
4144 if (tmp == UNKNOWN)
4145 break;
4146 code = tmp;
4147 }
4148 else
4149 break;
4151 return simplify_gen_relational (code, mode, cmp_mode,
4152 XEXP (op0, 0), XEXP (op0, 1));
4155 if (cmp_mode == VOIDmode)
4156 cmp_mode = op0_mode;
4157 temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4158 cmp_mode, XEXP (op0, 0),
4159 XEXP (op0, 1));
4161 /* See if any simplifications were possible. */
4164 if (GET_CODE (temp) == CONST_INT)
4165 return temp == const0_rtx ? op2 : op1;
4167 return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4173 gcc_assert (GET_MODE (op0) == mode);
4174 gcc_assert (GET_MODE (op1) == mode);
4175 gcc_assert (VECTOR_MODE_P (mode));
4176 op2 = avoid_constant_pool_reference (op2);
4177 if (GET_CODE (op2) == CONST_INT)
4179 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4180 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4181 int mask = (1 << n_elts) - 1;
4183 if (!(INTVAL (op2) & mask))
4184 return op1;
4185 if ((INTVAL (op2) & mask) == mask)
4186 return op0;
4188 op0 = avoid_constant_pool_reference (op0);
4189 op1 = avoid_constant_pool_reference (op1);
4190 if (GET_CODE (op0) == CONST_VECTOR
4191 && GET_CODE (op1) == CONST_VECTOR)
4193 rtvec v = rtvec_alloc (n_elts);
4196 for (i = 0; i < n_elts; i++)
4197 RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4198 ? CONST_VECTOR_ELT (op0, i)
4199 : CONST_VECTOR_ELT (op1, i));
4200 return gen_rtx_CONST_VECTOR (mode, v);
4212 /* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4213 returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4215 Works by unpacking OP into a collection of 8-bit values
4216 represented as a little-endian array of 'unsigned char', selecting by BYTE,
4217 and then repacking them again for OUTERMODE. */
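/* For example, on a little-endian target
   (subreg:HI (const_int 0x12345678) 0) unpacks the SImode value
   into the bytes 78 56 34 12, selects the two bytes at offset 0,
   and repacks them as (const_int 0x5678).  */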
4220 simplify_immed_subreg (enum machine_mode outermode, rtx op,
4221 enum machine_mode innermode, unsigned int byte)
4223 /* We support up to 512-bit values (for V8DFmode). */
4224 enum {
4225 max_bitsize = 512,
4226 value_bit = 8,
4227 value_mask = (1 << value_bit) - 1
4228 };
4229 unsigned char value[max_bitsize / value_bit];
4238 rtvec result_v = NULL;
4239 enum mode_class outer_class;
4240 enum machine_mode outer_submode;
4242 /* Some ports misuse CCmode. */
4243 if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4244 return op;
4246 /* We have no way to represent a complex constant at the rtl level. */
4247 if (COMPLEX_MODE_P (outermode))
4248 return NULL_RTX;
4250 /* Unpack the value. */
4252 if (GET_CODE (op) == CONST_VECTOR)
4254 num_elem = CONST_VECTOR_NUNITS (op);
4255 elems = &CONST_VECTOR_ELT (op, 0);
4256 elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4262 elem_bitsize = max_bitsize;
4264 /* If this asserts, it is too complicated; reducing value_bit may help. */
4265 gcc_assert (BITS_PER_UNIT % value_bit == 0);
4266 /* I don't know how to handle endianness of sub-units. */
4267 gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4269 for (elem = 0; elem < num_elem; elem++)
4272 rtx el = elems[elem];
4274 /* Vectors are kept in target memory order.  (This is probably
4275 a mistake.)  */
4277 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4278 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4280 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4281 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4282 unsigned bytele = (subword_byte % UNITS_PER_WORD
4283 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4284 vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4287 switch (GET_CODE (el))
4291 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4293 *vp++ = INTVAL (el) >> i;
4294 /* CONST_INTs are always logically sign-extended. */
4295 for (; i < elem_bitsize; i += value_bit)
4296 *vp++ = INTVAL (el) < 0 ? -1 : 0;
4300 if (GET_MODE (el) == VOIDmode)
4302 /* If this triggers, someone should have generated a
4303 CONST_INT instead. */
4304 gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4306 for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4307 *vp++ = CONST_DOUBLE_LOW (el) >> i;
4308 while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4311 = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4314 /* It shouldn't matter what's done here, so fill it with
4315 zero.  */
4316 for (; i < elem_bitsize; i += value_bit)
4317 *vp++ = 0;
4321 long tmp[max_bitsize / 32];
4322 int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4324 gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4325 gcc_assert (bitsize <= elem_bitsize);
4326 gcc_assert (bitsize % value_bit == 0);
4328 real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4331 /* real_to_target produces its result in words affected by
4332 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4333 and use WORDS_BIG_ENDIAN instead; see the documentation
4334 of SUBREG in rtl.texi. */
4335 for (i = 0; i < bitsize; i += value_bit)
4338 if (WORDS_BIG_ENDIAN)
4339 ibase = bitsize - 1 - i;
4342 *vp++ = tmp[ibase / 32] >> i % 32;
4345 /* It shouldn't matter what's done here, so fill it with
4346 zeros.  */
4347 for (; i < elem_bitsize; i += value_bit)
4348 *vp++ = 0;
4357 /* Now, pick the right byte to start with. */
4358 /* Renumber BYTE so that the least-significant byte is byte 0. A special
4359 case is paradoxical SUBREGs, which shouldn't be adjusted since they
4360 will already have offset 0. */
4361 if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4363 unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4365 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4366 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4367 byte = (subword_byte % UNITS_PER_WORD
4368 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4371 /* BYTE should still be inside OP. (Note that BYTE is unsigned,
4372 so if it's become negative it will instead be very large.) */
4373 gcc_assert (byte < GET_MODE_SIZE (innermode));
4375 /* Convert from bytes to chunks of size value_bit. */
4376 value_start = byte * (BITS_PER_UNIT / value_bit);
4378 /* Re-pack the value. */
4380 if (VECTOR_MODE_P (outermode))
4382 num_elem = GET_MODE_NUNITS (outermode);
4383 result_v = rtvec_alloc (num_elem);
4384 elems = &RTVEC_ELT (result_v, 0);
4385 outer_submode = GET_MODE_INNER (outermode);
4391 outer_submode = outermode;
4394 outer_class = GET_MODE_CLASS (outer_submode);
4395 elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4397 gcc_assert (elem_bitsize % value_bit == 0);
4398 gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4400 for (elem = 0; elem < num_elem; elem++)
4404 /* Vectors are stored in target memory order.  (This is probably
4405 a mistake.)  */
4407 unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4408 unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4410 unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4411 unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4412 unsigned bytele = (subword_byte % UNITS_PER_WORD
4413 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4414 vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4417 switch (outer_class)
4420 case MODE_PARTIAL_INT:
4422 unsigned HOST_WIDE_INT hi = 0, lo = 0;
4425 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4427 lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4428 for (; i < elem_bitsize; i += value_bit)
4429 hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4430 << (i - HOST_BITS_PER_WIDE_INT));
4432 /* immed_double_const doesn't call trunc_int_for_mode.  I don't
4433 know why.  */
4434 if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4435 elems[elem] = gen_int_mode (lo, outer_submode);
4436 else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4437 elems[elem] = immed_double_const (lo, hi, outer_submode);
4444 case MODE_DECIMAL_FLOAT:
4447 long tmp[max_bitsize / 32];
4449 /* real_from_target wants its input in words affected by
4450 FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
4451 and use WORDS_BIG_ENDIAN instead; see the documentation
4452 of SUBREG in rtl.texi. */
4453 for (i = 0; i < max_bitsize / 32; i++)
4455 for (i = 0; i < elem_bitsize; i += value_bit)
4458 if (WORDS_BIG_ENDIAN)
4459 ibase = elem_bitsize - 1 - i;
4462 tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4465 real_from_target (&r, tmp, outer_submode);
4466 elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4474 if (VECTOR_MODE_P (outermode))
4475 return gen_rtx_CONST_VECTOR (outermode, result_v);
4480 /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4481 Return 0 if no simplifications are possible. */
4483 simplify_subreg (enum machine_mode outermode, rtx op,
4484 enum machine_mode innermode, unsigned int byte)
4486 /* Little bit of sanity checking. */
4487 gcc_assert (innermode != VOIDmode);
4488 gcc_assert (outermode != VOIDmode);
4489 gcc_assert (innermode != BLKmode);
4490 gcc_assert (outermode != BLKmode);
4492 gcc_assert (GET_MODE (op) == innermode
4493 || GET_MODE (op) == VOIDmode);
4495 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4496 gcc_assert (byte < GET_MODE_SIZE (innermode));
4498 if (outermode == innermode && !byte)
4499 return op;
4501 if (GET_CODE (op) == CONST_INT
4502 || GET_CODE (op) == CONST_DOUBLE
4503 || GET_CODE (op) == CONST_VECTOR)
4504 return simplify_immed_subreg (outermode, op, innermode, byte);
4506 /* Changing mode twice with SUBREG => just change it once,
4507 or not at all if changing back to the starting mode.  */
4508 if (GET_CODE (op) == SUBREG)
4510 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4511 int final_offset = byte + SUBREG_BYTE (op);
4514 if (outermode == innermostmode
4515 && byte == 0 && SUBREG_BYTE (op) == 0)
4516 return SUBREG_REG (op);
4518 /* The SUBREG_BYTE represents the offset, as if the value were stored
4519 in memory.  An irritating exception is the paradoxical subreg, where
4520 we define SUBREG_BYTE to be 0.  On big-endian machines this value
4521 should be negative; for a moment, undo this exception.  */
4522 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4524 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4525 if (WORDS_BIG_ENDIAN)
4526 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4527 if (BYTES_BIG_ENDIAN)
4528 final_offset += difference % UNITS_PER_WORD;
4530 if (SUBREG_BYTE (op) == 0
4531 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4533 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4534 if (WORDS_BIG_ENDIAN)
4535 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4536 if (BYTES_BIG_ENDIAN)
4537 final_offset += difference % UNITS_PER_WORD;
      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In a paradoxical subreg, see if we are still looking at the
	     lower part.  If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}
      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
      return NULL_RTX;
    }
  /* Merge implicit and explicit truncations.  */
  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */
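  /* E.g. on a typical 32-bit target where a DImode value occupies a
     pair of hard registers, (subreg:SI (reg:DI 2) 4) can fold straight
     to (reg:SI 3), provided the offset is representable and hard
     register 3 is valid in SImode.  */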
  if (REG_P (op)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
	  || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
	      ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
					byte, outermode))
    {
      unsigned int regno = REGNO (op);
      unsigned int final_regno
	= regno + subreg_regno_offset (regno, innermode, byte, outermode);
      /* ??? We do allow it if the current REG is not valid for
	 its mode.  This is a kludge to work around how float/complex
	 arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
	  || ! HARD_REGNO_MODE_OK (regno, innermode))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
	  /* Propagate the original regno.  We don't have any way to
	     specify an offset inside the original regno, so do so only
	     for the lowpart.  The information is used only by alias
	     analysis, which cannot grok partial registers anyway.  */
	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing with a MEM,
     make a new MEM and try replacing the SUBREG with it.  Don't do this
     if the MEM has a mode-dependent address or if we would be widening
     it.  */
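  /* E.g. (subreg:QI (mem:SI ADDR) 3) can become the narrower reference
     (mem:QI (plus ADDR 3)), which is what adjust_address_nv builds for
     us, assuming ADDR is not mode-dependent.  */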
  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
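  /* E.g. on a target where SCmode is a pair of 4-byte SFmode values,
     (subreg:SF (concat:SC X Y) 0) selects the real part X and
     (subreg:SF (concat:SC X Y) 4) selects the imaginary part Y.  */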
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int inner_size, final_offset;
      rtx part, res;

      inner_size = GET_MODE_UNIT_SIZE (innermode);
      part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
      final_offset = byte % inner_size;
      if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      /* If we're requesting the lowpart of a zero or sign extension,
	 there are three possibilities.  If the outermode is the same
	 as the origmode, we can omit both the extension and the subreg.
	 If the outermode is not larger than the origmode, we can apply
	 the truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, but both are integer modes, we
	 can just extend to the appropriate mode.  */
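      /* For instance, the lowpart QImode subreg of
	 (zero_extend:SI (reg:QI X)) is just (reg:QI X); the lowpart
	 HImode subreg of (zero_extend:DI (reg:SI X)) is the plain
	 truncation (subreg:HI (reg:SI X) 0) (little-endian offset);
	 and the lowpart HImode subreg of (zero_extend:SI (reg:QI X))
	 re-extends to (zero_extend:HI (reg:QI X)).  */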
      if (bitpos == 0)
	{
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
	  if (outermode == origmode)
	    return XEXP (op, 0);
	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
							       origmode));
	  if (SCALAR_INT_MODE_P (outermode))
	    return simplify_gen_unary (GET_CODE (op), outermode,
				       XEXP (op, 0), origmode);
	}
      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source bits.  */
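      /* E.g. (subreg:QI (zero_extend:SI (reg:QI X)) 3) on a
	 little-endian target reads only bits 24..31, which the zero
	 extension guarantees are zero, so it folds to (const_int 0).  */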
      if (GET_CODE (op) == ZERO_EXTEND
	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
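  /* E.g. (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) (const_int 2)) 0)
     can become (ashiftrt:QI (x:QI) (const_int 2)) on a little-endian
     target: the sign extension leaves at least 24 sign-bit copies above
     the low byte, so the bits shifted into the low byte are exactly the
     sign bits an arithmetic shift of X would produce.  */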
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     into (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && GET_CODE (XEXP (op, 1)) == CONST_INT
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   that do not depend on pass-specific state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass-specific state to be provided to these
	   routines and add simplifications based on that state.  Remove
	   code from cse.c & combine.c that becomes redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added in 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0), XEXP (x, 1));
    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_gen_subreg (mode, SUBREG_REG (x),
				    GET_MODE (SUBREG_REG (x)),
				    SUBREG_BYTE (x));
      break;
    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}
      break;

    default:
      break;
    }

  return NULL;
}