/* Fold a constant sub-tree into a single node for C-compiler
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
   2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/*@@ This file should be rewritten to use an arbitrary precision
  @@ representation for "struct tree_int_cst" and "struct tree_real_cst".
  @@ Perhaps the routines could also be used for bc/dc, and made a lib.
  @@ The routines that translate from the ap rep should
  @@ warn if precision et al. is lost.
  @@ This would also make life easier when this technology is used
  @@ for cross-compilers.  */

/* The entry points in this file are fold, size_int_wide, size_binop
   and force_fit_type_double.

   fold takes a tree as argument and returns a simplified tree.

   size_binop takes a tree code for an arithmetic operation
   and two operands that are trees, and produces a tree for the
   result, assuming the type comes from `sizetype'.

   size_int takes an integer value, and creates a tree constant
   with type from `sizetype'.

   force_fit_type_double takes a constant, an overflowable flag and a
   prior overflow indicator.  It forces the value to fit the type and
   sets TREE_OVERFLOW.

   Note: Since the folders get called on non-gimple code as well as
   gimple code, we need to handle GIMPLE tuples as well as their
   corresponding tree equivalents.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "flags.h"
#include "tree.h"
#include "real.h"
#include "fixed-value.h"
#include "rtl.h"
#include "expr.h"
#include "tm_p.h"
#include "target.h"
#include "toplev.h"
#include "intl.h"
#include "ggc.h"
#include "hashtab.h"
#include "langhooks.h"
#include "md5.h"
#include "gimple.h"

/* Nonzero if we are folding constants inside an initializer; zero
   otherwise.  */
int folding_initializer = 0;

/* The following constants represent a bit based encoding of GCC's
   comparison operators.  This encoding simplifies transformations
   on relational comparison operators, such as AND and OR.  */
enum comparison_code {
  COMPCODE_FALSE = 0,
  COMPCODE_LT = 1,
  COMPCODE_EQ = 2,
  COMPCODE_LE = 3,
  COMPCODE_GT = 4,
  COMPCODE_LTGT = 5,
  COMPCODE_GE = 6,
  COMPCODE_ORD = 7,
  COMPCODE_UNORD = 8,
  COMPCODE_UNLT = 9,
  COMPCODE_UNEQ = 10,
  COMPCODE_UNLE = 11,
  COMPCODE_UNGT = 12,
  COMPCODE_NE = 13,
  COMPCODE_UNGE = 14,
  COMPCODE_TRUE = 15
};

static void encode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT, HOST_WIDE_INT);
static void decode (HOST_WIDE_INT *, unsigned HOST_WIDE_INT *, HOST_WIDE_INT *);
static bool negate_mathfn_p (enum built_in_function);
static bool negate_expr_p (tree);
static tree negate_expr (tree);
static tree split_tree (tree, enum tree_code, tree *, tree *, tree *, int);
static tree associate_trees (tree, tree, enum tree_code, tree);
static tree const_binop (enum tree_code, tree, tree, int);
static enum comparison_code comparison_to_compcode (enum tree_code);
static enum tree_code compcode_to_comparison (enum comparison_code);
static tree combine_comparisons (enum tree_code, enum tree_code,
                                 enum tree_code, tree, tree, tree);
static int operand_equal_for_comparison_p (tree, tree, tree);
static int twoval_comparison_p (tree, tree *, tree *, int *);
static tree eval_subst (tree, tree, tree, tree, tree);
static tree pedantic_omit_one_operand (tree, tree, tree);
static tree distribute_bit_expr (enum tree_code, tree, tree, tree);
static tree make_bit_field_ref (tree, tree, HOST_WIDE_INT, HOST_WIDE_INT, int);
static tree optimize_bit_field_compare (enum tree_code, tree, tree, tree);
static tree decode_field_reference (tree, HOST_WIDE_INT *, HOST_WIDE_INT *,
                                    enum machine_mode *, int *, int *,
                                    tree *, tree *);
static int all_ones_mask_p (const_tree, int);
static tree sign_bit_p (tree, const_tree);
static int simple_operand_p (const_tree);
static tree range_binop (enum tree_code, tree, tree, int, tree, int);
static tree range_predecessor (tree);
static tree range_successor (tree);
static tree make_range (tree, int *, tree *, tree *, bool *);
static tree build_range_check (tree, tree, int, tree, tree);
static int merge_ranges (int *, tree *, tree *, int, tree, tree, int, tree,
                         tree);
static tree fold_range_test (enum tree_code, tree, tree, tree);
static tree fold_cond_expr_with_comparison (tree, tree, tree, tree);
static tree unextend (tree, int, int, tree);
static tree fold_truthop (enum tree_code, tree, tree, tree);
static tree optimize_minmax_comparison (enum tree_code, tree, tree, tree);
static tree extract_muldiv (tree, tree, enum tree_code, tree, bool *);
static tree extract_muldiv_1 (tree, tree, enum tree_code, tree, bool *);
static tree fold_binary_op_with_conditional_arg (enum tree_code, tree,
                                                 tree, tree,
                                                 tree, tree, int);
static tree fold_mathfn_compare (enum built_in_function, enum tree_code,
                                 tree, tree, tree);
static tree fold_inf_compare (enum tree_code, tree, tree, tree);
static tree fold_div_compare (enum tree_code, tree, tree, tree);
static bool reorder_operands_p (const_tree, const_tree);
static tree fold_negate_const (tree, tree);
static tree fold_not_const (tree, tree);
static tree fold_relational_const (enum tree_code, tree, tree, tree);


/* We know that A1 + B1 = SUM1, using 2's complement arithmetic and ignoring
   overflow.  Suppose A, B and SUM have the same respective signs as A1, B1,
   and SUM1.  Then this yields nonzero if overflow occurred during the
   addition.

   Overflow occurs if A and B have the same sign, but A and SUM differ in
   sign.  Use `^' to test whether signs differ, and `< 0' to isolate the
   sign.  */
#define OVERFLOW_SUM_SIGN(a, b, sum) ((~((a) ^ (b)) & ((a) ^ (sum))) < 0)
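
/* Editorial illustration (not part of the original sources): with 8-bit
   two's complement values, a = 0x50 (+80) and b = 0x50 (+80) give
   sum = 0xA0 (-96).  Then ~(a ^ b) has the sign bit set (signs agree) and
   (a ^ sum) has the sign bit set (the sign changed), so the macro's operand
   is negative and overflow is reported.  For a = 0x50 and b = 0xB0 (-80),
   sum = 0x00, the operand signs differ, ~(a ^ b) clears the sign bit, and
   no overflow is reported.  */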

/* To do constant folding on INTEGER_CST nodes requires two-word arithmetic.
   We do that by representing the two-word integer in 4 words, with only
   HOST_BITS_PER_WIDE_INT / 2 bits stored in each word, as a positive
   number.  The value of the word is LOWPART + HIGHPART * BASE.  */

#define LOWPART(x) \
  ((x) & (((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT / 2)) - 1))
#define HIGHPART(x) \
  ((unsigned HOST_WIDE_INT) (x) >> HOST_BITS_PER_WIDE_INT / 2)
#define BASE ((unsigned HOST_WIDE_INT) 1 << HOST_BITS_PER_WIDE_INT / 2)
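
/* Editorial illustration: on a host with HOST_BITS_PER_WIDE_INT == 32,
   BASE is 2^16, so for x = 0x12345678 we get LOWPART (x) == 0x5678 and
   HIGHPART (x) == 0x1234, and indeed 0x5678 + 0x1234 * BASE == x.  Each
   half-word digit fits comfortably in a HOST_WIDE_INT, which is what lets
   the multiply loop below accumulate digit products without losing
   carries.  */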

/* Unpack a two-word integer into 4 words.
   LOW and HI are the integer, as two `HOST_WIDE_INT' pieces.
   WORDS points to the array of HOST_WIDE_INTs.  */

static void
encode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi)
{
  words[0] = LOWPART (low);
  words[1] = HIGHPART (low);
  words[2] = LOWPART (hi);
  words[3] = HIGHPART (hi);
}

/* Pack an array of 4 words into a two-word integer.
   WORDS points to the array of words.
   The integer is stored into *LOW and *HI as two `HOST_WIDE_INT' pieces.  */

static void
decode (HOST_WIDE_INT *words, unsigned HOST_WIDE_INT *low,
        HOST_WIDE_INT *hi)
{
  *low = words[0] + words[1] * BASE;
  *hi = words[2] + words[3] * BASE;
}

/* Force the double-word integer L1, H1 to be within the range of the
   integer type TYPE.  Stores the properly truncated and sign-extended
   double-word integer in *LV, *HV.  Returns true if the operation
   overflows, that is, argument and result are different.  */

int
fit_double_type (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
                 unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, const_tree type)
{
  unsigned HOST_WIDE_INT low0 = l1;
  HOST_WIDE_INT high0 = h1;
  unsigned int prec;
  int sign_extended_type;

  if (POINTER_TYPE_P (type)
      || TREE_CODE (type) == OFFSET_TYPE)
    prec = POINTER_SIZE;
  else
    prec = TYPE_PRECISION (type);

  /* Size types *are* sign extended.  */
  sign_extended_type = (!TYPE_UNSIGNED (type)
                        || (TREE_CODE (type) == INTEGER_TYPE
                            && TYPE_IS_SIZETYPE (type)));

  /* First clear all bits that are beyond the type's precision.  */
  if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if (prec > HOST_BITS_PER_WIDE_INT)
    h1 &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
  else
    {
      h1 = 0;
      if (prec < HOST_BITS_PER_WIDE_INT)
        l1 &= ~((HOST_WIDE_INT) (-1) << prec);
    }

  /* Then do sign extension if necessary.  */
  if (!sign_extended_type)
    /* No sign extension */;
  else if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
    /* Correct width already.  */;
  else if (prec > HOST_BITS_PER_WIDE_INT)
    {
      /* Sign extend top half? */
      if (h1 & ((unsigned HOST_WIDE_INT)1
                << (prec - HOST_BITS_PER_WIDE_INT - 1)))
        h1 |= (HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT);
    }
  else if (prec == HOST_BITS_PER_WIDE_INT)
    {
      if ((HOST_WIDE_INT)l1 < 0)
        h1 = -1;
    }
  else
    {
      /* Sign extend bottom half? */
      if (l1 & ((unsigned HOST_WIDE_INT)1 << (prec - 1)))
        {
          h1 = -1;
          l1 |= (HOST_WIDE_INT)(-1) << prec;
        }
    }

  *lv = l1;
  *hv = h1;

  /* If the value didn't fit, signal overflow.  */
  return l1 != low0 || h1 != high0;
}
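
/* Editorial illustration: for a signed 8-bit TYPE, the value 0x1a5
   (l1 = 0x1a5, h1 = 0) is first masked down to 0xa5; since bit 7 is set it
   is then sign extended, giving h1 = -1 and all bits of l1 above bit 7 set,
   i.e. the value -91.  Argument and result differ, so overflow is
   reported.  */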

/* We force the double-int HIGH:LOW to the range of the type TYPE by
   sign or zero extending it.
   OVERFLOWABLE indicates if we are interested
   in overflow of the value, when >0 we are only interested in signed
   overflow, for <0 we are interested in any overflow.  OVERFLOWED
   indicates whether overflow has already occurred.  We force
   the value to be within range of TYPE (by setting to 0 or 1 all
   the bits outside the type's range).  We set TREE_OVERFLOW if
   OVERFLOWED is nonzero,
   or OVERFLOWABLE is >0 and signed overflow occurs,
   or OVERFLOWABLE is <0 and any overflow occurs.
   We return a new tree node for the extended double-int.  The node
   is shared if no overflow flags are set.  */

tree
force_fit_type_double (tree type, unsigned HOST_WIDE_INT low,
                       HOST_WIDE_INT high, int overflowable,
                       bool overflowed)
{
  int sign_extended_type;
  bool overflow;

  /* Size types *are* sign extended.  */
  sign_extended_type = (!TYPE_UNSIGNED (type)
                        || (TREE_CODE (type) == INTEGER_TYPE
                            && TYPE_IS_SIZETYPE (type)));

  overflow = fit_double_type (low, high, &low, &high, type);

  /* If we need to set overflow flags, return a new unshared node.  */
  if (overflowed || overflow)
    {
      if (overflowed
          || overflowable < 0
          || (overflowable > 0 && sign_extended_type))
        {
          tree t = make_node (INTEGER_CST);
          TREE_INT_CST_LOW (t) = low;
          TREE_INT_CST_HIGH (t) = high;
          TREE_TYPE (t) = type;
          TREE_OVERFLOW (t) = 1;
          return t;
        }
    }

  /* Else build a shared node.  */
  return build_int_cst_wide (type, low, high);
}

/* Add two doubleword integers with doubleword result.
   Return nonzero if the operation overflows according to UNSIGNED_P.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
add_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
                      unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
                      unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
                      bool unsigned_p)
{
  unsigned HOST_WIDE_INT l;
  HOST_WIDE_INT h;

  l = l1 + l2;
  h = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) h1
                       + (unsigned HOST_WIDE_INT) h2
                       + (l < l1));

  *lv = l;
  *hv = h;

  if (unsigned_p)
    return ((unsigned HOST_WIDE_INT) h < (unsigned HOST_WIDE_INT) h1
            || (h == h1
                && l < l1));
  else
    return OVERFLOW_SUM_SIGN (h1, h2, h);
}
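
/* Editorial illustration of the carry: (l < l1) is 1 exactly when the
   low-word addition wrapped.  With 32-bit words, adding 0xffffffff:0x80000000
   to 0x00000000:0x80000000 gives l = 0 with a carry into h, so
   h = -1 + 0 + 1 = 0; unsigned overflow is then detected because the result
   high word compares below h1.  */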

/* Negate a doubleword integer with doubleword result.
   Return nonzero if the operation overflows, assuming it's signed.
   The argument is given as two `HOST_WIDE_INT' pieces in L1 and H1.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
neg_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
            unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
  if (l1 == 0)
    {
      *lv = 0;
      *hv = - h1;
      return (*hv & h1) < 0;
    }
  else
    {
      *lv = -l1;
      *hv = ~h1;
      return 0;
    }
}

/* Multiply two doubleword integers with doubleword result.
   Return nonzero if the operation overflows according to UNSIGNED_P.
   Each argument is given as two `HOST_WIDE_INT' pieces.
   One argument is L1 and H1; the other, L2 and H2.
   The value is stored as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

int
mul_double_with_sign (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
                      unsigned HOST_WIDE_INT l2, HOST_WIDE_INT h2,
                      unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
                      bool unsigned_p)
{
  HOST_WIDE_INT arg1[4];
  HOST_WIDE_INT arg2[4];
  HOST_WIDE_INT prod[4 * 2];
  unsigned HOST_WIDE_INT carry;
  int i, j, k;
  unsigned HOST_WIDE_INT toplow, neglow;
  HOST_WIDE_INT tophigh, neghigh;

  encode (arg1, l1, h1);
  encode (arg2, l2, h2);

  memset (prod, 0, sizeof prod);

  for (i = 0; i < 4; i++)
    {
      carry = 0;
      for (j = 0; j < 4; j++)
        {
          k = i + j;
          /* This product is <= 0xFFFE0001, the sum <= 0xFFFF0000.  */
          carry += arg1[i] * arg2[j];
          /* Since prod[p] < 0xFFFF, this sum <= 0xFFFFFFFF.  */
          carry += prod[k];
          prod[k] = LOWPART (carry);
          carry = HIGHPART (carry);
        }
      prod[i + 4] = carry;
    }

  decode (prod, lv, hv);
  decode (prod + 4, &toplow, &tophigh);

  /* Unsigned overflow is immediate.  */
  if (unsigned_p)
    return (toplow | tophigh) != 0;

  /* Check for signed overflow by calculating the signed representation of the
     top half of the result; it should agree with the low half's sign bit.  */
  if (h1 < 0)
    {
      neg_double (l2, h2, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  if (h2 < 0)
    {
      neg_double (l1, h1, &neglow, &neghigh);
      add_double (neglow, neghigh, toplow, tophigh, &toplow, &tophigh);
    }
  return (*hv < 0 ? ~(toplow & tophigh) : toplow | tophigh) != 0;
}
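
/* Editorial illustration: this is schoolbook multiplication in base
   BASE = 2^(HOST_BITS_PER_WIDE_INT / 2).  With 16-bit digits, multiplying
   0x00010002 by 0x00030004 proceeds digit by digit (least significant
   digit first):

     arg1 = { 0x0002, 0x0001, 0, 0 }
     arg2 = { 0x0004, 0x0003, 0, 0 }
     digit 0:  0x0002*0x0004                 = 0x0008
     digit 1:  0x0002*0x0003 + 0x0001*0x0004 = 0x000a
     digit 2:  0x0001*0x0003                 = 0x0003

   i.e. the product 0x0003000a0008 spread over prod[0..7]; the low four
   digits become *LV/*HV and any nonzero high digit signals overflow.  */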

/* Shift the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Shift right if COUNT is negative.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
lshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
               HOST_WIDE_INT count, unsigned int prec,
               unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv, int arith)
{
  unsigned HOST_WIDE_INT signmask;

  if (count < 0)
    {
      rshift_double (l1, h1, -count, prec, lv, hv, arith);
      return;
    }

  if (SHIFT_COUNT_TRUNCATED)
    count %= prec;

  if (count >= 2 * HOST_BITS_PER_WIDE_INT)
    {
      /* Shifting by the host word size is undefined according to the
         ANSI standard, so we must handle this as a special case.  */
      *hv = 0;
      *lv = 0;
    }
  else if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = l1 << (count - HOST_BITS_PER_WIDE_INT);
      *lv = 0;
    }
  else
    {
      *hv = (((unsigned HOST_WIDE_INT) h1 << count)
             | (l1 >> (HOST_BITS_PER_WIDE_INT - count - 1) >> 1));
      *lv = l1 << count;
    }

  /* Sign extend all bits that are beyond the precision.  */

  signmask = -((prec > HOST_BITS_PER_WIDE_INT
                ? ((unsigned HOST_WIDE_INT) *hv
                   >> (prec - HOST_BITS_PER_WIDE_INT - 1))
                : (*lv >> (prec - 1))) & 1);

  if (prec >= 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if (prec >= HOST_BITS_PER_WIDE_INT)
    {
      *hv &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT));
      *hv |= signmask << (prec - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      *hv = signmask;
      *lv &= ~((unsigned HOST_WIDE_INT) (-1) << prec);
      *lv |= signmask << prec;
    }
}

/* Shift the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   ARITH nonzero specifies arithmetic shifting; otherwise use logical shift.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rshift_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
               HOST_WIDE_INT count, unsigned int prec,
               unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv,
               int arith)
{
  unsigned HOST_WIDE_INT signmask;

  signmask = (arith
              ? -((unsigned HOST_WIDE_INT) h1 >> (HOST_BITS_PER_WIDE_INT - 1))
              : 0);

  if (SHIFT_COUNT_TRUNCATED)
    count %= prec;

  if (count >= 2 * HOST_BITS_PER_WIDE_INT)
    {
      /* Shifting by the host word size is undefined according to the
         ANSI standard, so we must handle this as a special case.  */
      *hv = 0;
      *lv = 0;
    }
  else if (count >= HOST_BITS_PER_WIDE_INT)
    {
      *hv = 0;
      *lv = (unsigned HOST_WIDE_INT) h1 >> (count - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      *hv = (unsigned HOST_WIDE_INT) h1 >> count;
      *lv = ((l1 >> count)
             | ((unsigned HOST_WIDE_INT) h1
                << (HOST_BITS_PER_WIDE_INT - count - 1) << 1));
    }

  /* Zero / sign extend all bits that are beyond the precision.  */

  if (count >= (HOST_WIDE_INT)prec)
    {
      *hv = signmask;
      *lv = signmask;
    }
  else if ((prec - count) >= 2 * HOST_BITS_PER_WIDE_INT)
    ;
  else if ((prec - count) >= HOST_BITS_PER_WIDE_INT)
    {
      *hv &= ~((HOST_WIDE_INT) (-1) << (prec - count - HOST_BITS_PER_WIDE_INT));
      *hv |= signmask << (prec - count - HOST_BITS_PER_WIDE_INT);
    }
  else
    {
      *hv = signmask;
      *lv &= ~((unsigned HOST_WIDE_INT) (-1) << (prec - count));
      *lv |= signmask << (prec - count);
    }
}
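
/* Editorial illustration of ARITH: shifting the 8-bit value 0x90 right by 4
   with prec == 8 gives 0x09 logically (signmask == 0) but 0xf9
   arithmetically (signmask == all ones), matching what >> does for unsigned
   versus negative signed operands.  The `>> (bits - count - 1) >> 1' dance
   when moving bits between words avoids ever shifting by a full word width,
   which C leaves undefined.  */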

/* Rotate the doubleword integer in L1, H1 left by COUNT places
   keeping only PREC bits of result.
   Rotate right if COUNT is negative.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
lrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
                HOST_WIDE_INT count, unsigned int prec,
                unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
  unsigned HOST_WIDE_INT s1l, s2l;
  HOST_WIDE_INT s1h, s2h;

  count %= prec;
  if (count < 0)
    count += prec;

  lshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
  rshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
  *lv = s1l | s2l;
  *hv = s1h | s2h;
}
/* Rotate the doubleword integer in L1, H1 right by COUNT places
   keeping only PREC bits of result.  COUNT must be positive.
   Store the value as two `HOST_WIDE_INT' pieces in *LV and *HV.  */

void
rrotate_double (unsigned HOST_WIDE_INT l1, HOST_WIDE_INT h1,
                HOST_WIDE_INT count, unsigned int prec,
                unsigned HOST_WIDE_INT *lv, HOST_WIDE_INT *hv)
{
  unsigned HOST_WIDE_INT s1l, s2l;
  HOST_WIDE_INT s1h, s2h;

  count %= prec;
  if (count < 0)
    count += prec;

  rshift_double (l1, h1, count, prec, &s1l, &s1h, 0);
  lshift_double (l1, h1, prec - count, prec, &s2l, &s2h, 0);
  *lv = s1l | s2l;
  *hv = s1h | s2h;
}

/* Divide doubleword integer LNUM, HNUM by doubleword integer LDEN, HDEN
   for a quotient (stored in *LQUO, *HQUO) and remainder (in *LREM, *HREM).
   CODE is a tree code for a kind of division, one of
   TRUNC_DIV_EXPR, FLOOR_DIV_EXPR, CEIL_DIV_EXPR, ROUND_DIV_EXPR
   or EXACT_DIV_EXPR
   It controls how the quotient is rounded to an integer.
   Return nonzero if the operation overflows.
   UNS nonzero says do unsigned division.  */

int
div_and_round_double (enum tree_code code, int uns,
                      unsigned HOST_WIDE_INT lnum_orig, /* num == numerator == dividend */
                      HOST_WIDE_INT hnum_orig,
                      unsigned HOST_WIDE_INT lden_orig, /* den == denominator == divisor */
                      HOST_WIDE_INT hden_orig,
                      unsigned HOST_WIDE_INT *lquo,
                      HOST_WIDE_INT *hquo, unsigned HOST_WIDE_INT *lrem,
                      HOST_WIDE_INT *hrem)
{
  int quo_neg = 0;
  HOST_WIDE_INT num[4 + 1];	/* extra element for scaling.  */
  HOST_WIDE_INT den[4], quo[4];
  int i, j;
  unsigned HOST_WIDE_INT work;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT lnum = lnum_orig;
  HOST_WIDE_INT hnum = hnum_orig;
  unsigned HOST_WIDE_INT lden = lden_orig;
  HOST_WIDE_INT hden = hden_orig;
  int overflow = 0;

  if (hden == 0 && lden == 0)
    overflow = 1, lden = 1;

  /* Calculate quotient sign and convert operands to unsigned.  */
  if (!uns)
    {
      if (hnum < 0)
        {
          quo_neg = ~ quo_neg;
          /* (minimum integer) / (-1) is the only overflow case.  */
          if (neg_double (lnum, hnum, &lnum, &hnum)
              && ((HOST_WIDE_INT) lden & hden) == -1)
            overflow = 1;
        }
      if (hden < 0)
        {
          quo_neg = ~ quo_neg;
          neg_double (lden, hden, &lden, &hden);
        }
    }

  if (hnum == 0 && hden == 0)
    {				/* single precision */
      *hquo = *hrem = 0;
      /* This unsigned division rounds toward zero.  */
      *lquo = lnum / lden;
      goto finish_up;
    }

  if (hnum == 0)
    {				/* trivial case: dividend < divisor */
      /* hden != 0 already checked.  */
      *hquo = *lquo = 0;
      *hrem = hnum;
      *lrem = lnum;
      goto finish_up;
    }

  memset (quo, 0, sizeof quo);

  memset (num, 0, sizeof num);	/* to zero the extra 5th element, used for scaling */
  memset (den, 0, sizeof den);

  encode (num, lnum, hnum);
  encode (den, lden, hden);

  /* Special code for when the divisor < BASE.  */
  if (hden == 0 && lden < (unsigned HOST_WIDE_INT) BASE)
    {
      /* hnum != 0 already checked.  */
      for (i = 4 - 1; i >= 0; i--)
        {
          work = num[i] + carry * BASE;
          quo[i] = work / lden;
          carry = work % lden;
        }
    }
  else
    {
      /* Full double precision division,
         with thanks to Don Knuth's "Seminumerical Algorithms".  */
      int num_hi_sig, den_hi_sig;
      unsigned HOST_WIDE_INT quo_est, scale;

      /* Find the highest nonzero divisor digit.  */
      for (i = 4 - 1;; i--)
        if (den[i] != 0)
          {
            den_hi_sig = i;
            break;
          }

      /* Ensure that the first digit of the divisor is at least BASE/2.
         This is required by the quotient digit estimation algorithm.  */

      scale = BASE / (den[den_hi_sig] + 1);
      if (scale > 1)
        {			/* scale divisor and dividend */
          carry = 0;
          for (i = 0; i <= 4 - 1; i++)
            {
              work = (num[i] * scale) + carry;
              num[i] = LOWPART (work);
              carry = HIGHPART (work);
            }

          num[4] = carry;
          carry = 0;
          for (i = 0; i <= 4 - 1; i++)
            {
              work = (den[i] * scale) + carry;
              den[i] = LOWPART (work);
              carry = HIGHPART (work);
              if (den[i] != 0) den_hi_sig = i;
            }
        }

      num_hi_sig = 4;

      /* Main loop */
      for (i = num_hi_sig - den_hi_sig - 1; i >= 0; i--)
        {
          /* Guess the next quotient digit, quo_est, by dividing the first
             two remaining dividend digits by the high order quotient digit.
             quo_est is never low and is at most 2 high.  */
          unsigned HOST_WIDE_INT tmp;

          num_hi_sig = i + den_hi_sig + 1;
          work = num[num_hi_sig] * BASE + num[num_hi_sig - 1];
          if (num[num_hi_sig] != den[den_hi_sig])
            quo_est = work / den[den_hi_sig];
          else
            quo_est = BASE - 1;

          /* Refine quo_est so it's usually correct, and at most one high.  */
          tmp = work - quo_est * den[den_hi_sig];
          if (tmp < BASE
              && (den[den_hi_sig - 1] * quo_est
                  > (tmp * BASE + num[num_hi_sig - 2])))
            quo_est--;

          /* Try QUO_EST as the quotient digit, by multiplying the
             divisor by QUO_EST and subtracting from the remaining dividend.
             Keep in mind that QUO_EST is the I - 1st digit.  */

          carry = 0;
          for (j = 0; j <= den_hi_sig; j++)
            {
              work = quo_est * den[j] + carry;
              carry = HIGHPART (work);
              work = num[i + j] - LOWPART (work);
              num[i + j] = LOWPART (work);
              carry += HIGHPART (work) != 0;
            }

          /* If quo_est was high by one, then num[i] went negative and
             we need to correct things.  */
          if (num[num_hi_sig] < (HOST_WIDE_INT) carry)
            {
              quo_est--;
              carry = 0;	/* add divisor back in */
              for (j = 0; j <= den_hi_sig; j++)
                {
                  work = num[i + j] + den[j] + carry;
                  carry = HIGHPART (work);
                  num[i + j] = LOWPART (work);
                }

              num [num_hi_sig] += carry;
            }

          /* Store the quotient digit.  */
          quo[i] = quo_est;
        }
    }

  decode (quo, lquo, hquo);

 finish_up:
  /* If result is negative, make it so.  */
  if (quo_neg)
    neg_double (*lquo, *hquo, lquo, hquo);

  /* Compute trial remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);

  switch (code)
    {
    case TRUNC_DIV_EXPR:
    case TRUNC_MOD_EXPR:	/* round toward zero */
    case EXACT_DIV_EXPR:	/* for this one, it shouldn't matter */
      return overflow;

    case FLOOR_DIV_EXPR:
    case FLOOR_MOD_EXPR:	/* round toward negative infinity */
      if (quo_neg && (*lrem != 0 || *hrem != 0))   /* ratio < 0 && rem != 0 */
        {
          /* quo = quo - 1;  */
          add_double (*lquo, *hquo, (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1,
                      lquo, hquo);
        }
      else
        return overflow;
      break;

    case CEIL_DIV_EXPR:
    case CEIL_MOD_EXPR:		/* round toward positive infinity */
      if (!quo_neg && (*lrem != 0 || *hrem != 0))  /* ratio > 0 && rem != 0 */
        {
          add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
                      lquo, hquo);
        }
      else
        return overflow;
      break;

    case ROUND_DIV_EXPR:
    case ROUND_MOD_EXPR:	/* round to closest integer */
      {
        unsigned HOST_WIDE_INT labs_rem = *lrem;
        HOST_WIDE_INT habs_rem = *hrem;
        unsigned HOST_WIDE_INT labs_den = lden, ltwice;
        HOST_WIDE_INT habs_den = hden, htwice;

        /* Get absolute values.  */
        if (*hrem < 0)
          neg_double (*lrem, *hrem, &labs_rem, &habs_rem);
        if (hden < 0)
          neg_double (lden, hden, &labs_den, &habs_den);

        /* If (2 * abs (lrem) >= abs (lden)), adjust the quotient.  */
        mul_double ((HOST_WIDE_INT) 2, (HOST_WIDE_INT) 0,
                    labs_rem, habs_rem, &ltwice, &htwice);

        if (((unsigned HOST_WIDE_INT) habs_den
             < (unsigned HOST_WIDE_INT) htwice)
            || (((unsigned HOST_WIDE_INT) habs_den
                 == (unsigned HOST_WIDE_INT) htwice)
                && (labs_den <= ltwice)))
          {
            if (*hquo < 0)
              /* quo = quo - 1;  */
              add_double (*lquo, *hquo,
                          (HOST_WIDE_INT) -1, (HOST_WIDE_INT) -1, lquo, hquo);
            else
              /* quo = quo + 1;  */
              add_double (*lquo, *hquo, (HOST_WIDE_INT) 1, (HOST_WIDE_INT) 0,
                          lquo, hquo);
          }
        else
          return overflow;
      }
      break;

    default:
      gcc_unreachable ();
    }

  /* Compute true remainder:  rem = num - (quo * den)  */
  mul_double (*lquo, *hquo, lden_orig, hden_orig, lrem, hrem);
  neg_double (*lrem, *hrem, lrem, hrem);
  add_double (lnum_orig, hnum_orig, *lrem, *hrem, lrem, hrem);
  return overflow;
}
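
/* Editorial illustration of the rounding modes for -7 / 2 (trial quotient
   -3, trial remainder -1):

     TRUNC_DIV_EXPR   quo = -3, rem = -1   (round toward zero)
     FLOOR_DIV_EXPR   quo = -4, rem =  1   (round toward negative infinity)
     CEIL_DIV_EXPR    quo = -3, rem = -1   (quotient negative, no adjustment)
     ROUND_DIV_EXPR   quo = -4, rem =  1   (2*|rem| >= |den|, ties rounded
                                            away from zero)

   In every case the final remainder is recomputed so that
   num == quo * den + rem holds exactly.  */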

/* If ARG2 divides ARG1 with zero remainder, carries out the division
   of type CODE and returns the quotient.
   Otherwise returns NULL_TREE.  */

static tree
div_if_zero_remainder (enum tree_code code, const_tree arg1, const_tree arg2)
{
  unsigned HOST_WIDE_INT int1l, int2l;
  HOST_WIDE_INT int1h, int2h;
  unsigned HOST_WIDE_INT quol, reml;
  HOST_WIDE_INT quoh, remh;
  tree type = TREE_TYPE (arg1);
  int uns = TYPE_UNSIGNED (type);

  int1l = TREE_INT_CST_LOW (arg1);
  int1h = TREE_INT_CST_HIGH (arg1);
  /* &obj[0] + -128 really should be compiled as &obj[-8] rather than
     &obj[some_exotic_number].  */
  if (POINTER_TYPE_P (type))
    {
      uns = false;
      type = signed_type_for (type);
      fit_double_type (int1l, int1h, &int1l, &int1h,
                       type);
    }
  else
    fit_double_type (int1l, int1h, &int1l, &int1h, type);
  int2l = TREE_INT_CST_LOW (arg2);
  int2h = TREE_INT_CST_HIGH (arg2);

  div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
                        &quol, &quoh, &reml, &remh);
  if (remh != 0 || reml != 0)
    return NULL_TREE;

  return build_int_cst_wide (type, quol, quoh);
}

/* This is nonzero if we should defer warnings about undefined
   overflow.  This facility exists because these warnings are a
   special case.  The code to estimate loop iterations does not want
   to issue any warnings, since it works with expressions which do not
   occur in user code.  Various bits of cleanup code call fold(), but
   only use the result if it has certain characteristics (e.g., is a
   constant); that code only wants to issue a warning if the result is
   used.  */

static int fold_deferring_overflow_warnings;

/* If a warning about undefined overflow is deferred, this is the
   warning.  Note that this may cause us to turn two warnings into
   one, but that is fine since it is sufficient to only give one
   warning per expression.  */

static const char* fold_deferred_overflow_warning;

/* If a warning about undefined overflow is deferred, this is the
   level at which the warning should be emitted.  */

static enum warn_strict_overflow_code fold_deferred_overflow_code;

/* Start deferring overflow warnings.  We could use a stack here to
   permit nested calls, but at present it is not necessary.  */

void
fold_defer_overflow_warnings (void)
{
  ++fold_deferring_overflow_warnings;
}

/* Stop deferring overflow warnings.  If there is a pending warning,
   and ISSUE is true, then issue the warning if appropriate.  STMT is
   the statement with which the warning should be associated (used for
   location information); STMT may be NULL.  CODE is the level of the
   warning--a warn_strict_overflow_code value.  This function will use
   the smaller of CODE and the deferred code when deciding whether to
   issue the warning.  CODE may be zero to mean to always use the
   deferred code.  */

void
fold_undefer_overflow_warnings (bool issue, const_gimple stmt, int code)
{
  const char *warnmsg;
  location_t locus;

  gcc_assert (fold_deferring_overflow_warnings > 0);
  --fold_deferring_overflow_warnings;
  if (fold_deferring_overflow_warnings > 0)
    {
      if (fold_deferred_overflow_warning != NULL
          && code != 0
          && code < (int) fold_deferred_overflow_code)
        fold_deferred_overflow_code = code;
      return;
    }

  warnmsg = fold_deferred_overflow_warning;
  fold_deferred_overflow_warning = NULL;

  if (!issue || warnmsg == NULL)
    return;

  if (gimple_no_warning_p (stmt))
    return;

  /* Use the smallest code level when deciding to issue the
     warning.  */
  if (code == 0 || code > (int) fold_deferred_overflow_code)
    code = fold_deferred_overflow_code;

  if (!issue_strict_overflow_warning (code))
    return;

  if (stmt == NULL)
    locus = input_location;
  else
    locus = gimple_location (stmt);
  warning (OPT_Wstrict_overflow, "%H%s", &locus, warnmsg);
}

/* Stop deferring overflow warnings, ignoring any deferred
   warnings.  */

void
fold_undefer_and_ignore_overflow_warnings (void)
{
  fold_undefer_overflow_warnings (false, NULL, 0);
}

/* Whether we are deferring overflow warnings.  */

bool
fold_deferring_overflow_warnings_p (void)
{
  return fold_deferring_overflow_warnings > 0;
}
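
/* Editorial sketch of the intended calling pattern (hypothetical caller,
   not code from this file):

     fold_defer_overflow_warnings ();
     folded = fold (expr);
     ... decide whether FOLDED will actually be used ...
     fold_undefer_overflow_warnings (used_p, stmt, 0);

   Any -Wstrict-overflow warning triggered inside fold is queued rather than
   emitted, and surfaces only if the caller passes ISSUE == true.  */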

/* This is called when we fold something based on the fact that signed
   overflow is undefined.  */

static void
fold_overflow_warning (const char* gmsgid, enum warn_strict_overflow_code wc)
{
  if (fold_deferring_overflow_warnings > 0)
    {
      if (fold_deferred_overflow_warning == NULL
          || wc < fold_deferred_overflow_code)
        {
          fold_deferred_overflow_warning = gmsgid;
          fold_deferred_overflow_code = wc;
        }
    }
  else if (issue_strict_overflow_warning (wc))
    warning (OPT_Wstrict_overflow, gmsgid);
}

/* Return true if the built-in mathematical function specified by CODE
   is odd, i.e. -f(x) == f(-x).  */

static bool
negate_mathfn_p (enum built_in_function code)
{
  switch (code)
    {
    CASE_FLT_FN (BUILT_IN_ASIN):
    CASE_FLT_FN (BUILT_IN_ASINH):
    CASE_FLT_FN (BUILT_IN_ATAN):
    CASE_FLT_FN (BUILT_IN_ATANH):
    CASE_FLT_FN (BUILT_IN_CASIN):
    CASE_FLT_FN (BUILT_IN_CASINH):
    CASE_FLT_FN (BUILT_IN_CATAN):
    CASE_FLT_FN (BUILT_IN_CATANH):
    CASE_FLT_FN (BUILT_IN_CBRT):
    CASE_FLT_FN (BUILT_IN_CPROJ):
    CASE_FLT_FN (BUILT_IN_CSIN):
    CASE_FLT_FN (BUILT_IN_CSINH):
    CASE_FLT_FN (BUILT_IN_CTAN):
    CASE_FLT_FN (BUILT_IN_CTANH):
    CASE_FLT_FN (BUILT_IN_ERF):
    CASE_FLT_FN (BUILT_IN_LLROUND):
    CASE_FLT_FN (BUILT_IN_LROUND):
    CASE_FLT_FN (BUILT_IN_ROUND):
    CASE_FLT_FN (BUILT_IN_SIN):
    CASE_FLT_FN (BUILT_IN_SINH):
    CASE_FLT_FN (BUILT_IN_TAN):
    CASE_FLT_FN (BUILT_IN_TANH):
    CASE_FLT_FN (BUILT_IN_TRUNC):
      return true;

    CASE_FLT_FN (BUILT_IN_LLRINT):
    CASE_FLT_FN (BUILT_IN_LRINT):
    CASE_FLT_FN (BUILT_IN_NEARBYINT):
    CASE_FLT_FN (BUILT_IN_RINT):
      return !flag_rounding_math;

    default:
      break;
    }
  return false;
}
/* Check whether we may negate an integer constant T without causing
   overflow.  */

bool
may_negate_without_overflow_p (const_tree t)
{
  unsigned HOST_WIDE_INT val;
  unsigned int prec;
  tree type;

  gcc_assert (TREE_CODE (t) == INTEGER_CST);

  type = TREE_TYPE (t);
  if (TYPE_UNSIGNED (type))
    return false;

  prec = TYPE_PRECISION (type);
  if (prec > HOST_BITS_PER_WIDE_INT)
    {
      if (TREE_INT_CST_LOW (t) != 0)
        return true;
      prec -= HOST_BITS_PER_WIDE_INT;
      val = TREE_INT_CST_HIGH (t);
    }
  else
    val = TREE_INT_CST_LOW (t);
  if (prec < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << prec) - 1;
  return val != ((unsigned HOST_WIDE_INT) 1 << (prec - 1));
}
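
/* Editorial illustration: the only signed value that cannot be negated is
   the type's minimum, whose bit pattern is exactly 1 << (prec - 1).  For a
   32-bit int, val == 0x80000000 (INT_MIN) makes the final test fail, while
   every other value, including INT_MAX, negates without overflow.  */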

/* Determine whether an expression T can be cheaply negated using
   the function negate_expr without introducing undefined overflow.  */

static bool
negate_expr_p (tree t)
{
  tree type;

  if (t == 0)
    return false;

  type = TREE_TYPE (t);

  STRIP_SIGN_NOPS (t);
  switch (TREE_CODE (t))
    {
    case INTEGER_CST:
      if (TYPE_OVERFLOW_WRAPS (type))
        return true;

      /* Check that -CST will not overflow type.  */
      return may_negate_without_overflow_p (t);
    case BIT_NOT_EXPR:
      return (INTEGRAL_TYPE_P (type)
              && TYPE_OVERFLOW_WRAPS (type));

    case FIXED_CST:
    case REAL_CST:
    case NEGATE_EXPR:
      return true;

    case COMPLEX_CST:
      return negate_expr_p (TREE_REALPART (t))
             && negate_expr_p (TREE_IMAGPART (t));

    case COMPLEX_EXPR:
      return negate_expr_p (TREE_OPERAND (t, 0))
             && negate_expr_p (TREE_OPERAND (t, 1));

    case CONJ_EXPR:
      return negate_expr_p (TREE_OPERAND (t, 0));

    case PLUS_EXPR:
      if (HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
          || HONOR_SIGNED_ZEROS (TYPE_MODE (type)))
        return false;
      /* -(A + B) -> (-B) - A.  */
      if (negate_expr_p (TREE_OPERAND (t, 1))
          && reorder_operands_p (TREE_OPERAND (t, 0),
                                 TREE_OPERAND (t, 1)))
        return true;
      /* -(A + B) -> (-A) - B.  */
      return negate_expr_p (TREE_OPERAND (t, 0));

    case MINUS_EXPR:
      /* We can't turn -(A-B) into B-A when we honor signed zeros.  */
      return !HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
             && !HONOR_SIGNED_ZEROS (TYPE_MODE (type))
             && reorder_operands_p (TREE_OPERAND (t, 0),
                                    TREE_OPERAND (t, 1));

    case MULT_EXPR:
      if (TYPE_UNSIGNED (TREE_TYPE (t)))
        break;

      /* Fall through.  */

    case RDIV_EXPR:
      if (! HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (TREE_TYPE (t))))
        return negate_expr_p (TREE_OPERAND (t, 1))
               || negate_expr_p (TREE_OPERAND (t, 0));
      break;

    case TRUNC_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case EXACT_DIV_EXPR:
      /* In general we can't negate A / B, because if A is INT_MIN and
         B is 1, we may turn this into INT_MIN / -1 which is undefined
         and actually traps on some architectures.  But if overflow is
         undefined, we can negate, because - (INT_MIN / 1) is an
         overflow.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (t))
          && !TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (t)))
        break;
      return negate_expr_p (TREE_OPERAND (t, 1))
             || negate_expr_p (TREE_OPERAND (t, 0));

    case NOP_EXPR:
      /* Negate -((double)float) as (double)(-float).  */
      if (TREE_CODE (type) == REAL_TYPE)
        {
          tree tem = strip_float_extensions (t);
          if (tem != t)
            return negate_expr_p (tem);
        }
      break;

    case CALL_EXPR:
      /* Negate -f(x) as f(-x).  */
      if (negate_mathfn_p (builtin_mathfn_code (t)))
        return negate_expr_p (CALL_EXPR_ARG (t, 0));
      break;

    case RSHIFT_EXPR:
      /* Optimize -((int)x >> 31) into (unsigned)x >> 31.  */
      if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
        {
          tree op1 = TREE_OPERAND (t, 1);
          if (TREE_INT_CST_HIGH (op1) == 0
              && (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1)
                 == TREE_INT_CST_LOW (op1))
            return true;
        }
      break;

    default:
      break;
    }
  return false;
}

/* Given T, an expression, return a folded tree for -T or NULL_TREE, if no
   simplification is possible.
   If negate_expr_p would return true for T, NULL_TREE will never be
   returned.  */

static tree
fold_negate_expr (tree t)
{
  tree type = TREE_TYPE (t);
  tree tem;

  switch (TREE_CODE (t))
    {
    /* Convert - (~A) to A + 1.  */
    case BIT_NOT_EXPR:
      if (INTEGRAL_TYPE_P (type))
        return fold_build2 (PLUS_EXPR, type, TREE_OPERAND (t, 0),
                            build_int_cst (type, 1));
      break;

    case INTEGER_CST:
      tem = fold_negate_const (t, type);
      if (TREE_OVERFLOW (tem) == TREE_OVERFLOW (t)
          || !TYPE_OVERFLOW_TRAPS (type))
        return tem;
      break;

    case REAL_CST:
      tem = fold_negate_const (t, type);
      /* Two's complement FP formats, such as c4x, may overflow.  */
      if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
        return tem;
      break;

    case FIXED_CST:
      tem = fold_negate_const (t, type);
      return tem;

    case COMPLEX_CST:
      {
        tree rpart = negate_expr (TREE_REALPART (t));
        tree ipart = negate_expr (TREE_IMAGPART (t));

        if ((TREE_CODE (rpart) == REAL_CST
             && TREE_CODE (ipart) == REAL_CST)
            || (TREE_CODE (rpart) == INTEGER_CST
                && TREE_CODE (ipart) == INTEGER_CST))
          return build_complex (type, rpart, ipart);
      }
      break;

    case COMPLEX_EXPR:
      if (negate_expr_p (t))
        return fold_build2 (COMPLEX_EXPR, type,
                            fold_negate_expr (TREE_OPERAND (t, 0)),
                            fold_negate_expr (TREE_OPERAND (t, 1)));
      break;

    case CONJ_EXPR:
      if (negate_expr_p (t))
        return fold_build1 (CONJ_EXPR, type,
                            fold_negate_expr (TREE_OPERAND (t, 0)));
      break;

    case NEGATE_EXPR:
      return TREE_OPERAND (t, 0);

    case PLUS_EXPR:
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
          && !HONOR_SIGNED_ZEROS (TYPE_MODE (type)))
        {
          /* -(A + B) -> (-B) - A.  */
          if (negate_expr_p (TREE_OPERAND (t, 1))
              && reorder_operands_p (TREE_OPERAND (t, 0),
                                     TREE_OPERAND (t, 1)))
            {
              tem = negate_expr (TREE_OPERAND (t, 1));
              return fold_build2 (MINUS_EXPR, type,
                                  tem, TREE_OPERAND (t, 0));
            }

          /* -(A + B) -> (-A) - B.  */
          if (negate_expr_p (TREE_OPERAND (t, 0)))
            {
              tem = negate_expr (TREE_OPERAND (t, 0));
              return fold_build2 (MINUS_EXPR, type,
                                  tem, TREE_OPERAND (t, 1));
            }
        }
      break;

    case MINUS_EXPR:
      /* - (A - B) -> B - A  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type))
          && !HONOR_SIGNED_ZEROS (TYPE_MODE (type))
          && reorder_operands_p (TREE_OPERAND (t, 0), TREE_OPERAND (t, 1)))
        return fold_build2 (MINUS_EXPR, type,
                            TREE_OPERAND (t, 1), TREE_OPERAND (t, 0));
      break;

    case MULT_EXPR:
      if (TYPE_UNSIGNED (type))
        break;

      /* Fall through.  */

    case RDIV_EXPR:
      if (! HONOR_SIGN_DEPENDENT_ROUNDING (TYPE_MODE (type)))
        {
          tem = TREE_OPERAND (t, 1);
          if (negate_expr_p (tem))
            return fold_build2 (TREE_CODE (t), type,
                                TREE_OPERAND (t, 0), negate_expr (tem));
          tem = TREE_OPERAND (t, 0);
          if (negate_expr_p (tem))
            return fold_build2 (TREE_CODE (t), type,
                                negate_expr (tem), TREE_OPERAND (t, 1));
        }
      break;

    case TRUNC_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case EXACT_DIV_EXPR:
      /* In general we can't negate A / B, because if A is INT_MIN and
         B is 1, we may turn this into INT_MIN / -1 which is undefined
         and actually traps on some architectures.  But if overflow is
         undefined, we can negate, because - (INT_MIN / 1) is an
         overflow.  */
      if (!INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_UNDEFINED (type))
        {
          const char * const warnmsg = G_("assuming signed overflow does not "
                                          "occur when negating a division");
          tem = TREE_OPERAND (t, 1);
          if (negate_expr_p (tem))
            {
              if (INTEGRAL_TYPE_P (type)
                  && (TREE_CODE (tem) != INTEGER_CST
                      || integer_onep (tem)))
                fold_overflow_warning (warnmsg, WARN_STRICT_OVERFLOW_MISC);
              return fold_build2 (TREE_CODE (t), type,
                                  TREE_OPERAND (t, 0), negate_expr (tem));
            }
          tem = TREE_OPERAND (t, 0);
          if (negate_expr_p (tem))
            {
              if (INTEGRAL_TYPE_P (type)
                  && (TREE_CODE (tem) != INTEGER_CST
                      || tree_int_cst_equal (tem, TYPE_MIN_VALUE (type))))
                fold_overflow_warning (warnmsg, WARN_STRICT_OVERFLOW_MISC);
              return fold_build2 (TREE_CODE (t), type,
                                  negate_expr (tem), TREE_OPERAND (t, 1));
            }
        }
      break;

    case NOP_EXPR:
      /* Convert -((double)float) into (double)(-float).  */
      if (TREE_CODE (type) == REAL_TYPE)
        {
          tem = strip_float_extensions (t);
          if (tem != t && negate_expr_p (tem))
            return fold_convert (type, negate_expr (tem));
        }
      break;

    case CALL_EXPR:
      /* Negate -f(x) as f(-x).  */
      if (negate_mathfn_p (builtin_mathfn_code (t))
          && negate_expr_p (CALL_EXPR_ARG (t, 0)))
        {
          tree fndecl, arg;

          fndecl = get_callee_fndecl (t);
          arg = negate_expr (CALL_EXPR_ARG (t, 0));
          return build_call_expr (fndecl, 1, arg);
        }
      break;

    case RSHIFT_EXPR:
      /* Optimize -((int)x >> 31) into (unsigned)x >> 31.  */
      if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
        {
          tree op1 = TREE_OPERAND (t, 1);
          if (TREE_INT_CST_HIGH (op1) == 0
              && (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1)
                 == TREE_INT_CST_LOW (op1))
            {
              tree ntype = TYPE_UNSIGNED (type)
                           ? signed_type_for (type)
                           : unsigned_type_for (type);
              tree temp = fold_convert (ntype, TREE_OPERAND (t, 0));
              temp = fold_build2 (RSHIFT_EXPR, ntype, temp, op1);
              return fold_convert (type, temp);
            }
        }
      break;

    default:
      break;
    }

  return NULL_TREE;
}

/* Like fold_negate_expr, but return a NEGATE_EXPR tree, if T cannot be
   negated in a simpler way.  Also allow for T to be NULL_TREE, in which case
   return NULL_TREE.  */

static tree
negate_expr (tree t)
{
  tree type, tem;

  if (t == NULL_TREE)
    return NULL_TREE;

  type = TREE_TYPE (t);
  STRIP_SIGN_NOPS (t);

  tem = fold_negate_expr (t);
  if (!tem)
    tem = build1 (NEGATE_EXPR, TREE_TYPE (t), t);
  return fold_convert (type, tem);
}

/* Split a tree IN into a constant, literal and variable parts that could be
   combined with CODE to make IN.  "constant" means an expression with
   TREE_CONSTANT but that isn't an actual constant.  CODE must be a
   commutative arithmetic operation.  Store the constant part into *CONP,
   the literal in *LITP and return the variable part.  If a part isn't
   present, set it to null.  If the tree does not decompose in this way,
   return the entire tree as the variable part and the other parts as null.

   If CODE is PLUS_EXPR we also split trees that use MINUS_EXPR.  In that
   case, we negate an operand that was subtracted.  Except if it is a
   literal for which we use *MINUS_LITP instead.

   If NEGATE_P is true, we are negating all of IN, again except a literal
   for which we use *MINUS_LITP instead.

   If IN is itself a literal or constant, return it as appropriate.

   Note that we do not guarantee that any of the three values will be the
   same type as IN, but they will have the same signedness and mode.  */

static tree
split_tree (tree in, enum tree_code code, tree *conp, tree *litp,
            tree *minus_litp, int negate_p)
{
  tree var = 0;

  *conp = 0;
  *litp = 0;
  *minus_litp = 0;

  /* Strip any conversions that don't change the machine mode or signedness.  */
  STRIP_SIGN_NOPS (in);

  if (TREE_CODE (in) == INTEGER_CST || TREE_CODE (in) == REAL_CST
      || TREE_CODE (in) == FIXED_CST)
    *litp = in;
  else if (TREE_CODE (in) == code
           || ((! FLOAT_TYPE_P (TREE_TYPE (in)) || flag_associative_math)
               && ! SAT_FIXED_POINT_TYPE_P (TREE_TYPE (in))
               /* We can associate addition and subtraction together (even
                  though the C standard doesn't say so) for integers because
                  the value is not affected.  For reals, the value might be
                  affected, so we can't.  */
               && ((code == PLUS_EXPR && TREE_CODE (in) == MINUS_EXPR)
                   || (code == MINUS_EXPR && TREE_CODE (in) == PLUS_EXPR))))
    {
      tree op0 = TREE_OPERAND (in, 0);
      tree op1 = TREE_OPERAND (in, 1);
      int neg1_p = TREE_CODE (in) == MINUS_EXPR;
      int neg_litp_p = 0, neg_conp_p = 0, neg_var_p = 0;

      /* First see if either of the operands is a literal, then a constant.  */
      if (TREE_CODE (op0) == INTEGER_CST || TREE_CODE (op0) == REAL_CST
          || TREE_CODE (op0) == FIXED_CST)
        *litp = op0, op0 = 0;
      else if (TREE_CODE (op1) == INTEGER_CST || TREE_CODE (op1) == REAL_CST
               || TREE_CODE (op1) == FIXED_CST)
        *litp = op1, neg_litp_p = neg1_p, op1 = 0;

      if (op0 != 0 && TREE_CONSTANT (op0))
        *conp = op0, op0 = 0;
      else if (op1 != 0 && TREE_CONSTANT (op1))
        *conp = op1, neg_conp_p = neg1_p, op1 = 0;

      /* If we haven't dealt with either operand, this is not a case we can
         decompose.  Otherwise, VAR is either of the ones remaining, if any.  */
      if (op0 != 0 && op1 != 0)
        var = in;
      else if (op0 != 0)
        var = op0;
      else
        var = op1, neg_var_p = neg1_p;

      /* Now do any needed negations.  */
      if (neg_litp_p)
        *minus_litp = *litp, *litp = 0;
      if (neg_conp_p)
        *conp = negate_expr (*conp);
      if (neg_var_p)
        var = negate_expr (var);
    }
  else if (TREE_CONSTANT (in))
    *conp = in;
  else
    var = in;

  if (negate_p)
    {
      if (*litp)
        *minus_litp = *litp, *litp = 0;
      else if (*minus_litp)
        *litp = *minus_litp, *minus_litp = 0;
      *conp = negate_expr (*conp);
      var = negate_expr (var);
    }

  return var;
}
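
/* Editorial illustration: splitting IN = x - 5 with CODE == PLUS_EXPR and
   NEGATE_P == 0 returns VAR == x with *LITP == 0, *MINUS_LITP == 5 (the
   subtracted literal) and *CONP == 0; associate_trees below can then
   recombine the pieces after other literals have been folded together.  */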

/* Re-associate trees split by the above function.  T1 and T2 are either
   expressions to associate or null.  Return the new expression, if any.  If
   we build an operation, do it in TYPE and with CODE.  */

static tree
associate_trees (tree t1, tree t2, enum tree_code code, tree type)
{
  if (t1 == 0)
    return t2;
  else if (t2 == 0)
    return t1;

  /* If either input is CODE, a PLUS_EXPR, or a MINUS_EXPR, don't
     try to fold this since we will have infinite recursion.  But do
     deal with any NEGATE_EXPRs.  */
  if (TREE_CODE (t1) == code || TREE_CODE (t2) == code
      || TREE_CODE (t1) == MINUS_EXPR || TREE_CODE (t2) == MINUS_EXPR)
    {
      if (code == PLUS_EXPR)
        {
          if (TREE_CODE (t1) == NEGATE_EXPR)
            return build2 (MINUS_EXPR, type, fold_convert (type, t2),
                           fold_convert (type, TREE_OPERAND (t1, 0)));
          else if (TREE_CODE (t2) == NEGATE_EXPR)
            return build2 (MINUS_EXPR, type, fold_convert (type, t1),
                           fold_convert (type, TREE_OPERAND (t2, 0)));
          else if (integer_zerop (t2))
            return fold_convert (type, t1);
        }
      else if (code == MINUS_EXPR)
        {
          if (integer_zerop (t2))
            return fold_convert (type, t1);
        }

      return build2 (code, type, fold_convert (type, t1),
                     fold_convert (type, t2));
    }

  return fold_build2 (code, type, fold_convert (type, t1),
                      fold_convert (type, t2));
}

/* Check whether TYPE1 and TYPE2 are equivalent integer types, suitable
   for use in int_const_binop, size_binop and size_diffop.  */

static bool
int_binop_types_match_p (enum tree_code code, const_tree type1, const_tree type2)
{
  if (TREE_CODE (type1) != INTEGER_TYPE && !POINTER_TYPE_P (type1))
    return false;
  if (TREE_CODE (type2) != INTEGER_TYPE && !POINTER_TYPE_P (type2))
    return false;

  switch (code)
    {
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      return true;

    default:
      break;
    }

  return TYPE_UNSIGNED (type1) == TYPE_UNSIGNED (type2)
         && TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
         && TYPE_MODE (type1) == TYPE_MODE (type2);
}


/* Combine two integer constants ARG1 and ARG2 under operation CODE
   to produce a new constant.  Return NULL_TREE if we don't know how
   to evaluate CODE at compile-time.

   If NOTRUNC is nonzero, do not truncate the result to fit the data type.  */

tree
int_const_binop (enum tree_code code, const_tree arg1, const_tree arg2, int notrunc)
{
  unsigned HOST_WIDE_INT int1l, int2l;
  HOST_WIDE_INT int1h, int2h;
  unsigned HOST_WIDE_INT low;
  HOST_WIDE_INT hi;
  unsigned HOST_WIDE_INT garbagel;
  HOST_WIDE_INT garbageh;
  tree t;
  tree type = TREE_TYPE (arg1);
  int uns = TYPE_UNSIGNED (type);
  int is_sizetype
    = (TREE_CODE (type) == INTEGER_TYPE && TYPE_IS_SIZETYPE (type));
  int overflow = 0;

  int1l = TREE_INT_CST_LOW (arg1);
  int1h = TREE_INT_CST_HIGH (arg1);
  int2l = TREE_INT_CST_LOW (arg2);
  int2h = TREE_INT_CST_HIGH (arg2);

  switch (code)
    {
    case BIT_IOR_EXPR:
      low = int1l | int2l, hi = int1h | int2h;
      break;

    case BIT_XOR_EXPR:
      low = int1l ^ int2l, hi = int1h ^ int2h;
      break;

    case BIT_AND_EXPR:
      low = int1l & int2l, hi = int1h & int2h;
      break;

    case RSHIFT_EXPR:
      int2l = -int2l;
      /* ... fall through ...  */
    case LSHIFT_EXPR:
      /* It's unclear from the C standard whether shifts can overflow.
         The following code ignores overflow; perhaps a C standard
         interpretation ruling is needed.  */
      lshift_double (int1l, int1h, int2l, TYPE_PRECISION (type),
                     &low, &hi, !uns);
      break;

    case RROTATE_EXPR:
      int2l = - int2l;
      /* ... fall through ...  */
    case LROTATE_EXPR:
      lrotate_double (int1l, int1h, int2l, TYPE_PRECISION (type),
                      &low, &hi);
      break;

    case PLUS_EXPR:
      overflow = add_double (int1l, int1h, int2l, int2h, &low, &hi);
      break;

    case MINUS_EXPR:
      neg_double (int2l, int2h, &low, &hi);
      add_double (int1l, int1h, low, hi, &low, &hi);
      overflow = OVERFLOW_SUM_SIGN (hi, int2h, int1h);
      break;

    case MULT_EXPR:
      overflow = mul_double (int1l, int1h, int2l, int2h, &low, &hi);
      break;

    case TRUNC_DIV_EXPR:
    case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
    case EXACT_DIV_EXPR:
      /* This is a shortcut for a common special case.  */
      if (int2h == 0 && (HOST_WIDE_INT) int2l > 0
          && !TREE_OVERFLOW (arg1)
          && !TREE_OVERFLOW (arg2)
          && int1h == 0 && (HOST_WIDE_INT) int1l >= 0)
        {
          if (code == CEIL_DIV_EXPR)
            int1l += int2l - 1;

          low = int1l / int2l, hi = 0;
          break;
        }

      /* ... fall through ...  */

    case ROUND_DIV_EXPR:
      if (int2h == 0 && int2l == 0)
        return NULL_TREE;
      if (int2h == 0 && int2l == 1)
        {
          low = int1l, hi = int1h;
          break;
        }
      if (int1l == int2l && int1h == int2h
          && ! (int1l == 0 && int1h == 0))
        {
          low = 1, hi = 0;
          break;
        }
      overflow = div_and_round_double (code, uns, int1l, int1h, int2l, int2h,
                                       &low, &hi, &garbagel, &garbageh);
      break;

    case TRUNC_MOD_EXPR:
    case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
      /* This is a shortcut for a common special case.  */
      if (int2h == 0 && (HOST_WIDE_INT) int2l > 0
          && !TREE_OVERFLOW (arg1)
          && !TREE_OVERFLOW (arg2)
          && int1h == 0 && (HOST_WIDE_INT) int1l >= 0)
        {
          if (code == CEIL_MOD_EXPR)
            int1l += int2l - 1;
          low = int1l % int2l, hi = 0;
          break;
        }

      /* ... fall through ...  */

    case ROUND_MOD_EXPR:
      if (int2h == 0 && int2l == 0)
        return NULL_TREE;
      overflow = div_and_round_double (code, uns,
                                       int1l, int1h, int2l, int2h,
                                       &garbagel, &garbageh, &low, &hi);
      break;

    case MIN_EXPR:
    case MAX_EXPR:
      if (uns)
        low = (((unsigned HOST_WIDE_INT) int1h
                < (unsigned HOST_WIDE_INT) int2h)
               || (((unsigned HOST_WIDE_INT) int1h
                    == (unsigned HOST_WIDE_INT) int2h)
                   && int1l < int2l));
      else
        low = (int1h < int2h
               || (int1h == int2h && int1l < int2l));

      if (low == (code == MIN_EXPR))
        low = int1l, hi = int1h;
      else
        low = int2l, hi = int2h;
      break;

    default:
      return NULL_TREE;
    }

  if (notrunc)
    {
      t = build_int_cst_wide (TREE_TYPE (arg1), low, hi);

      /* Propagate overflow flags ourselves.  */
      if (((!uns || is_sizetype) && overflow)
          | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2))
        {
          t = copy_node (t);
          TREE_OVERFLOW (t) = 1;
        }
    }
  else
    t = force_fit_type_double (TREE_TYPE (arg1), low, hi, 1,
                               ((!uns || is_sizetype) && overflow)
                               | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));

  return t;
}
1805
1806/* Combine two constants ARG1 and ARG2 under operation CODE to produce a new
1807 constant. We assume ARG1 and ARG2 have the same data type, or at least
1808 are the same kind of constant and the same machine mode. Return zero if
1809 combining the constants is not allowed in the current operating mode.
1810
1811 If NOTRUNC is nonzero, do not truncate the result to fit the data type. */
1812
1813static tree
1814const_binop (enum tree_code code, tree arg1, tree arg2, int notrunc)
1815{
1816 /* Sanity check for the recursive cases. */
1817 if (!arg1 || !arg2)
1818 return NULL_TREE;
1819
1820 STRIP_NOPS (arg1);
1821 STRIP_NOPS (arg2);
1822
1823 if (TREE_CODE (arg1) == INTEGER_CST)
1824 return int_const_binop (code, arg1, arg2, notrunc);
1825
1826 if (TREE_CODE (arg1) == REAL_CST)
1827 {
1828 enum machine_mode mode;
1829 REAL_VALUE_TYPE d1;
1830 REAL_VALUE_TYPE d2;
1831 REAL_VALUE_TYPE value;
1832 REAL_VALUE_TYPE result;
1833 bool inexact;
1834 tree t, type;
1835
1836 /* The following codes are handled by real_arithmetic. */
1837 switch (code)
1838 {
1839 case PLUS_EXPR:
1840 case MINUS_EXPR:
1841 case MULT_EXPR:
1842 case RDIV_EXPR:
1843 case MIN_EXPR:
1844 case MAX_EXPR:
1845 break;
1846
1847 default:
1848 return NULL_TREE;
1849 }
1850
1851 d1 = TREE_REAL_CST (arg1);
1852 d2 = TREE_REAL_CST (arg2);
1853
1854 type = TREE_TYPE (arg1);
1855 mode = TYPE_MODE (type);
1856
1857 /* Don't perform operation if we honor signaling NaNs and
1858 either operand is a NaN. */
1859 if (HONOR_SNANS (mode)
1860 && (REAL_VALUE_ISNAN (d1) || REAL_VALUE_ISNAN (d2)))
1861 return NULL_TREE;
1862
1863 /* Don't perform operation if it would raise a division
1864 by zero exception. */
1865 if (code == RDIV_EXPR
1866 && REAL_VALUES_EQUAL (d2, dconst0)
1867 && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
1868 return NULL_TREE;
1869
1870 /* If either operand is a NaN, just return it. Otherwise, set up
1871 for floating-point trap; we return an overflow. */
1872 if (REAL_VALUE_ISNAN (d1))
1873 return arg1;
1874 else if (REAL_VALUE_ISNAN (d2))
1875 return arg2;
1876
1877 inexact = real_arithmetic (&value, code, &d1, &d2);
1878 real_convert (&result, mode, &value);
1879
1880 /* Don't constant fold this floating point operation if
1881 the result has overflowed and flag_trapping_math is set. */
1882 if (flag_trapping_math
1883 && MODE_HAS_INFINITIES (mode)
1884 && REAL_VALUE_ISINF (result)
1885 && !REAL_VALUE_ISINF (d1)
1886 && !REAL_VALUE_ISINF (d2))
1887 return NULL_TREE;
1888
1889 /* Don't constant fold this floating point operation if the
1890 result may depend upon the run-time rounding mode and
1891 flag_rounding_math is set, or if GCC's software emulation
1892 is unable to accurately represent the result. */
1893 if ((flag_rounding_math
1894 || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
1895 && (inexact || !real_identical (&result, &value)))
1896 return NULL_TREE;
1897
1898 t = build_real (type, result);
1899
1900 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2);
1901 return t;
1902 }
1903
1904 if (TREE_CODE (arg1) == FIXED_CST)
1905 {
1906 FIXED_VALUE_TYPE f1;
1907 FIXED_VALUE_TYPE f2;
1908 FIXED_VALUE_TYPE result;
1909 tree t, type;
1910 int sat_p;
1911 bool overflow_p;
1912
1913 /* The following codes are handled by fixed_arithmetic. */
1914 switch (code)
1915 {
1916 case PLUS_EXPR:
1917 case MINUS_EXPR:
1918 case MULT_EXPR:
1919 case TRUNC_DIV_EXPR:
1920 f2 = TREE_FIXED_CST (arg2);
1921 break;
1922
1923 case LSHIFT_EXPR:
1924 case RSHIFT_EXPR:
1925 f2.data.high = TREE_INT_CST_HIGH (arg2);
1926 f2.data.low = TREE_INT_CST_LOW (arg2);
1927 f2.mode = SImode;
1928 break;
1929
1930 default:
1931 return NULL_TREE;
1932 }
1933
1934 f1 = TREE_FIXED_CST (arg1);
1935 type = TREE_TYPE (arg1);
1936 sat_p = TYPE_SATURATING (type);
1937 overflow_p = fixed_arithmetic (&result, code, &f1, &f2, sat_p);
1938 t = build_fixed (type, result);
1939 /* Propagate overflow flags. */
1940 if (overflow_p | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2))
1941 {
1942 TREE_OVERFLOW (t) = 1;
1943 TREE_CONSTANT_OVERFLOW (t) = 1;
1944 }
1945 else if (TREE_CONSTANT_OVERFLOW (arg1) | TREE_CONSTANT_OVERFLOW (arg2))
1946 TREE_CONSTANT_OVERFLOW (t) = 1;
1947 return t;
1948 }
1949
1950 if (TREE_CODE (arg1) == COMPLEX_CST)
1951 {
1952 tree type = TREE_TYPE (arg1);
1953 tree r1 = TREE_REALPART (arg1);
1954 tree i1 = TREE_IMAGPART (arg1);
1955 tree r2 = TREE_REALPART (arg2);
1956 tree i2 = TREE_IMAGPART (arg2);
1957 tree real, imag;
1958
1959 switch (code)
1960 {
1961 case PLUS_EXPR:
1962 case MINUS_EXPR:
1963 real = const_binop (code, r1, r2, notrunc);
1964 imag = const_binop (code, i1, i2, notrunc);
1965 break;
1966
1967 case MULT_EXPR:
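	  /* (r1 + i1*I) * (r2 + i2*I)
	     = (r1*r2 - i1*i2) + (r1*i2 + i1*r2)*I.  */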
1968 real = const_binop (MINUS_EXPR,
1969 const_binop (MULT_EXPR, r1, r2, notrunc),
1970 const_binop (MULT_EXPR, i1, i2, notrunc),
1971 notrunc);
1972 imag = const_binop (PLUS_EXPR,
1973 const_binop (MULT_EXPR, r1, i2, notrunc),
1974 const_binop (MULT_EXPR, i1, r2, notrunc),
1975 notrunc);
1976 break;
1977
1978 case RDIV_EXPR:
1979 {
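	    /* Complex division by the textbook formula:
	       (r1 + i1*I) / (r2 + i2*I)
	       = ((r1*r2 + i1*i2) + (i1*r2 - r1*i2)*I) / (r2*r2 + i2*i2).  */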
1980 tree magsquared
1981 = const_binop (PLUS_EXPR,
1982 const_binop (MULT_EXPR, r2, r2, notrunc),
1983 const_binop (MULT_EXPR, i2, i2, notrunc),
1984 notrunc);
1985 tree t1
1986 = const_binop (PLUS_EXPR,
1987 const_binop (MULT_EXPR, r1, r2, notrunc),
1988 const_binop (MULT_EXPR, i1, i2, notrunc),
1989 notrunc);
1990 tree t2
1991 = const_binop (MINUS_EXPR,
1992 const_binop (MULT_EXPR, i1, r2, notrunc),
1993 const_binop (MULT_EXPR, r1, i2, notrunc),
1994 notrunc);
1995
1996 if (INTEGRAL_TYPE_P (TREE_TYPE (r1)))
1997 code = TRUNC_DIV_EXPR;
1998
1999 real = const_binop (code, t1, magsquared, notrunc);
2000 imag = const_binop (code, t2, magsquared, notrunc);
2001 }
2002 break;
2003
2004 default:
2005 return NULL_TREE;
2006 }
2007
2008 if (real && imag)
2009 return build_complex (type, real, imag);
2010 }
2011
2012 return NULL_TREE;
2013}
2014
2015/* Create a size type INT_CST node with NUMBER sign extended. KIND
2016 indicates which particular sizetype to create. */
2017
2018tree
2019size_int_kind (HOST_WIDE_INT number, enum size_type_kind kind)
2020{
2021 return build_int_cst (sizetype_tab[(int) kind], number);
2022}
2023\f
2024/* Combine operands OP1 and OP2 with arithmetic operation CODE. CODE
2025 is a tree code. The type of the result is taken from the operands.
2026 Both must be equivalent integer types, a la int_binop_types_match_p.
2027 If the operands are constant, so is the result. */
2028
2029tree
2030size_binop (enum tree_code code, tree arg0, tree arg1)
2031{
2032 tree type = TREE_TYPE (arg0);
2033
2034 if (arg0 == error_mark_node || arg1 == error_mark_node)
2035 return error_mark_node;
2036
2037 gcc_assert (int_binop_types_match_p (code, TREE_TYPE (arg0),
2038 TREE_TYPE (arg1)));
2039
2040 /* Handle the special case of two integer constants faster. */
2041 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
2042 {
2043 /* And some specific cases even faster than that. */
2044 if (code == PLUS_EXPR)
2045 {
2046 if (integer_zerop (arg0) && !TREE_OVERFLOW (arg0))
2047 return arg1;
2048 if (integer_zerop (arg1) && !TREE_OVERFLOW (arg1))
2049 return arg0;
2050 }
2051 else if (code == MINUS_EXPR)
2052 {
2053 if (integer_zerop (arg1) && !TREE_OVERFLOW (arg1))
2054 return arg0;
2055 }
2056 else if (code == MULT_EXPR)
2057 {
2058 if (integer_onep (arg0) && !TREE_OVERFLOW (arg0))
2059 return arg1;
2060 }
2061
2062 /* Handle general case of two integer constants. */
2063 return int_const_binop (code, arg0, arg1, 0);
2064 }
2065
2066 return fold_build2 (code, type, arg0, arg1);
2067}
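/* Hypothetical usage sketch (not from the original source): a size
   computation such as size_binop (PLUS_EXPR, size_int (4), size_int (8))
   takes the INTEGER_CST fast path above and returns the sizetype
   constant 12 without building a PLUS_EXPR node.  */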
2068
2069/* Given two values, either both of sizetype or both of bitsizetype,
2070 compute the difference between the two values. Return the value
2071 in signed type corresponding to the type of the operands. */
2072
2073tree
2074size_diffop (tree arg0, tree arg1)
2075{
2076 tree type = TREE_TYPE (arg0);
2077 tree ctype;
2078
2079 gcc_assert (int_binop_types_match_p (MINUS_EXPR, TREE_TYPE (arg0),
2080 TREE_TYPE (arg1)));
2081
2082 /* If the type is already signed, just do the simple thing. */
2083 if (!TYPE_UNSIGNED (type))
2084 return size_binop (MINUS_EXPR, arg0, arg1);
2085
2086 if (type == sizetype)
2087 ctype = ssizetype;
2088 else if (type == bitsizetype)
2089 ctype = sbitsizetype;
2090 else
2091 ctype = signed_type_for (type);
2092
2093 /* If either operand is not a constant, do the conversions to the signed
2094 type and subtract. The hardware will do the right thing with any
2095 overflow in the subtraction. */
2096 if (TREE_CODE (arg0) != INTEGER_CST || TREE_CODE (arg1) != INTEGER_CST)
2097 return size_binop (MINUS_EXPR, fold_convert (ctype, arg0),
2098 fold_convert (ctype, arg1));
2099
2100 /* If ARG0 is larger than ARG1, subtract and return the result in CTYPE.
2101 Otherwise, subtract the other way, convert to CTYPE (we know that can't
2102 overflow) and negate (which can't either). Special-case a result
2103 of zero while we're here. */
2104 if (tree_int_cst_equal (arg0, arg1))
2105 return build_int_cst (ctype, 0);
2106 else if (tree_int_cst_lt (arg1, arg0))
2107 return fold_convert (ctype, size_binop (MINUS_EXPR, arg0, arg1));
2108 else
2109 return size_binop (MINUS_EXPR, build_int_cst (ctype, 0),
2110 fold_convert (ctype, size_binop (MINUS_EXPR,
2111 arg1, arg0)));
2112}
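/* Illustrative example (values invented): for sizetype ARG0 = 2 and
   ARG1 = 5, ARG1 > ARG0, so the result is computed as
   0 - (ssizetype) (5 - 2), i.e. -3 in ssizetype; the subtraction is
   done in the order that cannot overflow the unsigned type.  */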
2113\f
2114/* A subroutine of fold_convert_const handling conversions of an
2115 INTEGER_CST to another integer type. */
2116
2117static tree
2118fold_convert_const_int_from_int (tree type, const_tree arg1)
2119{
2120 tree t;
2121
2122 /* Given an integer constant, make new constant with new type,
2123 appropriately sign-extended or truncated. */
2124 t = force_fit_type_double (type, TREE_INT_CST_LOW (arg1),
2125 TREE_INT_CST_HIGH (arg1),
2126 /* Don't set the overflow when
2127 converting from a pointer, */
2128 !POINTER_TYPE_P (TREE_TYPE (arg1))
2129 /* or to a sizetype with same signedness
2130 and the precision is unchanged.
2131 ??? sizetype is always sign-extended,
2132 but its signedness depends on the
2133 frontend. Thus we see spurious overflows
2134 here if we do not check this. */
2135 && !((TYPE_PRECISION (TREE_TYPE (arg1))
2136 == TYPE_PRECISION (type))
2137 && (TYPE_UNSIGNED (TREE_TYPE (arg1))
2138 == TYPE_UNSIGNED (type))
2139 && ((TREE_CODE (TREE_TYPE (arg1)) == INTEGER_TYPE
2140 && TYPE_IS_SIZETYPE (TREE_TYPE (arg1)))
2141 || (TREE_CODE (type) == INTEGER_TYPE
2142 && TYPE_IS_SIZETYPE (type)))),
2143 (TREE_INT_CST_HIGH (arg1) < 0
2144 && (TYPE_UNSIGNED (type)
2145 < TYPE_UNSIGNED (TREE_TYPE (arg1))))
2146 | TREE_OVERFLOW (arg1));
2147
2148 return t;
2149}
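/* Illustrative example (values invented): narrowing the INTEGER_CST
   300 to an 8-bit signed type sign-truncates it to 44, and since the
   value changed, the result is returned with TREE_OVERFLOW set.  */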
2150
2151/* A subroutine of fold_convert_const handling conversions a REAL_CST
2152 to an integer type. */
2153
2154static tree
2155fold_convert_const_int_from_real (enum tree_code code, tree type, const_tree arg1)
2156{
2157 int overflow = 0;
2158 tree t;
2159
2160 /* The following code implements the floating point to integer
2161 conversion rules required by the Java Language Specification,
2162 that IEEE NaNs are mapped to zero and values that overflow
2163 the target precision saturate, i.e. values greater than
2164 INT_MAX are mapped to INT_MAX, and values less than INT_MIN
2165 are mapped to INT_MIN. These semantics are allowed by the
2166 C and C++ standards that simply state that the behavior of
2167 FP-to-integer conversion is unspecified upon overflow. */
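  /* For example (illustrative): converting the REAL_CST 1.0e30 to a
     32-bit signed type saturates to INT_MAX (2147483647), while a NaN
     converts to 0; both results have TREE_OVERFLOW set.  */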
2168
2169 HOST_WIDE_INT high, low;
2170 REAL_VALUE_TYPE r;
2171 REAL_VALUE_TYPE x = TREE_REAL_CST (arg1);
2172
2173 switch (code)
2174 {
2175 case FIX_TRUNC_EXPR:
2176 real_trunc (&r, VOIDmode, &x);
2177 break;
2178
2179 default:
2180 gcc_unreachable ();
2181 }
2182
2183 /* If R is NaN, return zero and show we have an overflow. */
2184 if (REAL_VALUE_ISNAN (r))
2185 {
2186 overflow = 1;
2187 high = 0;
2188 low = 0;
2189 }
2190
2191 /* See if R is less than the lower bound or greater than the
2192 upper bound. */
2193
2194 if (! overflow)
2195 {
2196 tree lt = TYPE_MIN_VALUE (type);
2197 REAL_VALUE_TYPE l = real_value_from_int_cst (NULL_TREE, lt);
2198 if (REAL_VALUES_LESS (r, l))
2199 {
2200 overflow = 1;
2201 high = TREE_INT_CST_HIGH (lt);
2202 low = TREE_INT_CST_LOW (lt);
2203 }
2204 }
2205
2206 if (! overflow)
2207 {
2208 tree ut = TYPE_MAX_VALUE (type);
2209 if (ut)
2210 {
2211 REAL_VALUE_TYPE u = real_value_from_int_cst (NULL_TREE, ut);
2212 if (REAL_VALUES_LESS (u, r))
2213 {
2214 overflow = 1;
2215 high = TREE_INT_CST_HIGH (ut);
2216 low = TREE_INT_CST_LOW (ut);
2217 }
2218 }
2219 }
2220
2221 if (! overflow)
2222 REAL_VALUE_TO_INT (&low, &high, r);
2223
2224 t = force_fit_type_double (type, low, high, -1,
2225 overflow | TREE_OVERFLOW (arg1));
2226 return t;
2227}
2228
2229/* A subroutine of fold_convert_const handling conversions of a
2230 FIXED_CST to an integer type. */
2231
2232static tree
2233fold_convert_const_int_from_fixed (tree type, const_tree arg1)
2234{
2235 tree t;
2236 double_int temp, temp_trunc;
2237 unsigned int mode;
2238
2239 /* Right shift FIXED_CST to temp by fbit. */
2240 temp = TREE_FIXED_CST (arg1).data;
2241 mode = TREE_FIXED_CST (arg1).mode;
2242 if (GET_MODE_FBIT (mode) < 2 * HOST_BITS_PER_WIDE_INT)
2243 {
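      /* lshift_double with a negative count shifts right, so this
	 discards the fractional bits, rounding toward negative
	 infinity for signed values.  */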
2244 lshift_double (temp.low, temp.high,
2245 - GET_MODE_FBIT (mode), 2 * HOST_BITS_PER_WIDE_INT,
2246 &temp.low, &temp.high, SIGNED_FIXED_POINT_MODE_P (mode));
2247
2248 /* Left shift temp to temp_trunc by fbit. */
2249 lshift_double (temp.low, temp.high,
2250 GET_MODE_FBIT (mode), 2 * HOST_BITS_PER_WIDE_INT,
2251 &temp_trunc.low, &temp_trunc.high,
2252 SIGNED_FIXED_POINT_MODE_P (mode));
2253 }
2254 else
2255 {
2256 temp.low = 0;
2257 temp.high = 0;
2258 temp_trunc.low = 0;
2259 temp_trunc.high = 0;
2260 }
2261
2262 /* If FIXED_CST is negative, we need to round the value toward 0: if any
2263 fractional bits were nonzero, i.e. temp_trunc differs from the original, add 1 to temp. */
2264 if (SIGNED_FIXED_POINT_MODE_P (mode) && temp_trunc.high < 0
2265 && !double_int_equal_p (TREE_FIXED_CST (arg1).data, temp_trunc))
2266 {
2267 double_int one;
2268 one.low = 1;
2269 one.high = 0;
2270 temp = double_int_add (temp, one);
2271 }
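  /* Worked example (illustrative): for a signed fixed-point value of
     -2.5, the arithmetic shift above yields -3 (rounded toward negative
     infinity); the discarded fraction is nonzero, so one is added back,
     giving -2, i.e. -2.5 rounded toward zero.  */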
2272
2273 /* Given a fixed-point constant, make new constant with new type,
2274 appropriately sign-extended or truncated. */
2275 t = force_fit_type_double (type, temp.low, temp.high, -1,
2276 (temp.high < 0
2277 && (TYPE_UNSIGNED (type)
2278 < TYPE_UNSIGNED (TREE_TYPE (arg1))))
2279 | TREE_OVERFLOW (arg1));
2280
2281 return t;
2282}
2283
2284/* A subroutine of fold_convert_const handling conversions a REAL_CST
2285 to another floating point type. */
2286
2287static tree
2288fold_convert_const_real_from_real (tree type, const_tree arg1)
2289{
2290 REAL_VALUE_TYPE value;
2291 tree t;
2292
2293 real_convert (&value, TYPE_MODE (type), &TREE_REAL_CST (arg1));
2294 t = build_real (type, value);
2295
2296 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1);
2297 return t;
2298}
2299
2300/* A subroutine of fold_convert_const handling conversions a FIXED_CST
2301 to a floating point type. */
2302
2303static tree
2304fold_convert_const_real_from_fixed (tree type, const_tree arg1)
2305{
2306 REAL_VALUE_TYPE value;
2307 tree t;
2308
2309 real_convert_from_fixed (&value, TYPE_MODE (type), &TREE_FIXED_CST (arg1));
2310 t = build_real (type, value);
2311
2312 TREE_OVERFLOW (t) = TREE_OVERFLOW (arg1);
2313 TREE_CONSTANT_OVERFLOW (t)
2314 = TREE_OVERFLOW (t) | TREE_CONSTANT_OVERFLOW (arg1);
2315 return t;
2316}
2317
2318/* A subroutine of fold_convert_const handling conversions a FIXED_CST
2319 to another fixed-point type. */
2320
2321static tree
2322fold_convert_const_fixed_from_fixed (tree type, const_tree arg1)
2323{
2324 FIXED_VALUE_TYPE value;
2325 tree t;
2326 bool overflow_p;
2327
2328 overflow_p = fixed_convert (&value, TYPE_MODE (type), &TREE_FIXED_CST (arg1),
2329 TYPE_SATURATING (type));
2330 t = build_fixed (type, value);
2331
2332 /* Propagate overflow flags. */
2333 if (overflow_p | TREE_OVERFLOW (arg1))
2334 {
2335 TREE_OVERFLOW (t) = 1;
2336 TREE_CONSTANT_OVERFLOW (t) = 1;
2337 }
2338 else if (TREE_CONSTANT_OVERFLOW (arg1))
2339 TREE_CONSTANT_OVERFLOW (t) = 1;
2340 return t;
2341}
2342
2343/* A subroutine of fold_convert_const handling conversions an INTEGER_CST
2344 to a fixed-point type. */
2345
2346static tree
2347fold_convert_const_fixed_from_int (tree type, const_tree arg1)
2348{
2349 FIXED_VALUE_TYPE value;
2350 tree t;
2351 bool overflow_p;
2352
2353 overflow_p = fixed_convert_from_int (&value, TYPE_MODE (type),
2354 TREE_INT_CST (arg1),
2355 TYPE_UNSIGNED (TREE_TYPE (arg1)),
2356 TYPE_SATURATING (type));
2357 t = build_fixed (type, value);
2358
2359 /* Propagate overflow flags. */
2360 if (overflow_p | TREE_OVERFLOW (arg1))
2361 {
2362 TREE_OVERFLOW (t) = 1;
2363 TREE_CONSTANT_OVERFLOW (t) = 1;
2364 }
2365 else if (TREE_CONSTANT_OVERFLOW (arg1))
2366 TREE_CONSTANT_OVERFLOW (t) = 1;
2367 return t;
2368}
2369
2370/* A subroutine of fold_convert_const handling conversions a REAL_CST
2371 to a fixed-point type. */
2372
2373static tree
2374fold_convert_const_fixed_from_real (tree type, const_tree arg1)
2375{
2376 FIXED_VALUE_TYPE value;
2377 tree t;
2378 bool overflow_p;
2379
2380 overflow_p = fixed_convert_from_real (&value, TYPE_MODE (type),
2381 &TREE_REAL_CST (arg1),
2382 TYPE_SATURATING (type));
2383 t = build_fixed (type, value);
2384
2385 /* Propagate overflow flags. */
2386 if (overflow_p | TREE_OVERFLOW (arg1))
2387 {
2388 TREE_OVERFLOW (t) = 1;
2389 TREE_CONSTANT_OVERFLOW (t) = 1;
2390 }
2391 else if (TREE_CONSTANT_OVERFLOW (arg1))
2392 TREE_CONSTANT_OVERFLOW (t) = 1;
2393 return t;
2394}
2395
2396/* Attempt to fold type conversion operation CODE of expression ARG1 to
2397 type TYPE. If no simplification can be done return NULL_TREE. */
2398
2399static tree
2400fold_convert_const (enum tree_code code, tree type, tree arg1)
2401{
2402 if (TREE_TYPE (arg1) == type)
2403 return arg1;
2404
2405 if (POINTER_TYPE_P (type) || INTEGRAL_TYPE_P (type)
2406 || TREE_CODE (type) == OFFSET_TYPE)
2407 {
2408 if (TREE_CODE (arg1) == INTEGER_CST)
2409 return fold_convert_const_int_from_int (type, arg1);
2410 else if (TREE_CODE (arg1) == REAL_CST)
2411 return fold_convert_const_int_from_real (code, type, arg1);
2412 else if (TREE_CODE (arg1) == FIXED_CST)
2413 return fold_convert_const_int_from_fixed (type, arg1);
2414 }
2415 else if (TREE_CODE (type) == REAL_TYPE)
2416 {
2417 if (TREE_CODE (arg1) == INTEGER_CST)
2418 return build_real_from_int_cst (type, arg1);
2419 else if (TREE_CODE (arg1) == REAL_CST)
2420 return fold_convert_const_real_from_real (type, arg1);
2421 else if (TREE_CODE (arg1) == FIXED_CST)
2422 return fold_convert_const_real_from_fixed (type, arg1);
2423 }
2424 else if (TREE_CODE (type) == FIXED_POINT_TYPE)
2425 {
2426 if (TREE_CODE (arg1) == FIXED_CST)
2427 return fold_convert_const_fixed_from_fixed (type, arg1);
2428 else if (TREE_CODE (arg1) == INTEGER_CST)
2429 return fold_convert_const_fixed_from_int (type, arg1);
2430 else if (TREE_CODE (arg1) == REAL_CST)
2431 return fold_convert_const_fixed_from_real (type, arg1);
2432 }
2433 return NULL_TREE;
2434}
2435
2436/* Construct a vector of vector type TYPE whose elements are all zero. */
2437
2438static tree
2439build_zero_vector (tree type)
2440{
2441 tree elem, list;
2442 int i, units;
2443
2444 elem = fold_convert_const (NOP_EXPR, TREE_TYPE (type), integer_zero_node);
2445 units = TYPE_VECTOR_SUBPARTS (type);
2446
2447 list = NULL_TREE;
2448 for (i = 0; i < units; i++)
2449 list = tree_cons (NULL_TREE, elem, list);
2450 return build_vector (type, list);
2451}
2452
2453/* Returns true if ARG is convertible to TYPE using a NOP_EXPR. */
2454
2455bool
2456fold_convertible_p (const_tree type, const_tree arg)
2457{
2458 tree orig = TREE_TYPE (arg);
2459
2460 if (type == orig)
2461 return true;
2462
2463 if (TREE_CODE (arg) == ERROR_MARK
2464 || TREE_CODE (type) == ERROR_MARK
2465 || TREE_CODE (orig) == ERROR_MARK)
2466 return false;
2467
2468 if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (orig))
2469 return true;
2470
2471 switch (TREE_CODE (type))
2472 {
2473 case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE:
2474 case POINTER_TYPE: case REFERENCE_TYPE:
2475 case OFFSET_TYPE:
2476 if (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)
2477 || TREE_CODE (orig) == OFFSET_TYPE)
2478 return true;
2479 return (TREE_CODE (orig) == VECTOR_TYPE
2480 && tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
2481
2482 case REAL_TYPE:
2483 case FIXED_POINT_TYPE:
2484 case COMPLEX_TYPE:
2485 case VECTOR_TYPE:
2486 case VOID_TYPE:
2487 return TREE_CODE (type) == TREE_CODE (orig);
2488
2489 default:
2490 return false;
2491 }
2492}
2493
2494/* Convert expression ARG to type TYPE. Used by the middle-end for
2495 simple conversions in preference to calling the front-end's convert. */
2496
2497tree
2498fold_convert (tree type, tree arg)
2499{
2500 tree orig = TREE_TYPE (arg);
2501 tree tem;
2502
2503 if (type == orig)
2504 return arg;
2505
2506 if (TREE_CODE (arg) == ERROR_MARK
2507 || TREE_CODE (type) == ERROR_MARK
2508 || TREE_CODE (orig) == ERROR_MARK)
2509 return error_mark_node;
2510
2511 if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (orig))
2512 return fold_build1 (NOP_EXPR, type, arg);
2513
2514 switch (TREE_CODE (type))
2515 {
2516 case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE:
2517 case POINTER_TYPE: case REFERENCE_TYPE:
2518 case OFFSET_TYPE:
2519 if (TREE_CODE (arg) == INTEGER_CST)
2520 {
2521 tem = fold_convert_const (NOP_EXPR, type, arg);
2522 if (tem != NULL_TREE)
2523 return tem;
2524 }
2525 if (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)
2526 || TREE_CODE (orig) == OFFSET_TYPE)
2527 return fold_build1 (NOP_EXPR, type, arg);
2528 if (TREE_CODE (orig) == COMPLEX_TYPE)
2529 {
2530 tem = fold_build1 (REALPART_EXPR, TREE_TYPE (orig), arg);
2531 return fold_convert (type, tem);
2532 }
2533 gcc_assert (TREE_CODE (orig) == VECTOR_TYPE
2534 && tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
2535 return fold_build1 (NOP_EXPR, type, arg);
2536
2537 case REAL_TYPE:
2538 if (TREE_CODE (arg) == INTEGER_CST)
2539 {
2540 tem = fold_convert_const (FLOAT_EXPR, type, arg);
2541 if (tem != NULL_TREE)
2542 return tem;
2543 }
2544 else if (TREE_CODE (arg) == REAL_CST)
2545 {
2546 tem = fold_convert_const (NOP_EXPR, type, arg);
2547 if (tem != NULL_TREE)
2548 return tem;
2549 }
2550 else if (TREE_CODE (arg) == FIXED_CST)
2551 {
2552 tem = fold_convert_const (FIXED_CONVERT_EXPR, type, arg);
2553 if (tem != NULL_TREE)
2554 return tem;
2555 }
2556
2557 switch (TREE_CODE (orig))
2558 {
2559 case INTEGER_TYPE:
2560 case BOOLEAN_TYPE: case ENUMERAL_TYPE:
2561 case POINTER_TYPE: case REFERENCE_TYPE:
2562 return fold_build1 (FLOAT_EXPR, type, arg);
2563
2564 case REAL_TYPE:
2565 return fold_build1 (NOP_EXPR, type, arg);
2566
2567 case FIXED_POINT_TYPE:
2568 return fold_build1 (FIXED_CONVERT_EXPR, type, arg);
2569
2570 case COMPLEX_TYPE:
2571 tem = fold_build1 (REALPART_EXPR, TREE_TYPE (orig), arg);
2572 return fold_convert (type, tem);
2573
2574 default:
2575 gcc_unreachable ();
2576 }
2577
2578 case FIXED_POINT_TYPE:
2579 if (TREE_CODE (arg) == FIXED_CST || TREE_CODE (arg) == INTEGER_CST
2580 || TREE_CODE (arg) == REAL_CST)
2581 {
2582 tem = fold_convert_const (FIXED_CONVERT_EXPR, type, arg);
2583 if (tem != NULL_TREE)
2584 return tem;
2585 }
2586
2587 switch (TREE_CODE (orig))
2588 {
2589 case FIXED_POINT_TYPE:
2590 case INTEGER_TYPE:
2591 case ENUMERAL_TYPE:
2592 case BOOLEAN_TYPE:
2593 case REAL_TYPE:
2594 return fold_build1 (FIXED_CONVERT_EXPR, type, arg);
2595
2596 case COMPLEX_TYPE:
2597 tem = fold_build1 (REALPART_EXPR, TREE_TYPE (orig), arg);
2598 return fold_convert (type, tem);
2599
2600 default:
2601 gcc_unreachable ();
2602 }
2603
2604 case COMPLEX_TYPE:
2605 switch (TREE_CODE (orig))
2606 {
2607 case INTEGER_TYPE:
2608 case BOOLEAN_TYPE: case ENUMERAL_TYPE:
2609 case POINTER_TYPE: case REFERENCE_TYPE:
2610 case REAL_TYPE:
2611 case FIXED_POINT_TYPE:
2612 return fold_build2 (COMPLEX_EXPR, type,
2613 fold_convert (TREE_TYPE (type), arg),
2614 fold_convert (TREE_TYPE (type),
2615 integer_zero_node));
2616 case COMPLEX_TYPE:
2617 {
2618 tree rpart, ipart;
2619
2620 if (TREE_CODE (arg) == COMPLEX_EXPR)
2621 {
2622 rpart = fold_convert (TREE_TYPE (type), TREE_OPERAND (arg, 0));
2623 ipart = fold_convert (TREE_TYPE (type), TREE_OPERAND (arg, 1));
2624 return fold_build2 (COMPLEX_EXPR, type, rpart, ipart);
2625 }
2626
2627 arg = save_expr (arg);
2628 rpart = fold_build1 (REALPART_EXPR, TREE_TYPE (orig), arg);
2629 ipart = fold_build1 (IMAGPART_EXPR, TREE_TYPE (orig), arg);
2630 rpart = fold_convert (TREE_TYPE (type), rpart);
2631 ipart = fold_convert (TREE_TYPE (type), ipart);
2632 return fold_build2 (COMPLEX_EXPR, type, rpart, ipart);
2633 }
2634
2635 default:
2636 gcc_unreachable ();
2637 }
2638
2639 case VECTOR_TYPE:
2640 if (integer_zerop (arg))
2641 return build_zero_vector (type);
2642 gcc_assert (tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (orig)));
2643 gcc_assert (INTEGRAL_TYPE_P (orig) || POINTER_TYPE_P (orig)
2644 || TREE_CODE (orig) == VECTOR_TYPE);
2645 return fold_build1 (VIEW_CONVERT_EXPR, type, arg);
2646
2647 case VOID_TYPE:
2648 tem = fold_ignored_result (arg);
2649 if (TREE_CODE (tem) == MODIFY_EXPR)
2650 return tem;
2651 return fold_build1 (NOP_EXPR, type, tem);
2652
2653 default:
2654 gcc_unreachable ();
2655 }
2656}
2657\f
2658/* Return false if expr can be assumed not to be an lvalue, true
2659 otherwise. */
2660
2661static bool
2662maybe_lvalue_p (const_tree x)
2663{
2664 /* We only need to wrap lvalue tree codes. */
2665 switch (TREE_CODE (x))
2666 {
2667 case VAR_DECL:
2668 case PARM_DECL:
2669 case RESULT_DECL:
2670 case LABEL_DECL:
2671 case FUNCTION_DECL:
2672 case SSA_NAME:
2673
2674 case COMPONENT_REF:
2675 case INDIRECT_REF:
2676 case ALIGN_INDIRECT_REF:
2677 case MISALIGNED_INDIRECT_REF:
2678 case ARRAY_REF:
2679 case ARRAY_RANGE_REF:
2680 case BIT_FIELD_REF:
2681 case OBJ_TYPE_REF:
2682
2683 case REALPART_EXPR:
2684 case IMAGPART_EXPR:
2685 case PREINCREMENT_EXPR:
2686 case PREDECREMENT_EXPR:
2687 case SAVE_EXPR:
2688 case TRY_CATCH_EXPR:
2689 case WITH_CLEANUP_EXPR:
2690 case COMPOUND_EXPR:
2691 case MODIFY_EXPR:
2692 case TARGET_EXPR:
2693 case COND_EXPR:
2694 case BIND_EXPR:
2695 case MIN_EXPR:
2696 case MAX_EXPR:
2697 break;
2698
2699 default:
2700 /* Assume the worst for front-end tree codes. */
2701 if ((int)TREE_CODE (x) >= NUM_TREE_CODES)
2702 break;
2703 return false;
2704 }
2705
2706 return true;
2707}
2708
2709/* Return an expr equal to X but certainly not valid as an lvalue. */
2710
2711tree
2712non_lvalue (tree x)
2713{
2714 /* While we are in GIMPLE, NON_LVALUE_EXPR doesn't mean anything to
2715 us. */
2716 if (in_gimple_form)
2717 return x;
2718
2719 if (! maybe_lvalue_p (x))
2720 return x;
2721 return build1 (NON_LVALUE_EXPR, TREE_TYPE (x), x);
2722}
2723
2724/* Nonzero means lvalues are limited to those valid in pedantic ANSI C.
2725 Zero means allow extended lvalues. */
2726
2727int pedantic_lvalues;
2728
2729/* When pedantic, return an expr equal to X but certainly not valid as a
2730 pedantic lvalue. Otherwise, return X. */
2731
2732static tree
2733pedantic_non_lvalue (tree x)
2734{
2735 if (pedantic_lvalues)
2736 return non_lvalue (x);
2737 else
2738 return x;
2739}
2740\f
2741/* Given a tree comparison code, return the code that is the logical inverse
2742 of the given code. It is not safe to do this for floating-point
2743 comparisons, except for NE_EXPR and EQ_EXPR, so we receive an HONOR_NANS
2744 flag as well: if reversing the comparison is unsafe, return ERROR_MARK. */
2745
2746enum tree_code
2747invert_tree_comparison (enum tree_code code, bool honor_nans)
2748{
2749 if (honor_nans && flag_trapping_math)
2750 return ERROR_MARK;
2751
2752 switch (code)
2753 {
2754 case EQ_EXPR:
2755 return NE_EXPR;
2756 case NE_EXPR:
2757 return EQ_EXPR;
2758 case GT_EXPR:
2759 return honor_nans ? UNLE_EXPR : LE_EXPR;
2760 case GE_EXPR:
2761 return honor_nans ? UNLT_EXPR : LT_EXPR;
2762 case LT_EXPR:
2763 return honor_nans ? UNGE_EXPR : GE_EXPR;
2764 case LE_EXPR:
2765 return honor_nans ? UNGT_EXPR : GT_EXPR;
2766 case LTGT_EXPR:
2767 return UNEQ_EXPR;
2768 case UNEQ_EXPR:
2769 return LTGT_EXPR;
2770 case UNGT_EXPR:
2771 return LE_EXPR;
2772 case UNGE_EXPR:
2773 return LT_EXPR;
2774 case UNLT_EXPR:
2775 return GE_EXPR;
2776 case UNLE_EXPR:
2777 return GT_EXPR;
2778 case ORDERED_EXPR:
2779 return UNORDERED_EXPR;
2780 case UNORDERED_EXPR:
2781 return ORDERED_EXPR;
2782 default:
2783 gcc_unreachable ();
2784 }
2785}
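/* Illustrative example (not from the original source): with NaNs
   honored, the inverse of LT_EXPR is UNGE_EXPR rather than GE_EXPR,
   because !(a < b) must also be true when either operand is a NaN.  */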
2786
2787/* Similar, but return the comparison that results if the operands are
2788 swapped. This is safe for floating-point. */
2789
2790enum tree_code
2791swap_tree_comparison (enum tree_code code)
2792{
2793 switch (code)
2794 {
2795 case EQ_EXPR:
2796 case NE_EXPR:
2797 case ORDERED_EXPR:
2798 case UNORDERED_EXPR:
2799 case LTGT_EXPR:
2800 case UNEQ_EXPR:
2801 return code;
2802 case GT_EXPR:
2803 return LT_EXPR;
2804 case GE_EXPR:
2805 return LE_EXPR;
2806 case LT_EXPR:
2807 return GT_EXPR;
2808 case LE_EXPR:
2809 return GE_EXPR;
2810 case UNGT_EXPR:
2811 return UNLT_EXPR;
2812 case UNGE_EXPR:
2813 return UNLE_EXPR;
2814 case UNLT_EXPR:
2815 return UNGT_EXPR;
2816 case UNLE_EXPR:
2817 return UNGE_EXPR;
2818 default:
2819 gcc_unreachable ();
2820 }
2821}
2822
2823
2824/* Convert a comparison tree code from an enum tree_code representation
2825 into a compcode bit-based encoding. This function is the inverse of
2826 compcode_to_comparison. */
2827
2828static enum comparison_code
2829comparison_to_compcode (enum tree_code code)
2830{
2831 switch (code)
2832 {
2833 case LT_EXPR:
2834 return COMPCODE_LT;
2835 case EQ_EXPR:
2836 return COMPCODE_EQ;
2837 case LE_EXPR:
2838 return COMPCODE_LE;
2839 case GT_EXPR:
2840 return COMPCODE_GT;
2841 case NE_EXPR:
2842 return COMPCODE_NE;
2843 case GE_EXPR:
2844 return COMPCODE_GE;
2845 case ORDERED_EXPR:
2846 return COMPCODE_ORD;
2847 case UNORDERED_EXPR:
2848 return COMPCODE_UNORD;
2849 case UNLT_EXPR:
2850 return COMPCODE_UNLT;
2851 case UNEQ_EXPR:
2852 return COMPCODE_UNEQ;
2853 case UNLE_EXPR:
2854 return COMPCODE_UNLE;
2855 case UNGT_EXPR:
2856 return COMPCODE_UNGT;
2857 case LTGT_EXPR:
2858 return COMPCODE_LTGT;
2859 case UNGE_EXPR:
2860 return COMPCODE_UNGE;
2861 default:
2862 gcc_unreachable ();
2863 }
2864}
2865
2866/* Convert a compcode bit-based encoding of a comparison operator back
2867 to GCC's enum tree_code representation. This function is the
2868 inverse of comparison_to_compcode. */
2869
2870static enum tree_code
2871compcode_to_comparison (enum comparison_code code)
2872{
2873 switch (code)
2874 {
2875 case COMPCODE_LT:
2876 return LT_EXPR;
2877 case COMPCODE_EQ:
2878 return EQ_EXPR;
2879 case COMPCODE_LE:
2880 return LE_EXPR;
2881 case COMPCODE_GT:
2882 return GT_EXPR;
2883 case COMPCODE_NE:
2884 return NE_EXPR;
2885 case COMPCODE_GE:
2886 return GE_EXPR;
2887 case COMPCODE_ORD:
2888 return ORDERED_EXPR;
2889 case COMPCODE_UNORD:
2890 return UNORDERED_EXPR;
2891 case COMPCODE_UNLT:
2892 return UNLT_EXPR;
2893 case COMPCODE_UNEQ:
2894 return UNEQ_EXPR;
2895 case COMPCODE_UNLE:
2896 return UNLE_EXPR;
2897 case COMPCODE_UNGT:
2898 return UNGT_EXPR;
2899 case COMPCODE_LTGT:
2900 return LTGT_EXPR;
2901 case COMPCODE_UNGE:
2902 return UNGE_EXPR;
2903 default:
2904 gcc_unreachable ();
2905 }
2906}
2907
2908/* Return a tree for the comparison which is the combination of
2909 doing the AND or OR (depending on CODE) of the two operations LCODE
2910 and RCODE on the identical operands LL_ARG and LR_ARG. Take into account
2911 the possibility of trapping if the mode has NaNs, and return NULL_TREE
2912 if this makes the transformation invalid. */
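/* Worked example (illustrative): combining (a < b) || (a == b) ORs
   COMPCODE_LT (1) with COMPCODE_EQ (2) to get COMPCODE_LE (3), so the
   pair folds to a <= b whenever that is safe for the mode.  */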
2913
2914tree
2915combine_comparisons (enum tree_code code, enum tree_code lcode,
2916 enum tree_code rcode, tree truth_type,
2917 tree ll_arg, tree lr_arg)
2918{
2919 bool honor_nans = HONOR_NANS (TYPE_MODE (TREE_TYPE (ll_arg)));
2920 enum comparison_code lcompcode = comparison_to_compcode (lcode);
2921 enum comparison_code rcompcode = comparison_to_compcode (rcode);
2922 enum comparison_code compcode;
2923
2924 switch (code)
2925 {
2926 case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR:
2927 compcode = lcompcode & rcompcode;
2928 break;
2929
2930 case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR:
2931 compcode = lcompcode | rcompcode;
2932 break;
2933
2934 default:
2935 return NULL_TREE;
2936 }
2937
2938 if (!honor_nans)
2939 {
2940 /* Eliminate unordered comparisons, as well as LTGT and ORD
2941 which are not used unless the mode has NaNs. */
2942 compcode &= ~COMPCODE_UNORD;
2943 if (compcode == COMPCODE_LTGT)
2944 compcode = COMPCODE_NE;
2945 else if (compcode == COMPCODE_ORD)
2946 compcode = COMPCODE_TRUE;
2947 }
2948 else if (flag_trapping_math)
2949 {
2950 /* Check that the original operation and the optimized ones will trap
2951 under the same condition. */
2952 bool ltrap = (lcompcode & COMPCODE_UNORD) == 0
2953 && (lcompcode != COMPCODE_EQ)
2954 && (lcompcode != COMPCODE_ORD);
2955 bool rtrap = (rcompcode & COMPCODE_UNORD) == 0
2956 && (rcompcode != COMPCODE_EQ)
2957 && (rcompcode != COMPCODE_ORD);
2958 bool trap = (compcode & COMPCODE_UNORD) == 0
2959 && (compcode != COMPCODE_EQ)
2960 && (compcode != COMPCODE_ORD);
2961
2962 /* In a short-circuited boolean expression the LHS might be
2963 such that the RHS, if evaluated, will never trap. For
2964 example, in ORD (x, y) && (x < y), we evaluate the RHS only
2965 if neither x nor y is NaN. (This is a mixed blessing: for
2966 example, the expression above will never trap, hence
2967 optimizing it to x < y would be invalid). */
2968 if ((code == TRUTH_ORIF_EXPR && (lcompcode & COMPCODE_UNORD))
2969 || (code == TRUTH_ANDIF_EXPR && !(lcompcode & COMPCODE_UNORD)))
2970 rtrap = false;
2971
2972 /* If the comparison was short-circuited, and only the RHS
2973 trapped, we may now generate a spurious trap. */
2974 if (rtrap && !ltrap
2975 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
2976 return NULL_TREE;
2977
2978 /* If we changed the conditions that cause a trap, we lose. */
2979 if ((ltrap || rtrap) != trap)
2980 return NULL_TREE;
2981 }
2982
2983 if (compcode == COMPCODE_TRUE)
2984 return constant_boolean_node (true, truth_type);
2985 else if (compcode == COMPCODE_FALSE)
2986 return constant_boolean_node (false, truth_type);
2987 else
2988 return fold_build2 (compcode_to_comparison (compcode),
2989 truth_type, ll_arg, lr_arg);
2990}
2991\f
2992/* Return nonzero if two operands (typically of the same tree node)
2993 are necessarily equal. If either argument has side-effects this
2994 function returns zero. FLAGS modifies behavior as follows:
2995
2996 If OEP_ONLY_CONST is set, only return nonzero for constants.
2997 This function tests whether the operands are indistinguishable;
2998 it does not test whether they are equal using C's == operation.
2999 The distinction is important for IEEE floating point, because
3000 (1) -0.0 and 0.0 are distinguishable, but -0.0==0.0, and
3001 (2) two NaNs may be indistinguishable, but NaN!=NaN.
3002
3003 If OEP_ONLY_CONST is unset, a VAR_DECL is considered equal to itself
3004 even though it may hold multiple values during a function.
3005 This is because a GCC tree node guarantees that nothing else is
3006 executed between the evaluation of its "operands" (which may often
3007 be evaluated in arbitrary order). Hence if the operands themselves
3008 don't side-effect, the VAR_DECLs, PARM_DECLs etc... must hold the
3009 same value in each operand/subexpression. Hence leaving OEP_ONLY_CONST
3010 unset means assuming isochronic (or instantaneous) tree equivalence.
3011 Unless comparing arbitrary expression trees, such as from different
3012 statements, this flag can usually be left unset.
3013
3014 If OEP_PURE_SAME is set, then pure functions with identical arguments
3015 are considered the same. It is used when the caller has other ways
3016 to ensure that global memory is unchanged in between. */
3017
3018int
3019operand_equal_p (const_tree arg0, const_tree arg1, unsigned int flags)
3020{
3021 /* If either is ERROR_MARK, they aren't equal. */
3022 if (TREE_CODE (arg0) == ERROR_MARK || TREE_CODE (arg1) == ERROR_MARK)
3023 return 0;
3024
3025 /* Check equality of integer constants before bailing out due to
3026 precision differences. */
3027 if (TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
3028 return tree_int_cst_equal (arg0, arg1);
3029
3030 /* If the two types don't have the same signedness, we can't consider
3031 them equal. We must check this before the STRIP_NOPS calls
3032 because they may change the signedness of the arguments. As pointers
3033 strictly don't have a signedness, require either two pointers or
3034 two non-pointers as well. */
3035 if (TYPE_UNSIGNED (TREE_TYPE (arg0)) != TYPE_UNSIGNED (TREE_TYPE (arg1))
3036 || POINTER_TYPE_P (TREE_TYPE (arg0)) != POINTER_TYPE_P (TREE_TYPE (arg1)))
3037 return 0;
3038
3039 /* If the two types don't have the same precision, it is not safe
3040 to strip NOPs. */
3041 if (TYPE_PRECISION (TREE_TYPE (arg0)) != TYPE_PRECISION (TREE_TYPE (arg1)))
3042 return 0;
3043
3044 STRIP_NOPS (arg0);
3045 STRIP_NOPS (arg1);
3046
3047 /* In case both args are comparisons but with different comparison
3048 code, try to swap the comparison operands of one arg to produce
3049 a match and compare that variant. */
3050 if (TREE_CODE (arg0) != TREE_CODE (arg1)
3051 && COMPARISON_CLASS_P (arg0)
3052 && COMPARISON_CLASS_P (arg1))
3053 {
3054 enum tree_code swap_code = swap_tree_comparison (TREE_CODE (arg1));
3055
3056 if (TREE_CODE (arg0) == swap_code)
3057 return operand_equal_p (TREE_OPERAND (arg0, 0),
3058 TREE_OPERAND (arg1, 1), flags)
3059 && operand_equal_p (TREE_OPERAND (arg0, 1),
3060 TREE_OPERAND (arg1, 0), flags);
3061 }
3062
3063 if (TREE_CODE (arg0) != TREE_CODE (arg1)
3064 /* This is needed for conversions and for COMPONENT_REF.
3065 Might as well play it safe and always test this. */
3066 || TREE_CODE (TREE_TYPE (arg0)) == ERROR_MARK
3067 || TREE_CODE (TREE_TYPE (arg1)) == ERROR_MARK
3068 || TYPE_MODE (TREE_TYPE (arg0)) != TYPE_MODE (TREE_TYPE (arg1)))
3069 return 0;
3070
3071 /* If ARG0 and ARG1 are the same SAVE_EXPR, they are necessarily equal.
3072 We don't care about side effects in that case because the SAVE_EXPR
3073 takes care of that for us. In all other cases, two expressions are
3074 equal if they have no side effects. If we have two identical
3075 expressions with side effects that should be treated the same due
3076 to the only side effects being identical SAVE_EXPR's, that will
3077 be detected in the recursive calls below. */
3078 if (arg0 == arg1 && ! (flags & OEP_ONLY_CONST)
3079 && (TREE_CODE (arg0) == SAVE_EXPR
3080 || (! TREE_SIDE_EFFECTS (arg0) && ! TREE_SIDE_EFFECTS (arg1))))
3081 return 1;
3082
3083 /* Next handle constant cases, those for which we can return 1 even
3084 if ONLY_CONST is set. */
3085 if (TREE_CONSTANT (arg0) && TREE_CONSTANT (arg1))
3086 switch (TREE_CODE (arg0))
3087 {
3088 case INTEGER_CST:
3089 return tree_int_cst_equal (arg0, arg1);
3090
3091 case FIXED_CST:
3092 return FIXED_VALUES_IDENTICAL (TREE_FIXED_CST (arg0),
3093 TREE_FIXED_CST (arg1));
3094
3095 case REAL_CST:
3096 if (REAL_VALUES_IDENTICAL (TREE_REAL_CST (arg0),
3097 TREE_REAL_CST (arg1)))
3098 return 1;
3099
3100
3101 if (!HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg0))))
3102 {
3103 /* If we do not distinguish between signed and unsigned zero,
3104 consider them equal. */
3105 if (real_zerop (arg0) && real_zerop (arg1))
3106 return 1;
3107 }
3108 return 0;
3109
3110 case VECTOR_CST:
3111 {
3112 tree v1, v2;
3113
3114 v1 = TREE_VECTOR_CST_ELTS (arg0);
3115 v2 = TREE_VECTOR_CST_ELTS (arg1);
3116 while (v1 && v2)
3117 {
3118 if (!operand_equal_p (TREE_VALUE (v1), TREE_VALUE (v2),
3119 flags))
3120 return 0;
3121 v1 = TREE_CHAIN (v1);
3122 v2 = TREE_CHAIN (v2);
3123 }
3124
3125 return v1 == v2;
3126 }
3127
3128 case COMPLEX_CST:
3129 return (operand_equal_p (TREE_REALPART (arg0), TREE_REALPART (arg1),
3130 flags)
3131 && operand_equal_p (TREE_IMAGPART (arg0), TREE_IMAGPART (arg1),
3132 flags));
3133
3134 case STRING_CST:
3135 return (TREE_STRING_LENGTH (arg0) == TREE_STRING_LENGTH (arg1)
3136 && ! memcmp (TREE_STRING_POINTER (arg0),
3137 TREE_STRING_POINTER (arg1),
3138 TREE_STRING_LENGTH (arg0)));
3139
3140 case ADDR_EXPR:
3141 return operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0),
3142 0);
3143 default:
3144 break;
3145 }
3146
3147 if (flags & OEP_ONLY_CONST)
3148 return 0;
3149
3150/* Define macros to test an operand from arg0 and arg1 for equality and a
3151 variant that allows null and views null as being different from any
3152 non-null value. In the latter case, if either is null, then both
3153 must be; otherwise, do the normal comparison. */
3154#define OP_SAME(N) operand_equal_p (TREE_OPERAND (arg0, N), \
3155 TREE_OPERAND (arg1, N), flags)
3156
3157#define OP_SAME_WITH_NULL(N) \
3158 ((!TREE_OPERAND (arg0, N) || !TREE_OPERAND (arg1, N)) \
3159 ? TREE_OPERAND (arg0, N) == TREE_OPERAND (arg1, N) : OP_SAME (N))
3160
3161 switch (TREE_CODE_CLASS (TREE_CODE (arg0)))
3162 {
3163 case tcc_unary:
3164 /* Two conversions are equal only if signedness and modes match. */
3165 switch (TREE_CODE (arg0))
3166 {
3167 CASE_CONVERT:
3168 case FIX_TRUNC_EXPR:
3169 if (TYPE_UNSIGNED (TREE_TYPE (arg0))
3170 != TYPE_UNSIGNED (TREE_TYPE (arg1)))
3171 return 0;
3172 break;
3173 default:
3174 break;
3175 }
3176
3177 return OP_SAME (0);
3178
3179
3180 case tcc_comparison:
3181 case tcc_binary:
3182 if (OP_SAME (0) && OP_SAME (1))
3183 return 1;
3184
3185 /* For commutative ops, allow the other order. */
3186 return (commutative_tree_code (TREE_CODE (arg0))
3187 && operand_equal_p (TREE_OPERAND (arg0, 0),
3188 TREE_OPERAND (arg1, 1), flags)
3189 && operand_equal_p (TREE_OPERAND (arg0, 1),
3190 TREE_OPERAND (arg1, 0), flags));
3191
3192 case tcc_reference:
3193 /* If either of the pointer (or reference) expressions we are
3194 dereferencing contain a side effect, these cannot be equal. */
3195 if (TREE_SIDE_EFFECTS (arg0)
3196 || TREE_SIDE_EFFECTS (arg1))
3197 return 0;
3198
3199 switch (TREE_CODE (arg0))
3200 {
3201 case INDIRECT_REF:
3202 case ALIGN_INDIRECT_REF:
3203 case MISALIGNED_INDIRECT_REF:
3204 case REALPART_EXPR:
3205 case IMAGPART_EXPR:
3206 return OP_SAME (0);
3207
3208 case ARRAY_REF:
3209 case ARRAY_RANGE_REF:
3210 /* Operands 2 and 3 may be null.
3211 Compare the array index by value first if it is constant, as the
3212 indices may have different types but the same value here. */
3213 return (OP_SAME (0)
3214 && (tree_int_cst_equal (TREE_OPERAND (arg0, 1),
3215 TREE_OPERAND (arg1, 1))
3216 || OP_SAME (1))
3217 && OP_SAME_WITH_NULL (2)
3218 && OP_SAME_WITH_NULL (3));
3219
3220 case COMPONENT_REF:
3221 /* Handle operand 2 the same as for ARRAY_REF. Operand 0
3222 may be NULL when we're called to compare MEM_EXPRs. */
3223 return OP_SAME_WITH_NULL (0)
3224 && OP_SAME (1)
3225 && OP_SAME_WITH_NULL (2);
3226
3227 case BIT_FIELD_REF:
3228 return OP_SAME (0) && OP_SAME (1) && OP_SAME (2);
3229
3230 default:
3231 return 0;
3232 }
3233
3234 case tcc_expression:
3235 switch (TREE_CODE (arg0))
3236 {
3237 case ADDR_EXPR:
3238 case TRUTH_NOT_EXPR:
3239 return OP_SAME (0);
3240
3241 case TRUTH_ANDIF_EXPR:
3242 case TRUTH_ORIF_EXPR:
3243 return OP_SAME (0) && OP_SAME (1);
3244
3245 case TRUTH_AND_EXPR:
3246 case TRUTH_OR_EXPR:
3247 case TRUTH_XOR_EXPR:
3248 if (OP_SAME (0) && OP_SAME (1))
3249 return 1;
3250
3251 /* Otherwise take into account this is a commutative operation. */
3252 return (operand_equal_p (TREE_OPERAND (arg0, 0),
3253 TREE_OPERAND (arg1, 1), flags)
3254 && operand_equal_p (TREE_OPERAND (arg0, 1),
3255 TREE_OPERAND (arg1, 0), flags));
3256
3257 case COND_EXPR:
3258 return OP_SAME (0) && OP_SAME (1) && OP_SAME (2);
3259
3260 default:
3261 return 0;
3262 }
3263
3264 case tcc_vl_exp:
3265 switch (TREE_CODE (arg0))
3266 {
3267 case CALL_EXPR:
3268 /* If the CALL_EXPRs call different functions, then they
3269 clearly can not be equal. */
3270 if (! operand_equal_p (CALL_EXPR_FN (arg0), CALL_EXPR_FN (arg1),
3271 flags))
3272 return 0;
3273
3274 {
3275 unsigned int cef = call_expr_flags (arg0);
3276 if (flags & OEP_PURE_SAME)
3277 cef &= ECF_CONST | ECF_PURE;
3278 else
3279 cef &= ECF_CONST;
3280 if (!cef)
3281 return 0;
3282 }
3283
3284 /* Now see if all the arguments are the same. */
3285 {
3286 const_call_expr_arg_iterator iter0, iter1;
3287 const_tree a0, a1;
3288 for (a0 = first_const_call_expr_arg (arg0, &iter0),
3289 a1 = first_const_call_expr_arg (arg1, &iter1);
3290 a0 && a1;
3291 a0 = next_const_call_expr_arg (&iter0),
3292 a1 = next_const_call_expr_arg (&iter1))
3293 if (! operand_equal_p (a0, a1, flags))
3294 return 0;
3295
3296 /* If we get here and both argument lists are exhausted
3297 then the CALL_EXPRs are equal. */
3298 return ! (a0 || a1);
3299 }
3300 default:
3301 return 0;
3302 }
3303
3304 case tcc_declaration:
3305 /* Consider __builtin_sqrt equal to sqrt. */
3306 return (TREE_CODE (arg0) == FUNCTION_DECL
3307 && DECL_BUILT_IN (arg0) && DECL_BUILT_IN (arg1)
3308 && DECL_BUILT_IN_CLASS (arg0) == DECL_BUILT_IN_CLASS (arg1)
3309 && DECL_FUNCTION_CODE (arg0) == DECL_FUNCTION_CODE (arg1));
3310
3311 default:
3312 return 0;
3313 }
3314
3315#undef OP_SAME
3316#undef OP_SAME_WITH_NULL
3317}
3318\f
3319/* Similar to operand_equal_p, but see if ARG0 might have been made by
3320 shorten_compare from ARG1 when ARG1 was being compared with OTHER.
3321
3322 When in doubt, return 0. */
3323
3324static int
3325operand_equal_for_comparison_p (tree arg0, tree arg1, tree other)
3326{
3327 int unsignedp1, unsignedpo;
3328 tree primarg0, primarg1, primother;
3329 unsigned int correct_width;
3330
3331 if (operand_equal_p (arg0, arg1, 0))
3332 return 1;
3333
3334 if (! INTEGRAL_TYPE_P (TREE_TYPE (arg0))
3335 || ! INTEGRAL_TYPE_P (TREE_TYPE (arg1)))
3336 return 0;
3337
3338 /* Discard any conversions that don't change the modes of ARG0 and ARG1
3339 and see if the inner values are the same. This removes any
3340 signedness comparison, which doesn't matter here. */
3341 primarg0 = arg0, primarg1 = arg1;
3342 STRIP_NOPS (primarg0);
3343 STRIP_NOPS (primarg1);
3344 if (operand_equal_p (primarg0, primarg1, 0))
3345 return 1;
3346
3347 /* Duplicate what shorten_compare does to ARG1 and see if that gives the
3348 actual comparison operand, ARG0.
3349
3350 First throw away any conversions to wider types
3351 already present in the operands. */
3352
3353 primarg1 = get_narrower (arg1, &unsignedp1);
3354 primother = get_narrower (other, &unsignedpo);
3355
3356 correct_width = TYPE_PRECISION (TREE_TYPE (arg1));
3357 if (unsignedp1 == unsignedpo
3358 && TYPE_PRECISION (TREE_TYPE (primarg1)) < correct_width
3359 && TYPE_PRECISION (TREE_TYPE (primother)) < correct_width)
3360 {
3361 tree type = TREE_TYPE (arg0);
3362
3363 /* Make sure shorter operand is extended the right way
3364 to match the longer operand. */
3365 primarg1 = fold_convert (signed_or_unsigned_type_for
3366 (unsignedp1, TREE_TYPE (primarg1)), primarg1);
3367
3368 if (operand_equal_p (arg0, fold_convert (type, primarg1), 0))
3369 return 1;
3370 }
3371
3372 return 0;
3373}
3374\f
3375/* See if ARG is an expression that is either a comparison or is performing
3376 arithmetic on comparisons. The comparisons must only be comparing
3377 two different values, which will be stored in *CVAL1 and *CVAL2; if
3378 they are nonzero it means that some operands have already been found.
3379 No variables may be used anywhere else in the expression except in the
3380 comparisons. If SAVE_P is true it means we removed a SAVE_EXPR around
3381 the expression and save_expr needs to be called with CVAL1 and CVAL2.
3382
3383 If this is true, return 1. Otherwise, return zero. */
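/* Illustrative example (not from the original source): for
   ARG = (a < b) | (a == b), *CVAL1 becomes A, *CVAL2 becomes B, and 1
   is returned; (a < b) | (c == d) fails because more than two distinct
   values appear in the comparisons.  */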
3384
3385static int
3386twoval_comparison_p (tree arg, tree *cval1, tree *cval2, int *save_p)
3387{
3388 enum tree_code code = TREE_CODE (arg);
3389 enum tree_code_class tclass = TREE_CODE_CLASS (code);
3390
3391 /* We can handle some of the tcc_expression cases here. */
3392 if (tclass == tcc_expression && code == TRUTH_NOT_EXPR)
3393 tclass = tcc_unary;
3394 else if (tclass == tcc_expression
3395 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR
3396 || code == COMPOUND_EXPR))
3397 tclass = tcc_binary;
3398
3399 else if (tclass == tcc_expression && code == SAVE_EXPR
3400 && ! TREE_SIDE_EFFECTS (TREE_OPERAND (arg, 0)))
3401 {
3402 /* If we've already found a CVAL1 or CVAL2, this expression is
3403 too complex to handle. */
3404 if (*cval1 || *cval2)
3405 return 0;
3406
3407 tclass = tcc_unary;
3408 *save_p = 1;
3409 }
3410
3411 switch (tclass)
3412 {
3413 case tcc_unary:
3414 return twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p);
3415
3416 case tcc_binary:
3417 return (twoval_comparison_p (TREE_OPERAND (arg, 0), cval1, cval2, save_p)
3418 && twoval_comparison_p (TREE_OPERAND (arg, 1),
3419 cval1, cval2, save_p));
3420
3421 case tcc_constant:
3422 return 1;
3423
3424 case tcc_expression:
3425 if (code == COND_EXPR)
3426 return (twoval_comparison_p (TREE_OPERAND (arg, 0),
3427 cval1, cval2, save_p)
3428 && twoval_comparison_p (TREE_OPERAND (arg, 1),
3429 cval1, cval2, save_p)
3430 && twoval_comparison_p (TREE_OPERAND (arg, 2),
3431 cval1, cval2, save_p));
3432 return 0;
3433
3434 case tcc_comparison:
3435 /* First see if we can handle the first operand, then the second. For
3436 the second operand, we know *CVAL1 can't be zero. It must be that
3437 one side of the comparison is each of the values; test for the
3438 case where this isn't true by failing if the two operands
3439 are the same. */
3440
3441 if (operand_equal_p (TREE_OPERAND (arg, 0),
3442 TREE_OPERAND (arg, 1), 0))
3443 return 0;
3444
3445 if (*cval1 == 0)
3446 *cval1 = TREE_OPERAND (arg, 0);
3447 else if (operand_equal_p (*cval1, TREE_OPERAND (arg, 0), 0))
3448 ;
3449 else if (*cval2 == 0)
3450 *cval2 = TREE_OPERAND (arg, 0);
3451 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 0), 0))
3452 ;
3453 else
3454 return 0;
3455
3456 if (operand_equal_p (*cval1, TREE_OPERAND (arg, 1), 0))
3457 ;
3458 else if (*cval2 == 0)
3459 *cval2 = TREE_OPERAND (arg, 1);
3460 else if (operand_equal_p (*cval2, TREE_OPERAND (arg, 1), 0))
3461 ;
3462 else
3463 return 0;
3464
3465 return 1;
3466
3467 default:
3468 return 0;
3469 }
3470}
3471\f
3472/* ARG is a tree that is known to contain just arithmetic operations and
3473 comparisons. Evaluate the operations in the tree substituting NEW0 for
3474 any occurrence of OLD0 as an operand of a comparison and likewise for
3475 NEW1 and OLD1. */
3476
3477static tree
3478eval_subst (tree arg, tree old0, tree new0, tree old1, tree new1)
3479{
3480 tree type = TREE_TYPE (arg);
3481 enum tree_code code = TREE_CODE (arg);
3482 enum tree_code_class tclass = TREE_CODE_CLASS (code);
3483
3484 /* We can handle some of the tcc_expression cases here. */
3485 if (tclass == tcc_expression && code == TRUTH_NOT_EXPR)
3486 tclass = tcc_unary;
3487 else if (tclass == tcc_expression
3488 && (code == TRUTH_ANDIF_EXPR || code == TRUTH_ORIF_EXPR))
3489 tclass = tcc_binary;
3490
3491 switch (tclass)
3492 {
3493 case tcc_unary:
3494 return fold_build1 (code, type,
3495 eval_subst (TREE_OPERAND (arg, 0),
3496 old0, new0, old1, new1));
3497
3498 case tcc_binary:
3499 return fold_build2 (code, type,
3500 eval_subst (TREE_OPERAND (arg, 0),
3501 old0, new0, old1, new1),
3502 eval_subst (TREE_OPERAND (arg, 1),
3503 old0, new0, old1, new1));
3504
3505 case tcc_expression:
3506 switch (code)
3507 {
3508 case SAVE_EXPR:
3509 return eval_subst (TREE_OPERAND (arg, 0), old0, new0, old1, new1);
3510
3511 case COMPOUND_EXPR:
3512 return eval_subst (TREE_OPERAND (arg, 1), old0, new0, old1, new1);
3513
3514 case COND_EXPR:
3515 return fold_build3 (code, type,
3516 eval_subst (TREE_OPERAND (arg, 0),
3517 old0, new0, old1, new1),
3518 eval_subst (TREE_OPERAND (arg, 1),
3519 old0, new0, old1, new1),
3520 eval_subst (TREE_OPERAND (arg, 2),
3521 old0, new0, old1, new1));
3522 default:
3523 break;
3524 }
3525 /* Fall through - ??? */
3526
3527 case tcc_comparison:
3528 {
3529 tree arg0 = TREE_OPERAND (arg, 0);
3530 tree arg1 = TREE_OPERAND (arg, 1);
3531
3532 /* We need to check both for exact equality and tree equality. The
3533 former will be true if the operand has a side-effect. In that
3534 case, we know the operand occurred exactly once. */
3535
3536 if (arg0 == old0 || operand_equal_p (arg0, old0, 0))
3537 arg0 = new0;
3538 else if (arg0 == old1 || operand_equal_p (arg0, old1, 0))
3539 arg0 = new1;
3540
3541 if (arg1 == old0 || operand_equal_p (arg1, old0, 0))
3542 arg1 = new0;
3543 else if (arg1 == old1 || operand_equal_p (arg1, old1, 0))
3544 arg1 = new1;
3545
3546 return fold_build2 (code, type, arg0, arg1);
3547 }
3548
3549 default:
3550 return arg;
3551 }
3552}
3553\f
3554/* Return a tree for the case when the result of an expression is RESULT
3555 converted to TYPE and OMITTED was previously an operand of the expression
3556 but is now not needed (e.g., we folded OMITTED * 0).
3557
3558 If OMITTED has side effects, we must evaluate it. Otherwise, just do
3559 the conversion of RESULT to TYPE. */
3560
3561tree
3562omit_one_operand (tree type, tree result, tree omitted)
3563{
3564 tree t = fold_convert (type, result);
3565
3566 /* If the resulting operand is an empty statement, just return the omitted
3567 statement casted to void. */
3568 if (IS_EMPTY_STMT (t) && TREE_SIDE_EFFECTS (omitted))
3569 return build1 (NOP_EXPR, void_type_node, fold_ignored_result (omitted));
3570
3571 if (TREE_SIDE_EFFECTS (omitted))
3572 return build2 (COMPOUND_EXPR, type, fold_ignored_result (omitted), t);
3573
3574 return non_lvalue (t);
3575}
3576
3577/* Similar, but call pedantic_non_lvalue instead of non_lvalue. */
3578
3579static tree
3580pedantic_omit_one_operand (tree type, tree result, tree omitted)
3581{
3582 tree t = fold_convert (type, result);
3583
3584 /* If the resulting operand is an empty statement, just return the omitted
3585 statement casted to void. */
3586 if (IS_EMPTY_STMT (t) && TREE_SIDE_EFFECTS (omitted))
3587 return build1 (NOP_EXPR, void_type_node, fold_ignored_result (omitted));
3588
3589 if (TREE_SIDE_EFFECTS (omitted))
3590 return build2 (COMPOUND_EXPR, type, fold_ignored_result (omitted), t);
3591
3592 return pedantic_non_lvalue (t);
3593}
3594
3595/* Return a tree for the case when the result of an expression is RESULT
3596 converted to TYPE and OMITTED1 and OMITTED2 were previously operands
3597 of the expression but are now not needed.
3598
3599 If OMITTED1 or OMITTED2 has side effects, they must be evaluated.
3600 If both OMITTED1 and OMITTED2 have side effects, OMITTED1 is
3601 evaluated before OMITTED2. Otherwise, if neither has side effects,
3602 just do the conversion of RESULT to TYPE. */
3603
3604tree
3605omit_two_operands (tree type, tree result, tree omitted1, tree omitted2)
3606{
3607 tree t = fold_convert (type, result);
3608
3609 if (TREE_SIDE_EFFECTS (omitted2))
3610 t = build2 (COMPOUND_EXPR, type, omitted2, t);
3611 if (TREE_SIDE_EFFECTS (omitted1))
3612 t = build2 (COMPOUND_EXPR, type, omitted1, t);
3613
3614 return TREE_CODE (t) != COMPOUND_EXPR ? non_lvalue (t) : t;
3615}
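/* Illustrative example (names invented): omit_two_operands (type,
   integer_zero_node, call_to_f, call_to_g) produces
   (call_to_f, (call_to_g, 0)), evaluating F before G as documented
   above.  */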
3616
3617\f
3618/* Return a simplified tree node for the truth-negation of ARG. This
3619 never alters ARG itself. We assume that ARG is an operation that
3620 returns a truth value (0 or 1).
3621
3622 FIXME: one would think we would fold the result, but it causes
3623 problems with the dominator optimizer. */
3624
3625tree
3626fold_truth_not_expr (tree arg)
3627{
3628 tree type = TREE_TYPE (arg);
3629 enum tree_code code = TREE_CODE (arg);
3630
3631 /* If this is a comparison, we can simply invert it, except for
3632 floating-point non-equality comparisons, in which case we just
3633 enclose a TRUTH_NOT_EXPR around what we have. */

  if (TREE_CODE_CLASS (code) == tcc_comparison)
    {
      tree op_type = TREE_TYPE (TREE_OPERAND (arg, 0));
      if (FLOAT_TYPE_P (op_type)
          && flag_trapping_math
          && code != ORDERED_EXPR && code != UNORDERED_EXPR
          && code != NE_EXPR && code != EQ_EXPR)
        return NULL_TREE;
      else
        {
          code = invert_tree_comparison (code,
                                         HONOR_NANS (TYPE_MODE (op_type)));
          if (code == ERROR_MARK)
            return NULL_TREE;
          else
            return build2 (code, type,
                           TREE_OPERAND (arg, 0), TREE_OPERAND (arg, 1));
        }
    }

  switch (code)
    {
    case INTEGER_CST:
      return constant_boolean_node (integer_zerop (arg), type);

    case TRUTH_AND_EXPR:
      return build2 (TRUTH_OR_EXPR, type,
                     invert_truthvalue (TREE_OPERAND (arg, 0)),
                     invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_OR_EXPR:
      return build2 (TRUTH_AND_EXPR, type,
                     invert_truthvalue (TREE_OPERAND (arg, 0)),
                     invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_XOR_EXPR:
      /* Here we can invert either operand.  We invert the first operand
         unless the second operand is a TRUTH_NOT_EXPR in which case our
         result is the XOR of the first operand with the inside of the
         negation of the second operand.  */

      if (TREE_CODE (TREE_OPERAND (arg, 1)) == TRUTH_NOT_EXPR)
        return build2 (TRUTH_XOR_EXPR, type, TREE_OPERAND (arg, 0),
                       TREE_OPERAND (TREE_OPERAND (arg, 1), 0));
      else
        return build2 (TRUTH_XOR_EXPR, type,
                       invert_truthvalue (TREE_OPERAND (arg, 0)),
                       TREE_OPERAND (arg, 1));

    case TRUTH_ANDIF_EXPR:
      return build2 (TRUTH_ORIF_EXPR, type,
                     invert_truthvalue (TREE_OPERAND (arg, 0)),
                     invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_ORIF_EXPR:
      return build2 (TRUTH_ANDIF_EXPR, type,
                     invert_truthvalue (TREE_OPERAND (arg, 0)),
                     invert_truthvalue (TREE_OPERAND (arg, 1)));

    case TRUTH_NOT_EXPR:
      return TREE_OPERAND (arg, 0);

    case COND_EXPR:
      {
        tree arg1 = TREE_OPERAND (arg, 1);
        tree arg2 = TREE_OPERAND (arg, 2);
        /* A COND_EXPR may have a throw as one operand, which
           then has void type.  Just leave void operands
           as they are.  */
        return build3 (COND_EXPR, type, TREE_OPERAND (arg, 0),
                       VOID_TYPE_P (TREE_TYPE (arg1))
                       ? arg1 : invert_truthvalue (arg1),
                       VOID_TYPE_P (TREE_TYPE (arg2))
                       ? arg2 : invert_truthvalue (arg2));
      }

    case COMPOUND_EXPR:
      return build2 (COMPOUND_EXPR, type, TREE_OPERAND (arg, 0),
                     invert_truthvalue (TREE_OPERAND (arg, 1)));

    case NON_LVALUE_EXPR:
      return invert_truthvalue (TREE_OPERAND (arg, 0));

    case NOP_EXPR:
      if (TREE_CODE (TREE_TYPE (arg)) == BOOLEAN_TYPE)
        return build1 (TRUTH_NOT_EXPR, type, arg);

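      /* ... fall through: a non-boolean NOP_EXPR is handled like a
         conversion.  */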
    case CONVERT_EXPR:
    case FLOAT_EXPR:
      return build1 (TREE_CODE (arg), type,
                     invert_truthvalue (TREE_OPERAND (arg, 0)));

    case BIT_AND_EXPR:
      if (!integer_onep (TREE_OPERAND (arg, 1)))
        break;
      return build2 (EQ_EXPR, type, arg,
                     build_int_cst (type, 0));

    case SAVE_EXPR:
      return build1 (TRUTH_NOT_EXPR, type, arg);

    case CLEANUP_POINT_EXPR:
      return build1 (CLEANUP_POINT_EXPR, type,
                     invert_truthvalue (TREE_OPERAND (arg, 0)));

    default:
      break;
    }

  return NULL_TREE;
}
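
/* Illustrative examples of the rewrites above (sketches, not part of
   the original sources):

     !(a && b)   ->  !a || !b             (De Morgan)
     !(a ^ !b)   ->  a ^ b                (XOR absorbs the NOT)
     !(x & 1)    ->  (x & 1) == 0         (single-bit test)
     !(f < g)    ->  f UNGE g             (floats when NaNs are honored
                                           and trapping math is off)

   The recursive cases call invert_truthvalue on their operands, so
   the negation is pushed all the way down to the leaves.  */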

/* Return a simplified tree node for the truth-negation of ARG.  This
   never alters ARG itself.  We assume that ARG is an operation that
   returns a truth value (0 or 1).  Unlike fold_truth_not_expr, this
   function always returns a tree: if no simplification is possible,
   ARG is wrapped in a TRUTH_NOT_EXPR.

   FIXME: one would think we would fold the result, but it causes
   problems with the dominator optimizer.  */

tree
invert_truthvalue (tree arg)
{
  tree tem;

  if (TREE_CODE (arg) == ERROR_MARK)
    return arg;

  tem = fold_truth_not_expr (arg);
  if (!tem)
    tem = build1 (TRUTH_NOT_EXPR, TREE_TYPE (arg), arg);

  return tem;
}
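
/* Illustrative behaviour (a sketch, not part of the original file):
   for a bare boolean variable B there is nothing to simplify, so the
   fallback path builds TRUTH_NOT_EXPR <b>; for "a < b" on integer
   operands, fold_truth_not_expr succeeds and the result is "a >= b"
   with no TRUTH_NOT_EXPR at all.  */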

/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
   operands are another bit-wise operation with a common input.  If so,
   distribute the bit operations to save an operation and possibly two if
   constants are involved.  For example, convert
        (A | B) & (A | C) into A | (B & C)
   Further simplification will occur if B and C are constants.

   If this optimization cannot be done, 0 will be returned.  */

static tree
distribute_bit_expr (enum tree_code code, tree type, tree arg0, tree arg1)
{
  tree common;
  tree left, right;

  if (TREE_CODE (arg0) != TREE_CODE (arg1)
      || TREE_CODE (arg0) == code
      || (TREE_CODE (arg0) != BIT_AND_EXPR
          && TREE_CODE (arg0) != BIT_IOR_EXPR))
    return 0;

  if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 0), 0))
    {
      common = TREE_OPERAND (arg0, 0);
      left = TREE_OPERAND (arg0, 1);
      right = TREE_OPERAND (arg1, 1);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 0), TREE_OPERAND (arg1, 1), 0))
    {
      common = TREE_OPERAND (arg0, 0);
      left = TREE_OPERAND (arg0, 1);
      right = TREE_OPERAND (arg1, 0);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 0), 0))
    {
      common = TREE_OPERAND (arg0, 1);
      left = TREE_OPERAND (arg0, 0);
      right = TREE_OPERAND (arg1, 1);
    }
  else if (operand_equal_p (TREE_OPERAND (arg0, 1), TREE_OPERAND (arg1, 1), 0))
    {
      common = TREE_OPERAND (arg0, 1);
      left = TREE_OPERAND (arg0, 0);
      right = TREE_OPERAND (arg1, 0);
    }
  else
    return 0;

  common = fold_convert (type, common);
  left = fold_convert (type, left);
  right = fold_convert (type, right);
  return fold_build2 (TREE_CODE (arg0), type, common,
                      fold_build2 (code, type, left, right));
}
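
/* The four operand_equal_p cases above are exactly the positions the
   common operand can occupy, since BIT_AND_EXPR and BIT_IOR_EXPR are
   commutative.  A sketch of the transform on each shape:

     (a | b) & (a | c)  ->  a | (b & c)
     (a | b) & (c | a)  ->  a | (b & c)
     (b | a) & (a | c)  ->  a | (b & c)
     (b | a) & (c | a)  ->  a | (b & c)

   With B and C constants, the inner fold_build2 collapses (b & c)
   immediately, saving a second operation.  */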

/* Knowing that ARG0 and ARG1 are each either an RDIV_EXPR or a
   MULT_EXPR, simplify the binary operation CODE applied to them.
   This optimization is unsafe for floating point: division does not
   distribute over addition under rounding, so the rewritten
   expression may round differently (and a folded 1/C can overflow
   or underflow); it is only done when unsafe math optimizations
   are permitted.  */
static tree
distribute_real_division (enum tree_code code, tree type, tree arg0, tree arg1)
{
  bool mul0 = TREE_CODE (arg0) == MULT_EXPR;
  bool mul1 = TREE_CODE (arg1) == MULT_EXPR;

  /* (A / C) +- (B / C) -> (A +- B) / C.  */
  if (mul0 == mul1
      && operand_equal_p (TREE_OPERAND (arg0, 1),
                          TREE_OPERAND (arg1, 1), 0))
    return fold_build2 (mul0 ? MULT_EXPR : RDIV_EXPR, type,
                        fold_build2 (code, type,
                                     TREE_OPERAND (arg0, 0),
                                     TREE_OPERAND (arg1, 0)),
                        TREE_OPERAND (arg0, 1));

  /* (A / C1) +- (A / C2) -> A * (1 / C1 +- 1 / C2).  */
  if (operand_equal_p (TREE_OPERAND (arg0, 0),
                       TREE_OPERAND (arg1, 0), 0)