1 /* C-compiler utilities for types and variables storage layout
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1996, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
25 #include "coretypes.h"
37 #include "langhooks.h"
41 /* Data type for the expressions representing sizes of data types.
42 It is the first integer type laid out. */
43 tree sizetype_tab[(int) TYPE_KIND_LAST];
45 /* If nonzero, this is an upper limit on alignment of structure fields.
46 The value is measured in bits. */
47 unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;
48 /* ... and its original value in bytes, specified via -fpack-struct=<value>. */
49 unsigned int initial_max_fld_align = TARGET_DEFAULT_PACK_STRUCT;
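/* Illustrative example (editorial note, not part of the original source):
   with -fpack-struct=2 on a target where BITS_PER_UNIT is 8, the option
   machinery records initial_max_fld_align = 2 (bytes) and
   maximum_field_alignment = 16 (bits), so no structure field is given
   more than 2-byte alignment during layout. */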
51 /* Nonzero if all REFERENCE_TYPEs are internal and hence should be
52 allocated in Pmode, not ptr_mode. Set only by internal_reference_types
53 called only by a front end. */
54 static int reference_types_internal = 0;
56 static void finalize_record_size (record_layout_info);
57 static void finalize_type_size (tree);
58 static void place_union_field (record_layout_info, tree);
59 #if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
60 static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT, tree);
63 extern void debug_rli (record_layout_info);
65 /* SAVE_EXPRs for sizes of types and decls, waiting to be expanded. */
67 static GTY(()) tree pending_sizes;
69 /* Show that REFERENCE_TYPES are internal and should be Pmode. Called only by a front end. */
73 internal_reference_types (void)
75 reference_types_internal = 1;
78 /* Get a list of all the objects put on the pending sizes list. */
81 get_pending_sizes (void)
83 tree chain = pending_sizes;
89 /* Add EXPR to the pending sizes list. */
92 put_pending_size (tree expr)
94 /* Strip any simple arithmetic from EXPR to see if it has an underlying SAVE_EXPR. */
96 expr = skip_simple_arithmetic (expr);
98 if (TREE_CODE (expr) == SAVE_EXPR)
99 pending_sizes = tree_cons (NULL_TREE, expr, pending_sizes);
102 /* Put a chain of objects into the pending sizes list, which must be empty when we do so. */
106 put_pending_sizes (tree chain)
108 gcc_assert (!pending_sizes);
109 pending_sizes = chain;
112 /* Given a size SIZE that may not be a constant, return a SAVE_EXPR
113 to serve as the actual size-expression for a type or decl. */
116 variable_size (tree size)
120 /* If the language-processor is to take responsibility for variable-sized
121 items (e.g., languages which have elaboration procedures like Ada),
122 just return SIZE unchanged. Likewise for self-referential sizes and constant sizes. */
124 if (TREE_CONSTANT (size)
125 || lang_hooks.decls.global_bindings_p () < 0
126 || CONTAINS_PLACEHOLDER_P (size))
129 size = save_expr (size);
131 /* If an array with a variable number of elements is declared, and
132 the elements require destruction, we will emit a cleanup for the
133 array. That cleanup is run both on normal exit from the block
134 and in the exception-handler for the block. Normally, when code
135 is used in both ordinary code and in an exception handler it is
136 `unsaved', i.e., all SAVE_EXPRs are recalculated. However, we do
137 not wish to do that here; the array-size is the same in both places. */
139 save = skip_simple_arithmetic (size);
141 if (cfun && cfun->dont_save_pending_sizes_p)
142 /* The front-end doesn't want us to keep a list of the expressions
143 that determine sizes for variable size objects. Trust it. */
146 if (lang_hooks.decls.global_bindings_p ())
148 if (TREE_CONSTANT (size))
149 error ("type size can%'t be explicitly evaluated");
151 error ("variable-size type declared outside of any function");
153 return size_one_node;
156 put_pending_size (save);
161 #ifndef MAX_FIXED_MODE_SIZE
162 #define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (DImode)
165 /* Return the machine mode to use for a nonscalar of SIZE bits. The
166 mode must be in class MCLASS, and have exactly that many value bits;
167 it may have padding as well. If LIMIT is nonzero, modes of wider
168 than MAX_FIXED_MODE_SIZE will not be used. */
171 mode_for_size (unsigned int size, enum mode_class mclass, int limit)
173 enum machine_mode mode;
175 if (limit && size > MAX_FIXED_MODE_SIZE)
178 /* Get the first mode which has this size, in the specified class. */
179 for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
180 mode = GET_MODE_WIDER_MODE (mode))
181 if (GET_MODE_PRECISION (mode) == size)
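/* Illustrative example (editorial note, not part of the original source;
   assumes a typical target where QImode/HImode/SImode are 8/16/32 bits):
   mode_for_size (32, MODE_INT, 0) walks QImode -> HImode -> SImode and
   returns SImode, whereas mode_for_size (24, MODE_INT, 0) finds no mode
   with exactly 24 value bits and returns BLKmode. */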
187 /* Similar, except passed a tree node. */
190 mode_for_size_tree (const_tree size, enum mode_class mclass, int limit)
192 unsigned HOST_WIDE_INT uhwi;
195 if (!host_integerp (size, 1))
197 uhwi = tree_low_cst (size, 1);
201 return mode_for_size (ui, mclass, limit);
204 /* Similar, but never return BLKmode; return the narrowest mode that
205 contains at least the requested number of value bits. */
208 smallest_mode_for_size (unsigned int size, enum mode_class mclass)
210 enum machine_mode mode;
212 /* Get the first mode which has at least this size, in the specified class. */
214 for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
215 mode = GET_MODE_WIDER_MODE (mode))
216 if (GET_MODE_PRECISION (mode) >= size)
222 /* Find an integer mode of the exact same size, or BLKmode on failure. */
225 int_mode_for_mode (enum machine_mode mode)
227 switch (GET_MODE_CLASS (mode))
230 case MODE_PARTIAL_INT:
233 case MODE_COMPLEX_INT:
234 case MODE_COMPLEX_FLOAT:
236 case MODE_DECIMAL_FLOAT:
237 case MODE_VECTOR_INT:
238 case MODE_VECTOR_FLOAT:
243 case MODE_VECTOR_FRACT:
244 case MODE_VECTOR_ACCUM:
245 case MODE_VECTOR_UFRACT:
246 case MODE_VECTOR_UACCUM:
247 mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0);
254 /* ... fall through ... */
264 /* Return the alignment of MODE. This will be bounded by 1 and
265 BIGGEST_ALIGNMENT. */
268 get_mode_alignment (enum machine_mode mode)
270 return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode]*BITS_PER_UNIT));
274 /* Subroutine of layout_decl: Force alignment required for the data type.
275 But if the decl itself wants greater alignment, don't override that. */
278 do_type_align (tree type, tree decl)
280 if (TYPE_ALIGN (type) > DECL_ALIGN (decl))
282 DECL_ALIGN (decl) = TYPE_ALIGN (type);
283 if (TREE_CODE (decl) == FIELD_DECL)
284 DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type);
288 /* Set the size, mode and alignment of a ..._DECL node.
289 TYPE_DECL does need this for C++.
290 Note that LABEL_DECL and CONST_DECL nodes do not need this,
291 and FUNCTION_DECL nodes have them set up in a special (and simple) way.
292 Don't call layout_decl for them.
294 KNOWN_ALIGN is the amount of alignment we can assume this
295 decl has with no special effort. It is relevant only for FIELD_DECLs
296 and depends on the previous fields.
297 All that matters about KNOWN_ALIGN is which powers of 2 divide it.
298 If KNOWN_ALIGN is 0, it means, "as much alignment as you like":
299 the record will be aligned to suit. */
302 layout_decl (tree decl, unsigned int known_align)
304 tree type = TREE_TYPE (decl);
305 enum tree_code code = TREE_CODE (decl);
308 if (code == CONST_DECL)
311 gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL
312 || code == TYPE_DECL || code == FIELD_DECL);
314 rtl = DECL_RTL_IF_SET (decl);
316 if (type == error_mark_node)
317 type = void_type_node;
319 /* Usually the size and mode come from the data type without change,
320 however, the front-end may set the explicit width of the field, so its
321 size may not be the same as the size of its type. This happens with
322 bitfields, of course (an `int' bitfield may be only 2 bits, say), but it
323 also happens with other fields. For example, the C++ front-end creates
324 zero-sized fields corresponding to empty base classes, and depends on
325 layout_type setting DECL_FIELD_BITPOS correctly for the field. Set the
326 size in bytes from the size in bits. If we have already set the mode,
327 don't set it again since we can be called twice for FIELD_DECLs. */
329 DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
330 if (DECL_MODE (decl) == VOIDmode)
331 DECL_MODE (decl) = TYPE_MODE (type);
333 if (DECL_SIZE (decl) == 0)
335 DECL_SIZE (decl) = TYPE_SIZE (type);
336 DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type);
338 else if (DECL_SIZE_UNIT (decl) == 0)
339 DECL_SIZE_UNIT (decl)
340 = fold_convert (sizetype, size_binop (CEIL_DIV_EXPR, DECL_SIZE (decl),
343 if (code != FIELD_DECL)
344 /* For non-fields, update the alignment from the type. */
345 do_type_align (type, decl);
347 /* For fields, it's a bit more complicated... */
349 bool old_user_align = DECL_USER_ALIGN (decl);
350 bool zero_bitfield = false;
351 bool packed_p = DECL_PACKED (decl);
354 if (DECL_BIT_FIELD (decl))
356 DECL_BIT_FIELD_TYPE (decl) = type;
358 /* A zero-length bit-field affects the alignment of the next
359 field. In essence such bit-fields are not influenced by
360 any packing due to #pragma pack or attribute packed. */
361 if (integer_zerop (DECL_SIZE (decl))
362 && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
364 zero_bitfield = true;
366 #ifdef PCC_BITFIELD_TYPE_MATTERS
367 if (PCC_BITFIELD_TYPE_MATTERS)
368 do_type_align (type, decl);
372 #ifdef EMPTY_FIELD_BOUNDARY
373 if (EMPTY_FIELD_BOUNDARY > DECL_ALIGN (decl))
375 DECL_ALIGN (decl) = EMPTY_FIELD_BOUNDARY;
376 DECL_USER_ALIGN (decl) = 0;
382 /* See if we can use an ordinary integer mode for a bit-field.
383 Conditions are: a fixed size that is correct for another mode
384 and occupying a complete byte or bytes on proper boundary. */
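/* Illustrative example (editorial note, not part of the original source;
   assumes a target where HImode is 16 bits with 16-bit alignment): a
   bit-field declared `unsigned int f : 16' whose DECL_SIZE is 16 bits and
   which is known to start on a 16-bit boundary satisfies the conditions
   below, so it is given HImode and DECL_BIT_FIELD is cleared, letting it
   be accessed as an ordinary halfword. */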
385 if (TYPE_SIZE (type) != 0
386 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
387 && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT)
389 enum machine_mode xmode
390 = mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1);
391 unsigned int xalign = GET_MODE_ALIGNMENT (xmode);
394 && !(xalign > BITS_PER_UNIT && DECL_PACKED (decl))
395 && (known_align == 0 || known_align >= xalign))
397 DECL_ALIGN (decl) = MAX (xalign, DECL_ALIGN (decl));
398 DECL_MODE (decl) = xmode;
399 DECL_BIT_FIELD (decl) = 0;
403 /* Turn off DECL_BIT_FIELD if we won't need it set. */
404 if (TYPE_MODE (type) == BLKmode && DECL_MODE (decl) == BLKmode
405 && known_align >= TYPE_ALIGN (type)
406 && DECL_ALIGN (decl) >= TYPE_ALIGN (type))
407 DECL_BIT_FIELD (decl) = 0;
409 else if (packed_p && DECL_USER_ALIGN (decl))
410 /* Don't touch DECL_ALIGN. For other packed fields, go ahead and
411 round up; we'll reduce it again below. We want packing to
412 supersede USER_ALIGN inherited from the type, but defer to
413 alignment explicitly specified on the field decl. */;
415 do_type_align (type, decl);
417 /* If the field is packed and not explicitly aligned, give it the
418 minimum alignment. Note that do_type_align may set
419 DECL_USER_ALIGN, so we need to check old_user_align instead. */
422 DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), BITS_PER_UNIT);
424 if (! packed_p && ! DECL_USER_ALIGN (decl))
426 /* Some targets (e.g. i386, VMS) limit struct field alignment
427 to a lower boundary than alignment of variables unless
428 it was overridden by attribute aligned. */
429 #ifdef BIGGEST_FIELD_ALIGNMENT
431 = MIN (DECL_ALIGN (decl), (unsigned) BIGGEST_FIELD_ALIGNMENT);
433 #ifdef ADJUST_FIELD_ALIGN
434 DECL_ALIGN (decl) = ADJUST_FIELD_ALIGN (decl, DECL_ALIGN (decl));
439 mfa = initial_max_fld_align * BITS_PER_UNIT;
441 mfa = maximum_field_alignment;
442 /* Should this be controlled by DECL_USER_ALIGN, too? */
444 DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), mfa);
447 /* Evaluate nonconstant size only once, either now or as soon as safe. */
448 if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
449 DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));
450 if (DECL_SIZE_UNIT (decl) != 0
451 && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST)
452 DECL_SIZE_UNIT (decl) = variable_size (DECL_SIZE_UNIT (decl));
454 /* If requested, warn about definitions of large data objects. */
456 && (code == VAR_DECL || code == PARM_DECL)
457 && ! DECL_EXTERNAL (decl))
459 tree size = DECL_SIZE_UNIT (decl);
461 if (size != 0 && TREE_CODE (size) == INTEGER_CST
462 && compare_tree_int (size, larger_than_size) > 0)
464 int size_as_int = TREE_INT_CST_LOW (size);
466 if (compare_tree_int (size, size_as_int) == 0)
467 warning (OPT_Wlarger_than_eq, "size of %q+D is %d bytes", decl, size_as_int);
469 warning (OPT_Wlarger_than_eq, "size of %q+D is larger than %wd bytes",
470 decl, larger_than_size);
474 /* If the RTL was already set, update its mode and mem attributes. */
477 PUT_MODE (rtl, DECL_MODE (decl));
478 SET_DECL_RTL (decl, 0);
479 set_mem_attributes (rtl, decl, 1);
480 SET_DECL_RTL (decl, rtl);
484 /* Given a VAR_DECL, PARM_DECL or RESULT_DECL, clears the results of
485 a previous call to layout_decl and calls it again. */
488 relayout_decl (tree decl)
490 DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
491 DECL_MODE (decl) = VOIDmode;
492 if (!DECL_USER_ALIGN (decl))
493 DECL_ALIGN (decl) = 0;
494 SET_DECL_RTL (decl, 0);
496 layout_decl (decl, 0);
499 /* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or
500 QUAL_UNION_TYPE. Return a pointer to a struct record_layout_info which
501 is to be passed to all other layout functions for this record. It is the
502 responsibility of the caller to call `free' for the storage returned.
503 Note that garbage collection is not permitted until we finish laying out the type. */
507 start_record_layout (tree t)
509 record_layout_info rli = XNEW (struct record_layout_info_s);
513 /* If the type has a minimum specified alignment (via an attribute
514 declaration, for example) use it -- otherwise, start with a
515 one-byte alignment. */
516 rli->record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (t));
517 rli->unpacked_align = rli->record_align;
518 rli->offset_align = MAX (rli->record_align, BIGGEST_ALIGNMENT);
520 #ifdef STRUCTURE_SIZE_BOUNDARY
521 /* Packed structures don't need to have minimum size. */
522 if (! TYPE_PACKED (t))
526 /* #pragma pack overrides STRUCTURE_SIZE_BOUNDARY. */
527 tmp = (unsigned) STRUCTURE_SIZE_BOUNDARY;
528 if (maximum_field_alignment != 0)
529 tmp = MIN (tmp, maximum_field_alignment);
530 rli->record_align = MAX (rli->record_align, tmp);
534 rli->offset = size_zero_node;
535 rli->bitpos = bitsize_zero_node;
537 rli->pending_statics = 0;
538 rli->packed_maybe_necessary = 0;
539 rli->remaining_in_alignment = 0;
544 /* These four routines perform computations that convert between
545 the offset/bitpos forms and byte and bit offsets. */
548 bit_from_pos (tree offset, tree bitpos)
550 return size_binop (PLUS_EXPR, bitpos,
551 size_binop (MULT_EXPR,
552 fold_convert (bitsizetype, offset),
557 byte_from_pos (tree offset, tree bitpos)
559 return size_binop (PLUS_EXPR, offset,
560 fold_convert (sizetype,
561 size_binop (TRUNC_DIV_EXPR, bitpos,
562 bitsize_unit_node)));
566 pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align,
569 *poffset = size_binop (MULT_EXPR,
570 fold_convert (sizetype,
571 size_binop (FLOOR_DIV_EXPR, pos,
572 bitsize_int (off_align))),
573 size_int (off_align / BITS_PER_UNIT));
574 *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, bitsize_int (off_align));
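/* Worked example (editorial note, not part of the original source; assumes
   BITS_PER_UNIT == 8): for a byte offset of 3 and a bit position of 5,
   bit_from_pos yields 3 * 8 + 5 = 29 bits and byte_from_pos yields
   3 + 5 / 8 = 3 bytes. Going the other way, pos_from_bit with POS = 70
   and OFF_ALIGN = 32 stores *POFFSET = (70 / 32) * 4 = 8 bytes and
   *PBITPOS = 70 % 32 = 6 bits. */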
577 /* Given a pointer to bit and byte offsets and an offset alignment,
578 normalize the offsets so they are within the alignment. */
581 normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align)
583 /* If the bit position is now larger than it should be, adjust it by moving whole units of alignment into the byte offset. */
585 if (compare_tree_int (*pbitpos, off_align) >= 0)
587 tree extra_aligns = size_binop (FLOOR_DIV_EXPR, *pbitpos,
588 bitsize_int (off_align));
591 = size_binop (PLUS_EXPR, *poffset,
592 size_binop (MULT_EXPR,
593 fold_convert (sizetype, extra_aligns),
594 size_int (off_align / BITS_PER_UNIT)));
597 = size_binop (FLOOR_MOD_EXPR, *pbitpos, bitsize_int (off_align));
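/* Worked example (editorial note, not part of the original source; assumes
   BITS_PER_UNIT == 8): normalizing *POFFSET = 4, *PBITPOS = 70 with
   OFF_ALIGN == 32 computes EXTRA_ALIGNS = 70 / 32 = 2, moves 2 * 4 = 8
   bytes into the offset (giving 12) and leaves *PBITPOS = 70 % 32 = 6,
   which is now below the offset alignment. */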
601 /* Print debugging information about the information in RLI. */
604 debug_rli (record_layout_info rli)
606 print_node_brief (stderr, "type", rli->t, 0);
607 print_node_brief (stderr, "\noffset", rli->offset, 0);
608 print_node_brief (stderr, " bitpos", rli->bitpos, 0);
610 fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n",
611 rli->record_align, rli->unpacked_align, rli->offset_align);
614 /* The ms_struct code is the only code that uses this. */
615 if (targetm.ms_bitfield_layout_p (rli->t))
616 fprintf (stderr, "remaining in alignment = %u\n", rli->remaining_in_alignment);
618 if (rli->packed_maybe_necessary)
619 fprintf (stderr, "packed may be necessary\n");
621 if (rli->pending_statics)
623 fprintf (stderr, "pending statics:\n");
624 debug_tree (rli->pending_statics);
628 /* Given an RLI with a possibly-incremented BITPOS, adjust OFFSET and
629 BITPOS if necessary to keep BITPOS below OFFSET_ALIGN. */
632 normalize_rli (record_layout_info rli)
634 normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align);
637 /* Returns the size in bytes allocated so far. */
640 rli_size_unit_so_far (record_layout_info rli)
642 return byte_from_pos (rli->offset, rli->bitpos);
645 /* Returns the size in bits allocated so far. */
648 rli_size_so_far (record_layout_info rli)
650 return bit_from_pos (rli->offset, rli->bitpos);
653 /* FIELD is about to be added to RLI->T. The alignment (in bits) of
654 the next available location within the record is given by KNOWN_ALIGN.
655 Update the variable alignment fields in RLI, and return the alignment
656 to give the FIELD. */
659 update_alignment_for_field (record_layout_info rli, tree field,
660 unsigned int known_align)
662 /* The alignment required for FIELD. */
663 unsigned int desired_align;
664 /* The type of this field. */
665 tree type = TREE_TYPE (field);
666 /* True if the field was explicitly aligned by the user. */
670 /* Do not attempt to align an ERROR_MARK node. */
671 if (TREE_CODE (type) == ERROR_MARK)
674 /* Lay out the field so we know what alignment it needs. */
675 layout_decl (field, known_align);
676 desired_align = DECL_ALIGN (field);
677 user_align = DECL_USER_ALIGN (field);
679 is_bitfield = (type != error_mark_node
680 && DECL_BIT_FIELD_TYPE (field)
681 && ! integer_zerop (TYPE_SIZE (type)));
683 /* Record must have at least as much alignment as any field.
684 Otherwise, the alignment of the field within the record is meaningless. */
686 if (targetm.ms_bitfield_layout_p (rli->t))
688 /* Here, the alignment of the underlying type of a bitfield can
689 affect the alignment of a record; even a zero-sized field
690 can do this. The alignment should be to the alignment of
691 the type, except that for zero-size bitfields this only
692 applies if there was an immediately prior, nonzero-size
693 bitfield. (That's the way it is, experimentally.) */
694 if ((!is_bitfield && !DECL_PACKED (field))
695 || (!integer_zerop (DECL_SIZE (field))
696 ? !DECL_PACKED (field)
698 && DECL_BIT_FIELD_TYPE (rli->prev_field)
699 && ! integer_zerop (DECL_SIZE (rli->prev_field)))))
701 unsigned int type_align = TYPE_ALIGN (type);
702 type_align = MAX (type_align, desired_align);
703 if (maximum_field_alignment != 0)
704 type_align = MIN (type_align, maximum_field_alignment);
705 rli->record_align = MAX (rli->record_align, type_align);
706 rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
709 #ifdef PCC_BITFIELD_TYPE_MATTERS
710 else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS)
712 /* Named bit-fields cause the entire structure to have the
713 alignment implied by their type. Some targets also apply the same
714 rules to unnamed bitfields. */
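/* Illustrative example (editorial note, not part of the original source;
   assumes a typical ILP32 target where PCC_BITFIELD_TYPE_MATTERS holds):
   in `struct s { char c; int f : 8; };' the named bit-field F has type
   int, so the code below raises the record alignment to TYPE_ALIGN of
   int, i.e. 32 bits, even though F itself occupies only 8 bits. */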
715 if (DECL_NAME (field) != 0
716 || targetm.align_anon_bitfield ())
718 unsigned int type_align = TYPE_ALIGN (type);
720 #ifdef ADJUST_FIELD_ALIGN
721 if (! TYPE_USER_ALIGN (type))
722 type_align = ADJUST_FIELD_ALIGN (field, type_align);
725 /* Targets might choose to handle unnamed and hence possibly
726 zero-width bit-fields. Those are not influenced by #pragmas
727 or packed attributes. */
728 if (integer_zerop (DECL_SIZE (field)))
730 if (initial_max_fld_align)
731 type_align = MIN (type_align,
732 initial_max_fld_align * BITS_PER_UNIT);
734 else if (maximum_field_alignment != 0)
735 type_align = MIN (type_align, maximum_field_alignment);
736 else if (DECL_PACKED (field))
737 type_align = MIN (type_align, BITS_PER_UNIT);
739 /* The alignment of the record is increased to the maximum
740 of the current alignment, the alignment indicated on the
741 field (i.e., the alignment specified by an __aligned__
742 attribute), and the alignment indicated by the type of the field. */
744 rli->record_align = MAX (rli->record_align, desired_align);
745 rli->record_align = MAX (rli->record_align, type_align);
748 rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
749 user_align |= TYPE_USER_ALIGN (type);
755 rli->record_align = MAX (rli->record_align, desired_align);
756 rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
759 TYPE_USER_ALIGN (rli->t) |= user_align;
761 return desired_align;
764 /* Called from place_field to handle unions. */
767 place_union_field (record_layout_info rli, tree field)
769 update_alignment_for_field (rli, field, /*known_align=*/0);
771 DECL_FIELD_OFFSET (field) = size_zero_node;
772 DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node;
773 SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT);
775 /* If this is an ERROR_MARK return *after* having set the
776 field at the start of the union. This helps when parsing invalid fields. */
778 if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK)
781 /* We assume the union's size will be a multiple of a byte so we don't
782 bother with BITPOS. */
783 if (TREE_CODE (rli->t) == UNION_TYPE)
784 rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
785 else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
786 rli->offset = fold_build3 (COND_EXPR, sizetype,
787 DECL_QUALIFIER (field),
788 DECL_SIZE_UNIT (field), rli->offset);
791 #if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
792 /* A bitfield of SIZE with a required access alignment of ALIGN is allocated
793 at BYTE_OFFSET / BIT_OFFSET. Return nonzero if the field would span more
794 units of alignment than the underlying TYPE. */
796 excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset,
797 HOST_WIDE_INT size, HOST_WIDE_INT align, tree type)
799 /* Note that the calculation of OFFSET might overflow; we calculate it so
800 that we still get the right result as long as ALIGN is a power of two. */
801 unsigned HOST_WIDE_INT offset = byte_offset * BITS_PER_UNIT + bit_offset;
803 offset = offset % align;
804 return ((offset + size + align - 1) / align
805 > ((unsigned HOST_WIDE_INT) tree_low_cst (TYPE_SIZE (type), 1)
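/* Worked example (editorial note, not part of the original source; assumes
   BITS_PER_UNIT == 8 and a 32-bit int aligned to 32 bits): a 16-bit
   bit-field of type int at BYTE_OFFSET = 3, BIT_OFFSET = 4 starts at bit
   28 of its alignment unit; (28 + 16 + 31) / 32 == 2 units, while the
   type itself spans 32 / 32 == 1 unit, so excess_unit_span returns
   nonzero and place_field advances the field to the next boundary. */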
810 /* RLI contains information about the layout of a RECORD_TYPE. FIELD
811 is a FIELD_DECL to be added after those fields already present in
812 T. (FIELD is not actually added to the TYPE_FIELDS list here;
813 callers that desire that behavior must manually perform that step.) */
816 place_field (record_layout_info rli, tree field)
818 /* The alignment required for FIELD. */
819 unsigned int desired_align;
820 /* The alignment FIELD would have if we just dropped it into the
821 record as it presently stands. */
822 unsigned int known_align;
823 unsigned int actual_align;
824 /* The type of this field. */
825 tree type = TREE_TYPE (field);
827 gcc_assert (TREE_CODE (field) != ERROR_MARK);
829 /* If FIELD is static, then treat it like a separate variable, not
830 really like a structure field. If it is a FUNCTION_DECL, it's a
831 method. In both cases, all we do is lay out the decl, and we do
832 it *after* the record is laid out. */
833 if (TREE_CODE (field) == VAR_DECL)
835 rli->pending_statics = tree_cons (NULL_TREE, field,
836 rli->pending_statics);
840 /* Enumerators and enum types which are local to this class need not
841 be laid out. Likewise for initialized constant fields. */
842 else if (TREE_CODE (field) != FIELD_DECL)
845 /* Unions are laid out very differently than records, so split
846 that code off to another function. */
847 else if (TREE_CODE (rli->t) != RECORD_TYPE)
849 place_union_field (rli, field);
853 else if (TREE_CODE (type) == ERROR_MARK)
855 /* Place this field at the current allocation position, so we
856 maintain monotonicity. */
857 DECL_FIELD_OFFSET (field) = rli->offset;
858 DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
859 SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
863 /* Work out the known alignment so far. Note that A & (-A) is the
864 value of the least-significant bit in A that is one. */
865 if (! integer_zerop (rli->bitpos))
866 known_align = (tree_low_cst (rli->bitpos, 1)
867 & - tree_low_cst (rli->bitpos, 1));
868 else if (integer_zerop (rli->offset))
870 else if (host_integerp (rli->offset, 1))
871 known_align = (BITS_PER_UNIT
872 * (tree_low_cst (rli->offset, 1)
873 & - tree_low_cst (rli->offset, 1)));
875 known_align = rli->offset_align;
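/* Illustrative example (editorial note, not part of the original source):
   A & -A isolates the lowest set bit, so a bit position of 24 (binary
   11000) gives 24 & -24 == 8, i.e. the current position is only known to
   be aligned to an 8-bit boundary. */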
877 desired_align = update_alignment_for_field (rli, field, known_align);
878 if (known_align == 0)
879 known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
881 if (warn_packed && DECL_PACKED (field))
883 if (known_align >= TYPE_ALIGN (type))
885 if (TYPE_ALIGN (type) > desired_align)
887 if (STRICT_ALIGNMENT)
888 warning (OPT_Wattributes, "packed attribute causes "
889 "inefficient alignment for %q+D", field);
891 warning (OPT_Wattributes, "packed attribute is "
892 "unnecessary for %q+D", field);
896 rli->packed_maybe_necessary = 1;
899 /* Does this field automatically have alignment it needs by virtue
900 of the fields that precede it and the record's own alignment?
901 We already align ms_struct fields, so don't re-align them. */
902 if (known_align < desired_align
903 && !targetm.ms_bitfield_layout_p (rli->t))
905 /* No, we need to skip space before this field.
906 Bump the cumulative size to multiple of field alignment. */
908 warning (OPT_Wpadded, "padding struct to align %q+D", field);
910 /* If the alignment is still within offset_align, just align the bit position. */
912 if (desired_align < rli->offset_align)
913 rli->bitpos = round_up (rli->bitpos, desired_align);
916 /* First adjust OFFSET by the partial bits, then align. */
918 = size_binop (PLUS_EXPR, rli->offset,
919 fold_convert (sizetype,
920 size_binop (CEIL_DIV_EXPR, rli->bitpos,
921 bitsize_unit_node)));
922 rli->bitpos = bitsize_zero_node;
924 rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT);
927 if (! TREE_CONSTANT (rli->offset))
928 rli->offset_align = desired_align;
932 /* Handle compatibility with PCC. Note that if the record has any
933 variable-sized fields, we need not worry about compatibility. */
934 #ifdef PCC_BITFIELD_TYPE_MATTERS
935 if (PCC_BITFIELD_TYPE_MATTERS
936 && ! targetm.ms_bitfield_layout_p (rli->t)
937 && TREE_CODE (field) == FIELD_DECL
938 && type != error_mark_node
939 && DECL_BIT_FIELD (field)
940 && (! DECL_PACKED (field)
941 /* Enter for these packed fields only to issue a warning. */
942 || TYPE_ALIGN (type) <= BITS_PER_UNIT)
943 && maximum_field_alignment == 0
944 && ! integer_zerop (DECL_SIZE (field))
945 && host_integerp (DECL_SIZE (field), 1)
946 && host_integerp (rli->offset, 1)
947 && host_integerp (TYPE_SIZE (type), 1))
949 unsigned int type_align = TYPE_ALIGN (type);
950 tree dsize = DECL_SIZE (field);
951 HOST_WIDE_INT field_size = tree_low_cst (dsize, 1);
952 HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0);
953 HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0);
955 #ifdef ADJUST_FIELD_ALIGN
956 if (! TYPE_USER_ALIGN (type))
957 type_align = ADJUST_FIELD_ALIGN (field, type_align);
960 /* A bit field may not span more units of alignment of its type
961 than its type itself. Advance to next boundary if necessary. */
962 if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
964 if (DECL_PACKED (field))
966 if (warn_packed_bitfield_compat == 1)
969 "Offset of packed bit-field %qD has changed in GCC 4.4",
973 rli->bitpos = round_up (rli->bitpos, type_align);
976 if (! DECL_PACKED (field))
977 TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
981 #ifdef BITFIELD_NBYTES_LIMITED
982 if (BITFIELD_NBYTES_LIMITED
983 && ! targetm.ms_bitfield_layout_p (rli->t)
984 && TREE_CODE (field) == FIELD_DECL
985 && type != error_mark_node
986 && DECL_BIT_FIELD_TYPE (field)
987 && ! DECL_PACKED (field)
988 && ! integer_zerop (DECL_SIZE (field))
989 && host_integerp (DECL_SIZE (field), 1)
990 && host_integerp (rli->offset, 1)
991 && host_integerp (TYPE_SIZE (type), 1))
993 unsigned int type_align = TYPE_ALIGN (type);
994 tree dsize = DECL_SIZE (field);
995 HOST_WIDE_INT field_size = tree_low_cst (dsize, 1);
996 HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0);
997 HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0);
999 #ifdef ADJUST_FIELD_ALIGN
1000 if (! TYPE_USER_ALIGN (type))
1001 type_align = ADJUST_FIELD_ALIGN (field, type_align);
1004 if (maximum_field_alignment != 0)
1005 type_align = MIN (type_align, maximum_field_alignment);
1006 /* ??? This test is opposite the test in the containing if
1007 statement, so this code is unreachable currently. */
1008 else if (DECL_PACKED (field))
1009 type_align = MIN (type_align, BITS_PER_UNIT);
1011 /* A bit field may not span the unit of alignment of its type.
1012 Advance to next boundary if necessary. */
1013 if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
1014 rli->bitpos = round_up (rli->bitpos, type_align);
1016 TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
1020 /* See the docs for TARGET_MS_BITFIELD_LAYOUT_P for details.
1022 When a bit field is inserted into a packed record, the whole
1023 size of the underlying type is used by one or more same-size
1024 adjacent bitfields. (That is, if it's long:3, 32 bits is
1025 used in the record, and any additional adjacent long bitfields are
1026 packed into the same chunk of 32 bits. However, if the size
1027 changes, a new field of that size is allocated.) In an unpacked
1028 record, this is the same as using alignment, but not equivalent when packing.
1031 Note: for compatibility, we use the type size, not the type alignment,
1032 to determine alignment, since that matches the documentation. */
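/* Illustrative example (editorial note, not part of the original source;
   assumes a 32-bit unsigned int): under ms_struct layout of
   `unsigned a : 10; unsigned b : 10; unsigned c : 20;' the field A starts
   a run with remaining_in_alignment = 32 - 10 = 22; B fits in the same
   32-bit unit, leaving 12 bits; C needs 20 bits, so the code below bumps
   the position past the 12 leftover bits and starts a new 32-bit unit
   with remaining_in_alignment = 32 - 20 = 12. */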
1034 if (targetm.ms_bitfield_layout_p (rli->t))
1036 tree prev_saved = rli->prev_field;
1037 tree prev_type = prev_saved ? DECL_BIT_FIELD_TYPE (prev_saved) : NULL;
1039 /* This is a bitfield if it exists. */
1040 if (rli->prev_field)
1042 /* If both are bitfields, nonzero, and the same size, this is
1043 the middle of a run. Zero declared size fields are special
1044 and handled as "end of run". (Note: it's nonzero declared
1045 size, but equal type sizes!) (Since we know that both
1046 the current and previous fields are bitfields by the
1047 time we check it, DECL_SIZE must be present for both.) */
1048 if (DECL_BIT_FIELD_TYPE (field)
1049 && !integer_zerop (DECL_SIZE (field))
1050 && !integer_zerop (DECL_SIZE (rli->prev_field))
1051 && host_integerp (DECL_SIZE (rli->prev_field), 0)
1052 && host_integerp (TYPE_SIZE (type), 0)
1053 && simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type)))
1055 /* We're in the middle of a run of equal type size fields; make
1056 sure we realign if we run out of bits. (Not decl size, type size!) */
1058 HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 1);
1060 if (rli->remaining_in_alignment < bitsize)
1062 HOST_WIDE_INT typesize = tree_low_cst (TYPE_SIZE (type), 1);
1064 /* out of bits; bump up to next 'word'. */
1066 = size_binop (PLUS_EXPR, rli->bitpos,
1067 bitsize_int (rli->remaining_in_alignment));
1068 rli->prev_field = field;
1069 if (typesize < bitsize)
1070 rli->remaining_in_alignment = 0;
1072 rli->remaining_in_alignment = typesize - bitsize;
1075 rli->remaining_in_alignment -= bitsize;
1079 /* End of a run: if leaving a run of bitfields of the same type
1080 size, we have to "use up" the rest of the bits of the type size.
1083 Compute the new position as the sum of the size for the prior
1084 type and where we first started working on that type.
1085 Note: since the beginning of the field was aligned then
1086 of course the end will be too. No round needed. */
1088 if (!integer_zerop (DECL_SIZE (rli->prev_field)))
1091 = size_binop (PLUS_EXPR, rli->bitpos,
1092 bitsize_int (rli->remaining_in_alignment));
1095 /* We "use up" size zero fields; the code below should behave
1096 as if the prior field was not a bitfield. */
1099 /* Cause a new bitfield to be captured, either this time (if
1100 currently a bitfield) or next time we see one. */
1101 if (!DECL_BIT_FIELD_TYPE(field)
1102 || integer_zerop (DECL_SIZE (field)))
1103 rli->prev_field = NULL;
1106 normalize_rli (rli);
1109 /* If we're starting a new run of same size type bitfields
1110 (or a run of non-bitfields), set up the "first of the run"
1113 That is, if the current field is not a bitfield, or if there
1114 was a prior bitfield and the type sizes differ, or if there wasn't
1115 a prior bitfield and the size of the current field is nonzero.
1117 Note: we must be sure to test ONLY the type size if there was
1118 a prior bitfield and ONLY for the current field being zero if there wasn't. */
1121 if (!DECL_BIT_FIELD_TYPE (field)
1122 || (prev_saved != NULL
1123 ? !simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type))
1124 : !integer_zerop (DECL_SIZE (field)) ))
1126 /* Never smaller than a byte for compatibility. */
1127 unsigned int type_align = BITS_PER_UNIT;
1129 /* (When not a bitfield), we could be seeing a flex array (with
1130 no DECL_SIZE). Since we won't be using remaining_in_alignment
1131 until we see a bitfield (and come by here again) we just skip calculating it. */
1133 if (DECL_SIZE (field) != NULL
1134 && host_integerp (TYPE_SIZE (TREE_TYPE (field)), 1)
1135 && host_integerp (DECL_SIZE (field), 1))
1137 unsigned HOST_WIDE_INT bitsize
1138 = tree_low_cst (DECL_SIZE (field), 1);
1139 unsigned HOST_WIDE_INT typesize
1140 = tree_low_cst (TYPE_SIZE (TREE_TYPE (field)), 1);
1142 if (typesize < bitsize)
1143 rli->remaining_in_alignment = 0;
1145 rli->remaining_in_alignment = typesize - bitsize;
1148 /* Now align (conventionally) for the new type. */
1149 type_align = TYPE_ALIGN (TREE_TYPE (field));
1151 if (maximum_field_alignment != 0)
1152 type_align = MIN (type_align, maximum_field_alignment);
1154 rli->bitpos = round_up (rli->bitpos, type_align);
1156 /* If we really aligned, don't allow subsequent bitfields to undo that. */
1158 rli->prev_field = NULL;
1162 /* Offset so far becomes the position of this field after normalizing. */
1163 normalize_rli (rli);
1164 DECL_FIELD_OFFSET (field) = rli->offset;
1165 DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
1166 SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
1168 /* If this field ended up more aligned than we thought it would be (we
1169 approximate this by seeing if its position changed), lay out the field
1170 again; perhaps we can use an integral mode for it now. */
1171 if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field)))
1172 actual_align = (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
1173 & - tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1));
1174 else if (integer_zerop (DECL_FIELD_OFFSET (field)))
1175 actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
1176 else if (host_integerp (DECL_FIELD_OFFSET (field), 1))
1177 actual_align = (BITS_PER_UNIT
1178 * (tree_low_cst (DECL_FIELD_OFFSET (field), 1)
1179 & - tree_low_cst (DECL_FIELD_OFFSET (field), 1)));
1181 actual_align = DECL_OFFSET_ALIGN (field);
1182 /* ACTUAL_ALIGN is still the actual alignment *within the record*.
1183 store / extract bit field operations will check the alignment of the
1184 record against the mode of bit fields. */
1186 if (known_align != actual_align)
1187 layout_decl (field, actual_align);
1189 if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE (field))
1190 rli->prev_field = field;
1192 /* Now add size of this field to the size of the record. If the size is
1193 not constant, treat the field as being a multiple of bytes and just
1194 adjust the offset, resetting the bit position. Otherwise, apportion the
1195 size amongst the bit position and offset. First handle the case of an
1196 unspecified size, which can happen when we have an invalid nested struct
1197 definition, such as struct j { struct j { int i; } }. The error message
1198 is printed in finish_struct. */
1199 if (DECL_SIZE (field) == 0)
1201 else if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST
1202 || TREE_OVERFLOW (DECL_SIZE (field)))
1205 = size_binop (PLUS_EXPR, rli->offset,
1206 fold_convert (sizetype,
1207 size_binop (CEIL_DIV_EXPR, rli->bitpos,
1208 bitsize_unit_node)));
1210 = size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field));
1211 rli->bitpos = bitsize_zero_node;
1212 rli->offset_align = MIN (rli->offset_align, desired_align);
1214 else if (targetm.ms_bitfield_layout_p (rli->t))
1216 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
1218 /* If we ended a bitfield before the full length of the type then
1219 pad the struct out to the full length of the last type. */
1220 if ((TREE_CHAIN (field) == NULL
1221 || TREE_CODE (TREE_CHAIN (field)) != FIELD_DECL)
1222 && DECL_BIT_FIELD_TYPE (field)
1223 && !integer_zerop (DECL_SIZE (field)))
1224 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos,
1225 bitsize_int (rli->remaining_in_alignment));
1227 normalize_rli (rli);
1231 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
1232 normalize_rli (rli);
1236 /* Assuming that all the fields have been laid out, this function uses
1237 RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type
1238 indicated by RLI. */
1241 finalize_record_size (record_layout_info rli)
1243 tree unpadded_size, unpadded_size_unit;
1245 /* Now we want just byte and bit offsets, so set the offset alignment
1246 to be a byte and then normalize. */
1247 rli->offset_align = BITS_PER_UNIT;
1248 normalize_rli (rli);
1250 /* Determine the desired alignment. */
1251 #ifdef ROUND_TYPE_ALIGN
1252 TYPE_ALIGN (rli->t) = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->record_align);
1255 TYPE_ALIGN (rli->t) = MAX (TYPE_ALIGN (rli->t), rli->record_align);
1258 /* Compute the size so far. Be sure to allow for extra bits in the
1259 size in bytes. We have guaranteed above that it will be no more
1260 than a single byte. */
1261 unpadded_size = rli_size_so_far (rli);
1262 unpadded_size_unit = rli_size_unit_so_far (rli);
1263 if (! integer_zerop (rli->bitpos))
1265 = size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node);
1267 /* Round the size up to be a multiple of the required alignment. */
1268 TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t));
1269 TYPE_SIZE_UNIT (rli->t)
1270 = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));
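/* Worked example (editorial note, not part of the original source; assumes
   a typical ILP32 ABI with 4-byte int alignment): for
   `struct s { int i; char c; };' the unpadded size is 40 bits (5 bytes)
   and TYPE_ALIGN is 32 bits, so TYPE_SIZE is rounded up to 64 bits and
   TYPE_SIZE_UNIT to 8 bytes, and -Wpadded reports the padding below. */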
1272 if (TREE_CONSTANT (unpadded_size)
1273 && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0)
1274 warning (OPT_Wpadded, "padding struct size to alignment boundary");
1276 if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE
1277 && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary
1278 && TREE_CONSTANT (unpadded_size))
1282 #ifdef ROUND_TYPE_ALIGN
1284 = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->unpacked_align);
1286 rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align);
1289 unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align);
1290 if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t)))
1292 TYPE_PACKED (rli->t) = 0;
1294 if (TYPE_NAME (rli->t))
1298 if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE)
1299 name = IDENTIFIER_POINTER (TYPE_NAME (rli->t));
1301 name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (rli->t)));
1303 if (STRICT_ALIGNMENT)
1304 warning (OPT_Wpacked, "packed attribute causes inefficient "
1305 "alignment for %qs", name);
1307 warning (OPT_Wpacked,
1308 "packed attribute is unnecessary for %qs", name);
1312 if (STRICT_ALIGNMENT)
1313 warning (OPT_Wpacked,
1314 "packed attribute causes inefficient alignment");
1316 warning (OPT_Wpacked, "packed attribute is unnecessary");
1322 /* Compute the TYPE_MODE for the TYPE (which is a RECORD_TYPE). */
1325 compute_record_mode (tree type)
1328 enum machine_mode mode = VOIDmode;
1330 /* Most RECORD_TYPEs have BLKmode, so we start off assuming that.
1331 However, if possible, we use a mode that fits in a register
1332 instead, in order to allow for better optimization down the line. */
1334 SET_TYPE_MODE (type, BLKmode);
1336 if (! host_integerp (TYPE_SIZE (type), 1))
1339 /* A record which has any BLKmode members must itself be
1340 BLKmode; it can't go in a register. Unless the member is
1341 BLKmode only because it isn't aligned. */
1342 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
1344 if (TREE_CODE (field) != FIELD_DECL)
1347 if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK
1348 || (TYPE_MODE (TREE_TYPE (field)) == BLKmode
1349 && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field))
1350 && !(TYPE_SIZE (TREE_TYPE (field)) != 0
1351 && integer_zerop (TYPE_SIZE (TREE_TYPE (field)))))
1352 || ! host_integerp (bit_position (field), 1)
1353 || DECL_SIZE (field) == 0
1354 || ! host_integerp (DECL_SIZE (field), 1))
1357 /* If this field is the whole struct, remember its mode so
1358 that, say, we can put a double in a class into a DF
1359 register instead of forcing it to live in the stack. */
1360 if (simple_cst_equal (TYPE_SIZE (type), DECL_SIZE (field)))
1361 mode = DECL_MODE (field);
1363 #ifdef MEMBER_TYPE_FORCES_BLK
1364 /* With some targets, e.g. c4x, it is sub-optimal
1365 to access an aligned BLKmode structure as a scalar. */
1367 if (MEMBER_TYPE_FORCES_BLK (field, mode))
1369 #endif /* MEMBER_TYPE_FORCES_BLK */
1372 /* If we only have one real field, use its mode if that mode's size
1373 matches the type's size. This only applies to RECORD_TYPE. This
1374 does not apply to unions. */
1375 if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode
1376 && host_integerp (TYPE_SIZE (type), 1)
1377 && GET_MODE_BITSIZE (mode) == TREE_INT_CST_LOW (TYPE_SIZE (type)))
1378 SET_TYPE_MODE (type, mode);
1380 SET_TYPE_MODE (type, mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1));
1382 /* If structure's known alignment is less than what the scalar
1383 mode would need, and it matters, then stick with BLKmode. */
1384 if (TYPE_MODE (type) != BLKmode
1386 && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT
1387 || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (TYPE_MODE (type))))
1389 /* If this is the only reason this type is BLKmode, then
1390 don't force containing types to be BLKmode. */
1391 TYPE_NO_FORCE_BLK (type) = 1;
1392 SET_TYPE_MODE (type, BLKmode);
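/* Illustrative example (editorial note, not part of the original source;
   assumes DFmode is a 64-bit scalar mode): for `struct s { double d; };'
   the single field spans the whole record, so MODE becomes DFmode and the
   struct can live in a floating-point register. A struct of two ints
   instead falls through to mode_for_size_tree, typically yielding DImode,
   or stays BLKmode when the scalar mode would need more alignment than
   the struct has on a strict-alignment target. */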
1396 /* Compute TYPE_SIZE and TYPE_ALIGN for TYPE, once it has been laid
1400 finalize_type_size (tree type)
1402 /* Normally, use the alignment corresponding to the mode chosen.
1403 However, where strict alignment is not required, avoid
1404 over-aligning structures, since most compilers do not do this alignment. */
1407 if (TYPE_MODE (type) != BLKmode && TYPE_MODE (type) != VOIDmode
1408 && (STRICT_ALIGNMENT
1409 || (TREE_CODE (type) != RECORD_TYPE && TREE_CODE (type) != UNION_TYPE
1410 && TREE_CODE (type) != QUAL_UNION_TYPE
1411 && TREE_CODE (type) != ARRAY_TYPE)))
1413 unsigned mode_align = GET_MODE_ALIGNMENT (TYPE_MODE (type));
1415 /* Don't override a larger alignment requirement coming from a user
1416 alignment of one of the fields. */
1417 if (mode_align >= TYPE_ALIGN (type))
1419 TYPE_ALIGN (type) = mode_align;
1420 TYPE_USER_ALIGN (type) = 0;
1424 /* Do machine-dependent extra alignment. */
1425 #ifdef ROUND_TYPE_ALIGN
1427 = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT);
1430 /* If we failed to find a simple way to calculate the unit size
1431 of the type, find it by division. */
1432 if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0)
1433 /* TYPE_SIZE (type) is computed in bitsizetype. After the division, the
1434 result will fit in sizetype. We will get more efficient code using
1435 sizetype, so we force a conversion. */
1436 TYPE_SIZE_UNIT (type)
1437 = fold_convert (sizetype,
1438 size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type),
1439 bitsize_unit_node));
1441 if (TYPE_SIZE (type) != 0)
1443 TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type));
1444 TYPE_SIZE_UNIT (type) = round_up (TYPE_SIZE_UNIT (type),
1445 TYPE_ALIGN_UNIT (type));
1448 /* Evaluate nonconstant sizes only once, either now or as soon as safe. */
1449 if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
1450 TYPE_SIZE (type) = variable_size (TYPE_SIZE (type));
1451 if (TYPE_SIZE_UNIT (type) != 0
1452 && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)
1453 TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type));
1455 /* Also layout any other variants of the type. */
1456 if (TYPE_NEXT_VARIANT (type)
1457 || type != TYPE_MAIN_VARIANT (type))
1460 /* Record layout info of this variant. */
1461 tree size = TYPE_SIZE (type);
1462 tree size_unit = TYPE_SIZE_UNIT (type);
1463 unsigned int align = TYPE_ALIGN (type);
1464 unsigned int user_align = TYPE_USER_ALIGN (type);
1465 enum machine_mode mode = TYPE_MODE (type);
1467 /* Copy it into all variants. */
1468 for (variant = TYPE_MAIN_VARIANT (type);
1470 variant = TYPE_NEXT_VARIANT (variant))
1472 TYPE_SIZE (variant) = size;
1473 TYPE_SIZE_UNIT (variant) = size_unit;
1474 TYPE_ALIGN (variant) = align;
1475 TYPE_USER_ALIGN (variant) = user_align;
1476 SET_TYPE_MODE (variant, mode);
1481 /* Do all of the work required to layout the type indicated by RLI,
1482 once the fields have been laid out. This function will call `free'
1483 for RLI, unless FREE_P is false. Passing a value other than false
1484 for FREE_P is bad practice; this option only exists to support the
1488 finish_record_layout (record_layout_info rli, int free_p)
1492 /* Compute the final size. */
1493 finalize_record_size (rli);
1495 /* Compute the TYPE_MODE for the record. */
1496 compute_record_mode (rli->t);
1498 /* Perform any last tweaks to the TYPE_SIZE, etc. */
1499 finalize_type_size (rli->t);
1501 /* Propagate TYPE_PACKED to variants. With C++ templates,
1502 handle_packed_attribute is too early to do this. */
1503 for (variant = TYPE_NEXT_VARIANT (rli->t); variant;
1504 variant = TYPE_NEXT_VARIANT (variant))
1505 TYPE_PACKED (variant) = TYPE_PACKED (rli->t);
1507 /* Lay out any static members. This is done now because their type
1508 may use the record's type. */
1509 while (rli->pending_statics)
1511 layout_decl (TREE_VALUE (rli->pending_statics), 0);
1512 rli->pending_statics = TREE_CHAIN (rli->pending_statics);
1521 /* Finish processing a builtin RECORD_TYPE type TYPE. Its name is
1522 NAME, its fields are chained in reverse on FIELDS.
1524 If ALIGN_TYPE is non-null, it is given the same alignment as ALIGN_TYPE. */
1528 finish_builtin_struct (tree type, const char *name, tree fields,
1533 for (tail = NULL_TREE; fields; tail = fields, fields = next)
1535 DECL_FIELD_CONTEXT (fields) = type;
1536 next = TREE_CHAIN (fields);
1537 TREE_CHAIN (fields) = tail;
1539 TYPE_FIELDS (type) = tail;
1543 TYPE_ALIGN (type) = TYPE_ALIGN (align_type);
1544 TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (align_type);
1548 #if 0 /* not yet, should get fixed properly later */
1549 TYPE_NAME (type) = make_type_decl (get_identifier (name), type);
1551 TYPE_NAME (type) = build_decl (TYPE_DECL, get_identifier (name), type);
1553 TYPE_STUB_DECL (type) = TYPE_NAME (type);
1554 layout_decl (TYPE_NAME (type), 0);
1557 /* Calculate the mode, size, and alignment for TYPE.
1558 For an array type, calculate the element separation as well.
1559 Record TYPE on the chain of permanent or temporary types
1560 so that dbxout will find out about it.
1562 TYPE_SIZE of a type is nonzero if the type has been laid out already.
1563 layout_type does nothing on such a type.
1565 If the type is incomplete, its TYPE_SIZE remains zero. */
1568 layout_type (tree type)
1572 if (type == error_mark_node)
1575 /* Do nothing if type has been laid out before. */
1576 if (TYPE_SIZE (type))
1579 switch (TREE_CODE (type))
1582 /* This kind of type is the responsibility
1583 of the language-specific code. */
1586 case BOOLEAN_TYPE: /* Used for Java, Pascal, and Chill. */
1587 if (TYPE_PRECISION (type) == 0)
1588 TYPE_PRECISION (type) = 1; /* default to one byte/boolean. */
1590 /* ... fall through ... */
1594 if (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST
1595 && tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0)
1596 TYPE_UNSIGNED (type) = 1;
1598 SET_TYPE_MODE (type,
1599 smallest_mode_for_size (TYPE_PRECISION (type), MODE_INT));
1600 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
1601 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
1605 SET_TYPE_MODE (type,
1606 mode_for_size (TYPE_PRECISION (type), MODE_FLOAT, 0));
1607 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
1608 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
1611 case FIXED_POINT_TYPE:
1612 /* TYPE_MODE (type) has been set already. */
1613 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
1614 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
1618 TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
1619 SET_TYPE_MODE (type,
1620 mode_for_size (2 * TYPE_PRECISION (TREE_TYPE (type)),
1621 (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
1622 ? MODE_COMPLEX_FLOAT : MODE_COMPLEX_INT),
1624 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
1625 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
1630 int nunits = TYPE_VECTOR_SUBPARTS (type);
1631 tree innertype = TREE_TYPE (type);
1633 gcc_assert (!(nunits & (nunits - 1)));
1635 /* Find an appropriate mode for the vector type. */
1636 if (TYPE_MODE (type) == VOIDmode)
1638 enum machine_mode innermode = TYPE_MODE (innertype);
1639 enum machine_mode mode;
1641 /* First, look for a supported vector type. */
1642 if (SCALAR_FLOAT_MODE_P (innermode))
1643 mode = MIN_MODE_VECTOR_FLOAT;
1644 else if (SCALAR_FRACT_MODE_P (innermode))
1645 mode = MIN_MODE_VECTOR_FRACT;
1646 else if (SCALAR_UFRACT_MODE_P (innermode))
1647 mode = MIN_MODE_VECTOR_UFRACT;
1648 else if (SCALAR_ACCUM_MODE_P (innermode))
1649 mode = MIN_MODE_VECTOR_ACCUM;
1650 else if (SCALAR_UACCUM_MODE_P (innermode))
1651 mode = MIN_MODE_VECTOR_UACCUM;
1653 mode = MIN_MODE_VECTOR_INT;
1655 /* Do not check vector_mode_supported_p here. We'll do that
1656 later in vector_type_mode. */
1657 for (; mode != VOIDmode ; mode = GET_MODE_WIDER_MODE (mode))
1658 if (GET_MODE_NUNITS (mode) == nunits
1659 && GET_MODE_INNER (mode) == innermode)
1662 /* For integers, try mapping it to a same-sized scalar mode. */
1663 if (mode == VOIDmode
1664 && GET_MODE_CLASS (innermode) == MODE_INT)
1665 mode = mode_for_size (nunits * GET_MODE_BITSIZE (innermode),
1668 if (mode == VOIDmode ||
1669 (GET_MODE_CLASS (mode) == MODE_INT
1670 && !have_regs_of_mode[mode]))
1671 SET_TYPE_MODE (type, BLKmode);
1673 SET_TYPE_MODE (type, mode);
1676 TYPE_SATURATING (type) = TYPE_SATURATING (TREE_TYPE (type));
1677 TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
1678 TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
1679 TYPE_SIZE_UNIT (innertype),
1680 size_int (nunits), 0);
1681 TYPE_SIZE (type) = int_const_binop (MULT_EXPR, TYPE_SIZE (innertype),
1682 bitsize_int (nunits), 0);
1684 /* Always naturally align vectors. This prevents ABI changes
1685 depending on whether or not native vector modes are supported. */
1686 TYPE_ALIGN (type) = tree_low_cst (TYPE_SIZE (type), 0);
1691 /* This is an incomplete type and so doesn't have a size. */
1692 TYPE_ALIGN (type) = 1;
1693 TYPE_USER_ALIGN (type) = 0;
1694 SET_TYPE_MODE (type, VOIDmode);
1698 TYPE_SIZE (type) = bitsize_int (POINTER_SIZE);
1699 TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE / BITS_PER_UNIT);
1700 /* A pointer might be MODE_PARTIAL_INT,
1701 but ptrdiff_t must be integral. */
1702 SET_TYPE_MODE (type, mode_for_size (POINTER_SIZE, MODE_INT, 0));
1707 /* It's hard to see what the mode and size of a function ought to
1708 be, but we do know the alignment is FUNCTION_BOUNDARY, so
1709 make it consistent with that. */
1710 SET_TYPE_MODE (type, mode_for_size (FUNCTION_BOUNDARY, MODE_INT, 0));
1711 TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY);
1712 TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
1716 case REFERENCE_TYPE:
1718 enum machine_mode mode = ((TREE_CODE (type) == REFERENCE_TYPE
1719 && reference_types_internal)
1720 ? Pmode : TYPE_MODE (type));
1722 int nbits = GET_MODE_BITSIZE (mode);
1724 TYPE_SIZE (type) = bitsize_int (nbits);
1725 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
1726 TYPE_UNSIGNED (type) = 1;
1727 TYPE_PRECISION (type) = nbits;
1733 tree index = TYPE_DOMAIN (type);
1734 tree element = TREE_TYPE (type);
1736 build_pointer_type (element);
1738 /* We need to know both bounds in order to compute the size. */
1739 if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index)
1740 && TYPE_SIZE (element))
1742 tree ub = TYPE_MAX_VALUE (index);
1743 tree lb = TYPE_MIN_VALUE (index);
1747 /* The initial subtraction should happen in the original type so
1748 that (possible) negative values are handled appropriately. */
1749 length = size_binop (PLUS_EXPR, size_one_node,
1750 fold_convert (sizetype,
1751 fold_build2 (MINUS_EXPR,
1755 /* Special handling for arrays of bits (for Chill). */
1756 element_size = TYPE_SIZE (element);
1757 if (TYPE_PACKED (type) && INTEGRAL_TYPE_P (element)
1758 && (integer_zerop (TYPE_MAX_VALUE (element))
1759 || integer_onep (TYPE_MAX_VALUE (element)))
1760 && host_integerp (TYPE_MIN_VALUE (element), 1))
1762 HOST_WIDE_INT maxvalue
1763 = tree_low_cst (TYPE_MAX_VALUE (element), 1);
1764 HOST_WIDE_INT minvalue
1765 = tree_low_cst (TYPE_MIN_VALUE (element), 1);
1767 if (maxvalue - minvalue == 1
1768 && (maxvalue == 1 || maxvalue == 0))
1769 element_size = integer_one_node;
1772 /* If neither bound is a constant and sizetype is signed, make
1773 sure the size is never negative. We should really do this
1774 if *either* bound is non-constant, but this is the best
1775 compromise between C and Ada. */
1776 if (!TYPE_UNSIGNED (sizetype)
1777 && TREE_CODE (TYPE_MIN_VALUE (index)) != INTEGER_CST
1778 && TREE_CODE (TYPE_MAX_VALUE (index)) != INTEGER_CST)
1779 length = size_binop (MAX_EXPR, length, size_zero_node);
1781 TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
1782 fold_convert (bitsizetype,
1785 /* If we know the size of the element, calculate the total
1786 size directly, rather than do some division thing below.
1787 This optimization helps Fortran assumed-size arrays
1788 (where the size of the array is determined at runtime) substantially.
1790 Note that we can't do this in the case where the size of
1791 the elements is one bit since TYPE_SIZE_UNIT cannot be
1792 set correctly in that case. */
1793 if (TYPE_SIZE_UNIT (element) != 0 && ! integer_onep (element_size))
1794 TYPE_SIZE_UNIT (type)
1795 = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length);
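/* Worked example (editorial note, not part of the original source; assumes
   a 32-bit int): for `int a[10]' the domain runs from LB = 0 to UB = 9,
   so LENGTH = 9 - 0 + 1 = 10, ELEMENT_SIZE = 32 bits, and the code above
   sets TYPE_SIZE to 320 bits and TYPE_SIZE_UNIT to 40 bytes. */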
1798 /* Now round the alignment and size,
1799 using machine-dependent criteria if any. */
1801 #ifdef ROUND_TYPE_ALIGN
1803 = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (element), BITS_PER_UNIT);
1805 TYPE_ALIGN (type) = MAX (TYPE_ALIGN (element), BITS_PER_UNIT);
1807 if (!TYPE_SIZE (element))
1808 /* We don't know the size of the underlying element type, so
1809 our alignment calculations will be wrong, forcing us to
1810 fall back on structural equality. */
1811 SET_TYPE_STRUCTURAL_EQUALITY (type);
1812 TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element);
1813 SET_TYPE_MODE (type, BLKmode);
1814 if (TYPE_SIZE (type) != 0
1815 #ifdef MEMBER_TYPE_FORCES_BLK
1816 && ! MEMBER_TYPE_FORCES_BLK (type, VOIDmode)
1818 /* BLKmode elements force BLKmode aggregate;
1819 else extract/store fields may lose. */
1820 && (TYPE_MODE (TREE_TYPE (type)) != BLKmode
1821 || TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
1823 /* One-element arrays get the component type's mode. */
1824 if (simple_cst_equal (TYPE_SIZE (type),
1825 TYPE_SIZE (TREE_TYPE (type))))
1826 SET_TYPE_MODE (type, TYPE_MODE (TREE_TYPE (type)));
1828 SET_TYPE_MODE (type, mode_for_size_tree (TYPE_SIZE (type),
1831 if (TYPE_MODE (type) != BLKmode
1832 && STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
1833 && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type)))
1835 TYPE_NO_FORCE_BLK (type) = 1;
1836 SET_TYPE_MODE (type, BLKmode);
1839 /* When the element size is constant, check that it is at least as
1840 large as the element alignment. */
1841 if (TYPE_SIZE_UNIT (element)
1842 && TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST
1843 /* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than
1844 the alignment. */
1845 && !TREE_OVERFLOW (TYPE_SIZE_UNIT (element))
1846 && !integer_zerop (TYPE_SIZE_UNIT (element))
1847 && compare_tree_int (TYPE_SIZE_UNIT (element),
1848 TYPE_ALIGN_UNIT (element)) < 0)
1849 error ("alignment of array elements is greater than element size");
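/* Hedged example (illustrative only) of user code that would be expected
   to trip this diagnostic: an element type whose requested alignment
   exceeds its size, e.g.

     typedef char overaligned_char __attribute__ ((aligned (8)));
     overaligned_char buf[16];

   Here TYPE_SIZE_UNIT of the element is 1 byte while its alignment is
   8 bytes, so consecutive array elements cannot all be aligned. */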
1855 case QUAL_UNION_TYPE:
1858 record_layout_info rli;
1860 /* Initialize the layout information. */
1861 rli = start_record_layout (type);
1863 /* If this is a QUAL_UNION_TYPE, we want to process the fields
1864 in the reverse order in building the COND_EXPR that denotes
1865 its size. We reverse them again later. */
1866 if (TREE_CODE (type) == QUAL_UNION_TYPE)
1867 TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
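/* Illustration (not from the original source): for a QUAL_UNION_TYPE with
   fields F1, F2, F3 guarded by qualifiers C1, C2, C3, the resulting size
   is a nested COND_EXPR of roughly the shape

     C1 ? size(F1) : (C2 ? size(F2) : (C3 ? size(F3) : 0))

   Building it innermost-first is why the field list is reversed here and
   restored after the fields have been placed. */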
1869 /* Place all the fields. */
1870 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
1871 place_field (rli, field);
1873 if (TREE_CODE (type) == QUAL_UNION_TYPE)
1874 TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
1876 /* Finish laying out the record. */
1877 finish_record_layout (rli, /*free_p=*/true);
1885 /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE. For
1886 records and unions, finish_record_layout already called this
1888 if (TREE_CODE (type) != RECORD_TYPE
1889 && TREE_CODE (type) != UNION_TYPE
1890 && TREE_CODE (type) != QUAL_UNION_TYPE)
1891 finalize_type_size (type);
1893 /* We should never see alias sets on incomplete aggregates, and
1894 layout_type should only be called on aggregates that are still incomplete. */
1895 if (AGGREGATE_TYPE_P (type))
1896 gcc_assert (!TYPE_ALIAS_SET_KNOWN_P (type));
1899 /* Vector types need to re-check the target flags each time we report
1900 the machine mode. We need to do this because attribute target can
1901 change the result of vector_mode_supported_p and have_regs_of_mode
1902 on a per-function basis. Thus the TYPE_MODE of a VECTOR_TYPE can
1903 change on a per-function basis. */
1904 /* ??? Possibly a better solution is to run through all the types
1905 referenced by a function and re-compute the TYPE_MODE once, rather
1906 than make the TYPE_MODE macro call a function. */
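/* Hedged example of why the re-check matters (not from the original
   source): given

     __attribute__ ((target ("avx"))) void f (void) { ... }
     void g (void) { ... }

   a wide vector type shared by both functions may map to a supported
   vector mode in f yet be unsupported in g, so the mode must be
   recomputed per function instead of being cached once in the type. */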
1909 vector_type_mode (const_tree t)
1911 enum machine_mode mode;
1913 gcc_assert (TREE_CODE (t) == VECTOR_TYPE);
1915 mode = t->type.mode;
1916 if (VECTOR_MODE_P (mode)
1917 && (!targetm.vector_mode_supported_p (mode)
1918 || !have_regs_of_mode[mode]))
1920 enum machine_mode innermode = TREE_TYPE (t)->type.mode;
1922 /* For integers, try mapping it to a same-sized scalar mode. */
1923 if (GET_MODE_CLASS (innermode) == MODE_INT)
1925 mode = mode_for_size (TYPE_VECTOR_SUBPARTS (t)
1926 * GET_MODE_BITSIZE (innermode), MODE_INT, 0);
1928 if (mode != VOIDmode && have_regs_of_mode[mode])
1938 /* Create and return a type for signed integers of PRECISION bits. */
1941 make_signed_type (int precision)
1943 tree type = make_node (INTEGER_TYPE);
1945 TYPE_PRECISION (type) = precision;
1947 fixup_signed_type (type);
1951 /* Create and return a type for unsigned integers of PRECISION bits. */
1954 make_unsigned_type (int precision)
1956 tree type = make_node (INTEGER_TYPE);
1958 TYPE_PRECISION (type) = precision;
1960 fixup_unsigned_type (type);
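/* Illustrative usage (hypothetical, not from the original source): a front
   end that needs non-standard integer widths might write

     tree s12 = make_signed_type (12);
     tree u24 = make_unsigned_type (24);

   Each call sets TYPE_PRECISION and then computes the extreme values and
   the layout via fixup_signed_type or fixup_unsigned_type. */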
1964 /* Create and return a type for fract of PRECISION bits, UNSIGNEDP,
1965 and SATP. */
1968 make_fract_type (int precision, int unsignedp, int satp)
1970 tree type = make_node (FIXED_POINT_TYPE);
1972 TYPE_PRECISION (type) = precision;
1975 TYPE_SATURATING (type) = 1;
1977 /* Lay out the type: set its alignment, size, etc. */
1980 TYPE_UNSIGNED (type) = 1;
1981 SET_TYPE_MODE (type, mode_for_size (precision, MODE_UFRACT, 0));
1984 SET_TYPE_MODE (type, mode_for_size (precision, MODE_FRACT, 0));
1990 /* Create and return a type for accum of PRECISION bits, UNSIGNEDP,
1991 and SATP. */
1994 make_accum_type (int precision, int unsignedp, int satp)
1996 tree type = make_node (FIXED_POINT_TYPE);
1998 TYPE_PRECISION (type) = precision;
2001 TYPE_SATURATING (type) = 1;
2003 /* Lay out the type: set its alignment, size, etc. */
2006 TYPE_UNSIGNED (type) = 1;
2007 SET_TYPE_MODE (type, mode_for_size (precision, MODE_UACCUM, 0));
2010 SET_TYPE_MODE (type, mode_for_size (precision, MODE_ACCUM, 0));
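/* Illustrative usage (hypothetical, not from the original source): a front
   end implementing the fixed-point extension might create a 16-bit
   unsigned saturating fract type and a 32-bit signed accum type with

     tree t1 = make_fract_type (16, /*unsignedp=*/1, /*satp=*/1);
     tree t2 = make_accum_type (32, /*unsignedp=*/0, /*satp=*/0);

   UNSIGNEDP selects between the signed and unsigned mode classes
   (MODE_FRACT/MODE_UFRACT, MODE_ACCUM/MODE_UACCUM) and SATP sets
   TYPE_SATURATING. */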
2016 /* Initialize sizetype and bitsizetype to a reasonable and temporary
2017 value to enable integer types to be created. */
2020 initialize_sizetypes (bool signed_p)
2022 tree t = make_node (INTEGER_TYPE);
2023 int precision = GET_MODE_BITSIZE (SImode);
2025 SET_TYPE_MODE (t, SImode);
2026 TYPE_ALIGN (t) = GET_MODE_ALIGNMENT (SImode);
2027 TYPE_USER_ALIGN (t) = 0;
2028 TYPE_IS_SIZETYPE (t) = 1;
2029 TYPE_UNSIGNED (t) = !signed_p;
2030 TYPE_SIZE (t) = build_int_cst (t, precision);
2031 TYPE_SIZE_UNIT (t) = build_int_cst (t, GET_MODE_SIZE (SImode));
2032 TYPE_PRECISION (t) = precision;
2034 /* Set TYPE_MIN_VALUE and TYPE_MAX_VALUE. */
2035 set_min_and_max_values_for_integral_type (t, precision, !signed_p);
2038 bitsizetype = build_distinct_type_copy (t);
2041 /* Make sizetype a version of TYPE, and initialize *sizetype
2042 accordingly. We do this by overwriting the stub sizetype and
2043 bitsizetype nodes created by initialize_sizetypes. This makes sure
2044 that (a) anything stubby about them no longer exists and (b) any
2045 INTEGER_CSTs created with such a type remain valid. */
2048 set_sizetype (tree type)
2050 int oprecision = TYPE_PRECISION (type);
2051 /* The *bitsizetype types use a precision that avoids overflows when
2052 calculating signed sizes / offsets in bits. However, when
2053 cross-compiling from a 32 bit to a 64 bit host, we are limited to 64 bit
2054 precision. */
2055 int precision = MIN (MIN (oprecision + BITS_PER_UNIT_LOG + 1,
2056 MAX_FIXED_MODE_SIZE),
2057 2 * HOST_BITS_PER_WIDE_INT);
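/* Worked example (illustrative, with assumed target parameters): for a
   32-bit sizetype with BITS_PER_UNIT_LOG == 3, the inner term is
   32 + 3 + 1 == 36 bits, enough to express any byte size in bits as a
   signed value; the result is then capped by MAX_FIXED_MODE_SIZE and by
   2 * HOST_BITS_PER_WIDE_INT so the constants remain representable on
   the host. */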
2060 gcc_assert (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (sizetype));
2062 t = build_distinct_type_copy (type);
2063 /* We do want to use sizetype's cache, as we will be replacing that
2064 type anyway. */
2065 TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (sizetype);
2066 TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (sizetype);
2067 TREE_TYPE (TYPE_CACHED_VALUES (t)) = type;
2068 TYPE_UID (t) = TYPE_UID (sizetype);
2069 TYPE_IS_SIZETYPE (t) = 1;
2071 /* Replace our original stub sizetype. */
2072 memcpy (sizetype, t, tree_size (sizetype));
2073 TYPE_MAIN_VARIANT (sizetype) = sizetype;
2075 t = make_node (INTEGER_TYPE);
2076 TYPE_NAME (t) = get_identifier ("bit_size_type");
2077 /* We do want to use bitsizetype's cache, as we will be replacing that
2078 type anyway. */
2079 TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (bitsizetype);
2080 TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (bitsizetype);
2081 TYPE_PRECISION (t) = precision;
2082 TYPE_UID (t) = TYPE_UID (bitsizetype);
2083 TYPE_IS_SIZETYPE (t) = 1;
2085 /* Replace our original stub bitsizetype. */
2086 memcpy (bitsizetype, t, tree_size (bitsizetype));
2087 TYPE_MAIN_VARIANT (bitsizetype) = bitsizetype;
2089 if (TYPE_UNSIGNED (type))
2091 fixup_unsigned_type (bitsizetype);
2092 ssizetype = build_distinct_type_copy (make_signed_type (oprecision));
2093 TYPE_IS_SIZETYPE (ssizetype) = 1;
2094 sbitsizetype = build_distinct_type_copy (make_signed_type (precision));
2095 TYPE_IS_SIZETYPE (sbitsizetype) = 1;
2099 fixup_signed_type (bitsizetype);
2100 ssizetype = sizetype;
2101 sbitsizetype = bitsizetype;
2104 /* If SIZETYPE is unsigned, we need to fix TYPE_MAX_VALUE so that
2105 it is sign extended in a way consistent with force_fit_type. */
2106 if (TYPE_UNSIGNED (type))
2108 tree orig_max, new_max;
2110 orig_max = TYPE_MAX_VALUE (sizetype);
2112 /* Build a new node with the same values, but a different type.
2113 Sign extend it to ensure consistency. */
2114 new_max = build_int_cst_wide_type (sizetype,
2115 TREE_INT_CST_LOW (orig_max),
2116 TREE_INT_CST_HIGH (orig_max));
2117 TYPE_MAX_VALUE (sizetype) = new_max;
2121 /* TYPE is an integral type, i.e., an INTEGER_TYPE, ENUMERAL_TYPE
2122 or BOOLEAN_TYPE. Set TYPE_MIN_VALUE and TYPE_MAX_VALUE
2123 for TYPE, based on the PRECISION and whether or not the TYPE
2124 IS_UNSIGNED. PRECISION need not correspond to a width supported
2125 natively by the hardware; for example, on a machine with 8-bit,
2126 16-bit, and 32-bit register modes, PRECISION might be 7, 23, or
2127 61. */
2130 set_min_and_max_values_for_integral_type (tree type,
2139 min_value = build_int_cst (type, 0);
2141 = build_int_cst_wide (type, precision - HOST_BITS_PER_WIDE_INT >= 0
2143 : ((HOST_WIDE_INT) 1 << precision) - 1,
2144 precision - HOST_BITS_PER_WIDE_INT > 0
2145 ? ((unsigned HOST_WIDE_INT) ~0
2146 >> (HOST_BITS_PER_WIDE_INT
2147 - (precision - HOST_BITS_PER_WIDE_INT)))
2153 = build_int_cst_wide (type,
2154 (precision - HOST_BITS_PER_WIDE_INT > 0
2156 : (HOST_WIDE_INT) (-1) << (precision - 1)),
2157 (((HOST_WIDE_INT) (-1)
2158 << (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
2159 ? precision - HOST_BITS_PER_WIDE_INT - 1
2162 = build_int_cst_wide (type,
2163 (precision - HOST_BITS_PER_WIDE_INT > 0
2165 : ((HOST_WIDE_INT) 1 << (precision - 1)) - 1),
2166 (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
2167 ? (((HOST_WIDE_INT) 1
2168 << (precision - HOST_BITS_PER_WIDE_INT - 1))) - 1
2172 TYPE_MIN_VALUE (type) = min_value;
2173 TYPE_MAX_VALUE (type) = max_value;
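/* Worked example (illustrative): for PRECISION == 7, an unsigned type
   gets the range 0 .. 127 while a signed one gets -64 .. 63; neither
   bound has to correspond to a hardware register width. */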
2176 /* Set the extreme values of TYPE based on its precision in bits,
2177 then lay it out. Used when make_signed_type won't do
2178 because the tree code is not INTEGER_TYPE.
2179 E.g. for Pascal, when the -fsigned-char option is given. */
2182 fixup_signed_type (tree type)
2184 int precision = TYPE_PRECISION (type);
2186 /* We cannot properly represent constants wider than
2187 2 * HOST_BITS_PER_WIDE_INT bits, but we still need the types,
2188 as they are used by the i386 vector extensions and friends. */
2189 if (precision > HOST_BITS_PER_WIDE_INT * 2)
2190 precision = HOST_BITS_PER_WIDE_INT * 2;
2192 set_min_and_max_values_for_integral_type (type, precision,
2193 /*is_unsigned=*/false);
2195 /* Lay out the type: set its alignment, size, etc. */
2199 /* Set the extreme values of TYPE based on its precision in bits,
2200 then lay it out. This is used both in `make_unsigned_type'
2201 and for enumeral types. */
2204 fixup_unsigned_type (tree type)
2206 int precision = TYPE_PRECISION (type);
2208 /* We cannot properly represent constants wider than
2209 2 * HOST_BITS_PER_WIDE_INT bits, but we still need the types,
2210 as they are used by the i386 vector extensions and friends. */
2211 if (precision > HOST_BITS_PER_WIDE_INT * 2)
2212 precision = HOST_BITS_PER_WIDE_INT * 2;
2214 TYPE_UNSIGNED (type) = 1;
2216 set_min_and_max_values_for_integral_type (type, precision,
2217 /*is_unsigned=*/true);
2219 /* Lay out the type: set its alignment, size, etc. */
2223 /* Find the best machine mode to use when referencing a bit field of length
2224 BITSIZE bits starting at BITPOS.
2226 The underlying object is known to be aligned to a boundary of ALIGN bits.
2227 If LARGEST_MODE is not VOIDmode, it means that we should not use a mode
2228 larger than LARGEST_MODE (usually SImode).
2230 If no mode meets all these conditions, we return VOIDmode.
2232 If VOLATILEP is false and SLOW_BYTE_ACCESS is false, we return the
2233 smallest mode meeting these conditions.
2235 If VOLATILEP is false and SLOW_BYTE_ACCESS is true, we return the
2236 largest mode (but a mode no wider than UNITS_PER_WORD) that meets
2237 all the conditions.
2239 If VOLATILEP is true the narrow_volatile_bitfields target hook is used to
2240 decide which of the above modes should be used. */
2243 get_best_mode (int bitsize, int bitpos, unsigned int align,
2244 enum machine_mode largest_mode, int volatilep)
2246 enum machine_mode mode;
2247 unsigned int unit = 0;
2249 /* Find the narrowest integer mode that contains the bit field. */
2250 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
2251 mode = GET_MODE_WIDER_MODE (mode))
2253 unit = GET_MODE_BITSIZE (mode);
2254 if ((bitpos % unit) + bitsize <= unit)
2258 if (mode == VOIDmode
2259 /* It is tempting to omit the following line
2260 if STRICT_ALIGNMENT is true.
2261 But that is incorrect, since if the bitfield uses part of 3 bytes
2262 and we use a 4-byte mode, we could get a spurious segv
2263 if the extra 4th byte is past the end of memory.
2264 (Though at least one Unix compiler ignores this problem:
2265 that on the Sequent 386 machine.) */
2266 || MIN (unit, BIGGEST_ALIGNMENT) > align
2267 || (largest_mode != VOIDmode && unit > GET_MODE_BITSIZE (largest_mode)))
2268 return VOIDmode;
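/* Worked example (illustrative): for a bit field of 9 bits starting at
   bit position 3 in an object aligned to 32 bits, the loop above rejects
   QImode (3 % 8 + 9 == 12 > 8) but accepts HImode (3 % 16 + 9 == 12 <= 16),
   so the narrowest usable mode is HImode. */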
2270 if ((SLOW_BYTE_ACCESS && ! volatilep)
2271 || (volatilep && !targetm.narrow_volatile_bitfield ()))
2273 enum machine_mode wide_mode = VOIDmode, tmode;
2275 for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT); tmode != VOIDmode;
2276 tmode = GET_MODE_WIDER_MODE (tmode))
2278 unit = GET_MODE_BITSIZE (tmode);
2279 if (bitpos / unit == (bitpos + bitsize - 1) / unit
2280 && unit <= BITS_PER_WORD
2281 && unit <= MIN (align, BIGGEST_ALIGNMENT)
2282 && (largest_mode == VOIDmode
2283 || unit <= GET_MODE_BITSIZE (largest_mode)))
2287 if (wide_mode != VOIDmode)
2294 /* Gets minimal and maximal values for MODE (signed or unsigned depending on
2295 SIGN). The returned constants are made to be usable in TARGET_MODE. */
2298 get_mode_bounds (enum machine_mode mode, int sign,
2299 enum machine_mode target_mode,
2300 rtx *mmin, rtx *mmax)
2302 unsigned size = GET_MODE_BITSIZE (mode);
2303 unsigned HOST_WIDE_INT min_val, max_val;
2305 gcc_assert (size <= HOST_BITS_PER_WIDE_INT);
2309 min_val = -((unsigned HOST_WIDE_INT) 1 << (size - 1));
2310 max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1)) - 1;
2315 max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1) << 1) - 1;
2318 *mmin = gen_int_mode (min_val, target_mode);
2319 *mmax = gen_int_mode (max_val, target_mode);
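/* Worked example (illustrative): for QImode (8 bits), a nonzero SIGN
   yields the bounds -128 .. 127 and a zero SIGN yields 0 .. 255; both
   are then materialized as constants in TARGET_MODE via gen_int_mode. */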
2322 #include "gt-stor-layout.h"