path: root/contrib/gcc/stor-layout.c
author    kan <kan@FreeBSD.org>  2007-05-19 01:19:51 +0000
committer kan <kan@FreeBSD.org>  2007-05-19 01:19:51 +0000
commit    1f9ea4d0a40cca64d60cf4dab152349da7b9dddf (patch)
tree      0cb530c9c38af219e6dda2994c078b6b2b9ad853 /contrib/gcc/stor-layout.c
parent    4895159b2b4f648051c1f139faa7b6dc50c2bfcb (diff)
GCC 4.2.0 release.
Diffstat (limited to 'contrib/gcc/stor-layout.c')
-rw-r--r--  contrib/gcc/stor-layout.c  |  859
1 file changed, 473 insertions(+), 386 deletions(-)
diff --git a/contrib/gcc/stor-layout.c b/contrib/gcc/stor-layout.c
index 4527fe4..147ff37 100644
--- a/contrib/gcc/stor-layout.c
+++ b/contrib/gcc/stor-layout.c
@@ -1,6 +1,7 @@
/* C-compiler utilities for types and variables storage layout
Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1996, 1998,
- 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
+ Free Software Foundation, Inc.
This file is part of GCC.
@@ -16,8 +17,8 @@ for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA. */
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
#include "config.h"
@@ -30,17 +31,13 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#include "flags.h"
#include "function.h"
#include "expr.h"
+#include "output.h"
#include "toplev.h"
#include "ggc.h"
#include "target.h"
#include "langhooks.h"
-
-/* Set to one when set_sizetype has been called. */
-static int sizetype_set;
-
-/* List of types created before set_sizetype has been called. We do not
- make this a GGC root since we want these nodes to be reclaimed. */
-static tree early_type_list;
+#include "regs.h"
+#include "params.h"
/* Data type for the expressions representing sizes of data types.
It is the first integer type laid out. */
@@ -48,11 +45,9 @@ tree sizetype_tab[(int) TYPE_KIND_LAST];
/* If nonzero, this is an upper limit on alignment of structure fields.
The value is measured in bits. */
-unsigned int maximum_field_alignment;
-
-/* If nonzero, the alignment of a bitstring or (power-)set value, in bits.
- May be overridden by front-ends. */
-unsigned int set_alignment = 0;
+unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;
+/* ... and its original value in bytes, specified via -fpack-struct=<value>. */
+unsigned int initial_max_fld_align = TARGET_DEFAULT_PACK_STRUCT;
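The packing cap that maximum_field_alignment now inherits from TARGET_DEFAULT_PACK_STRUCT is the same limit that -fpack-struct=<n> or #pragma pack(n) imposes. A minimal stand-alone C sketch of the effect, outside GCC itself; the exact sizes assume a typical target with 4-byte, 4-aligned int:

#include <stdio.h>

struct unpacked { char c; int i; };   /* int padded to offset 4 -> size 8 */

#pragma pack(1)                       /* cap field alignment at 1 byte,   */
struct packed { char c; int i; };     /* like -fpack-struct=1 -> size 5   */
#pragma pack()

int
main (void)
{
  printf ("%zu %zu\n", sizeof (struct unpacked), sizeof (struct packed));
  return 0;
}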
/* Nonzero if all REFERENCE_TYPEs are internal and hence should be
allocated in Pmode, not ptr_mode. Set only by internal_reference_types
@@ -66,19 +61,12 @@ static void place_union_field (record_layout_info, tree);
static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
HOST_WIDE_INT, tree);
#endif
-static unsigned int update_alignment_for_field (record_layout_info, tree,
- unsigned int);
extern void debug_rli (record_layout_info);
/* SAVE_EXPRs for sizes of types and decls, waiting to be expanded. */
static GTY(()) tree pending_sizes;
-/* Nonzero means cannot safely call expand_expr now,
- so put variable sizes onto `pending_sizes' instead. */
-
-int immediate_size_expand;
-
/* Show that REFERENCE_TYPES are internal and should be Pmode. Called only
by front end. */
@@ -94,29 +82,11 @@ tree
get_pending_sizes (void)
{
tree chain = pending_sizes;
- tree t;
-
- /* Put each SAVE_EXPR into the current function. */
- for (t = chain; t; t = TREE_CHAIN (t))
- SAVE_EXPR_CONTEXT (TREE_VALUE (t)) = current_function_decl;
pending_sizes = 0;
return chain;
}
-/* Return nonzero if EXPR is present on the pending sizes list. */
-
-int
-is_pending_size (tree expr)
-{
- tree t;
-
- for (t = pending_sizes; t; t = TREE_CHAIN (t))
- if (TREE_VALUE (t) == expr)
- return 1;
- return 0;
-}
-
/* Add EXPR to the pending sizes list. */
void
@@ -136,9 +106,7 @@ put_pending_size (tree expr)
void
put_pending_sizes (tree chain)
{
- if (pending_sizes)
- abort ();
-
+ gcc_assert (!pending_sizes);
pending_sizes = chain;
}
@@ -155,16 +123,11 @@ variable_size (tree size)
just return SIZE unchanged. Likewise for self-referential sizes and
constant sizes. */
if (TREE_CONSTANT (size)
- || (*lang_hooks.decls.global_bindings_p) () < 0
+ || lang_hooks.decls.global_bindings_p () < 0
|| CONTAINS_PLACEHOLDER_P (size))
return size;
- if (TREE_CODE (size) == MINUS_EXPR && integer_onep (TREE_OPERAND (size, 1)))
- /* If this is the upper bound of a C array, leave the minus 1 outside
- the SAVE_EXPR so it can be folded away. */
- TREE_OPERAND (size, 0) = save = save_expr (TREE_OPERAND (size, 0));
- else
- size = save = save_expr (size);
+ size = save_expr (size);
/* If an array with a variable number of elements is declared, and
the elements require destruction, we will emit a cleanup for the
@@ -174,27 +137,24 @@ variable_size (tree size)
`unsaved', i.e., all SAVE_EXPRs are recalculated. However, we do
not wish to do that here; the array-size is the same in both
places. */
- if (TREE_CODE (save) == SAVE_EXPR)
- SAVE_EXPR_PERSISTENT_P (save) = 1;
+ save = skip_simple_arithmetic (size);
+
+ if (cfun && cfun->x_dont_save_pending_sizes_p)
+ /* The front-end doesn't want us to keep a list of the expressions
+ that determine sizes for variable size objects. Trust it. */
+ return size;
- if ((*lang_hooks.decls.global_bindings_p) ())
+ if (lang_hooks.decls.global_bindings_p ())
{
if (TREE_CONSTANT (size))
- error ("type size can't be explicitly evaluated");
+ error ("type size can%'t be explicitly evaluated");
else
error ("variable-size type declared outside of any function");
return size_one_node;
}
- if (immediate_size_expand)
- expand_expr (save, const0_rtx, VOIDmode, 0);
- else if (cfun != 0 && cfun->x_dont_save_pending_sizes_p)
- /* The front-end doesn't want us to keep a list of the expressions
- that determine sizes for variable size objects. */
- ;
- else
- put_pending_size (save);
+ put_pending_size (save);
return size;
}
@@ -230,15 +190,16 @@ mode_for_size (unsigned int size, enum mode_class class, int limit)
enum machine_mode
mode_for_size_tree (tree size, enum mode_class class, int limit)
{
- if (TREE_CODE (size) != INTEGER_CST
- || TREE_OVERFLOW (size)
- /* What we really want to say here is that the size can fit in a
- host integer, but we know there's no way we'd find a mode for
- this many bits, so there's no point in doing the precise test. */
- || compare_tree_int (size, 1000) > 0)
+ unsigned HOST_WIDE_INT uhwi;
+ unsigned int ui;
+
+ if (!host_integerp (size, 1))
return BLKmode;
- else
- return mode_for_size (tree_low_cst (size, 1), class, limit);
+ uhwi = tree_low_cst (size, 1);
+ ui = uhwi;
+ if (uhwi != ui)
+ return BLKmode;
+ return mode_for_size (ui, class, limit);
}
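The rewrite above drops the old compare_tree_int (size, 1000) heuristic in favor of an exact test: the value is narrowed from unsigned HOST_WIDE_INT to unsigned int and compared back, and any size that does not survive the round trip yields BLKmode. A stand-alone sketch of that narrowing idiom in plain C (ordinary integer types standing in for the GCC ones):

#include <stdio.h>

/* Mirror of the uhwi/ui round-trip test in mode_for_size_tree:
   narrow, widen back, and compare.  */
static int
fits_in_uint (unsigned long long uhwi)
{
  unsigned int ui = (unsigned int) uhwi;
  return uhwi == ui;
}

int
main (void)
{
  printf ("%d\n", fits_in_uint (1000ULL));     /* 1: a mode may exist   */
  printf ("%d\n", fits_in_uint (1ULL << 40));  /* 0: BLKmode, no search */
  return 0;
}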
/* Similar, but never return BLKmode; return the narrowest mode that
@@ -256,7 +217,7 @@ smallest_mode_for_size (unsigned int size, enum mode_class class)
if (GET_MODE_PRECISION (mode) >= size)
return mode;
- abort ();
+ gcc_unreachable ();
}
/* Find an integer mode of the exact same size, or BLKmode on failure. */
@@ -273,6 +234,7 @@ int_mode_for_mode (enum machine_mode mode)
case MODE_COMPLEX_INT:
case MODE_COMPLEX_FLOAT:
case MODE_FLOAT:
+ case MODE_DECIMAL_FLOAT:
case MODE_VECTOR_INT:
case MODE_VECTOR_FLOAT:
mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0);
@@ -286,7 +248,7 @@ int_mode_for_mode (enum machine_mode mode)
case MODE_CC:
default:
- abort ();
+ gcc_unreachable ();
}
return mode;
@@ -301,26 +263,6 @@ get_mode_alignment (enum machine_mode mode)
return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode]*BITS_PER_UNIT));
}
-/* Return the value of VALUE, rounded up to a multiple of DIVISOR.
- This can only be applied to objects of a sizetype. */
-
-tree
-round_up (tree value, int divisor)
-{
- tree arg = size_int_type (divisor, TREE_TYPE (value));
-
- return size_binop (MULT_EXPR, size_binop (CEIL_DIV_EXPR, value, arg), arg);
-}
-
-/* Likewise, but round down. */
-
-tree
-round_down (tree value, int divisor)
-{
- tree arg = size_int_type (divisor, TREE_TYPE (value));
-
- return size_binop (MULT_EXPR, size_binop (FLOOR_DIV_EXPR, value, arg), arg);
-}
/* Subroutine of layout_decl: Force alignment required for the data type.
But if the decl itself wants greater alignment, don't override that. */
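The round_up and round_down helpers deleted above compute ceiling and floor rounding by dividing and re-multiplying (CEIL_DIV_EXPR / FLOOR_DIV_EXPR on trees). The same arithmetic on plain integers, as a sketch rather than the tree-level API; it assumes divisor > 0 and that value + divisor - 1 does not overflow:

/* Round VALUE up to a multiple of DIVISOR.  */
static unsigned long
round_up_ul (unsigned long value, unsigned long divisor)
{
  return (value + divisor - 1) / divisor * divisor;
}

/* Round VALUE down to a multiple of DIVISOR.  */
static unsigned long
round_down_ul (unsigned long value, unsigned long divisor)
{
  return value / divisor * divisor;
}

For example, round_up_ul (37, 8) is 40 and round_down_ul (37, 8) is 32.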
@@ -358,9 +300,9 @@ layout_decl (tree decl, unsigned int known_align)
if (code == CONST_DECL)
return;
- else if (code != VAR_DECL && code != PARM_DECL && code != RESULT_DECL
- && code != TYPE_DECL && code != FIELD_DECL)
- abort ();
+
+ gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL
+ || code == TYPE_DECL ||code == FIELD_DECL);
rtl = DECL_RTL_IF_SET (decl);
@@ -377,7 +319,7 @@ layout_decl (tree decl, unsigned int known_align)
size in bytes from the size in bits. If we have already set the mode,
don't set it again since we can be called twice for FIELD_DECLs. */
- TREE_UNSIGNED (decl) = TREE_UNSIGNED (type);
+ DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
if (DECL_MODE (decl) == VOIDmode)
DECL_MODE (decl) = TYPE_MODE (type);
@@ -388,8 +330,8 @@ layout_decl (tree decl, unsigned int known_align)
}
else if (DECL_SIZE_UNIT (decl) == 0)
DECL_SIZE_UNIT (decl)
- = convert (sizetype, size_binop (CEIL_DIV_EXPR, DECL_SIZE (decl),
- bitsize_unit_node));
+ = fold_convert (sizetype, size_binop (CEIL_DIV_EXPR, DECL_SIZE (decl),
+ bitsize_unit_node));
if (code != FIELD_DECL)
/* For non-fields, update the alignment from the type. */
@@ -398,17 +340,22 @@ layout_decl (tree decl, unsigned int known_align)
/* For fields, it's a bit more complicated... */
{
bool old_user_align = DECL_USER_ALIGN (decl);
+ bool zero_bitfield = false;
+ bool packed_p = DECL_PACKED (decl);
+ unsigned int mfa;
if (DECL_BIT_FIELD (decl))
{
DECL_BIT_FIELD_TYPE (decl) = type;
/* A zero-length bit-field affects the alignment of the next
- field. */
+ field. In essence such bit-fields are not influenced by
+ any packing due to #pragma pack or attribute packed. */
if (integer_zerop (DECL_SIZE (decl))
- && ! DECL_PACKED (decl)
- && ! (*targetm.ms_bitfield_layout_p) (DECL_FIELD_CONTEXT (decl)))
+ && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
{
+ zero_bitfield = true;
+ packed_p = false;
#ifdef PCC_BITFIELD_TYPE_MATTERS
if (PCC_BITFIELD_TYPE_MATTERS)
do_type_align (type, decl);
@@ -435,7 +382,7 @@ layout_decl (tree decl, unsigned int known_align)
enum machine_mode xmode
= mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1);
- if (xmode != BLKmode
+ if (xmode != BLKmode
&& (known_align == 0
|| known_align >= GET_MODE_ALIGNMENT (xmode)))
{
@@ -452,7 +399,7 @@ layout_decl (tree decl, unsigned int known_align)
&& DECL_ALIGN (decl) >= TYPE_ALIGN (type))
DECL_BIT_FIELD (decl) = 0;
}
- else if (DECL_PACKED (decl) && DECL_USER_ALIGN (decl))
+ else if (packed_p && DECL_USER_ALIGN (decl))
/* Don't touch DECL_ALIGN. For other packed fields, go ahead and
round up; we'll reduce it again below. We want packing to
supersede USER_ALIGN inherited from the type, but defer to
@@ -467,17 +414,14 @@ layout_decl (tree decl, unsigned int known_align)
Note that do_type_align may set DECL_USER_ALIGN, so we need to
check old_user_align instead. */
- if (DECL_PACKED (decl)
+ if (packed_p
&& !old_user_align
&& (DECL_NONADDRESSABLE_P (decl)
|| DECL_SIZE_UNIT (decl) == 0
|| TREE_CODE (DECL_SIZE_UNIT (decl)) == INTEGER_CST))
DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), BITS_PER_UNIT);
- /* Should this be controlled by DECL_USER_ALIGN, too? */
- if (maximum_field_alignment != 0)
- DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), maximum_field_alignment);
- if (! DECL_USER_ALIGN (decl))
+ if (! packed_p && ! DECL_USER_ALIGN (decl))
{
/* Some targets (i.e. i386, VMS) limit struct field alignment
to a lower boundary than alignment of variables unless
@@ -490,6 +434,14 @@ layout_decl (tree decl, unsigned int known_align)
DECL_ALIGN (decl) = ADJUST_FIELD_ALIGN (decl, DECL_ALIGN (decl));
#endif
}
+
+ if (zero_bitfield)
+ mfa = initial_max_fld_align * BITS_PER_UNIT;
+ else
+ mfa = maximum_field_alignment;
+ /* Should this be controlled by DECL_USER_ALIGN, too? */
+ if (mfa != 0)
+ DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), mfa);
}
/* Evaluate nonconstant size only once, either now or as soon as safe. */
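The zero_bitfield path above realigns using initial_max_fld_align instead of maximum_field_alignment, since a zero-width bit-field is meant to escape #pragma pack and attribute packed. What a zero-width bit-field does to layout, as a hedged stand-alone C example; offsets depend on the target ABI, so the program prints them rather than hard-coding sizes:

#include <stdio.h>
#include <stddef.h>

struct no_gap   { char a; char b; };
struct with_gap { char a; int : 0; char b; };  /* ':0' pushes b to the
                                                  next int boundary */
int
main (void)
{
  printf ("no_gap:   b at %zu, size %zu\n",
          offsetof (struct no_gap, b), sizeof (struct no_gap));
  printf ("with_gap: b at %zu, size %zu\n",
          offsetof (struct with_gap, b), sizeof (struct with_gap));
  return 0;
}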
@@ -512,10 +464,10 @@ layout_decl (tree decl, unsigned int known_align)
int size_as_int = TREE_INT_CST_LOW (size);
if (compare_tree_int (size, size_as_int) == 0)
- warning ("%Jsize of '%D' is %d bytes", decl, decl, size_as_int);
+ warning (0, "size of %q+D is %d bytes", decl, size_as_int);
else
- warning ("%Jsize of '%D' is larger than %d bytes",
- decl, decl, larger_than_size);
+ warning (0, "size of %q+D is larger than %wd bytes",
+ decl, larger_than_size);
}
}
@@ -528,11 +480,26 @@ layout_decl (tree decl, unsigned int known_align)
SET_DECL_RTL (decl, rtl);
}
}
+
+/* Given a VAR_DECL, PARM_DECL or RESULT_DECL, clears the results of
+ a previous call to layout_decl and calls it again. */
+
+void
+relayout_decl (tree decl)
+{
+ DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
+ DECL_MODE (decl) = VOIDmode;
+ if (!DECL_USER_ALIGN (decl))
+ DECL_ALIGN (decl) = 0;
+ SET_DECL_RTL (decl, 0);
+
+ layout_decl (decl, 0);
+}
/* Hook for a front-end function that can modify the record layout as needed
immediately before it is finalized. */
-void (*lang_adjust_rli) (record_layout_info) = 0;
+static void (*lang_adjust_rli) (record_layout_info) = 0;
void
set_lang_adjust_rli (void (*f) (record_layout_info))
@@ -572,6 +539,7 @@ start_record_layout (tree t)
rli->prev_field = 0;
rli->pending_statics = 0;
rli->packed_maybe_necessary = 0;
+ rli->remaining_in_alignment = 0;
return rli;
}
@@ -583,7 +551,8 @@ tree
bit_from_pos (tree offset, tree bitpos)
{
return size_binop (PLUS_EXPR, bitpos,
- size_binop (MULT_EXPR, convert (bitsizetype, offset),
+ size_binop (MULT_EXPR,
+ fold_convert (bitsizetype, offset),
bitsize_unit_node));
}
@@ -591,9 +560,9 @@ tree
byte_from_pos (tree offset, tree bitpos)
{
return size_binop (PLUS_EXPR, offset,
- convert (sizetype,
- size_binop (TRUNC_DIV_EXPR, bitpos,
- bitsize_unit_node)));
+ fold_convert (sizetype,
+ size_binop (TRUNC_DIV_EXPR, bitpos,
+ bitsize_unit_node)));
}
void
@@ -601,9 +570,9 @@ pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align,
tree pos)
{
*poffset = size_binop (MULT_EXPR,
- convert (sizetype,
- size_binop (FLOOR_DIV_EXPR, pos,
- bitsize_int (off_align))),
+ fold_convert (sizetype,
+ size_binop (FLOOR_DIV_EXPR, pos,
+ bitsize_int (off_align))),
size_int (off_align / BITS_PER_UNIT));
*pbitpos = size_binop (FLOOR_MOD_EXPR, pos, bitsize_int (off_align));
}
@@ -623,7 +592,8 @@ normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align)
*poffset
= size_binop (PLUS_EXPR, *poffset,
- size_binop (MULT_EXPR, convert (sizetype, extra_aligns),
+ size_binop (MULT_EXPR,
+ fold_convert (sizetype, extra_aligns),
size_int (off_align / BITS_PER_UNIT)));
*pbitpos
@@ -643,6 +613,11 @@ debug_rli (record_layout_info rli)
fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n",
rli->record_align, rli->unpacked_align,
rli->offset_align);
+
+ /* The ms_struct code is the only that uses this. */
+ if (targetm.ms_bitfield_layout_p (rli->t))
+ fprintf (stderr, "remaining in alignment = %u\n", rli->remaining_in_alignment);
+
if (rli->packed_maybe_necessary)
fprintf (stderr, "packed may be necessary\n");
@@ -679,11 +654,11 @@ rli_size_so_far (record_layout_info rli)
}
/* FIELD is about to be added to RLI->T. The alignment (in bits) of
- the next available location is given by KNOWN_ALIGN. Update the
- variable alignment fields in RLI, and return the alignment to give
- the FIELD. */
+ the next available location within the record is given by KNOWN_ALIGN.
+ Update the variable alignment fields in RLI, and return the alignment
+ to give the FIELD. */
-static unsigned int
+unsigned int
update_alignment_for_field (record_layout_info rli, tree field,
unsigned int known_align)
{
@@ -695,6 +670,10 @@ update_alignment_for_field (record_layout_info rli, tree field,
bool user_align;
bool is_bitfield;
+ /* Do not attempt to align an ERROR_MARK node */
+ if (TREE_CODE (type) == ERROR_MARK)
+ return 0;
+
/* Lay out the field so we know what alignment it needs. */
layout_decl (field, known_align);
desired_align = DECL_ALIGN (field);
@@ -707,7 +686,7 @@ update_alignment_for_field (record_layout_info rli, tree field,
/* Record must have at least as much alignment as any field.
Otherwise, the alignment of the field within the record is
meaningless. */
- if (is_bitfield && (* targetm.ms_bitfield_layout_p) (rli->t))
+ if (targetm.ms_bitfield_layout_p (rli->t))
{
/* Here, the alignment of the underlying type of a bitfield can
affect the alignment of a record; even a zero-sized field
@@ -715,11 +694,12 @@ update_alignment_for_field (record_layout_info rli, tree field,
the type, except that for zero-size bitfields this only
applies if there was an immediately prior, nonzero-size
bitfield. (That's the way it is, experimentally.) */
- if (! integer_zerop (DECL_SIZE (field))
- ? ! DECL_PACKED (field)
- : (rli->prev_field
- && DECL_BIT_FIELD_TYPE (rli->prev_field)
- && ! integer_zerop (DECL_SIZE (rli->prev_field))))
+ if ((!is_bitfield && !DECL_PACKED (field))
+ || (!integer_zerop (DECL_SIZE (field))
+ ? !DECL_PACKED (field)
+ : (rli->prev_field
+ && DECL_BIT_FIELD_TYPE (rli->prev_field)
+ && ! integer_zerop (DECL_SIZE (rli->prev_field)))))
{
unsigned int type_align = TYPE_ALIGN (type);
type_align = MAX (type_align, desired_align);
@@ -733,8 +713,10 @@ update_alignment_for_field (record_layout_info rli, tree field,
else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS)
{
/* Named bit-fields cause the entire structure to have the
- alignment implied by their type. */
- if (DECL_NAME (field) != 0)
+ alignment implied by their type. Some targets also apply the same
+ rules to unnamed bitfields. */
+ if (DECL_NAME (field) != 0
+ || targetm.align_anon_bitfield ())
{
unsigned int type_align = TYPE_ALIGN (type);
@@ -743,7 +725,16 @@ update_alignment_for_field (record_layout_info rli, tree field,
type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif
- if (maximum_field_alignment != 0)
+ /* Targets might chose to handle unnamed and hence possibly
+ zero-width bitfield. Those are not influenced by #pragmas
+ or packed attributes. */
+ if (integer_zerop (DECL_SIZE (field)))
+ {
+ if (initial_max_fld_align)
+ type_align = MIN (type_align,
+ initial_max_fld_align * BITS_PER_UNIT);
+ }
+ else if (maximum_field_alignment != 0)
type_align = MIN (type_align, maximum_field_alignment);
else if (DECL_PACKED (field))
type_align = MIN (type_align, BITS_PER_UNIT);
@@ -784,14 +775,20 @@ place_union_field (record_layout_info rli, tree field)
DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node;
SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT);
+ /* If this is an ERROR_MARK return *after* having set the
+ field at the start of the union. This helps when parsing
+ invalid fields. */
+ if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK)
+ return;
+
/* We assume the union's size will be a multiple of a byte so we don't
bother with BITPOS. */
if (TREE_CODE (rli->t) == UNION_TYPE)
rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
- rli->offset = fold (build (COND_EXPR, sizetype,
+ rli->offset = fold_build3 (COND_EXPR, sizetype,
DECL_QUALIFIER (field),
- DECL_SIZE_UNIT (field), rli->offset));
+ DECL_SIZE_UNIT (field), rli->offset);
}
#if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
@@ -830,8 +827,7 @@ place_field (record_layout_info rli, tree field)
/* The type of this field. */
tree type = TREE_TYPE (field);
- if (TREE_CODE (field) == ERROR_MARK || TREE_CODE (type) == ERROR_MARK)
- return;
+ gcc_assert (TREE_CODE (field) != ERROR_MARK);
/* If FIELD is static, then treat it like a separate variable, not
really like a structure field. If it is a FUNCTION_DECL, it's a
@@ -857,13 +853,23 @@ place_field (record_layout_info rli, tree field)
return;
}
+ else if (TREE_CODE (type) == ERROR_MARK)
+ {
+ /* Place this field at the current allocation position, so we
+ maintain monotonicity. */
+ DECL_FIELD_OFFSET (field) = rli->offset;
+ DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
+ SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
+ return;
+ }
+
/* Work out the known alignment so far. Note that A & (-A) is the
value of the least-significant bit in A that is one. */
if (! integer_zerop (rli->bitpos))
known_align = (tree_low_cst (rli->bitpos, 1)
& - tree_low_cst (rli->bitpos, 1));
else if (integer_zerop (rli->offset))
- known_align = BIGGEST_ALIGNMENT;
+ known_align = 0;
else if (host_integerp (rli->offset, 1))
known_align = (BITS_PER_UNIT
* (tree_low_cst (rli->offset, 1)
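The known_align computation leans on the identity stated in the comment: for nonzero A, the two's-complement expression A & -A isolates the least-significant set bit, i.e. the largest power of two dividing A, which is exactly the alignment the current bit position is guaranteed to have. A stand-alone sketch:

#include <stdio.h>

/* A & -A keeps only the lowest set bit of A.  */
static unsigned long
lowest_set_bit (unsigned long a)
{
  return a & -a;
}

int
main (void)
{
  printf ("%lu\n", lowest_set_bit (24));   /* 8:  24 = 8 * 3  */
  printf ("%lu\n", lowest_set_bit (40));   /* 8:  40 = 8 * 5  */
  printf ("%lu\n", lowest_set_bit (96));   /* 32: 96 = 32 * 3 */
  return 0;
}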
@@ -872,6 +878,8 @@ place_field (record_layout_info rli, tree field)
known_align = rli->offset_align;
desired_align = update_alignment_for_field (rli, field, known_align);
+ if (known_align == 0)
+ known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
if (warn_packed && DECL_PACKED (field))
{
@@ -880,11 +888,11 @@ place_field (record_layout_info rli, tree field)
if (TYPE_ALIGN (type) > desired_align)
{
if (STRICT_ALIGNMENT)
- warning ("%Jpacked attribute causes inefficient alignment "
- "for '%D'", field, field);
+ warning (OPT_Wattributes, "packed attribute causes "
+ "inefficient alignment for %q+D", field);
else
- warning ("%Jpacked attribute is unnecessary for '%D'",
- field, field);
+ warning (OPT_Wattributes, "packed attribute is "
+ "unnecessary for %q+D", field);
}
}
else
@@ -892,14 +900,15 @@ place_field (record_layout_info rli, tree field)
}
/* Does this field automatically have alignment it needs by virtue
- of the fields that precede it and the record's own alignment? */
- if (known_align < desired_align)
+ of the fields that precede it and the record's own alignment?
+ We already align ms_struct fields, so don't re-align them. */
+ if (known_align < desired_align
+ && !targetm.ms_bitfield_layout_p (rli->t))
{
/* No, we need to skip space before this field.
Bump the cumulative size to multiple of field alignment. */
- if (warn_padded)
- warning ("%Jpadding struct to align '%D'", field, field);
+ warning (OPT_Wpadded, "padding struct to align %q+D", field);
/* If the alignment is still within offset_align, just align
the bit position. */
@@ -910,9 +919,9 @@ place_field (record_layout_info rli, tree field)
/* First adjust OFFSET by the partial bits, then align. */
rli->offset
= size_binop (PLUS_EXPR, rli->offset,
- convert (sizetype,
- size_binop (CEIL_DIV_EXPR, rli->bitpos,
- bitsize_unit_node)));
+ fold_convert (sizetype,
+ size_binop (CEIL_DIV_EXPR, rli->bitpos,
+ bitsize_unit_node)));
rli->bitpos = bitsize_zero_node;
rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT);
@@ -927,7 +936,7 @@ place_field (record_layout_info rli, tree field)
variable-sized fields, we need not worry about compatibility. */
#ifdef PCC_BITFIELD_TYPE_MATTERS
if (PCC_BITFIELD_TYPE_MATTERS
- && ! (* targetm.ms_bitfield_layout_p) (rli->t)
+ && ! targetm.ms_bitfield_layout_p (rli->t)
&& TREE_CODE (field) == FIELD_DECL
&& type != error_mark_node
&& DECL_BIT_FIELD (field)
@@ -960,7 +969,7 @@ place_field (record_layout_info rli, tree field)
#ifdef BITFIELD_NBYTES_LIMITED
if (BITFIELD_NBYTES_LIMITED
- && ! (* targetm.ms_bitfield_layout_p) (rli->t)
+ && ! targetm.ms_bitfield_layout_p (rli->t)
&& TREE_CODE (field) == FIELD_DECL
&& type != error_mark_node
&& DECL_BIT_FIELD_TYPE (field)
@@ -1011,17 +1020,13 @@ place_field (record_layout_info rli, tree field)
Note: for compatibility, we use the type size, not the type alignment
to determine alignment, since that matches the documentation */
- if ((* targetm.ms_bitfield_layout_p) (rli->t)
- && ((DECL_BIT_FIELD_TYPE (field) && ! DECL_PACKED (field))
- || (rli->prev_field && ! DECL_PACKED (rli->prev_field))))
+ if (targetm.ms_bitfield_layout_p (rli->t))
{
- /* At this point, either the prior or current are bitfields,
- (possibly both), and we're dealing with MS packing. */
tree prev_saved = rli->prev_field;
+ tree prev_type = prev_saved ? DECL_BIT_FIELD_TYPE (prev_saved) : NULL;
- /* Is the prior field a bitfield? If so, handle "runs" of same
- type size fields. */
- if (rli->prev_field /* necessarily a bitfield if it exists. */)
+ /* This is a bitfield if it exists. */
+ if (rli->prev_field)
{
/* If both are bitfields, nonzero, and the same size, this is
the middle of a run. Zero declared size fields are special
@@ -1034,27 +1039,29 @@ place_field (record_layout_info rli, tree field)
&& !integer_zerop (DECL_SIZE (rli->prev_field))
&& host_integerp (DECL_SIZE (rli->prev_field), 0)
&& host_integerp (TYPE_SIZE (type), 0)
- && simple_cst_equal (TYPE_SIZE (type),
- TYPE_SIZE (TREE_TYPE (rli->prev_field))))
+ && simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type)))
{
/* We're in the middle of a run of equal type size fields; make
sure we realign if we run out of bits. (Not decl size,
type size!) */
- HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 0);
+ HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 1);
if (rli->remaining_in_alignment < bitsize)
{
+ HOST_WIDE_INT typesize = tree_low_cst (TYPE_SIZE (type), 1);
+
/* out of bits; bump up to next 'word'. */
- rli->offset = DECL_FIELD_OFFSET (rli->prev_field);
rli->bitpos
- = size_binop (PLUS_EXPR, TYPE_SIZE (type),
- DECL_FIELD_BIT_OFFSET (rli->prev_field));
+ = size_binop (PLUS_EXPR, rli->bitpos,
+ bitsize_int (rli->remaining_in_alignment));
rli->prev_field = field;
- rli->remaining_in_alignment
- = tree_low_cst (TYPE_SIZE (type), 0);
+ if (typesize < bitsize)
+ rli->remaining_in_alignment = 0;
+ else
+ rli->remaining_in_alignment = typesize - bitsize;
}
-
- rli->remaining_in_alignment -= bitsize;
+ else
+ rli->remaining_in_alignment -= bitsize;
}
else
{
@@ -1069,11 +1076,9 @@ place_field (record_layout_info rli, tree field)
if (!integer_zerop (DECL_SIZE (rli->prev_field)))
{
- tree type_size = TYPE_SIZE (TREE_TYPE (rli->prev_field));
-
rli->bitpos
- = size_binop (PLUS_EXPR, type_size,
- DECL_FIELD_BIT_OFFSET (rli->prev_field));
+ = size_binop (PLUS_EXPR, rli->bitpos,
+ bitsize_int (rli->remaining_in_alignment));
}
else
/* We "use up" size zero fields; the code below should behave
@@ -1083,7 +1088,7 @@ place_field (record_layout_info rli, tree field)
/* Cause a new bitfield to be captured, either this time (if
currently a bitfield) or next time we see one. */
if (!DECL_BIT_FIELD_TYPE(field)
- || integer_zerop (DECL_SIZE (field)))
+ || integer_zerop (DECL_SIZE (field)))
rli->prev_field = NULL;
}
@@ -1103,9 +1108,8 @@ place_field (record_layout_info rli, tree field)
there wasn't. */
if (!DECL_BIT_FIELD_TYPE (field)
- || ( prev_saved != NULL
- ? !simple_cst_equal (TYPE_SIZE (type),
- TYPE_SIZE (TREE_TYPE (prev_saved)))
+ || (prev_saved != NULL
+ ? !simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type))
: !integer_zerop (DECL_SIZE (field)) ))
{
/* Never smaller than a byte for compatibility. */
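As the surrounding code and the earlier comment note, MS layout keys bitfield runs on the declared type's size: a field whose type size differs from the previous run cannot share its storage unit and opens a new one. A hedged illustration; the attribute and the exact sizes are target-dependent (this is what one would typically see on x86 with 4-byte int, where the default GCC layout packs both fields into a single unit):

#include <stdio.h>

/* Under MS bitfield layout, 'b' cannot join 'a's run because char and
   int differ in type size, so it starts a fresh int-aligned unit.  */
struct mixed
{
  char a : 4;
  int   b : 4;
} __attribute__ ((ms_struct));

int
main (void)
{
  /* Typically 8 with ms_struct, 4 with the default layout.  */
  printf ("%zu\n", sizeof (struct mixed));
  return 0;
}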
@@ -1118,22 +1122,19 @@ place_field (record_layout_info rli, tree field)
if (DECL_SIZE (field) != NULL
&& host_integerp (TYPE_SIZE (TREE_TYPE (field)), 0)
&& host_integerp (DECL_SIZE (field), 0))
- rli->remaining_in_alignment
- = tree_low_cst (TYPE_SIZE (TREE_TYPE(field)), 0)
- - tree_low_cst (DECL_SIZE (field), 0);
+ {
+ HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 1);
+ HOST_WIDE_INT typesize
+ = tree_low_cst (TYPE_SIZE (TREE_TYPE (field)), 1);
+
+ if (typesize < bitsize)
+ rli->remaining_in_alignment = 0;
+ else
+ rli->remaining_in_alignment = typesize - bitsize;
+ }
/* Now align (conventionally) for the new type. */
- if (!DECL_PACKED(field))
- type_align = MAX(TYPE_ALIGN (type), type_align);
-
- if (prev_saved
- && DECL_BIT_FIELD_TYPE (prev_saved)
- /* If the previous bit-field is zero-sized, we've already
- accounted for its alignment needs (or ignored it, if
- appropriate) while placing it. */
- && ! integer_zerop (DECL_SIZE (prev_saved)))
- type_align = MAX (type_align,
- TYPE_ALIGN (TREE_TYPE (prev_saved)));
+ type_align = TYPE_ALIGN (TREE_TYPE (field));
if (maximum_field_alignment != 0)
type_align = MIN (type_align, maximum_field_alignment);
@@ -1159,20 +1160,22 @@ place_field (record_layout_info rli, tree field)
actual_align = (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
& - tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1));
else if (integer_zerop (DECL_FIELD_OFFSET (field)))
- actual_align = BIGGEST_ALIGNMENT;
+ actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
else if (host_integerp (DECL_FIELD_OFFSET (field), 1))
actual_align = (BITS_PER_UNIT
* (tree_low_cst (DECL_FIELD_OFFSET (field), 1)
& - tree_low_cst (DECL_FIELD_OFFSET (field), 1)));
else
actual_align = DECL_OFFSET_ALIGN (field);
+ /* ACTUAL_ALIGN is still the actual alignment *within the record* .
+ store / extract bit field operations will check the alignment of the
+ record against the mode of bit fields. */
if (known_align != actual_align)
layout_decl (field, actual_align);
- /* Only the MS bitfields use this. */
- if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE(field))
- rli->prev_field = field;
+ if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE (field))
+ rli->prev_field = field;
/* Now add size of this field to the size of the record. If the size is
not constant, treat the field as being a multiple of bytes and just
@@ -1183,19 +1186,34 @@ place_field (record_layout_info rli, tree field)
is printed in finish_struct. */
if (DECL_SIZE (field) == 0)
/* Do nothing. */;
- else if (TREE_CODE (DECL_SIZE_UNIT (field)) != INTEGER_CST
- || TREE_CONSTANT_OVERFLOW (DECL_SIZE_UNIT (field)))
+ else if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST
+ || TREE_CONSTANT_OVERFLOW (DECL_SIZE (field)))
{
rli->offset
= size_binop (PLUS_EXPR, rli->offset,
- convert (sizetype,
- size_binop (CEIL_DIV_EXPR, rli->bitpos,
- bitsize_unit_node)));
+ fold_convert (sizetype,
+ size_binop (CEIL_DIV_EXPR, rli->bitpos,
+ bitsize_unit_node)));
rli->offset
= size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field));
rli->bitpos = bitsize_zero_node;
rli->offset_align = MIN (rli->offset_align, desired_align);
}
+ else if (targetm.ms_bitfield_layout_p (rli->t))
+ {
+ rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
+
+ /* If we ended a bitfield before the full length of the type then
+ pad the struct out to the full length of the last type. */
+ if ((TREE_CHAIN (field) == NULL
+ || TREE_CODE (TREE_CHAIN (field)) != FIELD_DECL)
+ && DECL_BIT_FIELD_TYPE (field)
+ && !integer_zerop (DECL_SIZE (field)))
+ rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos,
+ bitsize_int (rli->remaining_in_alignment));
+
+ normalize_rli (rli);
+ }
else
{
rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
@@ -1236,12 +1254,12 @@ finalize_record_size (record_layout_info rli)
/* Round the size up to be a multiple of the required alignment. */
TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t));
- TYPE_SIZE_UNIT (rli->t) = round_up (unpadded_size_unit,
- TYPE_ALIGN (rli->t) / BITS_PER_UNIT);
+ TYPE_SIZE_UNIT (rli->t)
+ = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));
- if (warn_padded && TREE_CONSTANT (unpadded_size)
+ if (TREE_CONSTANT (unpadded_size)
&& simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0)
- warning ("padding struct size to alignment boundary");
+ warning (OPT_Wpadded, "padding struct size to alignment boundary");
if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE
&& TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary
@@ -1271,16 +1289,19 @@ finalize_record_size (record_layout_info rli)
name = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (rli->t)));
if (STRICT_ALIGNMENT)
- warning ("packed attribute causes inefficient alignment for `%s'", name);
+ warning (OPT_Wpacked, "packed attribute causes inefficient "
+ "alignment for %qs", name);
else
- warning ("packed attribute is unnecessary for `%s'", name);
+ warning (OPT_Wpacked,
+ "packed attribute is unnecessary for %qs", name);
}
else
{
if (STRICT_ALIGNMENT)
- warning ("packed attribute causes inefficient alignment");
+ warning (OPT_Wpacked,
+ "packed attribute causes inefficient alignment");
else
- warning ("packed attribute is unnecessary");
+ warning (OPT_Wpacked, "packed attribute is unnecessary");
}
}
}
@@ -1336,9 +1357,12 @@ compute_record_mode (tree type)
#endif /* MEMBER_TYPE_FORCES_BLK */
}
- /* If we only have one real field; use its mode. This only applies to
- RECORD_TYPE. This does not apply to unions. */
- if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode)
+ /* If we only have one real field; use its mode if that mode's size
+ matches the type's size. This only applies to RECORD_TYPE. This
+ does not apply to unions. */
+ if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode
+ && host_integerp (TYPE_SIZE (type), 1)
+ && GET_MODE_BITSIZE (mode) == TREE_INT_CST_LOW (TYPE_SIZE (type)))
TYPE_MODE (type) = mode;
else
TYPE_MODE (type) = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1);
@@ -1374,8 +1398,15 @@ finalize_type_size (tree type)
&& TREE_CODE (type) != QUAL_UNION_TYPE
&& TREE_CODE (type) != ARRAY_TYPE)))
{
- TYPE_ALIGN (type) = GET_MODE_ALIGNMENT (TYPE_MODE (type));
- TYPE_USER_ALIGN (type) = 0;
+ unsigned mode_align = GET_MODE_ALIGNMENT (TYPE_MODE (type));
+
+ /* Don't override a larger alignment requirement coming from a user
+ alignment of one of the fields. */
+ if (mode_align >= TYPE_ALIGN (type))
+ {
+ TYPE_ALIGN (type) = mode_align;
+ TYPE_USER_ALIGN (type) = 0;
+ }
}
/* Do machine-dependent extra alignment. */
@@ -1391,15 +1422,15 @@ finalize_type_size (tree type)
result will fit in sizetype. We will get more efficient code using
sizetype, so we force a conversion. */
TYPE_SIZE_UNIT (type)
- = convert (sizetype,
- size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type),
- bitsize_unit_node));
+ = fold_convert (sizetype,
+ size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type),
+ bitsize_unit_node));
if (TYPE_SIZE (type) != 0)
{
TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type));
- TYPE_SIZE_UNIT (type)
- = round_up (TYPE_SIZE_UNIT (type), TYPE_ALIGN (type) / BITS_PER_UNIT);
+ TYPE_SIZE_UNIT (type) = round_up (TYPE_SIZE_UNIT (type),
+ TYPE_ALIGN_UNIT (type));
}
/* Evaluate nonconstant sizes only once, either now or as soon as safe. */
@@ -1444,6 +1475,8 @@ finalize_type_size (tree type)
void
finish_record_layout (record_layout_info rli, int free_p)
{
+ tree variant;
+
/* Compute the final size. */
finalize_record_size (rli);
@@ -1453,6 +1486,12 @@ finish_record_layout (record_layout_info rli, int free_p)
/* Perform any last tweaks to the TYPE_SIZE, etc. */
finalize_type_size (rli->t);
+ /* Propagate TYPE_PACKED to variants. With C++ templates,
+ handle_packed_attribute is too early to do this. */
+ for (variant = TYPE_NEXT_VARIANT (rli->t); variant;
+ variant = TYPE_NEXT_VARIANT (variant))
+ TYPE_PACKED (variant) = TYPE_PACKED (rli->t);
+
/* Lay out any static members. This is done now because their type
may use the record's type. */
while (rli->pending_statics)
@@ -1516,8 +1555,10 @@ finish_builtin_struct (tree type, const char *name, tree fields,
void
layout_type (tree type)
{
- if (type == 0)
- abort ();
+ gcc_assert (type);
+
+ if (type == error_mark_node)
+ return;
/* Do nothing if type has been laid out before. */
if (TYPE_SIZE (type))
@@ -1528,7 +1569,7 @@ layout_type (tree type)
case LANG_TYPE:
/* This kind of type is the responsibility
of the language-specific code. */
- abort ();
+ gcc_unreachable ();
case BOOLEAN_TYPE: /* Used for Java, Pascal, and Chill. */
if (TYPE_PRECISION (type) == 0)
@@ -1538,10 +1579,9 @@ layout_type (tree type)
case INTEGER_TYPE:
case ENUMERAL_TYPE:
- case CHAR_TYPE:
if (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST
&& tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0)
- TREE_UNSIGNED (type) = 1;
+ TYPE_UNSIGNED (type) = 1;
TYPE_MODE (type) = smallest_mode_for_size (TYPE_PRECISION (type),
MODE_INT);
@@ -1556,11 +1596,11 @@ layout_type (tree type)
break;
case COMPLEX_TYPE:
- TREE_UNSIGNED (type) = TREE_UNSIGNED (TREE_TYPE (type));
+ TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
TYPE_MODE (type)
= mode_for_size (2 * TYPE_PRECISION (TREE_TYPE (type)),
- (TREE_CODE (TREE_TYPE (type)) == INTEGER_TYPE
- ? MODE_COMPLEX_INT : MODE_COMPLEX_FLOAT),
+ (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
+ ? MODE_COMPLEX_FLOAT : MODE_COMPLEX_INT),
0);
TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
@@ -1568,14 +1608,54 @@ layout_type (tree type)
case VECTOR_TYPE:
{
- tree subtype;
+ int nunits = TYPE_VECTOR_SUBPARTS (type);
+ tree nunits_tree = build_int_cst (NULL_TREE, nunits);
+ tree innertype = TREE_TYPE (type);
+
+ gcc_assert (!(nunits & (nunits - 1)));
+
+ /* Find an appropriate mode for the vector type. */
+ if (TYPE_MODE (type) == VOIDmode)
+ {
+ enum machine_mode innermode = TYPE_MODE (innertype);
+ enum machine_mode mode;
+
+ /* First, look for a supported vector type. */
+ if (SCALAR_FLOAT_MODE_P (innermode))
+ mode = MIN_MODE_VECTOR_FLOAT;
+ else
+ mode = MIN_MODE_VECTOR_INT;
+
+ for (; mode != VOIDmode ; mode = GET_MODE_WIDER_MODE (mode))
+ if (GET_MODE_NUNITS (mode) == nunits
+ && GET_MODE_INNER (mode) == innermode
+ && targetm.vector_mode_supported_p (mode))
+ break;
+
+ /* For integers, try mapping it to a same-sized scalar mode. */
+ if (mode == VOIDmode
+ && GET_MODE_CLASS (innermode) == MODE_INT)
+ mode = mode_for_size (nunits * GET_MODE_BITSIZE (innermode),
+ MODE_INT, 0);
+
+ if (mode == VOIDmode || !have_regs_of_mode[mode])
+ TYPE_MODE (type) = BLKmode;
+ else
+ TYPE_MODE (type) = mode;
+ }
- subtype = TREE_TYPE (type);
- TREE_UNSIGNED (type) = TREE_UNSIGNED (subtype);
- TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
- TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
+ TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
+ TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
+ TYPE_SIZE_UNIT (innertype),
+ nunits_tree, 0);
+ TYPE_SIZE (type) = int_const_binop (MULT_EXPR, TYPE_SIZE (innertype),
+ nunits_tree, 0);
+
+ /* Always naturally align vectors. This prevents ABI changes
+ depending on whether or not native vector modes are supported. */
+ TYPE_ALIGN (type) = tree_low_cst (TYPE_SIZE (type), 0);
+ break;
}
- break;
case VOID_TYPE:
/* This is an incomplete type and so doesn't have a size. */
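The new vector path asserts a power-of-two lane count with !(nunits & (nunits - 1)). The test works because n & (n - 1) clears the lowest set bit, so the result is zero exactly when at most one bit is set. A stand-alone sketch of the check:

#include <stdio.h>

/* Zero result <=> at most one bit set (the assert form above also
   accepts 0).  */
static int
pow2_p (unsigned int n)
{
  return (n & (n - 1)) == 0;
}

int
main (void)
{
  printf ("%d %d %d\n", pow2_p (4), pow2_p (16), pow2_p (12));  /* 1 1 0 */
  return 0;
}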
@@ -1594,9 +1674,12 @@ layout_type (tree type)
case FUNCTION_TYPE:
case METHOD_TYPE:
- TYPE_MODE (type) = mode_for_size (2 * POINTER_SIZE, MODE_INT, 0);
- TYPE_SIZE (type) = bitsize_int (2 * POINTER_SIZE);
- TYPE_SIZE_UNIT (type) = size_int ((2 * POINTER_SIZE) / BITS_PER_UNIT);
+ /* It's hard to see what the mode and size of a function ought to
+ be, but we do know the alignment is FUNCTION_BOUNDARY, so
+ make it consistent with that. */
+ TYPE_MODE (type) = mode_for_size (FUNCTION_BOUNDARY, MODE_INT, 0);
+ TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY);
+ TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
break;
case POINTER_TYPE:
@@ -1611,7 +1694,7 @@ layout_type (tree type)
TYPE_SIZE (type) = bitsize_int (nbits);
TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
- TREE_UNSIGNED (type) = 1;
+ TYPE_UNSIGNED (type) = 1;
TYPE_PRECISION (type) = nbits;
}
break;
@@ -1635,10 +1718,10 @@ layout_type (tree type)
/* The initial subtraction should happen in the original type so
that (possible) negative values are handled appropriately. */
length = size_binop (PLUS_EXPR, size_one_node,
- convert (sizetype,
- fold (build (MINUS_EXPR,
- TREE_TYPE (lb),
- ub, lb))));
+ fold_convert (sizetype,
+ fold_build2 (MINUS_EXPR,
+ TREE_TYPE (lb),
+ ub, lb)));
/* Special handling for arrays of bits (for Chill). */
element_size = TYPE_SIZE (element);
@@ -1661,13 +1744,14 @@ layout_type (tree type)
sure the size is never negative. We should really do this
if *either* bound is non-constant, but this is the best
compromise between C and Ada. */
- if (! TREE_UNSIGNED (sizetype)
+ if (!TYPE_UNSIGNED (sizetype)
&& TREE_CODE (TYPE_MIN_VALUE (index)) != INTEGER_CST
&& TREE_CODE (TYPE_MAX_VALUE (index)) != INTEGER_CST)
length = size_binop (MAX_EXPR, length, size_zero_node);
TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
- convert (bitsizetype, length));
+ fold_convert (bitsizetype,
+ length));
/* If we know the size of the element, calculate the total
size directly, rather than do some division thing below.
@@ -1719,6 +1803,17 @@ layout_type (tree type)
TYPE_MODE (type) = BLKmode;
}
}
+ /* When the element size is constant, check that it is at least as
+ large as the element alignment. */
+ if (TYPE_SIZE_UNIT (element)
+ && TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST
+ /* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than
+ TYPE_ALIGN_UNIT. */
+ && !TREE_CONSTANT_OVERFLOW (TYPE_SIZE_UNIT (element))
+ && !integer_zerop (TYPE_SIZE_UNIT (element))
+ && compare_tree_int (TYPE_SIZE_UNIT (element),
+ TYPE_ALIGN_UNIT (element)) < 0)
+ error ("alignment of array elements is greater than element size");
break;
}
@@ -1753,46 +1848,8 @@ layout_type (tree type)
}
break;
- case SET_TYPE: /* Used by Chill and Pascal. */
- if (TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) != INTEGER_CST
- || TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (type))) != INTEGER_CST)
- abort ();
- else
- {
-#ifndef SET_WORD_SIZE
-#define SET_WORD_SIZE BITS_PER_WORD
-#endif
- unsigned int alignment
- = set_alignment ? set_alignment : SET_WORD_SIZE;
- HOST_WIDE_INT size_in_bits
- = (tree_low_cst (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), 0)
- - tree_low_cst (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), 0) + 1);
- HOST_WIDE_INT rounded_size
- = ((size_in_bits + alignment - 1) / alignment) * alignment;
-
- if (rounded_size > (int) alignment)
- TYPE_MODE (type) = BLKmode;
- else
- TYPE_MODE (type) = mode_for_size (alignment, MODE_INT, 1);
-
- TYPE_SIZE (type) = bitsize_int (rounded_size);
- TYPE_SIZE_UNIT (type) = size_int (rounded_size / BITS_PER_UNIT);
- TYPE_ALIGN (type) = alignment;
- TYPE_USER_ALIGN (type) = 0;
- TYPE_PRECISION (type) = size_in_bits;
- }
- break;
-
- case FILE_TYPE:
- /* The size may vary in different languages, so the language front end
- should fill in the size. */
- TYPE_ALIGN (type) = BIGGEST_ALIGNMENT;
- TYPE_USER_ALIGN (type) = 0;
- TYPE_MODE (type) = BLKmode;
- break;
-
default:
- abort ();
+ gcc_unreachable ();
}
/* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE. For
@@ -1803,11 +1860,6 @@ layout_type (tree type)
&& TREE_CODE (type) != QUAL_UNION_TYPE)
finalize_type_size (type);
- /* If this type is created before sizetype has been permanently set,
- record it so set_sizetype can fix it up. */
- if (! sizetype_set)
- early_type_list = tree_cons (NULL_TREE, type, early_type_list);
-
/* If an alias set has been set for this aggregate when it was incomplete,
force it into alias set 0.
This is too conservative, but we cannot call record_component_aliases
@@ -1847,37 +1899,32 @@ make_unsigned_type (int precision)
value to enable integer types to be created. */
void
-initialize_sizetypes (void)
+initialize_sizetypes (bool signed_p)
{
tree t = make_node (INTEGER_TYPE);
-
- /* Set this so we do something reasonable for the build_int_2 calls
- below. */
- integer_type_node = t;
+ int precision = GET_MODE_BITSIZE (SImode);
TYPE_MODE (t) = SImode;
TYPE_ALIGN (t) = GET_MODE_ALIGNMENT (SImode);
TYPE_USER_ALIGN (t) = 0;
- TYPE_SIZE (t) = build_int_2 (GET_MODE_BITSIZE (SImode), 0);
- TYPE_SIZE_UNIT (t) = build_int_2 (GET_MODE_SIZE (SImode), 0);
- TREE_UNSIGNED (t) = 1;
- TYPE_PRECISION (t) = GET_MODE_BITSIZE (SImode);
- TYPE_MIN_VALUE (t) = build_int_2 (0, 0);
TYPE_IS_SIZETYPE (t) = 1;
+ TYPE_UNSIGNED (t) = !signed_p;
+ TYPE_SIZE (t) = build_int_cst (t, precision);
+ TYPE_SIZE_UNIT (t) = build_int_cst (t, GET_MODE_SIZE (SImode));
+ TYPE_PRECISION (t) = precision;
- /* 1000 avoids problems with possible overflow and is certainly
- larger than any size value we'd want to be storing. */
- TYPE_MAX_VALUE (t) = build_int_2 (1000, 0);
+ /* Set TYPE_MIN_VALUE and TYPE_MAX_VALUE. */
+ set_min_and_max_values_for_integral_type (t, precision, !signed_p);
- /* These two must be different nodes because of the caching done in
- size_int_wide. */
sizetype = t;
- bitsizetype = copy_node (t);
- integer_type_node = 0;
+ bitsizetype = build_distinct_type_copy (t);
}
-/* Set sizetype to TYPE, and initialize *sizetype accordingly.
- Also update the type of any standard type's sizes made so far. */
+/* Make sizetype a version of TYPE, and initialize *sizetype
+ accordingly. We do this by overwriting the stub sizetype and
+ bitsizetype nodes created by initialize_sizetypes. This makes sure
+ that (a) anything stubby about them no longer exists, (b) any
+ INTEGER_CSTs created with such a type, remain valid. */
void
set_sizetype (tree type)
@@ -1887,75 +1934,77 @@ set_sizetype (tree type)
calculating signed sizes / offsets in bits. However, when
cross-compiling from a 32 bit to a 64 bit host, we are limited to 64 bit
precision. */
- int precision = MIN (oprecision + BITS_PER_UNIT_LOG + 1,
+ int precision = MIN (MIN (oprecision + BITS_PER_UNIT_LOG + 1,
+ MAX_FIXED_MODE_SIZE),
2 * HOST_BITS_PER_WIDE_INT);
- unsigned int i;
tree t;
- if (sizetype_set)
- abort ();
+ gcc_assert (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (sizetype));
- /* Make copies of nodes since we'll be setting TYPE_IS_SIZETYPE. */
- sizetype = copy_node (type);
- TYPE_DOMAIN (sizetype) = type;
- TYPE_IS_SIZETYPE (sizetype) = 1;
- bitsizetype = make_node (INTEGER_TYPE);
- TYPE_NAME (bitsizetype) = TYPE_NAME (type);
- TYPE_PRECISION (bitsizetype) = precision;
- TYPE_IS_SIZETYPE (bitsizetype) = 1;
+ t = build_distinct_type_copy (type);
+ /* We do want to use sizetype's cache, as we will be replacing that
+ type. */
+ TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (sizetype);
+ TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (sizetype);
+ TREE_TYPE (TYPE_CACHED_VALUES (t)) = type;
+ TYPE_UID (t) = TYPE_UID (sizetype);
+ TYPE_IS_SIZETYPE (t) = 1;
- if (TREE_UNSIGNED (type))
- fixup_unsigned_type (bitsizetype);
- else
- fixup_signed_type (bitsizetype);
+ /* Replace our original stub sizetype. */
+ memcpy (sizetype, t, tree_size (sizetype));
+ TYPE_MAIN_VARIANT (sizetype) = sizetype;
+
+ t = make_node (INTEGER_TYPE);
+ TYPE_NAME (t) = get_identifier ("bit_size_type");
+ /* We do want to use bitsizetype's cache, as we will be replacing that
+ type. */
+ TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (bitsizetype);
+ TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (bitsizetype);
+ TYPE_PRECISION (t) = precision;
+ TYPE_UID (t) = TYPE_UID (bitsizetype);
+ TYPE_IS_SIZETYPE (t) = 1;
- layout_type (bitsizetype);
+ /* Replace our original stub bitsizetype. */
+ memcpy (bitsizetype, t, tree_size (bitsizetype));
+ TYPE_MAIN_VARIANT (bitsizetype) = bitsizetype;
- if (TREE_UNSIGNED (type))
+ if (TYPE_UNSIGNED (type))
{
- usizetype = sizetype;
- ubitsizetype = bitsizetype;
- ssizetype = copy_node (make_signed_type (oprecision));
- sbitsizetype = copy_node (make_signed_type (precision));
+ fixup_unsigned_type (bitsizetype);
+ ssizetype = build_distinct_type_copy (make_signed_type (oprecision));
+ TYPE_IS_SIZETYPE (ssizetype) = 1;
+ sbitsizetype = build_distinct_type_copy (make_signed_type (precision));
+ TYPE_IS_SIZETYPE (sbitsizetype) = 1;
}
else
{
+ fixup_signed_type (bitsizetype);
ssizetype = sizetype;
sbitsizetype = bitsizetype;
- usizetype = copy_node (make_unsigned_type (oprecision));
- ubitsizetype = copy_node (make_unsigned_type (precision));
}
- TYPE_NAME (bitsizetype) = get_identifier ("bit_size_type");
-
- /* Show is a sizetype, is a main type, and has no pointers to it. */
- for (i = 0; i < ARRAY_SIZE (sizetype_tab); i++)
+ /* If SIZETYPE is unsigned, we need to fix TYPE_MAX_VALUE so that
+ it is sign extended in a way consistent with force_fit_type. */
+ if (TYPE_UNSIGNED (type))
{
- TYPE_IS_SIZETYPE (sizetype_tab[i]) = 1;
- TYPE_MAIN_VARIANT (sizetype_tab[i]) = sizetype_tab[i];
- TYPE_NEXT_VARIANT (sizetype_tab[i]) = 0;
- TYPE_POINTER_TO (sizetype_tab[i]) = 0;
- TYPE_REFERENCE_TO (sizetype_tab[i]) = 0;
- }
+ tree orig_max, new_max;
- /* Go down each of the types we already made and set the proper type
- for the sizes in them. */
- for (t = early_type_list; t != 0; t = TREE_CHAIN (t))
- {
- if (TREE_CODE (TREE_VALUE (t)) != INTEGER_TYPE
- && TREE_CODE (TREE_VALUE (t)) != BOOLEAN_TYPE)
- abort ();
+ orig_max = TYPE_MAX_VALUE (sizetype);
- TREE_TYPE (TYPE_SIZE (TREE_VALUE (t))) = bitsizetype;
- TREE_TYPE (TYPE_SIZE_UNIT (TREE_VALUE (t))) = sizetype;
- }
+ /* Build a new node with the same values, but a different type. */
+ new_max = build_int_cst_wide (sizetype,
+ TREE_INT_CST_LOW (orig_max),
+ TREE_INT_CST_HIGH (orig_max));
- early_type_list = 0;
- sizetype_set = 1;
+ /* Now sign extend it using force_fit_type to ensure
+ consistency. */
+ new_max = force_fit_type (new_max, 0, 0, 0);
+ TYPE_MAX_VALUE (sizetype) = new_max;
+ }
}
-/* TYPE is an integral type, i.e., an INTEGRAL_TYPE, ENUMERAL_TYPE,
- BOOLEAN_TYPE, or CHAR_TYPE. Set TYPE_MIN_VALUE and TYPE_MAX_VALUE
+/* TYPE is an integral type, i.e., an INTEGRAL_TYPE, ENUMERAL_TYPE
+ or BOOLEAN_TYPE. Set TYPE_MIN_VALUE and TYPE_MAX_VALUE
for TYPE, based on the PRECISION and whether or not the TYPE
IS_UNSIGNED. PRECISION need not correspond to a width supported
natively by the hardware; for example, on a machine with 8-bit,
@@ -1972,36 +2021,39 @@ set_min_and_max_values_for_integral_type (tree type,
if (is_unsigned)
{
- min_value = build_int_2 (0, 0);
- max_value
- = build_int_2 (precision - HOST_BITS_PER_WIDE_INT >= 0
- ? -1 : ((HOST_WIDE_INT) 1 << precision) - 1,
- precision - HOST_BITS_PER_WIDE_INT > 0
- ? ((unsigned HOST_WIDE_INT) ~0
- >> (HOST_BITS_PER_WIDE_INT
- - (precision - HOST_BITS_PER_WIDE_INT)))
- : 0);
+ min_value = build_int_cst (type, 0);
+ max_value
+ = build_int_cst_wide (type, precision - HOST_BITS_PER_WIDE_INT >= 0
+ ? -1
+ : ((HOST_WIDE_INT) 1 << precision) - 1,
+ precision - HOST_BITS_PER_WIDE_INT > 0
+ ? ((unsigned HOST_WIDE_INT) ~0
+ >> (HOST_BITS_PER_WIDE_INT
+ - (precision - HOST_BITS_PER_WIDE_INT)))
+ : 0);
}
else
{
- min_value
- = build_int_2 ((precision - HOST_BITS_PER_WIDE_INT > 0
- ? 0 : (HOST_WIDE_INT) (-1) << (precision - 1)),
- (((HOST_WIDE_INT) (-1)
- << (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
- ? precision - HOST_BITS_PER_WIDE_INT - 1
- : 0))));
+ min_value
+ = build_int_cst_wide (type,
+ (precision - HOST_BITS_PER_WIDE_INT > 0
+ ? 0
+ : (HOST_WIDE_INT) (-1) << (precision - 1)),
+ (((HOST_WIDE_INT) (-1)
+ << (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
+ ? precision - HOST_BITS_PER_WIDE_INT - 1
+ : 0))));
max_value
- = build_int_2 ((precision - HOST_BITS_PER_WIDE_INT > 0
- ? -1 : ((HOST_WIDE_INT) 1 << (precision - 1)) - 1),
- (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
- ? (((HOST_WIDE_INT) 1
- << (precision - HOST_BITS_PER_WIDE_INT - 1))) - 1
- : 0));
+ = build_int_cst_wide (type,
+ (precision - HOST_BITS_PER_WIDE_INT > 0
+ ? -1
+ : ((HOST_WIDE_INT) 1 << (precision - 1)) - 1),
+ (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
+ ? (((HOST_WIDE_INT) 1
+ << (precision - HOST_BITS_PER_WIDE_INT - 1))) - 1
+ : 0));
}
- TREE_TYPE (min_value) = type;
- TREE_TYPE (max_value) = type;
TYPE_MIN_VALUE (type) = min_value;
TYPE_MAX_VALUE (type) = max_value;
}
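For precisions that fit in a single HOST_WIDE_INT, the build_int_cst_wide expressions above reduce to the familiar two's-complement bounds; the second argument pair only matters once the precision exceeds the host word. A single-word sketch, assuming 0 < precision < 64 so no shift is full-width:

#include <stdio.h>

static void
integral_bounds (int precision, int is_unsigned,
                 long long *min_val, unsigned long long *max_val)
{
  if (is_unsigned)
    {
      *min_val = 0;
      *max_val = (1ULL << precision) - 1;
    }
  else
    {
      *min_val = -(1LL << (precision - 1));
      *max_val = (1ULL << (precision - 1)) - 1;
    }
}

int
main (void)
{
  long long mn;
  unsigned long long mx;
  integral_bounds (8, 0, &mn, &mx);
  printf ("signed 8-bit: [%lld, %llu]\n", mn, mx);  /* [-128, 127] */
  return 0;
}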
@@ -2022,7 +2074,7 @@ fixup_signed_type (tree type)
if (precision > HOST_BITS_PER_WIDE_INT * 2)
precision = HOST_BITS_PER_WIDE_INT * 2;
- set_min_and_max_values_for_integral_type (type, precision,
+ set_min_and_max_values_for_integral_type (type, precision,
/*is_unsigned=*/false);
/* Lay out the type: set its alignment, size, etc. */
@@ -2044,7 +2096,9 @@ fixup_unsigned_type (tree type)
if (precision > HOST_BITS_PER_WIDE_INT * 2)
precision = HOST_BITS_PER_WIDE_INT * 2;
- set_min_and_max_values_for_integral_type (type, precision,
+ TYPE_UNSIGNED (type) = 1;
+
+ set_min_and_max_values_for_integral_type (type, precision,
/*is_unsigned=*/true);
/* Lay out the type: set its alignment, size, etc. */
@@ -2058,13 +2112,17 @@ fixup_unsigned_type (tree type)
If LARGEST_MODE is not VOIDmode, it means that we should not use a mode
larger than LARGEST_MODE (usually SImode).
- If no mode meets all these conditions, we return VOIDmode. Otherwise, if
- VOLATILEP is true or SLOW_BYTE_ACCESS is false, we return the smallest
- mode meeting these conditions.
+ If no mode meets all these conditions, we return VOIDmode.
+
+ If VOLATILEP is false and SLOW_BYTE_ACCESS is false, we return the
+ smallest mode meeting these conditions.
- Otherwise (VOLATILEP is false and SLOW_BYTE_ACCESS is true), we return
- the largest mode (but a mode no wider than UNITS_PER_WORD) that meets
- all the conditions. */
+ If VOLATILEP is false and SLOW_BYTE_ACCESS is true, we return the
+ largest mode (but a mode no wider than UNITS_PER_WORD) that meets
+ all the conditions.
+
+ If VOLATILEP is true the narrow_volatile_bitfields target hook is used to
+ decide which of the above modes should be used. */
enum machine_mode
get_best_mode (int bitsize, int bitpos, unsigned int align,
@@ -2094,7 +2152,8 @@ get_best_mode (int bitsize, int bitpos, unsigned int align,
|| (largest_mode != VOIDmode && unit > GET_MODE_BITSIZE (largest_mode)))
return VOIDmode;
- if (SLOW_BYTE_ACCESS && ! volatilep)
+ if ((SLOW_BYTE_ACCESS && ! volatilep)
+ || (volatilep && !targetm.narrow_volatile_bitfield()))
{
enum machine_mode wide_mode = VOIDmode, tmode;
@@ -2117,4 +2176,32 @@ get_best_mode (int bitsize, int bitpos, unsigned int align,
return mode;
}
+/* Gets minimal and maximal values for MODE (signed or unsigned depending on
+ SIGN). The returned constants are made to be usable in TARGET_MODE. */
+
+void
+get_mode_bounds (enum machine_mode mode, int sign,
+ enum machine_mode target_mode,
+ rtx *mmin, rtx *mmax)
+{
+ unsigned size = GET_MODE_BITSIZE (mode);
+ unsigned HOST_WIDE_INT min_val, max_val;
+
+ gcc_assert (size <= HOST_BITS_PER_WIDE_INT);
+
+ if (sign)
+ {
+ min_val = -((unsigned HOST_WIDE_INT) 1 << (size - 1));
+ max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1)) - 1;
+ }
+ else
+ {
+ min_val = 0;
+ max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1) << 1) - 1;
+ }
+
+ *mmin = gen_int_mode (min_val, target_mode);
+ *mmax = gen_int_mode (max_val, target_mode);
+}
+
#include "gt-stor-layout.h"