| /**************************************************************************** |
| * * |
| * GNAT COMPILER COMPONENTS * |
| * * |
| * U T I L S * |
| * * |
| * C Implementation File * |
| * * |
| * Copyright (C) 1992-2021, Free Software Foundation, Inc. * |
| * * |
| * GNAT is free software; you can redistribute it and/or modify it under * |
| * terms of the GNU General Public License as published by the Free Soft- * |
| * ware Foundation; either version 3, or (at your option) any later ver- * |
| * sion. GNAT is distributed in the hope that it will be useful, but WITH- * |
| * OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * |
| * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * |
| * for more details. You should have received a copy of the GNU General * |
| * Public License along with GCC; see the file COPYING3. If not see * |
| * <http://www.gnu.org/licenses/>. * |
| * * |
| * GNAT was originally developed by the GNAT team at New York University. * |
| * Extensive contributions were provided by Ada Core Technologies Inc. * |
| * * |
| ****************************************************************************/ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "target.h" |
| #include "function.h" |
| #include "tree.h" |
| #include "stringpool.h" |
| #include "cgraph.h" |
| #include "diagnostic.h" |
| #include "alias.h" |
| #include "fold-const.h" |
| #include "stor-layout.h" |
| #include "attribs.h" |
| #include "varasm.h" |
| #include "toplev.h" |
| #include "opts.h" |
| #include "output.h" |
| #include "debug.h" |
| #include "convert.h" |
| #include "common/common-target.h" |
| #include "langhooks.h" |
| #include "tree-dump.h" |
| #include "tree-inline.h" |
| |
| #include "ada.h" |
| #include "types.h" |
| #include "atree.h" |
| #include "nlists.h" |
| #include "snames.h" |
| #include "uintp.h" |
| #include "fe.h" |
| #include "sinfo.h" |
| #include "einfo.h" |
| #include "ada-tree.h" |
| #include "gigi.h" |
| |
| /* If nonzero, pretend we are allocating at global level. */ |
| int force_global; |
| |
| /* The default alignment of "double" floating-point types, i.e. floating |
| point types whose size is equal to 64 bits, or 0 if this alignment is |
| not specifically capped. */ |
| int double_float_alignment; |
| |
| /* The default alignment of "double" or larger scalar types, i.e. scalar |
| types whose size is greater or equal to 64 bits, or 0 if this alignment |
| is not specifically capped. */ |
| int double_scalar_alignment; |
| |
| /* True if floating-point arithmetic may use wider intermediate results. */ |
| bool fp_arith_may_widen = true; |
| |
| /* Tree nodes for the various types and decls we create. */ |
| tree gnat_std_decls[(int) ADT_LAST]; |
| |
| /* Functions to call for each of the possible raise reasons. */ |
| tree gnat_raise_decls[(int) LAST_REASON_CODE + 1]; |
| |
| /* Likewise, but with extra info for each of the possible raise reasons. */ |
| tree gnat_raise_decls_ext[(int) LAST_REASON_CODE + 1]; |
| |
| /* Forward declarations for handlers of attributes. */ |
| static tree handle_const_attribute (tree *, tree, tree, int, bool *); |
| static tree handle_nothrow_attribute (tree *, tree, tree, int, bool *); |
| static tree handle_pure_attribute (tree *, tree, tree, int, bool *); |
| static tree handle_novops_attribute (tree *, tree, tree, int, bool *); |
| static tree handle_nonnull_attribute (tree *, tree, tree, int, bool *); |
| static tree handle_sentinel_attribute (tree *, tree, tree, int, bool *); |
| static tree handle_noreturn_attribute (tree *, tree, tree, int, bool *); |
| static tree handle_stack_protect_attribute (tree *, tree, tree, int, bool *); |
| static tree handle_no_stack_protector_attribute (tree *, tree, tree, int, bool *); |
| static tree handle_strub_attribute (tree *, tree, tree, int, bool *); |
| static tree handle_noinline_attribute (tree *, tree, tree, int, bool *); |
| static tree handle_noclone_attribute (tree *, tree, tree, int, bool *); |
| static tree handle_noicf_attribute (tree *, tree, tree, int, bool *); |
| static tree handle_noipa_attribute (tree *, tree, tree, int, bool *); |
| static tree handle_leaf_attribute (tree *, tree, tree, int, bool *); |
| static tree handle_always_inline_attribute (tree *, tree, tree, int, bool *); |
| static tree handle_malloc_attribute (tree *, tree, tree, int, bool *); |
| static tree handle_type_generic_attribute (tree *, tree, tree, int, bool *); |
| static tree handle_flatten_attribute (tree *, tree, tree, int, bool *); |
| static tree handle_used_attribute (tree *, tree, tree, int, bool *); |
| static tree handle_cold_attribute (tree *, tree, tree, int, bool *); |
| static tree handle_hot_attribute (tree *, tree, tree, int, bool *); |
| static tree handle_target_attribute (tree *, tree, tree, int, bool *); |
| static tree handle_target_clones_attribute (tree *, tree, tree, int, bool *); |
| static tree handle_vector_size_attribute (tree *, tree, tree, int, bool *); |
| static tree handle_vector_type_attribute (tree *, tree, tree, int, bool *); |
| static tree handle_zero_call_used_regs_attribute (tree *, tree, tree, int, |
| bool *); |
| |
| static const struct attribute_spec::exclusions attr_cold_hot_exclusions[] = |
| { |
| { "cold", true, true, true }, |
| { "hot" , true, true, true }, |
| { NULL , false, false, false } |
| }; |
| |
| static const struct attribute_spec::exclusions attr_stack_protect_exclusions[] = |
| { |
| { "stack_protect", true, false, false }, |
| { "no_stack_protector", true, false, false }, |
| { NULL, false, false, false }, |
| }; |
| |
| /* Fake handler for attributes we don't properly support, typically because |
| supporting them would require dragging in a lot of the common C front-end |
| circuitry. */ |
| static tree fake_attribute_handler (tree *, tree, tree, int, bool *); |
| |
| /* Table of machine-independent internal attributes for Ada. We support |
| this minimal set of attributes to accommodate the needs of builtins. */ |
| const struct attribute_spec gnat_internal_attribute_table[] = |
| { |
| /* { name, min_len, max_len, decl_req, type_req, fn_type_req, |
| affects_type_identity, handler, exclude } */ |
| { "const", 0, 0, true, false, false, false, |
| handle_const_attribute, NULL }, |
| { "nothrow", 0, 0, true, false, false, false, |
| handle_nothrow_attribute, NULL }, |
| { "pure", 0, 0, true, false, false, false, |
| handle_pure_attribute, NULL }, |
| { "no vops", 0, 0, true, false, false, false, |
| handle_novops_attribute, NULL }, |
| { "nonnull", 0, -1, false, true, true, false, |
| handle_nonnull_attribute, NULL }, |
| { "sentinel", 0, 1, false, true, true, false, |
| handle_sentinel_attribute, NULL }, |
| { "noreturn", 0, 0, true, false, false, false, |
| handle_noreturn_attribute, NULL }, |
| { "stack_protect",0, 0, true, false, false, false, |
| handle_stack_protect_attribute, |
| attr_stack_protect_exclusions }, |
| { "no_stack_protector",0, 0, true, false, false, false, |
| handle_no_stack_protector_attribute, |
| attr_stack_protect_exclusions }, |
| { "strub", 0, 1, false, true, false, true, |
| handle_strub_attribute, NULL }, |
| { "noinline", 0, 0, true, false, false, false, |
| handle_noinline_attribute, NULL }, |
| { "noclone", 0, 0, true, false, false, false, |
| handle_noclone_attribute, NULL }, |
| { "no_icf", 0, 0, true, false, false, false, |
| handle_noicf_attribute, NULL }, |
| { "noipa", 0, 0, true, false, false, false, |
| handle_noipa_attribute, NULL }, |
| { "leaf", 0, 0, true, false, false, false, |
| handle_leaf_attribute, NULL }, |
| { "always_inline",0, 0, true, false, false, false, |
| handle_always_inline_attribute, NULL }, |
| { "malloc", 0, 0, true, false, false, false, |
| handle_malloc_attribute, NULL }, |
| { "type generic", 0, 0, false, true, true, false, |
| handle_type_generic_attribute, NULL }, |
| |
| { "flatten", 0, 0, true, false, false, false, |
| handle_flatten_attribute, NULL }, |
| { "used", 0, 0, true, false, false, false, |
| handle_used_attribute, NULL }, |
| { "cold", 0, 0, true, false, false, false, |
| handle_cold_attribute, attr_cold_hot_exclusions }, |
| { "hot", 0, 0, true, false, false, false, |
| handle_hot_attribute, attr_cold_hot_exclusions }, |
| { "target", 1, -1, true, false, false, false, |
| handle_target_attribute, NULL }, |
| { "target_clones",1, -1, true, false, false, false, |
| handle_target_clones_attribute, NULL }, |
| |
| { "vector_size", 1, 1, false, true, false, false, |
| handle_vector_size_attribute, NULL }, |
| { "vector_type", 0, 0, false, true, false, false, |
| handle_vector_type_attribute, NULL }, |
| { "may_alias", 0, 0, false, true, false, false, |
| NULL, NULL }, |
| |
| { "zero_call_used_regs", 1, 1, true, false, false, false, |
| handle_zero_call_used_regs_attribute, NULL }, |
| |
| /* ??? format and format_arg are heavy and not supported, which actually |
| prevents support for stdio builtins, which we however declare as part |
| of the common builtins.def contents. */ |
| { "format", 3, 3, false, true, true, false, |
| fake_attribute_handler, NULL }, |
| { "format_arg", 1, 1, false, true, true, false, |
| fake_attribute_handler, NULL }, |
| |
| { NULL, 0, 0, false, false, false, false, |
| NULL, NULL } |
| }; |
| |
| /* Associates a GNAT tree node with a GCC tree node. It is used in |
| `save_gnu_tree', `get_gnu_tree' and `present_gnu_tree'. See documentation |
| of `save_gnu_tree' for more info. */ |
| static GTY((length ("max_gnat_nodes"))) tree *associate_gnat_to_gnu; |
| |
| #define GET_GNU_TREE(GNAT_ENTITY) \ |
| associate_gnat_to_gnu[(GNAT_ENTITY) - First_Node_Id] |
| |
| #define SET_GNU_TREE(GNAT_ENTITY,VAL) \ |
| associate_gnat_to_gnu[(GNAT_ENTITY) - First_Node_Id] = (VAL) |
| |
| #define PRESENT_GNU_TREE(GNAT_ENTITY) \ |
| (associate_gnat_to_gnu[(GNAT_ENTITY) - First_Node_Id] != NULL_TREE) |
| |
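| /* Both this table and the dummy table below are indexed by the offset of |
| GNAT_ENTITY from First_Node_Id, hence their max_gnat_nodes length. */ |
| |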
| /* Associates a GNAT entity with a GCC tree node used as a dummy, if any. */ |
| static GTY((length ("max_gnat_nodes"))) tree *dummy_node_table; |
| |
| #define GET_DUMMY_NODE(GNAT_ENTITY) \ |
| dummy_node_table[(GNAT_ENTITY) - First_Node_Id] |
| |
| #define SET_DUMMY_NODE(GNAT_ENTITY,VAL) \ |
| dummy_node_table[(GNAT_ENTITY) - First_Node_Id] = (VAL) |
| |
| #define PRESENT_DUMMY_NODE(GNAT_ENTITY) \ |
| (dummy_node_table[(GNAT_ENTITY) - First_Node_Id] != NULL_TREE) |
| |
| /* This variable keeps a table for types for each precision so that we only |
| allocate each of them once. Signed and unsigned types are kept separate. |
| |
| Note that these types are only used when fold-const requests something |
| special. Perhaps we should NOT share these types; we'll see how it |
| goes later. */ |
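| /* The first index is the precision and the second is the signedness, with |
| nonzero meaning unsigned, as notably looked up by gnat_type_for_size. */ |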
| static GTY(()) tree signed_and_unsigned_types[2 * MAX_BITS_PER_WORD + 1][2]; |
| |
| /* Likewise for float types, but record these by mode. */ |
| static GTY(()) tree float_types[NUM_MACHINE_MODES]; |
| |
| /* For each binding contour we allocate a binding_level structure to indicate |
| the binding depth. */ |
| |
| struct GTY((chain_next ("%h.chain"))) gnat_binding_level { |
| /* The binding level containing this one (the enclosing binding level). */ |
| struct gnat_binding_level *chain; |
| /* The BLOCK node for this level. */ |
| tree block; |
| /* If nonzero, the setjmp buffer that needs to be updated for any |
| variable-sized definition within this context. */ |
| tree jmpbuf_decl; |
| }; |
| |
| /* The binding level currently in effect. */ |
| static GTY(()) struct gnat_binding_level *current_binding_level; |
| |
| /* A chain of gnat_binding_level structures awaiting reuse. */ |
| static GTY((deletable)) struct gnat_binding_level *free_binding_level; |
| |
| /* The context to be used for global declarations. */ |
| static GTY(()) tree global_context; |
| |
| /* An array of global declarations. */ |
| static GTY(()) vec<tree, va_gc> *global_decls; |
| |
| /* An array of builtin function declarations. */ |
| static GTY(()) vec<tree, va_gc> *builtin_decls; |
| |
| /* A chain of unused BLOCK nodes. */ |
| static GTY((deletable)) tree free_block_chain; |
| |
| /* A hash table of packable types. It is modelled on the generic type |
| hash table in tree.cc, which must thus be used as a reference. */ |
| |
| struct GTY((for_user)) packable_type_hash |
| { |
| hashval_t hash; |
| tree type; |
| }; |
| |
| struct packable_type_hasher : ggc_cache_ptr_hash<packable_type_hash> |
| { |
| static inline hashval_t hash (packable_type_hash *t) { return t->hash; } |
| static bool equal (packable_type_hash *a, packable_type_hash *b); |
| |
| static int |
| keep_cache_entry (packable_type_hash *&t) |
| { |
| return ggc_marked_p (t->type); |
| } |
| }; |
| |
| static GTY ((cache)) hash_table<packable_type_hasher> *packable_type_hash_table; |
| |
| /* A hash table of padded types. It is modelled on the generic type |
| hash table in tree.cc, which must thus be used as a reference. */ |
| |
| struct GTY((for_user)) pad_type_hash |
| { |
| hashval_t hash; |
| tree type; |
| }; |
| |
| struct pad_type_hasher : ggc_cache_ptr_hash<pad_type_hash> |
| { |
| static inline hashval_t hash (pad_type_hash *t) { return t->hash; } |
| static bool equal (pad_type_hash *a, pad_type_hash *b); |
| |
| static int |
| keep_cache_entry (pad_type_hash *&t) |
| { |
| return ggc_marked_p (t->type); |
| } |
| }; |
| |
| static GTY ((cache)) hash_table<pad_type_hasher> *pad_type_hash_table; |
| |
| static tree merge_sizes (tree, tree, tree, bool, bool); |
| static tree fold_bit_position (const_tree); |
| static tree compute_related_constant (tree, tree); |
| static tree split_plus (tree, tree *); |
| static tree float_type_for_precision (int, machine_mode); |
| static tree convert_to_fat_pointer (tree, tree); |
| static unsigned int scale_by_factor_of (tree, unsigned int); |
| |
| /* Linked list used as a queue to defer the initialization of the DECL_CONTEXT |
| of ..._DECL nodes and of the TYPE_CONTEXT of ..._TYPE nodes. */ |
| struct deferred_decl_context_node |
| { |
| /* The ..._DECL node to work on. */ |
| tree decl; |
| |
| /* The corresponding entity's Scope. */ |
| Entity_Id gnat_scope; |
| |
| /* The value of force_global when DECL was pushed. */ |
| int force_global; |
| |
| /* The list of ..._TYPE nodes to propagate the context to. */ |
| vec<tree> types; |
| |
| /* The next queue item. */ |
| struct deferred_decl_context_node *next; |
| }; |
| |
| static struct deferred_decl_context_node *deferred_decl_context_queue = NULL; |
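| |
| /* The queue is drained by process_deferred_decl_context, defined later in |
| this file, once the elaboration of the GNAT scopes makes the corresponding |
| GCC contexts available. */ |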
| |
| /* Defer the initialization of DECL's DECL_CONTEXT attribute, scheduling it |
| to be set once the elaboration of GNAT_SCOPE makes the context available. */ |
| static struct deferred_decl_context_node * |
| add_deferred_decl_context (tree decl, Entity_Id gnat_scope, int force_global); |
| |
| /* Defer the initialization of TYPE's TYPE_CONTEXT attribute, scheduling it |
| to be set from the DECL_CONTEXT computed as part of N as soon as the |
| latter is available. */ |
| static void add_deferred_type_context (struct deferred_decl_context_node *n, |
| tree type); |
| |
| /* Initialize data structures of the utils.cc module. */ |
| |
| void |
| init_gnat_utils (void) |
| { |
| /* Initialize the association of GNAT nodes to GCC trees. */ |
| associate_gnat_to_gnu = ggc_cleared_vec_alloc<tree> (max_gnat_nodes); |
| |
| /* Initialize the association of GNAT nodes to GCC trees as dummies. */ |
| dummy_node_table = ggc_cleared_vec_alloc<tree> (max_gnat_nodes); |
| |
| /* Initialize the hash table of packable types. */ |
| packable_type_hash_table = hash_table<packable_type_hasher>::create_ggc (512); |
| |
| /* Initialize the hash table of padded types. */ |
| pad_type_hash_table = hash_table<pad_type_hasher>::create_ggc (512); |
| } |
| |
| /* Destroy data structures of the utils.cc module. */ |
| |
| void |
| destroy_gnat_utils (void) |
| { |
| /* Destroy the association of GNAT nodes to GCC trees. */ |
| ggc_free (associate_gnat_to_gnu); |
| associate_gnat_to_gnu = NULL; |
| |
| /* Destroy the association of GNAT nodes to GCC trees as dummies. */ |
| ggc_free (dummy_node_table); |
| dummy_node_table = NULL; |
| |
| /* Destroy the hash table of packable types. */ |
| packable_type_hash_table->empty (); |
| packable_type_hash_table = NULL; |
| |
| /* Destroy the hash table of padded types. */ |
| pad_type_hash_table->empty (); |
| pad_type_hash_table = NULL; |
| } |
| |
| /* GNAT_ENTITY is a GNAT tree node for an entity. Associate GNU_DECL, a GCC |
| tree node, with GNAT_ENTITY. If GNU_DECL is not a ..._DECL node, abort. |
| If NO_CHECK is true, the latter check is suppressed. |
| |
| If GNU_DECL is zero, reset a previous association. */ |
| |
| void |
| save_gnu_tree (Entity_Id gnat_entity, tree gnu_decl, bool no_check) |
| { |
| /* Check that GNAT_ENTITY is not already defined and that it is being set |
| to something which is a decl. If that is not the case, this usually |
| means GNAT_ENTITY is defined twice, but occasionally is due to some |
| Gigi problem. */ |
| gcc_assert (!(gnu_decl |
| && (PRESENT_GNU_TREE (gnat_entity) |
| || (!no_check && !DECL_P (gnu_decl))))); |
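| /* In other words, a nonzero GNU_DECL must not overwrite an existing |
| association and, unless NO_CHECK is true, must be a ..._DECL node. */ |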
| |
| SET_GNU_TREE (gnat_entity, gnu_decl); |
| } |
| |
| /* GNAT_ENTITY is a GNAT tree node for an entity. Return the GCC tree node |
| that was associated with it. If there is no such tree node, abort. |
| |
| In some cases, such as delayed elaboration or expressions that need to |
| be elaborated only once, GNAT_ENTITY is really not an entity. */ |
| |
| tree |
| get_gnu_tree (Entity_Id gnat_entity) |
| { |
| gcc_assert (PRESENT_GNU_TREE (gnat_entity)); |
| return GET_GNU_TREE (gnat_entity); |
| } |
| |
| /* Return nonzero if a GCC tree has been associated with GNAT_ENTITY. */ |
| |
| bool |
| present_gnu_tree (Entity_Id gnat_entity) |
| { |
| return PRESENT_GNU_TREE (gnat_entity); |
| } |
| |
| /* Make a dummy type corresponding to GNAT_TYPE. */ |
| |
| tree |
| make_dummy_type (Entity_Id gnat_type) |
| { |
| Entity_Id gnat_equiv = Gigi_Equivalent_Type (Underlying_Type (gnat_type)); |
| tree gnu_type, debug_type; |
| |
| /* If there was no equivalent type (can only happen when just annotating |
| types) or underlying type, go back to the original type. */ |
| if (No (gnat_equiv)) |
| gnat_equiv = gnat_type; |
| |
| /* If there is already a dummy type, use that one. Else make one. */ |
| if (PRESENT_DUMMY_NODE (gnat_equiv)) |
| return GET_DUMMY_NODE (gnat_equiv); |
| |
| /* If this is a record, make a RECORD_TYPE or UNION_TYPE; else make |
| an ENUMERAL_TYPE. */ |
| gnu_type = make_node (Is_Record_Type (gnat_equiv) |
| ? tree_code_for_record_type (gnat_equiv) |
| : ENUMERAL_TYPE); |
| TYPE_NAME (gnu_type) = get_entity_name (gnat_type); |
| TYPE_DUMMY_P (gnu_type) = 1; |
| TYPE_STUB_DECL (gnu_type) |
| = create_type_stub_decl (TYPE_NAME (gnu_type), gnu_type); |
| if (Is_By_Reference_Type (gnat_equiv)) |
| TYPE_BY_REFERENCE_P (gnu_type) = 1; |
| if (Has_Discriminants (gnat_equiv)) |
| decl_attributes (&gnu_type, |
| tree_cons (get_identifier ("may_alias"), NULL_TREE, |
| NULL_TREE), |
| ATTR_FLAG_TYPE_IN_PLACE); |
| |
| SET_DUMMY_NODE (gnat_equiv, gnu_type); |
| |
| /* Create a debug type so that debuggers only see an unspecified type. */ |
| if (Needs_Debug_Info (gnat_type)) |
| { |
| debug_type = make_node (LANG_TYPE); |
| TYPE_NAME (debug_type) = TYPE_NAME (gnu_type); |
| TYPE_ARTIFICIAL (debug_type) = TYPE_ARTIFICIAL (gnu_type); |
| SET_TYPE_DEBUG_TYPE (gnu_type, debug_type); |
| } |
| |
| return gnu_type; |
| } |
| |
| /* Return the dummy type that was made for GNAT_TYPE, if any. */ |
| |
| tree |
| get_dummy_type (Entity_Id gnat_type) |
| { |
| return GET_DUMMY_NODE (gnat_type); |
| } |
| |
| /* Build dummy fat and thin pointer types whose designated type is specified |
| by GNAT_DESIG_TYPE/GNU_DESIG_TYPE and attach them to the latter. */ |
| |
| void |
| build_dummy_unc_pointer_types (Entity_Id gnat_desig_type, tree gnu_desig_type) |
| { |
| tree gnu_template_type, gnu_ptr_template, gnu_array_type, gnu_ptr_array; |
| tree gnu_fat_type, fields, gnu_object_type; |
| |
| gnu_template_type = make_node (RECORD_TYPE); |
| TYPE_NAME (gnu_template_type) = create_concat_name (gnat_desig_type, "XUB"); |
| TYPE_DUMMY_P (gnu_template_type) = 1; |
| gnu_ptr_template = build_pointer_type (gnu_template_type); |
| |
| gnu_array_type = make_node (ENUMERAL_TYPE); |
| TYPE_NAME (gnu_array_type) = create_concat_name (gnat_desig_type, "XUA"); |
| TYPE_DUMMY_P (gnu_array_type) = 1; |
| gnu_ptr_array = build_pointer_type (gnu_array_type); |
| |
| gnu_fat_type = make_node (RECORD_TYPE); |
| /* Build a stub DECL to trigger the special processing for fat pointer types |
| in gnat_pushdecl. */ |
| TYPE_NAME (gnu_fat_type) |
| = create_type_stub_decl (create_concat_name (gnat_desig_type, "XUP"), |
| gnu_fat_type); |
| fields = create_field_decl (get_identifier ("P_ARRAY"), gnu_ptr_array, |
| gnu_fat_type, NULL_TREE, NULL_TREE, 0, 1); |
| DECL_CHAIN (fields) |
| = create_field_decl (get_identifier ("P_BOUNDS"), gnu_ptr_template, |
| gnu_fat_type, NULL_TREE, NULL_TREE, 0, 1); |
| finish_fat_pointer_type (gnu_fat_type, fields); |
| SET_TYPE_UNCONSTRAINED_ARRAY (gnu_fat_type, gnu_desig_type); |
| /* Suppress debug info until after the type is completed. */ |
| TYPE_DECL_SUPPRESS_DEBUG (TYPE_STUB_DECL (gnu_fat_type)) = 1; |
| |
| gnu_object_type = make_node (RECORD_TYPE); |
| TYPE_NAME (gnu_object_type) = create_concat_name (gnat_desig_type, "XUT"); |
| TYPE_DUMMY_P (gnu_object_type) = 1; |
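| |
| /* In C-like terms, the fat pointer built above is roughly |
| struct { XUA *P_ARRAY; XUB *P_BOUNDS; } |
| where XUA is the dummy array type and XUB the dummy template (bounds) |
| type, while the XUT object record type is meant to hold the template |
| followed by the array data itself. */ |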
| |
| TYPE_POINTER_TO (gnu_desig_type) = gnu_fat_type; |
| TYPE_REFERENCE_TO (gnu_desig_type) = gnu_fat_type; |
| TYPE_OBJECT_RECORD_TYPE (gnu_desig_type) = gnu_object_type; |
| } |
| |
| /* Return true if we are in the global binding level. */ |
| |
| bool |
| global_bindings_p (void) |
| { |
| return force_global || !current_function_decl; |
| } |
| |
| /* Enter a new binding level. */ |
| |
| void |
| gnat_pushlevel (void) |
| { |
| struct gnat_binding_level *newlevel = NULL; |
| |
| /* Reuse a struct for this binding level, if there is one. */ |
| if (free_binding_level) |
| { |
| newlevel = free_binding_level; |
| free_binding_level = free_binding_level->chain; |
| } |
| else |
| newlevel = ggc_alloc<gnat_binding_level> (); |
| |
| /* Use a free BLOCK, if any; otherwise, allocate one. */ |
| if (free_block_chain) |
| { |
| newlevel->block = free_block_chain; |
| free_block_chain = BLOCK_CHAIN (free_block_chain); |
| BLOCK_CHAIN (newlevel->block) = NULL_TREE; |
| } |
| else |
| newlevel->block = make_node (BLOCK); |
| |
| /* Point the BLOCK we just made to its parent. */ |
| if (current_binding_level) |
| BLOCK_SUPERCONTEXT (newlevel->block) = current_binding_level->block; |
| |
| BLOCK_VARS (newlevel->block) = NULL_TREE; |
| BLOCK_SUBBLOCKS (newlevel->block) = NULL_TREE; |
| TREE_USED (newlevel->block) = 1; |
| |
| /* Add this level to the front of the chain (stack) of active levels. */ |
| newlevel->chain = current_binding_level; |
| newlevel->jmpbuf_decl = NULL_TREE; |
| current_binding_level = newlevel; |
| } |
| |
| /* Set SUPERCONTEXT of the BLOCK for the current binding level to FNDECL |
| and point FNDECL to this BLOCK. */ |
| |
| void |
| set_current_block_context (tree fndecl) |
| { |
| BLOCK_SUPERCONTEXT (current_binding_level->block) = fndecl; |
| DECL_INITIAL (fndecl) = current_binding_level->block; |
| set_block_for_group (current_binding_level->block); |
| } |
| |
| /* Set the jmpbuf_decl for the current binding level to DECL. */ |
| |
| void |
| set_block_jmpbuf_decl (tree decl) |
| { |
| current_binding_level->jmpbuf_decl = decl; |
| } |
| |
| /* Get the jmpbuf_decl, if any, for the current binding level. */ |
| |
| tree |
| get_block_jmpbuf_decl (void) |
| { |
| return current_binding_level->jmpbuf_decl; |
| } |
| |
| /* Exit a binding level. Set any BLOCK into the current code group. */ |
| |
| void |
| gnat_poplevel (void) |
| { |
| struct gnat_binding_level *level = current_binding_level; |
| tree block = level->block; |
| |
| BLOCK_VARS (block) = nreverse (BLOCK_VARS (block)); |
| BLOCK_SUBBLOCKS (block) = blocks_nreverse (BLOCK_SUBBLOCKS (block)); |
| |
| /* If this is a function-level BLOCK, don't do anything. Otherwise, if |
| there are no variables, free the block and merge its subblocks into those |
| of its parent block; otherwise, add it to the subblocks of its parent. */ |
| if (TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL) |
| ; |
| else if (!BLOCK_VARS (block)) |
| { |
| BLOCK_SUBBLOCKS (level->chain->block) |
| = block_chainon (BLOCK_SUBBLOCKS (block), |
| BLOCK_SUBBLOCKS (level->chain->block)); |
| BLOCK_CHAIN (block) = free_block_chain; |
| free_block_chain = block; |
| } |
| else |
| { |
| BLOCK_CHAIN (block) = BLOCK_SUBBLOCKS (level->chain->block); |
| BLOCK_SUBBLOCKS (level->chain->block) = block; |
| TREE_USED (block) = 1; |
| set_block_for_group (block); |
| } |
| |
| /* Free this binding structure. */ |
| current_binding_level = level->chain; |
| level->chain = free_binding_level; |
| free_binding_level = level; |
| } |
| |
| /* Exit a binding level and discard the associated BLOCK. */ |
| |
| void |
| gnat_zaplevel (void) |
| { |
| struct gnat_binding_level *level = current_binding_level; |
| tree block = level->block; |
| |
| BLOCK_CHAIN (block) = free_block_chain; |
| free_block_chain = block; |
| |
| /* Free this binding structure. */ |
| current_binding_level = level->chain; |
| level->chain = free_binding_level; |
| free_binding_level = level; |
| } |
| |
| /* Set the context of TYPE and its parallel types (if any) to CONTEXT. */ |
| |
| static void |
| gnat_set_type_context (tree type, tree context) |
| { |
| tree decl = TYPE_STUB_DECL (type); |
| |
| TYPE_CONTEXT (type) = context; |
| |
| while (decl && DECL_PARALLEL_TYPE (decl)) |
| { |
| tree parallel_type = DECL_PARALLEL_TYPE (decl); |
| |
| /* Give a context to the parallel types and their stub decl, if any. |
| Some parallel types seem to be present in multiple parallel type |
| chains, so don't mess with their context if they already have one. */ |
| if (!TYPE_CONTEXT (parallel_type)) |
| { |
| if (TYPE_STUB_DECL (parallel_type)) |
| DECL_CONTEXT (TYPE_STUB_DECL (parallel_type)) = context; |
| TYPE_CONTEXT (parallel_type) = context; |
| } |
| |
| decl = TYPE_STUB_DECL (DECL_PARALLEL_TYPE (decl)); |
| } |
| } |
| |
| /* Return the innermost scope, starting at GNAT_NODE, in which we are |
| interested for the debug info, or Empty if there is no such scope. If not |
| NULL, set IS_SUBPROGRAM to whether the returned entity is a subprogram. */ |
| |
| Entity_Id |
| get_debug_scope (Node_Id gnat_node, bool *is_subprogram) |
| { |
| Entity_Id gnat_entity; |
| |
| if (is_subprogram) |
| *is_subprogram = false; |
| |
| if (Nkind (gnat_node) == N_Defining_Identifier |
| || Nkind (gnat_node) == N_Defining_Operator_Symbol) |
| gnat_entity = Scope (gnat_node); |
| else |
| return Empty; |
| |
| while (Present (gnat_entity)) |
| { |
| switch (Ekind (gnat_entity)) |
| { |
| case E_Function: |
| case E_Procedure: |
| if (Present (Protected_Body_Subprogram (gnat_entity))) |
| gnat_entity = Protected_Body_Subprogram (gnat_entity); |
| |
| /* If the scope is a subprogram, then just rely on |
| current_function_decl, so that we don't have to defer |
| anything. This is needed because other places rely on the |
| validity of the DECL_CONTEXT attribute of FUNCTION_DECL nodes. */ |
| if (is_subprogram) |
| *is_subprogram = true; |
| return gnat_entity; |
| |
| case E_Record_Type: |
| case E_Record_Subtype: |
| return gnat_entity; |
| |
| default: |
| /* By default, we are not interested in this particular scope: go to |
| the outer one. */ |
| break; |
| } |
| |
| gnat_entity = Scope (gnat_entity); |
| } |
| |
| return Empty; |
| } |
| |
| /* If N is NULL, set TYPE's context to CONTEXT. Defer this to the processing |
| of N otherwise. */ |
| |
| static void |
| defer_or_set_type_context (tree type, tree context, |
| struct deferred_decl_context_node *n) |
| { |
| if (n) |
| add_deferred_type_context (n, type); |
| else |
| gnat_set_type_context (type, context); |
| } |
| |
| /* Return global_context, but create it first if need be. */ |
| |
| static tree |
| get_global_context (void) |
| { |
| if (!global_context) |
| { |
| global_context |
| = build_translation_unit_decl (get_identifier (main_input_filename)); |
| debug_hooks->register_main_translation_unit (global_context); |
| } |
| |
| return global_context; |
| } |
| |
| /* Record DECL as belonging to the current lexical scope and use GNAT_NODE |
| for location information and flag propagation. */ |
| |
| void |
| gnat_pushdecl (tree decl, Node_Id gnat_node) |
| { |
| tree context = NULL_TREE; |
| struct deferred_decl_context_node *deferred_decl_context = NULL; |
| |
| /* If explicitly asked to make DECL global or if it's an imported nested |
| object, short-circuit the regular Scope-based context computation. */ |
| if (!((TREE_PUBLIC (decl) && DECL_EXTERNAL (decl)) || force_global == 1)) |
| { |
| /* Rely on the GNAT scope, or fall back to the current_function_decl if |
| the GNAT scope reached the global scope, if it reached a subprogram, |
| or if the declaration is a subprogram or a variable (for them we skip |
| intermediate context types because the subprogram body elaboration |
| machinery and the inliner both expect a subprogram context). |
| |
| Falling back to current_function_decl is necessary for implicit |
| subprograms created by gigi, such as the elaboration subprograms. */ |
| bool context_is_subprogram = false; |
| const Entity_Id gnat_scope |
| = get_debug_scope (gnat_node, &context_is_subprogram); |
| |
| if (Present (gnat_scope) |
| && !context_is_subprogram |
| && TREE_CODE (decl) != FUNCTION_DECL |
| && TREE_CODE (decl) != VAR_DECL) |
| /* Always assume the scope has not been elaborated yet, and thus defer |
| the context propagation until its elaboration is available. */ |
| deferred_decl_context |
| = add_deferred_decl_context (decl, gnat_scope, force_global); |
| |
| /* External declarations (when force_global > 0) may not be in a |
| local context. */ |
| else if (current_function_decl && force_global == 0) |
| context = current_function_decl; |
| } |
| |
| /* If we are either forced into global mode or neither the GNAT scope nor |
| the current_function_decl helped in determining the context, use the |
| global scope. */ |
| if (!deferred_decl_context && !context) |
| context = get_global_context (); |
| |
| /* Functions imported in another function are not really nested. |
| For really nested functions mark them initially as needing |
| a static chain for uses of that flag before unnesting; |
| lower_nested_functions will then recompute it. */ |
| if (TREE_CODE (decl) == FUNCTION_DECL |
| && !TREE_PUBLIC (decl) |
| && context |
| && (TREE_CODE (context) == FUNCTION_DECL |
| || decl_function_context (context))) |
| DECL_STATIC_CHAIN (decl) = 1; |
| |
| if (!deferred_decl_context) |
| DECL_CONTEXT (decl) = context; |
| |
| suppress_warning (decl, all_warnings, |
| No (gnat_node) || Warnings_Off (gnat_node)); |
| |
| /* Set the location of DECL and emit a declaration for it. */ |
| if (Present (gnat_node) && !renaming_from_instantiation_p (gnat_node)) |
| Sloc_to_locus (Sloc (gnat_node), &DECL_SOURCE_LOCATION (decl)); |
| |
| add_decl_expr (decl, gnat_node); |
| |
| /* Put the declaration on the list. The list of declarations is in reverse |
| order. The list will be reversed later. Put global declarations in the |
| globals list and local ones in the current block. But skip TYPE_DECLs |
| for UNCONSTRAINED_ARRAY_TYPE in both cases, as they will cause trouble |
| with the debugger and aren't needed anyway. */ |
| if (!(TREE_CODE (decl) == TYPE_DECL |
| && TREE_CODE (TREE_TYPE (decl)) == UNCONSTRAINED_ARRAY_TYPE)) |
| { |
| /* External declarations must go to the binding level they belong to. |
| This ensures that the corresponding imported entities are available in |
| the debugger at the proper time. */ |
| if (DECL_EXTERNAL (decl) |
| && TREE_CODE (decl) == FUNCTION_DECL |
| && fndecl_built_in_p (decl)) |
| vec_safe_push (builtin_decls, decl); |
| else if (global_bindings_p ()) |
| vec_safe_push (global_decls, decl); |
| else |
| { |
| DECL_CHAIN (decl) = BLOCK_VARS (current_binding_level->block); |
| BLOCK_VARS (current_binding_level->block) = decl; |
| } |
| } |
| |
| /* For the declaration of a type, set its name either if it isn't already |
| set or if the previous type name was not derived from a source name. |
| We'd rather have the type named with a real name and all the pointer |
| types to the same object have the same node, except when the names are |
| both derived from source names. */ |
| if (TREE_CODE (decl) == TYPE_DECL && DECL_NAME (decl)) |
| { |
| tree t = TREE_TYPE (decl); |
| |
| /* Array and pointer types aren't tagged types in the C sense so we need |
| to generate a typedef in DWARF for them and make sure it is preserved, |
| unless the type is artificial. */ |
| if (!(TYPE_NAME (t) && TREE_CODE (TYPE_NAME (t)) == TYPE_DECL) |
| && ((TREE_CODE (t) != ARRAY_TYPE && TREE_CODE (t) != POINTER_TYPE) |
| || DECL_ARTIFICIAL (decl))) |
| ; |
| /* For array and pointer types, create the DECL_ORIGINAL_TYPE that will |
| generate the typedef in DWARF. Also do that for fat pointer types |
| because, even though they are tagged types in the C sense, they are |
| still XUP types attached to the base array type at this point. */ |
| else if (!DECL_ARTIFICIAL (decl) |
| && (TREE_CODE (t) == ARRAY_TYPE |
| || TREE_CODE (t) == POINTER_TYPE |
| || TYPE_IS_FAT_POINTER_P (t))) |
| { |
| tree tt = build_variant_type_copy (t); |
| TYPE_NAME (tt) = decl; |
| defer_or_set_type_context (tt, |
| DECL_CONTEXT (decl), |
| deferred_decl_context); |
| TREE_TYPE (decl) = tt; |
| if (TYPE_NAME (t) |
| && TREE_CODE (TYPE_NAME (t)) == TYPE_DECL |
| && DECL_ORIGINAL_TYPE (TYPE_NAME (t))) |
| DECL_ORIGINAL_TYPE (decl) = DECL_ORIGINAL_TYPE (TYPE_NAME (t)); |
| else |
| DECL_ORIGINAL_TYPE (decl) = t; |
| /* Array types need to have a name so that they can be related to |
| their GNAT encodings. */ |
| if (TREE_CODE (t) == ARRAY_TYPE && !TYPE_NAME (t)) |
| TYPE_NAME (t) = DECL_NAME (decl); |
| /* Re-mark the canonical fat pointer type as artificial. */ |
| if (TYPE_IS_FAT_POINTER_P (t)) |
| TYPE_ARTIFICIAL (t) = 1; |
| t = NULL_TREE; |
| } |
| else if (TYPE_NAME (t) |
| && TREE_CODE (TYPE_NAME (t)) == TYPE_DECL |
| && DECL_ARTIFICIAL (TYPE_NAME (t)) && !DECL_ARTIFICIAL (decl)) |
| ; |
| else |
| t = NULL_TREE; |
| |
| /* Propagate the name to all the variants, this is needed for the type |
| qualifiers machinery to work properly (see check_qualified_type). |
| Also propagate the context to them. Note that it will be propagated |
| to all parallel types too thanks to gnat_set_type_context. */ |
| if (t) |
| for (t = TYPE_MAIN_VARIANT (t); t; t = TYPE_NEXT_VARIANT (t)) |
| /* ??? Because of the previous kludge, we can have variants of fat |
| pointer types with different names. */ |
| if (!(TYPE_IS_FAT_POINTER_P (t) |
| && TYPE_NAME (t) |
| && TREE_CODE (TYPE_NAME (t)) == TYPE_DECL)) |
| { |
| TYPE_NAME (t) = decl; |
| defer_or_set_type_context (t, |
| DECL_CONTEXT (decl), |
| deferred_decl_context); |
| } |
| } |
| } |
| |
| /* Create a record type that contains a field of TYPE, SIZE bytes long, whose |
| starting bit position is chosen so that the field is aligned to ALIGN bits |
| and at least ROOM bytes are left free before it. BASE_ALIGN is the |
| alignment the record is guaranteed to get. GNAT_NODE is used for the |
| position of the associated TYPE_DECL. */ |
| |
| tree |
| make_aligning_type (tree type, unsigned int align, tree size, |
| unsigned int base_align, int room, Node_Id gnat_node) |
| { |
| /* We will be crafting a record type with one field at a position set to be |
| the next multiple of ALIGN past record'address + room bytes. We use a |
| record placeholder to express record'address. */ |
| tree record_type = make_node (RECORD_TYPE); |
| tree record = build0 (PLACEHOLDER_EXPR, record_type); |
| |
| tree record_addr_st |
| = convert (sizetype, build_unary_op (ADDR_EXPR, NULL_TREE, record)); |
| |
| /* The diagram below summarizes the shape of what we manipulate: |
| |
| <--------- pos ----------> |
| { +------------+-------------+-----------------+ |
| record =>{ |############| ... | field (type) | |
| { +------------+-------------+-----------------+ |
| |<-- room -->|<- voffset ->|<---- size ----->| |
| o o |
| | | |
| record_addr vblock_addr |
| |
| All lengths above are in sizetype bytes, except "pos", which has to be |
| set as a bit position in the GCC tree for the record. */ |
| tree room_st = size_int (room); |
| tree vblock_addr_st = size_binop (PLUS_EXPR, record_addr_st, room_st); |
| tree voffset_st, pos, field; |
| |
| tree name = TYPE_IDENTIFIER (type); |
| |
| name = concat_name (name, "ALIGN"); |
| TYPE_NAME (record_type) = name; |
| |
| /* Compute VOFFSET and then POS. The next byte position multiple of some |
| alignment after some address is obtained by "and"ing the alignment minus |
| 1 with the two's complement of the address. */ |
| voffset_st = size_binop (BIT_AND_EXPR, |
| fold_build1 (NEGATE_EXPR, sizetype, vblock_addr_st), |
| size_int ((align / BITS_PER_UNIT) - 1)); |
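| |
| /* For instance, with ALIGN = 32 bits (4 bytes) and vblock_addr = 19, the |
| mask is 3 and voffset = (-19) & 3 = 1, so 19 + 1 = 20 is indeed the next |
| multiple of 4. */ |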
| |
| /* POS = (ROOM + VOFFSET) * BITS_PER_UNIT, in bitsizetype. */ |
| pos = size_binop (MULT_EXPR, |
| convert (bitsizetype, |
| size_binop (PLUS_EXPR, room_st, voffset_st)), |
| bitsize_unit_node); |
| |
| /* Craft the GCC record representation. We exceptionally do everything |
| manually here because 1) our generic circuitry is not quite ready to |
| handle the complex position/size expressions we are setting up, 2) we |
| have a strong simplifying factor at hand: we know the maximum possible |
| value of voffset, and 3) we have to set/reset at least the sizes in |
| accordance with this maximum value anyway, as we need them to convey |
| what should be "alloc"ated for this type. |
| |
| Use -1 as the 'addressable' indication for the field to prevent the |
| creation of a bitfield. We don't need one, it would have damaging |
| consequences on the alignment computation, and create_field_decl would |
| make one without this special argument, for instance because of the |
| complex position expression. */ |
| field = create_field_decl (get_identifier ("F"), type, record_type, size, |
| pos, 1, -1); |
| TYPE_FIELDS (record_type) = field; |
| |
| SET_TYPE_ALIGN (record_type, base_align); |
| TYPE_USER_ALIGN (record_type) = 1; |
| |
| TYPE_SIZE (record_type) |
| = size_binop (PLUS_EXPR, |
| size_binop (MULT_EXPR, convert (bitsizetype, size), |
| bitsize_unit_node), |
| bitsize_int (align + room * BITS_PER_UNIT)); |
| TYPE_SIZE_UNIT (record_type) |
| = size_binop (PLUS_EXPR, size, |
| size_int (room + align / BITS_PER_UNIT)); |
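| |
| /* The ALIGN term in TYPE_SIZE, like the ALIGN / BITS_PER_UNIT term in |
| TYPE_SIZE_UNIT, covers the worst-case VOFFSET computed above, which is |
| ALIGN / BITS_PER_UNIT - 1 bytes. */ |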
| |
| SET_TYPE_MODE (record_type, BLKmode); |
| relate_alias_sets (record_type, type, ALIAS_SET_COPY); |
| |
| /* Declare it now since it will never be declared otherwise. This is |
| necessary to ensure that its subtrees are properly marked. */ |
| create_type_decl (name, record_type, true, false, gnat_node); |
| |
| return record_type; |
| } |
| |
| /* Return true iff the packable types are equivalent. */ |
| |
| bool |
| packable_type_hasher::equal (packable_type_hash *t1, packable_type_hash *t2) |
| { |
| tree type1, type2; |
| |
| if (t1->hash != t2->hash) |
| return 0; |
| |
| type1 = t1->type; |
| type2 = t2->type; |
| |
| /* We consider that packable types are equivalent if they have the same name, |
| size, alignment, RM size and storage order. Taking the mode into account |
| is redundant since it is determined by the others. */ |
| return |
| TYPE_NAME (type1) == TYPE_NAME (type2) |
| && TYPE_SIZE (type1) == TYPE_SIZE (type2) |
| && TYPE_ALIGN (type1) == TYPE_ALIGN (type2) |
| && TYPE_ADA_SIZE (type1) == TYPE_ADA_SIZE (type2) |
| && TYPE_REVERSE_STORAGE_ORDER (type1) == TYPE_REVERSE_STORAGE_ORDER (type2); |
| } |
| |
| /* Compute the hash value for the packable TYPE. */ |
| |
| static hashval_t |
| hash_packable_type (tree type) |
| { |
| hashval_t hashcode; |
| |
| hashcode = iterative_hash_expr (TYPE_NAME (type), 0); |
| hashcode = iterative_hash_expr (TYPE_SIZE (type), hashcode); |
| hashcode = iterative_hash_hashval_t (TYPE_ALIGN (type), hashcode); |
| hashcode = iterative_hash_expr (TYPE_ADA_SIZE (type), hashcode); |
| hashcode |
| = iterative_hash_hashval_t (TYPE_REVERSE_STORAGE_ORDER (type), hashcode); |
| |
| return hashcode; |
| } |
| |
| /* Look up the packable TYPE in the hash table and return its canonical version |
| if it exists; otherwise, insert it into the hash table. */ |
| |
| static tree |
| canonicalize_packable_type (tree type) |
| { |
| const hashval_t hashcode = hash_packable_type (type); |
| struct packable_type_hash in, *h, **slot; |
| |
| in.hash = hashcode; |
| in.type = type; |
| slot = packable_type_hash_table->find_slot_with_hash (&in, hashcode, INSERT); |
| h = *slot; |
| if (!h) |
| { |
| h = ggc_alloc<packable_type_hash> (); |
| h->hash = hashcode; |
| h->type = type; |
| *slot = h; |
| } |
| |
| return h->type; |
| } |
| |
| /* TYPE is an ARRAY_TYPE that is being used as the type of a field in a packed |
| record. See if we can rewrite it as a type that has non-BLKmode, which we |
| can pack tighter in the packed record. If so, return the new type; if not, |
| return the original type. */ |
| |
| static tree |
| make_packable_array_type (tree type) |
| { |
| const unsigned HOST_WIDE_INT size = tree_to_uhwi (TYPE_SIZE (type)); |
| unsigned HOST_WIDE_INT new_size; |
| unsigned int new_align; |
| |
| /* No point in doing anything if the size is either zero or too large for an |
| integral mode, or if the type already has non-BLKmode. */ |
| if (size == 0 || size > MAX_FIXED_MODE_SIZE || TYPE_MODE (type) != BLKmode) |
| return type; |
| |
| /* Punt if the component type is an aggregate type for now. */ |
| if (AGGREGATE_TYPE_P (TREE_TYPE (type))) |
| return type; |
| |
| tree new_type = copy_type (type); |
| |
| new_size = ceil_pow2 (size); |
| new_align = MIN (new_size, BIGGEST_ALIGNMENT); |
| SET_TYPE_ALIGN (new_type, new_align); |
| |
| TYPE_SIZE (new_type) = bitsize_int (new_size); |
| TYPE_SIZE_UNIT (new_type) = size_int (new_size / BITS_PER_UNIT); |
| |
| SET_TYPE_MODE (new_type, mode_for_size (new_size, MODE_INT, 1).else_blk ()); |
| |
| return new_type; |
| } |
| |
| /* TYPE is a RECORD_TYPE, UNION_TYPE or QUAL_UNION_TYPE that is being used |
| as the type of a field in a packed record if IN_RECORD is true, or as |
| the component type of a packed array if IN_RECORD is false. See if we |
| can rewrite it either as a type that has non-BLKmode, which we can pack |
| tighter in the packed record case, or as a smaller type with at most |
| MAX_ALIGN alignment if the value is non-zero. If so, return the new |
| type; if not, return the original type. */ |
| |
| tree |
| make_packable_type (tree type, bool in_record, unsigned int max_align) |
| { |
| const unsigned HOST_WIDE_INT size = tree_to_uhwi (TYPE_SIZE (type)); |
| const unsigned int align = TYPE_ALIGN (type); |
| unsigned HOST_WIDE_INT new_size; |
| unsigned int new_align; |
| |
| /* No point in doing anything if the size is zero. */ |
| if (size == 0) |
| return type; |
| |
| tree new_type = make_node (TREE_CODE (type)); |
| |
| /* Copy the name and flags from the old type to that of the new. |
| Note that we rely on the pointer equality created here for |
| TYPE_NAME to look through conversions in various places. */ |
| TYPE_NAME (new_type) = TYPE_NAME (type); |
| TYPE_PACKED (new_type) = 1; |
| TYPE_JUSTIFIED_MODULAR_P (new_type) = TYPE_JUSTIFIED_MODULAR_P (type); |
| TYPE_CONTAINS_TEMPLATE_P (new_type) = TYPE_CONTAINS_TEMPLATE_P (type); |
| TYPE_REVERSE_STORAGE_ORDER (new_type) = TYPE_REVERSE_STORAGE_ORDER (type); |
| if (TREE_CODE (type) == RECORD_TYPE) |
| TYPE_PADDING_P (new_type) = TYPE_PADDING_P (type); |
| |
| /* If we are in a record and have a small size, set the alignment to |
| try for an integral mode. Otherwise set it to try for a smaller |
| type with BLKmode. */ |
| if (in_record && size <= MAX_FIXED_MODE_SIZE) |
| { |
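| /* ceil_pow2 rounds SIZE up to the next power of 2, e.g. 24 bits become |
| 32 bits, thus yielding a size for which an integral mode may exist. */ |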
| new_size = ceil_pow2 (size); |
| new_align = MIN (new_size, BIGGEST_ALIGNMENT); |
| SET_TYPE_ALIGN (new_type, new_align); |
| } |
| else |
| { |
| tree ada_size = TYPE_ADA_SIZE (type); |
| |
| /* Do not try to shrink the size if the RM size is not constant. */ |
| if (TYPE_CONTAINS_TEMPLATE_P (type) || !tree_fits_uhwi_p (ada_size)) |
| return type; |
| |
| /* Round the RM size up to a unit boundary to get the minimal size |
| for a BLKmode record. Give up if it's already the size and we |
| don't need to lower the alignment. */ |
| new_size = tree_to_uhwi (ada_size); |
| new_size = (new_size + BITS_PER_UNIT - 1) & -BITS_PER_UNIT; |
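| /* With the usual 8-bit storage unit, the line above rounds e.g. a 12-bit |
| RM size up to 16 bits: adding BITS_PER_UNIT - 1 and masking with |
| -BITS_PER_UNIT yields the next multiple of BITS_PER_UNIT. */ |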
| if (new_size == size && (max_align == 0 || align <= max_align)) |
| return type; |
| |
| /* NEW_SIZE & -NEW_SIZE isolates the least significant set bit of NEW_SIZE, |
| i.e. the largest power of 2 that divides it and thus the strictest |
| alignment this size can support. */ |
| new_align = MIN (new_size & -new_size, BIGGEST_ALIGNMENT); |
| if (max_align > 0 && new_align > max_align) |
| new_align = max_align; |
| SET_TYPE_ALIGN (new_type, MIN (align, new_align)); |
| } |
| |
| TYPE_USER_ALIGN (new_type) = 1; |
| |
| /* Now copy the fields, keeping the position and size as we don't want |
| to change the layout by propagating the packedness downwards. */ |
| tree new_field_list = NULL_TREE; |
| for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) |
| { |
| tree new_field_type = TREE_TYPE (field); |
| tree new_field, new_field_size; |
| |
| if (AGGREGATE_TYPE_P (new_field_type) |
| && tree_fits_uhwi_p (TYPE_SIZE (new_field_type))) |
| { |
| if (RECORD_OR_UNION_TYPE_P (new_field_type) |
| && !TYPE_FAT_POINTER_P (new_field_type)) |
| new_field_type |
| = make_packable_type (new_field_type, true, max_align); |
| else if (in_record |
| && max_align > 0 |
| && max_align < BIGGEST_ALIGNMENT |
| && TREE_CODE (new_field_type) == ARRAY_TYPE) |
| new_field_type = make_packable_array_type (new_field_type); |
| } |
| |
| /* However, for the last field in a not already packed record type |
| that is of an aggregate type, we need to use the RM size in the |
| packable version of the record type, see finish_record_type. */ |
| if (!DECL_CHAIN (field) |
| && !TYPE_PACKED (type) |
| && RECORD_OR_UNION_TYPE_P (new_field_type) |
| && !TYPE_FAT_POINTER_P (new_field_type) |
| && !TYPE_CONTAINS_TEMPLATE_P (new_field_type) |
| && TYPE_ADA_SIZE (new_field_type)) |
| new_field_size = TYPE_ADA_SIZE (new_field_type); |
| else |
| { |
| new_field_size = DECL_SIZE (field); |
| |
| /* Make sure not to use too small a type for the size. */ |
| if (TYPE_MODE (new_field_type) == BLKmode) |
| new_field_type = TREE_TYPE (field); |
| } |
| |
| /* This is a layout with full representation, alignment and size clauses |
| so we simply pass 0 as PACKED like gnat_to_gnu_field in this case. */ |
| new_field |
| = create_field_decl (DECL_NAME (field), new_field_type, new_type, |
| new_field_size, bit_position (field), 0, |
| !DECL_NONADDRESSABLE_P (field)); |
| |
| DECL_INTERNAL_P (new_field) = DECL_INTERNAL_P (field); |
| SET_DECL_ORIGINAL_FIELD_TO_FIELD (new_field, field); |
| if (TREE_CODE (new_type) == QUAL_UNION_TYPE) |
| DECL_QUALIFIER (new_field) = DECL_QUALIFIER (field); |
| |
| DECL_CHAIN (new_field) = new_field_list; |
| new_field_list = new_field; |
| } |
| |
| /* If this is a padding record, we never want to make the size smaller |
| than what was specified. For QUAL_UNION_TYPE, also copy the size. */ |
| if (TYPE_IS_PADDING_P (type) || TREE_CODE (type) == QUAL_UNION_TYPE) |
| { |
| TYPE_SIZE (new_type) = TYPE_SIZE (type); |
| TYPE_SIZE_UNIT (new_type) = TYPE_SIZE_UNIT (type); |
| new_size = size; |
| } |
| else |
| { |
| TYPE_SIZE (new_type) = bitsize_int (new_size); |
| TYPE_SIZE_UNIT (new_type) = size_int (new_size / BITS_PER_UNIT); |
| } |
| |
| if (!TYPE_CONTAINS_TEMPLATE_P (type)) |
| SET_TYPE_ADA_SIZE (new_type, TYPE_ADA_SIZE (type)); |
| |
| finish_record_type (new_type, nreverse (new_field_list), 2, false); |
| relate_alias_sets (new_type, type, ALIAS_SET_COPY); |
| if (gnat_encodings != DWARF_GNAT_ENCODINGS_ALL) |
| SET_TYPE_DEBUG_TYPE (new_type, TYPE_DEBUG_TYPE (type)); |
| else if (TYPE_STUB_DECL (type)) |
| SET_DECL_PARALLEL_TYPE (TYPE_STUB_DECL (new_type), |
| DECL_PARALLEL_TYPE (TYPE_STUB_DECL (type))); |
| |
| /* Try harder to get a packable type if necessary, for example in case |
| the record itself contains a BLKmode field. */ |
| if (in_record && TYPE_MODE (new_type) == BLKmode) |
| SET_TYPE_MODE (new_type, |
| mode_for_size_tree (TYPE_SIZE (new_type), |
| MODE_INT, 1).else_blk ()); |
| |
| /* If neither mode nor size nor alignment shrunk, return the old type. */ |
| if (TYPE_MODE (new_type) == BLKmode && new_size >= size && max_align == 0) |
| return type; |
| |
| /* If the packable type is named, we canonicalize it by means of the hash |
| table. This is consistent with the language semantics and ensures that |
| gigi and the middle-end have a common view of these packable types. */ |
| return |
| TYPE_NAME (new_type) ? canonicalize_packable_type (new_type) : new_type; |
| } |
| |
| /* Return true if TYPE has an unsigned representation. This needs to be used |
| when the representation of types whose precision is not equal to their size |
| is manipulated based on the RM size. */ |
| |
| static inline bool |
| type_unsigned_for_rm (tree type) |
| { |
| /* This is the common case. */ |
| if (TYPE_UNSIGNED (type)) |
| return true; |
| |
| /* A signed subtype whose lower bound is constant and non-negative, e.g. |
| range 0 .. 100, is also represented as unsigned; see the |
| E_Signed_Integer_Subtype case of gnat_to_gnu_entity. */ |
| if (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST |
| && tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0) |
| return true; |
| |
| return false; |
| } |
| |
| /* Given a type TYPE, return a new type whose size is appropriate for SIZE. |
| If TYPE is the best type, return it. Otherwise, make a new type. We |
| only support new integral and pointer types. FOR_BIASED is true if |
| we are making a biased type. */ |
| |
| tree |
| make_type_from_size (tree type, tree size_tree, bool for_biased) |
| { |
| unsigned HOST_WIDE_INT size; |
| bool biased_p; |
| tree new_type; |
| |
| /* If size indicates an error, just return TYPE to avoid propagating |
| the error. Likewise if it's too large to represent. */ |
| if (!size_tree || !tree_fits_uhwi_p (size_tree)) |
| return type; |
| |
| size = tree_to_uhwi (size_tree); |
| |
| switch (TREE_CODE (type)) |
| { |
| case BOOLEAN_TYPE: |
| /* Do not mess with boolean types that have foreign convention. */ |
| if (TYPE_PRECISION (type) == 1 && TYPE_SIZE (type) == size_tree) |
| break; |
| |
| /* ... fall through ... */ |
| |
| case INTEGER_TYPE: |
| case ENUMERAL_TYPE: |
| biased_p = (TREE_CODE (type) == INTEGER_TYPE |
| && TYPE_BIASED_REPRESENTATION_P (type)); |
| |
| /* Integer types with precision 0 are forbidden. */ |
| if (size == 0) |
| size = 1; |
| |
| /* Only do something if the type is not a bit-packed array type and does |
| not already have the proper size and the size is not too large. */ |
| if (BIT_PACKED_ARRAY_TYPE_P (type) |
| || (TYPE_PRECISION (type) == size && biased_p == for_biased) |
| || size > (Enable_128bit_Types ? 128 : LONG_LONG_TYPE_SIZE)) |
| break; |
| |
| biased_p |= for_biased; |
| |
| /* The type should be an unsigned type if the original type is unsigned |
| or if the lower bound is constant and non-negative or if the type is |
| biased, see E_Signed_Integer_Subtype case of gnat_to_gnu_entity. */ |
| if (type_unsigned_for_rm (type) || biased_p) |
| new_type = make_unsigned_type (size); |
| else |
| new_type = make_signed_type (size); |
| TREE_TYPE (new_type) = TREE_TYPE (type) ? TREE_TYPE (type) : type; |
| SET_TYPE_RM_MIN_VALUE (new_type, TYPE_MIN_VALUE (type)); |
| SET_TYPE_RM_MAX_VALUE (new_type, TYPE_MAX_VALUE (type)); |
| /* Copy the name to show that it's essentially the same type and |
| not a subrange type. */ |
| TYPE_NAME (new_type) = TYPE_NAME (type); |
| TYPE_BIASED_REPRESENTATION_P (new_type) = biased_p; |
| SET_TYPE_RM_SIZE (new_type, bitsize_int (size)); |
| return new_type; |
| |
| case RECORD_TYPE: |
| /* Do something if this is a fat pointer, in which case we |
| may need to return the thin pointer. */ |
| if (TYPE_FAT_POINTER_P (type) && size < POINTER_SIZE * 2) |
| { |
| scalar_int_mode p_mode; |
| if (!int_mode_for_size (size, 0).exists (&p_mode) |
| || !targetm.valid_pointer_mode (p_mode)) |
| p_mode = ptr_mode; |
| return |
| build_pointer_type_for_mode |
| (TYPE_OBJECT_RECORD_TYPE (TYPE_UNCONSTRAINED_ARRAY (type)), |
| p_mode, 0); |
| } |
| break; |
| |
| case POINTER_TYPE: |
| /* Only do something if this is a thin pointer, in which case we |
| may need to return the fat pointer. */ |
| if (TYPE_IS_THIN_POINTER_P (type) && size >= POINTER_SIZE * 2) |
| return |
| build_pointer_type (TYPE_UNCONSTRAINED_ARRAY (TREE_TYPE (type))); |
| break; |
| |
| default: |
| break; |
| } |
| |
| return type; |
| } |
| |
| /* Return true iff the padded types are equivalent. */ |
| |
| bool |
| pad_type_hasher::equal (pad_type_hash *t1, pad_type_hash *t2) |
| { |
| tree type1, type2; |
| |
| if (t1->hash != t2->hash) |
| return 0; |
| |
| type1 = t1->type; |
| type2 = t2->type; |
| |
| /* We consider that padded types are equivalent if they pad the same type |
| and have the same size, alignment, RM size and storage order. Taking the |
| mode into account is redundant since it is determined by the others. */ |
| return |
| TREE_TYPE (TYPE_FIELDS (type1)) == TREE_TYPE (TYPE_FIELDS (type2)) |
| && TYPE_SIZE (type1) == TYPE_SIZE (type2) |
| && TYPE_ALIGN (type1) == TYPE_ALIGN (type2) |
| && TYPE_ADA_SIZE (type1) == TYPE_ADA_SIZE (type2) |
| && TYPE_REVERSE_STORAGE_ORDER (type1) == TYPE_REVERSE_STORAGE_ORDER (type2); |
| } |
| |
| /* Compute the hash value for the padded TYPE. */ |
| |
| static hashval_t |
| hash_pad_type (tree type) |
| { |
| hashval_t hashcode; |
| |
| hashcode |
| = iterative_hash_object (TYPE_HASH (TREE_TYPE (TYPE_FIELDS (type))), 0); |
| hashcode = iterative_hash_expr (TYPE_SIZE (type), hashcode); |
| hashcode = iterative_hash_hashval_t (TYPE_ALIGN (type), hashcode); |
| hashcode = iterative_hash_expr (TYPE_ADA_SIZE (type), hashcode); |
| hashcode |
| = iterative_hash_hashval_t (TYPE_REVERSE_STORAGE_ORDER (type), hashcode); |
| |
| return hashcode; |
| } |
| |
| /* Look up the padded TYPE in the hash table and return its canonical version |
| if it exists; otherwise, insert it into the hash table. */ |
| |
| static tree |
| canonicalize_pad_type (tree type) |
| { |
| const hashval_t hashcode = hash_pad_type (type); |
| struct pad_type_hash in, *h, **slot; |
| |
| in.hash = hashcode; |
| in.type = type; |
| slot = pad_type_hash_table->find_slot_with_hash (&in, hashcode, INSERT); |
| h = *slot; |
| if (!h) |
| { |
| h = ggc_alloc<pad_type_hash> (); |
| h->hash = hashcode; |
| h->type = type; |
| *slot = h; |
| } |
| |
| return h->type; |
| } |
| |
| /* Ensure that TYPE has SIZE and ALIGN. Make and return a new padded type |
| if needed. We have already verified that SIZE and ALIGN are large enough. |
| GNAT_ENTITY is used to name the resulting record and to issue a warning. |
| IS_COMPONENT_TYPE is true if this is being done for the component type of |
| an array. DEFINITION is true if this type is being defined. SET_RM_SIZE |
| is true if the RM size of the resulting type is to be set to SIZE too; in |
| this case, the padded type is canonicalized before being returned. */ |
| |
| tree |
| maybe_pad_type (tree type, tree size, unsigned int align, |
| Entity_Id gnat_entity, bool is_component_type, |
| bool definition, bool set_rm_size) |
| { |
| tree orig_size = TYPE_SIZE (type); |
| unsigned int orig_align = TYPE_ALIGN (type); |
| tree record, field; |
| |
| /* If TYPE is a padded type, see if it agrees with any size and alignment |
| we were given. If so, return the original type. Otherwise, strip |
| off the padding, since we will either be returning the inner type |
| or repadding it. If no size or alignment is specified, use that of |
| the original padded type. */ |
| if (TYPE_IS_PADDING_P (type)) |
| { |
| if ((!size |
| || operand_equal_p (round_up (size, orig_align), orig_size, 0)) |
| && (align == 0 || align == orig_align)) |
| return type; |
| |
| if (!size) |
| size = orig_size; |
| if (align == 0) |
| align = orig_align; |
| |
| type = TREE_TYPE (TYPE_FIELDS (type)); |
| orig_size = TYPE_SIZE (type); |
| orig_align = TYPE_ALIGN (type); |
| } |
| |
| /* If the size is either not being changed or is being made smaller (which |
| is not done here and is only valid for bitfields anyway), show the size |
| isn't changing. Likewise, clear the alignment if it isn't being |
| changed. Then return if we aren't doing anything. */ |
| if (size |
| && (operand_equal_p (size, orig_size, 0) |
| || (TREE_CODE (orig_size) == INTEGER_CST |
| && tree_int_cst_lt (size, orig_size)))) |
| size = NULL_TREE; |
| |
| if (align == orig_align) |
| align = 0; |
| |
| if (align == 0 && !size) |
| return type; |
| |
| /* We used to modify the record in place in some cases, but that could |
| generate incorrect debugging information. So make a new record |
| type and name. */ |
| record = make_node (RECORD_TYPE); |
| TYPE_PADDING_P (record) = 1; |
| |
| if (Present (gnat_entity)) |
| TYPE_NAME (record) = create_concat_name (gnat_entity, "PAD"); |
| |
| SET_TYPE_ALIGN (record, align ? align : orig_align); |
| TYPE_SIZE (record) = size ? size : orig_size; |
| TYPE_SIZE_UNIT (record) |
| = convert (sizetype, |
| size_binop (EXACT_DIV_EXPR, TYPE_SIZE (record), |
| bitsize_unit_node)); |
| |
| /* If we are changing the alignment and the input type is a record with |
| BLKmode and a small constant size, try to make a form that has an |
| integral mode. This might allow the padding record to also have an |
| integral mode, which will be much more efficient. There is no point |
| in doing so if a size is specified unless it is also a small constant |
| size, and it is incorrect to do so if we cannot guarantee that the mode
| will be naturally aligned since the field must always be addressable. |
| |
| ??? This might not always be a win when done for a stand-alone object: |
| since the nominal and the effective type of the object will now have |
| different modes, a VIEW_CONVERT_EXPR will be required for converting |
| between them and it might be hard to overcome afterwards, including |
| at the RTL level when the stand-alone object is accessed as a whole. */ |
| if (align > 0 |
| && RECORD_OR_UNION_TYPE_P (type) |
| && TYPE_MODE (type) == BLKmode |
| && !TYPE_BY_REFERENCE_P (type) |
| && TREE_CODE (orig_size) == INTEGER_CST |
| && !TREE_OVERFLOW (orig_size) |
| && compare_tree_int (orig_size, MAX_FIXED_MODE_SIZE) <= 0 |
| && (!size |
| || (TREE_CODE (size) == INTEGER_CST |
| && compare_tree_int (size, MAX_FIXED_MODE_SIZE) <= 0))) |
| { |
| tree packable_type = make_packable_type (type, true, align); |
| if (TYPE_MODE (packable_type) != BLKmode |
| && compare_tree_int (TYPE_SIZE (packable_type), align) <= 0) |
| type = packable_type; |
| } |
| |
| /* Now create the field with the original size. */ |
| field = create_field_decl (get_identifier ("F"), type, record, orig_size, |
| bitsize_zero_node, 0, 1); |
| DECL_INTERNAL_P (field) = 1; |
| |
| /* We will output additional debug info manually below. */ |
| finish_record_type (record, field, 1, false); |
| |
| /* Set the RM size if requested. */ |
| if (set_rm_size) |
| { |
| SET_TYPE_ADA_SIZE (record, size ? size : orig_size); |
| |
| /* If the padded type is complete and has constant size, we canonicalize |
| it by means of the hash table. This is consistent with the language |
| semantics and ensures that gigi and the middle-end have a common view |
| of these padded types. */ |
| if (TREE_CONSTANT (TYPE_SIZE (record))) |
| { |
| tree canonical = canonicalize_pad_type (record); |
| if (canonical != record) |
| { |
| record = canonical; |
| goto built; |
| } |
| } |
| } |
| |
| /* Make the inner type the debug type of the padded type. */ |
| if (gnat_encodings != DWARF_GNAT_ENCODINGS_ALL) |
| SET_TYPE_DEBUG_TYPE (record, maybe_debug_type (type)); |
| |
| /* If debugging information is being written for the input type, write a
| record that shows what we are a subtype of and also make a variable that
| indicates our size, if it is still variable.  */
| if (TREE_CODE (orig_size) != INTEGER_CST |
| && TYPE_NAME (record) |
| && TYPE_NAME (type) |
| && !(TREE_CODE (TYPE_NAME (type)) == TYPE_DECL |
| && DECL_IGNORED_P (TYPE_NAME (type)))) |
| { |
| tree name = TYPE_IDENTIFIER (record); |
| tree size_unit = TYPE_SIZE_UNIT (record); |
| |
| /* A variable that holds the size is required even with no encoding since |
| it will be referenced by debugging information attributes. At global |
| level, we need a single variable across all translation units. */ |
| if (size |
| && TREE_CODE (size) != INTEGER_CST |
| && (definition || global_bindings_p ())) |
| { |
| /* Whether or not gnat_entity comes from source, this XVZ variable is
| a compilation artifact.  */
| size_unit |
| = create_var_decl (concat_name (name, "XVZ"), NULL_TREE, sizetype, |
| size_unit, true, global_bindings_p (), |
| !definition && global_bindings_p (), false, |
| false, true, true, NULL, gnat_entity, false); |
| TYPE_SIZE_UNIT (record) = size_unit; |
| } |
| |
| /* There is no need to show what we are a subtype of when outputting as |
| few encodings as possible: regular debugging information makes this
| redundant. */ |
| if (gnat_encodings == DWARF_GNAT_ENCODINGS_ALL) |
| { |
| tree marker = make_node (RECORD_TYPE); |
| tree orig_name = TYPE_IDENTIFIER (type); |
| |
| TYPE_NAME (marker) = concat_name (name, "XVS"); |
| finish_record_type (marker, |
| create_field_decl (orig_name, |
| build_reference_type (type), |
| marker, NULL_TREE, NULL_TREE, |
| 0, 0), |
| 0, true); |
| TYPE_SIZE_UNIT (marker) = size_unit; |
| |
| add_parallel_type (record, marker); |
| } |
| } |
| |
| built: |
| /* If a simple size was explicitly given, maybe issue a warning. */ |
| if (!size |
| || TREE_CODE (size) == COND_EXPR |
| || TREE_CODE (size) == MAX_EXPR |
| || No (gnat_entity)) |
| return record; |
| |
| /* But don't do it if we are just annotating types and the type is tagged or |
| concurrent, since these types aren't fully laid out in this mode. */ |
| if (type_annotate_only) |
| { |
| Entity_Id gnat_type |
| = is_component_type |
| ? Component_Type (gnat_entity) : Etype (gnat_entity); |
| |
| if (Is_Tagged_Type (gnat_type) || Is_Concurrent_Type (gnat_type)) |
| return record; |
| } |
| |
| /* Take the original size as the maximum size of the input if there was an |
| unconstrained record involved and round it up to the specified alignment, |
| if one was specified, but only for aggregate types. */ |
| if (CONTAINS_PLACEHOLDER_P (orig_size)) |
| orig_size = max_size (orig_size, true); |
| |
| if (align && AGGREGATE_TYPE_P (type)) |
| orig_size = round_up (orig_size, align); |
| |
| if (!operand_equal_p (size, orig_size, 0) |
| && !(TREE_CODE (size) == INTEGER_CST |
| && TREE_CODE (orig_size) == INTEGER_CST |
| && (TREE_OVERFLOW (size) |
| || TREE_OVERFLOW (orig_size) |
| || tree_int_cst_lt (size, orig_size)))) |
| { |
| Node_Id gnat_error_node; |
| |
| /* For a packed array, post the message on the original array type. */ |
| if (Is_Packed_Array_Impl_Type (gnat_entity)) |
| gnat_entity = Original_Array_Type (gnat_entity); |
| |
| if ((Ekind (gnat_entity) == E_Component |
| || Ekind (gnat_entity) == E_Discriminant) |
| && Present (Component_Clause (gnat_entity))) |
| gnat_error_node = Last_Bit (Component_Clause (gnat_entity)); |
| else if (Has_Size_Clause (gnat_entity)) |
| gnat_error_node = Expression (Size_Clause (gnat_entity)); |
| else if (Has_Object_Size_Clause (gnat_entity)) |
| gnat_error_node = Expression (Object_Size_Clause (gnat_entity)); |
| else |
| gnat_error_node = Empty; |
| |
| /* Generate message only for entities that come from source, since |
| if we have an entity created by expansion, the message will be |
| generated for some other corresponding source entity. */ |
| if (Comes_From_Source (gnat_entity)) |
| { |
| if (is_component_type) |
| post_error_ne_tree ("component of& padded{ by ^ bits}??", |
| gnat_entity, gnat_entity, |
| size_diffop (size, orig_size)); |
| else if (Present (gnat_error_node)) |
| post_error_ne_tree ("{^ }bits of & unused??", |
| gnat_error_node, gnat_entity, |
| size_diffop (size, orig_size)); |
| } |
| } |
| |
| return record; |
| } |
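| 
| /* As an illustration, for a hypothetical Ada declaration such as
| 
|      type T is record ... end record;
|      for T'Size use 64;
| 
|    where the natural size of T is, say, 40 bits, gigi calls maybe_pad_type
|    to wrap T in a 64-bit PAD record, and the code above may then warn that
|    24 bits of T are unused.  (The exact sizes depend on the target.)  */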
| |
| /* Return true if padded TYPE was built with an RM size. */ |
| |
| bool |
| pad_type_has_rm_size (tree type) |
| { |
| /* This is required for the lookup. */ |
| if (!TREE_CONSTANT (TYPE_SIZE (type))) |
| return false; |
| |
| const hashval_t hashcode = hash_pad_type (type); |
| struct pad_type_hash in, *h; |
| |
| in.hash = hashcode; |
| in.type = type; |
| h = pad_type_hash_table->find_with_hash (&in, hashcode); |
| |
| /* The types built with an RM size are the canonicalized ones. */ |
| return h && h->type == type; |
| } |
| |
| /* Return a copy of the padded TYPE but with reverse storage order. */ |
| |
| tree |
| set_reverse_storage_order_on_pad_type (tree type) |
| { |
| if (flag_checking) |
| { |
| /* If the inner type is not scalar then the function does nothing. */ |
| tree inner_type = TREE_TYPE (TYPE_FIELDS (type)); |
| gcc_assert (!AGGREGATE_TYPE_P (inner_type) |
| && !VECTOR_TYPE_P (inner_type)); |
| } |
| |
| /* This is required for the canonicalization. */ |
| gcc_assert (TREE_CONSTANT (TYPE_SIZE (type))); |
| |
| tree field = copy_node (TYPE_FIELDS (type)); |
| type = copy_type (type); |
| DECL_CONTEXT (field) = type; |
| TYPE_FIELDS (type) = field; |
| TYPE_REVERSE_STORAGE_ORDER (type) = 1; |
| return canonicalize_pad_type (type); |
| } |
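| 
| /* A minimal usage sketch (hypothetical variable names): given PAD, a padded
|    scalar type used for a component of a record type with reverse storage
|    order, a caller would do
| 
|      tree rev_pad = set_reverse_storage_order_on_pad_type (pad);
| 
|    and lay out the component with REV_PAD so that the scalar is accessed
|    with its bytes swapped.  */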
| |
| /* Relate the alias sets of GNU_NEW_TYPE and GNU_OLD_TYPE according to OP. |
| If this is a multi-dimensional array type, do this recursively. |
| |
| OP may be |
| - ALIAS_SET_COPY: the new set is made a copy of the old one. |
| - ALIAS_SET_SUPERSET: the new set is made a superset of the old one. |
| - ALIAS_SET_SUBSET: the new set is made a subset of the old one. */ |
| |
| void |
| relate_alias_sets (tree gnu_new_type, tree gnu_old_type, enum alias_set_op op) |
| { |
| /* Remove any padding from GNU_OLD_TYPE. It doesn't matter in the case |
| of a one-dimensional array, since the padding has the same alias set |
| as the field type, but if it's a multi-dimensional array, we need to |
| see the inner types. */ |
| while (TREE_CODE (gnu_old_type) == RECORD_TYPE |
| && (TYPE_JUSTIFIED_MODULAR_P (gnu_old_type) |
| || TYPE_PADDING_P (gnu_old_type))) |
| gnu_old_type = TREE_TYPE (TYPE_FIELDS (gnu_old_type)); |
| |
| /* Unconstrained array types are deemed incomplete and would thus be given |
| alias set 0. Retrieve the underlying array type. */ |
| if (TREE_CODE (gnu_old_type) == UNCONSTRAINED_ARRAY_TYPE) |
| gnu_old_type |
| = TREE_TYPE (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (gnu_old_type)))); |
| if (TREE_CODE (gnu_new_type) == UNCONSTRAINED_ARRAY_TYPE) |
| gnu_new_type |
| = TREE_TYPE (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (gnu_new_type)))); |
| |
| if (TREE_CODE (gnu_new_type) == ARRAY_TYPE |
| && TREE_CODE (TREE_TYPE (gnu_new_type)) == ARRAY_TYPE |
| && TYPE_MULTI_ARRAY_P (TREE_TYPE (gnu_new_type))) |
| relate_alias_sets (TREE_TYPE (gnu_new_type), TREE_TYPE (gnu_old_type), op); |
| |
| switch (op) |
| { |
| case ALIAS_SET_COPY: |
| /* The alias set shouldn't be copied between array types with different |
| aliasing settings because this can break the aliasing relationship |
| between the array type and its element type. */ |
| if (flag_checking || flag_strict_aliasing) |
| gcc_assert (!(TREE_CODE (gnu_new_type) == ARRAY_TYPE |
| && TREE_CODE (gnu_old_type) == ARRAY_TYPE |
| && TYPE_NONALIASED_COMPONENT (gnu_new_type) |
| != TYPE_NONALIASED_COMPONENT (gnu_old_type))); |
| |
| TYPE_ALIAS_SET (gnu_new_type) = get_alias_set (gnu_old_type); |
| break; |
| |
| case ALIAS_SET_SUBSET: |
| case ALIAS_SET_SUPERSET: |
| { |
| alias_set_type old_set = get_alias_set (gnu_old_type); |
| alias_set_type new_set = get_alias_set (gnu_new_type); |
| |
| /* Do nothing if the alias sets conflict. This ensures that we |
| never call record_alias_subset several times for the same pair |
| or at all for alias set 0. */ |
| if (!alias_sets_conflict_p (old_set, new_set)) |
| { |
| if (op == ALIAS_SET_SUBSET) |
| record_alias_subset (old_set, new_set); |
| else |
| record_alias_subset (new_set, old_set); |
| } |
| } |
| break; |
| |
| default: |
| gcc_unreachable (); |
| } |
| |
| record_component_aliases (gnu_new_type); |
| } |
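| 
| /* For example (hypothetical types): after
| 
|      relate_alias_sets (gnu_view, gnu_base, ALIAS_SET_COPY);
| 
|    accesses through GNU_VIEW and GNU_BASE use the very same alias set,
|    whereas ALIAS_SET_SUBSET would only record the new set as a child of
|    the old one in the alias hierarchy.  */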
| |
| /* Record TYPE as a builtin type for Ada. NAME is the name of the type. |
| ARTIFICIAL_P is true if the type was generated by the compiler. */ |
| |
| void |
| record_builtin_type (const char *name, tree type, bool artificial_p) |
| { |
| tree type_decl = build_decl (input_location, |
| TYPE_DECL, get_identifier (name), type); |
| DECL_ARTIFICIAL (type_decl) = artificial_p; |
| TYPE_ARTIFICIAL (type) = artificial_p; |
| gnat_pushdecl (type_decl, Empty); |
| |
| if (debug_hooks->type_decl) |
| debug_hooks->type_decl (type_decl, false); |
| } |
| |
| /* Finish constructing the character type CHAR_TYPE. |
| |
| In Ada, character types are enumeration types and, as a consequence, are
| represented in the front-end by integral types holding the positions of |
| the enumeration values as defined by the language, which means that the |
| integral types are unsigned. |
| |
| Unfortunately the signedness of 'char' in C is implementation-defined |
| and GCC even has the option -f[un]signed-char to toggle it at compile time.
| Since GNAT's philosophy is to be compatible with C by default, to wit |
| Interfaces.C.char is defined as a mere copy of Character, we may need |
| to declare character types as signed types in GENERIC and generate the |
| necessary adjustments to make them behave as unsigned types. |
| |
| The overall strategy is as follows: if 'char' is unsigned, do nothing; |
| if 'char' is signed, translate character types of CHAR_TYPE_SIZE and |
| character subtypes with RM_Size = Esize = CHAR_TYPE_SIZE into signed |
| types. The idea is to ensure that the bit pattern contained in the |
| Esize'd objects is not changed, even though the numerical value will |
| be interpreted differently depending on the signedness. */ |
| |
| void |
| finish_character_type (tree char_type) |
| { |
| if (TYPE_UNSIGNED (char_type)) |
| return; |
| |
| /* Make a copy of a generic unsigned version since we'll modify it. */ |
| tree unsigned_char_type |
| = (char_type == char_type_node |
| ? unsigned_char_type_node |
| : copy_type (gnat_unsigned_type_for (char_type))); |
| |
| /* Create an unsigned version of the type and set it as debug type. */ |
| TYPE_NAME (unsigned_char_type) = TYPE_NAME (char_type); |
| TYPE_STRING_FLAG (unsigned_char_type) = TYPE_STRING_FLAG (char_type); |
| TYPE_ARTIFICIAL (unsigned_char_type) = TYPE_ARTIFICIAL (char_type); |
| SET_TYPE_DEBUG_TYPE (char_type, unsigned_char_type); |
| |
| /* If this is a subtype, make the debug type a subtype of the debug type |
| of the base type and convert literal RM bounds to unsigned. */ |
| if (TREE_TYPE (char_type)) |
| { |
| tree base_unsigned_char_type = TYPE_DEBUG_TYPE (TREE_TYPE (char_type)); |
| tree min_value = TYPE_RM_MIN_VALUE (char_type); |
| tree max_value = TYPE_RM_MAX_VALUE (char_type); |
| |
| if (TREE_CODE (min_value) == INTEGER_CST) |
| min_value = fold_convert (base_unsigned_char_type, min_value); |
| if (TREE_CODE (max_value) == INTEGER_CST) |
| max_value = fold_convert (base_unsigned_char_type, max_value); |
| |
| TREE_TYPE (unsigned_char_type) = base_unsigned_char_type; |
| SET_TYPE_RM_MIN_VALUE (unsigned_char_type, min_value); |
| SET_TYPE_RM_MAX_VALUE (unsigned_char_type, max_value); |
| } |
| |
| /* Adjust the RM bounds of the original type to unsigned; that's especially
| important for base types, whose RM bounds are implicit in this case.  */
| SET_TYPE_RM_MIN_VALUE (char_type, TYPE_MIN_VALUE (unsigned_char_type)); |
| SET_TYPE_RM_MAX_VALUE (char_type, TYPE_MAX_VALUE (unsigned_char_type)); |
| } |
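| 
| /* For example, with a signed 'char', the position 16#FF# (255) of a
|    character is represented in GENERIC by the signed value -1: the 8-bit
|    pattern in the object is unchanged and only its numerical interpretation
|    differs, which is what the strategy described above guarantees.  */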
| |
| /* Given a record type RECORD_TYPE and a list of FIELD_DECL nodes FIELD_LIST, |
| finish constructing the record type as a fat pointer type. */ |
| |
| void |
| finish_fat_pointer_type (tree record_type, tree field_list) |
| { |
| /* Make sure we can put it into a register. */ |
| if (STRICT_ALIGNMENT) |
| SET_TYPE_ALIGN (record_type, MIN (BIGGEST_ALIGNMENT, 2 * POINTER_SIZE)); |
| |
| /* Show what it really is. */ |
| TYPE_FAT_POINTER_P (record_type) = 1; |
| |
| /* Do not emit debug info for it since the types of its fields may still be |
| incomplete at this point. */ |
| finish_record_type (record_type, field_list, 0, false); |
| |
| /* Force type_contains_placeholder_p to return true on it. Although the |
| PLACEHOLDER_EXPRs are referenced only indirectly, this isn't a pointer |
| type but the representation of the unconstrained array. */ |
| TYPE_CONTAINS_PLACEHOLDER_INTERNAL (record_type) = 2; |
| } |
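| 
| /* As a reminder (see exp_dbug.ads), a fat pointer built by gigi typically
|    has two pointer fields
| 
|      P_ARRAY    points to the array data
|      P_BOUNDS   points to the template holding the bounds
| 
|    which is why raising the alignment to 2 * POINTER_SIZE above is enough
|    to let the whole record live in registers on strict-alignment targets.  */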
| |
| /* Given a record type RECORD_TYPE and a list of FIELD_DECL nodes FIELD_LIST, |
| finish constructing the record or union type. If REP_LEVEL is zero, this |
| record has no representation clause and so will be entirely laid out here. |
| If REP_LEVEL is one, this record has a representation clause and has been |
| laid out already; only set the sizes and alignment. If REP_LEVEL is two, |
| this record is derived from a parent record and thus inherits its layout; |
| only make a pass on the fields to finalize them. DEBUG_INFO_P is true if |
| additional debug info needs to be output for this type. */ |
| |
| void |
| finish_record_type (tree record_type, tree field_list, int rep_level, |
| bool debug_info_p) |
| { |
| const enum tree_code orig_code = TREE_CODE (record_type); |
| const bool had_size = TYPE_SIZE (record_type) != NULL_TREE; |
| const bool had_align = TYPE_ALIGN (record_type) > 0; |
| /* For all-repped records with a size specified, lay the QUAL_UNION_TYPE |
| out just like a UNION_TYPE, since the size will be fixed. */ |
| const enum tree_code code |
| = (orig_code == QUAL_UNION_TYPE && rep_level > 0 && had_size |
| ? UNION_TYPE : orig_code); |
| tree name = TYPE_IDENTIFIER (record_type); |
| tree ada_size = bitsize_zero_node; |
| tree size = bitsize_zero_node; |
| tree field; |
| |
| TYPE_FIELDS (record_type) = field_list; |
| |
| /* Always attach the TYPE_STUB_DECL for a record type. It is required to |
| generate debug info and have a parallel type. */ |
| TYPE_STUB_DECL (record_type) = create_type_stub_decl (name, record_type); |
| |
| /* Globally initialize the record first. If this is a rep'ed record, |
| that just means some initializations; otherwise, layout the record. */ |
| if (rep_level > 0) |
| { |
| if (TYPE_ALIGN (record_type) < BITS_PER_UNIT) |
| SET_TYPE_ALIGN (record_type, BITS_PER_UNIT); |
| |
| if (!had_size) |
| TYPE_SIZE (record_type) = bitsize_zero_node; |
| } |
| else |
| { |
| /* Ensure there isn't a size already set.  There can be one in an error
| case where there is a rep clause but all fields have errors and no
| longer have a position.  */
| TYPE_SIZE (record_type) = NULL_TREE; |
| |
| /* Ensure we use the traditional GCC layout for bitfields when we need |
| to pack the record type or have a representation clause. The other |
| possible layout (Microsoft C compiler), if available, would prevent |
| efficient packing in almost all cases. */ |
| #ifdef TARGET_MS_BITFIELD_LAYOUT |
| if (TARGET_MS_BITFIELD_LAYOUT && TYPE_PACKED (record_type)) |
| decl_attributes (&record_type, |
| tree_cons (get_identifier ("gcc_struct"), |
| NULL_TREE, NULL_TREE), |
| ATTR_FLAG_TYPE_IN_PLACE); |
| #endif |
| |
| layout_type (record_type); |
| } |
| |
| /* At this point, the position and size of each field is known. It was |
| either set before entry by a rep clause, or by laying out the type above. |
| |
| We now run a pass over the fields (in reverse order for QUAL_UNION_TYPEs) |
| to compute the Ada size; the GCC size and alignment (for rep'ed records |
| that are not padding types); and the mode (for rep'ed records). We also |
| clear the DECL_BIT_FIELD indication for the cases we know have not been |
| handled yet, and adjust DECL_NONADDRESSABLE_P accordingly. */ |
| |
| if (code == QUAL_UNION_TYPE) |
| field_list = nreverse (field_list); |
| |
| for (field = field_list; field; field = DECL_CHAIN (field)) |
| { |
| tree type = TREE_TYPE (field); |
| tree pos = bit_position (field); |
| tree this_size = DECL_SIZE (field); |
| tree this_ada_size; |
| |
| if (RECORD_OR_UNION_TYPE_P (type) |
| && !TYPE_FAT_POINTER_P (type) |
| && !TYPE_CONTAINS_TEMPLATE_P (type) |
| && TYPE_ADA_SIZE (type)) |
| this_ada_size = TYPE_ADA_SIZE (type); |
| else |
| this_ada_size = this_size; |
| |
| const bool variant_part = (TREE_CODE (type) == QUAL_UNION_TYPE); |
| |
| /* Clear DECL_BIT_FIELD for the cases layout_decl does not handle. */ |
| if (DECL_BIT_FIELD (field) |
| && operand_equal_p (this_size, TYPE_SIZE (type), 0)) |
| { |
| const unsigned int align = TYPE_ALIGN (type); |
| |
| /* In the general case, type alignment is required. */ |
| if (value_factor_p (pos, align)) |
| { |
| /* The enclosing record type must be sufficiently aligned. |
| Otherwise, if no alignment was specified for it and it |
| has been laid out already, bump its alignment to the |
| desired one if this is compatible with its size and |
| maximum alignment, if any. */ |
| if (TYPE_ALIGN (record_type) >= align) |
| { |
| SET_DECL_ALIGN (field, MAX (DECL_ALIGN (field), align)); |
| DECL_BIT_FIELD (field) = 0; |
| } |
| else if (!had_align |
| && rep_level == 0 |
| && value_factor_p (TYPE_SIZE (record_type), align) |
| && (!TYPE_MAX_ALIGN (record_type) |
| || TYPE_MAX_ALIGN (record_type) >= align)) |
| { |
| SET_TYPE_ALIGN (record_type, align); |
| SET_DECL_ALIGN (field, MAX (DECL_ALIGN (field), align)); |
| DECL_BIT_FIELD (field) = 0; |
| } |
| } |
| |
| /* In the non-strict alignment case, only byte alignment is required.  */
| if (!STRICT_ALIGNMENT |
| && DECL_BIT_FIELD (field) |
| && value_factor_p (pos, BITS_PER_UNIT)) |
| DECL_BIT_FIELD (field) = 0; |
| } |
| |
| /* Clear DECL_BIT_FIELD_TYPE for a variant part at offset 0; it's simply
| not supported by the DECL_BIT_FIELD_REPRESENTATIVE machinery because |
| the variant part is always the last field in the list. */ |
| if (variant_part && integer_zerop (pos)) |
| DECL_BIT_FIELD_TYPE (field) = NULL_TREE; |
| |
| /* If we still have DECL_BIT_FIELD set at this point, we know that the |
| field is technically not addressable. Except that it can actually |
| be addressed if it is BLKmode and happens to be properly aligned. */ |
| if (DECL_BIT_FIELD (field) |
| && !(DECL_MODE (field) == BLKmode |
| && value_factor_p (pos, BITS_PER_UNIT))) |
| DECL_NONADDRESSABLE_P (field) = 1; |
| |
| /* A type must be as aligned as its most aligned field that is not |
| a bit-field. But this is already enforced by layout_type. */ |
| if (rep_level > 0 && !DECL_BIT_FIELD (field)) |
| SET_TYPE_ALIGN (record_type, |
| MAX (TYPE_ALIGN (record_type), DECL_ALIGN (field))); |
| |
| switch (code) |
| { |
| case UNION_TYPE: |
| ada_size = size_binop (MAX_EXPR, ada_size, this_ada_size); |
| size = size_binop (MAX_EXPR, size, this_size); |
| break; |
| |
| case QUAL_UNION_TYPE: |
| ada_size |
| = fold_build3 (COND_EXPR, bitsizetype, DECL_QUALIFIER (field), |
| this_ada_size, ada_size); |
| size = fold_build3 (COND_EXPR, bitsizetype, DECL_QUALIFIER (field), |
| this_size, size); |
| break; |
| |
| case RECORD_TYPE: |
| /* Since we know here that all fields are sorted in order of |
| increasing bit position, the size of the record is one |
| higher than the ending bit of the last field processed |
| unless we have a rep clause, because we might be processing |
| the REP part of a record with a variant part for which the |
| variant part has a rep clause but not the fixed part, in |
| which case this REP part may contain overlapping fields |
| and thus needs to be treated like a union type above, so
| use a MAX in that case. Also, if this field is a variant |
| part, we need to take into account the previous size in |
| the case of empty variants. */ |
| ada_size |
| = merge_sizes (ada_size, pos, this_ada_size, rep_level > 0, |
| variant_part); |
| size |
| = merge_sizes (size, pos, this_size, rep_level > 0, variant_part); |
| break; |
| |
| default: |
| gcc_unreachable (); |
| } |
| } |
| |
| if (code == QUAL_UNION_TYPE) |
| nreverse (field_list); |
| |
| /* We need to set the regular sizes if REP_LEVEL is one. */ |
| if (rep_level == 1) |
| { |
| /* We round TYPE_SIZE and TYPE_SIZE_UNIT up to TYPE_ALIGN separately |
| to avoid having very large masking constants in TYPE_SIZE_UNIT. */ |
| const unsigned int align = TYPE_ALIGN (record_type); |
| |
| /* If this is a padding record, we never want to make the size smaller |
| than what was specified in it, if any. */ |
| if (TYPE_IS_PADDING_P (record_type) && had_size) |
| size = TYPE_SIZE (record_type); |
| else |
| size = round_up (size, BITS_PER_UNIT); |
| |
| TYPE_SIZE (record_type) = variable_size (round_up (size, align)); |
| |
| tree size_unit |
| = convert (sizetype, |
| size_binop (EXACT_DIV_EXPR, size, bitsize_unit_node)); |
| TYPE_SIZE_UNIT (record_type) |
| = variable_size (round_up (size_unit, align / BITS_PER_UNIT)); |
| } |
| |
| /* We need to set the Ada size if REP_LEVEL is zero or one. */ |
| if (rep_level < 2) |
| { |
| /* Now set any of the values we've just computed that apply. */ |
| if (!TYPE_FAT_POINTER_P (record_type) |
| && !TYPE_CONTAINS_TEMPLATE_P (record_type)) |
| SET_TYPE_ADA_SIZE (record_type, ada_size); |
| } |
| |
| /* We need to set the mode if REP_LEVEL is one or two. */ |
| if (rep_level > 0) |
| { |
| compute_record_mode (record_type); |
| finish_bitfield_layout (record_type); |
| } |
| |
| /* Reset the TYPE_MAX_ALIGN field since it's private to gigi. */ |
| TYPE_MAX_ALIGN (record_type) = 0; |
| |
| if (debug_info_p) |
| rest_of_record_type_compilation (record_type); |
| } |
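| 
| /* Illustrative call sketch (hypothetical variable names): a record without
|    representation clause is laid out entirely here
| 
|      finish_record_type (gnu_type, gnu_field_list, 0, debug_info_p);
| 
|    while for a record whose fields were already positioned by a rep clause
|    only the sizes, alignment and mode are computed
| 
|      finish_record_type (gnu_type, gnu_field_list, 1, debug_info_p);  */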
| |
| /* Append PARALLEL_TYPE on the chain of parallel types of TYPE. If |
| PARALLEL_TYPE has no context and its computation is not deferred yet, also
| propagate TYPE's context to PARALLEL_TYPE's or defer its propagation to the |
| moment TYPE will get a context. */ |
| |
| void |
| add_parallel_type (tree type, tree parallel_type) |
| { |
| tree decl = TYPE_STUB_DECL (type); |
| |
| while (DECL_PARALLEL_TYPE (decl)) |
| decl = TYPE_STUB_DECL (DECL_PARALLEL_TYPE (decl)); |
| |
| SET_DECL_PARALLEL_TYPE (decl, parallel_type); |
| |
| /* If PARALLEL_TYPE already has a context, we are done. */ |
| if (TYPE_CONTEXT (parallel_type)) |
| return; |
| |
| /* Otherwise, try to get one from TYPE's context. If so, simply propagate |
| it to PARALLEL_TYPE. */ |
| if (TYPE_CONTEXT (type)) |
| gnat_set_type_context (parallel_type, TYPE_CONTEXT (type)); |
| |
| /* Otherwise TYPE has no context yet.  We know it will have one thanks to
| gnat_pushdecl and then its context will be propagated to PARALLEL_TYPE, |
| so we have nothing to do in this case. */ |
| } |
| |
| /* Return true if TYPE has a parallel type. */ |
| |
| static bool |
| has_parallel_type (tree type) |
| { |
| tree decl = TYPE_STUB_DECL (type); |
| |
| return DECL_PARALLEL_TYPE (decl) != NULL_TREE; |
| } |
| |
| /* Wrap up compilation of RECORD_TYPE, i.e. output additional debug info |
| associated with it. It need not be invoked directly in most cases as |
| finish_record_type takes care of doing so. */ |
| |
| void |
| rest_of_record_type_compilation (tree record_type) |
| { |
| bool var_size = false; |
| tree field; |
| |
| /* If this is a padded type, the bulk of the debug info has already been |
| generated for the field's type. */ |
| if (TYPE_IS_PADDING_P (record_type)) |
| return; |
| |
| /* If the type already has a parallel type (XVS type), then we're done. */ |
| if (has_parallel_type (record_type)) |
| return; |
| |
| for (field = TYPE_FIELDS (record_type); field; field = DECL_CHAIN (field)) |
| { |
| /* We need to make an XVE/XVU record if any field has variable size, |
| whether or not the record does. For example, if we have a union, |
| it may be that all fields, rounded up to the alignment, have the |
| same size, in which case we'll use that size. But the debug |
| output routines (except Dwarf2) won't be able to output the fields, |
| so we need to make the special record. */ |
| if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST |
| /* If a field has a non-constant qualifier, the record will have |
| variable size too. */ |
| || (TREE_CODE (record_type) == QUAL_UNION_TYPE |
| && TREE_CODE (DECL_QUALIFIER (field)) != INTEGER_CST)) |
| { |
| var_size = true; |
| break; |
| } |
| } |
| |
| /* If this record type is of variable size, make a parallel record type that |
| will tell the debugger how the former is laid out (see exp_dbug.ads). */ |
| if (var_size && gnat_encodings == DWARF_GNAT_ENCODINGS_ALL) |
| { |
| tree new_record_type |
| = make_node (TREE_CODE (record_type) == QUAL_UNION_TYPE |
| ? UNION_TYPE : TREE_CODE (record_type)); |
| tree orig_name = TYPE_IDENTIFIER (record_type), new_name; |
| tree last_pos = bitsize_zero_node; |
| |
| new_name |
| = concat_name (orig_name, TREE_CODE (record_type) == QUAL_UNION_TYPE |
| ? "XVU" : "XVE"); |
| TYPE_NAME (new_record_type) = new_name; |
| SET_TYPE_ALIGN (new_record_type, BIGGEST_ALIGNMENT); |
| TYPE_STUB_DECL (new_record_type) |
| = create_type_stub_decl (new_name, new_record_type); |
| DECL_IGNORED_P (TYPE_STUB_DECL (new_record_type)) |
| = DECL_IGNORED_P (TYPE_STUB_DECL (record_type)); |
| gnat_pushdecl (TYPE_STUB_DECL (new_record_type), Empty); |
| TYPE_SIZE (new_record_type) = size_int (TYPE_ALIGN (record_type)); |
| TYPE_SIZE_UNIT (new_record_type) |
| = size_int (TYPE_ALIGN (record_type) / BITS_PER_UNIT); |
| |
| /* Now scan all the fields, replacing each field with a new field |
| corresponding to the new encoding. */ |
| for (tree old_field = TYPE_FIELDS (record_type); |
| old_field; |
| old_field = DECL_CHAIN (old_field)) |
| { |
| tree field_type = TREE_TYPE (old_field); |
| tree field_name = DECL_NAME (old_field); |
| tree curpos = fold_bit_position (old_field); |
| tree pos, new_field; |
| bool var = false; |
| unsigned int align = 0; |
| |
| /* See how the position was modified from the last position. |
| |
| There are two basic cases we support: a value was added |
| to the last position or the last position was rounded to |
| a boundary and then something was added.  Check for the
| first case first. If not, see if there is any evidence |
| of rounding. If so, round the last position and retry. |
| |
| If this is a union, the position can be taken as zero. */ |
| if (TREE_CODE (new_record_type) == UNION_TYPE) |
| pos = bitsize_zero_node; |
| else |
| pos = compute_related_constant (curpos, last_pos); |
| |
| if (pos) |
| ; |
| else if (TREE_CODE (curpos) == MULT_EXPR |
| && tree_fits_uhwi_p (TREE_OPERAND (curpos, 1))) |
| { |
| tree offset = TREE_OPERAND (curpos, 0); |
| align = tree_to_uhwi (TREE_OPERAND (curpos, 1)); |
| align = scale_by_factor_of (offset, align); |
| last_pos = round_up (last_pos, align); |
| pos = compute_related_constant (curpos, last_pos); |
| } |
| else if (TREE_CODE (curpos) == PLUS_EXPR |
| && tree_fits_uhwi_p (TREE_OPERAND (curpos, 1)) |
| && TREE_CODE (TREE_OPERAND (curpos, 0)) == MULT_EXPR |
| && tree_fits_uhwi_p |
| (TREE_OPERAND (TREE_OPERAND (curpos, 0), 1))) |
| { |
| tree offset = TREE_OPERAND (TREE_OPERAND (curpos, 0), 0); |
| unsigned HOST_WIDE_INT addend |
| = tree_to_uhwi (TREE_OPERAND (curpos, 1)); |
| align |
| = tree_to_uhwi (TREE_OPERAND (TREE_OPERAND (curpos, 0), 1)); |
| align = scale_by_factor_of (offset, align); |
| align = MIN (align, addend & -addend); |
| last_pos = round_up (last_pos, align); |
| pos = compute_related_constant (curpos, last_pos); |
| } |
| else |
| { |
| align = DECL_ALIGN (old_field); |
| last_pos = round_up (last_pos, align); |
| pos = compute_related_constant (curpos, last_pos); |
| } |
| |
| /* See if this type is variable-sized and make a pointer type |
| and indicate the indirection if so. Beware that the debug |
| back-end may adjust the position computed above according |
| to the alignment of the field type, i.e. the pointer type |
| in this case, if we don't preventively counter that. */ |
| if (TREE_CODE (DECL_SIZE (old_field)) != INTEGER_CST) |
| { |
| field_type = copy_type (build_pointer_type (field_type)); |
| SET_TYPE_ALIGN (field_type, BITS_PER_UNIT); |
| var = true; |
| |
| /* ??? Kludge to work around a bug in Workbench's debugger. */ |
| if (align == 0) |
| { |
| align = DECL_ALIGN (old_field); |
| last_pos = round_up (last_pos, align); |
| pos = compute_related_constant (curpos, last_pos); |
| } |
| } |
| |
| /* If we can't compute a position, set it to zero. |
| |
| ??? We really should abort here, but it's too much work |
| to get this correct for all cases. */ |
| if (!pos) |
| pos = bitsize_zero_node; |
| |
| /* Make a new field name, if necessary. */ |
| if (var || align != 0) |
| { |
| char suffix[16]; |
| |
| if (align != 0) |
| sprintf (suffix, "XV%c%u", var ? 'L' : 'A', |
| align / BITS_PER_UNIT); |
| else |
| strcpy (suffix, "XVL"); |
| |
| field_name = concat_name (field_name, suffix); |
| } |
| |
| new_field |
| = create_field_decl (field_name, field_type, new_record_type, |
| DECL_SIZE (old_field), pos, 0, 0); |
| /* The specified position is not the actual position of the field |
| but the gap with the previous field, so the computation of the |
| bit-field status may be incorrect. We adjust it manually to |
| avoid generating useless attributes for the field in DWARF. */ |
| if (DECL_SIZE (old_field) == TYPE_SIZE (field_type) |
| && value_factor_p (pos, BITS_PER_UNIT)) |
| { |
| DECL_BIT_FIELD (new_field) = 0; |
| DECL_BIT_FIELD_TYPE (new_field) = NULL_TREE; |
| } |
| DECL_CHAIN (new_field) = TYPE_FIELDS (new_record_type); |
| TYPE_FIELDS (new_record_type) = new_field; |
| |
| /* If the type of old_field is a QUAL_UNION_TYPE, take its size as being
| zero. The only time it's not the last field of the record |
| is when there are other components at fixed positions after |
| it (meaning there was a rep clause for every field) and we |
| want to be able to encode them. */ |
| last_pos = size_binop (PLUS_EXPR, curpos, |
| (TREE_CODE (TREE_TYPE (old_field)) |
| == QUAL_UNION_TYPE) |
| ? bitsize_zero_node |
| : DECL_SIZE (old_field)); |
| } |
| |
| TYPE_FIELDS (new_record_type) = nreverse (TYPE_FIELDS (new_record_type)); |
| |
| add_parallel_type (record_type, new_record_type); |
| } |
| } |
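| 
| /* For instance, a variable-sized record type R gives rise, under the full
|    GNAT encodings, to a parallel type named R___XVE (R___XVU for a variant
|    record) whose fields describe the layout of R for the debugger, as
|    documented in exp_dbug.ads.  */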
| |
| /* Utility function of above to merge LAST_SIZE, the previous size of a record,
| with FIRST_BIT and SIZE that describe a field. If MAX is true, we take the |
| MAX of the end position of this field with LAST_SIZE. In all other cases, |
| we use FIRST_BIT plus SIZE. SPECIAL is true if it's for a QUAL_UNION_TYPE, |
| in which case we must look for COND_EXPRs and replace a value of zero with |
| the old size. Return an expression for the size. */ |
| |
| static tree |
| merge_sizes (tree last_size, tree first_bit, tree size, bool max, bool special) |
| { |
| tree type = TREE_TYPE (last_size); |
| tree new_size; |
| |
| if (!special || TREE_CODE (size) != COND_EXPR) |
| { |
| new_size = size_binop (PLUS_EXPR, first_bit, size); |
| if (max) |
| new_size = size_binop (MAX_EXPR, last_size, new_size); |
| } |
| |
| else |
| new_size = fold_build3 (COND_EXPR, type, TREE_OPERAND (size, 0), |
| integer_zerop (TREE_OPERAND (size, 1)) |
| ? last_size : merge_sizes (last_size, first_bit, |
| TREE_OPERAND (size, 1), |
| max, special), |
| integer_zerop (TREE_OPERAND (size, 2)) |
| ? last_size : merge_sizes (last_size, first_bit, |
| TREE_OPERAND (size, 2), |
| max, special)); |
| |
| /* We don't need any NON_LVALUE_EXPRs and they can confuse us (especially
| when fed through SUBSTITUTE_IN_EXPR) into thinking that a constant |
| size is not constant. */ |
| while (TREE_CODE (new_size) == NON_LVALUE_EXPR) |
| new_size = TREE_OPERAND (new_size, 0); |
| |
| return new_size; |
| } |
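| 
| /* For example, merging a previous size of 32 bits with a field at bit
|    position 32 and size 16 yields 32 + 16 = 48 bits; with MAX true, the
|    result is MAX (32, 32 + 16) = 48 too, the two variants only differing
|    for overlapping fields.  */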
| |
| /* Convert the size expression EXPR to TYPE and fold the result. */ |
| |
| static tree |
| fold_convert_size (tree type, tree expr) |
| { |
| /* We assume that size expressions do not wrap around. */ |
| if (TREE_CODE (expr) == MULT_EXPR || TREE_CODE (expr) == PLUS_EXPR) |
| return size_binop (TREE_CODE (expr), |
| fold_convert_size (type, TREE_OPERAND (expr, 0)), |
| fold_convert_size (type, TREE_OPERAND (expr, 1))); |
| |
| return fold_convert (type, expr); |
| } |
| |
| /* Return the bit position of FIELD, in bits from the start of the record, |
| and fold it as much as possible. This is a tree of type bitsizetype. */ |
| |
| static tree |
| fold_bit_position (const_tree field) |
| { |
| tree offset = fold_convert_size (bitsizetype, DECL_FIELD_OFFSET (field)); |
| return size_binop (PLUS_EXPR, DECL_FIELD_BIT_OFFSET (field), |
| size_binop (MULT_EXPR, offset, bitsize_unit_node)); |
| } |
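| 
| /* For example, a field with a DECL_FIELD_OFFSET of 2 (storage units) and a
|    DECL_FIELD_BIT_OFFSET of 3 is at bit position 2 * 8 + 3 = 19 on a target
|    with 8-bit storage units.  */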
| |
| /* Utility function of above to see if OP0 and OP1, both of bitsizetype, are
| related by the addition of a constant. Return that constant if so. */ |
| |
| static tree |
| compute_related_constant (tree op0, tree op1) |
| { |
| tree factor, op0_var, op1_var, op0_cst, op1_cst, result; |
| |
| if (TREE_CODE (op0) == MULT_EXPR |
| && TREE_CODE (op1) == MULT_EXPR |
| && TREE_CODE (TREE_OPERAND (op0, 1)) == INTEGER_CST |
| && TREE_OPERAND (op1, 1) == TREE_OPERAND (op0, 1)) |
| { |
| factor = TREE_OPERAND (op0, 1); |
| op0 = TREE_OPERAND (op0, 0); |
| op1 = TREE_OPERAND (op1, 0); |
| } |
| else |
| factor = NULL_TREE; |
| |
| op0_cst = split_plus (op0, &op0_var); |
| op1_cst = split_plus (op1, &op1_var); |
| result = size_binop (MINUS_EXPR, op0_cst, op1_cst); |
| |
| if (operand_equal_p (op0_var, op1_var, 0)) |
| return factor ? size_binop (MULT_EXPR, factor, result) : result; |
| |
| return NULL_TREE; |
| } |
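| 
| /* For example, given OP0 = N + 24 and OP1 = N + 8 for some variable part N,
|    the variable parts match and the function returns 16; for OP0 = N + 24
|    and OP1 = M + 8 with distinct N and M, it returns NULL_TREE.  */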
| |
| /* Utility function of above to split a tree OP which may be a sum, into a |
| constant part, which is returned, and a variable part, which is stored |
| in *PVAR. *PVAR may be bitsize_zero_node. All operations must be of |
| bitsizetype. */ |
| |
| static tree |
| split_plus (tree in, tree *pvar) |
| { |
| /* Strip conversions in order to ease the tree traversal and maximize the |
| potential for constant or plus/minus discovery. We need to be careful |
| to always return and set *pvar to bitsizetype trees, but it's worth |
| the effort. */ |
| in = remove_conversions (in, false); |
| |
| *pvar = convert (bitsizetype, in); |
| |
| if (TREE_CODE (in) == INTEGER_CST) |
| { |
| *pvar = bitsize_zero_node; |
| return convert (bitsizetype, in); |
| } |
| else if (TREE_CODE (in) == PLUS_EXPR || TREE_CODE (in) == MINUS_EXPR) |
| { |
| tree lhs_var, rhs_var; |
| tree lhs_con = split_plus (TREE_OPERAND (in, 0), &lhs_var); |
| tree rhs_con = split_plus (TREE_OPERAND (in, 1), &rhs_var); |
| |
| if (lhs_var == TREE_OPERAND (in, 0) |
| && rhs_var == TREE_OPERAND (in, 1)) |
| return bitsize_zero_node; |
| |
| *pvar = size_binop (TREE_CODE (in), lhs_var, rhs_var); |
| return size_binop (TREE_CODE (in), lhs_con, rhs_con); |
| } |
| else |
| return bitsize_zero_node; |
| } |
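| 
| /* For example, splitting IN = A + 3 sets *PVAR to A and returns 3, and
|    splitting IN = (A + 3) - 1 sets *PVAR to A and returns 2 (modulo the
|    folding done by size_binop); a lone variable A yields *PVAR = A and a
|    constant part of zero.  */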
| |
| /* Return a copy of TYPE but safe to modify in any way. */ |
| |
| tree |
| copy_type (tree type) |
| { |
| tree new_type = copy_node (type); |
| |
| /* Unshare the language-specific data. */ |
| if (TYPE_LANG_SPECIFIC (type)) |
| { |
| TYPE_LANG_SPECIFIC (new_type) = NULL; |
| SET_TYPE_LANG_SPECIFIC (new_type, GET_TYPE_LANG_SPECIFIC (type)); |
| } |
| |
| /* And the contents of the language-specific slot if needed. */ |
| if ((INTEGRAL_TYPE_P (type) || SCALAR_FLOAT_TYPE_P (type)) |
| && TYPE_RM_VALUES (type)) |
| { |
| TYPE_RM_VALUES (new_type) = NULL_TREE; |
| SET_TYPE_RM_SIZE (new_type, TYPE_RM_SIZE (type)); |
| SET_TYPE_RM_MIN_VALUE (new_type, TYPE_RM_MIN_VALUE (type)); |
| SET_TYPE_RM_MAX_VALUE (new_type, TYPE_RM_MAX_VALUE (type)); |
| } |
| |
| /* copy_node clears this field instead of copying it, because it is |
| aliased with TREE_CHAIN. */ |
| TYPE_STUB_DECL (new_type) = TYPE_STUB_DECL (type); |
| |
| TYPE_POINTER_TO (new_type) = NULL_TREE; |
| TYPE_REFERENCE_TO (new_type) = NULL_TREE; |
| TYPE_MAIN_VARIANT (new_type) = new_type; |
| TYPE_NEXT_VARIANT (new_type) = NULL_TREE; |
| TYPE_CANONICAL (new_type) = new_type; |
| |
| return new_type; |
| } |
| |
| /* Return a subtype of sizetype with range MIN to MAX and whose |
| TYPE_INDEX_TYPE is INDEX. GNAT_NODE is used for the position |
| of the associated TYPE_DECL. */ |
| |
| tree |
| create_index_type (tree min, tree max, tree index, Node_Id gnat_node) |
| { |
| /* First build a type for the desired range. */ |
| tree type = build_nonshared_range_type (sizetype, min, max); |
| |
| /* Then set the index type. */ |
| SET_TYPE_INDEX_TYPE (type, index); |
| create_type_decl (NULL_TREE, type, true, false, gnat_node); |
| |
| return type; |
| } |
| |
| /* Return a subtype of TYPE with range MIN to MAX. If TYPE is NULL, |
| sizetype is used. */ |
| |
| tree |
| create_range_type (tree type, tree min, tree max) |
| { |
| tree range_type; |
| |
| if (!type) |
| type = sizetype; |
| |
| /* First build a type with the base range. */ |
| range_type = build_nonshared_range_type (type, TYPE_MIN_VALUE (type), |
| TYPE_MAX_VALUE (type)); |
| |
| /* Then set the actual range. */ |
| SET_TYPE_RM_MIN_VALUE (range_type, min); |
| SET_TYPE_RM_MAX_VALUE (range_type, max); |
| |
| return range_type; |
| } |
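| 
| /* For instance, a hypothetical Ada subtype
| 
|      subtype S is Integer range 1 .. 10;
| 
|    is translated into a type whose base range is that of Integer while the
|    RM bounds set here are 1 and 10; range checks are generated against the
|    latter.  */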
| |
| /* Return an extra subtype of TYPE with range MIN to MAX. */ |
| |
| tree |
| create_extra_subtype (tree type, tree min, tree max) |
| { |
| const bool uns = TYPE_UNSIGNED (type); |
| const unsigned prec = TYPE_PRECISION (type); |
| tree subtype = uns ? make_unsigned_type (prec) : make_signed_type (prec); |
| |
| TREE_TYPE (subtype) = type; |
| TYPE_EXTRA_SUBTYPE_P (subtype) = 1; |
| |
| SET_TYPE_RM_MIN_VALUE (subtype, min); |
| SET_TYPE_RM_MAX_VALUE (subtype, max); |
| |
| return subtype; |
| } |
| |
| /* Return a TYPE_DECL node suitable for the TYPE_STUB_DECL field of TYPE. |
| NAME gives the name of the type to be used in the declaration. */ |
| |
| tree |
| create_type_stub_decl (tree name, tree type) |
| { |
| tree type_decl = build_decl (input_location, TYPE_DECL, name, type); |
| DECL_ARTIFICIAL (type_decl) = 1; |
| TYPE_ARTIFICIAL (type) = 1; |
| return type_decl; |
| } |
| |
| /* Return a TYPE_DECL node for TYPE. NAME gives the name of the type to be |
| used in the declaration. ARTIFICIAL_P is true if the declaration was |
| generated by the compiler. DEBUG_INFO_P is true if we need to write |
| debug information about this type. GNAT_NODE is used for the position |
| of the decl. */ |
| |
| tree |
| create_type_decl (tree name, tree type, bool artificial_p, bool debug_info_p, |
| Node_Id gnat_node) |
| { |
| enum tree_code code = TREE_CODE (type); |
| bool is_named |
| = TYPE_NAME (type) && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL; |
| tree type_decl; |
| |
| /* Only the builtin TYPE_STUB_DECL should be used for dummy types. */ |
| gcc_assert (!TYPE_IS_DUMMY_P (type)); |
| |
| /* If the type hasn't been named yet, we're naming it; preserve an existing |
| TYPE_STUB_DECL that has been attached to it for some purpose. */ |
| if (!is_named && TYPE_STUB_DECL (type)) |
| { |
| type_decl = TYPE_STUB_DECL (type); |
| DECL_NAME (type_decl) = name; |
| } |
| else |
| type_decl = build_decl (input_location, TYPE_DECL, name, type); |
| |
| DECL_ARTIFICIAL (type_decl) = artificial_p; |
| TYPE_ARTIFICIAL (type) = artificial_p; |
| |
| /* Add this decl to the current binding level. */ |
| gnat_pushdecl (type_decl, gnat_node); |
| |
| /* If we're naming the type, equate the TYPE_STUB_DECL to the name. This |
| causes the name to be also viewed as a "tag" by the debug back-end, with |
| the advantage that no DW_TAG_typedef is emitted for artificial "tagged" |
| types in DWARF. |
| |
| Note that if "type" is used as a DECL_ORIGINAL_TYPE, it may be referenced |
| from multiple contexts, and "type_decl" references a copy of it: in such a |
| case, do not mess with TYPE_STUB_DECL: we do not want to re-use the TYPE_DECL
| with the mechanism above. */ |
| if (!is_named && type != DECL_ORIGINAL_TYPE (type_decl)) |
| TYPE_STUB_DECL (type) = type_decl; |
| |
| /* Do not generate debug info for UNCONSTRAINED_ARRAY_TYPE, which the
| back-end doesn't support, nor for other types if we don't need to.  */
| if (code == UNCONSTRAINED_ARRAY_TYPE || !debug_info_p) |
| DECL_IGNORED_P (type_decl) = 1; |
| |
| return type_decl; |
| } |
| |
| /* Return a VAR_DECL or CONST_DECL node. |
| |
| NAME gives the name of the variable. ASM_NAME is its assembler name |
| (if provided). TYPE is its data type (a GCC ..._TYPE node). INIT is |
| the GCC tree for an optional initial expression; NULL_TREE if none. |
| |
| CONST_FLAG is true if this variable is constant, in which case we might |
| return a CONST_DECL node unless CONST_DECL_ALLOWED_P is false. |
| |
| PUBLIC_FLAG is true if this is for a reference to a public entity or for a |
| definition to be made visible outside of the current compilation unit, for |
| instance variable definitions in a package specification. |
| |
| EXTERN_FLAG is true when processing an external variable declaration (as |
| opposed to a definition: no storage is to be allocated for the variable). |
| |
| STATIC_FLAG is only relevant when not at top level and indicates whether |
| to always allocate storage to the variable. |
| |
| VOLATILE_FLAG is true if this variable is declared as volatile. |
| |
| ARTIFICIAL_P is true if the variable was generated by the compiler. |
| |
| DEBUG_INFO_P is true if we need to write debug information for it. |
| |
| ATTR_LIST is the list of attributes to be attached to the variable. |
| |
| GNAT_NODE is used for the position of the decl. */ |
| |
| tree |
| create_var_decl (tree name, tree asm_name, tree type, tree init, |
| bool const_flag, bool public_flag, bool extern_flag, |
| bool static_flag, bool volatile_flag, bool artificial_p, |
| bool debug_info_p, struct attrib *attr_list, |
| Node_Id gnat_node, bool const_decl_allowed_p) |
| { |
| /* Whether the object has static storage duration, either explicitly or by |
| virtue of being declared at the global level. */ |
| const bool static_storage = static_flag || global_bindings_p (); |
| |
| /* Whether the initializer is constant: for an external object or an object |
| with static storage duration, we check that the initializer is a valid |
| constant expression for initializing a static variable; otherwise, we |
| only check that it is constant. */ |
| const bool init_const |
| = (init |
| && gnat_types_compatible_p (type, TREE_TYPE (init)) |
| && (extern_flag || static_storage |
| ? initializer_constant_valid_p (init, TREE_TYPE (init)) |
| != NULL_TREE |
| : TREE_CONSTANT (init))); |
| |
| /* Whether we will make TREE_CONSTANT the DECL we produce here, in which |
| case the initializer may be used in lieu of the DECL node (as done in |
| Identifier_to_gnu). This is useful to prevent the need of elaboration |
| code when an identifier for which such a DECL is made is in turn used |
| as an initializer. We used to rely on CONST_DECL vs VAR_DECL for this, |
| but extra constraints apply to this choice (see below) and they are not |
| relevant to the distinction we wish to make. */ |
| const bool constant_p = const_flag && init_const; |
| |
| /* The actual DECL node. CONST_DECL was initially intended for enumerals |
| and may be used for scalars in general but not for aggregates. */ |
| tree var_decl |
| = build_decl (input_location, |
| (constant_p |
| && const_decl_allowed_p |
| && !AGGREGATE_TYPE_P (type) ? CONST_DECL : VAR_DECL), |
| name, type); |
| |
| /* Detect constants created by the front-end to hold 'reference to function |
| calls for stabilization purposes. This is needed for renaming. */ |
| if (const_flag && init && POINTER_TYPE_P (type)) |
| { |
| tree inner = init; |
| if (TREE_CODE (inner) == COMPOUND_EXPR) |
| inner = TREE_OPERAND (inner, 1); |
| inner = remove_conversions (inner, true); |
| if (TREE_CODE (inner) == ADDR_EXPR |
| && ((TREE_CODE (TREE_OPERAND (inner, 0)) == CALL_EXPR |
| && !call_is_atomic_load (TREE_OPERAND (inner, 0))) |
| || (TREE_CODE (TREE_OPERAND (inner, 0)) == VAR_DECL |
| && DECL_RETURN_VALUE_P (TREE_OPERAND (inner, 0))))) |
| DECL_RETURN_VALUE_P (var_decl) = 1; |
| } |
| |
| /* If this is external, throw away any initializations (they will be done |
| elsewhere) unless this is a constant for which we would like to remain |
| able to get the initializer. If we are defining a global here, leave a |
| constant initialization and save any variable elaborations for the |
| elaboration routine. If we are just annotating types, throw away the |
| initialization if it isn't a constant. */ |
| if ((extern_flag && !constant_p) |
| || (type_annotate_only && init && !TREE_CONSTANT (init))) |
| init = NULL_TREE; |
| |
| /* At the global level, a non-constant initializer generates elaboration |
| statements. Check that such statements are allowed, that is to say, |
| not violating a No_Elaboration_Code restriction. */ |
| if (init && !init_const && global_bindings_p ()) |
| Check_Elaboration_Code_Allowed (gnat_node); |
| |
| /* Attach the initializer, if any. */ |
| DECL_INITIAL (var_decl) = init; |
| |
| /* Directly set some flags. */ |
| DECL_ARTIFICIAL (var_decl) = artificial_p; |
| DECL_EXTERNAL (var_decl) = extern_flag; |
| |
| TREE_CONSTANT (var_decl) = constant_p; |
| TREE_READONLY (var_decl) = const_flag; |
| |
| /* The object is public if it is external or if it is declared public |
| and has static storage duration. */ |
| TREE_PUBLIC (var_decl) = extern_flag || (public_flag && static_storage); |
| |
| /* We need to allocate static storage for an object with static storage |
| duration if it isn't external. */ |
| TREE_STATIC (var_decl) = !extern_flag && static_storage; |
| |
| TREE_SIDE_EFFECTS (var_decl) |
| = TREE_THIS_VOLATILE (var_decl) |
| = TYPE_VOLATILE (type) | volatile_flag; |
| |
| if (TREE_SIDE_EFFECTS (var_decl)) |
| TREE_ADDRESSABLE (var_decl) = 1; |
| |
| /* Ada doesn't feature Fortran-like COMMON variables so we shouldn't |
| try to fiddle with DECL_COMMON. However, on platforms that don't |
| support global BSS sections, uninitialized global variables would |
| go in DATA instead, thus increasing the size of the executable. */ |
| if (!flag_no_common |
| && TREE_CODE (var_decl) == VAR_DECL |
| && TREE_PUBLIC (var_decl) |
| && !have_global_bss_p ()) |
| DECL_COMMON (var_decl) = 1; |
| |
| /* Do not emit debug info if not requested, or for an external constant whose |
| initializer is not absolute because this would require a global relocation |
| in a read-only section which runs afoul of the PE-COFF run-time relocation |
| mechanism. */ |
| if (!debug_info_p |
| || (extern_flag |
| && constant_p |
| && init |
| && initializer_constant_valid_p (init, TREE_TYPE (init)) |
| != null_pointer_node)) |
| DECL_IGNORED_P (var_decl) = 1; |
| |
| /* ??? Some attributes cannot be applied to CONST_DECLs. */ |
| if (TREE_CODE (var_decl) == VAR_DECL) |
| process_attributes (&var_decl, &attr_list, true, gnat_node); |
| |
| /* Add this decl to the current binding level. */ |
| gnat_pushdecl (var_decl, gnat_node); |
| |
| if (TREE_CODE (var_decl) == VAR_DECL && asm_name) |
| { |
| /* Let the target mangle the name if this isn't a verbatim asm. */ |
| if (*IDENTIFIER_POINTER (asm_name) != '*') |
| asm_name = targetm.mangle_decl_assembler_name (var_decl, asm_name); |
| |
| SET_DECL_ASSEMBLER_NAME (var_decl, asm_name); |
| } |
| |
| return var_decl; |
| } |
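| 
| /* A minimal usage sketch (hypothetical names): an artificial, public,
|    statically allocated counter initialized to zero could be created with
| 
|      tree v = create_var_decl (get_identifier ("counter"), NULL_TREE,
|                                integer_type_node, integer_zero_node,
|                                false, true, false, true, false, true,
|                                true, NULL, gnat_node, true);
| 
|    where the boolean arguments map, in order, to CONST_FLAG, PUBLIC_FLAG,
|    EXTERN_FLAG, STATIC_FLAG, VOLATILE_FLAG, ARTIFICIAL_P, DEBUG_INFO_P and
|    CONST_DECL_ALLOWED_P.  */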
| |
| /* Return true if TYPE, an aggregate type, contains (or is) an array. */ |
| |
| static bool |
| aggregate_type_contains_array_p (tree type) |
| { |
| switch (TREE_CODE (type)) |
| { |
| case RECORD_TYPE: |
| case UNION_TYPE: |
| case QUAL_UNION_TYPE: |
| { |
| tree field; |
| for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) |
| if (AGGREGATE_TYPE_P (TREE_TYPE (field)) |
| && aggregate_type_contains_array_p (TREE_TYPE (field))) |
| return true; |
| return false; |
| } |
| |
| case ARRAY_TYPE: |
| return true; |
| |
| default: |
| gcc_unreachable (); |
| } |
| } |
| |
| /* Return true if TYPE is a type with variable size, a padding type with a
| field of variable size, or a record that has a field with such a type.  */
| |
| bool |
| type_has_variable_size (tree type) |
| { |
| tree field; |
| |
| if (!TREE_CONSTANT (TYPE_SIZE (type))) |
| return true; |
| |
| if (TYPE_IS_PADDING_P (type) |
| && !TREE_CONSTANT (DECL_SIZE (TYPE_FIELDS (type)))) |
| return true; |
| |
| if (!RECORD_OR_UNION_TYPE_P (type)) |
| return false; |
| |
| for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) |
| if (type_has_variable_size (TREE_TYPE (field))) |
| return true; |
| |
| return false; |
| } |
| |
| /* Return a FIELD_DECL node. NAME is the field's name, TYPE is its type and |
| RECORD_TYPE is the type of the enclosing record. If SIZE is nonzero, it |
| is the specified size of the field. If POS is nonzero, it is the bit |
| position. PACKED is 1 if the enclosing record is packed, -1 if it has |
| Component_Alignment of Storage_Unit. If ADDRESSABLE is nonzero, it |
| means we are allowed to take the address of the field; if it is negative, |
| we should not make a bitfield, which is used by make_aligning_type. */ |
| |
| tree |
| create_field_decl (tree name, tree type, tree record_type, tree size, tree pos, |
| int packed, int addressable) |
| { |
| tree field_decl = build_decl (input_location, FIELD_DECL, name, type); |
| |
| DECL_CONTEXT (field_decl) = record_type; |
| TREE_READONLY (field_decl) = TYPE_READONLY (type); |
| |
| /* If a size is specified, use it. Otherwise, if the record type is packed |
| compute a size to use, which may differ from the object's natural size. |
| We always set a size in this case to trigger the checks for bitfield |
| creation below, which is typically required when no position has been |
| specified. */ |
| if (size) |
| size = convert (bitsizetype, size); |
| else if (packed == 1) |
| { |
| size = rm_size (type); |
| if (TYPE_MODE (type) == BLKmode) |
| size = round_up (size, BITS_PER_UNIT); |
| } |
| |
| /* If we may, according to ADDRESSABLE, make a bitfield when the size is
| specified, for two reasons: first, when it differs from the natural
| size; second, when the alignment is insufficient.
| |
| We never make a bitfield if the type of the field has a nonconstant size, |
| because no such entity requiring bitfield operations should reach here. |
| |
| We do *preventively* make a bitfield when there might be the need for it |
| but we don't have all the necessary information to decide, as is the case |
| of a field in a packed record. |
| |
| We also don't look at STRICT_ALIGNMENT here, and rely on later processing |
| in layout_decl or finish_record_type to clear the bit_field indication if |
| it is in fact not needed. */ |
| if (addressable >= 0 |
| && size |
| && TREE_CODE (size) == INTEGER_CST |
| && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST |
| && (packed |
| || !tree_int_cst_equal (size, TYPE_SIZE (type)) |
| || (pos && !value_factor_p (pos, TYPE_ALIGN (type))) |
| || (TYPE_ALIGN (record_type) |
| && TYPE_ALIGN (record_type) < TYPE_ALIGN (type)))) |
| { |
| DECL_BIT_FIELD (field_decl) = 1; |
| DECL_SIZE (field_decl) = size; |
| if (!packed && !pos) |
| { |
| if (TYPE_ALIGN (record_type) |
| && TYPE_ALIGN (record_type) < TYPE_ALIGN (type)) |
| SET_DECL_ALIGN (field_decl, TYPE_ALIGN (record_type)); |
| else |
| SET_DECL_ALIGN (field_decl, TYPE_ALIGN (type)); |
| } |
| } |
| |
| DECL_PACKED (field_decl) = pos ? DECL_BIT_FIELD (field_decl) : packed; |
| |
| /* If TYPE has BLKmode, we must ensure this is aligned to at least
| a byte boundary since GCC cannot handle less aligned BLKmode bitfields. |
| Likewise if it has a variable size and no specified position because |
| variable-sized objects need to be aligned to at least a byte boundary. |
| Likewise for an aggregate without specified position that contains an |
| array because, in this case, slices of variable length of this array |
| must be handled by GCC and have variable size. */ |
| if (packed && (TYPE_MODE (type) == BLKmode |
| || (!pos && type_has_variable_size (type)) |
| || (!pos |
| && AGGREGATE_TYPE_P (type) |
| && aggregate_type_contains_array_p (type)))) |
| SET_DECL_ALIGN (field_decl, BITS_PER_UNIT); |
| |
| /* Bump the alignment if need be, either for bitfield/packing purposes or |
| to satisfy the type requirements if no such considerations apply. When |
| we get the alignment from the type, indicate if this is from an explicit |
| user request, which prevents stor-layout from lowering it later on. */ |
| else |
| { |
| const unsigned int field_align |
| = DECL_BIT_FIELD (field_decl) |
| ? 1 |
| : packed |
| ? BITS_PER_UNIT |
| : 0; |
| |
| if (field_align > DECL_ALIGN (field_decl)) |
| SET_DECL_ALIGN (field_decl, field_align); |
| else if (!field_align && TYPE_ALIGN (type) > DECL_ALIGN (field_decl)) |
| { |
| SET_DECL_ALIGN (field_decl, TYPE_ALIGN (type)); |
| DECL_USER_ALIGN (field_decl) = TYPE_USER_ALIGN (type); |
| } |
| } |
| |
| if (pos) |
| { |
| /* We need to pass in the alignment the DECL is known to have. |
| This is the lowest-order bit set in POS, but no more than |
| the alignment of the record, if one is specified. Note |
| that an alignment of 0 is taken as infinite. */ |
| unsigned int known_align; |
| |
| if (tree_fits_uhwi_p (pos)) |
| known_align = tree_to_uhwi (pos) & -tree_to_uhwi (pos); |
| else |
| known_align = BITS_PER_UNIT; |
| |
| if (TYPE_ALIGN (record_type) |
| && (known_align == 0 || known_align > TYPE_ALIGN (record_type))) |
| known_align = TYPE_ALIGN (record_type); |
| |
| layout_decl (field_decl, known_align); |
| SET_DECL_OFFSET_ALIGN (field_decl, |
| tree_fits_uhwi_p (pos) |
| ? BIGGEST_ALIGNMENT : BITS_PER_UNIT); |
| pos_from_bit (&DECL_FIELD_OFFSET (field_decl), |
| &DECL_FIELD_BIT_OFFSET (field_decl), |
| DECL_OFFSET_ALIGN (field_decl), pos); |
| } |
| |
| /* In addition to what our caller says, claim the field is addressable if we |
| know that its type is not suitable. |
| |
| The field may also be "technically" nonaddressable, meaning that even if |
| we attempt to take the field's address we will actually get the address |
| of a copy. This is the case for true bitfields, but the DECL_BIT_FIELD |
| value we have at this point is not accurate enough, so we don't account |
| for this here and let finish_record_type decide. */ |
| if (!addressable && !type_for_nonaliased_component_p (type)) |
| addressable = 1; |
| |
| /* Note that there is a trade-off in making a field nonaddressable because |
| this will cause type-based alias analysis to use the same alias set for |
| accesses to the field as for accesses to the whole record: while doing |
| so will make it more likely to disambiguate accesses to other objects |
| and accesses to the field, it will make it less likely to disambiguate |
| accesses to the other fields of the record and accesses to the field. |
| If the record is fully static, then the trade-off is irrelevant since |
| the fields of the record can always be disambiguated by their offsets |
| but, if the record is dynamic, then it can become problematic. */ |
| DECL_NONADDRESSABLE_P (field_decl) = !addressable; |
| |
| return field_decl; |
| } |
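| 
| /* A minimal usage sketch: the padding record built by maybe_pad_type above
|    creates its single field with
| 
|      field = create_field_decl (get_identifier ("F"), type, record,
|                                 orig_size, bitsize_zero_node, 0, 1);
| 
|    i.e. an addressable field at bit position 0 in an unpacked record.  */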
| |
| /* Return a PARM_DECL node with NAME and TYPE. */ |
| |
| tree |
| create_param_decl (tree name, tree type) |
| { |
| tree param_decl = build_decl (input_location, PARM_DECL, name, type); |
| |
| /* Honor TARGET_PROMOTE_PROTOTYPES like the C compiler, as not doing so |
| can lead to various ABI violations. */ |
| if (targetm.calls.promote_prototypes (NULL_TREE) |
| && INTEGRAL_TYPE_P (type) |
| && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node)) |
| { |
| /* We have to be careful about biased types here. Make a subtype |
| of integer_type_node with the proper biasing. */ |
| if (TREE_CODE (type) == INTEGER_TYPE |
| && TYPE_BIASED_REPRESENTATION_P (type)) |
| { |
| tree subtype |
| = |