/****************************************************************************
 *                                                                          *
 *                         GNAT COMPILER COMPONENTS                         *
 *                                                                          *
 *                                 D E C L                                  *
 *                                                                          *
 *                          C Implementation File                           *
 *                                                                          *
 *          Copyright (C) 1992-2021, Free Software Foundation, Inc.         *
 *                                                                          *
 * GNAT is free software;  you can  redistribute it  and/or modify it under *
 * terms of the  GNU General Public License as published  by the Free Soft- *
 * ware  Foundation;  either version 3,  or (at your option) any later ver- *
 * sion.  GNAT is distributed in the hope that it will be useful, but WITH- *
 * OUT ANY WARRANTY;  without even the  implied warranty of MERCHANTABILITY *
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License *
 * for  more details.  You should have received a copy of the GNU General   *
 * Public License along with GCC; see the file COPYING3.  If not see        *
 * <http://www.gnu.org/licenses/>.                                          *
 *                                                                          *
 * GNAT was originally developed  by the GNAT team at  New York University. *
 * Extensive contributions were provided by Ada Core Technologies Inc.      *
 *                                                                          *
 ***************************************************************************/
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "tree.h"
#include "gimple-expr.h"
#include "stringpool.h"
#include "diagnostic-core.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "tree-inline.h"
#include "demangle.h"
#include "ada.h"
#include "types.h"
#include "atree.h"
#include "elists.h"
#include "namet.h"
#include "nlists.h"
#include "repinfo.h"
#include "snames.h"
#include "uintp.h"
#include "urealp.h"
#include "fe.h"
#include "sinfo.h"
#include "einfo.h"
#include "ada-tree.h"
#include "gigi.h"
/* The "stdcall" convention is really supported on 32-bit x86/Windows only.
The following macro is a helper to avoid having to check for a Windows
specific attribute throughout this unit. */
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#ifdef TARGET_64BIT
#define Has_Stdcall_Convention(E) \
(!TARGET_64BIT && Convention (E) == Convention_Stdcall)
#else
#define Has_Stdcall_Convention(E) (Convention (E) == Convention_Stdcall)
#endif
#else
#define Has_Stdcall_Convention(E) 0
#endif
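/* Prefix of the symbol that gives the address of an entity imported from a
   DLL with the Stdcall convention, i.e. its import slot; presumably used
   further down in this unit when external names are built for such imported
   entities.  */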
#define STDCALL_PREFIX "_imp__"
/* Stack realignment is necessary for functions with foreign conventions when
the ABI doesn't mandate as much as what the compiler assumes - that is, up
to PREFERRED_STACK_BOUNDARY.
Such realignment can be requested with a dedicated function type attribute
on the targets that support it. We define FOREIGN_FORCE_REALIGN_STACK to
characterize the situations where the attribute should be set. We rely on
compiler configuration settings for 'main' to decide. */
#ifdef MAIN_STACK_BOUNDARY
#define FOREIGN_FORCE_REALIGN_STACK \
(MAIN_STACK_BOUNDARY < PREFERRED_STACK_BOUNDARY)
#else
#define FOREIGN_FORCE_REALIGN_STACK 0
#endif
/* The largest TYPE_ARRAY_MAX_SIZE value we set on an array type.
It's an arbitrary limit (256 MB) above which we consider that
the allocation is essentially unbounded. */
#define TYPE_ARRAY_SIZE_LIMIT (1 << 28)
struct incomplete
{
struct incomplete *next;
tree old_type;
Entity_Id full_type;
};
/* These variables are used to defer recursively expanding incomplete types
while we are processing a record, an array or a subprogram type. */
static int defer_incomplete_level = 0;
static struct incomplete *defer_incomplete_list;
/* This variable is used to delay expanding types coming from a limited with
clause and completed Taft Amendment types until the end of the spec. */
static struct incomplete *defer_limited_with_list;
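/* A discriminant/replacement pair used to substitute a discriminant of a
   record type with its actual value when laying out a constrained subtype;
   vectors of these pairs are built by build_subst_list below.  */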
typedef struct subst_pair_d {
tree discriminant;
tree replacement;
} subst_pair;
typedef struct variant_desc_d {
/* The type of the variant. */
tree type;
/* The associated field. */
tree field;
/* The value of the qualifier. */
tree qual;
/* The type of the variant after transformation. */
tree new_type;
/* The auxiliary data. */
tree aux;
} variant_desc;
/* A map used to cache the result of annotate_value. */
struct value_annotation_hasher : ggc_cache_ptr_hash<tree_int_map>
{
static inline hashval_t
hash (tree_int_map *m)
{
return htab_hash_pointer (m->base.from);
}
static inline bool
equal (tree_int_map *a, tree_int_map *b)
{
return a->base.from == b->base.from;
}
static int
keep_cache_entry (tree_int_map *&m)
{
return ggc_marked_p (m->base.from);
}
};
static GTY ((cache)) hash_table<value_annotation_hasher> *annotate_value_cache;
/* A map used to associate a dummy type with a list of subprogram entities. */
struct GTY((for_user)) tree_entity_vec_map
{
struct tree_map_base base;
vec<Entity_Id, va_gc_atomic> *to;
};
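/* GC and precompiled-header hooks for Entity_Id, needed because values of
   this type are stored in the GC-allocated vectors of tree_entity_vec_map
   above; the first hook is intentionally empty since an Entity_Id is not a
   pointer into GC-managed memory.  */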
void
gt_pch_nx (Entity_Id &)
{
}
void
gt_pch_nx (Entity_Id *x, gt_pointer_operator op, void *cookie)
{
op (x, NULL, cookie);
}
struct dummy_type_hasher : ggc_cache_ptr_hash<tree_entity_vec_map>
{
static inline hashval_t
hash (tree_entity_vec_map *m)
{
return htab_hash_pointer (m->base.from);
}
static inline bool
equal (tree_entity_vec_map *a, tree_entity_vec_map *b)
{
return a->base.from == b->base.from;
}
static int
keep_cache_entry (tree_entity_vec_map *&m)
{
return ggc_marked_p (m->base.from);
}
};
static GTY ((cache)) hash_table<dummy_type_hasher> *dummy_to_subprog_map;
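/* Forward declarations of the static functions defined later in this unit.  */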
static void prepend_one_attribute (struct attrib **,
enum attrib_type, tree, tree, Node_Id);
static void prepend_one_attribute_pragma (struct attrib **, Node_Id);
static void prepend_attributes (struct attrib **, Entity_Id);
static tree elaborate_expression (Node_Id, Entity_Id, const char *, bool, bool,
bool);
static tree elaborate_expression_1 (tree, Entity_Id, const char *, bool, bool);
static tree elaborate_expression_2 (tree, Entity_Id, const char *, bool, bool,
unsigned int);
static tree elaborate_reference (tree, Entity_Id, bool, tree *);
static tree gnat_to_gnu_component_type (Entity_Id, bool, bool);
static tree gnat_to_gnu_subprog_type (Entity_Id, bool, bool, tree *);
static int adjust_packed (tree, tree, int);
static tree gnat_to_gnu_field (Entity_Id, tree, int, bool, bool);
static enum inline_status_t inline_status_for_subprog (Entity_Id);
static tree gnu_ext_name_for_subprog (Entity_Id, tree);
static void set_nonaliased_component_on_array_type (tree);
static void set_reverse_storage_order_on_array_type (tree);
static bool same_discriminant_p (Entity_Id, Entity_Id);
static bool array_type_has_nonaliased_component (tree, Entity_Id);
static bool compile_time_known_address_p (Node_Id);
static bool flb_cannot_be_superflat (Node_Id);
static bool range_cannot_be_superflat (Node_Id);
static bool constructor_address_p (tree);
static bool allocatable_size_p (tree, bool);
static bool initial_value_needs_conversion (tree, tree);
static tree update_n_elem (tree, tree, tree);
static int compare_field_bitpos (const PTR, const PTR);
static bool components_to_record (Node_Id, Entity_Id, tree, tree, int, bool,
bool, bool, bool, bool, bool, bool, tree,
tree *);
static Uint annotate_value (tree);
static void annotate_rep (Entity_Id, tree);
static tree build_position_list (tree, bool, tree, tree, unsigned int, tree);
static vec<subst_pair> build_subst_list (Entity_Id, Entity_Id, bool);
static vec<variant_desc> build_variant_list (tree, Node_Id, vec<subst_pair>,
vec<variant_desc>);
static tree maybe_saturate_size (tree, unsigned int align);
static tree validate_size (Uint, tree, Entity_Id, enum tree_code, bool, bool,
const char *, const char *);
static void set_rm_size (Uint, tree, Entity_Id);
static unsigned int validate_alignment (Uint, Entity_Id, unsigned int);
static unsigned int promote_object_alignment (tree, tree, Entity_Id);
static void check_ok_for_atomic_type (tree, Entity_Id, bool);
static bool type_for_atomic_builtin_p (tree);
static tree resolve_atomic_builtin (enum built_in_function, tree);
static tree create_field_decl_from (tree, tree, tree, tree, tree,
vec<subst_pair>);
static tree create_rep_part (tree, tree, tree);
static tree get_rep_part (tree);
static tree create_variant_part_from (tree, vec<variant_desc>, tree,
tree, vec<subst_pair>, bool);
static void copy_and_substitute_in_size (tree, tree, vec<subst_pair>);
static void copy_and_substitute_in_layout (Entity_Id, Entity_Id, tree, tree,
vec<subst_pair>, bool);
static tree associate_original_type_to_packed_array (tree, Entity_Id);
static const char *get_entity_char (Entity_Id);
/* The relevant constituents of a subprogram binding to a GCC builtin. Used
to pass around calls performing profile compatibility checks. */
typedef struct {
Entity_Id gnat_entity; /* The Ada subprogram entity. */
tree ada_fntype; /* The corresponding GCC type node. */
tree btin_fntype; /* The GCC builtin function type node. */
} intrin_binding_t;
static bool intrin_profiles_compatible_p (const intrin_binding_t *);
/* Given GNAT_ENTITY, a GNAT defining identifier node, which denotes some Ada
entity, return the equivalent GCC tree for that entity (a ..._DECL node)
and associate the ..._DECL node with the input GNAT defining identifier.
If GNAT_ENTITY is a variable or a constant declaration, GNU_EXPR gives its
initial value (in GCC tree form). This is optional for a variable. For
a renamed entity, GNU_EXPR gives the object being renamed.
DEFINITION is true if this call is intended for a definition. This is used
for separate compilation where it is necessary to know whether an external
declaration or a definition must be created if the GCC equivalent was not
created previously. */
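/* As a simple illustration, for an Ada object declaration such as

       X : Integer := 1;

   the defining identifier X is expected to come in with DEFINITION set to
   true and GNU_EXPR set to the GCC tree for the literal 1, and to come out
   as a VAR_DECL whose type is the GCC type corresponding to Integer.  */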
tree
gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, bool definition)
{
/* The construct that declared the entity. */
const Node_Id gnat_decl = Declaration_Node (gnat_entity);
/* The object that the entity renames, if any. */
const Entity_Id gnat_renamed_obj = Renamed_Object (gnat_entity);
/* The kind of the entity. */
const Entity_Kind kind = Ekind (gnat_entity);
/* True if this is a type. */
const bool is_type = IN (kind, Type_Kind);
/* True if this is an artificial entity. */
const bool artificial_p = !Comes_From_Source (gnat_entity);
/* True if debug info is requested for this entity. */
const bool debug_info_p = Needs_Debug_Info (gnat_entity);
/* True if this entity is to be considered as imported. */
const bool imported_p
= (Is_Imported (gnat_entity) && No (Address_Clause (gnat_entity)));
/* True if this entity has a foreign convention. */
const bool foreign = Has_Foreign_Convention (gnat_entity);
/* For a type, contains the equivalent GNAT node to be used in gigi. */
Entity_Id gnat_equiv_type = Empty;
/* For a type, contains the GNAT node to be used for back-annotation. */
Entity_Id gnat_annotate_type = Empty;
/* Temporary used to walk the GNAT tree. */
Entity_Id gnat_temp;
/* Contains the GCC DECL node which is equivalent to the input GNAT node.
This node will be associated with the GNAT node by a call to save_gnu_tree
at the end of the `switch' statement. */
tree gnu_decl = NULL_TREE;
/* Contains the GCC type to be used for the GCC node. */
tree gnu_type = NULL_TREE;
/* Contains the GCC size tree to be used for the GCC node. */
tree gnu_size = NULL_TREE;
/* Contains the GCC name to be used for the GCC node. */
tree gnu_entity_name;
/* True if we have already saved gnu_decl as a GNAT association. This can
also be used to purposely avoid making such an association but this use
case ought not to be applied to types because it can break the deferral
mechanism implemented for access types. */
bool saved = false;
/* True if we incremented defer_incomplete_level. */
bool this_deferred = false;
/* True if we incremented force_global. */
bool this_global = false;
/* True if we should check to see if the entity was elaborated during processing. */
bool maybe_present = false;
/* True if we made GNU_DECL and its type here. */
bool this_made_decl = false;
/* Size and alignment of the GCC node, if meaningful. */
unsigned int esize = 0, align = 0;
/* Contains the list of attributes directly attached to the entity. */
struct attrib *attr_list = NULL;
/* Since a use of an itype is a definition, process it as such if it is in
the main unit, except for E_Access_Subtype because it's actually a use
of its base type, and for E_Class_Wide_Subtype with an Equivalent_Type
because it's actually a use of the latter type. */
if (!definition
&& is_type
&& Is_Itype (gnat_entity)
&& Ekind (gnat_entity) != E_Access_Subtype
&& !(Ekind (gnat_entity) == E_Class_Wide_Subtype
&& Present (Equivalent_Type (gnat_entity)))
&& !present_gnu_tree (gnat_entity)
&& In_Extended_Main_Code_Unit (gnat_entity))
{
/* Ensure that we are in a subprogram mentioned in the Scope chain of
this entity, our current scope is global, or we encountered a task
or entry (where we can't currently accurately check scoping). */
if (!current_function_decl
|| DECL_ELABORATION_PROC_P (current_function_decl))
{
process_type (gnat_entity);
return get_gnu_tree (gnat_entity);
}
for (gnat_temp = Scope (gnat_entity);
Present (gnat_temp);
gnat_temp = Scope (gnat_temp))
{
if (Is_Type (gnat_temp))
gnat_temp = Underlying_Type (gnat_temp);
if (Ekind (gnat_temp) == E_Subprogram_Body)
gnat_temp
= Corresponding_Spec (Parent (Declaration_Node (gnat_temp)));
if (Is_Subprogram (gnat_temp)
&& Present (Protected_Body_Subprogram (gnat_temp)))
gnat_temp = Protected_Body_Subprogram (gnat_temp);
if (Ekind (gnat_temp) == E_Entry
|| Ekind (gnat_temp) == E_Entry_Family
|| Ekind (gnat_temp) == E_Task_Type
|| (Is_Subprogram (gnat_temp)
&& present_gnu_tree (gnat_temp)
&& (current_function_decl
== gnat_to_gnu_entity (gnat_temp, NULL_TREE, false))))
{
process_type (gnat_entity);
return get_gnu_tree (gnat_entity);
}
}
/* This abort means the itype has an incorrect scope, i.e. that its
scope does not correspond to the subprogram it is first used in. */
gcc_unreachable ();
}
/* If we've already processed this entity, return what we got last time.
If we are defining the node, we should not have already processed it.
In that case, we will abort below when we try to save a new GCC tree
for this object. We also need to handle the case of getting a dummy
type when a Full_View exists but be careful so as not to trigger its
premature elaboration. Likewise for a cloned subtype without its own
freeze node, which typically happens when a generic gets instantiated
on an incomplete or private type. */
if ((!definition || (is_type && imported_p))
&& present_gnu_tree (gnat_entity))
{
gnu_decl = get_gnu_tree (gnat_entity);
if (TREE_CODE (gnu_decl) == TYPE_DECL
&& TYPE_IS_DUMMY_P (TREE_TYPE (gnu_decl))
&& IN (kind, Incomplete_Or_Private_Kind)
&& Present (Full_View (gnat_entity))
&& (present_gnu_tree (Full_View (gnat_entity))
|| No (Freeze_Node (Full_View (gnat_entity)))))
{
gnu_decl
= gnat_to_gnu_entity (Full_View (gnat_entity), NULL_TREE,
false);
save_gnu_tree (gnat_entity, NULL_TREE, false);
save_gnu_tree (gnat_entity, gnu_decl, false);
}
if (TREE_CODE (gnu_decl) == TYPE_DECL
&& TYPE_IS_DUMMY_P (TREE_TYPE (gnu_decl))
&& Ekind (gnat_entity) == E_Record_Subtype
&& No (Freeze_Node (gnat_entity))
&& Present (Cloned_Subtype (gnat_entity))
&& (present_gnu_tree (Cloned_Subtype (gnat_entity))
|| No (Freeze_Node (Cloned_Subtype (gnat_entity)))))
{
gnu_decl
= gnat_to_gnu_entity (Cloned_Subtype (gnat_entity), NULL_TREE,
false);
save_gnu_tree (gnat_entity, NULL_TREE, false);
save_gnu_tree (gnat_entity, gnu_decl, false);
}
return gnu_decl;
}
/* If this is a numeric or enumeral type, or an access type, a nonzero Esize
must be specified unless it was specified by the programmer. Exceptions
are for access-to-protected-subprogram types and all access subtypes, as
another GNAT type is used to lay out the GCC type for them. */
gcc_assert (!is_type
|| Known_Esize (gnat_entity)
|| Has_Size_Clause (gnat_entity)
|| (!Is_In_Numeric_Kind (kind)
&& !IN (kind, Enumeration_Kind)
&& (!IN (kind, Access_Kind)
|| kind == E_Access_Protected_Subprogram_Type
|| kind == E_Anonymous_Access_Protected_Subprogram_Type
|| kind == E_Access_Subtype
|| type_annotate_only)));
/* The RM size must be specified for all discrete and fixed-point types. */
gcc_assert (!(Is_In_Discrete_Or_Fixed_Point_Kind (kind)
&& !Known_RM_Size (gnat_entity)));
/* If we get here, it means we have not yet done anything with this entity.
If we are not defining it, it must be a type or an entity that is defined
elsewhere or externally, otherwise we should have defined it already.
In other words, the failure of this assertion typically arises when a
reference to an entity (type or object) is made before its declaration,
either directly or by means of a freeze node which is incorrectly placed.
This can also happen for an entity referenced out of context, for example
a parameter outside of the subprogram where it is declared. GNAT_ENTITY
is the N_Defining_Identifier of the entity, the problematic N_Identifier
being the argument passed to Identifier_to_gnu in the parent frame.
One exception is for an entity, typically an inherited operation, which is
a local alias for the parent's operation. It is neither defined, since it
is an inherited operation, nor public, since it is declared in the current
compilation unit, so we test Is_Public on the Alias entity instead. */
gcc_assert (definition
|| is_type
|| kind == E_Discriminant
|| kind == E_Component
|| kind == E_Label
|| (kind == E_Constant && Present (Full_View (gnat_entity)))
|| Is_Public (gnat_entity)
|| (Present (Alias (gnat_entity))
&& Is_Public (Alias (gnat_entity)))
|| type_annotate_only);
/* Get the name of the entity and set up the line number and filename of
the original definition for use in any decl we make. Make sure we do
not inherit another source location. */
gnu_entity_name = get_entity_name (gnat_entity);
if (!renaming_from_instantiation_p (gnat_entity))
Sloc_to_locus (Sloc (gnat_entity), &input_location);
/* For cases when we are not defining (i.e., we are referencing from
another compilation unit) public entities, show we are at global level
for the purpose of computing scopes. Don't do this for components or
discriminants since the relevant test is whether or not the record is
being defined. */
if (!definition
&& kind != E_Component
&& kind != E_Discriminant
&& Is_Public (gnat_entity)
&& !Is_Statically_Allocated (gnat_entity))
force_global++, this_global = true;
/* Handle any attributes directly attached to the entity. */
if (Has_Gigi_Rep_Item (gnat_entity))
prepend_attributes (&attr_list, gnat_entity);
/* Do some common processing for types. */
if (is_type)
{
/* Compute the equivalent type to be used in gigi. */
gnat_equiv_type = Gigi_Equivalent_Type (gnat_entity);
/* Machine_Attributes on types are expected to be propagated to
subtypes. The corresponding Gigi_Rep_Items are only attached
to the first subtype though, so we handle the propagation here. */
if (Base_Type (gnat_entity) != gnat_entity
&& !Is_First_Subtype (gnat_entity)
&& Has_Gigi_Rep_Item (First_Subtype (Base_Type (gnat_entity))))
prepend_attributes (&attr_list,
First_Subtype (Base_Type (gnat_entity)));
/* Compute a default value for the size of an elementary type. */
if (Known_Esize (gnat_entity) && Is_Elementary_Type (gnat_entity))
{
unsigned int max_esize;
gcc_assert (UI_Is_In_Int_Range (Esize (gnat_entity)));
esize = UI_To_Int (Esize (gnat_entity));
if (IN (kind, Float_Kind))
max_esize = fp_prec_to_size (LONG_DOUBLE_TYPE_SIZE);
else if (IN (kind, Access_Kind))
max_esize = POINTER_SIZE * 2;
else
max_esize = Enable_128bit_Types ? 128 : LONG_LONG_TYPE_SIZE;
if (esize > max_esize)
esize = max_esize;
}
}
switch (kind)
{
case E_Component:
case E_Discriminant:
{
/* The GNAT record where the component was defined. */
Entity_Id gnat_record = Underlying_Type (Scope (gnat_entity));
/* If the entity is a discriminant of an extended tagged type used to
rename a discriminant of the parent type, return the latter. */
if (kind == E_Discriminant
&& Present (Corresponding_Discriminant (gnat_entity))
&& Is_Tagged_Type (gnat_record))
{
gnu_decl
= gnat_to_gnu_entity (Corresponding_Discriminant (gnat_entity),
gnu_expr, definition);
saved = true;
break;
}
/* If the entity is an inherited component (in the case of extended
tagged record types), just return the original entity, which must
be a FIELD_DECL. Likewise for discriminants. If the entity is a
non-stored discriminant (in the case of derived untagged record
types), return the stored discriminant it renames. */
if (Present (Original_Record_Component (gnat_entity))
&& Original_Record_Component (gnat_entity) != gnat_entity)
{
gnu_decl
= gnat_to_gnu_entity (Original_Record_Component (gnat_entity),
gnu_expr, definition);
/* GNU_DECL contains a PLACEHOLDER_EXPR for discriminants. */
if (kind == E_Discriminant)
saved = true;
break;
}
/* Otherwise, if we are not defining this and we have no GCC type
for the containing record, make one for it. Then we should
have made our own equivalent. */
if (!definition && !present_gnu_tree (gnat_record))
{
/* ??? If this is in a record whose scope is a protected
type and we have an Original_Record_Component, use it.
This is a workaround for major problems in protected type
handling. */
Entity_Id Scop = Scope (Scope (gnat_entity));
if (Is_Protected_Type (Underlying_Type (Scop))
&& Present (Original_Record_Component (gnat_entity)))
{
gnu_decl
= gnat_to_gnu_entity (Original_Record_Component
(gnat_entity),
gnu_expr, false);
}
else
{
gnat_to_gnu_entity (Scope (gnat_entity), NULL_TREE, false);
gnu_decl = get_gnu_tree (gnat_entity);
}
saved = true;
break;
}
/* Here we have no GCC type and this is a reference rather than a
definition. This should never happen. Most likely the cause is
reference before declaration in the GNAT tree for gnat_entity. */
gcc_unreachable ();
}
case E_Named_Integer:
case E_Named_Real:
{
tree gnu_ext_name = NULL_TREE;
if (Is_Public (gnat_entity))
gnu_ext_name = create_concat_name (gnat_entity, NULL);
/* All references are supposed to be folded in the front-end. */
gcc_assert (definition && gnu_expr);
gnu_type = gnat_to_gnu_type (Etype (gnat_entity));
gnu_expr = convert (gnu_type, gnu_expr);
/* Build a CONST_DECL for debugging purposes exclusively. */
gnu_decl
= create_var_decl (gnu_entity_name, gnu_ext_name, gnu_type,
gnu_expr, true, Is_Public (gnat_entity),
false, false, false, artificial_p,
debug_info_p, NULL, gnat_entity);
}
break;
case E_Constant:
/* Ignore constant definitions already marked with the error node. See
the N_Object_Declaration case of gnat_to_gnu for the rationale. */
if (definition
&& present_gnu_tree (gnat_entity)
&& get_gnu_tree (gnat_entity) == error_mark_node)
{
maybe_present = true;
break;
}
/* Ignore deferred constant definitions without address clause since
they are processed fully in the front-end. If No_Initialization
is set, this is not a deferred constant but a constant whose value
is built manually. And constants that are renamings are handled
like variables. */
if (definition
&& !gnu_expr
&& No (Address_Clause (gnat_entity))
&& !No_Initialization (gnat_decl)
&& No (gnat_renamed_obj))
{
gnu_decl = error_mark_node;
saved = true;
break;
}
/* If this is a use of a deferred constant without address clause,
get its full definition. */
if (!definition
&& No (Address_Clause (gnat_entity))
&& Present (Full_View (gnat_entity)))
{
gnu_decl
= gnat_to_gnu_entity (Full_View (gnat_entity), gnu_expr, false);
saved = true;
break;
}
/* If we have a constant that we are not defining, get the expression it
was defined to represent. This is necessary to avoid generating dumb
elaboration code in simple cases, and we may throw it away later if it
is not a constant. But do not do it for dispatch tables because they
are only referenced indirectly and we need to have a consistent view
of the exported and of the imported declarations of the tables from
external units for them to be properly merged in LTO mode. Moreover
simply do not retrieve the expression if it is an allocator because
the designated type might still be dummy at this point. Note that we
invoke gnat_to_gnu_external and not gnat_to_gnu because the expression
may contain N_Expression_With_Actions nodes and thus declarations of
objects from other units that we need to discard. Note also that we
need to do it even if we are only annotating types, so as to be able
to validate representation clauses using constants. */
if (!definition
&& !No_Initialization (gnat_decl)
&& !Is_Dispatch_Table_Entity (gnat_entity)
&& Present (gnat_temp = Expression (gnat_decl))
&& Nkind (gnat_temp) != N_Allocator
&& (Is_Elementary_Type (Etype (gnat_entity)) || !type_annotate_only))
gnu_expr = gnat_to_gnu_external (gnat_temp);
/* ... fall through ... */
case E_Exception:
case E_Loop_Parameter:
case E_Out_Parameter:
case E_Variable:
{
const Entity_Id gnat_type = Etype (gnat_entity);
/* Always create a variable for volatile objects and variables seen
constant but with a Linker_Section pragma. */
bool const_flag
= ((kind == E_Constant || kind == E_Variable)
&& Is_True_Constant (gnat_entity)
&& !(kind == E_Variable
&& Present (Linker_Section_Pragma (gnat_entity)))
&& !Treat_As_Volatile (gnat_entity)
&& (((Nkind (gnat_decl) == N_Object_Declaration)
&& Present (Expression (gnat_decl)))
|| Present (gnat_renamed_obj)
|| imported_p));
bool inner_const_flag = const_flag;
bool static_flag = Is_Statically_Allocated (gnat_entity);
/* We implement RM 13.3(19) for exported and imported (non-constant)
objects by making them volatile. */
bool volatile_flag
= (Treat_As_Volatile (gnat_entity)
|| (!const_flag && (Is_Exported (gnat_entity) || imported_p)));
bool mutable_p = false;
bool used_by_ref = false;
tree gnu_ext_name = NULL_TREE;
tree gnu_ada_size = NULL_TREE;
/* We need to translate the renamed object even though we are only
referencing the renaming. But it may contain a call for which
we'll generate a temporary to hold the return value and which
is part of the definition of the renaming, so discard it. */
if (Present (gnat_renamed_obj) && !definition)
{
if (kind == E_Exception)
gnu_expr = gnat_to_gnu_entity (Renamed_Entity (gnat_entity),
NULL_TREE, false);
else
gnu_expr = gnat_to_gnu_external (gnat_renamed_obj);
}
/* Get the type after elaborating the renamed object. */
if (foreign && Is_Descendant_Of_Address (Underlying_Type (gnat_type)))
gnu_type = ptr_type_node;
else
gnu_type = gnat_to_gnu_type (gnat_type);
/* For a debug renaming declaration, build a debug-only entity. */
if (Present (Debug_Renaming_Link (gnat_entity)))
{
/* Force a non-null value to make sure the symbol is retained. */
tree value = build1 (INDIRECT_REF, gnu_type,
build1 (NOP_EXPR,
build_pointer_type (gnu_type),
integer_minus_one_node));
gnu_decl = build_decl (input_location,
VAR_DECL, gnu_entity_name, gnu_type);
SET_DECL_VALUE_EXPR (gnu_decl, value);
DECL_HAS_VALUE_EXPR_P (gnu_decl) = 1;
TREE_STATIC (gnu_decl) = global_bindings_p ();
gnat_pushdecl (gnu_decl, gnat_entity);
break;
}
/* If this is a loop variable, its type should be the base type.
This is because the code for processing a loop determines whether
a normal loop end test can be done by comparing the bounds of the
loop against those of the base type, which is presumed to be the
size used for computation. But this is not correct when the size
of the subtype is smaller than the type. */
if (kind == E_Loop_Parameter)
gnu_type = get_base_type (gnu_type);
/* Reject non-renamed objects whose type is an unconstrained array or
any object whose type is a dummy type or void. */
if ((TREE_CODE (gnu_type) == UNCONSTRAINED_ARRAY_TYPE
&& No (gnat_renamed_obj))
|| TYPE_IS_DUMMY_P (gnu_type)
|| TREE_CODE (gnu_type) == VOID_TYPE)
{
gcc_assert (type_annotate_only);
if (this_global)
force_global--;
return error_mark_node;
}
/* If an alignment is specified, use it if valid. Note that exceptions
are objects but don't have an alignment and there is also no point in
setting it for an address clause, since the final type of the object
will be a reference type. */
if (Known_Alignment (gnat_entity)
&& kind != E_Exception
&& No (Address_Clause (gnat_entity)))
align = validate_alignment (Alignment (gnat_entity), gnat_entity,
TYPE_ALIGN (gnu_type));
/* Likewise, if a size is specified, use it if valid. */
if (Known_Esize (gnat_entity))
gnu_size
= validate_size (Esize (gnat_entity), gnu_type, gnat_entity,
VAR_DECL, false, Has_Size_Clause (gnat_entity),
NULL, NULL);
if (gnu_size)
{
gnu_type
= make_type_from_size (gnu_type, gnu_size,
Has_Biased_Representation (gnat_entity));
if (operand_equal_p (TYPE_SIZE (gnu_type), gnu_size, 0))
gnu_size = NULL_TREE;
}
/* If this object has self-referential size, it must be a record with
a default discriminant. We are supposed to allocate an object of
the maximum size in this case, unless it is a constant with an
initializing expression, in which case we can get the size from
that. Note that the resulting size may still be a variable, so
this may end up with an indirect allocation. */
if (No (gnat_renamed_obj)
&& CONTAINS_PLACEHOLDER_P (TYPE_SIZE (gnu_type)))
{
if (gnu_expr && kind == E_Constant)
{
gnu_size = TYPE_SIZE (TREE_TYPE (gnu_expr));
gnu_ada_size = TYPE_ADA_SIZE (TREE_TYPE (gnu_expr));
if (CONTAINS_PLACEHOLDER_P (gnu_size))
{
/* If the initializing expression is itself a constant,
despite having a nominal type with self-referential
size, we can get the size directly from it. */
if (TREE_CODE (gnu_expr) == COMPONENT_REF
&& TYPE_IS_PADDING_P
(TREE_TYPE (TREE_OPERAND (gnu_expr, 0)))
&& TREE_CODE (TREE_OPERAND (gnu_expr, 0)) == VAR_DECL
&& (TREE_READONLY (TREE_OPERAND (gnu_expr, 0))
|| DECL_READONLY_ONCE_ELAB
(TREE_OPERAND (gnu_expr, 0))))
{
gnu_size = DECL_SIZE (TREE_OPERAND (gnu_expr, 0));
gnu_ada_size = gnu_size;
}
else
{
gnu_size
= SUBSTITUTE_PLACEHOLDER_IN_EXPR (gnu_size,
gnu_expr);
gnu_ada_size
= SUBSTITUTE_PLACEHOLDER_IN_EXPR (gnu_ada_size,
gnu_expr);
}
}
}
/* We may have no GNU_EXPR because No_Initialization is
set even though there's an Expression. */
else if (kind == E_Constant
&& Nkind (gnat_decl) == N_Object_Declaration
&& Present (Expression (gnat_decl)))
{
tree gnu_expr_type
= gnat_to_gnu_type (Etype (Expression (gnat_decl)));
gnu_size = TYPE_SIZE (gnu_expr_type);
gnu_ada_size = TYPE_ADA_SIZE (gnu_expr_type);
}
else
{
gnu_size = max_size (TYPE_SIZE (gnu_type), true);
/* We can be called on unconstrained arrays in this mode. */
if (!type_annotate_only)
gnu_ada_size = max_size (TYPE_ADA_SIZE (gnu_type), true);
mutable_p = true;
}
/* If the size isn't constant and we are at global level, call
elaborate_expression_1 to make a variable for it rather than
calculating it each time. */
if (!TREE_CONSTANT (gnu_size) && global_bindings_p ())
gnu_size = elaborate_expression_1 (gnu_size, gnat_entity,
"SIZE", definition, false);
}
/* If the size is zero bytes, make it one byte since some linkers have
trouble with zero-sized objects. If the object will have a
template, that will make it nonzero so don't bother. Also avoid
doing that for an object renaming or an object with an address
clause, as we would lose useful information on the view size
(e.g. for null array slices) and we are not allocating the object
here anyway. */
if (((gnu_size
&& integer_zerop (gnu_size)
&& !TREE_OVERFLOW (gnu_size))
|| (TYPE_SIZE (gnu_type)
&& integer_zerop (TYPE_SIZE (gnu_type))
&& !TREE_OVERFLOW (TYPE_SIZE (gnu_type))))
&& !Is_Constr_Subt_For_UN_Aliased (gnat_type)
&& No (gnat_renamed_obj)
&& No (Address_Clause (gnat_entity)))
gnu_size = bitsize_unit_node;
/* If this is an object with no specified size and alignment, and
if either it is full access or we are not optimizing alignment for
space and it is composite and not an exception, an Out parameter
or a reference to another object, and the size of its type is a
constant, set the alignment to the smallest one which is not
smaller than the size, with an appropriate cap. */
if (!Known_Esize (gnat_entity)
&& !Known_Alignment (gnat_entity)
&& (Is_Full_Access (gnat_entity)
|| (!Optimize_Alignment_Space (gnat_entity)
&& kind != E_Exception
&& kind != E_Out_Parameter
&& Is_Composite_Type (gnat_type)
&& !Is_Constr_Subt_For_UN_Aliased (gnat_type)
&& !Is_Exported (gnat_entity)
&& !imported_p
&& No (gnat_renamed_obj)
&& No (Address_Clause (gnat_entity))))
&& (TREE_CODE (TYPE_SIZE (gnu_type)) == INTEGER_CST || gnu_size))
align = promote_object_alignment (gnu_type, gnu_size, gnat_entity);
/* If the object is set to have atomic components, find the component
type and validate it.
??? Note that we ignore Has_Volatile_Components on objects; it's
not at all clear what to do in that case. */
if (Has_Atomic_Components (gnat_entity))
{
tree gnu_inner = (TREE_CODE (gnu_type) == ARRAY_TYPE
? TREE_TYPE (gnu_type) : gnu_type);
while (TREE_CODE (gnu_inner) == ARRAY_TYPE
&& TYPE_MULTI_ARRAY_P (gnu_inner))
gnu_inner = TREE_TYPE (gnu_inner);
check_ok_for_atomic_type (gnu_inner, gnat_entity, true);
}
/* If this is an aliased object with an unconstrained array nominal
subtype, make a type that includes the template. We will either
allocate or create a variable of that type, see below. */
if (Is_Constr_Subt_For_UN_Aliased (gnat_type)
&& Is_Array_Type (Underlying_Type (gnat_type))
&& !type_annotate_only)
{
tree gnu_array = gnat_to_gnu_type (Base_Type (gnat_type));
gnu_type
= build_unc_object_type_from_ptr (TREE_TYPE (gnu_array),
gnu_type,
concat_name (gnu_entity_name,
"UNC"),
debug_info_p);
}
/* ??? If this is an object of CW type initialized to a value, try to
ensure that the object is sufficiently aligned for this value, but
without pessimizing the allocation. This is a kludge necessary
because we don't support dynamic alignment. */
if (align == 0
&& Ekind (gnat_type) == E_Class_Wide_Subtype
&& No (gnat_renamed_obj)
&& No (Address_Clause (gnat_entity)))
align = get_target_system_allocator_alignment () * BITS_PER_UNIT;
#ifdef MINIMUM_ATOMIC_ALIGNMENT
/* If the size is a constant and no alignment is specified, force
the alignment to be the minimum valid atomic alignment. The
restriction on constant size avoids problems with variable-size
temporaries; if the size is variable, there's no issue with
atomic access. Also don't do this for a constant, since it isn't
necessary and can interfere with constant replacement. Finally,
do not do it for Out parameters since that creates a
size inconsistency with In parameters. */
if (align == 0
&& MINIMUM_ATOMIC_ALIGNMENT > TYPE_ALIGN (gnu_type)
&& !FLOAT_TYPE_P (gnu_type)
&& !const_flag && No (gnat_renamed_obj)
&& !imported_p && No (Address_Clause (gnat_entity))
&& kind != E_Out_Parameter
&& (gnu_size ? TREE_CODE (gnu_size) == INTEGER_CST
: TREE_CODE (TYPE_SIZE (gnu_type)) == INTEGER_CST))
align = MINIMUM_ATOMIC_ALIGNMENT;
#endif
/* Do not take into account aliased adjustments or alignment promotions
to compute the size of the object. */
tree gnu_object_size = gnu_size ? gnu_size : TYPE_SIZE (gnu_type);
/* If the object is aliased, of a constrained nominal subtype and its
size might be zero at run time, we force at least the unit size. */
if (Is_Aliased (gnat_entity)
&& !Is_Constr_Subt_For_UN_Aliased (gnat_type)
&& Is_Array_Type (Underlying_Type (gnat_type))
&& !TREE_CONSTANT (gnu_object_size))
gnu_size = size_binop (MAX_EXPR, gnu_object_size, bitsize_unit_node);
/* Make a new type with the desired size and alignment, if needed. */
if (gnu_size || align > 0)
{
tree orig_type = gnu_type;
gnu_type = maybe_pad_type (gnu_type, gnu_size, align, gnat_entity,
false, definition, true);
/* If the nominal subtype of the object is unconstrained and its
size is not fixed, compute the Ada size from the Ada size of
the subtype and/or the expression; this will make it possible
for gnat_type_max_size to easily compute a maximum size. */
if (gnu_ada_size && gnu_size && !TREE_CONSTANT (gnu_size))
SET_TYPE_ADA_SIZE (gnu_type, gnu_ada_size);
/* If a padding record was made, declare it now since it will
never be declared otherwise. This is necessary to ensure
that its subtrees are properly marked. */
if (gnu_type != orig_type && !DECL_P (TYPE_NAME (gnu_type)))
create_type_decl (TYPE_NAME (gnu_type), gnu_type, true,
debug_info_p, gnat_entity);
}
/* Now check if the type of the object allows atomic access. */
if (Is_Full_Access (gnat_entity))
check_ok_for_atomic_type (gnu_type, gnat_entity, false);
/* If this is a renaming, avoid as much as possible to create a new
object. However, in some cases, creating it is required because
renaming can be applied to objects that are not names in Ada.
This processing needs to be applied to the raw expression so as
to make it more likely to rename the underlying object. */
if (Present (gnat_renamed_obj))
{
/* If the renamed object had padding, strip off the reference to
the inner object and reset our type. */
if ((TREE_CODE (gnu_expr) == COMPONENT_REF
&& TYPE_IS_PADDING_P (TREE_TYPE (TREE_OPERAND (gnu_expr, 0))))
/* Strip useless conversions around the object. */
|| gnat_useless_type_conversion (gnu_expr))
{
gnu_expr = TREE_OPERAND (gnu_expr, 0);
gnu_type = TREE_TYPE (gnu_expr);
}
/* Or else, if the renamed object has an unconstrained type with
default discriminant, use the padded type. */
else if (type_is_padding_self_referential (TREE_TYPE (gnu_expr)))
gnu_type = TREE_TYPE (gnu_expr);
/* If this is a constant renaming stemming from a function call,
treat it as a normal object whose initial value is what is being
renamed. RM 3.3 says that the result of evaluating a function
call is a constant object. Therefore, it can be the inner
object of a constant renaming and the renaming must be fully
instantiated, i.e. it cannot be a reference to (part of) an
existing object. And treat other rvalues the same way. */
tree inner = gnu_expr;
while (handled_component_p (inner) || CONVERT_EXPR_P (inner))
inner = TREE_OPERAND (inner, 0);
/* Expand_Dispatching_Call can prepend a comparison of the tags
before the call to "=". */
if (TREE_CODE (inner) == TRUTH_ANDIF_EXPR
|| TREE_CODE (inner) == COMPOUND_EXPR)
inner = TREE_OPERAND (inner, 1);
if ((TREE_CODE (inner) == CALL_EXPR
&& !call_is_atomic_load (inner))
|| TREE_CODE (inner) == CONSTRUCTOR
|| CONSTANT_CLASS_P (inner)
|| COMPARISON_CLASS_P (inner)
|| BINARY_CLASS_P (inner)
|| EXPRESSION_CLASS_P (inner)
/* We need to detect the case where a temporary is created to
hold the return value, since we cannot safely rename it at
top level as it lives only in the elaboration routine. */
|| (TREE_CODE (inner) == VAR_DECL
&& DECL_RETURN_VALUE_P (inner))
/* We also need to detect the case where the front-end creates
a dangling 'reference to a function call at top level and
substitutes it in the renaming, for example:
q__b : boolean renames r__f.e (1);
can be rewritten into:
q__R1s : constant q__A2s := r__f'reference;
[...]
q__b : boolean renames q__R1s.all.e (1);
We cannot safely rename the rewritten expression since the
underlying object lives only in the elaboration routine. */
|| (TREE_CODE (inner) == INDIRECT_REF
&& (inner
= remove_conversions (TREE_OPERAND (inner, 0), true))
&& TREE_CODE (inner) == VAR_DECL
&& DECL_RETURN_VALUE_P (inner)))
;
/* Otherwise, this is an lvalue being renamed, so it needs to be
elaborated as a reference and substituted for the entity. But
this means that we must evaluate the address of the renaming
in the definition case to instantiate the SAVE_EXPRs. */
else
{
tree gnu_init = NULL_TREE;
if (type_annotate_only && TREE_CODE (gnu_expr) == ERROR_MARK)
break;
gnu_expr
= elaborate_reference (gnu_expr, gnat_entity, definition,
&gnu_init);
/* No DECL_EXPR might be created so the expression needs to be
marked manually because it will likely be shared. */
if (global_bindings_p ())
MARK_VISITED (gnu_expr);
/* This assertion will fail if the renamed object isn't aligned
enough as to make it possible to honor the alignment set on
the renaming. */
if (align)
{
const unsigned int ralign
= DECL_P (gnu_expr)
? DECL_ALIGN (gnu_expr)
: TYPE_ALIGN (TREE_TYPE (gnu_expr));
gcc_assert (ralign >= align);
}
/* The expression might not be a DECL so save it manually. */
gnu_decl = gnu_expr;
save_gnu_tree (gnat_entity, gnu_decl, true);
saved = true;
annotate_object (gnat_entity, gnu_type, NULL_TREE, false);
/* If this is only a reference to the entity, we are done. */
if (!definition)
break;
/* Otherwise, emit the initialization statement, if any. */
if (gnu_init)
add_stmt (gnu_init);
/* If it needs to be materialized for debugging purposes, build
the entity as an indirect reference to the renamed object. */
if (Materialize_Entity (gnat_entity))
{
gnu_type = build_reference_type (gnu_type);
const_flag = true;
volatile_flag = false;
gnu_expr = build_unary_op (ADDR_EXPR, gnu_type, gnu_expr);
create_var_decl (gnu_entity_name, gnu_ext_name,
TREE_TYPE (gnu_expr), gnu_expr,
const_flag, Is_Public (gnat_entity),
imported_p, static_flag, volatile_flag,
artificial_p, debug_info_p, attr_list,
gnat_entity, false);
}
/* Otherwise, instantiate the SAVE_EXPRs if needed. */
else if (TREE_SIDE_EFFECTS (gnu_expr))
add_stmt (build_unary_op (ADDR_EXPR, NULL_TREE, gnu_expr));
break;
}
}
/* If we are defining an aliased object whose nominal subtype is
unconstrained, the object is a record that contains both the
template and the object. If there is an initializer, it will
have already been converted to the right type, but we need to
create the template if there is no initializer. */
if (definition
&& !gnu_expr
&& TREE_CODE (gnu_type) == RECORD_TYPE
&& (TYPE_CONTAINS_TEMPLATE_P (gnu_type)
/* Beware that padding might have been introduced above. */
|| (TYPE_PADDING_P (gnu_type)
&& TREE_CODE (TREE_TYPE (TYPE_FIELDS (gnu_type)))
== RECORD_TYPE
&& TYPE_CONTAINS_TEMPLATE_P
(TREE_TYPE (TYPE_FIELDS (gnu_type))))))
{
tree template_field
= TYPE_PADDING_P (gnu_type)
? TYPE_FIELDS (TREE_TYPE (TYPE_FIELDS (gnu_type)))
: TYPE_FIELDS (gnu_type);
vec<constructor_elt, va_gc> *v;
vec_alloc (v, 1);
tree t = build_template (TREE_TYPE (template_field),
TREE_TYPE (DECL_CHAIN (template_field)),
NULL_TREE);
CONSTRUCTOR_APPEND_ELT (v, template_field, t);
gnu_expr = gnat_build_constructor (gnu_type, v);
}
/* Convert the expression to the type of the object if need be. */
if (gnu_expr && initial_value_needs_conversion (gnu_type, gnu_expr))
gnu_expr = convert (gnu_type, gnu_expr);
/* If this is a pointer that doesn't have an initializing expression,
initialize it to NULL, unless the object is declared imported as
per RM B.1(24). */
if (definition
&& (POINTER_TYPE_P (gnu_type) || TYPE_IS_FAT_POINTER_P (gnu_type))
&& !gnu_expr
&& !Is_Imported (gnat_entity))
gnu_expr = integer_zero_node;
/* If we are defining the object and it has an Address clause, we must
either get the address expression from the saved GCC tree for the
object if it has a Freeze node, or elaborate the address expression
here since the front-end has guaranteed that the elaboration has no
effects in this case. */
if (definition && Present (Address_Clause (gnat_entity)))
{
const Node_Id gnat_clause = Address_Clause (gnat_entity);
const Node_Id gnat_address = Expression (gnat_clause);
tree gnu_address = present_gnu_tree (gnat_entity)
? TREE_OPERAND (get_gnu_tree (gnat_entity), 0)
: gnat_to_gnu (gnat_address);
save_gnu_tree (gnat_entity, NULL_TREE, false);
/* Convert the type of the object to a reference type that can
alias everything as per RM 13.3(19). */
if (volatile_flag && !TYPE_VOLATILE (gnu_type))
gnu_type = change_qualified_type (gnu_type, TYPE_QUAL_VOLATILE);
gnu_type
= build_reference_type_for_mode (gnu_type, ptr_mode, true);
gnu_address = convert (gnu_type, gnu_address);
used_by_ref = true;
const_flag
= (!Is_Public (gnat_entity)
|| compile_time_known_address_p (gnat_address));
volatile_flag = false;
gnu_size = NULL_TREE;
/* If this is an aliased object with an unconstrained array nominal
subtype, then it can overlay only another aliased object with an
unconstrained array nominal subtype and compatible template. */
if (Is_Constr_Subt_For_UN_Aliased (gnat_type)
&& Is_Array_Type (Underlying_Type (gnat_type))
&& !type_annotate_only)
{
tree rec_type = TREE_TYPE (gnu_type);
tree off = byte_position (DECL_CHAIN (TYPE_FIELDS (rec_type)));
/* This is the pattern built for a regular object. */
if (TREE_CODE (gnu_address) == POINTER_PLUS_EXPR
&& TREE_OPERAND (gnu_address, 1) == off)
gnu_address = TREE_OPERAND (gnu_address, 0);
/* This is the pattern built for an overaligned object. */
else if (TREE_CODE (gnu_address) == POINTER_PLUS_EXPR
&& TREE_CODE (TREE_OPERAND (gnu_address, 1))
== PLUS_EXPR
&& TREE_OPERAND (TREE_OPERAND (gnu_address, 1), 1)
== off)
gnu_address
= build2 (POINTER_PLUS_EXPR, gnu_type,
TREE_OPERAND (gnu_address, 0),
TREE_OPERAND (TREE_OPERAND (gnu_address, 1), 0));
/* We make an exception for an absolute address but we warn
that there is a descriptor at the start of the object. */
else if (TREE_CODE (gnu_address) == INTEGER_CST)
{
post_error_ne ("??aliased object& with unconstrained "
"array nominal subtype", gnat_clause,
gnat_entity);
post_error ("\\starts with a descriptor whose size is "
"given by ''Descriptor_Size", gnat_clause);
}
else
{
post_error_ne ("aliased object& with unconstrained array "
"nominal subtype", gnat_clause,
gnat_entity);
post_error ("\\can overlay only aliased object with "
"compatible subtype", gnat_clause);
}
}
/* If we don't have an initializing expression for the underlying
variable, the initializing expression for the pointer is the
specified address. Otherwise, we have to make a COMPOUND_EXPR
to assign both the address and the initial value. */
if (!gnu_expr)
gnu_expr = gnu_address;
else
gnu_expr
= build2 (COMPOUND_EXPR, gnu_type,
build_binary_op (INIT_EXPR, NULL_TREE,
build_unary_op (INDIRECT_REF,
NULL_TREE,
gnu_address),
gnu_expr),
gnu_address);
}
/* If it has an address clause and we are not defining it, mark it
as an indirect object. Likewise for Stdcall objects that are
imported. */
if ((!definition && Present (Address_Clause (gnat_entity)))
|| (imported_p && Has_Stdcall_Convention (gnat_entity)))
{
/* Convert the type of the object to a reference type that can
alias everything as per RM 13.3(19). */
if (volatile_flag && !TYPE_VOLATILE (gnu_type))
gnu_type = change_qualified_type (gnu_type, TYPE_QUAL_VOLATILE);
gnu_type
= build_reference_type_for_mode (gnu_type, ptr_mode, true);
used_by_ref = true;
const_flag = false;
volatile_flag = false;
gnu_size = NULL_TREE;
/* No point in taking the address of an initializing expression
that isn't going to be used. */
gnu_expr = NULL_TREE;
/* If it has an address clause whose value is known at compile
time, make the object a CONST_DECL. This will avoid a
useless dereference. */
if (Present (Address_Clause (gnat_entity)))
{
Node_Id gnat_address
= Expression (Address_Clause (gnat_entity));
if (compile_time_known_address_p (gnat_address))
{
gnu_expr = gnat_to_gnu (gnat_address);
const_flag = true;
}
}
}
/* If we are at top level and this object is of variable size,
make the actual type a hidden pointer to the real type and
make the initializer be a memory allocation and initialization.
Likewise for objects we aren't defining (presumed to be
external references from other packages), but there we do
not set up an initialization.
If the object's size overflows, make an allocator too, so that
Storage_Error gets raised. Note that we will never free
such memory, so we presume it never will get allocated. */
if (!allocatable_size_p (TYPE_SIZE_UNIT (gnu_type),
global_bindings_p ()
|| !definition
|| static_flag)
|| (gnu_size
&& !allocatable_size_p (convert (sizetype,
size_binop
(EXACT_DIV_EXPR, gnu_size,
bitsize_unit_node)),
global_bindings_p ()
|| !definition
|| static_flag)))
{
if (volatile_flag && !TYPE_VOLATILE (gnu_type))
gnu_type = change_qualified_type (gnu_type, TYPE_QUAL_VOLATILE);
gnu_type = build_reference_type (gnu_type);
used_by_ref = true;
const_flag = true;
volatile_flag = false;
gnu_size = NULL_TREE;
/* In case this was an aliased object whose nominal subtype is
unconstrained, the pointer above will be a thin pointer and
build_allocator will automatically make the template.
If we have a template initializer only (that we made above),
pretend there is none and rely on what build_allocator creates
again anyway. Otherwise (if we have a full initializer), get
the data part and feed that to build_allocator.
If we are elaborating a mutable object, tell build_allocator to
ignore a possibly simpler size from the initializer, if any, as
we must allocate the maximum possible size in this case. */
if (definition && !imported_p)
{
tree gnu_alloc_type = TREE_TYPE (gnu_type);
if (TREE_CODE (gnu_alloc_type) == RECORD_TYPE
&& TYPE_CONTAINS_TEMPLATE_P (gnu_alloc_type))
{
gnu_alloc_type
= TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (gnu_alloc_type)));
if (TREE_CODE (gnu_expr) == CONSTRUCTOR
&& CONSTRUCTOR_NELTS (gnu_expr) == 1)
gnu_expr = NULL_TREE;
else
gnu_expr
= build_component_ref
(gnu_expr,
DECL_CHAIN (TYPE_FIELDS (TREE_TYPE (gnu_expr))),
false);
}
if (TREE_CODE (TYPE_SIZE_UNIT (gnu_alloc_type)) == INTEGER_CST
&& !valid_constant_size_p (TYPE_SIZE_UNIT (gnu_alloc_type)))
post_error ("??Storage_Error will be raised at run time!",
gnat_entity);
gnu_expr
= build_allocator (gnu_alloc_type, gnu_expr, gnu_type,
Empty, Empty, gnat_entity, mutable_p);
}
else
gnu_expr = NULL_TREE;
}
/* If this object would go into the stack and has an alignment larger
than the largest stack alignment the back-end can honor, resort to
a variable of "aligning type". */
if (definition
&& TYPE_ALIGN (gnu_type) > BIGGEST_ALIGNMENT
&& !imported_p
&& !static_flag
&& !global_bindings_p ())
{
/* Create the new variable. No need for extra room before the
aligned field as this is in automatic storage. */
tree gnu_new_type
= make_aligning_type (gnu_type, TYPE_ALIGN (gnu_type),
TYPE_SIZE_UNIT (gnu_type),
BIGGEST_ALIGNMENT, 0, gnat_entity);
tree gnu_new_var
= create_var_decl (create_concat_name (gnat_entity, "ALIGN"),
NULL_TREE, gnu_new_type, NULL_TREE,
false, false, false, false, false,
true, debug_info_p && definition, NULL,
gnat_entity);
/* Initialize the aligned field if we have an initializer. */
if (gnu_expr)
add_stmt_with_node
(build_binary_op (INIT_EXPR, NULL_TREE,
build_component_ref
(gnu_new_var, TYPE_FIELDS (gnu_new_type),
false),
gnu_expr),
gnat_entity);
/* And setup this entity as a reference to the aligned field. */
gnu_type = build_reference_type (gnu_type);
gnu_expr
= build_unary_op
(ADDR_EXPR, NULL_TREE,
build_component_ref (gnu_new_var, TYPE_FIELDS (gnu_new_type),
false));
TREE_CONSTANT (gnu_expr) = 1;
used_by_ref = true;
const_flag = true;
volatile_flag = false;
gnu_size = NULL_TREE;
}
/* If this is an aggregate constant initialized to a constant, force it
to be statically allocated. This saves an initialization copy. */
if (!static_flag
&& const_flag
&& gnu_expr
&& TREE_CONSTANT (gnu_expr)
&& AGGREGATE_TYPE_P (gnu_type)
&& tree_fits_uhwi_p (TYPE_SIZE_UNIT (gnu_type))
&& !(TYPE_IS_PADDING_P (gnu_type)
&& !tree_fits_uhwi_p (TYPE_SIZE_UNIT
(TREE_TYPE (TYPE_FIELDS (gnu_type))))))
static_flag = true;
/* If this is an aliased object with an unconstrained array nominal
subtype, we make its type a thin reference, i.e. the reference
counterpart of a thin pointer, so it points to the array part.
This is aimed at making it easier for the debugger to decode the
object. Note that we have to do it this late because of the
couple of allocation adjustments that might be made above. */
if (Is_Constr_Subt_For_UN_Aliased (gnat_type)
&& Is_Array_Type (Underlying_Type (gnat_type))
&& !type_annotate_only)
{
/* In case the object with the template has already been allocated
just above, we have nothing to do here. */
if (!TYPE_IS_THIN_POINTER_P (gnu_type))
{
/* This variable is a GNAT encoding used by Workbench: let it
go through the debugging information but mark it as
artificial: users are not interested in it. */
tree gnu_unc_var
= create_var_decl (concat_name (gnu_entity_name, "UNC"),
NULL_TREE, gnu_type, gnu_expr,
const_flag, Is_Public (gnat_entity),
imported_p || !definition, static_flag,
volatile_flag, true,
debug_info_p && definition,
NULL, gnat_entity);
gnu_expr = build_unary_op (ADDR_EXPR, NULL_TREE, gnu_unc_var);
TREE_CONSTANT (gnu_expr) = 1;
used_by_ref = true;
const_flag = true;
volatile_flag = false;
inner_const_flag = TREE_READONLY (gnu_unc_var);
gnu_size = NULL_TREE;
}
tree gnu_array = gnat_to_gnu_type (Base_Type (gnat_type));
gnu_type
= build_reference_type (TYPE_OBJECT_RECORD_TYPE (gnu_array));
}
/* Convert the expression to the type of the object if need be. */
if (gnu_expr && initial_value_needs_conversion (gnu_type, gnu_expr))
gnu_expr = convert (gnu_type, gnu_expr);
/* If this name is external or a name was specified, use it, but don't
use the Interface_Name with an address clause (see cd30005). */
if ((Is_Public (gnat_entity) && !Is_Imported (gnat_entity))
|| (Present (Interface_Name (gnat_entity))
&& No (Address_Clause (gnat_entity))))
gnu_ext_name = create_concat_name (gnat_entity, NULL);
/* Deal with a pragma Linker_Section on a constant or variable. */
if ((kind == E_Constant || kind == E_Variable)
&& Present (Linker_Section_Pragma (gnat_entity)))
prepend_one_attribute_pragma (&attr_list,
Linker_Section_Pragma (gnat_entity));
/* Now create the variable or the constant and set various flags. */
gnu_decl
= create_var_decl (gnu_entity_name, gnu_ext_name, gnu_type,
gnu_expr, const_flag, Is_Public (gnat_entity),
imported_p || !definition, static_flag,
volatile_flag, artificial_p,
debug_info_p && definition, attr_list,
gnat_entity);
DECL_BY_REF_P (gnu_decl) = used_by_ref;
DECL_POINTS_TO_READONLY_P (gnu_decl) = used_by_ref && inner_const_flag;
DECL_CAN_NEVER_BE_NULL_P (gnu_decl) = Can_Never_Be_Null (gnat_entity);
/* If we are defining an Out parameter and optimization isn't enabled,
create a fake PARM_DECL for debugging purposes and make it point to
the VAR_DECL. Suppress debug info for the latter but make sure it
will live in memory so that it can be accessed from within the
debugger through the PARM_DECL. */
if (kind == E_Out_Parameter
&& definition
&& debug_info_p
&& !optimize
&& !flag_generate_lto)
{
tree param = create_param_decl (gnu_entity_name, gnu_type);
gnat_pushdecl (param, gnat_entity);
SET_DECL_VALUE_EXPR (param, gnu_decl);
DECL_HAS_VALUE_EXPR_P (param) = 1;
DECL_IGNORED_P (gnu_decl) = 1;
TREE_ADDRESSABLE (gnu_decl) = 1;
}
/* If this is a loop parameter, set the corresponding flag. */
else if (kind == E_Loop_Parameter)
DECL_LOOP_PARM_P (gnu_decl) = 1;
/* If this is a constant and we are defining it or it generates a real
symbol at the object level and we are referencing it, we may want
or need to have a true variable to represent it:
- if the constant is public and not overlaid on something else,
- if its address is taken,
- if it is aliased,
- if optimization isn't enabled, for debugging purposes. */
if (TREE_CODE (gnu_decl) == CONST_DECL
&& (definition || Sloc (gnat_entity) > Standard_Location)
&& ((Is_Public (gnat_entity) && No (Address_Clause (gnat_entity)))
|| Address_Taken (gnat_entity)
|| Is_Aliased (gnat_entity)
|| (!optimize && debug_info_p)))
{
tree gnu_corr_var
= create_var_decl (gnu_entity_name, gnu_ext_name, gnu_type,
gnu_expr, true, Is_Public (gnat_entity),
!definition, static_flag, volatile_flag,
artificial_p, debug_info_p && definition,
attr_list, gnat_entity, false);
SET_DECL_CONST_CORRESPONDING_VAR (gnu_decl, gnu_corr_var);
DECL_IGNORED_P (gnu_decl) = 1;
}
/* If this is a constant, even if we don't need a true variable, we
may need to avoid returning the initializer in every case. That
can happen for the address of a (constant) constructor because,
upon dereferencing it, the constructor will be reinjected in the
tree, which may not be valid in every case; see lvalue_required_p
for more details. */
if (TREE_CODE (gnu_decl) == CONST_DECL)
DECL_CONST_ADDRESS_P (gnu_decl) = constructor_address_p (gnu_expr);
/* If this object is declared in a block that contains a block with an
exception handler, and we aren't using the GCC exception mechanism,
we must force this variable in memory in order to avoid an invalid
optimization. */
if (Front_End_Exceptions ()
&& Has_Nested_Block_With_Handler (Scope (gnat_entity)))
TREE_ADDRESSABLE (gnu_decl) = 1;
/* If this is a local variable with non-BLKmode and aggregate type,
and optimization isn't enabled, then force it in memory so that
a register won't be allocated to it with possible subparts left
uninitialized and reaching the register allocator. */
else if (TREE_CODE (gnu_decl) == VAR_DECL
&& !DECL_EXTERNAL (gnu_decl)
&& !TREE_STATIC (gnu_decl)
&& DECL_MODE (gnu_decl) != BLKmode
&& AGGREGATE_TYPE_P (TREE_TYPE (gnu_decl))
&& !TYPE_IS_FAT_POINTER_P (TREE_TYPE (gnu_decl))
&& !optimize)
TREE_ADDRESSABLE (gnu_decl) = 1;
/* If we are defining an object with variable size or an object with
fixed size that will be dynamically allocated, and we are using the
front-end setjmp/longjmp exception mechanism, update the setjmp
buffer. */
if (definition
&& Exception_Mechanism == Front_End_SJLJ
&& get_block_jmpbuf_decl ()
&& DECL_SIZE_UNIT (gnu_decl)
&& (TREE_CODE (DECL_SIZE_UNIT (gnu_decl)) != INTEGER_CST
|| (flag_stack_check == GENERIC_STACK_CHECK
&& compare_tree_int (DECL_SIZE_UNIT (gnu_decl),
STACK_CHECK_MAX_VAR_SIZE) > 0)))
add_stmt_with_node (build_call_n_expr
(update_setjmp_buf_decl, 1,
build_unary_op (ADDR_EXPR, NULL_TREE,
get_block_jmpbuf_decl ())),
gnat_entity);
/* Back-annotate Esize and Alignment of the object if not already
known. Note that we pick the values of the type, not those of
the object, to shield ourselves from low-level platform-dependent
adjustments like alignment promotion. This is both consistent with
all the treatment above, where alignment and size are set on the
type of the object and not on the object directly, and makes it
possible to support all confirming representation clauses. */
annotate_object (gnat_entity, TREE_TYPE (gnu_decl), gnu_object_size,
used_by_ref);
}
break;
case E_Void:
/* Return a TYPE_DECL for "void" that we previously made. */
gnu_decl = TYPE_NAME (void_type_node);
break;
case E_Enumeration_Type:
/* A special case: for the types Character and Wide_Character in
Standard, we do not list all the literals. So if the literals
are not specified, make this an integer type. */
if (No (First_Literal (gnat_entity)))
{
if (esize == CHAR_TYPE_SIZE && flag_signed_char)
gnu_type = make_signed_type (CHAR_TYPE_SIZE);
else
gnu_type = make_unsigned_type (esize);
TYPE_NAME (gnu_type) = gnu_entity_name;
/* Set TYPE_STRING_FLAG for Character and Wide_Character types.
This is needed by the DWARF-2 back-end to distinguish between
unsigned integer types and character types. */
TYPE_STRING_FLAG (gnu_type) = 1;
/* This flag is needed by the call just below. */
TYPE_ARTIFICIAL (gnu_type) = artificial_p;
finish_character_type (gnu_type);
}
else
{
/* We have a list of enumeral constants in First_Literal. We make a
CONST_DECL for each one and build into GNU_LITERAL_LIST the list
to be placed into TYPE_VALUES. Each node is itself a TREE_LIST
whose TREE_PURPOSE is the literal name and whose TREE_VALUE is the
value of the literal. But when we have a regular boolean type, we
simplify this a little by using a BOOLEAN_TYPE. */
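/* Illustrative sketch: for
     type Color is (Red, Green, Blue);
   three CONST_DECLs with values 0, 1 and 2 are created in the loop
   below; GNU_LITERAL_LIST is built in reverse order, hence the call
   to nreverse before it is stored into TYPE_VALUES. */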
const bool is_boolean = Is_Boolean_Type (gnat_entity)
&& !Has_Non_Standard_Rep (gnat_entity);
const bool is_unsigned = Is_Unsigned_Type (gnat_entity);
tree gnu_list = NULL_TREE;
Entity_Id gnat_literal;
/* Boolean types with foreign convention have precision 1. */
if (is_boolean && foreign)
esize = 1;
gnu_type = make_node (is_boolean ? BOOLEAN_TYPE : ENUMERAL_TYPE);
TYPE_PRECISION (gnu_type) = esize;
TYPE_UNSIGNED (gnu_type) = is_unsigned;
set_min_and_max_values_for_integral_type (gnu_type, esize,
TYPE_SIGN (gnu_type));
process_attributes (&gnu_type, &attr_list, true, gnat_entity);
layout_type (gnu_type);
for (gnat_literal = First_Literal (gnat_entity);
Present (gnat_literal);
gnat_literal = Next_Literal (gnat_literal))
{
tree gnu_value
= UI_To_gnu (Enumeration_Rep (gnat_literal), gnu_type);
/* Do not generate debug info for individual enumerators. */
tree gnu_literal
= create_var_decl (get_entity_name (gnat_literal), NULL_TREE,
gnu_type, gnu_value, true, false, false,
false, false, artificial_p, false,
NULL, gnat_literal);
save_gnu_tree (gnat_literal, gnu_literal, false);
gnu_list
= tree_cons (DECL_NAME (gnu_literal), gnu_value, gnu_list);
}
if (!is_boolean)
TYPE_VALUES (gnu_type) = nreverse (gnu_list);
/* Note that the bounds are updated at the end of this function
to avoid an infinite recursion since they refer to the type. */
goto discrete_type;
}
break;
case E_Signed_Integer_Type:
/* For integer types, just make a signed type the appropriate number
of bits. */
gnu_type = make_signed_type (esize);
goto discrete_type;
case E_Ordinary_Fixed_Point_Type:
case E_Decimal_Fixed_Point_Type:
{
/* Small_Value is the scale factor. */
const Ureal gnat_small_value = Small_Value (gnat_entity);
tree scale_factor = NULL_TREE;
gnu_type = make_signed_type (esize);
/* When encoded as 1/2**N or 1/10**N, describe the scale factor as a
binary or decimal scale: it is easier to read for humans. */
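/* For instance (sketch): for
     type Cents is delta 0.01 digits 10;
   Small_Value is 1/10**2, so the scale factor is represented below
   as the expression 1 / (10 ** 2) rather than as a ratio of
   (possibly 128-bit) numerator and denominator. */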
if (UI_Eq (Numerator (gnat_small_value), Uint_1)
&& (Rbase (gnat_small_value) == 2
|| Rbase (gnat_small_value) == 10))
{
tree base
= build_int_cst (integer_type_node, Rbase (gnat_small_value));
tree exponent
= build_int_cst (integer_type_node,
UI_To_Int (Denominator (gnat_small_value)));
scale_factor
= build2 (RDIV_EXPR, integer_type_node,
integer_one_node,
build2 (POWER_EXPR, integer_type_node,
base, exponent));
}
/* Use the arbitrary scale factor description. Note that we support
a Small_Value whose magnitude is larger than 64-bit even on 32-bit
platforms, so we unconditionally use a (dummy) 128-bit type. */
else
{
const Uint gnat_num = Norm_Num (gnat_small_value);
const Uint gnat_den = Norm_Den (gnat_small_value);
tree gnu_small_type = make_unsigned_type (128);
tree gnu_num = UI_To_gnu (gnat_num, gnu_small_type);
tree gnu_den = UI_To_gnu (gnat_den, gnu_small_type);
scale_factor
= build2 (RDIV_EXPR, gnu_small_type, gnu_num, gnu_den);
}
TYPE_FIXED_POINT_P (gnu_type) = 1;
SET_TYPE_SCALE_FACTOR (gnu_type, scale_factor);
}
goto discrete_type;
case E_Modular_Integer_Type:
{
/* Packed Array Impl. Types are supposed to be subtypes only. */
gcc_assert (!Is_Packed_Array_Impl_Type (gnat_entity));
/* For modular types, make the unsigned type of the proper number
of bits and then set up the modulus, if required. */
gnu_type = make_unsigned_type (esize);
/* Get the modulus in this type. If the modulus overflows, assume
that this is because it was equal to 2**Esize. Note that there
is no overflow checking done on unsigned types, so we detect the
overflow by looking for a modulus of zero, which is invalid. */
tree gnu_modulus = UI_To_gnu (Modulus (gnat_entity), gnu_type);
/* If the modulus is not 2**Esize, then this also means that the upper
bound of the type, i.e. modulus - 1, is not maximal, so we create an
extra subtype to carry it and set the modulus on the base type. */
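/* Illustrative sketch:
     type Byte is mod 2**8;  -- modulus 256 overflows the 8-bit type,
                             -- so gnu_modulus is 0 and the plain
                             -- unsigned type is used as is
     type M is mod 100;      -- gnu_modulus is 100, so TYPE_MODULAR_P
                             -- is set and a subtype with range 0 .. 99
                             -- is created below */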
if (!integer_zerop (gnu_modulus))
{
TYPE_NAME (gnu_type) = create_concat_name (gnat_entity, "UMT");
TYPE_MODULAR_P (gnu_type) = 1;
SET_TYPE_MODULUS (gnu_type, gnu_modulus);
tree gnu_high = fold_build2 (MINUS_EXPR, gnu_type, gnu_modulus,
build_int_cst (gnu_type, 1));
gnu_type
= create_extra_subtype (gnu_type, TYPE_MIN_VALUE (gnu_type),
gnu_high);
}
}
goto discrete_type;
case E_Signed_Integer_Subtype:
case E_Enumeration_Subtype:
case E_Modular_Integer_Subtype:
case E_Ordinary_Fixed_Point_Subtype:
case E_Decimal_Fixed_Point_Subtype:
/* For integral subtypes, we make a new INTEGER_TYPE. Note that we do
not want to call create_range_type since we would like each subtype
node to be distinct. ??? Historically this was in preparation for
when memory aliasing is implemented, but that's obsolete now given
the call to relate_alias_sets below.
The TREE_TYPE field of the INTEGER_TYPE points to the base type;
this fact is used by the arithmetic conversion functions.
We elaborate the Ancestor_Subtype if it is not in the current unit
and one of our bounds is non-static. We do this to ensure consistent
naming in the case where several subtypes share the same bounds, by
elaborating the first such subtype first, thus using its name. */
if (!definition
&& Present (Ancestor_Subtype (gnat_entity))
&& !In_Extended_Main_Code_Unit (Ancestor_Subtype (gnat_entity))
&& (!Compile_Time_Known_Value (Type_Low_Bound (gnat_entity))
|| !Compile_Time_Known_Value (Type_High_Bound (gnat_entity))))
gnat_to_gnu_entity (Ancestor_Subtype (gnat_entity), gnu_expr, false);
/* Set the precision to the Esize except for bit-packed arrays. */
if (Is_Packed_Array_Impl_Type (gnat_entity))
esize = UI_To_Int (RM_Size (gnat_entity));
/* Boolean types with foreign convention have precision 1. */
if (Is_Boolean_Type (gnat_entity) && foreign)
{
gnu_type = make_node (BOOLEAN_TYPE);
TYPE_PRECISION (gnu_type) = 1;
TYPE_UNSIGNED (gnu_type) = 1;
set_min_and_max_values_for_integral_type (gnu_type, 1, UNSIGNED);
layout_type (gnu_type);
}
/* First subtypes of Character are treated as Character; otherwise
this should be an unsigned type if the base type is unsigned or
if the lower bound is constant and non-negative or if the type
is biased. However, even if the lower bound is constant and
non-negative, we use a signed type for a subtype with the same
size as its signed base type, because this eliminates useless
conversions to it and gives more leeway to the optimizer; but
this means that we will need to explicitly test for this case
when we change the representation based on the RM size. */
else if (kind == E_Enumeration_Subtype
&& No (First_Literal (Etype (gnat_entity)))
&& Esize (gnat_entity) == RM_Size (gnat_entity)
&& esize == CHAR_TYPE_SIZE
&& flag_signed_char)
gnu_type = make_signed_type (CHAR_TYPE_SIZE);
else if (Is_Unsigned_Type (Underlying_Type (Etype (gnat_entity)))
|| (Esize (Etype (gnat_entity)) != Esize (gnat_entity)
&& Is_Unsigned_Type (gnat_entity))
|| Has_Biased_Representation (gnat_entity))
gnu_type = make_unsigned_type (esize);
else
gnu_type = make_signed_type (esize);
TREE_TYPE (gnu_type) = get_unpadded_type (Etype (gnat_entity));
SET_TYPE_RM_MIN_VALUE
(gnu_type, elaborate_expression (Type_Low_Bound (gnat_entity),
gnat_entity, "L", definition, true,
debug_info_p));
SET_TYPE_RM_MAX_VALUE
(gnu_type, elaborate_expression (Type_High_Bound (gnat_entity),
gnat_entity, "U", definition, true,
debug_info_p));
if (TREE_CODE (gnu_type) == INTEGER_TYPE)
TYPE_BIASED_REPRESENTATION_P (gnu_type)
= Has_Biased_Representation (gnat_entity);
/* Do the same processing for Character subtypes as for types. */
if (TREE_CODE (TREE_TYPE (gnu_type)) == INTEGER_TYPE
&& TYPE_STRING_FLAG (TREE_TYPE (gnu_type)))
{
TYPE_NAME (gnu_type) = gnu_entity_name;
TYPE_STRING_FLAG (gnu_type) = 1;
TYPE_ARTIFICIAL (gnu_type) = artificial_p;
finish_character_type (gnu_type);
}
/* Inherit our alias set from what we're a subtype of. Subtypes
are not different types and a pointer can designate any instance
within a subtype hierarchy. */
relate_alias_sets (gnu_type, TREE_TYPE (gnu_type), ALIAS_SET_COPY);
/* One of the above calls might have caused us to be elaborated,
so don't blow up if so. */
if (present_gnu_tree (gnat_entity))
{
maybe_present = true;
break;
}
/* Attach the TYPE_STUB_DECL in case we have a parallel type. */
TYPE_STUB_DECL (gnu_type)
= create_type_stub_decl (gnu_entity_name, gnu_type);
discrete_type:
/* We have to handle clauses that under-align the type specially. */
if ((Present (Alignment_Clause (gnat_entity))
|| (Is_Packed_Array_Impl_Type (gnat_entity)
&& Present
(Alignment_Clause (Original_Array_Type (gnat_entity)))))
&& UI_Is_In_Int_Range (Alignment (gnat_entity)))
{
align = UI_To_Int (Alignment (gnat_entity)) * BITS_PER_UNIT;
if (align >= TYPE_ALIGN (gnu_type))
align = 0;
}
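/* For example (sketch): given
     type Small is range 0 .. 1000;
     for Small'Alignment use 1;
   on a target whose natural alignment for a 16-bit type is 2 bytes,
   the requested 8-bit alignment is smaller, so ALIGN is set here and
   the type is wrapped in a misaligning padding record further
   below. */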
/* If the type we are dealing with represents a bit-packed array,
we need to have the bits left justified on big-endian targets
and right justified on little-endian targets. We also need to
ensure that when the value is read (e.g. for comparison of two
such values), we only get the good bits, since the unused bits
are uninitialized. Both goals are accomplished by wrapping up
the modular type in an enclosing record type. */
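/* Illustrative sketch (encoded names approximate): for
     type Flag_Set is array (1 .. 10) of Boolean with Pack;
   the implementation type is a 10-bit modular type; it is wrapped
   below in a RECORD_TYPE with the "JM" suffix containing a single
   OBJECT field, so that the 10 meaningful bits stay justified
   according to endianness and the unused bits are never read. */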
if (Is_Packed_Array_Impl_Type (gnat_entity))
{
tree gnu_field_type, gnu_field, t;
gcc_assert (Is_Bit_Packed_Array (Original_Array_Type (gnat_entity)));
TYPE_BIT_PACKED_ARRAY_TYPE_P (gnu_type) = 1;
/* Make the original array type a parallel/debug type. */
if (debug_info_p)
{
tree gnu_name
= associate_original_type_to_packed_array (gnu_type,
gnat_entity);
if (gnu_name)
gnu_entity_name = gnu_name;
}
/* Set the RM size before wrapping up the original type. */
SET_TYPE_RM_SIZE (gnu_type,
UI_To_gnu (RM_Size (gnat_entity), bitsizetype));
/* Create a stripped-down declaration, mainly for debugging. */
t = create_type_decl (gnu_entity_name, gnu_type, true, debug_info_p,
gnat_entity);
/* Now save it and build the enclosing record type. */
gnu_field_type = gnu_type;
gnu_type = make_node (RECORD_TYPE);
TYPE_NAME (gnu_type) = create_concat_name (gnat_entity, "JM");
TYPE_PACKED (gnu_type) = 1;
TYPE_SIZE (gnu_type) = TYPE_SIZE (gnu_field_type);
TYPE_SIZE_UNIT (gnu_type) = TYPE_SIZE_UNIT (gnu_field_type);
SET_TYPE_ADA_SIZE (gnu_type, TYPE_RM_SIZE (gnu_field_type));
/* Propagate the alignment of the modular type to the record type,
unless there is an alignment clause that under-aligns the type.
This means that bit-packed arrays are given "ceil" alignment for
their size by default, which may seem counter-intuitive but makes
it possible to overlay them on modular types easily. */
SET_TYPE_ALIGN (gnu_type,
align > 0 ? align : TYPE_ALIGN (gnu_field_type));
/* Propagate the reverse storage order flag to the record type so
that the required byte swapping is performed when retrieving the
enclosed modular value. */
TYPE_REVERSE_STORAGE_ORDER (gnu_type)
= Reverse_Storage_Order (Original_Array_Type (gnat_entity));
relate_alias_sets (gnu_type, gnu_field_type, ALIAS_SET_COPY);
/* Don't declare the field as addressable since we won't be taking
its address and this would prevent create_field_decl from making
a bitfield. */
gnu_field
= create_field_decl (get_identifier ("OBJECT"), gnu_field_type,
gnu_type, NULL_TREE, bitsize_zero_node, 1, 0);
/* We will output additional debug info manually below. */
finish_record_type (gnu_type, gnu_field, 2, false);
TYPE_JUSTIFIED_MODULAR_P (gnu_type) = 1;
/* Make the original array type a parallel/debug type. Note that
gnat_get_array_descr_info needs a TYPE_IMPL_PACKED_ARRAY_P type
so we use an intermediate step for standard DWARF. */
if (debug_info_p)
{
if (gnat_encodings != DWARF_GNAT_ENCODINGS_ALL)
SET_TYPE_DEBUG_TYPE (gnu_type, gnu_field_type);
else if (DECL_PARALLEL_TYPE (t))
add_parallel_type (gnu_type, DECL_PARALLEL_TYPE (t));
}
}
/* If the type we are dealing with has got a smaller alignment than the
natural one, we need to wrap it up in a record type and misalign the
latter; we reuse the padding machinery for this purpose. */
else if (align > 0)
{
tree gnu_size = UI_To_gnu (RM_Size (gnat_entity), bitsizetype);
/* Set the RM size before wrapping the type. */
SET_TYPE_RM_SIZE (gnu_type, gnu_size);
/* Create a stripped-down declaration, mainly for debugging. */
create_type_decl (gnu_entity_name, gnu_type, true, debug_info_p,
gnat_entity);
gnu_type
= maybe_pad_type (gnu_type, TYPE_SIZE (gnu_type), align,
gnat_entity, false, definition, false);
TYPE_PACKED (gnu_type) = 1;
SET_TYPE_ADA_SIZE (gnu_type, gnu_size);
}
break;
case E_Floating_Point_Type:
/* The type of the Low and High bounds can be our type if this is
a type from Standard, so set them at the end of the function. */
gnu_type = make_node (REAL_TYPE);
TYPE_PRECISION (gnu_type) = fp_size_to_prec (esize);
layout_type (gnu_type);
break;
case E_Floating_Point_Subtype:
/* See the E_Signed_Integer_Subtype case for the rationale. */
if (!definition
&& Present (Ancestor_Subtype (gnat_entity))
&& !In_Extended_Main_Code_Unit (Ancestor_Subtype (gnat_entity))
&& (!Compile_Time_Known_Value (Type_Low_Bound (gnat_entity))
|| !Compile_Time_Known_Value (Type_High_Bound (gnat_entity))))
gnat_to_gnu_entity (Ancestor_Subtype (gnat_entity), gnu_expr, false);
gnu_type = make_node (REAL_TYPE);
TREE_TYPE (gnu_type) = get_unpadded_type (Etype (gnat_entity));
TYPE_PRECISION (gnu_type) = fp_size_to_prec (esize);
TYPE_GCC_MIN_VALUE (gnu_type)
= TYPE_GCC_MIN_VALUE (TREE_TYPE (gnu_type));
TYPE_GCC_MAX_VALUE (gnu_type)
= TYPE_GCC_MAX_VALUE (TREE_TYPE (gnu_type));
layout_type (gnu_type);
SET_TYPE_RM_MIN_VALUE
(gnu_type, elaborate_expression (Type_Low_Bound (gnat_entity),
gnat_entity, "L", definition, true,
debug_info_p));
SET_TYPE_RM_MAX_VALUE
(gnu_type, elaborate_expression (Type_High_Bound (gnat_entity),
gnat_entity, "U", definition, true,
debug_info_p));
/* Inherit our alias set from what we're a subtype of, as for
integer subtypes. */
relate_alias_sets (gnu_type, TREE_TYPE (gnu_type), ALIAS_SET_COPY);
/* One of the above calls might have caused us to be elaborated,
so don't blow up if so. */
maybe_present = true;
break;
/* Array Types and Subtypes
In GNAT unconstrained array types are represented by E_Array_Type and
constrained array types are represented by E_Array_Subtype. They are
translated into UNCONSTRAINED_ARRAY_TYPE and ARRAY_TYPE respectively.
But there are no actual objects of an unconstrained array type; all we
have are pointers to that type. In addition to the type node itself,
4 other types associated with it are built in the process:
1. the array type (suffix XUA) containing the actual data,
2. the template type (suffix XUB) containing the bounds,
3. the fat pointer type (suffix XUP) representing a pointer or a
reference to the unconstrained array type:
XUP = struct { XUA *, XUB * }
4. the object record type (suffix XUT) containing bounds and data:
XUT = struct { XUB, XUA }
The bounds of the array type XUA (de)reference the XUB * field of a
PLACEHOLDER_EXPR for the fat pointer type XUP, so the array type XUA
is to be interpreted in the context of the fat pointer type XUP for
debug info purposes. */
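/* Illustrative sketch (encoded names approximate and emitted with the
   GNAT encodings): for
     type Str is array (Positive range <>) of Character;
   the following types are built besides the UNCONSTRAINED_ARRAY_TYPE
   itself:
     str___XUA  the ARRAY_TYPE with self-referential bounds,
     str___XUB  the record with the LB0 and UB0 bound fields,
     str___XUP  the fat pointer record with the P_ARRAY and P_BOUNDS
                fields,
     str___XUT  the object record combining bounds and data. */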
case E_Array_Type:
{
const Entity_Id PAT = Packed_Array_Impl_Type (gnat_entity);
const bool convention_fortran_p
= (Convention (gnat_entity) == Convention_Fortran);
const int ndim = Number_Dimensions (gnat_entity);
tree gnu_template_type;
tree gnu_ptr_template;
tree gnu_template_reference, gnu_template_fields, gnu_fat_type;
tree *gnu_index_types = XALLOCAVEC (tree, ndim);
tree *gnu_temp_fields = XALLOCAVEC (tree, ndim);
tree gnu_max_size = size_one_node, tem, obj;
Entity_Id gnat_index;
int index;
tree comp_type;
/* Create the type for the component now, as it simplifies breaking
type reference loops. */
comp_type
= gnat_to_gnu_component_type (gnat_entity, definition, debug_info_p);
if (present_gnu_tree (gnat_entity))
{
/* As a side effect, the type may have been translated. */
maybe_present = true;
break;
}
/* We complete an existing dummy fat pointer type in place. This both
avoids further complex adjustments in update_pointer_to and yields
better debugging information in DWARF by leveraging the support for
incomplete declarations of "tagged" types in the DWARF back-end. */
gnu_type = get_dummy_type (gnat_entity);
if (gnu_type && TYPE_POINTER_TO (gnu_type))
{
gnu_fat_type = TYPE_MAIN_VARIANT (TYPE_POINTER_TO (gnu_type));
TYPE_NAME (gnu_fat_type) = NULL_TREE;
gnu_ptr_template =
TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (gnu_fat_type)));
gnu_template_type = TREE_TYPE (gnu_ptr_template);
/* Save the contents of the dummy type for update_pointer_to. */
TYPE_POINTER_TO (gnu_type) = copy_type (gnu_fat_type);
TYPE_FIELDS (TYPE_POINTER_TO (gnu_type))
= copy_node (TYPE_FIELDS (gnu_fat_type));
DECL_CHAIN (TYPE_FIELDS (TYPE_POINTER_TO (gnu_type)))
= copy_node (DECL_CHAIN (TYPE_FIELDS (gnu_fat_type)));
}
else
{
gnu_fat_type = make_node (RECORD_TYPE);
gnu_template_type = make_node (RECORD_TYPE);
gnu_ptr_template = build_pointer_type (gnu_template_type);
}
/* Make a node for the array. If we are not defining the array
suppress expanding incomplete types. */
gnu_type = make_node (UNCONSTRAINED_ARRAY_TYPE);
if (!definition)
{
defer_incomplete_level++;
this_deferred = true;
}
/* Build the fat pointer type. Use a "void *" object instead of
a pointer to the array type since we don't have the array type
yet (it will reference the fat pointer via the bounds). Note
that we reuse the existing fields of a dummy type because for:
type Arr is array (Positive range <>) of Element_Type;
type Array_Ref is access Arr;
Var : Array_Ref := Null;
in a declarative part, Arr will be frozen only after Var, which
means that the fields used in the CONSTRUCTOR built for Null are
those of the dummy type, which in turn means that COMPONENT_REFs
of Var may be built with these fields. Now if COMPONENT_REFs of
Var are also built later with the fields of the final type, the
aliasing machinery may consider that the accesses are distinct
if the FIELD_DECLs are distinct as objects. */
if (COMPLETE_TYPE_P (gnu_fat_type))
{
tem = TYPE_FIELDS (gnu_fat_type);
TREE_TYPE (tem) = ptr_type_node;
TREE_TYPE (DECL_CHAIN (tem)) = gnu_ptr_template;
TYPE_DECL_SUPPRESS_DEBUG (TYPE_STUB_DECL (gnu_fat_type)) = 0;
for (tree t = gnu_fat_type; t; t = TYPE_NEXT_VARIANT (t))
SET_TYPE_UNCONSTRAINED_ARRAY (t, gnu_type);
}
else
{
/* We make the fields addressable for the sake of compatibility
with languages for which the regular fields are addressable. */
tem
= create_field_decl (get_identifier ("P_ARRAY"),
ptr_type_node, gnu_fat_type,
NULL_TREE, NULL_TREE, 0, 1);
DECL_CHAIN (tem)
= create_field_decl (get_identifier ("P_BOUNDS"),
gnu_ptr_template, gnu_fat_type,
NULL_TREE, NULL_TREE, 0, 1);
finish_fat_pointer_type (gnu_fat_type, tem);
SET_TYPE_UNCONSTRAINED_ARRAY (gnu_fat_type, gnu_type);
}
/* If the GNAT encodings are used, give the fat pointer type a name.
If this is a packed type implemented specially, tell the debugger
how to interpret the underlying bits by fetching the name of the
implementation type. But, in any case, mark it as artificial so
the debugger can skip it. */
const Entity_Id gnat_name
= Present (PAT) && gnat_encodings == DWARF_GNAT_ENCODINGS_ALL
? PAT
: gnat_entity;
tree xup_name
= gnat_encodings == DWARF_GNAT_ENCODINGS_ALL
? create_concat_name (gnat_name, "XUP")
: gnu_entity_name;
create_type_decl (xup_name, gnu_fat_type, true, debug_info_p,
gnat_entity);
/* Build a reference to the template from a PLACEHOLDER_EXPR that
is the fat pointer. This will be used to access the individual
fields once we build them. */
tem = build3 (COMPONENT_REF, gnu_ptr_template,
build0 (PLACEHOLDER_EXPR, gnu_fat_type),
DECL_CHAIN (TYPE_FIELDS (gnu_fat_type)), NULL_TREE);
gnu_template_reference
= build_unary_op (INDIRECT_REF, gnu_template_type, tem);
TREE_READONLY (gnu_template_reference) = 1;
TREE_THIS_NOTRAP (gnu_template_reference) = 1;
/* Now create the GCC type for each index and add the fields for that
index to the template. */
for (index = (convention_fortran_p ? ndim - 1 : 0),
gnat_index = First_Index (gnat_entity);
IN_RANGE (index, 0, ndim - 1);
index += (convention_fortran_p ? - 1 : 1),
gnat_index = Next_Index (gnat_index))
{
const bool is_flb
= Is_Fixed_Lower_Bound_Index_Subtype (Etype (gnat_index));
tree gnu_index_type = get_unpadded_type (Etype (gnat_index));
tree gnu_orig_min = TYPE_MIN_VALUE (gnu_index_type);
tree gnu_orig_max = TYPE_MAX_VALUE (gnu_index_type);
tree gnu_index_base_type = get_base_type (gnu_index_type);
tree gnu_lb_field, gnu_hb_field;
tree gnu_min, gnu_max, gnu_high;
char field_name[16];
/* Update the maximum size of the array in elements. */
if (gnu_max_size)
gnu_max_size
= update_n_elem (gnu_max_size, gnu_orig_min, gnu_orig_max);
/* Now build the self-referential bounds of the index type. */
gnu_index_type = maybe_character_type (gnu_index_type);
gnu_index_base_type = maybe_character_type (gnu_index_base_type);
/* Make the FIELD_DECLs for the low and high bounds of this
type and then make extractions of these fields from the
template. */
sprintf (field_name, "LB%d", index);
gnu_lb_field = create_field_decl (get_identifier (field_name),
gnu_index_type,
gnu_template_type, NULL_TREE,
NULL_TREE, 0, 0);
Sloc_to_locus (Sloc (gnat_entity),
&DECL_SOURCE_LOCATION (gnu_lb_field));
field_name[0] = 'U';
gnu_hb_field = create_field_decl (get_identifier (field_name),
gnu_index_type,
gnu_template_type, NULL_TREE,
NULL_TREE, 0, 0);
Sloc_to_locus (Sloc (gnat_entity),
&DECL_SOURCE_LOCATION (gnu_hb_field));
gnu_temp_fields[index] = chainon (gnu_lb_field, gnu_hb_field);
/* We can't use build_component_ref here since the template type
isn't complete yet. */
if (!is_flb)
{
gnu_orig_min = build3 (COMPONENT_REF, TREE_TYPE (gnu_lb_field),
gnu_template_reference, gnu_lb_field,
NULL_TREE);
TREE_READONLY (gnu_orig_min) = 1;
}
gnu_orig_max = build3 (COMPONENT_REF, TREE_TYPE (gnu_hb_field),
gnu_template_reference, gnu_hb_field,
NULL_TREE);
TREE_READONLY (gnu_orig_max) = 1;
gnu_min = convert (sizetype, gnu_orig_min);
gnu_max = convert (sizetype, gnu_orig_max);
/* Compute the size of this dimension. See the E_Array_Subtype
case below for the rationale. */
if (is_flb
&& Nkind (gnat_index) == N_Subtype_Indication
&& flb_cannot_be_superflat (gnat_index))
gnu_high = gnu_max;
else
gnu_high
= build3 (COND_EXPR, sizetype,
build2 (GE_EXPR, boolean_type_node,
gnu_orig_max, gnu_orig_min),
gnu_max,
TREE_CODE (gnu_min) == INTEGER_CST
? int_const_binop (MINUS_EXPR, gnu_min, size_one_node)
: size_binop (MINUS_EXPR, gnu_min, size_one_node));
/* Make a range type with the new range in the Ada base type.
Then make an index type with the size range in sizetype. */
gnu_index_types[index]
= create_index_type (gnu_min, gnu_high,
create_range_type (gnu_index_base_type,
gnu_orig_min,
gnu_orig_max),
gnat_entity);
TYPE_NAME (gnu_index_types[index])
= create_concat_name (gnat_entity, field_name);
}
/* Install all the fields into the template. */
TYPE_NAME (gnu_template_type)
= create_concat_name (gnat_entity, "XUB");
gnu_template_fields = NULL_TREE;
for (index = 0; index < ndim; index++)
gnu_template_fields
= chainon (gnu_template_fields, gnu_temp_fields[index]);
finish_record_type (gnu_template_type, gnu_template_fields, 0,
debug_info_p);
TYPE_CONTEXT (gnu_template_type) = current_function_decl;
/* If Component_Size is not already specified, annotate it with the
size of the component. */
if (!Known_Component_Size (gnat_entity))
Set_Component_Size (gnat_entity,
annotate_value (TYPE_SIZE (comp_type)));
/* Compute the maximum size of the array in units. */
if (gnu_max_size)
gnu_max_size
= size_binop (MULT_EXPR, gnu_max_size, TYPE_SIZE_UNIT (comp_type));
/* Now build the array type. */
tem = comp_type;
for (index = ndim - 1; index >= 0; index--)
{
tem = build_nonshared_array_type (tem, gnu_index_types[index]);
TYPE_MULTI_ARRAY_P (tem) = (index > 0);
TYPE_CONVENTION_FORTRAN_P (tem) = convention_fortran_p;
if (index == ndim - 1 && Reverse_Storage_Order (gnat_entity))
set_reverse_storage_order_on_array_type (tem);
if (array_type_has_nonaliased_component (tem, gnat_entity))
set_nonaliased_component_on_array_type (tem);
}
/* If this is a packed type implemented specially, then process the
implementation type so it is elaborated in the proper scope. */
if (Present (PAT))
gnat_to_gnu_entity (PAT, NULL_TREE, false);
/* Otherwise, if an alignment is specified, use it if valid and, if
the alignment was requested with an explicit clause, state so. */
else if (Known_Alignment (gnat_entity))
{
SET_TYPE_ALIGN (tem,
validate_alignment (Alignment (gnat_entity),
gnat_entity,
TYPE_ALIGN (tem)));
if (Present (Alignment_Clause (gnat_entity)))
TYPE_USER_ALIGN (tem) = 1;
}
/* Tag top-level ARRAY_TYPE nodes for packed arrays and their
implementation types as such so that the debug information back-end
can output the appropriate description for them. */
TYPE_PACKED (tem)
= (Is_Packed (gnat_entity)
|| Is_Packed_Array_Impl_Type (gnat_entity));
if (Treat_As_Volatile (gnat_entity))
tem = change_qualified_type (tem, TYPE_QUAL_VOLATILE);
/* Adjust the type of the pointer-to-array field of the fat pointer
and record the aliasing relationships if necessary. If this is
a packed type implemented specially, then use a ref-all pointer
type since the implementation type may vary between constrained
subtypes and unconstrained base type. */
if (Present (PAT))
TREE_TYPE (TYPE_FIELDS (gnu_fat_type))
= build_pointer_type_for_mode (tem, ptr_mode, true);
else
TREE_TYPE (TYPE_FIELDS (gnu_fat_type)) = build_pointer_type (tem);
if (TYPE_ALIAS_SET_KNOWN_P (gnu_fat_type))
record_component_aliases (gnu_fat_type);
/* If the maximum size doesn't overflow, use it. */
if (gnu_max_size
&& TREE_CODE (gnu_max_size) == INTEGER_CST
&& !TREE_OVERFLOW (gnu_max_size)
&& compare_tree_int (gnu_max_size, TYPE_ARRAY_SIZE_LIMIT) <= 0)
TYPE_ARRAY_MAX_SIZE (tem) = gnu_max_size;
/* See the above description for the rationale. */
create_type_decl (create_concat_name (gnat_entity, "XUA"), tem,
artificial_p, debug_info_p, gnat_entity);
TYPE_CONTEXT (tem) = gnu_fat_type;
TYPE_CONTEXT (TYPE_POINTER_TO (tem)) = gnu_fat_type;
/* Create the type to be designated by thin pointers: a record type for
the array and its template. We used to shift the fields to have the
template at a negative offset, but this was somewhat of a kludge; we
now shift thin pointer values explicitly but only those which have a
TYPE_UNCONSTRAINED_ARRAY attached to the designated RECORD_TYPE.
If the GNAT encodings are used, give it a name. */
tree xut_name
= (gnat_encodings == DWARF_GNAT_ENCODINGS_ALL)
? create_concat_name (gnat_name, "XUT")
: gnu_entity_name;
obj = build_unc_object_type (gnu_template_type, tem, xut_name,
debug_info_p);
SET_TYPE_UNCONSTRAINED_ARRAY (obj, gnu_type);
TYPE_OBJECT_RECORD_TYPE (gnu_type) = obj;
/* The result type is an UNCONSTRAINED_ARRAY_TYPE that indicates the
corresponding fat pointer. */
TREE_TYPE (gnu_type) = gnu_fat_type;
TYPE_POINTER_TO (gnu_type) = gnu_fat_type;
TYPE_REFERENCE_TO (gnu_type) = gnu_fat_type;
SET_TYPE_MODE (gnu_type, BLKmode);
SET_TYPE_ALIGN (gnu_type, TYPE_ALIGN (tem));
}
break;
case E_Array_Subtype:
/* This is the actual data type for array variables. Multidimensional
arrays are implemented as arrays of arrays. Note that arrays which
have sparse enumeration subtypes as index components create sparse
arrays, which is obviously space inefficient but so much easier to
code for now.
Also note that the subtype never refers to the unconstrained array
type, which is somewhat at variance with Ada semantics.
First check to see if this is simply a renaming of the array type.
If so, the result is the array type. */
gnu_type = TYPE_MAIN_VARIANT (gnat_to_gnu_type (Etype (gnat_entity)));
if (!Is_Constrained (gnat_entity))
;
else
{
const Entity_Id PAT = Packed_Array_Impl_Type (gnat_entity);
Entity_Id gnat_index, gnat_base_index;
const bool convention_fortran_p
= (Convention (gnat_entity) == Convention_Fortran);
const int ndim = Number_Dimensions (gnat_entity);
tree gnu_base_type = gnu_type;
tree *gnu_index_types = XALLOCAVEC (tree, ndim);
tree gnu_max_size = size_one_node;
bool need_index_type_struct = false;
int index;
/* First create the GCC type for each index and find out whether
special types are needed for debugging information. */
for (index = (convention_fortran_p ? ndim - 1 : 0),
gnat_index = First_Index (gnat_entity),
gnat_base_index
= First_Index (Implementation_Base_Type (gnat_entity));
IN_RANGE (index, 0, ndim - 1);
index += (convention_fortran_p ? - 1 : 1),
gnat_index = Next_Index (gnat_index),
gnat_base_index = Next_Index (gnat_base_index))
{
tree gnu_index_type = get_unpadded_type (Etype (gnat_index));
tree gnu_orig_min = TYPE_MIN_VALUE (gnu_index_type);
tree gnu_orig_max = TYPE_MAX_VALUE (gnu_index_type);
tree gnu_index_base_type = get_base_type (gnu_index_type);
tree gnu_base_index_type
= get_unpadded_type (Etype (gnat_base_index));
tree gnu_base_orig_min = TYPE_MIN_VALUE (gnu_base_index_type);
tree gnu_base_orig_max = TYPE_MAX_VALUE (gnu_base_index_type);
tree gnu_min, gnu_max, gnu_high;
/* We try to create subtypes for discriminants used as bounds
that are more restrictive than those declared, by using the
bounds of the index type of the base array type. This will
make it possible to calculate the maximum size of the record
type more conservatively. This may have already been done by
the front-end (Exp_Ch3.Adjust_Discriminants), in which case
there will be a conversion that needs to be removed first. */
if (CONTAINS_PLACEHOLDER_P (gnu_orig_min)
&& TYPE_RM_SIZE (gnu_base_index_type)
&& tree_int_cst_lt (TYPE_RM_SIZE (gnu_base_index_type),
TYPE_RM_SIZE (gnu_index_type)))
{
gnu_orig_min = remove_conversions (gnu_orig_min, false);
TREE_TYPE (gnu_orig_min)
= create_extra_subtype (TREE_TYPE (gnu_orig_min),
gnu_base_orig_min,
gnu_base_orig_max);
}
if (CONTAINS_PLACEHOLDER_P (gnu_orig_max)
&& TYPE_RM_SIZE (gnu_base_index_type)
&& tree_int_cst_lt (TYPE_RM_SIZE (gnu_base_index_type),
TYPE_RM_SIZE (gnu_index_type)))
{
gnu_orig_max = remove_conversions (gnu_orig_max, false);
TREE_TYPE (gnu_orig_max)
= create_extra_subtype (TREE_TYPE (gnu_orig_max),
gnu_base_orig_min,
gnu_base_orig_max);
}
/* Update the maximum size of the array in elements. Here we
see if any constraint on the index type of the base type
can be used in the case of self-referential bounds on the
index type of the array type. We look for a non-"infinite"
and non-self-referential bound from any type involved and
handle each bound separately. */
if (gnu_max_size)
{
if (CONTAINS_PLACEHOLDER_P (gnu_orig_min))
gnu_min = gnu_base_orig_min;
else
gnu_min = gnu_orig_min;
if (TREE_CODE (gnu_min) != INTEGER_CST
|| TREE_OVERFLOW (gnu_min))
gnu_min = TYPE_MIN_VALUE (TREE_TYPE (gnu_min));
if (CONTAINS_PLACEHOLDER_P (gnu_orig_max))
gnu_max = gnu_base_orig_max;
else
gnu_max = gnu_orig_max;
if (TREE_CODE (gnu_max) != INTEGER_CST
|| TREE_OVERFLOW (gnu_max))
gnu_max = TYPE_MAX_VALUE (TREE_TYPE (gnu_max));
gnu_max_size
= update_n_elem (gnu_max_size, gnu_min, gnu_max);
}
/* Convert the bounds to the base type for consistency below. */
gnu_index_base_type = maybe_character_type (gnu_index_base_type);
gnu_orig_min = convert (gnu_index_base_type, gnu_orig_min);
gnu_orig_max = convert (gnu_index_base_type, gnu_orig_max);
gnu_min = convert (sizetype, gnu_orig_min);
gnu_max = convert (sizetype, gnu_orig_max);
/* See if the base array type is already flat. If it is, we
are probably compiling an ACATS test but it will cause the
code below to malfunction if we don't handle it specially. */
if (TREE_CODE (gnu_base_orig_min) == INTEGER_CST
&& TREE_CODE (gnu_base_orig_max) == INTEGER_CST
&& tree_int_cst_lt (gnu_base_orig_max, gnu_base_orig_min))
{
gnu_min = size_one_node;
gnu_max = size_zero_node;
gnu_high = gnu_max;
}
/* Similarly, if one of the values overflows in sizetype and the
range is null, use 1..0 for the sizetype bounds. */
else if (TREE_CODE (gnu_min) == INTEGER_CST
&& TREE_CODE (gnu_max) == INTEGER_CST
&& (TREE_OVERFLOW (gnu_min) || TREE_OVERFLOW (gnu_max))
&& tree_int_cst_lt (gnu_orig_max, gnu_orig_min))
{
gnu_min = size_one_node;
gnu_max = size_zero_node;
gnu_high = gnu_max;
}
/* If the minimum and maximum values both overflow in sizetype,
but the difference in the original type does not overflow in
sizetype, ignore the overflow indication. */
else if (TREE_CODE (gnu_min) == INTEGER_CST
&& TREE_CODE (gnu_max) == INTEGER_CST
&& TREE_OVERFLOW (gnu_min) && TREE_OVERFLOW (gnu_max)
&& !TREE_OVERFLOW
(convert (sizetype,
fold_build2 (MINUS_EXPR,
gnu_index_base_type,
gnu_orig_max,
gnu_orig_min))))
{
TREE_OVERFLOW (gnu_min) = 0;
TREE_OVERFLOW (gnu_max) = 0;
gnu_high = gnu_max;
}
/* Compute the size of this dimension in the general case. We
need to provide GCC with an upper bound to use but have to
deal with the "superflat" case. There are three ways to do
this. If we can prove that the array can never be superflat,
we can just use the high bound of the index type. */
else if ((Nkind (gnat_index) == N_Range
&& range_cannot_be_superflat (gnat_index))
/* Bit-Packed Array Impl. Types are never superflat. */
|| (Is_Packed_Array_Impl_Type (gnat_entity)
&& Is_Bit_Packed_Array
(Original_Array_Type (gnat_entity))))
gnu_high = gnu_max;
/* Otherwise, if the high bound is constant but the low bound is
not, we use the expression (hb >= lb) ? lb : hb + 1 for the
lower bound. Note that the comparison must be done in the
original type to avoid any overflow during the conversion. */
else if (TREE_CODE (gnu_max) == INTEGER_CST
&& TREE_CODE (gnu_min) != INTEGER_CST)
{
gnu_high = gnu_max;
gnu_min
= build_cond_expr (sizetype,
build_binary_op (GE_EXPR,
boolean_type_node,
gnu_orig_max,
gnu_orig_min),
gnu_min,
int_const_binop (PLUS_EXPR, gnu_max,
size_one_node));
}
/* Finally we use (hb >= lb) ? hb : lb - 1 for the upper bound
in all the other cases. Note that we use int_const_binop for
the shift by 1 if the bound is constant to avoid any unwanted
overflow. */
else
gnu_high
= build_cond_expr (sizetype,
build_binary_op (GE_EXPR,
boolean_type_node,
gnu_orig_max,
gnu_orig_min),
gnu_max,
TREE_CODE (gnu_min) == INTEGER_CST
? int_const_binop (MINUS_EXPR, gnu_min,
size_one_node)
: size_binop (MINUS_EXPR, gnu_min,
size_one_node));
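/* Hypothetical example of the superflat case handled above: for
     subtype Slice is String (N .. M);
   nothing prevents M from being smaller than N - 1 at run time, in
   which case M - N + 1 converted to sizetype would be a huge number;
   the (hb >= lb) ? hb : lb - 1 expression above clamps the upper
   bound so that the computed length is 0 for any such null range. */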
/* Reuse the index type for the range type. Then make an index
type with the size range in sizetype. */
gnu_index_types[index]
= create_index_type (gnu_min, gnu_high, gnu_index_type,
gnat_entity);
/* We need special types for debugging information to point to
the index types if they have variable bounds, are not integer
types, are biased or are wider than sizetype. These are GNAT
encodings, so we have to include them only when all encodings
are requested. */
if ((TREE_CODE (gnu_orig_min) != INTEGER_CST
|| TREE_CODE (gnu_orig_max) != INTEGER_CST
|| TREE_CODE (gnu_index_type) != INTEGER_TYPE
|| (TREE_TYPE (gnu_index_type)
&& TREE_CODE (TREE_TYPE (gnu_index_type))
!= INTEGER_TYPE)
|| TYPE_BIASED_REPRESENTATION_P (gnu_index_type))
&& gnat_encodings == DWARF_GNAT_ENCODINGS_ALL)
need_index_type_struct = true;
}
/* Then flatten: create the array of arrays. For an array type
used to implement a packed array, get the component type from
the original array type since the representation clauses that
can affect it are on the latter. */
if (Is_Packed_Array_Impl_Type (gnat_entity)
&& !Is_Bit_Packed_Array (Original_Array_Type (gnat_entity)))
{
gnu_type = gnat_to_gnu_type (Original_Array_Type (gnat_entity));
for (index = ndim - 1; index >= 0; index--)
gnu_type = TREE_TYPE (gnu_type);
/* One of the above calls might have caused us to be elaborated,
so don't blow up if so. */
if (present_gnu_tree (gnat_entity))
{
maybe_present = true;
break;
}
}
else
{
gnu_type = gnat_to_gnu_component_type (gnat_entity, definition,
debug_info_p);
/* One of the above calls might have caused us to be elaborated,
so don't blow up if so. */
if (present_gnu_tree (gnat_entity))
{
maybe_present = true;
break;
}
}
/* Compute the maximum size of the array in units. */
if (gnu_max_size)
gnu_max_size
= size_binop (MULT_EXPR, gnu_max_size, TYPE_SIZE_UNIT (gnu_type));
/* Now build the array type. */
for (index = ndim - 1; index >= 0; index--)
{
gnu_type = build_nonshared_array_type (gnu_type,
gnu_index_types[index]);
TYPE_MULTI_ARRAY_P (gnu_type) = (index > 0);
TYPE_CONVENTION_FORTRAN_P (gnu_type) = convention_fortran_p;
if (index == ndim - 1 && Reverse_Storage_Order (gnat_entity))
set_reverse_storage_order_on_array_type (gnu_type);
if (array_type_has_nonaliased_component (gnu_type, gnat_entity))
set_nonaliased_component_on_array_type (gnu_type);
/* Kludge to remove the TREE_OVERFLOW flag for the sake of LTO
on maximally-sized array types designated by access types. */
if (integer_zerop (TYPE_SIZE (gnu_type))
&& TREE_OVERFLOW (TYPE_SIZE (gnu_type))
&& Is_Itype (gnat_entity)
&& (gnat_temp = Associated_Node_For_Itype (gnat_entity))
&& IN (Nkind (gnat_temp), N_Declaration)
&& Is_Access_Type (Defining_Entity (gnat_temp))
&& Is_Entity_Name (First_Index (gnat_entity))
&& UI_To_Int (RM_Size (Entity (First_Index (gnat_entity))))
== BITS_PER_WORD)
{
TYPE_SIZE (gnu_type) = bitsize_zero_node;
TYPE_SIZE_UNIT (gnu_type) = size_zero_node;
}
}
/* Attach the TYPE_STUB_DECL in case we have a parallel type. */
TYPE_STUB_DECL (gnu_type)
= create_type_stub_decl (gnu_entity_name, gnu_type);
/* If this is a multi-dimensional array and we are at global level,
we need to make a variable corresponding to the stride of the
inner dimensions. */
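/* Sketch (encoded names approximate): for a library-level
     type Matrix is array (1 .. N, 1 .. M) of Float;
   with non-static N and M, the size of the inner dimension is not a
   compile-time constant, so variables such as matrix___ST1 and
   matrix___ST1_A_UNIT are created below to hold the stride of the
   inner dimension. */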
if (ndim > 1 && global_bindings_p ())
{
tree gnu_arr_type;
for (gnu_arr_type = TREE_TYPE (gnu_type), index = 1;
TREE_CODE (gnu_arr_type) == ARRAY_TYPE;
gnu_arr_type = TREE_TYPE (gnu_arr_type), index++)
{
tree eltype = TREE_TYPE (gnu_arr_type);
char stride_name[32];
sprintf (stride_name, "ST%d", index);
TYPE_SIZE (gnu_arr_type)
= elaborate_expression_1 (TYPE_SIZE (gnu_arr_type),
gnat_entity, stride_name,
definition, false);
/* ??? For now, store the size as a multiple of the
alignment of the element type in bytes so that we
can see the alignment from the tree. */
sprintf (stride_name, "ST%d_A_UNIT", index);
TYPE_SIZE_UNIT (gnu_arr_type)
= elaborate_expression_2 (TYPE_SIZE_UNIT (gnu_arr_type),
gnat_entity, stride_name,
definition, false,
TYPE_ALIGN (eltype));
/* ??? create_type_decl is not invoked on the inner types so
the MULT_EXPR node built above will never be marked. */
MARK_VISITED (TYPE_SIZE_UNIT (gnu_arr_type));
}
}
/* Set the TYPE_PACKED flag on packed array types and also on their
implementation types, so that the DWARF back-end can output the
appropriate description for them. */
TYPE_PACKED (gnu_type)
= (Is_Packed (gnat_entity)
|| Is_Packed_Array_Impl_Type (gnat_entity));
TYPE_BIT_PACKED_ARRAY_TYPE_P (gnu_type)
= (Is_Packed_Array_Impl_Type (gnat_entity)
&& Is_Bit_Packed_Array (Original_Array_Type (gnat_entity)));
/* If the maximum size doesn't overflow, use it. */
if (gnu_max_size
&& TREE_CODE (gnu_max_size) == INTEGER_CST
&& !TREE_OVERFLOW (gnu_max_size)
&& compare_tree_int (gnu_max_size, TYPE_ARRAY_SIZE_LIMIT) <= 0)
TYPE_ARRAY_MAX_SIZE (gnu_type) = gnu_max_size;
/* If we need to write out a record type giving the names of the
bounds for debugging purposes, do it now and make the record
type a parallel type. This is not needed for a packed array
since the bounds are conveyed by the original array type. */
if (need_index_type_struct
&& debug_info_p
&& !Is_Packed_Array_Impl_Type (gnat_entity))
{
tree gnu_bound_rec = make_node (RECORD_TYPE);
tree gnu_field_list = NULL_TREE;
tree gnu_field;
TYPE_NAME (gnu_bound_rec)
= create_concat_name (gnat_entity, "XA");
for (index = ndim - 1; index >= 0; index--)
{
tree gnu_index = TYPE_INDEX_TYPE (gnu_index_types[index]);
tree gnu_index_name = TYPE_IDENTIFIER (gnu_index);
/* Make sure to reference the types themselves, and not just
their names, as the debugger may fall back on them. */
gnu_field = create_field_decl (gnu_index_name, gnu_index,
gnu_bound_rec, NULL_TREE,
NULL_TREE, 0, 0);
DECL_CHAIN (gnu_field) = gnu_field_list;
gnu_field_list = gnu_field;
}
finish_record_type (gnu_bound_rec, gnu_field_list, 0, true);
add_parallel_type (gnu_type, gnu_bound_rec);
}
/* If this is a packed array type, make the original array type a
parallel/debug type. Otherwise, if GNAT encodings are used, do
it for the base array type if it is not artificial to make sure
that it is kept in the debug info. */
if (debug_info_p)
{
if (Is_Packed_Array_Impl_Type (gnat_entity))
{
tree gnu_name
= associate_original_type_to_packed_array (gnu_type,
gnat_entity);
if (gnu_name)
gnu_entity_name = gnu_name;
}
else if (gnat_encodings == DWARF_GNAT_ENCODINGS_ALL)
{
tree gnu_base_decl
= gnat_to_gnu_entity (Etype (gnat_entity), NULL_TREE,
false);
if (!DECL_ARTIFICIAL (gnu_base_decl))
add_parallel_type (gnu_type,
TREE_TYPE (TREE_TYPE (gnu_base_decl)));
}
}
/* Set our alias set to that of our base type. This gives all
array subtypes the same alias set. */
relate_alias_sets (gnu_type, gnu_base_type, ALIAS_SET_COPY);
/* If this is a packed type implemented specially, then replace our
type with the implementation type. */
if (Present (PAT))
{
/* First finish the type we had been making so that we output
debugging information for it. */
process_attributes (&gnu_type, &attr_list, false, gnat_entity);
if (Treat_As_Volatile (gnat_entity))
{
const int quals
= TYPE_QUAL_VOLATILE
| (Is_Full_Access (gnat_entity) ? TYPE_QUAL_ATOMIC : 0);
gnu_type = change_qualified_type (gnu_type, quals);
}
/* Make it artificial only if the base type was artificial too.
That's sort of "morally" true and will make it possible for
the debugger to look it up by name in DWARF, which is needed
in order to decode the packed array type. */
tree gnu_tmp_decl
= create_type_decl (gnu_entity_name, gnu_type,
!Comes_From_Source (Etype (gnat_entity))
&& artificial_p, debug_info_p,
gnat_entity);
/* Save it as our equivalent in case the call below elaborates
this type again. */
save_gnu_tree (gnat_entity, gnu_tmp_decl, false);
gnu_type = gnat_to_gnu_type (PAT);
save_gnu_tree (gnat_entity, NULL_TREE, false);
/* Set the ___XP suffix for GNAT encodings. */
if (gnat_encodings == DWARF_GNAT_ENCODINGS_ALL)
gnu_entity_name = DECL_NAME (TYPE_NAME (gnu_type));
tree gnu_inner = gnu_type;
while (TREE_CODE (gnu_inner) == RECORD_TYPE
&& (TYPE_JUSTIFIED_MODULAR_P (gnu_inner)
|| TYPE_PADDING_P (gnu_inner)))
gnu_inner = TREE_TYPE (TYPE_FIELDS (gnu_inner));
/* We need to attach the index type to the type we just made so
that the actual bounds can later be put into a template. */
if ((TREE_CODE (gnu_inner) == ARRAY_TYPE
&& !TYPE_ACTUAL_BOUNDS (gnu_inner))
|| (TREE_CODE (gnu_inner) == INTEGER_TYPE
&& !TYPE_HAS_ACTUAL_BOUNDS_P (gnu_inner)))
{
if (TREE_CODE (gnu_inner) == INTEGER_TYPE)
{
/* The TYPE_ACTUAL_BOUNDS field is overloaded with the
TYPE_MODULUS for modular types so we make an extra
subtype if necessary. */
if (TYPE_MODULAR_P (gnu_inner))
gnu_inner
= create_extra_subtype (gnu_inner,
TYPE_MIN_VALUE (gnu_inner),
TYPE_MAX_VALUE (gnu_inner));
TYPE_HAS_ACTUAL_BOUNDS_P (gnu_inner) = 1;
/* Check for other cases of overloading. */
gcc_checking_assert (!TYPE_ACTUAL_BOUNDS (gnu_inner));
}
for (Entity_Id gnat_index = First_Index (gnat_entity);
Present (gnat_index);
gnat_index = Next_Index (gnat_index))
SET_TYPE_ACTUAL_BOUNDS
(gnu_inner,
tree_cons (NULL_TREE,
get_unpadded_type (Etype (gnat_index)),
TYPE_ACTUAL_BOUNDS (gnu_inner)));
if (Convention (gnat_entity) != Convention_Fortran)
SET_TYPE_ACTUAL_BOUNDS
(gnu_inner, nreverse (TYPE_ACTUAL_BOUNDS (gnu_inner)));
if (TREE_CODE (gnu_type) == RECORD_TYPE
&& TYPE_JUSTIFIED_MODULAR_P (gnu_type))
TREE_TYPE (TYPE_FIELDS (gnu_type)) = gnu_inner;
}
}
}
break;
case E_String_Literal_Subtype:
/* Create the type for a string literal. */
{
Entity_Id gnat_full_type
= (Is_Private_Type (Etype (gnat_entity))
&& Present (Full_View (Etype (gnat_entity)))
? Full_View (Etype (gnat_entity)) : Etype (gnat_entity));
tree gnu_string_type = get_unpadded_type (gnat_full_type);
tree gnu_string_array_type
= TREE_TYPE (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (gnu_string_type))));
tree gnu_string_index_type
= get_base_type (TREE_TYPE (TYPE_INDEX_TYPE
(TYPE_DOMAIN (gnu_string_array_type))));
tree gnu_lower_bound
= convert (gnu_string_index_type,
gnat_to_gnu (String_Literal_Low_Bound (gnat_entity)));
tree gnu_length
= UI_To_gnu (String_Literal_Length (gnat_entity),
gnu_string_index_type);
tree gnu_upper_bound
= build_binary_op (PLUS_EXPR, gnu_string_index_type,
gnu_lower_bound,
int_const_binop (MINUS_EXPR, gnu_length,
convert (gnu_string_index_type,
integer_one_node)));
tree gnu_index_type
= create_index_type (convert (sizetype, gnu_lower_bound),
convert (sizetype, gnu_upper_bound),
create_range_type (gnu_string_index_type,
gnu_lower_bound,
gnu_upper_bound),
gnat_entity);
gnu_type
= build_nonshared_array_type (gnat_to_gnu_type
(Component_Type (gnat_entity)),
gnu_index_type);
if (array_type_has_nonaliased_component (gnu_type, gnat_entity))
set_nonaliased_component_on_array_type (gnu_type);
relate_alias_sets (gnu_type, gnu_string_type, ALIAS_SET_COPY);
}
break;
/* Record Types and Subtypes
A record type definition is transformed into the equivalent of a C
struct definition. The fields are the discriminants, which are
found in the Full_Type_Declaration node, and the elements of the
Component_List found in the Record_Type_Definition node. The
Component_List can be a recursive structure since each Variant of
the Variant_Part of the Component_List has a Component_List.
Processing of a record type definition comprises starting the list of
field declarations here from the discriminants and then calling the
function components_to_record to add the rest of the fields from the
component list and return the gnu type node. The function
components_to_record will call itself recursively as it traverses
the tree. */
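/* Minimal illustration: an Ada declaration such as
     type Rec (D : Natural := 0) is record
       F : Integer;
     end record;
   is translated into the equivalent of
     struct rec { natural d; integer f; };
   the discriminant D is added to the field list started here and F
   is added by components_to_record from the Component_List. */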
case E_Record_Type:
{
Node_Id record_definition = Type_Definition (gnat_decl);
if (Has_Complex_Representation (gnat_entity))
{
const Node_Id first_component
= First (Component_Items (Component_List (record_definition)));
tree gnu_component_type
= get_unpadded_type (Etype (Defining_Entity (first_component)));
gnu_type = build_complex_type (gnu_component_type);
break;
}
Node_Id gnat_constr;
Entity_Id gnat_field, gnat_parent_type;
tree gnu_field, gnu_field_list = NULL_TREE;
tree gnu_get_parent;
/* Set PACKED in keeping with gnat_to_gnu_field. */
const int packed
= Is_Packed (gnat_entity)
? 1
: Component_Alignment (gnat_entity) == Calign_Storage_Unit
? -1
: 0;
const bool has_align = Known_Alignment (gnat_entity);
const bool has_discr = Has_Discriminants (gnat_entity);
const bool is_extension
= (Is_Tagged_Type (gnat_entity)
&& Nkind (record_definition) == N_Derived_Type_Definition);
const bool has_rep
= is_extension
? Has_Record_Rep_Clause (gnat_entity)
: Has_Specified_Layout (gnat_entity);
const bool is_unchecked_union = Is_Unchecked_Union (gnat_entity);
bool all_rep = has_rep;
/* See if all fields have a rep clause. Stop when we find one
that doesn't. */
if (all_rep)
for (gnat_field = First_Entity (gnat_entity);
Present (gnat_field);
gnat_field = Next_Entity (gnat_field))
if ((Ekind (gnat_field) == E_Component
|| (Ekind (gnat_field) == E_Discriminant
&& !is_unchecked_union))
&& No (Component_Clause (gnat_field)))
{
all_rep = false;
break;
}
/* If this is a record extension, go a level further to find the
record definition. Also, verify we have a Parent_Subtype. */
if (is_extension)
{
if (!type_annotate_only
|| Present (Record_Extension_Part (record_definition)))
record_definition = Record_Extension_Part (record_definition);
gcc_assert (Present (Parent_Subtype (gnat_entity))
|| type_annotate_only);
}
/* Make a node for the record type. */
gnu_type = make_node (tree_code_for_record_type (gnat_entity));
TYPE_NAME (gnu_type) = gnu_entity_name;
TYPE_PACKED (gnu_type) = (packed != 0) || has_align || has_rep;
TYPE_REVERSE_STORAGE_ORDER (gnu_type)
= Reverse_Storage_Order (gnat_entity);
/* If the record type has discriminants, pointers to it may also point
to constrained subtypes of it, so mark it as may_alias for LTO. */
if (has_discr)
prepend_one_attribute
(&attr_list, ATTR_MACHINE_ATTRIBUTE,
get_identifier ("may_alias"), NULL_TREE,
gnat_entity);