blob: 39059cb69a456665ba18cdb707a7bf703b8a2a48 [file] [log] [blame]
/****************************************************************************
* *
* GNAT COMPILER COMPONENTS *
* *
* T R A N S *
* *
* C Implementation File *
* *
* Copyright (C) 1992-2021, Free Software Foundation, Inc. *
* *
* GNAT is free software; you can redistribute it and/or modify it under *
* terms of the GNU General Public License as published by the Free Soft- *
* ware Foundation; either version 3, or (at your option) any later ver- *
* sion. GNAT is distributed in the hope that it will be useful, but WITH- *
* OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY *
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License *
* for more details. You should have received a copy of the GNU General *
* Public License distributed with GNAT; see file COPYING3. If not see *
* <http://www.gnu.org/licenses/>. *
* *
* GNAT was originally developed by the GNAT team at New York University. *
* Extensive contributions were provided by Ada Core Technologies Inc. *
* *
****************************************************************************/
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "function.h"
#include "bitmap.h"
#include "tree.h"
#include "gimple-expr.h"
#include "stringpool.h"
#include "cgraph.h"
#include "predict.h"
#include "diagnostic.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "stmt.h"
#include "varasm.h"
#include "output.h"
#include "debug.h"
#include "libfuncs.h" /* For set_stack_check_libfunc. */
#include "tree-iterator.h"
#include "gimplify.h"
#include "opts.h"
#include "common/common-target.h"
#include "gomp-constants.h"
#include "stringpool.h"
#include "attribs.h"
#include "tree-nested.h"
#include "ada.h"
#include "adadecode.h"
#include "types.h"
#include "atree.h"
#include "namet.h"
#include "nlists.h"
#include "snames.h"
#include "stringt.h"
#include "uintp.h"
#include "urealp.h"
#include "fe.h"
#include "sinfo.h"
#include "einfo.h"
#include "gadaint.h"
#include "ada-tree.h"
#include "gigi.h"
/* We should avoid allocating more than ALLOCA_THRESHOLD bytes via alloca,
for fear of running out of stack space. If we need more, we use xmalloc
instead. */
#define ALLOCA_THRESHOLD 1000
/* Pointers to front-end tables accessed through macros. */
Node_Header *Node_Offsets_Ptr;
any_slot *Slots_Ptr;
Node_Id *Next_Node_Ptr;
Node_Id *Prev_Node_Ptr;
struct Elist_Header *Elists_Ptr;
struct Elmt_Item *Elmts_Ptr;
struct String_Entry *Strings_Ptr;
Char_Code *String_Chars_Ptr;
struct List_Header *List_Headers_Ptr;
/* Highest number in the front-end node table. */
int max_gnat_nodes;
/* True when gigi is being called on an analyzed but unexpanded
tree, and the only purpose of the call is to properly annotate
types with representation information. */
bool type_annotate_only;
/* List of N_Validate_Unchecked_Conversion nodes in the unit. */
static vec<Node_Id> gnat_validate_uc_list;
/* List of expressions of pragma Compile_Time_{Error|Warning} in the unit. */
static vec<Node_Id> gnat_compile_time_expr_list;
/* When not optimizing, we cache the 'First, 'Last and 'Length attributes
of unconstrained array IN parameters to avoid emitting a great deal of
redundant instructions to recompute them each time. */
struct GTY (()) parm_attr_d {
int id; /* GTY doesn't like Entity_Id. */
int dim;
tree first;
tree last;
tree length;
};
typedef struct parm_attr_d *parm_attr;
/* Structure used to record information for a function. */
struct GTY(()) language_function {
vec<parm_attr, va_gc> *parm_attr_cache;
bitmap named_ret_val;
vec<tree, va_gc> *other_ret_val;
int gnat_ret;
};
#define f_parm_attr_cache \
DECL_STRUCT_FUNCTION (current_function_decl)->language->parm_attr_cache
#define f_named_ret_val \
DECL_STRUCT_FUNCTION (current_function_decl)->language->named_ret_val
#define f_other_ret_val \
DECL_STRUCT_FUNCTION (current_function_decl)->language->other_ret_val
#define f_gnat_ret \
DECL_STRUCT_FUNCTION (current_function_decl)->language->gnat_ret
/* A structure used to gather together information about a statement group.
We use this to gather related statements, for example the "then" part
of a IF. In the case where it represents a lexical scope, we may also
have a BLOCK node corresponding to it and/or cleanups. */
struct GTY((chain_next ("%h.previous"))) stmt_group {
struct stmt_group *previous; /* Previous code group. */
tree stmt_list; /* List of statements for this code group. */
tree block; /* BLOCK for this code group, if any. */
tree cleanups; /* Cleanups for this code group, if any. */
};
static GTY(()) struct stmt_group *current_stmt_group;
/* List of unused struct stmt_group nodes. */
static GTY((deletable)) struct stmt_group *stmt_group_free_list;
/* A structure used to record information on elaboration procedures
we've made and need to process.
??? gnat_node should be Node_Id, but gengtype gets confused. */
struct GTY((chain_next ("%h.next"))) elab_info {
struct elab_info *next; /* Pointer to next in chain. */
tree elab_proc; /* Elaboration procedure. */
int gnat_node; /* The N_Compilation_Unit. */
};
static GTY(()) struct elab_info *elab_info_list;
/* Stack of exception pointer variables. Each entry is the VAR_DECL
that stores the address of the raised exception. Nonzero means we
are in an exception handler. Not used in the zero-cost case. */
static GTY(()) vec<tree, va_gc> *gnu_except_ptr_stack;
/* In ZCX case, current exception pointer. Used to re-raise it. */
static GTY(()) tree gnu_incoming_exc_ptr;
/* Stack for storing the current elaboration procedure decl. */
static GTY(()) vec<tree, va_gc> *gnu_elab_proc_stack;
/* Stack of labels to be used as a goto target instead of a return in
some functions. See processing for N_Subprogram_Body. */
static GTY(()) vec<tree, va_gc> *gnu_return_label_stack;
/* Stack of variable for the return value of a function with copy-in/copy-out
parameters. See processing for N_Subprogram_Body. */
static GTY(()) vec<tree, va_gc> *gnu_return_var_stack;
/* Structure used to record information for a range check. */
struct GTY(()) range_check_info_d {
tree low_bound;
tree high_bound;
tree disp;
bool neg_p;
tree type;
tree invariant_cond;
tree inserted_cond;
};
typedef struct range_check_info_d *range_check_info;
/* Structure used to record information for a loop. */
struct GTY(()) loop_info_d {
tree fndecl;
tree stmt;
tree loop_var;
tree low_bound;
tree high_bound;
tree omp_loop_clauses;
tree omp_construct_clauses;
enum tree_code omp_code;
vec<range_check_info, va_gc> *checks;
vec<tree, va_gc> *invariants;
};
typedef struct loop_info_d *loop_info;
/* Stack of loop_info structures associated with LOOP_STMT nodes. */
static GTY(()) vec<loop_info, va_gc> *gnu_loop_stack;
/* The stacks for N_{Push,Pop}_*_Label. */
static vec<Entity_Id> gnu_constraint_error_label_stack;
static vec<Entity_Id> gnu_storage_error_label_stack;
static vec<Entity_Id> gnu_program_error_label_stack;
/* Map GNAT tree codes to GCC tree codes for simple expressions. */
static enum tree_code gnu_codes[Number_Node_Kinds];
static void init_code_table (void);
static tree get_elaboration_procedure (void);
static void Compilation_Unit_to_gnu (Node_Id);
static bool empty_stmt_list_p (tree);
static void record_code_position (Node_Id);
static void insert_code_for (Node_Id);
static void add_cleanup (tree, Node_Id);
static void add_stmt_list (List_Id);
static tree build_stmt_group (List_Id, bool);
static inline bool stmt_group_may_fallthru (void);
static enum gimplify_status gnat_gimplify_stmt (tree *);
static void elaborate_all_entities (Node_Id);
static void process_freeze_entity (Node_Id);
static void process_decls (List_Id, List_Id, Node_Id, bool, bool);
static tree emit_check (tree, tree, int, Node_Id);
static tree build_unary_op_trapv (enum tree_code, tree, tree, Node_Id);
static tree build_binary_op_trapv (enum tree_code, tree, tree, tree, Node_Id);
static tree convert_with_check (Entity_Id, tree, bool, bool, Node_Id);
static bool addressable_p (tree, tree);
static tree assoc_to_constructor (Entity_Id, Node_Id, tree);
static tree pos_to_constructor (Node_Id, tree);
static void validate_unchecked_conversion (Node_Id);
static void set_expr_location_from_node (tree, Node_Id, bool = false);
static void set_gnu_expr_location_from_node (tree, Node_Id);
static bool set_end_locus_from_node (tree, Node_Id);
static int lvalue_required_p (Node_Id, tree, bool, bool);
static tree build_raise_check (int, enum exception_info_kind);
static tree create_init_temporary (const char *, tree, tree *, Node_Id);
static bool maybe_make_gnu_thunk (Entity_Id gnat_thunk, tree gnu_thunk);
/* This makes gigi's file_info_ptr visible in this translation unit,
so that Sloc_to_locus can look it up when deciding whether to map
decls to instances. */
static struct File_Info_Type *file_map;
/* Return the string of the identifier allocated for the file name Id.  */

static const char *
File_Name_to_gnu (Name_Id Id)
{
  /* Translate the file name first: pragmas Source_Reference may contain
     host style syntax not understood by GDB, which
     __gnat_to_canonical_file_spec maps to a canonical form.  */
  const char *canonical_name
    = __gnat_to_canonical_file_spec (Get_Name_String (Id));

  /* The front-end name table gets reallocated after Gigi returns but before
     all the debugging information is output, so make a permanent copy of
     the file name in the identifier table and return that storage.  */
  tree id = get_identifier (canonical_name);

  return IDENTIFIER_POINTER (id);
}
/* This is the main program of the back-end.  It sets up all the table
   structures and then generates code.

   GNAT_ROOT is the N_Compilation_Unit node for the unit to be translated;
   the *_PTR parameters expose the front-end tables to the accessor macros;
   NUMBER_FILE/FILE_INFO_PTR describe the source files for line-map setup;
   the STANDARD_* parameters are the entities of package Standard needed to
   bootstrap the type system; GIGI_OPERATING_MODE selects between full code
   generation (0) and type annotation only (1).  */

void
gigi (Node_Id gnat_root,
      int max_gnat_node,
      int number_name ATTRIBUTE_UNUSED,
      Node_Header *node_offsets_ptr,
      any_slot *slots_ptr,
      Node_Id *next_node_ptr,
      Node_Id *prev_node_ptr,
      struct Elist_Header *elists_ptr,
      struct Elmt_Item *elmts_ptr,
      struct String_Entry *strings_ptr,
      Char_Code *string_chars_ptr,
      struct List_Header *list_headers_ptr,
      Nat number_file,
      struct File_Info_Type *file_info_ptr,
      Entity_Id standard_boolean,
      Entity_Id standard_integer,
      Entity_Id standard_character,
      Entity_Id standard_long_long_float,
      Entity_Id standard_exception_type,
      Int gigi_operating_mode)
{
  Node_Id gnat_iter;
  Entity_Id gnat_literal;
  tree t, ftype, int64_type;
  struct elab_info *info;
  int i;

  /* Publish the front-end tables through the file-scope pointers that the
     accessor macros dereference.  */
  max_gnat_nodes = max_gnat_node;

  Node_Offsets_Ptr = node_offsets_ptr;
  Slots_Ptr = slots_ptr;
  Next_Node_Ptr = next_node_ptr;
  Prev_Node_Ptr = prev_node_ptr;
  Elists_Ptr = elists_ptr;
  Elmts_Ptr = elmts_ptr;
  Strings_Ptr = strings_ptr;
  String_Chars_Ptr = string_chars_ptr;
  List_Headers_Ptr = list_headers_ptr;

  type_annotate_only = (gigi_operating_mode == 1);

  /* Keep the file table visible for Sloc_to_locus when SCO instance
     mapping is requested.  */
  if (Generate_SCO_Instance_Table != 0)
    {
      file_map = file_info_ptr;
      maybe_create_decl_to_instance_map (number_file);
    }

  for (i = 0; i < number_file; i++)
    {
      /* We rely on the order isomorphism between files and line maps.  */
      if ((int) LINEMAPS_ORDINARY_USED (line_table) != i)
        {
          gcc_assert (i > 0);
          error ("%s contains too many lines",
                 File_Name_to_gnu (file_info_ptr[i - 1].File_Name));
        }

      /* We create the line map for a source file at once, with a fixed number
         of columns chosen to avoid jumping over the next power of 2.  */
      linemap_add (line_table, LC_ENTER, 0,
                   File_Name_to_gnu (file_info_ptr[i].File_Name), 1);
      linemap_line_start (line_table, file_info_ptr[i].Num_Source_Lines, 252);
      linemap_position_for_column (line_table, 252 - 1);
      linemap_add (line_table, LC_LEAVE, 0, NULL, 0);
    }

  gcc_assert (Nkind (gnat_root) == N_Compilation_Unit);

  /* Declare the name of the compilation unit as the first global
     name in order to make the middle-end fully deterministic.  */
  t = create_concat_name (Defining_Entity (Unit (gnat_root)), NULL);
  first_global_object_name = ggc_strdup (IDENTIFIER_POINTER (t));

  /* Initialize ourselves.  */
  init_code_table ();
  init_gnat_decl ();
  init_gnat_utils ();

  /* If we are just annotating types, give VOID_TYPE zero sizes to avoid
     errors.  */
  if (type_annotate_only)
    {
      TYPE_SIZE (void_type_node) = bitsize_zero_node;
      TYPE_SIZE_UNIT (void_type_node) = size_zero_node;
    }

  /* Enable GNAT stack checking method if needed */
  if (!Stack_Check_Probes_On_Target)
    set_stack_check_libfunc ("_gnat_stack_check");

  /* Retrieve alignment settings.  */
  double_float_alignment = get_target_double_float_alignment ();
  double_scalar_alignment = get_target_double_scalar_alignment ();

  /* Record the builtin types.  Define `integer' and `character' first so that
     dbx will output them first.  */
  record_builtin_type ("integer", integer_type_node, false);
  record_builtin_type ("character", char_type_node, false);
  record_builtin_type ("boolean", boolean_type_node, false);
  record_builtin_type ("void", void_type_node, false);

  /* Save the type we made for integer as the type for Standard.Integer.  */
  save_gnu_tree (Base_Type (standard_integer),
                 TYPE_NAME (integer_type_node),
                 false);

  /* Likewise for character as the type for Standard.Character.  */
  finish_character_type (char_type_node);
  save_gnu_tree (Base_Type (standard_character),
                 TYPE_NAME (char_type_node),
                 false);

  /* Likewise for boolean as the type for Standard.Boolean.  */
  save_gnu_tree (Base_Type (standard_boolean),
                 TYPE_NAME (boolean_type_node),
                 false);

  /* Translate the two literals of Standard.Boolean, checking that their
     representation matches GCC's boolean_false_node/boolean_true_node.  */
  gnat_literal = First_Literal (Base_Type (standard_boolean));
  t = UI_To_gnu (Enumeration_Rep (gnat_literal), boolean_type_node);
  gcc_assert (t == boolean_false_node);
  t = create_var_decl (get_entity_name (gnat_literal), NULL_TREE,
                       boolean_type_node, t, true, false, false, false, false,
                       true, false, NULL, gnat_literal);
  save_gnu_tree (gnat_literal, t, false);
  gnat_literal = Next_Literal (gnat_literal);
  t = UI_To_gnu (Enumeration_Rep (gnat_literal), boolean_type_node);
  gcc_assert (t == boolean_true_node);
  t = create_var_decl (get_entity_name (gnat_literal), NULL_TREE,
                       boolean_type_node, t, true, false, false, false, false,
                       true, false, NULL, gnat_literal);
  save_gnu_tree (gnat_literal, t, false);

  /* Declare the building blocks of function nodes.  */
  void_list_node = build_tree_list (NULL_TREE, void_type_node);
  void_ftype = build_function_type_list (void_type_node, NULL_TREE);
  ptr_void_ftype = build_pointer_type (void_ftype);

  /* Now declare run-time functions.  */
  malloc_decl
    = create_subprog_decl (get_identifier ("__gnat_malloc"), NULL_TREE,
                           build_function_type_list (ptr_type_node, sizetype,
                                                     NULL_TREE),
                           NULL_TREE, is_default, true, true, true, false,
                           false, NULL, Empty);
  DECL_IS_MALLOC (malloc_decl) = 1;

  free_decl
    = create_subprog_decl (get_identifier ("__gnat_free"), NULL_TREE,
                           build_function_type_list (void_type_node,
                                                     ptr_type_node, NULL_TREE),
                           NULL_TREE, is_default, true, true, true, false,
                           false, NULL, Empty);

  realloc_decl
    = create_subprog_decl (get_identifier ("__gnat_realloc"), NULL_TREE,
                           build_function_type_list (ptr_type_node,
                                                     ptr_type_node, sizetype,
                                                     NULL_TREE),
                           NULL_TREE, is_default, true, true, true, false,
                           false, NULL, Empty);

  /* This is used for 64-bit multiplication with overflow checking.  */
  int64_type = gnat_type_for_size (64, 0);
  mulv64_decl
    = create_subprog_decl (get_identifier ("__gnat_mulv64"), NULL_TREE,
                           build_function_type_list (int64_type, int64_type,
                                                     int64_type, NULL_TREE),
                           NULL_TREE, is_default, true, true, true, false,
                           false, NULL, Empty);

  /* Likewise for 128-bit multiplication when 128-bit types are enabled.  */
  if (Enable_128bit_Types)
    {
      tree int128_type = gnat_type_for_size (128, 0);
      mulv128_decl
        = create_subprog_decl (get_identifier ("__gnat_mulv128"), NULL_TREE,
                               build_function_type_list (int128_type,
                                                         int128_type,
                                                         int128_type,
                                                         NULL_TREE),
                               NULL_TREE, is_default, true, true, true, false,
                               false, NULL, Empty);
    }

  /* Name of the _Parent field in tagged record types.  */
  parent_name_id = get_identifier (Get_Name_String (Name_uParent));

  /* Name of the Not_Handled_By_Others field in exception record types.  */
  not_handled_by_others_name_id = get_identifier ("not_handled_by_others");

  /* Make the types and functions used for exception processing.  */
  except_type_node = gnat_to_gnu_type (Base_Type (standard_exception_type));

  /* Look up the Not_Handled_By_Others field by name in the translated
     exception record type; it must be present.  */
  for (t = TYPE_FIELDS (except_type_node); t; t = DECL_CHAIN (t))
    if (DECL_NAME (t) == not_handled_by_others_name_id)
      {
        not_handled_by_others_decl = t;
        break;
      }
  gcc_assert (DECL_P (not_handled_by_others_decl));

  jmpbuf_type
    = build_array_type (gnat_type_for_mode (Pmode, 0),
                        build_index_type (size_int (5)));
  record_builtin_type ("JMPBUF_T", jmpbuf_type, true);
  jmpbuf_ptr_type = build_pointer_type (jmpbuf_type);

  /* Functions to get and set the jumpbuf pointer for the current thread.  */
  get_jmpbuf_decl
    = create_subprog_decl
      (get_identifier ("system__soft_links__get_jmpbuf_address_soft"),
       NULL_TREE, build_function_type_list (jmpbuf_ptr_type, NULL_TREE),
       NULL_TREE, is_default, true, true, true, false, false, NULL, Empty);

  set_jmpbuf_decl
    = create_subprog_decl
      (get_identifier ("system__soft_links__set_jmpbuf_address_soft"),
       NULL_TREE, build_function_type_list (void_type_node, jmpbuf_ptr_type,
                                            NULL_TREE),
       NULL_TREE, is_default, true, true, true, false, false, NULL, Empty);

  get_excptr_decl
    = create_subprog_decl
      (get_identifier ("system__soft_links__get_gnat_exception"), NULL_TREE,
       build_function_type_list (build_pointer_type (except_type_node),
                                 NULL_TREE),
       NULL_TREE, is_default, true, true, true, false, false, NULL, Empty);

  /* setjmp returns an integer and has one operand, which is a pointer to
     a jmpbuf.  */
  setjmp_decl
    = create_subprog_decl
      (get_identifier ("__builtin_setjmp"), NULL_TREE,
       build_function_type_list (integer_type_node, jmpbuf_ptr_type,
                                 NULL_TREE),
       NULL_TREE, is_default, true, true, true, false, false, NULL, Empty);
  set_decl_built_in_function (setjmp_decl, BUILT_IN_NORMAL, BUILT_IN_SETJMP);

  /* update_setjmp_buf updates a setjmp buffer from the current stack pointer
     address.  */
  update_setjmp_buf_decl
    = create_subprog_decl
      (get_identifier ("__builtin_update_setjmp_buf"), NULL_TREE,
       build_function_type_list (void_type_node, jmpbuf_ptr_type, NULL_TREE),
       NULL_TREE, is_default, true, true, true, false, false, NULL, Empty);
  set_decl_built_in_function (update_setjmp_buf_decl, BUILT_IN_NORMAL,
                              BUILT_IN_UPDATE_SETJMP_BUF);

  /* Indicate that it never returns.  */
  ftype = build_function_type_list (void_type_node,
                                    build_pointer_type (except_type_node),
                                    NULL_TREE);
  ftype = build_qualified_type (ftype, TYPE_QUAL_VOLATILE);
  raise_nodefer_decl
    = create_subprog_decl
      (get_identifier ("__gnat_raise_nodefer_with_msg"), NULL_TREE, ftype,
       NULL_TREE, is_default, true, true, true, false, false, NULL, Empty);

  set_exception_parameter_decl
    = create_subprog_decl
      (get_identifier ("__gnat_set_exception_parameter"), NULL_TREE,
       build_function_type_list (void_type_node, ptr_type_node, ptr_type_node,
                                 NULL_TREE),
       NULL_TREE, is_default, true, true, true, false, false, NULL, Empty);

  /* Hooks to call when entering/leaving an exception handler.  */
  ftype = build_function_type_list (ptr_type_node,
                                    ptr_type_node, NULL_TREE);
  begin_handler_decl
    = create_subprog_decl (get_identifier ("__gnat_begin_handler_v1"),
                           NULL_TREE, ftype, NULL_TREE,
                           is_default, true, true, true, false, false, NULL,
                           Empty);
  /* __gnat_begin_handler_v1 is not a dummy procedure, but we arrange
     for it not to throw.  */
  TREE_NOTHROW (begin_handler_decl) = 1;

  ftype = build_function_type_list (ptr_type_node,
                                    ptr_type_node, ptr_type_node,
                                    ptr_type_node, NULL_TREE);
  end_handler_decl
    = create_subprog_decl (get_identifier ("__gnat_end_handler_v1"), NULL_TREE,
                           ftype, NULL_TREE,
                           is_default, true, true, true, false, false, NULL,
                           Empty);

  ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
  unhandled_except_decl
    = create_subprog_decl (get_identifier ("__gnat_unhandled_except_handler"),
                           NULL_TREE, ftype, NULL_TREE,
                           is_default, true, true, true, false, false, NULL,
                           Empty);

  /* Indicate that it never returns.  */
  ftype = build_qualified_type (ftype, TYPE_QUAL_VOLATILE);
  reraise_zcx_decl
    = create_subprog_decl (get_identifier ("__gnat_reraise_zcx"), NULL_TREE,
                           ftype, NULL_TREE,
                           is_default, true, true, true, false, false, NULL,
                           Empty);

  /* Dummy objects to materialize "others" and "all others" in the exception
     tables.  These are exported by a-exexpr-gcc.adb, so see this unit for
     the types to use.  */
  others_decl
    = create_var_decl (get_identifier ("OTHERS"),
                       get_identifier ("__gnat_others_value"),
                       char_type_node, NULL_TREE,
                       true, false, true, false, false, true, false,
                       NULL, Empty);

  all_others_decl
    = create_var_decl (get_identifier ("ALL_OTHERS"),
                       get_identifier ("__gnat_all_others_value"),
                       char_type_node, NULL_TREE,
                       true, false, true, false, false, true, false,
                       NULL, Empty);

  unhandled_others_decl
    = create_var_decl (get_identifier ("UNHANDLED_OTHERS"),
                       get_identifier ("__gnat_unhandled_others_value"),
                       char_type_node, NULL_TREE,
                       true, false, true, false, false, true, false,
                       NULL, Empty);

  /* If in no exception handlers mode, all raise statements are redirected to
     __gnat_last_chance_handler.  No need to redefine raise_nodefer_decl since
     this procedure will never be called in this mode.  */
  if (No_Exception_Handlers_Set ())
    {
      /* Indicate that it never returns.  */
      ftype = build_function_type_list (void_type_node,
                                        build_pointer_type (char_type_node),
                                        integer_type_node, NULL_TREE);
      ftype = build_qualified_type (ftype, TYPE_QUAL_VOLATILE);
      tree decl
        = create_subprog_decl
          (get_identifier ("__gnat_last_chance_handler"), NULL_TREE, ftype,
           NULL_TREE, is_default, true, true, true, false, false, NULL,
           Empty);
      for (i = 0; i < (int) ARRAY_SIZE (gnat_raise_decls); i++)
        gnat_raise_decls[i] = decl;
    }
  else
    {
      /* Otherwise, make one decl for each exception reason.  */
      for (i = 0; i < (int) ARRAY_SIZE (gnat_raise_decls); i++)
        gnat_raise_decls[i] = build_raise_check (i, exception_simple);
      for (i = 0; i < (int) ARRAY_SIZE (gnat_raise_decls_ext); i++)
        gnat_raise_decls_ext[i]
          = build_raise_check (i,
                               i == CE_Index_Check_Failed
                               || i == CE_Range_Check_Failed
                               || i == CE_Invalid_Data
                               ? exception_range : exception_column);
    }

  /* Build the special descriptor type and its null node if needed.  */
  if (TARGET_VTABLE_USES_DESCRIPTORS)
    {
      tree null_node = fold_convert (ptr_void_ftype, null_pointer_node);
      tree field_list = NULL_TREE;
      int j;
      vec<constructor_elt, va_gc> *null_vec = NULL;
      constructor_elt *elt;
      fdesc_type_node = make_node (RECORD_TYPE);
      vec_safe_grow (null_vec, TARGET_VTABLE_USES_DESCRIPTORS, true);
      /* Fill the constructor from the last slot backwards since the field
         list is built in reverse and nreverse'd below.  */
      elt = (null_vec->address () + TARGET_VTABLE_USES_DESCRIPTORS - 1);

      for (j = 0; j < TARGET_VTABLE_USES_DESCRIPTORS; j++)
        {
          tree field
            = create_field_decl (NULL_TREE, ptr_void_ftype, fdesc_type_node,
                                 NULL_TREE, NULL_TREE, 0, 1);
          DECL_CHAIN (field) = field_list;
          field_list = field;
          elt->index = field;
          elt->value = null_node;
          elt--;
        }

      finish_record_type (fdesc_type_node, nreverse (field_list), 0, false);
      record_builtin_type ("descriptor", fdesc_type_node, true);
      null_fdesc_node = gnat_build_constructor (fdesc_type_node, null_vec);
    }

  longest_float_type_node
    = get_unpadded_type (Base_Type (standard_long_long_float));

  main_identifier_node = get_identifier ("main");

  /* If we are using the GCC exception mechanism, let GCC know.  */
  if (Back_End_Exceptions ())
    gnat_init_gcc_eh ();

  /* Initialize the GCC support for FP operations.  */
  gnat_init_gcc_fp ();

  /* Install the builtins we might need, either internally or as user-available
     facilities for Intrinsic imports.  Note that this must be done after the
     GCC exception mechanism is initialized.  */
  gnat_install_builtins ();

  /* Seed the exception pointer and label stacks with an empty bottom
     entry.  */
  vec_safe_push (gnu_except_ptr_stack, NULL_TREE);
  gnu_constraint_error_label_stack.safe_push (Empty);
  gnu_storage_error_label_stack.safe_push (Empty);
  gnu_program_error_label_stack.safe_push (Empty);

  /* Process any Pragma Ident for the main unit.  */
  if (Present (Ident_String (Main_Unit)))
    targetm.asm_out.output_ident
      (TREE_STRING_POINTER (gnat_to_gnu (Ident_String (Main_Unit))));

  /* Force -fno-strict-aliasing if the configuration pragma was seen.  */
  if (No_Strict_Aliasing_CP)
    flag_strict_aliasing = 0;

  /* Save the current optimization options again after the above possible
     global_options changes.  */
  optimization_default_node
    = build_optimization_node (&global_options, &global_options_set);
  optimization_current_node = optimization_default_node;

  /* Now translate the compilation unit proper.  */
  Compilation_Unit_to_gnu (gnat_root);

  /* Then process the N_Validate_Unchecked_Conversion nodes.  We do this at
     the very end to avoid having to second-guess the front-end when we run
     into dummy nodes during the regular processing.  */
  for (i = 0; gnat_validate_uc_list.iterate (i, &gnat_iter); i++)
    validate_unchecked_conversion (gnat_iter);
  gnat_validate_uc_list.release ();

  /* Finally see if we have any elaboration procedures to deal with.  */
  for (info = elab_info_list; info; info = info->next)
    {
      tree gnu_body = DECL_SAVED_TREE (info->elab_proc);

      /* We should have a BIND_EXPR but it may not have any statements in it.
         If it doesn't have any, we have nothing to do except for setting the
         flag on the GNAT node.  Otherwise, process the function as others.  */
      tree gnu_stmts = gnu_body;
      if (TREE_CODE (gnu_stmts) == BIND_EXPR)
        gnu_stmts = BIND_EXPR_BODY (gnu_stmts);
      if (!gnu_stmts || empty_stmt_list_p (gnu_stmts))
        Set_Has_No_Elaboration_Code (info->gnat_node, 1);
      else
        {
          begin_subprog_body (info->elab_proc);
          end_subprog_body (gnu_body);
          rest_of_subprog_body_compilation (info->elab_proc);
        }
    }

  /* Destroy ourselves.  */
  file_map = NULL;
  destroy_gnat_decl ();
  destroy_gnat_utils ();

  /* We cannot track the location of errors past this point.  */
  Current_Error_Node = Empty;
}
/* Return a subprogram decl corresponding to __gnat_rcheck_xx for the given
   CHECK if KIND is EXCEPTION_SIMPLE, or else to __gnat_rcheck_xx_ext.  */

static tree
build_raise_check (int check, enum exception_info_kind kind)
{
  const char prefix[] = "__gnat_rcheck_";
  tree ftype;

  /* Assemble the routine name in the front-end's Name_Buffer:
     Get_RT_Exception_Name appends the text for CHECK at Name_Len.  */
  strcpy (Name_Buffer, prefix);
  Name_Len = sizeof (prefix) - 1;
  Get_RT_Exception_Name ((enum RT_Exception_Code) check);

  if (kind == exception_simple)
    {
      Name_Buffer[Name_Len] = 0;
      ftype = build_function_type_list (void_type_node,
                                        build_pointer_type (char_type_node),
                                        integer_type_node, NULL_TREE);
    }
  else
    {
      /* For the extended variants, the two extra parameters are integers
         except in the exception_column case, where the NULL_TREE ends the
         parameter list early.  */
      const tree extra
        = kind == exception_column ? NULL_TREE : integer_type_node;

      strcpy (Name_Buffer + Name_Len, "_ext");
      Name_Buffer[Name_Len + 4] = 0;
      ftype = build_function_type_list (void_type_node,
                                        build_pointer_type (char_type_node),
                                        integer_type_node, integer_type_node,
                                        extra, extra, NULL_TREE);
    }

  /* These routines raise an exception and therefore never return.  */
  ftype = build_qualified_type (ftype, TYPE_QUAL_VOLATILE);

  return create_subprog_decl (get_identifier (Name_Buffer), NULL_TREE, ftype,
                              NULL_TREE, is_default, true, true, true, false,
                              false, NULL, Empty);
}
/* Return a positive value if an lvalue is required for GNAT_NODE, which is
   an N_Attribute_Reference.  */

static int
lvalue_required_for_attribute_p (Node_Id gnat_node)
{
  int lvalue_needed;

  switch (Get_Attribute_Id (Attribute_Name (gnat_node)))
    {
    /* Attributes for which no lvalue is required for the prefix.  */
    case Attr_Pred:
    case Attr_Succ:
    case Attr_First:
    case Attr_Last:
    case Attr_Range_Length:
    case Attr_Length:
    case Attr_Object_Size:
    case Attr_Size:
    case Attr_Value_Size:
    case Attr_Component_Size:
    case Attr_Descriptor_Size:
    case Attr_Max_Size_In_Storage_Elements:
    case Attr_Min:
    case Attr_Max:
    case Attr_Null_Parameter:
    case Attr_Passed_By_Reference:
    case Attr_Mechanism_Code:
    case Attr_Machine:
    case Attr_Model:
      lvalue_needed = 0;
      break;

    /* Attributes listed below, as well as any attribute not explicitly
       handled above, require an lvalue for the prefix.  */
    case Attr_Address:
    case Attr_Access:
    case Attr_Unchecked_Access:
    case Attr_Unrestricted_Access:
    case Attr_Code_Address:
    case Attr_Pool_Address:
    case Attr_Alignment:
    case Attr_Bit_Position:
    case Attr_Position:
    case Attr_First_Bit:
    case Attr_Last_Bit:
    case Attr_Bit:
    case Attr_Asm_Input:
    case Attr_Asm_Output:
    default:
      lvalue_needed = 1;
      break;
    }

  return lvalue_needed;
}
/* Return a positive value if an lvalue is required for GNAT_NODE.  GNU_TYPE
   is the type that will be used for GNAT_NODE in the translated GNU tree.
   CONSTANT indicates whether the underlying object represented by GNAT_NODE
   is constant in the Ada sense.  If it is, ADDRESS_OF_CONSTANT indicates
   whether its value is the address of another constant.  If it isn't, then
   ADDRESS_OF_CONSTANT is ignored.

   The function climbs up the GNAT tree starting from the node and returns 1
   upon encountering a node that effectively requires an lvalue downstream.
   It returns int instead of bool to facilitate usage in non-purely binary
   logic contexts.  */

static int
lvalue_required_p (Node_Id gnat_node, tree gnu_type, bool constant,
                   bool address_of_constant)
{
  Node_Id gnat_parent = Parent (gnat_node), gnat_temp;

  /* Decide based on the context, i.e. the parent node.  */
  switch (Nkind (gnat_parent))
    {
    case N_Reference:
      /* The parent explicitly takes a reference to the node.  */
      return 1;

    case N_Attribute_Reference:
      return lvalue_required_for_attribute_p (gnat_parent);

    case N_Parameter_Association:
    case N_Function_Call:
    case N_Procedure_Call_Statement:
      /* If the parameter is by reference, an lvalue is required.  */
      return (!constant
              || must_pass_by_ref (gnu_type)
              || default_pass_by_ref (gnu_type));

    case N_Pragma_Argument_Association:
      /* Climb up to the enclosing N_Pragma to decide.  */
      return lvalue_required_p (gnat_parent, gnu_type, constant,
                                address_of_constant);

    case N_Pragma:
      /* Among recognized pragmas, only Inspection_Point requires the
         argument to live in memory.  */
      if (Is_Pragma_Name (Chars (Pragma_Identifier (gnat_parent))))
        {
          const unsigned char id
            = Get_Pragma_Id (Chars (Pragma_Identifier (gnat_parent)));
          return id == Pragma_Inspection_Point;
        }
      else
        return 0;

    case N_Indexed_Component:
      /* Only the array expression can require an lvalue.  */
      if (Prefix (gnat_parent) != gnat_node)
        return 0;

      /* ??? Consider that referencing an indexed component with a variable
         index forces the whole aggregate to memory.  Note that testing only
         for literals is conservative, any static expression in the RM sense
         could probably be accepted with some additional work.  */
      for (gnat_temp = First (Expressions (gnat_parent));
           Present (gnat_temp);
           gnat_temp = Next (gnat_temp))
        if (Nkind (gnat_temp) != N_Character_Literal
            && Nkind (gnat_temp) != N_Integer_Literal
            && !(Is_Entity_Name (gnat_temp)
                 && Ekind (Entity (gnat_temp)) == E_Enumeration_Literal))
          return 1;

      /* ... fall through ... */

    case N_Selected_Component:
    case N_Slice:
      /* Only the prefix expression can require an lvalue.  */
      if (Prefix (gnat_parent) != gnat_node)
        return 0;

      /* Otherwise keep climbing with the parent's own type.  */
      return lvalue_required_p (gnat_parent,
                                get_unpadded_type (Etype (gnat_parent)),
                                constant, address_of_constant);

    case N_Object_Renaming_Declaration:
      /* We need to preserve addresses through a renaming.  */
      return 1;

    case N_Object_Declaration:
      /* We cannot use a constructor if this is an atomic object because
         the actual assignment might end up being done component-wise.  */
      return (!constant
              ||(Is_Composite_Type (Underlying_Type (Etype (gnat_node)))
                 && Is_Full_Access (Defining_Entity (gnat_parent)))
              /* We don't use a constructor if this is a class-wide object
                 because the effective type of the object is the equivalent
                 type of the class-wide subtype and it smashes most of the
                 data into an array of bytes to which we cannot convert.  */
              || Ekind ((Etype (Defining_Entity (gnat_parent))))
                 == E_Class_Wide_Subtype);

    case N_Assignment_Statement:
      /* We cannot use a constructor if the LHS is an atomic object because
         the actual assignment might end up being done component-wise.  */
      return (!constant
              || Name (gnat_parent) == gnat_node
              || (Is_Composite_Type (Underlying_Type (Etype (gnat_node)))
                  && Is_Entity_Name (Name (gnat_parent))
                  && Is_Full_Access (Entity (Name (gnat_parent)))));

    case N_Unchecked_Type_Conversion:
      if (!constant)
        return 1;

      /* ... fall through ... */

    case N_Type_Conversion:
    case N_Qualified_Expression:
      /* We must look through all conversions because we may need to bypass
         an intermediate conversion that is meant to be purely formal.  */
      return lvalue_required_p (gnat_parent,
                                get_unpadded_type (Etype (gnat_parent)),
                                constant, address_of_constant);

    case N_Explicit_Dereference:
      /* We look through dereferences for address of constant because we need
         to handle the special cases listed above.  */
      if (constant && address_of_constant)
        return lvalue_required_p (gnat_parent,
                                  get_unpadded_type (Etype (gnat_parent)),
                                  true, false);

      /* ... fall through ... */

    default:
      return 0;
    }

  gcc_unreachable ();
}
/* Return true if an lvalue should be used for GNAT_NODE.  GNU_TYPE is the type
   that will be used for GNAT_NODE in the translated GNU tree and is assumed to
   be an aggregate type.

   The function climbs up the GNAT tree starting from the node and returns true
   upon encountering a node that makes it doable to decide.  lvalue_required_p
   should have been previously invoked on the arguments and returned false.  */

static bool
lvalue_for_aggregate_p (Node_Id gnat_node, tree gnu_type)
{
  Node_Id gnat_parent = Parent (gnat_node);

  /* Decide based on the context, i.e. the parent node.  */
  switch (Nkind (gnat_parent))
    {
    case N_Parameter_Association:
    case N_Function_Call:
    case N_Procedure_Call_Statement:
      /* Even if the parameter is by copy, prefer an lvalue.  */
      return true;

    case N_Simple_Return_Statement:
      /* Likewise for a return value.  */
      return true;

    case N_Indexed_Component:
    case N_Selected_Component:
      /* If an elementary component is used, take it from the constant.  */
      if (!Is_Composite_Type (Underlying_Type (Etype (gnat_parent))))
        return false;

      /* ... fall through ... */

    case N_Slice:
      /* For a composite component or a slice, keep climbing with the
         parent's own type.  */
      return lvalue_for_aggregate_p (gnat_parent,
                                     get_unpadded_type (Etype (gnat_parent)));

    case N_Object_Declaration:
      /* For an aggregate object declaration, return false consistently.  */
      return false;

    case N_Assignment_Statement:
      /* For an aggregate assignment, decide based on the size: use an
         lvalue when it is unknown or large.  */
      {
        const HOST_WIDE_INT size = int_size_in_bytes (gnu_type);
        return size < 0 || size >= param_large_stack_frame / 4;
      }

    case N_Unchecked_Type_Conversion:
    case N_Type_Conversion:
    case N_Qualified_Expression:
      /* Look through conversions and decide in the context above them.  */
      return lvalue_for_aggregate_p (gnat_parent,
                                     get_unpadded_type (Etype (gnat_parent)));

    case N_Allocator:
      /* We should only reach here through the N_Qualified_Expression case.
         Force an lvalue for aggregate types since a block-copy to the newly
         allocated area of memory is made.  */
      return true;

    default:
      return false;
    }

  gcc_unreachable ();
}
/* Return true if T is a constant DECL node that can be safely replaced
   by its initializer.  */

static bool
constant_decl_with_initializer_p (tree t)
{
  /* Only constant DECL nodes that actually carry an initializer qualify.  */
  if (!(TREE_CONSTANT (t) && DECL_P (t) && DECL_INITIAL (t)))
    return false;

  const tree type = TREE_TYPE (t);

  /* Aggregate types that contain a placeholder are rejected since their
     initializers cannot be manipulated easily; fat pointers are fine.  */
  return !(AGGREGATE_TYPE_P (type)
	   && !TYPE_IS_FAT_POINTER_P (type)
	   && type_contains_placeholder_p (type));
}
/* Return an expression equivalent to EXP but where constant DECL nodes
   have been replaced by their initializer.  */

static tree
fold_constant_decl_in_expr (tree exp)
{
  const enum tree_code code = TREE_CODE (exp);
  tree inner;

  switch (code)
    {
    case CONST_DECL:
    case VAR_DECL:
      /* The leaf case: substitute the initializer when it is safe.  */
      return constant_decl_with_initializer_p (exp)
	     ? DECL_INITIAL (exp) : exp;

    case COMPONENT_REF:
      inner = fold_constant_decl_in_expr (TREE_OPERAND (exp, 0));
      if (inner == TREE_OPERAND (exp, 0))
	return exp;

      return fold_build3 (COMPONENT_REF, TREE_TYPE (exp), inner,
			  TREE_OPERAND (exp, 1), NULL_TREE);

    case BIT_FIELD_REF:
      inner = fold_constant_decl_in_expr (TREE_OPERAND (exp, 0));
      if (inner == TREE_OPERAND (exp, 0))
	return exp;

      return fold_build3 (BIT_FIELD_REF, TREE_TYPE (exp), inner,
			  TREE_OPERAND (exp, 1), TREE_OPERAND (exp, 2));

    case ARRAY_REF:
    case ARRAY_RANGE_REF:
      /* If the index is not itself constant, then nothing can be folded.  */
      if (!TREE_CONSTANT (TREE_OPERAND (exp, 1)))
	return exp;

      inner = fold_constant_decl_in_expr (TREE_OPERAND (exp, 0));
      if (inner == TREE_OPERAND (exp, 0))
	return exp;

      return fold (build4 (code, TREE_TYPE (exp), inner, TREE_OPERAND (exp, 1),
			   TREE_OPERAND (exp, 2), NULL_TREE));

    case REALPART_EXPR:
    case IMAGPART_EXPR:
    case VIEW_CONVERT_EXPR:
      inner = fold_constant_decl_in_expr (TREE_OPERAND (exp, 0));
      if (inner == TREE_OPERAND (exp, 0))
	return exp;

      return fold_build1 (code, TREE_TYPE (exp), inner);

    default:
      return exp;
    }

  gcc_unreachable ();
}
/* Return true if TYPE and DEF_TYPE are compatible GNAT types for Gigi.  */

static bool
Gigi_Types_Compatible (Entity_Id type, Entity_Id def_type)
{
  /* Accept immediately: the trivial case, a class-wide type (which is
     equivalent to a subtype of itself), a packed array type paired with
     its implementation type, and two Itypes (one may be a copy of the
     other).  */
  if (type == def_type
      || Is_Class_Wide_Type (type)
      || (Is_Packed (def_type) && type == Packed_Array_Impl_Type (def_type))
      || (Is_Itype (def_type) && Is_Itype (type)))
    return true;

  /* If the type is incomplete and comes from a limited context, then also
     consider its non-limited view.  */
  if (Is_Incomplete_Type (def_type)
      && From_Limited_With (def_type)
      && Present (Non_Limited_View (def_type)))
    return Gigi_Types_Compatible (type, Non_Limited_View (def_type));

  /* If the type is incomplete/private, then also consider its full view.  */
  if (Is_Incomplete_Or_Private_Type (def_type)
      && Present (Full_View (def_type)))
    return Gigi_Types_Compatible (type, Full_View (def_type));

  return false;
}
/* Subroutine of gnat_to_gnu to translate GNAT_NODE, an N_Identifier,
   to a GCC tree, which is returned.  GNU_RESULT_TYPE_P is a pointer
   to where we should place the result type.  */

static tree
Identifier_to_gnu (Node_Id gnat_node, tree *gnu_result_type_p)
{
  /* The entity of GNAT_NODE and its type.  */
  Node_Id gnat_entity = (Nkind (gnat_node) == N_Defining_Identifier
			 || Nkind (gnat_node) == N_Defining_Operator_Symbol)
			? gnat_node : Entity (gnat_node);
  Node_Id gnat_entity_type = Etype (gnat_entity);
  /* If GNAT_NODE is a constant, whether we should use the initialization
     value instead of the constant entity, typically for scalars with an
     address clause when the parent doesn't require an lvalue.  */
  bool use_constant_initializer = false;
  /* Whether we should require an lvalue for GNAT_NODE.  Needed in
     specific circumstances only, so evaluated lazily.  < 0 means
     unknown, > 0 means known true, 0 means known false.  */
  int require_lvalue = -1;
  Entity_Id gnat_result_type;
  tree gnu_result, gnu_result_type;

  /* If the Etype of this node is not the same as that of the Entity, then
     something went wrong, probably in generic instantiation.  However, this
     does not apply to types.  Since we sometimes have strange Ekind's, just
     do this test for objects, except for discriminants because their type
     may have been changed to a subtype by Exp_Ch3.Adjust_Discriminants.  */
  gcc_assert (!Is_Object (gnat_entity)
	      || Ekind (gnat_entity) == E_Discriminant
	      || Etype (gnat_node) == gnat_entity_type
	      || Gigi_Types_Compatible (Etype (gnat_node), gnat_entity_type));

  /* If this is a reference to a deferred constant whose partial view is an
     unconstrained private type, the proper type is on the full view of the
     constant, not on the full view of the type, which may be unconstrained.

     This may be a reference to a type, for example in the prefix of the
     attribute Position, generated for dispatching code (see Make_DT in
     exp_disp.adb).  In that case we need the type itself, not its parent,
     in particular if it is a derived type.  */
  if (Ekind (gnat_entity) == E_Constant
      && Is_Private_Type (gnat_entity_type)
      && (Has_Unknown_Discriminants (gnat_entity_type)
	  || (Present (Full_View (gnat_entity_type))
	      && Has_Discriminants (Full_View (gnat_entity_type))))
      && Present (Full_View (gnat_entity)))
    {
      gnat_entity = Full_View (gnat_entity);
      gnat_result_type = Etype (gnat_entity);
    }
  else
    {
      /* We use the Actual_Subtype only if it has already been elaborated,
	 as we may be invoked precisely during its elaboration, otherwise
	 the Etype.  Avoid using it for packed arrays to simplify things,
	 except in a return statement because we need the actual size and
	 the front-end does not make it explicit in this case.  */
      if ((Ekind (gnat_entity) == E_Constant
	   || Ekind (gnat_entity) == E_Variable
	   || Is_Formal (gnat_entity))
	  && !(Is_Array_Type (Etype (gnat_entity))
	       && Present (Packed_Array_Impl_Type (Etype (gnat_entity)))
	       && Nkind (Parent (gnat_node)) != N_Simple_Return_Statement)
	  && Present (Actual_Subtype (gnat_entity))
	  && present_gnu_tree (Actual_Subtype (gnat_entity)))
	gnat_result_type = Actual_Subtype (gnat_entity);
      else
	gnat_result_type = Etype (gnat_node);
    }

  /* Expand the type of this identifier first, in case it is an enumeral
     literal, which only get made when the type is expanded.  There is no
     order-of-elaboration issue here.  */
  gnu_result_type = get_unpadded_type (gnat_result_type);

  /* If this is a non-imported elementary constant with an address clause,
     retrieve the value instead of a pointer to be dereferenced unless
     an lvalue is required.  This is generally more efficient and actually
     required if this is a static expression because it might be used
     in a context where a dereference is inappropriate, such as a case
     statement alternative or a record discriminant.  There is no possible
     volatile-ness short-circuit here since Volatile constants must be
     imported per C.6.  */
  if (Ekind (gnat_entity) == E_Constant
      && Is_Elementary_Type (gnat_result_type)
      && !Is_Imported (gnat_entity)
      && Present (Address_Clause (gnat_entity)))
    {
      require_lvalue
	= lvalue_required_p (gnat_node, gnu_result_type, true, false);
      use_constant_initializer = !require_lvalue;
    }

  if (use_constant_initializer)
    {
      /* If this is a deferred constant, the initializer is attached to
	 the full view.  */
      if (Present (Full_View (gnat_entity)))
	gnat_entity = Full_View (gnat_entity);

      gnu_result = gnat_to_gnu (Expression (Declaration_Node (gnat_entity)));
    }
  else
    gnu_result = gnat_to_gnu_entity (gnat_entity, NULL_TREE, false);

  /* Some objects (such as parameters passed by reference, globals of
     variable size, and renamed objects) actually represent the address
     of the object.  In that case, we must do the dereference.  Likewise,
     deal with parameters to foreign convention subprograms.  */
  if (DECL_P (gnu_result)
      && (DECL_BY_REF_P (gnu_result)
	  || (TREE_CODE (gnu_result) == PARM_DECL
	      && DECL_BY_COMPONENT_PTR_P (gnu_result))))
    {
      const bool read_only = DECL_POINTS_TO_READONLY_P (gnu_result);

      /* If it's a PARM_DECL to foreign convention subprogram, convert it.  */
      if (TREE_CODE (gnu_result) == PARM_DECL
	  && DECL_BY_COMPONENT_PTR_P (gnu_result))
	gnu_result
	  = convert (build_pointer_type (gnu_result_type), gnu_result);

      /* If it's a CONST_DECL, return the underlying constant like below.  */
      else if (TREE_CODE (gnu_result) == CONST_DECL
	       && !(DECL_CONST_ADDRESS_P (gnu_result)
		    && lvalue_required_p (gnat_node, gnu_result_type, true,
					  true)))
	gnu_result = DECL_INITIAL (gnu_result);

      /* Do the final dereference.  */
      gnu_result = build_unary_op (INDIRECT_REF, NULL_TREE, gnu_result);

      /* The dereference cannot trap unless the object's address was
	 explicitly overridden with an address clause.  */
      if ((TREE_CODE (gnu_result) == INDIRECT_REF
	   || TREE_CODE (gnu_result) == UNCONSTRAINED_ARRAY_REF)
	  && No (Address_Clause (gnat_entity)))
	TREE_THIS_NOTRAP (gnu_result) = 1;

      if (read_only)
	TREE_READONLY (gnu_result) = 1;
    }

  /* If we have a constant declaration and its initializer, try to return the
     latter to avoid the need to call fold in lots of places and the need for
     elaboration code if this identifier is used as an initializer itself.  */
  if (constant_decl_with_initializer_p (gnu_result))
    {
      bool constant_only = (TREE_CODE (gnu_result) == CONST_DECL
			    && !DECL_CONST_CORRESPONDING_VAR (gnu_result));
      bool address_of_constant = (TREE_CODE (gnu_result) == CONST_DECL
				  && DECL_CONST_ADDRESS_P (gnu_result));

      /* If there is a (corresponding) variable or this is the address of a
	 constant, we only want to return the initializer if an lvalue isn't
	 required.  Evaluate this now if we have not already done so.  */
      if ((!constant_only || address_of_constant) && require_lvalue < 0)
	require_lvalue
	  = lvalue_required_p (gnat_node, gnu_result_type, true,
			       address_of_constant)
	    || (AGGREGATE_TYPE_P (gnu_result_type)
		&& lvalue_for_aggregate_p (gnat_node, gnu_result_type));

      /* Finally retrieve the initializer if this is deemed valid.  */
      if ((constant_only && !address_of_constant) || !require_lvalue)
	gnu_result = DECL_INITIAL (gnu_result);
    }

  /* But for a constant renaming we couldn't do that incrementally for its
     definition because of the need to return an lvalue so, if the present
     context doesn't itself require an lvalue, we try again here.  */
  else if (Ekind (gnat_entity) == E_Constant
	   && Is_Elementary_Type (gnat_result_type)
	   && Present (Renamed_Object (gnat_entity)))
    {
      if (require_lvalue < 0)
	require_lvalue
	  = lvalue_required_p (gnat_node, gnu_result_type, true, false);

      if (!require_lvalue)
	gnu_result = fold_constant_decl_in_expr (gnu_result);
    }

  /* The GNAT tree has the type of a function set to its result type, so we
     adjust here.  Also use the type of the result if the Etype is a subtype
     that is nominally unconstrained.  Likewise if this is a deferred constant
     of a discriminated type whose full view can be elaborated statically, to
     avoid problematic conversions to the nominal subtype.  But remove any
     padding from the resulting type.  */
  if (FUNC_OR_METHOD_TYPE_P (TREE_TYPE (gnu_result))
      || Is_Constr_Subt_For_UN_Aliased (gnat_result_type)
      || (Ekind (gnat_entity) == E_Constant
	  && Present (Full_View (gnat_entity))
	  && Has_Discriminants (gnat_result_type)
	  && TREE_CODE (gnu_result) == CONSTRUCTOR))
    {
      gnu_result_type = TREE_TYPE (gnu_result);
      if (TYPE_IS_PADDING_P (gnu_result_type))
	gnu_result_type = TREE_TYPE (TYPE_FIELDS (gnu_result_type));
    }

  *gnu_result_type_p = gnu_result_type;
  return gnu_result;
}
/* Subroutine of gnat_to_gnu to process GNAT_NODE, an N_Pragma.  Return
   any statements we generate.  */

static tree
Pragma_to_gnu (Node_Id gnat_node)
{
  tree gnu_result = alloc_stmt_list ();
  Node_Id gnat_temp;

  /* Check for (and ignore) unrecognized pragmas.  */
  if (!Is_Pragma_Name (Chars (Pragma_Identifier (gnat_node))))
    return gnu_result;

  const unsigned char id
    = Get_Pragma_Id (Chars (Pragma_Identifier (gnat_node)));

  /* Save the expression of pragma Compile_Time_{Error|Warning} for later.  */
  if (id == Pragma_Compile_Time_Error || id == Pragma_Compile_Time_Warning)
    {
      gnat_temp = First (Pragma_Argument_Associations (gnat_node));
      gnat_compile_time_expr_list.safe_push (Expression (gnat_temp));
      return gnu_result;
    }

  /* Stop there if we are just annotating types.  */
  if (type_annotate_only)
    return gnu_result;

  switch (id)
    {
    case Pragma_Inspection_Point:
      /* Do nothing at top level: all such variables are already viewable.  */
      if (global_bindings_p ())
	break;

      for (gnat_temp = First (Pragma_Argument_Associations (gnat_node));
	   Present (gnat_temp);
	   gnat_temp = Next (gnat_temp))
	{
	  Node_Id gnat_expr = Expression (gnat_temp);
	  tree gnu_expr = gnat_to_gnu (gnat_expr);
	  tree asm_constraint = NULL_TREE;
#ifdef ASM_COMMENT_START
	  char *comment;
#endif
	  gnu_expr = maybe_unconstrained_array (gnu_expr);

	  /* Use the variable behind the constant, if any, so that the
	     inspected object lives in memory.  */
	  if (TREE_CODE (gnu_expr) == CONST_DECL
	      && DECL_CONST_CORRESPONDING_VAR (gnu_expr))
	    gnu_expr = DECL_CONST_CORRESPONDING_VAR (gnu_expr);

	  gnat_mark_addressable (gnu_expr);

#ifdef ASM_COMMENT_START
	  comment = concat (ASM_COMMENT_START,
			    " inspection point: ",
			    Get_Name_String (Chars (gnat_expr)),
			    " is at %0",
			    NULL);
	  asm_constraint = build_string (strlen (comment), comment);
	  free (comment);
#endif
	  /* Emit a volatile asm with a "m" input so the object is kept
	     live and addressable at this point of the program.  */
	  gnu_expr = build5 (ASM_EXPR, void_type_node,
			     asm_constraint,
			     NULL_TREE,
			     tree_cons
			     (build_tree_list (NULL_TREE,
					       build_string (1, "m")),
			      gnu_expr, NULL_TREE),
			     NULL_TREE, NULL_TREE);
	  ASM_VOLATILE_P (gnu_expr) = 1;
	  set_expr_location_from_node (gnu_expr, gnat_node);
	  append_to_statement_list (gnu_expr, &gnu_result);
	}
      break;

    case Pragma_Loop_Optimize:
      /* Set the corresponding flag on the innermost enclosing loop for
	 each argument of the pragma.  */
      for (gnat_temp = First (Pragma_Argument_Associations (gnat_node));
	   Present (gnat_temp);
	   gnat_temp = Next (gnat_temp))
	{
	  tree gnu_loop_stmt = gnu_loop_stack->last ()->stmt;

	  switch (Chars (Expression (gnat_temp)))
	    {
	    case Name_Ivdep:
	      LOOP_STMT_IVDEP (gnu_loop_stmt) = 1;
	      break;

	    case Name_No_Unroll:
	      LOOP_STMT_NO_UNROLL (gnu_loop_stmt) = 1;
	      break;

	    case Name_Unroll:
	      LOOP_STMT_UNROLL (gnu_loop_stmt) = 1;
	      break;

	    case Name_No_Vector:
	      LOOP_STMT_NO_VECTOR (gnu_loop_stmt) = 1;
	      break;

	    case Name_Vector:
	      LOOP_STMT_VECTOR (gnu_loop_stmt) = 1;
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}
      break;

    case Pragma_Optimize:
      /* Check that the command-line optimization level is consistent with
	 the pragma; only diagnostics are emitted here.  */
      switch (Chars (Expression
		     (First (Pragma_Argument_Associations (gnat_node)))))
	{
	case Name_Off:
	  if (optimize)
	    post_error ("must specify -O0??", gnat_node);
	  break;

	case Name_Space:
	  if (!optimize_size)
	    post_error ("must specify -Os??", gnat_node);
	  break;

	case Name_Time:
	  if (!optimize)
	    post_error ("insufficient -O value??", gnat_node);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;

    case Pragma_Reviewable:
      if (write_symbols == NO_DEBUG)
	post_error ("must specify -g??", gnat_node);
      break;

    case Pragma_Warning_As_Error:
    case Pragma_Warnings:
      {
	Node_Id gnat_expr;
	/* Preserve the location of the pragma.  */
	const location_t location = input_location;
	struct cl_option_handlers handlers;
	unsigned int option_index;
	diagnostic_t kind;
	bool imply;

	gnat_temp = First (Pragma_Argument_Associations (gnat_node));

	/* This is the String form: pragma Warning{s|_As_Error}(String).  */
	if (Nkind (Expression (gnat_temp)) == N_String_Literal)
	  {
	    switch (id)
	      {
	      case Pragma_Warning_As_Error:
		kind = DK_ERROR;
		imply = false;
		break;

	      case Pragma_Warnings:
		kind = DK_WARNING;
		imply = true;
		break;

	      default:
		gcc_unreachable ();
	      }

	    gnat_expr = Expression (gnat_temp);
	  }

	/* This is the On/Off form: pragma Warnings (On | Off [,String]).  */
	else if (Nkind (Expression (gnat_temp)) == N_Identifier)
	  {
	    switch (Chars (Expression (gnat_temp)))
	      {
	      case Name_Off:
		kind = DK_IGNORED;
		break;

	      case Name_On:
		kind = DK_WARNING;
		break;

	      default:
		gcc_unreachable ();
	      }

	    /* Deal with optional pattern (but ignore Reason => "...").  */
	    if (Present (Next (gnat_temp))
		&& Chars (Next (gnat_temp)) != Name_Reason)
	      {
		/* pragma Warnings (On | Off, Name) is handled differently.  */
		if (Nkind (Expression (Next (gnat_temp))) != N_String_Literal)
		  break;

		gnat_expr = Expression (Next (gnat_temp));
	      }
	    else
	      {
		gnat_expr = Empty;

		/* For pragma Warnings (Off), we save the current state...  */
		if (kind == DK_IGNORED)
		  diagnostic_push_diagnostics (global_dc, location);

		/* ...so that, for pragma Warnings (On), we do not enable all
		   the warnings but just restore the previous state.  */
		else
		  {
		    diagnostic_pop_diagnostics (global_dc, location);
		    break;
		  }
	      }

	    imply = false;
	  }

	else
	  gcc_unreachable ();

	/* This is the same implementation as in the C family of compilers.  */
	const unsigned int lang_mask = CL_Ada | CL_COMMON;
	const char *arg = NULL;
	if (Present (gnat_expr))
	  {
	    tree gnu_expr = gnat_to_gnu (gnat_expr);
	    const char *option_string = TREE_STRING_POINTER (gnu_expr);
	    const int len = TREE_STRING_LENGTH (gnu_expr);

	    /* The string must look like a -W switch to be usable here.  */
	    if (len < 3 || option_string[0] != '-' || option_string[1] != 'W')
	      break;
	    option_index = find_opt (option_string + 1, lang_mask);
	    if (option_index == OPT_SPECIAL_unknown)
	      {
		post_error ("unknown -W switch??", gnat_node);
		break;
	      }
	    else if (!(cl_options[option_index].flags & CL_WARNING))
	      {
		post_error ("-W switch does not control warning??", gnat_node);
		break;
	      }
	    else if (!(cl_options[option_index].flags & lang_mask))
	      {
		post_error ("-W switch not valid for Ada??", gnat_node);
		break;
	      }

	    /* For a joined option (-Wfoo=bar), pass the trailing part.  */
	    if (cl_options[option_index].flags & CL_JOINED)
	      arg = option_string + 1 + cl_options[option_index].opt_len;
	  }
	else
	  option_index = 0;

	set_default_handlers (&handlers, NULL);
	control_warning_option (option_index, (int) kind, arg, imply, location,
				lang_mask, &handlers, &global_options,
				&global_options_set, global_dc);
      }
      break;

    default:
      break;
    }

  return gnu_result;
}
/* Check the inline status of nested function FNDECL wrt its parent function.

   If a non-inline nested function is referenced from an inline external
   function, we cannot honor both requests at the same time without cloning
   the nested function in the current unit since it is private to its unit.
   We could inline it as well but it's probably better to err on the side
   of too little inlining.

   This must be done only on nested functions present in the source code
   and not on nested functions generated by the compiler, e.g. finalizers,
   because they may be not marked inline and we don't want them to block
   the inlining of the parent function.  */

static void
check_inlining_for_nested_subprog (tree fndecl)
{
  /* Skip compiler-generated subprograms and those already marked inline.  */
  if (DECL_IGNORED_P (current_function_decl)
      || DECL_IGNORED_P (fndecl)
      || DECL_DECLARED_INLINE_P (fndecl))
    return;

  tree parent_decl = decl_function_context (fndecl);

  /* Only an external inline parent creates the conflict.  */
  if (!(DECL_EXTERNAL (parent_decl) && DECL_DECLARED_INLINE_P (parent_decl)))
    return;

  const location_t loc1 = DECL_SOURCE_LOCATION (fndecl);
  const location_t loc2 = DECL_SOURCE_LOCATION (parent_decl);

  /* A hard requirement (always_inline) gets an error, a mere request
     gets a -Winline warning; in both cases the parent is made
     uninlinable below.  */
  if (lookup_attribute ("always_inline", DECL_ATTRIBUTES (parent_decl)))
    {
      error_at (loc1, "subprogram %q+F not marked %<Inline_Always%>",
		fndecl);
      error_at (loc2, "parent subprogram cannot be inlined");
    }
  else
    {
      warning_at (loc1, OPT_Winline, "subprogram %q+F not marked %<Inline%>",
		  fndecl);
      warning_at (loc2, OPT_Winline, "parent subprogram cannot be inlined");
    }

  DECL_DECLARED_INLINE_P (parent_decl) = 0;
  DECL_UNINLINABLE (parent_decl) = 1;
}
/* Return an expression for the length of TYPE, an integral type, computed in
   RESULT_TYPE, another integral type.

   We used to compute the length as MAX (hb - lb + 1, 0) which could overflow
   when lb == TYPE'First.  We now compute it as (hb >= lb) ? hb - lb + 1 : 0
   which would only overflow in much rarer cases, for extremely large arrays
   we expect never to encounter in practice.  Besides, the former computation
   required the use of potentially constraining signed arithmetics while the
   latter does not.  Note that the comparison must be done in the original
   base index type in order to avoid any overflow during the conversion.  */

static tree
get_type_length (tree type, tree result_type)
{
  tree calc_type = get_base_type (result_type);
  tree index_type = maybe_character_type (get_base_type (type));
  tree low = convert (index_type, TYPE_MIN_VALUE (type));
  tree high = convert (index_type, TYPE_MAX_VALUE (type));

  /* hb - lb + 1, computed in the base type of the result.  */
  tree raw_length
    = build_binary_op (PLUS_EXPR, calc_type,
		       build_binary_op (MINUS_EXPR, calc_type,
					convert (calc_type, high),
					convert (calc_type, low)),
		       build_int_cst (calc_type, 1));

  /* Guard against a superflat range: the comparison is done in the
     original base index type so it cannot overflow.  */
  return build_cond_expr (result_type,
			  build_binary_op (GE_EXPR, boolean_type_node,
					   high, low),
			  convert (result_type, raw_length),
			  build_int_cst (result_type, 0));
}
/* Subroutine of gnat_to_gnu to translate GNAT_NODE, an N_Attribute node,
to a GCC tree, which is returned. GNU_RESULT_TYPE_P is a pointer to
where we should place the result type. ATTRIBUTE is the attribute ID. */
static tree
Attribute_to_gnu (Node_Id gnat_node, tree *gnu_result_type_p, int attribute)
{
const Node_Id gnat_prefix = Prefix (gnat_node);
tree gnu_prefix = gnat_to_gnu (gnat_prefix);
tree gnu_type = TREE_TYPE (gnu_prefix);
tree gnu_expr, gnu_result_type, gnu_result = error_mark_node;
bool prefix_unused = false;
/* If the input is a NULL_EXPR, make a new one. */
if (TREE_CODE (gnu_prefix) == NULL_EXPR)
{
gnu_result_type = get_unpadded_type (Etype (gnat_node));
*gnu_result_type_p = gnu_result_type;
return build1 (NULL_EXPR, gnu_result_type, TREE_OPERAND (gnu_prefix, 0));
}
switch (attribute)
{
case Attr_Pred:
case Attr_Succ:
/* These just add or subtract the constant 1 since representation
clauses for enumeration types are handled in the front-end. */
gnu_expr = gnat_to_gnu (First (Expressions (gnat_node)));
gnu_result_type = get_unpadded_type (Etype (gnat_node));
gnu_type = maybe_character_type (gnu_result_type);
if (TREE_TYPE (gnu_expr) != gnu_type)
gnu_expr = convert (gnu_type, gnu_expr);
gnu_result
= build_binary_op (attribute == Attr_Pred ? MINUS_EXPR : PLUS_EXPR,
gnu_type, gnu_expr, build_int_cst (gnu_type, 1));
break;
case Attr_Address:
case Attr_Unrestricted_Access:
/* Conversions don't change the address of references but can cause
build_unary_op to miss the references below, so strip them off.
On the contrary, if the address-of operation causes a temporary
to be created, then it must be created with the proper type. */
gnu_expr = remove_conversions (gnu_prefix,
!Must_Be_Byte_Aligned (gnat_node));
if (REFERENCE_CLASS_P (gnu_expr))
gnu_prefix = gnu_expr;
/* If we are taking 'Address of an unconstrained object, this is the
pointer to the underlying array. */
if (attribute == Attr_Address)
gnu_prefix = maybe_unconstrained_array (gnu_prefix);
/* If we are building a static dispatch table, we have to honor
TARGET_VTABLE_USES_DESCRIPTORS if we want to be compatible
with the C++ ABI. We do it in the non-static case as well,
see gnat_to_gnu_entity, case E_Access_Subprogram_Type. */
else if (TARGET_VTABLE_USES_DESCRIPTORS
&& Is_Dispatch_Table_Entity (Etype (gnat_node)))
{
tree gnu_field, t;
/* Descriptors can only be built here for top-level functions. */
bool build_descriptor = (global_bindings_p () != 0);
int i;
vec<constructor_elt, va_gc> *gnu_vec = NULL;
constructor_elt *elt;
gnu_result_type = get_unpadded_type (Etype (gnat_node));
/* If we're not going to build the descriptor, we have to retrieve
the one which will be built by the linker (or by the compiler
later if a static chain is requested). */
if (!build_descriptor)
{
gnu_result = build_unary_op (ADDR_EXPR, NULL_TREE, gnu_prefix);
gnu_result = fold_convert (build_pointer_type (gnu_result_type),
gnu_result);
gnu_result = build1 (INDIRECT_REF, gnu_result_type, gnu_result);
}
vec_safe_grow (gnu_vec, TARGET_VTABLE_USES_DESCRIPTORS, true);
elt = (gnu_vec->address () + TARGET_VTABLE_USES_DESCRIPTORS - 1);
for (gnu_field = TYPE_FIELDS (gnu_result_type), i = 0;
i < TARGET_VTABLE_USES_DESCRIPTORS;
gnu_field = DECL_CHAIN (gnu_field), i++)
{
if (build_descriptor)
{
t = build2 (FDESC_EXPR, TREE_TYPE (gnu_field), gnu_prefix,
build_int_cst (NULL_TREE, i));
TREE_CONSTANT (t) = 1;
}
else
t = build3 (COMPONENT_REF, ptr_void_ftype, gnu_result,
gnu_field, NULL_TREE);
elt->index = gnu_field;
elt->value = t;
elt--;
}
gnu_result = gnat_build_constructor (gnu_result_type, gnu_vec);
break;
}
/* ... fall through ... */
case Attr_Access:
case Attr_Unchecked_Access:
case Attr_Code_Address:
/* Taking the address of a type does not make sense. */
gcc_assert (TREE_CODE (gnu_prefix) != TYPE_DECL);
gnu_result_type = get_unpadded_type (Etype (gnat_node));
gnu_result
= build_unary_op (((attribute == Attr_Address
|| attribute == Attr_Unrestricted_Access)
&& !Must_Be_Byte_Aligned (gnat_node))
? ATTR_ADDR_EXPR : ADDR_EXPR,
gnu_result_type, gnu_prefix);
/* For 'Code_Address, find an inner ADDR_EXPR and mark it so that we
don't try to build a trampoline. */
if (attribute == Attr_Code_Address)
{
gnu_expr = remove_conversions (gnu_result, false);
if (TREE_CODE (gnu_expr) == ADDR_EXPR)
TREE_NO_TRAMPOLINE (gnu_expr) = TREE_CONSTANT (gnu_expr) = 1;
/* On targets for which function symbols denote a descriptor, the
code address is stored within the first slot of the descriptor
so we do an additional dereference:
result = *((result_type *) result)
where we expect result to be of some pointer type already. */
if (targetm.calls.custom_function_descriptors == 0)
gnu_result
= build_unary_op (INDIRECT_REF, NULL_TREE,
convert (build_pointer_type (gnu_result_type),
gnu_result));
}
/* For 'Access, issue an error message if the prefix is a C++ method
since it can use a special calling convention on some platforms,
which cannot be propagated to the access type. */
else if (attribute == Attr_Access
&& TREE_CODE (TREE_TYPE (gnu_prefix)) == METHOD_TYPE)
post_error ("access to C++ constructor or member function not allowed",
gnat_node);
/* For other address attributes applied to a nested function,
find an inner ADDR_EXPR and annotate it so that we can issue
a useful warning with -Wtrampolines. */
else if (FUNC_OR_METHOD_TYPE_P (TREE_TYPE (gnu_prefix))
&& (gnu_expr = remove_conversions (gnu_result, false))
&& TREE_CODE (gnu_expr) == ADDR_EXPR
&& decl_function_context (TREE_OPERAND (gnu_expr, 0)))
{
set_expr_location_from_node (gnu_expr, gnat_node);
/* Also check the inlining status. */
check_inlining_for_nested_subprog (TREE_OPERAND (gnu_expr, 0));
/* Moreover, for 'Access or 'Unrestricted_Access with non-
foreign-compatible representation, mark the ADDR_EXPR so
that we can build a descriptor instead of a trampoline. */
if ((attribute == Attr_Access
|| attribute == Attr_Unrestricted_Access)
&& targetm.calls.custom_function_descriptors > 0
&& Can_Use_Internal_Rep (Underlying_Type (Etype (gnat_node))))
FUNC_ADDR_BY_DESCRIPTOR (gnu_expr) = 1;
/* Otherwise, we need to check that we are not violating the
No_Implicit_Dynamic_Code restriction. */
else if (targetm.calls.custom_function_descriptors != 0)
Check_Implicit_Dynamic_Code_Allowed (gnat_node);
}
break;
case Attr_Pool_Address:
{
tree gnu_ptr = gnu_prefix;
tree gnu_obj_type;
gnu_result_type = get_unpadded_type (Etype (gnat_node));
/* If this is fat pointer, the object must have been allocated with the
template in front of the array. So compute the template address; do
it by converting to a thin pointer. */
if (TYPE_IS_FAT_POINTER_P (TREE_TYPE (gnu_ptr)))
gnu_ptr
= convert (build_pointer_type
(TYPE_OBJECT_RECORD_TYPE
(TYPE_UNCONSTRAINED_ARRAY (TREE_TYPE (gnu_ptr)))),
gnu_ptr);
gnu_obj_type = TREE_TYPE (TREE_TYPE (gnu_ptr));
/* If this is a thin pointer, the object must have been allocated with
the template in front of the array. So compute the template address
and return it. */
if (TYPE_IS_THIN_POINTER_P (TREE_TYPE (gnu_ptr)))
gnu_ptr
= build_binary_op (POINTER_PLUS_EXPR, TREE_TYPE (gnu_ptr),
gnu_ptr,
fold_build1 (NEGATE_EXPR, sizetype,
byte_position
(DECL_CHAIN
TYPE_FIELDS ((gnu_obj_type)))));
gnu_result = convert (gnu_result_type, gnu_ptr);
}
break;
case Attr_Size:
case Attr_Object_Size:
case Attr_Value_Size:
case Attr_Max_Size_In_Storage_Elements:
/* Strip NOPs, conversions between original and packable versions, and
unpadding from GNU_PREFIX. Note that we cannot simply strip every
VIEW_CONVERT_EXPR because some of them may give the actual size, e.g.
for nominally unconstrained packed array. We use GNU_EXPR to see
if a COMPONENT_REF was involved. */
while (CONVERT_EXPR_P (gnu_prefix)
|| TREE_CODE (gnu_prefix) == NON_LVALUE_EXPR
|| (TREE_CODE (gnu_prefix) == VIEW_CONVERT_EXPR
&& TREE_CODE (TREE_TYPE (gnu_prefix)) == RECORD_TYPE
&& TREE_CODE (TREE_TYPE (TREE_OPERAND (gnu_prefix, 0)))
== RECORD_TYPE
&& TYPE_NAME (TREE_TYPE (gnu_prefix))
== TYPE_NAME (TREE_TYPE (TREE_OPERAND (gnu_prefix, 0)))))
gnu_prefix = TREE_OPERAND (gnu_prefix, 0);
gnu_expr = gnu_prefix;
if (TREE_CODE (gnu_prefix) == COMPONENT_REF
&& TYPE_IS_PADDING_P (TREE_TYPE (TREE_OPERAND (gnu_prefix, 0))))
gnu_prefix = TREE_OPERAND (gnu_prefix, 0);
prefix_unused = true;
gnu_type = TREE_TYPE (gnu_prefix);
/* Replace an unconstrained array type with the type of the underlying
array, except for 'Max_Size_In_Storage_Elements because we need to
return the (maximum) size requested for an allocator. */
if (TREE_CODE (gnu_type) == UNCONSTRAINED_ARRAY_TYPE)
{
gnu_type = TYPE_OBJECT_RECORD_TYPE (gnu_type);
if (attribute != Attr_Max_Size_In_Storage_Elements)
gnu_type = TREE_TYPE (DECL_CHAIN (TYPE_FIELDS (gnu_type)));
}
/* The type must be frozen at this point. */
gcc_assert (COMPLETE_TYPE_P (gnu_type));
/* If we're looking for the size of a field, return the field size. */
if (TREE_CODE (gnu_prefix) == COMPONENT_REF)
gnu_result = DECL_SIZE (TREE_OPERAND (gnu_prefix, 1));
/* Otherwise, if the prefix is an object, or if we are looking for
'Object_Size or 'Max_Size_In_Storage_Elements, the result is the
GCC size of the type. We make an exception for padded objects,
as we do not take into account alignment promotions for the size.
This is in keeping with the object case of gnat_to_gnu_entity. */
else if ((TREE_CODE (gnu_prefix) != TYPE_DECL
&& !(TYPE_IS_PADDING_P (gnu_type)
&& TREE_CODE (gnu_expr) == COMPONENT_REF
&& pad_type_has_rm_size (gnu_type)))
|| attribute == Attr_Object_Size
|| attribute == Attr_Max_Size_In_Storage_Elements)
{
/* If this is a dereference and we have a special dynamic constrained
subtype on the prefix, use it to compute the size; otherwise, use
the designated subtype. */
if (Nkind (gnat_prefix) == N_Explicit_Dereference)
{
Node_Id gnat_actual_subtype
= Actual_Designated_Subtype (gnat_prefix);
tree gnu_ptr_type
= TREE_TYPE (gnat_to_gnu (Prefix (gnat_prefix)));
if (TYPE_IS_FAT_OR_THIN_POINTER_P (gnu_ptr_type)
&& Present (gnat_actual_subtype))
{
tree gnu_actual_obj_type
= gnat_to_gnu_type (gnat_actual_subtype);
gnu_type
= build_unc_object_type_from_ptr (gnu_ptr_type,
gnu_actual_obj_type,
get_identifier ("SIZE"),
false);
}
}
gnu_result = TYPE_SIZE (gnu_type);
}
/* Otherwise, the result is the RM size of the type. */
else
gnu_result = rm_size (gnu_type);
/* Deal with a self-referential size by qualifying the size with the
object or returning the maximum size for a type. */
if (TREE_CODE (gnu_prefix) != TYPE_DECL)
gnu_result = SUBSTITUTE_PLACEHOLDER_IN_EXPR (gnu_result, gnu_prefix);
else if (CONTAINS_PLACEHOLDER_P (gnu_result))
gnu_result = max_size (gnu_result, true);
/* If the type contains a template, subtract the padded size of the
template, except for 'Max_Size_In_Storage_Elements because we need
to return the (maximum) size requested for an allocator. */
if (TREE_CODE (gnu_type) == RECORD_TYPE
&& TYPE_CONTAINS_TEMPLATE_P (gnu_type)
&& attribute != Attr_Max_Size_In_Storage_Elements)
gnu_result
= size_binop (MINUS_EXPR, gnu_result,
bit_position (DECL_CHAIN (TYPE_FIELDS (gnu_type))));
/* For 'Max_Size_In_Storage_Elements, adjust the unit. */
if (attribute == Attr_Max_Size_In_Storage_Elements)
gnu_result = size_binop (CEIL_DIV_EXPR, gnu_result, bitsize_unit_node);
gnu_result_type = get_unpadded_type (Etype (gnat_node));
break;
case Attr_Alignment:
{
unsigned int align;
if (TREE_CODE (gnu_prefix) == COMPONENT_REF
&& TYPE_IS_PADDING_P (TREE_TYPE (TREE_OPERAND (gnu_prefix, 0))))
gnu_prefix = TREE_OPERAND (gnu_prefix, 0);
gnu_type = TREE_TYPE (gnu_prefix);
gnu_result_type = get_unpadded_type (Etype (gnat_node));
prefix_unused = true;
if (TREE_CODE (gnu_prefix) == COMPONENT_REF)
align = DECL_ALIGN (TREE_OPERAND (gnu_prefix, 1)) / BITS_PER_UNIT;
else
{
Entity_Id gnat_type = Etype (gnat_prefix);
unsigned int double_align;
bool is_capped_double, align_clause;
/* If the default alignment of "double" or larger scalar types is
specifically capped and there is an alignment clause neither
on the type nor on the prefix itself, return the cap. */
if ((double_align = double_float_alignment) > 0)
is_capped_double
= is_double_float_or_array (gnat_type, &align_clause);
else if ((double_align = double_scalar_alignment) > 0)
is_capped_double
= is_double_scalar_or_array (gnat_type, &align_clause);
else
is_capped_double = align_clause = false;
if (is_capped_double
&& Nkind (gnat_prefix) == N_Identifier
&& Present (Alignment_Clause (Entity (gnat_prefix))))
align_clause = true;
if (is_capped_double && !align_clause)
align = double_align;
else
align = TYPE_ALIGN (gnu_type) / BITS_PER_UNIT;
}
gnu_result = size_int (align);
}
break;
case Attr_First:
case Attr_Last:
case Attr_Range_Length:
prefix_unused = true;
if (INTEGRAL_TYPE_P (gnu_type) || SCALAR_FLOAT_TYPE_P (gnu_type))
{
gnu_result_type = get_unpadded_type (Etype (gnat_node));
if (attribute == Attr_First)
gnu_result = TYPE_MIN_VALUE (gnu_type);
else if (attribute == Attr_Last)
gnu_result = TYPE_MAX_VALUE (gnu_type);
else
gnu_result = get_type_length (gnu_type, gnu_result_type);
break;
}
/* ... fall through ... */
case Attr_Length:
{
int Dimension = (Present (Expressions (gnat_node))
? UI_To_Int (Intval (First (Expressions (gnat_node))))
: 1), i;
struct parm_attr_d *pa = NULL;
Entity_Id gnat_param = Empty;
bool unconstrained_ptr_deref = false;
gnu_prefix = maybe_padded_object (gnu_prefix);
gnu_prefix = maybe_unconstrained_array (gnu_prefix);
/* We treat unconstrained array In parameters specially. We also note
whether we are dereferencing a pointer to unconstrained array. */
if (!Is_Constrained (Etype (gnat_prefix)))
switch (Nkind (gnat_prefix))
{
case N_Identifier:
/* This is the direct case. */
if (Ekind (Entity (gnat_prefix)) == E_In_Parameter)
gnat_param = Entity (gnat_prefix);
break;
case N_Explicit_Dereference:
/* This is the indirect case. Note that we need to be sure that
the access value cannot be null as we'll hoist the load. */
if (Nkind (Prefix (gnat_prefix)) == N_Identifier
&& Ekind (Entity (Prefix (gnat_prefix))) == E_In_Parameter)
{
if (Can_Never_Be_Null (Entity (Prefix (gnat_prefix))))
gnat_param = Entity (Prefix (gnat_prefix));
}
else
unconstrained_ptr_deref = true;
break;
default:
break;
}
/* If the prefix is the view conversion of a constrained array to an
unconstrained form, we retrieve the constrained array because we
might not be able to substitute the PLACEHOLDER_EXPR coming from
the conversion. This can occur with the 'Old attribute applied
to a parameter with an unconstrained type, which gets rewritten
into a constrained local variable very late in the game. */
if (TREE_CODE (gnu_prefix) == VIEW_CONVERT_EXPR
&& CONTAINS_PLACEHOLDER_P (TYPE_SIZE (TREE_TYPE (gnu_prefix)))
&& !CONTAINS_PLACEHOLDER_P
(TYPE_SIZE (TREE_TYPE (TREE_OPERAND (gnu_prefix, 0)))))
gnu_type = TREE_TYPE (TREE_OPERAND (gnu_prefix, 0));
else
gnu_type = TREE_TYPE (gnu_prefix);
prefix_unused = true;
gnu_result_type = get_unpadded_type (Etype (gnat_node));
if (TYPE_CONVENTION_FORTRAN_P (gnu_type))
{
int ndim;
tree gnu_type_temp;
for (ndim = 1, gnu_type_temp = gnu_type;
TREE_CODE (TREE_TYPE (gnu_type_temp)) == ARRAY_TYPE
&& TYPE_MULTI_ARRAY_P (TREE_TYPE (gnu_type_temp));
ndim++, gnu_type_temp = TREE_TYPE (gnu_type_temp))
;
Dimension = ndim + 1 - Dimension;
}
for (i = 1; i < Dimension; i++)
gnu_type = TREE_TYPE (gnu_type);
gcc_assert (TREE_CODE (gnu_type) == ARRAY_TYPE);
/* When not optimizing, look up the slot associated with the parameter
and the dimension in the cache and create a new one on failure.
Don't do this when the actual subtype needs debug info (this happens
with -gnatD): in elaborate_expression_1, we create variables that
hold the bounds, so caching attributes isn't very interesting and
causes dependency issues between these variables and cached
expressions. */
if (!optimize
&& Present (gnat_param)
&& !(Present (Actual_Subtype (gnat_param))
&& Needs_Debug_Info (Actual_Subtype (gnat_param))))
{
FOR_EACH_VEC_SAFE_ELT (f_parm_attr_cache, i, pa)
if (pa->id == gnat_param && pa->dim == Dimension)
break;
if (!pa)
{
pa = ggc_cleared_alloc<parm_attr_d> ();
pa->id = gnat_param;
pa->dim = Dimension;
vec_safe_push (f_parm_attr_cache, pa);
}
}
/* Return the cached expression or build a new one. */
if (attribute == Attr_First)
{
if (pa && pa->first)
{
gnu_result = pa->first;
break;
}
gnu_result
= TYPE_MIN_VALUE (TYPE_INDEX_TYPE (TYPE_DOMAIN (gnu_type)));
}
else if (attribute == Attr_Last)
{
if (pa && pa->last)
{
gnu_result = pa->last;
break;
}
gnu_result
= TYPE_MAX_VALUE (TYPE_INDEX_TYPE (TYPE_DOMAIN (gnu_type)));
}
else /* attribute == Attr_Range_Length || attribute == Attr_Length */
{
if (pa && pa->length)
{
gnu_result = pa->length;
break;
}
gnu_result
= get_type_length (TYPE_INDEX_TYPE (TYPE_DOMAIN (gnu_type)),
gnu_result_type);
}
/* If this has a PLACEHOLDER_EXPR, qualify it by the object we are
handling. Note that these attributes could not have been used on
an unconstrained array type. */
gnu_result = SUBSTITUTE_PLACEHOLDER_IN_EXPR (gnu_result, gnu_prefix);
/* Cache the expression we have just computed. Since we want to do it
at run time, we force the use of a SAVE_EXPR and let the gimplifier
create the temporary in the outermost binding level. We will make
sure in Subprogram_Body_to_gnu that it is evaluated on all possible
paths by forcing its evaluation on entry of the function. */
if (pa)
{
gnu_result
= build1 (SAVE_EXPR, TREE_TYPE (gnu_result), gnu_result);
switch (attribute)
{
case Attr_First:
pa->first = gnu_result;
break;
case Attr_Last:
pa->last = gnu_result;
break;
case Attr_Length:
case Attr_Range_Length:
pa->length = gnu_result;
break;
default:
gcc_unreachable ();
}
}
/* Otherwise, evaluate it each time it is referenced. */
else
switch (attribute)
{
case Attr_First:
case Attr_Last:
/* If we are dereferencing a pointer to unconstrained array, we
need to capture the value because the pointed-to bounds may
subsequently be released. */
if (unconstrained_ptr_deref)
gnu_result
= build1 (SAVE_EXPR, TREE_TYPE (gnu_result), gnu_result);
break;
case Attr_Length:
case Attr_Range_Length:
/* Set the source location onto the predicate of the condition
but not if the expression is cached to avoid messing up the
debug info. */
if (TREE_CODE (gnu_result) == COND_EXPR
&& EXPR_P (TREE_OPERAND (gnu_result, 0)))
set_expr_location_from_node (TREE_OPERAND (gnu_result, 0),
gnat_node);
break;
default:
gcc_unreachable ();
}
break;
}
case Attr_Bit_Position:
case Attr_Position:
case Attr_First_Bit:
case Attr_Last_Bit:
case Attr_Bit:
{
poly_int64 bitsize;
poly_int64 bitpos;
tree gnu_offset;
tree gnu_field_bitpos;
tree gnu_field_offset;
tree gnu_inner;
machine_mode mode;
int unsignedp, reversep, volatilep;
gnu_result_type = get_unpadded_type (Etype (gnat_node));
gnu_prefix = remove_conversions (gnu_prefix, true);
prefix_unused = true;
/* We can have 'Bit on any object, but if it isn't a COMPONENT_REF,
the result is 0. Don't allow 'Bit on a bare component, though. */
if (attribute == Attr_Bit
&& TREE_CODE (gnu_prefix) != COMPONENT_REF
&& TREE_CODE (gnu_prefix) != FIELD_DECL)
{
gnu_result = integer_zero_node;
break;
}
else
gcc_assert (TREE_CODE (gnu_prefix) == COMPONENT_REF
|| (attribute == Attr_Bit_Position
&& TREE_CODE (gnu_prefix) == FIELD_DECL));
get_inner_reference (gnu_prefix, &bitsize, &bitpos, &gnu_offset,
&mode, &unsignedp, &reversep, &volatilep);
if (TREE_CODE (gnu_prefix) == COMPONENT_REF)
{
gnu_field_bitpos = bit_position (TREE_OPERAND (gnu_prefix, 1));
gnu_field_offset = byte_position (TREE_OPERAND (gnu_prefix, 1));
for (gnu_inner = TREE_OPERAND (gnu_prefix, 0);
TREE_CODE (gnu_inner) == COMPONENT_REF
&& DECL_INTERNAL_P (TREE_OPERAND (gnu_inner, 1));
gnu_inner = TREE_OPERAND (gnu_inner, 0))
{
gnu_field_bitpos
= size_binop (PLUS_EXPR, gnu_field_bitpos,
bit_position (TREE_OPERAND (gnu_inner, 1)));
gnu_field_offset
= size_binop (PLUS_EXPR, gnu_field_offset,
byte_position (TREE_OPERAND (gnu_inner, 1)));
}
}
else if (TREE_CODE (gnu_prefix) == FIELD_DECL)
{
gnu_field_bitpos = bit_position (gnu_prefix);
gnu_field_offset = byte_position (gnu_prefix);
}
else
{
gnu_field_bitpos = bitsize_zero_node;
gnu_field_offset = size_zero_node;
}
switch (attribute)
{
case Attr_Position:
gnu_result = gnu_field_offset;
break;
case Attr_First_Bit:
case Attr_Bit:
gnu_result = size_int (num_trailing_bits (bitpos));
break;
case Attr_Last_Bit:
gnu_result = bitsize_int (num_trailing_bits (bitpos));
gnu_result = size_binop (PLUS_EXPR, gnu_result,
TYPE_SIZE (TREE_TYPE (gnu_prefix)));
/* ??? Avoid a large unsigned result that will overflow when
converted to the signed universal_integer. */
if (integer_zerop (gnu_result))
gnu_result = integer_minus_one_node;
else
gnu_result
= size_binop (MINUS_EXPR, gnu_result, bitsize_one_node);
break;
case Attr_Bit_Position:
gnu_result = gnu_field_bitpos;
break;
}
/* If this has a PLACEHOLDER_EXPR, qualify it by the object we are
handling. */
gnu_result = SUBSTITUTE_PLACEHOLDER_IN_EXPR (gnu_result, gnu_prefix);
break;
}
case Attr_Min:
case Attr_Max:
{
tree gnu_lhs = gnat_to_gnu (First (Expressions (gnat_node)));
tree gnu_rhs = gnat_to_gnu (Next (First (Expressions (gnat_node))));
gnu_result_type = get_unpadded_type (Etype (gnat_node));
/* The result of {MIN,MAX}_EXPR is unspecified if either operand is
a NaN so we implement the semantics of C99 f{min,max} to make it
predictable in this case: if either operand is a NaN, the other
is returned; if both operands are NaN's, a NaN is returned. */
if (SCALAR_FLOAT_TYPE_P (gnu_result_type)
&& !Machine_Overflows_On_Target)
{
const bool lhs_side_effects_p = TREE_SIDE_EFFECTS (gnu_lhs);
const bool rhs_side_effects_p = TREE_SIDE_EFFECTS (gnu_rhs);
tree t = builtin_decl_explicit (BUILT_IN_ISNAN);
tree lhs_is_nan, rhs_is_nan;
/* If the operands have side-effects, they need to be evaluated
only once in spite of the multiple references in the result. */
if (lhs_side_effects_p)
gnu_lhs = gnat_protect_expr (gnu_lhs);
if (rhs_side_effects_p)
gnu_rhs = gnat_protect_expr (gnu_rhs);
lhs_is_nan = fold_build2 (NE_EXPR, boolean_type_node,
build_call_expr (t, 1, gnu_lhs),
integer_zero_node);
rhs_is_nan = fold_build2 (NE_EXPR, boolean_type_node,
build_call_expr (t, 1, gnu_rhs),
integer_zero_node);
gnu_result = build_binary_op (attribute == Attr_Min
? MIN_EXPR : MAX_EXPR,
gnu_result_type, gnu_lhs, gnu_rhs);
gnu_result = fold_build3 (COND_EXPR, gnu_result_type,
rhs_is_nan, gnu_lhs, gnu_result);
gnu_result = fold_build3 (COND_EXPR, gnu_result_type,
lhs_is_nan, gnu_rhs, gnu_result);
/* If the operands have side-effects, they need to be evaluated
before doing the tests above since the place they otherwise
would end up being evaluated at run time could be wrong. */
if (lhs_side_effects_p)
gnu_result
= build2 (COMPOUND_EXPR, gnu_result_type, gnu_lhs, gnu_result);
if (rhs_side_effects_p)
gnu_result
= build2 (COMPOUND_EXPR, gnu_result_type, gnu_rhs, gnu_result);
}
else
gnu_result = build_binary_op (attribute == Attr_Min
? MIN_EXPR : MAX_EXPR,
gnu_result_type, gnu_lhs, gnu_rhs);
}
break;
case Attr_Passed_By_Reference:
gnu_result = size_int (default_pass_by_ref (gnu_type)
|| must_pass_by_ref (gnu_type));
gnu_result_type = get_unpadded_type (Etype (gnat_node));
break;
case Attr_Component_Size:
gnu_prefix = maybe_padded_object (gnu_prefix);
gnu_type = TREE_TYPE (gnu_prefix);
if (TREE_CODE (gnu_type) == UNCONSTRAINED_ARRAY_TYPE)
gnu_type = TREE_TYPE (TREE_TYPE (TYPE_FIELDS (TREE_TYPE (gnu_type))));
while (TREE_CODE (TREE_TYPE (gnu_type)) == ARRAY_TYPE
&& TYPE_MULTI_ARRAY_P (TREE_TYPE (gnu_type)))
gnu_type = TREE_TYPE (gnu_type);
gcc_assert (TREE_CODE (gnu_type) == ARRAY_TYPE);
/* Note this size cannot be self-referential. */
gnu_result = TYPE_SIZE (TREE_TYPE (gnu_type));
gnu_result_type = get_unpadded_type (Etype (gnat_node));
prefix_unused = true;
break;
case Attr_Descriptor_Size:
gnu_type = TREE_TYPE (gnu_prefix);
gcc_assert (TREE_CODE (gnu_type) == UNCONSTRAINED_ARRAY_TYPE);
/* Return the padded size of the template in the object record type. */
gnu_type = TYPE_OBJECT_RECORD_TYPE (gnu_type);
gnu_result = bit_position (DECL_CHAIN (TYPE_FIELDS (gnu_type)));
gnu_result_type = get_unpadded_type (Etype (gnat_node));
prefix_unused = true;
break;
case Attr_Null_Parameter:
/* This is just a zero cast to the pointer type for our prefix and
dereferenced. */
gnu_result_type = get_unpadded_type (Etype (gnat_node));
gnu_result
= build_unary_op (INDIRECT_REF, NULL_TREE,
convert (build_pointer_type (gnu_result_type),
integer_zero_node));
break;
case Attr_Mechanism_Code:
{
Entity_Id gnat_obj = Entity (gnat_prefix);
int code;
prefix_unused = true;
gnu_result_type = get_unpadded_type (Etype (gnat_node));
if (Present (Expressions (gnat_node)))
{
int i = UI_To_Int (Intval (First (Expressions (gnat_node))));
for (gnat_obj = First_Formal (gnat_obj); i > 1;
i--, gnat_obj = Next_Formal (gnat_obj))
;
}
code = Mechanism (gnat_obj);
if (code == Default)
code = ((present_gnu_tree (gnat_obj)
&& (DECL_BY_REF_P (get_gnu_tree (gnat_obj))
|| ((TREE_CODE (get_gnu_tree (gnat_obj))
== PARM_DECL)
&& (DECL_BY_COMPONENT_PTR_P
(get_gnu_tree (gnat_obj))))))
? By_Reference : By_Copy);
gnu_result = convert (gnu_result_type, size_int (- code));
}
break;
case Attr_Model:
/* We treat Model as identical to Machine. This is true for at least
IEEE and some other nice floating-point systems. */
/* ... fall through ... */
case Attr_Machine:
/* The trick is to force the compiler to store the result in memory so
that we do not have extra precision used. But do this only when this
is necessary, i.e. if FP_ARITH_MAY_WIDEN is true and the precision of
the type is lower than that of the longest floating-point type. */
prefix_unused = true;
gnu_expr = gnat_to_gnu (First (Expressions (gnat_node)));
gnu_result_type = get_unpadded_type (Etype (gnat_node));
gnu_result = convert (gnu_result_type, gnu_expr);
if (TREE_CODE (gnu_result) != REAL_CST
&& fp_arith_may_widen
&& TYPE_PRECISION (gnu_result_type)
< TYPE_PRECISION (longest_float_type_node))
{
tree rec_type = make_node (RECORD_TYPE);
tree field
= create_field_decl (get_identifier ("OBJ"), gnu_result_type,
rec_type, NULL_TREE, NULL_TREE, 0, 0);
tree rec_val, asm_expr;
finish_record_type (rec_type, field, 0, false);
rec_val = build_constructor_single (rec_type, field, gnu_result);
rec_val = build1 (SAVE_EXPR, rec_type, rec_val);
asm_expr
= build5 (ASM_EXPR, void_type_node,
build_string (0, ""),
tree_cons (build_tree_list (NULL_TREE,
build_string (2, "=m")),
rec_val, NULL_TREE),
tree_cons (build_tree_list (NULL_TREE,
build_string (1, "m")),
rec_val, NULL_TREE),
NULL_TREE, NULL_TREE);
ASM_VOLATILE_P (asm_expr) = 1;
gnu_result
= build_compound_expr (gnu_result_type, asm_expr,
build_component_ref (rec_val, field,
false));
}
break;
case Attr_Deref:
prefix_unused = true;
gnu_expr = gnat_to_gnu (First (Expressions (gnat_node)));
gnu_result_type = get_unpadded_type (Etype (gnat_node));
/* This can be a random address so build an alias-all pointer type. */
gnu_expr
= convert (build_pointer_type_for_mode (gnu_result_type, ptr_mode,
true),
gnu_expr);
gnu_result = build_unary_op (INDIRECT_REF, NULL_TREE, gnu_expr);
break;
default:
/* This abort means that we have an unimplemented attribute. */
gcc_unreachable ();
}
/* If this is an attribute where the prefix was unused, force a use of it if
it has a side-effect. But don't do it if the prefix is just an entity
name. However, if an access check is needed, we must do it. See second
example in AARM 11.6(5.e). */
if (prefix_unused
&& TREE_SIDE_EFFECTS (gnu_prefix)
&& !Is_Entity_Name (gnat_prefix))
gnu_result
= build_compound_expr (TREE_TYPE (gnu_result), gnu_prefix, gnu_result);
*gnu_result_type_p = gnu_result_type;
return gnu_result;
}
/* Subroutine of gnat_to_gnu to translate gnat_node, an N_Case_Statement,
   to a GCC tree, which is returned.  */

static tree
Case_Statement_to_gnu (Node_Id gnat_node)
{
  tree gnu_result, gnu_expr, gnu_type, gnu_label;
  Node_Id gnat_when;
  location_t end_locus;
  bool may_fallthru = false;

  /* Translate the case selector and reduce it to its base type, so that
     the bounds of all the choices below can be converted to this single
     common type before building the CASE_LABEL_EXPRs.  */
  gnu_expr = gnat_to_gnu (Expression (gnat_node));
  gnu_expr = convert (get_base_type (TREE_TYPE (gnu_expr)), gnu_expr);
  gnu_expr = maybe_character_value (gnu_expr);
  gnu_type = TREE_TYPE (gnu_expr);

  /* We build a SWITCH_EXPR that contains the code with interspersed
     CASE_LABEL_EXPRs for each label.  */
  if (!Sloc_to_locus (End_Location (gnat_node), &end_locus))
    end_locus = input_location;
  gnu_label = create_artificial_label (end_locus);
  start_stmt_group ();

  for (gnat_when = First_Non_Pragma (Alternatives (gnat_node));
       Present (gnat_when);
       gnat_when = Next_Non_Pragma (gnat_when))
    {
      bool choices_added_p = false;
      Node_Id gnat_choice;

      /* First compile all the different case choices for the current WHEN
	 alternative.  */
      for (gnat_choice = First (Discrete_Choices (gnat_when));
	   Present (gnat_choice);
	   gnat_choice = Next (gnat_choice))
	{
	  tree gnu_low = NULL_TREE, gnu_high = NULL_TREE;
	  tree label = create_artificial_label (input_location);

	  switch (Nkind (gnat_choice))
	    {
	    case N_Range:
	      gnu_low = gnat_to_gnu (Low_Bound (gnat_choice));
	      gnu_high = gnat_to_gnu (High_Bound (gnat_choice));
	      break;

	    case N_Subtype_Indication:
	      gnu_low = gnat_to_gnu (Low_Bound (Range_Expression
						(Constraint (gnat_choice))));
	      gnu_high = gnat_to_gnu (High_Bound (Range_Expression
						  (Constraint (gnat_choice))));
	      break;

	    case N_Identifier:
	    case N_Expanded_Name:
	      /* This represents either a subtype range or a static value of
		 some kind; Ekind says which.  */
	      if (Is_Type (Entity (gnat_choice)))
		{
		  tree gnu_type = get_unpadded_type (Entity (gnat_choice));

		  gnu_low = TYPE_MIN_VALUE (gnu_type);
		  gnu_high = TYPE_MAX_VALUE (gnu_type);
		  break;
		}

	      /* ... fall through ... */

	    case N_Character_Literal:
	    case N_Integer_Literal:
	      gnu_low = gnat_to_gnu (gnat_choice);
	      break;

	    case N_Others_Choice:
	      /* No bounds: an (empty, empty) case label is the default.  */
	      break;

	    default:
	      gcc_unreachable ();
	    }

	  /* Everything should be folded into constants at this point.  */
	  gcc_assert (!gnu_low || TREE_CODE (gnu_low) == INTEGER_CST);
	  gcc_assert (!gnu_high || TREE_CODE (gnu_high) == INTEGER_CST);

	  /* Bring the bounds to the selector's type so that the case labels
	     and the switch index agree.  */
	  if (gnu_low && TREE_TYPE (gnu_low) != gnu_type)
	    gnu_low = convert (gnu_type, gnu_low);
	  if (gnu_high && TREE_TYPE (gnu_high) != gnu_type)
	    gnu_high = convert (gnu_type, gnu_high);

	  add_stmt_with_node (build_case_label (gnu_low, gnu_high, label),
			      gnat_choice);
	  choices_added_p = true;
	}

      /* This construct doesn't define a scope so we shouldn't push a binding
	 level around the statement list.  Except that we have always done so
	 historically and this makes it possible to reduce stack usage.  As a
	 compromise, we keep doing it for case statements, for which this has
	 never been problematic, but not for case expressions in Ada 2012.  */
      if (choices_added_p)
	{
	  const bool is_case_expression
	    = (Nkind (Parent (gnat_node)) == N_Expression_With_Actions);
	  tree group
	    = build_stmt_group (Statements (gnat_when), !is_case_expression);
	  bool group_may_fallthru = block_may_fallthru (group);

	  add_stmt (group);

	  /* If control can reach the end of the alternative's statements,
	     branch to the label placed after the SWITCH_EXPR.  */
	  if (group_may_fallthru)
	    {
	      tree stmt = build1 (GOTO_EXPR, void_type_node, gnu_label);
	      SET_EXPR_LOCATION (stmt, end_locus);
	      add_stmt (stmt);
	      may_fallthru = true;
	    }
	}
    }

  /* Now emit a definition of the label the cases branch to, if any.  */
  if (may_fallthru)
    add_stmt (build1 (LABEL_EXPR, void_type_node, gnu_label));

  gnu_result = build2 (SWITCH_EXPR, gnu_type, gnu_expr, end_stmt_group ());

  return gnu_result;
}
/* Return true if we are in the body of a loop.  */

static inline bool
inside_loop_p (void)
{
  return vec_safe_length (gnu_loop_stack) > 0;
}
/* Find out whether EXPR is a simple additive expression based on the iteration
   variable of some enclosing loop in the current function.  If so, return the
   loop and set *DISP to the displacement and *NEG_P to true if this is for a
   subtraction; otherwise, return NULL.  */

static struct loop_info_d *
find_loop_for (tree expr, tree *disp, bool *neg_p)
{
  struct loop_info_d *info = NULL;
  unsigned int idx;
  tree base, offset;
  bool subtract;

  /* Decompose EXPR into BASE [+/- OFFSET] if it has that shape; otherwise
     the whole expression is the candidate variable with no displacement.  */
  if (!is_simple_additive_expression (expr, &base, &offset, &subtract))
    {
      base = expr;
      offset = NULL_TREE;
      subtract = false;
    }

  if (disp)
    *disp = offset;
  if (neg_p)
    *neg_p = subtract;

  /* Only a VAR_DECL, possibly wrapped in conversions, can be an iteration
     variable.  */
  base = remove_conversions (base, false);
  if (TREE_CODE (base) != VAR_DECL)
    return NULL;

  gcc_checking_assert (vec_safe_length (gnu_loop_stack) > 0);

  /* Walk the loop stack from the innermost loop outwards, looking for a
     loop of the current function whose iteration variable is BASE.  */
  FOR_EACH_VEC_ELT_REVERSE (*gnu_loop_stack, idx, info)
    if (info->loop_var == base && info->fndecl == current_function_decl)
      break;

  return info;
}
/* Return the innermost enclosing loop in the current function.  */

static struct loop_info_d *
find_loop (void)
{
  struct loop_info_d *info = NULL;
  unsigned int idx;

  gcc_checking_assert (vec_safe_length (gnu_loop_stack) > 0);

  /* Scan the loop stack from the innermost loop outwards and stop at the
     first loop belonging to the current function.  */
  FOR_EACH_VEC_ELT_REVERSE (*gnu_loop_stack, idx, info)
    if (info->fndecl == current_function_decl)
      break;

  return info;
}
/* Return true if VAL (of type TYPE) can equal the minimum value if MAX is
   false, or the maximum value if MAX is true, of TYPE.  */

static bool
can_equal_min_or_max_val_p (tree val, tree type, bool max)
{
  tree extremum = max ? TYPE_MAX_VALUE (type) : TYPE_MIN_VALUE (type);

  /* If the relevant bound of TYPE is not constant, be conservative.  */
  if (TREE_CODE (extremum) != INTEGER_CST)
    return true;

  /* For a conversion, reason on the corresponding bound of the type of
     the inner operand instead of the converted value itself.  */
  if (TREE_CODE (val) == NOP_EXPR)
    {
      tree inner_type = TREE_TYPE (TREE_OPERAND (val, 0));
      val = max ? TYPE_MAX_VALUE (inner_type) : TYPE_MIN_VALUE (inner_type);
    }

  /* Likewise, a non-constant value might reach the bound.  */
  if (TREE_CODE (val) != INTEGER_CST)
    return true;

  /* VAL can equal the bound iff it is not strictly within the range.  */
  if (max)
    return !tree_int_cst_lt (val, extremum);
  else
    return !tree_int_cst_lt (extremum, val);
}
/* Return true if VAL (of type TYPE) can equal the minimum value of TYPE.
   If REVERSE is true, minimum value is taken as maximum value.  */

static inline bool
can_equal_min_val_p (tree val, tree type, bool reverse)
{
  /* Thin wrapper: REVERSE directly selects which bound of TYPE is tested
     by can_equal_min_or_max_val_p (false = minimum, true = maximum).  */
  return can_equal_min_or_max_val_p (val, type, reverse);
}
/* Return true if VAL (of type TYPE) can equal the maximum value of TYPE.
   If REVERSE is true, maximum value is taken as minimum value.  */

static inline bool
can_equal_max_val_p (tree val, tree type, bool reverse)
{
  /* Thin wrapper: the negation of REVERSE selects which bound of TYPE is
     tested by can_equal_min_or_max_val_p (false = minimum, true = maximum),
     mirroring can_equal_min_val_p above.  */
  return can_equal_min_or_max_val_p (val, type, !reverse);
}
/* Replace EXPR1 and EXPR2 by invariant expressions if possible.  Return
   true if both expressions have been replaced and false otherwise.  */

static bool
make_invariant (tree *expr1, tree *expr2)
{
  tree inv1 = gnat_invariant_expr (*expr1);
  tree inv2 = gnat_invariant_expr (*expr2);

  /* Substitute each expression only when an invariant form was found;
     the other one is left untouched.  */
  if (inv1)
    *expr1 = inv1;
  if (inv2)
    *expr2 = inv2;

  return inv1 != NULL_TREE && inv2 != NULL_TREE;
}
/* Helper function for walk_tree, used by independent_iterations_p below.  */

static tree
scan_rhs_r (tree *tp, int *walk_subtrees, void *data)
{
  bitmap *modified_params = (bitmap *) data;
  tree node = *tp;

  /* No need to walk into types or decls.  */
  if (IS_TYPE_OR_DECL_P (node))
    *walk_subtrees = 0;

  /* Abort the walk by returning NODE if it is a parameter recorded in the
     bitmap of modified parameters; otherwise keep walking.  */
  return (TREE_CODE (node) == PARM_DECL
	  && bitmap_bit_p (*modified_params, DECL_UID (node)))
	 ? node : NULL_TREE;
}
/* Return true if STMT_LIST generates independent iterations in a loop.  */

static bool
independent_iterations_p (tree stmt_list)
{
  tree_stmt_iterator tsi;
  bitmap params = BITMAP_GGC_ALLOC();
  auto_vec<tree, 16> rhs;
  tree iter;
  int i;

  /* Look through an enclosing BIND_EXPR and scan its body directly.  */
  if (TREE_CODE (stmt_list) == BIND_EXPR)
    stmt_list = BIND_EXPR_BODY (stmt_list);

  /* Scan the list and return false on anything that is not either a check
     or an assignment to a parameter with restricted aliasing.  */
  for (tsi = tsi_start (stmt_list); !tsi_end_p (tsi); tsi_next (&tsi))
    {
      tree stmt = tsi_stmt (tsi);

      switch (TREE_CODE (stmt))
	{
	case COND_EXPR:
	  {
	    /* Only accept a COND_EXPR with no else arm whose then arm is a
	       call to a function marked TREE_THIS_VOLATILE — presumably the
	       shape of a generated run-time check raising an exception.  */
	    if (COND_EXPR_ELSE (stmt))
	      return false;
	    if (TREE_CODE (COND_EXPR_THEN (stmt)) != CALL_EXPR)
	      return false;
	    tree func = get_callee_fndecl (COND_EXPR_THEN (stmt));
	    if (!(func && TREE_THIS_VOLATILE (func)))
	      return false;
	    break;
	  }

	case MODIFY_EXPR:
	  {
	    /* Strip the component references off the LHS and require that
	       the store go through a dereference of a PARM_DECL that has
	       restricted aliasing.  */
	    tree lhs = TREE_OPERAND (stmt, 0);
	    while (handled_component_p (lhs))
	      lhs = TREE_OPERAND (lhs, 0);
	    if (TREE_CODE (lhs) != INDIRECT_REF)
	      return false;
	    lhs = TREE_OPERAND (lhs, 0);
	    if (!(TREE_CODE (lhs) == PARM_DECL
		  && DECL_RESTRICTED_ALIASING_P (lhs)))
	      return false;
	    /* Record both the written parameter and the RHS for the
	       read-after-write analysis done below.  */
	    bitmap_set_bit (params, DECL_UID (lhs));
	    rhs.safe_push (TREE_OPERAND (stmt, 1));
	    break;
	  }

	default:
	  return false;
	}
    }

  /* At this point we know that the list contains only statements that will
     modify parameters with restricted aliasing.  Check that the statements
     don't at the time read from these parameters.  */
  FOR_EACH_VEC_ELT (rhs, i, iter)
    if (walk_tree_without_duplicates (&iter, scan_rhs_r, &params))
      return false;

  return true;
}
/* Subroutine of gnat_to_gnu to translate gnat_node, an N_Loop_Statement,
to a GCC tree, which is returned. */
static tree
Loop_Statement_to_gnu (Node_Id gnat_node)
{
const Node_Id gnat_iter_scheme = Iteration_Scheme (gnat_node);
struct loop_info_d *gnu_loop_info = ggc_cleared_alloc<loop_info_d> ();
tree gnu_loop_stmt = build4 (LOOP_STMT, void_type_node, NULL_TREE,
NULL_TREE, NULL_TREE, NULL_TREE);
tree gnu_loop_label = create_artificial_label (input_location);
tree gnu_cond_expr = NULL_TREE, gnu_low = NULL_TREE, gnu_high = NULL_TREE;
tree gnu_result;
/* Push the loop_info structure associated with the LOOP_STMT. */
gnu_loop_info->fndecl = current_function_decl;
gnu_loop_info->stmt = gnu_loop_stmt;
vec_safe_push (gnu_loop_stack, gnu_loop_info);
/* Set location information for statement and end label. */
set_expr_location_from_node (gnu_loop_stmt, gnat_node);
Sloc_to_locus (Sloc (End_Label (gnat_node)),
&DECL_SOURCE_LOCATION (gnu_loop_label));
LOOP_STMT_LABEL (gnu_loop_stmt) = gnu_loop_label;
/* Set the condition under which the loop must keep going. If we have an
explicit condition, use it to set the location information throughout
the translation of the loop statement to avoid having multiple SLOCs.
For the case "LOOP .... END LOOP;" the condition is always true. */
if (No (gnat_iter_scheme))
;
/* For the case "WHILE condition LOOP ..... END LOOP;" it's immediate. */
else if (Present (Condition (gnat_iter_scheme)))
{
LOOP_STMT_COND (gnu_loop_stmt)
= gnat_to_gnu (Condition (gnat_iter_scheme));
set_expr_location_from_node (gnu_loop_stmt, gnat_iter_scheme);
}
/* Otherwise we have an iteration scheme and the condition is given by the
bounds of the subtype of the iteration variable. */
else
{
Node_Id gnat_loop_spec = Loop_Parameter_Specification (gnat_iter_scheme);
Entity_Id gnat_loop_var = Defining_Entity (gnat_loop_spec);
Entity_Id gnat_type = Etype (gnat_loop_var);
tree gnu_type = get_unpadded_type (gnat_type);
tree gnu_base_type = maybe_character_type (get_base_type (gnu_type));
tree gnu_one_node = build_int_cst (gnu_base_type, 1);
tree gnu_loop_var, gnu_loop_iv, gnu_first, gnu_last, gnu_stmt;
enum tree_code update_code, test_code, shift_code;
bool reverse = Reverse_Present (gnat_loop_spec), use_iv = false;
gnu_low = convert (gnu_base_type, TYPE_MIN_VALUE (gnu_type));
gnu_high = convert (gnu_base_type, TYPE_MAX_VALUE (gnu_type));
/* We must disable modulo reduction for the iteration variable, if any,
in order for the loop comparison to be effective. */
if (reverse)
{
gnu_first = gnu_high;
gnu_last = gnu_low;
update_code = MINUS_NOMOD_EXPR;
test_code = GE_EXPR;
shift_code = PLUS_NOMOD_EXPR;
}
else
{
gnu_first = gnu_low;
gnu_last = gnu_high;
update_code = PLUS_NOMOD_EXPR;
test_code = LE_EXPR;
shift_code = MINUS_NOMOD_EXPR;
}
/* We use two different strategies to translate the loop, depending on
whether optimization is enabled.
If it is, we generate the canonical loop form expected by the loop
optimizer and the loop vectorizer, which is the do-while form:
ENTRY_COND
loop:
TOP_UPDATE
BODY
BOTTOM_COND
GOTO loop
This avoids an implicit dependency on loop header copying and makes
it possible to turn BOTTOM_COND into an inequality test.
If optimization is disabled, loop header copying doesn't come into
play and we try to generate the loop form with the fewer conditional
branches. First, the default form, which is:
loop:
TOP_COND
BODY
BOTTOM_UPDATE
GOTO loop
It should catch most loops with constant ending point. Then, if we
cannot, we try to generate the shifted form:
loop:
TOP_COND
TOP_UPDATE
BODY
GOTO loop
which should catch loops with constant starting point. Otherwise, if
we cannot, we generate the fallback form:
ENTRY_COND
loop:
BODY
BOTTOM_COND
BOTTOM_UPDATE
GOTO loop
which works in all cases. */
if (optimize && !optimize_debug)
{
/* We can use the do-while form directly if GNU_FIRST-1 doesn't
overflow. */
if (!can_equal_min_val_p (gnu_first, gnu_base_type, reverse))
;
/* Otherwise, use the do-while form with the help of a special
induction variable in the unsigned version of the base type
or the unsigned version of the size type, whichever is the
largest, in order to have wrap-around arithmetics for it. */
else
{
if (TYPE_PRECISION (gnu_base_type)
> TYPE_PRECISION (size_type_node))
gnu_base_type
= gnat_type_for_size (TYPE_PRECISION (gnu_base_type), 1);
else
gnu_base_type = size_type_node;
gnu_first = convert (gnu_base_type, gnu_first);
gnu_last = convert (gnu_base_type, gnu_last);
gnu_one_node = build_int_cst (gnu_base_type, 1);
use_iv = true;
}
gnu_first
= build_binary_op (shift_code, gnu_base_type, gnu_first,
gnu_one_node);
LOOP_STMT_TOP_UPDATE_P (gnu_loop_stmt) = 1;
LOOP_STMT_BOTTOM_COND_P (gnu_loop_stmt) = 1;
}
else
{
/* We can use the default form if GNU_LAST+1 doesn't overflow. */
if (!can_equal_max_val_p (gnu_last, gnu_base_type, reverse))
;
/* Otherwise, we can use the shifted form if neither GNU_FIRST-1 nor
GNU_LAST-1 does. */
else if (!can_equal_min_val_p (gnu_first, gnu_base_type, reverse)
&& !can_equal_min_val_p (gnu_last, gnu_base_type, reverse))
{
gnu_first
= build_binary_op (shift_code, gnu_base_type, gnu_first,
gnu_one_node);
gnu_last
= build_binary_op (shift_code, gnu_base_type, gnu_last,
gnu_one_node);
LOOP_STMT_TOP_UPDATE_P (gnu_loop_stmt) = 1;
}
/* Otherwise, use the fallback form. */
else
LOOP_STMT_BOTTOM_COND_P (gnu_loop_stmt) = 1;
}