| /* Support routines for manipulating internal types for GDB. |
| |
| Copyright (C) 1992-2024 Free Software Foundation, Inc. |
| |
| Contributed by Cygnus Support, using pieces from other GDB modules. |
| |
| This file is part of GDB. |
| |
| This program is free software; you can redistribute it and/or modify |
| it under the terms of the GNU General Public License as published by |
| the Free Software Foundation; either version 3 of the License, or |
| (at your option) any later version. |
| |
| This program is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with this program. If not, see <http://www.gnu.org/licenses/>. */ |
| |
| #include "bfd.h" |
| #include "symtab.h" |
| #include "symfile.h" |
| #include "objfiles.h" |
| #include "gdbtypes.h" |
| #include "expression.h" |
| #include "language.h" |
| #include "target.h" |
| #include "value.h" |
| #include "demangle.h" |
| #include "complaints.h" |
| #include "cli/cli-cmds.h" |
| #include "cp-abi.h" |
| #include "hashtab.h" |
| #include "cp-support.h" |
| #include "bcache.h" |
| #include "dwarf2/loc.h" |
| #include "dwarf2/read.h" |
| #include "gdbcore.h" |
| #include "floatformat.h" |
| #include "f-lang.h" |
| #include <algorithm> |
| #include "gmp-utils.h" |
| #include "rust-lang.h" |
| #include "ada-lang.h" |
| |
| /* The value of an invalid conversion badness. */ |
| #define INVALID_CONVERSION 100 |
| |
| static struct dynamic_prop_list * |
| copy_dynamic_prop_list (struct obstack *, struct dynamic_prop_list *); |
| |
| /* Initialize BADNESS constants. */ |
| |
| const struct rank LENGTH_MISMATCH_BADNESS = {INVALID_CONVERSION,0}; |
| |
| const struct rank TOO_FEW_PARAMS_BADNESS = {INVALID_CONVERSION,0}; |
| const struct rank INCOMPATIBLE_TYPE_BADNESS = {INVALID_CONVERSION,0}; |
| |
| const struct rank EXACT_MATCH_BADNESS = {0,0}; |
| |
| const struct rank INTEGER_PROMOTION_BADNESS = {1,0}; |
| const struct rank FLOAT_PROMOTION_BADNESS = {1,0}; |
| const struct rank BASE_PTR_CONVERSION_BADNESS = {1,0}; |
| const struct rank CV_CONVERSION_BADNESS = {1, 0}; |
| const struct rank INTEGER_CONVERSION_BADNESS = {2,0}; |
| const struct rank FLOAT_CONVERSION_BADNESS = {2,0}; |
| const struct rank INT_FLOAT_CONVERSION_BADNESS = {2,0}; |
| const struct rank VOID_PTR_CONVERSION_BADNESS = {2,0}; |
| const struct rank BOOL_CONVERSION_BADNESS = {3,0}; |
| const struct rank BASE_CONVERSION_BADNESS = {2,0}; |
| const struct rank REFERENCE_CONVERSION_BADNESS = {2,0}; |
| const struct rank REFERENCE_SEE_THROUGH_BADNESS = {0,1}; |
| const struct rank NULL_POINTER_CONVERSION_BADNESS = {2,0}; |
| const struct rank NS_POINTER_CONVERSION_BADNESS = {10,0}; |
| const struct rank NS_INTEGER_POINTER_CONVERSION_BADNESS = {3,0}; |
| const struct rank VARARG_BADNESS = {4, 0}; |
| |
| /* Floatformat pairs. */ |
| const struct floatformat *floatformats_ieee_half[BFD_ENDIAN_UNKNOWN] = { |
| &floatformat_ieee_half_big, |
| &floatformat_ieee_half_little |
| }; |
| const struct floatformat *floatformats_ieee_single[BFD_ENDIAN_UNKNOWN] = { |
| &floatformat_ieee_single_big, |
| &floatformat_ieee_single_little |
| }; |
| const struct floatformat *floatformats_ieee_double[BFD_ENDIAN_UNKNOWN] = { |
| &floatformat_ieee_double_big, |
| &floatformat_ieee_double_little |
| }; |
| const struct floatformat *floatformats_ieee_quad[BFD_ENDIAN_UNKNOWN] = { |
| &floatformat_ieee_quad_big, |
| &floatformat_ieee_quad_little |
| }; |
| const struct floatformat *floatformats_ieee_double_littlebyte_bigword[BFD_ENDIAN_UNKNOWN] = { |
| &floatformat_ieee_double_big, |
| &floatformat_ieee_double_littlebyte_bigword |
| }; |
| const struct floatformat *floatformats_i387_ext[BFD_ENDIAN_UNKNOWN] = { |
| &floatformat_i387_ext, |
| &floatformat_i387_ext |
| }; |
| const struct floatformat *floatformats_m68881_ext[BFD_ENDIAN_UNKNOWN] = { |
| &floatformat_m68881_ext, |
| &floatformat_m68881_ext |
| }; |
| const struct floatformat *floatformats_arm_ext[BFD_ENDIAN_UNKNOWN] = { |
| &floatformat_arm_ext_big, |
| &floatformat_arm_ext_littlebyte_bigword |
| }; |
| const struct floatformat *floatformats_ia64_spill[BFD_ENDIAN_UNKNOWN] = { |
| &floatformat_ia64_spill_big, |
| &floatformat_ia64_spill_little |
| }; |
| const struct floatformat *floatformats_vax_f[BFD_ENDIAN_UNKNOWN] = { |
| &floatformat_vax_f, |
| &floatformat_vax_f |
| }; |
| const struct floatformat *floatformats_vax_d[BFD_ENDIAN_UNKNOWN] = { |
| &floatformat_vax_d, |
| &floatformat_vax_d |
| }; |
| const struct floatformat *floatformats_ibm_long_double[BFD_ENDIAN_UNKNOWN] = { |
| &floatformat_ibm_long_double_big, |
| &floatformat_ibm_long_double_little |
| }; |
| const struct floatformat *floatformats_bfloat16[BFD_ENDIAN_UNKNOWN] = { |
| &floatformat_bfloat16_big, |
| &floatformat_bfloat16_little |
| }; |
| |
| /* Should opaque types be resolved? */ |
| |
| static bool opaque_type_resolution = true; |
| |
| /* See gdbtypes.h. */ |
| |
| unsigned int overload_debug = 0; |
| |
| /* A flag to enable strict type checking. */ |
| |
| static bool strict_type_checking = true; |
| |
| /* A function to show whether opaque types are resolved. */ |
| |
| static void |
| show_opaque_type_resolution (struct ui_file *file, int from_tty, |
| struct cmd_list_element *c, |
| const char *value) |
| { |
| gdb_printf (file, _("Resolution of opaque struct/class/union types " |
| "(if set before loading symbols) is %s.\n"), |
| value); |
| } |
| |
| /* A function to show whether C++ overload debugging is enabled. */ |
| |
| static void |
| show_overload_debug (struct ui_file *file, int from_tty, |
| struct cmd_list_element *c, const char *value) |
| { |
| gdb_printf (file, _("Debugging of C++ overloading is %s.\n"), |
| value); |
| } |
| |
| /* A function to show the status of strict type checking. */ |
| |
| static void |
| show_strict_type_checking (struct ui_file *file, int from_tty, |
| struct cmd_list_element *c, const char *value) |
| { |
| gdb_printf (file, _("Strict type checking is %s.\n"), value); |
| } |
| |
| |
| /* Helper function to initialize a newly allocated type. Set type code |
| to CODE and initialize the type-specific fields accordingly. */ |
| |
| static void |
| set_type_code (struct type *type, enum type_code code) |
| { |
| type->set_code (code); |
| |
| switch (code) |
| { |
| case TYPE_CODE_STRUCT: |
| case TYPE_CODE_UNION: |
| case TYPE_CODE_NAMESPACE: |
| INIT_CPLUS_SPECIFIC (type); |
| break; |
| case TYPE_CODE_FLT: |
| TYPE_SPECIFIC_FIELD (type) = TYPE_SPECIFIC_FLOATFORMAT; |
| break; |
| case TYPE_CODE_FUNC: |
| INIT_FUNC_SPECIFIC (type); |
| break; |
| case TYPE_CODE_FIXED_POINT: |
| INIT_FIXED_POINT_SPECIFIC (type); |
| break; |
| } |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| type * |
| type_allocator::new_type () |
| { |
| if (m_smash) |
| return m_data.type; |
| |
| obstack *obstack = (m_is_objfile |
| ? &m_data.objfile->objfile_obstack |
| : gdbarch_obstack (m_data.gdbarch)); |
| |
| /* Alloc the structure and start off with all fields zeroed. */ |
| struct type *type = OBSTACK_ZALLOC (obstack, struct type); |
| TYPE_MAIN_TYPE (type) = OBSTACK_ZALLOC (obstack, struct main_type); |
| TYPE_MAIN_TYPE (type)->m_lang = m_lang; |
| |
| if (m_is_objfile) |
| { |
| OBJSTAT (m_data.objfile, n_types++); |
| type->set_owner (m_data.objfile); |
| } |
| else |
| type->set_owner (m_data.gdbarch); |
| |
| /* Initialize the fields that might not be zero. */ |
| type->set_code (TYPE_CODE_UNDEF); |
| TYPE_CHAIN (type) = type; /* Chain back to itself. */ |
| |
| return type; |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| type * |
| type_allocator::new_type (enum type_code code, int bit, const char *name) |
| { |
| struct type *type = new_type (); |
| set_type_code (type, code); |
| gdb_assert ((bit % TARGET_CHAR_BIT) == 0); |
| type->set_length (bit / TARGET_CHAR_BIT); |
| |
| if (name != nullptr) |
| { |
| obstack *obstack = (m_is_objfile |
| ? &m_data.objfile->objfile_obstack |
| : gdbarch_obstack (m_data.gdbarch)); |
| type->set_name (obstack_strdup (obstack, name)); |
| } |
| |
| return type; |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| gdbarch * |
| type_allocator::arch () |
| { |
| if (m_smash) |
| return m_data.type->arch (); |
| if (m_is_objfile) |
| return m_data.objfile->arch (); |
| return m_data.gdbarch; |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| gdbarch * |
| type::arch () const |
| { |
| struct gdbarch *arch; |
| |
| if (this->is_objfile_owned ()) |
| arch = this->objfile_owner ()->arch (); |
| else |
| arch = this->arch_owner (); |
| |
| /* The ARCH can be NULL if TYPE is associated with neither an objfile nor |
| a gdbarch. However, this is very rare, and even then, in most cases |
| where type::arch is called, we assume that a non-NULL value is |
| returned. */ |
| gdb_assert (arch != nullptr); |
| return arch; |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| struct type * |
| get_target_type (struct type *type) |
| { |
| if (type != NULL) |
| { |
| type = type->target_type (); |
| if (type != NULL) |
| type = check_typedef (type); |
| } |
| |
| return type; |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| unsigned int |
| type_length_units (struct type *type) |
| { |
| int unit_size = gdbarch_addressable_memory_unit_size (type->arch ()); |
| |
| return type->length () / unit_size; |
| } |
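| |
| /* Illustrative sketch (not part of GDB proper; "gdbarch" is assumed to be |
| an architecture in scope): on a target whose addressable memory unit is |
| two bytes wide, gdbarch_addressable_memory_unit_size returns 2, so a |
| 4-byte "int" spans two addressable units: |
| |
| struct type *int_type = builtin_type (gdbarch)->builtin_int; |
| unsigned int units = type_length_units (int_type); (2 in this scenario) |
| */ |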
| |
| /* Alloc a new type instance structure, fill it with some defaults, |
| and point it at OLDTYPE. Allocate the new type instance from the |
| same place as OLDTYPE. */ |
| |
| static struct type * |
| alloc_type_instance (struct type *oldtype) |
| { |
| struct type *type; |
| |
| /* Allocate the structure. */ |
| |
| if (!oldtype->is_objfile_owned ()) |
| type = GDBARCH_OBSTACK_ZALLOC (oldtype->arch_owner (), struct type); |
| else |
| type = OBSTACK_ZALLOC (&oldtype->objfile_owner ()->objfile_obstack, |
| struct type); |
| |
| TYPE_MAIN_TYPE (type) = TYPE_MAIN_TYPE (oldtype); |
| |
| TYPE_CHAIN (type) = type; /* Chain back to itself for now. */ |
| |
| return type; |
| } |
| |
| /* Clear all remnants of the previous type at TYPE, in preparation for |
| replacing it with something else. Preserve owner information. */ |
| |
| static void |
| smash_type (struct type *type) |
| { |
| bool objfile_owned = type->is_objfile_owned (); |
| objfile *objfile = type->objfile_owner (); |
| gdbarch *arch = type->arch_owner (); |
| |
| memset (TYPE_MAIN_TYPE (type), 0, sizeof (struct main_type)); |
| |
| /* Restore owner information. */ |
| if (objfile_owned) |
| type->set_owner (objfile); |
| else |
| type->set_owner (arch); |
| |
| /* For now, delete the rings. */ |
| TYPE_CHAIN (type) = type; |
| |
| /* For now, leave the pointer/reference types alone. */ |
| } |
| |
| /* Lookup a pointer to a type TYPE. TYPEPTR, if nonzero, points |
| to a pointer to memory where the pointer type should be stored. |
| If *TYPEPTR is zero, update it to point to the pointer type we return. |
| We allocate new memory if needed. */ |
| |
| struct type * |
| make_pointer_type (struct type *type, struct type **typeptr) |
| { |
| struct type *ntype; /* New type */ |
| struct type *chain; |
| |
| ntype = TYPE_POINTER_TYPE (type); |
| |
| if (ntype) |
| { |
| if (typeptr == 0) |
| return ntype; /* Don't care about alloc, |
| and have new type. */ |
| else if (*typeptr == 0) |
| { |
| *typeptr = ntype; /* Tracking alloc, and have new type. */ |
| return ntype; |
| } |
| } |
| |
| if (typeptr == 0 || *typeptr == 0) /* We'll need to allocate one. */ |
| { |
| ntype = type_allocator (type).new_type (); |
| if (typeptr) |
| *typeptr = ntype; |
| } |
| else /* We have storage, but need to reset it. */ |
| { |
| ntype = *typeptr; |
| chain = TYPE_CHAIN (ntype); |
| smash_type (ntype); |
| TYPE_CHAIN (ntype) = chain; |
| } |
| |
| ntype->set_target_type (type); |
| TYPE_POINTER_TYPE (type) = ntype; |
| |
| /* FIXME! Assumes the machine has only one representation for pointers! */ |
| |
| ntype->set_length (gdbarch_ptr_bit (type->arch ()) / TARGET_CHAR_BIT); |
| ntype->set_code (TYPE_CODE_PTR); |
| |
| /* Mark pointers as unsigned. The target converts between pointers |
| and addresses (CORE_ADDRs) using gdbarch_pointer_to_address and |
| gdbarch_address_to_pointer. */ |
| ntype->set_is_unsigned (true); |
| |
| /* Update the length of all the other variants of this type. */ |
| chain = TYPE_CHAIN (ntype); |
| while (chain != ntype) |
| { |
| chain->set_length (ntype->length ()); |
| chain = TYPE_CHAIN (chain); |
| } |
| |
| return ntype; |
| } |
| |
| /* Given a type TYPE, return a type of pointers to that type. |
| May need to construct such a type if this is the first use. */ |
| |
| struct type * |
| lookup_pointer_type (struct type *type) |
| { |
| return make_pointer_type (type, (struct type **) 0); |
| } |
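| |
| /* Illustrative sketch (assumed names; not part of GDB proper): derived |
| types can be chained from an existing base type, and repeated lookups |
| return the variant cached in TYPE_POINTER_TYPE rather than allocating |
| a new one each time: |
| |
| struct type *int_type = builtin_type (gdbarch)->builtin_int; |
| struct type *int_ptr = lookup_pointer_type (int_type); (an "int *") |
| struct type *int_ptr_ptr = lookup_pointer_type (int_ptr); (an "int **") |
| */ |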
| |
| /* Lookup a C++ `reference' to a type TYPE. TYPEPTR, if nonzero, |
| points to a pointer to memory where the reference type should be |
| stored. If *TYPEPTR is zero, update it to point to the reference |
| type we return. We allocate new memory if needed. REFCODE denotes |
| the kind of reference type to lookup (lvalue or rvalue reference). */ |
| |
| struct type * |
| make_reference_type (struct type *type, struct type **typeptr, |
| enum type_code refcode) |
| { |
| struct type *ntype; /* New type */ |
| struct type **reftype; |
| struct type *chain; |
| |
| gdb_assert (refcode == TYPE_CODE_REF || refcode == TYPE_CODE_RVALUE_REF); |
| |
| ntype = (refcode == TYPE_CODE_REF ? TYPE_REFERENCE_TYPE (type) |
| : TYPE_RVALUE_REFERENCE_TYPE (type)); |
| |
| if (ntype) |
| { |
| if (typeptr == 0) |
| return ntype; /* Don't care about alloc, |
| and have new type. */ |
| else if (*typeptr == 0) |
| { |
| *typeptr = ntype; /* Tracking alloc, and have new type. */ |
| return ntype; |
| } |
| } |
| |
| if (typeptr == 0 || *typeptr == 0) /* We'll need to allocate one. */ |
| { |
| ntype = type_allocator (type).new_type (); |
| if (typeptr) |
| *typeptr = ntype; |
| } |
| else /* We have storage, but need to reset it. */ |
| { |
| ntype = *typeptr; |
| chain = TYPE_CHAIN (ntype); |
| smash_type (ntype); |
| TYPE_CHAIN (ntype) = chain; |
| } |
| |
| ntype->set_target_type (type); |
| reftype = (refcode == TYPE_CODE_REF ? &TYPE_REFERENCE_TYPE (type) |
| : &TYPE_RVALUE_REFERENCE_TYPE (type)); |
| |
| *reftype = ntype; |
| |
| /* FIXME! Assume the machine has only one representation for |
| references, and that it matches the (only) representation for |
| pointers! */ |
| |
| ntype->set_length (gdbarch_ptr_bit (type->arch ()) / TARGET_CHAR_BIT); |
| ntype->set_code (refcode); |
| |
| *reftype = ntype; |
| |
| /* Update the length of all the other variants of this type. */ |
| chain = TYPE_CHAIN (ntype); |
| while (chain != ntype) |
| { |
| chain->set_length (ntype->length ()); |
| chain = TYPE_CHAIN (chain); |
| } |
| |
| return ntype; |
| } |
| |
| /* Same as above, but caller doesn't care about memory allocation |
| details. */ |
| |
| struct type * |
| lookup_reference_type (struct type *type, enum type_code refcode) |
| { |
| return make_reference_type (type, (struct type **) 0, refcode); |
| } |
| |
| /* Lookup the lvalue reference type for the type TYPE. */ |
| |
| struct type * |
| lookup_lvalue_reference_type (struct type *type) |
| { |
| return lookup_reference_type (type, TYPE_CODE_REF); |
| } |
| |
| /* Lookup the rvalue reference type for the type TYPE. */ |
| |
| struct type * |
| lookup_rvalue_reference_type (struct type *type) |
| { |
| return lookup_reference_type (type, TYPE_CODE_RVALUE_REF); |
| } |
| |
| /* Lookup a function type that returns type TYPE. TYPEPTR, if |
| nonzero, points to a pointer to memory where the function type |
| should be stored. If *TYPEPTR is zero, update it to point to the |
| function type we return. We allocate new memory if needed. */ |
| |
| struct type * |
| make_function_type (struct type *type, struct type **typeptr) |
| { |
| struct type *ntype; /* New type */ |
| |
| if (typeptr == 0 || *typeptr == 0) /* We'll need to allocate one. */ |
| { |
| ntype = type_allocator (type).new_type (); |
| if (typeptr) |
| *typeptr = ntype; |
| } |
| else /* We have storage, but need to reset it. */ |
| { |
| ntype = *typeptr; |
| smash_type (ntype); |
| } |
| |
| ntype->set_target_type (type); |
| |
| ntype->set_length (1); |
| ntype->set_code (TYPE_CODE_FUNC); |
| |
| INIT_FUNC_SPECIFIC (ntype); |
| |
| return ntype; |
| } |
| |
| /* Given a type TYPE, return a type of functions that return that type. |
| May need to construct such a type if this is the first use. */ |
| |
| struct type * |
| lookup_function_type (struct type *type) |
| { |
| return make_function_type (type, (struct type **) 0); |
| } |
| |
| /* Given a type TYPE and argument types, return the appropriate |
| function type. If the final type in PARAM_TYPES is NULL, make a |
| varargs function. */ |
| |
| struct type * |
| lookup_function_type_with_arguments (struct type *type, |
| int nparams, |
| struct type **param_types) |
| { |
| struct type *fn = make_function_type (type, (struct type **) 0); |
| int i; |
| |
| if (nparams > 0) |
| { |
| if (param_types[nparams - 1] == NULL) |
| { |
| --nparams; |
| fn->set_has_varargs (true); |
| } |
| else if (check_typedef (param_types[nparams - 1])->code () |
| == TYPE_CODE_VOID) |
| { |
| --nparams; |
| /* Caller should have ensured this. */ |
| gdb_assert (nparams == 0); |
| fn->set_is_prototyped (true); |
| } |
| else |
| fn->set_is_prototyped (true); |
| } |
| |
| fn->alloc_fields (nparams); |
| for (i = 0; i < nparams; ++i) |
| fn->field (i).set_type (param_types[i]); |
| |
| return fn; |
| } |
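| |
| /* Illustrative sketch (INT_TYPE and CHAR_PTR_TYPE are assumed, existing |
| types): the type of "int printf (const char *, ...)" can be built by |
| terminating the parameter list with NULL, which requests a varargs |
| function type as described above: |
| |
| struct type *params[] = { char_ptr_type, NULL }; |
| struct type *printf_type |
| = lookup_function_type_with_arguments (int_type, 2, params); |
| */ |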
| |
| /* Identify an address space by name and return the corresponding |
| type_instance_flags. */ |
| |
| type_instance_flags |
| address_space_name_to_type_instance_flags (struct gdbarch *gdbarch, |
| const char *space_identifier) |
| { |
| type_instance_flags type_flags; |
| |
| /* Check for known address space delimiters. */ |
| if (!strcmp (space_identifier, "code")) |
| return TYPE_INSTANCE_FLAG_CODE_SPACE; |
| else if (!strcmp (space_identifier, "data")) |
| return TYPE_INSTANCE_FLAG_DATA_SPACE; |
| else if (gdbarch_address_class_name_to_type_flags_p (gdbarch) |
| && gdbarch_address_class_name_to_type_flags (gdbarch, |
| space_identifier, |
| &type_flags)) |
| return type_flags; |
| else |
| error (_("Unknown address space specifier: \"%s\""), space_identifier); |
| } |
| |
| /* Identify an address space by its type_instance_flags and return |
| the string version of the address space name. */ |
| |
| const char * |
| address_space_type_instance_flags_to_name (struct gdbarch *gdbarch, |
| type_instance_flags space_flag) |
| { |
| if (space_flag & TYPE_INSTANCE_FLAG_CODE_SPACE) |
| return "code"; |
| else if (space_flag & TYPE_INSTANCE_FLAG_DATA_SPACE) |
| return "data"; |
| else if ((space_flag & TYPE_INSTANCE_FLAG_ADDRESS_CLASS_ALL) |
| && gdbarch_address_class_type_flags_to_name_p (gdbarch)) |
| return gdbarch_address_class_type_flags_to_name (gdbarch, space_flag); |
| else |
| return NULL; |
| } |
| |
| /* Create a new type with instance flags NEW_FLAGS, based on TYPE. |
| |
| If STORAGE is non-NULL, create the new type instance there. |
| STORAGE must be in the same obstack as TYPE. */ |
| |
| static struct type * |
| make_qualified_type (struct type *type, type_instance_flags new_flags, |
| struct type *storage) |
| { |
| struct type *ntype; |
| |
| ntype = type; |
| do |
| { |
| if (ntype->instance_flags () == new_flags) |
| return ntype; |
| ntype = TYPE_CHAIN (ntype); |
| } |
| while (ntype != type); |
| |
| /* Create a new type instance. */ |
| if (storage == NULL) |
| ntype = alloc_type_instance (type); |
| else |
| { |
| /* If STORAGE was provided, it had better be in the same objfile |
| as TYPE. Otherwise, we can't link it into TYPE's cv chain: |
| if one objfile is freed and the other kept, we'd have |
| dangling pointers. */ |
| gdb_assert (type->objfile_owner () == storage->objfile_owner ()); |
| |
| ntype = storage; |
| TYPE_MAIN_TYPE (ntype) = TYPE_MAIN_TYPE (type); |
| TYPE_CHAIN (ntype) = ntype; |
| } |
| |
| /* Pointers or references to the original type are not relevant to |
| the new type. */ |
| TYPE_POINTER_TYPE (ntype) = (struct type *) 0; |
| TYPE_REFERENCE_TYPE (ntype) = (struct type *) 0; |
| |
| /* Chain the new qualified type to the old type. */ |
| TYPE_CHAIN (ntype) = TYPE_CHAIN (type); |
| TYPE_CHAIN (type) = ntype; |
| |
| /* Now set the instance flags and return the new type. */ |
| ntype->set_instance_flags (new_flags); |
| |
| /* Set length of new type to that of the original type. */ |
| ntype->set_length (type->length ()); |
| |
| return ntype; |
| } |
| |
| /* Make an address-space-delimited variant of a type -- a type that |
| is identical to the one supplied except that it has an address |
| space attribute attached to it (such as "code" or "data"). |
| |
| The space attributes "code" and "data" are for Harvard |
| architectures. The address space attributes are for architectures |
| which have alternately sized pointers or pointers with alternate |
| representations. */ |
| |
| struct type * |
| make_type_with_address_space (struct type *type, |
| type_instance_flags space_flag) |
| { |
| type_instance_flags new_flags = ((type->instance_flags () |
| & ~(TYPE_INSTANCE_FLAG_CODE_SPACE |
| | TYPE_INSTANCE_FLAG_DATA_SPACE |
| | TYPE_INSTANCE_FLAG_ADDRESS_CLASS_ALL)) |
| | space_flag); |
| |
| return make_qualified_type (type, new_flags, NULL); |
| } |
| |
| /* Make a "c-v" variant of a type -- a type that is identical to the |
| one supplied except that it may have const or volatile attributes. |
| CNST is a flag for setting the const attribute. |
| VOLTL is a flag for setting the volatile attribute. |
| TYPE is the base type whose variant we are creating. |
| |
| If TYPEPTR and *TYPEPTR are non-zero, then *TYPEPTR points to |
| storage to hold the new qualified type; *TYPEPTR and TYPE must be |
| in the same objfile. Otherwise, allocate fresh memory for the new |
| type wherever TYPE lives. If TYPEPTR is non-zero, set it to the |
| new type we construct. */ |
| |
| struct type * |
| make_cv_type (int cnst, int voltl, |
| struct type *type, |
| struct type **typeptr) |
| { |
| struct type *ntype; /* New type */ |
| |
| type_instance_flags new_flags = (type->instance_flags () |
| & ~(TYPE_INSTANCE_FLAG_CONST |
| | TYPE_INSTANCE_FLAG_VOLATILE)); |
| |
| if (cnst) |
| new_flags |= TYPE_INSTANCE_FLAG_CONST; |
| |
| if (voltl) |
| new_flags |= TYPE_INSTANCE_FLAG_VOLATILE; |
| |
| if (typeptr && *typeptr != NULL) |
| { |
| /* TYPE and *TYPEPTR must be in the same objfile. We can't have |
| a C-V variant chain that threads across objfiles: if one |
| objfile gets freed, then the other has a broken C-V chain. |
| |
| This code used to try to copy over the main type from TYPE to |
| *TYPEPTR if they were in different objfiles, but that's |
| wrong, too: TYPE may have a field list or member function |
| lists, which refer to types of their own, etc. etc. The |
| whole shebang would need to be copied over recursively; you |
| can't have inter-objfile pointers. The only thing to do is |
| to leave stub types as stub types, and look them up afresh by |
| name each time you encounter them. */ |
| gdb_assert ((*typeptr)->objfile_owner () == type->objfile_owner ()); |
| } |
| |
| ntype = make_qualified_type (type, new_flags, |
| typeptr ? *typeptr : NULL); |
| |
| if (typeptr != NULL) |
| *typeptr = ntype; |
| |
| return ntype; |
| } |
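| |
| /* Illustrative sketch (INT_TYPE is an assumed, existing type): a |
| "const volatile int" variant can be obtained without tracking storage |
| by passing NULL for TYPEPTR; make_qualified_type then finds or |
| allocates the variant on INT_TYPE's chain: |
| |
| struct type *cv_int = make_cv_type (1, 1, int_type, NULL); |
| */ |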
| |
| /* Make a 'restrict'-qualified version of TYPE. */ |
| |
| struct type * |
| make_restrict_type (struct type *type) |
| { |
| return make_qualified_type (type, |
| (type->instance_flags () |
| | TYPE_INSTANCE_FLAG_RESTRICT), |
| NULL); |
| } |
| |
| /* Make a type without const, volatile, or restrict. */ |
| |
| struct type * |
| make_unqualified_type (struct type *type) |
| { |
| return make_qualified_type (type, |
| (type->instance_flags () |
| & ~(TYPE_INSTANCE_FLAG_CONST |
| | TYPE_INSTANCE_FLAG_VOLATILE |
| | TYPE_INSTANCE_FLAG_RESTRICT)), |
| NULL); |
| } |
| |
| /* Make a '_Atomic'-qualified version of TYPE. */ |
| |
| struct type * |
| make_atomic_type (struct type *type) |
| { |
| return make_qualified_type (type, |
| (type->instance_flags () |
| | TYPE_INSTANCE_FLAG_ATOMIC), |
| NULL); |
| } |
| |
| /* Replace the contents of ntype with the type *type. This changes the |
| contents, rather than the pointer for TYPE_MAIN_TYPE (ntype); thus |
| the changes are propagated to all types in the TYPE_CHAIN. |
| |
| In order to build recursive types, it's inevitable that we'll need |
| to update types in place --- but this sort of indiscriminate |
| smashing is ugly, and needs to be replaced with something more |
| controlled. TYPE_MAIN_TYPE is a step in this direction; it's not |
| clear if more steps are needed. */ |
| |
| void |
| replace_type (struct type *ntype, struct type *type) |
| { |
| struct type *chain; |
| |
| /* These two types had better be in the same objfile. Otherwise, |
| the assignment of one type's main type structure to the other |
| will produce a type with references to objects (names; field |
| lists; etc.) allocated on an objfile other than its own. */ |
| gdb_assert (ntype->objfile_owner () == type->objfile_owner ()); |
| |
| *TYPE_MAIN_TYPE (ntype) = *TYPE_MAIN_TYPE (type); |
| |
| /* The type length is not a part of the main type. Update it for |
| each type on the variant chain. */ |
| chain = ntype; |
| do |
| { |
| /* Assert that this element of the chain has no address-class bits |
| set in its flags. Such type variants might have type lengths |
| which are supposed to be different from the non-address-class |
| variants. This assertion shouldn't ever be triggered because |
| symbol readers which do construct address-class variants don't |
| call replace_type(). */ |
| gdb_assert (TYPE_ADDRESS_CLASS_ALL (chain) == 0); |
| |
| chain->set_length (type->length ()); |
| chain = TYPE_CHAIN (chain); |
| } |
| while (ntype != chain); |
| |
| /* Assert that the two types have equivalent instance qualifiers. |
| This should be true for at least all of our debug readers. */ |
| gdb_assert (ntype->instance_flags () == type->instance_flags ()); |
| } |
| |
| /* Implement direct support for MEMBER_TYPE in GNU C++. |
| May need to construct such a type if this is the first use. |
| The TYPE is the type of the member. The DOMAIN is the type |
| of the aggregate that the member belongs to. */ |
| |
| struct type * |
| lookup_memberptr_type (struct type *type, struct type *domain) |
| { |
| struct type *mtype; |
| |
| mtype = type_allocator (type).new_type (); |
| smash_to_memberptr_type (mtype, domain, type); |
| return mtype; |
| } |
| |
| /* Return a pointer-to-method type, for a method of type TO_TYPE. */ |
| |
| struct type * |
| lookup_methodptr_type (struct type *to_type) |
| { |
| struct type *mtype; |
| |
| mtype = type_allocator (to_type).new_type (); |
| smash_to_methodptr_type (mtype, to_type); |
| return mtype; |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| bool |
| operator== (const dynamic_prop &l, const dynamic_prop &r) |
| { |
| if (l.kind () != r.kind ()) |
| return false; |
| |
| switch (l.kind ()) |
| { |
| case PROP_UNDEFINED: |
| return true; |
| case PROP_CONST: |
| return l.const_val () == r.const_val (); |
| case PROP_ADDR_OFFSET: |
| case PROP_LOCEXPR: |
| case PROP_LOCLIST: |
| return l.baton () == r.baton (); |
| case PROP_VARIANT_PARTS: |
| return l.variant_parts () == r.variant_parts (); |
| case PROP_TYPE: |
| return l.original_type () == r.original_type (); |
| } |
| |
| gdb_assert_not_reached ("unhandled dynamic_prop kind"); |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| bool |
| operator== (const range_bounds &l, const range_bounds &r) |
| { |
| #define FIELD_EQ(FIELD) (l.FIELD == r.FIELD) |
| |
| return (FIELD_EQ (low) |
| && FIELD_EQ (high) |
| && FIELD_EQ (flag_upper_bound_is_count) |
| && FIELD_EQ (flag_bound_evaluated) |
| && FIELD_EQ (bias)); |
| |
| #undef FIELD_EQ |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| struct type * |
| create_range_type (type_allocator &alloc, struct type *index_type, |
| const struct dynamic_prop *low_bound, |
| const struct dynamic_prop *high_bound, |
| LONGEST bias) |
| { |
| /* The INDEX_TYPE should be a type capable of holding the upper and lower |
| bounds; as such, a zero-sized or void type makes no sense. */ |
| gdb_assert (index_type->code () != TYPE_CODE_VOID); |
| gdb_assert (index_type->length () > 0); |
| |
| struct type *result_type = alloc.new_type (); |
| result_type->set_code (TYPE_CODE_RANGE); |
| result_type->set_target_type (index_type); |
| if (index_type->is_stub ()) |
| result_type->set_target_is_stub (true); |
| else |
| result_type->set_length (check_typedef (index_type)->length ()); |
| |
| range_bounds *bounds |
| = (struct range_bounds *) TYPE_ZALLOC (result_type, sizeof (range_bounds)); |
| bounds->low = *low_bound; |
| bounds->high = *high_bound; |
| bounds->bias = bias; |
| bounds->stride.set_const_val (0); |
| |
| result_type->set_bounds (bounds); |
| |
| if (index_type->code () == TYPE_CODE_FIXED_POINT) |
| result_type->set_is_unsigned (index_type->is_unsigned ()); |
| else if (index_type->is_unsigned ()) |
| { |
| /* If the underlying type is unsigned, then the range |
| necessarily is. */ |
| result_type->set_is_unsigned (true); |
| } |
| /* Otherwise, the signed-ness of a range type can't simply be copied |
| from the underlying type. Consider a case where the underlying |
| type is 'int', but the range type can hold 0..65535, and where |
| the range is further specified to fit into 16 bits. In this |
| case, if we copy the underlying type's sign, then reading some |
| range values will cause an unwanted sign extension. So, we have |
| some heuristics here instead. */ |
| else if (low_bound->is_constant () && low_bound->const_val () >= 0) |
| { |
| result_type->set_is_unsigned (true); |
| /* Ada allows the declaration of range types whose upper bound is |
| less than the lower bound, so checking the lower bound is not |
| enough. Make sure we do not mark a range type whose upper bound |
| is negative as unsigned. */ |
| if (high_bound->is_constant () && high_bound->const_val () < 0) |
| result_type->set_is_unsigned (false); |
| } |
| |
| result_type->set_endianity_is_not_default |
| (index_type->endianity_is_not_default ()); |
| |
| return result_type; |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| struct type * |
| create_range_type_with_stride (type_allocator &alloc, |
| struct type *index_type, |
| const struct dynamic_prop *low_bound, |
| const struct dynamic_prop *high_bound, |
| LONGEST bias, |
| const struct dynamic_prop *stride, |
| bool byte_stride_p) |
| { |
| struct type *result_type = create_range_type (alloc, index_type, low_bound, |
| high_bound, bias); |
| |
| gdb_assert (stride != nullptr); |
| result_type->bounds ()->stride = *stride; |
| result_type->bounds ()->flag_is_byte_stride = byte_stride_p; |
| |
| return result_type; |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| struct type * |
| create_static_range_type (type_allocator &alloc, struct type *index_type, |
| LONGEST low_bound, LONGEST high_bound) |
| { |
| struct dynamic_prop low, high; |
| |
| low.set_const_val (low_bound); |
| high.set_const_val (high_bound); |
| |
| struct type *result_type = create_range_type (alloc, index_type, |
| &low, &high, 0); |
| |
| return result_type; |
| } |
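| |
| /* Illustrative sketch (assumed names): a static index subrange 1 .. 10 |
| over the builtin "int" type, allocated on the same obstack as the |
| index type: |
| |
| struct type *int_type = builtin_type (gdbarch)->builtin_int; |
| type_allocator alloc (int_type); |
| struct type *idx_1_10 |
| = create_static_range_type (alloc, int_type, 1, 10); |
| */ |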
| |
| /* Predicate testing whether BOUNDS are static. Return true if all bounds |
| values are static, otherwise return false. */ |
| |
| static bool |
| has_static_range (const struct range_bounds *bounds) |
| { |
| /* If the range doesn't have a defined stride then its stride field will |
| be initialized to the constant 0. */ |
| return (bounds->low.is_constant () |
| && bounds->high.is_constant () |
| && bounds->stride.is_constant ()); |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| std::optional<LONGEST> |
| get_discrete_low_bound (struct type *type) |
| { |
| type = check_typedef (type); |
| switch (type->code ()) |
| { |
| case TYPE_CODE_RANGE: |
| { |
| /* This function only works for ranges with a constant low bound. */ |
| if (!type->bounds ()->low.is_constant ()) |
| return {}; |
| |
| LONGEST low = type->bounds ()->low.const_val (); |
| |
| if (type->target_type ()->code () == TYPE_CODE_ENUM) |
| { |
| std::optional<LONGEST> low_pos |
| = discrete_position (type->target_type (), low); |
| |
| if (low_pos.has_value ()) |
| low = *low_pos; |
| } |
| |
| return low; |
| } |
| |
| case TYPE_CODE_ENUM: |
| { |
| if (type->num_fields () > 0) |
| { |
| /* The enums may not be sorted by value, so search all |
| entries. */ |
| LONGEST low = type->field (0).loc_enumval (); |
| |
| for (int i = 0; i < type->num_fields (); i++) |
| { |
| if (type->field (i).loc_enumval () < low) |
| low = type->field (i).loc_enumval (); |
| } |
| |
| return low; |
| } |
| else |
| return 0; |
| } |
| |
| case TYPE_CODE_BOOL: |
| return 0; |
| |
| case TYPE_CODE_INT: |
| if (type->length () > sizeof (LONGEST)) /* Too big */ |
| return {}; |
| |
| if (!type->is_unsigned ()) |
| return -(1 << (type->length () * TARGET_CHAR_BIT - 1)); |
| |
| [[fallthrough]]; |
| case TYPE_CODE_CHAR: |
| return 0; |
| |
| default: |
| return {}; |
| } |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| std::optional<LONGEST> |
| get_discrete_high_bound (struct type *type) |
| { |
| type = check_typedef (type); |
| switch (type->code ()) |
| { |
| case TYPE_CODE_RANGE: |
| { |
| /* This function only works for ranges with a constant high bound. */ |
| if (!type->bounds ()->high.is_constant ()) |
| return {}; |
| |
| LONGEST high = type->bounds ()->high.const_val (); |
| |
| if (type->target_type ()->code () == TYPE_CODE_ENUM) |
| { |
| std::optional<LONGEST> high_pos |
| = discrete_position (type->target_type (), high); |
| |
| if (high_pos.has_value ()) |
| high = *high_pos; |
| } |
| |
| return high; |
| } |
| |
| case TYPE_CODE_ENUM: |
| { |
| if (type->num_fields () > 0) |
| { |
| /* The enums may not be sorted by value, so search all |
| entries. */ |
| LONGEST high = type->field (0).loc_enumval (); |
| |
| for (int i = 0; i < type->num_fields (); i++) |
| { |
| if (type->field (i).loc_enumval () > high) |
| high = type->field (i).loc_enumval (); |
| } |
| |
| return high; |
| } |
| else |
| return -1; |
| } |
| |
| case TYPE_CODE_BOOL: |
| return 1; |
| |
| case TYPE_CODE_INT: |
| if (type->length () > sizeof (LONGEST)) /* Too big */ |
| return {}; |
| |
| if (!type->is_unsigned ()) |
| { |
| LONGEST low = -(1 << (type->length () * TARGET_CHAR_BIT - 1)); |
| return -low - 1; |
| } |
| |
| [[fallthrough]]; |
| case TYPE_CODE_CHAR: |
| { |
| /* This round-about calculation is to avoid shifting by |
| type->length () * TARGET_CHAR_BIT, which will not work |
| if type->length () == sizeof (LONGEST). */ |
| LONGEST high = 1 << (type->length () * TARGET_CHAR_BIT - 1); |
| return (high - 1) | high; |
| } |
| |
| default: |
| return {}; |
| } |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| bool |
| get_discrete_bounds (struct type *type, LONGEST *lowp, LONGEST *highp) |
| { |
| std::optional<LONGEST> low = get_discrete_low_bound (type); |
| if (!low.has_value ()) |
| return false; |
| |
| std::optional<LONGEST> high = get_discrete_high_bound (type); |
| if (!high.has_value ()) |
| return false; |
| |
| *lowp = *low; |
| *highp = *high; |
| |
| return true; |
| } |
| |
| /* See gdbtypes.h */ |
| |
| bool |
| get_array_bounds (struct type *type, LONGEST *low_bound, LONGEST *high_bound) |
| { |
| struct type *index = type->index_type (); |
| LONGEST low = 0; |
| LONGEST high = 0; |
| |
| if (index == NULL) |
| return false; |
| |
| if (!get_discrete_bounds (index, &low, &high)) |
| return false; |
| |
| if (low_bound) |
| *low_bound = low; |
| |
| if (high_bound) |
| *high_bound = high; |
| |
| return true; |
| } |
| |
| /* Assuming that TYPE is a discrete type and VAL is a valid integer |
| representation of a value of this type, return the corresponding |
| position number. |
| |
| It differs from VAL only in the case of enumeration types. In |
| this case, the position number of the value of the first listed |
| enumeration literal is zero; the position number of the value of |
| each subsequent enumeration literal is one more than that of its |
| predecessor in the list. |
| |
| Return an empty optional if VAL is not a valid value for TYPE |
| (for instance, an enumeration value with no matching literal). */ |
| |
| std::optional<LONGEST> |
| discrete_position (struct type *type, LONGEST val) |
| { |
| if (type->code () == TYPE_CODE_RANGE) |
| type = type->target_type (); |
| |
| if (type->code () == TYPE_CODE_ENUM) |
| { |
| int i; |
| |
| for (i = 0; i < type->num_fields (); i += 1) |
| { |
| if (val == type->field (i).loc_enumval ()) |
| return i; |
| } |
| |
| /* Invalid enumeration value. */ |
| return {}; |
| } |
| else |
| return val; |
| } |
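| |
| /* Worked example (hypothetical enumeration): for "enum e { A = 3, B = 7 }", |
| discrete_position (e_type, 7) yields 1, because B is the second listed |
| literal, while discrete_position (e_type, 5) yields an empty optional |
| because no literal has the value 5. For other discrete types the value |
| is returned unchanged. */ |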
| |
| /* If the array TYPE has static bounds calculate and update its |
| size, then return true. Otherwise return false and leave TYPE |
| unchanged. */ |
| |
| static bool |
| update_static_array_size (struct type *type) |
| { |
| gdb_assert (type->code () == TYPE_CODE_ARRAY); |
| |
| struct type *range_type = type->index_type (); |
| |
| if (type->dyn_prop (DYN_PROP_BYTE_STRIDE) == nullptr |
| && has_static_range (range_type->bounds ()) |
| && (!type_not_associated (type) |
| && !type_not_allocated (type))) |
| { |
| LONGEST low_bound, high_bound; |
| int stride; |
| struct type *element_type; |
| |
| stride = type->bit_stride (); |
| |
| if (!get_discrete_bounds (range_type, &low_bound, &high_bound)) |
| low_bound = high_bound = 0; |
| |
| element_type = check_typedef (type->target_type ()); |
| /* Be careful when setting the array length. Ada arrays can be |
| empty arrays with the high_bound being smaller than the low_bound. |
| In such cases, the array length should be zero. */ |
| if (high_bound < low_bound) |
| type->set_length (0); |
| else if (stride != 0) |
| { |
| /* Ensure that the type length is always positive, even in the |
| case where (for example in Fortran) we have a negative |
| stride. It is possible to have a single element array with a |
| negative stride in Fortran (this doesn't mean anything |
| special, it's still just a single element array) so do |
| consider that case when touching this code. */ |
| LONGEST element_count = std::abs (high_bound - low_bound + 1); |
| type->set_length (((std::abs (stride) * element_count) + 7) / 8); |
| } |
| else |
| type->set_length (element_type->length () |
| * (high_bound - low_bound + 1)); |
| |
| /* If this array's element is itself an array with a bit stride, |
| then we want to update this array's bit stride to reflect the |
| size of the sub-array. Otherwise, we'll end up using the |
| wrong size when trying to find elements of the outer |
| array. */ |
| if (element_type->code () == TYPE_CODE_ARRAY |
| && (stride != 0 || element_type->is_multi_dimensional ()) |
| && element_type->length () != 0 |
| && element_type->field (0).bitsize () != 0 |
| && get_array_bounds (element_type, &low_bound, &high_bound) |
| && high_bound >= low_bound) |
| type->field (0).set_bitsize |
| ((high_bound - low_bound + 1) |
| * element_type->field (0).bitsize ()); |
| |
| return true; |
| } |
| |
| return false; |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| struct type * |
| create_array_type_with_stride (type_allocator &alloc, |
| struct type *element_type, |
| struct type *range_type, |
| struct dynamic_prop *byte_stride_prop, |
| unsigned int bit_stride) |
| { |
| if (byte_stride_prop != nullptr && byte_stride_prop->is_constant ()) |
| { |
| /* The byte stride is actually not dynamic. Pretend we were |
| called with bit_stride set instead of byte_stride_prop. |
| This will give us the same result type, while avoiding |
| the need to handle this as a special case. */ |
| bit_stride = byte_stride_prop->const_val () * 8; |
| byte_stride_prop = NULL; |
| } |
| |
| struct type *result_type = alloc.new_type (); |
| |
| result_type->set_code (TYPE_CODE_ARRAY); |
| result_type->set_target_type (element_type); |
| |
| result_type->alloc_fields (1); |
| result_type->set_index_type (range_type); |
| if (byte_stride_prop != NULL) |
| result_type->add_dyn_prop (DYN_PROP_BYTE_STRIDE, *byte_stride_prop); |
| else if (bit_stride > 0) |
| result_type->field (0).set_bitsize (bit_stride); |
| |
| if (!update_static_array_size (result_type)) |
| { |
| /* This type is dynamic and its length needs to be computed |
| on demand. In the meantime, avoid leaving the TYPE_LENGTH |
| undefined by setting it to zero. Although we are not expected |
| to trust TYPE_LENGTH in this case, setting the size to zero |
| allows us to avoid allocating objects of random sizes in case |
| we accidentally do. */ |
| result_type->set_length (0); |
| } |
| |
| /* TYPE_TARGET_STUB will take care of zero length arrays. */ |
| if (result_type->length () == 0) |
| result_type->set_target_is_stub (true); |
| |
| return result_type; |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| struct type * |
| create_array_type (type_allocator &alloc, |
| struct type *element_type, |
| struct type *range_type) |
| { |
| return create_array_type_with_stride (alloc, element_type, |
| range_type, NULL, 0); |
| } |
| |
| struct type * |
| lookup_array_range_type (struct type *element_type, |
| LONGEST low_bound, LONGEST high_bound) |
| { |
| struct type *index_type; |
| struct type *range_type; |
| |
| type_allocator alloc (element_type); |
| index_type = builtin_type (element_type->arch ())->builtin_int; |
| |
| range_type = create_static_range_type (alloc, index_type, |
| low_bound, high_bound); |
| |
| return create_array_type (alloc, element_type, range_type); |
| } |
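| |
| /* Illustrative sketch (INT_TYPE is an assumed, existing type): an "int[4]" |
| array type can be built directly from the element type; the 0 .. 3 index |
| range over "int" is created internally: |
| |
| struct type *int_array_4 = lookup_array_range_type (int_type, 0, 3); |
| */ |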
| |
| /* See gdbtypes.h. */ |
| |
| struct type * |
| create_string_type (type_allocator &alloc, |
| struct type *string_char_type, |
| struct type *range_type) |
| { |
| struct type *result_type = create_array_type (alloc, |
| string_char_type, |
| range_type); |
| result_type->set_code (TYPE_CODE_STRING); |
| return result_type; |
| } |
| |
| struct type * |
| lookup_string_range_type (struct type *string_char_type, |
| LONGEST low_bound, LONGEST high_bound) |
| { |
| struct type *result_type; |
| |
| result_type = lookup_array_range_type (string_char_type, |
| low_bound, high_bound); |
| result_type->set_code (TYPE_CODE_STRING); |
| return result_type; |
| } |
| |
| struct type * |
| create_set_type (type_allocator &alloc, struct type *domain_type) |
| { |
| struct type *result_type = alloc.new_type (); |
| |
| result_type->set_code (TYPE_CODE_SET); |
| result_type->alloc_fields (1); |
| |
| if (!domain_type->is_stub ()) |
| { |
| LONGEST low_bound, high_bound, bit_length; |
| |
| if (!get_discrete_bounds (domain_type, &low_bound, &high_bound)) |
| low_bound = high_bound = 0; |
| |
| bit_length = high_bound - low_bound + 1; |
| result_type->set_length ((bit_length + TARGET_CHAR_BIT - 1) |
| / TARGET_CHAR_BIT); |
| if (low_bound >= 0) |
| result_type->set_is_unsigned (true); |
| } |
| result_type->field (0).set_type (domain_type); |
| |
| return result_type; |
| } |
| |
| /* Convert ARRAY_TYPE to a vector type. This may modify ARRAY_TYPE |
| and any array types nested inside it. */ |
| |
| void |
| make_vector_type (struct type *array_type) |
| { |
| struct type *inner_array, *elt_type; |
| |
| /* Find the innermost array type, in case the array is |
| multi-dimensional. */ |
| inner_array = array_type; |
| while (inner_array->target_type ()->code () == TYPE_CODE_ARRAY) |
| inner_array = inner_array->target_type (); |
| |
| elt_type = inner_array->target_type (); |
| if (elt_type->code () == TYPE_CODE_INT) |
| { |
| type_instance_flags flags |
| = elt_type->instance_flags () | TYPE_INSTANCE_FLAG_NOTTEXT; |
| elt_type = make_qualified_type (elt_type, flags, NULL); |
| inner_array->set_target_type (elt_type); |
| } |
| |
| array_type->set_is_vector (true); |
| } |
| |
| struct type * |
| init_vector_type (struct type *elt_type, int n) |
| { |
| struct type *array_type; |
| |
| array_type = lookup_array_range_type (elt_type, 0, n - 1); |
| make_vector_type (array_type); |
| return array_type; |
| } |
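| |
| /* Illustrative sketch (INT32_TYPE is an assumed 32-bit integer type): a |
| SIMD-style vector of eight such integers, as a backend might use for a |
| vector register type. The result is an array type with its vector flag |
| set and integer elements marked TYPE_INSTANCE_FLAG_NOTTEXT: |
| |
| struct type *v8si = init_vector_type (int32_type, 8); |
| */ |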
| |
| /* Internal routine called by TYPE_SELF_TYPE to return the type that TYPE |
| belongs to. In c++ this is the class of "this", but TYPE_THIS_TYPE is too |
| confusing. "self" is a common enough replacement for "this". |
| TYPE must be one of TYPE_CODE_METHODPTR, TYPE_CODE_MEMBERPTR, or |
| TYPE_CODE_METHOD. */ |
| |
| struct type * |
| internal_type_self_type (struct type *type) |
| { |
| switch (type->code ()) |
| { |
| case TYPE_CODE_METHODPTR: |
| case TYPE_CODE_MEMBERPTR: |
| if (TYPE_SPECIFIC_FIELD (type) == TYPE_SPECIFIC_NONE) |
| return NULL; |
| gdb_assert (TYPE_SPECIFIC_FIELD (type) == TYPE_SPECIFIC_SELF_TYPE); |
| return TYPE_MAIN_TYPE (type)->type_specific.self_type; |
| case TYPE_CODE_METHOD: |
| if (TYPE_SPECIFIC_FIELD (type) == TYPE_SPECIFIC_NONE) |
| return NULL; |
| gdb_assert (TYPE_SPECIFIC_FIELD (type) == TYPE_SPECIFIC_FUNC); |
| return TYPE_MAIN_TYPE (type)->type_specific.func_stuff->self_type; |
| default: |
| gdb_assert_not_reached ("bad type"); |
| } |
| } |
| |
| /* Set the type of the class that TYPE belongs to. |
| In c++ this is the class of "this". |
| TYPE must be one of TYPE_CODE_METHODPTR, TYPE_CODE_MEMBERPTR, or |
| TYPE_CODE_METHOD. */ |
| |
| void |
| set_type_self_type (struct type *type, struct type *self_type) |
| { |
| switch (type->code ()) |
| { |
| case TYPE_CODE_METHODPTR: |
| case TYPE_CODE_MEMBERPTR: |
| if (TYPE_SPECIFIC_FIELD (type) == TYPE_SPECIFIC_NONE) |
| TYPE_SPECIFIC_FIELD (type) = TYPE_SPECIFIC_SELF_TYPE; |
| gdb_assert (TYPE_SPECIFIC_FIELD (type) == TYPE_SPECIFIC_SELF_TYPE); |
| TYPE_MAIN_TYPE (type)->type_specific.self_type = self_type; |
| break; |
| case TYPE_CODE_METHOD: |
| if (TYPE_SPECIFIC_FIELD (type) == TYPE_SPECIFIC_NONE) |
| INIT_FUNC_SPECIFIC (type); |
| gdb_assert (TYPE_SPECIFIC_FIELD (type) == TYPE_SPECIFIC_FUNC); |
| TYPE_MAIN_TYPE (type)->type_specific.func_stuff->self_type = self_type; |
| break; |
| default: |
| gdb_assert_not_reached ("bad type"); |
| } |
| } |
| |
| /* Smash TYPE to be a type of pointers to members of SELF_TYPE with type |
| TO_TYPE. A member pointer is a weird thing -- it amounts to a |
| typed offset into a struct, e.g. "an int at offset 8". A MEMBER |
| TYPE doesn't include the offset (that's the value of the MEMBER |
| itself), but does include the structure type into which it points |
| (for some reason). |
| |
| When "smashing" the type, we preserve the objfile that the old type |
| pointed to, since we aren't changing where the type is actually |
| allocated. */ |
| |
| void |
| smash_to_memberptr_type (struct type *type, struct type *self_type, |
| struct type *to_type) |
| { |
| smash_type (type); |
| type->set_code (TYPE_CODE_MEMBERPTR); |
| type->set_target_type (to_type); |
| set_type_self_type (type, self_type); |
| /* Assume that a data member pointer is the same size as a normal |
| pointer. */ |
| type->set_length (gdbarch_ptr_bit (to_type->arch ()) / TARGET_CHAR_BIT); |
| } |
| |
| /* Smash TYPE to be a type of pointer to methods type TO_TYPE. |
| |
| When "smashing" the type, we preserve the objfile that the old type |
| pointed to, since we aren't changing where the type is actually |
| allocated. */ |
| |
| void |
| smash_to_methodptr_type (struct type *type, struct type *to_type) |
| { |
| smash_type (type); |
| type->set_code (TYPE_CODE_METHODPTR); |
| type->set_target_type (to_type); |
| set_type_self_type (type, TYPE_SELF_TYPE (to_type)); |
| type->set_length (cplus_method_ptr_size (to_type)); |
| } |
| |
| /* Smash TYPE to be a type of method of SELF_TYPE with type TO_TYPE. |
| METHOD just means `function that gets an extra "this" argument'. |
| |
| When "smashing" the type, we preserve the objfile that the old type |
| pointed to, since we aren't changing where the type is actually |
| allocated. */ |
| |
| void |
| smash_to_method_type (struct type *type, struct type *self_type, |
| struct type *to_type, struct field *args, |
| int nargs, int varargs) |
| { |
| smash_type (type); |
| type->set_code (TYPE_CODE_METHOD); |
| type->set_target_type (to_type); |
| set_type_self_type (type, self_type); |
| type->set_fields (args); |
| type->set_num_fields (nargs); |
| |
| if (varargs) |
| type->set_has_varargs (true); |
| |
| /* In practice, this is never needed. */ |
| type->set_length (1); |
| } |
| |
| /* A wrapper of TYPE_NAME which calls error if the type is anonymous. |
| Since GCC PR debug/47510, DWARF provides associated information to detect |
| the anonymous class linkage name from its typedef. |
| |
| Parameter TYPE should not yet have CHECK_TYPEDEF applied; this function |
| will apply it itself. */ |
| |
| const char * |
| type_name_or_error (struct type *type) |
| { |
| struct type *saved_type = type; |
| const char *name; |
| struct objfile *objfile; |
| |
| type = check_typedef (type); |
| |
| name = type->name (); |
| if (name != NULL) |
| return name; |
| |
| name = saved_type->name (); |
| objfile = saved_type->objfile_owner (); |
| error (_("Invalid anonymous type %s [in module %s], GCC PR debug/47510 bug?"), |
| name ? name : "<anonymous>", |
| objfile ? objfile_name (objfile) : "<arch>"); |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| struct type * |
| lookup_typename (const struct language_defn *language, |
| const char *name, |
| const struct block *block, int noerr) |
| { |
| struct symbol *sym; |
| |
| sym = lookup_symbol_in_language (name, block, SEARCH_TYPE_DOMAIN, |
| language->la_language, NULL).symbol; |
| if (sym != nullptr) |
| { |
| struct type *type = sym->type (); |
| /* Ensure the length of TYPE is valid. */ |
| check_typedef (type); |
| return type; |
| } |
| |
| if (noerr) |
| return NULL; |
| error (_("No type named %s."), name); |
| } |
| |
| struct type * |
| lookup_unsigned_typename (const struct language_defn *language, |
| const char *name) |
| { |
| std::string uns; |
| uns.reserve (strlen (name) + strlen ("unsigned ")); |
| uns = "unsigned "; |
| uns += name; |
| |
| return lookup_typename (language, uns.c_str (), NULL, 0); |
| } |
| |
| struct type * |
| lookup_signed_typename (const struct language_defn *language, const char *name) |
| { |
| /* In C and C++, "char" and "signed char" are distinct types. */ |
| if (streq (name, "char")) |
| name = "signed char"; |
| return lookup_typename (language, name, NULL, 0); |
| } |
| |
| /* Lookup a structure type named "struct NAME", |
| visible in lexical block BLOCK. */ |
| |
| struct type * |
| lookup_struct (const char *name, const struct block *block) |
| { |
| struct symbol *sym; |
| |
| sym = lookup_symbol (name, block, SEARCH_STRUCT_DOMAIN, 0).symbol; |
| |
| if (sym == NULL) |
| { |
| error (_("No struct type named %s."), name); |
| } |
| if (sym->type ()->code () != TYPE_CODE_STRUCT) |
| { |
| error (_("This context has class, union or enum %s, not a struct."), |
| name); |
| } |
| return (sym->type ()); |
| } |
| |
| /* Lookup a union type named "union NAME", |
| visible in lexical block BLOCK. */ |
| |
| struct type * |
| lookup_union (const char *name, const struct block *block) |
| { |
| struct symbol *sym; |
| struct type *t; |
| |
| sym = lookup_symbol (name, block, SEARCH_STRUCT_DOMAIN, 0).symbol; |
| |
| if (sym == NULL) |
| error (_("No union type named %s."), name); |
| |
| t = sym->type (); |
| |
| if (t->code () == TYPE_CODE_UNION) |
| return t; |
| |
| /* If we get here, it's not a union. */ |
| error (_("This context has class, struct or enum %s, not a union."), |
| name); |
| } |
| |
| /* Lookup an enum type named "enum NAME", |
| visible in lexical block BLOCK. */ |
| |
| struct type * |
| lookup_enum (const char *name, const struct block *block) |
| { |
| struct symbol *sym; |
| |
| sym = lookup_symbol (name, block, SEARCH_STRUCT_DOMAIN, 0).symbol; |
| if (sym == NULL) |
| { |
| error (_("No enum type named %s."), name); |
| } |
| if (sym->type ()->code () != TYPE_CODE_ENUM) |
| { |
| error (_("This context has class, struct or union %s, not an enum."), |
| name); |
| } |
| return (sym->type ()); |
| } |
| |
| /* Lookup a template type named "template NAME<TYPE>", |
| visible in lexical block BLOCK. */ |
| |
| struct type * |
| lookup_template_type (const char *name, struct type *type, |
| const struct block *block) |
| { |
| std::string nam; |
| nam.reserve (strlen (name) + strlen (type->name ()) + strlen ("< >")); |
| nam = name; |
| nam += "<"; |
| nam += type->name (); |
| nam += " >"; /* FIXME, extra space still introduced in gcc? */ |
| |
| symbol *sym = lookup_symbol (nam.c_str (), block, |
| SEARCH_STRUCT_DOMAIN, 0).symbol; |
| |
| if (sym == NULL) |
| { |
| error (_("No template type named %s."), name); |
| } |
| if (sym->type ()->code () != TYPE_CODE_STRUCT) |
| { |
| error (_("This context has class, union or enum %s, not a struct."), |
| name); |
| } |
| return (sym->type ()); |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| struct_elt |
| lookup_struct_elt (struct type *type, const char *name, int noerr) |
| { |
| int i; |
| |
| for (;;) |
| { |
| type = check_typedef (type); |
| if (type->code () != TYPE_CODE_PTR |
| && type->code () != TYPE_CODE_REF) |
| break; |
| type = type->target_type (); |
| } |
| |
| if (type->code () != TYPE_CODE_STRUCT |
| && type->code () != TYPE_CODE_UNION) |
| { |
| std::string type_name = type_to_string (type); |
| error (_("Type %s is not a structure or union type."), |
| type_name.c_str ()); |
| } |
| |
| for (i = type->num_fields () - 1; i >= TYPE_N_BASECLASSES (type); i--) |
| { |
| const char *t_field_name = type->field (i).name (); |
| |
| if (t_field_name && (strcmp_iw (t_field_name, name) == 0)) |
| { |
| return {&type->field (i), type->field (i).loc_bitpos ()}; |
| } |
| else if (!t_field_name || *t_field_name == '\0') |
| { |
| struct_elt elt |
| = lookup_struct_elt (type->field (i).type (), name, 1); |
| if (elt.field != NULL) |
| { |
| elt.offset += type->field (i).loc_bitpos (); |
| return elt; |
| } |
| } |
| } |
| |
| /* OK, it's not in this class. Recursively check the baseclasses. */ |
| for (i = TYPE_N_BASECLASSES (type) - 1; i >= 0; i--) |
| { |
| struct_elt elt = lookup_struct_elt (TYPE_BASECLASS (type, i), name, 1); |
| if (elt.field != NULL) |
| return elt; |
| } |
| |
| if (noerr) |
| return {nullptr, 0}; |
| |
| std::string type_name = type_to_string (type); |
| error (_("Type %s has no component named %s."), type_name.c_str (), name); |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| struct type * |
| lookup_struct_elt_type (struct type *type, const char *name, int noerr) |
| { |
| struct_elt elt = lookup_struct_elt (type, name, noerr); |
| if (elt.field != NULL) |
| return elt.field->type (); |
| else |
| return NULL; |
| } |
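| |
| /* Illustrative sketch (POINT_TYPE is an assumed "struct point { int x; |
| int y; }" type, or a pointer/reference to one): the type of member "y" |
| can be looked up with |
| |
| struct type *y_type = lookup_struct_elt_type (point_type, "y", 0); |
| |
| With NOERR == 0 a missing member is an error; with NOERR == 1 the |
| function returns NULL instead. */ |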
| |
| /* Return the largest number representable by unsigned integer type TYPE. */ |
| |
| ULONGEST |
| get_unsigned_type_max (struct type *type) |
| { |
| unsigned int n; |
| |
| type = check_typedef (type); |
| gdb_assert (type->code () == TYPE_CODE_INT && type->is_unsigned ()); |
| gdb_assert (type->length () <= sizeof (ULONGEST)); |
| |
| /* Written this way to avoid overflow. */ |
| n = type->length () * TARGET_CHAR_BIT; |
| return ((((ULONGEST) 1 << (n - 1)) - 1) << 1) | 1; |
| } |
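| |
| /* For example, for an 8-bit unsigned type (n == 8) the expression above |
| computes ((1 << 7) - 1) << 1 | 1 == (127 << 1) | 1 == 255, i.e. 2**8 - 1, |
| without ever shifting a ULONGEST by its full width. */ |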
| |
| /* Store in *MIN, *MAX the smallest and largest numbers representable by |
| signed integer type TYPE. */ |
| |
| void |
| get_signed_type_minmax (struct type *type, LONGEST *min, LONGEST *max) |
| { |
| unsigned int n; |
| |
| type = check_typedef (type); |
| gdb_assert (type->code () == TYPE_CODE_INT && !type->is_unsigned ()); |
| gdb_assert (type->length () <= sizeof (LONGEST)); |
| |
| n = type->length () * TARGET_CHAR_BIT; |
| *min = -((ULONGEST) 1 << (n - 1)); |
| *max = ((ULONGEST) 1 << (n - 1)) - 1; |
| } |
| |
| /* Return the largest value representable by pointer type TYPE. */ |
| |
| CORE_ADDR |
| get_pointer_type_max (struct type *type) |
| { |
| unsigned int n; |
| |
| type = check_typedef (type); |
| gdb_assert (type->code () == TYPE_CODE_PTR); |
| gdb_assert (type->length () <= sizeof (CORE_ADDR)); |
| |
| n = type->length () * TARGET_CHAR_BIT; |
| return ((((CORE_ADDR) 1 << (n - 1)) - 1) << 1) | 1; |
| } |
| |
| /* Internal routine called by TYPE_VPTR_FIELDNO to return the value of |
| cplus_stuff.vptr_fieldno. |
| |
| cplus_stuff is initialized to cplus_struct_default which does not |
| set vptr_fieldno to -1 for portability reasons (IWBN to use C99 |
| designated initializers). We cope with that here. */ |
| |
| int |
| internal_type_vptr_fieldno (struct type *type) |
| { |
| type = check_typedef (type); |
| gdb_assert (type->code () == TYPE_CODE_STRUCT |
| || type->code () == TYPE_CODE_UNION); |
| if (!HAVE_CPLUS_STRUCT (type)) |
| return -1; |
| return TYPE_RAW_CPLUS_SPECIFIC (type)->vptr_fieldno; |
| } |
| |
| /* Set the value of cplus_stuff.vptr_fieldno. */ |
| |
| void |
| set_type_vptr_fieldno (struct type *type, int fieldno) |
| { |
| type = check_typedef (type); |
| gdb_assert (type->code () == TYPE_CODE_STRUCT |
| || type->code () == TYPE_CODE_UNION); |
| if (!HAVE_CPLUS_STRUCT (type)) |
| ALLOCATE_CPLUS_STRUCT_TYPE (type); |
| TYPE_RAW_CPLUS_SPECIFIC (type)->vptr_fieldno = fieldno; |
| } |
| |
| /* Internal routine called by TYPE_VPTR_BASETYPE to return the value of |
| cplus_stuff.vptr_basetype. */ |
| |
| struct type * |
| internal_type_vptr_basetype (struct type *type) |
| { |
| type = check_typedef (type); |
| gdb_assert (type->code () == TYPE_CODE_STRUCT |
| || type->code () == TYPE_CODE_UNION); |
| gdb_assert (TYPE_SPECIFIC_FIELD (type) == TYPE_SPECIFIC_CPLUS_STUFF); |
| return TYPE_RAW_CPLUS_SPECIFIC (type)->vptr_basetype; |
| } |
| |
| /* Set the value of cplus_stuff.vptr_basetype. */ |
| |
| void |
| set_type_vptr_basetype (struct type *type, struct type *basetype) |
| { |
| type = check_typedef (type); |
| gdb_assert (type->code () == TYPE_CODE_STRUCT |
| || type->code () == TYPE_CODE_UNION); |
| if (!HAVE_CPLUS_STRUCT (type)) |
| ALLOCATE_CPLUS_STRUCT_TYPE (type); |
| TYPE_RAW_CPLUS_SPECIFIC (type)->vptr_basetype = basetype; |
| } |
| |
| /* Lookup the vptr basetype/fieldno values for TYPE. |
| If found, store vptr_basetype in *BASETYPEP if non-NULL, and return |
| vptr_fieldno. Also, if found and basetype is from the same objfile, |
| cache the results. |
| If not found, return -1 and ignore BASETYPEP. |
| Callers should be aware that in some cases (for example, |
| the type or one of its baseclasses is a stub type and we are |
| debugging a .o file, or the compiler uses DWARF-2 and is not GCC), |
| this function will not be able to find the |
| virtual function table pointer, and vptr_fieldno will remain -1 and |
| vptr_basetype will remain NULL or incomplete. */ |
| |
| int |
| get_vptr_fieldno (struct type *type, struct type **basetypep) |
| { |
| type = check_typedef (type); |
| |
| if (TYPE_VPTR_FIELDNO (type) < 0) |
| { |
| int i; |
| |
| /* We must start at zero in case the first (and only) baseclass |
| is virtual (and hence we cannot share the table pointer). */ |
| for (i = 0; i < TYPE_N_BASECLASSES (type); i++) |
| { |
| struct type *baseclass = check_typedef (TYPE_BASECLASS (type, i)); |
| int fieldno; |
| struct type *basetype; |
| |
| fieldno = get_vptr_fieldno (baseclass, &basetype); |
| if (fieldno >= 0) |
| { |
| /* If the type comes from a different objfile we can't cache |
| it, it may have a different lifetime. PR 2384 */ |
| if (type->objfile_owner () == basetype->objfile_owner ()) |
| { |
| set_type_vptr_fieldno (type, fieldno); |
| set_type_vptr_basetype (type, basetype); |
| } |
| if (basetypep) |
| *basetypep = basetype; |
| return fieldno; |
| } |
| } |
| |
| /* Not found. */ |
| return -1; |
| } |
| else |
| { |
| if (basetypep) |
| *basetypep = TYPE_VPTR_BASETYPE (type); |
| return TYPE_VPTR_FIELDNO (type); |
| } |
| } |
| |
| static void |
| stub_noname_complaint (void) |
| { |
| complaint (_("stub type has NULL name")); |
| } |
| |
| /* Return nonzero if TYPE has a DYN_PROP_BYTE_STRIDE dynamic property |
| attached to it, and that property has a non-constant value. */ |
| |
| static int |
| array_type_has_dynamic_stride (struct type *type) |
| { |
| struct dynamic_prop *prop = type->dyn_prop (DYN_PROP_BYTE_STRIDE); |
| |
| return prop != nullptr && !prop->is_constant (); |
| } |
| |
| /* Worker for is_dynamic_type. */ |
| |
| static bool |
| is_dynamic_type_internal (struct type *type, bool top_level) |
| { |
| type = check_typedef (type); |
| |
| /* We only want to recognize references and pointers at the outermost |
| level. */ |
| if (top_level && type->is_pointer_or_reference ()) |
| type = check_typedef (type->target_type ()); |
| |
| /* Types that have a dynamic TYPE_DATA_LOCATION are considered |
| dynamic, even if the type itself is statically defined. |
| From a user's point of view, this may appear counter-intuitive; |
| but it makes sense in this context, because the point is to determine |
| whether any part of the type needs to be resolved before it can |
| be exploited. */ |
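| /* A typical case is a Fortran array whose DWARF description carries |
| a DW_AT_data_location expression: the descriptor has to be read at |
| run time before the array contents can be located. */ |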
| if (TYPE_DATA_LOCATION (type) != NULL |
| && (TYPE_DATA_LOCATION_KIND (type) == PROP_LOCEXPR |
| || TYPE_DATA_LOCATION_KIND (type) == PROP_LOCLIST)) |
| return true; |
| |
| if (TYPE_ASSOCIATED_PROP (type)) |
| return true; |
| |
| if (TYPE_ALLOCATED_PROP (type)) |
| return true; |
| |
| struct dynamic_prop *prop = type->dyn_prop (DYN_PROP_VARIANT_PARTS); |
| if (prop != nullptr && prop->kind () != PROP_TYPE) |
| return true; |
| |
| if (TYPE_HAS_DYNAMIC_LENGTH (type)) |
| return true; |
| |
| switch (type->code ()) |
| { |
| case TYPE_CODE_RANGE: |
| { |
| /* A range type is obviously dynamic if it has at least one |
| dynamic bound. But also consider the range type to be |
| dynamic when its subtype is dynamic, even if the bounds |
| of the range type are static. It allows us to assume that |
| the subtype of a static range type is also static. */ |
| return (!has_static_range (type->bounds ()) |
| || is_dynamic_type_internal (type->target_type (), false)); |
| } |
| |
| case TYPE_CODE_STRING: |
| /* Strings are very much like an array of characters, and can be |
| treated as one here. */ |
| case TYPE_CODE_ARRAY: |
| { |
| gdb_assert (type->num_fields () == 1); |
| |
| /* The array is dynamic if either the bounds are dynamic... */ |
| if (is_dynamic_type_internal (type->index_type (), false)) |
| return true; |
| /* ... or the elements it contains have dynamic contents... */ |
| if (is_dynamic_type_internal (type->target_type (), false)) |
| return true; |
| /* ... or if it has a dynamic stride... */ |
| if (array_type_has_dynamic_stride (type)) |
| return true; |
| return false; |
| } |
| |
| case TYPE_CODE_STRUCT: |
| case TYPE_CODE_UNION: |
| { |
| int i; |
| |
| bool is_cplus = HAVE_CPLUS_STRUCT (type); |
| |
| for (i = 0; i < type->num_fields (); ++i) |
| { |
| /* Static fields can be ignored here. */ |
| if (type->field (i).is_static ()) |
| continue; |
| /* If the field has dynamic type, then so does TYPE. */ |
| if (is_dynamic_type_internal (type->field (i).type (), false)) |
| return true; |
| /* If the field is at a fixed offset, then it is not |
| dynamic. */ |
| if (type->field (i).loc_kind () != FIELD_LOC_KIND_DWARF_BLOCK) |
| continue; |
| /* Do not consider C++ virtual base types to be dynamic |
| due to the field's offset being dynamic; these are |
| handled via other means. */ |
| if (is_cplus && BASETYPE_VIA_VIRTUAL (type, i)) |
| continue; |
| return true; |
| } |
| } |
| break; |
| } |
| |
| return false; |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| bool |
| is_dynamic_type (struct type *type) |
| { |
| return is_dynamic_type_internal (type, true); |
| } |
| |
| static struct type *resolve_dynamic_type_internal |
| (struct type *type, struct property_addr_info *addr_stack, |
| const frame_info_ptr &frame, bool top_level); |
| |
| /* Given a dynamic range type (dyn_range_type) and a stack of |
| struct property_addr_info elements, return a static version |
| of that type. |
| |
| When RESOLVE_P is true, the returned static range is created by |
| actually evaluating any dynamic properties within the range type; |
| when RESOLVE_P is false, the returned static range has all of its |
| bounds and stride information set to undefined. The RESOLVE_P == |
| false case is used when evaluating a dynamic array that is not |
| allocated or not associated, i.e. when the bounds information might |
| not be initialized yet. |
| |
| RANK is the array rank for which we are resolving this range, and is |
| a zero-based count. The rank should never be negative. */ |
| |
| static struct type * |
| resolve_dynamic_range (struct type *dyn_range_type, |
| struct property_addr_info *addr_stack, |
| const frame_info_ptr &frame, |
| int rank, bool resolve_p = true) |
| { |
| CORE_ADDR value; |
| struct type *static_range_type, *static_target_type; |
| struct dynamic_prop low_bound, high_bound, stride; |
| |
| gdb_assert (dyn_range_type->code () == TYPE_CODE_RANGE); |
| gdb_assert (rank >= 0); |
| |
| const struct dynamic_prop *prop = &dyn_range_type->bounds ()->low; |
| if (resolve_p) |
| { |
| if (dwarf2_evaluate_property (prop, frame, addr_stack, &value, |
| { (CORE_ADDR) rank })) |
| low_bound.set_const_val (value); |
| else if (prop->kind () == PROP_UNDEFINED) |
| low_bound.set_undefined (); |
| else |
| low_bound.set_optimized_out (); |
| } |
| else |
| low_bound.set_undefined (); |
| |
| prop = &dyn_range_type->bounds ()->high; |
| if (resolve_p) |
| { |
| if (dwarf2_evaluate_property (prop, frame, addr_stack, &value, |
| { (CORE_ADDR) rank })) |
| { |
| high_bound.set_const_val (value); |
| |
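| /* The DWARF bound may have been given as an element count |
| (DW_AT_count) rather than an inclusive upper bound; convert it. |
| E.g. a lower bound of 1 and a count of 10 give an upper bound |
| of 10. */ |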
| if (dyn_range_type->bounds ()->flag_upper_bound_is_count) |
| high_bound.set_const_val |
| (low_bound.const_val () + high_bound.const_val () - 1); |
| } |
| else if (prop->kind () == PROP_UNDEFINED) |
| high_bound.set_undefined (); |
| else |
| high_bound.set_optimized_out (); |
| } |
| else |
| high_bound.set_undefined (); |
| |
| bool byte_stride_p = dyn_range_type->bounds ()->flag_is_byte_stride; |
| prop = &dyn_range_type->bounds ()->stride; |
| if (resolve_p && dwarf2_evaluate_property (prop, frame, addr_stack, &value, |
| { (CORE_ADDR) rank })) |
| { |
| stride.set_const_val (value); |
| |
| /* If we have a bit stride that is not an exact number of bytes then |
| I really don't think this is going to work with current GDB; the |
| array indexing code in GDB seems to be pretty heavily tied to byte |
| offsets right now. Assuming 8 bits in a byte. */ |
| struct gdbarch *gdbarch = dyn_range_type->arch (); |
| int unit_size = gdbarch_addressable_memory_unit_size (gdbarch); |
| if (!byte_stride_p && (value % (unit_size * 8)) != 0) |
| error (_("bit strides that are not a multiple of the byte size " |
| "are currently not supported")); |
| } |
| else |
| { |
| stride.set_undefined (); |
| byte_stride_p = true; |
| } |
| |
| static_target_type |
| = resolve_dynamic_type_internal (dyn_range_type->target_type (), |
| addr_stack, frame, false); |
| LONGEST bias = dyn_range_type->bounds ()->bias; |
| type_allocator alloc (dyn_range_type); |
| static_range_type = create_range_type_with_stride |
| (alloc, static_target_type, |
| &low_bound, &high_bound, bias, &stride, byte_stride_p); |
| static_range_type->set_name (dyn_range_type->name ()); |
| static_range_type->bounds ()->flag_bound_evaluated = 1; |
| return static_range_type; |
| } |
| |
| /* Helper function for resolve_dynamic_array_or_string. This function |
| resolves the properties for a single array at RANK within a nested array |
| of arrays structure. The RANK value is greater than or equal to 0, and |
| starts at its maximum value and goes down by 1 for each recursive call |
| to this function. So, for a 3-dimensional array, the first call to this |
| function has RANK == 2, then we call ourselves recursively with RANK == |
| 1, then again with RANK == 0, and at that point we should return. |
| |
| TYPE is updated as the dynamic properties are resolved, and so, should |
| be a copy of the dynamic type, rather than the original dynamic type |
| itself. |
| |
| ADDR_STACK is a stack of struct property_addr_info to be used if needed |
| during the dynamic resolution. |
| |
| When RESOLVE_P is true then the dynamic properties of TYPE are |
| evaluated, otherwise the dynamic properties of TYPE are not evaluated, |
| instead we assume the array is not allocated/associated yet. */ |
| |
| static struct type * |
| resolve_dynamic_array_or_string_1 (struct type *type, |
| struct property_addr_info *addr_stack, |
| const frame_info_ptr &frame, |
| int rank, bool resolve_p) |
| { |
| CORE_ADDR value; |
| struct type *elt_type; |
| struct type *range_type; |
| struct type *ary_dim; |
| struct dynamic_prop *prop; |
| unsigned int bit_stride = 0; |
| |
| /* For dynamic type resolution strings can be treated like arrays of |
| characters. */ |
| gdb_assert (type->code () == TYPE_CODE_ARRAY |
| || type->code () == TYPE_CODE_STRING); |
| |
| /* As the rank is a zero-based count we expect this to never be |
| negative. */ |
| gdb_assert (rank >= 0); |
| |
| /* Resolve the allocated and associated properties before doing anything |
| else. If an array is not allocated or not associated then (at least |
| for Fortran) there is no guarantee that the data to define the upper |
| bound, lower bound, or stride will be correct. If RESOLVE_P is |
| already false at this point then this is not the first dimension of |
| the array and a more outer dimension has already marked this array as |
| not allocated/associated; as such we just ignore this property. This |
| is fine as GDB only checks the allocated/associated on the outermost |
| dimension of the array. */ |
| prop = TYPE_ALLOCATED_PROP (type); |
| if (prop != NULL && resolve_p |
| && dwarf2_evaluate_property (prop, frame, addr_stack, &value)) |
| { |
| prop->set_const_val (value); |
| if (value == 0) |
| resolve_p = false; |
| } |
| |
| prop = TYPE_ASSOCIATED_PROP (type); |
| if (prop != NULL && resolve_p |
| && dwarf2_evaluate_property (prop, frame, addr_stack, &value)) |
| { |
| prop->set_const_val (value); |
| if (value == 0) |
| resolve_p = false; |
| } |
| |
| range_type = check_typedef (type->index_type ()); |
| range_type |
| = resolve_dynamic_range (range_type, addr_stack, frame, rank, resolve_p); |
| |
| ary_dim = check_typedef (type->target_type ()); |
| if (ary_dim != NULL && ary_dim->code () == TYPE_CODE_ARRAY) |
| { |
| ary_dim = copy_type (ary_dim); |
| elt_type = resolve_dynamic_array_or_string_1 (ary_dim, addr_stack, |
| frame, rank - 1, |
| resolve_p); |
| } |
| else |
| elt_type = type->target_type (); |
| |
| prop = type->dyn_prop (DYN_PROP_BYTE_STRIDE); |
| if (prop != NULL && resolve_p) |
| { |
| if (dwarf2_evaluate_property (prop, frame, addr_stack, &value)) |
| { |
| type->remove_dyn_prop (DYN_PROP_BYTE_STRIDE); |
| bit_stride = (unsigned int) (value * 8); |
| } |
| else |
| { |
| /* Could be a bug in our code, but it could also happen |
| if the DWARF info is not correct. Issue a warning, |
| and assume no byte/bit stride (leave bit_stride = 0). */ |
| warning (_("cannot determine array stride for type %s"), |
| type->name () ? type->name () : "<no name>"); |
| } |
| } |
| else |
| bit_stride = type->field (0).bitsize (); |
| |
| type_allocator alloc (type, type_allocator::SMASH); |
| return create_array_type_with_stride (alloc, elt_type, range_type, NULL, |
| bit_stride); |
| } |
| |
| /* Resolve an array or string type with dynamic properties, return a new |
| type with the dynamic properties resolved to actual values. The |
| ADDR_STACK represents the location of the object being resolved. */ |
| |
| static struct type * |
| resolve_dynamic_array_or_string (struct type *type, |
| struct property_addr_info *addr_stack, |
| const frame_info_ptr &frame) |
| { |
| CORE_ADDR value; |
| int rank = 0; |
| |
| /* For dynamic type resolution strings can be treated like arrays of |
| characters. */ |
| gdb_assert (type->code () == TYPE_CODE_ARRAY |
| || type->code () == TYPE_CODE_STRING); |
| |
| type = copy_type (type); |
| |
| /* Resolve the rank property to get rank value. */ |
| struct dynamic_prop *prop = TYPE_RANK_PROP (type); |
| if (dwarf2_evaluate_property (prop, frame, addr_stack, &value)) |
| { |
| prop->set_const_val (value); |
| rank = value; |
| |
| if (rank == 0) |
| { |
| /* Rank is zero if a variable is passed as an argument to a |
| function. In this case the resolved type should not be an |
| array, but should instead be that of an array element. */ |
| struct type *dynamic_array_type = type; |
| type = copy_type (dynamic_array_type->target_type ()); |
| struct dynamic_prop_list *prop_list |
| = TYPE_MAIN_TYPE (dynamic_array_type)->dyn_prop_list; |
| if (prop_list != nullptr) |
| { |
| struct obstack *obstack |
| = &type->objfile_owner ()->objfile_obstack; |
| TYPE_MAIN_TYPE (type)->dyn_prop_list |
| = copy_dynamic_prop_list (obstack, prop_list); |
| } |
| return type; |
| } |
| else if (type->code () == TYPE_CODE_STRING && rank != 1) |
| { |
| /* What would this even mean? A string with a dynamic rank |
| greater than 1. */ |
| error (_("unable to handle string with dynamic rank greater than 1")); |
| } |
| else if (rank > 1) |
| { |
| /* Arrays with dynamic rank are initially just an array type |
| with a target type that is the array element. |
| |
| However, now that we know the rank of the array, we need to build |
| the array of arrays structure that GDB expects, that is we |
| need an array type that has a target which is an array type, |
| and so on, until eventually, we have the element type at the |
| end of the chain. Create all the additional array types here |
| by copying the top level array type. */ |
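| /* For example, with a rank of 3 and element type ELT, the loop |
| below turns the single array type into ARRAY of ARRAY of ARRAY |
| of ELT. */ |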
| struct type *element_type = type->target_type (); |
| struct type *rank_type = type; |
| for (int i = 1; i < rank; i++) |
| { |
| rank_type->set_target_type (copy_type (rank_type)); |
| rank_type = rank_type->target_type (); |
| } |
| rank_type->set_target_type (element_type); |
| } |
| } |
| else |
| { |
| rank = 1; |
| |
| for (struct type *tmp_type = check_typedef (type->target_type ()); |
| tmp_type->code () == TYPE_CODE_ARRAY; |
| tmp_type = check_typedef (tmp_type->target_type ())) |
| ++rank; |
| } |
| |
| /* The rank calculated above is actually a count of the number of |
| dimensions. However, when resolving the type of each individual |
| array dimension we need a rank "offset", e.g. an array with a rank |
| count of 1 (calculated above) uses the rank offset 0 to resolve the |
| details of its first dimension. As a result, reduce the rank by 1 |
| here. */ |
| --rank; |
| |
| return resolve_dynamic_array_or_string_1 (type, addr_stack, frame, rank, |
| true); |
| } |
| |
| /* Resolve dynamic bounds of members of the union TYPE to static |
| bounds. ADDR_STACK is a stack of struct property_addr_info |
| to be used if needed during the dynamic resolution. */ |
| |
| static struct type * |
| resolve_dynamic_union (struct type *type, |
| struct property_addr_info *addr_stack, |
| const frame_info_ptr &frame) |
| { |
| struct type *resolved_type; |
| int i; |
| unsigned int max_len = 0; |
| |
| gdb_assert (type->code () == TYPE_CODE_UNION); |
| |
| resolved_type = copy_type (type); |
| resolved_type->copy_fields (type); |
| for (i = 0; i < resolved_type->num_fields (); ++i) |
| { |
| struct type *t; |
| |
| if (type->field (i).is_static ()) |
| continue; |
| |
| t = resolve_dynamic_type_internal (resolved_type->field (i).type (), |
| addr_stack, frame, false); |
| resolved_type->field (i).set_type (t); |
| |
| struct type *real_type = check_typedef (t); |
| if (real_type->length () > max_len) |
| max_len = real_type->length (); |
| } |
| |
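| /* A union is as large as its largest resolved member. */ |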
| resolved_type->set_length (max_len); |
| return resolved_type; |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| bool |
| variant::matches (ULONGEST value, bool is_unsigned) const |
| { |
| for (const discriminant_range &range : discriminants) |
| if (range.contains (value, is_unsigned)) |
| return true; |
| return false; |
| } |
| |
| static void |
| compute_variant_fields_inner (struct type *type, |
| struct property_addr_info *addr_stack, |
| const variant_part &part, |
| std::vector<bool> &flags); |
| |
| /* A helper function to determine which variant fields will be active. |
| This handles both the variant's direct fields, and any variant |
| parts embedded in this variant. TYPE is the type we're examining. |
| ADDR_STACK holds information about the concrete object. VARIANT is |
| the current variant to be handled. FLAGS is where the results are |
| stored -- this function sets the Nth element in FLAGS if the |
| corresponding field is enabled. ENABLED is whether this variant is |
| enabled or not. */ |
| |
| static void |
| compute_variant_fields_recurse (struct type *type, |
| struct property_addr_info *addr_stack, |
| const variant &variant, |
| std::vector<bool> &flags, |
| bool enabled) |
| { |
| for (int field = variant.first_field; field < variant.last_field; ++field) |
| flags[field] = enabled; |
| |
| for (const variant_part &new_part : variant.parts) |
| { |
| if (enabled) |
| compute_variant_fields_inner (type, addr_stack, new_part, flags); |
| else |
| { |
| for (const auto &sub_variant : new_part.variants) |
| compute_variant_fields_recurse (type, addr_stack, sub_variant, |
| flags, enabled); |
| } |
| } |
| } |
| |
| /* A helper function to determine which variant fields will be active. |
| This evaluates the discriminant, decides which variant (if any) is |
| active, and then updates FLAGS to reflect which fields should be |
| available. TYPE is the type we're examining. ADDR_STACK holds |
| information about the concrete object. VARIANT is the current |
| variant to be handled. FLAGS is where the results are stored -- |
| this function sets the Nth element in FLAGS if the corresponding |
| field is enabled. */ |
| |
| static void |
| compute_variant_fields_inner (struct type *type, |
| struct property_addr_info *addr_stack, |
| const variant_part &part, |
| std::vector<bool> &flags) |
| { |
| /* Evaluate the discriminant. */ |
| std::optional<ULONGEST> discr_value; |
| if (part.discriminant_index != -1) |
| { |
| int idx = part.discriminant_index; |
| |
| if (type->field (idx).loc_kind () != FIELD_LOC_KIND_BITPOS) |
| error (_("Cannot determine struct field location" |
| " (invalid location kind)")); |
| |
| if (addr_stack->valaddr.data () != NULL) |
| discr_value = unpack_field_as_long (type, addr_stack->valaddr.data (), |
| idx); |
| else |
| { |
| CORE_ADDR addr = (addr_stack->addr |
| + (type->field (idx).loc_bitpos () |
| / TARGET_CHAR_BIT)); |
| |
| LONGEST bitsize = type->field (idx).bitsize (); |
| LONGEST size = bitsize / 8; |
| if (size == 0) |
| size = type->field (idx).type ()->length (); |
| |
| gdb_byte bits[sizeof (ULONGEST)]; |
| read_memory (addr, bits, size); |
| |
| LONGEST bitpos = (type->field (idx).loc_bitpos () |
| % TARGET_CHAR_BIT); |
| |
| discr_value = unpack_bits_as_long (type->field (idx).type (), |
| bits, bitpos, bitsize); |
| } |
| } |
| |
| /* Go through each variant and see which applies. */ |
| const variant *default_variant = nullptr; |
| const variant *applied_variant = nullptr; |
| for (const auto &variant : part.variants) |
| { |
| if (variant.is_default ()) |
| default_variant = &variant; |
| else if (discr_value.has_value () |
| && variant.matches (*discr_value, part.is_unsigned)) |
| { |
| applied_variant = &variant; |
| break; |
| } |
| } |
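| /* If no variant matched the discriminant value, fall back to the |
| default variant (the one without a discriminant list), if any. */ |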
| if (applied_variant == nullptr) |
| applied_variant = default_variant; |
| |
| for (const auto &variant : part.variants) |
| compute_variant_fields_recurse (type, addr_stack, variant, |
| flags, applied_variant == &variant); |
| } |
| |
| /* Determine which variant fields are available in TYPE. The enabled |
| fields are stored in RESOLVED_TYPE. ADDR_STACK holds information |
| about the concrete object. PARTS describes the top-level variant |
| parts for this type. */ |
| |
| static void |
| compute_variant_fields (struct type *type, |
| struct type *resolved_type, |
| struct property_addr_info *addr_stack, |
| const gdb::array_view<variant_part> &parts) |
| { |
| /* Assume all fields are included by default. */ |
| std::vector<bool> flags (resolved_type->num_fields (), true); |
| |
| /* Now disable fields based on the variants that control them. */ |
| for (const auto &part : parts) |
| compute_variant_fields_inner (type, addr_stack, part, flags); |
| |
| unsigned int nfields = std::count (flags.begin (), flags.end (), true); |
| /* No need to zero-initialize the newly allocated fields, they'll be |
| initialized by the copy in the loop below. */ |
| resolved_type->alloc_fields (nfields, false); |
| |
| int out = 0; |
| for (int i = 0; i < type->num_fields (); ++i) |
| { |
| if (!flags[i]) |
| continue; |
| |
| resolved_type->field (out) = type->field (i); |
| ++out; |
| } |
| } |
| |
| /* Resolve dynamic bounds of members of the struct TYPE to static |
| bounds. ADDR_STACK is a stack of struct property_addr_info to |
| be used if needed during the dynamic resolution. */ |
| |
| static struct type * |
| resolve_dynamic_struct (struct type *type, |
| struct property_addr_info *addr_stack, |
| const frame_info_ptr &frame) |
| { |
| struct type *resolved_type; |
| int i; |
| unsigned resolved_type_bit_length = 0; |
| |
| gdb_assert (type->code () == TYPE_CODE_STRUCT); |
| |
| resolved_type = copy_type (type); |
| |
| dynamic_prop *variant_prop = resolved_type->dyn_prop (DYN_PROP_VARIANT_PARTS); |
| if (variant_prop != nullptr && variant_prop->kind () == PROP_VARIANT_PARTS) |
| { |
| compute_variant_fields (type, resolved_type, addr_stack, |
| *variant_prop->variant_parts ()); |
| /* We want to leave the property attached, so that the Rust code |
| can tell whether the type was originally an enum. */ |
| variant_prop->set_original_type (type); |
| } |
| else |
| { |
| resolved_type->copy_fields (type); |
| } |
| |
| for (i = 0; i < resolved_type->num_fields (); ++i) |
| { |
| unsigned new_bit_length; |
| struct property_addr_info pinfo; |
| |
| if (resolved_type->field (i).is_static ()) |
| continue; |
| |
| if (resolved_type->field (i).loc_kind () == FIELD_LOC_KIND_DWARF_BLOCK) |
| { |
| struct dwarf2_property_baton baton; |
| baton.property_type |
| = lookup_pointer_type (resolved_type->field (i).type ()); |
| baton.locexpr = *resolved_type->field (i).loc_dwarf_block (); |
| |
| struct dynamic_prop prop; |
| prop.set_locexpr (&baton); |
| |
| CORE_ADDR addr; |
| if (dwarf2_evaluate_property (&prop, frame, addr_stack, &addr, |
| {addr_stack->addr})) |
| resolved_type->field (i).set_loc_bitpos |
| (TARGET_CHAR_BIT * (addr - addr_stack->addr)); |
| } |
| |
| /* As we know this field is not a static field, the field's |
| field_loc_kind should be FIELD_LOC_KIND_BITPOS. Verify |
| this is the case, but only trigger a simple error rather |
| than an internal error if that fails. While failing |
| that verification indicates a bug in our code, the error |
| is not severe enough to suggest that the user stop their |
| debugging session because of it. */ |
| if (resolved_type->field (i).loc_kind () != FIELD_LOC_KIND_BITPOS) |
| error (_("Cannot determine struct field location" |
| " (invalid location kind)")); |
| |
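| /* Push this field's location onto the address stack so that any |
| dynamic properties of the field's own type (bounds, data location, |
| etc.) are evaluated relative to the field rather than to the |
| enclosing structure. */ |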
| pinfo.type = check_typedef (resolved_type->field (i).type ()); |
| size_t offset = resolved_type->field (i).loc_bitpos () / TARGET_CHAR_BIT; |
| pinfo.valaddr = addr_stack->valaddr; |
| if (!pinfo.valaddr.empty ()) |
| pinfo.valaddr = pinfo.valaddr.slice (offset); |
| pinfo.addr = addr_stack->addr + offset; |
| pinfo.next = addr_stack; |
| |
| resolved_type->field (i).set_type |
| (resolve_dynamic_type_internal (resolved_type->field (i).type (), |
| &pinfo, frame, false)); |
| gdb_assert (resolved_type->field (i).loc_kind () |
| == FIELD_LOC_KIND_BITPOS); |
| |
| new_bit_length = resolved_type->field (i).loc_bitpos (); |
| if (resolved_type->field (i).bitsize () != 0) |
| new_bit_length += resolved_type->field (i).bitsize (); |
| else |
| { |
| struct type *real_type |
| = check_typedef (resolved_type->field (i).type ()); |
| |
| new_bit_length += (real_type->length () * TARGET_CHAR_BIT); |
| } |
| |
| /* Normally, we would use the position and size of the last field |
| to determine the size of the enclosing structure. But GCC seems |
| to be encoding the position of some fields incorrectly when |
| the struct contains a dynamic field that is not placed last. |
| So we compute the struct size based on the field that has |
| the highest position + size - probably the best we can do. */ |
| if (new_bit_length > resolved_type_bit_length) |
| resolved_type_bit_length = new_bit_length; |
| } |
| |
| /* The length of a type won't change for Fortran, but it does for C and Ada. |
| For Fortran the size of dynamic fields might change over time but not the |
| type length of the structure. If we adapt it, we run into problems |
| when calculating the element offset for arrays of structs. */ |
| if (current_language->la_language != language_fortran) |
| resolved_type->set_length ((resolved_type_bit_length + TARGET_CHAR_BIT - 1) |
| / TARGET_CHAR_BIT); |
| |
| /* The Ada language uses this field as a cache for static fixed types: reset |
| it as RESOLVED_TYPE must have its own static fixed type. */ |
| resolved_type->set_target_type (nullptr); |
| |
| return resolved_type; |
| } |
| |
| /* Worker for resolve_dynamic_type. */ |
| |
| static struct type * |
| resolve_dynamic_type_internal (struct type *type, |
| struct property_addr_info *addr_stack, |
| const frame_info_ptr &frame, |
| bool top_level) |
| { |
| struct type *real_type = check_typedef (type); |
| struct type *resolved_type = nullptr; |
| struct dynamic_prop *prop; |
| CORE_ADDR value; |
| |
| if (!is_dynamic_type_internal (real_type, top_level)) |
| return type; |
| |
| std::optional<CORE_ADDR> type_length; |
| prop = TYPE_DYNAMIC_LENGTH (type); |
| if (prop != NULL |
| && dwarf2_evaluate_property (prop, frame, addr_stack, &value)) |
| type_length = value; |
| |
| if (type->code () == TYPE_CODE_TYPEDEF) |
| { |
| resolved_type = copy_type (type); |
| resolved_type->set_target_type |
| (resolve_dynamic_type_internal (type->target_type (), addr_stack, |
| frame, top_level)); |
| } |
| else |
| { |
| /* Before trying to resolve TYPE, make sure it is not a stub. */ |
| type = real_type; |
| |
| switch (type->code ()) |
| { |
| case TYPE_CODE_REF: |
| case TYPE_CODE_PTR: |
| case TYPE_CODE_RVALUE_REF: |
| { |
| struct property_addr_info pinfo; |
| |
| pinfo.type = check_typedef (type->target_type ()); |
| pinfo.valaddr = {}; |
| if (addr_stack->valaddr.data () != NULL) |
| pinfo.addr = extract_typed_address (addr_stack->valaddr.data (), |
| type); |
| else |
| pinfo.addr = read_memory_typed_address (addr_stack->addr, type); |
| pinfo.next = addr_stack; |
| |
| /* Special case a NULL pointer here -- we don't want to |
| dereference it. */ |
| if (pinfo.addr != 0) |
| { |
| resolved_type = copy_type (type); |
| resolved_type->set_target_type |
| (resolve_dynamic_type_internal (type->target_type (), |
| &pinfo, frame, true)); |
| } |
| break; |
| } |
| |
| case TYPE_CODE_STRING: |
| /* Strings are very much like an array of characters, and can be |
| treated as one here. */ |
| case TYPE_CODE_ARRAY: |
| resolved_type = resolve_dynamic_array_or_string (type, addr_stack, |
| frame); |
| break; |
| |
| case TYPE_CODE_RANGE: |
| /* Pass 0 for the rank value here, which indicates this is a |
| range for the first rank of an array. The assumption is that |
| this rank value is not actually required for the resolution of |
| the dynamic range; otherwise we'd be resolving this range |
| within the context of a dynamic array. */ |
| resolved_type = resolve_dynamic_range (type, addr_stack, frame, 0); |
| break; |
| |
| case TYPE_CODE_UNION: |
| resolved_type = resolve_dynamic_union (type, addr_stack, frame); |
| break; |
| |
| case TYPE_CODE_STRUCT: |
| resolved_type = resolve_dynamic_struct (type, addr_stack, frame); |
| break; |
| } |
| } |
| |
| if (resolved_type == nullptr) |
| return type; |
| |
| if (type_length.has_value ()) |
| { |
| resolved_type->set_length (*type_length); |
| resolved_type->remove_dyn_prop (DYN_PROP_BYTE_SIZE); |
| } |
| |
| /* Resolve data_location attribute. */ |
| prop = TYPE_DATA_LOCATION (resolved_type); |
| if (prop != NULL |
| && dwarf2_evaluate_property (prop, frame, addr_stack, &value)) |
| { |
| /* Start of Fortran hack. See comment in f-lang.h for what is going |
| on here. */ |
| if (current_language->la_language == language_fortran |
| && resolved_type->code () == TYPE_CODE_ARRAY) |
| value = fortran_adjust_dynamic_array_base_address_hack (resolved_type, |
| value); |
| /* End of Fortran hack. */ |
| prop->set_const_val (value); |
| } |
| |
| return resolved_type; |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| struct type * |
| resolve_dynamic_type (struct type *type, |
| gdb::array_view<const gdb_byte> valaddr, |
| CORE_ADDR addr, |
| const frame_info_ptr *in_frame) |
| { |
| struct property_addr_info pinfo |
| = {check_typedef (type), valaddr, addr, NULL}; |
| |
| frame_info_ptr frame; |
| if (in_frame != nullptr) |
| frame = *in_frame; |
| |
| return resolve_dynamic_type_internal (type, &pinfo, frame, true); |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| dynamic_prop * |
| type::dyn_prop (dynamic_prop_node_kind prop_kind) const |
| { |
| dynamic_prop_list *node = this->main_type->dyn_prop_list; |
| |
| while (node != NULL) |
| { |
| if (node->prop_kind == prop_kind) |
| return &node->prop; |
| node = node->next; |
| } |
| return NULL; |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| void |
| type::add_dyn_prop (dynamic_prop_node_kind prop_kind, dynamic_prop prop) |
| { |
| struct dynamic_prop_list *temp; |
| |
| gdb_assert (this->is_objfile_owned ()); |
| |
| temp = XOBNEW (&this->objfile_owner ()->objfile_obstack, |
| struct dynamic_prop_list); |
| temp->prop_kind = prop_kind; |
| temp->prop = prop; |
| temp->next = this->main_type->dyn_prop_list; |
| |
| this->main_type->dyn_prop_list = temp; |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| void |
| type::remove_dyn_prop (dynamic_prop_node_kind kind) |
| { |
| struct dynamic_prop_list *prev_node, *curr_node; |
| |
| curr_node = this->main_type->dyn_prop_list; |
| prev_node = NULL; |
| |
| while (NULL != curr_node) |
| { |
| if (curr_node->prop_kind == kind) |
| { |
| /* Update the linked list but don't free anything. |
| The property was allocated on obstack and it is not known |
| if we are on top of it. Nevertheless, everything is released |
| when the complete obstack is freed. */ |
| if (NULL == prev_node) |
| this->main_type->dyn_prop_list = curr_node->next; |
| else |
| prev_node->next = curr_node->next; |
| |
| return; |
| } |
| |
| prev_node = curr_node; |
| curr_node = curr_node->next; |
| } |
| } |
| |
| /* Find the real type of TYPE. This function returns the real type, |
| after removing all layers of typedefs, and completing opaque or stub |
| types. Completion changes the TYPE argument, but stripping of |
| typedefs does not. |
| |
| Instance flags (e.g. const/volatile) are preserved as typedefs are |
| stripped. If necessary a new qualified form of the underlying type |
| is created. |
| |
| NOTE: This will return a typedef if type::target_type for the typedef has |
| not been computed and we're either in the middle of reading symbols, or |
| there was no name for the typedef in the debug info. |
| |
| NOTE: Lookup of opaque types can throw errors for invalid symbol files. |
| QUITs in the symbol reading code can also throw. |
| Thus this function can throw an exception. |
| |
| If TYPE is a TYPE_CODE_TYPEDEF, its length is updated to the length of |
| the target type. |
| |
| If this is a stubbed struct (i.e. declared as struct foo *), see if |
| we can find a full definition in some other file. If so, copy this |
| definition, so we can use it in future. There used to be a comment |
| (but not any code) that if we don't find a full definition, we'd |
| set a flag so we don't spend time in the future checking the same |
| type. That would be a mistake, though--we might load in more |
| symbols which contain a full definition for the type. */ |
| |
| struct type * |
| check_typedef (struct type *type) |
| { |
| struct type *orig_type = type; |
| |
| gdb_assert (type); |
| |
| /* While we're removing typedefs, we don't want to lose qualifiers. |
| E.g., const/volatile. */ |
| type_instance_flags instance_flags = type->instance_flags (); |
| |
| while (type->code () == TYPE_CODE_TYPEDEF) |
| { |
| if (!type->target_type ()) |
| { |
| const char *name; |
| struct symbol *sym; |
| |
| /* It is dangerous to call lookup_symbol if we are currently |
| reading a symtab. Infinite recursion is one danger. */ |
| if (currently_reading_symtab) |
| return make_qualified_type (type, instance_flags, NULL); |
| |
| name = type->name (); |
| if (name == NULL) |
| { |
| stub_noname_complaint (); |
| return make_qualified_type (type, instance_flags, NULL); |
| } |
| domain_search_flag flag |
| = ((type->language () == language_c |
| || type->language () == language_objc |
| || type->language () == language_opencl |
| || type->language () == language_minimal) |
| ? SEARCH_STRUCT_DOMAIN |
| : SEARCH_TYPE_DOMAIN); |
| sym = lookup_symbol (name, nullptr, flag, nullptr).symbol; |
| if (sym) |
| type->set_target_type (sym->type ()); |
| else /* TYPE_CODE_UNDEF */ |
| type->set_target_type (type_allocator (type->arch ()).new_type ()); |
| } |
| type = type->target_type (); |
| |
| /* Preserve the instance flags as we traverse down the typedef chain. |
| |
| Handling address spaces/classes is nasty, what do we do if there's a |
| conflict? |
| E.g., what if an outer typedef marks the type as class_1 and an inner |
| typedef marks the type as class_2? |
| This is the wrong place to do such error checking. We leave it to |
| the code that created the typedef in the first place to flag the |
| error. We just pick the outer address space (akin to letting the |
| outer cast in a chain of casting win), instead of assuming |
| "it can't happen". */ |
| { |
| const type_instance_flags ALL_SPACES |
| = (TYPE_INSTANCE_FLAG_CODE_SPACE |
| | TYPE_INSTANCE_FLAG_DATA_SPACE); |
| const type_instance_flags ALL_CLASSES |
| = TYPE_INSTANCE_FLAG_ADDRESS_CLASS_ALL; |
| |
| type_instance_flags new_instance_flags = type->instance_flags (); |
| |
| /* Treat code vs data spaces and address classes separately. */ |
| if ((instance_flags & ALL_SPACES) != 0) |
| new_instance_flags &= ~ALL_SPACES; |
| if ((instance_flags & ALL_CLASSES) != 0) |
| new_instance_flags &= ~ALL_CLASSES; |
| |
| instance_flags |= new_instance_flags; |
| } |
| } |
| |
| /* If this is a struct/class/union with no fields, then check |
| whether a full definition exists somewhere else. This is for |
| systems where a type definition with no fields is issued for such |
| types, instead of identifying them as stub types in the first |
| place. */ |
| |
| if (TYPE_IS_OPAQUE (type) |
| && opaque_type_resolution |
| && !currently_reading_symtab) |
| { |
| const char *name = type->name (); |
| struct type *newtype; |
| |
| if (name == NULL) |
| { |
| stub_noname_complaint (); |
| return make_qualified_type (type, instance_flags, NULL); |
| } |
| newtype = lookup_transparent_type (name); |
| |
| if (newtype) |
| { |
| /* If the resolved type and the stub are in the same |
| objfile, then replace the stub type with the real deal. |
| But if they're in separate objfiles, leave the stub |
| alone; we'll just look up the transparent type every time |
| we call check_typedef. We can't create pointers between |
| types allocated to different objfiles, since they may |
| have different lifetimes. Trying to copy NEWTYPE over to |
| TYPE's objfile is pointless, too, since you'll have to |
| move over any other types NEWTYPE refers to, which could |
| be an unbounded amount of stuff. */ |
| if (newtype->objfile_owner () == type->objfile_owner ()) |
| type = make_qualified_type (newtype, type->instance_flags (), type); |
| else |
| type = newtype; |
| } |
| } |
| /* Otherwise, rely on the stub flag being set for opaque/stubbed |
| types. */ |
| else if (type->is_stub () && !currently_reading_symtab) |
| { |
| const char *name = type->name (); |
| struct symbol *sym; |
| |
| if (name == NULL) |
| { |
| stub_noname_complaint (); |
| return make_qualified_type (type, instance_flags, NULL); |
| } |
| domain_search_flag flag |
| = ((type->language () == language_c |
| || type->language () == language_objc |
| || type->language () == language_opencl |
| || type->language () == language_minimal) |
| ? SEARCH_STRUCT_DOMAIN |
| : SEARCH_TYPE_DOMAIN); |
| sym = lookup_symbol (name, nullptr, flag, nullptr).symbol; |
| if (sym) |
| { |
| /* Same as above for opaque types, we can replace the stub |
| with the complete type only if they are in the same |
| objfile. */ |
| if (sym->type ()->objfile_owner () == type->objfile_owner ()) |
| type = make_qualified_type (sym->type (), |
| type->instance_flags (), type); |
| else |
| type = sym->type (); |
| } |
| } |
| |
| if (type->target_is_stub ()) |
| { |
| struct type *target_type = check_typedef (type->target_type ()); |
| |
| if (target_type->is_stub () || target_type->target_is_stub ()) |
| { |
| /* Nothing we can do. */ |
| } |
| else if (type->code () == TYPE_CODE_RANGE) |
| { |
| type->set_length (target_type->length ()); |
| type->set_target_is_stub (false); |
| } |
| else if (type->code () == TYPE_CODE_ARRAY |
| && update_static_array_size (type)) |
| type->set_target_is_stub (false); |
| } |
| |
| type = make_qualified_type (type, instance_flags, NULL); |
| |
| /* Cache TYPE_LENGTH for future use. */ |
| orig_type->set_length (type->length ()); |
| |
| return type; |
| } |
| |
| /* Parse a type expression in the string [P..P+LENGTH). If an error |
| occurs, silently return a void type. */ |
| |
| static struct type * |
| safe_parse_type (struct gdbarch *gdbarch, const char *p, int length) |
| { |
| struct type *type = NULL; /* Initialize to keep gcc happy. */ |
| |
| /* Suppress error messages. */ |
| scoped_restore saved_gdb_stderr = make_scoped_restore (&gdb_stderr, |
| &null_stream); |
| |
| /* Call parse_and_eval_type() without fear of longjmp()s. */ |
| try |
| { |
| type = parse_and_eval_type (p, length); |
| } |
| catch (const gdb_exception_error &except) |
| { |
| type = builtin_type (gdbarch)->builtin_void; |
| } |
| |
| return type; |
| } |
| |
| /* Ugly hack to convert method stubs into method types. |
| |
| He ain't kiddin'. This demangles the name of the method into a |
| string including argument types, parses out each argument type, |
| generates a string casting a zero to that type, evaluates the |
| string, and stuffs the resulting type into an argtype vector!!! |
| Then it knows the type of the whole function (including argument |
| types for overloading), which info used to be in the stabs but was |
| removed to hack back the space required for them. */ |
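| /* For example, for a method demangling to "S::m (int, List<int>)" the |
| code below scans the text between the parentheses, using DEPTH to |
| avoid treating the comma inside the template argument list as an |
| argument separator, so the two argument type strings become "int" |
| and "List<int>". (Illustrative name; any template argument list |
| behaves the same way.) */ |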
| |
| static void |
| check_stub_method (struct type *type, int method_id, int signature_id) |
| { |
| struct gdbarch *gdbarch = type->arch (); |
| struct fn_field *f; |
| char *mangled_name = gdb_mangle_name (type, method_id, signature_id); |
| gdb::unique_xmalloc_ptr<char> demangled_name |
| = gdb_demangle (mangled_name, DMGL_PARAMS | DMGL_ANSI); |
| char *argtypetext, *p; |
| int depth = 0, argcount = 1; |
| struct field *argtypes; |
| struct type *mtype; |
| |
| /* Make sure we got back a function string that we can use. */ |
| if (demangled_name) |
| p = strchr (demangled_name.get (), '('); |
| else |
| p = NULL; |
| |
| if (demangled_name == NULL || p == NULL) |
| error (_("Internal: Cannot demangle mangled name `%s'."), |
| mangled_name); |
| |
| /* Now, read in the parameters that define this type. */ |
| p += 1; |
| argtypetext = p; |
| while (*p) |
| { |
| if (*p == '(' || *p == '<') |
| { |
| depth += 1; |
| } |
| else if (*p == ')' || *p == '>') |
| { |
| depth -= 1; |
|