| /* Support routines for manipulating internal types for GDB. |
| |
| Copyright (C) 1992-2021 Free Software Foundation, Inc. |
| |
| Contributed by Cygnus Support, using pieces from other GDB modules. |
| |
| This file is part of GDB. |
| |
| This program is free software; you can redistribute it and/or modify |
| it under the terms of the GNU General Public License as published by |
| the Free Software Foundation; either version 3 of the License, or |
| (at your option) any later version. |
| |
| This program is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with this program. If not, see <http://www.gnu.org/licenses/>. */ |
| |
| #include "defs.h" |
| #include "bfd.h" |
| #include "symtab.h" |
| #include "symfile.h" |
| #include "objfiles.h" |
| #include "gdbtypes.h" |
| #include "expression.h" |
| #include "language.h" |
| #include "target.h" |
| #include "value.h" |
| #include "demangle.h" |
| #include "complaints.h" |
| #include "gdbcmd.h" |
| #include "cp-abi.h" |
| #include "hashtab.h" |
| #include "cp-support.h" |
| #include "bcache.h" |
| #include "dwarf2/loc.h" |
| #include "dwarf2/read.h" |
| #include "gdbcore.h" |
| #include "floatformat.h" |
| #include "f-lang.h" |
| #include <algorithm> |
| #include "gmp-utils.h" |
| |
| /* Initialize BADNESS constants. */ |
| |
| const struct rank LENGTH_MISMATCH_BADNESS = {100,0}; |
| |
| const struct rank TOO_FEW_PARAMS_BADNESS = {100,0}; |
| const struct rank INCOMPATIBLE_TYPE_BADNESS = {100,0}; |
| |
| const struct rank EXACT_MATCH_BADNESS = {0,0}; |
| |
| const struct rank INTEGER_PROMOTION_BADNESS = {1,0}; |
| const struct rank FLOAT_PROMOTION_BADNESS = {1,0}; |
| const struct rank BASE_PTR_CONVERSION_BADNESS = {1,0}; |
| const struct rank CV_CONVERSION_BADNESS = {1, 0}; |
| const struct rank INTEGER_CONVERSION_BADNESS = {2,0}; |
| const struct rank FLOAT_CONVERSION_BADNESS = {2,0}; |
| const struct rank INT_FLOAT_CONVERSION_BADNESS = {2,0}; |
| const struct rank VOID_PTR_CONVERSION_BADNESS = {2,0}; |
| const struct rank BOOL_CONVERSION_BADNESS = {3,0}; |
| const struct rank BASE_CONVERSION_BADNESS = {2,0}; |
| const struct rank REFERENCE_CONVERSION_BADNESS = {2,0}; |
| const struct rank REFERENCE_SEE_THROUGH_BADNESS = {0,1}; |
| const struct rank NULL_POINTER_CONVERSION_BADNESS = {2,0}; |
| const struct rank NS_POINTER_CONVERSION_BADNESS = {10,0}; |
| const struct rank NS_INTEGER_POINTER_CONVERSION_BADNESS = {3,0}; |
| |
| /* Floatformat pairs. */ |
| const struct floatformat *floatformats_ieee_half[BFD_ENDIAN_UNKNOWN] = { |
| &floatformat_ieee_half_big, |
| &floatformat_ieee_half_little |
| }; |
| const struct floatformat *floatformats_ieee_single[BFD_ENDIAN_UNKNOWN] = { |
| &floatformat_ieee_single_big, |
| &floatformat_ieee_single_little |
| }; |
| const struct floatformat *floatformats_ieee_double[BFD_ENDIAN_UNKNOWN] = { |
| &floatformat_ieee_double_big, |
| &floatformat_ieee_double_little |
| }; |
| const struct floatformat *floatformats_ieee_double_littlebyte_bigword[BFD_ENDIAN_UNKNOWN] = { |
| &floatformat_ieee_double_big, |
| &floatformat_ieee_double_littlebyte_bigword |
| }; |
| const struct floatformat *floatformats_i387_ext[BFD_ENDIAN_UNKNOWN] = { |
| &floatformat_i387_ext, |
| &floatformat_i387_ext |
| }; |
| const struct floatformat *floatformats_m68881_ext[BFD_ENDIAN_UNKNOWN] = { |
| &floatformat_m68881_ext, |
| &floatformat_m68881_ext |
| }; |
| const struct floatformat *floatformats_arm_ext[BFD_ENDIAN_UNKNOWN] = { |
| &floatformat_arm_ext_big, |
| &floatformat_arm_ext_littlebyte_bigword |
| }; |
| const struct floatformat *floatformats_ia64_spill[BFD_ENDIAN_UNKNOWN] = { |
| &floatformat_ia64_spill_big, |
| &floatformat_ia64_spill_little |
| }; |
| const struct floatformat *floatformats_ia64_quad[BFD_ENDIAN_UNKNOWN] = { |
| &floatformat_ia64_quad_big, |
| &floatformat_ia64_quad_little |
| }; |
| const struct floatformat *floatformats_vax_f[BFD_ENDIAN_UNKNOWN] = { |
| &floatformat_vax_f, |
| &floatformat_vax_f |
| }; |
| const struct floatformat *floatformats_vax_d[BFD_ENDIAN_UNKNOWN] = { |
| &floatformat_vax_d, |
| &floatformat_vax_d |
| }; |
| const struct floatformat *floatformats_ibm_long_double[BFD_ENDIAN_UNKNOWN] = { |
| &floatformat_ibm_long_double_big, |
| &floatformat_ibm_long_double_little |
| }; |
| const struct floatformat *floatformats_bfloat16[BFD_ENDIAN_UNKNOWN] = { |
| &floatformat_bfloat16_big, |
| &floatformat_bfloat16_little |
| }; |
| |
| /* Should opaque types be resolved? */ |
| |
| static bool opaque_type_resolution = true; |
| |
| /* See gdbtypes.h. */ |
| |
| unsigned int overload_debug = 0; |
| |
| /* A flag to enable strict type checking. */ |
| |
| static bool strict_type_checking = true; |
| |
| /* A function to show whether opaque types are resolved. */ |
| |
| static void |
| show_opaque_type_resolution (struct ui_file *file, int from_tty, |
| struct cmd_list_element *c, |
| const char *value) |
| { |
| fprintf_filtered (file, _("Resolution of opaque struct/class/union types " |
| "(if set before loading symbols) is %s.\n"), |
| value); |
| } |
| |
| /* A function to show whether C++ overload debugging is enabled. */ |
| |
| static void |
| show_overload_debug (struct ui_file *file, int from_tty, |
| struct cmd_list_element *c, const char *value) |
| { |
| fprintf_filtered (file, _("Debugging of C++ overloading is %s.\n"), |
| value); |
| } |
| |
| /* A function to show the status of strict type checking. */ |
| |
| static void |
| show_strict_type_checking (struct ui_file *file, int from_tty, |
| struct cmd_list_element *c, const char *value) |
| { |
| fprintf_filtered (file, _("Strict type checking is %s.\n"), value); |
| } |
| |
| |
| /* Allocate a new OBJFILE-associated type structure and fill it |
| with some defaults. Space for the type structure is allocated |
| on the objfile's objfile_obstack. */ |
| |
| struct type * |
| alloc_type (struct objfile *objfile) |
| { |
| struct type *type; |
| |
| gdb_assert (objfile != NULL); |
| |
| /* Alloc the structure and start off with all fields zeroed. */ |
| type = OBSTACK_ZALLOC (&objfile->objfile_obstack, struct type); |
| TYPE_MAIN_TYPE (type) = OBSTACK_ZALLOC (&objfile->objfile_obstack, |
| struct main_type); |
| OBJSTAT (objfile, n_types++); |
| |
| type->set_owner (objfile); |
| |
| /* Initialize the fields that might not be zero. */ |
| |
| type->set_code (TYPE_CODE_UNDEF); |
| TYPE_CHAIN (type) = type; /* Chain back to itself. */ |
| |
| return type; |
| } |
| |
| /* Allocate a new GDBARCH-associated type structure and fill it |
| with some defaults. Space for the type structure is allocated |
| on the obstack associated with GDBARCH. */ |
| |
| struct type * |
| alloc_type_arch (struct gdbarch *gdbarch) |
| { |
| struct type *type; |
| |
| gdb_assert (gdbarch != NULL); |
| |
| /* Alloc the structure and start off with all fields zeroed. */ |
| |
| type = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct type); |
| TYPE_MAIN_TYPE (type) = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct main_type); |
| |
| type->set_owner (gdbarch); |
| |
| /* Initialize the fields that might not be zero. */ |
| |
| type->set_code (TYPE_CODE_UNDEF); |
| TYPE_CHAIN (type) = type; /* Chain back to itself. */ |
| |
| return type; |
| } |
| |
| /* If TYPE is objfile-associated, allocate a new type structure |
| associated with the same objfile. If TYPE is gdbarch-associated, |
| allocate a new type structure associated with the same gdbarch. */ |
| |
| struct type * |
| alloc_type_copy (const struct type *type) |
| { |
| if (type->is_objfile_owned ()) |
| return alloc_type (type->objfile_owner ()); |
| else |
| return alloc_type_arch (type->arch_owner ()); |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| gdbarch * |
| type::arch () const |
| { |
| struct gdbarch *arch; |
| |
| if (this->is_objfile_owned ()) |
| arch = this->objfile_owner ()->arch (); |
| else |
| arch = this->arch_owner (); |
| |
| /* The ARCH can be NULL if TYPE is associated with neither an objfile nor |
| a gdbarch. That situation is very rare, however, and in most places |
| where type::arch is called, the caller assumes a non-NULL value is |
| returned. */ |
| gdb_assert (arch != nullptr); |
| return arch; |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| struct type * |
| get_target_type (struct type *type) |
| { |
| if (type != NULL) |
| { |
| type = TYPE_TARGET_TYPE (type); |
| if (type != NULL) |
| type = check_typedef (type); |
| } |
| |
| return type; |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| unsigned int |
| type_length_units (struct type *type) |
| { |
| int unit_size = gdbarch_addressable_memory_unit_size (type->arch ()); |
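| /* On a typical byte-addressable target the unit size is 1, so this |
|    simply returns TYPE_LENGTH (type). */ |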
| |
| return TYPE_LENGTH (type) / unit_size; |
| } |
| |
| /* Alloc a new type instance structure, fill it with some defaults, |
| and point it at OLDTYPE. Allocate the new type instance from the |
| same place as OLDTYPE. */ |
| |
| static struct type * |
| alloc_type_instance (struct type *oldtype) |
| { |
| struct type *type; |
| |
| /* Allocate the structure. */ |
| |
| if (!oldtype->is_objfile_owned ()) |
| type = GDBARCH_OBSTACK_ZALLOC (oldtype->arch_owner (), struct type); |
| else |
| type = OBSTACK_ZALLOC (&oldtype->objfile_owner ()->objfile_obstack, |
| struct type); |
| |
| TYPE_MAIN_TYPE (type) = TYPE_MAIN_TYPE (oldtype); |
| |
| TYPE_CHAIN (type) = type; /* Chain back to itself for now. */ |
| |
| return type; |
| } |
| |
| /* Clear all remnants of the previous type at TYPE, in preparation for |
| replacing it with something else. Preserve owner information. */ |
| |
| static void |
| smash_type (struct type *type) |
| { |
| bool objfile_owned = type->is_objfile_owned (); |
| objfile *objfile = type->objfile_owner (); |
| gdbarch *arch = type->arch_owner (); |
| |
| memset (TYPE_MAIN_TYPE (type), 0, sizeof (struct main_type)); |
| |
| /* Restore owner information. */ |
| if (objfile_owned) |
| type->set_owner (objfile); |
| else |
| type->set_owner (arch); |
| |
| /* For now, delete the rings. */ |
| TYPE_CHAIN (type) = type; |
| |
| /* For now, leave the pointer/reference types alone. */ |
| } |
| |
| /* Lookup a pointer to a type TYPE. TYPEPTR, if nonzero, points |
| to a pointer to memory where the pointer type should be stored. |
| If *TYPEPTR is zero, update it to point to the pointer type we return. |
| We allocate new memory if needed. */ |
| |
| struct type * |
| make_pointer_type (struct type *type, struct type **typeptr) |
| { |
| struct type *ntype; /* New type */ |
| struct type *chain; |
| |
| ntype = TYPE_POINTER_TYPE (type); |
| |
| if (ntype) |
| { |
| if (typeptr == 0) |
| return ntype; /* Don't care about alloc, |
| and have new type. */ |
| else if (*typeptr == 0) |
| { |
| *typeptr = ntype; /* Tracking alloc, and have new type. */ |
| return ntype; |
| } |
| } |
| |
| if (typeptr == 0 || *typeptr == 0) /* We'll need to allocate one. */ |
| { |
| ntype = alloc_type_copy (type); |
| if (typeptr) |
| *typeptr = ntype; |
| } |
| else /* We have storage, but need to reset it. */ |
| { |
| ntype = *typeptr; |
| chain = TYPE_CHAIN (ntype); |
| smash_type (ntype); |
| TYPE_CHAIN (ntype) = chain; |
| } |
| |
| TYPE_TARGET_TYPE (ntype) = type; |
| TYPE_POINTER_TYPE (type) = ntype; |
| |
| /* FIXME! Assumes the machine has only one representation for pointers! */ |
| |
| TYPE_LENGTH (ntype) = gdbarch_ptr_bit (type->arch ()) / TARGET_CHAR_BIT; |
| ntype->set_code (TYPE_CODE_PTR); |
| |
| /* Mark pointers as unsigned. The target converts between pointers |
| and addresses (CORE_ADDRs) using gdbarch_pointer_to_address and |
| gdbarch_address_to_pointer. */ |
| ntype->set_is_unsigned (true); |
| |
| /* Update the length of all the other variants of this type. */ |
| chain = TYPE_CHAIN (ntype); |
| while (chain != ntype) |
| { |
| TYPE_LENGTH (chain) = TYPE_LENGTH (ntype); |
| chain = TYPE_CHAIN (chain); |
| } |
| |
| return ntype; |
| } |
| |
| /* Given a type TYPE, return a type of pointers to that type. |
| May need to construct such a type if this is the first use. */ |
| |
| struct type * |
| lookup_pointer_type (struct type *type) |
| { |
| return make_pointer_type (type, (struct type **) 0); |
| } |
| |
| /* Lookup a C++ `reference' to a type TYPE. TYPEPTR, if nonzero, |
| points to a pointer to memory where the reference type should be |
| stored. If *TYPEPTR is zero, update it to point to the reference |
| type we return. We allocate new memory if needed. REFCODE denotes |
| the kind of reference type to lookup (lvalue or rvalue reference). */ |
| |
| struct type * |
| make_reference_type (struct type *type, struct type **typeptr, |
| enum type_code refcode) |
| { |
| struct type *ntype; /* New type */ |
| struct type **reftype; |
| struct type *chain; |
| |
| gdb_assert (refcode == TYPE_CODE_REF || refcode == TYPE_CODE_RVALUE_REF); |
| |
| ntype = (refcode == TYPE_CODE_REF ? TYPE_REFERENCE_TYPE (type) |
| : TYPE_RVALUE_REFERENCE_TYPE (type)); |
| |
| if (ntype) |
| { |
| if (typeptr == 0) |
| return ntype; /* Don't care about alloc, |
| and have new type. */ |
| else if (*typeptr == 0) |
| { |
| *typeptr = ntype; /* Tracking alloc, and have new type. */ |
| return ntype; |
| } |
| } |
| |
| if (typeptr == 0 || *typeptr == 0) /* We'll need to allocate one. */ |
| { |
| ntype = alloc_type_copy (type); |
| if (typeptr) |
| *typeptr = ntype; |
| } |
| else /* We have storage, but need to reset it. */ |
| { |
| ntype = *typeptr; |
| chain = TYPE_CHAIN (ntype); |
| smash_type (ntype); |
| TYPE_CHAIN (ntype) = chain; |
| } |
| |
| TYPE_TARGET_TYPE (ntype) = type; |
| reftype = (refcode == TYPE_CODE_REF ? &TYPE_REFERENCE_TYPE (type) |
| : &TYPE_RVALUE_REFERENCE_TYPE (type)); |
| |
| *reftype = ntype; |
| |
| /* FIXME! Assume the machine has only one representation for |
| references, and that it matches the (only) representation for |
| pointers! */ |
| |
| TYPE_LENGTH (ntype) = gdbarch_ptr_bit (type->arch ()) / TARGET_CHAR_BIT; |
| ntype->set_code (refcode); |
| |
| *reftype = ntype; |
| |
| /* Update the length of all the other variants of this type. */ |
| chain = TYPE_CHAIN (ntype); |
| while (chain != ntype) |
| { |
| TYPE_LENGTH (chain) = TYPE_LENGTH (ntype); |
| chain = TYPE_CHAIN (chain); |
| } |
| |
| return ntype; |
| } |
| |
| /* Same as above, but caller doesn't care about memory allocation |
| details. */ |
| |
| struct type * |
| lookup_reference_type (struct type *type, enum type_code refcode) |
| { |
| return make_reference_type (type, (struct type **) 0, refcode); |
| } |
| |
| /* Lookup the lvalue reference type for the type TYPE. */ |
| |
| struct type * |
| lookup_lvalue_reference_type (struct type *type) |
| { |
| return lookup_reference_type (type, TYPE_CODE_REF); |
| } |
| |
| /* Lookup the rvalue reference type for the type TYPE. */ |
| |
| struct type * |
| lookup_rvalue_reference_type (struct type *type) |
| { |
| return lookup_reference_type (type, TYPE_CODE_RVALUE_REF); |
| } |
| |
| /* Lookup a function type that returns type TYPE. TYPEPTR, if |
| nonzero, points to a pointer to memory where the function type |
| should be stored. If *TYPEPTR is zero, update it to point to the |
| function type we return. We allocate new memory if needed. */ |
| |
| struct type * |
| make_function_type (struct type *type, struct type **typeptr) |
| { |
| struct type *ntype; /* New type */ |
| |
| if (typeptr == 0 || *typeptr == 0) /* We'll need to allocate one. */ |
| { |
| ntype = alloc_type_copy (type); |
| if (typeptr) |
| *typeptr = ntype; |
| } |
| else /* We have storage, but need to reset it. */ |
| { |
| ntype = *typeptr; |
| smash_type (ntype); |
| } |
| |
| TYPE_TARGET_TYPE (ntype) = type; |
| |
| TYPE_LENGTH (ntype) = 1; |
| ntype->set_code (TYPE_CODE_FUNC); |
| |
| INIT_FUNC_SPECIFIC (ntype); |
| |
| return ntype; |
| } |
| |
| /* Given a type TYPE, return a type of functions that return that type. |
| May need to construct such a type if this is the first use. */ |
| |
| struct type * |
| lookup_function_type (struct type *type) |
| { |
| return make_function_type (type, (struct type **) 0); |
| } |
| |
| /* Given a type TYPE and argument types, return the appropriate |
| function type. If the final type in PARAM_TYPES is NULL, make a |
| varargs function. */ |
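| /* For example (a hypothetical call, not code taken from GDB itself, |
|    where int_type and char_ptr_type stand for previously obtained |
|    "int" and "const char *" types): |
| |
|      struct type *param_types[2] = { char_ptr_type, NULL }; |
|      struct type *fn |
|        = lookup_function_type_with_arguments (int_type, 2, param_types); |
| |
|    describes a varargs function such as "int printf (const char *, ...)"; |
|    the NULL final element causes FN to be marked has_varargs. */ |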
| |
| struct type * |
| lookup_function_type_with_arguments (struct type *type, |
| int nparams, |
| struct type **param_types) |
| { |
| struct type *fn = make_function_type (type, (struct type **) 0); |
| int i; |
| |
| if (nparams > 0) |
| { |
| if (param_types[nparams - 1] == NULL) |
| { |
| --nparams; |
| fn->set_has_varargs (true); |
| } |
| else if (check_typedef (param_types[nparams - 1])->code () |
| == TYPE_CODE_VOID) |
| { |
| --nparams; |
| /* Caller should have ensured this. */ |
| gdb_assert (nparams == 0); |
| fn->set_is_prototyped (true); |
| } |
| else |
| fn->set_is_prototyped (true); |
| } |
| |
| fn->set_num_fields (nparams); |
| fn->set_fields |
| ((struct field *) TYPE_ZALLOC (fn, nparams * sizeof (struct field))); |
| for (i = 0; i < nparams; ++i) |
| fn->field (i).set_type (param_types[i]); |
| |
| return fn; |
| } |
| |
| /* Identify address space identifier by name -- return a |
| type_instance_flags. */ |
| |
| type_instance_flags |
| address_space_name_to_type_instance_flags (struct gdbarch *gdbarch, |
| const char *space_identifier) |
| { |
| type_instance_flags type_flags; |
| |
| /* Check for known address space delimiters. */ |
| if (!strcmp (space_identifier, "code")) |
| return TYPE_INSTANCE_FLAG_CODE_SPACE; |
| else if (!strcmp (space_identifier, "data")) |
| return TYPE_INSTANCE_FLAG_DATA_SPACE; |
| else if (gdbarch_address_class_name_to_type_flags_p (gdbarch) |
| && gdbarch_address_class_name_to_type_flags (gdbarch, |
| space_identifier, |
| &type_flags)) |
| return type_flags; |
| else |
| error (_("Unknown address space specifier: \"%s\""), space_identifier); |
| } |
| |
| /* Identify an address space by its type_instance_flags and return the |
| string version of the address space name. */ |
| |
| const char * |
| address_space_type_instance_flags_to_name (struct gdbarch *gdbarch, |
| type_instance_flags space_flag) |
| { |
| if (space_flag & TYPE_INSTANCE_FLAG_CODE_SPACE) |
| return "code"; |
| else if (space_flag & TYPE_INSTANCE_FLAG_DATA_SPACE) |
| return "data"; |
| else if ((space_flag & TYPE_INSTANCE_FLAG_ADDRESS_CLASS_ALL) |
| && gdbarch_address_class_type_flags_to_name_p (gdbarch)) |
| return gdbarch_address_class_type_flags_to_name (gdbarch, space_flag); |
| else |
| return NULL; |
| } |
| |
| /* Create a new type with instance flags NEW_FLAGS, based on TYPE. |
| |
| If STORAGE is non-NULL, create the new type instance there. |
| STORAGE must be in the same obstack as TYPE. */ |
| |
| static struct type * |
| make_qualified_type (struct type *type, type_instance_flags new_flags, |
| struct type *storage) |
| { |
| struct type *ntype; |
| |
| ntype = type; |
| do |
| { |
| if (ntype->instance_flags () == new_flags) |
| return ntype; |
| ntype = TYPE_CHAIN (ntype); |
| } |
| while (ntype != type); |
| |
| /* Create a new type instance. */ |
| if (storage == NULL) |
| ntype = alloc_type_instance (type); |
| else |
| { |
| /* If STORAGE was provided, it had better be in the same objfile |
| as TYPE. Otherwise, we can't link it into TYPE's cv chain: |
| if one objfile is freed and the other kept, we'd have |
| dangling pointers. */ |
| gdb_assert (type->objfile_owner () == storage->objfile_owner ()); |
| |
| ntype = storage; |
| TYPE_MAIN_TYPE (ntype) = TYPE_MAIN_TYPE (type); |
| TYPE_CHAIN (ntype) = ntype; |
| } |
| |
| /* Pointers or references to the original type are not relevant to |
| the new type. */ |
| TYPE_POINTER_TYPE (ntype) = (struct type *) 0; |
| TYPE_REFERENCE_TYPE (ntype) = (struct type *) 0; |
| |
| /* Chain the new qualified type to the old type. */ |
| TYPE_CHAIN (ntype) = TYPE_CHAIN (type); |
| TYPE_CHAIN (type) = ntype; |
| |
| /* Now set the instance flags and return the new type. */ |
| ntype->set_instance_flags (new_flags); |
| |
| /* Set length of new type to that of the original type. */ |
| TYPE_LENGTH (ntype) = TYPE_LENGTH (type); |
| |
| return ntype; |
| } |
| |
| /* Make an address-space-delimited variant of a type -- a type that |
| is identical to the one supplied except that it has an address |
| space attribute attached to it (such as "code" or "data"). |
| |
| The space attributes "code" and "data" are for Harvard |
| architectures. The address space attributes are for architectures |
| which have alternately sized pointers or pointers with alternate |
| representations. */ |
| |
| struct type * |
| make_type_with_address_space (struct type *type, |
| type_instance_flags space_flag) |
| { |
| type_instance_flags new_flags = ((type->instance_flags () |
| & ~(TYPE_INSTANCE_FLAG_CODE_SPACE |
| | TYPE_INSTANCE_FLAG_DATA_SPACE |
| | TYPE_INSTANCE_FLAG_ADDRESS_CLASS_ALL)) |
| | space_flag); |
| |
| return make_qualified_type (type, new_flags, NULL); |
| } |
| |
| /* Make a "c-v" variant of a type -- a type that is identical to the |
| one supplied except that it may have const or volatile attributes. |
| CNST is a flag for setting the const attribute. |
| VOLTL is a flag for setting the volatile attribute. |
| TYPE is the base type whose variant we are creating. |
| |
| If TYPEPTR and *TYPEPTR are non-zero, then *TYPEPTR points to |
| storage to hold the new qualified type; *TYPEPTR and TYPE must be |
| in the same objfile. Otherwise, allocate fresh memory for the new |
| type wherever TYPE lives. If TYPEPTR is non-zero, set it to the |
| new type we construct. */ |
| |
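| /* For example (a hypothetical call): make_cv_type (1, 0, some_type, NULL) |
|    returns the "const" variant of SOME_TYPE, creating it and linking it |
|    into SOME_TYPE's variant chain if it does not already exist. */ |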
| struct type * |
| make_cv_type (int cnst, int voltl, |
| struct type *type, |
| struct type **typeptr) |
| { |
| struct type *ntype; /* New type */ |
| |
| type_instance_flags new_flags = (type->instance_flags () |
| & ~(TYPE_INSTANCE_FLAG_CONST |
| | TYPE_INSTANCE_FLAG_VOLATILE)); |
| |
| if (cnst) |
| new_flags |= TYPE_INSTANCE_FLAG_CONST; |
| |
| if (voltl) |
| new_flags |= TYPE_INSTANCE_FLAG_VOLATILE; |
| |
| if (typeptr && *typeptr != NULL) |
| { |
| /* TYPE and *TYPEPTR must be in the same objfile. We can't have |
| a C-V variant chain that threads across objfiles: if one |
| objfile gets freed, then the other has a broken C-V chain. |
| |
| This code used to try to copy over the main type from TYPE to |
| *TYPEPTR if they were in different objfiles, but that's |
| wrong, too: TYPE may have a field list or member function |
| lists, which refer to types of their own, etc. etc. The |
| whole shebang would need to be copied over recursively; you |
| can't have inter-objfile pointers. The only thing to do is |
| to leave stub types as stub types, and look them up afresh by |
| name each time you encounter them. */ |
| gdb_assert ((*typeptr)->objfile_owner () == type->objfile_owner ()); |
| } |
| |
| ntype = make_qualified_type (type, new_flags, |
| typeptr ? *typeptr : NULL); |
| |
| if (typeptr != NULL) |
| *typeptr = ntype; |
| |
| return ntype; |
| } |
| |
| /* Make a 'restrict'-qualified version of TYPE. */ |
| |
| struct type * |
| make_restrict_type (struct type *type) |
| { |
| return make_qualified_type (type, |
| (type->instance_flags () |
| | TYPE_INSTANCE_FLAG_RESTRICT), |
| NULL); |
| } |
| |
| /* Make a type without const, volatile, or restrict. */ |
| |
| struct type * |
| make_unqualified_type (struct type *type) |
| { |
| return make_qualified_type (type, |
| (type->instance_flags () |
| & ~(TYPE_INSTANCE_FLAG_CONST |
| | TYPE_INSTANCE_FLAG_VOLATILE |
| | TYPE_INSTANCE_FLAG_RESTRICT)), |
| NULL); |
| } |
| |
| /* Make a '_Atomic'-qualified version of TYPE. */ |
| |
| struct type * |
| make_atomic_type (struct type *type) |
| { |
| return make_qualified_type (type, |
| (type->instance_flags () |
| | TYPE_INSTANCE_FLAG_ATOMIC), |
| NULL); |
| } |
| |
| /* Replace the contents of ntype with the type *type. This changes the |
| contents, rather than the pointer for TYPE_MAIN_TYPE (ntype); thus |
| the changes are propagated to all types in the TYPE_CHAIN. |
| |
| In order to build recursive types, it's inevitable that we'll need |
| to update types in place --- but this sort of indiscriminate |
| smashing is ugly, and needs to be replaced with something more |
| controlled. TYPE_MAIN_TYPE is a step in this direction; it's not |
| clear if more steps are needed. */ |
| |
| void |
| replace_type (struct type *ntype, struct type *type) |
| { |
| struct type *chain; |
| |
| /* These two types had better be in the same objfile. Otherwise, |
| the assignment of one type's main type structure to the other |
| will produce a type with references to objects (names; field |
| lists; etc.) allocated on an objfile other than its own. */ |
| gdb_assert (ntype->objfile_owner () == type->objfile_owner ()); |
| |
| *TYPE_MAIN_TYPE (ntype) = *TYPE_MAIN_TYPE (type); |
| |
| /* The type length is not a part of the main type. Update it for |
| each type on the variant chain. */ |
| chain = ntype; |
| do |
| { |
| /* Assert that this element of the chain has no address-class bits |
| set in its flags. Such type variants might have type lengths |
| which are supposed to be different from the non-address-class |
| variants. This assertion shouldn't ever be triggered because |
| symbol readers which do construct address-class variants don't |
| call replace_type(). */ |
| gdb_assert (TYPE_ADDRESS_CLASS_ALL (chain) == 0); |
| |
| TYPE_LENGTH (chain) = TYPE_LENGTH (type); |
| chain = TYPE_CHAIN (chain); |
| } |
| while (ntype != chain); |
| |
| /* Assert that the two types have equivalent instance qualifiers. |
| This should be true for at least all of our debug readers. */ |
| gdb_assert (ntype->instance_flags () == type->instance_flags ()); |
| } |
| |
| /* Implement direct support for MEMBER_TYPE in GNU C++. |
| May need to construct such a type if this is the first use. |
| The TYPE is the type of the member. The DOMAIN is the type |
| of the aggregate that the member belongs to. */ |
| |
| struct type * |
| lookup_memberptr_type (struct type *type, struct type *domain) |
| { |
| struct type *mtype; |
| |
| mtype = alloc_type_copy (type); |
| smash_to_memberptr_type (mtype, domain, type); |
| return mtype; |
| } |
| |
| /* Return a pointer-to-method type, for a method of type TO_TYPE. */ |
| |
| struct type * |
| lookup_methodptr_type (struct type *to_type) |
| { |
| struct type *mtype; |
| |
| mtype = alloc_type_copy (to_type); |
| smash_to_methodptr_type (mtype, to_type); |
| return mtype; |
| } |
| |
| /* Allocate a stub method whose return type is TYPE. This is done for |
| speed of symbol reading, since parsing out the arguments to the method |
| is CPU-intensive the way we are doing it; the arguments will be filled |
| in later. This always returns a fresh type. */ |
| |
| struct type * |
| allocate_stub_method (struct type *type) |
| { |
| struct type *mtype; |
| |
| mtype = alloc_type_copy (type); |
| mtype->set_code (TYPE_CODE_METHOD); |
| TYPE_LENGTH (mtype) = 1; |
| mtype->set_is_stub (true); |
| TYPE_TARGET_TYPE (mtype) = type; |
| /* TYPE_SELF_TYPE (mtype) = unknown yet */ |
| return mtype; |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| bool |
| operator== (const dynamic_prop &l, const dynamic_prop &r) |
| { |
| if (l.kind () != r.kind ()) |
| return false; |
| |
| switch (l.kind ()) |
| { |
| case PROP_UNDEFINED: |
| return true; |
| case PROP_CONST: |
| return l.const_val () == r.const_val (); |
| case PROP_ADDR_OFFSET: |
| case PROP_LOCEXPR: |
| case PROP_LOCLIST: |
| return l.baton () == r.baton (); |
| case PROP_VARIANT_PARTS: |
| return l.variant_parts () == r.variant_parts (); |
| case PROP_TYPE: |
| return l.original_type () == r.original_type (); |
| } |
| |
| gdb_assert_not_reached ("unhandled dynamic_prop kind"); |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| bool |
| operator== (const range_bounds &l, const range_bounds &r) |
| { |
| #define FIELD_EQ(FIELD) (l.FIELD == r.FIELD) |
| |
| return (FIELD_EQ (low) |
| && FIELD_EQ (high) |
| && FIELD_EQ (flag_upper_bound_is_count) |
| && FIELD_EQ (flag_bound_evaluated) |
| && FIELD_EQ (bias)); |
| |
| #undef FIELD_EQ |
| } |
| |
| /* Create a range type over INDEX_TYPE whose bounds are given by the |
| dynamic properties LOW_BOUND and HIGH_BOUND, inclusive, and whose bias |
| is BIAS. Either reuse the blank type supplied in RESULT_TYPE, or |
| allocate a new one that inherits INDEX_TYPE's owner. */ |
| |
| struct type * |
| create_range_type (struct type *result_type, struct type *index_type, |
| const struct dynamic_prop *low_bound, |
| const struct dynamic_prop *high_bound, |
| LONGEST bias) |
| { |
| /* The INDEX_TYPE should be a type capable of holding the upper and lower |
| bounds; as such, a zero-sized or void type makes no sense. */ |
| gdb_assert (index_type->code () != TYPE_CODE_VOID); |
| gdb_assert (TYPE_LENGTH (index_type) > 0); |
| |
| if (result_type == NULL) |
| result_type = alloc_type_copy (index_type); |
| result_type->set_code (TYPE_CODE_RANGE); |
| TYPE_TARGET_TYPE (result_type) = index_type; |
| if (index_type->is_stub ()) |
| result_type->set_target_is_stub (true); |
| else |
| TYPE_LENGTH (result_type) = TYPE_LENGTH (check_typedef (index_type)); |
| |
| range_bounds *bounds |
| = (struct range_bounds *) TYPE_ZALLOC (result_type, sizeof (range_bounds)); |
| bounds->low = *low_bound; |
| bounds->high = *high_bound; |
| bounds->bias = bias; |
| bounds->stride.set_const_val (0); |
| |
| result_type->set_bounds (bounds); |
| |
| if (index_type->code () == TYPE_CODE_FIXED_POINT) |
| result_type->set_is_unsigned (index_type->is_unsigned ()); |
| /* Note that the signed-ness of a range type can't simply be copied |
| from the underlying type. Consider a case where the underlying |
| type is 'int', but the range type can hold 0..65535, and where |
| the range is further specified to fit into 16 bits. In this |
| case, if we copy the underlying type's sign, then reading some |
| range values will cause an unwanted sign extension. So, we have |
| some heuristics here instead. */ |
| else if (low_bound->kind () == PROP_CONST && low_bound->const_val () >= 0) |
| result_type->set_is_unsigned (true); |
| /* Ada allows the declaration of range types whose upper bound is |
| less than the lower bound, so checking the lower bound is not |
| enough. Make sure we do not mark a range type whose upper bound |
| is negative as unsigned. */ |
| if (high_bound->kind () == PROP_CONST && high_bound->const_val () < 0) |
| result_type->set_is_unsigned (false); |
| |
| result_type->set_endianity_is_not_default |
| (index_type->endianity_is_not_default ()); |
| |
| return result_type; |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| struct type * |
| create_range_type_with_stride (struct type *result_type, |
| struct type *index_type, |
| const struct dynamic_prop *low_bound, |
| const struct dynamic_prop *high_bound, |
| LONGEST bias, |
| const struct dynamic_prop *stride, |
| bool byte_stride_p) |
| { |
| result_type = create_range_type (result_type, index_type, low_bound, |
| high_bound, bias); |
| |
| gdb_assert (stride != nullptr); |
| result_type->bounds ()->stride = *stride; |
| result_type->bounds ()->flag_is_byte_stride = byte_stride_p; |
| |
| return result_type; |
| } |
| |
| |
| |
| /* Create a range type using either a blank type supplied in |
| RESULT_TYPE, or creating a new type, inheriting the objfile from |
| INDEX_TYPE. |
| |
| Indices will be of type INDEX_TYPE, and will range from LOW_BOUND |
| to HIGH_BOUND, inclusive. |
| |
| FIXME: Maybe we should check the TYPE_CODE of RESULT_TYPE to make |
| sure it is TYPE_CODE_UNDEF before we bash it into a range type? */ |
| |
| struct type * |
| create_static_range_type (struct type *result_type, struct type *index_type, |
| LONGEST low_bound, LONGEST high_bound) |
| { |
| struct dynamic_prop low, high; |
| |
| low.set_const_val (low_bound); |
| high.set_const_val (high_bound); |
| |
| result_type = create_range_type (result_type, index_type, &low, &high, 0); |
| |
| return result_type; |
| } |
| |
| /* Predicate testing whether BOUNDS are static. Returns true if all bounds |
| values are static, otherwise returns false. */ |
| |
| static bool |
| has_static_range (const struct range_bounds *bounds) |
| { |
| /* If the range doesn't have a defined stride then its stride field will |
| be initialized to the constant 0. */ |
| return (bounds->low.kind () == PROP_CONST |
| && bounds->high.kind () == PROP_CONST |
| && bounds->stride.kind () == PROP_CONST); |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| gdb::optional<LONGEST> |
| get_discrete_low_bound (struct type *type) |
| { |
| type = check_typedef (type); |
| switch (type->code ()) |
| { |
| case TYPE_CODE_RANGE: |
| { |
| /* This function only works for ranges with a constant low bound. */ |
| if (type->bounds ()->low.kind () != PROP_CONST) |
| return {}; |
| |
| LONGEST low = type->bounds ()->low.const_val (); |
| |
| if (TYPE_TARGET_TYPE (type)->code () == TYPE_CODE_ENUM) |
| { |
| gdb::optional<LONGEST> low_pos |
| = discrete_position (TYPE_TARGET_TYPE (type), low); |
| |
| if (low_pos.has_value ()) |
| low = *low_pos; |
| } |
| |
| return low; |
| } |
| |
| case TYPE_CODE_ENUM: |
| { |
| if (type->num_fields () > 0) |
| { |
| /* The enums may not be sorted by value, so search all |
| entries. */ |
| LONGEST low = TYPE_FIELD_ENUMVAL (type, 0); |
| |
| for (int i = 0; i < type->num_fields (); i++) |
| { |
| if (TYPE_FIELD_ENUMVAL (type, i) < low) |
| low = TYPE_FIELD_ENUMVAL (type, i); |
| } |
| |
| /* Set unsigned indicator if warranted. */ |
| if (low >= 0) |
| type->set_is_unsigned (true); |
| |
| return low; |
| } |
| else |
| return 0; |
| } |
| |
| case TYPE_CODE_BOOL: |
| return 0; |
| |
| case TYPE_CODE_INT: |
| if (TYPE_LENGTH (type) > sizeof (LONGEST)) /* Too big */ |
| return {}; |
| |
| if (!type->is_unsigned ()) |
| return -(1 << (TYPE_LENGTH (type) * TARGET_CHAR_BIT - 1)); |
| |
| /* fall through */ |
| case TYPE_CODE_CHAR: |
| return 0; |
| |
| default: |
| return {}; |
| } |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| gdb::optional<LONGEST> |
| get_discrete_high_bound (struct type *type) |
| { |
| type = check_typedef (type); |
| switch (type->code ()) |
| { |
| case TYPE_CODE_RANGE: |
| { |
| /* This function only works for ranges with a constant high bound. */ |
| if (type->bounds ()->high.kind () != PROP_CONST) |
| return {}; |
| |
| LONGEST high = type->bounds ()->high.const_val (); |
| |
| if (TYPE_TARGET_TYPE (type)->code () == TYPE_CODE_ENUM) |
| { |
| gdb::optional<LONGEST> high_pos |
| = discrete_position (TYPE_TARGET_TYPE (type), high); |
| |
| if (high_pos.has_value ()) |
| high = *high_pos; |
| } |
| |
| return high; |
| } |
| |
| case TYPE_CODE_ENUM: |
| { |
| if (type->num_fields () > 0) |
| { |
| /* The enums may not be sorted by value, so search all |
| entries. */ |
| LONGEST high = TYPE_FIELD_ENUMVAL (type, 0); |
| |
| for (int i = 0; i < type->num_fields (); i++) |
| { |
| if (TYPE_FIELD_ENUMVAL (type, i) > high) |
| high = TYPE_FIELD_ENUMVAL (type, i); |
| } |
| |
| return high; |
| } |
| else |
| return -1; |
| } |
| |
| case TYPE_CODE_BOOL: |
| return 1; |
| |
| case TYPE_CODE_INT: |
| if (TYPE_LENGTH (type) > sizeof (LONGEST)) /* Too big */ |
| return {}; |
| |
| if (!type->is_unsigned ()) |
| { |
| LONGEST low = -(1 << (TYPE_LENGTH (type) * TARGET_CHAR_BIT - 1)); |
| return -low - 1; |
| } |
| |
| /* fall through */ |
| case TYPE_CODE_CHAR: |
| { |
| /* This round-about calculation is to avoid shifting by |
| TYPE_LENGTH (type) * TARGET_CHAR_BIT, which will not work |
| if TYPE_LENGTH (type) == sizeof (LONGEST). */ |
| LONGEST high = 1 << (TYPE_LENGTH (type) * TARGET_CHAR_BIT - 1); |
| return (high - 1) | high; |
| } |
| |
| default: |
| return {}; |
| } |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| bool |
| get_discrete_bounds (struct type *type, LONGEST *lowp, LONGEST *highp) |
| { |
| gdb::optional<LONGEST> low = get_discrete_low_bound (type); |
| if (!low.has_value ()) |
| return false; |
| |
| gdb::optional<LONGEST> high = get_discrete_high_bound (type); |
| if (!high.has_value ()) |
| return false; |
| |
| *lowp = *low; |
| *highp = *high; |
| |
| return true; |
| } |
| |
| /* See gdbtypes.h */ |
| |
| bool |
| get_array_bounds (struct type *type, LONGEST *low_bound, LONGEST *high_bound) |
| { |
| struct type *index = type->index_type (); |
| LONGEST low = 0; |
| LONGEST high = 0; |
| |
| if (index == NULL) |
| return false; |
| |
| if (!get_discrete_bounds (index, &low, &high)) |
| return false; |
| |
| if (low_bound) |
| *low_bound = low; |
| |
| if (high_bound) |
| *high_bound = high; |
| |
| return true; |
| } |
| |
| /* Assuming that TYPE is a discrete type and VAL is a valid integer |
| representation of a value of this type, return the corresponding |
| position number. |
| |
| The result differs from VAL only in the case of enumeration types. In |
| this case, the position number of the value of the first listed |
| enumeration literal is zero; the position number of the value of |
| each subsequent enumeration literal is one more than that of its |
| predecessor in the list. |
| |
| Return an empty optional if VAL is not a valid value of TYPE (such as |
| an invalid enumeration value). */ |
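| /* For example (hypothetical types, for illustration only): given |
|    "enum colors { RED = 3, GREEN = 5, BLUE = 7 }", discrete_position |
|    maps the value 5 to position 1, while for a plain integer type the |
|    value is returned unchanged. */ |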
| |
| gdb::optional<LONGEST> |
| discrete_position (struct type *type, LONGEST val) |
| { |
| if (type->code () == TYPE_CODE_RANGE) |
| type = TYPE_TARGET_TYPE (type); |
| |
| if (type->code () == TYPE_CODE_ENUM) |
| { |
| int i; |
| |
| for (i = 0; i < type->num_fields (); i += 1) |
| { |
| if (val == TYPE_FIELD_ENUMVAL (type, i)) |
| return i; |
| } |
| |
| /* Invalid enumeration value. */ |
| return {}; |
| } |
| else |
| return val; |
| } |
| |
| /* If the array TYPE has static bounds, calculate and update its |
| size, then return true. Otherwise return false and leave TYPE |
| unchanged. */ |
| |
| static bool |
| update_static_array_size (struct type *type) |
| { |
| gdb_assert (type->code () == TYPE_CODE_ARRAY); |
| |
| struct type *range_type = type->index_type (); |
| |
| if (type->dyn_prop (DYN_PROP_BYTE_STRIDE) == nullptr |
| && has_static_range (range_type->bounds ()) |
| && (!type_not_associated (type) |
| && !type_not_allocated (type))) |
| { |
| LONGEST low_bound, high_bound; |
| int stride; |
| struct type *element_type; |
| |
| stride = type->bit_stride (); |
| |
| if (!get_discrete_bounds (range_type, &low_bound, &high_bound)) |
| low_bound = high_bound = 0; |
| |
| element_type = check_typedef (TYPE_TARGET_TYPE (type)); |
| /* Be careful when setting the array length. Ada arrays can be |
| empty arrays with the high_bound being smaller than the low_bound. |
| In such cases, the array length should be zero. */ |
| if (high_bound < low_bound) |
| TYPE_LENGTH (type) = 0; |
| else if (stride != 0) |
| { |
| /* Ensure that the type length is always positive, even in the |
| case where (for example in Fortran) we have a negative |
| stride. It is possible to have a single element array with a |
| negative stride in Fortran (this doesn't mean anything |
| special, it's still just a single element array) so do |
| consider that case when touching this code. */ |
| LONGEST element_count = std::abs (high_bound - low_bound + 1); |
| TYPE_LENGTH (type) |
| = ((std::abs (stride) * element_count) + 7) / 8; |
| } |
| else |
| TYPE_LENGTH (type) = |
| TYPE_LENGTH (element_type) * (high_bound - low_bound + 1); |
| |
| /* If this array's element is itself an array with a bit stride, |
| then we want to update this array's bit stride to reflect the |
| size of the sub-array. Otherwise, we'll end up using the |
| wrong size when trying to find elements of the outer |
| array. */ |
| if (element_type->code () == TYPE_CODE_ARRAY |
| && TYPE_LENGTH (element_type) != 0 |
| && TYPE_FIELD_BITSIZE (element_type, 0) != 0 |
| && get_array_bounds (element_type, &low_bound, &high_bound) |
| && high_bound >= low_bound) |
| TYPE_FIELD_BITSIZE (type, 0) |
| = ((high_bound - low_bound + 1) |
| * TYPE_FIELD_BITSIZE (element_type, 0)); |
| |
| return true; |
| } |
| |
| return false; |
| } |
| |
| /* Create an array type using either a blank type supplied in |
| RESULT_TYPE, or creating a new type, inheriting the objfile from |
| RANGE_TYPE. |
| |
| Elements will be of type ELEMENT_TYPE, the indices will be of type |
| RANGE_TYPE. |
| |
| BYTE_STRIDE_PROP, when not NULL, provides the array's byte stride. |
| This byte stride property is added to the resulting array type |
| as a DYN_PROP_BYTE_STRIDE. As a consequence, the BYTE_STRIDE_PROP |
| argument can only be used to create types that are objfile-owned |
| (see add_dyn_prop), meaning that either this function must be called |
| with an objfile-owned RESULT_TYPE, or an objfile-owned RANGE_TYPE. |
| |
| BIT_STRIDE is taken into account only when BYTE_STRIDE_PROP is NULL. |
| If BIT_STRIDE is not zero, build a packed array type whose element |
| size is BIT_STRIDE. Otherwise, ignore this parameter. |
| |
| FIXME: Maybe we should check the TYPE_CODE of RESULT_TYPE to make |
| sure it is TYPE_CODE_UNDEF before we bash it into an array |
| type? */ |
| |
| struct type * |
| create_array_type_with_stride (struct type *result_type, |
| struct type *element_type, |
| struct type *range_type, |
| struct dynamic_prop *byte_stride_prop, |
| unsigned int bit_stride) |
| { |
| if (byte_stride_prop != NULL |
| && byte_stride_prop->kind () == PROP_CONST) |
| { |
| /* The byte stride is actually not dynamic. Pretend we were |
| called with bit_stride set instead of byte_stride_prop. |
| This will give us the same result type, while avoiding |
| the need to handle this as a special case. */ |
| bit_stride = byte_stride_prop->const_val () * 8; |
| byte_stride_prop = NULL; |
| } |
| |
| if (result_type == NULL) |
| result_type = alloc_type_copy (range_type); |
| |
| result_type->set_code (TYPE_CODE_ARRAY); |
| TYPE_TARGET_TYPE (result_type) = element_type; |
| |
| result_type->set_num_fields (1); |
| result_type->set_fields |
| ((struct field *) TYPE_ZALLOC (result_type, sizeof (struct field))); |
| result_type->set_index_type (range_type); |
| if (byte_stride_prop != NULL) |
| result_type->add_dyn_prop (DYN_PROP_BYTE_STRIDE, *byte_stride_prop); |
| else if (bit_stride > 0) |
| TYPE_FIELD_BITSIZE (result_type, 0) = bit_stride; |
| |
| if (!update_static_array_size (result_type)) |
| { |
| /* This type is dynamic and its length needs to be computed |
| on demand. In the meantime, avoid leaving the TYPE_LENGTH |
| undefined by setting it to zero. Although we are not expected |
| to trust TYPE_LENGTH in this case, setting the size to zero |
| allows us to avoid allocating objects of random sizes in case |
| we accidentally do. */ |
| TYPE_LENGTH (result_type) = 0; |
| } |
| |
| /* TYPE_TARGET_STUB will take care of zero length arrays. */ |
| if (TYPE_LENGTH (result_type) == 0) |
| result_type->set_target_is_stub (true); |
| |
| return result_type; |
| } |
| |
| /* Same as create_array_type_with_stride but with no bit_stride |
| (BIT_STRIDE = 0), thus building an unpacked array. */ |
| |
| struct type * |
| create_array_type (struct type *result_type, |
| struct type *element_type, |
| struct type *range_type) |
| { |
| return create_array_type_with_stride (result_type, element_type, |
| range_type, NULL, 0); |
| } |
| |
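| /* Create an array type holding elements of ELEMENT_TYPE, indexed from |
|    LOW_BOUND to HIGH_BOUND by a built-in "int" index type taken from the |
|    objfile or gdbarch that owns ELEMENT_TYPE. */ |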
| struct type * |
| lookup_array_range_type (struct type *element_type, |
| LONGEST low_bound, LONGEST high_bound) |
| { |
| struct type *index_type; |
| struct type *range_type; |
| |
| if (element_type->is_objfile_owned ()) |
| index_type = objfile_type (element_type->objfile_owner ())->builtin_int; |
| else |
| index_type = builtin_type (element_type->arch_owner ())->builtin_int; |
| |
| range_type = create_static_range_type (NULL, index_type, |
| low_bound, high_bound); |
| |
| return create_array_type (NULL, element_type, range_type); |
| } |
| |
| /* Create a string type using either a blank type supplied in |
| RESULT_TYPE, or creating a new type. String types are similar |
| enough to array of char types that we can use create_array_type to |
| build the basic type and then bash it into a string type. |
| |
| For fixed length strings, the range type contains 0 as the lower |
| bound and the length of the string minus one as the upper bound. |
| |
| FIXME: Maybe we should check the TYPE_CODE of RESULT_TYPE to make |
| sure it is TYPE_CODE_UNDEF before we bash it into a string |
| type? */ |
| |
| struct type * |
| create_string_type (struct type *result_type, |
| struct type *string_char_type, |
| struct type *range_type) |
| { |
| result_type = create_array_type (result_type, |
| string_char_type, |
| range_type); |
| result_type->set_code (TYPE_CODE_STRING); |
| return result_type; |
| } |
| |
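| /* Same as lookup_array_range_type, but the result is given |
|    TYPE_CODE_STRING so that it is treated as a string of |
|    STRING_CHAR_TYPE elements. */ |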
| struct type * |
| lookup_string_range_type (struct type *string_char_type, |
| LONGEST low_bound, LONGEST high_bound) |
| { |
| struct type *result_type; |
| |
| result_type = lookup_array_range_type (string_char_type, |
| low_bound, high_bound); |
| result_type->set_code (TYPE_CODE_STRING); |
| return result_type; |
| } |
| |
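| /* Create a set type whose members are drawn from DOMAIN_TYPE, using |
|    RESULT_TYPE as storage if it is non-NULL. The length is computed |
|    from the domain's discrete bounds: one bit per possible member, |
|    rounded up to whole TARGET_CHAR_BIT-sized bytes. */ |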
| struct type * |
| create_set_type (struct type *result_type, struct type *domain_type) |
| { |
| if (result_type == NULL) |
| result_type = alloc_type_copy (domain_type); |
| |
| result_type->set_code (TYPE_CODE_SET); |
| result_type->set_num_fields (1); |
| result_type->set_fields |
| ((struct field *) TYPE_ZALLOC (result_type, sizeof (struct field))); |
| |
| if (!domain_type->is_stub ()) |
| { |
| LONGEST low_bound, high_bound, bit_length; |
| |
| if (!get_discrete_bounds (domain_type, &low_bound, &high_bound)) |
| low_bound = high_bound = 0; |
| |
| bit_length = high_bound - low_bound + 1; |
| TYPE_LENGTH (result_type) |
| = (bit_length + TARGET_CHAR_BIT - 1) / TARGET_CHAR_BIT; |
| if (low_bound >= 0) |
| result_type->set_is_unsigned (true); |
| } |
| result_type->field (0).set_type (domain_type); |
| |
| return result_type; |
| } |
| |
| /* Convert ARRAY_TYPE to a vector type. This may modify ARRAY_TYPE |
| and any array types nested inside it. */ |
| |
| void |
| make_vector_type (struct type *array_type) |
| { |
| struct type *inner_array, *elt_type; |
| |
| /* Find the innermost array type, in case the array is |
| multi-dimensional. */ |
| inner_array = array_type; |
| while (TYPE_TARGET_TYPE (inner_array)->code () == TYPE_CODE_ARRAY) |
| inner_array = TYPE_TARGET_TYPE (inner_array); |
| |
| elt_type = TYPE_TARGET_TYPE (inner_array); |
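| /* Integer elements are marked NOTTEXT so that, e.g., a vector of |
|    single-byte integers is not printed as a string of characters. */ |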
| if (elt_type->code () == TYPE_CODE_INT) |
| { |
| type_instance_flags flags |
| = elt_type->instance_flags () | TYPE_INSTANCE_FLAG_NOTTEXT; |
| elt_type = make_qualified_type (elt_type, flags, NULL); |
| TYPE_TARGET_TYPE (inner_array) = elt_type; |
| } |
| |
| array_type->set_is_vector (true); |
| } |
| |
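| /* Build a vector type of N elements of type ELT_TYPE: an array type |
|    with indices 0 .. N-1 that is then marked as a vector (see |
|    make_vector_type above). */ |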
| struct type * |
| init_vector_type (struct type *elt_type, int n) |
| { |
| struct type *array_type; |
| |
| array_type = lookup_array_range_type (elt_type, 0, n - 1); |
| make_vector_type (array_type); |
| return array_type; |
| } |
| |
| /* Internal routine called by TYPE_SELF_TYPE to return the type that TYPE |
| belongs to. In c++ this is the class of "this", but TYPE_THIS_TYPE is too |
| confusing. "self" is a common enough replacement for "this". |
| TYPE must be one of TYPE_CODE_METHODPTR, TYPE_CODE_MEMBERPTR, or |
| TYPE_CODE_METHOD. */ |
| |
| struct type * |
| internal_type_self_type (struct type *type) |
| { |
| switch (type->code ()) |
| { |
| case TYPE_CODE_METHODPTR: |
| case TYPE_CODE_MEMBERPTR: |
| if (TYPE_SPECIFIC_FIELD (type) == TYPE_SPECIFIC_NONE) |
| return NULL; |
| gdb_assert (TYPE_SPECIFIC_FIELD (type) == TYPE_SPECIFIC_SELF_TYPE); |
| return TYPE_MAIN_TYPE (type)->type_specific.self_type; |
| case TYPE_CODE_METHOD: |
| if (TYPE_SPECIFIC_FIELD (type) == TYPE_SPECIFIC_NONE) |
| return NULL; |
| gdb_assert (TYPE_SPECIFIC_FIELD (type) == TYPE_SPECIFIC_FUNC); |
| return TYPE_MAIN_TYPE (type)->type_specific.func_stuff->self_type; |
| default: |
| gdb_assert_not_reached ("bad type"); |
| } |
| } |
| |
| /* Set the type of the class that TYPE belongs to. |
| In c++ this is the class of "this". |
| TYPE must be one of TYPE_CODE_METHODPTR, TYPE_CODE_MEMBERPTR, or |
| TYPE_CODE_METHOD. */ |
| |
| void |
| set_type_self_type (struct type *type, struct type *self_type) |
| { |
| switch (type->code ()) |
| { |
| case TYPE_CODE_METHODPTR: |
| case TYPE_CODE_MEMBERPTR: |
| if (TYPE_SPECIFIC_FIELD (type) == TYPE_SPECIFIC_NONE) |
| TYPE_SPECIFIC_FIELD (type) = TYPE_SPECIFIC_SELF_TYPE; |
| gdb_assert (TYPE_SPECIFIC_FIELD (type) == TYPE_SPECIFIC_SELF_TYPE); |
| TYPE_MAIN_TYPE (type)->type_specific.self_type = self_type; |
| break; |
| case TYPE_CODE_METHOD: |
| if (TYPE_SPECIFIC_FIELD (type) == TYPE_SPECIFIC_NONE) |
| INIT_FUNC_SPECIFIC (type); |
| gdb_assert (TYPE_SPECIFIC_FIELD (type) == TYPE_SPECIFIC_FUNC); |
| TYPE_MAIN_TYPE (type)->type_specific.func_stuff->self_type = self_type; |
| break; |
| default: |
| gdb_assert_not_reached ("bad type"); |
| } |
| } |
| |
| /* Smash TYPE to be a type of pointers to members of SELF_TYPE with type |
| TO_TYPE. A member pointer is a weird thing -- it amounts to a |
| typed offset into a struct, e.g. "an int at offset 8". A MEMBER |
| TYPE doesn't include the offset (that's the value of the MEMBER |
| itself), but does include the structure type into which it points |
| (for some reason). |
| |
| When "smashing" the type, we preserve the objfile that the old type |
| pointed to, since we aren't changing where the type is actually |
| allocated. */ |
| |
| void |
| smash_to_memberptr_type (struct type *type, struct type *self_type, |
| struct type *to_type) |
| { |
| smash_type (type); |
| type->set_code (TYPE_CODE_MEMBERPTR); |
| TYPE_TARGET_TYPE (type) = to_type; |
| set_type_self_type (type, self_type); |
| /* Assume that a data member pointer is the same size as a normal |
| pointer. */ |
| TYPE_LENGTH (type) = gdbarch_ptr_bit (to_type->arch ()) / TARGET_CHAR_BIT; |
| } |
| |
| /* Smash TYPE to be a type of pointer to methods type TO_TYPE. |
| |
| When "smashing" the type, we preserve the objfile that the old type |
| pointed to, since we aren't changing where the type is actually |
| allocated. */ |
| |
| void |
| smash_to_methodptr_type (struct type *type, struct type *to_type) |
| { |
| smash_type (type); |
| type->set_code (TYPE_CODE_METHODPTR); |
| TYPE_TARGET_TYPE (type) = to_type; |
| set_type_self_type (type, TYPE_SELF_TYPE (to_type)); |
| TYPE_LENGTH (type) = cplus_method_ptr_size (to_type); |
| } |
| |
| /* Smash TYPE to be a type of method of SELF_TYPE with type TO_TYPE. |
| METHOD just means `function that gets an extra "this" argument'. |
| |
| When "smashing" the type, we preserve the objfile that the old type |
| pointed to, since we aren't changing where the type is actually |
| allocated. */ |
| |
| void |
| smash_to_method_type (struct type *type, struct type *self_type, |
| struct type *to_type, struct field *args, |
| int nargs, int varargs) |
| { |
| smash_type (type); |
| type->set_code (TYPE_CODE_METHOD); |
| TYPE_TARGET_TYPE (type) = to_type; |
| set_type_self_type (type, self_type); |
| type->set_fields (args); |
| type->set_num_fields (nargs); |
| if (varargs) |
| type->set_has_varargs (true); |
| TYPE_LENGTH (type) = 1; /* In practice, this is never needed. */ |
| } |
| |
| /* A wrapper of type->name () which calls error if the type is anonymous. |
| Since GCC PR debug/47510, DWARF provides enough associated information |
| to detect the linkage name of an anonymous class from its typedef. |
| |
| Parameter TYPE should not yet have CHECK_TYPEDEF applied; this function |
| will apply it itself. */ |
| |
| const char * |
| type_name_or_error (struct type *type) |
| { |
| struct type *saved_type = type; |
| const char *name; |
| struct objfile *objfile; |
| |
| type = check_typedef (type); |
| |
| name = type->name (); |
| if (name != NULL) |
| return name; |
| |
| name = saved_type->name (); |
| objfile = saved_type->objfile_owner (); |
| error (_("Invalid anonymous type %s [in module %s], GCC PR debug/47510 bug?"), |
| name ? name : "<anonymous>", |
| objfile ? objfile_name (objfile) : "<arch>"); |
| } |
| |
| /* Lookup a typedef or primitive type named NAME, visible in lexical |
| block BLOCK. If NOERR is nonzero, return zero if NAME is not |
| suitably defined. */ |
| |
| struct type * |
| lookup_typename (const struct language_defn *language, |
| const char *name, |
| const struct block *block, int noerr) |
| { |
| struct symbol *sym; |
| |
| sym = lookup_symbol_in_language (name, block, VAR_DOMAIN, |
| language->la_language, NULL).symbol; |
| if (sym != NULL && SYMBOL_CLASS (sym) == LOC_TYPEDEF) |
| return SYMBOL_TYPE (sym); |
| |
| if (noerr) |
| return NULL; |
| error (_("No type named %s."), name); |
| } |
| |
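| /* Look up the type named "unsigned NAME", e.g. "unsigned int" when |
|    NAME is "int". */ |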
| struct type * |
| lookup_unsigned_typename (const struct language_defn *language, |
| const char *name) |
| { |
| char *uns = (char *) alloca (strlen (name) + 10); |
| |
| strcpy (uns, "unsigned "); |
| strcpy (uns + 9, name); |
| return lookup_typename (language, uns, NULL, 0); |
| } |
| |
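| /* Look up the type named "signed NAME"; if that is not defined, fall |
|    back to plain NAME. */ |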
| struct type * |
| lookup_signed_typename (const struct language_defn *language, const char *name) |
| { |
| struct type *t; |
| char *uns = (char *) alloca (strlen (name) + 8); |
| |
| strcpy (uns, "signed "); |
| strcpy (uns + 7, name); |
| t = lookup_typename (language, uns, NULL, 1); |
| /* If we don't find "signed FOO" just try again with plain "FOO". */ |
| if (t != NULL) |
| return t; |
| return lookup_typename (language, name, NULL, 0); |
| } |
| |
| /* Lookup a structure type named "struct NAME", |
| visible in lexical block BLOCK. */ |
| |
| struct type * |
| lookup_struct (const char *name, const struct block *block) |
| { |
| struct symbol *sym; |
| |
| sym = lookup_symbol (name, block, STRUCT_DOMAIN, 0).symbol; |
| |
| if (sym == NULL) |
| { |
| error (_("No struct type named %s."), name); |
| } |
| if (SYMBOL_TYPE (sym)->code () != TYPE_CODE_STRUCT) |
| { |
| error (_("This context has class, union or enum %s, not a struct."), |
| name); |
| } |
| return (SYMBOL_TYPE (sym)); |
| } |
| |
| /* Lookup a union type named "union NAME", |
| visible in lexical block BLOCK. */ |
| |
| struct type * |
| lookup_union (const char *name, const struct block *block) |
| { |
| struct symbol *sym; |
| struct type *t; |
| |
| sym = lookup_symbol (name, block, STRUCT_DOMAIN, 0).symbol; |
| |
| if (sym == NULL) |
| error (_("No union type named %s."), name); |
| |
| t = SYMBOL_TYPE (sym); |
| |
| if (t->code () == TYPE_CODE_UNION) |
| return t; |
| |
| /* If we get here, it's not a union. */ |
| error (_("This context has class, struct or enum %s, not a union."), |
| name); |
| } |
| |
| /* Lookup an enum type named "enum NAME", |
| visible in lexical block BLOCK. */ |
| |
| struct type * |
| lookup_enum (const char *name, const struct block *block) |
| { |
| struct symbol *sym; |
| |
| sym = lookup_symbol (name, block, STRUCT_DOMAIN, 0).symbol; |
| if (sym == NULL) |
| { |
| error (_("No enum type named %s."), name); |
| } |
| if (SYMBOL_TYPE (sym)->code () != TYPE_CODE_ENUM) |
| { |
| error (_("This context has class, struct or union %s, not an enum."), |
| name); |
| } |
| return (SYMBOL_TYPE (sym)); |
| } |
| |
| /* Lookup a template type named "template NAME<TYPE>", |
| visible in lexical block BLOCK. */ |
| |
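| /* For example (a hypothetical call): lookup_template_type ("vector", |
|    int_type, block) searches for a structure symbol named "vector<int >" |
|    -- note the space before '>', matching the name this function |
|    constructs just below. */ |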
| struct type * |
| lookup_template_type (const char *name, struct type *type, |
| const struct block *block) |
| { |
| struct symbol *sym; |
| char *nam = (char *) |
| alloca (strlen (name) + strlen (type->name ()) + 4); |
| |
| strcpy (nam, name); |
| strcat (nam, "<"); |
| strcat (nam, type->name ()); |
| strcat (nam, " >"); /* FIXME, extra space still introduced in gcc? */ |
| |
| sym = lookup_symbol (nam, block, VAR_DOMAIN, 0).symbol; |
| |
| if (sym == NULL) |
| { |
| error (_("No template type named %s."), name); |
| } |
| if (SYMBOL_TYPE (sym)->code () != TYPE_CODE_STRUCT) |
| { |
| error (_("This context has class, union or enum %s, not a struct."), |
| name); |
| } |
| return (SYMBOL_TYPE (sym)); |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| struct_elt |
| lookup_struct_elt (struct type *type, const char *name, int noerr) |
| { |
| int i; |
| |
| for (;;) |
| { |
| type = check_typedef (type); |
| if (type->code () != TYPE_CODE_PTR |
| && type->code () != TYPE_CODE_REF) |
| break; |
| type = TYPE_TARGET_TYPE (type); |
| } |
| |
| if (type->code () != TYPE_CODE_STRUCT |
| && type->code () != TYPE_CODE_UNION) |
| { |
| std::string type_name = type_to_string (type); |
| error (_("Type %s is not a structure or union type."), |
| type_name.c_str ()); |
| } |
| |
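| /* Search this type's own fields, from the last field down to (but not |
|    including) the base classes; unnamed fields, such as anonymous |
|    unions, are searched recursively. */ |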
| for (i = type->num_fields () - 1; i >= TYPE_N_BASECLASSES (type); i--) |
| { |
| const char *t_field_name = type->field (i).name (); |
| |
| if (t_field_name && (strcmp_iw (t_field_name, name) == 0)) |
| { |
| return {&type->field (i), TYPE_FIELD_BITPOS (type, i)}; |
| } |
| else if (!t_field_name || *t_field_name == '\0') |
| { |
| struct_elt elt |
| = lookup_struct_elt (type->field (i).type (), name, 1); |
| if (elt.field != NULL) |
| { |
| elt.offset += TYPE_FIELD_BITPOS (type, i); |
| return elt; |
| } |
| } |
| } |
| |
| /* OK, it's not in this class. Recursively check the baseclasses. */ |
| for (i = TYPE_N_BASECLASSES (type) - 1; i >= 0; i--) |
| { |
| struct_elt elt = lookup_struct_elt (TYPE_BASECLASS (type, i), name, 1); |
| if (elt.field != NULL) |
| return elt; |
| } |
| |
| if (noerr) |
| return {nullptr, 0}; |
| |
| std::string type_name = type_to_string (type); |
| error (_("Type %s has no component named %s."), type_name.c_str (), name); |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| struct type * |
| lookup_struct_elt_type (struct type *type, const char *name, int noerr) |
| { |
| struct_elt elt = lookup_struct_elt (type, name, noerr); |
| if (elt.field != NULL) |
| return elt.field->type (); |
| else |
| return NULL; |
| } |
| |
| /* Return the largest number representable by unsigned integer type TYPE. */ |
| |
| ULONGEST |
| get_unsigned_type_max (struct type *type) |
| { |
| unsigned int n; |
| |
| type = check_typedef (type); |
| gdb_assert (type->code () == TYPE_CODE_INT && type->is_unsigned ()); |
| gdb_assert (TYPE_LENGTH (type) <= sizeof (ULONGEST)); |
| |
| /* Written this way to avoid overflow. */ |
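  /* For example, when TYPE is as wide as ULONGEST itself, computing
     ((ULONGEST) 1 << n) - 1 would shift by the full width of the type,
     which is undefined behavior; the expression below never shifts by
     more than n - 1 bits yet still yields an all-ones value.  */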
| n = TYPE_LENGTH (type) * TARGET_CHAR_BIT; |
| return ((((ULONGEST) 1 << (n - 1)) - 1) << 1) | 1; |
| } |
| |
| /* Store in *MIN, *MAX the smallest and largest numbers representable by |
| signed integer type TYPE. */ |
| |
| void |
| get_signed_type_minmax (struct type *type, LONGEST *min, LONGEST *max) |
| { |
| unsigned int n; |
| |
| type = check_typedef (type); |
| gdb_assert (type->code () == TYPE_CODE_INT && !type->is_unsigned ()); |
| gdb_assert (TYPE_LENGTH (type) <= sizeof (LONGEST)); |
| |
| n = TYPE_LENGTH (type) * TARGET_CHAR_BIT; |
| *min = -((ULONGEST) 1 << (n - 1)); |
| *max = ((ULONGEST) 1 << (n - 1)) - 1; |
| } |
| |
| /* Return the largest value representable by pointer type TYPE. */ |
| |
| CORE_ADDR |
| get_pointer_type_max (struct type *type) |
| { |
| unsigned int n; |
| |
| type = check_typedef (type); |
| gdb_assert (type->code () == TYPE_CODE_PTR); |
| gdb_assert (TYPE_LENGTH (type) <= sizeof (CORE_ADDR)); |
| |
| n = TYPE_LENGTH (type) * TARGET_CHAR_BIT; |
| return ((((CORE_ADDR) 1 << (n - 1)) - 1) << 1) | 1; |
| } |
| |
| /* Internal routine called by TYPE_VPTR_FIELDNO to return the value of |
| cplus_stuff.vptr_fieldno. |
| |
   cplus_stuff is initialized to cplus_struct_default, which does not
   set vptr_fieldno to -1 for portability reasons (it would be nice to
   use C99 designated initializers).  We cope with that here.  */
| |
| int |
| internal_type_vptr_fieldno (struct type *type) |
| { |
| type = check_typedef (type); |
| gdb_assert (type->code () == TYPE_CODE_STRUCT |
| || type->code () == TYPE_CODE_UNION); |
| if (!HAVE_CPLUS_STRUCT (type)) |
| return -1; |
| return TYPE_RAW_CPLUS_SPECIFIC (type)->vptr_fieldno; |
| } |
| |
| /* Set the value of cplus_stuff.vptr_fieldno. */ |
| |
| void |
| set_type_vptr_fieldno (struct type *type, int fieldno) |
| { |
| type = check_typedef (type); |
| gdb_assert (type->code () == TYPE_CODE_STRUCT |
| || type->code () == TYPE_CODE_UNION); |
| if (!HAVE_CPLUS_STRUCT (type)) |
| ALLOCATE_CPLUS_STRUCT_TYPE (type); |
| TYPE_RAW_CPLUS_SPECIFIC (type)->vptr_fieldno = fieldno; |
| } |
| |
| /* Internal routine called by TYPE_VPTR_BASETYPE to return the value of |
| cplus_stuff.vptr_basetype. */ |
| |
| struct type * |
| internal_type_vptr_basetype (struct type *type) |
| { |
| type = check_typedef (type); |
| gdb_assert (type->code () == TYPE_CODE_STRUCT |
| || type->code () == TYPE_CODE_UNION); |
| gdb_assert (TYPE_SPECIFIC_FIELD (type) == TYPE_SPECIFIC_CPLUS_STUFF); |
| return TYPE_RAW_CPLUS_SPECIFIC (type)->vptr_basetype; |
| } |
| |
| /* Set the value of cplus_stuff.vptr_basetype. */ |
| |
| void |
| set_type_vptr_basetype (struct type *type, struct type *basetype) |
| { |
| type = check_typedef (type); |
| gdb_assert (type->code () == TYPE_CODE_STRUCT |
| || type->code () == TYPE_CODE_UNION); |
| if (!HAVE_CPLUS_STRUCT (type)) |
| ALLOCATE_CPLUS_STRUCT_TYPE (type); |
| TYPE_RAW_CPLUS_SPECIFIC (type)->vptr_basetype = basetype; |
| } |
| |
| /* Lookup the vptr basetype/fieldno values for TYPE. |
| If found store vptr_basetype in *BASETYPEP if non-NULL, and return |
| vptr_fieldno. Also, if found and basetype is from the same objfile, |
| cache the results. |
| If not found, return -1 and ignore BASETYPEP. |
| Callers should be aware that in some cases (for example, |
| the type or one of its baseclasses is a stub type and we are |
| debugging a .o file, or the compiler uses DWARF-2 and is not GCC), |
| this function will not be able to find the |
| virtual function table pointer, and vptr_fieldno will remain -1 and |
| vptr_basetype will remain NULL or incomplete. */ |
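
/* A hedged usage sketch:

     struct type *basetype;
     int fieldno = get_vptr_fieldno (type, &basetype);
     if (fieldno >= 0)
       ... the vtable pointer is field FIELDNO of BASETYPE ...

   Passing NULL for BASETYPEP is fine when only the field number is
   needed.  */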
| |
| int |
| get_vptr_fieldno (struct type *type, struct type **basetypep) |
| { |
| type = check_typedef (type); |
| |
| if (TYPE_VPTR_FIELDNO (type) < 0) |
| { |
| int i; |
| |
| /* We must start at zero in case the first (and only) baseclass |
| is virtual (and hence we cannot share the table pointer). */ |
| for (i = 0; i < TYPE_N_BASECLASSES (type); i++) |
| { |
| struct type *baseclass = check_typedef (TYPE_BASECLASS (type, i)); |
| int fieldno; |
| struct type *basetype; |
| |
| fieldno = get_vptr_fieldno (baseclass, &basetype); |
| if (fieldno >= 0) |
| { |
| /* If the type comes from a different objfile we can't cache |
		 it; it may have a different lifetime.  PR 2384.  */
| if (type->objfile_owner () == basetype->objfile_owner ()) |
| { |
| set_type_vptr_fieldno (type, fieldno); |
| set_type_vptr_basetype (type, basetype); |
| } |
| if (basetypep) |
| *basetypep = basetype; |
| return fieldno; |
| } |
| } |
| |
| /* Not found. */ |
| return -1; |
| } |
| else |
| { |
| if (basetypep) |
| *basetypep = TYPE_VPTR_BASETYPE (type); |
| return TYPE_VPTR_FIELDNO (type); |
| } |
| } |
| |
| static void |
| stub_noname_complaint (void) |
| { |
| complaint (_("stub type has NULL name")); |
| } |
| |
| /* Return nonzero if TYPE has a DYN_PROP_BYTE_STRIDE dynamic property |
| attached to it, and that property has a non-constant value. */ |
| |
| static int |
| array_type_has_dynamic_stride (struct type *type) |
| { |
| struct dynamic_prop *prop = type->dyn_prop (DYN_PROP_BYTE_STRIDE); |
| |
| return (prop != NULL && prop->kind () != PROP_CONST); |
| } |
| |
| /* Worker for is_dynamic_type. */ |
| |
| static int |
| is_dynamic_type_internal (struct type *type, int top_level) |
| { |
| type = check_typedef (type); |
| |
| /* We only want to recognize references at the outermost level. */ |
| if (top_level && type->code () == TYPE_CODE_REF) |
| type = check_typedef (TYPE_TARGET_TYPE (type)); |
| |
| /* Types that have a dynamic TYPE_DATA_LOCATION are considered |
| dynamic, even if the type itself is statically defined. |
| From a user's point of view, this may appear counter-intuitive; |
| but it makes sense in this context, because the point is to determine |
| whether any part of the type needs to be resolved before it can |
| be exploited. */ |
| if (TYPE_DATA_LOCATION (type) != NULL |
| && (TYPE_DATA_LOCATION_KIND (type) == PROP_LOCEXPR |
| || TYPE_DATA_LOCATION_KIND (type) == PROP_LOCLIST)) |
| return 1; |
| |
| if (TYPE_ASSOCIATED_PROP (type)) |
| return 1; |
| |
| if (TYPE_ALLOCATED_PROP (type)) |
| return 1; |
| |
| struct dynamic_prop *prop = type->dyn_prop (DYN_PROP_VARIANT_PARTS); |
| if (prop != nullptr && prop->kind () != PROP_TYPE) |
| return 1; |
| |
| if (TYPE_HAS_DYNAMIC_LENGTH (type)) |
| return 1; |
| |
| switch (type->code ()) |
| { |
| case TYPE_CODE_RANGE: |
| { |
| /* A range type is obviously dynamic if it has at least one |
| dynamic bound. But also consider the range type to be |
| dynamic when its subtype is dynamic, even if the bounds |
	   of the range type are static.  This allows us to assume that
	   the subtype of a static range type is also static.  */
| return (!has_static_range (type->bounds ()) |
| || is_dynamic_type_internal (TYPE_TARGET_TYPE (type), 0)); |
| } |
| |
| case TYPE_CODE_STRING: |
| /* Strings are very much like an array of characters, and can be |
| treated as one here. */ |
| case TYPE_CODE_ARRAY: |
| { |
| gdb_assert (type->num_fields () == 1); |
| |
| /* The array is dynamic if either the bounds are dynamic... */ |
| if (is_dynamic_type_internal (type->index_type (), 0)) |
| return 1; |
	/* ... or the elements it contains have dynamic contents...  */
| if (is_dynamic_type_internal (TYPE_TARGET_TYPE (type), 0)) |
| return 1; |
| /* ... or if it has a dynamic stride... */ |
| if (array_type_has_dynamic_stride (type)) |
| return 1; |
| return 0; |
| } |
| |
| case TYPE_CODE_STRUCT: |
| case TYPE_CODE_UNION: |
| { |
| int i; |
| |
| bool is_cplus = HAVE_CPLUS_STRUCT (type); |
| |
| for (i = 0; i < type->num_fields (); ++i) |
| { |
| /* Static fields can be ignored here. */ |
| if (field_is_static (&type->field (i))) |
| continue; |
| /* If the field has dynamic type, then so does TYPE. */ |
| if (is_dynamic_type_internal (type->field (i).type (), 0)) |
| return 1; |
| /* If the field is at a fixed offset, then it is not |
| dynamic. */ |
| if (TYPE_FIELD_LOC_KIND (type, i) != FIELD_LOC_KIND_DWARF_BLOCK) |
| continue; |
| /* Do not consider C++ virtual base types to be dynamic |
| due to the field's offset being dynamic; these are |
| handled via other means. */ |
| if (is_cplus && BASETYPE_VIA_VIRTUAL (type, i)) |
| continue; |
| return 1; |
| } |
| } |
| break; |
| } |
| |
| return 0; |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| int |
| is_dynamic_type (struct type *type) |
| { |
| return is_dynamic_type_internal (type, 1); |
| } |
| |
| static struct type *resolve_dynamic_type_internal |
| (struct type *type, struct property_addr_info *addr_stack, int top_level); |
| |
| /* Given a dynamic range type (dyn_range_type) and a stack of |
| struct property_addr_info elements, return a static version |
| of that type. |
| |
   When RESOLVE_P is true, the returned static range is created by
   actually evaluating any dynamic properties within the range type;
   when RESOLVE_P is false, the returned static range has all of its
   bounds and stride information set to undefined.  The RESOLVE_P ==
   false case is used when evaluating a dynamic array that is not
   allocated or not associated, i.e. one whose bounds information might
   not be initialized yet.  */
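
/* For example (sketch), for a Fortran dummy argument declared as
   "integer :: a(1:n)" the upper bound is typically a DWARF property
   that reads N; resolving it below turns that property into a
   constant bound for the object at hand.  */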
| |
| static struct type * |
| resolve_dynamic_range (struct type *dyn_range_type, |
| struct property_addr_info *addr_stack, |
| bool resolve_p = true) |
| { |
| CORE_ADDR value; |
| struct type *static_range_type, *static_target_type; |
| struct dynamic_prop low_bound, high_bound, stride; |
| |
| gdb_assert (dyn_range_type->code () == TYPE_CODE_RANGE); |
| |
| const struct dynamic_prop *prop = &dyn_range_type->bounds ()->low; |
| if (resolve_p && dwarf2_evaluate_property (prop, NULL, addr_stack, &value)) |
| low_bound.set_const_val (value); |
| else |
| low_bound.set_undefined (); |
| |
| prop = &dyn_range_type->bounds ()->high; |
| if (resolve_p && dwarf2_evaluate_property (prop, NULL, addr_stack, &value)) |
| { |
| high_bound.set_const_val (value); |
| |
| if (dyn_range_type->bounds ()->flag_upper_bound_is_count) |
| high_bound.set_const_val |
| (low_bound.const_val () + high_bound.const_val () - 1); |
| } |
| else |
| high_bound.set_undefined (); |
| |
| bool byte_stride_p = dyn_range_type->bounds ()->flag_is_byte_stride; |
| prop = &dyn_range_type->bounds ()->stride; |
| if (resolve_p && dwarf2_evaluate_property (prop, NULL, addr_stack, &value)) |
| { |
| stride.set_const_val (value); |
| |
| /* If we have a bit stride that is not an exact number of bytes then |
	     it is unlikely to work with current GDB: the array indexing
	     code seems to be pretty heavily tied to byte offsets right
	     now.  This assumes 8 bits in a byte.  */
| struct gdbarch *gdbarch = dyn_range_type->arch (); |
| int unit_size = gdbarch_addressable_memory_unit_size (gdbarch); |
| if (!byte_stride_p && (value % (unit_size * 8)) != 0) |
| error (_("bit strides that are not a multiple of the byte size " |
| "are currently not supported")); |
| } |
| else |
| { |
| stride.set_undefined (); |
| byte_stride_p = true; |
| } |
| |
| static_target_type |
| = resolve_dynamic_type_internal (TYPE_TARGET_TYPE (dyn_range_type), |
| addr_stack, 0); |
| LONGEST bias = dyn_range_type->bounds ()->bias; |
| static_range_type = create_range_type_with_stride |
| (copy_type (dyn_range_type), static_target_type, |
| &low_bound, &high_bound, bias, &stride, byte_stride_p); |
| static_range_type->bounds ()->flag_bound_evaluated = 1; |
| return static_range_type; |
| } |
| |
| /* Resolves dynamic bound values of an array or string type TYPE to static |
| ones. ADDR_STACK is a stack of struct property_addr_info to be used if |
| needed during the dynamic resolution. |
| |
| When RESOLVE_P is true then the dynamic properties of TYPE are |
| evaluated, otherwise the dynamic properties of TYPE are not evaluated, |
| instead we assume the array is not allocated/associated yet. */ |
| |
| static struct type * |
| resolve_dynamic_array_or_string (struct type *type, |
| struct property_addr_info *addr_stack, |
| bool resolve_p = true) |
| { |
| CORE_ADDR value; |
| struct type *elt_type; |
| struct type *range_type; |
| struct type *ary_dim; |
| struct dynamic_prop *prop; |
| unsigned int bit_stride = 0; |
| |
| /* For dynamic type resolution strings can be treated like arrays of |
| characters. */ |
| gdb_assert (type->code () == TYPE_CODE_ARRAY |
| || type->code () == TYPE_CODE_STRING); |
| |
| type = copy_type (type); |
| |
| /* Resolve the allocated and associated properties before doing anything |
| else. If an array is not allocated or not associated then (at least |
| for Fortran) there is no guarantee that the data to define the upper |
| bound, lower bound, or stride will be correct. If RESOLVE_P is |
| already false at this point then this is not the first dimension of |
     the array and an outer dimension has already marked this array as
     not allocated/associated; in that case we just ignore this property.
     This is fine, as GDB only checks the allocated/associated properties
     on the outermost dimension of the array.  */
| prop = TYPE_ALLOCATED_PROP (type); |
| if (prop != NULL && resolve_p |
| && dwarf2_evaluate_property (prop, NULL, addr_stack, &value)) |
| { |
| prop->set_const_val (value); |
| if (value == 0) |
| resolve_p = false; |
| } |
| |
| prop = TYPE_ASSOCIATED_PROP (type); |
| if (prop != NULL && resolve_p |
| && dwarf2_evaluate_property (prop, NULL, addr_stack, &value)) |
| { |
| prop->set_const_val (value); |
| if (value == 0) |
| resolve_p = false; |
| } |
| |
| range_type = check_typedef (type->index_type ()); |
| range_type = resolve_dynamic_range (range_type, addr_stack, resolve_p); |
| |
| ary_dim = check_typedef (TYPE_TARGET_TYPE (type)); |
| if (ary_dim != NULL && ary_dim->code () == TYPE_CODE_ARRAY) |
| elt_type = resolve_dynamic_array_or_string (ary_dim, addr_stack, resolve_p); |
| else |
| elt_type = TYPE_TARGET_TYPE (type); |
| |
| prop = type->dyn_prop (DYN_PROP_BYTE_STRIDE); |
| if (prop != NULL && resolve_p) |
| { |
| if (dwarf2_evaluate_property (prop, NULL, addr_stack, &value)) |
| { |
| type->remove_dyn_prop (DYN_PROP_BYTE_STRIDE); |
| bit_stride = (unsigned int) (value * 8); |
| } |
| else |
| { |
| /* Could be a bug in our code, but it could also happen |
| if the DWARF info is not correct. Issue a warning, |
| and assume no byte/bit stride (leave bit_stride = 0). */ |
| warning (_("cannot determine array stride for type %s"), |
| type->name () ? type->name () : "<no name>"); |
| } |
| } |
| else |
| bit_stride = TYPE_FIELD_BITSIZE (type, 0); |
| |
| return create_array_type_with_stride (type, elt_type, range_type, NULL, |
| bit_stride); |
| } |
| |
| /* Resolve dynamic bounds of members of the union TYPE to static |
| bounds. ADDR_STACK is a stack of struct property_addr_info |
| to be used if needed during the dynamic resolution. */ |
| |
| static struct type * |
| resolve_dynamic_union (struct type *type, |
| struct property_addr_info *addr_stack) |
| { |
| struct type *resolved_type; |
| int i; |
| unsigned int max_len = 0; |
| |
| gdb_assert (type->code () == TYPE_CODE_UNION); |
| |
| resolved_type = copy_type (type); |
| resolved_type->set_fields |
| ((struct field *) |
| TYPE_ALLOC (resolved_type, |
| resolved_type->num_fields () * sizeof (struct field))); |
| memcpy (resolved_type->fields (), |
| type->fields (), |
| resolved_type->num_fields () * sizeof (struct field)); |
| for (i = 0; i < resolved_type->num_fields (); ++i) |
| { |
| struct type *t; |
| |
| if (field_is_static (&type->field (i))) |
| continue; |
| |
| t = resolve_dynamic_type_internal (resolved_type->field (i).type (), |
| addr_stack, 0); |
| resolved_type->field (i).set_type (t); |
| |
| struct type *real_type = check_typedef (t); |
| if (TYPE_LENGTH (real_type) > max_len) |
| max_len = TYPE_LENGTH (real_type); |
| } |
| |
| TYPE_LENGTH (resolved_type) = max_len; |
| return resolved_type; |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| bool |
| variant::matches (ULONGEST value, bool is_unsigned) const |
| { |
| for (const discriminant_range &range : discriminants) |
| if (range.contains (value, is_unsigned)) |
| return true; |
| return false; |
| } |
| |
| static void |
| compute_variant_fields_inner (struct type *type, |
| struct property_addr_info *addr_stack, |
| const variant_part &part, |
| std::vector<bool> &flags); |
| |
| /* A helper function to determine which variant fields will be active. |
| This handles both the variant's direct fields, and any variant |
| parts embedded in this variant. TYPE is the type we're examining. |
| ADDR_STACK holds information about the concrete object. VARIANT is |
| the current variant to be handled. FLAGS is where the results are |
| stored -- this function sets the Nth element in FLAGS if the |
| corresponding field is enabled. ENABLED is whether this variant is |
| enabled or not. */ |
| |
| static void |
| compute_variant_fields_recurse (struct type *type, |
| struct property_addr_info *addr_stack, |
| const variant &variant, |
| std::vector<bool> &flags, |
| bool enabled) |
| { |
| for (int field = variant.first_field; field < variant.last_field; ++field) |
| flags[field] = enabled; |
| |
| for (const variant_part &new_part : variant.parts) |
| { |
| if (enabled) |
| compute_variant_fields_inner (type, addr_stack, new_part, flags); |
| else |
| { |
| for (const auto &sub_variant : new_part.variants) |
| compute_variant_fields_recurse (type, addr_stack, sub_variant, |
| flags, enabled); |
| } |
| } |
| } |
| |
| /* A helper function to determine which variant fields will be active. |
| This evaluates the discriminant, decides which variant (if any) is |
| active, and then updates FLAGS to reflect which fields should be |
| available. TYPE is the type we're examining. ADDR_STACK holds |
   information about the concrete object.  PART is the current
   variant part to be handled.  FLAGS is where the results are stored --
| this function sets the Nth element in FLAGS if the corresponding |
| field is enabled. */ |
| |
| static void |
| compute_variant_fields_inner (struct type *type, |
| struct property_addr_info *addr_stack, |
| const variant_part &part, |
| std::vector<bool> &flags) |
| { |
| /* Evaluate the discriminant. */ |
| gdb::optional<ULONGEST> discr_value; |
| if (part.discriminant_index != -1) |
| { |
| int idx = part.discriminant_index; |
| |
| if (TYPE_FIELD_LOC_KIND (type, idx) != FIELD_LOC_KIND_BITPOS) |
| error (_("Cannot determine struct field location" |
| " (invalid location kind)")); |
| |
| if (addr_stack->valaddr.data () != NULL) |
| discr_value = unpack_field_as_long (type, addr_stack->valaddr.data (), |
| idx); |
| else |
| { |
| CORE_ADDR addr = (addr_stack->addr |
| + (TYPE_FIELD_BITPOS (type, idx) |
| / TARGET_CHAR_BIT)); |
| |
| LONGEST bitsize = TYPE_FIELD_BITSIZE (type, idx); |
| LONGEST size = bitsize / 8; |
| if (size == 0) |
| size = TYPE_LENGTH (type->field (idx).type ()); |
| |
| gdb_byte bits[sizeof (ULONGEST)]; |
| read_memory (addr, bits, size); |
| |
| LONGEST bitpos = (TYPE_FIELD_BITPOS (type, idx) |
| % TARGET_CHAR_BIT); |
| |
| discr_value = unpack_bits_as_long (type->field (idx).type (), |
| bits, bitpos, bitsize); |
| } |
| } |
| |
| /* Go through each variant and see which applies. */ |
| const variant *default_variant = nullptr; |
| const variant *applied_variant = nullptr; |
| for (const auto &variant : part.variants) |
| { |
| if (variant.is_default ()) |
| default_variant = &variant; |
| else if (discr_value.has_value () |
| && variant.matches (*discr_value, part.is_unsigned)) |
| { |
| applied_variant = &variant; |
| break; |
| } |
| } |
| if (applied_variant == nullptr) |
| applied_variant = default_variant; |
| |
| for (const auto &variant : part.variants) |
| compute_variant_fields_recurse (type, addr_stack, variant, |
| flags, applied_variant == &variant); |
| } |
| |
| /* Determine which variant fields are available in TYPE. The enabled |
| fields are stored in RESOLVED_TYPE. ADDR_STACK holds information |
| about the concrete object. PARTS describes the top-level variant |
| parts for this type. */ |
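
/* Variant parts typically come from Rust enums and Ada variant
   records; e.g. for a Rust "enum E { A (i32), B (f64) }" the
   discriminant read above decides whether the A fields or the B
   fields end up enabled in RESOLVED_TYPE.  */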
| |
| static void |
| compute_variant_fields (struct type *type, |
| struct type *resolved_type, |
| struct property_addr_info *addr_stack, |
| const gdb::array_view<variant_part> &parts) |
| { |
| /* Assume all fields are included by default. */ |
| std::vector<bool> flags (resolved_type->num_fields (), true); |
| |
| /* Now disable fields based on the variants that control them. */ |
| for (const auto &part : parts) |
| compute_variant_fields_inner (type, addr_stack, part, flags); |
| |
| resolved_type->set_num_fields |
| (std::count (flags.begin (), flags.end (), true)); |
| resolved_type->set_fields |
| ((struct field *) |
| TYPE_ALLOC (resolved_type, |
| resolved_type->num_fields () * sizeof (struct field))); |
| |
| int out = 0; |
| for (int i = 0; i < type->num_fields (); ++i) |
| { |
| if (!flags[i]) |
| continue; |
| |
| resolved_type->field (out) = type->field (i); |
| ++out; |
| } |
| } |
| |
| /* Resolve dynamic bounds of members of the struct TYPE to static |
| bounds. ADDR_STACK is a stack of struct property_addr_info to |
| be used if needed during the dynamic resolution. */ |
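
/* Note that fields whose byte offset is itself dynamic (location kind
   FIELD_LOC_KIND_DWARF_BLOCK, produced for instance by Fortran derived
   types with allocatable components or by Ada variant records) have
   that offset evaluated and turned into a constant bit position
   below.  */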
| |
| static struct type * |
| resolve_dynamic_struct (struct type *type, |
| struct property_addr_info *addr_stack) |
| { |
| struct type *resolved_type; |
| int i; |
| unsigned resolved_type_bit_length = 0; |
| |
| gdb_assert (type->code () == TYPE_CODE_STRUCT); |
| |
| resolved_type = copy_type (type); |
| |
| dynamic_prop *variant_prop = resolved_type->dyn_prop (DYN_PROP_VARIANT_PARTS); |
| if (variant_prop != nullptr && variant_prop->kind () == PROP_VARIANT_PARTS) |
| { |
| compute_variant_fields (type, resolved_type, addr_stack, |
| *variant_prop->variant_parts ()); |
| /* We want to leave the property attached, so that the Rust code |
| can tell whether the type was originally an enum. */ |
| variant_prop->set_original_type (type); |
| } |
| else |
| { |
| resolved_type->set_fields |
| ((struct field *) |
| TYPE_ALLOC (resolved_type, |
| resolved_type->num_fields () * sizeof (struct field))); |
| if (type->num_fields () > 0) |
| memcpy (resolved_type->fields (), |
| type->fields (), |
| resolved_type->num_fields () * sizeof (struct field)); |
| } |
| |
| for (i = 0; i < resolved_type->num_fields (); ++i) |
| { |
| unsigned new_bit_length; |
| struct property_addr_info pinfo; |
| |
| if (field_is_static (&resolved_type->field (i))) |
| continue; |
| |
| if (TYPE_FIELD_LOC_KIND (resolved_type, i) == FIELD_LOC_KIND_DWARF_BLOCK) |
| { |
| struct dwarf2_property_baton baton; |
| baton.property_type |
| = lookup_pointer_type (resolved_type->field (i).type ()); |
| baton.locexpr = *TYPE_FIELD_DWARF_BLOCK (resolved_type, i); |
| |
| struct dynamic_prop prop; |
| prop.set_locexpr (&baton); |
| |
| CORE_ADDR addr; |
| if (dwarf2_evaluate_property (&prop, nullptr, addr_stack, &addr, |
| true)) |
| resolved_type->field (i).set_loc_bitpos |
| (TARGET_CHAR_BIT * (addr - addr_stack->addr)); |
| } |
| |
| /* As we know this field is not a static field, the field's |
| field_loc_kind should be FIELD_LOC_KIND_BITPOS. Verify |
| this is the case, but only trigger a simple error rather |
| than an internal error if that fails. While failing |
| that verification indicates a bug in our code, the error |
	 is not severe enough to justify asking the user to stop
	 their debugging session because of it.  */
| if (TYPE_FIELD_LOC_KIND (resolved_type, i) != FIELD_LOC_KIND_BITPOS) |
| error (_("Cannot determine struct field location" |
| " (invalid location kind)")); |
| |
| pinfo.type = check_typedef (resolved_type->field (i).type ()); |
| pinfo.valaddr = addr_stack->valaddr; |
| pinfo.addr |
| = (addr_stack->addr |
| + (TYPE_FIELD_BITPOS (resolved_type, i) / TARGET_CHAR_BIT)); |
| pinfo.next = addr_stack; |
| |
| resolved_type->field (i).set_type |
| (resolve_dynamic_type_internal (resolved_type->field (i).type (), |
| &pinfo, 0)); |
| gdb_assert (TYPE_FIELD_LOC_KIND (resolved_type, i) |
| == FIELD_LOC_KIND_BITPOS); |
| |
| new_bit_length = TYPE_FIELD_BITPOS (resolved_type, i); |
| if (TYPE_FIELD_BITSIZE (resolved_type, i) != 0) |
| new_bit_length += TYPE_FIELD_BITSIZE (resolved_type, i); |
| else |
| { |
| struct type *real_type |
| = check_typedef (resolved_type->field (i).type ()); |
| |
| new_bit_length += (TYPE_LENGTH (real_type) * TARGET_CHAR_BIT); |
| } |
| |
| /* Normally, we would use the position and size of the last field |
| to determine the size of the enclosing structure. But GCC seems |
| to be encoding the position of some fields incorrectly when |
| the struct contains a dynamic field that is not placed last. |
| So we compute the struct size based on the field that has |
| the highest position + size - probably the best we can do. */ |
| if (new_bit_length > resolved_type_bit_length) |
| resolved_type_bit_length = new_bit_length; |
| } |
| |
  /* The length of a type won't change for Fortran, but it does for C and Ada.
     For Fortran the size of dynamic fields might change over time but not the
| type length of the structure. If we adapt it, we run into problems |
| when calculating the element offset for arrays of structs. */ |
| if (current_language->la_language != language_fortran) |
| TYPE_LENGTH (resolved_type) |
| = (resolved_type_bit_length + TARGET_CHAR_BIT - 1) / TARGET_CHAR_BIT; |
| |
| /* The Ada language uses this field as a cache for static fixed types: reset |
| it as RESOLVED_TYPE must have its own static fixed type. */ |
| TYPE_TARGET_TYPE (resolved_type) = NULL; |
| |
| return resolved_type; |
| } |
| |
/* Worker for resolve_dynamic_type.  */
| |
| static struct type * |
| resolve_dynamic_type_internal (struct type *type, |
| struct property_addr_info *addr_stack, |
| int top_level) |
| { |
| struct type *real_type = check_typedef (type); |
| struct type *resolved_type = nullptr; |
| struct dynamic_prop *prop; |
| CORE_ADDR value; |
| |
| if (!is_dynamic_type_internal (real_type, top_level)) |
| return type; |
| |
| gdb::optional<CORE_ADDR> type_length; |
| prop = TYPE_DYNAMIC_LENGTH (type); |
| if (prop != NULL |
| && dwarf2_evaluate_property (prop, NULL, addr_stack, &value)) |
| type_length = value; |
| |
| if (type->code () == TYPE_CODE_TYPEDEF) |
| { |
| resolved_type = copy_type (type); |
| TYPE_TARGET_TYPE (resolved_type) |
| = resolve_dynamic_type_internal (TYPE_TARGET_TYPE (type), addr_stack, |
| top_level); |
| } |
| else |
| { |
| /* Before trying to resolve TYPE, make sure it is not a stub. */ |
| type = real_type; |
| |
| switch (type->code ()) |
| { |
| case TYPE_CODE_REF: |
| { |
| struct property_addr_info pinfo; |
| |
| pinfo.type = check_typedef (TYPE_TARGET_TYPE (type)); |
| pinfo.valaddr = {}; |
| if (addr_stack->valaddr.data () != NULL) |
| pinfo.addr = extract_typed_address (addr_stack->valaddr.data (), |
| type); |
| else |
| pinfo.addr = read_memory_typed_address (addr_stack->addr, type); |
| pinfo.next = addr_stack; |
| |
| resolved_type = copy_type (type); |
| TYPE_TARGET_TYPE (resolved_type) |
| = resolve_dynamic_type_internal (TYPE_TARGET_TYPE (type), |
| &pinfo, top_level); |
| break; |
| } |
| |
| case TYPE_CODE_STRING: |
| /* Strings are very much like an array of characters, and can be |
| treated as one here. */ |
| case TYPE_CODE_ARRAY: |
| resolved_type = resolve_dynamic_array_or_string (type, addr_stack); |
| break; |
| |
| case TYPE_CODE_RANGE: |
| resolved_type = resolve_dynamic_range (type, addr_stack); |
| break; |
| |
| case TYPE_CODE_UNION: |
| resolved_type = resolve_dynamic_union (type, addr_stack); |
| break; |
| |
| case TYPE_CODE_STRUCT: |
| resolved_type = resolve_dynamic_struct (type, addr_stack); |
| break; |
| } |
| } |
| |
| if (resolved_type == nullptr) |
| return type; |
| |
| if (type_length.has_value ()) |
| { |
| TYPE_LENGTH (resolved_type) = *type_length; |
| resolved_type->remove_dyn_prop (DYN_PROP_BYTE_SIZE); |
| } |
| |
| /* Resolve data_location attribute. */ |
| prop = TYPE_DATA_LOCATION (resolved_type); |
| if (prop != NULL |
| && dwarf2_evaluate_property (prop, NULL, addr_stack, &value)) |
| { |
| /* Start of Fortran hack. See comment in f-lang.h for what is going |
	 on here.  */
| if (current_language->la_language == language_fortran |
| && resolved_type->code () == TYPE_CODE_ARRAY) |
| value = fortran_adjust_dynamic_array_base_address_hack (resolved_type, |
| value); |
| /* End of Fortran hack. */ |
| prop->set_const_val (value); |
| } |
| |
| return resolved_type; |
| } |
| |
/* See gdbtypes.h.  */
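
/* A hedged usage sketch: given a type and the address of an object of
   that type, a caller that does not have the object's contents in a
   buffer can pass an empty view for VALADDR:

     struct type *resolved = resolve_dynamic_type (type, {}, addr);

   The contents view is only consulted for properties that need it
   (e.g. discriminants and reference targets).  */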
| |
| struct type * |
| resolve_dynamic_type (struct type *type, |
| gdb::array_view<const gdb_byte> valaddr, |
| CORE_ADDR addr) |
| { |
| struct property_addr_info pinfo |
| = {check_typedef (type), valaddr, addr, NULL}; |
| |
| return resolve_dynamic_type_internal (type, &pinfo, 1); |
| } |
| |
/* See gdbtypes.h.  */
| |
| dynamic_prop * |
| type::dyn_prop (dynamic_prop_node_kind prop_kind) const |
| { |
| dynamic_prop_list *node = this->main_type->dyn_prop_list; |
| |
| while (node != NULL) |
| { |
| if (node->prop_kind == prop_kind) |
| return &node->prop; |
| node = node->next; |
| } |
| return NULL; |
| } |
| |
/* See gdbtypes.h.  */
| |
| void |
| type::add_dyn_prop (dynamic_prop_node_kind prop_kind, dynamic_prop prop) |
| { |
| struct dynamic_prop_list *temp; |
| |
| gdb_assert (this->is_objfile_owned ()); |
| |
| temp = XOBNEW (&this->objfile_owner ()->objfile_obstack, |
| struct dynamic_prop_list); |
| temp->prop_kind = prop_kind; |
| temp->prop = prop; |
| temp->next = this->main_type->dyn_prop_list; |
| |
| this->main_type->dyn_prop_list = temp; |
| } |
| |
| /* See gdbtypes.h. */ |
| |
| void |
| type::remove_dyn_prop (dynamic_prop_node_kind kind) |
| { |
| struct dynamic_prop_list *prev_node, *curr_node; |
| |
| curr_node = this->main_type->dyn_prop_list; |
| prev_node = NULL; |
| |
| while (NULL != curr_node) |
| { |
| if (curr_node->prop_kind == kind) |
| { |
| /* Update the linked list but don't free anything. |
	       The property was allocated on an obstack and it is not known
	       whether it is on top of it.  Nevertheless, everything is
	       released when the complete obstack is freed.  */
| if (NULL == prev_node) |
| this->main_type->dyn_prop_list = curr_node->next; |
| else |
| prev_node->next = curr_node->next; |
| |
| return; |
| } |
| |
| prev_node = curr_node; |
| curr_node = curr_node->next; |
| } |
| } |
| |
| /* Find the real type of TYPE. This function returns the real type, |
| after removing all layers of typedefs, and completing opaque or stub |
| types. Completion changes the TYPE argument, but stripping of |
| typedefs does not. |
| |
| Instance flags (e.g. const/volatile) are preserved as typedefs are |
| stripped. If necessary a new qualified form of the underlying type |
| is created. |
| |
| NOTE: This will return a typedef if TYPE_TARGET_TYPE for the typedef has |
| not been computed and we're either in the middle of reading symbols, or |
| there was no name for the typedef in the debug info. |
| |
| NOTE: Lookup of opaque types can throw errors for invalid symbol files. |
| QUITs in the symbol reading code can also throw. |
| Thus this function can throw an exception. |
| |
| If TYPE is a TYPE_CODE_TYPEDEF, its length is updated to the length of |
| the target type. |
| |
| If this is a stubbed struct (i.e. declared as struct foo *), see if |
| we can find a full definition in some other file. If so, copy this |
| definition, so we can use it in future. There used to be a comment |
| (but not any code) that if we don't find a full definition, we'd |
| set a flag so we don't spend time in the future checking the same |
| type. That would be a mistake, though--we might load in more |
| symbols which contain a full definition for the type. */ |
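
/* The common idiom throughout GDB is therefore, as a sketch:

     type = check_typedef (type);
     if (type->code () == TYPE_CODE_STRUCT)
       ...

   i.e. strip typedefs (and complete stubs) once, then inspect the
   resulting type.  */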
| |
| struct type * |
| check_typedef (struct type *type) |
| { |
| struct type *orig_type = type; |
| |
| gdb_assert (type); |
| |
| /* While we're removing typedefs, we don't want to lose qualifiers. |
| E.g., const/volatile. */ |
| type_instance_flags instance_flags = type->instance_flags (); |
| |
| while (type->code () == TYPE_CODE_TYPEDEF) |
| { |
| if (!TYPE_TARGET_TYPE (type)) |
| { |
| const char *name; |
| struct symbol *sym; |
| |
| /* It is dangerous to call lookup_symbol if we are currently |
| reading a symtab. Infinite recursion is one danger. */ |
| if (currently_reading_symtab) |
| return make_qualified_type (type, instance_flags, NULL); |
| |
| name = type->name (); |
| /* FIXME: shouldn't we look in STRUCT_DOMAIN and/or |
| VAR_DOMAIN as appropriate? */ |
| if (name == NULL) |
| { |
| stub_noname_complaint (); |
| return make_qualified_type (type, instance_flags, NULL); |
| } |
| sym = lookup_symbol (name, 0, STRUCT_DOMAIN, 0).symbol; |
| if (sym) |
| TYPE_TARGET_TYPE (type) = SYMBOL_TYPE (sym); |
| else /* TYPE_CODE_UNDEF */ |
| TYPE_TARGET_TYPE (type) = alloc_type_arch (type->arch ()); |
| } |
| type = TYPE_TARGET_TYPE (type); |
| |
| /* Preserve the instance flags as we traverse down the typedef chain. |
| |
| Handling address spaces/classes is nasty, what do we do if there's a |
| conflict? |
| E.g., what if an outer typedef marks the type as class_1 and an inner |
| typedef marks the type as class_2? |
| This is the wrong place to do such error checking. We leave it to |
| the code that created the typedef in the first place to flag the |
| error. We just pick the outer address space (akin to letting the |
| outer cast in a chain of casting win), instead of assuming |
| "it can't happen". */ |
| { |
| const type_instance_flags ALL_SPACES |
| = (TYPE_INSTANCE_FLAG_CODE_SPACE |
| | TYPE_INSTANCE_FLAG_DATA_SPACE); |
| const type_instance_flags ALL_CLASSES |
| = TYPE_INSTANCE_FLAG_ADDRESS_CLASS_ALL; |
| |
| type_instance_flags new_instance_flags = type->instance_flags (); |
| |
| /* Treat code vs data spaces and address classes separately. */ |
| if ((instance_flags & ALL_SPACES) != 0) |
| new_instance_flags &= ~ALL_SPACES; |
| if ((instance_flags & ALL_CLASSES) != 0) |
| new_instance_flags &= ~ALL_CLASSES; |
| |
| instance_flags |= new_instance_flags; |
| } |
| } |
| |
| /* If this is a struct/class/union with no fields, then check |
| whether a full definition exists somewhere else. This is for |
| systems where a type definition with no fields is issued for such |
| types, instead of identifying them as stub types in the first |
| place. */ |
| |
| if (TYPE_IS_OPAQUE (type) |
| && opaque_type_resolution |
| && !currently_reading_symtab) |
| { |
| const char *name = type->name (); |
| struct type *newtype; |
| |
| if (name == NULL) |
| { |
| stub_noname_complaint (); |
| return make_qualified_type (type, instance_flags, NULL); |
| } |
| newtype = lookup_transparent_type (name); |
| |
| if (newtype) |
| { |
| /* If the resolved type and the stub are in the same |
| objfile, then replace the stub type with the real deal. |
| But if they're in separate objfiles, leave the stub |
| alone; we'll just look up the transparent type every time |
| we call check_typedef. We can't create pointers between |
| types allocated to different objfiles, since they may |
| have different lifetimes. Trying to copy NEWTYPE over to |
| TYPE's objfile is pointless, too, since you'll have to |
| move over any other types NEWTYPE refers to, which could |
| be an unbounded amount of stuff. */ |
| if (newtype->objfile_owner () == type->objfile_owner ()) |
| type = make_qualified_type (newtype, type->instance_flags (), type); |
| else |
| type = newtype; |
| } |
| } |
| /* Otherwise, rely on the stub flag being set for opaque/stubbed |
| types. */ |
| else if (type->is_stub () && !currently_reading_symtab) |
| { |
| const char *name = type->name (); |
| /* FIXME: shouldn't we look in STRUCT_DOMAIN and/or VAR_DOMAIN |
| as appropriate? */ |
| struct symbol *sym; |
| |
| if (name == NULL) |
| { |
| stub_noname_complaint (); |
| return make_qualified_type (type, instance_flags, NULL); |
| } |
| sym = lookup_symbol (name, 0, STRUCT_DOMAIN, 0).symbol; |
| if (sym) |
| { |
| /* Same as above for opaque types, we can replace the stub |
| with the complete type only if they are in the same |
| objfile. */ |
| if (SYMBOL_TYPE (sym)->objfile_owner () == type->objfile_owner ()) |
| type = make_qualified_type (SYMBOL_TYPE (sym), |
| type->instance_flags (), type); |
| else |
| type = SYMBOL_TYPE (sym); |
| } |
| } |
| |
| if (type->target_is_stub ()) |
| { |
| struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type)); |
| |
| if (target_type->is_stub () || target_type->target_is_stub ()) |
| { |
| /* Nothing we can do. */ |
| } |
| else if (type->code () == TYPE_CODE_RANGE) |
| { |
| TYPE_LENGTH (type) = TYPE_LENGTH (target_type); |
| type->set_target_is_stub (false); |
| } |
| else if (type->code () == TYPE_CODE_ARRAY |
| && update_static_array_size (type)) |
| type->set_target_is_stub (false); |
| } |
| |
| type = make_qualified_type (type, instance_flags, NULL); |
| |
| /* Cache TYPE_LENGTH for future use. */ |
| TYPE_LENGTH (orig_type) = TYPE_LENGTH (type); |
| |
| return type; |
| } |
| |
| /* Parse a type expression in the string [P..P+LENGTH). If an error |
| occurs, silently return a void type. */ |
| |
| static struct type * |
| safe_parse_type (struct gdbarch *gdbarch, const char *p, int length) |
| { |
| struct ui_file *saved_gdb_stderr; |
| struct type *type = NULL; /* Initialize to keep gcc happy. */ |
| |
| /* Suppress error messages. */ |
| saved_gdb_stderr = gdb_stderr; |
| gdb_stderr = &null_stream; |
| |
| /* Call parse_and_eval_type() without fear of longjmp()s. */ |
| try |
| { |
| type = parse_and_eval_type (p, length); |
| } |
| catch (const gdb_exception_error &except) |
| { |
| type = builtin_type (gdbarch)->builtin_void; |
| } |
| |
| /* Stop suppressing error messages. */ |
| gdb_stderr = saved_gdb_stderr; |
| |
| return type; |
| } |
| |
| /* Ugly hack to convert method stubs into method types. |
| |
| He ain't kiddin'. This demangles the name of the method into a |
| string including argument types, parses out each argument type, |
| generates a string casting a zero to that type, evaluates the |
| string, and stuffs the resulting type into an argtype vector!!! |
| Then it knows the type of the whole function (including argument |
   types for overloading), information that used to be in the stabs but
   was removed to claw back the space it required.  */
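
/* As a concrete (illustrative) example, a stub method of class S whose
   physname demangles to "S::frob(int, char*)" ends up with an argtype
   vector of { S *, int, char * }: the THIS pointer added below plus
   one parsed type per demangled argument.  */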
| |
| static void |
| check_stub_method (struct type *type, int method_id, int signature_id) |
| { |
| struct gdbarch *gdbarch = type->arch (); |
| struct fn_field *f; |
| char *mangled_name = gdb_mangle_name (type, method_id, signature_id); |
| gdb::unique_xmalloc_ptr<char> demangled_name |
| = gdb_demangle (mangled_name, DMGL_PARAMS | DMGL_ANSI); |
| char *argtypetext, *p; |
| int depth = 0, argcount = 1; |
| struct field *argtypes; |
| struct type *mtype; |
| |
| /* Make sure we got back a function string that we can use. */ |
| if (demangled_name) |
| p = strchr (demangled_name.get (), '('); |
| else |
| p = NULL; |
| |
| if (demangled_name == NULL || p == NULL) |
| error (_("Internal: Cannot demangle mangled name `%s'."), |
| mangled_name); |
| |
| /* Now, read in the parameters that define this type. */ |
| p += 1; |
| argtypetext = p; |
| while (*p) |
| { |
| if (*p == '(' || *p == '<') |
| { |
| depth += 1; |
| } |
| else if (*p == ')' || *p == '>') |
| { |
| depth -= 1; |
| } |
| else if (*p == ',' && depth == 0) |
| { |
| argcount += 1; |
| } |
| |
| p += 1; |
| } |
| |
| /* If we read one argument and it was ``void'', don't count it. */ |
| if (startswith (argtypetext, "(void)")) |
| argcount -= 1; |
| |
| /* We need one extra slot, for the THIS pointer. */ |
| |
| argtypes = (struct field *) |
| TYPE_ALLOC (type, (argcount + 1) * sizeof (struct field)); |
| p = argtypetext; |
| |
| /* Add THIS pointer for non-static methods. */ |
| f = TYPE_FN_FIELDLIST1 (type, method_id); |
| if (TYPE_FN_FIELD_STATIC_P (f, signature_id)) |
| argcount = 0; |
| else |
| { |
| argtypes[0].set_type (lookup_pointer_type (type)); |
| argcount = 1; |
| } |
| |
| if (*p != ')') /* () means no args, skip while. */ |
| { |
| depth = 0; |
| while (*p) |
| { |
| if (depth <= 0 && (*p == ',' || *p == ')')) |
| { |
	      /* Avoid parsing an ellipsis; it will be handled below.
| Also avoid ``void'' as above. */ |
| if (strncmp (argtypetext, "...", p - argtypetext) != 0 |
| && strncmp (argtypetext, "void", p - argtypetext) != 0) |
| { |
| argtypes[argcount].set_type |
| (safe_parse_type (gdbarch, argtypetext, p - argtypetext)); |
| argcount += 1; |
| } |
| argtypetext = p + 1; |
| } |
| |
| if (*p == '(' || *p == '<') |
| { |
| depth += 1; |
| } |
| else if (*p == ')' || *p == '>') |
| { |
| depth -= 1; |
| } |
| |
| p += 1; |
| } |
| } |
| |
| TYPE_FN_FIELD_PHYSNAME (f, signature_id) = mangled_name; |
| |
| /* Now update the old "stub" type into a real type. */ |
| mtype = TYPE_FN_FIELD_TYPE (f, signature_id); |
| /* MTYPE may currently be a function (TYPE_CODE_FUNC). |
| We want a method (TYPE_CODE_METHOD). */ |
| smash_to_method_type (mtype, type, TYPE_TARGET_TYPE (mtype), |
| argtypes, argcount, p[-2] == '.'); |
| mtype->set_is_stub (false); |
| TYPE_FN_FIELD_STUB (f, signature_id) = 0; |
| } |
| |
| /* This is the external interface to check_stub_method, above. This |
| function unstubs all of the signatures for TYPE's METHOD_ID method |
| name. After calling this function TYPE_FN_FIELD_STUB will be |
| cleared for each signature and TYPE_FN_FIELDLIST_NAME will be |
| correct. |
| |
   This function unfortunately cannot die until stabs do.  */
| |
| void |
| check_stub_method_group (struct type *type, int method_id) |
| { |
| int len = TYPE_FN_FIELDLIST_LENGTH (type, method_id); |
| struct fn_field *f = TYPE_FN_FIELDLIST1 (type, method_id); |
| |
| for (int j = 0; j < len; j++) |
| { |
| if (TYPE_FN_FIELD_STUB (f, j)) |
| check_stub_method (type, method_id, j); |
| } |
| } |
| |
| /* Ensure it is in .rodata (if available) by working around GCC PR 44690. */ |
| const struct cplus_struct_type cplus_struct_default = { }; |
| |
| void |
| allocate_cplus_struct_type (struct type *type) |
| { |
| if (HAVE_CPLUS_STRUCT (type)) |
| /* Structure was already allocated. Nothing more to do. */ |
| return; |
| |
| TYPE_SPECIFIC_FIELD (type) = TYPE_SPECIFIC_CPLUS_STUFF; |
| TYPE_RAW_CPLUS_SPECIFIC (type) = (struct cplus_struct_type *) |
| TYPE_ALLOC (type, sizeof (struct cplus_struct_type)); |
| *(TYPE_RAW_CPLUS_SPECIFIC (type)) = cplus_struct_default; |
| set_type_vptr_fieldno (type, -1); |
| } |
| |
| const struct gnat_aux_type gnat_aux_default = |
| { NULL }; |
| |
| /* Set the TYPE's type-specific kind to TYPE_SPECIFIC_GNAT_STUFF, |
| and allocate the associated gnat-specific data. The gnat-specific |
| data is also initialized to gnat_aux_default. */ |
| |
| void |
allocate_gnat_aux_type (struct type *type)
|