| /* A pass for lowering trees to RTL. |
| Copyright (C) 2004-2021 Free Software Foundation, Inc. |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify |
| it under the terms of the GNU General Public License as published by |
| the Free Software Foundation; either version 3, or (at your option) |
| any later version. |
| |
| GCC is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "backend.h" |
| #include "target.h" |
| #include "rtl.h" |
| #include "tree.h" |
| #include "gimple.h" |
| #include "cfghooks.h" |
| #include "tree-pass.h" |
| #include "memmodel.h" |
| #include "tm_p.h" |
| #include "ssa.h" |
| #include "optabs.h" |
| #include "regs.h" /* For reg_renumber. */ |
| #include "emit-rtl.h" |
| #include "recog.h" |
| #include "cgraph.h" |
| #include "diagnostic.h" |
| #include "fold-const.h" |
| #include "varasm.h" |
| #include "stor-layout.h" |
| #include "stmt.h" |
| #include "print-tree.h" |
| #include "cfgrtl.h" |
| #include "cfganal.h" |
| #include "cfgbuild.h" |
| #include "cfgcleanup.h" |
| #include "dojump.h" |
| #include "explow.h" |
| #include "calls.h" |
| #include "expr.h" |
| #include "internal-fn.h" |
| #include "tree-eh.h" |
| #include "gimple-iterator.h" |
| #include "gimple-expr.h" |
| #include "gimple-walk.h" |
| #include "tree-cfg.h" |
| #include "tree-dfa.h" |
| #include "tree-ssa.h" |
| #include "except.h" |
| #include "gimple-pretty-print.h" |
| #include "toplev.h" |
| #include "debug.h" |
| #include "tree-inline.h" |
| #include "value-prof.h" |
| #include "tree-ssa-live.h" |
| #include "tree-outof-ssa.h" |
| #include "cfgloop.h" |
| #include "insn-attr.h" /* For INSN_SCHEDULING. */ |
| #include "stringpool.h" |
| #include "attribs.h" |
| #include "asan.h" |
| #include "tree-ssa-address.h" |
| #include "output.h" |
| #include "builtins.h" |
| #include "opts.h" |
| |
| /* Some systems use __main in a way incompatible with its use in GCC; in |
| these cases use the macros NAME__MAIN to give a quoted symbol and |
| SYMBOL__MAIN to give the same symbol without quotes for an alternative |
| entry point. You must define both, or neither. */ |
| #ifndef NAME__MAIN |
| #define NAME__MAIN "__main" |
| #endif |
| |
| /* This variable holds information helping the rewriting of SSA trees |
| into RTL. */ |
| struct ssaexpand SA; |
| |
| /* This variable holds the currently expanded gimple statement for purposes |
| of communicating the profile info to the builtin expanders. */ |
| gimple *currently_expanding_gimple_stmt; |
| |
| static rtx expand_debug_expr (tree); |
| |
| static bool defer_stack_allocation (tree, bool); |
| |
| static void record_alignment_for_reg_var (unsigned int); |
| |
| /* Return an expression tree corresponding to the RHS of GIMPLE |
| statement STMT. */ |
| |
| tree |
| gimple_assign_rhs_to_tree (gimple *stmt) |
| { |
| tree t; |
| switch (gimple_assign_rhs_class (stmt)) |
| { |
| case GIMPLE_TERNARY_RHS: |
| t = build3 (gimple_assign_rhs_code (stmt), |
| TREE_TYPE (gimple_assign_lhs (stmt)), |
| gimple_assign_rhs1 (stmt), gimple_assign_rhs2 (stmt), |
| gimple_assign_rhs3 (stmt)); |
| break; |
| case GIMPLE_BINARY_RHS: |
| t = build2 (gimple_assign_rhs_code (stmt), |
| TREE_TYPE (gimple_assign_lhs (stmt)), |
| gimple_assign_rhs1 (stmt), gimple_assign_rhs2 (stmt)); |
| break; |
| case GIMPLE_UNARY_RHS: |
| t = build1 (gimple_assign_rhs_code (stmt), |
| TREE_TYPE (gimple_assign_lhs (stmt)), |
| gimple_assign_rhs1 (stmt)); |
| break; |
| case GIMPLE_SINGLE_RHS: |
| { |
| t = gimple_assign_rhs1 (stmt); |
| /* Avoid modifying this tree in place below. */ |
| if ((gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (t) |
| && gimple_location (stmt) != EXPR_LOCATION (t)) |
| || (gimple_block (stmt) && currently_expanding_to_rtl |
| && EXPR_P (t))) |
| t = copy_node (t); |
| break; |
| } |
| default: |
| gcc_unreachable (); |
| } |
| |
| if (gimple_has_location (stmt) && CAN_HAVE_LOCATION_P (t)) |
| SET_EXPR_LOCATION (t, gimple_location (stmt)); |
| |
| return t; |
| } |
| |
| |
| #ifndef STACK_ALIGNMENT_NEEDED |
| #define STACK_ALIGNMENT_NEEDED 1 |
| #endif |
| |
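| /* If X is an SSA_NAME, yield its underlying variable (which may be |
| NULL_TREE for anonymous SSA names); otherwise yield X itself. */ |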
| #define SSAVAR(x) (TREE_CODE (x) == SSA_NAME ? SSA_NAME_VAR (x) : x) |
| |
| /* Choose either CUR or NEXT as the leader DECL for a partition. |
| Prefer ignored decls, to simplify debug dumps and reduce the ambiguity |
| that arises from the same user variable being in multiple partitions |
| (this is less likely for compiler-introduced temps). */ |
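| /* For example (illustrative): merging a user-visible variable with a |
| DECL_IGNORED_P compiler temporary yields the ignored temporary, while |
| merging two user-visible variables keeps the current leader CUR. */ |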
| |
| static tree |
| leader_merge (tree cur, tree next) |
| { |
| if (cur == NULL || cur == next) |
| return next; |
| |
| if (DECL_P (cur) && DECL_IGNORED_P (cur)) |
| return cur; |
| |
| if (DECL_P (next) && DECL_IGNORED_P (next)) |
| return next; |
| |
| return cur; |
| } |
| |
| /* Associate declaration T with storage space X. If T is not an |
| SSA name, this is exactly SET_DECL_RTL; otherwise, associate the |
| partition of T with X. */ |
| static inline void |
| set_rtl (tree t, rtx x) |
| { |
| gcc_checking_assert (!x |
| || !(TREE_CODE (t) == SSA_NAME || is_gimple_reg (t)) |
| || (use_register_for_decl (t) |
| ? (REG_P (x) |
| || (GET_CODE (x) == CONCAT |
| && (REG_P (XEXP (x, 0)) |
| || SUBREG_P (XEXP (x, 0))) |
| && (REG_P (XEXP (x, 1)) |
| || SUBREG_P (XEXP (x, 1)))) |
| /* We need to accept PARALLELs for RESULT_DECLs |
| because of vector types with BLKmode returned |
| in multiple registers, but they are supposed |
| to be uncoalesced. */ |
| || (GET_CODE (x) == PARALLEL |
| && SSAVAR (t) |
| && TREE_CODE (SSAVAR (t)) == RESULT_DECL |
| && (GET_MODE (x) == BLKmode |
| || !flag_tree_coalesce_vars))) |
| : (MEM_P (x) || x == pc_rtx |
| || (GET_CODE (x) == CONCAT |
| && MEM_P (XEXP (x, 0)) |
| && MEM_P (XEXP (x, 1)))))); |
| /* Check that the RTL for SSA_NAMEs and gimple-reg PARM_DECLs and |
| RESULT_DECLs has the expected mode. For memory, we accept |
| unpromoted modes, since that's what we're likely to get. For |
| PARM_DECLs and RESULT_DECLs, we'll have been called by |
| set_parm_rtl, which will give us the default def, so we don't |
| have to compute it ourselves. For RESULT_DECLs, we accept mode |
| mismatches too, as long as we have BLKmode or are not coalescing |
| across variables, so that we don't reject BLKmode PARALLELs or |
| unpromoted REGs. */ |
| gcc_checking_assert (!x || x == pc_rtx || TREE_CODE (t) != SSA_NAME |
| || (SSAVAR (t) |
| && TREE_CODE (SSAVAR (t)) == RESULT_DECL |
| && (promote_ssa_mode (t, NULL) == BLKmode |
| || !flag_tree_coalesce_vars)) |
| || !use_register_for_decl (t) |
| || GET_MODE (x) == promote_ssa_mode (t, NULL)); |
| |
| if (x) |
| { |
| bool skip = false; |
| tree cur = NULL_TREE; |
| rtx xm = x; |
| |
| retry: |
| if (MEM_P (xm)) |
| cur = MEM_EXPR (xm); |
| else if (REG_P (xm)) |
| cur = REG_EXPR (xm); |
| else if (SUBREG_P (xm)) |
| { |
| gcc_assert (subreg_lowpart_p (xm)); |
| xm = SUBREG_REG (xm); |
| goto retry; |
| } |
| else if (GET_CODE (xm) == CONCAT) |
| { |
| xm = XEXP (xm, 0); |
| goto retry; |
| } |
| else if (GET_CODE (xm) == PARALLEL) |
| { |
| xm = XVECEXP (xm, 0, 0); |
| gcc_assert (GET_CODE (xm) == EXPR_LIST); |
| xm = XEXP (xm, 0); |
| goto retry; |
| } |
| else if (xm == pc_rtx) |
| skip = true; |
| else |
| gcc_unreachable (); |
| |
| tree next = skip ? cur : leader_merge (cur, SSAVAR (t) ? SSAVAR (t) : t); |
| |
| if (cur != next) |
| { |
| if (MEM_P (x)) |
| set_mem_attributes (x, |
| next && TREE_CODE (next) == SSA_NAME |
| ? TREE_TYPE (next) |
| : next, true); |
| else |
| set_reg_attrs_for_decl_rtl (next, x); |
| } |
| } |
| |
| if (TREE_CODE (t) == SSA_NAME) |
| { |
| int part = var_to_partition (SA.map, t); |
| if (part != NO_PARTITION) |
| { |
| if (SA.partition_to_pseudo[part]) |
| gcc_assert (SA.partition_to_pseudo[part] == x); |
| else if (x != pc_rtx) |
| SA.partition_to_pseudo[part] = x; |
| } |
| /* For the benefit of debug information at -O0 (where |
| vartracking doesn't run) record the place also in the base |
| DECL. For PARMs and RESULTs, do so only when setting the |
| default def. */ |
| if (x && x != pc_rtx && SSA_NAME_VAR (t) |
| && (VAR_P (SSA_NAME_VAR (t)) |
| || SSA_NAME_IS_DEFAULT_DEF (t))) |
| { |
| tree var = SSA_NAME_VAR (t); |
| /* If we don't yet have something recorded, just record it now. */ |
| if (!DECL_RTL_SET_P (var)) |
| SET_DECL_RTL (var, x); |
| /* If we have it set already to "multiple places" don't |
| change this. */ |
| else if (DECL_RTL (var) == pc_rtx) |
| ; |
| /* If we have something recorded and it's not the same place |
| as we want to record now, we have multiple partitions for the |
| same base variable, with different places. We can't just |
| randomly choose one, hence we have to say that we don't know. |
| This only happens with optimization, and there var-tracking |
| will figure out the right thing. */ |
| else if (DECL_RTL (var) != x) |
| SET_DECL_RTL (var, pc_rtx); |
| } |
| } |
| else |
| SET_DECL_RTL (t, x); |
| } |
| |
| /* This structure holds data relevant to one variable that will be |
| placed in a stack slot. */ |
| class stack_var |
| { |
| public: |
| /* The variable. */ |
| tree decl; |
| |
| /* Initially, the size of the variable. Later, the size of the partition, |
| if this variable becomes its partition's representative. */ |
| poly_uint64 size; |
| |
| /* The *byte* alignment required for this variable. Or, as with the |
| size, the alignment for this partition. */ |
| unsigned int alignb; |
| |
| /* The partition representative. */ |
| size_t representative; |
| |
| /* The next stack variable in the partition, or EOC. */ |
| size_t next; |
| |
| /* The indices of conflicting stack variables. */ |
| bitmap conflicts; |
| }; |
| |
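| /* Sentinel terminating a partition's chain of stack variables |
| (the NEXT field above). */ |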
| #define EOC ((size_t)-1) |
| |
| /* We have an array of such objects while deciding allocation. */ |
| static class stack_var *stack_vars; |
| static size_t stack_vars_alloc; |
| static size_t stack_vars_num; |
| static hash_map<tree, size_t> *decl_to_stack_part; |
| |
| /* Conflict bitmaps go on this obstack. This allows us to destroy |
| all of them in one big sweep. */ |
| static bitmap_obstack stack_var_bitmap_obstack; |
| |
| /* An array of indices into stack_vars, sorted by stack_var_cmp below |
| ("large"-alignment objects first, then by decreasing size). */ |
| static size_t *stack_vars_sorted; |
| |
| /* The phase of the stack frame. This is the known misalignment of |
| virtual_stack_vars_rtx from PREFERRED_STACK_BOUNDARY. That is, |
| (frame_offset+frame_phase) % PREFERRED_STACK_BOUNDARY == 0. */ |
| static int frame_phase; |
| |
| /* Used during expand_used_vars to remember if we saw any decls for |
| which we'd like to enable stack smashing protection. */ |
| static bool has_protected_decls; |
| |
| /* Used during expand_used_vars. Remember if we saw a character buffer |
| smaller than our cutoff threshold. Used for -Wstack-protector. */ |
| static bool has_short_buffer; |
| |
| /* Compute the byte alignment to use for DECL. Ignore alignment |
| we cannot honor given the expected alignment of the stack boundary. */ |
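| /* For instance (illustrative, assuming BITS_PER_UNIT == 8 and no hwasan |
| instrumentation): a DECL whose effective alignment comes out as 32 bits |
| yields a return value of 4 bytes. */ |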
| |
| static unsigned int |
| align_local_variable (tree decl, bool really_expand) |
| { |
| unsigned int align; |
| |
| if (TREE_CODE (decl) == SSA_NAME) |
| { |
| tree type = TREE_TYPE (decl); |
| machine_mode mode = TYPE_MODE (type); |
| |
| align = TYPE_ALIGN (type); |
| if (mode != BLKmode |
| && align < GET_MODE_ALIGNMENT (mode)) |
| align = GET_MODE_ALIGNMENT (mode); |
| } |
| else |
| align = LOCAL_DECL_ALIGNMENT (decl); |
| |
| if (hwasan_sanitize_stack_p ()) |
| align = MAX (align, (unsigned) HWASAN_TAG_GRANULE_SIZE * BITS_PER_UNIT); |
| |
| if (TREE_CODE (decl) != SSA_NAME && really_expand) |
| /* Don't change DECL_ALIGN when called from estimated_stack_frame_size. |
| That is done before IPA and could bump alignment based on host |
| backend even for offloaded code which wants different |
| LOCAL_DECL_ALIGNMENT. */ |
| SET_DECL_ALIGN (decl, align); |
| |
| return align / BITS_PER_UNIT; |
| } |
| |
| /* Align the given offset BASE to ALIGN. Round up if ALIGN_UP is true, |
| round down otherwise. Return the aligned BASE value. */ |
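| /* For example (illustrative; the masking assumes ALIGN is a power of two): |
| align_base (13, 8, true) == 16 and align_base (13, 8, false) == 8. */ |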
| |
| static inline unsigned HOST_WIDE_INT |
| align_base (HOST_WIDE_INT base, unsigned HOST_WIDE_INT align, bool align_up) |
| { |
| return align_up ? (base + align - 1) & -align : base & -align; |
| } |
| |
| /* Allocate SIZE bytes at byte alignment ALIGN from the stack frame. |
| Return the frame offset. */ |
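| /* For example (illustrative, assuming FRAME_GROWS_DOWNWARD and a zero |
| frame_phase): with frame_offset == -8, allocating 16 bytes at 16-byte |
| alignment moves frame_offset to -32 and returns -32. */ |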
| |
| static poly_int64 |
| alloc_stack_frame_space (poly_int64 size, unsigned HOST_WIDE_INT align) |
| { |
| poly_int64 offset, new_frame_offset; |
| |
| if (FRAME_GROWS_DOWNWARD) |
| { |
| new_frame_offset |
| = aligned_lower_bound (frame_offset - frame_phase - size, |
| align) + frame_phase; |
| offset = new_frame_offset; |
| } |
| else |
| { |
| new_frame_offset |
| = aligned_upper_bound (frame_offset - frame_phase, |
| align) + frame_phase; |
| offset = new_frame_offset; |
| new_frame_offset += size; |
| } |
| frame_offset = new_frame_offset; |
| |
| if (frame_offset_overflow (frame_offset, cfun->decl)) |
| frame_offset = offset = 0; |
| |
| return offset; |
| } |
| |
| /* Ensure that the stack is aligned to ALIGN bytes. |
| Return the new frame offset. */ |
| static poly_int64 |
| align_frame_offset (unsigned HOST_WIDE_INT align) |
| { |
| return alloc_stack_frame_space (0, align); |
| } |
| |
| /* Accumulate DECL into STACK_VARS. */ |
| |
| static void |
| add_stack_var (tree decl, bool really_expand) |
| { |
| class stack_var *v; |
| |
| if (stack_vars_num >= stack_vars_alloc) |
| { |
| if (stack_vars_alloc) |
| stack_vars_alloc = stack_vars_alloc * 3 / 2; |
| else |
| stack_vars_alloc = 32; |
| stack_vars |
| = XRESIZEVEC (class stack_var, stack_vars, stack_vars_alloc); |
| } |
| if (!decl_to_stack_part) |
| decl_to_stack_part = new hash_map<tree, size_t>; |
| |
| v = &stack_vars[stack_vars_num]; |
| decl_to_stack_part->put (decl, stack_vars_num); |
| |
| v->decl = decl; |
| tree size = TREE_CODE (decl) == SSA_NAME |
| ? TYPE_SIZE_UNIT (TREE_TYPE (decl)) |
| : DECL_SIZE_UNIT (decl); |
| v->size = tree_to_poly_uint64 (size); |
| /* Ensure that all variables have size, so that &a != &b for any two |
| variables that are simultaneously live. */ |
| if (known_eq (v->size, 0U)) |
| v->size = 1; |
| v->alignb = align_local_variable (decl, really_expand); |
| /* An alignment of zero can mightily confuse us later. */ |
| gcc_assert (v->alignb != 0); |
| |
| /* All variables are initially in their own partition. */ |
| v->representative = stack_vars_num; |
| v->next = EOC; |
| |
| /* All variables initially conflict with no other. */ |
| v->conflicts = NULL; |
| |
| /* Ensure that this decl doesn't get put onto the list twice. */ |
| set_rtl (decl, pc_rtx); |
| |
| stack_vars_num++; |
| } |
| |
| /* Make the decls associated with indices X and Y conflict. */ |
| |
| static void |
| add_stack_var_conflict (size_t x, size_t y) |
| { |
| class stack_var *a = &stack_vars[x]; |
| class stack_var *b = &stack_vars[y]; |
| if (x == y) |
| return; |
| if (!a->conflicts) |
| a->conflicts = BITMAP_ALLOC (&stack_var_bitmap_obstack); |
| if (!b->conflicts) |
| b->conflicts = BITMAP_ALLOC (&stack_var_bitmap_obstack); |
| bitmap_set_bit (a->conflicts, y); |
| bitmap_set_bit (b->conflicts, x); |
| } |
| |
| /* Check whether the decls associated with indices X and Y conflict. */ |
| |
| static bool |
| stack_var_conflict_p (size_t x, size_t y) |
| { |
| class stack_var *a = &stack_vars[x]; |
| class stack_var *b = &stack_vars[y]; |
| if (x == y) |
| return false; |
| /* Partitions containing an SSA name result from gimple registers |
| with things like unsupported modes. They are top-level and |
| hence conflict with everything else. */ |
| if (TREE_CODE (a->decl) == SSA_NAME || TREE_CODE (b->decl) == SSA_NAME) |
| return true; |
| |
| if (!a->conflicts || !b->conflicts) |
| return false; |
| return bitmap_bit_p (a->conflicts, y); |
| } |
| |
| /* Callback for walk_stmt_load_store_addr_ops. If OP is a decl touched |
| by add_stack_var, enter its partition number into bitmap DATA. */ |
| |
| static bool |
| visit_op (gimple *, tree op, tree, void *data) |
| { |
| bitmap active = (bitmap)data; |
| op = get_base_address (op); |
| if (op |
| && DECL_P (op) |
| && DECL_RTL_IF_SET (op) == pc_rtx) |
| { |
| size_t *v = decl_to_stack_part->get (op); |
| if (v) |
| bitmap_set_bit (active, *v); |
| } |
| return false; |
| } |
| |
| /* Callback for walk_stmt_load_store_addr_ops. If OP is a decl touched |
| by add_stack_var, record conflicts between it and all currently active |
| other partitions from bitmap DATA. */ |
| |
| static bool |
| visit_conflict (gimple *, tree op, tree, void *data) |
| { |
| bitmap active = (bitmap)data; |
| op = get_base_address (op); |
| if (op |
| && DECL_P (op) |
| && DECL_RTL_IF_SET (op) == pc_rtx) |
| { |
| size_t *v = decl_to_stack_part->get (op); |
| if (v && bitmap_set_bit (active, *v)) |
| { |
| size_t num = *v; |
| bitmap_iterator bi; |
| unsigned i; |
| gcc_assert (num < stack_vars_num); |
| EXECUTE_IF_SET_IN_BITMAP (active, 0, i, bi) |
| add_stack_var_conflict (num, i); |
| } |
| } |
| return false; |
| } |
| |
| /* Helper routine for add_scope_conflicts, calculating the active partitions |
| at the end of BB, leaving the result in WORK. We're called to generate |
| conflicts when FOR_CONFLICT is true, otherwise we're just tracking |
| liveness. */ |
| |
| static void |
| add_scope_conflicts_1 (basic_block bb, bitmap work, bool for_conflict) |
| { |
| edge e; |
| edge_iterator ei; |
| gimple_stmt_iterator gsi; |
| walk_stmt_load_store_addr_fn visit; |
| |
| bitmap_clear (work); |
| FOR_EACH_EDGE (e, ei, bb->preds) |
| bitmap_ior_into (work, (bitmap)e->src->aux); |
| |
| visit = visit_op; |
| |
| for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| gimple *stmt = gsi_stmt (gsi); |
| walk_stmt_load_store_addr_ops (stmt, work, NULL, NULL, visit); |
| } |
| for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| gimple *stmt = gsi_stmt (gsi); |
| |
| if (gimple_clobber_p (stmt)) |
| { |
| tree lhs = gimple_assign_lhs (stmt); |
| size_t *v; |
| /* Nested function lowering might introduce LHSs |
| that are COMPONENT_REFs. */ |
| if (!VAR_P (lhs)) |
| continue; |
| if (DECL_RTL_IF_SET (lhs) == pc_rtx |
| && (v = decl_to_stack_part->get (lhs))) |
| bitmap_clear_bit (work, *v); |
| } |
| else if (!is_gimple_debug (stmt)) |
| { |
| if (for_conflict |
| && visit == visit_op) |
| { |
| /* If this is the first real instruction in this BB we need |
| to add conflicts for everything live at this point now. |
| Unlike classical liveness for named objects we can't |
| rely on seeing a def/use of the names we're interested in. |
| There might merely be indirect loads/stores. We'd not add any |
| conflicts for such partitions. */ |
| bitmap_iterator bi; |
| unsigned i; |
| EXECUTE_IF_SET_IN_BITMAP (work, 0, i, bi) |
| { |
| class stack_var *a = &stack_vars[i]; |
| if (!a->conflicts) |
| a->conflicts = BITMAP_ALLOC (&stack_var_bitmap_obstack); |
| bitmap_ior_into (a->conflicts, work); |
| } |
| visit = visit_conflict; |
| } |
| walk_stmt_load_store_addr_ops (stmt, work, visit, visit, visit); |
| } |
| } |
| } |
| |
| /* Generate stack partition conflicts between all partitions that are |
| simultaneously live. */ |
| |
| static void |
| add_scope_conflicts (void) |
| { |
| basic_block bb; |
| bool changed; |
| bitmap work = BITMAP_ALLOC (NULL); |
| int *rpo; |
| int n_bbs; |
| |
| /* We approximate the live range of a stack variable by taking the first |
| mention of its name as starting point(s), and by the end-of-scope |
| death clobber added by gimplify as ending point(s) of the range. |
| This overapproximates if, for instance, an address-taken operation was |
| moved upward without also moving a dereference of it upward. But it is |
| conservatively correct, as a variable can never hold values before its |
| name is mentioned at least once. |
| |
| We then do a mostly classical bitmap liveness algorithm. */ |
| |
| FOR_ALL_BB_FN (bb, cfun) |
| bb->aux = BITMAP_ALLOC (&stack_var_bitmap_obstack); |
| |
| rpo = XNEWVEC (int, last_basic_block_for_fn (cfun)); |
| n_bbs = pre_and_rev_post_order_compute (NULL, rpo, false); |
| |
| changed = true; |
| while (changed) |
| { |
| int i; |
| changed = false; |
| for (i = 0; i < n_bbs; i++) |
| { |
| bitmap active; |
| bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]); |
| active = (bitmap)bb->aux; |
| add_scope_conflicts_1 (bb, work, false); |
| if (bitmap_ior_into (active, work)) |
| changed = true; |
| } |
| } |
| |
| FOR_EACH_BB_FN (bb, cfun) |
| add_scope_conflicts_1 (bb, work, true); |
| |
| free (rpo); |
| BITMAP_FREE (work); |
| FOR_ALL_BB_FN (bb, cfun) |
| BITMAP_FREE (bb->aux); |
| } |
| |
| /* A subroutine of partition_stack_vars. A comparison function for qsort, |
| sorting an array of indices by the properties of the object. */ |
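| /* For instance (illustrative): among partitions whose alignment does not |
| exceed MAX_SUPPORTED_STACK_ALIGNMENT, a 64-byte object sorts before a |
| 16-byte one; ties on size are broken by alignment and finally by SSA |
| version or DECL_UID so the order is deterministic. */ |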
| |
| static int |
| stack_var_cmp (const void *a, const void *b) |
| { |
| size_t ia = *(const size_t *)a; |
| size_t ib = *(const size_t *)b; |
| unsigned int aligna = stack_vars[ia].alignb; |
| unsigned int alignb = stack_vars[ib].alignb; |
| poly_int64 sizea = stack_vars[ia].size; |
| poly_int64 sizeb = stack_vars[ib].size; |
| tree decla = stack_vars[ia].decl; |
| tree declb = stack_vars[ib].decl; |
| bool largea, largeb; |
| unsigned int uida, uidb; |
| |
| /* Primary compare on "large" alignment. Large comes first. */ |
| largea = (aligna * BITS_PER_UNIT > MAX_SUPPORTED_STACK_ALIGNMENT); |
| largeb = (alignb * BITS_PER_UNIT > MAX_SUPPORTED_STACK_ALIGNMENT); |
| if (largea != largeb) |
| return (int)largeb - (int)largea; |
| |
| /* Secondary compare on size, decreasing. */ |
| int diff = compare_sizes_for_sort (sizeb, sizea); |
| if (diff != 0) |
| return diff; |
| |
| /* Tertiary compare on true alignment, decreasing. */ |
| if (aligna < alignb) |
| return -1; |
| if (aligna > alignb) |
| return 1; |
| |
| /* Final compare on ID for sort stability, increasing. |
| Two SSA names are compared by their version, SSA names come before |
| non-SSA names, and two normal decls are compared by their DECL_UID. */ |
| if (TREE_CODE (decla) == SSA_NAME) |
| { |
| if (TREE_CODE (declb) == SSA_NAME) |
| uida = SSA_NAME_VERSION (decla), uidb = SSA_NAME_VERSION (declb); |
| else |
| return -1; |
| } |
| else if (TREE_CODE (declb) == SSA_NAME) |
| return 1; |
| else |
| uida = DECL_UID (decla), uidb = DECL_UID (declb); |
| if (uida < uidb) |
| return 1; |
| if (uida > uidb) |
| return -1; |
| return 0; |
| } |
| |
| struct part_traits : unbounded_int_hashmap_traits <size_t, bitmap> {}; |
| typedef hash_map<size_t, bitmap, part_traits> part_hashmap; |
| |
| /* If the points-to solution *PT points to variables that are in a partition |
| together with other variables, add all partition members to the pointed-to |
| variables bitmap. */ |
| |
| static void |
| add_partitioned_vars_to_ptset (struct pt_solution *pt, |
| part_hashmap *decls_to_partitions, |
| hash_set<bitmap> *visited, bitmap temp) |
| { |
| bitmap_iterator bi; |
| unsigned i; |
| bitmap *part; |
| |
| if (pt->anything |
| || pt->vars == NULL |
| /* The pointed-to vars bitmap is shared; it is enough to |
| visit it once. */ |
| || visited->add (pt->vars)) |
| return; |
| |
| bitmap_clear (temp); |
| |
| /* By using a temporary bitmap to store all members of the partitions |
| we have to add, we make sure to visit each of the partitions only |
| once. */ |
| EXECUTE_IF_SET_IN_BITMAP (pt->vars, 0, i, bi) |
| if ((!temp |
| || !bitmap_bit_p (temp, i)) |
| && (part = decls_to_partitions->get (i))) |
| bitmap_ior_into (temp, *part); |
| if (!bitmap_empty_p (temp)) |
| bitmap_ior_into (pt->vars, temp); |
| } |
| |
| /* Update points-to sets based on partition info, so we can use them on RTL. |
| The bitmaps representing stack partitions will be saved until expand, |
| where partitioned decls used as bases in memory expressions will be |
| rewritten. */ |
| |
| static void |
| update_alias_info_with_stack_vars (void) |
| { |
| part_hashmap *decls_to_partitions = NULL; |
| size_t i, j; |
| tree var = NULL_TREE; |
| |
| for (i = 0; i < stack_vars_num; i++) |
| { |
| bitmap part = NULL; |
| tree name; |
| struct ptr_info_def *pi; |
| |
| /* Not interested in partitions with a single variable. */ |
| if (stack_vars[i].representative != i |
| || stack_vars[i].next == EOC) |
| continue; |
| |
| if (!decls_to_partitions) |
| { |
| decls_to_partitions = new part_hashmap; |
| cfun->gimple_df->decls_to_pointers = new hash_map<tree, tree>; |
| } |
| |
| /* Create an SSA_NAME that points to the partition for use |
| as base during alias-oracle queries on RTL for bases that |
| have been partitioned. */ |
| if (var == NULL_TREE) |
| var = create_tmp_var (ptr_type_node); |
| name = make_ssa_name (var); |
| |
| /* Create bitmaps representing partitions. They will be used for |
| points-to sets later, so use GGC alloc. */ |
| part = BITMAP_GGC_ALLOC (); |
| for (j = i; j != EOC; j = stack_vars[j].next) |
| { |
| tree decl = stack_vars[j].decl; |
| unsigned int uid = DECL_PT_UID (decl); |
| bitmap_set_bit (part, uid); |
| decls_to_partitions->put (uid, part); |
| cfun->gimple_df->decls_to_pointers->put (decl, name); |
| if (TREE_ADDRESSABLE (decl)) |
| TREE_ADDRESSABLE (name) = 1; |
| } |
| |
| /* Make the SSA name point to all partition members. */ |
| pi = get_ptr_info (name); |
| pt_solution_set (&pi->pt, part, false); |
| } |
| |
| /* Make all points-to sets that contain one member of a partition |
| contain all members of the partition. */ |
| if (decls_to_partitions) |
| { |
| unsigned i; |
| tree name; |
| hash_set<bitmap> visited; |
| bitmap temp = BITMAP_ALLOC (&stack_var_bitmap_obstack); |
| |
| FOR_EACH_SSA_NAME (i, name, cfun) |
| { |
| struct ptr_info_def *pi; |
| |
| if (POINTER_TYPE_P (TREE_TYPE (name)) |
| && ((pi = SSA_NAME_PTR_INFO (name)) != NULL)) |
| add_partitioned_vars_to_ptset (&pi->pt, decls_to_partitions, |
| &visited, temp); |
| } |
| |
| add_partitioned_vars_to_ptset (&cfun->gimple_df->escaped, |
| decls_to_partitions, &visited, temp); |
| |
| delete decls_to_partitions; |
| BITMAP_FREE (temp); |
| } |
| } |
| |
| /* A subroutine of partition_stack_vars. The UNION portion of a UNION/FIND |
| partitioning algorithm. Partitions A and B are known to be non-conflicting. |
| Merge them into a single partition A. */ |
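| /* For example (illustrative): if A's chain is A -> C and B is a singleton |
| partition, the chain becomes A -> B -> C, A's size becomes the larger of |
| the two sizes and A's alignment the stricter of the two. */ |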
| |
| static void |
| union_stack_vars (size_t a, size_t b) |
| { |
| class stack_var *vb = &stack_vars[b]; |
| bitmap_iterator bi; |
| unsigned u; |
| |
| gcc_assert (stack_vars[b].next == EOC); |
| /* Add B to A's partition. */ |
| stack_vars[b].next = stack_vars[a].next; |
| stack_vars[b].representative = a; |
| stack_vars[a].next = b; |
| |
| /* Make sure A is big enough to hold B. */ |
| stack_vars[a].size = upper_bound (stack_vars[a].size, stack_vars[b].size); |
| |
| /* Update the required alignment of partition A to account for B. */ |
| if (stack_vars[a].alignb < stack_vars[b].alignb) |
| stack_vars[a].alignb = stack_vars[b].alignb; |
| |
| /* Update the interference graph and merge the conflicts. */ |
| if (vb->conflicts) |
| { |
| EXECUTE_IF_SET_IN_BITMAP (vb->conflicts, 0, u, bi) |
| add_stack_var_conflict (a, stack_vars[u].representative); |
| BITMAP_FREE (vb->conflicts); |
| } |
| } |
| |
| /* A subroutine of expand_used_vars. Binpack the variables into |
| partitions constrained by the interference graph. The overall |
| algorithm used is as follows: |
| |
| Sort the objects by size in descending order. |
| For each object A { |
| S = size(A) |
| loop { |
| Look for the largest non-conflicting object B with size <= S. |
| UNION (A, B) |
| } |
| } |
| */ |
| |
| static void |
| partition_stack_vars (void) |
| { |
| size_t si, sj, n = stack_vars_num; |
| |
| stack_vars_sorted = XNEWVEC (size_t, stack_vars_num); |
| for (si = 0; si < n; ++si) |
| stack_vars_sorted[si] = si; |
| |
| if (n == 1) |
| return; |
| |
| qsort (stack_vars_sorted, n, sizeof (size_t), stack_var_cmp); |
| |
| for (si = 0; si < n; ++si) |
| { |
| size_t i = stack_vars_sorted[si]; |
| unsigned int ialign = stack_vars[i].alignb; |
| poly_int64 isize = stack_vars[i].size; |
| |
| /* Ignore objects that aren't partition representatives. If we |
| see a var that is not a partition representative, it must |
| have been merged earlier. */ |
| if (stack_vars[i].representative != i) |
| continue; |
| |
| for (sj = si + 1; sj < n; ++sj) |
| { |
| size_t j = stack_vars_sorted[sj]; |
| unsigned int jalign = stack_vars[j].alignb; |
| poly_int64 jsize = stack_vars[j].size; |
| |
| /* Ignore objects that aren't partition representatives. */ |
| if (stack_vars[j].representative != j) |
| continue; |
| |
| /* Do not mix objects of "small" (supported) alignment |
| and "large" (unsupported) alignment. */ |
| if ((ialign * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT) |
| != (jalign * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT)) |
| break; |
| |
| /* For Address Sanitizer do not mix objects with different |
| sizes, as the shorter vars wouldn't be adequately protected. |
| Don't do that for "large" (unsupported) alignment objects, |
| those aren't protected anyway. */ |
| if (asan_sanitize_stack_p () |
| && maybe_ne (isize, jsize) |
| && ialign * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT) |
| break; |
| |
| /* Ignore conflicting objects. */ |
| if (stack_var_conflict_p (i, j)) |
| continue; |
| |
| /* UNION the objects; J joins I's partition. */ |
| union_stack_vars (i, j); |
| } |
| } |
| |
| update_alias_info_with_stack_vars (); |
| } |
| |
| /* A debugging aid for expand_used_vars. Dump the generated partitions. */ |
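| /* Each representative is printed as, e.g. (illustrative), |
| "Partition 3: size 32 align 16", followed on the next line by a |
| tab-separated list of the partition's member decls. */ |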
| |
| static void |
| dump_stack_var_partition (void) |
| { |
| size_t si, i, j, n = stack_vars_num; |
| |
| for (si = 0; si < n; ++si) |
| { |
| i = stack_vars_sorted[si]; |
| |
| /* Skip variables that aren't partition representatives, for now. */ |
| if (stack_vars[i].representative != i) |
| continue; |
| |
| fprintf (dump_file, "Partition %lu: size ", (unsigned long) i); |
| print_dec (stack_vars[i].size, dump_file); |
| fprintf (dump_file, " align %u\n", stack_vars[i].alignb); |
| |
| for (j = i; j != EOC; j = stack_vars[j].next) |
| { |
| fputc ('\t', dump_file); |
| print_generic_expr (dump_file, stack_vars[j].decl, dump_flags); |
| } |
| fputc ('\n', dump_file); |
| } |
| } |
| |
| /* Assign rtl to DECL at BASE + OFFSET. */ |
| |
| static void |
| expand_one_stack_var_at (tree decl, rtx base, unsigned base_align, |
| poly_int64 offset) |
| { |
| unsigned align; |
| rtx x; |
| |
| /* If this fails, we've overflowed the stack frame. Error nicely? */ |
| gcc_assert (known_eq (offset, trunc_int_for_mode (offset, Pmode))); |
| |
| if (hwasan_sanitize_stack_p ()) |
| x = targetm.memtag.add_tag (base, offset, |
| hwasan_current_frame_tag ()); |
| else |
| x = plus_constant (Pmode, base, offset); |
| |
| x = gen_rtx_MEM (TREE_CODE (decl) == SSA_NAME |
| ? TYPE_MODE (TREE_TYPE (decl)) |
| : DECL_MODE (decl), x); |
| |
| /* Set the alignment we actually gave this decl if it isn't an SSA name. |
| If it is, we generate stack slots only accidentally, so it isn't as |
| important; we'll simply set the alignment directly on the MEM. */ |
| |
| if (stack_vars_base_reg_p (base)) |
| offset -= frame_phase; |
| align = known_alignment (offset); |
| align *= BITS_PER_UNIT; |
| if (align == 0 || align > base_align) |
| align = base_align; |
| |
| if (TREE_CODE (decl) != SSA_NAME) |
| { |
| /* One would think that we could assert that we're not decreasing |
| alignment here, but (at least) the i386 port does exactly this |
| via the MINIMUM_ALIGNMENT hook. */ |
| |
| SET_DECL_ALIGN (decl, align); |
| DECL_USER_ALIGN (decl) = 0; |
| } |
| |
| set_rtl (decl, x); |
| |
| set_mem_align (x, align); |
| } |
| |
| class stack_vars_data |
| { |
| public: |
| /* Vector of offset pairs: always the end of some padding followed |
| by the start of the padding that needs Address Sanitizer protection. |
| The vector is in reversed order; highest offset pairs come first. */ |
| auto_vec<HOST_WIDE_INT> asan_vec; |
| |
| /* Vector of partition representative decls in between the paddings. */ |
| auto_vec<tree> asan_decl_vec; |
| |
| /* Base pseudo register for Address Sanitizer protected automatic vars. */ |
| rtx asan_base; |
| |
| /* Alignment needed for the Address Sanitizer protected automatic vars. */ |
| unsigned int asan_alignb; |
| }; |
| |
| /* A subroutine of expand_used_vars. Give each partition representative |
| a unique location within the stack frame. Update each partition member |
| with that location. */ |
| static void |
| expand_stack_vars (bool (*pred) (size_t), class stack_vars_data *data) |
| { |
| size_t si, i, j, n = stack_vars_num; |
| poly_uint64 large_size = 0, large_alloc = 0; |
| rtx large_base = NULL; |
| rtx large_untagged_base = NULL; |
| unsigned large_align = 0; |
| bool large_allocation_done = false; |
| tree decl; |
| |
| /* Determine if there are any variables requiring "large" alignment. |
| Since these are dynamically allocated, we only process these if |
| no predicate is involved. */ |
| large_align = stack_vars[stack_vars_sorted[0]].alignb * BITS_PER_UNIT; |
| if (pred == NULL && large_align > MAX_SUPPORTED_STACK_ALIGNMENT) |
| { |
| /* Find the total size of these variables. */ |
| for (si = 0; si < n; ++si) |
| { |
| unsigned alignb; |
| |
| i = stack_vars_sorted[si]; |
| alignb = stack_vars[i].alignb; |
| |
| /* All "large" alignment decls come before all "small" alignment |
| decls, but "large" alignment decls are not sorted based on |
| their alignment. Increase large_align to track the largest |
| required alignment. */ |
| if ((alignb * BITS_PER_UNIT) > large_align) |
| large_align = alignb * BITS_PER_UNIT; |
| |
| /* Stop when we get to the first decl with "small" alignment. */ |
| if (alignb * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT) |
| break; |
| |
| /* Skip variables that aren't partition representatives. */ |
| if (stack_vars[i].representative != i) |
| continue; |
| |
| /* Skip variables that have already had rtl assigned. See also |
| add_stack_var where we perpetrate this pc_rtx hack. */ |
| decl = stack_vars[i].decl; |
| if (TREE_CODE (decl) == SSA_NAME |
| ? SA.partition_to_pseudo[var_to_partition (SA.map, decl)] != NULL_RTX |
| : DECL_RTL (decl) != pc_rtx) |
| continue; |
| |
| large_size = aligned_upper_bound (large_size, alignb); |
| large_size += stack_vars[i].size; |
| } |
| } |
| |
| for (si = 0; si < n; ++si) |
| { |
| rtx base; |
| unsigned base_align, alignb; |
| poly_int64 offset = 0; |
| |
| i = stack_vars_sorted[si]; |
| |
| /* Skip variables that aren't partition representatives, for now. */ |
| if (stack_vars[i].representative != i) |
| continue; |
| |
| /* Skip variables that have already had rtl assigned. See also |
| add_stack_var where we perpetrate this pc_rtx hack. */ |
| decl = stack_vars[i].decl; |
| if (TREE_CODE (decl) == SSA_NAME |
| ? SA.partition_to_pseudo[var_to_partition (SA.map, decl)] != NULL_RTX |
| : DECL_RTL (decl) != pc_rtx) |
| continue; |
| |
| /* Check the predicate to see whether this variable should be |
| allocated in this pass. */ |
| if (pred && !pred (i)) |
| continue; |
| |
| base = (hwasan_sanitize_stack_p () |
| ? hwasan_frame_base () |
| : virtual_stack_vars_rtx); |
| alignb = stack_vars[i].alignb; |
| if (alignb * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT) |
| { |
| poly_int64 hwasan_orig_offset; |
| if (hwasan_sanitize_stack_p ()) |
| { |
| /* There must be no tag granule "shared" between different |
| objects. This means that no HWASAN_TAG_GRANULE_SIZE byte |
| chunk can have more than one object in it. |
| |
| We ensure this by forcing the end of the last bit of data to |
| be aligned to HWASAN_TAG_GRANULE_SIZE bytes here, and setting |
| the start of each variable to be aligned to |
| HWASAN_TAG_GRANULE_SIZE bytes in `align_local_variable`. |
| |
| We can't align just one of the start or end, since there are |
| untagged things stored on the stack which we do not align to |
| HWASAN_TAG_GRANULE_SIZE bytes. If we only aligned the start |
| or the end of tagged objects then untagged objects could end |
| up sharing the first granule of a tagged object or sharing the |
| last granule of a tagged object respectively. */ |
| hwasan_orig_offset = align_frame_offset (HWASAN_TAG_GRANULE_SIZE); |
| gcc_assert (stack_vars[i].alignb >= HWASAN_TAG_GRANULE_SIZE); |
| } |
| /* ASAN description strings don't yet have a syntax for expressing |
| polynomial offsets. */ |
| HOST_WIDE_INT prev_offset; |
| if (asan_sanitize_stack_p () |
| && pred |
| && frame_offset.is_constant (&prev_offset) |
| && stack_vars[i].size.is_constant ()) |
| { |
| if (data->asan_vec.is_empty ()) |
| { |
| align_frame_offset (ASAN_RED_ZONE_SIZE); |
| prev_offset = frame_offset.to_constant (); |
| } |
| prev_offset = align_base (prev_offset, |
| ASAN_MIN_RED_ZONE_SIZE, |
| !FRAME_GROWS_DOWNWARD); |
| tree repr_decl = NULL_TREE; |
| unsigned HOST_WIDE_INT size |
| = asan_var_and_redzone_size (stack_vars[i].size.to_constant ()); |
| if (data->asan_vec.is_empty ()) |
| size = MAX (size, ASAN_RED_ZONE_SIZE); |
| |
| unsigned HOST_WIDE_INT alignment = MAX (alignb, |
| ASAN_MIN_RED_ZONE_SIZE); |
| offset = alloc_stack_frame_space (size, alignment); |
| |
| data->asan_vec.safe_push (prev_offset); |
| /* Allocating a constant amount of space from a constant |
| starting offset must give a constant result. */ |
| data->asan_vec.safe_push ((offset + stack_vars[i].size) |
| .to_constant ()); |
| /* Find the best representative of the partition. |
| Prefer those with DECL_NAME, and better yet those satisfying |
| the asan_protect_stack_decl predicate. */ |
| for (j = i; j != EOC; j = stack_vars[j].next) |
| if (asan_protect_stack_decl (stack_vars[j].decl) |
| && DECL_NAME (stack_vars[j].decl)) |
| { |
| repr_decl = stack_vars[j].decl; |
| break; |
| } |
| else if (repr_decl == NULL_TREE |
| && DECL_P (stack_vars[j].decl) |
| && DECL_NAME (stack_vars[j].decl)) |
| repr_decl = stack_vars[j].decl; |
| if (repr_decl == NULL_TREE) |
| repr_decl = stack_vars[i].decl; |
| data->asan_decl_vec.safe_push (repr_decl); |
| |
| /* Make sure a representative is unpoisoned if another |
| variable in the partition is handled by |
| use-after-scope sanitization. */ |
| if (asan_handled_variables != NULL |
| && !asan_handled_variables->contains (repr_decl)) |
| { |
| for (j = i; j != EOC; j = stack_vars[j].next) |
| if (asan_handled_variables->contains (stack_vars[j].decl)) |
| break; |
| if (j != EOC) |
| asan_handled_variables->add (repr_decl); |
| } |
| |
| data->asan_alignb = MAX (data->asan_alignb, alignb); |
| if (data->asan_base == NULL) |
| data->asan_base = gen_reg_rtx (Pmode); |
| base = data->asan_base; |
| |
| if (!STRICT_ALIGNMENT) |
| base_align = crtl->max_used_stack_slot_alignment; |
| else |
| base_align = MAX (crtl->max_used_stack_slot_alignment, |
| GET_MODE_ALIGNMENT (SImode) |
| << ASAN_SHADOW_SHIFT); |
| } |
| else |
| { |
| offset = alloc_stack_frame_space (stack_vars[i].size, alignb); |
| base_align = crtl->max_used_stack_slot_alignment; |
| |
| if (hwasan_sanitize_stack_p ()) |
| { |
| /* Align again since the point of this alignment is to handle |
| the "end" of the object (i.e. smallest address after the |
| stack object). For FRAME_GROWS_DOWNWARD that requires |
| aligning the stack before allocating, but for a frame that |
| grows upwards that requires aligning the stack after |
| allocation. |
| |
| Use `frame_offset` to record the offset value rather than |
| `offset` since the `frame_offset` describes the extent |
| allocated for this particular variable while `offset` |
| describes the address that this variable starts at. */ |
| align_frame_offset (HWASAN_TAG_GRANULE_SIZE); |
| hwasan_record_stack_var (virtual_stack_vars_rtx, base, |
| hwasan_orig_offset, frame_offset); |
| } |
| } |
| } |
| else |
| { |
| /* Large alignment is only processed in the last pass. */ |
| if (pred) |
| continue; |
| |
| /* If there were any variables requiring "large" alignment, allocate |
| space. */ |
| if (maybe_ne (large_size, 0U) && ! large_allocation_done) |
| { |
| poly_int64 loffset; |
| rtx large_allocsize; |
| |
| large_allocsize = gen_int_mode (large_size, Pmode); |
| get_dynamic_stack_size (&large_allocsize, 0, large_align, NULL); |
| loffset = alloc_stack_frame_space |
| (rtx_to_poly_int64 (large_allocsize), |
| PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT); |
| large_base = get_dynamic_stack_base (loffset, large_align, base); |
| large_allocation_done = true; |
| } |
| |
| gcc_assert (large_base != NULL); |
| large_alloc = aligned_upper_bound (large_alloc, alignb); |
| offset = large_alloc; |
| large_alloc += stack_vars[i].size; |
| if (hwasan_sanitize_stack_p ()) |
| { |
| /* An object with a large alignment requirement means that the |
| alignment requirement is greater than the required alignment |
| for tags. */ |
| if (!large_untagged_base) |
| large_untagged_base |
| = targetm.memtag.untagged_pointer (large_base, NULL_RTX); |
| /* Ensure the end of the variable is also aligned correctly. */ |
| poly_int64 align_again |
| = aligned_upper_bound (large_alloc, HWASAN_TAG_GRANULE_SIZE); |
| /* For large allocations we always allocate a chunk of space |
| (which is addressed by large_untagged_base/large_base) and |
| then use positive offsets from that. Hence the farthest |
| offset is `align_again` and the nearest offset from the base |
| is `offset`. */ |
| hwasan_record_stack_var (large_untagged_base, large_base, |
| offset, align_again); |
| } |
| |
| base = large_base; |
| base_align = large_align; |
| } |
| |
| /* Create rtl for each variable based on their location within the |
| partition. */ |
| for (j = i; j != EOC; j = stack_vars[j].next) |
| { |
| expand_one_stack_var_at (stack_vars[j].decl, |
| base, base_align, offset); |
| } |
| if (hwasan_sanitize_stack_p ()) |
| hwasan_increment_frame_tag (); |
| } |
| |
| gcc_assert (known_eq (large_alloc, large_size)); |
| } |
| |
| /* Take into account all sizes of partitions and reset DECL_RTLs. */ |
| static poly_uint64 |
| account_stack_vars (void) |
| { |
| size_t si, j, i, n = stack_vars_num; |
| poly_uint64 size = 0; |
| |
| for (si = 0; si < n; ++si) |
| { |
| i = stack_vars_sorted[si]; |
| |
| /* Skip variables that aren't partition representatives, for now. */ |
| if (stack_vars[i].representative != i) |
| continue; |
| |
| size += stack_vars[i].size; |
| for (j = i; j != EOC; j = stack_vars[j].next) |
| set_rtl (stack_vars[j].decl, NULL); |
| } |
| return size; |
| } |
| |
| /* Record the RTL assignment X for the default def of PARM. */ |
| |
| extern void |
| set_parm_rtl (tree parm, rtx x) |
| { |
| gcc_assert (TREE_CODE (parm) == PARM_DECL |
| || TREE_CODE (parm) == RESULT_DECL); |
| |
| if (x && !MEM_P (x)) |
| { |
| unsigned int align = MINIMUM_ALIGNMENT (TREE_TYPE (parm), |
| TYPE_MODE (TREE_TYPE (parm)), |
| TYPE_ALIGN (TREE_TYPE (parm))); |
| |
| /* If the variable alignment is very large we'll dynamically |
| allocate it, which means that in-frame portion is just a |
| pointer. ??? We've got a pseudo for sure here, do we |
| actually dynamically allocate its spilling area if needed? |
| ??? Isn't it a problem when Pmode alignment also exceeds |
| MAX_SUPPORTED_STACK_ALIGNMENT, as can happen on cris and lm32? */ |
| if (align > MAX_SUPPORTED_STACK_ALIGNMENT) |
| align = GET_MODE_ALIGNMENT (Pmode); |
| |
| record_alignment_for_reg_var (align); |
| } |
| |
| tree ssa = ssa_default_def (cfun, parm); |
| if (!ssa) |
| return set_rtl (parm, x); |
| |
| int part = var_to_partition (SA.map, ssa); |
| gcc_assert (part != NO_PARTITION); |
| |
| bool changed = bitmap_bit_p (SA.partitions_for_parm_default_defs, part); |
| gcc_assert (changed); |
| |
| set_rtl (ssa, x); |
| gcc_assert (DECL_RTL (parm) == x); |
| } |
| |
| /* A subroutine of expand_one_var. Called to immediately assign rtl |
| to a variable to be allocated in the stack frame. */ |
| |
| static void |
| expand_one_stack_var_1 (tree var) |
| { |
| poly_uint64 size; |
| poly_int64 offset; |
| unsigned byte_align; |
| |
| if (TREE_CODE (var) == SSA_NAME) |
| { |
| tree type = TREE_TYPE (var); |
| size = tree_to_poly_uint64 (TYPE_SIZE_UNIT (type)); |
| } |
| else |
| size = tree_to_poly_uint64 (DECL_SIZE_UNIT (var)); |
| |
| byte_align = align_local_variable (var, true); |
| |
| /* We handle highly aligned variables in expand_stack_vars. */ |
| gcc_assert (byte_align * BITS_PER_UNIT <= MAX_SUPPORTED_STACK_ALIGNMENT); |
| |
| rtx base; |
| if (hwasan_sanitize_stack_p ()) |
| { |
| /* Allocate zero bytes to align the stack. */ |
| poly_int64 hwasan_orig_offset |
| = align_frame_offset (HWASAN_TAG_GRANULE_SIZE); |
| offset = alloc_stack_frame_space (size, byte_align); |
| align_frame_offset (HWASAN_TAG_GRANULE_SIZE); |
| base = hwasan_frame_base (); |
| /* Use `frame_offset` to automatically account for machines where the |
| frame grows upwards. |
| |
| `offset` will always point to the "start" of the stack object, which |
| will be the smallest address; for ! FRAME_GROWS_DOWNWARD this is *not* |
| the "furthest" offset from the base delimiting the current stack |
| object. `frame_offset` will always delimit the extent of the frame |
| allocated so far. */ |
| hwasan_record_stack_var (virtual_stack_vars_rtx, base, |
| hwasan_orig_offset, frame_offset); |
| } |
| else |
| { |
| offset = alloc_stack_frame_space (size, byte_align); |
| base = virtual_stack_vars_rtx; |
| } |
| |
| expand_one_stack_var_at (var, base, |
| crtl->max_used_stack_slot_alignment, offset); |
| |
| if (hwasan_sanitize_stack_p ()) |
| hwasan_increment_frame_tag (); |
| } |
| |
| /* Wrapper for expand_one_stack_var_1 that checks SSA_NAMEs are |
| already assigned some MEM. */ |
| |
| static void |
| expand_one_stack_var (tree var) |
| { |
| if (TREE_CODE (var) == SSA_NAME) |
| { |
| int part = var_to_partition (SA.map, var); |
| if (part != NO_PARTITION) |
| { |
| rtx x = SA.partition_to_pseudo[part]; |
| gcc_assert (x); |
| gcc_assert (MEM_P (x)); |
| return; |
| } |
| } |
| |
| return expand_one_stack_var_1 (var); |
| } |
| |
| /* A subroutine of expand_one_var. Called to assign rtl to a VAR_DECL |
| that will reside in a hard register. */ |
| |
| static void |
| expand_one_hard_reg_var (tree var) |
| { |
| rest_of_decl_compilation (var, 0, 0); |
| } |
| |
| /* Record the alignment requirements of some variable assigned to a |
| pseudo. */ |
| |
| static void |
| record_alignment_for_reg_var (unsigned int align) |
| { |
| if (SUPPORTS_STACK_ALIGNMENT |
| && crtl->stack_alignment_estimated < align) |
| { |
| /* stack_alignment_estimated shouldn't change after the stack |
| realign decision has been made. */ |
| gcc_assert (!crtl->stack_realign_processed); |
| crtl->stack_alignment_estimated = align; |
| } |
| |
| /* stack_alignment_needed > PREFERRED_STACK_BOUNDARY is permitted. |
| So here we only make sure stack_alignment_needed >= align. */ |
| if (crtl->stack_alignment_needed < align) |
| crtl->stack_alignment_needed = align; |
| if (crtl->max_used_stack_slot_alignment < align) |
| crtl->max_used_stack_slot_alignment = align; |
| } |
| |
| /* Create RTL for an SSA partition. */ |
| |
| static void |
| expand_one_ssa_partition (tree var) |
| { |
| int part = var_to_partition (SA.map, var); |
| gcc_assert (part != NO_PARTITION); |
| |
| if (SA.partition_to_pseudo[part]) |
| return; |
| |
| unsigned int align = MINIMUM_ALIGNMENT (TREE_TYPE (var), |
| TYPE_MODE (TREE_TYPE (var)), |
| TYPE_ALIGN (TREE_TYPE (var))); |
| |
| /* If the variable alignment is very large we'll dynamically allocate |
| it, which means that in-frame portion is just a pointer. */ |
| if (align > MAX_SUPPORTED_STACK_ALIGNMENT) |
| align = GET_MODE_ALIGNMENT (Pmode); |
| |
| record_alignment_for_reg_var (align); |
| |
| if (!use_register_for_decl (var)) |
| { |
| if (defer_stack_allocation (var, true)) |
| add_stack_var (var, true); |
| else |
| expand_one_stack_var_1 (var); |
| return; |
| } |
| |
| machine_mode reg_mode = promote_ssa_mode (var, NULL); |
| rtx x = gen_reg_rtx (reg_mode); |
| |
| set_rtl (var, x); |
| |
| /* For a promoted variable, X will not be used directly but wrapped in a |
| SUBREG with SUBREG_PROMOTED_VAR_P set, which means that the RTL land |
| will assume that its upper bits can be inferred from its lower bits. |
| Therefore, if X isn't initialized on every path from the entry, then |
| we must do it manually in order to fulfill the above assumption. */ |
| if (reg_mode != TYPE_MODE (TREE_TYPE (var)) |
| && bitmap_bit_p (SA.partitions_for_undefined_values, part)) |
| emit_move_insn (x, CONST0_RTX (reg_mode)); |
| } |
| |
| /* Record the association between the RTL generated for partition PART |
| and the underlying variable of the SSA_NAME VAR. */ |
| |
| static void |
| adjust_one_expanded_partition_var (tree var) |
| { |
| if (!var) |
| return; |
| |
| tree decl = SSA_NAME_VAR (var); |
| |
| int part = var_to_partition (SA.map, var); |
| if (part == NO_PARTITION) |
| return; |
| |
| rtx x = SA.partition_to_pseudo[part]; |
| |
| gcc_assert (x); |
| |
| set_rtl (var, x); |
| |
| if (!REG_P (x)) |
| return; |
| |
| /* Note if the object is a user variable. */ |
| if (decl && !DECL_ARTIFICIAL (decl)) |
| mark_user_reg (x); |
| |
| if (POINTER_TYPE_P (decl ? TREE_TYPE (decl) : TREE_TYPE (var))) |
| mark_reg_pointer (x, get_pointer_alignment (var)); |
| } |
| |
| /* A subroutine of expand_one_var. Called to assign rtl to a VAR_DECL |
| that will reside in a pseudo register. */ |
| |
| static void |
| expand_one_register_var (tree var) |
| { |
| if (TREE_CODE (var) == SSA_NAME) |
| { |
| int part = var_to_partition (SA.map, var); |
| if (part != NO_PARTITION) |
| { |
| rtx x = SA.partition_to_pseudo[part]; |
| gcc_assert (x); |
| gcc_assert (REG_P (x)); |
| return; |
| } |
| gcc_unreachable (); |
| } |
| |
| tree decl = var; |
| tree type = TREE_TYPE (decl); |
| machine_mode reg_mode = promote_decl_mode (decl, NULL); |
| rtx x = gen_reg_rtx (reg_mode); |
| |
| set_rtl (var, x); |
| |
| /* Note if the object is a user variable. */ |
| if (!DECL_ARTIFICIAL (decl)) |
| mark_user_reg (x); |
| |
| if (POINTER_TYPE_P (type)) |
| mark_reg_pointer (x, get_pointer_alignment (var)); |
| } |
| |
| /* A subroutine of expand_one_var. Called to assign rtl to a VAR_DECL that |
| has some associated error, e.g. its type is error-mark. We just need |
| to pick something that won't crash the rest of the compiler. */ |
| |
| static void |
| expand_one_error_var (tree var) |
| { |
| machine_mode mode = DECL_MODE (var); |
| rtx x; |
| |
| if (mode == BLKmode) |
| x = gen_rtx_MEM (BLKmode, const0_rtx); |
| else if (mode == VOIDmode) |
| x = const0_rtx; |
| else |
| x = gen_reg_rtx (mode); |
| |
| SET_DECL_RTL (var, x); |
| } |
| |
| /* A subroutine of expand_one_var. VAR is a variable that will be |
| allocated to the local stack frame. Return true if we wish to |
| add VAR to STACK_VARS so that it will be coalesced with other |
| variables. Return false to allocate VAR immediately. |
| |
| This function is used to reduce the number of variables considered |
| for coalescing, which reduces the size of the quadratic problem. */ |
| |
| static bool |
| defer_stack_allocation (tree var, bool toplevel) |
| { |
| tree size_unit = TREE_CODE (var) == SSA_NAME |
| ? TYPE_SIZE_UNIT (TREE_TYPE (var)) |
| : DECL_SIZE_UNIT (var); |
| poly_uint64 size; |
| |
| /* Whether the variable is small enough for immediate allocation not to be |
| a problem with regard to the frame size. */ |
| bool smallish |
| = (poly_int_tree_p (size_unit, &size) |
| && (estimated_poly_value (size) |
| < param_min_size_for_stack_sharing)); |
| |
| /* If stack protection is enabled, *all* stack variables must be deferred, |
| so that we can re-order the strings to the top of the frame. |
| Similarly for Address Sanitizer. */ |
| if (flag_stack_protect || asan_sanitize_stack_p ()) |
| return true; |
| |
| unsigned int align = TREE_CODE (var) == SSA_NAME |
| ? TYPE_ALIGN (TREE_TYPE (var)) |
| : DECL_ALIGN (var); |
| |
| /* We handle "large" alignment via dynamic allocation. We want to handle |
| this extra complication in only one place, so defer them. */ |
| if (align > MAX_SUPPORTED_STACK_ALIGNMENT) |
| return true; |
| |
| bool ignored = TREE_CODE (var) == SSA_NAME |
| ? !SSAVAR (var) || DECL_IGNORED_P (SSA_NAME_VAR (var)) |
| : DECL_IGNORED_P (var); |
| |
| /* When optimization is enabled, DECL_IGNORED_P variables that were |
| originally scoped in some block might be detached from it and appear at |
| toplevel when we reach here. We want to coalesce them with variables |
| from other blocks when the immediate contribution to the frame size |
| would be noticeable. */ |
| if (toplevel && optimize > 0 && ignored && !smallish) |
| return true; |
| |
| /* Variables declared in the outermost scope automatically conflict |
| with every other variable. The only reason to want to defer them |
| at all is that, after sorting, we can more efficiently pack |
| small variables in the stack frame. Continue to defer at -O2. */ |
| if (toplevel && optimize < 2) |
| return false; |
| |
| /* Without optimization, *most* variables are allocated from the |
| stack, which makes the quadratic problem large exactly when we |
| want compilation to proceed as quickly as possible. On the |
| other hand, we don't want the function's stack frame size to |
| get completely out of hand. So we avoid adding scalars and |
| "small" aggregates to the list at all. */ |
| if (optimize == 0 && smallish) |
| return false; |
| |
| return true; |
| } |
| |
| /* A subroutine of expand_used_vars. Expand one variable according to |
| its flavor. Variables to be placed on the stack are not actually |
| expanded yet, merely recorded. |
| When REALLY_EXPAND is false, only add stack values to be allocated. |
| Return the stack usage this variable is supposed to take. */ |
| |
| static poly_uint64 |
| expand_one_var (tree var, bool toplevel, bool really_expand, |
| bitmap forced_stack_var = NULL) |
| { |
| unsigned int align = BITS_PER_UNIT; |
| tree origvar = var; |
| |
| var = SSAVAR (var); |
| |
| if (TREE_TYPE (var) != error_mark_node && VAR_P (var)) |
| { |
| if (is_global_var (var)) |
| return 0; |
| |
| /* Because we don't know if VAR will be in a register or on the stack, |
| we conservatively assume it will be on the stack even if VAR is |
| eventually put into a register after the RA pass. For non-automatic |
| variables, which won't be on the stack, we collect alignment of |
| type and ignore user-specified alignment. Similarly for |
| SSA_NAMEs for which use_register_for_decl returns true. */ |
| if (TREE_STATIC (var) |
| || DECL_EXTERNAL (var) |
| || (TREE_CODE (origvar) == SSA_NAME && use_register_for_decl (var))) |
| align = MINIMUM_ALIGNMENT (TREE_TYPE (var), |
| TYPE_MODE (TREE_TYPE (var)), |
| TYPE_ALIGN (TREE_TYPE (var))); |
| else if (DECL_HAS_VALUE_EXPR_P (var) |
| || (DECL_RTL_SET_P (var) && MEM_P (DECL_RTL (var)))) |
| /* Don't consider debug-only variables with DECL_HAS_VALUE_EXPR_P set |
| or variables which were assigned a stack slot already by |
| expand_one_stack_var_at - in the latter case DECL_ALIGN has been |
| adjusted to match the offset chosen for it. */ |
| align = crtl->stack_alignment_estimated; |
| else |
| align = MINIMUM_ALIGNMENT (var, DECL_MODE (var), DECL_ALIGN (var)); |
| |
| /* If the variable alignment is very large we'll dynamically allocate |
| it, which means that in-frame portion is just a pointer. */ |
| if (align > MAX_SUPPORTED_STACK_ALIGNMENT) |
| align = GET_MODE_ALIGNMENT (Pmode); |
| } |
| |
| record_alignment_for_reg_var (align); |
| |
| poly_uint64 size; |
| if (TREE_CODE (origvar) == SSA_NAME) |
| { |
| gcc_assert (!VAR_P (var) |
| || (!DECL_EXTERNAL (var) |
| && !DECL_HAS_VALUE_EXPR_P (var) |
| && !TREE_STATIC (var) |
| && TREE_TYPE (var) != error_mark_node |
| && !DECL_HARD_REGISTER (var) |
| && really_expand)); |
| } |
| if (!VAR_P (var) && TREE_CODE (origvar) != SSA_NAME) |
| ; |
| else if (DECL_EXTERNAL (var)) |
| ; |
| else if (DECL_HAS_VALUE_EXPR_P (var)) |
| ; |
| else if (TREE_STATIC (var)) |
| ; |
| else if (TREE_CODE (origvar) != SSA_NAME && DECL_RTL_SET_P (var)) |
| ; |
| else if (TREE_TYPE (var) == error_mark_node) |
| { |
| if (really_expand) |
| expand_one_error_var (var); |
| } |
| else if (VAR_P (var) && DECL_HARD_REGISTER (var)) |
| { |
| if (really_expand) |
| { |
| expand_one_hard_reg_var (var); |
| if (!DECL_HARD_REGISTER (var)) |
| /* Invalid register specification. */ |
| expand_one_error_var (var); |
| } |
| } |
| else if (use_register_for_decl (var) |
| && (!forced_stack_var |
| || !bitmap_bit_p (forced_stack_var, DECL_UID (var)))) |
| { |
| if (really_expand) |
| expand_one_register_var (origvar); |
| } |
| else if (!poly_int_tree_p (DECL_SIZE_UNIT (var), &size) |
| || !valid_constant_size_p (DECL_SIZE_UNIT (var))) |
| { |
| /* Reject variables which cover more than half of the address-space. */ |
| if (really_expand) |
| { |
| if (DECL_NONLOCAL_FRAME (var)) |
| error_at (DECL_SOURCE_LOCATION (current_function_decl), |
| "total size of local objects is too large"); |
| else |
| error_at (DECL_SOURCE_LOCATION (var), |
| "size of variable %q+D is too large", var); |
| expand_one_error_var (var); |
| } |
| } |
| else if (defer_stack_allocation (var, toplevel)) |
| add_stack_var (origvar, really_expand); |
| else |
| { |
| if (really_expand) |
| { |
| if (lookup_attribute ("naked", |
| DECL_ATTRIBUTES (current_function_decl))) |
| error ("cannot allocate stack for variable %q+D, naked function", |
| var); |
| |
| expand_one_stack_var (origvar); |
| } |
| return size; |
| } |
| return 0; |
| } |
| |
| /* A subroutine of expand_used_vars. Walk down through the BLOCK tree |
| expanding variables. Those variables that can be put into registers |
| are allocated pseudos; those that can't are put on the stack. |
| |
| TOPLEVEL is true if this is the outermost BLOCK. */ |
| |
| static void |
| expand_used_vars_for_block (tree block, bool toplevel, bitmap forced_stack_vars) |
| { |
| tree t; |
| |
| /* Expand all variables at this level. */ |
| for (t = BLOCK_VARS (block); t ; t = DECL_CHAIN (t)) |
| if (TREE_USED (t) |
| && ((!VAR_P (t) && TREE_CODE (t) != RESULT_DECL) |
| || !DECL_NONSHAREABLE (t))) |
| expand_one_var (t, toplevel, true, forced_stack_vars); |
| |
| /* Expand all variables at containing levels. */ |
| for (t = BLOCK_SUBBLOCKS (block); t ; t = BLOCK_CHAIN (t)) |
| expand_used_vars_for_block (t, false, forced_stack_vars); |
| } |
| |
| /* A subroutine of expand_used_vars. Walk down through the BLOCK tree |
| and clear TREE_USED on all local variables. */ |
| |
| static void |
| clear_tree_used (tree block) |
| { |
| tree t; |
| |
| for (t = BLOCK_VARS (block); t ; t = DECL_CHAIN (t)) |
| /* if (!TREE_STATIC (t) && !DECL_EXTERNAL (t)) */ |
| if ((!VAR_P (t) && TREE_CODE (t) != RESULT_DECL) |
| || !DECL_NONSHAREABLE (t)) |
| TREE_USED (t) = 0; |
| |
| for (t = BLOCK_SUBBLOCKS (block); t ; t = BLOCK_CHAIN (t)) |
| clear_tree_used (t); |
| } |
| |
| /* Examine TYPE and determine a bit mask of the following features. */ |
| |
| #define SPCT_HAS_LARGE_CHAR_ARRAY 1 |
| #define SPCT_HAS_SMALL_CHAR_ARRAY 2 |
| #define SPCT_HAS_ARRAY 4 |
| #define SPCT_HAS_AGGREGATE 8 |
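| |
| /* For example (an illustrative sketch, assuming the default |
| --param ssp-buffer-size=8 threshold): |
| char small_buf[4] -> SPCT_HAS_SMALL_CHAR_ARRAY | SPCT_HAS_ARRAY |
| char big_buf[64] -> SPCT_HAS_LARGE_CHAR_ARRAY | SPCT_HAS_ARRAY |
| int ints[16] -> SPCT_HAS_ARRAY |
| struct { char c[64]; } s -> SPCT_HAS_AGGREGATE | SPCT_HAS_LARGE_CHAR_ARRAY | SPCT_HAS_ARRAY */ |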
| |
| static unsigned int |
| stack_protect_classify_type (tree type) |
| { |
| unsigned int ret = 0; |
| tree t; |
| |
| switch (TREE_CODE (type)) |
| { |
| case ARRAY_TYPE: |
| t = TYPE_MAIN_VARIANT (TREE_TYPE (type)); |
| if (t == char_type_node |
| || t == signed_char_type_node |
| || t == unsigned_char_type_node) |
| { |
| unsigned HOST_WIDE_INT max = param_ssp_buffer_size; |
| unsigned HOST_WIDE_INT len; |
| |
| if (!TYPE_SIZE_UNIT (type) |
| || !tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))) |
| len = max; |
| else |
| len = tree_to_uhwi (TYPE_SIZE_UNIT (type)); |
| |
| if (len < max) |
| ret = SPCT_HAS_SMALL_CHAR_ARRAY | SPCT_HAS_ARRAY; |
| else |
| ret = SPCT_HAS_LARGE_CHAR_ARRAY | SPCT_HAS_ARRAY; |
| } |
| else |
| ret = SPCT_HAS_ARRAY; |
| break; |
| |
| case UNION_TYPE: |
| case QUAL_UNION_TYPE: |
| case RECORD_TYPE: |
| ret = SPCT_HAS_AGGREGATE; |
| for (t = TYPE_FIELDS (type); t ; t = TREE_CHAIN (t)) |
| if (TREE_CODE (t) == FIELD_DECL) |
| ret |= stack_protect_classify_type (TREE_TYPE (t)); |
| break; |
| |
| default: |
| break; |
| } |
| |
| return ret; |
| } |
| |
| /* Return nonzero if DECL should be segregated into the "vulnerable" upper |
| part of the local stack frame. Remember if we ever return nonzero for |
| any variable in this function. The return value is the phase number in |
| which the variable should be allocated. */ |
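| /* For instance (a hypothetical illustration, with -fstack-protector-strong |
| in effect and no no_stack_protector attribute): a char buf[16] is |
| phase 1, an int arr[16] is phase 2, and a plain scalar is phase 0. */ |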
| |
| static int |
| stack_protect_decl_phase (tree decl) |
| { |
| unsigned int bits = stack_protect_classify_type (TREE_TYPE (decl)); |
| int ret = 0; |
| |
| if (bits & SPCT_HAS_SMALL_CHAR_ARRAY) |
| has_short_buffer = true; |
| |
| tree attribs = DECL_ATTRIBUTES (current_function_decl); |
| if (!lookup_attribute ("no_stack_protector", attribs) |
| && (flag_stack_protect == SPCT_FLAG_ALL |
| || flag_stack_protect == SPCT_FLAG_STRONG |
| || (flag_stack_protect == SPCT_FLAG_EXPLICIT |
| && lookup_attribute ("stack_protect", attribs)))) |
| { |
| if ((bits & (SPCT_HAS_SMALL_CHAR_ARRAY | SPCT_HAS_LARGE_CHAR_ARRAY)) |
| && !(bits & SPCT_HAS_AGGREGATE)) |
| ret = 1; |
| else if (bits & SPCT_HAS_ARRAY) |
| ret = 2; |
| } |
| else |
| ret = (bits & SPCT_HAS_LARGE_CHAR_ARRAY) != 0; |
| |
| if (ret) |
| has_protected_decls = true; |
| |
| return ret; |
| } |
| |
| /* Two helper routines that check for phase 1 and phase 2. These are used |
| as callbacks for expand_stack_vars. */ |
| |
| static bool |
| stack_protect_decl_phase_1 (size_t i) |
| { |
| return stack_protect_decl_phase (stack_vars[i].decl) == 1; |
| } |
| |
| static bool |
| stack_protect_decl_phase_2 (size_t i) |
| { |
| return stack_protect_decl_phase (stack_vars[i].decl) == 2; |
| } |
| |
| /* A helper function that checks for the asan phase (with the stack |
| protector it is phase 3). This is used as a callback for |
| expand_stack_vars. Returns true if any of the vars in the partition |
| need to be protected. */ |
| |
| static bool |
| asan_decl_phase_3 (size_t i) |
| { |
| while (i != EOC) |
| { |
| if (asan_protect_stack_decl (stack_vars[i].decl)) |
| return true; |
| i = stack_vars[i].next; |
| } |
| return false; |
| } |
| |
| /* Ensure that variables in different stack protection phases conflict, |
| so that they are not merged and do not share the same stack slot. |
| Return true if there are any address-taken variables. */ |
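| /* For example (illustrative): a phase-1 character buffer receives a |
| conflict with every phase-2 array and phase-0 variable recorded in |
| stack_vars, so the partitioning done later never places them in the |
| same stack slot. */ |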
| |
| static bool |
| add_stack_protection_conflicts (void) |
| { |
| size_t i, j, n = stack_vars_num; |
| unsigned char *phase; |
| bool ret = false; |
| |
| phase = XNEWVEC (unsigned char, n); |
| for (i = 0; i < n; ++i) |
| { |
| phase[i] = stack_protect_decl_phase (stack_vars[i].decl); |
| if (TREE_ADDRESSABLE (stack_vars[i].decl)) |
| ret = true; |
| } |
| |
| for (i = 0; i < n; ++i) |
| { |
| unsigned char ph_i = phase[i]; |
| for (j = i + 1; j < n; ++j) |
| if (ph_i != phase[j]) |
| add_stack_var_conflict (i, j); |
| } |
| |
| XDELETEVEC (phase); |
| return ret; |
| } |
| |
| /* Create a decl for the guard at the top of the stack frame. */ |
| |
| static void |
| create_stack_guard (void) |
| { |
| tree guard = build_decl (DECL_SOURCE_LOCATION (current_function_decl), |
| VAR_DECL, NULL, ptr_type_node); |
| TREE_THIS_VOLATILE (guard) = 1; |
| TREE_USED (guard) = 1; |
| expand_one_stack_var (guard); |
| crtl->stack_protect_guard = guard; |
| } |
| |
| /* Prepare for expanding variables. */ |
| static void |
| init_vars_expansion (void) |
| { |
| /* Conflict bitmaps, and a few related temporary bitmaps, go here. */ |
| bitmap_obstack_initialize (&stack_var_bitmap_obstack); |
| |
| /* A map from decl to stack partition. */ |
| decl_to_stack_part = new hash_map<tree, size_t>; |
| |
| /* Initialize local stack smashing state. */ |
| has_protected_decls = false; |
| has_short_buffer = false; |
| if (hwasan_sanitize_stack_p ()) |
| hwasan_record_frame_init (); |
| } |
| |
| /* Free up stack variable graph data. */ |
| static void |
| fini_vars_expansion (void) |
| { |
| bitmap_obstack_release (&stack_var_bitmap_obstack); |
| if (stack_vars) |
| XDELETEVEC (stack_vars); |
| if (stack_vars_sorted) |
| XDELETEVEC (stack_vars_sorted); |
| stack_vars = NULL; |
| stack_vars_sorted = NULL; |
| stack_vars_alloc = stack_vars_num = 0; |
| delete decl_to_stack_part; |
| decl_to_stack_part = NULL; |
| } |
| |
| /* Make a fair guess for the size of the stack frame of the function |
| in NODE. This doesn't have to be exact, the result is only used in |
| the inline heuristics. So we don't want to run the full stack var |
| packing algorithm (which is quadratic in the number of stack vars). |
| Instead, we calculate the total size of all stack vars. This turns |
| out to be a pretty fair estimate -- packing of stack vars doesn't |
| happen very often. */ |
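| /* For example (illustrative): two 100-byte arrays whose live ranges do |
| not overlap are counted as 200 bytes here, even though the real |
| expansion may later place them in a single shared slot. */ |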
| |
| HOST_WIDE_INT |
| estimated_stack_frame_size (struct cgraph_node *node) |
| { |
| poly_int64 size = 0; |
| size_t i; |
| tree var; |
| struct function *fn = DECL_STRUCT_FUNCTION (node->decl); |
| |
| push_cfun (fn); |
| |
| init_vars_expansion (); |
| |
| FOR_EACH_LOCAL_DECL (fn, i, var) |
| if (auto_var_in_fn_p (var, fn->decl)) |
| size += expand_one_var (var, true, false); |
| |
| if (stack_vars_num > 0) |
| { |
| /* Fake sorting the stack vars for account_stack_vars (). */ |
| stack_vars_sorted = XNEWVEC (size_t, stack_vars_num); |
| for (i = 0; i < stack_vars_num; ++i) |
| stack_vars_sorted[i] = i; |
| size += account_stack_vars (); |
| } |
| |
| fini_vars_expansion (); |
| pop_cfun (); |
| return estimated_poly_value (size); |
| } |
| |
| /* Check if the current function has calls that use a return slot. */ |
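| /* For instance (illustrative): a call whose aggregate return value is |
| returned in memory ("big = returns_big_struct ()") uses a return slot, |
| while a call returning an int in a register does not. */ |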
| |
| static bool |
| stack_protect_return_slot_p () |
| { |
| basic_block bb; |
| |
| FOR_ALL_BB_FN (bb, cfun) |
| for (gimple_stmt_iterator gsi = gsi_start_bb (bb); |
| !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| gimple *stmt = gsi_stmt (gsi); |
| /* This assumes that calls to internal-only functions never |
| use a return slot. */ |
| if (is_gimple_call (stmt) |
| && !gimple_call_internal_p (stmt) |
| && aggregate_value_p (TREE_TYPE (gimple_call_fntype (stmt)), |
| gimple_call_fndecl (stmt))) |
| return true; |
| } |
| return false; |
| } |
| |
| /* Expand all variables used in the function. */ |
| |
| static rtx_insn * |
| expand_used_vars (bitmap forced_stack_vars) |
| { |
| tree var, outer_block = DECL_INITIAL (current_function_decl); |
| auto_vec<tree> maybe_local_decls; |
| rtx_insn *var_end_seq = NULL; |
| unsigned i; |
| unsigned len; |
| bool gen_stack_protect_signal = false; |
| |
| /* Compute the phase of the stack frame for this function. */ |
| { |
| int align = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT; |
| int off = targetm.starting_frame_offset () % align; |
| frame_phase = off ? align - off : 0; |
| } |
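| |
| /* E.g. (arithmetic illustration only): with a preferred stack boundary |
| of 128 bits, align is 16; a starting frame offset of 8 gives off = 8 |
| and frame_phase = 8, while a starting offset of 0 leaves frame_phase |
| at 0. */ |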
| |
| /* Set TREE_USED on all variables in the local_decls. */ |
| FOR_EACH_LOCAL_DECL (cfun, i, var) |
| TREE_USED (var) = 1; |
| /* Clear TREE_USED on all variables associated with a block scope. */ |
| clear_tree_used (DECL_INITIAL (current_function_decl)); |
| |
| init_vars_expansion (); |
| |
| if (targetm.use_pseudo_pic_reg ()) |
| pic_offset_table_rtx = gen_reg_rtx (Pmode); |
| |
| for (i = 0; i < SA.map->num_partitions; i++) |
| { |
| if (bitmap_bit_p (SA.partitions_for_parm_default_defs, i)) |
| continue; |
| |
| tree var = partition_to_var (SA.map, i); |
| |
| gcc_assert (!virtual_operand_p (var)); |
| |
| expand_one_ssa_partition (var); |
| } |
| |
| if (flag_stack_protect == SPCT_FLAG_STRONG) |
| gen_stack_protect_signal = stack_protect_return_slot_p (); |
| |
| /* At this point all variables on the local_decls with TREE_USED |
| set are not associated with any block scope. Lay them out. */ |
| |
| len = vec_safe_length (cfun->local_decls); |
| FOR_EACH_LOCAL_DECL (cfun, i, var) |
| { |
| bool expand_now = false; |
| |
| /* Expanded above already. */ |
| if (is_gimple_reg (var)) |
| { |
| TREE_USED (var) = 0; |
| goto next; |
| } |
| /* We didn't set a block for static or extern because it's hard |
| to tell the difference between a global variable (re)declared |
| in a local scope, and one that's really declared there to |
| begin with. And it doesn't really matter much, since we're |
| not giving them stack space. Expand them now. */ |
| else if (TREE_STATIC (var) || DECL_EXTERNAL (var)) |
| expand_now = true; |
| |
| /* Expand variables not associated with any block now. Those created by |
| the optimizers could be live anywhere in the function. Those that |
| could possibly have been scoped originally and detached from their |
| block will have their allocation deferred so we coalesce them with |
| others when optimization is enabled. */ |
| else if (TREE_USED (var)) |
| expand_now = true; |
| |
| /* Finally, mark all variables on the list as used. We'll use |
| this in a moment when we expand those associated with scopes. */ |
| TREE_USED (var) = 1; |
| |
| if (expand_now) |
| expand_one_var (var, true, true, forced_stack_vars); |
| |
| next: |
| if (DECL_ARTIFICIAL (var) && !DECL_IGNORED_P (var)) |
| { |
| rtx rtl = DECL_RTL_IF_SET (var); |
| |
| /* Keep artificial non-ignored vars in cfun->local_decls |
| chain until instantiate_decls. */ |
| if (rtl && (MEM_P (rtl) || GET_CODE (rtl) == CONCAT)) |
| add_local_decl (cfun, var); |
| else if (rtl == NULL_RTX) |
| /* If rtl isn't set yet, which can happen e.g. with |
| -fstack-protector, retry before returning from this |
| function. */ |
| maybe_local_decls.safe_push (var); |
| } |
| } |
| |
| /* We duplicated some of the decls in CFUN->LOCAL_DECLS. |
| |
| +-----------------+-----------------+ |
| | ...processed... | ...duplicates...| |
| +-----------------+-----------------+ |
| ^ |
| +-- LEN points here. |
| |
| We just want the duplicates, as those are the artificial |
| non-ignored vars that we want to keep until instantiate_decls. |
| Move them down and truncate the array. */ |
| if (!vec_safe_is_empty (cfun->local_decls)) |
| cfun->local_decls->block_remove (0, len); |
| |
| /* At this point, all variables within the block tree with TREE_USED |
| set are actually used by the optimized function. Lay them out. */ |
| expand_used_vars_for_block (outer_block, true, forced_stack_vars); |
| |
| tree attribs = DECL_ATTRIBUTES (current_function_decl); |
| if (stack_vars_num > 0) |
| { |
| bool has_addressable_vars = false; |
| |
| add_scope_conflicts (); |
| |
| /* If stack protection is enabled, we don't share space between |
| vulnerable data and non-vulnerable data. */ |
| if (flag_stack_protect != 0 |
| && !lookup_attribute ("no_stack_protector", attribs) |
| && (flag_stack_protect != SPCT_FLAG_EXPLICIT |
| || (flag_stack_protect == SPCT_FLAG_EXPLICIT |
| && lookup_attribute ("stack_protect", attribs)))) |
| has_addressable_vars = add_stack_protection_conflicts (); |
| |
| if (flag_stack_protect == SPCT_FLAG_STRONG && has_addressable_vars) |
| gen_stack_protect_signal = true; |
| |
| /* Now that we have collected all stack variables, and have computed a |
| minimal interference graph, attempt to save some stack space. */ |
| partition_stack_vars (); |
| if (dump_file) |
| dump_stack_var_partition (); |
| } |
| |
| |
| if (!lookup_attribute ("no_stack_protector", attribs)) |
| switch (flag_stack_protect) |
| { |
| case SPCT_FLAG_ALL: |
| create_stack_guard (); |
| break; |
| |
| case SPCT_FLAG_STRONG: |
| if (gen_stack_protect_signal |
| || cfun->calls_alloca |
| || has_protected_decls |
| || lookup_attribute ("stack_protect", attribs)) |
| create_stack_guard (); |
| break; |
| |
| case SPCT_FLAG_DEFAULT: |
| if (cfun->calls_alloca |
| || has_protected_decls |
| || lookup_attribute ("stack_protect", attribs)) |
| create_stack_guard (); |
| break; |
| |
| case SPCT_FLAG_EXPLICIT: |
| if (lookup_attribute ("stack_protect", attribs)) |
| create_stack_guard (); |
| break; |
| |
| default: |
| break; |
| } |
| |
| /* Assign rtl to each variable based on these partitions. */ |
| if (stack_vars_num > 0) |
| { |
| class stack_vars_data data; |
| |
| data.asan_base = NULL_RTX; |
| data.asan_alignb = 0; |
| |
| /* Reorder decls to be protected by iterating over the variables |
| array multiple times, and allocating out of each phase in turn. */ |
| /* ??? We could probably integrate this into the qsort we did |
| earlier, such that we naturally see these variables first, |
| and thus naturally allocate things in the right order. */ |
| if (has_protected_decls) |
| { |
| /* Phase 1 contains only character arrays. */ |
| expand_stack_vars (stack_protect_decl_phase_1, &data); |
| |
| /* Phase 2 contains other kinds of arrays. */ |
| if (!lookup_attribute ("no_stack_protector", attribs) |
| && (flag_stack_protect == SPCT_FLAG_ALL |
| || flag_stack_protect == SPCT_FLAG_STRONG |
| || (flag_stack_protect == SPCT_FLAG_EXPLICIT |
| && lookup_attribute ("stack_protect", attribs)))) |
| expand_stack_vars (stack_protect_decl_phase_2, &data); |
| } |
| |
| if (asan_sanitize_stack_p ()) |
| /* Phase 3, any partitions that need asan protection |
| in addition to phase 1 and 2. */ |
| expand_stack_vars (asan_decl_phase_3, &data); |
| |
| /* ASAN description strings don't yet have a syntax for expressing |
| polynomial offsets. */ |
| HOST_WIDE_INT prev_offset; |
| if (!data.asan_vec.is_empty () |
| && frame_offset.is_constant (&prev_offset)) |
| { |
| HOST_WIDE_INT offset, sz, redzonesz; |
| redzonesz = ASAN_RED_ZONE_SIZE; |
| sz = data.asan_vec[0] - prev_offset; |
| if (data.asan_alignb > ASAN_RED_ZONE_SIZE |
| && data.asan_alignb <= 4096 |
| && sz + ASAN_RED_ZONE_SIZE >= (int) data.asan_alignb) |
| redzonesz = ((sz + ASAN_RED_ZONE_SIZE + data.asan_alignb - 1) |
| & ~(data.asan_alignb - HOST_WIDE_INT_1)) - sz; |
| /* Allocating a constant amount of space from a constant |
| starting offset must give a constant result. */ |
| offset = (alloc_stack_frame_space (redzonesz, ASAN_RED_ZONE_SIZE) |
| .to_constant ()); |
| data.asan_vec.safe_push (prev_offset); |
| data.asan_vec.safe_push (offset); |
| /* Leave space for alignment if STRICT_ALIGNMENT. */ |
| if (STRICT_ALIGNMENT) |
| alloc_stack_frame_space ((GET_MODE_ALIGNMENT (SImode) |
| << ASAN_SHADOW_SHIFT) |
| / BITS_PER_UNIT, 1); |
| |
| var_end_seq |
| = asan_emit_stack_protection (virtual_stack_vars_rtx, |
| data.asan_base, |
| data.asan_alignb, |
| data.asan_vec.address (), |
| data.asan_decl_vec.address (), |
| data.asan_vec.length ()); |
| } |
| |
| expand_stack_vars (NULL, &data); |
| } |
| |
| if (hwasan_sanitize_stack_p ()) |
| hwasan_emit_prologue (); |
| if (asan_sanitize_allocas_p () && cfun->calls_alloca) |
| var_end_seq = asan_emit_allocas_unpoison (virtual_stack_dynamic_rtx, |
| virtual_stack_vars_rtx, |
| var_end_seq); |
| else if (hwasan_sanitize_allocas_p () && cfun->calls_alloca) |
| /* When using out-of-line instrumentation we only want to emit one function |
| call for clearing the tags in a region of shadow stack. When there are |
| alloca calls in this frame we want to emit a call using the |
| virtual_stack_dynamic_rtx, but when not we use the hwasan_frame_extent |
| rtx we created in expand_stack_vars. */ |
| var_end_seq = hwasan_emit_untag_frame (virtual_stack_dynamic_rtx, |
| virtual_stack_vars_rtx); |
| else if (hwasan_sanitize_stack_p ()) |
| /* If no variables were stored on the stack, `hwasan_get_frame_extent` |
| will return NULL_RTX and hence `hwasan_emit_untag_frame` will return |
| NULL (i.e. an empty sequence). */ |
| var_end_seq = hwasan_emit_untag_frame (hwasan_get_frame_extent (), |
| virtual_stack_vars_rtx); |
| |
| fini_vars_expansion (); |
| |
| /* If there were any artificial non-ignored vars without rtl |
| found earlier, see whether deferred stack allocation has assigned |
| rtl to them in the meantime. */ |
| FOR_EACH_VEC_ELT_REVERSE (maybe_local_decls, i, var) |
| { |
| rtx rtl = DECL_RTL_IF_SET (var); |
| |
| /* Keep artificial non-ignored vars in cfun->local_decls |
| chain until instantiate_decls. */ |
| if (rtl && (MEM_P (rtl) || GET_CODE (rtl) == CONCAT)) |
| add_local_decl (cfun, var); |
| } |
| |
| /* If the target requires that FRAME_OFFSET be aligned, do it. */ |
| if (STACK_ALIGNMENT_NEEDED) |
| { |
| HOST_WIDE_INT align = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT; |
| if (FRAME_GROWS_DOWNWARD) |
| frame_offset = aligned_lower_bound (frame_offset, align); |
| else |
| frame_offset = aligned_upper_bound (frame_offset, align); |
| } |
| |
| return var_end_seq; |
| } |
| |
| |
| /* If we need to produce a detailed dump, print the tree representation |
| for STMT to the dump file. SINCE is the last RTX after which the RTL |
| generated for STMT should have been appended. */ |
| |
| static void |
| maybe_dump_rtl_for_gimple_stmt (gimple *stmt, rtx_insn *since) |
| { |
| if (dump_file && (dump_flags & TDF_DETAILS)) |
| { |
| fprintf (dump_file, "\n;; "); |
| print_gimple_stmt (dump_file, stmt, 0, |
| TDF_SLIM | (dump_flags & TDF_LINENO)); |
| fprintf (dump_file, "\n"); |
| |
| print_rtl (dump_file, since ? NEXT_INSN (since) : since); |
| } |
| } |
| |
| /* Maps the blocks that do not contain tree labels to rtx labels. */ |
| |
| static hash_map<basic_block, rtx_code_label *> *lab_rtx_for_bb; |
| |
| /* Returns the label_rtx expression for a label starting basic block BB. */ |
| |
| static rtx_code_label * |
| label_rtx_for_bb (basic_block bb ATTRIBUTE_UNUSED) |
| { |
| gimple_stmt_iterator gsi; |
| tree lab; |
| |
| if (bb->flags & BB_RTL) |
| return block_label (bb); |
| |
| rtx_code_label **elt = lab_rtx_for_bb->get (bb); |
| if (elt) |
| return *elt; |
| |
| /* Find the tree label if it is present. */ |
| |
| for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) |
| { |
| glabel *lab_stmt; |
| |
| lab_stmt = dyn_cast <glabel *> (gsi_stmt (gsi)); |
| if (!lab_stmt) |
| break; |
| |
| lab = gimple_label_label (lab_stmt); |
| if (DECL_NONLOCAL (lab)) |
| break; |
| |
| return jump_target_rtx (lab); |
| } |
| |
| rtx_code_label *l = gen_label_rtx (); |
| lab_rtx_for_bb->put (bb, l); |
| return l; |
| } |
| |
| |
| /* A subroutine of expand_gimple_cond. Given E, a fallthrough edge |
| of a basic block where we just expanded the conditional at the end, |
| possibly clean up the CFG and instruction sequence. LAST is the |
| last instruction before the just emitted jump sequence. */ |
| |
| static void |
| maybe_cleanup_end_of_block (edge e, rtx_insn *last) |
| { |
| /* Special case: when jumpif decides that the condition is |
| trivial it emits an unconditional jump (and the necessary |
| barrier). But we still have two edges, the fallthru one is |
| wrong. purge_dead_edges would clean this up later. Unfortunately |
| we have to insert insns (and split edges) before |
| find_many_sub_basic_blocks and hence before purge_dead_edges. |
| But splitting edges might create new blocks which depend on the |
| fact that if there are two edges there's no barrier. So the |
| barrier would get lost and verify_flow_info would ICE. Instead |
| of auditing all edge splitters to care for the barrier (which |
| normally isn't there in a cleaned CFG), fix it here. */ |
| if (BARRIER_P (get_last_insn ())) |
| { |
| rtx_insn *insn; |
| remove_edge (e); |
| /* Now we have a single successor block. If we have insns to |
| insert on the remaining edge, we will potentially insert |
| them at the end of this block (if the dest block isn't |
| feasible) in order to avoid splitting the edge. This |
| insertion will take place in front of the last jump. But we |
| might have emitted multiple jumps (conditional and one |
| unconditional) to the same destination. Inserting in front |
| of the last one then is a problem. See PR 40021. We fix |
| this by deleting all jumps except the last unconditional |
| one. */ |
| insn = PREV_INSN (get_last_insn ()); |
| /* Make sure we have an unconditional jump. Otherwise we're |
| confused. */ |
| gcc_assert (JUMP_P (insn) && !any_condjump_p (insn)); |
| for (insn = PREV_INSN (insn); insn != last;) |
| { |
| insn = PREV_INSN (insn); |
| if (JUMP_P (NEXT_INSN (insn))) |
| { |
| if (!any_condjump_p (NEXT_INSN (insn))) |
| { |
| gcc_assert (BARRIER_P (NEXT_INSN (NEXT_INSN (insn)))); |
| delete_insn (NEXT_INSN (NEXT_INSN (insn))); |
| } |
| delete_insn (NEXT_INSN (insn)); |
| } |
| } |
| } |
| } |
| |
| /* A subroutine of expand_gimple_basic_block. Expand one GIMPLE_COND. |
| Returns a new basic block if we've terminated the current basic |
| block and created a new one. */ |
| |
| static basic_block |
| expand_gimple_cond (basic_block bb, gcond *stmt) |
| { |
| basic_block new_bb, dest; |
| edge true_edge; |
| edge false_edge; |
| rtx_insn *last2, *last; |
| enum tree_code code; |
| tree op0, op1; |
| |
| code = gimple_cond_code (stmt); |
| op0 = gimple_cond_lhs (stmt); |
| op1 = gimple_cond_rhs (stmt); |
| /* We're sometimes presented with such code: |
| D.123_1 = x < y; |
| if (D.123_1 != 0) |
| ... |
| This would expand to two comparisons which might later be cleaned up |
| by combine. But some pattern matchers, like if-conversion, work |
| better when there's only one compare, so make up for this here as a |
| special exception if TER would have made the same change. */ |
| if (SA.values |
| && TREE_CODE (op0) == SSA_NAME |
| && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE |
| && TREE_CODE (op1) == INTEGER_CST |
| && ((gimple_cond_code (stmt) == NE_EXPR |
| && integer_zerop (op1)) |
| || (gimple_cond_code (stmt) == EQ_EXPR |
| && integer_onep (op1))) |
| && bitmap_bit_p (SA.values, SSA_NAME_VERSION (op0))) |
| { |
| gimple *second = SSA_NAME_DEF_STMT (op0); |
| if (gimple_code (second) == GIMPLE_ASSIGN) |
| { |
| enum tree_code code2 = gimple_assign_rhs_code (second); |
| if (TREE_CODE_CLASS (code2) == tcc_comparison) |
| { |
| code = code2; |
| op0 = gimple_assign_rhs1 (second); |
| op1 = gimple_assign_rhs2 (second); |
| } |
| /* If jumps are cheap and the target does not support conditional |
| compare, turn some more codes into jumpy sequences. */ |
| else if (BRANCH_COST (optimize_insn_for_speed_p (), false) < 4 |
| && targetm.gen_ccmp_first == NULL) |
| { |
| if ((code2 == BIT_AND_EXPR |
| && TYPE_PRECISION (TREE_TYPE (op0)) == 1 |
| && TREE_CODE (gimple_assign_rhs2 (second)) != INTEGER_CST) |
| || code2 == TRUTH_AND_EXPR) |
| { |
| code = TRUTH_ANDIF_EXPR; |
| op0 = gimple_assign_rhs1 (second); |
| op1 = gimple_assign_rhs2 (second); |
| } |
| else if (code2 == BIT_IOR_EXPR || code2 == TRUTH_OR_EXPR) |
| { |
| code = TRUTH_ORIF_EXPR; |
| op0 = gimple_assign_rhs1 (second); |
| op1 = gimple_assign_rhs2 (second); |
| } |
| } |
| } |
| } |
| |
| /* Optimize (x % C1) == C2 or (x % C1) != C2 into (x - C2) * C3 < C4 |
| when it is beneficial. */ |
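| /* A classic instance (illustrative only; the constants actually chosen |
| are type- and target-dependent): for a 32-bit unsigned x, |
| x % 3 == 0 can be tested as x * 0xaaaaaaab <= 0x55555555, trading the |
| modulo for a multiply by the modular inverse of 3 and an unsigned |
| comparison. */ |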
| if ((code == EQ_EXPR || code == NE_EXPR) |
| && TREE_CODE (op0) == SSA_NAME |
| && TREE_CODE (op1) == INTEGER_CST) |
| code = maybe_optimize_mod_cmp (code, &op0, &op1); |
| |
| /* Optimize (x - y) < 0 into x < y if x - y has undefined overflow. */ |
| if (!TYPE_UNSIGNED (TREE_TYPE (op0)) |
| && (code == LT_EXPR || code == LE_EXPR |
| || code == GT_EXPR || code == GE_EXPR) |
| && integer_zerop (op1) |
| && TREE_CODE (op0) == SSA_NAME) |
| maybe_optimize_sub_cmp_0 (code, &op0, &op1); |
| |
| last2 = last = get_last_insn (); |
| |
| extract_true_false_edges_from_block (bb, &true_edge, &false_edge); |
| set_curr_insn_location (gimple_location (stmt)); |
| |
| /* These flags have no purpose in RTL land. */ |
| true_edge->flags &= ~EDGE_TRUE_VALUE; |
| false_edge->flags &= ~EDGE_FALSE_VALUE; |
| |
| /* We can either have a pure conditional jump with one fallthru edge or |
| a two-way jump that needs to be decomposed into two basic blocks. */ |
| if (false_edge->dest == bb->next_bb) |
| { |
| jumpif_1 (code, op0, op1, label_rtx_for_bb (true_edge->dest), |
| true_edge->probability); |
| maybe_dump_rtl_for_gimple_stmt (stmt, last); |
| if (true_edge->goto_locus != UNKNOWN_LOCATION) |
| set_curr_insn_location (true_edge->goto_locus); |
| false_edge->flags |= EDGE_FALLTHRU; |
| maybe_cleanup_end_of_block (false_edge, last); |
| return NULL; |
| } |
| if (true_edge->dest == bb->next_bb) |
| { |
| jumpifnot_1 (code, op0, op1, label_rtx_for_bb (false_edge->dest), |
| false_edge->probability); |
| maybe_dump_rtl_for_gimple_stmt (stmt, last); |
| if (false_edge->goto_locus != UNKNOWN_LOCATION) |
| set_curr_insn_location (false_edge->goto_locus); |
| true_edge->flags |= EDGE_FALLTHRU; |
| maybe_cleanup_end_of_block (true_edge, last); |
| return NULL; |
| } |
| |
| jumpif_1 (code, op0, op1, label_rtx_for_bb (true_edge->dest), |
| true_edge->probability); |
| last = get_last_insn (); |
| if (false_edge->goto_locus != UNKNOWN_LOCATION) |
| set_curr_insn_location (false_edge->goto_locus); |
| emit_jump (label_rtx_for_bb (false_edge->dest)); |
| |
| BB_END (bb) = last; |
| if (BARRIER_P (BB_END (bb))) |
| BB_END (bb) = PREV_INSN (BB_END (bb)); |
| update_bb_for_insn (bb); |
| |
| new_bb = create_basic_block (NEXT_INSN (last), get_last_insn (), bb); |
| dest = false_edge->dest; |
| redirect_edge_succ (false_edge, new_bb); |
| false_edge->flags |= EDGE_FALLTHRU; |
| new_bb->count = false_edge->count (); |
| loop_p loop = find_common_loop (bb->loop_father, dest->loop_father); |
| add_bb_to_loop (new_bb, loop); |
| if (loop->latch == bb |
| && loop->header == dest) |
| loop->latch = new_bb; |
| make_single_succ_edge (new_bb, dest, 0); |
| if (BARRIER_P (BB_END (new_bb))) |
| BB_END (new_bb) = PREV_INSN (BB_END (new_bb)); |
| update_bb_for_insn (new_bb); |
| |
| maybe_dump_rtl_for_gimple_stmt (stmt, last2); |
| |
| if (true_edge->goto_locus != UNKNOWN_LOCATION) |
| { |
| set_curr_insn_location (true_edge->goto_locus); |
| true_edge->goto_locus = curr_insn_location (); |
| } |
| |
| return new_bb; |
| } |
| |
| /* Mark all calls that can have a transaction restart. */ |
| |
| static void |
| mark_transaction_restart_calls (gimple *stmt) |
| { |
| struct tm_restart_node dummy; |
| tm_restart_node **slot; |
| |
| if (!cfun->gimple_df->tm_restart) |
| return; |
| |
| dummy.stmt = stmt; |
| slot = cfun->gimple_df->tm_restart->find_slot (&dummy, NO_INSERT); |
| if (slot) |
| { |
| struct tm_restart_node *n = *slot; |
| tree list = n->label_or_list; |
| rtx_insn *insn; |
| |
| for (insn = next_real_insn (get_last_insn ()); |
| !CALL_P (insn); |
| insn = next_real_insn (insn)) |
| continue; |
| |
| if (TREE_CODE (list) == LABEL_DECL) |
| add_reg_note (insn, REG_TM, label_rtx (list)); |
| else |
| for (; list ; list = TREE_CHAIN (list)) |
| add_reg_note (insn, REG_TM, label_rtx (TREE_VALUE (list))); |
| } |
| } |
| |
| /* A subroutine of expand_gimple_stmt_1, expanding one GIMPLE_CALL |
| statement STMT. */ |
| |
| static void |
| expand_call_stmt (gcall *stmt) |
| { |
| tree exp, decl, lhs; |
| bool builtin_p; |
| size_t i; |
| |
| if (gimple_call_internal_p (stmt)) |
| { |
| expand_internal_call (stmt); |
| return; |
| } |
| |
| /* If this is a call to a built-in function and it has no effect other |
| than setting the lhs, try to implement it using an internal function |
| instead. */ |
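| /* For instance (an illustrative case, depending on target support): a |
| call "y = __builtin_sqrt (x)" with no other side effects can be |
| expanded via the internal function IFN_SQRT when the target provides |
| a suitable sqrt pattern, bypassing the CALL_EXPR path below. */ |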
| decl = gimple_call_fndecl (stmt); |
| if (gimple_call_lhs (stmt) |
| && !gimple_has_side_effects (stmt) |
| && (optimize || (decl && called_as_built_in (decl)))) |
| { |
| internal_fn ifn = replacement_internal_fn (stmt); |
| if (ifn != IFN_LAST) |
| { |
| expand_internal_call (ifn, stmt); |
| return; |
| } |
| } |
| |
| exp = build_vl_exp (CALL_EXPR, gimple_call_num_args (stmt) + 3); |
| |
| CALL_EXPR_FN (exp) = gimple_call_fn (stmt); |
| builtin_p = decl && fndecl_built_in_p (decl); |
| |
| /* If this is not a builtin function, the function type through which the |
| call is made may be different from the type of the function. */ |
| if (!builtin_p) |
| CALL_EXPR_FN (exp) |
| = fold_convert (build_pointer_type (gimple_call_fntype (stmt)), |
| CALL_EXPR_FN (exp)); |
| |
| TREE_TYPE (exp) = gimple_call_return_type (stmt); |
| CALL_EXPR_STATIC_CHAIN (exp) = gimple_call_chain (stmt); |
| |
| for (i = 0; i < gimple_call_num_args (stmt); i++) |
| { |
| tree arg = gimple_call_arg (stmt, i); |
| gimple *def; |
| /* TER propagates addresses (ADDR_EXPRs) into the arguments of builtin |
| functions so we have a chance to infer more correct alignment |
| information. See PR39954. */ |
| if (builtin_p |
| && TREE_CODE (arg) == SSA_NAME |
| && (def = get_gimple_for_ssa_name (arg)) |
| && gimple_assign_rhs_code (def) == ADDR_EXPR) |
| arg = gimple_assign_rhs1 (def); |
| CALL_EXPR_ARG (exp, i) = arg; |
| } |
| |
| if (gimple_has_side_effects (stmt) |
| /* ??? Downstream in expand_expr_real_1 we assume that expressions |
| w/o side-effects do not throw so work around this here. */ |
| || stmt_could_throw_p (cfun, stmt)) |
| TREE_SIDE_EFFECTS (exp) = 1; |
| |
| if (gimple_call_nothrow_p (stmt)) |
| TREE_NOTHROW (exp) = 1; |
| |
| CALL_EXPR_TAILCALL (exp) = gimple_call_tail_p (stmt); |
| CALL_EXPR_MUST_TAIL_CALL (exp) = gimple_call_must_tail_p (stmt); |
| CALL_EXPR_RETURN_SLOT_OPT (exp) = gimple_call_return_slot_opt_p (stmt); |
| if (decl |
| && fndecl_built_in_p (decl, BUILT_IN_NORMAL) |
| && ALLOCA_FUNCTION_CODE_P (DECL_FUNCTION_CODE (decl))) |
| CALL_ALLOCA_FOR_VAR_P (exp) = gimple_call_alloca_for_var_p (stmt); |
| else |
| CALL_FROM_THUNK_P (exp) = gimple_call_from_thunk_p (stmt); |
| CALL_EXPR_VA_ARG_PACK (exp) = gimple_call_va_arg_pack_p (stmt); |
| CALL_EXPR_BY_DESCRIPTOR (exp) = gimple_call_by_descriptor_p (stmt); |
| SET_EXPR_LOCATION (exp, gimple_location (stmt)); |
| |
| /* Must come after copying location. */ |
| copy_warning (exp, stmt); |
| |
| /* Ensure RTL is created for debug args. */ |
| if (decl && DECL_HAS_DEBUG_ARGS_P (decl)) |
| { |
| vec<tree, va_gc> **debug_args = decl_debug_args_lookup (decl); |
| unsigned int ix; |
| tree dtemp; |
| |
| if (debug_args) |
| for (ix = 1; (*debug_args)->iterate (ix, &dtemp); ix += 2) |
| { |
| gcc_assert (TREE_CODE (dtemp) == DEBUG_EXPR_DECL); |
| expand_debug_expr (dtemp); |
| } |
| } |
| |
| rtx_insn *before_call = get_last_insn (); |
| lhs = gimple_call_lhs (stmt); |
| if (lhs) |
| expand_assignment (lhs, exp, false); |
| else |
| expand_expr (exp, const0_rtx, VOIDmode, EXPAND_NORMAL); |
| |
| /* If the gimple call is an indirect call and has the 'nocf_check' |
| attribute, find the generated CALL insn and mark it so that no |
| control-flow verification is needed for it. */ |
| if (gimple_call_nocf_check_p (stmt) |
| && !gimple_call_fndecl (stmt)) |
| { |
| rtx_insn *last = get_last_insn (); |
| while (!CALL_P (last) |
| && last != before_call) |
| last = PREV_INSN (last); |
| |
| if (last != before_call) |
| add_reg_note (last, REG_CALL_NOCF_CHECK, const0_rtx); |
| } |
| |
| mark_transaction_restart_calls (stmt); |
| } |
| |
| |
| /* Generate RTL for an asm statement (explicit assembler code). |
| STRING is a STRING_CST node containing the assembler code text, |
| or an ADDR_EXPR containing a STRING_CST. VOL nonzero means the |
| insn is volatile; don't optimize it. */ |
| |
| static void |
| expand_asm_loc (tree string, int vol, location_t locus) |
| { |
| rtx body; |
| |
| body = gen_rtx_ASM_INPUT_loc (VOIDmode, |
| ggc_strdup (TREE_STRING_POINTER (string)), |
| locus); |
| |
| MEM_VOLATILE_P (body) = vol; |
| |
| /* Non-empty basic ASM implicitly clobbers memory. */ |
| if (TREE_STRING_LENGTH (string) != 0) |
| { |
| rtx asm_op, clob; |
| unsigned i, nclobbers; |
| auto_vec<rtx> input_rvec, output_rvec; |
| auto_vec<machine_mode> input_mode; |
| auto_vec<const char *> constraints; |
| auto_vec<rtx> clobber_rvec; |
| HARD_REG_SET clobbered_regs; |
| CLEAR_HARD_REG_SET (clobbered_regs); |
| |
| clob = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode)); |
| clobber_rvec.safe_push (clob); |
| |
| if (targetm.md_asm_adjust) |
| targetm.md_asm_adjust (output_rvec, input_rvec, input_mode, |
| constraints, clobber_rvec, clobbered_regs, |
| locus); |
| |
| asm_op = body; |
| nclobbers = clobber_rvec.length (); |
| body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (1 + nclobbers)); |
| |
| XVECEXP (body, 0, 0) = asm_op; |
| for (i = 0; i < nclobbers; i++) |
| XVECEXP (body, 0, i + 1) = gen_rtx_CLOBBER (VOIDmode, clobber_rvec[i]); |
| } |
| |
| emit_insn (body); |
| } |
| |
| /* Return the number of times character C occurs in string S. */ |
| static int |
| n_occurrences (int c, const char *s) |
| { |
| int n = 0; |
| while (*s) |
| n += (*s++ == c); |
| return n; |
| } |
| |
| /* A subroutine of expand_asm_stmt. Check that all operands have |
| the same number of alternatives. Return true if so. */ |
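| /* For example (illustrative): the constraints "=r,m" and "ri,r" each |
| describe two alternatives and are therefore consistent, while adding |
| an operand with the single-alternative constraint "r" would trigger |
| the error below. */ |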
| |
| static bool |
| check_operand_nalternatives (const vec<const char *> &constraints) |
| { |
| unsigned len = constraints.length(); |
| if (len > 0) |
| { |
| int nalternatives = n_occurrences (',', constraints[0]); |
| |
| if (nalternatives + 1 > MAX_RECOG_ALTERNATIVES) |
| { |
| error ("too many alternatives in %<asm%>"); |
| return false; |
| } |
| |
| for (unsigned i = 1; i < len; ++i) |
| if (n_occurrences (',', constraints[i]) != nalternatives) |
| { |
| error ("operand constraints for %<asm%> differ " |
| "in number of alternatives"); |
| return false; |
| } |
| } |
| return true; |
| } |
| |
| /* Check for overlap between registers marked in CLOBBERED_REGS and |
| anything inappropriate in T. Emit an error and return true if there |
| is a conflict, false if everything is OK. */ |
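| /* For example (illustrative, on a target where "r10" names a hard |
| register): using "register int x asm ("r10");" as an operand of an |
| asm that also lists "r10" in its clobber list is diagnosed here. */ |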
| |
| static bool |
| tree_conflicts_with_clobbers_p (tree t, HARD_REG_SET *clobbered_regs, |
| location_t loc) |
| { |
| /* Conflicts between asm-declared register variables and the clobber |
| list are not allowed. */ |
| tree overlap = tree_overlaps_hard_reg_set (t, clobbered_regs); |
| |
| if (overlap) |
| { |
| error_at (loc, "%<asm%> specifier for variable %qE conflicts with " |
| "%<asm%> clobber list", DECL_NAME (overlap)); |
| |
| /* Reset registerness to stop multiple errors emitted for a single |
| variable. */ |
| DECL_REGISTER (overlap) = 0; |
| return true; |
| } |
| |
| return false; |
| } |
| |
| /* Check that the given REGNO spanning NREGS is a valid |
| asm clobber operand. Some HW registers cannot be |
| saved/restored, hence they should not be clobbered by |
| asm statements. */ |
| static bool |
| asm_clobber_reg_is_valid (int regno, int nregs, const char *regname) |
| { |
| bool is_valid = true; |
| HARD_REG_SET regset; |
| |
| CLEAR_HARD_REG_SET (regset); |
| |
| add_range_to_hard_reg_set (®set, regno, nregs); |
| |
| /* Clobbering the PIC register is an error. */ |
| if (PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM |
| && overlaps_hard_reg_set_p (regset, Pmode, PIC_OFFSET_TABLE_REGNUM)) |
| { |
| /* ??? Diagnose during gimplification? */ |
| error ("PIC register clobbered by %qs in %<asm%>", regname); |
| is_valid = false; |
| } |
| else if (!in_hard_reg_set_p |
| (accessible_reg_set, reg_raw_mode[regno], regno)) |
| { |
| /* ??? Diagnose during gimplification? */ |
| error ("the register %qs cannot be clobbered in %<asm%>" |
| " for the current target", regname); |
| is_valid = false; |
| } |
| |
| /* Clobbering the stack pointer register is deprecated. GCC expects |
| the value of the stack pointer after an asm statement to be the same |
| as it was before, so no asm can validly clobber the stack pointer in |
| the usual sense. Adding the stack pointer to the clobber list has |
| traditionally had some undocumented and somewhat obscure side-effects. */ |
| if (overlaps_hard_reg_set_p (regset, Pmode, STACK_POINTER_REGNUM)) |
| { |
| crtl->sp_is_clobbered_by_asm = true; |
| if (warning (OPT_Wdeprecated, "listing the stack pointer register" |
| " %qs in a clobber list is deprecated", regname)) |
| inform (input_location, "the value of the stack pointer after" |
| " an %<asm%> statement must be the same as it was before" |
| " the statement"); |
| } |
| |
| return is_valid; |
| } |
| |
| /* Generate RTL for an asm statement with arguments. |
| STRING is the instruction template. |
| OUTPUTS is a list of output arguments (lvalues); INPUTS a list of inputs. |
| Each output or input has an expression in the TREE_VALUE and |
| a tree list in TREE_PURPOSE which in turn contains a constraint |
| name in TREE_VALUE (or NULL_TREE) and a constraint string |
| in TREE_PURPOSE. |
| CLOBBERS is a list of STRING_CST nodes each naming a hard register |
| that is clobbered by this insn. |
| |
| LABELS is a list of labels, and if LABELS is non-NULL, FALLTHRU_BB |
| should be the fallthru basic block of the asm goto. |
| |
| Not all kinds of lvalue that may appear in OUTPUTS can be stored directly. |
| Some elements of OUTPUTS may be replaced with trees representing temporary |
| values. The caller should copy those temporary values to the originally |
| specified lvalues. |
| |
| VOL nonzero means the insn is volatile; don't optimize it. */ |
| |
| static void |
| expand_asm_stmt (gasm *stmt) |
| { |
| class save_input_location |
| { |
| location_t old; |
| |
| public: |
| explicit save_input_location(location_t where) |
| { |
| old = input_location; |
| input_location = where; |
| } |
| |
| ~save_input_location() |
| { |
| input_location = old; |
| } |
| }; |
| |
| location_t locus = gimple_location (stmt); |
| |
| if (gimple_asm_input_p (stmt)) |
| { |
| const char *s = gimple_asm_string (stmt); |
| tree string = build_string (strlen (s), s); |
| expand_asm_loc (string, gimple_asm_volatile_p (stmt), locus); |
| return; |
| } |
| |
| /* There are some legacy diagnostics in here. */ |
| save_input_location s_i_l(locus); |
| |
| unsigned noutputs = gimple_asm_noutputs (stmt); |
| unsigned ninputs = gimple_asm_ninputs (stmt); |
| unsigned nlabels = gimple_asm_nlabels (stmt); |
| unsigned i; |
| bool error_seen = false; |
| |
| /* ??? Diagnose during gimplification? */ |
| if (ninputs + noutputs + nlabels > MAX_RECOG_OPERANDS) |
| { |
| error_at (locus, "more than %d operands in %<asm%>", MAX_RECOG_OPERANDS); |
| return; |
| } |
| |
| auto_vec<tree, MAX_RECOG_OPERANDS> output_tvec; |
| auto_vec<tree, MAX_RECOG_OPERANDS> input_tvec; |
| auto_vec<const char *, MAX_RECOG_OPERANDS> constraints; |
| |
| /* Copy the gimple vectors into new vectors that we can manipulate. */ |
| |
| output_tvec.safe_grow (noutputs, true); |
| input_tvec.safe_grow (ninputs, true); |
| constraints.safe_grow (noutputs + ninputs, true); |
| |
| for (i = 0; i < noutputs; ++i) |
| { |
| tree t = gimple_asm_output_op (stmt, i); |
| output_tvec[i] = TREE_VALUE (t); |
| constraints[i] = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t))); |
| } |
| for (i = 0; i < ninputs; i++) |
| { |
| tree t = gimple_asm_input_op (stmt, i); |
| input_tvec[i] = TREE_VALUE (t); |
| constraints[i + noutputs] |
| = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t))); |
| } |
| |
| /* ??? Diagnose during gimplification? */ |
| if (! check_operand_nalternatives (constraints)) |
| return; |
| |
| /* Count the number of meaningful clobbered registers, ignoring what |
| we would ignore later. */ |
| auto_vec<rtx> clobber_rvec; |
| HARD_REG_SET clobbered_regs; |
| CLEAR_HARD_REG_SET (clobbered_regs); |
| |
| if (unsigned n = gimple_asm_nclobbers (stmt)) |
| { |
| clobber_rvec.reserve (n); |
| for (i = 0; i < n; i++) |
| { |
| tree t = gimple_asm_clobber_op (stmt, i); |
| const char *regname = TREE_STRING_POINTER (TREE_VALUE (t)); |
| int nregs, j; |
| |
| j = decode_reg_name_and_count (regname, &nregs); |
| if (j < 0) |
| { |
| if (j == -2) |
| { |
| /* ??? Diagnose during gimplification? */ |
| error_at (locus, "unknown register name %qs in %<asm%>", |
| regname); |
| error_seen = true; |
| } |
| else if (j == -4) |
| { |
| rtx x = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode)); |
| clobber_rvec.safe_push (x); |
| } |
| else |
| { |
| /* Otherwise we should have -1 == empty string |
| or -3 == cc, which is not a register. */ |
| gcc_assert (j == -1 || j == -3); |
| } |
| } |
| else |
| for (int reg = j; reg < j + nregs; reg++) |
| { |
| if (!asm_clobber_reg_is_valid (reg, nregs, regname)) |
| return; |
| |
| SET_HARD_REG_BIT (clobbered_regs, reg); |
| rtx x = gen_rtx_REG (reg_raw_mode[reg], reg); |
| clobber_rvec.safe_push (x); |
| } |
| } |
| } |
| |
| /* First pass over inputs and outputs checks validity and sets |
| mark_addressable if needed. */ |
| /* ??? Diagnose during gimplification? */ |
| |
| for (i = 0; i < noutputs; ++i) |
| { |
| tree val = output_tvec[i]; |
| tree type = TREE_TYPE (val); |
| const char *constraint; |
| bool is_inout; |
| bool allows_reg; |
| bool allows_mem; |
| |
| /* Try to parse the output constraint. If that fails, there's |
| no point in going further. */ |
| constraint = constraints[i]; |
| if (!parse_output_constraint (&constraint, i, ninputs, noutputs, |
| &allows_mem, &allows_reg, &is_inout)) |
| return; |
| |
| /* If the output is a hard register, verify it doesn't conflict with |
| any other operand's possible hard register use. */ |
| if (DECL_P (val) |
| && REG_P (DECL_RTL (val)) |
| && HARD_REGISTER_P (DECL_RTL (val))) |
| { |
| unsigned j, output_hregno = REGNO (DECL_RTL (val)); |
| bool early_clobber_p = strchr (constraints[i], '&') != NULL; |
| unsigned long match; |
| |
| /* Verify the other outputs do not use the same hard register. */ |
| for (j = i + 1; j < noutputs; ++j) |
| if (DECL_P (output_tvec[j]) |
| && REG_P (DECL_RTL (output_tvec[j])) |
| && HARD_REGISTER_P (DECL_RTL (output_tvec[j])) |
| && output_hregno == REGNO (DECL_RTL (output_tvec[j]))) |
| { |
|