| /* Lower _BitInt(N) operations to scalar operations. |
| Copyright (C) 2023 Free Software Foundation, Inc. |
| Contributed by Jakub Jelinek <jakub@redhat.com>. |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it |
| under the terms of the GNU General Public License as published by the |
| Free Software Foundation; either version 3, or (at your option) any |
| later version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT |
| ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "backend.h" |
| #include "rtl.h" |
| #include "tree.h" |
| #include "gimple.h" |
| #include "cfghooks.h" |
| #include "tree-pass.h" |
| #include "ssa.h" |
| #include "fold-const.h" |
| #include "gimplify.h" |
| #include "gimple-iterator.h" |
| #include "tree-cfg.h" |
| #include "tree-dfa.h" |
| #include "cfgloop.h" |
| #include "cfganal.h" |
| #include "target.h" |
| #include "tree-ssa-live.h" |
| #include "tree-ssa-coalesce.h" |
| #include "domwalk.h" |
| #include "memmodel.h" |
| #include "optabs.h" |
| #include "varasm.h" |
| #include "gimple-range.h" |
| #include "value-range.h" |
| #include "langhooks.h" |
| #include "gimplify-me.h" |
| #include "diagnostic-core.h" |
| #include "tree-eh.h" |
| #include "tree-pretty-print.h" |
| #include "alloc-pool.h" |
| #include "tree-into-ssa.h" |
| #include "tree-cfgcleanup.h" |
| #include "tree-switch-conversion.h" |
| #include "ubsan.h" |
| #include "gimple-lower-bitint.h" |
| |
| /* Split BITINT_TYPE precisions into 4 categories. Small _BitInt, where |
| the target hook says it is a single limb, middle _BitInt which per ABI |
| is not, but there is some INTEGER_TYPE in which arithmetic can be |
| performed (operations on such _BitInt are lowered to casts to that |
| arithmetic type and cast back; e.g. on x86_64 the limb is DImode, but |
| the target supports TImode, so _BitInt(65) to _BitInt(128) are middle |
| ones), large _BitInt which should be handled by straight line code and |
| finally huge _BitInt which should be handled by loops over the limbs. */ |
| |
| enum bitint_prec_kind { |
| bitint_prec_small, |
| bitint_prec_middle, |
| bitint_prec_large, |
| bitint_prec_huge |
| }; |
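| |
| /* For illustration only (the exact boundaries depend on the target |
| hooks): with a 64-bit DImode limb and TImode as the widest fixed |
| integer mode, as on x86_64, bitint_precision_kind below classifies |
| roughly as |
| up to _BitInt(64) bitint_prec_small |
| _BitInt(65) ... _BitInt(128) bitint_prec_middle |
| _BitInt(129) ... _BitInt(256) bitint_prec_large |
| _BitInt(257) and wider bitint_prec_huge. */ |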
| |
| /* Caches to speed up bitint_precision_kind. */ |
| |
| static int small_max_prec, mid_min_prec, large_min_prec, huge_min_prec; |
| static int limb_prec; |
| |
| /* Categorize _BitInt(PREC) as small, middle, large or huge. */ |
| |
| static bitint_prec_kind |
| bitint_precision_kind (int prec) |
| { |
| if (prec <= small_max_prec) |
| return bitint_prec_small; |
| if (huge_min_prec && prec >= huge_min_prec) |
| return bitint_prec_huge; |
| if (large_min_prec && prec >= large_min_prec) |
| return bitint_prec_large; |
| if (mid_min_prec && prec >= mid_min_prec) |
| return bitint_prec_middle; |
| |
| struct bitint_info info; |
| bool ok = targetm.c.bitint_type_info (prec, &info); |
| gcc_assert (ok); |
| scalar_int_mode limb_mode = as_a <scalar_int_mode> (info.limb_mode); |
| if (prec <= GET_MODE_PRECISION (limb_mode)) |
| { |
| small_max_prec = prec; |
| return bitint_prec_small; |
| } |
| if (!large_min_prec |
| && GET_MODE_PRECISION (limb_mode) < MAX_FIXED_MODE_SIZE) |
| large_min_prec = MAX_FIXED_MODE_SIZE + 1; |
| if (!limb_prec) |
| limb_prec = GET_MODE_PRECISION (limb_mode); |
| if (!huge_min_prec) |
| { |
| if (4 * limb_prec >= MAX_FIXED_MODE_SIZE) |
| huge_min_prec = 4 * limb_prec + 1; |
| else |
| huge_min_prec = MAX_FIXED_MODE_SIZE + 1; |
| } |
| if (prec <= MAX_FIXED_MODE_SIZE) |
| { |
| if (!mid_min_prec || prec < mid_min_prec) |
| mid_min_prec = prec; |
| return bitint_prec_middle; |
| } |
| if (large_min_prec && prec <= 4 * limb_prec) |
| return bitint_prec_large; |
| return bitint_prec_huge; |
| } |
| |
| /* Same for a TYPE. */ |
| |
| static bitint_prec_kind |
| bitint_precision_kind (tree type) |
| { |
| return bitint_precision_kind (TYPE_PRECISION (type)); |
| } |
| |
| /* Return minimum precision needed to describe INTEGER_CST |
| CST. All bits above that precision up to precision of |
| TREE_TYPE (CST) are cleared if EXT is set to 0, or set |
| if EXT is set to -1. */ |
| |
| static unsigned |
| bitint_min_cst_precision (tree cst, int &ext) |
| { |
| ext = tree_int_cst_sgn (cst) < 0 ? -1 : 0; |
| wide_int w = wi::to_wide (cst); |
| unsigned min_prec = wi::min_precision (w, TYPE_SIGN (TREE_TYPE (cst))); |
| /* For signed values, we don't need to count the sign bit, |
| we'll use constant 0 or -1 for the upper bits. */ |
| if (!TYPE_UNSIGNED (TREE_TYPE (cst))) |
| --min_prec; |
| else |
| { |
| /* For unsigned values, also try signed min_precision |
| in case the constant has lots of most significant bits set. */ |
| unsigned min_prec2 = wi::min_precision (w, SIGNED) - 1; |
| if (min_prec2 < min_prec) |
| { |
| ext = -1; |
| return min_prec2; |
| } |
| } |
| return min_prec; |
| } |
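| |
| /* Illustrative examples (computed per the rules above): for a signed |
| _BitInt(256) constant 5 this returns 3 with EXT set to 0 (the sign |
| bit is not counted, the upper limbs are all zeros), while for an |
| unsigned _BitInt(256) constant with all bits set the signed retry |
| yields 0 with EXT set to -1 (all limbs are all ones). */ |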
| |
| namespace { |
| |
| /* If OP is a middle _BitInt, cast it to the corresponding INTEGER_TYPE |
| cached in TYPE and return the result. */ |
| |
| tree |
| maybe_cast_middle_bitint (gimple_stmt_iterator *gsi, tree op, tree &type) |
| { |
| if (op == NULL_TREE |
| || TREE_CODE (TREE_TYPE (op)) != BITINT_TYPE |
| || bitint_precision_kind (TREE_TYPE (op)) != bitint_prec_middle) |
| return op; |
| |
| int prec = TYPE_PRECISION (TREE_TYPE (op)); |
| int uns = TYPE_UNSIGNED (TREE_TYPE (op)); |
| if (type == NULL_TREE |
| || TYPE_PRECISION (type) != prec |
| || TYPE_UNSIGNED (type) != uns) |
| type = build_nonstandard_integer_type (prec, uns); |
| |
| if (TREE_CODE (op) != SSA_NAME) |
| { |
| tree nop = fold_convert (type, op); |
| if (is_gimple_val (nop)) |
| return nop; |
| } |
| |
| tree nop = make_ssa_name (type); |
| gimple *g = gimple_build_assign (nop, NOP_EXPR, op); |
| gsi_insert_before (gsi, g, GSI_SAME_STMT); |
| return nop; |
| } |
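| |
| /* E.g. with a 64-bit limb and 128-bit MAX_FIXED_MODE_SIZE, an operand |
| of type _BitInt(100) is replaced here by a NOP_EXPR cast to a 100-bit |
| type from build_nonstandard_integer_type, so subsequent lowering sees |
| ordinary integer arithmetic (a sketch of the intent, not tied to any |
| particular target). */ |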
| |
| /* Return true if STMT can be handled in a loop from least to most |
| significant limb together with its dependencies. */ |
| |
| bool |
| mergeable_op (gimple *stmt) |
| { |
| if (!is_gimple_assign (stmt)) |
| return false; |
| switch (gimple_assign_rhs_code (stmt)) |
| { |
| case PLUS_EXPR: |
| case MINUS_EXPR: |
| case NEGATE_EXPR: |
| case BIT_AND_EXPR: |
| case BIT_IOR_EXPR: |
| case BIT_XOR_EXPR: |
| case BIT_NOT_EXPR: |
| case SSA_NAME: |
| case INTEGER_CST: |
| return true; |
| case LSHIFT_EXPR: |
| { |
| tree cnt = gimple_assign_rhs2 (stmt); |
| if (tree_fits_uhwi_p (cnt) |
| && tree_to_uhwi (cnt) < (unsigned HOST_WIDE_INT) limb_prec) |
| return true; |
| } |
| break; |
| CASE_CONVERT: |
| case VIEW_CONVERT_EXPR: |
| { |
| tree lhs_type = TREE_TYPE (gimple_assign_lhs (stmt)); |
| tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt)); |
| if (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME |
| && TREE_CODE (lhs_type) == BITINT_TYPE |
| && TREE_CODE (rhs_type) == BITINT_TYPE |
| && bitint_precision_kind (lhs_type) >= bitint_prec_large |
| && bitint_precision_kind (rhs_type) >= bitint_prec_large |
| && tree_int_cst_equal (TYPE_SIZE (lhs_type), TYPE_SIZE (rhs_type))) |
| { |
| if (TYPE_PRECISION (rhs_type) >= TYPE_PRECISION (lhs_type)) |
| return true; |
| if ((unsigned) TYPE_PRECISION (lhs_type) % (2 * limb_prec) != 0) |
| return true; |
| if (bitint_precision_kind (lhs_type) == bitint_prec_large) |
| return true; |
| } |
| break; |
| } |
| default: |
| break; |
| } |
| return false; |
| } |
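| |
| /* For instance, a chain like |
| _1 = x_2(D) + y_3(D); |
| _4 = _1 & z_5(D); |
| _6 = ~_4; |
| on huge _BitInt operands can be evaluated in one loop over the limbs, |
| one limb of each statement per iteration, without storing the |
| intermediate _1 and _4 results into memory (illustrative GIMPLE, the |
| actual SSA names will differ). */ |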
| |
| /* Return non-zero if STMT is a .{ADD,SUB,MUL}_OVERFLOW call with |
| _Complex large/huge _BitInt lhs which has at most two immediate uses, |
| at most one use in REALPART_EXPR stmt in the same bb and exactly one |
| IMAGPART_EXPR use in the same bb with a single use which casts it to |
| non-BITINT_TYPE integral type. If there is a REALPART_EXPR use, |
| return 2. Such cases (most common uses of those builtins) can be |
| optimized by marking their lhs and lhs of IMAGPART_EXPR and maybe lhs |
| of REALPART_EXPR as not needed to be backed up by a stack variable. |
| For .UBSAN_CHECK_{ADD,SUB,MUL} return 3. */ |
| |
| int |
| optimizable_arith_overflow (gimple *stmt) |
| { |
| bool is_ubsan = false; |
| if (!is_gimple_call (stmt) || !gimple_call_internal_p (stmt)) |
| return 0; |
| switch (gimple_call_internal_fn (stmt)) |
| { |
| case IFN_ADD_OVERFLOW: |
| case IFN_SUB_OVERFLOW: |
| case IFN_MUL_OVERFLOW: |
| break; |
| case IFN_UBSAN_CHECK_ADD: |
| case IFN_UBSAN_CHECK_SUB: |
| case IFN_UBSAN_CHECK_MUL: |
| is_ubsan = true; |
| break; |
| default: |
| return 0; |
| } |
| tree lhs = gimple_call_lhs (stmt); |
| if (!lhs) |
| return 0; |
| if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)) |
| return 0; |
| tree type = is_ubsan ? TREE_TYPE (lhs) : TREE_TYPE (TREE_TYPE (lhs)); |
| if (TREE_CODE (type) != BITINT_TYPE |
| || bitint_precision_kind (type) < bitint_prec_large) |
| return 0; |
| |
| if (is_ubsan) |
| { |
| use_operand_p use_p; |
| gimple *use_stmt; |
| if (!single_imm_use (lhs, &use_p, &use_stmt) |
| || gimple_bb (use_stmt) != gimple_bb (stmt) |
| || !gimple_store_p (use_stmt) |
| || !is_gimple_assign (use_stmt) |
| || gimple_has_volatile_ops (use_stmt) |
| || stmt_ends_bb_p (use_stmt)) |
| return 0; |
| return 3; |
| } |
| |
| imm_use_iterator ui; |
| use_operand_p use_p; |
| int seen = 0; |
| FOR_EACH_IMM_USE_FAST (use_p, ui, lhs) |
| { |
| gimple *g = USE_STMT (use_p); |
| if (is_gimple_debug (g)) |
| continue; |
| if (!is_gimple_assign (g) || gimple_bb (g) != gimple_bb (stmt)) |
| return 0; |
| if (gimple_assign_rhs_code (g) == REALPART_EXPR) |
| { |
| if ((seen & 1) != 0) |
| return 0; |
| seen |= 1; |
| } |
| else if (gimple_assign_rhs_code (g) == IMAGPART_EXPR) |
| { |
| if ((seen & 2) != 0) |
| return 0; |
| seen |= 2; |
| |
| use_operand_p use2_p; |
| gimple *use_stmt; |
| tree lhs2 = gimple_assign_lhs (g); |
| if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs2)) |
| return 0; |
| if (!single_imm_use (lhs2, &use2_p, &use_stmt) |
| || gimple_bb (use_stmt) != gimple_bb (stmt) |
| || !gimple_assign_cast_p (use_stmt)) |
| return 0; |
| |
| lhs2 = gimple_assign_lhs (use_stmt); |
| if (!INTEGRAL_TYPE_P (TREE_TYPE (lhs2)) |
| || TREE_CODE (TREE_TYPE (lhs2)) == BITINT_TYPE) |
| return 0; |
| } |
| else |
| return 0; |
| } |
| if ((seen & 2) == 0) |
| return 0; |
| return seen == 3 ? 2 : 1; |
| } |
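| |
| /* A typical pattern this recognizes, e.g. from __builtin_add_overflow |
| on large _BitInt operands (illustrative GIMPLE): |
| _1 = .ADD_OVERFLOW (a_2(D), b_3(D)); |
| _4 = REALPART_EXPR <_1>; |
| _5 = IMAGPART_EXPR <_1>; |
| ovf_7 = (int) _5; |
| for which it returns 2; without the REALPART_EXPR use it would |
| return 1. */ |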
| |
| /* If STMT is some kind of comparison (GIMPLE_COND or a comparison |
| assignment) comparing large/huge _BitInt types, return the comparison |
| code and, if POP1 is non-NULL, fill the comparison operands into *POP1 |
| and *POP2. */ |
| |
| tree_code |
| comparison_op (gimple *stmt, tree *pop1, tree *pop2) |
| { |
| tree op1 = NULL_TREE, op2 = NULL_TREE; |
| tree_code code = ERROR_MARK; |
| if (gimple_code (stmt) == GIMPLE_COND) |
| { |
| code = gimple_cond_code (stmt); |
| op1 = gimple_cond_lhs (stmt); |
| op2 = gimple_cond_rhs (stmt); |
| } |
| else if (is_gimple_assign (stmt)) |
| { |
| code = gimple_assign_rhs_code (stmt); |
| op1 = gimple_assign_rhs1 (stmt); |
| if (TREE_CODE_CLASS (code) == tcc_comparison |
| || TREE_CODE_CLASS (code) == tcc_binary) |
| op2 = gimple_assign_rhs2 (stmt); |
| } |
| if (TREE_CODE_CLASS (code) != tcc_comparison) |
| return ERROR_MARK; |
| tree type = TREE_TYPE (op1); |
| if (TREE_CODE (type) != BITINT_TYPE |
| || bitint_precision_kind (type) < bitint_prec_large) |
| return ERROR_MARK; |
| if (pop1) |
| { |
| *pop1 = op1; |
| *pop2 = op2; |
| } |
| return code; |
| } |
| |
| /* Class used during large/huge _BitInt lowering containing all the |
| state for the methods. */ |
| |
| struct bitint_large_huge |
| { |
| bitint_large_huge () |
| : m_names (NULL), m_loads (NULL), m_preserved (NULL), |
| m_single_use_names (NULL), m_map (NULL), m_vars (NULL), |
| m_limb_type (NULL_TREE), m_data (vNULL) {} |
| |
| ~bitint_large_huge (); |
| |
| void insert_before (gimple *); |
| tree limb_access_type (tree, tree); |
| tree limb_access (tree, tree, tree, bool); |
| void if_then (gimple *, profile_probability, edge &, edge &); |
| void if_then_else (gimple *, profile_probability, edge &, edge &); |
| void if_then_if_then_else (gimple *g, gimple *, |
| profile_probability, profile_probability, |
| edge &, edge &, edge &); |
| tree handle_operand (tree, tree); |
| tree prepare_data_in_out (tree, tree, tree *); |
| tree add_cast (tree, tree); |
| tree handle_plus_minus (tree_code, tree, tree, tree); |
| tree handle_lshift (tree, tree, tree); |
| tree handle_cast (tree, tree, tree); |
| tree handle_load (gimple *, tree); |
| tree handle_stmt (gimple *, tree); |
| tree handle_operand_addr (tree, gimple *, int *, int *); |
| tree create_loop (tree, tree *); |
| tree lower_mergeable_stmt (gimple *, tree_code &, tree, tree); |
| tree lower_comparison_stmt (gimple *, tree_code &, tree, tree); |
| void lower_shift_stmt (tree, gimple *); |
| void lower_muldiv_stmt (tree, gimple *); |
| void lower_float_conv_stmt (tree, gimple *); |
| tree arith_overflow_extract_bits (unsigned int, unsigned int, tree, |
| unsigned int, bool); |
| void finish_arith_overflow (tree, tree, tree, tree, tree, tree, gimple *, |
| tree_code); |
| void lower_addsub_overflow (tree, gimple *); |
| void lower_mul_overflow (tree, gimple *); |
| void lower_cplxpart_stmt (tree, gimple *); |
| void lower_complexexpr_stmt (gimple *); |
| void lower_bit_query (gimple *); |
| void lower_call (tree, gimple *); |
| void lower_asm (gimple *); |
| void lower_stmt (gimple *); |
| |
| /* Bitmap of large/huge _BitInt SSA_NAMEs except those that can be |
| merged with their uses. */ |
| bitmap m_names; |
| /* Subset of those for lhs of load statements. These will be |
| cleared in m_names if the loads will be mergeable with all |
| their uses. */ |
| bitmap m_loads; |
| /* Bitmap of large/huge _BitInt SSA_NAMEs that should survive |
| to later passes (arguments or return values of calls). */ |
| bitmap m_preserved; |
| /* Subset of m_names which have a single use. As the lowering |
| can replace various original statements with their lowered |
| form even before it is done iterating over all basic blocks, |
| testing has_single_use for the purpose of emitting clobbers |
| doesn't work properly. */ |
| bitmap m_single_use_names; |
| /* Used for coalescing/partitioning of large/huge _BitInt SSA_NAMEs |
| set in m_names. */ |
| var_map m_map; |
| /* Mapping of the partitions to corresponding decls. */ |
| tree *m_vars; |
| /* Unsigned integer type with limb precision. */ |
| tree m_limb_type; |
| /* Its TYPE_SIZE_UNIT. */ |
| unsigned HOST_WIDE_INT m_limb_size; |
| /* Location of a gimple stmt which is being currently lowered. */ |
| location_t m_loc; |
| /* Current stmt iterator where code is being lowered currently. */ |
| gimple_stmt_iterator m_gsi; |
| /* Statement after which any clobbers should be added if non-NULL. */ |
| gimple *m_after_stmt; |
| /* Set when creating loops to the loop header bb and its preheader. */ |
| basic_block m_bb, m_preheader_bb; |
| /* Stmt iterator after which initialization statements should be emitted. */ |
| gimple_stmt_iterator m_init_gsi; |
| /* Decl into which a mergeable statement stores result. */ |
| tree m_lhs; |
| /* handle_operand/handle_stmt can be invoked in various ways. |
| |
| lower_mergeable_stmt for large _BitInt calls those with constant |
| idx only, expanding to straight line code, for huge _BitInt |
| emits a loop from least significant limb upwards, where each loop |
| iteration handles 2 limbs, plus there can be up to one full limb |
| and one partial limb processed after the loop, where handle_operand |
| and/or handle_stmt are called with constant idx. m_upwards_2limb |
| is non-zero for this case and zero otherwise. m_upwards is true if it |
| is either large or huge _BitInt handled by lower_mergeable_stmt, |
| i.e. indexes always increase. |
| |
| Another way is used by lower_comparison_stmt, which walks limbs |
| from most significant to least significant, partial limb if any |
| processed first with constant idx and then loop processing a single |
| limb per iteration with non-constant idx. |
| |
| Another way is used in lower_shift_stmt, where for LSHIFT_EXPR |
| destination limbs are processed from most significant to least |
| significant or for RSHIFT_EXPR the other way around, in loops or |
| straight line code, but idx usually is non-constant (so from |
| handle_operand/handle_stmt POV random access). The LSHIFT_EXPR |
| handling there can access even partial limbs using non-constant |
| idx (then m_var_msb should be true); for all the other cases, |
| including lower_mergeable_stmt/lower_comparison_stmt, that is |
| not the case and so m_var_msb should be false. |
| |
| m_first should be set the first time handle_operand/handle_stmt |
| is called and clear when it is called for some other limb with |
| the same argument. If the lowering of an operand (e.g. INTEGER_CST) |
| or statement (e.g. +/-/<< with < limb_prec constant) needs some |
| state between the different calls, when m_first is true it should |
| push some trees to m_data vector and also make sure m_data_cnt is |
| incremented by how many trees were pushed, and when m_first is |
| false, it can use the m_data[m_data_cnt] etc. data or update them, |
| just needs to bump m_data_cnt by the same amount as when it was |
| called with m_first set. The toplevel calls to |
| handle_operand/handle_stmt should set m_data_cnt to 0 and truncate |
| m_data vector when setting m_first to true. |
| |
| m_cast_conditional and m_bitfld_load are used when handling a |
| bit-field load inside of a widening cast. handle_cast sometimes |
| needs to do runtime comparisons and handle_operand only conditionally |
| or even in two separate conditional blocks for one idx (once with |
| constant index after comparing the runtime one for equality with the |
| constant). In these cases, m_cast_conditional is set to true and |
| the bit-field load then communicates its m_data_cnt to handle_cast |
| using m_bitfld_load. */ |
| bool m_first; |
| bool m_var_msb; |
| unsigned m_upwards_2limb; |
| bool m_upwards; |
| bool m_cast_conditional; |
| unsigned m_bitfld_load; |
| vec<tree> m_data; |
| unsigned int m_data_cnt; |
| }; |
| |
| bitint_large_huge::~bitint_large_huge () |
| { |
| BITMAP_FREE (m_names); |
| BITMAP_FREE (m_loads); |
| BITMAP_FREE (m_preserved); |
| BITMAP_FREE (m_single_use_names); |
| if (m_map) |
| delete_var_map (m_map); |
| XDELETEVEC (m_vars); |
| m_data.release (); |
| } |
| |
| /* Insert gimple statement G before current location |
| and set its gimple_location. */ |
| |
| void |
| bitint_large_huge::insert_before (gimple *g) |
| { |
| gimple_set_location (g, m_loc); |
| gsi_insert_before (&m_gsi, g, GSI_SAME_STMT); |
| } |
| |
| /* Return type for accessing limb IDX of BITINT_TYPE TYPE. |
| This is normally m_limb_type, except for a partial most |
| significant limb if any. */ |
| |
| tree |
| bitint_large_huge::limb_access_type (tree type, tree idx) |
| { |
| if (type == NULL_TREE) |
| return m_limb_type; |
| unsigned HOST_WIDE_INT i = tree_to_uhwi (idx); |
| unsigned int prec = TYPE_PRECISION (type); |
| gcc_assert (i * limb_prec < prec); |
| if ((i + 1) * limb_prec <= prec) |
| return m_limb_type; |
| else |
| return build_nonstandard_integer_type (prec % limb_prec, |
| TYPE_UNSIGNED (type)); |
| } |
| |
| /* Return a tree expressing how to access limb IDX of VAR, which has |
| BITINT_TYPE TYPE. If WRITE_P is true, it will be a store, otherwise |
| a read. */ |
| |
| tree |
| bitint_large_huge::limb_access (tree type, tree var, tree idx, bool write_p) |
| { |
| tree atype = (tree_fits_uhwi_p (idx) |
| ? limb_access_type (type, idx) : m_limb_type); |
| tree ret; |
| if (DECL_P (var) && tree_fits_uhwi_p (idx)) |
| { |
| tree ptype = build_pointer_type (strip_array_types (TREE_TYPE (var))); |
| unsigned HOST_WIDE_INT off = tree_to_uhwi (idx) * m_limb_size; |
| ret = build2 (MEM_REF, m_limb_type, |
| build_fold_addr_expr (var), |
| build_int_cst (ptype, off)); |
| TREE_THIS_VOLATILE (ret) = TREE_THIS_VOLATILE (var); |
| TREE_SIDE_EFFECTS (ret) = TREE_SIDE_EFFECTS (var); |
| } |
| else if (TREE_CODE (var) == MEM_REF && tree_fits_uhwi_p (idx)) |
| { |
| ret |
| = build2 (MEM_REF, m_limb_type, TREE_OPERAND (var, 0), |
| size_binop (PLUS_EXPR, TREE_OPERAND (var, 1), |
| build_int_cst (TREE_TYPE (TREE_OPERAND (var, 1)), |
| tree_to_uhwi (idx) |
| * m_limb_size))); |
| TREE_THIS_VOLATILE (ret) = TREE_THIS_VOLATILE (var); |
| TREE_SIDE_EFFECTS (ret) = TREE_SIDE_EFFECTS (var); |
| TREE_THIS_NOTRAP (ret) = TREE_THIS_NOTRAP (var); |
| } |
| else |
| { |
| var = unshare_expr (var); |
| if (TREE_CODE (TREE_TYPE (var)) != ARRAY_TYPE |
| || !useless_type_conversion_p (m_limb_type, |
| TREE_TYPE (TREE_TYPE (var)))) |
| { |
| unsigned HOST_WIDE_INT nelts |
| = CEIL (tree_to_uhwi (TYPE_SIZE (type)), limb_prec); |
| tree atype = build_array_type_nelts (m_limb_type, nelts); |
| var = build1 (VIEW_CONVERT_EXPR, atype, var); |
| } |
| ret = build4 (ARRAY_REF, m_limb_type, var, idx, NULL_TREE, NULL_TREE); |
| } |
| if (!write_p && !useless_type_conversion_p (atype, m_limb_type)) |
| { |
| gimple *g = gimple_build_assign (make_ssa_name (m_limb_type), ret); |
| insert_before (g); |
| ret = gimple_assign_lhs (g); |
| ret = build1 (NOP_EXPR, atype, ret); |
| } |
| return ret; |
| } |
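| |
| /* E.g. for a VAR_DECL VAR of type _BitInt(256) with a 64-bit limb, |
| reading limb 2 goes through the DECL_P branch above and yields a |
| MEM_REF of m_limb_type at byte offset 2 * m_limb_size == 16 from |
| &VAR (an illustration, not an exact dump). */ |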
| |
| /* Emit a half diamond, |
| if (COND) |
| |\ |
| | \ |
| | \ |
| | new_bb1 |
| | / |
| | / |
| |/ |
| or if (COND) new_bb1; |
| PROB is the probability that the condition is true. |
| Updates m_gsi to start of new_bb1. |
| Sets EDGE_TRUE to edge from new_bb1 to successor and |
| EDGE_FALSE to the EDGE_FALSE_VALUE edge from if (COND) bb. */ |
| |
| void |
| bitint_large_huge::if_then (gimple *cond, profile_probability prob, |
| edge &edge_true, edge &edge_false) |
| { |
| insert_before (cond); |
| edge e1 = split_block (gsi_bb (m_gsi), cond); |
| edge e2 = split_block (e1->dest, (gimple *) NULL); |
| edge e3 = make_edge (e1->src, e2->dest, EDGE_FALSE_VALUE); |
| e1->flags = EDGE_TRUE_VALUE; |
| e1->probability = prob; |
| e3->probability = prob.invert (); |
| set_immediate_dominator (CDI_DOMINATORS, e2->dest, e1->src); |
| edge_true = e2; |
| edge_false = e3; |
| m_gsi = gsi_after_labels (e1->dest); |
| } |
| |
| /* Emit a full diamond, |
| if (COND) |
| /\ |
| / \ |
| / \ |
| new_bb1 new_bb2 |
| \ / |
| \ / |
| \/ |
| or if (COND) new_bb2; else new_bb1; |
| PROB is the probability that the condition is true. |
| Updates m_gsi to start of new_bb2. |
| Sets EDGE_TRUE to the edge from new_bb2 to the successor and |
| EDGE_FALSE to the edge from new_bb1 to the successor. */ |
| |
| void |
| bitint_large_huge::if_then_else (gimple *cond, profile_probability prob, |
| edge &edge_true, edge &edge_false) |
| { |
| insert_before (cond); |
| edge e1 = split_block (gsi_bb (m_gsi), cond); |
| edge e2 = split_block (e1->dest, (gimple *) NULL); |
| basic_block bb = create_empty_bb (e1->dest); |
| add_bb_to_loop (bb, e1->dest->loop_father); |
| edge e3 = make_edge (e1->src, bb, EDGE_TRUE_VALUE); |
| e1->flags = EDGE_FALSE_VALUE; |
| e3->probability = prob; |
| e1->probability = prob.invert (); |
| bb->count = e1->src->count.apply_probability (prob); |
| set_immediate_dominator (CDI_DOMINATORS, bb, e1->src); |
| set_immediate_dominator (CDI_DOMINATORS, e2->dest, e1->src); |
| edge_true = make_single_succ_edge (bb, e2->dest, EDGE_FALLTHRU); |
| edge_false = e2; |
| m_gsi = gsi_after_labels (bb); |
| } |
| |
| /* Emit a half diamond with full diamond in it |
| if (COND1) |
| |\ |
| | \ |
| | \ |
| | if (COND2) |
| | / \ |
| | / \ |
| |new_bb1 new_bb2 |
| | | / |
| \ | / |
| \ | / |
| \ | / |
| \|/ |
| or if (COND1) { if (COND2) new_bb2; else new_bb1; } |
| PROB1 is the probability that the condition 1 is true. |
| PROB2 is the probability that the condition 2 is true. |
| Updates m_gsi to start of new_bb1. |
| Sets EDGE_TRUE_TRUE to edge from new_bb2 to successor, |
| EDGE_TRUE_FALSE to edge from new_bb1 to successor and |
| EDGE_FALSE to the EDGE_FALSE_VALUE edge from if (COND1) bb. |
| If COND2 is NULL, this is equivalent to |
| if_then (COND1, PROB1, EDGE_TRUE_FALSE, EDGE_FALSE); |
| EDGE_TRUE_TRUE = NULL; */ |
| |
| void |
| bitint_large_huge::if_then_if_then_else (gimple *cond1, gimple *cond2, |
| profile_probability prob1, |
| profile_probability prob2, |
| edge &edge_true_true, |
| edge &edge_true_false, |
| edge &edge_false) |
| { |
| edge e2, e3, e4 = NULL; |
| if_then (cond1, prob1, e2, e3); |
| if (cond2 == NULL) |
| { |
| edge_true_true = NULL; |
| edge_true_false = e2; |
| edge_false = e3; |
| return; |
| } |
| insert_before (cond2); |
| e2 = split_block (gsi_bb (m_gsi), cond2); |
| basic_block bb = create_empty_bb (e2->dest); |
| add_bb_to_loop (bb, e2->dest->loop_father); |
| e4 = make_edge (e2->src, bb, EDGE_TRUE_VALUE); |
| set_immediate_dominator (CDI_DOMINATORS, bb, e2->src); |
| e4->probability = prob2; |
| e2->flags = EDGE_FALSE_VALUE; |
| e2->probability = prob2.invert (); |
| bb->count = e2->src->count.apply_probability (prob2); |
| e4 = make_single_succ_edge (bb, e3->dest, EDGE_FALLTHRU); |
| e2 = find_edge (e2->dest, e3->dest); |
| edge_true_true = e4; |
| edge_true_false = e2; |
| edge_false = e3; |
| m_gsi = gsi_after_labels (e2->src); |
| } |
| |
| /* Emit code to access limb IDX from OP. */ |
| |
| tree |
| bitint_large_huge::handle_operand (tree op, tree idx) |
| { |
| switch (TREE_CODE (op)) |
| { |
| case SSA_NAME: |
| if (m_names == NULL |
| || !bitmap_bit_p (m_names, SSA_NAME_VERSION (op))) |
| { |
| if (SSA_NAME_IS_DEFAULT_DEF (op)) |
| { |
| if (m_first) |
| { |
| tree v = create_tmp_reg (m_limb_type); |
| if (SSA_NAME_VAR (op) && VAR_P (SSA_NAME_VAR (op))) |
| { |
| DECL_NAME (v) = DECL_NAME (SSA_NAME_VAR (op)); |
| DECL_SOURCE_LOCATION (v) |
| = DECL_SOURCE_LOCATION (SSA_NAME_VAR (op)); |
| } |
| v = get_or_create_ssa_default_def (cfun, v); |
| m_data.safe_push (v); |
| } |
| tree ret = m_data[m_data_cnt]; |
| m_data_cnt++; |
| if (tree_fits_uhwi_p (idx)) |
| { |
| tree type = limb_access_type (TREE_TYPE (op), idx); |
| ret = add_cast (type, ret); |
| } |
| return ret; |
| } |
| location_t loc_save = m_loc; |
| m_loc = gimple_location (SSA_NAME_DEF_STMT (op)); |
| tree ret = handle_stmt (SSA_NAME_DEF_STMT (op), idx); |
| m_loc = loc_save; |
| return ret; |
| } |
| int p; |
| gimple *g; |
| tree t; |
| p = var_to_partition (m_map, op); |
| gcc_assert (m_vars[p] != NULL_TREE); |
| t = limb_access (TREE_TYPE (op), m_vars[p], idx, false); |
| g = gimple_build_assign (make_ssa_name (TREE_TYPE (t)), t); |
| insert_before (g); |
| t = gimple_assign_lhs (g); |
| if (m_first |
| && m_single_use_names |
| && m_vars[p] != m_lhs |
| && m_after_stmt |
| && bitmap_bit_p (m_single_use_names, SSA_NAME_VERSION (op))) |
| { |
| tree clobber = build_clobber (TREE_TYPE (m_vars[p]), CLOBBER_EOL); |
| g = gimple_build_assign (m_vars[p], clobber); |
| gimple_stmt_iterator gsi = gsi_for_stmt (m_after_stmt); |
| gsi_insert_after (&gsi, g, GSI_SAME_STMT); |
| } |
| return t; |
| case INTEGER_CST: |
| if (tree_fits_uhwi_p (idx)) |
| { |
| tree c, type = limb_access_type (TREE_TYPE (op), idx); |
| unsigned HOST_WIDE_INT i = tree_to_uhwi (idx); |
| if (m_first) |
| { |
| m_data.safe_push (NULL_TREE); |
| m_data.safe_push (NULL_TREE); |
| } |
| if (limb_prec != HOST_BITS_PER_WIDE_INT) |
| { |
| wide_int w = wi::rshift (wi::to_wide (op), i * limb_prec, |
| TYPE_SIGN (TREE_TYPE (op))); |
| c = wide_int_to_tree (type, |
| wide_int::from (w, TYPE_PRECISION (type), |
| UNSIGNED)); |
| } |
| else if (i >= TREE_INT_CST_EXT_NUNITS (op)) |
| c = build_int_cst (type, |
| tree_int_cst_sgn (op) < 0 ? -1 : 0); |
| else |
| c = build_int_cst (type, TREE_INT_CST_ELT (op, i)); |
| m_data_cnt += 2; |
| return c; |
| } |
| if (m_first |
| || (m_data[m_data_cnt] == NULL_TREE |
| && m_data[m_data_cnt + 1] == NULL_TREE)) |
| { |
| unsigned int prec = TYPE_PRECISION (TREE_TYPE (op)); |
| unsigned int rem = prec % (2 * limb_prec); |
| int ext; |
| unsigned min_prec = bitint_min_cst_precision (op, ext); |
| if (m_first) |
| { |
| m_data.safe_push (NULL_TREE); |
| m_data.safe_push (NULL_TREE); |
| } |
| if (integer_zerop (op)) |
| { |
| tree c = build_zero_cst (m_limb_type); |
| m_data[m_data_cnt] = c; |
| m_data[m_data_cnt + 1] = c; |
| } |
| else if (integer_all_onesp (op)) |
| { |
| tree c = build_all_ones_cst (m_limb_type); |
| m_data[m_data_cnt] = c; |
| m_data[m_data_cnt + 1] = c; |
| } |
| else if (m_upwards_2limb && min_prec <= (unsigned) limb_prec) |
| { |
| /* Single limb constant. Use a phi with that limb from |
| the preheader edge and a 0 or -1 constant from the other edge, |
| and the same constant for the second limb in the loop. */ |
| tree out; |
| gcc_assert (m_first); |
| m_data.pop (); |
| m_data.pop (); |
| prepare_data_in_out (fold_convert (m_limb_type, op), idx, &out); |
| g = gimple_build_assign (m_data[m_data_cnt + 1], |
| build_int_cst (m_limb_type, ext)); |
| insert_before (g); |
| m_data[m_data_cnt + 1] = gimple_assign_rhs1 (g); |
| } |
| else if (min_prec > prec - rem - 2 * limb_prec) |
| { |
| /* Constant which has enough significant bits that it isn't |
| worth trying to save .rodata space by extending from a |
| smaller number. */ |
| tree type; |
| if (m_var_msb) |
| type = TREE_TYPE (op); |
| else |
| /* If we have a guarantee the most significant partial limb |
| (if any) will be only accessed through handle_operand |
| with INTEGER_CST idx, we don't need to include the partial |
| limb in .rodata. */ |
| type = build_bitint_type (prec - rem, 1); |
| tree c = tree_output_constant_def (fold_convert (type, op)); |
| m_data[m_data_cnt] = c; |
| m_data[m_data_cnt + 1] = NULL_TREE; |
| } |
| else if (m_upwards_2limb) |
| { |
| /* Constant with smaller number of bits. Trade conditional |
| code for .rodata space by extending from a smaller number. */ |
| min_prec = CEIL (min_prec, 2 * limb_prec) * (2 * limb_prec); |
| tree type = build_bitint_type (min_prec, 1); |
| tree c = tree_output_constant_def (fold_convert (type, op)); |
| tree idx2 = make_ssa_name (sizetype); |
| g = gimple_build_assign (idx2, PLUS_EXPR, idx, size_one_node); |
| insert_before (g); |
| g = gimple_build_cond (LT_EXPR, idx, |
| size_int (min_prec / limb_prec), |
| NULL_TREE, NULL_TREE); |
| edge edge_true, edge_false; |
| if_then (g, (min_prec >= (prec - rem) / 2 |
| ? profile_probability::likely () |
| : profile_probability::unlikely ()), |
| edge_true, edge_false); |
| tree c1 = limb_access (TREE_TYPE (op), c, idx, false); |
| g = gimple_build_assign (make_ssa_name (TREE_TYPE (c1)), c1); |
| insert_before (g); |
| c1 = gimple_assign_lhs (g); |
| tree c2 = limb_access (TREE_TYPE (op), c, idx2, false); |
| g = gimple_build_assign (make_ssa_name (TREE_TYPE (c2)), c2); |
| insert_before (g); |
| c2 = gimple_assign_lhs (g); |
| tree c3 = build_int_cst (m_limb_type, ext); |
| m_gsi = gsi_after_labels (edge_true->dest); |
| m_data[m_data_cnt] = make_ssa_name (m_limb_type); |
| m_data[m_data_cnt + 1] = make_ssa_name (m_limb_type); |
| gphi *phi = create_phi_node (m_data[m_data_cnt], |
| edge_true->dest); |
| add_phi_arg (phi, c1, edge_true, UNKNOWN_LOCATION); |
| add_phi_arg (phi, c3, edge_false, UNKNOWN_LOCATION); |
| phi = create_phi_node (m_data[m_data_cnt + 1], edge_true->dest); |
| add_phi_arg (phi, c2, edge_true, UNKNOWN_LOCATION); |
| add_phi_arg (phi, c3, edge_false, UNKNOWN_LOCATION); |
| } |
| else |
| { |
| /* Constant with smaller number of bits. Trade conditional |
| code for .rodata space by extending from a smaller number. |
| Version for loops with random access to the limbs or |
| downwards loops. */ |
| min_prec = CEIL (min_prec, limb_prec) * limb_prec; |
| tree c; |
| if (min_prec <= (unsigned) limb_prec) |
| c = fold_convert (m_limb_type, op); |
| else |
| { |
| tree type = build_bitint_type (min_prec, 1); |
| c = tree_output_constant_def (fold_convert (type, op)); |
| } |
| m_data[m_data_cnt] = c; |
| m_data[m_data_cnt + 1] = integer_type_node; |
| } |
| t = m_data[m_data_cnt]; |
| if (m_data[m_data_cnt + 1] == NULL_TREE) |
| { |
| t = limb_access (TREE_TYPE (op), t, idx, false); |
| g = gimple_build_assign (make_ssa_name (TREE_TYPE (t)), t); |
| insert_before (g); |
| t = gimple_assign_lhs (g); |
| } |
| } |
| else if (m_data[m_data_cnt + 1] == NULL_TREE) |
| { |
| t = limb_access (TREE_TYPE (op), m_data[m_data_cnt], idx, false); |
| g = gimple_build_assign (make_ssa_name (TREE_TYPE (t)), t); |
| insert_before (g); |
| t = gimple_assign_lhs (g); |
| } |
| else |
| t = m_data[m_data_cnt + 1]; |
| if (m_data[m_data_cnt + 1] == integer_type_node) |
| { |
| unsigned int prec = TYPE_PRECISION (TREE_TYPE (op)); |
| unsigned rem = prec % (2 * limb_prec); |
| int ext = tree_int_cst_sgn (op) < 0 ? -1 : 0; |
| tree c = m_data[m_data_cnt]; |
| unsigned min_prec = TYPE_PRECISION (TREE_TYPE (c)); |
| g = gimple_build_cond (LT_EXPR, idx, |
| size_int (min_prec / limb_prec), |
| NULL_TREE, NULL_TREE); |
| edge edge_true, edge_false; |
| if_then (g, (min_prec >= (prec - rem) / 2 |
| ? profile_probability::likely () |
| : profile_probability::unlikely ()), |
| edge_true, edge_false); |
| if (min_prec > (unsigned) limb_prec) |
| { |
| c = limb_access (TREE_TYPE (op), c, idx, false); |
| g = gimple_build_assign (make_ssa_name (TREE_TYPE (c)), c); |
| insert_before (g); |
| c = gimple_assign_lhs (g); |
| } |
| tree c2 = build_int_cst (m_limb_type, ext); |
| m_gsi = gsi_after_labels (edge_true->dest); |
| t = make_ssa_name (m_limb_type); |
| gphi *phi = create_phi_node (t, edge_true->dest); |
| add_phi_arg (phi, c, edge_true, UNKNOWN_LOCATION); |
| add_phi_arg (phi, c2, edge_false, UNKNOWN_LOCATION); |
| } |
| m_data_cnt += 2; |
| return t; |
| default: |
| gcc_unreachable (); |
| } |
| } |
| |
| /* Helper method, add a PHI node with VAL from preheader edge if |
| inside of a loop and m_first is set. Keep state in a pair of m_data |
| elements. */ |
| |
| tree |
| bitint_large_huge::prepare_data_in_out (tree val, tree idx, tree *data_out) |
| { |
| if (!m_first) |
| { |
| *data_out = tree_fits_uhwi_p (idx) ? NULL_TREE : m_data[m_data_cnt + 1]; |
| return m_data[m_data_cnt]; |
| } |
| |
| *data_out = NULL_TREE; |
| if (tree_fits_uhwi_p (idx)) |
| { |
| m_data.safe_push (val); |
| m_data.safe_push (NULL_TREE); |
| return val; |
| } |
| |
| tree in = make_ssa_name (TREE_TYPE (val)); |
| gphi *phi = create_phi_node (in, m_bb); |
| edge e1 = find_edge (m_preheader_bb, m_bb); |
| edge e2 = EDGE_PRED (m_bb, 0); |
| if (e1 == e2) |
| e2 = EDGE_PRED (m_bb, 1); |
| add_phi_arg (phi, val, e1, UNKNOWN_LOCATION); |
| tree out = make_ssa_name (TREE_TYPE (val)); |
| add_phi_arg (phi, out, e2, UNKNOWN_LOCATION); |
| m_data.safe_push (in); |
| m_data.safe_push (out); |
| return in; |
| } |
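| |
| /* With a non-constant IDX inside the loop this creates roughly |
| # in = PHI <val(preheader), out(latch)> |
| where OUT is a fresh SSA_NAME the caller is expected to assign the |
| next iteration's value to; IN and OUT are remembered as a pair of |
| m_data elements (illustrative dump form). */ |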
| |
| /* Return VAL cast to TYPE. If VAL is INTEGER_CST, just |
| convert it without emitting any code, otherwise emit |
| the conversion statement before the current location. */ |
| |
| tree |
| bitint_large_huge::add_cast (tree type, tree val) |
| { |
| if (TREE_CODE (val) == INTEGER_CST) |
| return fold_convert (type, val); |
| |
| tree lhs = make_ssa_name (type); |
| gimple *g = gimple_build_assign (lhs, NOP_EXPR, val); |
| insert_before (g); |
| return lhs; |
| } |
| |
| /* Helper of handle_stmt method, handle PLUS_EXPR or MINUS_EXPR. */ |
| |
| tree |
| bitint_large_huge::handle_plus_minus (tree_code code, tree rhs1, tree rhs2, |
| tree idx) |
| { |
| tree lhs, data_out, ctype; |
| tree rhs1_type = TREE_TYPE (rhs1); |
| gimple *g; |
| tree data_in = prepare_data_in_out (build_zero_cst (m_limb_type), idx, |
| &data_out); |
| |
| if (optab_handler (code == PLUS_EXPR ? uaddc5_optab : usubc5_optab, |
| TYPE_MODE (m_limb_type)) != CODE_FOR_nothing) |
| { |
| ctype = build_complex_type (m_limb_type); |
| if (!types_compatible_p (rhs1_type, m_limb_type)) |
| { |
| if (!TYPE_UNSIGNED (rhs1_type)) |
| { |
| tree type = unsigned_type_for (rhs1_type); |
| rhs1 = add_cast (type, rhs1); |
| rhs2 = add_cast (type, rhs2); |
| } |
| rhs1 = add_cast (m_limb_type, rhs1); |
| rhs2 = add_cast (m_limb_type, rhs2); |
| } |
| lhs = make_ssa_name (ctype); |
| g = gimple_build_call_internal (code == PLUS_EXPR |
| ? IFN_UADDC : IFN_USUBC, |
| 3, rhs1, rhs2, data_in); |
| gimple_call_set_lhs (g, lhs); |
| insert_before (g); |
| if (data_out == NULL_TREE) |
| data_out = make_ssa_name (m_limb_type); |
| g = gimple_build_assign (data_out, IMAGPART_EXPR, |
| build1 (IMAGPART_EXPR, m_limb_type, lhs)); |
| insert_before (g); |
| } |
| else if (types_compatible_p (rhs1_type, m_limb_type)) |
| { |
| ctype = build_complex_type (m_limb_type); |
| lhs = make_ssa_name (ctype); |
| g = gimple_build_call_internal (code == PLUS_EXPR |
| ? IFN_ADD_OVERFLOW : IFN_SUB_OVERFLOW, |
| 2, rhs1, rhs2); |
| gimple_call_set_lhs (g, lhs); |
| insert_before (g); |
| if (data_out == NULL_TREE) |
| data_out = make_ssa_name (m_limb_type); |
| if (!integer_zerop (data_in)) |
| { |
| rhs1 = make_ssa_name (m_limb_type); |
| g = gimple_build_assign (rhs1, REALPART_EXPR, |
| build1 (REALPART_EXPR, m_limb_type, lhs)); |
| insert_before (g); |
| rhs2 = make_ssa_name (m_limb_type); |
| g = gimple_build_assign (rhs2, IMAGPART_EXPR, |
| build1 (IMAGPART_EXPR, m_limb_type, lhs)); |
| insert_before (g); |
| lhs = make_ssa_name (ctype); |
| g = gimple_build_call_internal (code == PLUS_EXPR |
| ? IFN_ADD_OVERFLOW |
| : IFN_SUB_OVERFLOW, |
| 2, rhs1, data_in); |
| gimple_call_set_lhs (g, lhs); |
| insert_before (g); |
| data_in = make_ssa_name (m_limb_type); |
| g = gimple_build_assign (data_in, IMAGPART_EXPR, |
| build1 (IMAGPART_EXPR, m_limb_type, lhs)); |
| insert_before (g); |
| g = gimple_build_assign (data_out, PLUS_EXPR, rhs2, data_in); |
| insert_before (g); |
| } |
| else |
| { |
| g = gimple_build_assign (data_out, IMAGPART_EXPR, |
| build1 (IMAGPART_EXPR, m_limb_type, lhs)); |
| insert_before (g); |
| } |
| } |
| else |
| { |
| tree in = add_cast (rhs1_type, data_in); |
| lhs = make_ssa_name (rhs1_type); |
| g = gimple_build_assign (lhs, code, rhs1, rhs2); |
| insert_before (g); |
| rhs1 = make_ssa_name (rhs1_type); |
| g = gimple_build_assign (rhs1, code, lhs, in); |
| insert_before (g); |
| m_data[m_data_cnt] = NULL_TREE; |
| m_data_cnt += 2; |
| return rhs1; |
| } |
| rhs1 = make_ssa_name (m_limb_type); |
| g = gimple_build_assign (rhs1, REALPART_EXPR, |
| build1 (REALPART_EXPR, m_limb_type, lhs)); |
| insert_before (g); |
| if (!types_compatible_p (rhs1_type, m_limb_type)) |
| rhs1 = add_cast (rhs1_type, rhs1); |
| m_data[m_data_cnt] = data_out; |
| m_data_cnt += 2; |
| return rhs1; |
| } |
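| |
| /* On targets providing the uaddc5/usubc5 optabs, one limb of a + b is |
| thus lowered to roughly (illustrative GIMPLE): |
| _1 = .UADDC (a_limb, b_limb, carry_in); |
| sum = REALPART_EXPR <_1>; |
| carry_out = IMAGPART_EXPR <_1>; |
| otherwise the carry is propagated through a pair of .ADD_OVERFLOW or |
| .SUB_OVERFLOW calls as above. */ |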
| |
| /* Helper function for handle_stmt method, handle LSHIFT_EXPR by |
| a count in the [0, limb_prec - 1] range. */ |
| |
| tree |
| bitint_large_huge::handle_lshift (tree rhs1, tree rhs2, tree idx) |
| { |
| unsigned HOST_WIDE_INT cnt = tree_to_uhwi (rhs2); |
| gcc_checking_assert (cnt < (unsigned) limb_prec); |
| if (cnt == 0) |
| return rhs1; |
| |
| tree lhs, data_out, rhs1_type = TREE_TYPE (rhs1); |
| gimple *g; |
| tree data_in = prepare_data_in_out (build_zero_cst (m_limb_type), idx, |
| &data_out); |
| |
| if (!integer_zerop (data_in)) |
| { |
| lhs = make_ssa_name (m_limb_type); |
| g = gimple_build_assign (lhs, RSHIFT_EXPR, data_in, |
| build_int_cst (unsigned_type_node, |
| limb_prec - cnt)); |
| insert_before (g); |
| if (!types_compatible_p (rhs1_type, m_limb_type)) |
| lhs = add_cast (rhs1_type, lhs); |
| data_in = lhs; |
| } |
| if (types_compatible_p (rhs1_type, m_limb_type)) |
| { |
| if (data_out == NULL_TREE) |
| data_out = make_ssa_name (m_limb_type); |
| g = gimple_build_assign (data_out, rhs1); |
| insert_before (g); |
| } |
| if (cnt < (unsigned) TYPE_PRECISION (rhs1_type)) |
| { |
| lhs = make_ssa_name (rhs1_type); |
| g = gimple_build_assign (lhs, LSHIFT_EXPR, rhs1, rhs2); |
| insert_before (g); |
| if (!integer_zerop (data_in)) |
| { |
| rhs1 = lhs; |
| lhs = make_ssa_name (rhs1_type); |
| g = gimple_build_assign (lhs, BIT_IOR_EXPR, rhs1, data_in); |
| insert_before (g); |
| } |
| } |
| else |
| lhs = data_in; |
| m_data[m_data_cnt] = data_out; |
| m_data_cnt += 2; |
| return lhs; |
| } |
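| |
| /* I.e. for CNT in [1, limb_prec - 1] each limb is computed roughly as |
| lhs = (rhs1 << cnt) | (prev_limb >> (limb_prec - cnt)); |
| with the current limb saved in data_out so it can serve as prev_limb |
| when the next more significant limb is processed. */ |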
| |
| /* Helper function for handle_stmt method, handle an integral |
| to integral conversion. */ |
| |
| tree |
| bitint_large_huge::handle_cast (tree lhs_type, tree rhs1, tree idx) |
| { |
| tree rhs_type = TREE_TYPE (rhs1); |
| gimple *g; |
| if (TREE_CODE (rhs1) == SSA_NAME |
| && TREE_CODE (lhs_type) == BITINT_TYPE |
| && TREE_CODE (rhs_type) == BITINT_TYPE |
| && bitint_precision_kind (lhs_type) >= bitint_prec_large |
| && bitint_precision_kind (rhs_type) >= bitint_prec_large) |
| { |
| if (TYPE_PRECISION (rhs_type) >= TYPE_PRECISION (lhs_type) |
| /* If lhs has bigger precision than rhs, we can use |
| the simple case only if there is a guarantee that |
| the most significant limb is handled in straight |
| line code. If m_var_msb (on left shifts) or |
| if m_upwards_2limb * limb_prec is equal to |
| lhs precision, that is not the case. */ |
| || (!m_var_msb |
| && tree_int_cst_equal (TYPE_SIZE (rhs_type), |
| TYPE_SIZE (lhs_type)) |
| && (!m_upwards_2limb |
| || (m_upwards_2limb * limb_prec |
| < TYPE_PRECISION (lhs_type))))) |
| { |
| rhs1 = handle_operand (rhs1, idx); |
| if (tree_fits_uhwi_p (idx)) |
| { |
| tree type = limb_access_type (lhs_type, idx); |
| if (!types_compatible_p (type, TREE_TYPE (rhs1))) |
| rhs1 = add_cast (type, rhs1); |
| } |
| return rhs1; |
| } |
| tree t; |
| /* Indexes lower than this don't need any special processing. */ |
| unsigned low = ((unsigned) TYPE_PRECISION (rhs_type) |
| - !TYPE_UNSIGNED (rhs_type)) / limb_prec; |
| /* Indexes >= this always contain an extension. */ |
| unsigned high = CEIL ((unsigned) TYPE_PRECISION (rhs_type), limb_prec); |
| bool save_first = m_first; |
| if (m_first) |
| { |
| m_data.safe_push (NULL_TREE); |
| m_data.safe_push (NULL_TREE); |
| m_data.safe_push (NULL_TREE); |
| if (TYPE_UNSIGNED (rhs_type)) |
| /* No need to keep state between iterations. */ |
| ; |
| else if (m_upwards && !m_upwards_2limb) |
| /* We need to keep state between iterations, but |
| not within any loop, everything is straight line |
| code with only increasing indexes. */ |
| ; |
| else if (!m_upwards_2limb) |
| { |
| unsigned save_data_cnt = m_data_cnt; |
| gimple_stmt_iterator save_gsi = m_gsi; |
| m_gsi = m_init_gsi; |
| if (gsi_end_p (m_gsi)) |
| m_gsi = gsi_after_labels (gsi_bb (m_gsi)); |
| else |
| gsi_next (&m_gsi); |
| m_data_cnt = save_data_cnt + 3; |
| t = handle_operand (rhs1, size_int (low)); |
| m_first = false; |
| m_data[save_data_cnt + 2] |
| = build_int_cst (NULL_TREE, m_data_cnt); |
| m_data_cnt = save_data_cnt; |
| t = add_cast (signed_type_for (m_limb_type), t); |
| tree lpm1 = build_int_cst (unsigned_type_node, limb_prec - 1); |
| tree n = make_ssa_name (TREE_TYPE (t)); |
| g = gimple_build_assign (n, RSHIFT_EXPR, t, lpm1); |
| insert_before (g); |
| m_data[save_data_cnt + 1] = add_cast (m_limb_type, n); |
| m_init_gsi = m_gsi; |
| if (gsi_end_p (m_init_gsi)) |
| m_init_gsi = gsi_last_bb (gsi_bb (m_init_gsi)); |
| else |
| gsi_prev (&m_init_gsi); |
| m_gsi = save_gsi; |
| } |
| else if (m_upwards_2limb * limb_prec < TYPE_PRECISION (rhs_type)) |
| /* We need to keep state between iterations, but |
| fortunately not within the loop, only afterwards. */ |
| ; |
| else |
| { |
| tree out; |
| m_data.truncate (m_data_cnt); |
| prepare_data_in_out (build_zero_cst (m_limb_type), idx, &out); |
| m_data.safe_push (NULL_TREE); |
| } |
| } |
| |
| unsigned save_data_cnt = m_data_cnt; |
| m_data_cnt += 3; |
| if (!tree_fits_uhwi_p (idx)) |
| { |
| if (m_upwards_2limb |
| && (m_upwards_2limb * limb_prec |
| <= ((unsigned) TYPE_PRECISION (rhs_type) |
| - !TYPE_UNSIGNED (rhs_type)))) |
| { |
| rhs1 = handle_operand (rhs1, idx); |
| if (m_first) |
| m_data[save_data_cnt + 2] |
| = build_int_cst (NULL_TREE, m_data_cnt); |
| m_first = save_first; |
| return rhs1; |
| } |
| bool single_comparison |
| = low == high || (m_upwards_2limb && (low & 1) == m_first); |
| g = gimple_build_cond (single_comparison ? LT_EXPR : LE_EXPR, |
| idx, size_int (low), NULL_TREE, NULL_TREE); |
| edge edge_true_true, edge_true_false, edge_false; |
| if_then_if_then_else (g, (single_comparison ? NULL |
| : gimple_build_cond (EQ_EXPR, idx, |
| size_int (low), |
| NULL_TREE, |
| NULL_TREE)), |
| profile_probability::likely (), |
| profile_probability::unlikely (), |
| edge_true_true, edge_true_false, edge_false); |
| bool save_cast_conditional = m_cast_conditional; |
| m_cast_conditional = true; |
| m_bitfld_load = 0; |
| tree t1 = handle_operand (rhs1, idx), t2 = NULL_TREE; |
| if (m_first) |
| m_data[save_data_cnt + 2] |
| = build_int_cst (NULL_TREE, m_data_cnt); |
| tree ext = NULL_TREE; |
| tree bitfld = NULL_TREE; |
| if (!single_comparison) |
| { |
| m_gsi = gsi_after_labels (edge_true_true->src); |
| m_first = false; |
| m_data_cnt = save_data_cnt + 3; |
| if (m_bitfld_load) |
| { |
| bitfld = m_data[m_bitfld_load]; |
| m_data[m_bitfld_load] = m_data[m_bitfld_load + 2]; |
| m_bitfld_load = 0; |
| } |
| t2 = handle_operand (rhs1, size_int (low)); |
| if (!useless_type_conversion_p (m_limb_type, TREE_TYPE (t2))) |
| t2 = add_cast (m_limb_type, t2); |
| if (!TYPE_UNSIGNED (rhs_type) && m_upwards_2limb) |
| { |
| ext = add_cast (signed_type_for (m_limb_type), t2); |
| tree lpm1 = build_int_cst (unsigned_type_node, |
| limb_prec - 1); |
| tree n = make_ssa_name (TREE_TYPE (ext)); |
| g = gimple_build_assign (n, RSHIFT_EXPR, ext, lpm1); |
| insert_before (g); |
| ext = add_cast (m_limb_type, n); |
| } |
| } |
| tree t3; |
| if (TYPE_UNSIGNED (rhs_type)) |
| t3 = build_zero_cst (m_limb_type); |
| else if (m_upwards_2limb && (save_first || ext != NULL_TREE)) |
| t3 = m_data[save_data_cnt]; |
| else |
| t3 = m_data[save_data_cnt + 1]; |
| m_gsi = gsi_after_labels (edge_true_false->dest); |
| t = make_ssa_name (m_limb_type); |
| gphi *phi = create_phi_node (t, edge_true_false->dest); |
| add_phi_arg (phi, t1, edge_true_false, UNKNOWN_LOCATION); |
| add_phi_arg (phi, t3, edge_false, UNKNOWN_LOCATION); |
| if (edge_true_true) |
| add_phi_arg (phi, t2, edge_true_true, UNKNOWN_LOCATION); |
| if (ext) |
| { |
| tree t4 = make_ssa_name (m_limb_type); |
| phi = create_phi_node (t4, edge_true_false->dest); |
| add_phi_arg (phi, build_zero_cst (m_limb_type), edge_true_false, |
| UNKNOWN_LOCATION); |
| add_phi_arg (phi, m_data[save_data_cnt], edge_false, |
| UNKNOWN_LOCATION); |
| add_phi_arg (phi, ext, edge_true_true, UNKNOWN_LOCATION); |
| g = gimple_build_assign (m_data[save_data_cnt + 1], t4); |
| insert_before (g); |
| } |
| if (m_bitfld_load) |
| { |
| tree t4; |
| if (!m_first) |
| t4 = m_data[m_bitfld_load + 1]; |
| else |
| t4 = make_ssa_name (m_limb_type); |
| phi = create_phi_node (t4, edge_true_false->dest); |
| add_phi_arg (phi, |
| edge_true_true ? bitfld : m_data[m_bitfld_load], |
| edge_true_false, UNKNOWN_LOCATION); |
| add_phi_arg (phi, m_data[m_bitfld_load + 2], |
| edge_false, UNKNOWN_LOCATION); |
| if (edge_true_true) |
| add_phi_arg (phi, m_data[m_bitfld_load], edge_true_true, |
| UNKNOWN_LOCATION); |
| m_data[m_bitfld_load] = t4; |
| m_data[m_bitfld_load + 2] = t4; |
| m_bitfld_load = 0; |
| } |
| m_cast_conditional = save_cast_conditional; |
| m_first = save_first; |
| return t; |
| } |
| else |
| { |
| if (tree_to_uhwi (idx) < low) |
| { |
| t = handle_operand (rhs1, idx); |
| if (m_first) |
| m_data[save_data_cnt + 2] |
| = build_int_cst (NULL_TREE, m_data_cnt); |
| } |
| else if (tree_to_uhwi (idx) < high) |
| { |
| t = handle_operand (rhs1, size_int (low)); |
| if (m_first) |
| m_data[save_data_cnt + 2] |
| = build_int_cst (NULL_TREE, m_data_cnt); |
| if (!useless_type_conversion_p (m_limb_type, TREE_TYPE (t))) |
| t = add_cast (m_limb_type, t); |
| tree ext = NULL_TREE; |
| if (!TYPE_UNSIGNED (rhs_type) && m_upwards) |
| { |
| ext = add_cast (signed_type_for (m_limb_type), t); |
| tree lpm1 = build_int_cst (unsigned_type_node, |
| limb_prec - 1); |
| tree n = make_ssa_name (TREE_TYPE (ext)); |
| g = gimple_build_assign (n, RSHIFT_EXPR, ext, lpm1); |
| insert_before (g); |
| ext = add_cast (m_limb_type, n); |
| m_data[save_data_cnt + 1] = ext; |
| } |
| } |
| else |
| { |
| if (TYPE_UNSIGNED (rhs_type) && m_first) |
| { |
| handle_operand (rhs1, size_zero_node); |
| m_data[save_data_cnt + 2] |
| = build_int_cst (NULL_TREE, m_data_cnt); |
| } |
| else |
| m_data_cnt = tree_to_uhwi (m_data[save_data_cnt + 2]); |
| if (TYPE_UNSIGNED (rhs_type)) |
| t = build_zero_cst (m_limb_type); |
| else |
| t = m_data[save_data_cnt + 1]; |
| } |
| tree type = limb_access_type (lhs_type, idx); |
| if (!useless_type_conversion_p (type, m_limb_type)) |
| t = add_cast (type, t); |
| m_first = save_first; |
| return t; |
| } |
| } |
| else if (TREE_CODE (lhs_type) == BITINT_TYPE |
| && bitint_precision_kind (lhs_type) >= bitint_prec_large |
| && INTEGRAL_TYPE_P (rhs_type)) |
| { |
| /* Add support for 3 or more limbs filled in from normal integral |
| type if this assert fails. If no target chooses limb mode smaller |
| than half of largest supported normal integral type, this will not |
| be needed. */ |
| gcc_assert (TYPE_PRECISION (rhs_type) <= 2 * limb_prec); |
| tree r1 = NULL_TREE, r2 = NULL_TREE, rext = NULL_TREE; |
| if (m_first) |
| { |
| gimple_stmt_iterator save_gsi = m_gsi; |
| m_gsi = m_init_gsi; |
| if (gsi_end_p (m_gsi)) |
| m_gsi = gsi_after_labels (gsi_bb (m_gsi)); |
| else |
| gsi_next (&m_gsi); |
| if (TREE_CODE (rhs_type) == BITINT_TYPE |
| && bitint_precision_kind (rhs_type) == bitint_prec_middle) |
| { |
| tree type = NULL_TREE; |
| rhs1 = maybe_cast_middle_bitint (&m_gsi, rhs1, type); |
| rhs_type = TREE_TYPE (rhs1); |
| } |
| r1 = rhs1; |
| if (!useless_type_conversion_p (m_limb_type, TREE_TYPE (rhs1))) |
| r1 = add_cast (m_limb_type, rhs1); |
| if (TYPE_PRECISION (rhs_type) > limb_prec) |
| { |
| g = gimple_build_assign (make_ssa_name (rhs_type), |
| RSHIFT_EXPR, rhs1, |
| build_int_cst (unsigned_type_node, |
| limb_prec)); |
| insert_before (g); |
| r2 = add_cast (m_limb_type, gimple_assign_lhs (g)); |
| } |
| if (TYPE_UNSIGNED (rhs_type)) |
| rext = build_zero_cst (m_limb_type); |
| else |
| { |
| rext = add_cast (signed_type_for (m_limb_type), r2 ? r2 : r1); |
| g = gimple_build_assign (make_ssa_name (TREE_TYPE (rext)), |
| RSHIFT_EXPR, rext, |
| build_int_cst (unsigned_type_node, |
| limb_prec - 1)); |
| insert_before (g); |
| rext = add_cast (m_limb_type, gimple_assign_lhs (g)); |
| } |
| m_init_gsi = m_gsi; |
| if (gsi_end_p (m_init_gsi)) |
| m_init_gsi = gsi_last_bb (gsi_bb (m_init_gsi)); |
| else |
| gsi_prev (&m_init_gsi); |
| m_gsi = save_gsi; |
| } |
| tree t; |
| if (m_upwards_2limb) |
| { |
| if (m_first) |
| { |
| tree out1, out2; |
| prepare_data_in_out (r1, idx, &out1); |
| g = gimple_build_assign (m_data[m_data_cnt + 1], rext); |
| insert_before (g); |
| if (TYPE_PRECISION (rhs_type) > limb_prec) |
| { |
| prepare_data_in_out (r2, idx, &out2); |
| g = gimple_build_assign (m_data[m_data_cnt + 3], rext); |
| insert_before (g); |
| m_data.pop (); |
| t = m_data.pop (); |
| m_data[m_data_cnt + 1] = t; |
| } |
| else |
| m_data[m_data_cnt + 1] = rext; |
| m_data.safe_push (rext); |
| t = m_data[m_data_cnt]; |
| } |
| else if (!tree_fits_uhwi_p (idx)) |
| t = m_data[m_data_cnt + 1]; |
| else |
| { |
| tree type = limb_access_type (lhs_type, idx); |
| t = m_data[m_data_cnt + 2]; |
| if (!useless_type_conversion_p (type, m_limb_type)) |
| t = add_cast (type, t); |
| } |
| m_data_cnt += 3; |
| return t; |
| } |
| else if (m_first) |
| { |
| m_data.safe_push (r1); |
| m_data.safe_push (r2); |
| m_data.safe_push (rext); |
| } |
| if (tree_fits_uhwi_p (idx)) |
| { |
| tree type = limb_access_type (lhs_type, idx); |
| if (integer_zerop (idx)) |
| t = m_data[m_data_cnt]; |
| else if (TYPE_PRECISION (rhs_type) > limb_prec |
| && integer_onep (idx)) |
| t = m_data[m_data_cnt + 1]; |
| else |
| t = m_data[m_data_cnt + 2]; |
| if (!useless_type_conversion_p (type, m_limb_type)) |
| t = add_cast (type, t); |
| m_data_cnt += 3; |
| return t; |
| } |
| g = gimple_build_cond (NE_EXPR, idx, size_zero_node, |
| NULL_TREE, NULL_TREE); |
| edge e2, e3, e4 = NULL; |
| if_then (g, profile_probability::likely (), e2, e3); |
| if (m_data[m_data_cnt + 1]) |
| { |
| g = gimple_build_cond (EQ_EXPR, idx, size_one_node, |
| NULL_TREE, NULL_TREE); |
| insert_before (g); |
| edge e5 = split_block (gsi_bb (m_gsi), g); |
| e4 = make_edge (e5->src, e2->dest, EDGE_TRUE_VALUE); |
| e2 = find_edge (e5->dest, e2->dest); |
| e4->probability = profile_probability::unlikely (); |
| e5->flags = EDGE_FALSE_VALUE; |
| e5->probability = e4->probability.invert (); |
| } |
| m_gsi = gsi_after_labels (e2->dest); |
| t = make_ssa_name (m_limb_type); |
| gphi *phi = create_phi_node (t, e2->dest); |
| add_phi_arg (phi, m_data[m_data_cnt + 2], e2, UNKNOWN_LOCATION); |
| add_phi_arg (phi, m_data[m_data_cnt], e3, UNKNOWN_LOCATION); |
| if (e4) |
| add_phi_arg (phi, m_data[m_data_cnt + 1], e4, UNKNOWN_LOCATION); |
| m_data_cnt += 3; |
| return t; |
| } |
| return NULL_TREE; |
| } |
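| |
| /* The extension limb used above is computed with the usual idiom, |
| roughly |
| ext = (unsigned limb) ((signed limb) msb_limb >> (limb_prec - 1)); |
| i.e. all zeros or all ones depending on the sign of the most |
| significant limb of the narrower operand (a sketch; see the |
| RSHIFT_EXPR sequences above for the exact statements emitted). */ |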
| |
| /* Helper function for handle_stmt method, handle a load from memory. */ |
| |
| tree |
| bitint_large_huge::handle_load (gimple *stmt, tree idx) |
| { |
| tree rhs1 = gimple_assign_rhs1 (stmt); |
| tree rhs_type = TREE_TYPE (rhs1); |
| bool eh = stmt_ends_bb_p (stmt); |
| edge eh_edge = NULL; |
| gimple *g; |
| |
| if (eh) |
| { |
| edge_iterator ei; |
| basic_block bb = gimple_bb (stmt); |
| |
| FOR_EACH_EDGE (eh_edge, ei, bb->succs) |
| if (eh_edge->flags & EDGE_EH) |
| break; |
| } |
| |
| if (TREE_CODE (rhs1) == COMPONENT_REF |
| && DECL_BIT_FIELD_TYPE (TREE_OPERAND (rhs1, 1))) |
| { |
| tree fld = TREE_OPERAND (rhs1, 1); |
| /* For little-endian, we can allow as inputs bit-fields |
| which start at a limb boundary. */ |
| gcc_assert (tree_fits_uhwi_p (DECL_FIELD_BIT_OFFSET (fld))); |
| if (DECL_OFFSET_ALIGN (fld) >= TYPE_ALIGN (TREE_TYPE (rhs1)) |
| && (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (fld)) % limb_prec) == 0) |
| goto normal_load; |
| /* Even if DECL_FIELD_BIT_OFFSET (fld) is a multiple of BITS_PER_UNIT, |
| handle it normally for now. */ |
| if ((tree_to_uhwi (DECL_FIELD_BIT_OFFSET (fld)) % BITS_PER_UNIT) == 0) |
| goto normal_load; |
| tree repr = DECL_BIT_FIELD_REPRESENTATIVE (fld); |
| poly_int64 bitoffset; |
| poly_uint64 field_offset, repr_offset; |
| bool var_field_off = false; |
| if (poly_int_tree_p (DECL_FIELD_OFFSET (fld), &field_offset) |
| && poly_int_tree_p (DECL_FIELD_OFFSET (repr), &repr_offset)) |
| bitoffset = (field_offset - repr_offset) * BITS_PER_UNIT; |
| else |
| { |
| bitoffset = 0; |
| var_field_off = true; |
| } |
| bitoffset += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (fld)) |
| - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr))); |
| tree nrhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), |
| TREE_OPERAND (rhs1, 0), repr, |
| var_field_off ? TREE_OPERAND (rhs1, 2) : NULL_TREE); |
| HOST_WIDE_INT bo = bitoffset.to_constant (); |
| unsigned bo_idx = (unsigned HOST_WIDE_INT) bo / limb_prec; |
| unsigned bo_bit = (unsigned HOST_WIDE_INT) bo % limb_prec; |
| if (m_first) |
| { |
| if (m_upwards) |
| { |
| gimple_stmt_iterator save_gsi = m_gsi; |
| m_gsi = m_init_gsi; |
| if (gsi_end_p (m_gsi)) |
| m_gsi = gsi_after_labels (gsi_bb (m_gsi)); |
| else |
| gsi_next (&m_gsi); |
| tree t = limb_access (rhs_type, nrhs1, size_int (bo_idx), true); |
| tree iv = make_ssa_name (m_limb_type); |
| g = gimple_build_assign (iv, t); |
| insert_before (g); |
| if (eh) |
| { |
| maybe_duplicate_eh_stmt (g, stmt); |
| if (eh_edge) |
| { |
| edge e = split_block (gsi_bb (m_gsi), g); |
| make_edge (e->src, eh_edge->dest, EDGE_EH)->probability |
| = profile_probability::very_unlikely (); |
| m_gsi = gsi_after_labels (e->dest); |
| if (gsi_bb (save_gsi) == e->src) |
| { |
| if (gsi_end_p (save_gsi)) |
| save_gsi = gsi_end_bb (e->dest); |
| else |
| save_gsi = gsi_for_stmt (gsi_stmt (save_gsi)); |
| } |
| if (m_preheader_bb == e->src) |
| m_preheader_bb = e->dest; |
| } |
| } |
| m_init_gsi = m_gsi; |
| if (gsi_end_p (m_init_gsi)) |
| m_init_gsi = gsi_last_bb (gsi_bb (m_init_gsi)); |
| else |
| gsi_prev (&m_init_gsi); |
| m_gsi = save_gsi; |
| tree out; |
| prepare_data_in_out (iv, idx, &out); |
| out = m_data[m_data_cnt]; |
| m_data.safe_push (out); |
| } |
| else |
| { |
| m_data.safe_push (NULL_TREE); |
| m_data.safe_push (NULL_TREE); |
| m_data.safe_push (NULL_TREE); |
| } |
| } |
| |
| tree nidx0 = NULL_TREE, nidx1; |
| tree iv = m_data[m_data_cnt]; |
| if (m_cast_conditional && iv) |
| { |
| gcc_assert (!m_bitfld_load); |
| m_bitfld_load = m_data_cnt; |
| } |
| if (tree_fits_uhwi_p (idx)) |
| { |
| unsigned prec = TYPE_PRECISION (rhs_type); |
| unsigned HOST_WIDE_INT i = tree_to_uhwi (idx); |
| gcc_assert (i * limb_prec < prec); |
| nidx1 = size_int (i + bo_idx + 1); |
| if ((i + 1) * limb_prec > prec) |
| { |
| prec %= limb_prec; |
| if (prec + bo_bit <= (unsigned) limb_prec) |
| nidx1 = NULL_TREE; |
| } |
| if (!iv) |
| nidx0 = size_int (i + bo_idx); |
| } |
| else |
| { |
| if (!iv) |
| { |
| if (bo_idx == 0) |
| nidx0 = idx; |
| else |
| { |
| nidx0 = make_ssa_name (sizetype); |
| g = gimple_build_assign (nidx0, PLUS_EXPR, idx, |
| size_int (bo_idx)); |
| insert_before (g); |
| } |
| } |
| nidx1 = make_ssa_name (sizetype); |
| g = gimple_build_assign (nidx1, PLUS_EXPR, idx, |
| size_int (bo_idx + 1)); |
| insert_before (g); |
| } |
| |
| tree iv2 = NULL_TREE; |
| if (nidx0) |
| { |
| tree t = limb_access (rhs_type, nrhs1, nidx0, true); |
| iv = make_ssa_name (m_limb_type); |
| g = gimple_build_assign (iv, t); |
| insert_before (g); |
| gcc_assert (!eh); |
| } |
| if (nidx1) |
| { |
| bool conditional = m_var_msb && !tree_fits_uhwi_p (idx); |
| unsigned prec = TYPE_PRECISION (rhs_type); |
| if (conditional) |
| { |
| if ((prec % limb_prec) == 0 |
| || ((prec % limb_prec) + bo_bit > (unsigned) limb_prec)) |
| conditional = false; |
| } |
| edge edge_true = NULL, edge_false = NULL; |
| if (conditional) |
| { |
| g = gimple_build_cond (NE_EXPR, idx, |
| size_int (prec / limb_prec), |
| NULL_TREE, NULL_TREE); |
| if_then (g, profile_probability::likely (), |
| edge_true, edge_false); |
| } |
| tree t = limb_access (rhs_type, nrhs1, nidx1, true); |
| if (m_upwards_2limb |
| && !m_first |
| && !m_bitfld_load |
| && !tree_fits_uhwi_p (idx)) |
| iv2 = m_data[m_data_cnt + 1]; |
| else |
| iv2 = make_ssa_name (m_limb_type); |
| g = gimple_build_assign (iv2, t); |
| insert_before (g); |
| if (eh) |
| { |
| maybe_duplicate_eh_stmt (g, stmt); |
| if (eh_edge) |
| { |
| edge e = split_block (gsi_bb (m_gsi), g); |
| m_gsi = gsi_after_labels (e->dest); |
| make_edge (e->src, eh_edge->dest, EDGE_EH)->probability |
| = profile_probability::very_unlikely (); |
| } |
| } |
| if (conditional) |
| { |
| tree iv3 = make_ssa_name (m_limb_type); |
| if (eh) |
| edge_true = find_edge (gsi_bb (m_gsi), edge_false->dest); |
| gphi *phi = create_phi_node (iv3, edge_true->dest); |
| add_phi_arg (phi, iv2, edge_true, UNKNOWN_LOCATION); |
| add_phi_arg (phi, build_zero_cst (m_limb_type), |
| edge_false, UNKNOWN_LOCATION); |
| m_gsi = gsi_after_labels (edge_true->dest); |
| } |
| } |
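| /* Compose the limb to return from the two adjacent limbs: the low |
| bits come from IV shifted right by BO_BIT, the high bits (if any) |
| from IV2 shifted left by limb_prec - BO_BIT. */ |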
| g = gimple_build_assign (make_ssa_name (m_limb_type), RSHIFT_EXPR, |
| iv, build_int_cst (unsigned_type_node, bo_bit)); |
| insert_before (g); |
| iv = gimple_assign_lhs (g); |
| if (iv2) |
| { |
| g = gimple_build_assign (make_ssa_name (m_limb_type), LSHIFT_EXPR, |
| iv2, build_int_cst (unsigned_type_node, |
| limb_prec - bo_bit)); |
| insert_before (g); |
| g = gimple_build_assign (make_ssa_name (m_limb_type), BIT_IOR_EXPR, |
| gimple_assign_lhs (g), iv); |
| insert_before (g); |
| iv = gimple_assign_lhs (g); |
| if (m_data[m_data_cnt]) |
| m_data[m_data_cnt] = iv2; |
| } |
| if (tree_fits_uhwi_p (idx)) |
| { |
| tree atype = limb_access_type (rhs_type, idx); |
| if (!useless_type_conversion_p (atype, TREE_TYPE (iv))) |
| iv = add_cast (atype, iv); |
| } |
| m_data_cnt += 3; |
| return iv; |
| } |
| |
| normal_load: |
| /* Use write_p = true for loads with EH edges to make |
| sure limb_access doesn't add a cast as a separate |
| statement after it. */ |
| rhs1 = limb_access (rhs_type, rhs1, idx, eh); |
| tree ret = make_ssa_name (TREE_TYPE (rhs1)); |
| g = gimple_build_assign (ret, rhs1); |
| insert_before (g); |
| if (eh) |
| { |
| maybe_duplicate_eh_stmt (g, stmt); |
| if (eh_edge) |
| { |
| edge e = split_block (gsi_bb (m_gsi), g); |
| m_gsi = gsi_after_labels (e->dest); |
| make_edge (e->src, eh_edge->dest, EDGE_EH)->probability |
| = profile_probability::very_unlikely (); |
| } |
| if (tree_fits_uhwi_p (idx)) |
| { |
| tree atype = limb_access_type (rhs_type, idx); |
| if (!useless_type_conversion_p (atype, TREE_TYPE (rhs1))) |
| ret = add_cast (atype, ret); |
| } |
| } |
| return ret; |
| } |
| |
| /* Return limb IDX of the result of mergeable statement STMT. */ |
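| /* As an illustrative sketch (hypothetical names; assuming a 64-bit |
| limb), for _BitInt(256) c = a & b this is called once per limb index |
| and emits the equivalent of c[idx] = a[idx] & b[idx], while |
| PLUS_EXPR/MINUS_EXPR go through handle_plus_minus, which also |
| propagates the carry or borrow between adjacent limbs. */ |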
| |
| tree |
| bitint_large_huge::handle_stmt (gimple *stmt, tree idx) |
| { |
| tree lhs, rhs1, rhs2 = NULL_TREE; |
| gimple *g; |
| switch (gimple_code (stmt)) |
| { |
| case GIMPLE_ASSIGN: |
| if (gimple_assign_load_p (stmt)) |
| return handle_load (stmt, idx); |
| switch (gimple_assign_rhs_code (stmt)) |
| { |
| case BIT_AND_EXPR: |
| case BIT_IOR_EXPR: |
| case BIT_XOR_EXPR: |
| rhs2 = handle_operand (gimple_assign_rhs2 (stmt), idx); |
| /* FALLTHRU */ |
| case BIT_NOT_EXPR: |
| rhs1 = handle_operand (gimple_assign_rhs1 (stmt), idx); |
| lhs = make_ssa_name (TREE_TYPE (rhs1)); |
| g = gimple_build_assign (lhs, gimple_assign_rhs_code (stmt), |
| rhs1, rhs2); |
| insert_before (g); |
| return lhs; |
| case PLUS_EXPR: |
| case MINUS_EXPR: |
| rhs1 = handle_operand (gimple_assign_rhs1 (stmt), idx); |
| rhs2 = handle_operand (gimple_assign_rhs2 (stmt), idx); |
| return handle_plus_minus (gimple_assign_rhs_code (stmt), |
| rhs1, rhs2, idx); |
| case NEGATE_EXPR: |
| rhs2 = handle_operand (gimple_assign_rhs1 (stmt), idx); |
| rhs1 = build_zero_cst (TREE_TYPE (rhs2)); |
| return handle_plus_minus (MINUS_EXPR, rhs1, rhs2, idx); |
| case LSHIFT_EXPR: |
| return handle_lshift (handle_operand (gimple_assign_rhs1 (stmt), |
| idx), |
| gimple_assign_rhs2 (stmt), idx); |
| case SSA_NAME: |
| case INTEGER_CST: |
| return handle_operand (gimple_assign_rhs1 (stmt), idx); |
| CASE_CONVERT: |
| case VIEW_CONVERT_EXPR: |
| return handle_cast (TREE_TYPE (gimple_assign_lhs (stmt)), |
| gimple_assign_rhs1 (stmt), idx); |
| default: |
| break; |
| } |
| break; |
| default: |
| break; |
| } |
| gcc_unreachable (); |
| } |
| |
| /* Return the minimum precision of OP at STMT. |
| A positive value is the minimum precision above which all bits |
| are zero; a negative value means all bits above the negation of |
| the value are copies of the sign bit. */ |
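| /* For example (a sketch, not tied to any particular target): if OP has |
| a computed range of [0, 1000], wi::min_precision (1000, UNSIGNED) is 10 |
| and 10 is returned; if OP has range [-5, 10], min_prec1 = 4 and |
| min_prec2 = 5, so -5 is returned, meaning bits 5 and above are all |
| copies of the sign bit (bit 4). */ |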
| |
| static int |
| range_to_prec (tree op, gimple *stmt) |
| { |
| int_range_max r; |
| wide_int w; |
| tree type = TREE_TYPE (op); |
| unsigned int prec = TYPE_PRECISION (type); |
| |
| if (!optimize |
| || !get_range_query (cfun)->range_of_expr (r, op, stmt) |
| || r.undefined_p ()) |
| { |
| if (TYPE_UNSIGNED (type)) |
| return prec; |
| else |
| return MIN ((int) -prec, -2); |
| } |
| |
| if (!TYPE_UNSIGNED (TREE_TYPE (op))) |
| { |
| w = r.lower_bound (); |
| if (wi::neg_p (w)) |
| { |
| int min_prec1 = wi::min_precision (w, SIGNED); |
| w = r.upper_bound (); |
| int min_prec2 = wi::min_precision (w, SIGNED); |
| int min_prec = MAX (min_prec1, min_prec2); |
| return MIN (-min_prec, -2); |
| } |
| } |
| |
| w = r.upper_bound (); |
| int min_prec = wi::min_precision (w, UNSIGNED); |
| return MAX (min_prec, 1); |
| } |
| |
| /* Return the address of the first limb of OP and write its precision |
| into *PREC. If positive, the operand is zero-extended from that |
| precision; if negative, the operand is sign-extended from -*PREC. |
| If PREC_STORED is NULL, this is the toplevel call; otherwise |
| *PREC_STORED is the precision from the innermost call without |
| range optimizations. */ |
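| /* For instance (assuming a 64-bit limb as on x86_64), for INTEGER_CST 5 |
| with a large _BitInt type, min_prec is 3, so *PREC is set to 3 and |
| *PREC_STORED to 64; the constant is emitted as a single-limb constant |
| pool entry whose address is returned. */ |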
| |
| tree |
| bitint_large_huge::handle_operand_addr (tree op, gimple *stmt, |
| int *prec_stored, int *prec) |
| { |
| wide_int w; |
| location_t loc_save = m_loc; |
| if ((TREE_CODE (TREE_TYPE (op)) != BITINT_TYPE |
| || bitint_precision_kind (TREE_TYPE (op)) < bitint_prec_large) |
| && TREE_CODE (op) != INTEGER_CST) |
| { |
| do_int: |
| *prec = range_to_prec (op, stmt); |
| bitint_prec_kind kind = bitint_prec_small; |
| gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (op))); |
| if (TREE_CODE (TREE_TYPE (op)) == BITINT_TYPE) |
| kind = bitint_precision_kind (TREE_TYPE (op)); |
| if (kind == bitint_prec_middle) |
| { |
| tree type = NULL_TREE; |
| op = maybe_cast_middle_bitint (&m_gsi, op, type); |
| } |
| tree op_type = TREE_TYPE (op); |
| unsigned HOST_WIDE_INT nelts |
| = CEIL (TYPE_PRECISION (op_type), limb_prec); |
| /* Add support for 3 or more limbs filled in from a normal |
| integral type if this assert fails. If no target chooses a |
| limb mode smaller than half of the largest supported normal |
| integral type, this will not be needed. */ |
| gcc_assert (nelts <= 2); |
| if (prec_stored) |
| *prec_stored = (TYPE_UNSIGNED (op_type) |
| ? TYPE_PRECISION (op_type) |
| : -TYPE_PRECISION (op_type)); |
| if (*prec <= limb_prec && *prec >= -limb_prec) |
| { |
| nelts = 1; |
| if (prec_stored) |
| { |
| if (TYPE_UNSIGNED (op_type)) |
| { |
| if (*prec_stored > limb_prec) |
| *prec_stored = limb_prec; |
| } |
| else if (*prec_stored < -limb_prec) |
| *prec_stored = -limb_prec; |
| } |
| } |
| tree atype = build_array_type_nelts (m_limb_type, nelts); |
| tree var = create_tmp_var (atype); |
| tree t1 = op; |
| if (!useless_type_conversion_p (m_limb_type, op_type)) |
| t1 = add_cast (m_limb_type, t1); |
| tree v = build4 (ARRAY_REF, m_limb_type, var, size_zero_node, |
| NULL_TREE, NULL_TREE); |
| gimple *g = gimple_build_assign (v, t1); |
| insert_before (g); |
| if (nelts > 1) |
| { |
| tree lp = build_int_cst (unsigned_type_node, limb_prec); |
| g = gimple_build_assign (make_ssa_name (op_type), |
| RSHIFT_EXPR, op, lp); |
| insert_before (g); |
| tree t2 = gimple_assign_lhs (g); |
| t2 = add_cast (m_limb_type, t2); |
| v = build4 (ARRAY_REF, m_limb_type, var, size_one_node, |
| NULL_TREE, NULL_TREE); |
| g = gimple_build_assign (v, t2); |
| insert_before (g); |
| } |
| tree ret = build_fold_addr_expr (var); |
| if (!stmt_ends_bb_p (gsi_stmt (m_gsi))) |
| { |
| tree clobber = build_clobber (atype, CLOBBER_EOL); |
| g = gimple_build_assign (var, clobber); |
| gsi_insert_after (&m_gsi, g, GSI_SAME_STMT); |
| } |
| m_loc = loc_save; |
| return ret; |
| } |
| switch (TREE_CODE (op)) |
| { |
| case SSA_NAME: |
| if (m_names == NULL |
| || !bitmap_bit_p (m_names, SSA_NAME_VERSION (op))) |
| { |
| gimple *g = SSA_NAME_DEF_STMT (op); |
| tree ret; |
| m_loc = gimple_location (g); |
| if (gimple_assign_load_p (g)) |
| { |
| *prec = range_to_prec (op, NULL); |
| if (prec_stored) |
| *prec_stored = (TYPE_UNSIGNED (TREE_TYPE (op)) |
| ? TYPE_PRECISION (TREE_TYPE (op)) |
| : -TYPE_PRECISION (TREE_TYPE (op))); |
| ret = build_fold_addr_expr (gimple_assign_rhs1 (g)); |
| ret = force_gimple_operand_gsi (&m_gsi, ret, true, |
| NULL_TREE, true, GSI_SAME_STMT); |
| } |
| else if (gimple_code (g) == GIMPLE_NOP) |
| { |
| *prec = TYPE_UNSIGNED (TREE_TYPE (op)) ? limb_prec : -limb_prec; |
| if (prec_stored) |
| *prec_stored = *prec; |
| tree var = create_tmp_var (m_limb_type); |
| TREE_ADDRESSABLE (var) = 1; |
| ret = build_fold_addr_expr (var); |
| if (!stmt_ends_bb_p (gsi_stmt (m_gsi))) |
| { |
| tree clobber = build_clobber (m_limb_type, CLOBBER_EOL); |
| g = gimple_build_assign (var, clobber); |
| gsi_insert_after (&m_gsi, g, GSI_SAME_STMT); |
| } |
| } |
| else |
| { |
| gcc_assert (gimple_assign_cast_p (g)); |
| tree rhs1 = gimple_assign_rhs1 (g); |
| bitint_prec_kind kind = bitint_prec_small; |
| gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))); |
| if (TREE_CODE (TREE_TYPE (rhs1)) == BITINT_TYPE) |
| kind = bitint_precision_kind (TREE_TYPE (rhs1)); |
| if (kind >= bitint_prec_large) |
| { |
| tree lhs_type = TREE_TYPE (op); |
| tree rhs_type = TREE_TYPE (rhs1); |
| int prec_stored_val = 0; |
| ret = handle_operand_addr (rhs1, g, &prec_stored_val, prec); |
| if (TYPE_PRECISION (lhs_type) > TYPE_PRECISION (rhs_type)) |
| { |
| if (TYPE_UNSIGNED (lhs_type) |
| && !TYPE_UNSIGNED (rhs_type)) |
| gcc_assert (*prec >= 0 || prec_stored == NULL); |
| } |
| else |
| { |
| if (*prec > 0 && *prec < TYPE_PRECISION (lhs_type)) |
| ; |
| else if (TYPE_UNSIGNED (lhs_type)) |
| { |
| gcc_assert (*prec > 0 |
| || prec_stored_val > 0 |
| || (-prec_stored_val |
| >= TYPE_PRECISION (lhs_type))); |
| *prec = TYPE_PRECISION (lhs_type); |
| } |
| else if (*prec < 0 && -*prec < TYPE_PRECISION (lhs_type)) |
| ; |
| else |
| *prec = -TYPE_PRECISION (lhs_type); |
| } |
| } |
| else |
| { |
| op = rhs1; |
| stmt = g; |
| goto do_int; |
| } |
| } |
| m_loc = loc_save; |
| return ret; |
| } |
| else |
| { |
| int p = var_to_partition (m_map, op); |
| gcc_assert (m_vars[p] != NULL_TREE); |
| *prec = range_to_prec (op, stmt); |
| if (prec_stored) |
| *prec_stored = (TYPE_UNSIGNED (TREE_TYPE (op)) |
| ? TYPE_PRECISION (TREE_TYPE (op)) |
| : -TYPE_PRECISION (TREE_TYPE (op))); |
| return build_fold_addr_expr (m_vars[p]); |
| } |
| case INTEGER_CST: |
| unsigned int min_prec, mp; |
| tree type; |
| w = wi::to_wide (op); |
| if (tree_int_cst_sgn (op) >= 0) |
| { |
| min_prec = wi::min_precision (w, UNSIGNED); |
| *prec = MAX (min_prec, 1); |
| } |
| else |
| { |
| min_prec = wi::min_precision (w, SIGNED); |
| *prec = MIN ((int) -min_prec, -2); |
| } |
| mp = CEIL (min_prec, limb_prec) * limb_prec; |
| if (mp == 0) |
| mp = 1; |
| if (mp >= (unsigned) TYPE_PRECISION (TREE_TYPE (op))) |
| type = TREE_TYPE (op); |
| else |
| type = build_bitint_type (mp, 1); |
| if (TREE_CODE (type) != BITINT_TYPE |
| || bitint_precision_kind (type) == bitint_prec_small) |
| { |
| if (TYPE_PRECISION (type) <= limb_prec) |
| type = m_limb_type; |
| else |
| /* This case is for targets which e.g. have a 64-bit |
| limb but categorize _BitInts up to 128 bits as |
| small. We could use a type of m_limb_type[2] and |
| similar instead to save space. */ |
| type = build_bitint_type (mid_min_prec, 1); |
| } |
| if (prec_stored) |
| { |
| if (tree_int_cst_sgn (op) >= 0) |
| *prec_stored = MAX (TYPE_PRECISION (type), 1); |
| else |
| *prec_stored = MIN ((int) -TYPE_PRECISION (type), -2); |
| } |
| op = tree_output_constant_def (fold_convert (type, op)); |
| return build_fold_addr_expr (op); |
| default: |
| gcc_unreachable (); |
| } |
| } |
| |
| /* Helper function to create a loop before the current location, |
| starting with the sizetype INIT value on the preheader edge. Return |
| the PHI result and set *IDX_NEXT to the SSA_NAME it creates and uses |
| on the latch edge. */ |
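| /* The control flow this creates is roughly (a sketch; the exit |
| condition is emitted later by the caller): |
| |
| preheader: ... |
| header: // single-block loop, also the latch |
| idx = PHI <INIT (preheader), *IDX_NEXT (latch)> |
| ... // m_gsi points here, after the labels |
| if (cond) goto header; // EDGE_TRUE_VALUE back edge |
| else goto exit; // EDGE_FALSE_VALUE */ |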
| |
| tree |
| bitint_large_huge::create_loop (tree init, tree *idx_next) |
| { |
| if (!gsi_end_p (m_gsi)) |
| gsi_prev (&m_gsi); |
| else |
| m_gsi = gsi_last_bb (gsi_bb (m_gsi)); |
| edge e1 = split_block (gsi_bb (m_gsi), gsi_stmt (m_gsi)); |
| edge e2 = split_block (e1->dest, (gimple *) NULL); |
| edge e3 = make_edge (e1->dest, e1->dest, EDGE_TRUE_VALUE); |
| e3->probability = profile_probability::very_unlikely (); |
| e2->flags = EDGE_FALSE_VALUE; |
| e2->probability = e3->probability.invert (); |
| tree idx = make_ssa_name (sizetype); |
| gphi *phi = create_phi_node (idx, e1->dest); |
| add_phi_arg (phi, init, e1, UNKNOWN_LOCATION); |
| *idx_next = make_ssa_name (sizetype); |
| add_phi_arg (phi, *idx_next, e3, UNKNOWN_LOCATION); |
| m_gsi = gsi_after_labels (e1->dest); |
| m_bb = e1->dest; |
| m_preheader_bb = e1->src; |
| class loop *loop = alloc_loop (); |
| loop->header = e1->dest; |
| add_loop (loop, e1->src->loop_father); |
| return idx; |
| } |
| |
| /* Lower large/huge _BitInt statement mergeable or similar STMT which can be |
| lowered using iteration from the least significant limb up to the most |
| significant limb. For large _BitInt it is emitted as straight line code |
| before the current location, for huge _BitInt as a loop handling two limbs |
| at once, followed by handling the remaining limbs in straight line code (at |
| most one full and one partial limb). It can also handle EQ_EXPR/NE_EXPR |
| comparisons; in that case CMP_CODE should be the comparison code and |
| CMP_OP1/CMP_OP2 the comparison operands. */ |
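| /* For example (assuming a 64-bit limb), for huge _BitInt(255): |
| prec = 255, rem = 255 % 128 = 127, end = (255 - 127) / 64 = 2 and |
| cnt = 2 + CEIL (127, 64) = 4; iterations 0 and 1 handle limbs idx |
| and idx + 1 inside the loop, while iterations 2 and 3 handle limb 2 |
| and the partial most significant limb 3 in straight line code. */ |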
| |
| tree |
| bitint_large_huge::lower_mergeable_stmt (gimple *stmt, tree_code &cmp_code, |
| tree cmp_op1, tree cmp_op2) |
| { |
| bool eq_p = cmp_code != ERROR_MARK; |
| tree type; |
| if (eq_p) |
| type = TREE_TYPE (cmp_op1); |
| else |
| type = TREE_TYPE (gimple_assign_lhs (stmt)); |
| gcc_assert (TREE_CODE (type) == BITINT_TYPE); |
| bitint_prec_kind kind = bitint_precision_kind (type); |
| gcc_assert (kind >= bitint_prec_large); |
| gimple *g; |
| tree lhs = gimple_get_lhs (stmt); |
| tree rhs1, lhs_type = lhs ? TREE_TYPE (lhs) : NULL_TREE; |
| if (lhs |
| && TREE_CODE (lhs) == SSA_NAME |
| && TREE_CODE (TREE_TYPE (lhs)) == BITINT_TYPE |
| && bitint_precision_kind (TREE_TYPE (lhs)) >= bitint_prec_large) |
| { |
| int p = var_to_partition (m_map, lhs); |
| gcc_assert (m_vars[p] != NULL_TREE); |
| m_lhs = lhs = m_vars[p]; |
| } |
| unsigned cnt, rem = 0, end = 0, prec = TYPE_PRECISION (type); |
| bool sext = false; |
| tree ext = NULL_TREE, store_operand = NULL_TREE; |
| bool eh = false; |
| basic_block eh_pad = NULL; |
| tree nlhs = NULL_TREE; |
| unsigned HOST_WIDE_INT bo_idx = 0; |
| unsigned HOST_WIDE_INT bo_bit = 0; |
| tree bf_cur = NULL_TREE, bf_next = NULL_TREE; |
| if (gimple_store_p (stmt)) |
| { |
| store_operand = gimple_assign_rhs1 (stmt); |
| eh = stmt_ends_bb_p (stmt); |
| if (eh) |
| { |
| edge e; |
| edge_iterator ei; |
| basic_block bb = gimple_bb (stmt); |
| |
| FOR_EACH_EDGE (e, ei, bb->succs) |
| if (e->flags & EDGE_EH) |
| { |
| eh_pad = e->dest; |
| break; |
| } |
| } |
| if (TREE_CODE (lhs) == COMPONENT_REF |
| && DECL_BIT_FIELD_TYPE (TREE_OPERAND (lhs, 1))) |
| { |
| tree fld = TREE_OPERAND (lhs, 1); |
| gcc_assert (tree_fits_uhwi_p (DECL_FIELD_BIT_OFFSET (fld))); |
| tree repr = DECL_BIT_FIELD_REPRESENTATIVE (fld); |
| poly_int64 bitoffset; |
| poly_uint64 field_offset, repr_offset; |
| if ((tree_to_uhwi (DECL_FIELD_BIT_OFFSET (fld)) % BITS_PER_UNIT) == 0) |
| nlhs = lhs; |
| else |
| { |
| bool var_field_off = false; |
| if (poly_int_tree_p (DECL_FIELD_OFFSET (fld), &field_offset) |
| && poly_int_tree_p (DECL_FIELD_OFFSET (repr), &repr_offset)) |
| bitoffset = (field_offset - repr_offset) * BITS_PER_UNIT; |
| else |
| { |
| bitoffset = 0; |
| var_field_off = true; |
| } |
| bitoffset += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (fld)) |
| - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr))); |
| nlhs = build3 (COMPONENT_REF, TREE_TYPE (repr), |
| TREE_OPERAND (lhs, 0), repr, |
| var_field_off |
| ? TREE_OPERAND (lhs, 2) : NULL_TREE); |
| HOST_WIDE_INT bo = bitoffset.to_constant (); |
| bo_idx = (unsigned HOST_WIDE_INT) bo / limb_prec; |
| bo_bit = (unsigned HOST_WIDE_INT) bo % limb_prec; |
| } |
| } |
| } |
| if ((store_operand |
| && TREE_CODE (store_operand) == SSA_NAME |
| && (m_names == NULL |
| || !bitmap_bit_p (m_names, SSA_NAME_VERSION (store_operand))) |
| && gimple_assign_cast_p (SSA_NAME_DEF_STMT (store_operand))) |
| || gimple_assign_cast_p (stmt)) |
| { |
| rhs1 = gimple_assign_rhs1 (store_operand |
| ? SSA_NAME_DEF_STMT (store_operand) |
| : stmt); |
| /* Optimize mergeable ops ending with widening cast to _BitInt |
| (or followed by a store). We can lower just the limbs of the |
| cast operand and widen afterwards. */ |
| if (TREE_CODE (rhs1) == SSA_NAME |
| && (m_names == NULL |
| || !bitmap_bit_p (m_names, SSA_NAME_VERSION (rhs1))) |
| && TREE_CODE (TREE_TYPE (rhs1)) == BITINT_TYPE |
| && bitint_precision_kind (TREE_TYPE (rhs1)) >= bitint_prec_large |
| && (CEIL ((unsigned) TYPE_PRECISION (TREE_TYPE (rhs1)), |
| limb_prec) < CEIL (prec, limb_prec) |
| || (kind == bitint_prec_huge |
| && TYPE_PRECISION (TREE_TYPE (rhs1)) < prec))) |
| { |
| store_operand = rhs1; |
| prec = TYPE_PRECISION (TREE_TYPE (rhs1)); |
| kind = bitint_precision_kind (TREE_TYPE (rhs1)); |
| if (!TYPE_UNSIGNED (TREE_TYPE (rhs1))) |
| sext = true; |
| } |
| } |
| tree idx = NULL_TREE, idx_first = NULL_TREE, idx_next = NULL_TREE; |
| if (kind == bitint_prec_large) |
| cnt = CEIL (prec, limb_prec); |
| else |
| { |
| rem = (prec % (2 * limb_prec)); |
| end = (prec - rem) / limb_prec; |
| cnt = 2 + CEIL (rem, limb_prec); |
| idx = idx_first = create_loop (size_zero_node, &idx_next); |
| } |
| |
| basic_block edge_bb = NULL; |
| if (eq_p) |
| { |
| gimple_stmt_iterator gsi = gsi_for_stmt (stmt); |
| gsi_prev (&gsi); |
| edge e = split_block (gsi_bb (gsi), gsi_stmt (gsi)); |
| edge_bb = e->src; |
| if (kind == bitint_prec_large) |
| m_gsi = gsi_end_bb (edge_bb); |
| } |
| else |
| m_after_stmt = stmt; |
| if (kind != bitint_prec_large) |
| m_upwards_2limb = end; |
| m_upwards = true; |
| |
| bool separate_ext |
| = (prec != (unsigned) TYPE_PRECISION (type) |
| && (CEIL ((unsigned) TYPE_PRECISION (type), limb_prec) |
| > CEIL (prec, limb_prec))); |
| |
| for (unsigned i = 0; i < cnt; i++) |
| { |
| m_data_cnt = 0; |
| if (kind == bitint_prec_large) |
| idx = size_int (i); |
| else if (i >= 2) |
| idx = size_int (end + (i > 2)); |
| if (eq_p) |
| { |
| rhs1 = handle_operand (cmp_op1, idx); |
| tree rhs2 = handle_operand (cmp_op2, idx); |
| g = gimple_build_cond (NE_EXPR, rhs1, rhs2, NULL_TREE, NULL_TREE); |
| insert_before (g); |
| edge e1 = split_block (gsi_bb (m_gsi), g); |
| e1->flags = EDGE_FALSE_VALUE; |
| edge e2 = make_edge (e1->src, gimple_bb (stmt), EDGE_TRUE_VALUE); |
| e1->probability = profile_probability::unlikely (); |
| e2->probability = e1->probability.invert (); |
| if (i == 0) |
| set_immediate_dominator (CDI_DOMINATORS, e2->dest, e2->src); |
| m_gsi = gsi_after_labels (e1->dest); |
| } |
| else |
| { |
| if (store_operand) |
| rhs1 = handle_operand (store_operand, idx); |
| else |
| rhs1 = handle_stmt (stmt, idx); |
| if (!useless_type_conversion_p (m_limb_type, TREE_TYPE (rhs1))) |
| rhs1 = add_cast (m_limb_type, rhs1); |
| if (sext && i == cnt - 1) |
| ext = rhs1; |
| tree nidx = idx; |
| if (bo_idx) |
| { |
| if (tree_fits_uhwi_p (idx)) |
| nidx = size_int (tree_to_uhwi (idx) + bo_idx); |
| else |
| { |
| nidx = make_ssa_name (sizetype); |
| g = gimple_build_assign (nidx, PLUS_EXPR, idx, |
| size_int (bo_idx)); |
| insert_before (g); |
| } |
| } |
| bool done = false; |
| basic_block new_bb = NULL; |
| /* Handle stores into bit-fields. */ |
| if (bo_bit) |
| { |
| if (i == 0) |
| { |
| edge e2 = NULL; |
| if (kind != bitint_prec_large) |
| { |
| prepare_data_in_out (build_zero_cst (m_limb_type), |
| idx, &bf_next); |
| bf_next = m_data.pop (); |
| bf_cur = m_data.pop (); |
| g = gimple_build_cond (EQ_EXPR, idx, size_zero_node, |
| NULL_TREE, NULL_TREE); |
| edge edge_true; |
| if_then_else (g, profile_probability::unlikely (), |
| edge_true, e2); |
| new_bb = e2->dest; |
| } |
| tree ftype |
| = build_nonstandard_integer_type (limb_prec - bo_bit, 1); |
| tree bfr = build3 (BIT_FIELD_REF, ftype, unshare_expr (nlhs), |
| bitsize_int (limb_prec - bo_bit), |
| bitsize_int (bo_idx * limb_prec + bo_bit)); |
| tree t = add_cast (ftype, rhs1); |
| g = gimple_build_assign (bfr, t); |
| insert_before (g); |
| if (eh) |
| { |
| maybe_duplicate_eh_stmt (g, stmt); |
| if (eh_pad) |
| { |
| edge e = split_block (gsi_bb (m_gsi), g); |
| m_gsi = gsi_after_labels (e->dest); |
| make_edge (e->src, eh_pad, EDGE_EH)->probability |
| = profile_probability::very_unlikely (); |
| } |
| } |
| if (kind == bitint_prec_large) |
| { |
| bf_cur = rhs1; |
| done = true; |
| } |
| else if (e2) |
| m_gsi = gsi_after_labels (e2->src); |
| } |
| if (!done) |
| { |
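| /* Compose the limb to store from the previous limb's high bits |
| (BF_CUR shifted right by limb_prec - bo_bit) and the current |
| limb's low bits (RHS1 shifted left by bo_bit). */ |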
| tree t1 = make_ssa_name (m_limb_type); |
| tree t2 = make_ssa_name (m_limb_type); |
| tree t3 = make_ssa_name (m_limb_type); |
| g = gimple_build_assign (t1, RSHIFT_EXPR, bf_cur, |
| build_int_cst (unsigned_type_node, |
| limb_prec - bo_bit)); |
| insert_before (g); |
| g = gimple_build_assign (t2, LSHIFT_EXPR, rhs1, |
| build_int_cst (unsigned_type_node, |
| bo_bit)); |
| insert_before (g); |
| bf_cur = rhs1; |
| g = gimple_build_assign (t3, BIT_IOR_EXPR, t1, t2); |
| insert_before (g); |
| rhs1 = t3; |
| if (bf_next && i == 1) |
| { |
| g = gimple_build_assign (bf_next, bf_cur); |
| insert_before (g); |
| } |
| } |
| } |
| if (!done) |
| { |
| /* Handle bit-field access to partial last limb if needed. */ |
| if (nlhs |
| && i == cnt - 1 |
| && !separate_ext |
| && tree_fits_uhwi_p (idx)) |
| { |
| unsigned int tprec = TYPE_PRECISION (type); |
| unsigned int rprec = tprec % limb_prec; |
| if (rprec + bo_bit < (unsigned) limb_prec) |
| { |
| tree ftype |
| = build_nonstandard_integer_type (rprec + bo_bit, 1); |
| tree bfr = build3 (BIT_FIELD_REF, ftype, |
| unshare_expr (nlhs), |
| bitsize_int (rprec + bo_bit), |
| bitsize_int ((bo_idx |
| + tprec / limb_prec) |
| * limb_prec)); |
| tree t = add_cast (ftype, rhs1); |
| g = gimple_build_assign (bfr, t); |
| done = true; |
| bf_cur = NULL_TREE; |
| } |
| else if (rprec + bo_bit == (unsigned) limb_prec) |
| bf_cur = NULL_TREE; |
| } |
| /* Otherwise, a store to any other lhs. */ |
| if (!done) |
| { |
| tree l = limb_access (lhs_type, nlhs ? nlhs : lhs, |
| nidx, true); |
| g = gimple_build_assign (l, rhs1); |
| } |
| insert_before (g); |
| if (eh) |
| { |
| maybe_duplicate_eh_stmt (g, stmt); |
| if (eh_pad) |
| { |
| edge e = split_block (gsi_bb (m_gsi), g); |
| m_gsi = gsi_after_labels (e->dest); |
| make_edge (e->src, eh_pad, EDGE_EH)->probability |
| = profile_probability::very_unlikely (); |
| } |
| } |
| if (new_bb) |
| m_gsi = gsi_after_labels (new_bb); |
| } |
| } |
| m_first = false; |
| if (kind == bitint_prec_huge && i <= 1) |
| { |
| if (i == 0) |
| { |
| idx = make_ssa_name (sizetype); |
| g = gimple_build_assign (idx, PLUS_EXPR, idx_first, |
| size_one_node); |
| insert_before (g); |
| } |
| else |
| { |
| g = gimple_build_assign (idx_next, PLUS_EXPR, idx_first, |
| size_int (2)); |
| insert_before (g); |
| g = gimple_build_cond (NE_EXPR, idx_next, size_int (end), |
| NULL_TREE, NULL_TREE); |
| insert_before (g); |
| if (eq_p) |
| m_gsi = gsi_after_labels (edge_bb); |
| else |
| m_gsi = gsi_for_stmt (stmt); |
| } |
| } |
| } |
| |
| if (separate_ext) |
| { |
| if (sext) |
| { |
| ext = add_cast (signed_type_for (m_limb_type), ext); |
| tree lpm1 = build_int_cst (unsigned_type_node, |
| limb_prec - 1); |
| tree n = make_ssa_name (TREE_TYPE (ext)); |
| g = gimple_build_assign (n, RSHIFT_EXPR, ext, lpm1); |
| insert_before (g); |
| ext = add_cast (m_limb_type, n); |
| } |
| else |
| ext = build_zero_cst (m_limb_type); |
| kind = bitint_precision_kind (type); |
| unsigned start = CEIL (prec, limb_prec); |
| prec = TYPE_PRECISION (type); |
| idx = idx_first = idx_next = NULL_TREE; |
| if (prec <= (start + 2 + (bo_bit != 0)) * limb_prec) |
| kind = bitint_prec_large; |
| if (kind == bitint_prec_large) |
| cnt = CEIL (prec, limb_prec) - start; |
| else |
| { |
| rem = prec % limb_prec; |
| end = (prec - rem) / limb_prec; |
| cnt = (bo_bit != 0) + 1 + (rem != 0); |
| } |
| for (unsigned i = 0; i < cnt; i++) |
| { |
| if (kind == bitint_prec_large || (i == 0 && bo_bit != 0)) |
| idx = size_int (start + i); |
| else if (i == cnt - 1 && (rem != 0)) |
| idx = size_int (end); |
| else if (i == (bo_bit != 0)) |
| idx = create_loop (size_int (start + i), &idx_next); |
| rhs1 = ext; |
| if (bf_cur != NULL_TREE && bf_cur != ext) |
| { |
| tree t1 = make_ssa_name (m_limb_type); |
| g = gimple_build_assign (t1, RSHIFT_EXPR, bf_cur, |
| build_int_cst (unsigned_type_node, |
| limb_prec - bo_bit)); |
| insert_before (g); |
| if (integer_zerop (ext)) |
| rhs1 = t1; |
| else |
| { |
| tree t2 = make_ssa_name (m_limb_type); |
| rhs1 = make_ssa_name (m_limb_type); |
| g = gimple_build_assign (t2, LSHIFT_EXPR, ext, |
| build_int_cst (unsigned_type_node, |
| bo_bit)); |
| insert_before (g); |
| g = gimple_build_assign (rhs1, BIT_IOR_EXPR, t1, t2); |
| insert_before (g); |
| } |
| bf_cur = ext; |
| } |
| tree nidx = idx; |
| if (bo_idx) |
| { |
| if (tree_fits_uhwi_p (idx)) |
| nidx = size_int (tree_to_uhwi (idx) + bo_idx); |
| else |
| { |
| nidx = make_ssa_name (sizetype); |
| g = gimple_build_assign (nidx, PLUS_EXPR, idx, |
| size_int (bo_idx)); |
| insert_before (g); |
| } |
| } |
| bool done = false; |
| /* Handle bit-field access to partial last limb if needed. */ |
| if (nlhs && i == cnt - 1) |
| { |
| unsigned int tprec = TYPE_PRECISION (type); |
| unsigned int rprec = tprec % limb_prec; |
| if (rprec + bo_bit < (unsigned) limb_prec) |
| { |
| tree ftype |
| = build_nonstandard_integer_type (rprec + bo_bit, 1); |
| tree bfr = build3 (BIT_FIELD_REF, ftype, |
| unshare_expr (nlhs), |
| bitsize_int (rprec + bo_bit), |
| bitsize_int ((bo_idx + tprec / limb_prec) |
| * limb_prec)); |
| tree t = add_cast (ftype, rhs1); |
| g = gimple_build_assign (bfr, t); |
| done = true; |
| bf_cur = NULL_TREE; |
| } |
| else if (rprec + bo_bit == (unsigned) limb_prec) |
| bf_cur = NULL_TREE; |
| } |
| /* Otherwise, a store to any other lhs. */ |
| if (!done) |
| { |
| tree l = limb_access (lhs_type, nlhs ? nlhs : lhs, nidx, true); |
| g = gimple_build_assign (l, rhs1); |
| } |
| insert_before (g); |
| if (eh) |
| { |
| maybe_duplicate_eh_stmt (g, stmt); |
| if (eh_pad) |
| { |
| edge e = split_block (gsi_bb (m_gsi), g); |
| m_gsi = gsi_after_labels (e->dest); |
| make_edge (e->src, eh_pad, EDGE_EH)->probability |
| = profile_probability::very_unlikely (); |
| } |
| } |
| if (kind == bitint_prec_huge && i == (bo_bit != 0)) |
| { |
| g = gimple_build_assign (idx_next, PLUS_EXPR, idx, |
| size_one_node); |
| insert_before (g); |
| g = gimple_build_cond (NE_EXPR, idx_next, size_int (end), |
| NULL_TREE, NULL_TREE); |
| insert_before (g); |
| m_gsi = gsi_for_stmt (stmt); |
| } |
| } |
| } |
| if (bf_cur != NULL_TREE) |
| { |
| unsigned int tprec = TYPE_PRECISION (type); |
| unsigned int rprec = tprec % limb_prec; |
| tree ftype = build_nonstandard_integer_type (rprec + bo_bit, 1); |
| tree bfr = build3 (BIT_FIELD_REF, ftype, unshare_expr (nlhs), |
| bitsize_int (rprec + bo_bit), |
| bitsize_int ((bo_idx + tprec / limb_prec) |
| * limb_prec)); |
| rhs1 = bf_cur; |
| if (bf_cur != ext) |
| { |
| rhs1 = make_ssa_name (TREE_TYPE (rhs1)); |
| g = gimple_build_assign (rhs1, RSHIFT_EXPR, bf_cur, |
| build_int_cst (unsigned_type_node, |
| limb_prec - bo_bit)); |
| insert_before (g); |
| } |
| rhs1 = add_cast (ftype, rhs1); |
| g = gimple_build_assign (bfr, rhs1); |
| insert_before (g); |
| if (eh) |
| { |
| maybe_duplicate_eh_stmt (g, stmt); |
| if (eh_pad) |
| { |
| edge e = split_block (gsi_bb (m_gsi), g); |
| m_gsi = gsi_after_labels (e->dest); |
| make_edge (e->src, eh_pad, EDGE_EH)->probability |
| = profile_probability::very_unlikely (); |
| } |
| } |
| } |
| |
| if (gimple_store_p (stmt)) |
| { |
| unlink_stmt_vdef (stmt); |
| release_ssa_name (gimple_vdef (stmt)); |
| gsi_remove (&m_gsi, true); |
| } |
| if (eq_p) |
| { |
| lhs = make_ssa_name (boolean_type_node); |
| basic_block bb = gimple_bb (stmt); |
| gphi *phi = create_phi_node (lhs, bb); |
| edge e = find_edge (gsi_bb (m_gsi), bb); |
| unsigned int n = EDGE_COUNT (bb->preds); |
| for (unsigned int i = 0; i < n; i++) |
| { |
| edge e2 = EDGE_PRED (bb, i); |
| add_phi_arg (phi, e == e2 ? boolean_true_node : boolean_false_node, |
| e2, UNKNOWN_LOCATION); |
| } |
| cmp_code = cmp_code == EQ_EXPR ? NE_EXPR : EQ_EXPR; |
| return lhs; |
| } |
| else |
| return NULL_TREE; |
| } |
| |
| /* Handle a large/huge _BitInt comparison statement STMT other than |
| EQ_EXPR/NE_EXPR. The meaning of CMP_CODE, CMP_OP1 and CMP_OP2 is the |
| same as in lower_mergeable_stmt. The {GT,GE,LT,LE}_EXPR comparisons are |
| lowered by iteration from the most significant limb downwards to |
| the least significant one, for large _BitInt in straight line code, |
| otherwise with the most significant limb handled in |
| straight line code followed by a loop handling one limb at a time. |
| Comparisons with unsigned huge _BitInt with precisions which are |
| multiples of the limb precision can use just the loop and don't need to |
| handle the most significant limb before the loop. The loop or straight |
| line code jumps to the final basic block if a particular pair of limbs |
| is not equal. */ |
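| /* A minimal C sketch of the lowering for the unsigned case (assuming |
| a 64-bit limb and _BitInt(256) stored as four limbs, least significant |
| first; cmp_lt is a hypothetical helper, not the emitted GIMPLE): |
| |
| int cmp_lt (const unsigned long long a[4], const unsigned long long b[4]) |
| { |
| for (int i = 3; i >= 0; i--) // most significant limb first |
| { |
| if (a[i] > b[i]) return 0; // edges[2 * i]: LT_EXPR is false |
| if (a[i] < b[i]) return 1; // edges[2 * i + 1]: LT_EXPR is true |
| } |
| return 0; // all limbs equal: LT_EXPR is false |
| } */ |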
| |
| tree |
| bitint_large_huge::lower_comparison_stmt (gimple *stmt, tree_code &cmp_code, |
| tree cmp_op1, tree cmp_op2) |
| { |
| tree type = TREE_TYPE (cmp_op1); |
| gcc_assert (TREE_CODE (type) == BITINT_TYPE); |
| bitint_prec_kind kind = bitint_precision_kind (type); |
| gcc_assert (kind >= bitint_prec_large); |
| gimple *g; |
| if (!TYPE_UNSIGNED (type) |
| && integer_zerop (cmp_op2) |
| && (cmp_code == GE_EXPR || cmp_code == LT_EXPR)) |
| { |
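| /* A signed comparison against zero using GE_EXPR or LT_EXPR depends |
| only on the sign bit, which can be tested on the most significant |
| limb alone. */ |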
| unsigned end = CEIL ((unsigned) TYPE_PRECISION (type), limb_prec) - 1; |
| tree idx = size_int (end); |
| m_data_cnt = 0; |
| tree rhs1 = handle_operand (cmp_op1, idx); |
| if (TYPE_UNSIGNED (TREE_TYPE (rhs1))) |
| { |
| tree stype = signed_type_for (TREE_TYPE (rhs1)); |
| rhs1 = add_cast (stype, rhs1); |
| } |
| tree lhs = make_ssa_name (boolean_type_node); |
| g = gimple_build_assign (lhs, cmp_code, rhs1, |
| build_zero_cst (TREE_TYPE (rhs1))); |
| insert_before (g); |
| cmp_code = NE_EXPR; |
| return lhs; |
| } |
| |
| unsigned cnt, rem = 0, end = 0; |
| tree idx = NULL_TREE, idx_next = NULL_TREE; |
| if (kind == bitint_prec_large) |
| cnt = CEIL ((unsigned) TYPE_PRECISION (type), limb_prec); |
| else |
| { |
| rem = ((unsigned) TYPE_PRECISION (type) % limb_prec); |
| if (rem == 0 && !TYPE_UNSIGNED (type)) |
| rem = limb_prec; |
| end = ((unsigned) TYPE_PRECISION (type) - rem) / limb_prec; |
| cnt = 1 + (rem != 0); |
| } |
| |
| basic_block edge_bb = NULL; |
| gimple_stmt_iterator gsi = gsi_for_stmt (stmt); |
| gsi_prev (&gsi); |
| edge e = split_block (gsi_bb (gsi), gsi_stmt (gsi)); |
| edge_bb = e->src; |
| m_gsi = gsi_end_bb (edge_bb); |
| |
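| /* Two outgoing edges are recorded per compared limb: one taken when |
| the limbs compare greater, one when they compare less. */ |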
| edge *edges = XALLOCAVEC (edge, cnt * 2); |
| for (unsigned i = 0; i < cnt; i++) |
| { |
| m_data_cnt = 0; |
| if (kind == bitint_prec_large) |
| idx = size_int (cnt - i - 1); |
| else if (i == cnt - 1) |
| idx = create_loop (size_int (end - 1), &idx_next); |
| else |
| idx = size_int (end); |
| tree rhs1 = handle_operand (cmp_op1, idx); |
| tree rhs2 = handle_operand (cmp_op2, idx); |
| if (i == 0 |
| && !TYPE_UNSIGNED (type) |
| && TYPE_UNSIGNED (TREE_TYPE (rhs1))) |
| { |
| tree stype = signed_type_for (TREE_TYPE (rhs1)); |
| rhs1 = add_cast (stype, rhs1); |
| rhs2 = add_cast (stype, rhs2); |
| } |
| g = gimple_build_cond (GT_EXPR, rhs1, rhs2, NULL_TREE, NULL_TREE); |
| insert_before (g); |
| edge e1 = split_block (gsi_bb (m_gsi), g); |
| e1->flags = EDGE_FALSE_VALUE; |
| edge e2 = make_edge (e1->src, gimple_bb (stmt), EDGE_TRUE_VALUE); |
| e1->probability = profile_probability::likely (); |
| e2->probability = e1->probability.invert (); |
| if (i == 0) |
| set_immediate_dominator (CDI_DOMINATORS, e2->dest, e2->src); |
| m_gsi = gsi_after_labels (e1->dest); |
| edges[2 * i] = e2; |
| g = gimple_build_cond (LT_EXPR, rhs1, rhs2, NULL_TREE, NULL_TREE); |
| insert_before (g); |
| e1 = split_block (gsi_bb (m_gsi), g); |
| e1->flags = EDGE_FALSE_VALUE; |
| e2 = make_edge (e1->src, gimple_bb (stmt), EDGE_TRUE_VALUE); |
| e1->probability = profile_probability::unlikely (); |
| e2->probability = e1->probability.invert (); |
| m_gsi = gsi_after_labels (e1->dest); |
| edges[2 * i + 1] = e2; |
| m_first = false; |
| if (kind == bitint_prec_huge && i == cnt - 1) |
| { |
| g = gimple_build_assign (idx_next, PLUS_EXPR, idx, size_int (-1)); |
| insert_before (g); |
| g = gimple_build_cond (NE_EXPR, idx, size_zero_node, |
| NULL_TREE, NULL_TREE); |
| insert_before (g); |
| edge true_edge, false_edge; |
| extract_true_false_edges_from_block (gsi_bb (m_gsi), |
| &true_edge, &false_edge); |
| m_gsi = gsi_after_labels (false_edge->dest); |
| } |
| } |
| |
| tree lhs = make_ssa_name (boolean_type_node); |
| basic_block bb = gimple_bb (stmt); |
| gphi *phi = create_phi_node (lhs, bb); |
| for (unsigned int i = 0; i < cnt * 2; i++) |
| { |
| tree val = ((cmp_code == GT_EXPR || cmp_code == GE_EXPR) |
| ^ (i & 1)) ? boolean_true_node : boolean_false_node; |
| add_phi_arg (phi, val, edges[i], UNKNOWN_LOCATION); |
| } |
| add_phi_arg (phi, (cmp_code == GE_EXPR || cmp_code == LE_EXPR) |
| ? boolean_true_node : boolean_false_node, |
| find_edge (gsi_bb (m_gsi), bb), UNKNOWN_LOCATION); |
| cmp_code = NE_EXPR; |
| return lhs; |
| } |
| |
| /* Lower large/huge _BitInt left and right shift except for left |
| shift by a constant < limb_prec. */ |
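| /* E.g. (assuming a 64-bit limb; a sketch of the arithmetic, not the |
| emitted GIMPLE) a right shift of an unsigned _BitInt by 70 decomposes |
| into a whole-limb offset of 70 / 64 = 1 and a bit shift of 70 % 64 = 6, |
| so with limbs stored least significant first each result limb is |
| res[i] = (op[i + 1] >> 6) | (op[i + 2] << 58) |
| while both indices are in range. */ |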
| |
| void |
| bitint_large_huge::lower_shift_stmt (tree o
|