| /* Intrinsic translation |
| Copyright (C) 2002-2022 Free Software Foundation, Inc. |
| Contributed by Paul Brook <paul@nowt.org> |
| and Steven Bosscher <s.bosscher@student.tudelft.nl> |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it under |
| the terms of the GNU General Public License as published by the Free |
| Software Foundation; either version 3, or (at your option) any later |
| version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
| WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| /* trans-intrinsic.cc-- generate GENERIC trees for calls to intrinsics. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "memmodel.h" |
| #include "tm.h" /* For UNITS_PER_WORD. */ |
| #include "tree.h" |
| #include "gfortran.h" |
| #include "trans.h" |
| #include "stringpool.h" |
| #include "fold-const.h" |
| #include "internal-fn.h" |
| #include "tree-nested.h" |
| #include "stor-layout.h" |
| #include "toplev.h" /* For rest_of_decl_compilation. */ |
| #include "arith.h" |
| #include "trans-const.h" |
| #include "trans-types.h" |
| #include "trans-array.h" |
| #include "dependency.h" /* For CAF array alias analysis. */ |
| #include "attribs.h" |
| #include "realmpfr.h" |
| |
| /* Only for gfc_trans_assign and gfc_trans_pointer_assign. */ |
| |
/* This maps Fortran intrinsic math functions to external library or GCC
   builtin functions.  NOTE: the field order below must match the
   positional initializers in the DEFINE_MATH_BUILTIN / LIB_FUNCTION /
   OTHER_BUILTIN macros that build gfc_intrinsic_map.  */
typedef struct GTY(()) gfc_intrinsic_map_t {
  /* The explicit enum is required to work around inadequacies in the
     garbage collection/gengtype parsing mechanism.  */
  enum gfc_isym_id id;

  /* Enum value from the "language-independent", aka C-centric, part
     of gcc, or END_BUILTINS if no such value is set.  */
  enum built_in_function float_built_in;
  enum built_in_function double_built_in;
  enum built_in_function long_double_built_in;
  enum built_in_function complex_float_built_in;
  enum built_in_function complex_double_built_in;
  enum built_in_function complex_long_double_built_in;

  /* True if the naming pattern is to prepend "c" for complex and
     append "f" for kind=4.  False if the naming pattern is to
     prepend "_gfortran_" and append "[rc](4|8|10|16)".  */
  bool libm_name;

  /* True if a complex version of the function exists.  */
  bool complex_available;

  /* True if the function should be marked const.  */
  bool is_constant;

  /* The base library name of this function.  */
  const char *name;

  /* Cache decls created for the various operand types.  Lazily filled
     in by gfc_get_intrinsic_lib_fndecl / gfc_build_intrinsic_lib_fndecls;
     NULL_TREE until first use.  */
  tree real4_decl;
  tree real8_decl;
  tree real10_decl;
  tree real16_decl;
  tree complex4_decl;
  tree complex8_decl;
  tree complex10_decl;
  tree complex16_decl;
}
gfc_intrinsic_map_t;
| |
/* ??? The NARGS==1 hack here is based on the fact that (c99 at least)
   defines complex variants of all of the entries in mathbuiltins.def
   except for atan2.  */

/* Entry with GCC real builtins only; no complex variant.  */
#define DEFINE_MATH_BUILTIN(ID, NAME, ARGTYPE) \
  { GFC_ISYM_ ## ID, BUILT_IN_ ## ID ## F, BUILT_IN_ ## ID, \
    BUILT_IN_ ## ID ## L, END_BUILTINS, END_BUILTINS, END_BUILTINS, \
    true, false, true, NAME, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE, \
    NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE},

/* Entry with both real and complex ("c"-prefixed) GCC builtins.  */
#define DEFINE_MATH_BUILTIN_C(ID, NAME, ARGTYPE) \
  { GFC_ISYM_ ## ID, BUILT_IN_ ## ID ## F, BUILT_IN_ ## ID, \
    BUILT_IN_ ## ID ## L, BUILT_IN_C ## ID ## F, BUILT_IN_C ## ID, \
    BUILT_IN_C ## ID ## L, true, true, true, NAME, NULL_TREE, NULL_TREE, \
    NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE},

/* Entry resolved to a libgfortran ("_gfortran_"-prefixed) function.  */
#define LIB_FUNCTION(ID, NAME, HAVE_COMPLEX) \
  { GFC_ISYM_ ## ID, END_BUILTINS, END_BUILTINS, END_BUILTINS, \
    END_BUILTINS, END_BUILTINS, END_BUILTINS, \
    false, HAVE_COMPLEX, true, NAME, NULL_TREE, NULL_TREE, NULL_TREE, \
    NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE }

/* Builtin with no corresponding GFC_ISYM id; looked up only through its
   double_built_in value (see builtin_decl_for_precision).  */
#define OTHER_BUILTIN(ID, NAME, TYPE, CONST) \
  { GFC_ISYM_NONE, BUILT_IN_ ## ID ## F, BUILT_IN_ ## ID, \
    BUILT_IN_ ## ID ## L, END_BUILTINS, END_BUILTINS, END_BUILTINS, \
    true, false, CONST, NAME, NULL_TREE, NULL_TREE, \
    NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE},

static GTY(()) gfc_intrinsic_map_t gfc_intrinsic_map[] =
{
  /* Functions built into gcc itself (DEFINE_MATH_BUILTIN and
     DEFINE_MATH_BUILTIN_C), then the built-ins that don't correspond
     to any GFC_ISYM id directly, which use the OTHER_BUILTIN macro.  */
#include "mathbuiltins.def"

  /* Functions in libgfortran.  */
  LIB_FUNCTION (ERFC_SCALED, "erfc_scaled", false),
  LIB_FUNCTION (SIND, "sind", false),
  LIB_FUNCTION (COSD, "cosd", false),
  LIB_FUNCTION (TAND, "tand", false),

  /* End the list.  Loops over the map stop on id == GFC_ISYM_NONE with
     double_built_in == END_BUILTINS.  */
  LIB_FUNCTION (NONE, NULL, false)

};
#undef OTHER_BUILTIN
#undef LIB_FUNCTION
#undef DEFINE_MATH_BUILTIN
#undef DEFINE_MATH_BUILTIN_C
| |
| |
/* Rounding modes used by build_fix_expr and friends: round to nearest
   (ties away from zero), truncate toward zero, ceiling, floor.  */
enum rounding_mode { RND_ROUND, RND_TRUNC, RND_CEIL, RND_FLOOR };
| |
| |
| /* Find the correct variant of a given builtin from its argument. */ |
| static tree |
| builtin_decl_for_precision (enum built_in_function base_built_in, |
| int precision) |
| { |
| enum built_in_function i = END_BUILTINS; |
| |
| gfc_intrinsic_map_t *m; |
| for (m = gfc_intrinsic_map; m->double_built_in != base_built_in ; m++) |
| ; |
| |
| if (precision == TYPE_PRECISION (float_type_node)) |
| i = m->float_built_in; |
| else if (precision == TYPE_PRECISION (double_type_node)) |
| i = m->double_built_in; |
| else if (precision == TYPE_PRECISION (long_double_type_node) |
| && (!gfc_real16_is_float128 |
| || long_double_type_node != gfc_float128_type_node)) |
| i = m->long_double_built_in; |
| else if (precision == TYPE_PRECISION (gfc_float128_type_node)) |
| { |
| /* Special treatment, because it is not exactly a built-in, but |
| a library function. */ |
| return m->real16_decl; |
| } |
| |
| return (i == END_BUILTINS ? NULL_TREE : builtin_decl_explicit (i)); |
| } |
| |
| |
| tree |
| gfc_builtin_decl_for_float_kind (enum built_in_function double_built_in, |
| int kind) |
| { |
| int i = gfc_validate_kind (BT_REAL, kind, false); |
| |
| if (gfc_real_kinds[i].c_float128) |
| { |
| /* For _Float128, the story is a bit different, because we return |
| a decl to a library function rather than a built-in. */ |
| gfc_intrinsic_map_t *m; |
| for (m = gfc_intrinsic_map; m->double_built_in != double_built_in ; m++) |
| ; |
| |
| return m->real16_decl; |
| } |
| |
| return builtin_decl_for_precision (double_built_in, |
| gfc_real_kinds[i].mode_precision); |
| } |
| |
| |
/* Evaluate the arguments to an intrinsic function.  The value
   of NARGS may be less than the actual number of arguments in EXPR
   to allow optional "KIND" arguments that are not included in the
   generated code to be ignored.  Results are stored in ARGARRAY;
   character arguments occupy two consecutive slots (hidden string
   length first, then the data expression).  */

static void
gfc_conv_intrinsic_function_args (gfc_se *se, gfc_expr *expr,
				  tree *argarray, int nargs)
{
  gfc_actual_arglist *actual;
  gfc_expr *e;
  gfc_intrinsic_arg *formal;
  gfc_se argse;
  int curr_arg;

  formal = expr->value.function.isym->formal;
  actual = expr->value.function.actual;

  /* Walk actual and formal lists in lock-step; the formal list may be
     shorter, hence the NULL guard in the increment expression.  */
  for (curr_arg = 0; curr_arg < nargs; curr_arg++,
       actual = actual->next,
       formal = formal ? formal->next : NULL)
    {
      gcc_assert (actual);
      e = actual->expr;
      /* Skip omitted optional arguments.  The decrement cancels the
	 loop's increment so no ARGARRAY slot is consumed.  */
      if (!e)
	{
	  --curr_arg;
	  continue;
	}

      /* Evaluate the parameter.  This will substitute scalarized
         references automatically.  */
      gfc_init_se (&argse, se);

      if (e->ts.type == BT_CHARACTER)
	{
	  /* Character arguments contribute two slots: string length
	     here, then the string itself at the end of the loop body.  */
	  gfc_conv_expr (&argse, e);
	  gfc_conv_string_parameter (&argse);
	  argarray[curr_arg++] = argse.string_length;
	  gcc_assert (curr_arg < nargs);
	}
      else
	gfc_conv_expr_val (&argse, e);

      /* If an optional argument is itself an optional dummy argument,
	 check its presence and substitute a null if absent.  */
      if (e->expr_type == EXPR_VARIABLE
	    && e->symtree->n.sym->attr.optional
	    && formal
	    && formal->optional)
	gfc_conv_missing_dummy (&argse, e, formal->ts, 0);

      gfc_add_block_to_block (&se->pre, &argse.pre);
      gfc_add_block_to_block (&se->post, &argse.post);
      argarray[curr_arg] = argse.expr;
    }
}
| |
| /* Count the number of actual arguments to the intrinsic function EXPR |
| including any "hidden" string length arguments. */ |
| |
| static unsigned int |
| gfc_intrinsic_argument_list_length (gfc_expr *expr) |
| { |
| int n = 0; |
| gfc_actual_arglist *actual; |
| |
| for (actual = expr->value.function.actual; actual; actual = actual->next) |
| { |
| if (!actual->expr) |
| continue; |
| |
| if (actual->expr->ts.type == BT_CHARACTER) |
| n += 2; |
| else |
| n++; |
| } |
| |
| return n; |
| } |
| |
| |
/* Conversions between different types are output by the frontend as
   intrinsic functions.  We implement these directly with inline code.
   Character-kind conversions are the exception and call into
   libgfortran.  */

static void
gfc_conv_intrinsic_conversion (gfc_se * se, gfc_expr * expr)
{
  tree type;
  tree *args;
  int nargs;

  nargs = gfc_intrinsic_argument_list_length (expr);
  args = XALLOCAVEC (tree, nargs);

  /* Evaluate all the arguments passed.  Whilst we're only interested in the
     first one here, there are other parts of the front-end that assume this
     and will trigger an ICE if it's not the case.  */
  type = gfc_typenode_for_spec (&expr->ts);
  gcc_assert (expr->value.function.actual->expr);
  gfc_conv_intrinsic_function_args (se, expr, args, nargs);

  /* Conversion between character kinds involves a call to a library
     function.  */
  if (expr->ts.type == BT_CHARACTER)
    {
      tree fndecl, var, addr, tmp;

      /* Only kind 1 <-> kind 4 conversions are supported.  */
      if (expr->ts.kind == 1
	  && expr->value.function.actual->expr->ts.kind == 4)
	fndecl = gfor_fndecl_convert_char4_to_char1;
      else if (expr->ts.kind == 4
	       && expr->value.function.actual->expr->ts.kind == 1)
	fndecl = gfor_fndecl_convert_char1_to_char4;
      else
	gcc_unreachable ();

      /* Create the variable storing the converted value.  */
      type = gfc_get_pchar_type (expr->ts.kind);
      var = gfc_create_var (type, "str");
      addr = gfc_build_addr_expr (build_pointer_type (type), var);

      /* Call the library function that will perform the conversion.
	 args[0] is the hidden string length, args[1] the source string
	 (see gfc_conv_intrinsic_function_args).  */
      gcc_assert (nargs >= 2);
      tmp = build_call_expr_loc (input_location,
			     fndecl, 3, addr, args[0], args[1]);
      gfc_add_expr_to_block (&se->pre, tmp);

      /* Free the temporary afterwards.  The library function allocated
	 the result buffer, so we own it here.  */
      tmp = gfc_call_free (var);
      gfc_add_expr_to_block (&se->post, tmp);

      se->expr = var;
      se->string_length = args[0];

      return;
    }

  /* Conversion from complex to non-complex involves taking the real
     component of the value.  */
  if (TREE_CODE (TREE_TYPE (args[0])) == COMPLEX_TYPE
      && expr->ts.type != BT_COMPLEX)
    {
      tree artype;

      artype = TREE_TYPE (TREE_TYPE (args[0]));
      args[0] = fold_build1_loc (input_location, REALPART_EXPR, artype,
				 args[0]);
    }

  se->expr = convert (type, args[0]);
}
| |
/* This is needed because the gcc backend only implements
   FIX_TRUNC_EXPR, which is the same as INT() in Fortran.
   FLOOR(x) = INT(x) <= x ?  INT(x) : INT(x) - 1
   Similarly for CEILING.  UP selects the direction: nonzero for
   CEILING, zero for FLOOR.  */

static tree
build_fixbound_expr (stmtblock_t * pblock, tree arg, tree type, int up)
{
  tree tmp;
  tree cond;
  tree argtype;
  tree intval;

  argtype = TREE_TYPE (arg);
  /* Evaluate ARG once; it is used twice below.  */
  arg = gfc_evaluate_now (arg, pblock);

  /* intval = INT (arg), i.e. truncation toward zero.  */
  intval = convert (type, arg);
  intval = gfc_evaluate_now (intval, pblock);

  /* Did the truncation already land on the correct side of ARG?  */
  tmp = convert (argtype, intval);
  cond = fold_build2_loc (input_location, up ? GE_EXPR : LE_EXPR,
			  logical_type_node, tmp, arg);

  /* If not, adjust by one in the requested direction.  */
  tmp = fold_build2_loc (input_location, up ? PLUS_EXPR : MINUS_EXPR, type,
			 intval, build_int_cst (type, 1));
  tmp = fold_build3_loc (input_location, COND_EXPR, type, cond, intval, tmp);
  return tmp;
}
| |
| |
/* Round ARG to the nearest integer, ties away from zero, producing a
   value of integer type RESTYPE.  */

static tree
build_round_expr (tree arg, tree restype)
{
  tree argtype;
  tree fn;
  int argprec, resprec;

  argtype = TREE_TYPE (arg);
  argprec = TYPE_PRECISION (argtype);
  resprec = TYPE_PRECISION (restype);

  /* Depending on the type of the result, choose the int intrinsic (iround,
     available only as a builtin, therefore cannot use it for _Float128), long
     int intrinsic (lround family) or long long intrinsic (llround).  If we
     don't have an appropriate function that converts directly to the integer
     type (such as kind == 16), just use ROUND, and then convert the result to
     an integer.  We might also need to convert the result afterwards.  */
  if (resprec <= INT_TYPE_SIZE && argprec <= LONG_DOUBLE_TYPE_SIZE)
    fn = builtin_decl_for_precision (BUILT_IN_IROUND, argprec);
  else if (resprec <= LONG_TYPE_SIZE)
    fn = builtin_decl_for_precision (BUILT_IN_LROUND, argprec);
  else if (resprec <= LONG_LONG_TYPE_SIZE)
    fn = builtin_decl_for_precision (BUILT_IN_LLROUND, argprec);
  else if (resprec >= argprec)
    fn = builtin_decl_for_precision (BUILT_IN_ROUND, argprec);
  else
    gcc_unreachable ();

  /* The chosen function may return a narrower or wider integer type,
     or a real (BUILT_IN_ROUND); convert to the requested result type.  */
  return convert (restype, build_call_expr_loc (input_location,
						fn, 1, arg));
}
| |
| |
| /* Convert a real to an integer using a specific rounding mode. |
| Ideally we would just build the corresponding GENERIC node, |
| however the RTL expander only actually supports FIX_TRUNC_EXPR. */ |
| |
| static tree |
| build_fix_expr (stmtblock_t * pblock, tree arg, tree type, |
| enum rounding_mode op) |
| { |
| switch (op) |
| { |
| case RND_FLOOR: |
| return build_fixbound_expr (pblock, arg, type, 0); |
| |
| case RND_CEIL: |
| return build_fixbound_expr (pblock, arg, type, 1); |
| |
| case RND_ROUND: |
| return build_round_expr (arg, type); |
| |
| case RND_TRUNC: |
| return fold_build1_loc (input_location, FIX_TRUNC_EXPR, type, arg); |
| |
| default: |
| gcc_unreachable (); |
| } |
| } |
| |
| |
/* Round a real value using the specified rounding mode.
   We use a temporary integer of that same kind size as the result.
   Values larger than those that can be represented by this kind are
   unchanged, as they will not be accurate enough to represent the
   rounding.
    huge = HUGE (KIND (a))
    aint (a) = ((a > huge) || (a < -huge)) ? a : (real)(int)a
   */

static void
gfc_conv_intrinsic_aint (gfc_se * se, gfc_expr * expr, enum rounding_mode op)
{
  tree type;
  tree itype;
  tree arg[2];
  tree tmp;
  tree cond;
  tree decl;
  mpfr_t huge;
  int n, nargs;
  int kind;

  kind = expr->ts.kind;
  nargs = gfc_intrinsic_argument_list_length (expr);

  decl = NULL_TREE;
  /* We have builtin functions for some cases.  Only ANINT (RND_ROUND)
     and AINT (RND_TRUNC) reach this function.  */
  switch (op)
    {
    case RND_ROUND:
      decl = gfc_builtin_decl_for_float_kind (BUILT_IN_ROUND, kind);
      break;

    case RND_TRUNC:
      decl = gfc_builtin_decl_for_float_kind (BUILT_IN_TRUNC, kind);
      break;

    default:
      gcc_unreachable ();
    }

  /* Evaluate the argument.  */
  gcc_assert (expr->value.function.actual->expr);
  gfc_conv_intrinsic_function_args (se, expr, arg, nargs);

  /* Use a builtin function if one exists.  */
  if (decl != NULL_TREE)
    {
      se->expr = build_call_expr_loc (input_location, decl, 1, arg[0]);
      return;
    }

  /* This code is probably redundant, but we'll keep it lying around just
     in case.  */
  type = gfc_typenode_for_spec (&expr->ts);
  arg[0] = gfc_evaluate_now (arg[0], &se->pre);

  /* Test if the value is too large to handle sensibly.  Build
     cond = (arg < huge) && (arg > -huge) using the integer kind's
     largest magnitude as the threshold.  */
  gfc_set_model_kind (kind);
  mpfr_init (huge);
  n = gfc_validate_kind (BT_INTEGER, kind, false);
  mpfr_set_z (huge, gfc_integer_kinds[n].huge, GFC_RND_MODE);
  tmp = gfc_conv_mpfr_to_tree (huge, kind, 0);
  cond = fold_build2_loc (input_location, LT_EXPR, logical_type_node, arg[0],
			  tmp);

  mpfr_neg (huge, huge, GFC_RND_MODE);
  tmp = gfc_conv_mpfr_to_tree (huge, kind, 0);
  tmp = fold_build2_loc (input_location, GT_EXPR, logical_type_node, arg[0],
			 tmp);
  cond = fold_build2_loc (input_location, TRUTH_AND_EXPR, logical_type_node,
			  cond, tmp);
  itype = gfc_get_int_type (kind);

  /* In range: round via the integer temporary; out of range: pass the
     argument through unchanged.  */
  tmp = build_fix_expr (&se->pre, arg[0], itype, op);
  tmp = convert (type, tmp);
  se->expr = fold_build3_loc (input_location, COND_EXPR, type, cond, tmp,
			      arg[0]);
  mpfr_clear (huge);
}
| |
| |
/* Convert to an integer using the specified rounding mode (INT, NINT,
   CEILING, FLOOR).  */

static void
gfc_conv_intrinsic_int (gfc_se * se, gfc_expr * expr, enum rounding_mode op)
{
  tree type;
  tree *args;
  int nargs;

  nargs = gfc_intrinsic_argument_list_length (expr);
  args = XALLOCAVEC (tree, nargs);

  /* Evaluate the argument, we process all arguments even though we only
     use the first one for code generation purposes.  */
  type = gfc_typenode_for_spec (&expr->ts);
  gcc_assert (expr->value.function.actual->expr);
  gfc_conv_intrinsic_function_args (se, expr, args, nargs);

  if (TREE_CODE (TREE_TYPE (args[0])) == INTEGER_TYPE)
    {
      /* Conversion to a different integer kind.  No rounding needed.  */
      se->expr = convert (type, args[0]);
    }
  else
    {
      /* Conversion from complex to non-complex involves taking the real
         component of the value.  */
      if (TREE_CODE (TREE_TYPE (args[0])) == COMPLEX_TYPE
	  && expr->ts.type != BT_COMPLEX)
	{
	  tree artype;

	  artype = TREE_TYPE (TREE_TYPE (args[0]));
	  args[0] = fold_build1_loc (input_location, REALPART_EXPR, artype,
				     args[0]);
	}

      se->expr = build_fix_expr (&se->pre, args[0], type, op);
    }
}
| |
| |
| /* Get the imaginary component of a value. */ |
| |
| static void |
| gfc_conv_intrinsic_imagpart (gfc_se * se, gfc_expr * expr) |
| { |
| tree arg; |
| |
| gfc_conv_intrinsic_function_args (se, expr, &arg, 1); |
| se->expr = fold_build1_loc (input_location, IMAGPART_EXPR, |
| TREE_TYPE (TREE_TYPE (arg)), arg); |
| } |
| |
| |
| /* Get the complex conjugate of a value. */ |
| |
| static void |
| gfc_conv_intrinsic_conjg (gfc_se * se, gfc_expr * expr) |
| { |
| tree arg; |
| |
| gfc_conv_intrinsic_function_args (se, expr, &arg, 1); |
| se->expr = fold_build1_loc (input_location, CONJ_EXPR, TREE_TYPE (arg), arg); |
| } |
| |
| |
| |
| static tree |
| define_quad_builtin (const char *name, tree type, bool is_const) |
| { |
| tree fndecl; |
| fndecl = build_decl (input_location, FUNCTION_DECL, get_identifier (name), |
| type); |
| |
| /* Mark the decl as external. */ |
| DECL_EXTERNAL (fndecl) = 1; |
| TREE_PUBLIC (fndecl) = 1; |
| |
| /* Mark it __attribute__((const)). */ |
| TREE_READONLY (fndecl) = is_const; |
| |
| rest_of_decl_compilation (fndecl, 1, 0); |
| |
| return fndecl; |
| } |
| |
/* Add SIMD attribute for FNDECL built-in if the built-in
   name is in VECTORIZED_BUILTINS (populated from -mveclibabi style
   options into gfc_vectorized_builtins).  */

static void
add_simd_flag_for_built_in (tree fndecl)
{
  if (gfc_vectorized_builtins == NULL
      || fndecl == NULL_TREE)
    return;

  const char *name = IDENTIFIER_POINTER (DECL_NAME (fndecl));
  int *clauses = gfc_vectorized_builtins->get (name);
  if (clauses)
    {
      /* *clauses is a bitmask of gfc_simd_clause values; emit one
	 "omp declare simd" attribute per requested variant.  */
      for (unsigned i = 0; i < 3; i++)
	if (*clauses & (1 << i))
	  {
	    gfc_simd_clause simd_type = (gfc_simd_clause)*clauses;
	    tree omp_clause = NULL_TREE;
	    if (simd_type == SIMD_NONE)
	      ; /* No SIMD clause.  */
	    else
	      {
		omp_clause_code code
		  = (simd_type == SIMD_INBRANCH
		     ? OMP_CLAUSE_INBRANCH : OMP_CLAUSE_NOTINBRANCH);
		omp_clause = build_omp_clause (UNKNOWN_LOCATION, code);
		omp_clause = build_tree_list (NULL_TREE, omp_clause);
	      }

	    /* Prepend the attribute; repeated calls accumulate.  */
	    DECL_ATTRIBUTES (fndecl)
	      = tree_cons (get_identifier ("omp declare simd"), omp_clause,
			   DECL_ATTRIBUTES (fndecl));
	  }
    }
}
| |
| /* Set SIMD attribute to all built-in functions that are mentioned |
| in gfc_vectorized_builtins vector. */ |
| |
| void |
| gfc_adjust_builtins (void) |
| { |
| gfc_intrinsic_map_t *m; |
| for (m = gfc_intrinsic_map; |
| m->id != GFC_ISYM_NONE || m->double_built_in != END_BUILTINS; m++) |
| { |
| add_simd_flag_for_built_in (m->real4_decl); |
| add_simd_flag_for_built_in (m->complex4_decl); |
| add_simd_flag_for_built_in (m->real8_decl); |
| add_simd_flag_for_built_in (m->complex8_decl); |
| add_simd_flag_for_built_in (m->real10_decl); |
| add_simd_flag_for_built_in (m->complex10_decl); |
| add_simd_flag_for_built_in (m->real16_decl); |
| add_simd_flag_for_built_in (m->complex16_decl); |
| add_simd_flag_for_built_in (m->real16_decl); |
| add_simd_flag_for_built_in (m->complex16_decl); |
| } |
| |
| /* Release all strings. */ |
| if (gfc_vectorized_builtins != NULL) |
| { |
| for (hash_map<nofree_string_hash, int>::iterator it |
| = gfc_vectorized_builtins->begin (); |
| it != gfc_vectorized_builtins->end (); ++it) |
| free (CONST_CAST (char *, (*it).first)); |
| |
| delete gfc_vectorized_builtins; |
| gfc_vectorized_builtins = NULL; |
| } |
| } |
| |
| /* Initialize function decls for library functions. The external functions |
| are created as required. Builtin functions are added here. */ |
| |
| void |
| gfc_build_intrinsic_lib_fndecls (void) |
| { |
| gfc_intrinsic_map_t *m; |
| tree quad_decls[END_BUILTINS + 1]; |
| |
| if (gfc_real16_is_float128) |
| { |
| /* If we have soft-float types, we create the decls for their |
| C99-like library functions. For now, we only handle _Float128 |
| q-suffixed or IEC 60559 f128-suffixed functions. */ |
| |
| tree type, complex_type, func_1, func_2, func_3, func_cabs, func_frexp; |
| tree func_iround, func_lround, func_llround, func_scalbn, func_cpow; |
| |
| memset (quad_decls, 0, sizeof(tree) * (END_BUILTINS + 1)); |
| |
| type = gfc_float128_type_node; |
| complex_type = gfc_complex_float128_type_node; |
| /* type (*) (type) */ |
| func_1 = build_function_type_list (type, type, NULL_TREE); |
| /* int (*) (type) */ |
| func_iround = build_function_type_list (integer_type_node, |
| type, NULL_TREE); |
| /* long (*) (type) */ |
| func_lround = build_function_type_list (long_integer_type_node, |
| type, NULL_TREE); |
| /* long long (*) (type) */ |
| func_llround = build_function_type_list (long_long_integer_type_node, |
| type, NULL_TREE); |
| /* type (*) (type, type) */ |
| func_2 = build_function_type_list (type, type, type, NULL_TREE); |
| /* type (*) (type, type, type) */ |
| func_3 = build_function_type_list (type, type, type, type, NULL_TREE); |
| /* type (*) (type, &int) */ |
| func_frexp |
| = build_function_type_list (type, |
| type, |
| build_pointer_type (integer_type_node), |
| NULL_TREE); |
| /* type (*) (type, int) */ |
| func_scalbn = build_function_type_list (type, |
| type, integer_type_node, NULL_TREE); |
| /* type (*) (complex type) */ |
| func_cabs = build_function_type_list (type, complex_type, NULL_TREE); |
| /* complex type (*) (complex type, complex type) */ |
| func_cpow |
| = build_function_type_list (complex_type, |
| complex_type, complex_type, NULL_TREE); |
| |
| #define DEFINE_MATH_BUILTIN(ID, NAME, ARGTYPE) |
| #define DEFINE_MATH_BUILTIN_C(ID, NAME, ARGTYPE) |
| #define LIB_FUNCTION(ID, NAME, HAVE_COMPLEX) |
| |
| /* Only these built-ins are actually needed here. These are used directly |
| from the code, when calling builtin_decl_for_precision() or |
| builtin_decl_for_float_type(). The others are all constructed by |
| gfc_get_intrinsic_lib_fndecl(). */ |
| #define OTHER_BUILTIN(ID, NAME, TYPE, CONST) \ |
| quad_decls[BUILT_IN_ ## ID] \ |
| = define_quad_builtin (gfc_real16_use_iec_60559 \ |
| ? NAME "f128" : NAME "q", func_ ## TYPE, \ |
| CONST); |
| |
| #include "mathbuiltins.def" |
| |
| #undef OTHER_BUILTIN |
| #undef LIB_FUNCTION |
| #undef DEFINE_MATH_BUILTIN |
| #undef DEFINE_MATH_BUILTIN_C |
| |
| /* There is one built-in we defined manually, because it gets called |
| with builtin_decl_for_precision() or builtin_decl_for_float_type() |
| even though it is not an OTHER_BUILTIN: it is SQRT. */ |
| quad_decls[BUILT_IN_SQRT] |
| = define_quad_builtin (gfc_real16_use_iec_60559 |
| ? "sqrtf128" : "sqrtq", func_1, true); |
| } |
| |
| /* Add GCC builtin functions. */ |
| for (m = gfc_intrinsic_map; |
| m->id != GFC_ISYM_NONE || m->double_built_in != END_BUILTINS; m++) |
| { |
| if (m->float_built_in != END_BUILTINS) |
| m->real4_decl = builtin_decl_explicit (m->float_built_in); |
| if (m->complex_float_built_in != END_BUILTINS) |
| m->complex4_decl = builtin_decl_explicit (m->complex_float_built_in); |
| if (m->double_built_in != END_BUILTINS) |
| m->real8_decl = builtin_decl_explicit (m->double_built_in); |
| if (m->complex_double_built_in != END_BUILTINS) |
| m->complex8_decl = builtin_decl_explicit (m->complex_double_built_in); |
| |
| /* If real(kind=10) exists, it is always long double. */ |
| if (m->long_double_built_in != END_BUILTINS) |
| m->real10_decl = builtin_decl_explicit (m->long_double_built_in); |
| if (m->complex_long_double_built_in != END_BUILTINS) |
| m->complex10_decl |
| = builtin_decl_explicit (m->complex_long_double_built_in); |
| |
| if (!gfc_real16_is_float128) |
| { |
| if (m->long_double_built_in != END_BUILTINS) |
| m->real16_decl = builtin_decl_explicit (m->long_double_built_in); |
| if (m->complex_long_double_built_in != END_BUILTINS) |
| m->complex16_decl |
| = builtin_decl_explicit (m->complex_long_double_built_in); |
| } |
| else if (quad_decls[m->double_built_in] != NULL_TREE) |
| { |
| /* Quad-precision function calls are constructed when first |
| needed by builtin_decl_for_precision(), except for those |
| that will be used directly (define by OTHER_BUILTIN). */ |
| m->real16_decl = quad_decls[m->double_built_in]; |
| } |
| else if (quad_decls[m->complex_double_built_in] != NULL_TREE) |
| { |
| /* Same thing for the complex ones. */ |
| m->complex16_decl = quad_decls[m->double_built_in]; |
| } |
| } |
| } |
| |
| |
/* Create a fndecl for a simple intrinsic library function, caching the
   result in the map entry M so each (function, kind) pair is built only
   once.  The library name depends on M->libm_name: either the C math
   library convention ("c" prefix for complex, "f"/"l"/"q"/"f128" kind
   suffix) or the libgfortran convention ("_gfortran_<name>_[rc]<kind>").  */

static tree
gfc_get_intrinsic_lib_fndecl (gfc_intrinsic_map_t * m, gfc_expr * expr)
{
  tree type;
  vec<tree, va_gc> *argtypes;
  tree fndecl;
  gfc_actual_arglist *actual;
  tree *pdecl;
  gfc_typespec *ts;
  char name[GFC_MAX_SYMBOL_LEN + 3];

  /* Pick the cache slot matching the result type and kind.  */
  ts = &expr->ts;
  if (ts->type == BT_REAL)
    {
      switch (ts->kind)
	{
	case 4:
	  pdecl = &m->real4_decl;
	  break;
	case 8:
	  pdecl = &m->real8_decl;
	  break;
	case 10:
	  pdecl = &m->real10_decl;
	  break;
	case 16:
	  pdecl = &m->real16_decl;
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  else if (ts->type == BT_COMPLEX)
    {
      gcc_assert (m->complex_available);

      switch (ts->kind)
	{
	case 4:
	  pdecl = &m->complex4_decl;
	  break;
	case 8:
	  pdecl = &m->complex8_decl;
	  break;
	case 10:
	  pdecl = &m->complex10_decl;
	  break;
	case 16:
	  pdecl = &m->complex16_decl;
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  else
    gcc_unreachable ();

  /* Return the cached decl if this variant was already created.  */
  if (*pdecl)
    return *pdecl;

  if (m->libm_name)
    {
      /* C math library naming: optional "c" prefix for complex, then the
	 base name, then the kind suffix ("f", none, "l", "q"/"f128").  */
      int n = gfc_validate_kind (BT_REAL, ts->kind, false);
      if (gfc_real_kinds[n].c_float)
	snprintf (name, sizeof (name), "%s%s%s",
		  ts->type == BT_COMPLEX ? "c" : "", m->name, "f");
      else if (gfc_real_kinds[n].c_double)
	snprintf (name, sizeof (name), "%s%s",
		  ts->type == BT_COMPLEX ? "c" : "", m->name);
      else if (gfc_real_kinds[n].c_long_double)
	snprintf (name, sizeof (name), "%s%s%s",
		  ts->type == BT_COMPLEX ? "c" : "", m->name, "l");
      else if (gfc_real_kinds[n].c_float128)
	snprintf (name, sizeof (name), "%s%s%s",
		  ts->type == BT_COMPLEX ? "c" : "", m->name,
		  gfc_real_kinds[n].use_iec_60559 ? "f128" : "q");
      else
	gcc_unreachable ();
    }
  else
    {
      /* libgfortran naming: "_gfortran_<name>_[rc]<abi kind>".  */
      snprintf (name, sizeof (name), PREFIX ("%s_%c%d"), m->name,
		ts->type == BT_COMPLEX ? 'c' : 'r',
		gfc_type_abi_kind (ts));
    }

  /* Build the function type from the actual argument types.  */
  argtypes = NULL;
  for (actual = expr->value.function.actual; actual; actual = actual->next)
    {
      type = gfc_typenode_for_spec (&actual->expr->ts);
      vec_safe_push (argtypes, type);
    }
  type = build_function_type_vec (gfc_typenode_for_spec (ts), argtypes);
  fndecl = build_decl (input_location,
		       FUNCTION_DECL, get_identifier (name), type);

  /* Mark the decl as external.  */
  DECL_EXTERNAL (fndecl) = 1;
  TREE_PUBLIC (fndecl) = 1;

  /* Mark it __attribute__((const)), if possible.  */
  TREE_READONLY (fndecl) = m->is_constant;

  rest_of_decl_compilation (fndecl, 1, 0);

  (*pdecl) = fndecl;
  return fndecl;
}
| |
| |
/* Convert an intrinsic function into an external or builtin call.  */

static void
gfc_conv_intrinsic_lib_function (gfc_se * se, gfc_expr * expr)
{
  gfc_intrinsic_map_t *m;
  tree fndecl;
  tree rettype;
  tree *args;
  unsigned int num_args;
  gfc_isym_id id;

  id = expr->value.function.isym->id;
  /* Find the entry for this function.  The loop also stops at the map's
     terminating entry, which the check below diagnoses.  */
  for (m = gfc_intrinsic_map;
       m->id != GFC_ISYM_NONE || m->double_built_in != END_BUILTINS; m++)
    {
      if (id == m->id)
	break;
    }

  if (m->id == GFC_ISYM_NONE)
    {
      gfc_internal_error ("Intrinsic function %qs (%d) not recognized",
			  expr->value.function.name, id);
    }

  /* Get the decl and generate the call.  */
  num_args = gfc_intrinsic_argument_list_length (expr);
  args = XALLOCAVEC (tree, num_args);

  gfc_conv_intrinsic_function_args (se, expr, args, num_args);
  fndecl = gfc_get_intrinsic_lib_fndecl (m, expr);
  rettype = TREE_TYPE (TREE_TYPE (fndecl));

  fndecl = build_addr (fndecl);
  se->expr = build_call_array_loc (input_location, rettype, fndecl, num_args, args);
}
| |
| |
| /* If bounds-checking is enabled, create code to verify at runtime that the |
| string lengths for both expressions are the same (needed for e.g. MERGE). |
| If bounds-checking is not enabled, does nothing. */ |
| |
| void |
| gfc_trans_same_strlen_check (const char* intr_name, locus* where, |
| tree a, tree b, stmtblock_t* target) |
| { |
| tree cond; |
| tree name; |
| |
| /* If bounds-checking is disabled, do nothing. */ |
| if (!(gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)) |
| return; |
| |
| /* Compare the two string lengths. */ |
| cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node, a, b); |
| |
| /* Output the runtime-check. */ |
| name = gfc_build_cstring_const (intr_name); |
| name = gfc_build_addr_expr (pchar_type_node, name); |
| gfc_trans_runtime_check (true, false, cond, target, where, |
| "Unequal character lengths (%ld/%ld) in %s", |
| fold_convert (long_integer_type_node, a), |
| fold_convert (long_integer_type_node, b), name); |
| } |
| |
| |
/* The EXPONENT(X) intrinsic function is translated into
       int ret;
       return isfinite(X) ? (frexp (X, &ret) , ret) : huge
   so that if X is a NaN or infinity, the result is HUGE(0).
 */

static void
gfc_conv_intrinsic_exponent (gfc_se *se, gfc_expr *expr)
{
  tree arg, type, res, tmp, frexp, cond, huge;
  int i;

  /* frexp variant matching the kind of the argument, not the result.  */
  frexp = gfc_builtin_decl_for_float_kind (BUILT_IN_FREXP,
					   expr->value.function.actual->expr->ts.kind);

  gfc_conv_intrinsic_function_args (se, expr, &arg, 1);
  /* ARG is used twice (isfinite test and frexp call); evaluate once.  */
  arg = gfc_evaluate_now (arg, &se->pre);

  i = gfc_validate_kind (BT_INTEGER, gfc_c_int_kind, false);
  huge = gfc_conv_mpz_to_tree (gfc_integer_kinds[i].huge, gfc_c_int_kind);
  cond = build_call_expr_loc (input_location,
			      builtin_decl_explicit (BUILT_IN_ISFINITE),
			      1, arg);

  /* (frexp (arg, &res), res): the exponent comes back via the out
     parameter, so sequence the call before reading RES.  */
  res = gfc_create_var (integer_type_node, NULL);
  tmp = build_call_expr_loc (input_location, frexp, 2, arg,
			     gfc_build_addr_expr (NULL_TREE, res));
  tmp = fold_build2_loc (input_location, COMPOUND_EXPR, integer_type_node,
			 tmp, res);
  se->expr = fold_build3_loc (input_location, COND_EXPR, integer_type_node,
			      cond, tmp, huge);

  /* Convert the int result to the requested integer kind.  */
  type = gfc_typenode_for_spec (&expr->ts);
  se->expr = fold_convert (type, se->expr);
}
| |
| |
| /* Fill in the following structure |
| struct caf_vector_t { |
| size_t nvec; // size of the vector |
| union { |
| struct { |
| void *vector; |
| int kind; |
| } v; |
| struct { |
| ptrdiff_t lower_bound; |
| ptrdiff_t upper_bound; |
| ptrdiff_t stride; |
| } triplet; |
| } u; |
| } */ |
| |
| static void |
| conv_caf_vector_subscript_elem (stmtblock_t *block, int i, tree desc, |
| tree lower, tree upper, tree stride, |
| tree vector, int kind, tree nvec) |
| { |
| tree field, type, tmp; |
| |
| desc = gfc_build_array_ref (desc, gfc_rank_cst[i], NULL_TREE); |
| type = TREE_TYPE (desc); |
| |
| field = gfc_advance_chain (TYPE_FIELDS (type), 0); |
| tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), |
| desc, field, NULL_TREE); |
| gfc_add_modify (block, tmp, fold_convert (TREE_TYPE (field), nvec)); |
| |
| /* Access union. */ |
| field = gfc_advance_chain (TYPE_FIELDS (type), 1); |
| desc = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), |
| desc, field, NULL_TREE); |
| type = TREE_TYPE (desc); |
| |
| /* Access the inner struct. */ |
| field = gfc_advance_chain (TYPE_FIELDS (type), vector != NULL_TREE ? 0 : 1); |
| desc = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), |
| desc, field, NULL_TREE); |
| type = TREE_TYPE (desc); |
| |
| if (vector != NULL_TREE) |
| { |
| /* Set vector and kind. */ |
| field = gfc_advance_chain (TYPE_FIELDS (type), 0); |
| tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), |
| desc, field, NULL_TREE); |
| gfc_add_modify (block, tmp, fold_convert (TREE_TYPE (field), vector)); |
| field = gfc_advance_chain (TYPE_FIELDS (type), 1); |
| tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), |
| desc, field, NULL_TREE); |
| gfc_add_modify (block, tmp, build_int_cst (integer_type_node, kind)); |
| } |
| else |
| { |
| /* Set dim.lower/upper/stride. */ |
| field = gfc_advance_chain (TYPE_FIELDS (type), 0); |
| tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), |
| desc, field, NULL_TREE); |
| gfc_add_modify (block, tmp, fold_convert (TREE_TYPE (field), lower)); |
| |
| field = gfc_advance_chain (TYPE_FIELDS (type), 1); |
| tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), |
| desc, field, NULL_TREE); |
| gfc_add_modify (block, tmp, fold_convert (TREE_TYPE (field), upper)); |
| |
| field = gfc_advance_chain (TYPE_FIELDS (type), 2); |
| tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), |
| desc, field, NULL_TREE); |
| gfc_add_modify (block, tmp, fold_convert (TREE_TYPE (field), stride)); |
| } |
| } |
| |
| |
| static tree |
| conv_caf_vector_subscript (stmtblock_t *block, tree desc, gfc_array_ref *ar) |
| { |
| gfc_se argse; |
| tree var, lower, upper = NULL_TREE, stride = NULL_TREE, vector, nvec; |
| tree lbound, ubound, tmp; |
| int i; |
| |
| var = gfc_create_var (gfc_get_caf_vector_type (ar->dimen), "vector"); |
| |
| for (i = 0; i < ar->dimen; i++) |
| switch (ar->dimen_type[i]) |
| { |
| case DIMEN_RANGE: |
| if (ar->end[i]) |
| { |
| gfc_init_se (&argse, NULL); |
| gfc_conv_expr (&argse, ar->end[i]); |
| gfc_add_block_to_block (block, &argse.pre); |
| upper = gfc_evaluate_now (argse.expr, block); |
| } |
| else |
| upper = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[i]); |
| if (ar->stride[i]) |
| { |
| gfc_init_se (&argse, NULL); |
| gfc_conv_expr (&argse, ar->stride[i]); |
| gfc_add_block_to_block (block, &argse.pre); |
| stride = gfc_evaluate_now (argse.expr, block); |
| } |
| else |
| stride = gfc_index_one_node; |
| |
| /* Fall through. */ |
| case DIMEN_ELEMENT: |
| if (ar->start[i]) |
| { |
| gfc_init_se (&argse, NULL); |
| gfc_conv_expr (&argse, ar->start[i]); |
| gfc_add_block_to_block (block, &argse.pre); |
| lower = gfc_evaluate_now (argse.expr, block); |
| } |
| else |
| lower = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[i]); |
| if (ar->dimen_type[i] == DIMEN_ELEMENT) |
| { |
| upper = lower; |
| stride = gfc_index_one_node; |
| } |
| vector = NULL_TREE; |
| nvec = size_zero_node; |
| conv_caf_vector_subscript_elem (block, i, var, lower, upper, stride, |
| vector, 0, nvec); |
| break; |
| |
| case DIMEN_VECTOR: |
| gfc_init_se (&argse, NULL); |
| argse.descriptor_only = 1; |
| gfc_conv_expr_descriptor (&argse, ar->start[i]); |
| gfc_add_block_to_block (block, &argse.pre); |
| vector = argse.expr; |
| lbound = gfc_conv_descriptor_lbound_get (vector, gfc_rank_cst[0]); |
| ubound = gfc_conv_descriptor_ubound_get (vector, gfc_rank_cst[0]); |
| nvec = gfc_conv_array_extent_dim (lbound, ubound, NULL); |
| tmp = gfc_conv_descriptor_stride_get (vector, gfc_rank_cst[0]); |
| nvec = fold_build2_loc (input_location, TRUNC_DIV_EXPR, |
| TREE_TYPE (nvec), nvec, tmp); |
| lower = gfc_index_zero_node; |
| upper = gfc_index_zero_node; |
| stride = gfc_index_zero_node; |
| vector = gfc_conv_descriptor_data_get (vector); |
| conv_caf_vector_subscript_elem (block, i, var, lower, upper, stride, |
| vector, ar->start[i]->ts.kind, nvec); |
| break; |
| default: |
| gcc_unreachable(); |
| } |
| return gfc_build_addr_expr (NULL_TREE, var); |
| } |
| |
| |
| static tree |
| compute_component_offset (tree field, tree type) |
| { |
| tree tmp; |
| if (DECL_FIELD_BIT_OFFSET (field) != NULL_TREE |
| && !integer_zerop (DECL_FIELD_BIT_OFFSET (field))) |
| { |
| tmp = fold_build2 (TRUNC_DIV_EXPR, type, |
| DECL_FIELD_BIT_OFFSET (field), |
| bitsize_unit_node); |
| return fold_build2 (PLUS_EXPR, type, DECL_FIELD_OFFSET (field), tmp); |
| } |
| else |
| return DECL_FIELD_OFFSET (field); |
| } |
| |
| |
/* Translate EXPR's chain of refs (components, array sections, vector
   subscripts) into a linked chain of caf_reference_t records that the
   caf runtime's *_by_ref functions can walk.  Pure coindexes are
   skipped.  The code that fills the records is appended to BLOCK.
   Returns the address of the head of the chain, or NULL_TREE when no
   record was needed.  */

static tree
conv_expr_ref_to_caf_ref (stmtblock_t *block, gfc_expr *expr)
{
  gfc_ref *ref = expr->ref, *last_comp_ref;
  tree caf_ref = NULL_TREE, prev_caf_ref = NULL_TREE, reference_type, tmp, tmp2,
      field, last_type, inner_struct, mode, mode_rhs, dim_array, dim, dim_type,
      start, end, stride, vector, nvec;
  gfc_se se;
  bool ref_static_array = false;
  tree last_component_ref_tree = NULL_TREE;
  int i, last_type_n;

  if (expr->symtree)
    {
      last_component_ref_tree = expr->symtree->n.sym->backend_decl;
      /* "Static" here means not allocatable and not a pointer: indices can
	 then be made zero-based against the compile-time bounds.  */
      ref_static_array = !expr->symtree->n.sym->attr.allocatable
	  && !expr->symtree->n.sym->attr.pointer;
    }

  /* Prevent uninit-warning.  */
  reference_type = NULL_TREE;

  /* Skip refs up to the first coarray-ref.  */
  last_comp_ref = NULL;
  while (ref && (ref->type != REF_ARRAY || ref->u.ar.codimen == 0))
    {
      /* Remember the type of components skipped.  */
      if (ref->type == REF_COMPONENT)
	last_comp_ref = ref;
      ref = ref->next;
    }
  /* When a component was skipped, get the type information of the last
     component ref, else get the type from the symbol.  */
  if (last_comp_ref)
    {
      last_type = gfc_typenode_for_spec (&last_comp_ref->u.c.component->ts);
      last_type_n = last_comp_ref->u.c.component->ts.type;
    }
  else
    {
      last_type = gfc_typenode_for_spec (&expr->symtree->n.sym->ts);
      last_type_n = expr->symtree->n.sym->ts.type;
    }

  while (ref)
    {
      if (ref->type == REF_ARRAY && ref->u.ar.codimen > 0
	  && ref->u.ar.dimen == 0)
	{
	  /* Skip pure coindexes.  */
	  ref = ref->next;
	  continue;
	}
      tmp = gfc_create_var (gfc_get_caf_reference_type (), "caf_ref");
      reference_type = TREE_TYPE (tmp);

      if (caf_ref == NULL_TREE)
	caf_ref = tmp;

      /* Construct the chain of refs: store the address of the new record in
	 the "next" field (field 0) of the previous one.  */
      if (prev_caf_ref != NULL_TREE)
	{
	  field = gfc_advance_chain (TYPE_FIELDS (reference_type), 0);
	  tmp2 = fold_build3_loc (input_location, COMPONENT_REF,
				  TREE_TYPE (field), prev_caf_ref, field,
				  NULL_TREE);
	  gfc_add_modify (block, tmp2, gfc_build_addr_expr (TREE_TYPE (field),
							    tmp));
	}
      prev_caf_ref = tmp;

      switch (ref->type)
	{
	case REF_COMPONENT:
	  last_type = gfc_typenode_for_spec (&ref->u.c.component->ts);
	  last_type_n = ref->u.c.component->ts.type;
	  /* Set the type of the ref.  */
	  field = gfc_advance_chain (TYPE_FIELDS (reference_type), 1);
	  tmp = fold_build3_loc (input_location, COMPONENT_REF,
				 TREE_TYPE (field), prev_caf_ref, field,
				 NULL_TREE);
	  gfc_add_modify (block, tmp, build_int_cst (integer_type_node,
						     GFC_CAF_REF_COMPONENT));

	  /* Ref the c in union u.  */
	  field = gfc_advance_chain (TYPE_FIELDS (reference_type), 3);
	  tmp = fold_build3_loc (input_location, COMPONENT_REF,
				 TREE_TYPE (field), prev_caf_ref, field,
				 NULL_TREE);
	  field = gfc_advance_chain (TYPE_FIELDS (TREE_TYPE (field)), 0);
	  inner_struct = fold_build3_loc (input_location, COMPONENT_REF,
					  TREE_TYPE (field), tmp, field,
					  NULL_TREE);

	  /* Set the offset.  */
	  field = gfc_advance_chain (TYPE_FIELDS (TREE_TYPE (inner_struct)), 0);
	  tmp = fold_build3_loc (input_location, COMPONENT_REF,
				 TREE_TYPE (field), inner_struct, field,
				 NULL_TREE);
	  /* Computing the offset is somewhat harder.  The bit_offset has to be
	     taken into account.  When the bit_offset in the field_decl is non-
	     null, divide it by the bitsize_unit and add it to the regular
	     offset.  */
	  tmp2 = compute_component_offset (ref->u.c.component->backend_decl,
					   TREE_TYPE (tmp));
	  gfc_add_modify (block, tmp, fold_convert (TREE_TYPE (tmp), tmp2));

	  /* Set caf_token_offset.  */
	  field = gfc_advance_chain (TYPE_FIELDS (TREE_TYPE (inner_struct)), 1);
	  tmp = fold_build3_loc (input_location, COMPONENT_REF,
				 TREE_TYPE (field), inner_struct, field,
				 NULL_TREE);
	  if ((ref->u.c.component->attr.allocatable
	       || ref->u.c.component->attr.pointer)
	      && ref->u.c.component->attr.dimension)
	    {
	      tree arr_desc_token_offset;
	      /* Get the token field from the descriptor.  */
	      arr_desc_token_offset = TREE_OPERAND (
		gfc_conv_descriptor_token (ref->u.c.component->backend_decl), 1);
	      arr_desc_token_offset
		  = compute_component_offset (arr_desc_token_offset,
					      TREE_TYPE (tmp));
	      tmp2 = fold_build2_loc (input_location, PLUS_EXPR,
				      TREE_TYPE (tmp2), tmp2,
				      arr_desc_token_offset);
	    }
	  else if (ref->u.c.component->caf_token)
	    tmp2 = compute_component_offset (ref->u.c.component->caf_token,
					     TREE_TYPE (tmp));
	  else
	    tmp2 = integer_zero_node;
	  gfc_add_modify (block, tmp, fold_convert (TREE_TYPE (tmp), tmp2));

	  /* Remember whether this ref was to a non-allocatable/non-pointer
	     component so the next array ref can be tailored correctly.  */
	  ref_static_array = !ref->u.c.component->attr.allocatable
	      && !ref->u.c.component->attr.pointer;
	  last_component_ref_tree = ref_static_array
	      ? ref->u.c.component->backend_decl : NULL_TREE;
	  break;
	case REF_ARRAY:
	  if (ref_static_array && ref->u.ar.as->type == AS_DEFERRED)
	    ref_static_array = false;
	  /* Set the type of the ref.  */
	  field = gfc_advance_chain (TYPE_FIELDS (reference_type), 1);
	  tmp = fold_build3_loc (input_location, COMPONENT_REF,
				 TREE_TYPE (field), prev_caf_ref, field,
				 NULL_TREE);
	  gfc_add_modify (block, tmp, build_int_cst (integer_type_node,
						     ref_static_array
						     ? GFC_CAF_REF_STATIC_ARRAY
						     : GFC_CAF_REF_ARRAY));

	  /* Ref the a in union u.  */
	  field = gfc_advance_chain (TYPE_FIELDS (reference_type), 3);
	  tmp = fold_build3_loc (input_location, COMPONENT_REF,
				 TREE_TYPE (field), prev_caf_ref, field,
				 NULL_TREE);
	  field = gfc_advance_chain (TYPE_FIELDS (TREE_TYPE (field)), 1);
	  inner_struct = fold_build3_loc (input_location, COMPONENT_REF,
					  TREE_TYPE (field), tmp, field,
					  NULL_TREE);

	  /* Set the static_array_type in a for static arrays.  */
	  if (ref_static_array)
	    {
	      field = gfc_advance_chain (TYPE_FIELDS (TREE_TYPE (inner_struct)),
					 1);
	      tmp = fold_build3_loc (input_location, COMPONENT_REF,
				     TREE_TYPE (field), inner_struct, field,
				     NULL_TREE);
	      gfc_add_modify (block, tmp, build_int_cst (TREE_TYPE (tmp),
							 last_type_n));
	    }
	  /* Ref the mode in the inner_struct.  */
	  field = gfc_advance_chain (TYPE_FIELDS (TREE_TYPE (inner_struct)), 0);
	  mode = fold_build3_loc (input_location, COMPONENT_REF,
				  TREE_TYPE (field), inner_struct, field,
				  NULL_TREE);
	  /* Ref the dim in the inner_struct.  */
	  field = gfc_advance_chain (TYPE_FIELDS (TREE_TYPE (inner_struct)), 2);
	  dim_array = fold_build3_loc (input_location, COMPONENT_REF,
				       TREE_TYPE (field), inner_struct, field,
				       NULL_TREE);
	  for (i = 0; i < ref->u.ar.dimen; ++i)
	    {
	      /* Ref dim i.  */
	      dim = gfc_build_array_ref (dim_array, gfc_rank_cst[i], NULL_TREE);
	      dim_type = TREE_TYPE (dim);
	      mode_rhs = start = end = stride = NULL_TREE;
	      switch (ref->u.ar.dimen_type[i])
		{
		case DIMEN_RANGE:
		  if (ref->u.ar.end[i])
		    {
		      gfc_init_se (&se, NULL);
		      gfc_conv_expr (&se, ref->u.ar.end[i]);
		      gfc_add_block_to_block (block, &se.pre);
		      if (ref_static_array)
			{
			  /* Make the index zero-based, when reffing a static
			     array.  */
			  end = se.expr;
			  gfc_init_se (&se, NULL);
			  gfc_conv_expr (&se, ref->u.ar.as->lower[i]);
			  gfc_add_block_to_block (block, &se.pre);
			  se.expr = fold_build2 (MINUS_EXPR,
						 gfc_array_index_type,
						 end, fold_convert (
						   gfc_array_index_type,
						   se.expr));
			}
		      end = gfc_evaluate_now (fold_convert (
						gfc_array_index_type,
						se.expr),
					      block);
		    }
		  else if (ref_static_array)
		    /* No explicit end: use the compile-time extent.  */
		    end = fold_build2 (MINUS_EXPR,
				       gfc_array_index_type,
				       gfc_conv_array_ubound (
					 last_component_ref_tree, i),
				       gfc_conv_array_lbound (
					 last_component_ref_tree, i));
		  else
		    {
		      end = NULL_TREE;
		      mode_rhs = build_int_cst (unsigned_char_type_node,
						GFC_CAF_ARR_REF_OPEN_END);
		    }
		  if (ref->u.ar.stride[i])
		    {
		      gfc_init_se (&se, NULL);
		      gfc_conv_expr (&se, ref->u.ar.stride[i]);
		      gfc_add_block_to_block (block, &se.pre);
		      stride = gfc_evaluate_now (fold_convert (
						   gfc_array_index_type,
						   se.expr),
						 block);
		      if (ref_static_array)
			{
			  /* Make the index zero-based, when reffing a static
			     array.  */
			  stride = fold_build2 (MULT_EXPR,
						gfc_array_index_type,
						gfc_conv_array_stride (
						  last_component_ref_tree,
						  i),
						stride);
			  gcc_assert (end != NULL_TREE);
			  /* Multiply with the product of array's stride and
			     the step of the ref to a virtual upper bound.
			     We cannot compute the actual upper bound here or
			     the caflib would compute the extend
			     incorrectly.  */
			  end = fold_build2 (MULT_EXPR, gfc_array_index_type,
					     end, gfc_conv_array_stride (
					       last_component_ref_tree,
					       i));
			  end = gfc_evaluate_now (end, block);
			  stride = gfc_evaluate_now (stride, block);
			}
		    }
		  else if (ref_static_array)
		    {
		      stride = gfc_conv_array_stride (last_component_ref_tree,
						      i);
		      end = fold_build2 (MULT_EXPR, gfc_array_index_type,
					 end, stride);
		      end = gfc_evaluate_now (end, block);
		    }
		  else
		    /* Always set a ref stride of one to make caflib's
		       handling easier.  */
		    stride = gfc_index_one_node;

		  /* Fall through.  */
		case DIMEN_ELEMENT:
		  if (ref->u.ar.start[i])
		    {
		      gfc_init_se (&se, NULL);
		      gfc_conv_expr (&se, ref->u.ar.start[i]);
		      gfc_add_block_to_block (block, &se.pre);
		      if (ref_static_array)
			{
			  /* Make the index zero-based, when reffing a static
			     array.  */
			  start = fold_convert (gfc_array_index_type, se.expr);
			  gfc_init_se (&se, NULL);
			  gfc_conv_expr (&se, ref->u.ar.as->lower[i]);
			  gfc_add_block_to_block (block, &se.pre);
			  se.expr = fold_build2 (MINUS_EXPR,
						 gfc_array_index_type,
						 start, fold_convert (
						   gfc_array_index_type,
						   se.expr));
			  /* Multiply with the stride.  */
			  se.expr = fold_build2 (MULT_EXPR,
						 gfc_array_index_type,
						 se.expr,
						 gfc_conv_array_stride (
						   last_component_ref_tree,
						   i));
			}
		      start = gfc_evaluate_now (fold_convert (
						  gfc_array_index_type,
						  se.expr),
						block);
		      if (mode_rhs == NULL_TREE)
			mode_rhs = build_int_cst (unsigned_char_type_node,
						  ref->u.ar.dimen_type[i]
						  == DIMEN_ELEMENT
						  ? GFC_CAF_ARR_REF_SINGLE
						  : GFC_CAF_ARR_REF_RANGE);
		    }
		  else if (ref_static_array)
		    {
		      start = integer_zero_node;
		      mode_rhs = build_int_cst (unsigned_char_type_node,
						ref->u.ar.start[i] == NULL
						? GFC_CAF_ARR_REF_FULL
						: GFC_CAF_ARR_REF_RANGE);
		    }
		  else if (end == NULL_TREE)
		    mode_rhs = build_int_cst (unsigned_char_type_node,
					      GFC_CAF_ARR_REF_FULL);
		  else
		    mode_rhs = build_int_cst (unsigned_char_type_node,
					      GFC_CAF_ARR_REF_OPEN_START);

		  /* Ref the s in dim.  */
		  field = gfc_advance_chain (TYPE_FIELDS (dim_type), 0);
		  tmp = fold_build3_loc (input_location, COMPONENT_REF,
					 TREE_TYPE (field), dim, field,
					 NULL_TREE);

		  /* Set start in s.  */
		  if (start != NULL_TREE)
		    {
		      field = gfc_advance_chain (TYPE_FIELDS (TREE_TYPE (tmp)),
						 0);
		      tmp2 = fold_build3_loc (input_location, COMPONENT_REF,
					      TREE_TYPE (field), tmp, field,
					      NULL_TREE);
		      gfc_add_modify (block, tmp2,
				      fold_convert (TREE_TYPE (tmp2), start));
		    }

		  /* Set end in s.  */
		  if (end != NULL_TREE)
		    {
		      field = gfc_advance_chain (TYPE_FIELDS (TREE_TYPE (tmp)),
						 1);
		      tmp2 = fold_build3_loc (input_location, COMPONENT_REF,
					      TREE_TYPE (field), tmp, field,
					      NULL_TREE);
		      gfc_add_modify (block, tmp2,
				      fold_convert (TREE_TYPE (tmp2), end));
		    }

		  /* Set stride in s.  */
		  if (stride != NULL_TREE)
		    {
		      field = gfc_advance_chain (TYPE_FIELDS (TREE_TYPE (tmp)),
						 2);
		      tmp2 = fold_build3_loc (input_location, COMPONENT_REF,
					      TREE_TYPE (field), tmp, field,
					      NULL_TREE);
		      gfc_add_modify (block, tmp2,
				      fold_convert (TREE_TYPE (tmp2), stride));
		    }
		  break;
		case DIMEN_VECTOR:
		  /* TODO: In case of static array.  */
		  gcc_assert (!ref_static_array);
		  mode_rhs = build_int_cst (unsigned_char_type_node,
					    GFC_CAF_ARR_REF_VECTOR);
		  gfc_init_se (&se, NULL);
		  se.descriptor_only = 1;
		  gfc_conv_expr_descriptor (&se, ref->u.ar.start[i]);
		  gfc_add_block_to_block (block, &se.pre);
		  vector = se.expr;
		  tmp = gfc_conv_descriptor_lbound_get (vector,
							gfc_rank_cst[0]);
		  tmp2 = gfc_conv_descriptor_ubound_get (vector,
							 gfc_rank_cst[0]);
		  nvec = gfc_conv_array_extent_dim (tmp, tmp2, NULL);
		  tmp = gfc_conv_descriptor_stride_get (vector,
							gfc_rank_cst[0]);
		  nvec = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
					  TREE_TYPE (nvec), nvec, tmp);
		  vector = gfc_conv_descriptor_data_get (vector);

		  /* Ref the v in dim.  */
		  field = gfc_advance_chain (TYPE_FIELDS (dim_type), 1);
		  tmp = fold_build3_loc (input_location, COMPONENT_REF,
					 TREE_TYPE (field), dim, field,
					 NULL_TREE);

		  /* Set vector in v.  */
		  field = gfc_advance_chain (TYPE_FIELDS (TREE_TYPE (tmp)), 0);
		  tmp2 = fold_build3_loc (input_location, COMPONENT_REF,
					  TREE_TYPE (field), tmp, field,
					  NULL_TREE);
		  gfc_add_modify (block, tmp2, fold_convert (TREE_TYPE (tmp2),
							     vector));

		  /* Set nvec in v.  */
		  field = gfc_advance_chain (TYPE_FIELDS (TREE_TYPE (tmp)), 1);
		  tmp2 = fold_build3_loc (input_location, COMPONENT_REF,
					  TREE_TYPE (field), tmp, field,
					  NULL_TREE);
		  gfc_add_modify (block, tmp2, fold_convert (TREE_TYPE (tmp2),
							     nvec));

		  /* Set kind in v.  */
		  field = gfc_advance_chain (TYPE_FIELDS (TREE_TYPE (tmp)), 2);
		  tmp2 = fold_build3_loc (input_location, COMPONENT_REF,
					  TREE_TYPE (field), tmp, field,
					  NULL_TREE);
		  gfc_add_modify (block, tmp2, build_int_cst (integer_type_node,
						  ref->u.ar.start[i]->ts.kind));
		  break;
		default:
		  gcc_unreachable ();
		}
	      /* Set the mode for dim i.  */
	      tmp = gfc_build_array_ref (mode, gfc_rank_cst[i], NULL_TREE);
	      gfc_add_modify (block, tmp, fold_convert (TREE_TYPE (tmp),
							mode_rhs));
	    }

	  /* Set the mode for dim i+1 to GFC_ARR_REF_NONE.  */
	  if (i < GFC_MAX_DIMENSIONS)
	    {
	      tmp = gfc_build_array_ref (mode, gfc_rank_cst[i], NULL_TREE);
	      gfc_add_modify (block, tmp,
			      build_int_cst (unsigned_char_type_node,
					     GFC_CAF_ARR_REF_NONE));
	    }
	  break;
	default:
	  gcc_unreachable ();
	}

      /* Set the size of the current type.  */
      field = gfc_advance_chain (TYPE_FIELDS (reference_type), 2);
      tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
			     prev_caf_ref, field, NULL_TREE);
      gfc_add_modify (block, tmp, fold_convert (TREE_TYPE (field),
						TYPE_SIZE_UNIT (last_type)));

      ref = ref->next;
    }

  /* Terminate the chain: NULL the "next" field of the last record.  */
  if (prev_caf_ref != NULL_TREE)
    {
      field = gfc_advance_chain (TYPE_FIELDS (reference_type), 0);
      tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
			     prev_caf_ref, field, NULL_TREE);
      gfc_add_modify (block, tmp, fold_convert (TREE_TYPE (field),
						null_pointer_node));
    }
  return caf_ref != NULL_TREE ? gfc_build_addr_expr (NULL_TREE, caf_ref)
			      : NULL_TREE;
}
| |
/* Get data from a remote coarray.
   EXPR is the coindexed expression (or the full CAF_GET call when LHS is
   NULL_TREE).  LHS/LHS_KIND describe an already-stripped assignment target,
   when present.  MAY_REQUIRE_TMP tells the runtime whether source and
   destination may overlap.  MAY_REALLOC permits reallocation of an
   allocatable LHS.  CAF_ATTR caches the coarray attributes of EXPR; when
   NULL they are computed here.  Generates either a caf_get_by_ref call
   (for allocatable/pointer components) or a plain caf_get call.  */

static void
gfc_conv_intrinsic_caf_get (gfc_se *se, gfc_expr *expr, tree lhs, tree lhs_kind,
			    tree may_require_tmp, bool may_realloc,
			    symbol_attribute *caf_attr)
{
  gfc_expr *array_expr, *tmp_stat;
  gfc_se argse;
  tree caf_decl, token, offset, image_index, tmp;
  tree res_var, dst_var, type, kind, vec, stat;
  tree caf_reference;
  symbol_attribute caf_attr_store;

  gcc_assert (flag_coarray == GFC_FCOARRAY_LIB);

  if (se->ss && se->ss->info->useflags)
    {
      /* Access the previously obtained result.  */
      gfc_conv_tmp_array_ref (se);
      return;
    }

  /* If lhs is set, the CAF_GET intrinsic has already been stripped.  */
  array_expr = (lhs == NULL_TREE) ? expr->value.function.actual->expr : expr;
  type = gfc_typenode_for_spec (&array_expr->ts);

  if (caf_attr == NULL)
    {
      caf_attr_store = gfc_caf_attr (array_expr);
      caf_attr = &caf_attr_store;
    }

  res_var = lhs;
  dst_var = lhs;

  vec = null_pointer_node;
  tmp_stat = gfc_find_stat_co (expr);

  /* Evaluate a STAT= variable, if present, so the runtime can report
     failures through it.  */
  if (tmp_stat)
    {
      gfc_se stat_se;
      gfc_init_se (&stat_se, NULL);
      gfc_conv_expr_reference (&stat_se, tmp_stat);
      stat = stat_se.expr;
      gfc_add_block_to_block (&se->pre, &stat_se.pre);
      gfc_add_block_to_block (&se->post, &stat_se.post);
    }
  else
    stat = null_pointer_node;

  /* Only use the new get_by_ref () where it is necessary.  I.e., when the lhs
     is reallocatable or the right-hand side has allocatable components.  */
  if (caf_attr->alloc_comp || caf_attr->pointer_comp || may_realloc)
    {
      /* Get using caf_get_by_ref.  */
      caf_reference = conv_expr_ref_to_caf_ref (&se->pre, array_expr);

      if (caf_reference != NULL_TREE)
	{
	  if (lhs == NULL_TREE)
	    {
	      /* argse is only needed to carry the string length below.  */
	      if (array_expr->ts.type == BT_CHARACTER)
		gfc_init_se (&argse, NULL);
	      if (array_expr->rank == 0)
		{
		  symbol_attribute attr;
		  gfc_clear_attr (&attr);
		  if (array_expr->ts.type == BT_CHARACTER)
		    {
		      res_var = gfc_conv_string_tmp (se,
						     build_pointer_type (type),
					array_expr->ts.u.cl->backend_decl);
		      argse.string_length = array_expr->ts.u.cl->backend_decl;
		    }
		  else
		    res_var = gfc_create_var (type, "caf_res");
		  dst_var = gfc_conv_scalar_to_descriptor (se, res_var, attr);
		  dst_var = gfc_build_addr_expr (NULL_TREE, dst_var);
		}
	      else
		{
		  /* Create temporary.  */
		  if (array_expr->ts.type == BT_CHARACTER)
		    gfc_conv_expr_descriptor (&argse, array_expr);
		  /* A NULL_TREE result means the temporary could not be
		     sized at compile time; the runtime must (re)allocate
		     it, hence may_realloc.  */
		  may_realloc = gfc_trans_create_temp_array (&se->pre,
							     &se->post,
							     se->ss, type,
							     NULL_TREE, false,
							     false, false,
							     &array_expr->where)
		      == NULL_TREE;
		  res_var = se->ss->info->data.array.descriptor;
		  dst_var = gfc_build_addr_expr (NULL_TREE, res_var);
		  if (may_realloc)
		    {
		      /* Free the runtime-allocated temporary afterwards.  */
		      tmp = gfc_conv_descriptor_data_get (res_var);
		      tmp = gfc_deallocate_with_status (tmp, NULL_TREE,
							NULL_TREE, NULL_TREE,
							NULL_TREE, true,
							NULL,
						GFC_CAF_COARRAY_NOCOARRAY);
		      gfc_add_expr_to_block (&se->post, tmp);
		    }
		}
	    }

	  kind = build_int_cst (integer_type_node, expr->ts.kind);
	  if (lhs_kind == NULL_TREE)
	    lhs_kind = kind;

	  caf_decl = gfc_get_tree_for_caf_expr (array_expr);
	  if (TREE_CODE (TREE_TYPE (caf_decl)) == REFERENCE_TYPE)
	    caf_decl = build_fold_indirect_ref_loc (input_location, caf_decl);
	  image_index = gfc_caf_get_image_index (&se->pre, array_expr,
						 caf_decl);
	  gfc_get_caf_token_offset (se, &token, NULL, caf_decl, NULL,
				    array_expr);

	  /* No overlap possible as we have generated a temporary.  */
	  if (lhs == NULL_TREE)
	    may_require_tmp = boolean_false_node;

	  /* It guarantees memory consistency within the same segment.  */
	  /* Emitted as: asm volatile ("" : : : "memory") — a compiler
	     barrier only.  */
	  tmp = gfc_build_string_const (strlen ("memory") + 1, "memory");
	  tmp = build5_loc (input_location, ASM_EXPR, void_type_node,
			    gfc_build_string_const (1, ""), NULL_TREE,
			    NULL_TREE, tree_cons (NULL_TREE, tmp, NULL_TREE),
			    NULL_TREE);
	  ASM_VOLATILE_P (tmp) = 1;
	  gfc_add_expr_to_block (&se->pre, tmp);

	  tmp = build_call_expr_loc (input_location, gfor_fndecl_caf_get_by_ref,
				     10, token, image_index, dst_var,
				     caf_reference, lhs_kind, kind,
				     may_require_tmp,
				     may_realloc ? boolean_true_node :
						   boolean_false_node,
				     stat, build_int_cst (integer_type_node,
							  array_expr->ts.type));

	  gfc_add_expr_to_block (&se->pre, tmp);

	  if (se->ss)
	    gfc_advance_se_ss_chain (se);

	  se->expr = res_var;
	  if (array_expr->ts.type == BT_CHARACTER)
	    se->string_length = argse.string_length;

	  return;
	}
    }

  /* Fall back to the plain caf_get path.  */
  gfc_init_se (&argse, NULL);
  if (array_expr->rank == 0)
    {
      symbol_attribute attr;

      gfc_clear_attr (&attr);
      gfc_conv_expr (&argse, array_expr);

      if (lhs == NULL_TREE)
	{
	  gfc_clear_attr (&attr);
	  if (array_expr->ts.type == BT_CHARACTER)
	    res_var = gfc_conv_string_tmp (se, build_pointer_type (type),
					   argse.string_length);
	  else
	    res_var = gfc_create_var (type, "caf_res");
	  dst_var = gfc_conv_scalar_to_descriptor (&argse, res_var, attr);
	  dst_var = gfc_build_addr_expr (NULL_TREE, dst_var);
	}
      argse.expr = gfc_conv_scalar_to_descriptor (&argse, argse.expr, attr);
      argse.expr = gfc_build_addr_expr (NULL_TREE, argse.expr);
    }
  else
    {
      /* If has_vector, pass descriptor for whole array and the
	 vector bounds separately.  */
      gfc_array_ref *ar, ar2;
      bool has_vector = false;

      if (gfc_is_coindexed (expr) && gfc_has_vector_subscript (expr))
	{
	  has_vector = true;
	  ar = gfc_find_array_ref (expr);
	  ar2 = *ar;
	  /* Temporarily flatten the ref to AR_FULL so the descriptor covers
	     the whole array; restored below after conversion.  */
	  memset (ar, '\0', sizeof (*ar));
	  ar->as = ar2.as;
	  ar->type = AR_FULL;
	}
      // TODO: Check whether argse.want_coarray = 1 can help with the below.
      gfc_conv_expr_descriptor (&argse, array_expr);
      /* Using gfc_conv_expr_descriptor, we only get the descriptor, but that
	 has the wrong type if component references are done.  */
      gfc_add_modify (&argse.pre, gfc_conv_descriptor_dtype (argse.expr),
		      gfc_get_dtype_rank_type (has_vector ? ar2.dimen
							  : array_expr->rank,
					       type));
      if (has_vector)
	{
	  vec = conv_caf_vector_subscript (&argse.pre, argse.expr, &ar2);
	  *ar = ar2;
	}

      if (lhs == NULL_TREE)
	{
	  /* Create temporary.  */
	  for (int n = 0; n < se->ss->loop->dimen; n++)
	    if (se->loop->to[n] == NULL_TREE)
	      {
		se->loop->from[n] = gfc_conv_descriptor_lbound_get (argse.expr,
							       gfc_rank_cst[n]);
		se->loop->to[n] = gfc_conv_descriptor_ubound_get (argse.expr,
							       gfc_rank_cst[n]);
	      }
	  gfc_trans_create_temp_array (&argse.pre, &argse.post, se->ss, type,
				       NULL_TREE, false, true, false,
				       &array_expr->where);
	  res_var = se->ss->info->data.array.descriptor;
	  dst_var = gfc_build_addr_expr (NULL_TREE, res_var);
	}
      argse.expr = gfc_build_addr_expr (NULL_TREE, argse.expr);
    }

  kind = build_int_cst (integer_type_node, expr->ts.kind);
  if (lhs_kind == NULL_TREE)
    lhs_kind = kind;

  gfc_add_block_to_block (&se->pre, &argse.pre);
  gfc_add_block_to_block (&se->post, &argse.post);

  caf_decl = gfc_get_tree_for_caf_expr (array_expr);
  if (TREE_CODE (TREE_TYPE (caf_decl)) == REFERENCE_TYPE)
    caf_decl = build_fold_indirect_ref_loc (input_location, caf_decl);
  image_index = gfc_caf_get_image_index (&se->pre, array_expr, caf_decl);
  gfc_get_caf_token_offset (se, &token, &offset, caf_decl, argse.expr,
			    array_expr);

  /* No overlap possible as we have generated a temporary.  */
  if (lhs == NULL_TREE)
    may_require_tmp = boolean_false_node;

  /* It guarantees memory consistency within the same segment.  */
  /* Emitted as: asm volatile ("" : : : "memory") — a compiler barrier.  */
  tmp = gfc_build_string_const (strlen ("memory") + 1, "memory");
  tmp = build5_loc (input_location, ASM_EXPR, void_type_node,
		    gfc_build_string_const (1, ""), NULL_TREE, NULL_TREE,
		    tree_cons (NULL_TREE, tmp, NULL_TREE), NULL_TREE);
  ASM_VOLATILE_P (tmp) = 1;
  gfc_add_expr_to_block (&se->pre, tmp);

  tmp = build_call_expr_loc (input_location, gfor_fndecl_caf_get, 10,
			     token, offset, image_index, argse.expr, vec,
			     dst_var, kind, lhs_kind, may_require_tmp, stat);

  gfc_add_expr_to_block (&se->pre, tmp);

  if (se->ss)
    gfc_advance_se_ss_chain (se);

  se->expr = res_var;
  if (array_expr->ts.type == BT_CHARACTER)
    se->string_length = argse.string_length;
}
| |
| |
| /* Send data to a remote coarray. */ |
| |
| static tree |
| conv_caf_send (gfc_code *code) { |
| gfc_expr *lhs_expr, *rhs_expr, *tmp_stat, *tmp_team; |
| gfc_se lhs_se, rhs_se; |
| stmtblock_t block; |
| tree caf_decl, token, offset, image_index, tmp, lhs_kind, rhs_kind; |
| tree may_require_tmp, src_stat, dst_stat, dst_team; |
| tree lhs_type = NULL_TREE; |
| tree vec = null_pointer_node, rhs_vec = null_pointer_node; |
| symbol_attribute lhs_caf_attr, rhs_caf_attr; |
| |
| gcc_assert (flag_coarray == GFC_FCOARRAY_LIB); |
| |
| lhs_expr = code->ext.actual->expr; |
| rhs_expr = code->ext.actual->next->expr; |
| may_require_tmp = gfc_check_dependency (lhs_expr, rhs_expr, true) == 0 |
| ? boolean_false_node : boolean_true_node; |
| gfc_init_block (&block); |
| |
| lhs_caf_attr = gfc_caf_attr (lhs_expr); |
| rhs_caf_attr = gfc_caf_attr (rhs_expr); |
| src_stat = dst_stat = null_pointer_node; |
| dst_team = null_pointer_node; |
| |
| /* LHS. */ |
| gfc_init_se (&lhs_se, NULL); |
| if (lhs_expr->rank == 0) |
| { |
| if (lhs_expr->ts.type == BT_CHARACTER && lhs_expr->ts.deferred) |
| { |
| lhs_se.expr = gfc_get_tree_for_caf_expr (lhs_expr); |
| lhs_se.expr = gfc_build_addr_expr (NULL_TREE, lhs_se.expr); |
| } |
| else |
| { |
| symbol_attribute attr; |
| gfc_clear_attr (&attr); |
| gfc_conv_expr (&lhs_se, lhs_expr); |
| lhs_type = TREE_TYPE (lhs_se.expr); |
| lhs_se.expr = gfc_conv_scalar_to_descriptor (&lhs_se, lhs_se.expr, |
| attr); |
| lhs_se.expr = gfc_build_addr_expr (NULL_TREE, lhs_se.expr); |
| } |
| } |
| else if ((lhs_caf_attr.alloc_comp || lhs_caf_attr.pointer_comp) |
| && lhs_caf_attr.codimension) |
| { |
| lhs_se.want_pointer = 1; |
| gfc_conv_expr_descriptor (&lhs_se, lhs_expr); |
| /* Using gfc_conv_expr_descriptor, we only get the descriptor, but that |
| has the wrong type if component references are done. */ |
| lhs_type = gfc_typenode_for_spec (&lhs_expr->ts); |
| tmp = build_fold_indirect_ref_loc (input_location, lhs_se.expr); |
| gfc_add_modify (&lhs_se.pre, gfc_conv_descriptor_dtype (tmp), |
| gfc_get_dtype_rank_type ( |
| gfc_has_vector_subscript (lhs_expr) |
| ? gfc_find_array_ref (lhs_expr)->dimen |
| : lhs_expr->rank, |
| lhs_type)); |
| } |
| else |
| { |
| bool has_vector = gfc_has_vector_subscript (lhs_expr); |
| |
| if (gfc_is_coindexed (lhs_expr) || !has_vector) |
| { |
| /* If has_vector, pass descriptor for whole array and the |
| vector bounds separately. */ |
| gfc_array_ref *ar, ar2; |
| bool has_tmp_lhs_array = false; |
| if (has_vector) |
| { |
| has_tmp_lhs_array = true; |
| ar = gfc_find_array_ref (lhs_expr); |
| ar2 = *ar; |
| memset (ar, '\0', sizeof (*ar)); |
| ar->as = ar2.as; |
| ar->type = AR_FULL; |
| } |
| lhs_se.want_pointer = 1; |
| gfc_conv_expr_descriptor (&lhs_se, lhs_expr); |
| /* Using gfc_conv_expr_descriptor, we only get the descriptor, but |
| that has the wrong type if component references are done. */ |
| lhs_type = gfc_typenode_for_spec (&lhs_expr->ts); |
| tmp = build_fold_indirect_ref_loc (input_location, lhs_se.expr); |
| gfc_add_modify (&lhs_se.pre, gfc_conv_descriptor_dtype (tmp), |
| gfc_get_dtype_rank_type (has_vector ? ar2.dimen |
| : lhs_expr->rank, |
| lhs_type)); |
| if (has_tmp_lhs_array) |
| { |
| vec = conv_caf_vector_subscript (&block, lhs_se.expr, &ar2); |
| *ar = ar2; |
| } |
| } |
| else |
| { |
| /* Special casing for arr1 ([...]) = arr2[...], i.e. caf_get to |
| indexed array expression. This is rewritten to: |
| |
| tmp_array = arr2[...] |
| arr1 ([...]) = tmp_array |
| |
| because using the standard gfc_conv_expr (lhs_expr) did the |
| assignment with lhs and rhs exchanged. */ |
| |
| gfc_ss *lss_for_tmparray, *lss_real; |
| gfc_loopinfo loop; |
| gfc_se se; |
| stmtblock_t body; |
| tree tmparr_desc, src; |
| tree index = gfc_index_zero_node; |
| tree stride = gfc_index_zero_node; |
| int n; |
| |
| /* Walk both sides of the assignment, once to get the shape of the |
| temporary array to create right. */ |
| lss_for_tmparray = gfc_walk_expr (lhs_expr); |
| /* And a second time to be able to create an assignment of the |
| temporary to the lhs_expr. gfc_trans_create_temp_array replaces |
| the tree in the descriptor with the one for the temporary |
| array. */ |
| lss_real = gfc_walk_expr (lhs_expr); |
| gfc_init_loopinfo (&loop); |
| gfc_add_ss_to_loop (&loop, lss_for_tmparray); |
| gfc_add_ss_to_loop (&loop, lss_real); |
| gfc_conv_ss_startstride (&loop); |
| gfc_conv_loop_setup (&loop, &lhs_expr->where); |
| lhs_type = gfc_typenode_for_spec (&lhs_expr->ts); |
| gfc_trans_create_temp_array (&lhs_se.pre, &lhs_se.post, |
| lss_for_tmparray, lhs_type, NULL_TREE, |
| false, true, false, |
| &lhs_expr->where); |
| tmparr_desc = lss_for_tmparray->info->data.array.descriptor; |
| gfc_start_scalarized_body (&loop, &body); |
| gfc_init_se (&se, NULL); |
| gfc_copy_loopinfo_to_se (&se, &loop); |
| se.ss = lss_real; |
| gfc_conv_expr (&se, lhs_expr); |
| gfc_add_block_to_block (&body, &se.pre); |
| |
| /* Walk over all indexes of the loop. */ |
| for (n = loop.dimen - 1; n > 0; --n) |
| { |
| tmp = loop.loopvar[n]; |
| tmp = fold_build2_loc (input_location, MINUS_EXPR, |
| gfc_array_index_type, tmp, loop.from[n]); |
| tmp = fold_build2_loc (input_location, PLUS_EXPR, |
| gfc_array_index_type, tmp, index); |
| |
| stride = fold_build2_loc (input_location, MINUS_EXPR, |
| gfc_array_index_type, |
| loop.to[n - 1], loop.from[n - 1]); |
| stride = fold_build2_loc (input_location, PLUS_EXPR, |
| gfc_array_index_type, |
| stride, gfc_index_one_node); |
| |
| index = fold_build2_loc (input_location, MULT_EXPR, |
| gfc_array_index_type, tmp, stride); |
| } |
| |
| index = fold_build2_loc (input_location, MINUS_EXPR, |
| gfc_array_index_type, |
| index, loop.from[0]); |
| |
| index = fold_build2_loc (input_location, PLUS_EXPR, |
| gfc_array_index_type, |
| loop.loopvar[0], index); |
| |
| src = build_fold_indirect_ref (gfc_conv_array_data (tmparr_desc)); |
| src = gfc_build_array_ref (src, index, NULL); |
| /* Now create the assignment of lhs_expr = tmp_array. */ |
| gfc_add_modify (&body, se.expr, src); |
| gfc_add_block_to_block (&body, &se.post); |
| lhs_se.expr = gfc_build_addr_expr (NULL_TREE, tmparr_desc); |
| gfc_trans_scalarizing_loops (&loop, &body); |
| gfc_add_block_to_block (&loop.pre, &loop.post); |
| gfc_add_expr_to_block (&lhs_se.post, gfc_finish_block (&loop.pre)); |
| gfc_free_ss (lss_for_tmparray); |
| gfc_free_ss (lss_real); |
| } |
| } |
| |
| lhs_kind = build_int_cst (integer_type_node, lhs_expr->ts.kind); |
| |
| /* Special case: RHS is a coarray but LHS is not; this code path avoids a |
| temporary and a loop. */ |
| if (!gfc_is_coindexed (lhs_expr) |
| && (!lhs_caf_attr.codimension |
| || !(lhs_expr->rank > 0 |
| && (lhs_caf_attr.allocatable || lhs_caf_attr.pointer)))) |
| { |
| bool lhs_may_realloc = lhs_expr->rank > 0 && lhs_caf_attr.allocatable; |
| gcc_assert (gfc_is_coindexed (rhs_expr)); |
| gfc_init_se (&rhs_se, NULL); |
| if (lhs_expr->rank == 0 && lhs_caf_attr.allocatable) |
| { |
| gfc_se scal_se; |
| gfc_init_se (&scal_se, NULL); |
| scal_se.want_pointer = 1; |
| gfc_conv_expr (&scal_se, lhs_expr); |
| /* Ensure scalar on lhs is allocated. */ |
| gfc_add_block_to_block (&block, &scal_se.pre); |
| |
| gfc_allocate_using_malloc (&scal_se.pre, scal_se.expr, |
| TYPE_SIZE_UNIT ( |
| gfc_typenode_for_spec (&lhs_expr->ts)), |
| NULL_TREE); |
| tmp = fold_build2 (EQ_EXPR, logical_type_node, scal_se.expr, |
| null_pointer_node); |
| tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node, |
| tmp, gfc_finish_block (&scal_se.pre), |
| build_empty_stmt (input_location)); |
| gfc_add_expr_to_block (&block, tmp); |
| } |
| else |
| lhs_may_realloc = lhs_may_realloc |
| && gfc_full_array_ref_p (lhs_expr->ref, NULL); |
| gfc_add_block_to_block (&block, &lhs_se.pre); |
| gfc_conv_intrinsic_caf_get (&rhs_se, rhs_expr, lhs_se.expr, lhs_kind, |
| may_require_tmp, lhs_may_realloc, |
| &rhs_caf_attr); |
| gfc_add_block_to_block (&block, &rhs_se.pre); |
| gfc_add_block_to_block (&block, &rhs_se.post); |
| gfc_add_block_to_block (&block, &lhs_se.post); |
| return gfc_finish_block (&block); |
| } |
| |
| gfc_add_block_to_block (&block, &lhs_se.pre); |
| |
| /* Obtain token, offset and image index for the LHS. */ |
| caf_decl = gfc_get_tree_for_caf_expr (lhs_expr); |
| if (TREE_CODE (TREE_TYPE (caf_decl)) == REFERENCE_TYPE) |
| caf_decl = build_fold_indirect_ref_loc (input_location, caf_decl); |
| image_index = gfc_caf_get_image_index (&block, lhs_expr, caf_decl); |
| tmp = lhs_se.expr; |
| if (lhs_caf_attr.alloc_comp) |
| gfc_get_caf_token_offset (&lhs_se, &token, NULL, caf_decl, NULL_TREE, |
| NULL); |
| else |
| gfc_get_caf_token_offset (&lhs_se, &token, &offset, caf_decl, tmp, |
| lhs_expr); |
| lhs_se.expr = tmp; |
| |
| /* RHS. */ |
| gfc_init_se (&rhs_se, NULL); |
| if (rhs_expr->expr_type == EXPR_FUNCTION && rhs_expr->value.function.isym |
| && rhs_expr->value.function.isym->id == GFC_ISYM_CONVERSION) |
| rhs_expr = rhs_expr->value.function.actual->expr; |
| if (rhs_expr->rank == 0) |
| { |
| symbol_attribute attr; |
| gfc_clear_attr (&attr); |
| gfc_conv_expr (&rhs_se, rhs_expr); |
| rhs_se.expr = gfc_conv_scalar_to_descriptor (&rhs_se, rhs_se.expr, attr); |
| rhs_se.expr = gfc_build_addr_expr (NULL_TREE, rhs_se.expr); |
| } |
| else if ((rhs_caf_attr.alloc_comp || rhs_caf_attr.pointer_comp) |
| && rhs_caf_attr.codimension) |
| { |
| tree tmp2; |
| rhs_se.want_pointer = 1; |
| gfc_conv_expr_descriptor (&rhs_se, rhs_expr); |
| /* Using gfc_conv_expr_descriptor, we only get the descriptor, but that |
| has the wrong type if component references are done. */ |
| tmp2 = gfc_typenode_for_spec (&rhs_expr->ts); |
| tmp = build_fold_indirect_ref_loc (input_location, rhs_se.expr); |
| gfc_add_modify (&rhs_se.pre, gfc_conv_descriptor_dtype (tmp), |
| gfc_get_dtype_rank_type ( |
| gfc_has_vector_subscript (rhs_expr) |
| ? gfc_find_array_ref (rhs_expr)->dimen |
| : rhs_expr->rank, |
| tmp2)); |
| } |
| else |
| { |
| /* If has_vector, pass descriptor for whole array and the |
| vector bounds separately. */ |
| gfc_array_ref *ar, ar2; |
| bool has_vector = false; |
| tree tmp2; |
| |
| if (gfc_is_coindexed (rhs_expr) && gfc_has_vector_subscript (rhs_expr)) |
| { |
| has_vector = true; |
| ar = gfc_find_array_ref (rhs_expr); |
| ar2 = *ar; |
| memset (ar, '\0', sizeof (*ar)); |
| ar->as = ar2.as; |
| ar->type = AR_FULL; |
| } |
| rhs_se.want_pointer = 1; |
| gfc_conv_expr_descriptor (&rhs_se, rhs_expr); |
| /* Using gfc_conv_expr_descriptor, we only get the descriptor, but that |
| has the wrong type if component references are done. */ |
| tmp = build_fold_indirect_ref_loc (input_location, rhs_se.expr); |
| tmp2 = gfc_typenode_for_spec (&rhs_expr->ts); |
| gfc_add_modify (&rhs_se.pre, gfc_conv_descriptor_dtype (tmp), |
| gfc_get_dtype_rank_type (has_vector ? ar2.dimen |
| : rhs_expr->rank, |
| tmp2)); |
| if (has_vector) |
| { |
| rhs_vec = conv_caf_vector_subscript (&block, rhs_se.expr, &ar2); |
| *ar = ar2; |
| } |
| } |
| |
| gfc_add_block_to_block (&block, &rhs_se.pre); |
| |
| rhs_kind = build_int_cst (integer_type_node, rhs_expr->ts.kind); |
| |
| tmp_stat = gfc_find_stat_co (lhs_expr); |
| |
| if (tmp_stat) |
| { |
| gfc_se stat_se; |
| gfc_init_se (&stat_se, NULL); |
| gfc_conv_expr_reference (&stat_se, tmp_stat); |
| dst_stat = stat_se.expr; |
| gfc_add_block_to_block (&block, &stat_se.pre); |
| gfc_add_block_to_block (&block, &stat_se.post); |
| } |
| |
| tmp_team = gfc_find_team_co (lhs_expr); |
| |
| if (tmp_team) |
| { |
| gfc_se team_se; |
| gfc_init_se (&team_se, NULL); |
| gfc_conv_expr_reference (&team_se, tmp_team); |
| dst_team = team_se.expr; |
| gfc_add_block_to_block (&block, &team_se.pre); |
| gfc_add_block_to_block (&block, &team_se.post); |
| } |
| |
| if (!gfc_is_coindexed (rhs_expr)) |
| { |
| if (lhs_caf_attr.alloc_comp || lhs_caf_attr.pointer_comp) |
| { |
| tree reference, dst_realloc; |
| reference = conv_expr_ref_to_caf_ref (&block, lhs_expr); |
| dst_realloc = lhs_caf_attr.allocatable ? boolean_true_node |
| : boolean_false_node; |
| tmp = build_call_expr_loc (input_location, |
| gfor_fndecl_caf_send_by_ref, |
| 10, token, image_index, rhs_se.expr, |
| reference, lhs_kind, rhs_kind, |
| may_require_tmp, dst_realloc, src_stat, |
| build_int_cst (integer_type_node, |
| lhs_expr->ts.type)); |
| } |
| else |
| tmp = build_call_expr_loc (input_location, gfor_fndecl_caf_send, 11, |
| token, offset, image_index, lhs_se.expr, vec, |
| rhs_se.expr, lhs_kind, rhs_kind, |
| may_require_tmp, src_stat, dst_team); |
| } |
| else |
| { |
| tree rhs_token, rhs_offset, rhs_image_index; |
| |
| /* It guarantees memory consistency within the same segment. */ |
| tmp = gfc_build_string_const (strlen ("memory") + 1, "memory"); |
| tmp = build5_loc (input_location, ASM_EXPR, void_type_node, |
| gfc_build_string_const (1, ""), NULL_TREE, NULL_TREE, |
| tree_cons (NULL_TREE, tmp, NULL_TREE), NULL_TREE); |
| ASM_VOLATILE_P (tmp) = 1; |
| gfc_add_expr_to_block (&block, tmp); |
| |
| caf_decl = gfc_get_tree_for_caf_expr (rhs_expr); |
| if (TREE_CODE (TREE_TYPE (caf_decl)) == REFERENCE_TYPE) |
| caf_decl = build_fold_indirect_ref_loc (input_location, caf_decl); |
| rhs_image_index = gfc_caf_get_image_index (&block, rhs_expr, caf_decl); |
| tmp = rhs_se.expr; |
| if (rhs_caf_attr.alloc_comp || rhs_caf_attr.pointer_comp) |
| { |
| tmp_stat = gfc_find_stat_co (lhs_expr); |
| |
| if (tmp_stat) |
| { |
| gfc_se stat_se; |
| gfc_init_se (&stat_se, NULL); |
| gfc_conv_expr_reference (&stat_se, tmp_stat); |
| src_stat = stat_se.expr; |
| gfc_add_block_to_block (&block, &stat_se.pre); |
| gfc_add_block_to_block (&block, &stat_se.post); |
| } |
| |
| gfc_get_caf_token_offset (&rhs_se, &rhs_token, NULL, caf_decl, |
| NULL_TREE, NULL); |
| tree lhs_reference, rhs_reference; |
| lhs_reference = conv_expr_ref_to_caf_ref (&block, lhs_expr); |
| rhs_reference = conv_expr_ref_to_caf_ref (&block, rhs_expr); |
| tmp = build_call_expr_loc (input_location, |
| gfor_fndecl_caf_sendget_by_ref, 13, |
| token, image_index, lhs_reference, |
| rhs_token, rhs_image_index, rhs_reference, |
| lhs_kind, rhs_kind, may_require_tmp, |
| dst_stat, src_stat, |
| build_int_cst (integer_type_node, |
| lhs_expr->ts.type), |
| build_int_cst (integer_type_node, |
| rhs_expr->ts.type)); |
| } |
| else |
| { |
| gfc_get_caf_token_offset (&rhs_se, &rhs_token, &rhs_offset, caf_decl, |
| tmp, rhs_expr); |
| tmp = build_call_expr_loc (input_location, gfor_fndecl_caf_sendget, |
| 14, token, offset, image_index, |
| lhs_se.expr, vec, rhs_token, rhs_offset, |
| rhs_image_index, tmp, rhs_vec, lhs_kind, |
| rhs_kind, may_require_tmp, src_stat); |
| } |
| } |
| gfc_add_expr_to_block (&block, tmp); |
| gfc_add_block_to_block (&block, &lhs_se.post); |
| gfc_add_block_to_block (&block, &rhs_se.post); |
| |
| /* It guarantees memory consistency within the same segment. */ |
| tmp = gfc_build_string_const (strlen ("memory") + 1, "memory"); |
| tmp = build5_loc (input_location, ASM_EXPR, void_type_node, |
| gfc_build_string_const (1, ""), NULL_TREE, NULL_TREE, |
| tree_cons (NULL_TREE, tmp, NULL_TREE), NULL_TREE); |
| ASM_VOLATILE_P (tmp) = 1; |
| gfc_add_expr_to_block (&block, tmp); |
| |
| return gfc_finish_block (&block); |
| } |
| |
| |
/* Lower a call to the THIS_IMAGE intrinsic.  Handles both the
   argument-free / DISTANCE form, which becomes a call into the coarray
   runtime, and the coarray form THIS_IMAGE (coarray [, dim]), which
   computes the cosubscript(s) from the image number following the
   algorithm of Fortran 2008, C.10.  The -fcoarray=single case is
   handled elsewhere (asserted below).  */

static void
trans_this_image (gfc_se * se, gfc_expr *expr)
{
  stmtblock_t loop;
  tree type, desc, dim_arg, cond, tmp, m, loop_var, exit_label, min_var,
       lbound, ubound, extent, ml;
  gfc_se argse;
  int rank, corank;
  gfc_expr *distance = expr->value.function.actual->next->next->expr;

  /* A present first argument that is not a coarray is the DISTANCE
     argument of the THIS_IMAGE (distance) form.  */
  if (expr->value.function.actual->expr
      && !gfc_is_coarray (expr->value.function.actual->expr))
    distance = expr->value.function.actual->expr;

  /* The case -fcoarray=single is handled elsewhere.  */
  gcc_assert (flag_coarray != GFC_FCOARRAY_SINGLE);

  /* Argument-free version: THIS_IMAGE().  */
  if (distance || expr->value.function.actual->expr == NULL)
    {
      if (distance)
	{
	  gfc_init_se (&argse, NULL);
	  gfc_conv_expr_val (&argse, distance);
	  gfc_add_block_to_block (&se->pre, &argse.pre);
	  gfc_add_block_to_block (&se->post, &argse.post);
	  tmp = fold_convert (integer_type_node, argse.expr);
	}
      else
	tmp = integer_zero_node;
      tmp = build_call_expr_loc (input_location, gfor_fndecl_caf_this_image, 1,
				 tmp);
      se->expr = fold_convert (gfc_get_int_type (gfc_default_integer_kind),
			       tmp);
      return;
    }

  /* Coarray-argument version: THIS_IMAGE(coarray [, dim]).  */

  type = gfc_get_int_type (gfc_default_integer_kind);
  corank = gfc_get_corank (expr->value.function.actual->expr);
  rank = expr->value.function.actual->expr->rank;

  /* Obtain the descriptor of the COARRAY.  */
  gfc_init_se (&argse, NULL);
  argse.want_coarray = 1;
  gfc_conv_expr_descriptor (&argse, expr->value.function.actual->expr);
  gfc_add_block_to_block (&se->pre, &argse.pre);
  gfc_add_block_to_block (&se->post, &argse.post);
  desc = argse.expr;

  if (se->ss)
    {
      /* Create an implicit second parameter from the loop variable.  */
      gcc_assert (!expr->value.function.actual->next->expr);
      gcc_assert (corank > 0);
      gcc_assert (se->loop->dimen == 1);
      gcc_assert (se->ss->info->expr == expr);

      /* The scalarizer's loop variable is zero-based; DIM is one-based,
	 hence the "+ 1".  */
      dim_arg = se->loop->loopvar[0];
      dim_arg = fold_build2_loc (input_location, PLUS_EXPR,
				 gfc_array_index_type, dim_arg,
				 build_int_cst (TREE_TYPE (dim_arg), 1));
      gfc_advance_se_ss_chain (se);
    }
  else
    {
      /* Use the passed DIM= argument.  */
      gcc_assert (expr->value.function.actual->next->expr);
      gfc_init_se (&argse, NULL);
      gfc_conv_expr_type (&argse, expr->value.function.actual->next->expr,
			  gfc_array_index_type);
      gfc_add_block_to_block (&se->pre, &argse.pre);
      dim_arg = argse.expr;

      if (INTEGER_CST_P (dim_arg))
	{
	  /* Constant DIM: diagnose an out-of-range value (dim < 1 or
	     dim > corank) at compile time.  */
	  if (wi::ltu_p (wi::to_wide (dim_arg), 1)
	      || wi::gtu_p (wi::to_wide (dim_arg),
			    GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))))
	    gfc_error ("%<dim%> argument of %s intrinsic at %L is not a valid "
		       "dimension index", expr->value.function.isym->name,
		       &expr->where);
	}
      else if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
	{
	  /* Non-constant DIM under -fcheck=bounds: emit a run-time check
	     that 1 <= dim <= corank.  */
	  dim_arg = gfc_evaluate_now (dim_arg, &se->pre);
	  cond = fold_build2_loc (input_location, LT_EXPR, logical_type_node,
				  dim_arg,
				  build_int_cst (TREE_TYPE (dim_arg), 1));
	  tmp = gfc_rank_cst[GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))];
	  tmp = fold_build2_loc (input_location, GT_EXPR, logical_type_node,
				 dim_arg, tmp);
	  cond = fold_build2_loc (input_location, TRUTH_ORIF_EXPR,
				  logical_type_node, cond, tmp);
	  gfc_trans_runtime_check (true, false, cond, &se->pre, &expr->where,
				   gfc_msg_fault);
	}
    }

  /* Used algorithm; cf. Fortran 2008, C.10.  Note, due to the scalarizer,
     one always has a dim_arg argument.

     m = this_image() - 1
     if (corank == 1)
       {
	 sub(1) = m + lcobound(corank)
	 return;
       }
     i = rank
     min_var = min (rank + corank - 2, rank + dim_arg - 1)
     for (;;)
       {
	 extent = gfc_extent(i)
	 ml = m
	 m  = m/extent
	 if (i >= min_var)
	   goto exit_label
	 i++
       }
     exit_label:
     sub(dim_arg) = (dim_arg < corank) ? ml - m*extent + lcobound(dim_arg)
				       : m + lcobound(corank)
  */

  /* this_image () - 1.  */
  tmp = build_call_expr_loc (input_location, gfor_fndecl_caf_this_image, 1,
			     integer_zero_node);
  tmp = fold_build2_loc (input_location, MINUS_EXPR, type,
			 fold_convert (type, tmp), build_int_cst (type, 1));
  if (corank == 1)
    {
      /* Single codimension: no loop needed.
	 sub(1) = m + lcobound(corank).  */
      lbound = gfc_conv_descriptor_lbound_get (desc,
			build_int_cst (TREE_TYPE (gfc_array_index_type),
				       corank+rank-1));
      lbound = fold_convert (type, lbound);
      tmp = fold_build2_loc (input_location, PLUS_EXPR, type, tmp, lbound);

      se->expr = tmp;
      return;
    }

  m = gfc_create_var (type, NULL);
  ml = gfc_create_var (type, NULL);
  loop_var = gfc_create_var (integer_type_node, NULL);
  min_var = gfc_create_var (integer_type_node, NULL);

  /* m = this_image () - 1.  */
  gfc_add_modify (&se->pre, m, tmp);

  /* min_var = min (rank + corank-2, rank + dim_arg - 1).  */
  tmp = fold_build2_loc (input_location, PLUS_EXPR, integer_type_node,
			 fold_convert (integer_type_node, dim_arg),
			 build_int_cst (integer_type_node, rank - 1));
  tmp = fold_build2_loc (input_location, MIN_EXPR, integer_type_node,
			 build_int_cst (integer_type_node, rank + corank - 2),
			 tmp);
  gfc_add_modify (&se->pre, min_var, tmp);

  /* i = rank.  */
  tmp = build_int_cst (integer_type_node, rank);
  gfc_add_modify (&se->pre, loop_var, tmp);

  exit_label = gfc_build_label_decl (NULL_TREE);
  TREE_USED (exit_label) = 1;

  /* Loop body.  */
  gfc_init_block (&loop);

  /* ml = m.  */
  gfc_add_modify (&loop, ml, m);

  /* extent = ...; NOTE that loop_var indexes the descriptor's combined
     array+coarray dimensions here.  */
  lbound = gfc_conv_descriptor_lbound_get (desc, loop_var);
  ubound = gfc_conv_descriptor_ubound_get (desc, loop_var);
  extent = gfc_conv_array_extent_dim (lbound, ubound, NULL);
  extent = fold_convert (type, extent);

  /* m = m/extent.  */
  gfc_add_modify (&loop, m,
		  fold_build2_loc (input_location, TRUNC_DIV_EXPR, type,
				   m, extent));

  /* Exit condition:  if (i >= min_var) goto exit_label.  */
  cond = fold_build2_loc (input_location, GE_EXPR, logical_type_node, loop_var,
			  min_var);
  tmp = build1_v (GOTO_EXPR, exit_label);
  tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node, cond, tmp,
			 build_empty_stmt (input_location));
  gfc_add_expr_to_block (&loop, tmp);

  /* Increment loop variable: i++.  */
  gfc_add_modify (&loop, loop_var,
		  fold_build2_loc (input_location, PLUS_EXPR, integer_type_node,
				   loop_var,
				   build_int_cst (integer_type_node, 1)));

  /* Making the loop... actually loop!  */
  tmp = gfc_finish_block (&loop);
  tmp = build1_v (LOOP_EXPR, tmp);
  gfc_add_expr_to_block (&se->pre, tmp);

  /* The exit label.  */
  tmp = build1_v (LABEL_EXPR, exit_label);
  gfc_add_expr_to_block (&se->pre, tmp);

  /* sub(co_dim) = (co_dim < corank) ? ml - m*extent + lcobound(dim_arg)
				     : m + lcobound(corank) */

  cond = fold_build2_loc (input_location, LT_EXPR, logical_type_node, dim_arg,
			  build_int_cst (TREE_TYPE (dim_arg), corank));

  lbound = gfc_conv_descriptor_lbound_get (desc,
		fold_build2_loc (input_location, PLUS_EXPR,
				 gfc_array_index_type, dim_arg,
				 build_int_cst (TREE_TYPE (dim_arg), rank-1)));
  lbound = fold_convert (type, lbound);

  tmp = fold_build2_loc (input_location, MINUS_EXPR, type, ml,
			 fold_build2_loc (input_location, MULT_EXPR, type,
					  m, extent));
  tmp = fold_build2_loc (input_location, PLUS_EXPR, type, tmp, lbound);

  se->expr = fold_build3_loc (input_location, COND_EXPR, type, cond, tmp,
			      fold_build2_loc (input_location, PLUS_EXPR, type,
					       m, lbound));
}
| |
| |
| /* Convert a call to image_status. */ |
| |
| static void |
| conv_intrinsic_image_status (gfc_se *se, gfc_expr *expr) |
| { |
| unsigned int num_args; |
| tree *args, tmp; |
| |
| num_args = gfc_intrinsic_argument_list_length (expr); |
| args = XALLOCAVEC (tree, num_args); |
| gfc_conv_intrinsic_function_args (se, expr, args, num_args); |
| /* In args[0] the number of the image the status is desired for has to be |
| given. */ |
| |
| if (flag_coarray == GFC_FCOARRAY_SINGLE) |
| { |
| tree arg; |
| arg = gfc_evaluate_now (args[0], &se->pre); |
| tmp = fold_build2_loc (input_location, EQ_EXPR, logical_type_node, |
| fold_convert (integer_type_node, arg), |
| integer_one_node); |
| tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, |
| tmp, integer_zero_node, |
| build_int_cst (integer_type_node, |
| GFC_STAT_STOPPED_IMAGE)); |
| } |
| else if (flag_coarray == GFC_FCOARRAY_LIB) |
| tmp = build_call_expr_loc (input_location, gfor_fndecl_caf_image_status, 2, |
| args[0], build_int_cst (integer_type_node, -1)); |
| else |
| gcc_unreachable (); |
| |
| se->expr = fold_convert (gfc_get_int_type (gfc_default_integer_kind), tmp); |
| } |
| |
| static void |
| conv_intrinsic_team_number (gfc_se *se, gfc_expr *expr) |
| { |
| unsigned int num_args; |
| |
| tree *args, tmp; |
| |
| num_args = gfc_intrinsic_argument_list_length (expr); |
| args = XALLOCAVEC (tree, num_args); |
| gfc_conv_intrinsic_function_args (se, expr, args, num_args); |
| |
| if (flag_coarray == |
| GFC_FCOARRAY_SINGLE && expr->value.function.actual->expr) |
| { |
| tree arg; |
| |
| arg = gfc_evaluate_now (args[0], &se->pre); |
| tmp = fold_build2_loc (input_location, EQ_EXPR, logical_type_node, |
| fold_convert (integer_type_node, arg), |
| integer_one_node); |
| tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, |
| tmp, integer_zero_node, |
| build_int_cst (integer_type_node, |
| GFC_STAT_STOPPED_IMAGE)); |
| } |
| else if (flag_coarray == GFC_FCOARRAY_SINGLE) |
| { |
| // the value -1 represents that no team has been created yet |
| tmp = build_int_cst (integer_type_node, -1); |
| } |
| else if (flag_coarray == GFC_FCOARRAY_LIB && expr->value.function.actual->expr) |
| tmp = build_call_expr_loc (input_location, gfor_fndecl_caf_team_number, 1, |
| args[0], build_int_cst (integer_type_node, -1)); |
| else if (flag_coarray == GFC_FCOARRAY_LIB) |
| tmp = build_call_expr_loc (input_location, gfor_fndecl_caf_team_number, 1, |
| integer_zero_node, build_int_cst (integer_type_node, -1)); |
| else |
| gcc_unreachable (); |
| |
| se->expr = fold_convert (gfc_get_int_type (gfc_default_integer_kind), tmp); |
| } |
| |
| |
/* Lower a call to IMAGE_INDEX (coarray, sub): map the cosubscript vector
   SUB to the corresponding image index, yielding 0 when SUB lies outside
   the cobounds or the computed index exceeds num_images().  Follows the
   algorithm of Fortran 2008, C.10.  */

static void
trans_image_index (gfc_se * se, gfc_expr *expr)
{
  tree num_images, cond, coindex, type, lbound, ubound, desc, subdesc,
       tmp, invalid_bound;
  gfc_se argse, subse;
  int rank, corank, codim;

  type = gfc_get_int_type (gfc_default_integer_kind);
  corank = gfc_get_corank (expr->value.function.actual->expr);
  rank = expr->value.function.actual->expr->rank;

  /* Obtain the descriptor of the COARRAY.  */
  gfc_init_se (&argse, NULL);
  argse.want_coarray = 1;
  gfc_conv_expr_descriptor (&argse, expr->value.function.actual->expr);
  gfc_add_block_to_block (&se->pre, &argse.pre);
  gfc_add_block_to_block (&se->post, &argse.post);
  desc = argse.expr;

  /* Obtain a handle to the SUB argument.  */
  gfc_init_se (&subse, NULL);
  gfc_conv_expr_descriptor (&subse, expr->value.function.actual->next->expr);
  gfc_add_block_to_block (&se->pre, &subse.pre);
  gfc_add_block_to_block (&se->post, &subse.post);
  subdesc = build_fold_indirect_ref_loc (input_location,
			gfc_conv_descriptor_data_get (subse.expr));

  /* Fortran 2008 does not require that the values remain in the cobounds,
     thus we need explicitly check this - and return 0 if they are exceeded.  */

  /* The codimensions occupy descriptor dimensions rank .. rank+corank-1.
     Only the lower-bound check is done for the last codimension (only a
     LT comparison below); presumably because it has no upper cobound.  */
  lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[rank+corank-1]);
  tmp = gfc_build_array_ref (subdesc, gfc_rank_cst[corank-1], NULL);
  invalid_bound = fold_build2_loc (input_location, LT_EXPR, logical_type_node,
				   fold_convert (gfc_array_index_type, tmp),
				   lbound);

  for (codim = corank + rank - 2; codim >= rank; codim--)
    {
      /* Flag sub(codim) < lcobound(codim) or sub(codim) > ucobound(codim).  */
      lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[codim]);
      ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[codim]);
      tmp = gfc_build_array_ref (subdesc, gfc_rank_cst[codim-rank], NULL);
      cond = fold_build2_loc (input_location, LT_EXPR, logical_type_node,
			      fold_convert (gfc_array_index_type, tmp),
			      lbound);
      invalid_bound = fold_build2_loc (input_location, TRUTH_OR_EXPR,
				       logical_type_node, invalid_bound, cond);
      cond = fold_build2_loc (input_location, GT_EXPR, logical_type_node,
			      fold_convert (gfc_array_index_type, tmp),
			      ubound);
      invalid_bound = fold_build2_loc (input_location, TRUTH_OR_EXPR,
				       logical_type_node, invalid_bound, cond);
    }

  invalid_bound = gfc_unlikely (invalid_bound, PRED_FORTRAN_INVALID_BOUND);

  /* See Fortran 2008, C.10 for the following algorithm.  */

  /* coindex = sub(corank) - lcobound(n).  */
  coindex = fold_convert (gfc_array_index_type,
			  gfc_build_array_ref (subdesc, gfc_rank_cst[corank-1],
					       NULL));
  lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[rank+corank-1]);
  coindex = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
			     fold_convert (gfc_array_index_type, coindex),
			     lbound);

  /* Horner-style multiply-accumulate over the remaining codimensions,
     walking from the last-but-one down to the first.  */
  for (codim = corank + rank - 2; codim >= rank; codim--)
    {
      tree extent, ubound;

      /* coindex = coindex*extent(codim) + sub(codim) - lcobound(codim).  */
      lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[codim]);
      ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[codim]);
      extent = gfc_conv_array_extent_dim (lbound, ubound, NULL);

      /* coindex *= extent.  */
      coindex = fold_build2_loc (input_location, MULT_EXPR,
				 gfc_array_index_type, coindex, extent);

      /* coindex += sub(codim).  */
      tmp = gfc_build_array_ref (subdesc, gfc_rank_cst[codim-rank], NULL);
      coindex = fold_build2_loc (input_location, PLUS_EXPR,
				 gfc_array_index_type, coindex,
				 fold_convert (gfc_array_index_type, tmp));

      /* coindex -= lbound(codim).  */
      lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[codim]);
      coindex = fold_build2_loc (input_location, MINUS_EXPR,
				 gfc_array_index_type, coindex, lbound);
    }

  /* Image indices are one-based, hence the "+ 1".  */
  coindex = fold_build2_loc (input_location, PLUS_EXPR, type,
			     fold_convert(type, coindex),
			     build_int_cst (type, 1));

  /* Return 0 if "coindex" exceeds num_images().  */

  if (flag_coarray == GFC_FCOARRAY_SINGLE)
    num_images = build_int_cst (type, 1);
  else
    {
      tmp = build_call_expr_loc (input_location, gfor_fndecl_caf_num_images, 2,
				 integer_zero_node,
				 build_int_cst (integer_type_node, -1));
      num_images = fold_convert (type, tmp);
    }

  /* Evaluate the coindex exactly once, then select between it and 0.  */
  tmp = gfc_create_var (type, NULL);
  gfc_add_modify (&se->pre, tmp, coindex);

  cond = fold_build2_loc (input_location, GT_EXPR, logical_type_node, tmp,
			  num_images);
  cond = fold_build2_loc (input_location, TRUTH_OR_EXPR, logical_type_node,
			  cond,
			  fold_convert (logical_type_node, invalid_bound));
  se->expr = fold_build3_loc (input_location, COND_EXPR, type, cond,
			      build_int_cst (type, 0), tmp);
}
| |
| static void |
| trans_num_images (gfc_se * se, gfc_expr *expr) |
| { |
| tree tmp, distance, failed; |
| gfc_se argse; |
| |
| if (expr->value.function.actual->expr) |
| { |
| gfc_init_se (&argse, NULL); |
| gfc_conv_expr_val (&argse, expr->value.function.actual->expr); |
| gfc_add_block_to_block (&se->pre, &argse.pre); |
| gfc_add_block_to_block (&se->post, &argse.post); |
| distance = fold_convert (integer_type_node, argse.expr); |
| } |
| else |
| distance = integer_zero_node; |
| |
| if (expr->value.function.actual->next->expr) |
| { |
| gfc_init_se (&argse, NULL); |
| gfc_conv_expr_val (&argse, expr->value.function.actual->next->expr); |
| gfc_add_block_to_block (&se->pre, &argse.pre); |
| gfc_add_block_to_block (&se->post, &argse.post); |
| failed = fold_convert (integer_type_node, argse.expr); |
| } |
| else |
| failed = build_int_cst (integer_type_node, -1); |
| tmp = build_call_expr_loc (input_location, gfor_fndecl_caf_num_images, 2, |
| distance, failed); |
| se->expr = fold_convert (gfc_get_int_type (gfc_default_integer_kind), tmp); |
| } |
| |
| |
| static void |
| gfc_conv_intrinsic_rank (gfc_se *se, gfc_expr *expr) |
| { |
| gfc_se argse; |
| |
| gfc_init_se (&argse, NULL); |
| argse.data_not_needed = 1; |
| argse.descriptor_only = 1; |
| |
| gfc_conv_expr_descriptor (&argse, expr->value.function.actual->expr); |
| gfc_add_block_to_block (&se->pre, &argse.pre); |
| gfc_add_block_to_block (&se->post, &argse.post); |
| |
| se->expr = gfc_conv_descriptor_rank (argse.expr); |
| se->expr = fold_convert (gfc_get_int_type (gfc_default_integer_kind), |
| se->expr); |
| } |
| |
| |
| static void |
| gfc_conv_intrinsic_is_contiguous (gfc_se * se, gfc_expr * expr) |
| { |
| gfc_expr *arg; |
| arg = expr->value.function.actual->expr; |
| gfc_conv_is_contiguous_expr (se, arg); |
| se->expr = fold_convert (gfc_typenode_for_spec (&expr->ts), se->expr); |
| } |
| |
/* This function does the work for gfc_conv_intrinsic_is_contiguous,
   plus it can be called directly.  It stores into SE an expression of
   logical type that is true iff the array ARG is contiguous (and not
   zero-sized).  */

void
gfc_conv_is_contiguous_expr (gfc_se *se, gfc_expr *arg)
{
  gfc_ss *ss;
  gfc_se argse;
  tree desc, tmp, stride, extent, cond;
  int i;
  tree fncall0;
  gfc_array_spec *as;

  /* For polymorphic arguments, operate on the contained array.  */
  if (arg->ts.type == BT_CLASS)
    gfc_add_class_array_ref (arg);

  ss = gfc_walk_expr (arg);
  gcc_assert (ss != gfc_ss_terminator);
  gfc_init_se (&argse, NULL);
  argse.data_not_needed = 1;
  gfc_conv_expr_descriptor (&argse, arg);

  as = gfc_get_full_arrayspec_from_expr (arg);

  /* Create: stride[0] == 1 && stride[1] == extend[0]*stride[0] && ...
     Note in addition that zero-sized arrays don't count as contiguous.  */

  if (as && as->type == AS_ASSUMED_RANK)
    {
      /* The rank is unknown at compile time, so defer to the library
	 helper, which inspects the descriptor at run time.  */
      /* Build the call to is_contiguous0.  */
      argse.want_pointer = 1;
      gfc_conv_expr_descriptor (&argse, arg);
      gfc_add_block_to_block (&se->pre, &argse.pre);
      gfc_add_block_to_block (&se->post, &argse.post);
      desc = gfc_evaluate_now (argse.expr, &se->pre);
      fncall0 = build_call_expr_loc (input_location,
				     gfor_fndecl_is_contiguous0, 1, desc);
      se->expr = fncall0;
      se->expr = convert (logical_type_node, se->expr);
    }
  else
    {
      gfc_add_block_to_block (&se->pre, &argse.pre);
      gfc_add_block_to_block (&se->post, &argse.post);
      desc = gfc_evaluate_now (argse.expr, &se->pre);

      /* First dimension: contiguous iff its stride is exactly one.  */
      stride = gfc_conv_descriptor_stride_get (desc, gfc_rank_cst[0]);
      cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
			      stride, build_int_cst (TREE_TYPE (stride), 1));

      /* Remaining dimensions: require
	 stride[i+1] == (ubound[i] - lbound[i] + 1) * stride[i].  */
      for (i = 0; i < arg->rank - 1; i++)
	{
	  /* extent = ubound[i] - lbound[i] + 1.  */
	  tmp = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[i]);
	  extent = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[i]);
	  extent = fold_build2_loc (input_location, MINUS_EXPR,
				    gfc_array_index_type, extent, tmp);
	  extent = fold_build2_loc (input_location, PLUS_EXPR,
				    gfc_array_index_type, extent,
				    gfc_index_one_node);
	  tmp = gfc_conv_descriptor_stride_get (desc, gfc_rank_cst[i]);
	  tmp = fold_build2_loc (input_location, MULT_EXPR, TREE_TYPE (tmp),
				 tmp, extent);
	  stride = gfc_conv_descriptor_stride_get (desc, gfc_rank_cst[i+1]);
	  tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
				 stride, tmp);
	  cond = fold_build2_loc (input_location, TRUTH_AND_EXPR,
				  boolean_type_node, cond, tmp);
	}
      se->expr = cond;
    }
}
| |
| |
| /* Evaluate a single upper or lower bound. */ |
| /* TODO: bound intrinsic generates way too much unnecessary code. */ |
| |
| static void |
| gfc_conv_intrinsic_bound (gfc_se * se, gfc_expr * expr, enum gfc_isym_id op) |
| { |
| gfc_actual_arglist *arg; |
| gfc_actual_arglist *arg2; |
| tree desc; |
| tree type; |
| tree bound; |
| tree tmp; |
| tree cond, cond1; |
| tree ubound; |
| tree lbound; |
| tree size; |
| gfc_se argse; |
| gfc_array_spec * as; |
| bool assumed_rank_lb_one; |
| |
| arg = expr->value.function.actual; |
| arg2 = arg->next; |
| |
| if (se->ss) |
| { |
| /* Create an implicit second parameter from the loop variable. */ |
| gcc_assert (!arg2->expr || op == GFC_ISYM_SHAPE); |
| gcc_assert (se->loop->dimen == 1); |
| gcc_assert (se->ss->info->expr == expr); |
| gfc_advance_se_ss_chain (se); |
| bound = se->loop->loopvar[0]; |
| bound = fold_build2_loc (input_location, MINUS_EXPR, |
| gfc_array_index_type, bound, |
| se->loop->from[0]); |
| } |
| else |
| { |
| /* use the passed argument. */ |
| gcc_assert (arg2->expr); |
| gfc_init_se (&argse, NULL); |
| gfc_conv_expr_type (&argse, arg2->expr, gfc_array_index_type); |
| gfc_add_block_to_block (&se->pre, &argse.pre); |
| bound = argse.expr; |
| /* Convert from one based to zero based. */ |
| bound = fold_build2_loc (input_location, MINUS_EXPR, |
| gfc_array_index_type, bound, |
| |