| /* Intrinsic translation |
| Copyright (C) 2002-2013 Free Software Foundation, Inc. |
| Contributed by Paul Brook <paul@nowt.org> |
| and Steven Bosscher <s.bosscher@student.tudelft.nl> |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it under |
| the terms of the GNU General Public License as published by the Free |
| Software Foundation; either version 3, or (at your option) any later |
| version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
| WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with GCC; see the file COPYING3. If not see |
| <http://www.gnu.org/licenses/>. */ |
| |
| /* trans-intrinsic.c-- generate GENERIC trees for calls to intrinsics. */ |
| |
| #include "config.h" |
| #include "system.h" |
| #include "coretypes.h" |
| #include "tm.h" /* For UNITS_PER_WORD. */ |
| #include "tree.h" |
| #include "ggc.h" |
| #include "diagnostic-core.h" /* For internal_error. */ |
| #include "toplev.h" /* For rest_of_decl_compilation. */ |
| #include "flags.h" |
| #include "gfortran.h" |
| #include "arith.h" |
| #include "intrinsic.h" |
| #include "trans.h" |
| #include "trans-const.h" |
| #include "trans-types.h" |
| #include "trans-array.h" |
| /* Only for gfc_trans_assign and gfc_trans_pointer_assign. */ |
| #include "trans-stmt.h" |
| |
/* This maps Fortran intrinsic math functions to external library or GCC
   builtin functions.  One table entry per intrinsic; the table itself
   (gfc_intrinsic_map below) is terminated by a GFC_ISYM_NONE entry.  */
typedef struct GTY(()) gfc_intrinsic_map_t {
  /* The explicit enum is required to work around inadequacies in the
     garbage collection/gengtype parsing mechanism.  */
  enum gfc_isym_id id;

  /* Enum value from the "language-independent", aka C-centric, part
     of gcc, or END_BUILTINS of no such value set.  */
  enum built_in_function float_built_in;
  enum built_in_function double_built_in;
  enum built_in_function long_double_built_in;
  enum built_in_function complex_float_built_in;
  enum built_in_function complex_double_built_in;
  enum built_in_function complex_long_double_built_in;

  /* True if the naming pattern is to prepend "c" for complex and
     append "f" for kind=4.  False if the naming pattern is to
     prepend "_gfortran_" and append "[rc](4|8|10|16)".  */
  bool libm_name;

  /* True if a complex version of the function exists.  */
  bool complex_available;

  /* True if the function should be marked const (__attribute__((const))),
     i.e. it has no side effects and depends only on its arguments.  */
  bool is_constant;

  /* The base library name of this function.  */
  const char *name;

  /* Cache decls created for the various operand types.  These are
     filled in lazily by gfc_get_intrinsic_lib_fndecl and eagerly for
     builtins by gfc_build_intrinsic_lib_fndecls.  */
  tree real4_decl;
  tree real8_decl;
  tree real10_decl;
  tree real16_decl;
  tree complex4_decl;
  tree complex8_decl;
  tree complex10_decl;
  tree complex16_decl;
}
gfc_intrinsic_map_t;
| |
/* ??? The NARGS==1 hack here is based on the fact that (c99 at least)
   defines complex variants of all of the entries in mathbuiltins.def
   except for atan2.  */

/* The four macros below expand to positional initializers of
   gfc_intrinsic_map_t; the field order is: id, the six built_in_function
   slots, libm_name, complex_available, is_constant, name, then the eight
   cached decl slots (all NULL_TREE initially).  */

/* A math builtin known to GCC with real variants only.  */
#define DEFINE_MATH_BUILTIN(ID, NAME, ARGTYPE) \
  { GFC_ISYM_ ## ID, BUILT_IN_ ## ID ## F, BUILT_IN_ ## ID, \
    BUILT_IN_ ## ID ## L, END_BUILTINS, END_BUILTINS, END_BUILTINS, \
    true, false, true, NAME, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE, \
    NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE},

/* A math builtin known to GCC with both real and complex variants.  */
#define DEFINE_MATH_BUILTIN_C(ID, NAME, ARGTYPE) \
  { GFC_ISYM_ ## ID, BUILT_IN_ ## ID ## F, BUILT_IN_ ## ID, \
    BUILT_IN_ ## ID ## L, BUILT_IN_C ## ID ## F, BUILT_IN_C ## ID, \
    BUILT_IN_C ## ID ## L, true, true, true, NAME, NULL_TREE, NULL_TREE, \
    NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE},

/* A function implemented in libgfortran, not a GCC builtin.  */
#define LIB_FUNCTION(ID, NAME, HAVE_COMPLEX) \
  { GFC_ISYM_ ## ID, END_BUILTINS, END_BUILTINS, END_BUILTINS, \
    END_BUILTINS, END_BUILTINS, END_BUILTINS, \
    false, HAVE_COMPLEX, true, NAME, NULL_TREE, NULL_TREE, NULL_TREE, \
    NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE }

/* A builtin with no GFC_ISYM id of its own; used internally (e.g. frexp,
   scalbn) by other translations.  */
#define OTHER_BUILTIN(ID, NAME, TYPE, CONST) \
  { GFC_ISYM_NONE, BUILT_IN_ ## ID ## F, BUILT_IN_ ## ID, \
    BUILT_IN_ ## ID ## L, END_BUILTINS, END_BUILTINS, END_BUILTINS, \
    true, false, CONST, NAME, NULL_TREE, NULL_TREE, \
    NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE},

static GTY(()) gfc_intrinsic_map_t gfc_intrinsic_map[] =
{
  /* Functions built into gcc itself (DEFINE_MATH_BUILTIN and
     DEFINE_MATH_BUILTIN_C), then the built-ins that don't correspond
     to any GFC_ISYM id directly, which use the OTHER_BUILTIN macro.  */
#include "mathbuiltins.def"

  /* Functions in libgfortran.  */
  LIB_FUNCTION (ERFC_SCALED, "erfc_scaled", false),

  /* End the list.  Loops over this table stop when both the id is
     GFC_ISYM_NONE and double_built_in is END_BUILTINS.  */
  LIB_FUNCTION (NONE, NULL, false)

};
#undef OTHER_BUILTIN
#undef LIB_FUNCTION
#undef DEFINE_MATH_BUILTIN
#undef DEFINE_MATH_BUILTIN_C
| |
| |
| enum rounding_mode { RND_ROUND, RND_TRUNC, RND_CEIL, RND_FLOOR }; |
| |
| |
| /* Find the correct variant of a given builtin from its argument. */ |
| static tree |
| builtin_decl_for_precision (enum built_in_function base_built_in, |
| int precision) |
| { |
| enum built_in_function i = END_BUILTINS; |
| |
| gfc_intrinsic_map_t *m; |
| for (m = gfc_intrinsic_map; m->double_built_in != base_built_in ; m++) |
| ; |
| |
| if (precision == TYPE_PRECISION (float_type_node)) |
| i = m->float_built_in; |
| else if (precision == TYPE_PRECISION (double_type_node)) |
| i = m->double_built_in; |
| else if (precision == TYPE_PRECISION (long_double_type_node)) |
| i = m->long_double_built_in; |
| else if (precision == TYPE_PRECISION (float128_type_node)) |
| { |
| /* Special treatment, because it is not exactly a built-in, but |
| a library function. */ |
| return m->real16_decl; |
| } |
| |
| return (i == END_BUILTINS ? NULL_TREE : builtin_decl_explicit (i)); |
| } |
| |
| |
| tree |
| gfc_builtin_decl_for_float_kind (enum built_in_function double_built_in, |
| int kind) |
| { |
| int i = gfc_validate_kind (BT_REAL, kind, false); |
| |
| if (gfc_real_kinds[i].c_float128) |
| { |
| /* For __float128, the story is a bit different, because we return |
| a decl to a library function rather than a built-in. */ |
| gfc_intrinsic_map_t *m; |
| for (m = gfc_intrinsic_map; m->double_built_in != double_built_in ; m++) |
| ; |
| |
| return m->real16_decl; |
| } |
| |
| return builtin_decl_for_precision (double_built_in, |
| gfc_real_kinds[i].mode_precision); |
| } |
| |
| |
/* Evaluate the arguments to an intrinsic function.  The value
   of NARGS may be less than the actual number of arguments in EXPR
   to allow optional "KIND" arguments that are not included in the
   generated code to be ignored.

   Results are stored into ARGARRAY; a CHARACTER argument occupies two
   consecutive slots (hidden string length first, then the data), which
   is why NARGS counts "hidden" lengths too (see
   gfc_intrinsic_argument_list_length).  */

static void
gfc_conv_intrinsic_function_args (gfc_se *se, gfc_expr *expr,
				  tree *argarray, int nargs)
{
  gfc_actual_arglist *actual;
  gfc_expr *e;
  gfc_intrinsic_arg *formal;
  gfc_se argse;
  int curr_arg;

  formal = expr->value.function.isym->formal;
  actual = expr->value.function.actual;

  for (curr_arg = 0; curr_arg < nargs; curr_arg++,
       actual = actual->next,
       formal = formal ? formal->next : NULL)
    {
      gcc_assert (actual);
      e = actual->expr;
      /* Skip omitted optional arguments.  The decrement compensates for
	 the loop's curr_arg++ so no ARGARRAY slot is consumed.  */
      if (!e)
	{
	  --curr_arg;
	  continue;
	}

      /* Evaluate the parameter.  This will substitute scalarized
	 references automatically.  */
      gfc_init_se (&argse, se);

      if (e->ts.type == BT_CHARACTER)
	{
	  /* Character arguments take two slots: the hidden string length
	     goes first, the data pointer is stored at the bottom of the
	     loop like any other argument.  */
	  gfc_conv_expr (&argse, e);
	  gfc_conv_string_parameter (&argse);
	  argarray[curr_arg++] = argse.string_length;
	  gcc_assert (curr_arg < nargs);
	}
      else
	gfc_conv_expr_val (&argse, e);

      /* If an optional argument is itself an optional dummy argument,
	 check its presence and substitute a null if absent.  */
      if (e->expr_type == EXPR_VARIABLE
	  && e->symtree->n.sym->attr.optional
	  && formal
	  && formal->optional)
	gfc_conv_missing_dummy (&argse, e, formal->ts, 0);

      gfc_add_block_to_block (&se->pre, &argse.pre);
      gfc_add_block_to_block (&se->post, &argse.post);
      argarray[curr_arg] = argse.expr;
    }
}
| |
| /* Count the number of actual arguments to the intrinsic function EXPR |
| including any "hidden" string length arguments. */ |
| |
| static unsigned int |
| gfc_intrinsic_argument_list_length (gfc_expr *expr) |
| { |
| int n = 0; |
| gfc_actual_arglist *actual; |
| |
| for (actual = expr->value.function.actual; actual; actual = actual->next) |
| { |
| if (!actual->expr) |
| continue; |
| |
| if (actual->expr->ts.type == BT_CHARACTER) |
| n += 2; |
| else |
| n++; |
| } |
| |
| return n; |
| } |
| |
| |
/* Conversions between different types are output by the frontend as
   intrinsic functions.  We implement these directly with inline code.  */

static void
gfc_conv_intrinsic_conversion (gfc_se * se, gfc_expr * expr)
{
  tree type;
  tree *args;
  int nargs;

  nargs = gfc_intrinsic_argument_list_length (expr);
  args = XALLOCAVEC (tree, nargs);

  /* Evaluate all the arguments passed. Whilst we're only interested in the
     first one here, there are other parts of the front-end that assume this
     and will trigger an ICE if it's not the case.  */
  type = gfc_typenode_for_spec (&expr->ts);
  gcc_assert (expr->value.function.actual->expr);
  gfc_conv_intrinsic_function_args (se, expr, args, nargs);

  /* Conversion between character kinds involves a call to a library
     function.  */
  if (expr->ts.type == BT_CHARACTER)
    {
      tree fndecl, var, addr, tmp;

      /* Only kind=1 <-> kind=4 conversions are supported.  */
      if (expr->ts.kind == 1
	  && expr->value.function.actual->expr->ts.kind == 4)
	fndecl = gfor_fndecl_convert_char4_to_char1;
      else if (expr->ts.kind == 4
	       && expr->value.function.actual->expr->ts.kind == 1)
	fndecl = gfor_fndecl_convert_char1_to_char4;
      else
	gcc_unreachable ();

      /* Create the variable storing the converted value.  */
      type = gfc_get_pchar_type (expr->ts.kind);
      var = gfc_create_var (type, "str");
      addr = gfc_build_addr_expr (build_pointer_type (type), var);

      /* Call the library function that will perform the conversion.
	 args[0] is the hidden string length, args[1] the source data.  */
      gcc_assert (nargs >= 2);
      tmp = build_call_expr_loc (input_location,
				 fndecl, 3, addr, args[0], args[1]);
      gfc_add_expr_to_block (&se->pre, tmp);

      /* Free the temporary afterwards.  */
      tmp = gfc_call_free (var);
      gfc_add_expr_to_block (&se->post, tmp);

      se->expr = var;
      se->string_length = args[0];

      return;
    }

  /* Conversion from complex to non-complex involves taking the real
     component of the value.  */
  if (TREE_CODE (TREE_TYPE (args[0])) == COMPLEX_TYPE
      && expr->ts.type != BT_COMPLEX)
    {
      tree artype;

      artype = TREE_TYPE (TREE_TYPE (args[0]));
      args[0] = fold_build1_loc (input_location, REALPART_EXPR, artype,
				 args[0]);
    }

  se->expr = convert (type, args[0]);
}
| |
/* This is needed because the gcc backend only implements
   FIX_TRUNC_EXPR, which is the same as INT() in Fortran.
   FLOOR(x) = INT(x) <= x ? INT(x) : INT(x) - 1
   Similarly for CEILING.

   ARG is the real operand, TYPE the target integer type, and UP selects
   CEILING (nonzero) vs FLOOR (zero).  Temporaries are evaluated into
   PBLOCK so ARG is only computed once.  */

static tree
build_fixbound_expr (stmtblock_t * pblock, tree arg, tree type, int up)
{
  tree tmp;
  tree cond;
  tree argtype;
  tree intval;

  argtype = TREE_TYPE (arg);
  arg = gfc_evaluate_now (arg, pblock);

  /* intval = INT (arg), i.e. truncation toward zero.  */
  intval = convert (type, arg);
  intval = gfc_evaluate_now (intval, pblock);

  /* Did truncation already land on the right side of ARG?
     CEILING wants intval >= arg, FLOOR wants intval <= arg.  */
  tmp = convert (argtype, intval);
  cond = fold_build2_loc (input_location, up ? GE_EXPR : LE_EXPR,
			  boolean_type_node, tmp, arg);

  /* Otherwise adjust by one in the required direction.  */
  tmp = fold_build2_loc (input_location, up ? PLUS_EXPR : MINUS_EXPR, type,
			 intval, build_int_cst (type, 1));
  tmp = fold_build3_loc (input_location, COND_EXPR, type, cond, intval, tmp);
  return tmp;
}
| |
| |
/* Round to nearest integer, away from zero.  ARG is the real operand,
   RESTYPE the integer result type.  */

static tree
build_round_expr (tree arg, tree restype)
{
  tree argtype;
  tree fn;
  int argprec, resprec;

  argtype = TREE_TYPE (arg);
  argprec = TYPE_PRECISION (argtype);
  resprec = TYPE_PRECISION (restype);

  /* Depending on the type of the result, choose the int intrinsic
     (iround, available only as a builtin, therefore cannot use it for
     __float128), long int intrinsic (lround family) or long long
     intrinsic (llround).  We might also need to convert the result
     afterwards.  */
  if (resprec <= INT_TYPE_SIZE && argprec <= LONG_DOUBLE_TYPE_SIZE)
    fn = builtin_decl_for_precision (BUILT_IN_IROUND, argprec);
  else if (resprec <= LONG_TYPE_SIZE)
    fn = builtin_decl_for_precision (BUILT_IN_LROUND, argprec);
  else if (resprec <= LONG_LONG_TYPE_SIZE)
    fn = builtin_decl_for_precision (BUILT_IN_LLROUND, argprec);
  else
    gcc_unreachable ();

  return fold_convert (restype, build_call_expr_loc (input_location,
						     fn, 1, arg));
}
| |
| |
| /* Convert a real to an integer using a specific rounding mode. |
| Ideally we would just build the corresponding GENERIC node, |
| however the RTL expander only actually supports FIX_TRUNC_EXPR. */ |
| |
| static tree |
| build_fix_expr (stmtblock_t * pblock, tree arg, tree type, |
| enum rounding_mode op) |
| { |
| switch (op) |
| { |
| case RND_FLOOR: |
| return build_fixbound_expr (pblock, arg, type, 0); |
| break; |
| |
| case RND_CEIL: |
| return build_fixbound_expr (pblock, arg, type, 1); |
| break; |
| |
| case RND_ROUND: |
| return build_round_expr (arg, type); |
| break; |
| |
| case RND_TRUNC: |
| return fold_build1_loc (input_location, FIX_TRUNC_EXPR, type, arg); |
| break; |
| |
| default: |
| gcc_unreachable (); |
| } |
| } |
| |
| |
/* Round a real value using the specified rounding mode.
   We use a temporary integer of that same kind size as the result.
   Values larger than those that can be represented by this kind are
   unchanged, as they will not be accurate enough to represent the
   rounding.
    huge = HUGE (KIND (a))
    aint (a) = ((a > huge) || (a < -huge)) ? a : (real)(int)a
   */

static void
gfc_conv_intrinsic_aint (gfc_se * se, gfc_expr * expr, enum rounding_mode op)
{
  tree type;
  tree itype;
  tree arg[2];
  tree tmp;
  tree cond;
  tree decl;
  mpfr_t huge;
  int n, nargs;
  int kind;

  kind = expr->ts.kind;
  nargs = gfc_intrinsic_argument_list_length (expr);

  decl = NULL_TREE;
  /* We have builtin functions for some cases.  */
  switch (op)
    {
    case RND_ROUND:
      decl = gfc_builtin_decl_for_float_kind (BUILT_IN_ROUND, kind);
      break;

    case RND_TRUNC:
      decl = gfc_builtin_decl_for_float_kind (BUILT_IN_TRUNC, kind);
      break;

    default:
      gcc_unreachable ();
    }

  /* Evaluate the argument.  */
  gcc_assert (expr->value.function.actual->expr);
  gfc_conv_intrinsic_function_args (se, expr, arg, nargs);

  /* Use a builtin function if one exists.  */
  if (decl != NULL_TREE)
    {
      se->expr = build_call_expr_loc (input_location, decl, 1, arg[0]);
      return;
    }

  /* This code is probably redundant, but we'll keep it lying around just
     in case.  */
  type = gfc_typenode_for_spec (&expr->ts);
  arg[0] = gfc_evaluate_now (arg[0], &se->pre);

  /* Test if the value is too large to handle sensibly.  The guard built
     below is (arg < huge) && (arg > -huge); only then is the
     round-trip through the integer type performed.  */
  gfc_set_model_kind (kind);
  mpfr_init (huge);
  n = gfc_validate_kind (BT_INTEGER, kind, false);
  mpfr_set_z (huge, gfc_integer_kinds[n].huge, GFC_RND_MODE);
  tmp = gfc_conv_mpfr_to_tree (huge, kind, 0);
  cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node, arg[0],
			  tmp);

  mpfr_neg (huge, huge, GFC_RND_MODE);
  tmp = gfc_conv_mpfr_to_tree (huge, kind, 0);
  tmp = fold_build2_loc (input_location, GT_EXPR, boolean_type_node, arg[0],
			 tmp);
  cond = fold_build2_loc (input_location, TRUTH_AND_EXPR, boolean_type_node,
			  cond, tmp);
  itype = gfc_get_int_type (kind);

  tmp = build_fix_expr (&se->pre, arg[0], itype, op);
  tmp = convert (type, tmp);
  se->expr = fold_build3_loc (input_location, COND_EXPR, type, cond, tmp,
			      arg[0]);
  mpfr_clear (huge);
}
| |
| |
/* Convert to an integer using the specified rounding mode.  */

static void
gfc_conv_intrinsic_int (gfc_se * se, gfc_expr * expr, enum rounding_mode op)
{
  tree type;
  tree *args;
  int nargs;

  nargs = gfc_intrinsic_argument_list_length (expr);
  args = XALLOCAVEC (tree, nargs);

  /* Evaluate the argument, we process all arguments even though we only
     use the first one for code generation purposes.  */
  type = gfc_typenode_for_spec (&expr->ts);
  gcc_assert (expr->value.function.actual->expr);
  gfc_conv_intrinsic_function_args (se, expr, args, nargs);

  if (TREE_CODE (TREE_TYPE (args[0])) == INTEGER_TYPE)
    {
      /* Conversion to a different integer kind.  No rounding needed.  */
      se->expr = convert (type, args[0]);
    }
  else
    {
      /* Conversion from complex to non-complex involves taking the real
	 component of the value.  */
      if (TREE_CODE (TREE_TYPE (args[0])) == COMPLEX_TYPE
	  && expr->ts.type != BT_COMPLEX)
	{
	  tree artype;

	  artype = TREE_TYPE (TREE_TYPE (args[0]));
	  args[0] = fold_build1_loc (input_location, REALPART_EXPR, artype,
				     args[0]);
	}

      se->expr = build_fix_expr (&se->pre, args[0], type, op);
    }
}
| |
| |
| /* Get the imaginary component of a value. */ |
| |
| static void |
| gfc_conv_intrinsic_imagpart (gfc_se * se, gfc_expr * expr) |
| { |
| tree arg; |
| |
| gfc_conv_intrinsic_function_args (se, expr, &arg, 1); |
| se->expr = fold_build1_loc (input_location, IMAGPART_EXPR, |
| TREE_TYPE (TREE_TYPE (arg)), arg); |
| } |
| |
| |
| /* Get the complex conjugate of a value. */ |
| |
| static void |
| gfc_conv_intrinsic_conjg (gfc_se * se, gfc_expr * expr) |
| { |
| tree arg; |
| |
| gfc_conv_intrinsic_function_args (se, expr, &arg, 1); |
| se->expr = fold_build1_loc (input_location, CONJ_EXPR, TREE_TYPE (arg), arg); |
| } |
| |
| |
| |
| static tree |
| define_quad_builtin (const char *name, tree type, bool is_const) |
| { |
| tree fndecl; |
| fndecl = build_decl (input_location, FUNCTION_DECL, get_identifier (name), |
| type); |
| |
| /* Mark the decl as external. */ |
| DECL_EXTERNAL (fndecl) = 1; |
| TREE_PUBLIC (fndecl) = 1; |
| |
| /* Mark it __attribute__((const)). */ |
| TREE_READONLY (fndecl) = is_const; |
| |
| rest_of_decl_compilation (fndecl, 1, 0); |
| |
| return fndecl; |
| } |
| |
| |
| |
| /* Initialize function decls for library functions. The external functions |
| are created as required. Builtin functions are added here. */ |
| |
| void |
| gfc_build_intrinsic_lib_fndecls (void) |
| { |
| gfc_intrinsic_map_t *m; |
| tree quad_decls[END_BUILTINS + 1]; |
| |
| if (gfc_real16_is_float128) |
| { |
| /* If we have soft-float types, we create the decls for their |
| C99-like library functions. For now, we only handle __float128 |
| q-suffixed functions. */ |
| |
| tree type, complex_type, func_1, func_2, func_cabs, func_frexp; |
| tree func_iround, func_lround, func_llround, func_scalbn, func_cpow; |
| |
| memset (quad_decls, 0, sizeof(tree) * (END_BUILTINS + 1)); |
| |
| type = float128_type_node; |
| complex_type = complex_float128_type_node; |
| /* type (*) (type) */ |
| func_1 = build_function_type_list (type, type, NULL_TREE); |
| /* int (*) (type) */ |
| func_iround = build_function_type_list (integer_type_node, |
| type, NULL_TREE); |
| /* long (*) (type) */ |
| func_lround = build_function_type_list (long_integer_type_node, |
| type, NULL_TREE); |
| /* long long (*) (type) */ |
| func_llround = build_function_type_list (long_long_integer_type_node, |
| type, NULL_TREE); |
| /* type (*) (type, type) */ |
| func_2 = build_function_type_list (type, type, type, NULL_TREE); |
| /* type (*) (type, &int) */ |
| func_frexp |
| = build_function_type_list (type, |
| type, |
| build_pointer_type (integer_type_node), |
| NULL_TREE); |
| /* type (*) (type, int) */ |
| func_scalbn = build_function_type_list (type, |
| type, integer_type_node, NULL_TREE); |
| /* type (*) (complex type) */ |
| func_cabs = build_function_type_list (type, complex_type, NULL_TREE); |
| /* complex type (*) (complex type, complex type) */ |
| func_cpow |
| = build_function_type_list (complex_type, |
| complex_type, complex_type, NULL_TREE); |
| |
| #define DEFINE_MATH_BUILTIN(ID, NAME, ARGTYPE) |
| #define DEFINE_MATH_BUILTIN_C(ID, NAME, ARGTYPE) |
| #define LIB_FUNCTION(ID, NAME, HAVE_COMPLEX) |
| |
| /* Only these built-ins are actually needed here. These are used directly |
| from the code, when calling builtin_decl_for_precision() or |
| builtin_decl_for_float_type(). The others are all constructed by |
| gfc_get_intrinsic_lib_fndecl(). */ |
| #define OTHER_BUILTIN(ID, NAME, TYPE, CONST) \ |
| quad_decls[BUILT_IN_ ## ID] = define_quad_builtin (NAME "q", func_ ## TYPE, CONST); |
| |
| #include "mathbuiltins.def" |
| |
| #undef OTHER_BUILTIN |
| #undef LIB_FUNCTION |
| #undef DEFINE_MATH_BUILTIN |
| #undef DEFINE_MATH_BUILTIN_C |
| |
| } |
| |
| /* Add GCC builtin functions. */ |
| for (m = gfc_intrinsic_map; |
| m->id != GFC_ISYM_NONE || m->double_built_in != END_BUILTINS; m++) |
| { |
| if (m->float_built_in != END_BUILTINS) |
| m->real4_decl = builtin_decl_explicit (m->float_built_in); |
| if (m->complex_float_built_in != END_BUILTINS) |
| m->complex4_decl = builtin_decl_explicit (m->complex_float_built_in); |
| if (m->double_built_in != END_BUILTINS) |
| m->real8_decl = builtin_decl_explicit (m->double_built_in); |
| if (m->complex_double_built_in != END_BUILTINS) |
| m->complex8_decl = builtin_decl_explicit (m->complex_double_built_in); |
| |
| /* If real(kind=10) exists, it is always long double. */ |
| if (m->long_double_built_in != END_BUILTINS) |
| m->real10_decl = builtin_decl_explicit (m->long_double_built_in); |
| if (m->complex_long_double_built_in != END_BUILTINS) |
| m->complex10_decl |
| = builtin_decl_explicit (m->complex_long_double_built_in); |
| |
| if (!gfc_real16_is_float128) |
| { |
| if (m->long_double_built_in != END_BUILTINS) |
| m->real16_decl = builtin_decl_explicit (m->long_double_built_in); |
| if (m->complex_long_double_built_in != END_BUILTINS) |
| m->complex16_decl |
| = builtin_decl_explicit (m->complex_long_double_built_in); |
| } |
| else if (quad_decls[m->double_built_in] != NULL_TREE) |
| { |
| /* Quad-precision function calls are constructed when first |
| needed by builtin_decl_for_precision(), except for those |
| that will be used directly (define by OTHER_BUILTIN). */ |
| m->real16_decl = quad_decls[m->double_built_in]; |
| } |
| else if (quad_decls[m->complex_double_built_in] != NULL_TREE) |
| { |
| /* Same thing for the complex ones. */ |
| m->complex16_decl = quad_decls[m->double_built_in]; |
| } |
| } |
| } |
| |
| |
/* Create a fndecl for a simple intrinsic library function.  M is the
   map entry for the intrinsic and EXPR the call being translated; the
   decl is cached in the per-kind slot of M so it is built only once.  */

static tree
gfc_get_intrinsic_lib_fndecl (gfc_intrinsic_map_t * m, gfc_expr * expr)
{
  tree type;
  vec<tree, va_gc> *argtypes;
  tree fndecl;
  gfc_actual_arglist *actual;
  tree *pdecl;
  gfc_typespec *ts;
  char name[GFC_MAX_SYMBOL_LEN + 3];

  /* Pick the cache slot matching the result type and kind.  */
  ts = &expr->ts;
  if (ts->type == BT_REAL)
    {
      switch (ts->kind)
	{
	case 4:
	  pdecl = &m->real4_decl;
	  break;
	case 8:
	  pdecl = &m->real8_decl;
	  break;
	case 10:
	  pdecl = &m->real10_decl;
	  break;
	case 16:
	  pdecl = &m->real16_decl;
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  else if (ts->type == BT_COMPLEX)
    {
      gcc_assert (m->complex_available);

      switch (ts->kind)
	{
	case 4:
	  pdecl = &m->complex4_decl;
	  break;
	case 8:
	  pdecl = &m->complex8_decl;
	  break;
	case 10:
	  pdecl = &m->complex10_decl;
	  break;
	case 16:
	  pdecl = &m->complex16_decl;
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  else
    gcc_unreachable ();

  /* Return the cached decl if we already built it.  */
  if (*pdecl)
    return *pdecl;

  /* Construct the external name: either C99/libm style ("c" prefix for
     complex, "f"/"l"/"q" precision suffix) or the libgfortran
     "_gfortran_<name>_[rc]<kind>" style.  */
  if (m->libm_name)
    {
      int n = gfc_validate_kind (BT_REAL, ts->kind, false);
      if (gfc_real_kinds[n].c_float)
	snprintf (name, sizeof (name), "%s%s%s",
		  ts->type == BT_COMPLEX ? "c" : "", m->name, "f");
      else if (gfc_real_kinds[n].c_double)
	snprintf (name, sizeof (name), "%s%s",
		  ts->type == BT_COMPLEX ? "c" : "", m->name);
      else if (gfc_real_kinds[n].c_long_double)
	snprintf (name, sizeof (name), "%s%s%s",
		  ts->type == BT_COMPLEX ? "c" : "", m->name, "l");
      else if (gfc_real_kinds[n].c_float128)
	snprintf (name, sizeof (name), "%s%s%s",
		  ts->type == BT_COMPLEX ? "c" : "", m->name, "q");
      else
	gcc_unreachable ();
    }
  else
    {
      snprintf (name, sizeof (name), PREFIX ("%s_%c%d"), m->name,
		ts->type == BT_COMPLEX ? 'c' : 'r',
		ts->kind);
    }

  /* Build the function type from the actual argument list.  */
  argtypes = NULL;
  for (actual = expr->value.function.actual; actual; actual = actual->next)
    {
      type = gfc_typenode_for_spec (&actual->expr->ts);
      vec_safe_push (argtypes, type);
    }
  type = build_function_type_vec (gfc_typenode_for_spec (ts), argtypes);
  fndecl = build_decl (input_location,
		       FUNCTION_DECL, get_identifier (name), type);

  /* Mark the decl as external.  */
  DECL_EXTERNAL (fndecl) = 1;
  TREE_PUBLIC (fndecl) = 1;

  /* Mark it __attribute__((const)), if possible.  */
  TREE_READONLY (fndecl) = m->is_constant;

  rest_of_decl_compilation (fndecl, 1, 0);

  (*pdecl) = fndecl;
  return fndecl;
}
| |
| |
/* Convert an intrinsic function into an external or builtin call.
   Looks the intrinsic up in gfc_intrinsic_map, evaluates the argument
   list, and emits the call into SE.  */

static void
gfc_conv_intrinsic_lib_function (gfc_se * se, gfc_expr * expr)
{
  gfc_intrinsic_map_t *m;
  tree fndecl;
  tree rettype;
  tree *args;
  unsigned int num_args;
  gfc_isym_id id;

  id = expr->value.function.isym->id;
  /* Find the entry for this function.  The scan stops at the table's
     terminating entry if ID is not present.  */
  for (m = gfc_intrinsic_map;
       m->id != GFC_ISYM_NONE || m->double_built_in != END_BUILTINS; m++)
    {
      if (id == m->id)
	break;
    }

  if (m->id == GFC_ISYM_NONE)
    {
      internal_error ("Intrinsic function %s(%d) not recognized",
		      expr->value.function.name, id);
    }

  /* Get the decl and generate the call.  */
  num_args = gfc_intrinsic_argument_list_length (expr);
  args = XALLOCAVEC (tree, num_args);

  gfc_conv_intrinsic_function_args (se, expr, args, num_args);
  fndecl = gfc_get_intrinsic_lib_fndecl (m, expr);
  rettype = TREE_TYPE (TREE_TYPE (fndecl));

  fndecl = build_addr (fndecl, current_function_decl);
  se->expr = build_call_array_loc (input_location, rettype, fndecl, num_args, args);
}
| |
| |
/* If bounds-checking is enabled, create code to verify at runtime that the
   string lengths for both expressions are the same (needed for e.g. MERGE).
   If bounds-checking is not enabled, does nothing.

   INTR_NAME names the intrinsic for the error message, WHERE is the
   source location, A and B are the two string-length trees, and the
   check is appended to TARGET.  */

void
gfc_trans_same_strlen_check (const char* intr_name, locus* where,
			     tree a, tree b, stmtblock_t* target)
{
  tree cond;
  tree name;

  /* If bounds-checking is disabled, do nothing.  */
  if (!(gfc_option.rtcheck & GFC_RTCHECK_BOUNDS))
    return;

  /* Compare the two string lengths.  */
  cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, a, b);

  /* Output the runtime-check.  */
  name = gfc_build_cstring_const (intr_name);
  name = gfc_build_addr_expr (pchar_type_node, name);
  gfc_trans_runtime_check (true, false, cond, target, where,
			   "Unequal character lengths (%ld/%ld) in %s",
			   fold_convert (long_integer_type_node, a),
			   fold_convert (long_integer_type_node, b), name);
}
| |
| |
/* The EXPONENT(s) intrinsic function is translated into
       int ret;
       frexp (s, &ret);
       return ret;
 */

static void
gfc_conv_intrinsic_exponent (gfc_se *se, gfc_expr *expr)
{
  tree arg, type, res, tmp, frexp;

  /* Pick the frexp variant matching the argument's real kind.  */
  frexp = gfc_builtin_decl_for_float_kind (BUILT_IN_FREXP,
					   expr->value.function.actual->expr->ts.kind);

  gfc_conv_intrinsic_function_args (se, expr, &arg, 1);

  /* frexp's fraction result is discarded; only the out-parameter
     exponent is used.  */
  res = gfc_create_var (integer_type_node, NULL);
  tmp = build_call_expr_loc (input_location, frexp, 2, arg,
			     gfc_build_addr_expr (NULL_TREE, res));
  gfc_add_expr_to_block (&se->pre, tmp);

  /* Convert from int to the requested result kind.  */
  type = gfc_typenode_for_spec (&expr->ts);
  se->expr = fold_convert (type, res);
}
| |
| |
| static void |
| trans_this_image (gfc_se * se, gfc_expr *expr) |
| { |
| stmtblock_t loop; |
| tree type, desc, dim_arg, cond, tmp, m, loop_var, exit_label, min_var, |
| lbound, ubound, extent, ml; |
| gfc_se argse; |
| int rank, corank; |
| |
| /* The case -fcoarray=single is handled elsewhere. */ |
| gcc_assert (gfc_option.coarray != GFC_FCOARRAY_SINGLE); |
| |
| gfc_init_coarray_decl (false); |
| |
| /* Argument-free version: THIS_IMAGE(). */ |
| if (expr->value.function.actual->expr == NULL) |
| { |
| se->expr = fold_convert (gfc_get_int_type (gfc_default_integer_kind), |
| gfort_gvar_caf_this_image); |
| return; |
| } |
| |
| /* Coarray-argument version: THIS_IMAGE(coarray [, dim]). */ |
| |
| type = gfc_get_int_type (gfc_default_integer_kind); |
| corank = gfc_get_corank (expr->value.function.actual->expr); |
| rank = expr->value.function.actual->expr->rank; |
| |
| /* Obtain the descriptor of the COARRAY. */ |
| gfc_init_se (&argse, NULL); |
| argse.want_coarray = 1; |
| gfc_conv_expr_descriptor (&argse, expr->value.function.actual->expr); |
| gfc_add_block_to_block (&se->pre, &argse.pre); |
| gfc_add_block_to_block (&se->post, &argse.post); |
| desc = argse.expr; |
| |
| if (se->ss) |
| { |
| /* Create an implicit second parameter from the loop variable. */ |
| gcc_assert (!expr->value.function.actual->next->expr); |
| gcc_assert (corank > 0); |
| gcc_assert (se->loop->dimen == 1); |
| gcc_assert (se->ss->info->expr == expr); |
| |
| dim_arg = se->loop->loopvar[0]; |
| dim_arg = fold_build2_loc (input_location, PLUS_EXPR, |
| gfc_array_index_type, dim_arg, |
| build_int_cst (TREE_TYPE (dim_arg), 1)); |
| gfc_advance_se_ss_chain (se); |
| } |
| else |
| { |
| /* Use the passed DIM= argument. */ |
| gcc_assert (expr->value.function.actual->next->expr); |
| gfc_init_se (&argse, NULL); |
| gfc_conv_expr_type (&argse, expr->value.function.actual->next->expr, |
| gfc_array_index_type); |
| gfc_add_block_to_block (&se->pre, &argse.pre); |
| dim_arg = argse.expr; |
| |
| if (INTEGER_CST_P (dim_arg)) |
| { |
| int hi, co_dim; |
| |
| hi = TREE_INT_CST_HIGH (dim_arg); |
| co_dim = TREE_INT_CST_LOW (dim_arg); |
| if (hi || co_dim < 1 |
| || co_dim > GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))) |
| gfc_error ("'dim' argument of %s intrinsic at %L is not a valid " |
| "dimension index", expr->value.function.isym->name, |
| &expr->where); |
| } |
| else if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS) |
| { |
| dim_arg = gfc_evaluate_now (dim_arg, &se->pre); |
| cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node, |
| dim_arg, |
| build_int_cst (TREE_TYPE (dim_arg), 1)); |
| tmp = gfc_rank_cst[GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))]; |
| tmp = fold_build2_loc (input_location, GT_EXPR, boolean_type_node, |
| dim_arg, tmp); |
| cond = fold_build2_loc (input_location, TRUTH_ORIF_EXPR, |
| boolean_type_node, cond, tmp); |
| gfc_trans_runtime_check (true, false, cond, &se->pre, &expr->where, |
| gfc_msg_fault); |
| } |
| } |
| |
| /* Used algorithm; cf. Fortran 2008, C.10. Note, due to the scalarizer, |
| one always has a dim_arg argument. |
| |
| m = this_image() - 1 |
| if (corank == 1) |
| { |
| sub(1) = m + lcobound(corank) |
| return; |
| } |
| i = rank |
| min_var = min (rank + corank - 2, rank + dim_arg - 1) |
| for (;;) |
| { |
| extent = gfc_extent(i) |
| ml = m |
| m = m/extent |
| if (i >= min_var) |
| goto exit_label |
| i++ |
| } |
| exit_label: |
| sub(dim_arg) = (dim_arg < corank) ? ml - m*extent + lcobound(dim_arg) |
| : m + lcobound(corank) |
| */ |
| |
| /* this_image () - 1. */ |
| tmp = fold_convert (type, gfort_gvar_caf_this_image); |
| tmp = fold_build2_loc (input_location, MINUS_EXPR, type, tmp, |
| build_int_cst (type, 1)); |
| if (corank == 1) |
| { |
| /* sub(1) = m + lcobound(corank). */ |
| lbound = gfc_conv_descriptor_lbound_get (desc, |
| build_int_cst (TREE_TYPE (gfc_array_index_type), |
| corank+rank-1)); |
| lbound = fold_convert (type, lbound); |
| tmp = fold_build2_loc (input_location, PLUS_EXPR, type, tmp, lbound); |
| |
| se->expr = tmp; |
| return; |
| } |
| |
| m = gfc_create_var (type, NULL); |
| ml = gfc_create_var (type, NULL); |
| loop_var = gfc_create_var (integer_type_node, NULL); |
| min_var = gfc_create_var (integer_type_node, NULL); |
| |
| /* m = this_image () - 1. */ |
| gfc_add_modify (&se->pre, m, tmp); |
| |
| /* min_var = min (rank + corank-2, rank + dim_arg - 1). */ |
| tmp = fold_build2_loc (input_location, PLUS_EXPR, integer_type_node, |
| fold_convert (integer_type_node, dim_arg), |
| build_int_cst (integer_type_node, rank - 1)); |
| tmp = fold_build2_loc (input_location, MIN_EXPR, integer_type_node, |
| build_int_cst (integer_type_node, rank + corank - 2), |
| tmp); |
| gfc_add_modify (&se->pre, min_var, tmp); |
| |
| /* i = rank. */ |
| tmp = build_int_cst (integer_type_node, rank); |
| gfc_add_modify (&se->pre, loop_var, tmp); |
| |
| exit_label = gfc_build_label_decl (NULL_TREE); |
| TREE_USED (exit_label) = 1; |
| |
| /* Loop body. */ |
| gfc_init_block (&loop); |
| |
| /* ml = m. */ |
| gfc_add_modify (&loop, ml, m); |
| |
| /* extent = ... */ |
| lbound = gfc_conv_descriptor_lbound_get (desc, loop_var); |
| ubound = gfc_conv_descriptor_ubound_get (desc, loop_var); |
| extent = gfc_conv_array_extent_dim (lbound, ubound, NULL); |
| extent = fold_convert (type, extent); |
| |
| /* m = m/extent. */ |
| gfc_add_modify (&loop, m, |
| fold_build2_loc (input_location, TRUNC_DIV_EXPR, type, |
| m, extent)); |
| |
| /* Exit condition: if (i >= min_var) goto exit_label. */ |
| cond = fold_build2_loc (input_location, GE_EXPR, boolean_type_node, loop_var, |
| min_var); |
| tmp = build1_v (GOTO_EXPR, exit_label); |
| tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node, cond, tmp, |
| build_empty_stmt (input_location)); |
| gfc_add_expr_to_block (&loop, tmp); |
| |
| /* Increment loop variable: i++. */ |
| gfc_add_modify (&loop, loop_var, |
| fold_build2_loc (input_location, PLUS_EXPR, integer_type_node, |
| loop_var, |
| build_int_cst (integer_type_node, 1))); |
| |
| /* Making the loop... actually loop! */ |
| tmp = gfc_finish_block (&loop); |
| tmp = build1_v (LOOP_EXPR, tmp); |
| gfc_add_expr_to_block (&se->pre, tmp); |
| |
| /* The exit label. */ |
| tmp = build1_v (LABEL_EXPR, exit_label); |
| gfc_add_expr_to_block (&se->pre, tmp); |
| |
| /* sub(co_dim) = (co_dim < corank) ? ml - m*extent + lcobound(dim_arg) |
| : m + lcobound(corank) */ |
| |
| cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node, dim_arg, |
| build_int_cst (TREE_TYPE (dim_arg), corank)); |
| |
| lbound = gfc_conv_descriptor_lbound_get (desc, |
| fold_build2_loc (input_location, PLUS_EXPR, |
| gfc_array_index_type, dim_arg, |
| build_int_cst (TREE_TYPE (dim_arg), rank-1))); |
| lbound = fold_convert (type, lbound); |
| |
| tmp = fold_build2_loc (input_location, MINUS_EXPR, type, ml, |
| fold_build2_loc (input_location, MULT_EXPR, type, |
| m, extent)); |
| tmp = fold_build2_loc (input_location, PLUS_EXPR, type, tmp, lbound); |
| |
| se->expr = fold_build3_loc (input_location, COND_EXPR, type, cond, tmp, |
| fold_build2_loc (input_location, PLUS_EXPR, type, |
| m, lbound)); |
| } |
| |
| |
/* Translate the Fortran 2008 IMAGE_INDEX intrinsic: given a coarray and a
   rank-one integer array SUB of cosubscripts, compute the linear image
   index, or 0 if SUB lies outside the cobounds or the resulting index
   exceeds NUM_IMAGES (cf. Fortran 2008, C.10).  */

static void
trans_image_index (gfc_se * se, gfc_expr *expr)
{
  tree num_images, cond, coindex, type, lbound, ubound, desc, subdesc,
       tmp, invalid_bound;
  gfc_se argse, subse;
  int rank, corank, codim;

  type = gfc_get_int_type (gfc_default_integer_kind);
  corank = gfc_get_corank (expr->value.function.actual->expr);
  rank = expr->value.function.actual->expr->rank;

  /* Obtain the descriptor of the COARRAY.  */
  gfc_init_se (&argse, NULL);
  argse.want_coarray = 1;
  gfc_conv_expr_descriptor (&argse, expr->value.function.actual->expr);
  gfc_add_block_to_block (&se->pre, &argse.pre);
  gfc_add_block_to_block (&se->post, &argse.post);
  desc = argse.expr;

  /* Obtain a handle to the SUB argument.  */
  gfc_init_se (&subse, NULL);
  gfc_conv_expr_descriptor (&subse, expr->value.function.actual->next->expr);
  gfc_add_block_to_block (&se->pre, &subse.pre);
  gfc_add_block_to_block (&se->post, &subse.post);
  subdesc = build_fold_indirect_ref_loc (input_location,
			gfc_conv_descriptor_data_get (subse.expr));

  /* Fortran 2008 does not require that the values remain in the cobounds,
     thus we need explicitly check this - and return 0 if they are exceeded.  */

  /* The last codimension is only checked against its lower cobound here;
     its upper limit is enforced by the num_images() comparison below.
     Cobounds occupy descriptor dimensions rank .. rank+corank-1.  */
  lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[rank+corank-1]);
  tmp = gfc_build_array_ref (subdesc, gfc_rank_cst[corank-1], NULL);
  invalid_bound = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
				   fold_convert (gfc_array_index_type, tmp),
				   lbound);

  /* Check all other codimensions against both their cobounds.  */
  for (codim = corank + rank - 2; codim >= rank; codim--)
    {
      lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[codim]);
      ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[codim]);
      tmp = gfc_build_array_ref (subdesc, gfc_rank_cst[codim-rank], NULL);
      cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
			      fold_convert (gfc_array_index_type, tmp),
			      lbound);
      invalid_bound = fold_build2_loc (input_location, TRUTH_OR_EXPR,
				       boolean_type_node, invalid_bound, cond);
      cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
			      fold_convert (gfc_array_index_type, tmp),
			      ubound);
      invalid_bound = fold_build2_loc (input_location, TRUTH_OR_EXPR,
				       boolean_type_node, invalid_bound, cond);
    }

  invalid_bound = gfc_unlikely (invalid_bound);


  /* See Fortran 2008, C.10 for the following algorithm.  */

  /* coindex = sub(corank) - lcobound(n).  */
  coindex = fold_convert (gfc_array_index_type,
			  gfc_build_array_ref (subdesc, gfc_rank_cst[corank-1],
					       NULL));
  lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[rank+corank-1]);
  coindex = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
			     fold_convert (gfc_array_index_type, coindex),
			     lbound);

  /* Horner-style accumulation over the remaining codimensions.  */
  for (codim = corank + rank - 2; codim >= rank; codim--)
    {
      tree extent, ubound;

      /* coindex = coindex*extent(codim) + sub(codim) - lcobound(codim).  */
      lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[codim]);
      ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[codim]);
      extent = gfc_conv_array_extent_dim (lbound, ubound, NULL);

      /* coindex *= extent.  */
      coindex = fold_build2_loc (input_location, MULT_EXPR,
				 gfc_array_index_type, coindex, extent);

      /* coindex += sub(codim).  */
      tmp = gfc_build_array_ref (subdesc, gfc_rank_cst[codim-rank], NULL);
      coindex = fold_build2_loc (input_location, PLUS_EXPR,
				 gfc_array_index_type, coindex,
				 fold_convert (gfc_array_index_type, tmp));

      /* coindex -= lbound(codim).  */
      lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[codim]);
      coindex = fold_build2_loc (input_location, MINUS_EXPR,
				 gfc_array_index_type, coindex, lbound);
    }

  /* Image indices are one based; convert to the default integer type.  */
  coindex = fold_build2_loc (input_location, PLUS_EXPR, type,
			     fold_convert(type, coindex),
			     build_int_cst (type, 1));

  /* Return 0 if "coindex" exceeds num_images().  */

  if (gfc_option.coarray == GFC_FCOARRAY_SINGLE)
    num_images = build_int_cst (type, 1);
  else
    {
      gfc_init_coarray_decl (false);
      num_images = fold_convert (type, gfort_gvar_caf_num_images);
    }

  /* Evaluate the index once; it is used both in the guard and the result.  */
  tmp = gfc_create_var (type, NULL);
  gfc_add_modify (&se->pre, tmp, coindex);

  cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node, tmp,
			  num_images);
  cond = fold_build2_loc (input_location, TRUTH_OR_EXPR, boolean_type_node,
			  cond,
			  fold_convert (boolean_type_node, invalid_bound));
  se->expr = fold_build3_loc (input_location, COND_EXPR, type, cond,
			      build_int_cst (type, 0), tmp);
}
| |
| |
| static void |
| trans_num_images (gfc_se * se) |
| { |
| gfc_init_coarray_decl (false); |
| se->expr = fold_convert (gfc_get_int_type (gfc_default_integer_kind), |
| gfort_gvar_caf_num_images); |
| } |
| |
| |
| static void |
| gfc_conv_intrinsic_rank (gfc_se *se, gfc_expr *expr) |
| { |
| gfc_se argse; |
| |
| gfc_init_se (&argse, NULL); |
| argse.data_not_needed = 1; |
| argse.descriptor_only = 1; |
| |
| gfc_conv_expr_descriptor (&argse, expr->value.function.actual->expr); |
| gfc_add_block_to_block (&se->pre, &argse.pre); |
| gfc_add_block_to_block (&se->post, &argse.post); |
| |
| se->expr = gfc_conv_descriptor_rank (argse.expr); |
| } |
| |
| |
/* Evaluate a single upper or lower bound, i.e. the DIM= form of LBOUND
   (UPPER == 0) or UBOUND (UPPER != 0).  Under the scalarizer the DIM
   argument is implicit and derived from the loop variable.  */
/* TODO: bound intrinsic generates way too much unnecessary code.  */

static void
gfc_conv_intrinsic_bound (gfc_se * se, gfc_expr * expr, int upper)
{
  gfc_actual_arglist *arg;
  gfc_actual_arglist *arg2;
  tree desc;
  tree type;
  tree bound;
  tree tmp;
  tree cond, cond1, cond3, cond4, size;
  tree ubound;
  tree lbound;
  gfc_se argse;
  gfc_array_spec * as;
  bool assumed_rank_lb_one;

  arg = expr->value.function.actual;
  arg2 = arg->next;

  if (se->ss)
    {
      /* Create an implicit second parameter from the loop variable.  */
      gcc_assert (!arg2->expr);
      gcc_assert (se->loop->dimen == 1);
      gcc_assert (se->ss->info->expr == expr);
      gfc_advance_se_ss_chain (se);
      bound = se->loop->loopvar[0];
      bound = fold_build2_loc (input_location, MINUS_EXPR,
			       gfc_array_index_type, bound,
			       se->loop->from[0]);
    }
  else
    {
      /* use the passed argument.  */
      gcc_assert (arg2->expr);
      gfc_init_se (&argse, NULL);
      gfc_conv_expr_type (&argse, arg2->expr, gfc_array_index_type);
      gfc_add_block_to_block (&se->pre, &argse.pre);
      bound = argse.expr;
      /* Convert from one based to zero based.  */
      bound = fold_build2_loc (input_location, MINUS_EXPR,
			       gfc_array_index_type, bound,
			       gfc_index_one_node);
    }

  /* TODO: don't re-evaluate the descriptor on each iteration.  */
  /* Get a descriptor for the first parameter.  */
  gfc_init_se (&argse, NULL);
  gfc_conv_expr_descriptor (&argse, arg->expr);
  gfc_add_block_to_block (&se->pre, &argse.pre);
  gfc_add_block_to_block (&se->post, &argse.post);

  desc = argse.expr;

  as = gfc_get_full_arrayspec_from_expr (arg->expr);

  /* If DIM is a constant, diagnose an invalid dimension at compile time.
     For assumed-rank arrays only GFC_MAX_DIMENSIONS limits DIM, as the
     actual rank is unknown at compile time.  */
  if (INTEGER_CST_P (bound))
    {
      int hi, low;

      hi = TREE_INT_CST_HIGH (bound);
      low = TREE_INT_CST_LOW (bound);
      if (hi || low < 0
	  || ((!as || as->type != AS_ASSUMED_RANK)
	      && low >= GFC_TYPE_ARRAY_RANK (TREE_TYPE (desc)))
	  || low > GFC_MAX_DIMENSIONS)
	gfc_error ("'dim' argument of %s intrinsic at %L is not a valid "
		   "dimension index", upper ? "UBOUND" : "LBOUND",
		   &expr->where);
    }

  /* Otherwise emit a run-time check, when enabled, that the zero-based
     DIM satisfies 0 <= bound < rank.  */
  if (!INTEGER_CST_P (bound) || (as && as->type == AS_ASSUMED_RANK))
    {
      if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
	{
	  bound = gfc_evaluate_now (bound, &se->pre);
	  cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
				  bound, build_int_cst (TREE_TYPE (bound), 0));
	  /* For assumed rank the rank must be read from the descriptor.  */
	  if (as && as->type == AS_ASSUMED_RANK)
	    tmp = gfc_conv_descriptor_rank (desc);
	  else
	    tmp = gfc_rank_cst[GFC_TYPE_ARRAY_RANK (TREE_TYPE (desc))];
	  tmp = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
				 bound, fold_convert(TREE_TYPE (bound), tmp));
	  cond = fold_build2_loc (input_location, TRUTH_ORIF_EXPR,
				  boolean_type_node, cond, tmp);
	  gfc_trans_runtime_check (true, false, cond, &se->pre, &expr->where,
				   gfc_msg_fault);
	}
    }

  /* Take care of the lbound shift for assumed-rank arrays, which are
     nonallocatable and nonpointers. Those has a lbound of 1.  */
  assumed_rank_lb_one = as && as->type == AS_ASSUMED_RANK
			&& ((arg->expr->ts.type != BT_CLASS
			     && !arg->expr->symtree->n.sym->attr.allocatable
			     && !arg->expr->symtree->n.sym->attr.pointer)
			    || (arg->expr->ts.type == BT_CLASS
				&& !CLASS_DATA (arg->expr)->attr.allocatable
				&& !CLASS_DATA (arg->expr)->attr.class_pointer));

  ubound = gfc_conv_descriptor_ubound_get (desc, bound);
  lbound = gfc_conv_descriptor_lbound_get (desc, bound);

  /* 13.14.53: Result value for LBOUND

     Case (i): For an array section or for an array expression other than a
	       whole array or array structure component, LBOUND(ARRAY, DIM)
	       has the value 1.  For a whole array or array structure
	       component, LBOUND(ARRAY, DIM) has the value:
		 (a) equal to the lower bound for subscript DIM of ARRAY if
		     dimension DIM of ARRAY does not have extent zero
		     or if ARRAY is an assumed-size array of rank DIM,
	      or (b) 1 otherwise.

     13.14.113: Result value for UBOUND

     Case (i): For an array section or for an array expression other than a
	       whole array or array structure component, UBOUND(ARRAY, DIM)
	       has the value equal to the number of elements in the given
	       dimension; otherwise, it has a value equal to the upper bound
	       for subscript DIM of ARRAY if dimension DIM of ARRAY does
	       not have size zero and has value zero if dimension DIM has
	       size zero.  */

  if (!upper && assumed_rank_lb_one)
    se->expr = gfc_index_one_node;
  else if (as)
    {
      tree stride = gfc_conv_descriptor_stride_get (desc, bound);

      /* cond1: nonzero extent (ubound >= lbound).
	 cond3: nonnegative stride AND nonzero extent.
	 cond4: negative stride.  */
      cond1 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
			       ubound, lbound);
      cond3 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
			       stride, gfc_index_zero_node);
      cond3 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
			       boolean_type_node, cond3, cond1);
      cond4 = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
			       stride, gfc_index_zero_node);

      if (upper)
	{
	  tree cond5;
	  cond = fold_build2_loc (input_location, TRUTH_OR_EXPR,
				  boolean_type_node, cond3, cond4);
	  cond5 = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
				   gfc_index_one_node, lbound);
	  cond5 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
				   boolean_type_node, cond4, cond5);

	  cond = fold_build2_loc (input_location, TRUTH_OR_EXPR,
				  boolean_type_node, cond, cond5);

	  if (assumed_rank_lb_one)
	    {
	      /* Shift the bound to a lower bound of one:
		 ubound - lbound + 1.  */
	      tmp = fold_build2_loc (input_location, MINUS_EXPR,
				     gfc_array_index_type, ubound, lbound);
	      tmp = fold_build2_loc (input_location, PLUS_EXPR,
				     gfc_array_index_type, tmp,
				     gfc_index_one_node);
	    }
	  else
	    tmp = ubound;

	  se->expr = fold_build3_loc (input_location, COND_EXPR,
				      gfc_array_index_type, cond,
				      tmp, gfc_index_zero_node);
	}
      else
	{
	  /* The last dimension of an assumed-size array keeps its stored
	     lower bound even for zero extent (13.14.53 case (i)(a)).  */
	  if (as->type == AS_ASSUMED_SIZE)
	    cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
				    bound, build_int_cst (TREE_TYPE (bound),
							  arg->expr->rank - 1));
	  else
	    cond = boolean_false_node;

	  cond1 = fold_build2_loc (input_location, TRUTH_OR_EXPR,
				   boolean_type_node, cond3, cond4);
	  cond = fold_build2_loc (input_location, TRUTH_OR_EXPR,
				  boolean_type_node, cond, cond1);

	  se->expr = fold_build3_loc (input_location, COND_EXPR,
				      gfc_array_index_type, cond,
				      lbound, gfc_index_one_node);
	}
    }
  else
    {
      if (upper)
	{
	  /* No array spec: UBOUND is the extent, MAX (ubound-lbound+1, 0).  */
	  size = fold_build2_loc (input_location, MINUS_EXPR,
				  gfc_array_index_type, ubound, lbound);
	  se->expr = fold_build2_loc (input_location, PLUS_EXPR,
				      gfc_array_index_type, size,
				      gfc_index_one_node);
	  se->expr = fold_build2_loc (input_location, MAX_EXPR,
				      gfc_array_index_type, se->expr,
				      gfc_index_zero_node);
	}
      else
	se->expr = gfc_index_one_node;
    }

  type = gfc_typenode_for_spec (&expr->ts);
  se->expr = convert (type, se->expr);
}
| |
| |
/* Translate LCOBOUND and UCOBOUND (and THIS_IMAGE with a coarray
   argument): evaluate a single cobound, either for the DIM= given as
   the second actual argument or, under the scalarizer, for the current
   loop dimension.  Cobounds are stored in the array descriptor after
   the array dimensions, i.e. at indices rank .. rank+corank-1.  */

static void
conv_intrinsic_cobound (gfc_se * se, gfc_expr * expr)
{
  gfc_actual_arglist *arg;
  gfc_actual_arglist *arg2;
  gfc_se argse;
  tree bound, resbound, resbound2, desc, cond, tmp;
  tree type;
  int corank;

  gcc_assert (expr->value.function.isym->id == GFC_ISYM_LCOBOUND
	      || expr->value.function.isym->id == GFC_ISYM_UCOBOUND
	      || expr->value.function.isym->id == GFC_ISYM_THIS_IMAGE);

  arg = expr->value.function.actual;
  arg2 = arg->next;

  gcc_assert (arg->expr->expr_type == EXPR_VARIABLE);
  corank = gfc_get_corank (arg->expr);

  gfc_init_se (&argse, NULL);
  argse.want_coarray = 1;

  gfc_conv_expr_descriptor (&argse, arg->expr);
  gfc_add_block_to_block (&se->pre, &argse.pre);
  gfc_add_block_to_block (&se->post, &argse.post);
  desc = argse.expr;

  if (se->ss)
    {
      /* Create an implicit second parameter from the loop variable.  */
      gcc_assert (!arg2->expr);
      gcc_assert (corank > 0);
      gcc_assert (se->loop->dimen == 1);
      gcc_assert (se->ss->info->expr == expr);

      /* Skip over the array dimensions to reach the cobounds.  */
      bound = se->loop->loopvar[0];
      bound = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
			       bound, gfc_rank_cst[arg->expr->rank]);
      gfc_advance_se_ss_chain (se);
    }
  else
    {
      /* use the passed argument.  */
      gcc_assert (arg2->expr);
      gfc_init_se (&argse, NULL);
      gfc_conv_expr_type (&argse, arg2->expr, gfc_array_index_type);
      gfc_add_block_to_block (&se->pre, &argse.pre);
      bound = argse.expr;

      /* For a constant DIM, diagnose 1 <= DIM <= corank at compile time.  */
      if (INTEGER_CST_P (bound))
	{
	  int hi, low;

	  hi = TREE_INT_CST_HIGH (bound);
	  low = TREE_INT_CST_LOW (bound);
	  if (hi || low < 1 || low > GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc)))
	    gfc_error ("'dim' argument of %s intrinsic at %L is not a valid "
		       "dimension index", expr->value.function.isym->name,
		       &expr->where);
	}
      else if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
	{
	  /* Otherwise check the range at run time when enabled.  */
	  bound = gfc_evaluate_now (bound, &se->pre);
	  cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
				  bound, build_int_cst (TREE_TYPE (bound), 1));
	  tmp = gfc_rank_cst[GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))];
	  tmp = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
				 bound, tmp);
	  cond = fold_build2_loc (input_location, TRUTH_ORIF_EXPR,
				  boolean_type_node, cond, tmp);
	  gfc_trans_runtime_check (true, false, cond, &se->pre, &expr->where,
				   gfc_msg_fault);
	}


      /* Subtract 1 to get to zero based and add dimensions.  The net
	 offset from the one-based DIM to the descriptor index is
	 rank - 1, which is -1 for rank 0, nothing for rank 1, and
	 a positive shift otherwise.  */
      switch (arg->expr->rank)
	{
	case 0:
	  bound = fold_build2_loc (input_location, MINUS_EXPR,
				   gfc_array_index_type, bound,
				   gfc_index_one_node);
	  /* Fall through.  */
	case 1:
	  break;
	default:
	  bound = fold_build2_loc (input_location, PLUS_EXPR,
				   gfc_array_index_type, bound,
				   gfc_rank_cst[arg->expr->rank - 1]);
	}
    }

  resbound = gfc_conv_descriptor_lbound_get (desc, bound);

  /* Handle UCOBOUND with special handling of the last codimension.  */
  if (expr->value.function.isym->id == GFC_ISYM_UCOBOUND)
    {
      /* Last codimension: For -fcoarray=single just return
	 the lcobound - otherwise add
	   ceiling (real (num_images ()) / real (size)) - 1
	 = (num_images () + size - 1) / size - 1
	 = (num_images - 1) / size(),
	 where size is the product of the extent of all but the last
	 codimension.  */

      if (gfc_option.coarray != GFC_FCOARRAY_SINGLE && corank > 1)
	{
	  tree cosize;

	  gfc_init_coarray_decl (false);
	  cosize = gfc_conv_descriptor_cosize (desc, arg->expr->rank, corank);

	  tmp = fold_build2_loc (input_location, MINUS_EXPR,
				 gfc_array_index_type,
				 fold_convert (gfc_array_index_type,
					       gfort_gvar_caf_num_images),
				 build_int_cst (gfc_array_index_type, 1));
	  tmp = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
				 gfc_array_index_type, tmp,
				 fold_convert (gfc_array_index_type, cosize));
	  resbound = fold_build2_loc (input_location, PLUS_EXPR,
				      gfc_array_index_type, resbound, tmp);
	}
      else if (gfc_option.coarray != GFC_FCOARRAY_SINGLE)
	{
	  /* ubound = lbound + num_images() - 1.  */
	  gfc_init_coarray_decl (false);
	  tmp = fold_build2_loc (input_location, MINUS_EXPR,
				 gfc_array_index_type,
				 fold_convert (gfc_array_index_type,
					       gfort_gvar_caf_num_images),
				 build_int_cst (gfc_array_index_type, 1));
	  resbound = fold_build2_loc (input_location, PLUS_EXPR,
				      gfc_array_index_type, resbound, tmp);
	}

      if (corank > 1)
	{
	  /* Only the last codimension uses the num_images-derived bound;
	     the others take the ubound stored in the descriptor.  */
	  cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
				  bound,
				  build_int_cst (TREE_TYPE (bound),
						 arg->expr->rank + corank - 1));

	  resbound2 = gfc_conv_descriptor_ubound_get (desc, bound);
	  se->expr = fold_build3_loc (input_location, COND_EXPR,
				      gfc_array_index_type, cond,
				      resbound, resbound2);
	}
      else
	se->expr = resbound;
    }
  else
    se->expr = resbound;

  type = gfc_typenode_for_spec (&expr->ts);
  se->expr = convert (type, se->expr);
}
| |
| |
| static void |
| conv_intrinsic_stride (gfc_se * se, gfc_expr * expr) |
| { |
| gfc_actual_arglist *array_arg; |
| gfc_actual_arglist *dim_arg; |
| gfc_se argse; |
| tree desc, tmp; |
| |
| array_arg = expr->value.function.actual; |
| dim_arg = array_arg->next; |
| |
| gcc_assert (array_arg->expr->expr_type == EXPR_VARIABLE); |
| |
| gfc_init_se (&argse, NULL); |
| gfc_conv_expr_descriptor (&argse, array_arg->expr); |
| gfc_add_block_to_block (&se->pre, &argse.pre); |
| gfc_add_block_to_block (&se->post, &argse.post); |
| desc = argse.expr; |
| |
| gcc_assert (dim_arg->expr); |
| gfc_init_se (&argse, NULL); |
| gfc_conv_expr_type (&argse, dim_arg->expr, gfc_array_index_type); |
| gfc_add_block_to_block (&se->pre, &argse.pre); |
| tmp = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type, |
| argse.expr, gfc_index_one_node); |
| se->expr = gfc_conv_descriptor_stride_get (desc, tmp); |
| } |
| |
| |
| static void |
| gfc_conv_intrinsic_abs (gfc_se * se, gfc_expr * expr) |
| { |
| tree arg, cabs; |
| |
| gfc_conv_intrinsic_function_args (se, expr, &arg, 1); |
| |
| switch (expr->value.function.actual->expr->ts.type) |
| { |
| case BT_INTEGER: |
| case BT_REAL: |
| se->expr = fold_build1_loc (input_location, ABS_EXPR, TREE_TYPE (arg), |
| arg); |
| break; |
| |
| case BT_COMPLEX: |
| cabs = gfc_builtin_decl_for_float_kind (BUILT_IN_CABS, expr->ts.kind); |
| se->expr = build_call_expr_loc (input_location, cabs, 1, arg); |
| break; |
| |
| default: |
| gcc_unreachable (); |
| } |
| } |
| |
| |
| /* Create a complex value from one or two real components. */ |
| |
| static void |
| gfc_conv_intrinsic_cmplx (gfc_se * se, gfc_expr * expr, int both) |
| { |
| tree real; |
| tree imag; |
| tree type; |
| tree *args; |
| unsigned int num_args; |
| |
| num_args = gfc_intrinsic_argument_list_length (expr); |
| args = XALLOCAVEC (tree, num_args); |
| |
| type = gfc_typenode_for_spec (&expr->ts); |
| gfc_conv_intrinsic_function_args (se, expr, args, num_args); |
| real = convert (TREE_TYPE (type), args[0]); |
| if (both) |
| imag = convert (TREE_TYPE (type), args[1]); |
| else if (TREE_CODE (TREE_TYPE (args[0])) == COMPLEX_TYPE) |
| { |
| imag = fold_build1_loc (input_location, IMAGPART_EXPR, |
| TREE_TYPE (TREE_TYPE (args[0])), args[0]); |
| imag = convert (TREE_TYPE (type), imag); |
| } |
| else |
| imag = build_real_from_int_cst (TREE_TYPE (type), integer_zero_node); |
| |
| se->expr = fold_build2_loc (input_location, COMPLEX_EXPR, type, real, imag); |
| } |
| |
| |
| /* Remainder function MOD(A, P) = A - INT(A / P) * P |
| MODULO(A, P) = A - FLOOR (A / P) * P |
| |
| The obvious algorithms above are numerically instable for large |
| arguments, hence these intrinsics are instead implemented via calls |
| to the fmod family of functions. It is the responsibility of the |
| user to ensure that the second argument is non-zero. */ |
| |
| static void |
| gfc_conv_intrinsic_mod (gfc_se * se, gfc_expr * expr, int modulo) |
| { |
| tree type; |
| tree tmp; |
| tree test; |
| tree test2; |
| tree fmod; |
| tree zero; |
| tree args[2]; |
| |
| gfc_conv_intrinsic_function_args (se, expr, args, 2); |
| |
| switch (expr->ts.type) |
| { |
| case BT_INTEGER: |
| /* Integer case is easy, we've got a builtin op. */ |
| type = TREE_TYPE (args[0]); |
| |
| if (modulo) |
| se->expr = fold_build2_loc (input_location, FLOOR_MOD_EXPR, type, |
| args[0], args[1]); |
| else |
| se->expr = fold_build2_loc (input_location, TRUNC_MOD_EXPR, type, |
| args[0], args[1]); |
| break; |
| |
| case BT_REAL: |
| fmod = NULL_TREE; |
| /* Check if we have a builtin fmod. */ |
| fmod = gfc_builtin_decl_for_float_kind (BUILT_IN_FMOD, expr->ts.kind); |
| |
| /* The builtin should always be available. */ |
| gcc_assert (fmod != NULL_TREE); |
| |
| tmp = build_addr (fmod, current_function_decl); |
| se->expr = build_call_array_loc (input_location, |
| TREE_TYPE (TREE_TYPE (fmod)), |
| tmp, 2, args); |
| if (modulo == 0) |
| return; |
| |
| type = TREE_TYPE (args[0]); |
| |
| args[0] = gfc_evaluate_now (args[0], &se->pre); |
| args[1] = gfc_evaluate_now (args[1], &se->pre); |
| |
| /* Definition: |
| modulo = arg - floor (arg/arg2) * arg2 |
| |
| In order to calculate the result accurately, we use the fmod |
| function as follows. |
| |
| res = fmod (arg, arg2); |
| if (res) |
| { |
| if ((arg < 0) xor (arg2 < 0)) |
| res += arg2; |
| } |
| else |
| res = copysign (0., arg2); |
| |
| => As two nested ternary exprs: |
| |
| res = res ? (((arg < 0) xor (arg2 < 0)) ? res + arg2 : res) |
| : copysign (0., arg2); |
| |
| */ |
| |
| zero = gfc_build_const (type, integer_zero_node); |
| tmp = gfc_evaluate_now (se->expr, &se->pre); |
| if (!flag_signed_zeros) |
| { |
| test = fold_build2_loc (input_location, LT_EXPR, boolean_type_node, |
| args[0], zero); |
| test2 = fold_build2_loc (input_location, LT_EXPR, boolean_type_node, |
| args[1], zero); |
| test2 = fold_build2_loc (input_location, TRUTH_XOR_EXPR, |
| boolean_type_node, test, test2); |
| test = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, |
| tmp, zero); |
| test = fold_build2_loc (input_location, TRUTH_AND_EXPR, |
| boolean_type_node, test, test2); |
| test = gfc_evaluate_now (test, &se->pre); |
| se->expr = fold_build3_loc (input_location, COND_EXPR, type, test, |
| fold_build2_loc (input_location, |
| PLUS_EXPR, |
| type, tmp, args[1]), |
| tmp); |
| } |
| else |
| { |
| tree expr1, copysign, cscall; |
| copysign = gfc_builtin_decl_for_float_kind (BUILT_IN_COPYSIGN, |
| expr->ts.kind); |
| test = fold_build2_loc (input_location, LT_EXPR, boolean_type_node, |
| args[0], zero); |
| test2 = fold_build2_loc (input_location, LT_EXPR, boolean_type_node, |
| args[1], zero); |
| test2 = fold_build2_loc (input_location, TRUTH_XOR_EXPR, |
| boolean_type_node, test, test2); |
| expr1 = fold_build3_loc (input_location, COND_EXPR, type, test2, |
| fold_build2_loc (input_location, |
| PLUS_EXPR, |
| type, tmp, args[1]), |
| tmp); |
| test = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, |
| tmp, zero); |
| cscall = build_call_expr_loc (input_location, copysign, 2, zero, |
| args[1]); |
| se->expr = fold_build3_loc (input_location, COND_EXPR, type, test, |
| expr1, cscall); |
| } |
| return; |
| |
| default: |
| gcc_unreachable (); |
| } |
| } |
| |
| /* DSHIFTL(I,J,S) = (I << S) | (J >> (BITSIZE(J) - S)) |
| DSHIFTR(I,J,S) = (I << (BITSIZE(I) - S)) | (J >> S) |
| where the right shifts are logical (i.e. 0's are shifted in). |
| Because SHIFT_EXPR's want shifts strictly smaller than the integral |
| type width, we have to special-case both S == 0 and S == BITSIZE(J): |
| DSHIFTL(I,J,0) = I |
| DSHIFTL(I,J,BITSIZE) = J |
| DSHIFTR(I,J,0) = J |
| DSHIFTR(I,J,BITSIZE) = I. */ |
| |
| static void |
| gfc_conv_intrinsic_dshift (gfc_se * se, gfc_expr * expr, bool dshiftl) |
| { |
| tree type, utype, stype, arg1, arg2, shift, res, left, right; |
| tree args[3], cond, tmp; |
| int bitsize; |
| |
| gfc_conv_intrinsic_function_args (se, expr, args, 3); |
| |
| gcc_assert (TREE_TYPE (args[0]) == TREE_TYPE (args[1])); |
| type = TREE_TYPE (args[0]); |
| bitsize = TYPE_PRECISION (type); |
| utype = unsigned_type_for (type); |
| stype = TREE_TYPE (args[2]); |
| |
| arg1 = gfc_evaluate_now (args[0], &se->pre); |
| arg2 = gfc_evaluate_now (args[1], &se->pre); |
| shift = gfc_evaluate_now (args[2], &se->pre); |
| |
| /* The generic case. */ |
| tmp = fold_build2_loc (input_location, MINUS_EXPR, stype, |
| build_int_cst (stype, bitsize), shift); |
| left = fold_build2_loc (input_location, LSHIFT_EXPR, type, |
| arg1, dshiftl ? shift : tmp); |
| |
| right = fold_build2_loc (input_location, RSHIFT_EXPR, utype, |
| fold_convert (utype, arg2), dshiftl ? tmp : shift); |
| right = fold_convert (type, right); |
| |
| res = fold_build2_loc (input_location, BIT_IOR_EXPR, type, left, right); |
| |
| /* Special cases. */ |
| cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, shift, |
| build_int_cst (stype, 0)); |
| res = fold_build3_loc (input_location, COND_EXPR, type, cond, |
| dshiftl ? arg1 : arg2, res); |
| |
| cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, shift, |
| build_int_cst (stype, bitsize)); |
| res = fold_build3_loc (input_location, COND_EXPR, type, cond, |
| dshiftl ? arg2 : arg1, res); |
| |
| se->expr = res; |
| } |
| |
| |
| /* Positive difference DIM (x, y) = ((x - y) < 0) ? 0 : x - y. */ |
| |
| static void |
| gfc_conv_intrinsic_dim (gfc_se * se, gfc_expr * expr) |
| { |
| tree val; |
| tree tmp; |
| tree type; |
| tree zero; |
| tree args[2]; |
| |
| gfc_conv_intrinsic_function_args (se, expr, args, 2); |
| type = TREE_TYPE (args[0]); |
| |
| val = fold_build2_loc (input_location, MINUS_EXPR, type, args[0], args[1]); |
| val = gfc_evaluate_now (val, &se->pre); |
| |
| zero = gfc_build_const (type, integer_zero_node); |
| tmp = fold_build2_loc (input_location, LE_EXPR, boolean_type_node, val, zero); |
| se->expr = fold_build3_loc (input_location, COND_EXPR, type, tmp, zero, val); |
| } |
| |
| |
/* SIGN(A, B) is absolute value of A times sign of B.
   The real value versions use library functions to ensure the correct
   handling of negative zero.  Integer case implemented as:
     SIGN(A, B) = { tmp = (A ^ B) >> C; (A + tmp) ^ tmp }
   where C is one less than the bit width of the type, so the arithmetic
   shift smears the sign bit of A ^ B over the whole word.  */

static void
gfc_conv_intrinsic_sign (gfc_se * se, gfc_expr * expr)
{
  tree tmp;
  tree type;
  tree args[2];

  gfc_conv_intrinsic_function_args (se, expr, args, 2);
  if (expr->ts.type == BT_REAL)
    {
      tree abs;

      /* tmp holds the copysign builtin for this kind.  */
      tmp = gfc_builtin_decl_for_float_kind (BUILT_IN_COPYSIGN, expr->ts.kind);
      abs = gfc_builtin_decl_for_float_kind (BUILT_IN_FABS, expr->ts.kind);

      /* We explicitly have to ignore the minus sign. We do so by using
	 result = (arg1 == 0) ? abs(arg0) : copysign(arg0, arg1).  */
      if (!gfc_option.flag_sign_zero
	  && MODE_HAS_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (args[1]))))
	{
	  tree cond, zero;
	  zero = build_real_from_int_cst (TREE_TYPE (args[1]), integer_zero_node);
	  cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
				  args[1], zero);
	  se->expr = fold_build3_loc (input_location, COND_EXPR,
				      TREE_TYPE (args[0]), cond,
				      build_call_expr_loc (input_location, abs, 1,
							   args[0]),
				      build_call_expr_loc (input_location, tmp, 2,
							   args[0], args[1]));
	}
      else
	se->expr = build_call_expr_loc (input_location, tmp, 2,
					args[0], args[1]);
      return;
    }

  /* Having excluded floating point types, we know we are now dealing
     with signed integer types.  */
  type = TREE_TYPE (args[0]);

  /* Args[0] is used multiple times below.  */
  args[0] = gfc_evaluate_now (args[0], &se->pre);

  /* Construct (A ^ B) >> 31, which generates a bit mask of all zeros if
     the signs of A and B are the same, and of all ones if they differ.  */
  tmp = fold_build2_loc (input_location, BIT_XOR_EXPR, type, args[0], args[1]);
  tmp = fold_build2_loc (input_location, RSHIFT_EXPR, type, tmp,
			 build_int_cst (type, TYPE_PRECISION (type) - 1));
  tmp = gfc_evaluate_now (tmp, &se->pre);

  /* Construct (A + tmp) ^ tmp, which is A if tmp is zero, and -A if tmp
     is all ones (i.e. -1).  */
  se->expr = fold_build2_loc (input_location, BIT_XOR_EXPR, type,
			      fold_build2_loc (input_location, PLUS_EXPR,
					       type, args[0], tmp), tmp);
}
| |
| |
| /* Test for the presence of an optional argument. */ |
| |
| static void |
| gfc_conv_intrinsic_present (gfc_se * se, gfc_expr * expr) |
| { |
| gfc_expr *arg; |
| |
| arg = expr->value.function.actual->expr; |
| gcc_assert (arg->expr_type == EXPR_VARIABLE); |
| se->expr = gfc_conv_expr_present (arg->symtree->n.sym); |
| se->expr = convert (gfc_typenode_for_spec (&expr->ts), se->expr); |
| } |
| |
| |
| /* Calculate the double precision product of two single precision values. */ |
| |
| static void |
| gfc_conv_intrinsic_dprod (gfc_se * se, gfc_expr * expr) |
| { |
| tree type; |
| tree args[2]; |
| |
| gfc_conv_intrinsic_function_args (se, expr, args, 2); |
| |
| /* Convert the args to double precision before multiplying. */ |
| type = gfc_typenode_for_spec (&expr->ts); |
| args[0] = convert (type, args[0]); |
| args[1] = convert (type, args[1]); |
| se->expr = fold_build2_loc (input_location, MULT_EXPR, type, args[0], |
| args[1]); |
| } |
| |
| |
| /* Return a length one character string containing an ascii character. */ |
| |
| static void |
| gfc_conv_intrinsic_char (gfc_se * se, gfc_expr * expr) |
| { |
| tree arg[2]; |
| tree var; |
| tree type; |
| unsigned int num_args; |
| |
| num_args = gfc_intrinsic_argument_list_length (expr); |
| gfc_conv_intrinsic_function_args (se, expr, arg, num_args); |
| |
| type = gfc_get_char_type (expr->ts.kind); |
| var = gfc_create_var (type, "char"); |
| |
| arg[0] = fold_build1_loc (input_location, NOP_EXPR, type, arg[0]); |
| gfc_add_modify (&se->pre, var, arg[0]); |
| se->expr = gfc_build_addr_expr (build_pointer_type (type), var); |
| se->string_length = build_int_cst (gfc_charlen_type_node, 1); |
| } |
| |
| |
| static void |
| gfc_conv_intrinsic_ctime (gfc_se * se, gfc_expr * expr) |
| { |
| tree var; |
| tree len; |
| tree tmp; |
| tree cond; |
| tree fndecl; |
| tree *args; |
| unsigned int num_args; |
| |
| num_args = gfc_intrinsic_argument_list_length (expr) + 2; |
| args = XALLOCAVEC (tree, num_args); |
| |
| var = gfc_create_var (pchar_type_node, "pstr"); |
| len = gfc_create_var (gfc_charlen_type_node, "len"); |
| |
| gfc_conv_intrinsic_function_args (se, expr, &args[2], num_args - 2); |
| args[0] = gfc_build_addr_expr (NULL_TREE, var); |
| args[1] = gfc_build_addr_expr (NULL_TREE, len); |
| |
| fndecl = build_addr (gfor_fndecl_ctime, current_function_decl); |
| tmp = build_call_array_loc (input_location, |
| TREE_TYPE (TREE_TYPE (gfor_fndecl_ctime)), |
| fndecl, num_args, args); |
| gfc_add_expr_to_block (&se->pre, tmp); |
| |
| /* Free the temporary afterwards, if necessary. */ |
| cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node, |
| len, build_int_cst (TREE_TYPE (len), 0)); |
| tmp = gfc_call_free (var); |
| tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt (input_location)); |
| gfc_add_expr_to_block (&se->post, tmp); |
| |
| se->expr = var; |
| se->string_length = len; |
| } |
| |
| |
| static void |
| gfc_conv_intrinsic_fdate (gfc_se * se, gfc_expr * expr) |
| { |
| tree var; |
| tree len; |
| tree tmp; |
| tree cond; |
| tree fndecl; |
| tree *args; |
| unsigned int num_args; |
| |
| num_args = gfc_intrinsic_argument_list_length (expr) + 2; |
| args = XALLOCAVEC (tree, num_args); |
| |
| var = gfc_create_var (pchar_type_node, "pstr"); |
| len = gfc_create_var (gfc_charlen_type_node, "len"); |
| |
| gfc_conv_intrinsic_function_args (se, expr, &args[2], num_args - 2); |
| args[0] = gfc_build_addr_expr (NULL_TREE, var); |
| args[1] = gfc_build_addr_expr (NULL_TREE, len); |
| |
| fndecl = build_addr (gfor_fndecl_fdate, current_function_decl); |
| tmp = build_call_array_loc (input_location, |
| TREE_TYPE (TREE_TYPE (gfor_fndecl_fdate)), |
| fndecl, num_args, args); |
| gfc_add_expr_to_block (&se->pre, tmp); |
| |
| /* Free the temporary afterwards, if necessary. */ |
| cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node, |
| len, build_int_cst (TREE_TYPE (len), 0)); |
| tmp = gfc_call_free (var); |
| tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt (input_location)); |
| gfc_add_expr_to_block (&se->post, tmp); |
| |
| se->expr = var; |
| se->string_length = len; |
| } |
| |
| |
| /* Return a character string containing the tty name. */ |
| |
| static void |
| gfc_conv_intrinsic_ttynam (gfc_se * se, gfc_expr * expr) |
| { |
| tree var; |
| tree len; |
| tree tmp; |
| tree cond; |
| tree fndecl; |
| tree *args; |
| unsigned int num_args; |
| |
| num_args = gfc_intrinsic_argument_list_length (expr) + 2; |
| args = XALLOCAVEC (tree, num_args); |
| |
| var = gfc_create_var (pchar_type_node, "pstr"); |
| len = gfc_create_var (gfc_charlen_type_node, "len"); |
| |
| gfc_conv_intrinsic_function_args (se, expr, &args[2], num_args - 2); |
| args[0] = gfc_build_addr_expr (NULL_TREE, var); |
| args[1] = gfc_build_addr_expr (NULL_TREE, len); |
| |
| fndecl = build_addr (gfor_fndecl_ttynam, current_function_decl); |
| tmp = build_call_array_loc (input_location, |
| TREE_TYPE (TREE_TYPE (gfor_fndecl_ttynam)), |
| fndecl, num_args, args); |
| gfc_add_expr_to_block (&se->pre, tmp); |
| |
| /* Free the temporary afterwards, if necessary. */ |
| cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node, |
| len, build_int_cst (TREE_TYPE (len), 0)); |
| tmp = gfc_call_free (var); |
| tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt (input_location)); |
| gfc_add_expr_to_block (&se->post, tmp); |
| |
| se->expr = var; |
| se->string_length = len; |
| } |
| |
| |
/* Get the minimum/maximum value of all the parameters.
    minmax (a1, a2, a3, ...)
    {
      mvar = a1;
      if (a2 .op. mvar || isnan(mvar))
        mvar = a2;
      if (a3 .op. mvar || isnan(mvar))
        mvar = a3;
      ...
      return mvar
    }
    The isnan test means a leading NaN in mvar is replaced by the first
    argument that compares at all.
 */

/* TODO: Mismatching types can occur when specific names are used.
   These should be handled during resolution.  */
static void
gfc_conv_intrinsic_minmax (gfc_se * se, gfc_expr * expr, enum tree_code op)
{
  tree tmp;
  tree mvar;		/* Accumulator holding the running min/max.  */
  tree val;
  tree thencase;
  tree *args;
  tree type;
  gfc_actual_arglist *argexpr;
  unsigned int i, nargs;

  nargs = gfc_intrinsic_argument_list_length (expr);
  args = XALLOCAVEC (tree, nargs);

  gfc_conv_intrinsic_function_args (se, expr, args, nargs);
  type = gfc_typenode_for_spec (&expr->ts);

  argexpr = expr->value.function.actual;
  if (TREE_TYPE (args[0]) != type)
    args[0] = convert (type, args[0]);
  /* Only evaluate the argument once.  */
  if (TREE_CODE (args[0]) != VAR_DECL && !TREE_CONSTANT (args[0]))
    args[0] = gfc_evaluate_now (args[0], &se->pre);

  /* Seed the accumulator with the first argument.  */
  mvar = gfc_create_var (type, "M");
  gfc_add_modify (&se->pre, mvar, args[0]);
  /* argexpr walks the actual argument list in lock-step with args[].  */
  for (i = 1, argexpr = argexpr->next; i < nargs; i++)
    {
      tree cond, isnan;

      val = args[i];

      /* Handle absent optional arguments by ignoring the comparison.
	 cond tests that the argument's pointer is non-NULL, i.e. that
	 the optional dummy is present.  */
      if (argexpr->expr->expr_type == EXPR_VARIABLE
	  && argexpr->expr->symtree->n.sym->attr.optional
	  && TREE_CODE (val) == INDIRECT_REF)
	cond = fold_build2_loc (input_location,
				NE_EXPR, boolean_type_node,
				TREE_OPERAND (val, 0),
				build_int_cst (TREE_TYPE (TREE_OPERAND (val, 0)), 0));
      else
	{
	  cond = NULL_TREE;

	  /* Only evaluate the argument once.  */
	  if (TREE_CODE (val) != VAR_DECL && !TREE_CONSTANT (val))
	    val = gfc_evaluate_now (val, &se->pre);
	}

      /* thencase: mvar = val (converted to the result type).  */
      thencase = build2_v (MODIFY_EXPR, mvar, convert (type, val));

      /* The comparison val .op. mvar that selects the new value.  */
      tmp = fold_build2_loc (input_location, op, boolean_type_node,
			     convert (type, val), mvar);

      /* FIXME: When the IEEE_ARITHMETIC module is implemented, the call to
	 __builtin_isnan might be made dependent on that module being loaded,
	 to help performance of programs that don't rely on IEEE semantics.  */
      if (FLOAT_TYPE_P (TREE_TYPE (mvar)))
	{
	  isnan = build_call_expr_loc (input_location,
				       builtin_decl_explicit (BUILT_IN_ISNAN),
				       1, mvar);
	  tmp = fold_build2_loc (input_location, TRUTH_OR_EXPR,
				 boolean_type_node, tmp,
				 fold_convert (boolean_type_node, isnan));
	}
      tmp = build3_v (COND_EXPR, tmp, thencase,
		      build_empty_stmt (input_location));

      /* For a possibly-absent optional argument, guard the whole update
	 with the presence test computed above.  */
      if (cond != NULL_TREE)
	tmp = build3_v (COND_EXPR, cond, tmp,
			build_empty_stmt (input_location));

      gfc_add_expr_to_block (&se->pre, tmp);
      argexpr = argexpr->next;
    }
  se->expr = mvar;
}
| |
| |
/* Generate library calls for MIN and MAX intrinsics for character
   variables.  The library routine receives: &len (out), &pstr (out),
   the OP code, the argument count, then the translated string
   arguments.  The result string is heap-allocated by the library and
   freed in se->post when its length is positive.  */
static void
gfc_conv_intrinsic_minmax_char (gfc_se * se, gfc_expr * expr, int op)
{
  tree *args;
  tree var, len, fndecl, tmp, cond, function;
  unsigned int nargs;

  nargs = gfc_intrinsic_argument_list_length (expr);
  /* Four leading slots for &len, &pstr, op and the argument count.  */
  args = XALLOCAVEC (tree, nargs + 4);
  gfc_conv_intrinsic_function_args (se, expr, &args[4], nargs);

  /* Create the result variables.  */
  len = gfc_create_var (gfc_charlen_type_node, "len");
  args[0] = gfc_build_addr_expr (NULL_TREE, len);
  var = gfc_create_var (gfc_get_pchar_type (expr->ts.kind), "pstr");
  args[1] = gfc_build_addr_expr (ppvoid_type_node, var);
  args[2] = build_int_cst (integer_type_node, op);
  /* nargs / 2: presumably each string argument occupies two slots
     (length and pointer) — TODO confirm against the libgfortran
     string_minmax signature.  */
  args[3] = build_int_cst (integer_type_node, nargs / 2);

  /* Pick the library entry point by character kind.  */
  if (expr->ts.kind == 1)
    function = gfor_fndecl_string_minmax;
  else if (expr->ts.kind == 4)
    function = gfor_fndecl_string_minmax_char4;
  else
    gcc_unreachable ();

  /* Make the function call.  */
  fndecl = build_addr (function, current_function_decl);
  tmp = build_call_array_loc (input_location,
			      TREE_TYPE (TREE_TYPE (function)), fndecl,
			      nargs + 4, args);
  gfc_add_expr_to_block (&se->pre, tmp);

  /* Free the temporary afterwards, if necessary.  */
  cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
			  len, build_int_cst (TREE_TYPE (len), 0));
  tmp = gfc_call_free (var);
  tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt (input_location));
  gfc_add_expr_to_block (&se->post, tmp);

  se->expr = var;
  se->string_length = len;
}
| |
| |
| /* Create a symbol node for this intrinsic. The symbol from the frontend |
| has the generic name. */ |
| |
| static gfc_symbol * |
| gfc_get_symbol_for_expr (gfc_expr * expr) |
| { |
| gfc_symbol *sym; |
| |
| /* TODO: Add symbols for intrinsic function to the global namespace. */ |
| gcc_assert (strlen (expr->value.function.name) <= GFC_MAX_SYMBOL_LEN - 5); |
| sym = gfc_new_symbol (expr->value.function.name, NULL); |
| |
| sym->ts = expr->ts; |
| sym->attr.external = 1; |
| sym->attr.function = 1; |
| sym->attr.always_explicit = 1; |
| sym->attr.proc = PROC_INTRINSIC; |
| sym->attr.flavor = FL_PROCEDURE; |
| sym->result = sym; |
| if (expr->rank > 0) |
| { |
| sym->attr.dimension = 1; |
| sym->as = gfc_get_array_spec (); |
| sym->as->type = AS_ASSUMED_SHAPE; |
| sym->as->rank = expr->rank; |
| } |
| |
| gfc_copy_formal_args_intr (sym, expr->value.function.isym); |
| |
| return sym; |
| } |
| |
/* Generate a call to an external intrinsic function.  Builds a
   temporary symbol for the intrinsic (see gfc_get_symbol_for_expr) and
   lowers the reference through the ordinary procedure-call machinery.  */
static void
gfc_conv_intrinsic_funcall (gfc_se * se, gfc_expr * expr)
{
  gfc_symbol *sym;
  vec<tree, va_gc> *append_args;

  gcc_assert (!se->ss || se->ss->info->expr == expr);

  /* A scalarization chain is present exactly when the result is an
     array.  */
  if (se->ss)
    gcc_assert (expr->rank > 0);
  else
    gcc_assert (expr->rank == 0);

  sym = gfc_get_symbol_for_expr (expr);

  /* Calls to libgfortran_matmul need to be appended special arguments,
     to be able to call the BLAS ?gemm functions if required and possible.
     Three extra arguments are passed: a flag saying whether BLAS may be
     used, the size limit below which the inline path is kept, and the
     address of the ?gemm routine (or a null pointer).  LOGICAL matmul
     never uses BLAS.  */
  append_args = NULL;
  if (expr->value.function.isym->id == GFC_ISYM_MATMUL
      && sym->ts.type != BT_LOGICAL)
    {
      tree cint = gfc_get_int_type (gfc_c_int_kind);

      /* BLAS is only usable for real/complex of kind 4 or 8, and only
	 when -fexternal-blas was given.  */
      if (gfc_option.flag_external_blas
	  && (sym->ts.type == BT_REAL || sym->ts.type == BT_COMPLEX)
	  && (sym->ts.kind == 4 || sym->ts.kind == 8))
	{
	  tree gemm_fndecl;

	  /* Select sgemm/dgemm/cgemm/zgemm by type and kind.  */
	  if (sym->ts.type == BT_REAL)
	    {
	      if (sym->ts.kind == 4)
		gemm_fndecl = gfor_fndecl_sgemm;
	      else
		gemm_fndecl = gfor_fndecl_dgemm;
	    }
	  else
	    {
	      if (sym->ts.kind == 4)
		gemm_fndecl = gfor_fndecl_cgemm;
	      else
		gemm_fndecl = gfor_fndecl_zgemm;
	    }

	  vec_alloc (append_args, 3);
	  append_args->quick_push (build_int_cst (cint, 1));
	  append_args->quick_push (build_int_cst (cint,
						  gfc_option.blas_matmul_limit));
	  append_args->quick_push (gfc_build_addr_expr (NULL_TREE,
							gemm_fndecl));
	}
      else
	{
	  /* BLAS not applicable: flag 0, limit 0, no routine.  */
	  vec_alloc (append_args, 3);
	  append_args->quick_push (build_int_cst (cint, 0));
	  append_args->quick_push (build_int_cst (cint, 0));
	  append_args->quick_push (null_pointer_node);
	}
    }

  gfc_conv_procedure_call (se, sym, expr->value.function.actual, expr,
			   append_args);
  gfc_free_symbol (sym);
}
| |
/* ANY and ALL intrinsics. ANY->op == NE_EXPR, ALL->op == EQ_EXPR.
   Implemented as
    any(a)
    {
      forall (i=...)
        if (a[i] != 0)
          return 1
      end forall
      return 0
    }
    all(a)
    {
      forall (i=...)
        if (a[i] == 0)
          return 0
      end forall
      return 1
    }
 */
static void
gfc_conv_intrinsic_anyall (gfc_se * se, gfc_expr * expr, enum tree_code op)
{
  tree resvar;
  stmtblock_t block;
  stmtblock_t body;
  tree type;
  tree tmp;
  tree found;		/* Statement executed when an element matches.  */
  gfc_loopinfo loop;
  gfc_actual_arglist *actual;
  gfc_ss *arrayss;
  gfc_se arrayse;
  tree exit_label;	/* Target for the early exit out of the loop.  */

  /* Array-valued result (e.g. with a DIM argument): punt to the
     library function.  */
  if (se->ss)
    {
      gfc_conv_intrinsic_funcall (se, expr);
      return;
    }

  actual = expr->value.function.actual;
  type = gfc_typenode_for_spec (&expr->ts);
  /* Initialize the result: true for ALL (EQ_EXPR), false for ANY.  */
  resvar = gfc_create_var (type, "test");
  if (op == EQ_EXPR)
    tmp = convert (type, boolean_true_node);
  else
    tmp = convert (type, boolean_false_node);
  gfc_add_modify (&se->pre, resvar, tmp);

  /* Walk the arguments.  */
  arrayss = gfc_walk_expr (actual->expr);
  gcc_assert (arrayss != gfc_ss_terminator);

  /* Initialize the scalarizer.  */
  gfc_init_loopinfo (&loop);
  exit_label = gfc_build_label_decl (NULL_TREE);
  TREE_USED (exit_label) = 1;
  gfc_add_ss_to_loop (&loop, arrayss);

  /* Initialize the loop.  */
  gfc_conv_ss_startstride (&loop);
  gfc_conv_loop_setup (&loop, &expr->where);

  gfc_mark_ss_chain_used (arrayss, 1);
  /* Generate the loop body.  */
  gfc_start_scalarized_body (&loop, &body);

  /* If the condition matches then set the return value.  */
  gfc_start_block (&block);
  if (op == EQ_EXPR)
    tmp = convert (type, boolean_false_node);
  else
    tmp = convert (type, boolean_true_node);
  gfc_add_modify (&block, resvar, tmp);

  /* And break out of the loop.  */
  tmp = build1_v (GOTO_EXPR, exit_label);
  gfc_add_expr_to_block (&block, tmp);

  found = gfc_finish_block (&block);

  /* Check this element.  */
  gfc_init_se (&arrayse, NULL);
  gfc_copy_loopinfo_to_se (&arrayse, &loop);
  arrayse.ss = arrayss;
  gfc_conv_expr_val (&arrayse, actual->expr);

  /* if (a[i] .op. 0) { set result; goto exit; }  */
  gfc_add_block_to_block (&body, &arrayse.pre);
  tmp = fold_build2_loc (input_location, op, boolean_type_node, arrayse.expr,
			 build_int_cst (TREE_TYPE (arrayse.expr), 0));
  tmp = build3_v (COND_EXPR, tmp, found, build_empty_stmt (input_location));
  gfc_add_expr_to_block (&body, tmp);
  gfc_add_block_to_block (&body, &arrayse.post);

  gfc_trans_scalarizing_loops (&loop, &body);

  /* Add the exit label.  */
  tmp = build1_v (LABEL_EXPR, exit_label);
  gfc_add_expr_to_block (&loop.pre, tmp);

  gfc_add_block_to_block (&se->pre, &loop.pre);
  gfc_add_block_to_block (&se->pre, &loop.post);
  gfc_cleanup_loop (&loop);

  se->expr = resvar;
}
| |
/* COUNT(A) = Number of true elements in A.  Implemented as a scalarized
   loop that increments a counter whenever the mask element is true.  */
static void
gfc_conv_intrinsic_count (gfc_se * se, gfc_expr * expr)
{
  tree resvar;		/* The running count.  */
  tree type;
  stmtblock_t body;
  tree tmp;
  gfc_loopinfo loop;
  gfc_actual_arglist *actual;
  gfc_ss *arrayss;
  gfc_se arrayse;

  /* Array-valued result (e.g. with a DIM argument): punt to the
     library function.  */
  if (se->ss)
    {
      gfc_conv_intrinsic_funcall (se, expr);
      return;
    }

  actual = expr->value.function.actual;

  type = gfc_typenode_for_spec (&expr->ts);
  /* Initialize the result.  */
  resvar = gfc_create_var (type, "count");
  gfc_add_modify (&se->pre, resvar, build_int_cst (type, 0));

  /* Walk the arguments.  */
  arrayss = gfc_walk_expr (actual->expr);
  gcc_assert (arrayss != gfc_ss_terminator);

  /* Initialize the scalarizer.  */
  gfc_init_loopinfo (&loop);
  gfc_add_ss_to_loop (&loop, arrayss);

  /* Initialize the loop.  */
  gfc_conv_ss_startstride (&loop);
  gfc_conv_loop_setup (&loop, &expr->where);

  gfc_mark_ss_chain_used (arrayss, 1);
  /* Generate the loop body.  */
  gfc_start_scalarized_body (&loop, &body);

  /* Build the increment: count = count + 1.  */
  tmp = fold_build2_loc (input_location, PLUS_EXPR, TREE_TYPE (resvar),
			 resvar, build_int_cst (TREE_TYPE (resvar), 1));
  tmp = build2_v (MODIFY_EXPR, resvar, tmp);

  /* Evaluate the mask element and guard the increment with it.  */
  gfc_init_se (&arrayse, NULL);
  gfc_copy_loopinfo_to_se (&arrayse, &loop);
  arrayse.ss = arrayss;
  gfc_conv_expr_val (&arrayse, actual->expr);
  tmp = build3_v (COND_EXPR, arrayse.expr, tmp,
		  build_empty_stmt (input_location));

  gfc_add_block_to_block (&body, &arrayse.pre);
  gfc_add_expr_to_block (&body, tmp);
  gfc_add_block_to_block (&body, &arrayse.post);

  gfc_trans_scalarizing_loops (&loop, &body);

  gfc_add_block_to_block (&se->pre, &loop.pre);
  gfc_add_block_to_block (&se->pre, &loop.post);
  gfc_cleanup_loop (&loop);

  se->expr = resvar;
}
| |
| |
| /* Update given gfc_se to have ss component pointing to the nested gfc_ss |
| struct and return the corresponding loopinfo. */ |
| |
| static gfc_loopinfo * |
| enter_nested_loop (gfc_se *se) |
| { |
| se->ss = se->ss->nested_ss; |
| gcc_assert (se->ss == se->ss->loop->ss); |
| |
| return se->ss->loop; |
| } |
| |
| |
/* Inline implementation of the sum and product intrinsics (and related
   reductions).  OP selects the reduction operation and thus the identity
   value used to initialize the accumulator; NE_EXPR is PARITY.  When
   NORM2 is true the loop instead accumulates a scaled sum of squares and
   the result is scale * sqrt (result).  For rank > 0 results, the work
   is nested inside the parent scalarization loops.  */
static void
gfc_conv_intrinsic_arith (gfc_se * se, gfc_expr * expr, enum tree_code op,
			  bool norm2)
{
  tree resvar;
  tree scale = NULL_TREE;
  tree type;
  stmtblock_t body;
  stmtblock_t block;
  tree tmp;
  gfc_loopinfo loop, *ploop;
  gfc_actual_arglist *arg_array, *arg_mask;
  gfc_ss *arrayss = NULL;
  gfc_ss *maskss = NULL;
  gfc_se arrayse;
  gfc_se maskse;
  gfc_se *parent_se;
  gfc_expr *arrayexpr;
  gfc_expr *maskexpr;

  /* Rank > 0 means we are nested inside the parent's scalarization;
     only possible when the intrinsic was chosen for inline expansion.  */
  if (expr->rank > 0)
    {
      gcc_assert (gfc_inline_intrinsic_function_p (expr));
      parent_se = se;
    }
  else
    parent_se = NULL;

  type = gfc_typenode_for_spec (&expr->ts);
  /* Initialize the result with the identity element for OP.  */
  resvar = gfc_create_var (type, "val");
  if (norm2)
    {
      /* result = 0.0;
	 scale = 1.0.  */
      scale = gfc_create_var (type, "scale");
      gfc_add_modify (&se->pre, scale,
		      gfc_build_const (type, integer_one_node));
      tmp = gfc_build_const (type, integer_zero_node);
    }
  else if (op == PLUS_EXPR || op == BIT_IOR_EXPR || op == BIT_XOR_EXPR)
    tmp = gfc_build_const (type, integer_zero_node);
  else if (op == NE_EXPR)
    /* PARITY.  */
    tmp = convert (type, boolean_false_node);
  else if (op == BIT_AND_EXPR)
    /* Identity for AND is all-ones, i.e. -1.  */
    tmp = gfc_build_const (type, fold_build1_loc (input_location, NEGATE_EXPR,
						  type, integer_one_node));
  else
    tmp = gfc_build_const (type, integer_one_node);

  gfc_add_modify (&se->pre, resvar, tmp);

  arg_array = expr->value.function.actual;

  arrayexpr = arg_array->expr;

  if (op == NE_EXPR || norm2)
    /* PARITY and NORM2.  */
    maskexpr = NULL;
  else
    {
      /* MASK is the third actual argument.  */
      arg_mask  = arg_array->next->next;
      gcc_assert (arg_mask != NULL);
      maskexpr = arg_mask->expr;
    }

  if (expr->rank == 0)
    {
      /* Scalar result: build our own scalarization loop here.  */
      /* Walk the arguments.  */
      arrayss = gfc_walk_expr (arrayexpr);
      gcc_assert (arrayss != gfc_ss_terminator);

      if (maskexpr && maskexpr->rank > 0)
	{
	  maskss = gfc_walk_expr (maskexpr);
	  gcc_assert (maskss != gfc_ss_terminator);
	}
      else
	maskss = NULL;

      /* Initialize the scalarizer.  */
      gfc_init_loopinfo (&loop);
      gfc_add_ss_to_loop (&loop, arrayss);
      if (maskexpr && maskexpr->rank > 0)
	gfc_add_ss_to_loop (&loop, maskss);

      /* Initialize the loop.  */
      gfc_conv_ss_startstride (&loop);
      gfc_conv_loop_setup (&loop, &expr->where);

      gfc_mark_ss_chain_used (arrayss, 1);
      if (maskexpr && maskexpr->rank > 0)
	gfc_mark_ss_chain_used (maskss, 1);

      ploop = &loop;
    }
  else
    /* All the work has been done in the parent loops.  */
    ploop = enter_nested_loop (se);

  gcc_assert (ploop);

  /* Generate the loop body.  */
  gfc_start_scalarized_body (ploop, &body);

  /* If we have a mask, only add this element if the mask is set.  */
  if (maskexpr && maskexpr->rank > 0)
    {
      gfc_init_se (&maskse, parent_se);
      gfc_copy_loopinfo_to_se (&maskse, ploop);
      if (expr->rank == 0)
	maskse.ss = maskss;
      gfc_conv_expr_val (&maskse, maskexpr);
      gfc_add_block_to_block (&body, &maskse.pre);

      gfc_start_block (&block);
    }
  else
    gfc_init_block (&block);

  /* Do the actual summation/product.  */
  gfc_init_se (&arrayse, parent_se);
  gfc_copy_loopinfo_to_se (&arrayse, ploop);
  if (expr->rank == 0)
    arrayse.ss = arrayss;
  gfc_conv_expr_val (&arrayse, arrayexpr);
  gfc_add_block_to_block (&block, &arrayse.pre);

  if (norm2)
    {
      /* if (x(i) != 0.0)
	   {
	     absX = abs(x(i))
	     if (absX > scale)
	       {
                 val = scale/absX;
		 result = 1.0 + result * val * val;
		 scale = absX;
	       }
	     else
	       {
                 val = absX/scale;
	         result += val * val;
	       }
	   }  */
      tree res1, res2, cond, absX, val;
      stmtblock_t ifblock1, ifblock2, ifblock3;

      gfc_init_block (&ifblock1);

      absX = gfc_create_var (type, "absX");
      gfc_add_modify (&ifblock1, absX,
		      fold_build1_loc (input_location, ABS_EXPR, type,
				       arrayse.expr));
      val = gfc_create_var (type, "val");
      /* NOTE(review): this adds the bare VAR_DECL itself to the block;
	 it appears to serve only to emit the declaration — confirm
	 intent before changing.  */
      gfc_add_expr_to_block (&ifblock1, val);

      /* Branch taken when absX > scale: rescale the accumulator.  */
      gfc_init_block (&ifblock2);
      gfc_add_modify (&ifblock2, val,
		      fold_build2_loc (input_location, RDIV_EXPR, type, scale,
				       absX));
      res1 = fold_build2_loc (input_location, MULT_EXPR, type, val, val);
      res1 = fold_build2_loc (input_location, MULT_EXPR, type, resvar, res1);
      res1 = fold_build2_loc (input_location, PLUS_EXPR, type, res1,
			      gfc_build_const (type, integer_one_node));
      gfc_add_modify (&ifblock2, resvar, res1);
      gfc_add_modify (&ifblock2, scale, absX);
      res1 = gfc_finish_block (&ifblock2);

      /* Branch taken otherwise: accumulate (absX/scale)^2.  */
      gfc_init_block (&ifblock3);
      gfc_add_modify (&ifblock3, val,
		      fold_build2_loc (input_location, RDIV_EXPR, type, absX,
				       scale));
      res2 = fold_build2_loc (input_location, MULT_EXPR, type, val, val);
      res2 = fold_build2_loc (input_location, PLUS_EXPR, type, resvar, res2);
      gfc_add_modify (&ifblock3, resvar, res2);
      res2 = gfc_finish_block (&ifblock3);

      cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
			      absX, scale);
      tmp = build3_v (COND_EXPR, cond, res1, res2);
      gfc_add_expr_to_block (&ifblock1, tmp);
      tmp = gfc_finish_block (&ifblock1);

      /* Skip the whole update for zero elements.  */
      cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
			      arrayse.expr,
			      gfc_build_const (type, integer_zero_node));

      tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt (input_location));
      gfc_add_expr_to_block (&block, tmp);
    }
  else
    {
      /* resvar = resvar .op. x(i).  */
      tmp = fold_build2_loc (input_location, op, type, resvar, arrayse.expr);
      gfc_add_modify (&block, resvar, tmp);
    }

  gfc_add_block_to_block (&block, &arrayse.post);

  if (maskexpr && maskexpr->rank > 0)
    {
      /* We enclose the above in if (mask) {...} .  */

      tmp = gfc_finish_block (&block);
      tmp = build3_v (COND_EXPR, maskse.expr, tmp,
		      build_empty_stmt (input_location));
    }
  else
    tmp = gfc_finish_block (&block);
  gfc_add_expr_to_block (&body, tmp);

  gfc_trans_scalarizing_loops (ploop, &body);

  /* For a scalar mask, enclose the loop in an if statement.  */
  if (maskexpr && maskexpr->rank == 0)
    {
      gfc_init_block (&block);
      gfc_add_block_to_block (&block, &ploop->pre);
      gfc_add_block_to_block (&block, &ploop->post);
      tmp = gfc_finish_block (&block);

      if (expr->rank > 0)
	{
	  /* Nested case: the mask value was evaluated by the parent
	     scalarizer.  */
	  tmp = build3_v (COND_EXPR, se->ss->info->data.scalar.value, tmp,
			  build_empty_stmt (input_location));
	  gfc_advance_se_ss_chain (se);
	}
      else
	{
	  gcc_assert (expr->rank == 0);
	  gfc_init_se (&maskse, NULL);
	  gfc_conv_expr_val (&maskse, maskexpr);
	  tmp = build3_v (COND_EXPR, maskse.expr, tmp,
			  build_empty_stmt (input_location));
	}

      gfc_add_expr_to_block (&block, tmp);
      gfc_add_block_to_block (&se->pre, &block);
      gcc_assert (se->post.head == NULL);
    }
  else
    {
      gfc_add_block_to_block (&se->pre, &ploop->pre);
      gfc_add_block_to_block (&se->pre, &ploop->post);
    }

  /* Only clean up the loop we built ourselves.  */
  if (expr->rank == 0)
    gfc_cleanup_loop (ploop);

  if (norm2)
    {
      /* result = scale * sqrt(result).  */
      tree sqrt;
      sqrt = gfc_builtin_decl_for_float_kind (BUILT_IN_SQRT, expr->ts.kind);
      resvar = build_call_expr_loc (input_location,
				    sqrt, 1, resvar);
      resvar = fold_build2_loc (input_location, MULT_EXPR, type, scale, resvar);
    }

  se->expr = resvar;
}
| |
| |
/* Inline implementation of the dot_product intrinsic.  This function
   is based on gfc_conv_intrinsic_arith (the previous function).
   Complex arguments conjugate the first operand; logical arguments
   reduce with OR over AND.  */
static void
gfc_conv_intrinsic_dot_product (gfc_se * se, gfc_expr * expr)
{
  tree resvar;		/* The running sum (or OR-accumulation).  */
  tree type;
  stmtblock_t body;
  stmtblock_t block;
  tree tmp;
  gfc_loopinfo loop;
  gfc_actual_arglist *actual;
  gfc_ss *arrayss1, *arrayss2;
  gfc_se arrayse1, arrayse2;
  gfc_expr *arrayexpr1, *arrayexpr2;

  type = gfc_typenode_for_spec (&expr->ts);

  /* Initialize the result.  */
  resvar = gfc_create_var (type, "val");
  if (expr->ts.type == BT_LOGICAL)
    tmp = build_int_cst (type, 0);
  else
    tmp = gfc_build_const (type, integer_zero_node);

  gfc_add_modify (&se->pre, resvar, tmp);

  /* Walk argument #1.  */
  actual = expr->value.function.actual;
  arrayexpr1 = actual->expr;
  arrayss1 = gfc_walk_expr (arrayexpr1);
  gcc_assert (arrayss1 != gfc_ss_terminator);

  /* Walk argument #2.  */
  actual = actual->next;
  arrayexpr2 = actual->expr;
  arrayss2 = gfc_walk_expr (arrayexpr2);
  gcc_assert (arrayss2 != gfc_ss_terminator);

  /* Initialize the scalarizer.  */
  gfc_init_loopinfo (&loop);
  gfc_add_ss_to_loop (&loop, arrayss1);
  gfc_add_ss_to_loop (&loop, arrayss2);

  /* Initialize the loop.  */
  gfc_conv_ss_startstride (&loop);
  gfc_conv_loop_setup (&loop, &expr->where);

  gfc_mark_ss_chain_used (arrayss1, 1);
  gfc_mark_ss_chain_used (arrayss2, 1);

  /* Generate the loop body.  */
  gfc_start_scalarized_body (&loop, &body);
  gfc_init_block (&block);

  /* Make the tree expression for [conjg(]array1[)].  */
  gfc_init_se (&arrayse1, NULL);
  gfc_copy_loopinfo_to_se (&arrayse1, &loop);
  arrayse1.ss = arrayss1;
  gfc_conv_expr_val (&arrayse1, arrayexpr1);
  if (expr->ts.type == BT_COMPLEX)
    arrayse1.expr = fold_build1_loc (input_location, CONJ_EXPR, type,
				     arrayse1.expr);
  gfc_add_block_to_block (&block, &arrayse1.pre);

  /* Make the tree expression for array2.  */
  gfc_init_se (&arrayse2, NULL);
  gfc_copy_loopinfo_to_se (&arrayse2, &loop);
  arrayse2.ss = arrayss2;
  gfc_conv_expr_val (&arrayse2, arrayexpr2);
  gfc_add_block_to_block (&block, &arrayse2.pre);

  /* Do the actual product and sum.  */
  if (expr->ts.type == BT_LOGICAL)
    {
      /* Logical: resvar = resvar .OR. (a(i) .AND. b(i)).  */
      tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR, type,
			     arrayse1.expr, arrayse2.expr);
      tmp = fold_build2_loc (input_location, TRUTH_OR_EXPR, type, resvar, tmp);
    }
  else
    {
      /* Numeric: resvar = resvar + a(i) * b(i).  */
      tmp = fold_build2_loc (input_location, MULT_EXPR, type, arrayse1.expr,
			     arrayse2.expr);
      tmp = fold_build2_loc (input_location, PLUS_EXPR, type, resvar, tmp);
    }
  gfc_add_modify (&block, resvar, tmp);

  /* Finish up the loop block and the loop.  */
  tmp = gfc_finish_block (&block);
  gfc_add_expr_to_block (&body, tmp);

  gfc_trans_scalarizing_loops (&loop, &body);
  gfc_add_block_to_block (&se->pre, &loop.pre);
  gfc_add_block_to_block (&se->pre, &loop.post);
  gfc_cleanup_loop (&loop);

  se->expr = resvar;
}
| |
| |
| /* Emit code for minloc or maxloc intrinsic. There are many different cases |
| we need to handle. For performance reasons we sometimes create two |
| loops instead of one, where the second one is much simpler. |
| Examples for minloc intrinsic: |
| 1) Result is an array, a call is generated |
| 2) Array mask is used and NaNs need to be supported: |
| limit = Infinity; |
| pos = 0; |
| S = from; |
| while (S <= to) { |
| if (mask[S]) { |
| if (pos == 0) pos = S + (1 - from); |
| if (a[S] <= limit) { limit = a[S]; pos = S + (1 - from); goto lab1; } |
| } |
| S++; |
| } |
| goto lab2; |
| lab1:; |
| while (S <= to) { |
| if (mask[S]) if (a[S] < limit) { limit = a[S]; pos = S + (1 - from); } |
| S++; |
| } |
| lab2:; |
| 3) NaNs need to be supported, but it is known at compile time or cheaply |
| at runtime whether array is nonempty or not: |
| limit = Infinity; |
| pos = 0; |
| S = from; |
| while (S <= to) { |
| if (a[S] <= limit) { limit = a[S]; pos = S + (1 - from); goto lab1; } |
| S++; |
| } |
| if (from <= to) pos = 1; |
| goto lab2; |
| lab1:; |
| while (S <= to) { |
| if (a[S] < limit) { limit = a[S]; pos = S + (1 - from); } |
| S++; |
| } |
| lab2:; |
| 4) NaNs aren't supported, array mask is used: |
| limit = infinities_supported ? Infinity : huge (limit); |
| pos = 0; |
| S = from; |
| while (S <= to) { |
| if (mask[S]) { limit = a[S]; pos = S + (1 - from); goto lab1; } |
| S++; |
| } |
| goto lab2; |
| lab1:; |
| while (S <= to) { |
| if (mask[S]) if (a[S] < limit) { limit = a[S]; pos = S + (1 - from); } |
| S++; |
| } |
| lab2:; |
| 5) Same without array mask: |
| limit = infinities_supported ? Infinity : huge (limit); |
| pos = (from <= to) ? 1 : 0; |
| S = from; |
| while (S <= to) { |
| if (a[S] < limit) { limit = a[S]; pos = S + (1 - from); } |
| S++; |
| } |
| For 3) and 5), if mask is scalar, this all goes into a conditional, |
| setting pos = 0; in the else branch. */ |
| |
| static void |
| gfc_conv_intrinsic_minmaxloc (gfc_se * se, gfc_expr * expr, enum tree_code op) |
| { |
| stmtblock_t body; |
| stmtblock_t block; |
| stmtblock_t ifblock; |
| stmtblock_t elseblock; |
| tree limit; |
| tree type; |
| tree tmp; |
| tree cond; |
| tree elsetmp; |
| tree ifbody; |
| tree offset; |
| tree nonempty; |
| tree lab1, lab2; |
| gfc_loopinfo loop; |
| gfc_actual_arglist *actual; |
| gfc_ss *arrayss; |
| gfc_ss *maskss; |
| gfc_se arrayse; |
| gfc_se maskse; |
| gfc_expr *arrayexpr; |
| gfc_expr *maskexpr; |
| tree pos; |
| int n; |
| |
| if (se->ss) |
| { |
| gfc_conv_intrinsic_funcall (se, expr); |
| return; |
| } |
| |
| /* Initialize the result. */ |
| pos = gfc_create_var (gfc_array_index_type, "pos"); |
| offset = gfc_create_var (gfc_array_index_type, "offset"); |
| type = gfc_typenode_for_spec (&expr->ts); |
| |
| /* Walk the arguments. */ |
| actual = expr->value.function.actual; |
| arrayexpr = actual->expr; |
| arrayss = gfc_walk_expr (arrayexpr); |
| gcc_assert (arrayss != gfc_ss_terminator); |
| |
| actual = actual->next->next; |
| gcc_assert (actual); |
| maskexpr = actual->expr; |
| nonempty = NULL; |
| if (maskexpr && maskexpr->rank != 0) |
|