| /* Implementation of the MINLOC intrinsic |
| Copyright 2002, 2007, 2009 Free Software Foundation, Inc. |
| Contributed by Paul Brook <paul@nowt.org> |
| |
| This file is part of the GNU Fortran 95 runtime library (libgfortran). |
| |
| Libgfortran is free software; you can redistribute it and/or |
| modify it under the terms of the GNU General Public |
| License as published by the Free Software Foundation; either |
| version 3 of the License, or (at your option) any later version. |
| |
| Libgfortran is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| Under Section 7 of GPL version 3, you are granted additional |
| permissions described in the GCC Runtime Library Exception, version |
| 3.1, as published by the Free Software Foundation. |
| |
| You should have received a copy of the GNU General Public License and |
| a copy of the GCC Runtime Library Exception along with this program; |
| see the files COPYING3 and COPYING.RUNTIME respectively. If not, see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #include "libgfortran.h" |
| #include <stdlib.h> |
| #include <assert.h> |
| #include <limits.h> |
| |
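| /* This file provides MINLOC with a DIM argument for REAL(4) arrays, |
|    returning default INTEGER(4) positions.  Three entry points are |
|    defined below: minloc1_4_r4 (no MASK), mminloc1_4_r4 (array MASK) |
|    and sminloc1_4_r4 (scalar MASK).  It is one instance of a family of |
|    such files, one per supported argument/result kind combination. */ |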
| |
| #if defined (HAVE_GFC_REAL_4) && defined (HAVE_GFC_INTEGER_4) |
| |
| |
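| /* minloc1_4_r4 computes MINLOC (ARRAY, DIM) without a MASK: for every |
|    vector of ARRAY along dimension DIM it stores the 1-based position |
|    of the first minimum into the corresponding element of RETARRAY. |
|    For example, with a 3x4 REAL(4) array and DIM=1 the result has |
|    shape (4), each element holding the row of the smallest value in |
|    that column.  RETARRAY may come in unallocated (data == NULL); in |
|    that case its descriptor is set up and storage is allocated here. */ |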
| extern void minloc1_4_r4 (gfc_array_i4 * const restrict, |
| gfc_array_r4 * const restrict, const index_type * const restrict); |
| export_proto(minloc1_4_r4); |
| |
| void |
| minloc1_4_r4 (gfc_array_i4 * const restrict retarray, |
| gfc_array_r4 * const restrict array, |
| const index_type * const restrict pdim) |
| { |
| index_type count[GFC_MAX_DIMENSIONS]; |
| index_type extent[GFC_MAX_DIMENSIONS]; |
| index_type sstride[GFC_MAX_DIMENSIONS]; |
| index_type dstride[GFC_MAX_DIMENSIONS]; |
| const GFC_REAL_4 * restrict base; |
| GFC_INTEGER_4 * restrict dest; |
| index_type rank; |
| index_type n; |
| index_type len; |
| index_type delta; |
| index_type dim; |
| int continue_loop; |
| |
| /* Make dim zero based to avoid confusion. */ |
| dim = (*pdim) - 1; |
| rank = GFC_DESCRIPTOR_RANK (array) - 1; |
| |
| len = array->dim[dim].ubound + 1 - array->dim[dim].lbound; |
| if (len < 0) |
| len = 0; |
| delta = array->dim[dim].stride; |
| |
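| /* len and delta are the extent and stride of the dimension being |
|    reduced; the two loops below collect the extents and strides of the |
|    remaining dimensions, in order, skipping over DIM. */ |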
| for (n = 0; n < dim; n++) |
| { |
| sstride[n] = array->dim[n].stride; |
| extent[n] = array->dim[n].ubound + 1 - array->dim[n].lbound; |
| |
| if (extent[n] < 0) |
| extent[n] = 0; |
| } |
| for (n = dim; n < rank; n++) |
| { |
| sstride[n] = array->dim[n + 1].stride; |
| extent[n] = |
| array->dim[n + 1].ubound + 1 - array->dim[n + 1].lbound; |
| |
| if (extent[n] < 0) |
| extent[n] = 0; |
| } |
| |
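| /* An unallocated return array is given a zero-based, contiguous |
|    descriptor and freshly allocated storage.  A caller-supplied array |
|    is checked for the correct rank and, when bounds checking is |
|    enabled, for matching extents. */ |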
| if (retarray->data == NULL) |
| { |
| size_t alloc_size; |
| |
| for (n = 0; n < rank; n++) |
| { |
| retarray->dim[n].lbound = 0; |
| retarray->dim[n].ubound = extent[n]-1; |
| if (n == 0) |
| retarray->dim[n].stride = 1; |
| else |
| retarray->dim[n].stride = retarray->dim[n-1].stride * extent[n-1]; |
| } |
| |
| retarray->offset = 0; |
| retarray->dtype = (array->dtype & ~GFC_DTYPE_RANK_MASK) | rank; |
| |
| alloc_size = sizeof (GFC_INTEGER_4) * retarray->dim[rank-1].stride |
| * extent[rank-1]; |
| |
| if (alloc_size == 0) |
| { |
| /* Make sure we have a zero-sized array. */ |
| retarray->dim[0].lbound = 0; |
| retarray->dim[0].ubound = -1; |
| return; |
| } |
| else |
| retarray->data = internal_malloc_size (alloc_size); |
| } |
| else |
| { |
| if (rank != GFC_DESCRIPTOR_RANK (retarray)) |
| runtime_error ("rank of return array incorrect in" |
| " MINLOC intrinsic: is %ld, should be %ld", |
| (long int) (GFC_DESCRIPTOR_RANK (retarray)), |
| (long int) rank); |
| |
| if (unlikely (compile_options.bounds_check)) |
| { |
| for (n=0; n < rank; n++) |
| { |
| index_type ret_extent; |
| |
| ret_extent = retarray->dim[n].ubound + 1 |
| - retarray->dim[n].lbound; |
| if (extent[n] != ret_extent) |
| runtime_error ("Incorrect extent in return value of" |
| " MINLOC intrinsic in dimension %ld:" |
| " is %ld, should be %ld", (long int) n + 1, |
| (long int) ret_extent, (long int) extent[n]); |
| } |
| } |
| } |
| |
| for (n = 0; n < rank; n++) |
| { |
| count[n] = 0; |
| dstride[n] = retarray->dim[n].stride; |
| /* An empty result array: nothing to do. */ |
| if (extent[n] <= 0) |
| return; |
| } |
| |
| base = array->data; |
| dest = retarray->data; |
| |
| continue_loop = 1; |
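| /* count[]/extent[] act as an odometer over the elements of the result. |
|    For each result element, the block below scans len source elements |
|    spaced delta apart and records the position of the minimum. */ |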
| while (continue_loop) |
| { |
| const GFC_REAL_4 * restrict src; |
| GFC_INTEGER_4 result; |
| src = base; |
| { |
| |
| GFC_REAL_4 minval; |
| minval = GFC_REAL_4_HUGE; |
| result = 0; |
| if (len <= 0) |
| *dest = 0; |
| else |
| { |
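| /* The '!result' test makes the first element win even when the |
|    comparison fails (for instance on a NaN), so result is always at |
|    least 1 once len > 0. */ |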
| for (n = 0; n < len; n++, src += delta) |
| { |
| |
| if (*src < minval || !result) |
| { |
| minval = *src; |
| result = (GFC_INTEGER_4)n + 1; |
| } |
| } |
| *dest = result; |
| } |
| } |
| /* Advance to the next element. */ |
| count[0]++; |
| base += sstride[0]; |
| dest += dstride[0]; |
| n = 0; |
| while (count[n] == extent[n]) |
| { |
| /* When we get to the end of a dimension, reset it and increment |
| the next dimension. */ |
| count[n] = 0; |
| /* We could precalculate these products, but this is a less |
| frequently used path so probably not worth it. */ |
| base -= sstride[n] * extent[n]; |
| dest -= dstride[n] * extent[n]; |
| n++; |
| if (n == rank) |
| { |
| /* Break out of the loop. */ |
| continue_loop = 0; |
| break; |
| } |
| else |
| { |
| count[n]++; |
| base += sstride[n]; |
| dest += dstride[n]; |
| } |
| } |
| } |
| } |
| |
| |
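| /* mminloc1_4_r4 is the variant taking an array-valued MASK: only |
|    elements whose mask entry is true take part in the search, and 0 is |
|    stored where the mask is false throughout the reduced dimension. */ |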
| extern void mminloc1_4_r4 (gfc_array_i4 * const restrict, |
| gfc_array_r4 * const restrict, const index_type * const restrict, |
| gfc_array_l1 * const restrict); |
| export_proto(mminloc1_4_r4); |
| |
| void |
| mminloc1_4_r4 (gfc_array_i4 * const restrict retarray, |
| gfc_array_r4 * const restrict array, |
| const index_type * const restrict pdim, |
| gfc_array_l1 * const restrict mask) |
| { |
| index_type count[GFC_MAX_DIMENSIONS]; |
| index_type extent[GFC_MAX_DIMENSIONS]; |
| index_type sstride[GFC_MAX_DIMENSIONS]; |
| index_type dstride[GFC_MAX_DIMENSIONS]; |
| index_type mstride[GFC_MAX_DIMENSIONS]; |
| GFC_INTEGER_4 * restrict dest; |
| const GFC_REAL_4 * restrict base; |
| const GFC_LOGICAL_1 * restrict mbase; |
| int rank; |
| int dim; |
| index_type n; |
| index_type len; |
| index_type delta; |
| index_type mdelta; |
| int mask_kind; |
| |
| dim = (*pdim) - 1; |
| rank = GFC_DESCRIPTOR_RANK (array) - 1; |
| |
| len = array->dim[dim].ubound + 1 - array->dim[dim].lbound; |
| if (len <= 0) |
| return; |
| |
| mbase = mask->data; |
| |
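| /* MASK may have any logical kind.  Its element size is read from the |
|    descriptor, the strides below are scaled to bytes, and |
|    GFOR_POINTER_TO_L1 adjusts the base pointer so that the byte |
|    actually holding the value is the one tested. */ |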
| mask_kind = GFC_DESCRIPTOR_SIZE (mask); |
| |
| if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8 |
| #ifdef HAVE_GFC_LOGICAL_16 |
| || mask_kind == 16 |
| #endif |
| ) |
| mbase = GFOR_POINTER_TO_L1 (mbase, mask_kind); |
| else |
| runtime_error ("Funny sized logical array"); |
| |
| delta = array->dim[dim].stride; |
| mdelta = mask->dim[dim].stride * mask_kind; |
| |
| for (n = 0; n < dim; n++) |
| { |
| sstride[n] = array->dim[n].stride; |
| mstride[n] = mask->dim[n].stride * mask_kind; |
| extent[n] = array->dim[n].ubound + 1 - array->dim[n].lbound; |
| |
| if (extent[n] < 0) |
| extent[n] = 0; |
| |
| } |
| for (n = dim; n < rank; n++) |
| { |
| sstride[n] = array->dim[n + 1].stride; |
| mstride[n] = mask->dim[n + 1].stride * mask_kind; |
| extent[n] = |
| array->dim[n + 1].ubound + 1 - array->dim[n + 1].lbound; |
| |
| if (extent[n] < 0) |
| extent[n] = 0; |
| } |
| |
| if (retarray->data == NULL) |
| { |
| size_t alloc_size; |
| |
| for (n = 0; n < rank; n++) |
| { |
| retarray->dim[n].lbound = 0; |
| retarray->dim[n].ubound = extent[n]-1; |
| if (n == 0) |
| retarray->dim[n].stride = 1; |
| else |
| retarray->dim[n].stride = retarray->dim[n-1].stride * extent[n-1]; |
| } |
| |
| alloc_size = sizeof (GFC_INTEGER_4) * retarray->dim[rank-1].stride |
| * extent[rank-1]; |
| |
| retarray->offset = 0; |
| retarray->dtype = (array->dtype & ~GFC_DTYPE_RANK_MASK) | rank; |
| |
| if (alloc_size == 0) |
| { |
| /* Make sure we have a zero-sized array. */ |
| retarray->dim[0].lbound = 0; |
| retarray->dim[0].ubound = -1; |
| return; |
| } |
| else |
| retarray->data = internal_malloc_size (alloc_size); |
| |
| } |
| else |
| { |
| if (rank != GFC_DESCRIPTOR_RANK (retarray)) |
| runtime_error ("rank of return array incorrect in MINLOC intrinsic"); |
| |
| if (unlikely (compile_options.bounds_check)) |
| { |
| for (n=0; n < rank; n++) |
| { |
| index_type ret_extent; |
| |
| ret_extent = retarray->dim[n].ubound + 1 |
| - retarray->dim[n].lbound; |
| if (extent[n] != ret_extent) |
| runtime_error ("Incorrect extent in return value of" |
| " MINLOC intrinsic in dimension %ld:" |
| " is %ld, should be %ld", (long int) n + 1, |
| (long int) ret_extent, (long int) extent[n]); |
| } |
| /* MASK must conform to ARRAY, so all rank + 1 dimensions of ARRAY are checked. */ |
| for (n = 0; n <= rank; n++) |
| { |
| index_type mask_extent, array_extent; |
| |
| array_extent = array->dim[n].ubound + 1 - array->dim[n].lbound; |
| mask_extent = mask->dim[n].ubound + 1 - mask->dim[n].lbound; |
| if (array_extent != mask_extent) |
| runtime_error ("Incorrect extent in MASK argument of" |
| " MINLOC intrinsic in dimension %ld:" |
| " is %ld, should be %ld", (long int) n + 1, |
| (long int) mask_extent, (long int) array_extent); |
| } |
| } |
| } |
| |
| for (n = 0; n < rank; n++) |
| { |
| count[n] = 0; |
| dstride[n] = retarray->dim[n].stride; |
| if (extent[n] <= 0) |
| return; |
| } |
| |
| dest = retarray->data; |
| base = array->data; |
| |
| while (base) |
| { |
| const GFC_REAL_4 * restrict src; |
| const GFC_LOGICAL_1 * restrict msrc; |
| GFC_INTEGER_4 result; |
| src = base; |
| msrc = mbase; |
| { |
| |
| GFC_REAL_4 minval; |
| minval = GFC_REAL_4_HUGE; |
| result = 0; |
| if (len <= 0) |
| *dest = 0; |
| else |
| { |
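| /* Same scan as in the unmasked case, except that an element is only |
|    considered when its mask entry is true; result stays 0 when the |
|    mask is false for the whole column. */ |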
| for (n = 0; n < len; n++, src += delta, msrc += mdelta) |
| { |
| |
| if (*msrc && (*src < minval || !result)) |
| { |
| minval = *src; |
| result = (GFC_INTEGER_4)n + 1; |
| } |
| } |
| *dest = result; |
| } |
| } |
| /* Advance to the next element. */ |
| count[0]++; |
| base += sstride[0]; |
| mbase += mstride[0]; |
| dest += dstride[0]; |
| n = 0; |
| while (count[n] == extent[n]) |
| { |
| /* When we get to the end of a dimension, reset it and increment |
| the next dimension. */ |
| count[n] = 0; |
| /* We could precalculate these products, but this is a less |
| frequently used path so probably not worth it. */ |
| base -= sstride[n] * extent[n]; |
| mbase -= mstride[n] * extent[n]; |
| dest -= dstride[n] * extent[n]; |
| n++; |
| if (n == rank) |
| { |
| /* Break out of the loop. */ |
| base = NULL; |
| break; |
| } |
| else |
| { |
| count[n]++; |
| base += sstride[n]; |
| mbase += mstride[n]; |
| dest += dstride[n]; |
| } |
| } |
| } |
| } |
| |
| |
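| /* sminloc1_4_r4 handles a scalar MASK expression.  A true mask is the |
|    same as no mask at all and is delegated to minloc1_4_r4; a false |
|    mask means no element participates, so the result is all zeros. */ |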
| extern void sminloc1_4_r4 (gfc_array_i4 * const restrict, |
| gfc_array_r4 * const restrict, const index_type * const restrict, |
| GFC_LOGICAL_4 *); |
| export_proto(sminloc1_4_r4); |
| |
| void |
| sminloc1_4_r4 (gfc_array_i4 * const restrict retarray, |
| gfc_array_r4 * const restrict array, |
| const index_type * const restrict pdim, |
| GFC_LOGICAL_4 * mask) |
| { |
| index_type count[GFC_MAX_DIMENSIONS]; |
| index_type extent[GFC_MAX_DIMENSIONS]; |
| index_type sstride[GFC_MAX_DIMENSIONS]; |
| index_type dstride[GFC_MAX_DIMENSIONS]; |
| GFC_INTEGER_4 * restrict dest; |
| index_type rank; |
| index_type n; |
| index_type dim; |
| |
| |
| if (*mask) |
| { |
| minloc1_4_r4 (retarray, array, pdim); |
| return; |
| } |
| /* Make dim zero based to avoid confusion. */ |
| dim = (*pdim) - 1; |
| rank = GFC_DESCRIPTOR_RANK (array) - 1; |
| |
| for (n = 0; n < dim; n++) |
| { |
| sstride[n] = array->dim[n].stride; |
| extent[n] = array->dim[n].ubound + 1 - array->dim[n].lbound; |
| |
| if (extent[n] <= 0) |
| extent[n] = 0; |
| } |
| |
| for (n = dim; n < rank; n++) |
| { |
| sstride[n] = array->dim[n + 1].stride; |
| extent[n] = |
| array->dim[n + 1].ubound + 1 - array->dim[n + 1].lbound; |
| |
| if (extent[n] <= 0) |
| extent[n] = 0; |
| } |
| |
| if (retarray->data == NULL) |
| { |
| size_t alloc_size; |
| |
| for (n = 0; n < rank; n++) |
| { |
| retarray->dim[n].lbound = 0; |
| retarray->dim[n].ubound = extent[n]-1; |
| if (n == 0) |
| retarray->dim[n].stride = 1; |
| else |
| retarray->dim[n].stride = retarray->dim[n-1].stride * extent[n-1]; |
| } |
| |
| retarray->offset = 0; |
| retarray->dtype = (array->dtype & ~GFC_DTYPE_RANK_MASK) | rank; |
| |
| alloc_size = sizeof (GFC_INTEGER_4) * retarray->dim[rank-1].stride |
| * extent[rank-1]; |
| |
| if (alloc_size == 0) |
| { |
| /* Make sure we have a zero-sized array. */ |
| retarray->dim[0].lbound = 0; |
| retarray->dim[0].ubound = -1; |
| return; |
| } |
| else |
| retarray->data = internal_malloc_size (alloc_size); |
| } |
| else |
| { |
| if (rank != GFC_DESCRIPTOR_RANK (retarray)) |
| runtime_error ("rank of return array incorrect in" |
| " MINLOC intrinsic: is %ld, should be %ld", |
| (long int) (GFC_DESCRIPTOR_RANK (retarray)), |
| (long int) rank); |
| |
| if (unlikely (compile_options.bounds_check)) |
| { |
| for (n=0; n < rank; n++) |
| { |
| index_type ret_extent; |
| |
| ret_extent = retarray->dim[n].ubound + 1 |
| - retarray->dim[n].lbound; |
| if (extent[n] != ret_extent) |
| runtime_error ("Incorrect extent in return value of" |
| " MINLOC intrinsic in dimension %ld:" |
| " is %ld, should be %ld", (long int) n + 1, |
| (long int) ret_extent, (long int) extent[n]); |
| } |
| } |
| } |
| |
| for (n = 0; n < rank; n++) |
| { |
| count[n] = 0; |
| dstride[n] = retarray->dim[n].stride; |
| if (extent[n] <= 0) |
| return; |
| } |
| |
| dest = retarray->data; |
| |
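| /* MASK is false: walk the result with the same odometer logic as |
|    above, storing 0 in every element. */ |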
| while (1) |
| { |
| *dest = 0; |
| count[0]++; |
| dest += dstride[0]; |
| n = 0; |
| while (count[n] == extent[n]) |
| { |
| /* When we get to the end of a dimension, reset it and increment |
| the next dimension. */ |
| count[n] = 0; |
| /* We could precalculate these products, but this is a less |
| frequently used path so probably not worth it. */ |
| dest -= dstride[n] * extent[n]; |
| n++; |
| if (n == rank) |
| return; |
| else |
| { |
| count[n]++; |
| dest += dstride[n]; |
| } |
| } |
| } |
| } |
| |
| #endif |