| /* ARM NEON intrinsics include file. |
| |
| Copyright (C) 2011-2021 Free Software Foundation, Inc. |
| Contributed by ARM Ltd. |
| |
| This file is part of GCC. |
| |
| GCC is free software; you can redistribute it and/or modify it |
| under the terms of the GNU General Public License as published |
| by the Free Software Foundation; either version 3, or (at your |
| option) any later version. |
| |
| GCC is distributed in the hope that it will be useful, but WITHOUT |
| ANY WARRANTY; without even the implied warranty of MERCHANTABILITY |
| or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public |
| License for more details. |
| |
| Under Section 7 of GPL version 3, you are granted additional |
| permissions described in the GCC Runtime Library Exception, version |
| 3.1, as published by the Free Software Foundation. |
| |
| You should have received a copy of the GNU General Public License and |
| a copy of the GCC Runtime Library Exception along with this program; |
| see the files COPYING3 and COPYING.RUNTIME respectively. If not, see |
| <http://www.gnu.org/licenses/>. */ |
| |
| #ifndef _AARCH64_NEON_H_ |
| #define _AARCH64_NEON_H_ |
| |
| #pragma GCC push_options |
| #pragma GCC target ("+nothing+simd") |
| |
| #include <stdint.h> |
| |
| #define __AARCH64_UINT64_C(__C) ((uint64_t) __C) |
| #define __AARCH64_INT64_C(__C) ((int64_t) __C) |
| |
/* 64-bit (D-register) vector types, wrapping the compiler-internal
   __<Kind>NxM_t builtin types.  */
typedef __Int8x8_t int8x8_t;
typedef __Int16x4_t int16x4_t;
typedef __Int32x2_t int32x2_t;
typedef __Int64x1_t int64x1_t;
typedef __Float16x4_t float16x4_t;
typedef __Float32x2_t float32x2_t;
typedef __Poly8x8_t poly8x8_t;
typedef __Poly16x4_t poly16x4_t;
typedef __Uint8x8_t uint8x8_t;
typedef __Uint16x4_t uint16x4_t;
typedef __Uint32x2_t uint32x2_t;
typedef __Float64x1_t float64x1_t;
typedef __Uint64x1_t uint64x1_t;

/* 128-bit (Q-register) vector types.  */
typedef __Int8x16_t int8x16_t;
typedef __Int16x8_t int16x8_t;
typedef __Int32x4_t int32x4_t;
typedef __Int64x2_t int64x2_t;
typedef __Float16x8_t float16x8_t;
typedef __Float32x4_t float32x4_t;
typedef __Float64x2_t float64x2_t;
typedef __Poly8x16_t poly8x16_t;
typedef __Poly16x8_t poly16x8_t;
typedef __Poly64x2_t poly64x2_t;
typedef __Poly64x1_t poly64x1_t;
typedef __Uint8x16_t uint8x16_t;
typedef __Uint16x8_t uint16x8_t;
typedef __Uint32x4_t uint32x4_t;
typedef __Uint64x2_t uint64x2_t;

/* Polynomial scalar element types.  */
typedef __Poly8_t poly8_t;
typedef __Poly16_t poly16_t;
typedef __Poly64_t poly64_t;
typedef __Poly128_t poly128_t;

/* Floating-point scalar element types.  */
typedef __fp16 float16_t;
typedef float float32_t;
typedef double float64_t;

/* Brain floating-point (bfloat16) vector types.  */
typedef __Bfloat16x4_t bfloat16x4_t;
typedef __Bfloat16x8_t bfloat16x8_t;
| |
/* Aggregates of 2, 3 or 4 bfloat16 vectors; each struct wraps an
   array of like-typed vectors in its VAL member.  */
typedef struct bfloat16x4x2_t
{
  bfloat16x4_t val[2];
} bfloat16x4x2_t;

typedef struct bfloat16x8x2_t
{
  bfloat16x8_t val[2];
} bfloat16x8x2_t;

typedef struct bfloat16x4x3_t
{
  bfloat16x4_t val[3];
} bfloat16x4x3_t;

typedef struct bfloat16x8x3_t
{
  bfloat16x8_t val[3];
} bfloat16x8x3_t;

typedef struct bfloat16x4x4_t
{
  bfloat16x4_t val[4];
} bfloat16x4x4_t;

typedef struct bfloat16x8x4_t
{
  bfloat16x8_t val[4];
} bfloat16x8x4_t;
| |
/* Aggregates of 2, 3 or 4 like-typed vectors.  Each struct wraps an
   array of vectors in its VAL member; these are the argument/result
   types of the multi-vector intrinsic families (NOTE(review): the
   consumers live elsewhere in this header).  */

/* Pairs (x2).  */
typedef struct int8x8x2_t
{
  int8x8_t val[2];
} int8x8x2_t;

typedef struct int8x16x2_t
{
  int8x16_t val[2];
} int8x16x2_t;

typedef struct int16x4x2_t
{
  int16x4_t val[2];
} int16x4x2_t;

typedef struct int16x8x2_t
{
  int16x8_t val[2];
} int16x8x2_t;

typedef struct int32x2x2_t
{
  int32x2_t val[2];
} int32x2x2_t;

typedef struct int32x4x2_t
{
  int32x4_t val[2];
} int32x4x2_t;

typedef struct int64x1x2_t
{
  int64x1_t val[2];
} int64x1x2_t;

typedef struct int64x2x2_t
{
  int64x2_t val[2];
} int64x2x2_t;

typedef struct uint8x8x2_t
{
  uint8x8_t val[2];
} uint8x8x2_t;

typedef struct uint8x16x2_t
{
  uint8x16_t val[2];
} uint8x16x2_t;

typedef struct uint16x4x2_t
{
  uint16x4_t val[2];
} uint16x4x2_t;

typedef struct uint16x8x2_t
{
  uint16x8_t val[2];
} uint16x8x2_t;

typedef struct uint32x2x2_t
{
  uint32x2_t val[2];
} uint32x2x2_t;

typedef struct uint32x4x2_t
{
  uint32x4_t val[2];
} uint32x4x2_t;

typedef struct uint64x1x2_t
{
  uint64x1_t val[2];
} uint64x1x2_t;

typedef struct uint64x2x2_t
{
  uint64x2_t val[2];
} uint64x2x2_t;

typedef struct float16x4x2_t
{
  float16x4_t val[2];
} float16x4x2_t;

typedef struct float16x8x2_t
{
  float16x8_t val[2];
} float16x8x2_t;

typedef struct float32x2x2_t
{
  float32x2_t val[2];
} float32x2x2_t;

typedef struct float32x4x2_t
{
  float32x4_t val[2];
} float32x4x2_t;

typedef struct float64x2x2_t
{
  float64x2_t val[2];
} float64x2x2_t;

typedef struct float64x1x2_t
{
  float64x1_t val[2];
} float64x1x2_t;

typedef struct poly8x8x2_t
{
  poly8x8_t val[2];
} poly8x8x2_t;

typedef struct poly8x16x2_t
{
  poly8x16_t val[2];
} poly8x16x2_t;

typedef struct poly16x4x2_t
{
  poly16x4_t val[2];
} poly16x4x2_t;

typedef struct poly16x8x2_t
{
  poly16x8_t val[2];
} poly16x8x2_t;

/* poly64 pairs, triples and quads.  */
typedef struct poly64x1x2_t
{
  poly64x1_t val[2];
} poly64x1x2_t;

typedef struct poly64x1x3_t
{
  poly64x1_t val[3];
} poly64x1x3_t;

typedef struct poly64x1x4_t
{
  poly64x1_t val[4];
} poly64x1x4_t;

typedef struct poly64x2x2_t
{
  poly64x2_t val[2];
} poly64x2x2_t;

typedef struct poly64x2x3_t
{
  poly64x2_t val[3];
} poly64x2x3_t;

typedef struct poly64x2x4_t
{
  poly64x2_t val[4];
} poly64x2x4_t;

/* Triples (x3).  */
typedef struct int8x8x3_t
{
  int8x8_t val[3];
} int8x8x3_t;

typedef struct int8x16x3_t
{
  int8x16_t val[3];
} int8x16x3_t;

typedef struct int16x4x3_t
{
  int16x4_t val[3];
} int16x4x3_t;

typedef struct int16x8x3_t
{
  int16x8_t val[3];
} int16x8x3_t;

typedef struct int32x2x3_t
{
  int32x2_t val[3];
} int32x2x3_t;

typedef struct int32x4x3_t
{
  int32x4_t val[3];
} int32x4x3_t;

typedef struct int64x1x3_t
{
  int64x1_t val[3];
} int64x1x3_t;

typedef struct int64x2x3_t
{
  int64x2_t val[3];
} int64x2x3_t;

typedef struct uint8x8x3_t
{
  uint8x8_t val[3];
} uint8x8x3_t;

typedef struct uint8x16x3_t
{
  uint8x16_t val[3];
} uint8x16x3_t;

typedef struct uint16x4x3_t
{
  uint16x4_t val[3];
} uint16x4x3_t;

typedef struct uint16x8x3_t
{
  uint16x8_t val[3];
} uint16x8x3_t;

typedef struct uint32x2x3_t
{
  uint32x2_t val[3];
} uint32x2x3_t;

typedef struct uint32x4x3_t
{
  uint32x4_t val[3];
} uint32x4x3_t;

typedef struct uint64x1x3_t
{
  uint64x1_t val[3];
} uint64x1x3_t;

typedef struct uint64x2x3_t
{
  uint64x2_t val[3];
} uint64x2x3_t;

typedef struct float16x4x3_t
{
  float16x4_t val[3];
} float16x4x3_t;

typedef struct float16x8x3_t
{
  float16x8_t val[3];
} float16x8x3_t;

typedef struct float32x2x3_t
{
  float32x2_t val[3];
} float32x2x3_t;

typedef struct float32x4x3_t
{
  float32x4_t val[3];
} float32x4x3_t;

typedef struct float64x2x3_t
{
  float64x2_t val[3];
} float64x2x3_t;

typedef struct float64x1x3_t
{
  float64x1_t val[3];
} float64x1x3_t;

typedef struct poly8x8x3_t
{
  poly8x8_t val[3];
} poly8x8x3_t;

typedef struct poly8x16x3_t
{
  poly8x16_t val[3];
} poly8x16x3_t;

typedef struct poly16x4x3_t
{
  poly16x4_t val[3];
} poly16x4x3_t;

typedef struct poly16x8x3_t
{
  poly16x8_t val[3];
} poly16x8x3_t;

/* Quads (x4).  */
typedef struct int8x8x4_t
{
  int8x8_t val[4];
} int8x8x4_t;

typedef struct int8x16x4_t
{
  int8x16_t val[4];
} int8x16x4_t;

typedef struct int16x4x4_t
{
  int16x4_t val[4];
} int16x4x4_t;

typedef struct int16x8x4_t
{
  int16x8_t val[4];
} int16x8x4_t;

typedef struct int32x2x4_t
{
  int32x2_t val[4];
} int32x2x4_t;

typedef struct int32x4x4_t
{
  int32x4_t val[4];
} int32x4x4_t;

typedef struct int64x1x4_t
{
  int64x1_t val[4];
} int64x1x4_t;

typedef struct int64x2x4_t
{
  int64x2_t val[4];
} int64x2x4_t;

typedef struct uint8x8x4_t
{
  uint8x8_t val[4];
} uint8x8x4_t;

typedef struct uint8x16x4_t
{
  uint8x16_t val[4];
} uint8x16x4_t;

typedef struct uint16x4x4_t
{
  uint16x4_t val[4];
} uint16x4x4_t;

typedef struct uint16x8x4_t
{
  uint16x8_t val[4];
} uint16x8x4_t;

typedef struct uint32x2x4_t
{
  uint32x2_t val[4];
} uint32x2x4_t;

typedef struct uint32x4x4_t
{
  uint32x4_t val[4];
} uint32x4x4_t;

typedef struct uint64x1x4_t
{
  uint64x1_t val[4];
} uint64x1x4_t;

typedef struct uint64x2x4_t
{
  uint64x2_t val[4];
} uint64x2x4_t;

typedef struct float16x4x4_t
{
  float16x4_t val[4];
} float16x4x4_t;

typedef struct float16x8x4_t
{
  float16x8_t val[4];
} float16x8x4_t;

typedef struct float32x2x4_t
{
  float32x2_t val[4];
} float32x2x4_t;

typedef struct float32x4x4_t
{
  float32x4_t val[4];
} float32x4x4_t;

typedef struct float64x2x4_t
{
  float64x2_t val[4];
} float64x2x4_t;

typedef struct float64x1x4_t
{
  float64x1_t val[4];
} float64x1x4_t;

typedef struct poly8x8x4_t
{
  poly8x8_t val[4];
} poly8x8x4_t;

typedef struct poly8x16x4_t
{
  poly8x16_t val[4];
} poly8x16x4_t;

typedef struct poly16x4x4_t
{
  poly16x4_t val[4];
} poly16x4x4_t;

typedef struct poly16x8x4_t
{
  poly16x8_t val[4];
} poly16x8x4_t;
| |
/* __aarch64_vdup_lane internal macros.  Broadcast lane __b of vector
   __a to every lane of the result.  __size is the element-type suffix
   (s8, f32, ...); __q selects the result width: "" builds vdup_n_*
   (64-bit result), "q" builds vdupq_n_* (128-bit result).  */
#define __aarch64_vdup_lane_any(__size, __q, __a, __b) \
  vdup##__q##_n_##__size (__aarch64_vget_lane_any (__a, __b))

/* Per-type wrappers: 64-bit source, 64-bit result.  */
#define __aarch64_vdup_lane_f16(__a, __b) \
   __aarch64_vdup_lane_any (f16, , __a, __b)
#define __aarch64_vdup_lane_f32(__a, __b) \
   __aarch64_vdup_lane_any (f32, , __a, __b)
#define __aarch64_vdup_lane_f64(__a, __b) \
   __aarch64_vdup_lane_any (f64, , __a, __b)
#define __aarch64_vdup_lane_p8(__a, __b) \
   __aarch64_vdup_lane_any (p8, , __a, __b)
#define __aarch64_vdup_lane_p16(__a, __b) \
   __aarch64_vdup_lane_any (p16, , __a, __b)
#define __aarch64_vdup_lane_p64(__a, __b) \
   __aarch64_vdup_lane_any (p64, , __a, __b)
#define __aarch64_vdup_lane_s8(__a, __b) \
   __aarch64_vdup_lane_any (s8, , __a, __b)
#define __aarch64_vdup_lane_s16(__a, __b) \
   __aarch64_vdup_lane_any (s16, , __a, __b)
#define __aarch64_vdup_lane_s32(__a, __b) \
   __aarch64_vdup_lane_any (s32, , __a, __b)
#define __aarch64_vdup_lane_s64(__a, __b) \
   __aarch64_vdup_lane_any (s64, , __a, __b)
#define __aarch64_vdup_lane_u8(__a, __b) \
   __aarch64_vdup_lane_any (u8, , __a, __b)
#define __aarch64_vdup_lane_u16(__a, __b) \
   __aarch64_vdup_lane_any (u16, , __a, __b)
#define __aarch64_vdup_lane_u32(__a, __b) \
   __aarch64_vdup_lane_any (u32, , __a, __b)
#define __aarch64_vdup_lane_u64(__a, __b) \
   __aarch64_vdup_lane_any (u64, , __a, __b)
| |
/* __aarch64_vdup_laneq internal macros: 128-bit source vector, 64-bit
   result (hence the empty __q argument selecting vdup_n_*).  */
#define __aarch64_vdup_laneq_f16(__a, __b) \
   __aarch64_vdup_lane_any (f16, , __a, __b)
#define __aarch64_vdup_laneq_f32(__a, __b) \
   __aarch64_vdup_lane_any (f32, , __a, __b)
#define __aarch64_vdup_laneq_f64(__a, __b) \
   __aarch64_vdup_lane_any (f64, , __a, __b)
#define __aarch64_vdup_laneq_p8(__a, __b) \
   __aarch64_vdup_lane_any (p8, , __a, __b)
#define __aarch64_vdup_laneq_p16(__a, __b) \
   __aarch64_vdup_lane_any (p16, , __a, __b)
#define __aarch64_vdup_laneq_p64(__a, __b) \
   __aarch64_vdup_lane_any (p64, , __a, __b)
#define __aarch64_vdup_laneq_s8(__a, __b) \
   __aarch64_vdup_lane_any (s8, , __a, __b)
#define __aarch64_vdup_laneq_s16(__a, __b) \
   __aarch64_vdup_lane_any (s16, , __a, __b)
#define __aarch64_vdup_laneq_s32(__a, __b) \
   __aarch64_vdup_lane_any (s32, , __a, __b)
#define __aarch64_vdup_laneq_s64(__a, __b) \
   __aarch64_vdup_lane_any (s64, , __a, __b)
#define __aarch64_vdup_laneq_u8(__a, __b) \
   __aarch64_vdup_lane_any (u8, , __a, __b)
#define __aarch64_vdup_laneq_u16(__a, __b) \
   __aarch64_vdup_lane_any (u16, , __a, __b)
#define __aarch64_vdup_laneq_u32(__a, __b) \
   __aarch64_vdup_lane_any (u32, , __a, __b)
#define __aarch64_vdup_laneq_u64(__a, __b) \
   __aarch64_vdup_lane_any (u64, , __a, __b)
| |
/* __aarch64_vdupq_lane internal macros: 64-bit source vector, 128-bit
   result ("q" selects vdupq_n_*).  */
#define __aarch64_vdupq_lane_f16(__a, __b) \
   __aarch64_vdup_lane_any (f16, q, __a, __b)
#define __aarch64_vdupq_lane_f32(__a, __b) \
   __aarch64_vdup_lane_any (f32, q, __a, __b)
#define __aarch64_vdupq_lane_f64(__a, __b) \
   __aarch64_vdup_lane_any (f64, q, __a, __b)
#define __aarch64_vdupq_lane_p8(__a, __b) \
   __aarch64_vdup_lane_any (p8, q, __a, __b)
#define __aarch64_vdupq_lane_p16(__a, __b) \
   __aarch64_vdup_lane_any (p16, q, __a, __b)
#define __aarch64_vdupq_lane_p64(__a, __b) \
   __aarch64_vdup_lane_any (p64, q, __a, __b)
#define __aarch64_vdupq_lane_s8(__a, __b) \
   __aarch64_vdup_lane_any (s8, q, __a, __b)
#define __aarch64_vdupq_lane_s16(__a, __b) \
   __aarch64_vdup_lane_any (s16, q, __a, __b)
#define __aarch64_vdupq_lane_s32(__a, __b) \
   __aarch64_vdup_lane_any (s32, q, __a, __b)
#define __aarch64_vdupq_lane_s64(__a, __b) \
   __aarch64_vdup_lane_any (s64, q, __a, __b)
#define __aarch64_vdupq_lane_u8(__a, __b) \
   __aarch64_vdup_lane_any (u8, q, __a, __b)
#define __aarch64_vdupq_lane_u16(__a, __b) \
   __aarch64_vdup_lane_any (u16, q, __a, __b)
#define __aarch64_vdupq_lane_u32(__a, __b) \
   __aarch64_vdup_lane_any (u32, q, __a, __b)
#define __aarch64_vdupq_lane_u64(__a, __b) \
   __aarch64_vdup_lane_any (u64, q, __a, __b)
| |
/* __aarch64_vdupq_laneq internal macros: 128-bit source vector,
   128-bit result.  */
#define __aarch64_vdupq_laneq_f16(__a, __b) \
   __aarch64_vdup_lane_any (f16, q, __a, __b)
#define __aarch64_vdupq_laneq_f32(__a, __b) \
   __aarch64_vdup_lane_any (f32, q, __a, __b)
#define __aarch64_vdupq_laneq_f64(__a, __b) \
   __aarch64_vdup_lane_any (f64, q, __a, __b)
#define __aarch64_vdupq_laneq_p8(__a, __b) \
   __aarch64_vdup_lane_any (p8, q, __a, __b)
#define __aarch64_vdupq_laneq_p16(__a, __b) \
   __aarch64_vdup_lane_any (p16, q, __a, __b)
#define __aarch64_vdupq_laneq_p64(__a, __b) \
   __aarch64_vdup_lane_any (p64, q, __a, __b)
#define __aarch64_vdupq_laneq_s8(__a, __b) \
   __aarch64_vdup_lane_any (s8, q, __a, __b)
#define __aarch64_vdupq_laneq_s16(__a, __b) \
   __aarch64_vdup_lane_any (s16, q, __a, __b)
#define __aarch64_vdupq_laneq_s32(__a, __b) \
   __aarch64_vdup_lane_any (s32, q, __a, __b)
#define __aarch64_vdupq_laneq_s64(__a, __b) \
   __aarch64_vdup_lane_any (s64, q, __a, __b)
#define __aarch64_vdupq_laneq_u8(__a, __b) \
   __aarch64_vdup_lane_any (u8, q, __a, __b)
#define __aarch64_vdupq_laneq_u16(__a, __b) \
   __aarch64_vdup_lane_any (u16, q, __a, __b)
#define __aarch64_vdupq_laneq_u32(__a, __b) \
   __aarch64_vdup_lane_any (u32, q, __a, __b)
#define __aarch64_vdupq_laneq_u64(__a, __b) \
   __aarch64_vdup_lane_any (u64, q, __a, __b)
| |
/* Internal macro for lane indices.  */

/* Number of lanes in vector __v, computed from its size.  */
#define __AARCH64_NUM_LANES(__v) (sizeof (__v) / sizeof (__v[0]))
/* Diagnose an out-of-range constant lane index __idx for vector __vec
   at compile time via a builtin.  */
#define __AARCH64_LANE_CHECK(__vec, __idx)	\
  __builtin_aarch64_im_lane_boundsi (sizeof(__vec), sizeof(__vec[0]), __idx)

/* For big-endian, GCC's vector indices are the opposite way around
   to the architectural lane indices used by Neon intrinsics.  */
#ifdef __AARCH64EB__
#define __aarch64_lane(__vec, __idx) (__AARCH64_NUM_LANES (__vec) - 1 - __idx)
#else
#define __aarch64_lane(__vec, __idx) __idx
#endif

/* vget_lane internal macro.  Bounds-checks __index, then reads the
   endian-adjusted lane; the statement expression yields the element.  */
#define __aarch64_vget_lane_any(__vec, __index)				\
  __extension__								\
  ({									\
    __AARCH64_LANE_CHECK (__vec, __index);				\
    __vec[__aarch64_lane (__vec, __index)];				\
  })

/* vset_lane and vld1_lane internal macro.  Writes __elem into the
   endian-adjusted lane and yields the updated vector (the argument
   itself is modified inside the statement expression).  */
#define __aarch64_vset_lane_any(__elem, __vec, __index)			\
  __extension__								\
  ({									\
    __AARCH64_LANE_CHECK (__vec, __index);				\
    __vec[__aarch64_lane (__vec, __index)] = __elem;			\
    __vec;								\
  })
| |
| /* vadd */ |
/* vadd_<t>: element-wise addition of two 64-bit vectors, implemented
   directly with GCC's generic vector + operator.  */
__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vadd_s8 (int8x8_t __a, int8x8_t __b)
{
  return __a + __b;
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vadd_s16 (int16x4_t __a, int16x4_t __b)
{
  return __a + __b;
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vadd_s32 (int32x2_t __a, int32x2_t __b)
{
  return __a + __b;
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vadd_f32 (float32x2_t __a, float32x2_t __b)
{
  return __a + __b;
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vadd_f64 (float64x1_t __a, float64x1_t __b)
{
  return __a + __b;
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vadd_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return __a + __b;
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vadd_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return __a + __b;
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vadd_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return __a + __b;
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vadd_s64 (int64x1_t __a, int64x1_t __b)
{
  return __a + __b;
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vadd_u64 (uint64x1_t __a, uint64x1_t __b)
{
  return __a + __b;
}
| |
/* vaddq_<t>: element-wise addition of two 128-bit vectors, again via
   GCC's generic vector + operator.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __a + __b;
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __a + __b;
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __a + __b;
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddq_s64 (int64x2_t __a, int64x2_t __b)
{
  return __a + __b;
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __a + __b;
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddq_f64 (float64x2_t __a, float64x2_t __b)
{
  return __a + __b;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __a + __b;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __a + __b;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __a + __b;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddq_u64 (uint64x2_t __a, uint64x2_t __b)
{
  return __a + __b;
}
| |
/* vaddl_<t>: long (widening) addition — each lane of the two 64-bit
   inputs is widened to twice its width before the add, via the
   __builtin_aarch64_[su]addl* builtins.  The unsigned variants cast
   through the builtins' signed parameter/return types; these casts
   only reinterpret the bits.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddl_s8 (int8x8_t __a, int8x8_t __b)
{
  return (int16x8_t) __builtin_aarch64_saddlv8qi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddl_s16 (int16x4_t __a, int16x4_t __b)
{
  return (int32x4_t) __builtin_aarch64_saddlv4hi (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddl_s32 (int32x2_t __a, int32x2_t __b)
{
  return (int64x2_t) __builtin_aarch64_saddlv2si (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddl_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return (uint16x8_t) __builtin_aarch64_uaddlv8qi ((int8x8_t) __a,
						   (int8x8_t) __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddl_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return (uint32x4_t) __builtin_aarch64_uaddlv4hi ((int16x4_t) __a,
						   (int16x4_t) __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddl_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return (uint64x2_t) __builtin_aarch64_uaddlv2si ((int32x2_t) __a,
						   (int32x2_t) __b);
}
| |
/* vaddl_high_<t>: long (widening) addition of the high halves of two
   128-bit vectors, via the __builtin_aarch64_[su]addl2* builtins.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddl_high_s8 (int8x16_t __a, int8x16_t __b)
{
  return (int16x8_t) __builtin_aarch64_saddl2v16qi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddl_high_s16 (int16x8_t __a, int16x8_t __b)
{
  return (int32x4_t) __builtin_aarch64_saddl2v8hi (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddl_high_s32 (int32x4_t __a, int32x4_t __b)
{
  return (int64x2_t) __builtin_aarch64_saddl2v4si (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddl_high_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return (uint16x8_t) __builtin_aarch64_uaddl2v16qi ((int8x16_t) __a,
						     (int8x16_t) __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddl_high_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return (uint32x4_t) __builtin_aarch64_uaddl2v8hi ((int16x8_t) __a,
						    (int16x8_t) __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddl_high_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return (uint64x2_t) __builtin_aarch64_uaddl2v4si ((int32x4_t) __a,
						    (int32x4_t) __b);
}
| |
/* vaddw_<t>: wide addition — add a 64-bit vector __b, widened per
   lane, to the already-wide 128-bit vector __a, via the
   __builtin_aarch64_[su]addw* builtins.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddw_s8 (int16x8_t __a, int8x8_t __b)
{
  return (int16x8_t) __builtin_aarch64_saddwv8qi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddw_s16 (int32x4_t __a, int16x4_t __b)
{
  return (int32x4_t) __builtin_aarch64_saddwv4hi (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddw_s32 (int64x2_t __a, int32x2_t __b)
{
  return (int64x2_t) __builtin_aarch64_saddwv2si (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddw_u8 (uint16x8_t __a, uint8x8_t __b)
{
  return (uint16x8_t) __builtin_aarch64_uaddwv8qi ((int16x8_t) __a,
						   (int8x8_t) __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddw_u16 (uint32x4_t __a, uint16x4_t __b)
{
  return (uint32x4_t) __builtin_aarch64_uaddwv4hi ((int32x4_t) __a,
						   (int16x4_t) __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddw_u32 (uint64x2_t __a, uint32x2_t __b)
{
  return (uint64x2_t) __builtin_aarch64_uaddwv2si ((int64x2_t) __a,
						   (int32x2_t) __b);
}
| |
/* vaddw_high_<t>: wide addition using the high half of the 128-bit
   narrow operand __b, via the __builtin_aarch64_[su]addw2* builtins.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddw_high_s8 (int16x8_t __a, int8x16_t __b)
{
  return (int16x8_t) __builtin_aarch64_saddw2v16qi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddw_high_s16 (int32x4_t __a, int16x8_t __b)
{
  return (int32x4_t) __builtin_aarch64_saddw2v8hi (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddw_high_s32 (int64x2_t __a, int32x4_t __b)
{
  return (int64x2_t) __builtin_aarch64_saddw2v4si (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddw_high_u8 (uint16x8_t __a, uint8x16_t __b)
{
  return (uint16x8_t) __builtin_aarch64_uaddw2v16qi ((int16x8_t) __a,
						     (int8x16_t) __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddw_high_u16 (uint32x4_t __a, uint16x8_t __b)
{
  return (uint32x4_t) __builtin_aarch64_uaddw2v8hi ((int32x4_t) __a,
						    (int16x8_t) __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddw_high_u32 (uint64x2_t __a, uint32x4_t __b)
{
  return (uint64x2_t) __builtin_aarch64_uaddw2v4si ((int64x2_t) __a,
						    (int32x4_t) __b);
}
| |
/* vhadd_<t>: halving addition per lane ((a + b) >> 1 computed without
   intermediate overflow, per ACLE), 64-bit vectors, via the
   __builtin_aarch64_[su]hadd* builtins.  */
__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhadd_s8 (int8x8_t __a, int8x8_t __b)
{
  return (int8x8_t) __builtin_aarch64_shaddv8qi (__a, __b);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhadd_s16 (int16x4_t __a, int16x4_t __b)
{
  return (int16x4_t) __builtin_aarch64_shaddv4hi (__a, __b);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhadd_s32 (int32x2_t __a, int32x2_t __b)
{
  return (int32x2_t) __builtin_aarch64_shaddv2si (__a, __b);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhadd_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return (uint8x8_t) __builtin_aarch64_uhaddv8qi ((int8x8_t) __a,
						  (int8x8_t) __b);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhadd_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return (uint16x4_t) __builtin_aarch64_uhaddv4hi ((int16x4_t) __a,
						   (int16x4_t) __b);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhadd_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return (uint32x2_t) __builtin_aarch64_uhaddv2si ((int32x2_t) __a,
						   (int32x2_t) __b);
}
| |
/* vhaddq_<t>: halving addition per lane, 128-bit vectors.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhaddq_s8 (int8x16_t __a, int8x16_t __b)
{
  return (int8x16_t) __builtin_aarch64_shaddv16qi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhaddq_s16 (int16x8_t __a, int16x8_t __b)
{
  return (int16x8_t) __builtin_aarch64_shaddv8hi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhaddq_s32 (int32x4_t __a, int32x4_t __b)
{
  return (int32x4_t) __builtin_aarch64_shaddv4si (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return (uint8x16_t) __builtin_aarch64_uhaddv16qi ((int8x16_t) __a,
						    (int8x16_t) __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return (uint16x8_t) __builtin_aarch64_uhaddv8hi ((int16x8_t) __a,
						   (int16x8_t) __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return (uint32x4_t) __builtin_aarch64_uhaddv4si ((int32x4_t) __a,
						   (int32x4_t) __b);
}
| |
/* vrhadd_<t>: rounding halving addition per lane ((a + b + 1) >> 1,
   per ACLE), 64-bit vectors, via the __builtin_aarch64_[su]rhadd*
   builtins.  */
__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrhadd_s8 (int8x8_t __a, int8x8_t __b)
{
  return (int8x8_t) __builtin_aarch64_srhaddv8qi (__a, __b);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrhadd_s16 (int16x4_t __a, int16x4_t __b)
{
  return (int16x4_t) __builtin_aarch64_srhaddv4hi (__a, __b);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrhadd_s32 (int32x2_t __a, int32x2_t __b)
{
  return (int32x2_t) __builtin_aarch64_srhaddv2si (__a, __b);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrhadd_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return (uint8x8_t) __builtin_aarch64_urhaddv8qi ((int8x8_t) __a,
						   (int8x8_t) __b);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrhadd_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return (uint16x4_t) __builtin_aarch64_urhaddv4hi ((int16x4_t) __a,
						    (int16x4_t) __b);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrhadd_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return (uint32x2_t) __builtin_aarch64_urhaddv2si ((int32x2_t) __a,
						    (int32x2_t) __b);
}
| |
/* vrhaddq_<t>: rounding halving addition per lane, 128-bit vectors.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrhaddq_s8 (int8x16_t __a, int8x16_t __b)
{
  return (int8x16_t) __builtin_aarch64_srhaddv16qi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrhaddq_s16 (int16x8_t __a, int16x8_t __b)
{
  return (int16x8_t) __builtin_aarch64_srhaddv8hi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrhaddq_s32 (int32x4_t __a, int32x4_t __b)
{
  return (int32x4_t) __builtin_aarch64_srhaddv4si (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return (uint8x16_t) __builtin_aarch64_urhaddv16qi ((int8x16_t) __a,
						     (int8x16_t) __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return (uint16x8_t) __builtin_aarch64_urhaddv8hi ((int16x8_t) __a,
						    (int16x8_t) __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return (uint32x4_t) __builtin_aarch64_urhaddv4si ((int32x4_t) __a,
						    (int32x4_t) __b);
}
| |
/* vaddhn_<t>: add and narrow to high half (ACLE; ADDHN) — per the intrinsic
   naming, adds two double-width vectors and returns the high half of each
   lane of the sum, narrowing to half-width elements.  A single sign-agnostic
   builtin serves both signed and unsigned variants; the unsigned ones go
   through reinterpreting casts.  */
__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_s16 (int16x8_t __a, int16x8_t __b)
{
  return (int8x8_t) __builtin_aarch64_addhnv8hi (__a, __b);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_s32 (int32x4_t __a, int32x4_t __b)
{
  return (int16x4_t) __builtin_aarch64_addhnv4si (__a, __b);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_s64 (int64x2_t __a, int64x2_t __b)
{
  return (int32x2_t) __builtin_aarch64_addhnv2di (__a, __b);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return (uint8x8_t) __builtin_aarch64_addhnv8hi ((int16x8_t) __a,
                                                  (int16x8_t) __b);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return (uint16x4_t) __builtin_aarch64_addhnv4si ((int32x4_t) __a,
                                                   (int32x4_t) __b);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_u64 (uint64x2_t __a, uint64x2_t __b)
{
  return (uint32x2_t) __builtin_aarch64_addhnv2di ((int64x2_t) __a,
                                                   (int64x2_t) __b);
}
| |
/* vraddhn_<t>: rounding variant of vaddhn_<t> (ACLE; RADDHN) — the
   narrowed high half is rounded rather than truncated, per the intrinsic
   naming.  Same shared-builtin/cast scheme as the non-rounding family.  */
__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_s16 (int16x8_t __a, int16x8_t __b)
{
  return (int8x8_t) __builtin_aarch64_raddhnv8hi (__a, __b);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_s32 (int32x4_t __a, int32x4_t __b)
{
  return (int16x4_t) __builtin_aarch64_raddhnv4si (__a, __b);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_s64 (int64x2_t __a, int64x2_t __b)
{
  return (int32x2_t) __builtin_aarch64_raddhnv2di (__a, __b);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return (uint8x8_t) __builtin_aarch64_raddhnv8hi ((int16x8_t) __a,
                                                   (int16x8_t) __b);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return (uint16x4_t) __builtin_aarch64_raddhnv4si ((int32x4_t) __a,
                                                    (int32x4_t) __b);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_u64 (uint64x2_t __a, uint64x2_t __b)
{
  return (uint32x2_t) __builtin_aarch64_raddhnv2di ((int64x2_t) __a,
                                                    (int64x2_t) __b);
}
| |
/* vaddhn_high_<t>: add-narrow into the high half of a 128-bit result
   (ACLE; ADDHN2).  __a supplies the low half of the result; the narrowed
   high halves of (__b + __c) fill the upper half — per the intrinsic
   naming.  Unsigned variants cast through the signed-typed builtin.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_high_s16 (int8x8_t __a, int16x8_t __b, int16x8_t __c)
{
  return (int8x16_t) __builtin_aarch64_addhn2v8hi (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_high_s32 (int16x4_t __a, int32x4_t __b, int32x4_t __c)
{
  return (int16x8_t) __builtin_aarch64_addhn2v4si (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_high_s64 (int32x2_t __a, int64x2_t __b, int64x2_t __c)
{
  return (int32x4_t) __builtin_aarch64_addhn2v2di (__a, __b, __c);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c)
{
  return (uint8x16_t) __builtin_aarch64_addhn2v8hi ((int8x8_t) __a,
                                                    (int16x8_t) __b,
                                                    (int16x8_t) __c);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return (uint16x8_t) __builtin_aarch64_addhn2v4si ((int16x4_t) __a,
                                                    (int32x4_t) __b,
                                                    (int32x4_t) __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c)
{
  return (uint32x4_t) __builtin_aarch64_addhn2v2di ((int32x2_t) __a,
                                                    (int64x2_t) __b,
                                                    (int64x2_t) __c);
}
| |
/* vraddhn_high_<t>: rounding variant of vaddhn_high_<t> (ACLE; RADDHN2) —
   narrowed high halves are rounded per the intrinsic naming.  Same operand
   layout and cast scheme as the non-rounding family.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_high_s16 (int8x8_t __a, int16x8_t __b, int16x8_t __c)
{
  return (int8x16_t) __builtin_aarch64_raddhn2v8hi (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_high_s32 (int16x4_t __a, int32x4_t __b, int32x4_t __c)
{
  return (int16x8_t) __builtin_aarch64_raddhn2v4si (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_high_s64 (int32x2_t __a, int64x2_t __b, int64x2_t __c)
{
  return (int32x4_t) __builtin_aarch64_raddhn2v2di (__a, __b, __c);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c)
{
  return (uint8x16_t) __builtin_aarch64_raddhn2v8hi ((int8x8_t) __a,
                                                     (int16x8_t) __b,
                                                     (int16x8_t) __c);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return (uint16x8_t) __builtin_aarch64_raddhn2v4si ((int16x4_t) __a,
                                                     (int32x4_t) __b,
                                                     (int32x4_t) __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c)
{
  return (uint32x4_t) __builtin_aarch64_raddhn2v2di ((int32x2_t) __a,
                                                     (int64x2_t) __b,
                                                     (int64x2_t) __c);
}
| |
/* vdiv_<t>/vdivq_<t>: lane-wise floating-point division, expressed with
   GCC's vector-extension `/' operator so the compiler selects the FDIV
   form itself.  */
__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdiv_f32 (float32x2_t __a, float32x2_t __b)
{
  return __a / __b;
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdiv_f64 (float64x1_t __a, float64x1_t __b)
{
  return __a / __b;
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdivq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __a / __b;
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdivq_f64 (float64x2_t __a, float64x2_t __b)
{
  return __a / __b;
}
| |
/* vmul_<t>/vmulq_<t>: lane-wise multiply.  Integer and floating-point
   variants use the vector-extension `*' operator directly; the poly8
   variants must use the PMUL builtin because polynomial (carry-less)
   multiplication has no C operator, and they reinterpret the poly vectors
   as signed ones to match the builtin's declared types.  */
__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_s8 (int8x8_t __a, int8x8_t __b)
{
  return __a * __b;
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_s16 (int16x4_t __a, int16x4_t __b)
{
  return __a * __b;
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_s32 (int32x2_t __a, int32x2_t __b)
{
  return __a * __b;
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_f32 (float32x2_t __a, float32x2_t __b)
{
  return __a * __b;
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_f64 (float64x1_t __a, float64x1_t __b)
{
  return __a * __b;
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return __a * __b;
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return __a * __b;
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return __a * __b;
}

/* Polynomial multiply (PMUL): no C operator exists for carry-less
   multiplication, so this goes through the builtin.  */
__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmul_p8 (poly8x8_t __a, poly8x8_t __b)
{
  return (poly8x8_t) __builtin_aarch64_pmulv8qi ((int8x8_t) __a,
                                                 (int8x8_t) __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __a * __b;
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __a * __b;
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __a * __b;
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __a * __b;
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_f64 (float64x2_t __a, float64x2_t __b)
{
  return __a * __b;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __a * __b;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __a * __b;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __a * __b;
}

/* 128-bit polynomial multiply; see vmul_p8 above.  */
__extension__ extern __inline poly8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmulq_p8 (poly8x16_t __a, poly8x16_t __b)
{
  return (poly8x16_t) __builtin_aarch64_pmulv16qi ((int8x16_t) __a,
                                                   (int8x16_t) __b);
}
| |
/* vand_<t>/vandq_<t>: lane-wise bitwise AND, expressed with the
   vector-extension `&' operator (AND instruction).  */
__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vand_s8 (int8x8_t __a, int8x8_t __b)
{
  return __a & __b;
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vand_s16 (int16x4_t __a, int16x4_t __b)
{
  return __a & __b;
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vand_s32 (int32x2_t __a, int32x2_t __b)
{
  return __a & __b;
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vand_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return __a & __b;
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vand_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return __a & __b;
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vand_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return __a & __b;
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vand_s64 (int64x1_t __a, int64x1_t __b)
{
  return __a & __b;
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vand_u64 (uint64x1_t __a, uint64x1_t __b)
{
  return __a & __b;
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vandq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __a & __b;
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vandq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __a & __b;
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vandq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __a & __b;
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vandq_s64 (int64x2_t __a, int64x2_t __b)
{
  return __a & __b;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vandq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __a & __b;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vandq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __a & __b;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vandq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __a & __b;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vandq_u64 (uint64x2_t __a, uint64x2_t __b)
{
  return __a & __b;
}
| |
/* vorr_<t>/vorrq_<t>: lane-wise bitwise OR, expressed with the
   vector-extension `|' operator (ORR instruction).  */
__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorr_s8 (int8x8_t __a, int8x8_t __b)
{
  return __a | __b;
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorr_s16 (int16x4_t __a, int16x4_t __b)
{
  return __a | __b;
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorr_s32 (int32x2_t __a, int32x2_t __b)
{
  return __a | __b;
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorr_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return __a | __b;
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorr_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return __a | __b;
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorr_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return __a | __b;
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorr_s64 (int64x1_t __a, int64x1_t __b)
{
  return __a | __b;
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorr_u64 (uint64x1_t __a, uint64x1_t __b)
{
  return __a | __b;
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorrq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __a | __b;
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorrq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __a | __b;
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorrq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __a | __b;
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorrq_s64 (int64x2_t __a, int64x2_t __b)
{
  return __a | __b;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorrq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __a | __b;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorrq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __a | __b;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorrq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __a | __b;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorrq_u64 (uint64x2_t __a, uint64x2_t __b)
{
  return __a | __b;
}
| |
/* veor_<t>/veorq_<t>: lane-wise bitwise exclusive OR, expressed with the
   vector-extension `^' operator (EOR instruction).  */
__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veor_s8 (int8x8_t __a, int8x8_t __b)
{
  return __a ^ __b;
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veor_s16 (int16x4_t __a, int16x4_t __b)
{
  return __a ^ __b;
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veor_s32 (int32x2_t __a, int32x2_t __b)
{
  return __a ^ __b;
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veor_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return __a ^ __b;
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veor_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return __a ^ __b;
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veor_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return __a ^ __b;
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veor_s64 (int64x1_t __a, int64x1_t __b)
{
  return __a ^ __b;
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veor_u64 (uint64x1_t __a, uint64x1_t __b)
{
  return __a ^ __b;
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veorq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __a ^ __b;
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veorq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __a ^ __b;
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veorq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __a ^ __b;
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veorq_s64 (int64x2_t __a, int64x2_t __b)
{
  return __a ^ __b;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veorq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __a ^ __b;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veorq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __a ^ __b;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veorq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __a ^ __b;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
veorq_u64 (uint64x2_t __a, uint64x2_t __b)
{
  return __a ^ __b;
}
| |
/* vbic_<t>/vbicq_<t>: bit clear — lane-wise __a AND NOT __b, expressed as
   `__a & ~__b' so the compiler can select the BIC instruction form.  */
__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbic_s8 (int8x8_t __a, int8x8_t __b)
{
  return __a & ~__b;
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbic_s16 (int16x4_t __a, int16x4_t __b)
{
  return __a & ~__b;
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbic_s32 (int32x2_t __a, int32x2_t __b)
{
  return __a & ~__b;
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbic_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return __a & ~__b;
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbic_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return __a & ~__b;
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbic_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return __a & ~__b;
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbic_s64 (int64x1_t __a, int64x1_t __b)
{
  return __a & ~__b;
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbic_u64 (uint64x1_t __a, uint64x1_t __b)
{
  return __a & ~__b;
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbicq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __a & ~__b;
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbicq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __a & ~__b;
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbicq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __a & ~__b;
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbicq_s64 (int64x2_t __a, int64x2_t __b)
{
  return __a & ~__b;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbicq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __a & ~__b;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbicq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __a & ~__b;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbicq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __a & ~__b;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbicq_u64 (uint64x2_t __a, uint64x2_t __b)
{
  return __a & ~__b;
}
| |
/* vorn_<t>/vornq_<t>: OR NOT — lane-wise __a OR NOT __b, expressed as
   `__a | ~__b' so the compiler can select the ORN instruction form.  */
__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorn_s8 (int8x8_t __a, int8x8_t __b)
{
  return __a | ~__b;
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorn_s16 (int16x4_t __a, int16x4_t __b)
{
  return __a | ~__b;
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorn_s32 (int32x2_t __a, int32x2_t __b)
{
  return __a | ~__b;
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorn_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return __a | ~__b;
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorn_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return __a | ~__b;
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorn_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return __a | ~__b;
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorn_s64 (int64x1_t __a, int64x1_t __b)
{
  return __a | ~__b;
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vorn_u64 (uint64x1_t __a, uint64x1_t __b)
{
  return __a | ~__b;
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vornq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __a | ~__b;
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vornq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __a | ~__b;
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vornq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __a | ~__b;
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vornq_s64 (int64x2_t __a, int64x2_t __b)
{
  return __a | ~__b;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vornq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __a | ~__b;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vornq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __a | ~__b;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vornq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __a | ~__b;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vornq_u64 (uint64x2_t __a, uint64x2_t __b)
{
  return __a | ~__b;
}
| |
/* vsub_<t>/vsubq_<t>: lane-wise subtraction via the vector-extension `-'
   operator (SUB / FSUB).  Integer lanes wrap modulo 2^N per GCC vector
   semantics.  */
__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsub_s8 (int8x8_t __a, int8x8_t __b)
{
  return __a - __b;
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsub_s16 (int16x4_t __a, int16x4_t __b)
{
  return __a - __b;
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsub_s32 (int32x2_t __a, int32x2_t __b)
{
  return __a - __b;
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsub_f32 (float32x2_t __a, float32x2_t __b)
{
  return __a - __b;
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsub_f64 (float64x1_t __a, float64x1_t __b)
{
  return __a - __b;
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsub_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return __a - __b;
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsub_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return __a - __b;
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsub_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return __a - __b;
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsub_s64 (int64x1_t __a, int64x1_t __b)
{
  return __a - __b;
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsub_u64 (uint64x1_t __a, uint64x1_t __b)
{
  return __a - __b;
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __a - __b;
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __a - __b;
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __a - __b;
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubq_s64 (int64x2_t __a, int64x2_t __b)
{
  return __a - __b;
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __a - __b;
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubq_f64 (float64x2_t __a, float64x2_t __b)
{
  return __a - __b;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __a - __b;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __a - __b;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __a - __b;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubq_u64 (uint64x2_t __a, uint64x2_t __b)
{
  return __a - __b;
}
| |
/* vsubl: widening ("long") subtract — each result lane is twice the
   width of the input lanes, as the signatures show (e.g. int8x8_t
   inputs, int16x8_t result).  The unsigned variants cast their
   arguments through the corresponding signed vector types because the
   usubl builtins are declared on signed vectors; the bit patterns are
   unchanged and the result is cast back.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubl_s8 (int8x8_t __a, int8x8_t __b)
{
  return (int16x8_t) __builtin_aarch64_ssublv8qi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubl_s16 (int16x4_t __a, int16x4_t __b)
{
  return (int32x4_t) __builtin_aarch64_ssublv4hi (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubl_s32 (int32x2_t __a, int32x2_t __b)
{
  return (int64x2_t) __builtin_aarch64_ssublv2si (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubl_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return (uint16x8_t) __builtin_aarch64_usublv8qi ((int8x8_t) __a,
						   (int8x8_t) __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubl_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return (uint32x4_t) __builtin_aarch64_usublv4hi ((int16x4_t) __a,
						   (int16x4_t) __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubl_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return (uint64x2_t) __builtin_aarch64_usublv2si ((int32x2_t) __a,
						   (int32x2_t) __b);
}
| |
/* vsubl_high: widening subtract applied to the high halves of two
   128-bit inputs (the "2" in the builtin names).  Unsigned variants
   cast through signed vector types for the same reason as vsubl.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubl_high_s8 (int8x16_t __a, int8x16_t __b)
{
  return (int16x8_t) __builtin_aarch64_ssubl2v16qi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubl_high_s16 (int16x8_t __a, int16x8_t __b)
{
  return (int32x4_t) __builtin_aarch64_ssubl2v8hi (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubl_high_s32 (int32x4_t __a, int32x4_t __b)
{
  return (int64x2_t) __builtin_aarch64_ssubl2v4si (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubl_high_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return (uint16x8_t) __builtin_aarch64_usubl2v16qi ((int8x16_t) __a,
						     (int8x16_t) __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubl_high_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return (uint32x4_t) __builtin_aarch64_usubl2v8hi ((int16x8_t) __a,
						    (int16x8_t) __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubl_high_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return (uint64x2_t) __builtin_aarch64_usubl2v4si ((int32x4_t) __a,
						    (int32x4_t) __b);
}
| |
/* vsubw: widening subtract where the first operand __a already has the
   wide lane type and only __b's lanes are widened before subtracting
   (note the asymmetric signatures, e.g. int16x8_t - int8x8_t).  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubw_s8 (int16x8_t __a, int8x8_t __b)
{
  return (int16x8_t) __builtin_aarch64_ssubwv8qi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubw_s16 (int32x4_t __a, int16x4_t __b)
{
  return (int32x4_t) __builtin_aarch64_ssubwv4hi (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubw_s32 (int64x2_t __a, int32x2_t __b)
{
  return (int64x2_t) __builtin_aarch64_ssubwv2si (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubw_u8 (uint16x8_t __a, uint8x8_t __b)
{
  return (uint16x8_t) __builtin_aarch64_usubwv8qi ((int16x8_t) __a,
						   (int8x8_t) __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubw_u16 (uint32x4_t __a, uint16x4_t __b)
{
  return (uint32x4_t) __builtin_aarch64_usubwv4hi ((int32x4_t) __a,
						   (int16x4_t) __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubw_u32 (uint64x2_t __a, uint32x2_t __b)
{
  return (uint64x2_t) __builtin_aarch64_usubwv2si ((int64x2_t) __a,
						   (int32x2_t) __b);
}
| |
/* vsubw_high: like vsubw, but the narrow operand __b is a full 128-bit
   vector and only its high half is widened and subtracted.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubw_high_s8 (int16x8_t __a, int8x16_t __b)
{
  return (int16x8_t) __builtin_aarch64_ssubw2v16qi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubw_high_s16 (int32x4_t __a, int16x8_t __b)
{
  return (int32x4_t) __builtin_aarch64_ssubw2v8hi (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubw_high_s32 (int64x2_t __a, int32x4_t __b)
{
  return (int64x2_t) __builtin_aarch64_ssubw2v4si (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubw_high_u8 (uint16x8_t __a, uint8x16_t __b)
{
  return (uint16x8_t) __builtin_aarch64_usubw2v16qi ((int16x8_t) __a,
						     (int8x16_t) __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubw_high_u16 (uint32x4_t __a, uint16x8_t __b)
{
  return (uint32x4_t) __builtin_aarch64_usubw2v8hi ((int32x4_t) __a,
						    (int16x8_t) __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubw_high_u32 (uint64x2_t __a, uint32x4_t __b)
{
  return (uint64x2_t) __builtin_aarch64_usubw2v4si ((int64x2_t) __a,
						    (int32x4_t) __b);
}
| |
/* vqadd: saturating lane-wise add (sqadd/uqadd builtins clamp instead
   of wrapping on overflow).  The s64 form has a single lane, so it
   calls the scalar DImode builtin on element 0 and rebuilds a one-lane
   vector with a brace initializer.  The u8 form uses the "_uuu"
   builtin, which is typed on unsigned vectors, so no casts are needed.
   (The remaining unsigned vqadd variants are defined further down in
   this file.)  */

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadd_s8 (int8x8_t __a, int8x8_t __b)
{
  return (int8x8_t) __builtin_aarch64_sqaddv8qi (__a, __b);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadd_s16 (int16x4_t __a, int16x4_t __b)
{
  return (int16x4_t) __builtin_aarch64_sqaddv4hi (__a, __b);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadd_s32 (int32x2_t __a, int32x2_t __b)
{
  return (int32x2_t) __builtin_aarch64_sqaddv2si (__a, __b);
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadd_s64 (int64x1_t __a, int64x1_t __b)
{
  return (int64x1_t) {__builtin_aarch64_sqadddi (__a[0], __b[0])};
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadd_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return __builtin_aarch64_uqaddv8qi_uuu (__a, __b);
}
| |
/* vhsub (64-bit forms): halving subtract — per the shsub/uhsub builtin
   names, each result lane is the lane difference shifted right by one.
   Unsigned variants cast through signed vector types because those
   builtins are declared on signed vectors; bit patterns are
   unchanged.  */

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhsub_s8 (int8x8_t __a, int8x8_t __b)
{
  return (int8x8_t)__builtin_aarch64_shsubv8qi (__a, __b);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhsub_s16 (int16x4_t __a, int16x4_t __b)
{
  return (int16x4_t) __builtin_aarch64_shsubv4hi (__a, __b);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhsub_s32 (int32x2_t __a, int32x2_t __b)
{
  return (int32x2_t) __builtin_aarch64_shsubv2si (__a, __b);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhsub_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return (uint8x8_t) __builtin_aarch64_uhsubv8qi ((int8x8_t) __a,
						  (int8x8_t) __b);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhsub_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return (uint16x4_t) __builtin_aarch64_uhsubv4hi ((int16x4_t) __a,
						   (int16x4_t) __b);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhsub_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return (uint32x2_t) __builtin_aarch64_uhsubv2si ((int32x2_t) __a,
						   (int32x2_t) __b);
}
| |
/* vhsubq (128-bit forms): halving subtract, same semantics as vhsub
   but over full Q-register vectors.  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhsubq_s8 (int8x16_t __a, int8x16_t __b)
{
  return (int8x16_t) __builtin_aarch64_shsubv16qi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhsubq_s16 (int16x8_t __a, int16x8_t __b)
{
  return (int16x8_t) __builtin_aarch64_shsubv8hi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhsubq_s32 (int32x4_t __a, int32x4_t __b)
{
  return (int32x4_t) __builtin_aarch64_shsubv4si (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhsubq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return (uint8x16_t) __builtin_aarch64_uhsubv16qi ((int8x16_t) __a,
						    (int8x16_t) __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhsubq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return (uint16x8_t) __builtin_aarch64_uhsubv8hi ((int16x8_t) __a,
						   (int16x8_t) __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vhsubq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return (uint32x4_t) __builtin_aarch64_uhsubv4si ((int32x4_t) __a,
						   (int32x4_t) __b);
}
| |
/* vsubhn: subtract-high-narrow — subtract and keep the most
   significant half of each lane, halving the lane width (signatures
   narrow e.g. int16x8_t inputs to an int8x8_t result).  Signed and
   unsigned share one builtin per width; unsigned arguments/results are
   bit-cast.  */

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_s16 (int16x8_t __a, int16x8_t __b)
{
  return (int8x8_t) __builtin_aarch64_subhnv8hi (__a, __b);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_s32 (int32x4_t __a, int32x4_t __b)
{
  return (int16x4_t) __builtin_aarch64_subhnv4si (__a, __b);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_s64 (int64x2_t __a, int64x2_t __b)
{
  return (int32x2_t) __builtin_aarch64_subhnv2di (__a, __b);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return (uint8x8_t) __builtin_aarch64_subhnv8hi ((int16x8_t) __a,
						  (int16x8_t) __b);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return (uint16x4_t) __builtin_aarch64_subhnv4si ((int32x4_t) __a,
						   (int32x4_t) __b);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_u64 (uint64x2_t __a, uint64x2_t __b)
{
  return (uint32x2_t) __builtin_aarch64_subhnv2di ((int64x2_t) __a,
						   (int64x2_t) __b);
}
| |
/* vrsubhn: rounding subtract-high-narrow — like vsubhn but with
   rounding before the high half is taken (the "r" in the rsubhn
   builtins).  Unsigned forms bit-cast through the signed types.  */

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_s16 (int16x8_t __a, int16x8_t __b)
{
  return (int8x8_t) __builtin_aarch64_rsubhnv8hi (__a, __b);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_s32 (int32x4_t __a, int32x4_t __b)
{
  return (int16x4_t) __builtin_aarch64_rsubhnv4si (__a, __b);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_s64 (int64x2_t __a, int64x2_t __b)
{
  return (int32x2_t) __builtin_aarch64_rsubhnv2di (__a, __b);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return (uint8x8_t) __builtin_aarch64_rsubhnv8hi ((int16x8_t) __a,
						   (int16x8_t) __b);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return (uint16x4_t) __builtin_aarch64_rsubhnv4si ((int32x4_t) __a,
						    (int32x4_t) __b);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_u64 (uint64x2_t __a, uint64x2_t __b)
{
  return (uint32x2_t) __builtin_aarch64_rsubhnv2di ((int64x2_t) __a,
						    (int64x2_t) __b);
}
| |
/* vrsubhn_high: rounding subtract-high-narrow into the upper half of a
   128-bit result; __a supplies the lower half and the narrowed
   differences of __b and __c fill the upper half (rsubhn2 builtins).  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_high_s16 (int8x8_t __a, int16x8_t __b, int16x8_t __c)
{
  return (int8x16_t) __builtin_aarch64_rsubhn2v8hi (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_high_s32 (int16x4_t __a, int32x4_t __b, int32x4_t __c)
{
  return (int16x8_t) __builtin_aarch64_rsubhn2v4si (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_high_s64 (int32x2_t __a, int64x2_t __b, int64x2_t __c)
{
  return (int32x4_t) __builtin_aarch64_rsubhn2v2di (__a, __b, __c);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c)
{
  return (uint8x16_t) __builtin_aarch64_rsubhn2v8hi ((int8x8_t) __a,
						     (int16x8_t) __b,
						     (int16x8_t) __c);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return (uint16x8_t) __builtin_aarch64_rsubhn2v4si ((int16x4_t) __a,
						     (int32x4_t) __b,
						     (int32x4_t) __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c)
{
  return (uint32x4_t) __builtin_aarch64_rsubhn2v2di ((int32x2_t) __a,
						     (int64x2_t) __b,
						     (int64x2_t) __c);
}
| |
/* vsubhn_high: subtract-high-narrow into the upper half of a 128-bit
   result; __a supplies the lower half (subhn2 builtin).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_high_s16 (int8x8_t __a, int16x8_t __b, int16x8_t __c)
{
  return (int8x16_t) __builtin_aarch64_subhn2v8hi (__a, __b, __c);
}
| |
| __extension__ extern __inline int16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vsubhn_high_s32 (int16x4_t __a, int32x4_t __b, int32x4_t __c) |
| { |
| return (int16x8_t) __builtin_aarch64_subhn2v4si (__a, __b, __c);; |
| } |
| |
/* vsubhn_high (remaining forms): subtract-high-narrow into the upper
   half of a 128-bit result, lower half taken from __a.  Unsigned
   variants bit-cast through the signed vector types the builtins are
   declared on.  */

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_high_s64 (int32x2_t __a, int64x2_t __b, int64x2_t __c)
{
  return (int32x4_t) __builtin_aarch64_subhn2v2di (__a, __b, __c);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_high_u16 (uint8x8_t __a, uint16x8_t __b, uint16x8_t __c)
{
  return (uint8x16_t) __builtin_aarch64_subhn2v8hi ((int8x8_t) __a,
						    (int16x8_t) __b,
						    (int16x8_t) __c);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_high_u32 (uint16x4_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return (uint16x8_t) __builtin_aarch64_subhn2v4si ((int16x4_t) __a,
						    (int32x4_t) __b,
						    (int32x4_t) __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_high_u64 (uint32x2_t __a, uint64x2_t __b, uint64x2_t __c)
{
  return (uint32x4_t) __builtin_aarch64_subhn2v2di ((int32x2_t) __a,
						    (int64x2_t) __b,
						    (int64x2_t) __c);
}
| |
/* vqadd (remaining unsigned 64-bit forms): saturating add via the
   unsigned-typed "_uuu" builtins; the u64 form has one lane, so it
   calls the scalar DImode builtin on element 0 and rebuilds the
   vector.  */

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadd_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return __builtin_aarch64_uqaddv4hi_uuu (__a, __b);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadd_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return __builtin_aarch64_uqaddv2si_uuu (__a, __b);
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqadd_u64 (uint64x1_t __a, uint64x1_t __b)
{
  return (uint64x1_t) {__builtin_aarch64_uqadddi_uuu (__a[0], __b[0])};
}
| |
/* vqaddq (128-bit forms): saturating lane-wise add.  Signed variants
   use the sqadd builtins with a result cast; unsigned variants use the
   unsigned-typed "_uuu" builtins directly.  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_s8 (int8x16_t __a, int8x16_t __b)
{
  return (int8x16_t) __builtin_aarch64_sqaddv16qi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_s16 (int16x8_t __a, int16x8_t __b)
{
  return (int16x8_t) __builtin_aarch64_sqaddv8hi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_s32 (int32x4_t __a, int32x4_t __b)
{
  return (int32x4_t) __builtin_aarch64_sqaddv4si (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_s64 (int64x2_t __a, int64x2_t __b)
{
  return (int64x2_t) __builtin_aarch64_sqaddv2di (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_aarch64_uqaddv16qi_uuu (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_aarch64_uqaddv8hi_uuu (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_aarch64_uqaddv4si_uuu (__a, __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_u64 (uint64x2_t __a, uint64x2_t __b)
{
  return __builtin_aarch64_uqaddv2di_uuu (__a, __b);
}
| |
/* vqsub (64-bit forms): saturating lane-wise subtract.  Single-lane
   64-bit variants call the scalar DImode builtins on element 0 and
   rebuild the one-lane vector with a brace initializer.  */

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsub_s8 (int8x8_t __a, int8x8_t __b)
{
  return (int8x8_t) __builtin_aarch64_sqsubv8qi (__a, __b);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsub_s16 (int16x4_t __a, int16x4_t __b)
{
  return (int16x4_t) __builtin_aarch64_sqsubv4hi (__a, __b);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsub_s32 (int32x2_t __a, int32x2_t __b)
{
  return (int32x2_t) __builtin_aarch64_sqsubv2si (__a, __b);
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsub_s64 (int64x1_t __a, int64x1_t __b)
{
  return (int64x1_t) {__builtin_aarch64_sqsubdi (__a[0], __b[0])};
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsub_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return __builtin_aarch64_uqsubv8qi_uuu (__a, __b);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsub_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return __builtin_aarch64_uqsubv4hi_uuu (__a, __b);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsub_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return __builtin_aarch64_uqsubv2si_uuu (__a, __b);
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsub_u64 (uint64x1_t __a, uint64x1_t __b)
{
  return (uint64x1_t) {__builtin_aarch64_uqsubdi_uuu (__a[0], __b[0])};
}
| |
/* vqsubq (128-bit forms): saturating lane-wise subtract, mirroring the
   vqaddq pattern: sqsub builtins with a result cast for signed lanes,
   "_uuu" builtins for unsigned.  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_s8 (int8x16_t __a, int8x16_t __b)
{
  return (int8x16_t) __builtin_aarch64_sqsubv16qi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_s16 (int16x8_t __a, int16x8_t __b)
{
  return (int16x8_t) __builtin_aarch64_sqsubv8hi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_s32 (int32x4_t __a, int32x4_t __b)
{
  return (int32x4_t) __builtin_aarch64_sqsubv4si (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_s64 (int64x2_t __a, int64x2_t __b)
{
  return (int64x2_t) __builtin_aarch64_sqsubv2di (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_aarch64_uqsubv16qi_uuu (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_aarch64_uqsubv8hi_uuu (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_aarch64_uqsubv4si_uuu (__a, __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_u64 (uint64x2_t __a, uint64x2_t __b)
{
  return __builtin_aarch64_uqsubv2di_uuu (__a, __b);
}
| |
/* vqneg / vqnegq: saturating lane-wise negation (sqneg builtins) —
   signed types only.  The single-lane s64 form uses the scalar DImode
   builtin on element 0.  (No 128-bit s64 variant appears in this
   group.)  */

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqneg_s8 (int8x8_t __a)
{
  return (int8x8_t) __builtin_aarch64_sqnegv8qi (__a);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqneg_s16 (int16x4_t __a)
{
  return (int16x4_t) __builtin_aarch64_sqnegv4hi (__a);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqneg_s32 (int32x2_t __a)
{
  return (int32x2_t) __builtin_aarch64_sqnegv2si (__a);
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqneg_s64 (int64x1_t __a)
{
  return (int64x1_t) {__builtin_aarch64_sqnegdi (__a[0])};
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqnegq_s8 (int8x16_t __a)
{
  return (int8x16_t) __builtin_aarch64_sqnegv16qi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqnegq_s16 (int16x8_t __a)
{
  return (int16x8_t) __builtin_aarch64_sqnegv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqnegq_s32 (int32x4_t __a)
{
  return (int32x4_t) __builtin_aarch64_sqnegv4si (__a);
}
| |
/* vqabs / vqabsq: saturating lane-wise absolute value (sqabs
   builtins) — signed types only.  The single-lane s64 form uses the
   scalar DImode builtin on element 0.  */

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqabs_s8 (int8x8_t __a)
{
  return (int8x8_t) __builtin_aarch64_sqabsv8qi (__a);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqabs_s16 (int16x4_t __a)
{
  return (int16x4_t) __builtin_aarch64_sqabsv4hi (__a);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqabs_s32 (int32x2_t __a)
{
  return (int32x2_t) __builtin_aarch64_sqabsv2si (__a);
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqabs_s64 (int64x1_t __a)
{
  return (int64x1_t) {__builtin_aarch64_sqabsdi (__a[0])};
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqabsq_s8 (int8x16_t __a)
{
  return (int8x16_t) __builtin_aarch64_sqabsv16qi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqabsq_s16 (int16x8_t __a)
{
  return (int16x8_t) __builtin_aarch64_sqabsv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqabsq_s32 (int32x4_t __a)
{
  return (int32x4_t) __builtin_aarch64_sqabsv4si (__a);
}
| |
/* vqdmulh / vqdmulhq: saturating doubling multiply returning the high
   half of each product (sqdmulh builtins); defined for 16- and 32-bit
   signed lanes only.  */

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulh_s16 (int16x4_t __a, int16x4_t __b)
{
  return (int16x4_t) __builtin_aarch64_sqdmulhv4hi (__a, __b);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulh_s32 (int32x2_t __a, int32x2_t __b)
{
  return (int32x2_t) __builtin_aarch64_sqdmulhv2si (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulhq_s16 (int16x8_t __a, int16x8_t __b)
{
  return (int16x8_t) __builtin_aarch64_sqdmulhv8hi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulhq_s32 (int32x4_t __a, int32x4_t __b)
{
  return (int32x4_t) __builtin_aarch64_sqdmulhv4si (__a, __b);
}
| |
/* vqrdmulh / vqrdmulhq: rounding variant of vqdmulh (sqrdmulh
   builtins); 16- and 32-bit signed lanes only.  */

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulh_s16 (int16x4_t __a, int16x4_t __b)
{
  return (int16x4_t) __builtin_aarch64_sqrdmulhv4hi (__a, __b);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulh_s32 (int32x2_t __a, int32x2_t __b)
{
  return (int32x2_t) __builtin_aarch64_sqrdmulhv2si (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulhq_s16 (int16x8_t __a, int16x8_t __b)
{
  return (int16x8_t) __builtin_aarch64_sqrdmulhv8hi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulhq_s32 (int32x4_t __a, int32x4_t __b)
{
  return (int32x4_t) __builtin_aarch64_sqrdmulhv4si (__a, __b);
}
| |
/* vcreate: reinterpret a 64-bit scalar as a 64-bit vector.  Multi-lane
   types use a reinterpreting vector cast of the integer; the one-lane
   64-bit types (s64, u64) instead use a brace initializer, since a
   cast there would be a value conversion rather than a bit
   reinterpretation.  */

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_s8 (uint64_t __a)
{
  return (int8x8_t) __a;
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_s16 (uint64_t __a)
{
  return (int16x4_t) __a;
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_s32 (uint64_t __a)
{
  return (int32x2_t) __a;
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_s64 (uint64_t __a)
{
  return (int64x1_t) {__a};
}

__extension__ extern __inline float16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_f16 (uint64_t __a)
{
  return (float16x4_t) __a;
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_f32 (uint64_t __a)
{
  return (float32x2_t) __a;
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_u8 (uint64_t __a)
{
  return (uint8x8_t) __a;
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_u16 (uint64_t __a)
{
  return (uint16x4_t) __a;
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_u32 (uint64_t __a)
{
  return (uint32x2_t) __a;
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_u64 (uint64_t __a)
{
  return (uint64x1_t) {__a};
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_f64 (uint64_t __a)
{
  return (float64x1_t) __a;
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_p8 (uint64_t __a)
{
  return (poly8x8_t) __a;
}

__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_p16 (uint64_t __a)
{
  return (poly16x4_t) __a;
}

__extension__ extern __inline poly64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcreate_p64 (uint64_t __a)
{
  return (poly64x1_t) __a;
}
| |
| /* vget_lane */ |
| |
/* vget_lane: extract scalar lane __b from a 64-bit vector via the
   __aarch64_vget_lane_any helper macro (defined earlier in this
   header); __b must be a constant lane index.  */

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_f16 (float16x4_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_f32 (float32x2_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline float64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_f64 (float64x1_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline poly8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_p8 (poly8x8_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline poly16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_p16 (poly16x4_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline poly64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_p64 (poly64x1_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline int8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_s8 (int8x8_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_s16 (int16x4_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_s32 (int32x2_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}
| |
| __extension__ extern __inline int64_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vget_lane_s64 (int64x1_t __a, const int __b) |
| { |
| return __aarch64_vget_lane_any (__a, __b); |
| } |
| |
| __extension__ extern __inline uint8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vget_lane_u8 (uint8x8_t __a, const int __b) |
| { |
| return __aarch64_vget_lane_any (__a, __b); |
| } |
| |
| __extension__ extern __inline uint16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vget_lane_u16 (uint16x4_t __a, const int __b) |
| { |
| return __aarch64_vget_lane_any (__a, __b); |
| } |
| |
| __extension__ extern __inline uint32_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vget_lane_u32 (uint32x2_t __a, const int __b) |
| { |
| return __aarch64_vget_lane_any (__a, __b); |
| } |
| |
| __extension__ extern __inline uint64_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vget_lane_u64 (uint64x1_t __a, const int __b) |
| { |
| return __aarch64_vget_lane_any (__a, __b); |
| } |
| |
/* vgetq_lane: return the value of lane __b of the 128-bit vector __a.
   Same delegation to __aarch64_vget_lane_any as the 64-bit vget_lane
   variants above.  */

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_f16 (float16x8_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_f32 (float32x4_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline float64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_f64 (float64x2_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline poly8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_p8 (poly8x16_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline poly16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_p16 (poly16x8_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline poly64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_p64 (poly64x2_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline int8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_s8 (int8x16_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_s16 (int16x8_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_s32 (int32x4_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_s64 (int64x2_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_u8 (uint8x16_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_u16 (uint16x8_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_u32 (uint32x4_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_u64 (uint64x2_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}
| |
/* vreinterpret: reinterpret the bits of one vector type as another of
   the same total size.  The casts below change only the static type;
   no lane values are converted or moved.

   vreinterpret_p8: 64-bit vectors viewed as poly8x8_t.  */

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_f16 (float16x4_t __a)
{
  return (poly8x8_t) __a;
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_f64 (float64x1_t __a)
{
  return (poly8x8_t) __a;
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_s8 (int8x8_t __a)
{
  return (poly8x8_t) __a;
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_s16 (int16x4_t __a)
{
  return (poly8x8_t) __a;
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_s32 (int32x2_t __a)
{
  return (poly8x8_t) __a;
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_s64 (int64x1_t __a)
{
  return (poly8x8_t) __a;
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_f32 (float32x2_t __a)
{
  return (poly8x8_t) __a;
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_u8 (uint8x8_t __a)
{
  return (poly8x8_t) __a;
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_u16 (uint16x4_t __a)
{
  return (poly8x8_t) __a;
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_u32 (uint32x2_t __a)
{
  return (poly8x8_t) __a;
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_u64 (uint64x1_t __a)
{
  return (poly8x8_t) __a;
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_p16 (poly16x4_t __a)
{
  return (poly8x8_t) __a;
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_p64 (poly64x1_t __a)
{
  return (poly8x8_t) __a;
}
| |
| __extension__ extern __inline poly8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p8_f64 (float64x2_t __a) |
| { |
| return (poly8x16_t) __a; |
| } |
| |
| __extension__ extern __inline poly8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p8_s8 (int8x16_t __a) |
| { |
| return (poly8x16_t) __a; |
| } |
| |
| __extension__ extern __inline poly8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p8_s16 (int16x8_t __a) |
| { |
| return (poly8x16_t) __a; |
| } |
| |
| __extension__ extern __inline poly8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p8_s32 (int32x4_t __a) |
| { |
| return (poly8x16_t) __a; |
| } |
| |
| __extension__ extern __inline poly8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p8_s64 (int64x2_t __a) |
| { |
| return (poly8x16_t) __a; |
| } |
| |
| __extension__ extern __inline poly8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p8_f16 (float16x8_t __a) |
| { |
| return (poly8x16_t) __a; |
| } |
| |
| __extension__ extern __inline poly8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p8_f32 (float32x4_t __a) |
| { |
| return (poly8x16_t) __a; |
| } |
| |
| __extension__ extern __inline poly8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p8_u8 (uint8x16_t __a) |
| { |
| return (poly8x16_t) __a; |
| } |
| |
| __extension__ extern __inline poly8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p8_u16 (uint16x8_t __a) |
| { |
| return (poly8x16_t) __a; |
| } |
| |
| __extension__ extern __inline poly8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p8_u32 (uint32x4_t __a) |
| { |
| return (poly8x16_t) __a; |
| } |
| |
| __extension__ extern __inline poly8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p8_u64 (uint64x2_t __a) |
| { |
| return (poly8x16_t) __a; |
| } |
| |
| __extension__ extern __inline poly8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p8_p16 (poly16x8_t __a) |
| { |
| return (poly8x16_t) __a; |
| } |
| |
| __extension__ extern __inline poly8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p8_p64 (poly64x2_t __a) |
| { |
| return (poly8x16_t) __a; |
| } |
| |
| __extension__ extern __inline poly8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p8_p128 (poly128_t __a) |
| { |
| return (poly8x16_t)__a; |
| } |
| |
/* vreinterpret_p16: reinterpret a 64-bit vector as poly16x4_t.
   Bitwise only — no lane conversion.  */

__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_f16 (float16x4_t __a)
{
  return (poly16x4_t) __a;
}

__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_f64 (float64x1_t __a)
{
  return (poly16x4_t) __a;
}

__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_s8 (int8x8_t __a)
{
  return (poly16x4_t) __a;
}

__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_s16 (int16x4_t __a)
{
  return (poly16x4_t) __a;
}

__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_s32 (int32x2_t __a)
{
  return (poly16x4_t) __a;
}

__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_s64 (int64x1_t __a)
{
  return (poly16x4_t) __a;
}

__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_f32 (float32x2_t __a)
{
  return (poly16x4_t) __a;
}

__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_u8 (uint8x8_t __a)
{
  return (poly16x4_t) __a;
}

__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_u16 (uint16x4_t __a)
{
  return (poly16x4_t) __a;
}

__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_u32 (uint32x2_t __a)
{
  return (poly16x4_t) __a;
}

__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_u64 (uint64x1_t __a)
{
  return (poly16x4_t) __a;
}

__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_p8 (poly8x8_t __a)
{
  return (poly16x4_t) __a;
}

__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_p64 (poly64x1_t __a)
{
  return (poly16x4_t) __a;
}
| |
| __extension__ extern __inline poly16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p16_f64 (float64x2_t __a) |
| { |
| return (poly16x8_t) __a; |
| } |
| |
| __extension__ extern __inline poly16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p16_s8 (int8x16_t __a) |
| { |
| return (poly16x8_t) __a; |
| } |
| |
| __extension__ extern __inline poly16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p16_s16 (int16x8_t __a) |
| { |
| return (poly16x8_t) __a; |
| } |
| |
| __extension__ extern __inline poly16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p16_s32 (int32x4_t __a) |
| { |
| return (poly16x8_t) __a; |
| } |
| |
| __extension__ extern __inline poly16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p16_s64 (int64x2_t __a) |
| { |
| return (poly16x8_t) __a; |
| } |
| |
| __extension__ extern __inline poly16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p16_f16 (float16x8_t __a) |
| { |
| return (poly16x8_t) __a; |
| } |
| |
| __extension__ extern __inline poly16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p16_f32 (float32x4_t __a) |
| { |
| return (poly16x8_t) __a; |
| } |
| |
| __extension__ extern __inline poly16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p16_u8 (uint8x16_t __a) |
| { |
| return (poly16x8_t) __a; |
| } |
| |
| __extension__ extern __inline poly16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p16_u16 (uint16x8_t __a) |
| { |
| return (poly16x8_t) __a; |
| } |
| |
| __extension__ extern __inline poly16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p16_u32 (uint32x4_t __a) |
| { |
| return (poly16x8_t) __a; |
| } |
| |
| __extension__ extern __inline poly16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p16_u64 (uint64x2_t __a) |
| { |
| return (poly16x8_t) __a; |
| } |
| |
| __extension__ extern __inline poly16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p16_p8 (poly8x16_t __a) |
| { |
| return (poly16x8_t) __a; |
| } |
| |
| __extension__ extern __inline poly16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p16_p64 (poly64x2_t __a) |
| { |
| return (poly16x8_t) __a; |
| } |
| |
| __extension__ extern __inline poly16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p16_p128 (poly128_t __a) |
| { |
| return (poly16x8_t)__a; |
| } |
| |
| __extension__ extern __inline poly64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpret_p64_f16 (float16x4_t __a) |
| { |
| return (poly64x1_t) __a; |
| } |
| |
| __extension__ extern __inline poly64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpret_p64_f64 (float64x1_t __a) |
| { |
| return (poly64x1_t) __a; |
| } |
| |
| __extension__ extern __inline poly64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpret_p64_s8 (int8x8_t __a) |
| { |
| return (poly64x1_t) __a; |
| } |
| |
| __extension__ extern __inline poly64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpret_p64_s16 (int16x4_t __a) |
| { |
| return (poly64x1_t) __a; |
| } |
| |
| __extension__ extern __inline poly64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpret_p64_s32 (int32x2_t __a) |
| { |
| return (poly64x1_t) __a; |
| } |
| |
| __extension__ extern __inline poly64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpret_p64_s64 (int64x1_t __a) |
| { |
| return (poly64x1_t) __a; |
| } |
| |
| __extension__ extern __inline poly64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpret_p64_f32 (float32x2_t __a) |
| { |
| return (poly64x1_t) __a; |
| } |
| |
| __extension__ extern __inline poly64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpret_p64_u8 (uint8x8_t __a) |
| { |
| return (poly64x1_t) __a; |
| } |
| |
| __extension__ extern __inline poly64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpret_p64_u16 (uint16x4_t __a) |
| { |
| return (poly64x1_t) __a; |
| } |
| |
| __extension__ extern __inline poly64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpret_p64_u32 (uint32x2_t __a) |
| { |
| return (poly64x1_t) __a; |
| } |
| |
| __extension__ extern __inline poly64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpret_p64_u64 (uint64x1_t __a) |
| { |
| return (poly64x1_t) __a; |
| } |
| |
| __extension__ extern __inline poly64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpret_p64_p8 (poly8x8_t __a) |
| { |
| return (poly64x1_t) __a; |
| } |
| |
| __extension__ extern __inline poly64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpret_p64_p16 (poly16x4_t __a) |
| { |
| return (poly64x1_t)__a; |
| } |
| |
| __extension__ extern __inline poly64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p64_f64 (float64x2_t __a) |
| { |
| return (poly64x2_t) __a; |
| } |
| |
| __extension__ extern __inline poly64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p64_s8 (int8x16_t __a) |
| { |
| return (poly64x2_t) __a; |
| } |
| |
| __extension__ extern __inline poly64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p64_s16 (int16x8_t __a) |
| { |
| return (poly64x2_t) __a; |
| } |
| |
| __extension__ extern __inline poly64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p64_s32 (int32x4_t __a) |
| { |
| return (poly64x2_t) __a; |
| } |
| |
| __extension__ extern __inline poly64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p64_s64 (int64x2_t __a) |
| { |
| return (poly64x2_t) __a; |
| } |
| |
| __extension__ extern __inline poly64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p64_f16 (float16x8_t __a) |
| { |
| return (poly64x2_t) __a; |
| } |
| |
| __extension__ extern __inline poly64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p64_f32 (float32x4_t __a) |
| { |
| return (poly64x2_t) __a; |
| } |
| |
| __extension__ extern __inline poly64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p64_p128 (poly128_t __a) |
| { |
| return (poly64x2_t)__a; |
| } |
| |
| __extension__ extern __inline poly64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p64_u8 (uint8x16_t __a) |
| { |
| return (poly64x2_t) __a; |
| } |
| |
| __extension__ extern __inline poly64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p64_u16 (uint16x8_t __a) |
| { |
| return (poly64x2_t) __a; |
| } |
| |
| __extension__ extern __inline poly64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p64_p16 (poly16x8_t __a) |
| { |
| return (poly64x2_t)__a; |
| } |
| |
| __extension__ extern __inline poly64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p64_u32 (uint32x4_t __a) |
| { |
| return (poly64x2_t) __a; |
| } |
| |
| __extension__ extern __inline poly64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p64_u64 (uint64x2_t __a) |
| { |
| return (poly64x2_t) __a; |
| } |
| |
| __extension__ extern __inline poly64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p64_p8 (poly8x16_t __a) |
| { |
| return (poly64x2_t) __a; |
| } |
| |
| __extension__ extern __inline poly128_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p128_p8 (poly8x16_t __a) |
| { |
| return (poly128_t)__a; |
| } |
| |
| __extension__ extern __inline poly128_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p128_p16 (poly16x8_t __a) |
| { |
| return (poly128_t)__a; |
| } |
| |
| __extension__ extern __inline poly128_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p128_f16 (float16x8_t __a) |
| { |
| return (poly128_t) __a; |
| } |
| |
| __extension__ extern __inline poly128_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p128_f32 (float32x4_t __a) |
| { |
| return (poly128_t)__a; |
| } |
| |
| __extension__ extern __inline poly128_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p128_p64 (poly64x2_t __a) |
| { |
| return (poly128_t)__a; |
| } |
| |
| __extension__ extern __inline poly128_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p128_s64 (int64x2_t __a) |
| { |
| return (poly128_t)__a; |
| } |
| |
| __extension__ extern __inline poly128_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p128_u64 (uint64x2_t __a) |
| { |
| return (poly128_t)__a; |
| } |
| |
| __extension__ extern __inline poly128_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p128_s8 (int8x16_t __a) |
| { |
| return (poly128_t)__a; |
| } |
| |
| __extension__ extern __inline poly128_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p128_s16 (int16x8_t __a) |
| { |
| return (poly128_t)__a; |
| } |
| |
| __extension__ extern __inline poly128_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p128_s32 (int32x4_t __a) |
| { |
| return (poly128_t)__a; |
| } |
| |
| __extension__ extern __inline poly128_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p128_u8 (uint8x16_t __a) |
| { |
| return (poly128_t)__a; |
| } |
| |
| __extension__ extern __inline poly128_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p128_u16 (uint16x8_t __a) |
| { |
| return (poly128_t)__a; |
| } |
| |
| __extension__ extern __inline poly128_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_p128_u32 (uint32x4_t __a) |
| { |
| return (poly128_t)__a; |
| } |
| |
/* vreinterpret_f16: reinterpret a 64-bit vector as float16x4_t.
   Bitwise only — no lane conversion.  */

__extension__ extern __inline float16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_f64 (float64x1_t __a)
{
  return (float16x4_t) __a;
}

__extension__ extern __inline float16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_s8 (int8x8_t __a)
{
  return (float16x4_t) __a;
}

__extension__ extern __inline float16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_s16 (int16x4_t __a)
{
  return (float16x4_t) __a;
}

__extension__ extern __inline float16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_s32 (int32x2_t __a)
{
  return (float16x4_t) __a;
}

__extension__ extern __inline float16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_s64 (int64x1_t __a)
{
  return (float16x4_t) __a;
}

__extension__ extern __inline float16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_f32 (float32x2_t __a)
{
  return (float16x4_t) __a;
}

__extension__ extern __inline float16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_u8 (uint8x8_t __a)
{
  return (float16x4_t) __a;
}

__extension__ extern __inline float16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_u16 (uint16x4_t __a)
{
  return (float16x4_t) __a;
}

__extension__ extern __inline float16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_u32 (uint32x2_t __a)
{
  return (float16x4_t) __a;
}

__extension__ extern __inline float16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_u64 (uint64x1_t __a)
{
  return (float16x4_t) __a;
}

__extension__ extern __inline float16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_p8 (poly8x8_t __a)
{
  return (float16x4_t) __a;
}

__extension__ extern __inline float16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_p16 (poly16x4_t __a)
{
  return (float16x4_t) __a;
}

__extension__ extern __inline float16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_p64 (poly64x1_t __a)
{
  return (float16x4_t) __a;
}
| |
/* vreinterpretq_f16: reinterpret a 128-bit vector (or a poly128_t
   scalar) as float16x8_t.  Bitwise only — no lane conversion.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_f64 (float64x2_t __a)
{
  return (float16x8_t) __a;
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_s8 (int8x16_t __a)
{
  return (float16x8_t) __a;
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_s16 (int16x8_t __a)
{
  return (float16x8_t) __a;
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_s32 (int32x4_t __a)
{
  return (float16x8_t) __a;
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_s64 (int64x2_t __a)
{
  return (float16x8_t) __a;
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_f32 (float32x4_t __a)
{
  return (float16x8_t) __a;
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_u8 (uint8x16_t __a)
{
  return (float16x8_t) __a;
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_u16 (uint16x8_t __a)
{
  return (float16x8_t) __a;
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_u32 (uint32x4_t __a)
{
  return (float16x8_t) __a;
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_u64 (uint64x2_t __a)
{
  return (float16x8_t) __a;
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_p8 (poly8x16_t __a)
{
  return (float16x8_t) __a;
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_p128 (poly128_t __a)
{
  return (float16x8_t) __a;
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_p16 (poly16x8_t __a)
{
  return (float16x8_t) __a;
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_p64 (poly64x2_t __a)
{
  return (float16x8_t) __a;
}
| |
/* vreinterpret_f32: reinterpret a 64-bit vector as float32x2_t.
   Bitwise only — no lane conversion.  */

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_f16 (float16x4_t __a)
{
  return (float32x2_t) __a;
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_f64 (float64x1_t __a)
{
  return (float32x2_t) __a;
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_s8 (int8x8_t __a)
{
  return (float32x2_t) __a;
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_s16 (int16x4_t __a)
{
  return (float32x2_t) __a;
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_s32 (int32x2_t __a)
{
  return (float32x2_t) __a;
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_s64 (int64x1_t __a)
{
  return (float32x2_t) __a;
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_u8 (uint8x8_t __a)
{
  return (float32x2_t) __a;
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_u16 (uint16x4_t __a)
{
  return (float32x2_t) __a;
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_u32 (uint32x2_t __a)
{
  return (float32x2_t) __a;
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_u64 (uint64x1_t __a)
{
  return (float32x2_t) __a;
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_p8 (poly8x8_t __a)
{
  return (float32x2_t) __a;
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_p16 (poly16x4_t __a)
{
  return (float32x2_t) __a;
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_p64 (poly64x1_t __a)
{
  return (float32x2_t) __a;
}
| |
/* vreinterpretq_f32_*: reinterpret the 128 bits of a Q-register vector as
   float32x4_t.  A pure type change; no bits are modified.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_f16 (float16x8_t __a)
{
  return (float32x4_t) __a;
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_f64 (float64x2_t __a)
{
  return (float32x4_t) __a;
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_s8 (int8x16_t __a)
{
  return (float32x4_t) __a;
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_s16 (int16x8_t __a)
{
  return (float32x4_t) __a;
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_s32 (int32x4_t __a)
{
  return (float32x4_t) __a;
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_s64 (int64x2_t __a)
{
  return (float32x4_t) __a;
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_u8 (uint8x16_t __a)
{
  return (float32x4_t) __a;
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_u16 (uint16x8_t __a)
{
  return (float32x4_t) __a;
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_u32 (uint32x4_t __a)
{
  return (float32x4_t) __a;
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_u64 (uint64x2_t __a)
{
  return (float32x4_t) __a;
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_p8 (poly8x16_t __a)
{
  return (float32x4_t) __a;
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_p16 (poly16x8_t __a)
{
  return (float32x4_t) __a;
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_p64 (poly64x2_t __a)
{
  return (float32x4_t) __a;
}
| |
| __extension__ extern __inline float32x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_f32_p128 (poly128_t __a) |
| { |
| return (float32x4_t)__a; |
| } |
| |
| |
/* vreinterpret_f64_*: reinterpret the 64 bits of a D-register vector as
   float64x1_t.  A pure type change; no bits are modified.  */
__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f64_f16 (float16x4_t __a)
{
  return (float64x1_t) __a;
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f64_f32 (float32x2_t __a)
{
  return (float64x1_t) __a;
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f64_p8 (poly8x8_t __a)
{
  return (float64x1_t) __a;
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f64_p16 (poly16x4_t __a)
{
  return (float64x1_t) __a;
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f64_p64 (poly64x1_t __a)
{
  return (float64x1_t) __a;
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f64_s8 (int8x8_t __a)
{
  return (float64x1_t) __a;
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f64_s16 (int16x4_t __a)
{
  return (float64x1_t) __a;
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f64_s32 (int32x2_t __a)
{
  return (float64x1_t) __a;
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f64_s64 (int64x1_t __a)
{
  return (float64x1_t) __a;
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f64_u8 (uint8x8_t __a)
{
  return (float64x1_t) __a;
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f64_u16 (uint16x4_t __a)
{
  return (float64x1_t) __a;
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f64_u32 (uint32x2_t __a)
{
  return (float64x1_t) __a;
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f64_u64 (uint64x1_t __a)
{
  return (float64x1_t) __a;
}
| |
/* vreinterpretq_f64_*: reinterpret the 128 bits of a Q-register vector as
   float64x2_t.  A pure type change; no bits are modified.  */
__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f64_f16 (float16x8_t __a)
{
  return (float64x2_t) __a;
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f64_f32 (float32x4_t __a)
{
  return (float64x2_t) __a;
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f64_p8 (poly8x16_t __a)
{
  return (float64x2_t) __a;
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f64_p16 (poly16x8_t __a)
{
  return (float64x2_t) __a;
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f64_p64 (poly64x2_t __a)
{
  return (float64x2_t) __a;
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f64_s8 (int8x16_t __a)
{
  return (float64x2_t) __a;
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f64_s16 (int16x8_t __a)
{
  return (float64x2_t) __a;
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f64_s32 (int32x4_t __a)
{
  return (float64x2_t) __a;
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f64_s64 (int64x2_t __a)
{
  return (float64x2_t) __a;
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f64_u8 (uint8x16_t __a)
{
  return (float64x2_t) __a;
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f64_u16 (uint16x8_t __a)
{
  return (float64x2_t) __a;
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f64_u32 (uint32x4_t __a)
{
  return (float64x2_t) __a;
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f64_u64 (uint64x2_t __a)
{
  return (float64x2_t) __a;
}
| |
/* vreinterpret_s64_*: reinterpret the 64 bits of a D-register vector as
   int64x1_t.  A pure type change; no bits are modified.  */
__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_f16 (float16x4_t __a)
{
  return (int64x1_t) __a;
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_f64 (float64x1_t __a)
{
  return (int64x1_t) __a;
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_s8 (int8x8_t __a)
{
  return (int64x1_t) __a;
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_s16 (int16x4_t __a)
{
  return (int64x1_t) __a;
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_s32 (int32x2_t __a)
{
  return (int64x1_t) __a;
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_f32 (float32x2_t __a)
{
  return (int64x1_t) __a;
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_u8 (uint8x8_t __a)
{
  return (int64x1_t) __a;
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_u16 (uint16x4_t __a)
{
  return (int64x1_t) __a;
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_u32 (uint32x2_t __a)
{
  return (int64x1_t) __a;
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_u64 (uint64x1_t __a)
{
  return (int64x1_t) __a;
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_p8 (poly8x8_t __a)
{
  return (int64x1_t) __a;
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_p16 (poly16x4_t __a)
{
  return (int64x1_t) __a;
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_p64 (poly64x1_t __a)
{
  return (int64x1_t) __a;
}
| |
/* vreinterpretq_s64_*: reinterpret the 128 bits of a Q-register vector as
   int64x2_t.  A pure type change; no bits are modified.  */
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_f64 (float64x2_t __a)
{
  return (int64x2_t) __a;
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_s8 (int8x16_t __a)
{
  return (int64x2_t) __a;
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_s16 (int16x8_t __a)
{
  return (int64x2_t) __a;
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_s32 (int32x4_t __a)
{
  return (int64x2_t) __a;
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_f16 (float16x8_t __a)
{
  return (int64x2_t) __a;
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_f32 (float32x4_t __a)
{
  return (int64x2_t) __a;
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_u8 (uint8x16_t __a)
{
  return (int64x2_t) __a;
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_u16 (uint16x8_t __a)
{
  return (int64x2_t) __a;
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_u32 (uint32x4_t __a)
{
  return (int64x2_t) __a;
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_u64 (uint64x2_t __a)
{
  return (int64x2_t) __a;
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_p8 (poly8x16_t __a)
{
  return (int64x2_t) __a;
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_p16 (poly16x8_t __a)
{
  return (int64x2_t) __a;
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_p64 (poly64x2_t __a)
{
  return (int64x2_t) __a;
}
| |
| __extension__ extern __inline int64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_s64_p128 (poly128_t __a) |
| { |
| return (int64x2_t)__a; |
| } |
| |
/* vreinterpret_u64_*: reinterpret the 64 bits of a D-register vector as
   uint64x1_t.  A pure type change; no bits are modified.  */
__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_f16 (float16x4_t __a)
{
  return (uint64x1_t) __a;
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_f64 (float64x1_t __a)
{
  return (uint64x1_t) __a;
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_s8 (int8x8_t __a)
{
  return (uint64x1_t) __a;
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_s16 (int16x4_t __a)
{
  return (uint64x1_t) __a;
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_s32 (int32x2_t __a)
{
  return (uint64x1_t) __a;
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_s64 (int64x1_t __a)
{
  return (uint64x1_t) __a;
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_f32 (float32x2_t __a)
{
  return (uint64x1_t) __a;
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_u8 (uint8x8_t __a)
{
  return (uint64x1_t) __a;
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_u16 (uint16x4_t __a)
{
  return (uint64x1_t) __a;
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_u32 (uint32x2_t __a)
{
  return (uint64x1_t) __a;
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_p8 (poly8x8_t __a)
{
  return (uint64x1_t) __a;
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_p16 (poly16x4_t __a)
{
  return (uint64x1_t) __a;
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_p64 (poly64x1_t __a)
{
  return (uint64x1_t) __a;
}
| |
/* vreinterpretq_u64_*: reinterpret the 128 bits of a Q-register vector as
   uint64x2_t.  A pure type change; no bits are modified.  */
__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_f64 (float64x2_t __a)
{
  return (uint64x2_t) __a;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_s8 (int8x16_t __a)
{
  return (uint64x2_t) __a;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_s16 (int16x8_t __a)
{
  return (uint64x2_t) __a;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_s32 (int32x4_t __a)
{
  return (uint64x2_t) __a;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_s64 (int64x2_t __a)
{
  return (uint64x2_t) __a;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_f16 (float16x8_t __a)
{
  return (uint64x2_t) __a;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_f32 (float32x4_t __a)
{
  return (uint64x2_t) __a;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_u8 (uint8x16_t __a)
{
  return (uint64x2_t) __a;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_u16 (uint16x8_t __a)
{
  return (uint64x2_t) __a;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_u32 (uint32x4_t __a)
{
  return (uint64x2_t) __a;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_p8 (poly8x16_t __a)
{
  return (uint64x2_t) __a;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_p16 (poly16x8_t __a)
{
  return (uint64x2_t) __a;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_p64 (poly64x2_t __a)
{
  return (uint64x2_t) __a;
}
| |
| __extension__ extern __inline uint64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_u64_p128 (poly128_t __a) |
| { |
| return (uint64x2_t)__a; |
| } |
| |
/* vreinterpret_s8_*: reinterpret the 64 bits of a D-register vector as
   int8x8_t.  A pure type change; no bits are modified.  */
__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_f16 (float16x4_t __a)
{
  return (int8x8_t) __a;
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_f64 (float64x1_t __a)
{
  return (int8x8_t) __a;
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_s16 (int16x4_t __a)
{
  return (int8x8_t) __a;
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_s32 (int32x2_t __a)
{
  return (int8x8_t) __a;
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_s64 (int64x1_t __a)
{
  return (int8x8_t) __a;
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_f32 (float32x2_t __a)
{
  return (int8x8_t) __a;
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_u8 (uint8x8_t __a)
{
  return (int8x8_t) __a;
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_u16 (uint16x4_t __a)
{
  return (int8x8_t) __a;
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_u32 (uint32x2_t __a)
{
  return (int8x8_t) __a;
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_u64 (uint64x1_t __a)
{
  return (int8x8_t) __a;
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_p8 (poly8x8_t __a)
{
  return (int8x8_t) __a;
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_p16 (poly16x4_t __a)
{
  return (int8x8_t) __a;
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_p64 (poly64x1_t __a)
{
  return (int8x8_t) __a;
}
| |
/* vreinterpretq_s8_*: reinterpret the 128 bits of a Q-register vector as
   int8x16_t.  A pure type change; no bits are modified.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_f64 (float64x2_t __a)
{
  return (int8x16_t) __a;
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_s16 (int16x8_t __a)
{
  return (int8x16_t) __a;
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_s32 (int32x4_t __a)
{
  return (int8x16_t) __a;
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_s64 (int64x2_t __a)
{
  return (int8x16_t) __a;
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_f16 (float16x8_t __a)
{
  return (int8x16_t) __a;
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_f32 (float32x4_t __a)
{
  return (int8x16_t) __a;
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_u8 (uint8x16_t __a)
{
  return (int8x16_t) __a;
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_u16 (uint16x8_t __a)
{
  return (int8x16_t) __a;
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_u32 (uint32x4_t __a)
{
  return (int8x16_t) __a;
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_u64 (uint64x2_t __a)
{
  return (int8x16_t) __a;
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_p8 (poly8x16_t __a)
{
  return (int8x16_t) __a;
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_p16 (poly16x8_t __a)
{
  return (int8x16_t) __a;
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_p64 (poly64x2_t __a)
{
  return (int8x16_t) __a;
}
| |
| __extension__ extern __inline int8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_s8_p128 (poly128_t __a) |
| { |
| return (int8x16_t)__a; |
| } |
| |
/* vreinterpret_s16_*: reinterpret the 64 bits of a D-register vector as
   int16x4_t.  A pure type change; no bits are modified.  */
__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_f16 (float16x4_t __a)
{
  return (int16x4_t) __a;
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_f64 (float64x1_t __a)
{
  return (int16x4_t) __a;
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_s8 (int8x8_t __a)
{
  return (int16x4_t) __a;
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_s32 (int32x2_t __a)
{
  return (int16x4_t) __a;
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_s64 (int64x1_t __a)
{
  return (int16x4_t) __a;
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_f32 (float32x2_t __a)
{
  return (int16x4_t) __a;
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_u8 (uint8x8_t __a)
{
  return (int16x4_t) __a;
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_u16 (uint16x4_t __a)
{
  return (int16x4_t) __a;
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_u32 (uint32x2_t __a)
{
  return (int16x4_t) __a;
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_u64 (uint64x1_t __a)
{
  return (int16x4_t) __a;
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_p8 (poly8x8_t __a)
{
  return (int16x4_t) __a;
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_p16 (poly16x4_t __a)
{
  return (int16x4_t) __a;
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_p64 (poly64x1_t __a)
{
  return (int16x4_t) __a;
}
| |
/* vreinterpretq_s16_*: reinterpret the 128 bits of a Q-register vector as
   int16x8_t.  A pure type change; no bits are modified.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_f64 (float64x2_t __a)
{
  return (int16x8_t) __a;
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_s8 (int8x16_t __a)
{
  return (int16x8_t) __a;
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_s32 (int32x4_t __a)
{
  return (int16x8_t) __a;
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_s64 (int64x2_t __a)
{
  return (int16x8_t) __a;
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_f16 (float16x8_t __a)
{
  return (int16x8_t) __a;
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_f32 (float32x4_t __a)
{
  return (int16x8_t) __a;
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_u8 (uint8x16_t __a)
{
  return (int16x8_t) __a;
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_u16 (uint16x8_t __a)
{
  return (int16x8_t) __a;
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_u32 (uint32x4_t __a)
{
  return (int16x8_t) __a;
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_u64 (uint64x2_t __a)
{
  return (int16x8_t) __a;
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_p8 (poly8x16_t __a)
{
  return (int16x8_t) __a;
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_p16 (poly16x8_t __a)
{
  return (int16x8_t) __a;
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_p64 (poly64x2_t __a)
{
  return (int16x8_t) __a;
}
| |
| __extension__ extern __inline int16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_s16_p128 (poly128_t __a) |
| { |
| return (int16x8_t)__a; |
| } |
| |
/* vreinterpret_s32_*: reinterpret the 64 bits of a D-register vector as
   int32x2_t.  A pure type change; no bits are modified.  */
__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_f16 (float16x4_t __a)
{
  return (int32x2_t) __a;
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_f64 (float64x1_t __a)
{
  return (int32x2_t) __a;
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_s8 (int8x8_t __a)
{
  return (int32x2_t) __a;
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_s16 (int16x4_t __a)
{
  return (int32x2_t) __a;
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_s64 (int64x1_t __a)
{
  return (int32x2_t) __a;
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_f32 (float32x2_t __a)
{
  return (int32x2_t) __a;
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_u8 (uint8x8_t __a)
{
  return (int32x2_t) __a;
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_u16 (uint16x4_t __a)
{
  return (int32x2_t) __a;
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_u32 (uint32x2_t __a)
{
  return (int32x2_t) __a;
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_u64 (uint64x1_t __a)
{
  return (int32x2_t) __a;
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_p8 (poly8x8_t __a)
{
  return (int32x2_t) __a;
}
| |
| __extension__ extern __inline int32x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpret_s32_p16 (poly16x4_t __a) |
| { |
| return (int32x2_t) __a; |
| } |
| |
| __extension__ extern __inline int32x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpret_s32_p64 (poly64x1_t __a) |
| { |
| return (int32x2_t) __a; |
| } |
| |
/* Reinterpret casts to int32x4_t.  Each retags the 128-bit register
   contents as four signed 32-bit lanes; the bit pattern is unchanged.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_f64 (float64x2_t __a)
{
  return (int32x4_t) __a;
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_s8 (int8x16_t __a)
{
  return (int32x4_t) __a;
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_s16 (int16x8_t __a)
{
  return (int32x4_t) __a;
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_s64 (int64x2_t __a)
{
  return (int32x4_t) __a;
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_f16 (float16x8_t __a)
{
  return (int32x4_t) __a;
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_f32 (float32x4_t __a)
{
  return (int32x4_t) __a;
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_u8 (uint8x16_t __a)
{
  return (int32x4_t) __a;
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_u16 (uint16x8_t __a)
{
  return (int32x4_t) __a;
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_u32 (uint32x4_t __a)
{
  return (int32x4_t) __a;
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_u64 (uint64x2_t __a)
{
  return (int32x4_t) __a;
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_p8 (poly8x16_t __a)
{
  return (int32x4_t) __a;
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_p16 (poly16x8_t __a)
{
  return (int32x4_t) __a;
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_p64 (poly64x2_t __a)
{
  return (int32x4_t) __a;
}
| |
| __extension__ extern __inline int32x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_s32_p128 (poly128_t __a) |
| { |
| return (int32x4_t)__a; |
| } |
| |
/* Reinterpret casts to uint8x8_t.  Each retags the 64-bit register
   contents as eight unsigned 8-bit lanes; the bit pattern is unchanged.  */
__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_f16 (float16x4_t __a)
{
  return (uint8x8_t) __a;
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_f64 (float64x1_t __a)
{
  return (uint8x8_t) __a;
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_s8 (int8x8_t __a)
{
  return (uint8x8_t) __a;
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_s16 (int16x4_t __a)
{
  return (uint8x8_t) __a;
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_s32 (int32x2_t __a)
{
  return (uint8x8_t) __a;
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_s64 (int64x1_t __a)
{
  return (uint8x8_t) __a;
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_f32 (float32x2_t __a)
{
  return (uint8x8_t) __a;
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_u16 (uint16x4_t __a)
{
  return (uint8x8_t) __a;
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_u32 (uint32x2_t __a)
{
  return (uint8x8_t) __a;
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_u64 (uint64x1_t __a)
{
  return (uint8x8_t) __a;
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_p8 (poly8x8_t __a)
{
  return (uint8x8_t) __a;
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_p16 (poly16x4_t __a)
{
  return (uint8x8_t) __a;
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_p64 (poly64x1_t __a)
{
  return (uint8x8_t) __a;
}
| |
/* Reinterpret casts to uint8x16_t.  Each retags the 128-bit register
   contents as sixteen unsigned 8-bit lanes; the bit pattern is
   unchanged.  */
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_f64 (float64x2_t __a)
{
  return (uint8x16_t) __a;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_s8 (int8x16_t __a)
{
  return (uint8x16_t) __a;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_s16 (int16x8_t __a)
{
  return (uint8x16_t) __a;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_s32 (int32x4_t __a)
{
  return (uint8x16_t) __a;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_s64 (int64x2_t __a)
{
  return (uint8x16_t) __a;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_f16 (float16x8_t __a)
{
  return (uint8x16_t) __a;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_f32 (float32x4_t __a)
{
  return (uint8x16_t) __a;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_u16 (uint16x8_t __a)
{
  return (uint8x16_t) __a;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_u32 (uint32x4_t __a)
{
  return (uint8x16_t) __a;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_u64 (uint64x2_t __a)
{
  return (uint8x16_t) __a;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_p8 (poly8x16_t __a)
{
  return (uint8x16_t) __a;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_p16 (poly16x8_t __a)
{
  return (uint8x16_t) __a;
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_p64 (poly64x2_t __a)
{
  return (uint8x16_t) __a;
}
| |
| __extension__ extern __inline uint8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_u8_p128 (poly128_t __a) |
| { |
| return (uint8x16_t)__a; |
| } |
| |
/* Reinterpret casts to uint16x4_t.  Each retags the 64-bit register
   contents as four unsigned 16-bit lanes; the bit pattern is unchanged.  */
__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_f16 (float16x4_t __a)
{
  return (uint16x4_t) __a;
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_f64 (float64x1_t __a)
{
  return (uint16x4_t) __a;
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_s8 (int8x8_t __a)
{
  return (uint16x4_t) __a;
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_s16 (int16x4_t __a)
{
  return (uint16x4_t) __a;
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_s32 (int32x2_t __a)
{
  return (uint16x4_t) __a;
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_s64 (int64x1_t __a)
{
  return (uint16x4_t) __a;
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_f32 (float32x2_t __a)
{
  return (uint16x4_t) __a;
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_u8 (uint8x8_t __a)
{
  return (uint16x4_t) __a;
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_u32 (uint32x2_t __a)
{
  return (uint16x4_t) __a;
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_u64 (uint64x1_t __a)
{
  return (uint16x4_t) __a;
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_p8 (poly8x8_t __a)
{
  return (uint16x4_t) __a;
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_p16 (poly16x4_t __a)
{
  return (uint16x4_t) __a;
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_p64 (poly64x1_t __a)
{
  return (uint16x4_t) __a;
}
| |
/* Reinterpret casts to uint16x8_t.  Each retags the 128-bit register
   contents as eight unsigned 16-bit lanes; the bit pattern is
   unchanged.  */
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_f64 (float64x2_t __a)
{
  return (uint16x8_t) __a;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_s8 (int8x16_t __a)
{
  return (uint16x8_t) __a;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_s16 (int16x8_t __a)
{
  return (uint16x8_t) __a;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_s32 (int32x4_t __a)
{
  return (uint16x8_t) __a;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_s64 (int64x2_t __a)
{
  return (uint16x8_t) __a;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_f16 (float16x8_t __a)
{
  return (uint16x8_t) __a;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_f32 (float32x4_t __a)
{
  return (uint16x8_t) __a;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_u8 (uint8x16_t __a)
{
  return (uint16x8_t) __a;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_u32 (uint32x4_t __a)
{
  return (uint16x8_t) __a;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_u64 (uint64x2_t __a)
{
  return (uint16x8_t) __a;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_p8 (poly8x16_t __a)
{
  return (uint16x8_t) __a;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_p16 (poly16x8_t __a)
{
  return (uint16x8_t) __a;
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_p64 (poly64x2_t __a)
{
  return (uint16x8_t) __a;
}
| |
| __extension__ extern __inline uint16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_u16_p128 (poly128_t __a) |
| { |
| return (uint16x8_t)__a; |
| } |
| |
/* Reinterpret casts to uint32x2_t.  Each retags the 64-bit register
   contents as two unsigned 32-bit lanes; the bit pattern is unchanged.  */
__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_f16 (float16x4_t __a)
{
  return (uint32x2_t) __a;
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_f64 (float64x1_t __a)
{
  return (uint32x2_t) __a;
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_s8 (int8x8_t __a)
{
  return (uint32x2_t) __a;
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_s16 (int16x4_t __a)
{
  return (uint32x2_t) __a;
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_s32 (int32x2_t __a)
{
  return (uint32x2_t) __a;
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_s64 (int64x1_t __a)
{
  return (uint32x2_t) __a;
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_f32 (float32x2_t __a)
{
  return (uint32x2_t) __a;
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_u8 (uint8x8_t __a)
{
  return (uint32x2_t) __a;
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_u16 (uint16x4_t __a)
{
  return (uint32x2_t) __a;
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_u64 (uint64x1_t __a)
{
  return (uint32x2_t) __a;
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_p8 (poly8x8_t __a)
{
  return (uint32x2_t) __a;
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_p16 (poly16x4_t __a)
{
  return (uint32x2_t) __a;
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_p64 (poly64x1_t __a)
{
  return (uint32x2_t) __a;
}
| |
/* Reinterpret casts to uint32x4_t.  Each retags the 128-bit register
   contents as four unsigned 32-bit lanes; the bit pattern is
   unchanged.  */
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_f64 (float64x2_t __a)
{
  return (uint32x4_t) __a;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_s8 (int8x16_t __a)
{
  return (uint32x4_t) __a;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_s16 (int16x8_t __a)
{
  return (uint32x4_t) __a;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_s32 (int32x4_t __a)
{
  return (uint32x4_t) __a;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_s64 (int64x2_t __a)
{
  return (uint32x4_t) __a;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_f16 (float16x8_t __a)
{
  return (uint32x4_t) __a;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_f32 (float32x4_t __a)
{
  return (uint32x4_t) __a;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_u8 (uint8x16_t __a)
{
  return (uint32x4_t) __a;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_u16 (uint16x8_t __a)
{
  return (uint32x4_t) __a;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_u64 (uint64x2_t __a)
{
  return (uint32x4_t) __a;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_p8 (poly8x16_t __a)
{
  return (uint32x4_t) __a;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_p16 (poly16x8_t __a)
{
  return (uint32x4_t) __a;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_p64 (poly64x2_t __a)
{
  return (uint32x4_t) __a;
}
| |
| __extension__ extern __inline uint32x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vreinterpretq_u32_p128 (poly128_t __a) |
| { |
| return (uint32x4_t)__a; |
| } |
| |
/* Reinterpret casts between the 128-bit polynomial scalar and a pair of
   64-bit floats, in both directions; bit patterns are unchanged.  */
__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f64_p128 (poly128_t __a)
{
  return (float64x2_t) __a;
}

__extension__ extern __inline poly128_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p128_f64 (float64x2_t __a)
{
  return (poly128_t) __a;
}
| |
| /* vset_lane */ |
| |
/* Set a single lane of a 64-bit vector.  Each wrapper forwards to the
   generic __aarch64_vset_lane_any helper; __index must be a constant
   lane number valid for the vector type.  */
__extension__ extern __inline float16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_f16 (float16_t __elem, float16x4_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_f32 (float32_t __elem, float32x2_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_f64 (float64_t __elem, float64x1_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_p8 (poly8_t __elem, poly8x8_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}

__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_p16 (poly16_t __elem, poly16x4_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}

__extension__ extern __inline poly64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_p64 (poly64_t __elem, poly64x1_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_s8 (int8_t __elem, int8x8_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_s16 (int16_t __elem, int16x4_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_s32 (int32_t __elem, int32x2_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_s64 (int64_t __elem, int64x1_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_u8 (uint8_t __elem, uint8x8_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_u16 (uint16_t __elem, uint16x4_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_u32 (uint32_t __elem, uint32x2_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_u64 (uint64_t __elem, uint64x1_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}
| |
| /* vsetq_lane */ |
| |
/* Set a single lane of a 128-bit vector.  Each wrapper forwards to the
   generic __aarch64_vset_lane_any helper; __index must be a constant
   lane number valid for the vector type.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_f16 (float16_t __elem, float16x8_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_f32 (float32_t __elem, float32x4_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_f64 (float64_t __elem, float64x2_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}

__extension__ extern __inline poly8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_p8 (poly8_t __elem, poly8x16_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}

__extension__ extern __inline poly16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_p16 (poly16_t __elem, poly16x8_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}

__extension__ extern __inline poly64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_p64 (poly64_t __elem, poly64x2_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_s8 (int8_t __elem, int8x16_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_s16 (int16_t __elem, int16x8_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_s32 (int32_t __elem, int32x4_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_s64 (int64_t __elem, int64x2_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_u8 (uint8_t __elem, uint8x16_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_u16 (uint16_t __elem, uint16x8_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_u32 (uint32_t __elem, uint32x4_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_u64 (uint64_t __elem, uint64x2_t __vec, const int __index)
{
  return __aarch64_vset_lane_any (__elem, __vec, __index);
}
| |
/* Extract the low half of a 128-bit vector via the get_low builtins.
   The builtins are declared on the signed-element vector modes, so
   unsigned and polynomial variants cast their operand in and the
   result back out; the casts are bit-pattern reinterpretations only.
   For 64-bit-element vectors the builtin yields a scalar, which is
   wrapped in a one-element vector with a brace initializer.  */
__extension__ extern __inline float16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_f16 (float16x8_t __a)
{
  return __builtin_aarch64_get_lowv8hf (__a);
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_f32 (float32x4_t __a)
{
  return __builtin_aarch64_get_lowv4sf (__a);
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_f64 (float64x2_t __a)
{
  /* Builtin returns a float64 scalar; wrap it as a 1-lane vector.  */
  return (float64x1_t) {__builtin_aarch64_get_lowv2df (__a)};
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_p8 (poly8x16_t __a)
{
  return (poly8x8_t) __builtin_aarch64_get_lowv16qi ((int8x16_t) __a);
}

__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_p16 (poly16x8_t __a)
{
  return (poly16x4_t) __builtin_aarch64_get_lowv8hi ((int16x8_t) __a);
}

__extension__ extern __inline poly64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_p64 (poly64x2_t __a)
{
  return (poly64x1_t) __builtin_aarch64_get_lowv2di ((int64x2_t) __a);
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_s8 (int8x16_t __a)
{
  return __builtin_aarch64_get_lowv16qi (__a);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_s16 (int16x8_t __a)
{
  return __builtin_aarch64_get_lowv8hi (__a);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_s32 (int32x4_t __a)
{
  return __builtin_aarch64_get_lowv4si (__a);
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_s64 (int64x2_t __a)
{
  /* Builtin returns an int64 scalar; wrap it as a 1-lane vector.  */
  return (int64x1_t) {__builtin_aarch64_get_lowv2di (__a)};
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_u8 (uint8x16_t __a)
{
  return (uint8x8_t) __builtin_aarch64_get_lowv16qi ((int8x16_t) __a);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_u16 (uint16x8_t __a)
{
  return (uint16x4_t) __builtin_aarch64_get_lowv8hi ((int16x8_t) __a);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_u32 (uint32x4_t __a)
{
  return (uint32x2_t) __builtin_aarch64_get_lowv4si ((int32x4_t) __a);
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_low_u64 (uint64x2_t __a)
{
  /* Builtin returns an int64 scalar; wrap it as a 1-lane vector.  */
  return (uint64x1_t) {__builtin_aarch64_get_lowv2di ((int64x2_t) __a)};
}
| |
/* vget_high_<t>: return the upper (most-significant) 64-bit half of a
   128-bit vector as a 64-bit vector.  Same casting scheme as the
   vget_low_<t> family: unsigned/poly variants reinterpret through the
   signed builtin, 64-bit-lane results wrap the scalar builtin result
   in a one-lane vector initializer.  */

__extension__ extern __inline float16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_f16 (float16x8_t __a)
{
  return __builtin_aarch64_get_highv8hf (__a);
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_f32 (float32x4_t __a)
{
  return __builtin_aarch64_get_highv4sf (__a);
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_f64 (float64x2_t __a)
{
  return (float64x1_t) {__builtin_aarch64_get_highv2df (__a)};
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_p8 (poly8x16_t __a)
{
  return (poly8x8_t) __builtin_aarch64_get_highv16qi ((int8x16_t) __a);
}

__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_p16 (poly16x8_t __a)
{
  return (poly16x4_t) __builtin_aarch64_get_highv8hi ((int16x8_t) __a);
}

__extension__ extern __inline poly64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_p64 (poly64x2_t __a)
{
  return (poly64x1_t) __builtin_aarch64_get_highv2di ((int64x2_t) __a);
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_s8 (int8x16_t __a)
{
  return __builtin_aarch64_get_highv16qi (__a);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_s16 (int16x8_t __a)
{
  return __builtin_aarch64_get_highv8hi (__a);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_s32 (int32x4_t __a)
{
  return __builtin_aarch64_get_highv4si (__a);
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_s64 (int64x2_t __a)
{
  return (int64x1_t) {__builtin_aarch64_get_highv2di (__a)};
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_u8 (uint8x16_t __a)
{
  return (uint8x8_t) __builtin_aarch64_get_highv16qi ((int8x16_t) __a);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_u16 (uint16x8_t __a)
{
  return (uint16x4_t) __builtin_aarch64_get_highv8hi ((int16x8_t) __a);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_u32 (uint32x4_t __a)
{
  return (uint32x2_t) __builtin_aarch64_get_highv4si ((int32x4_t) __a);
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vget_high_u64 (uint64x2_t __a)
{
  return (uint64x1_t) {__builtin_aarch64_get_highv2di ((int64x2_t) __a)};
}
| |
| |
/* vcombine_<t>: join two 64-bit vectors into one 128-bit vector,
   with __a supplying the lower half and __b the upper half.
   For single-lane (64-bit element) inputs the scalar lane ([0]) is
   passed to the DImode/DFmode combine builtin.  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_s8 (int8x8_t __a, int8x8_t __b)
{
  return (int8x16_t) __builtin_aarch64_combinev8qi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_s16 (int16x4_t __a, int16x4_t __b)
{
  return (int16x8_t) __builtin_aarch64_combinev4hi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_s32 (int32x2_t __a, int32x2_t __b)
{
  return (int32x4_t) __builtin_aarch64_combinev2si (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_s64 (int64x1_t __a, int64x1_t __b)
{
  /* Extract the scalar lane of each one-lane vector for the builtin.  */
  return __builtin_aarch64_combinedi (__a[0], __b[0]);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_f16 (float16x4_t __a, float16x4_t __b)
{
  return __builtin_aarch64_combinev4hf (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_f32 (float32x2_t __a, float32x2_t __b)
{
  return (float32x4_t) __builtin_aarch64_combinev2sf (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_u8 (uint8x8_t __a, uint8x8_t __b)
{
  /* Unsigned variants reinterpret through the signed builtin.  */
  return (uint8x16_t) __builtin_aarch64_combinev8qi ((int8x8_t) __a,
						     (int8x8_t) __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return (uint16x8_t) __builtin_aarch64_combinev4hi ((int16x4_t) __a,
						     (int16x4_t) __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return (uint32x4_t) __builtin_aarch64_combinev2si ((int32x2_t) __a,
						     (int32x2_t) __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_u64 (uint64x1_t __a, uint64x1_t __b)
{
  return (uint64x2_t) __builtin_aarch64_combinedi (__a[0], __b[0]);
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_f64 (float64x1_t __a, float64x1_t __b)
{
  return __builtin_aarch64_combinedf (__a[0], __b[0]);
}

__extension__ extern __inline poly8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_p8 (poly8x8_t __a, poly8x8_t __b)
{
  return (poly8x16_t) __builtin_aarch64_combinev8qi ((int8x8_t) __a,
						     (int8x8_t) __b);
}

__extension__ extern __inline poly16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_p16 (poly16x4_t __a, poly16x4_t __b)
{
  return (poly16x8_t) __builtin_aarch64_combinev4hi ((int16x4_t) __a,
						     (int16x4_t) __b);
}

__extension__ extern __inline poly64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcombine_p64 (poly64x1_t __a, poly64x1_t __b)
{
  /* _ppp suffix: builtin declared with poly argument/return types.  */
  return (poly64x2_t) __builtin_aarch64_combinedi_ppp (__a[0], __b[0]);
}
| |
/* Start of intrinsics that were temporarily implemented with inline
   asm; they are now expressed through compiler builtins.  */
| |
/* vaba_<t>: absolute difference and accumulate on 64-bit vectors,
   per lane: __a + |__b - __c|.  The _uuuu suffix marks builtins whose
   arguments and result are all declared unsigned.  */

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaba_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
{
  return __builtin_aarch64_sabav8qi (__a, __b, __c);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaba_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
{
  return __builtin_aarch64_sabav4hi (__a, __b, __c);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaba_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
{
  return __builtin_aarch64_sabav2si (__a, __b, __c);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaba_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
{
  return __builtin_aarch64_uabav8qi_uuuu (__a, __b, __c);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaba_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
{
  return __builtin_aarch64_uabav4hi_uuuu (__a, __b, __c);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaba_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
{
  return __builtin_aarch64_uabav2si_uuuu (__a, __b, __c);
}
| |
/* vabal_high_<t>: widening absolute difference and accumulate using
   the high halves of the 128-bit source vectors; result lanes are
   twice the width of the source lanes.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabal_high_s8 (int16x8_t __a, int8x16_t __b, int8x16_t __c)
{
  return __builtin_aarch64_sabal2v16qi (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabal_high_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c)
{
  return __builtin_aarch64_sabal2v8hi (__a, __b, __c);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabal_high_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
{
  return __builtin_aarch64_sabal2v4si (__a, __b, __c);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabal_high_u8 (uint16x8_t __a, uint8x16_t __b, uint8x16_t __c)
{
  return __builtin_aarch64_uabal2v16qi_uuuu (__a, __b, __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabal_high_u16 (uint32x4_t __a, uint16x8_t __b, uint16x8_t __c)
{
  return __builtin_aarch64_uabal2v8hi_uuuu (__a, __b, __c);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabal_high_u32 (uint64x2_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return __builtin_aarch64_uabal2v4si_uuuu (__a, __b, __c);
}
| |
/* vabal_<t>: widening absolute difference and accumulate on 64-bit
   source vectors; result lanes are twice the width of the sources.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabal_s8 (int16x8_t __a, int8x8_t __b, int8x8_t __c)
{
  return __builtin_aarch64_sabalv8qi (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
{
  return __builtin_aarch64_sabalv4hi (__a, __b, __c);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
{
  return __builtin_aarch64_sabalv2si (__a, __b, __c);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabal_u8 (uint16x8_t __a, uint8x8_t __b, uint8x8_t __c)
{
  return __builtin_aarch64_uabalv8qi_uuuu (__a, __b, __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabal_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c)
{
  return __builtin_aarch64_uabalv4hi_uuuu (__a, __b, __c);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabal_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c)
{
  return __builtin_aarch64_uabalv2si_uuuu (__a, __b, __c);
}
| |
/* vabaq_<t>: absolute difference and accumulate on full 128-bit
   vectors, per lane: __a + |__b - __c|.  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabaq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c)
{
  return __builtin_aarch64_sabav16qi (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabaq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
{
  return __builtin_aarch64_sabav8hi (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabaq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
{
  return __builtin_aarch64_sabav4si (__a, __b, __c);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabaq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
{
  return __builtin_aarch64_uabav16qi_uuuu (__a, __b, __c);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabaq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
{
  return __builtin_aarch64_uabav8hi_uuuu (__a, __b, __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabaq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return __builtin_aarch64_uabav4si_uuuu (__a, __b, __c);
}
| |
/* vabd_<t>: per-lane absolute difference, |__a - __b|, on 64-bit
   vectors.  The _uuu suffix marks all-unsigned builtin signatures.  */

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabd_s8 (int8x8_t __a, int8x8_t __b)
{
  return __builtin_aarch64_sabdv8qi (__a, __b);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabd_s16 (int16x4_t __a, int16x4_t __b)
{
  return __builtin_aarch64_sabdv4hi (__a, __b);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabd_s32 (int32x2_t __a, int32x2_t __b)
{
  return __builtin_aarch64_sabdv2si (__a, __b);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabd_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return __builtin_aarch64_uabdv8qi_uuu (__a, __b);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabd_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return __builtin_aarch64_uabdv4hi_uuu (__a, __b);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabd_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return __builtin_aarch64_uabdv2si_uuu (__a, __b);
}
| |
/* vabdl_high_<t>: widening absolute difference of the high halves of
   two 128-bit vectors; result lanes are twice the source width.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_high_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_aarch64_sabdl2v16qi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_high_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_aarch64_sabdl2v8hi (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_high_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_aarch64_sabdl2v4si (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_high_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_aarch64_uabdl2v16qi_uuu (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_high_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_aarch64_uabdl2v8hi_uuu (__a, __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_high_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_aarch64_uabdl2v4si_uuu (__a, __b);
}
| |
/* vabdl_<t>: widening absolute difference of two 64-bit vectors;
   result lanes are twice the source width.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_s8 (int8x8_t __a, int8x8_t __b)
{
  return __builtin_aarch64_sabdlv8qi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_s16 (int16x4_t __a, int16x4_t __b)
{
  return __builtin_aarch64_sabdlv4hi (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_s32 (int32x2_t __a, int32x2_t __b)
{
  return __builtin_aarch64_sabdlv2si (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return __builtin_aarch64_uabdlv8qi_uuu (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return __builtin_aarch64_uabdlv4hi_uuu (__a, __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdl_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return __builtin_aarch64_uabdlv2si_uuu (__a, __b);
}
| |
/* vabdq_<t>: per-lane absolute difference, |__a - __b|, on full
   128-bit vectors.  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_aarch64_sabdv16qi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_aarch64_sabdv8hi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_aarch64_sabdv4si (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_aarch64_uabdv16qi_uuu (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_aarch64_uabdv8hi_uuu (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_aarch64_uabdv4si_uuu (__a, __b);
}
| |
/* vaddlv_<t> / vaddlvq_<t>: widening add across all lanes of a
   vector, reducing to a single scalar twice the lane width.  */

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddlv_s8 (int8x8_t __a)
{
  return __builtin_aarch64_saddlvv8qi (__a);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddlv_s16 (int16x4_t __a)
{
  return __builtin_aarch64_saddlvv4hi (__a);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddlv_u8 (uint8x8_t __a)
{
  return __builtin_aarch64_uaddlvv8qi_uu (__a);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddlv_u16 (uint16x4_t __a)
{
  return __builtin_aarch64_uaddlvv4hi_uu (__a);
}

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddlvq_s8 (int8x16_t __a)
{
  return __builtin_aarch64_saddlvv16qi (__a);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddlvq_s16 (int16x8_t __a)
{
  return __builtin_aarch64_saddlvv8hi (__a);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddlvq_s32 (int32x4_t __a)
{
  return __builtin_aarch64_saddlvv4si (__a);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddlvq_u8 (uint8x16_t __a)
{
  return __builtin_aarch64_uaddlvv16qi_uu (__a);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddlvq_u16 (uint16x8_t __a)
{
  return __builtin_aarch64_uaddlvv8hi_uu (__a);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddlvq_u32 (uint32x4_t __a)
{
  return __builtin_aarch64_uaddlvv4si_uu (__a);
}
| |
/* vcvtx*: double-to-single conversions using the round-to-odd mode
   (per the builtin's float_trunc_rodd naming), which avoids double
   rounding when the result is later rounded again.  */

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtx_f32_f64 (float64x2_t __a)
{
  return __builtin_aarch64_float_trunc_rodd_lo_v2sf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtx_high_f32_f64 (float32x2_t __a, float64x2_t __b)
{
  /* __a supplies the low half of the result; the converted __b
     lanes fill the high half.  */
  return __builtin_aarch64_float_trunc_rodd_hi_v4sf (__a, __b);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtxd_f32_f64 (float64_t __a)
{
  return __builtin_aarch64_float_trunc_rodd_df (__a);
}
| |
/* vmla_<t> / vmla_n_<t>: multiply-accumulate on 64-bit vectors.
   vmla_<t> computes __a + __b * __c per lane; the _n_ forms multiply
   every lane of __b by the scalar __c.  Unsigned variants reinterpret
   through the signed builtins (lane-wise mul/add is bit-identical
   for signed and unsigned two's-complement operands).  */

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
{
  return __builtin_aarch64_float_mla_nv2sf (__a, __b, __c);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_n_s16 (int16x4_t __a, int16x4_t __b, int16_t __c)
{
  return __builtin_aarch64_mla_nv4hi (__a, __b, __c);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_n_s32 (int32x2_t __a, int32x2_t __b, int32_t __c)
{
  return __builtin_aarch64_mla_nv2si (__a, __b, __c);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_n_u16 (uint16x4_t __a, uint16x4_t __b, uint16_t __c)
{
  return (uint16x4_t) __builtin_aarch64_mla_nv4hi ((int16x4_t) __a,
						   (int16x4_t) __b,
						   (int16_t) __c);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_n_u32 (uint32x2_t __a, uint32x2_t __b, uint32_t __c)
{
  return (uint32x2_t) __builtin_aarch64_mla_nv2si ((int32x2_t) __a,
						   (int32x2_t) __b,
						   (int32_t) __c);
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
{
  return __builtin_aarch64_mlav8qi (__a, __b, __c);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
{
  return __builtin_aarch64_mlav4hi (__a, __b, __c);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
{
  return __builtin_aarch64_mlav2si (__a, __b, __c);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
{
  return (uint8x8_t) __builtin_aarch64_mlav8qi ((int8x8_t) __a,
						(int8x8_t) __b,
						(int8x8_t) __c);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
{
  return (uint16x4_t) __builtin_aarch64_mlav4hi ((int16x4_t) __a,
						 (int16x4_t) __b,
						 (int16x4_t) __c);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
{
  return (uint32x2_t) __builtin_aarch64_mlav2si ((int32x2_t) __a,
						 (int32x2_t) __b,
						 (int32x2_t) __c);
}
| |
/* vmlal_high_lane_<t> / vmlal_high_laneq_<t>: widening multiply-
   accumulate of the high half of __b by lane __lane of __v, added to
   __a.  _lane takes a 64-bit __v, _laneq a 128-bit __v.  __lane must
   be a constant lane index (enforced by the builtin).  */

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_lane_s16(int32x4_t __a, int16x8_t __b, int16x4_t __v,
		    const int __lane)
{
  return __builtin_aarch64_smlal_hi_lanev8hi (__a, __b, __v, __lane);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_lane_s32(int64x2_t __a, int32x4_t __b, int32x2_t __v,
		    const int __lane)
{
  return __builtin_aarch64_smlal_hi_lanev4si (__a, __b, __v, __lane);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_lane_u16(uint32x4_t __a, uint16x8_t __b, uint16x4_t __v,
		    const int __lane)
{
  return __builtin_aarch64_umlal_hi_lanev8hi_uuuus (__a, __b, __v, __lane);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_lane_u32(uint64x2_t __a, uint32x4_t __b, uint32x2_t __v,
		    const int __lane)
{
  return __builtin_aarch64_umlal_hi_lanev4si_uuuus (__a, __b, __v, __lane);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_laneq_s16(int32x4_t __a, int16x8_t __b, int16x8_t __v,
		     const int __lane)
{
  return __builtin_aarch64_smlal_hi_laneqv8hi (__a, __b, __v, __lane);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_laneq_s32(int64x2_t __a, int32x4_t __b, int32x4_t __v,
		     const int __lane)
{
  return __builtin_aarch64_smlal_hi_laneqv4si (__a, __b, __v, __lane);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_laneq_u16(uint32x4_t __a, uint16x8_t __b, uint16x8_t __v,
		     const int __lane)
{
  return __builtin_aarch64_umlal_hi_laneqv8hi_uuuus (__a, __b, __v, __lane);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_laneq_u32(uint64x2_t __a, uint32x4_t __b, uint32x4_t __v,
		     const int __lane)
{
  return __builtin_aarch64_umlal_hi_laneqv4si_uuuus (__a, __b, __v, __lane);
}
| |
/* vmlal_high_<t> / vmlal_high_n_<t>: widening multiply-accumulate
   using the high halves of the 128-bit multiplicand vectors; the _n_
   forms multiply by a scalar broadcast to every lane.  */

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c)
{
  return __builtin_aarch64_smlal_hi_nv8hi (__a, __b, __c);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c)
{
  return __builtin_aarch64_smlal_hi_nv4si (__a, __b, __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_n_u16 (uint32x4_t __a, uint16x8_t __b, uint16_t __c)
{
  return __builtin_aarch64_umlal_hi_nv8hi_uuuu (__a, __b, __c);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_n_u32 (uint64x2_t __a, uint32x4_t __b, uint32_t __c)
{
  return __builtin_aarch64_umlal_hi_nv4si_uuuu (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_s8 (int16x8_t __a, int8x16_t __b, int8x16_t __c)
{
  return __builtin_aarch64_smlal_hiv16qi (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c)
{
  return __builtin_aarch64_smlal_hiv8hi (__a, __b, __c);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
{
  return __builtin_aarch64_smlal_hiv4si (__a, __b, __c);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_u8 (uint16x8_t __a, uint8x16_t __b, uint8x16_t __c)
{
  return __builtin_aarch64_umlal_hiv16qi_uuuu (__a, __b, __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_u16 (uint32x4_t __a, uint16x8_t __b, uint16x8_t __c)
{
  return __builtin_aarch64_umlal_hiv8hi_uuuu (__a, __b, __c);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_u32 (uint64x2_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return __builtin_aarch64_umlal_hiv4si_uuuu (__a, __b, __c);
}
| |
/* vmlal_lane_<t> / vmlal_laneq_<t>: widening multiply-accumulate of
   __a by lane __c of __b, added to __acc.  _lane takes a 64-bit __b,
   _laneq a 128-bit __b; __c must be a constant lane index.  */

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_lane_s16 (int32x4_t __acc, int16x4_t __a, int16x4_t __b, const int __c)
{
  return __builtin_aarch64_vec_smlal_lane_v4hi (__acc, __a, __b, __c);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_lane_s32 (int64x2_t __acc, int32x2_t __a, int32x2_t __b, const int __c)
{
  return __builtin_aarch64_vec_smlal_lane_v2si (__acc, __a, __b, __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_lane_u16 (uint32x4_t __acc, uint16x4_t __a, uint16x4_t __b, const int __c)
{
  return __builtin_aarch64_vec_umlal_lane_v4hi_uuuus (__acc, __a, __b, __c);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_lane_u32 (uint64x2_t __acc, uint32x2_t __a, uint32x2_t __b, const int __c)
{
  return __builtin_aarch64_vec_umlal_lane_v2si_uuuus (__acc, __a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_laneq_s16 (int32x4_t __acc, int16x4_t __a, int16x8_t __b, const int __c)
{
  return __builtin_aarch64_vec_smlal_laneq_v4hi (__acc, __a, __b, __c);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_laneq_s32 (int64x2_t __acc, int32x2_t __a, int32x4_t __b, const int __c)
{
  return __builtin_aarch64_vec_smlal_laneq_v2si (__acc, __a, __b, __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_laneq_u16 (uint32x4_t __acc, uint16x4_t __a, uint16x8_t __b, const int __c)
{
  return __builtin_aarch64_vec_umlal_laneq_v4hi_uuuus (__acc, __a, __b, __c);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_laneq_u32 (uint64x2_t __acc, uint32x2_t __a, uint32x4_t __b, const int __c)
{
  return __builtin_aarch64_vec_umlal_laneq_v2si_uuuus (__acc, __a, __b, __c);
}
| |
/* vmlal_n: widening multiply-accumulate by a scalar.
   vmlal_n_<t> (__a, __b, __c) accumulates into the wide vector __a the
   lengthening product of each element of __b with the scalar __c
   (SMLAL/UMLAL with a duplicated scalar operand).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
{
  return __builtin_aarch64_smlal_nv4hi (__a, __b, __c);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
{
  return __builtin_aarch64_smlal_nv2si (__a, __b, __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_n_u16 (uint32x4_t __a, uint16x4_t __b, uint16_t __c)
{
  return __builtin_aarch64_umlal_nv4hi_uuuu (__a, __b, __c);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_n_u32 (uint64x2_t __a, uint32x2_t __b, uint32_t __c)
{
  return __builtin_aarch64_umlal_nv2si_uuuu (__a, __b, __c);
}
| |
/* vmlal: widening multiply-accumulate (SMLAL/UMLAL).
   vmlal_<t> (__a, __b, __c) accumulates into the wide vector __a the
   elementwise lengthening product of the two narrow vectors __b and
   __c.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_s8 (int16x8_t __a, int8x8_t __b, int8x8_t __c)
{
  return __builtin_aarch64_smlalv8qi (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
{
  return __builtin_aarch64_smlalv4hi (__a, __b, __c);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
{
  return __builtin_aarch64_smlalv2si (__a, __b, __c);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_u8 (uint16x8_t __a, uint8x8_t __b, uint8x8_t __c)
{
  return __builtin_aarch64_umlalv8qi_uuuu (__a, __b, __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c)
{
  return __builtin_aarch64_umlalv4hi_uuuu (__a, __b, __c);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c)
{
  return __builtin_aarch64_umlalv2si_uuuu (__a, __b, __c);
}
| |
/* vmlaq_n: non-widening multiply-accumulate of a quadword vector by a
   scalar: elementwise __a + __b * __c.  The unsigned integer forms reuse
   the signed builtin through casts; with wrap-around arithmetic the bit
   pattern of a multiply-add is the same for signed and unsigned
   operands, so the result is exact.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
{
  return __builtin_aarch64_float_mla_nv4sf (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return __builtin_aarch64_mla_nv8hi (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
{
  return __builtin_aarch64_mla_nv4si (__a, __b, __c);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
{
  return (uint16x8_t) __builtin_aarch64_mla_nv8hi ((int16x8_t) __a,
						   (int16x8_t) __b,
						   (int16_t) __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
{
  return (uint32x4_t) __builtin_aarch64_mla_nv4si ((int32x4_t) __a,
						   (int32x4_t) __b,
						   (int32_t) __c);
}
| |
/* vmlaq: non-widening elementwise multiply-accumulate on quadword
   vectors: __a + __b * __c (MLA).  Unsigned forms reuse the signed
   builtin through casts, which is exact under wrap-around
   arithmetic.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c)
{
  return __builtin_aarch64_mlav16qi (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
{
  return __builtin_aarch64_mlav8hi (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
{
  return __builtin_aarch64_mlav4si (__a, __b, __c);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
{
  return (uint8x16_t) __builtin_aarch64_mlav16qi ((int8x16_t) __a,
						  (int8x16_t) __b,
						  (int8x16_t) __c);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
{
  return (uint16x8_t) __builtin_aarch64_mlav8hi ((int16x8_t) __a,
						 (int16x8_t) __b,
						 (int16x8_t) __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return (uint32x4_t) __builtin_aarch64_mlav4si ((int32x4_t) __a,
						 (int32x4_t) __b,
						 (int32x4_t) __c);
}
| |
/* vmls_n: non-widening multiply-subtract of a doubleword vector by a
   scalar: elementwise __a - __b * __c.  Unsigned forms reuse the signed
   builtin through casts (exact under wrap-around arithmetic).  */
__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
{
  return __builtin_aarch64_float_mls_nv2sf (__a, __b, __c);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_n_s16 (int16x4_t __a, int16x4_t __b, int16_t __c)
{
  return __builtin_aarch64_mls_nv4hi (__a, __b, __c);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_n_s32 (int32x2_t __a, int32x2_t __b, int32_t __c)
{
  return __builtin_aarch64_mls_nv2si (__a, __b, __c);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_n_u16 (uint16x4_t __a, uint16x4_t __b, uint16_t __c)
{
  return (uint16x4_t) __builtin_aarch64_mls_nv4hi ((int16x4_t) __a,
						   (int16x4_t) __b,
						   (int16_t) __c);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_n_u32 (uint32x2_t __a, uint32x2_t __b, uint32_t __c)
{
  return (uint32x2_t) __builtin_aarch64_mls_nv2si ((int32x2_t) __a,
						   (int32x2_t) __b,
						   (int32_t) __c);
}
| |
/* vmls: non-widening elementwise multiply-subtract on doubleword
   vectors: __a - __b * __c (MLS).  Unsigned forms reuse the signed
   builtin through casts (exact under wrap-around arithmetic).  */
__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
{
  return __builtin_aarch64_mlsv8qi (__a, __b, __c);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
{
  return __builtin_aarch64_mlsv4hi (__a, __b, __c);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
{
  return __builtin_aarch64_mlsv2si (__a, __b, __c);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
{
  return (uint8x8_t) __builtin_aarch64_mlsv8qi ((int8x8_t) __a,
						(int8x8_t) __b,
						(int8x8_t) __c);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
{
  return (uint16x4_t) __builtin_aarch64_mlsv4hi ((int16x4_t) __a,
						 (int16x4_t) __b,
						 (int16x4_t) __c);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
{
  return (uint32x2_t) __builtin_aarch64_mlsv2si ((int32x2_t) __a,
						 (int32x2_t) __b,
						 (int32x2_t) __c);
}
| |
| __extension__ extern __inline int32x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vmlsl_high_lane_s16(int32x4_t __a, int16x8_t __b, int16x4_t __v, |
| const int __lane) |
| { |
| return __builtin_aarch64_smlsl_hi_lanev8hi (__a, __b, __v, __lane); |
| } |
| |
| __extension__ extern __inline int64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vmlsl_high_lane_s32(int64x2_t __a, int32x4_t __b, int32x2_t __v, |
| const int __lane) |
| { |
| return __builtin_aarch64_smlsl_hi_lanev4si (__a, __b, __v, __lane); |
| } |
| |
| __extension__ extern __inline uint32x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vmlsl_high_lane_u16(uint32x4_t __a, uint16x8_t __b, uint16x4_t __v, |
| const int __lane) |
| { |
| return __builtin_aarch64_umlsl_hi_lanev8hi_uuuus (__a, __b, __v, __lane); |
| } |
| |
| __extension__ extern __inline uint64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vmlsl_high_lane_u32(uint64x2_t __a, uint32x4_t __b, uint32x2_t __v, |
| const int __lane) |
| { |
| return __builtin_aarch64_umlsl_hi_lanev4si_uuuus (__a, __b, __v, __lane); |
| } |
| |
| __extension__ extern __inline int32x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vmlsl_high_laneq_s16(int32x4_t __a, int16x8_t __b, int16x8_t __v, |
| const int __lane) |
| { |
| return __builtin_aarch64_smlsl_hi_laneqv8hi (__a, __b, __v, __lane); |
| } |
| |
| __extension__ extern __inline int64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vmlsl_high_laneq_s32(int64x2_t __a, int32x4_t __b, int32x4_t __v, |
| const int __lane) |
| { |
| return __builtin_aarch64_smlsl_hi_laneqv4si (__a, __b, __v, __lane); |
| } |
| |
| __extension__ extern __inline uint32x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vmlsl_high_laneq_u16(uint32x4_t __a, uint16x8_t __b, uint16x8_t __v, |
| const int __lane) |
| { |
| return __builtin_aarch64_umlsl_hi_laneqv8hi_uuuus (__a, __b, __v, __lane); |
| } |
| |
| __extension__ extern __inline uint64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vmlsl_high_laneq_u32(uint64x2_t __a, uint32x4_t __b, uint32x4_t __v, |
| const int __lane) |
| { |
| return __builtin_aarch64_umlsl_hi_laneqv4si_uuuus (__a, __b, __v, __lane); |
| } |
| |
/* vmlsl_high_n: widening multiply-subtract by scalar on the high halves.
   Subtracts from the wide vector __a the lengthening product of the
   upper half of __b with the scalar __c (SMLSL2/UMLSL2).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_high_n_s16 (int32x4_t __a, int16x8_t __b, int16_t __c)
{
  return __builtin_aarch64_smlsl_hi_nv8hi (__a, __b, __c);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_high_n_s32 (int64x2_t __a, int32x4_t __b, int32_t __c)
{
  return __builtin_aarch64_smlsl_hi_nv4si (__a, __b, __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_high_n_u16 (uint32x4_t __a, uint16x8_t __b, uint16_t __c)
{
  return __builtin_aarch64_umlsl_hi_nv8hi_uuuu (__a, __b, __c);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_high_n_u32 (uint64x2_t __a, uint32x4_t __b, uint32_t __c)
{
  return __builtin_aarch64_umlsl_hi_nv4si_uuuu (__a, __b, __c);
}
| |
/* vmlsl_high: widening multiply-subtract on the high halves
   (SMLSL2/UMLSL2).  Subtracts from the wide vector __a the elementwise
   lengthening product of the upper halves of __b and __c.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_high_s8 (int16x8_t __a, int8x16_t __b, int8x16_t __c)
{
  return __builtin_aarch64_smlsl_hiv16qi (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_high_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c)
{
  return __builtin_aarch64_smlsl_hiv8hi (__a, __b, __c);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_high_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
{
  return __builtin_aarch64_smlsl_hiv4si (__a, __b, __c);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_high_u8 (uint16x8_t __a, uint8x16_t __b, uint8x16_t __c)
{
  return __builtin_aarch64_umlsl_hiv16qi_uuuu (__a, __b, __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_high_u16 (uint32x4_t __a, uint16x8_t __b, uint16x8_t __c)
{
  return __builtin_aarch64_umlsl_hiv8hi_uuuu (__a, __b, __c);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_high_u32 (uint64x2_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return __builtin_aarch64_umlsl_hiv4si_uuuu (__a, __b, __c);
}
| |
/* vmlsl_lane{,q}: widening multiply-subtract by element (SMLSL/UMLSL by
   element).  Subtracts from the wide vector __a the lengthening product
   of __b and lane __lane of __v; the "laneq" forms select the lane from
   a 128-bit vector.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __v, const int __lane)
{
  return __builtin_aarch64_vec_smlsl_lane_v4hi (__a, __b, __v, __lane);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __v, const int __lane)
{
  return __builtin_aarch64_vec_smlsl_lane_v2si (__a, __b, __v, __lane);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_lane_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __v,
		const int __lane)
{
  return __builtin_aarch64_vec_umlsl_lane_v4hi_uuuus (__a, __b, __v, __lane);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_lane_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __v,
		const int __lane)
{
  return __builtin_aarch64_vec_umlsl_lane_v2si_uuuus (__a, __b, __v, __lane);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_laneq_s16 (int32x4_t __a, int16x4_t __b, int16x8_t __v, const int __lane)
{
  return __builtin_aarch64_vec_smlsl_laneq_v4hi (__a, __b, __v, __lane);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_laneq_s32 (int64x2_t __a, int32x2_t __b, int32x4_t __v, const int __lane)
{
  return __builtin_aarch64_vec_smlsl_laneq_v2si (__a, __b, __v, __lane);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_laneq_u16 (uint32x4_t __a, uint16x4_t __b, uint16x8_t __v,
		 const int __lane)
{
  return __builtin_aarch64_vec_umlsl_laneq_v4hi_uuuus (__a, __b, __v, __lane);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_laneq_u32 (uint64x2_t __a, uint32x2_t __b, uint32x4_t __v,
		 const int __lane)
{
  return __builtin_aarch64_vec_umlsl_laneq_v2si_uuuus (__a, __b, __v, __lane);
}
| |
/* vmlsl_n: widening multiply-subtract by a scalar.  Subtracts from the
   wide vector __a the lengthening product of each element of __b with
   the scalar __c (SMLSL/UMLSL with a duplicated scalar operand).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_n_s16 (int32x4_t __a, int16x4_t __b, int16_t __c)
{
  return __builtin_aarch64_smlsl_nv4hi (__a, __b, __c);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_n_s32 (int64x2_t __a, int32x2_t __b, int32_t __c)
{
  return __builtin_aarch64_smlsl_nv2si (__a, __b, __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_n_u16 (uint32x4_t __a, uint16x4_t __b, uint16_t __c)
{
  return __builtin_aarch64_umlsl_nv4hi_uuuu (__a, __b, __c);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_n_u32 (uint64x2_t __a, uint32x2_t __b, uint32_t __c)
{
  return __builtin_aarch64_umlsl_nv2si_uuuu (__a, __b, __c);
}
| |
/* vmlsl: widening multiply-subtract (SMLSL/UMLSL).  Subtracts from the
   wide vector __a the elementwise lengthening product of the narrow
   vectors __b and __c.  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_s8 (int16x8_t __a, int8x8_t __b, int8x8_t __c)
{
  return __builtin_aarch64_smlslv8qi (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
{
  return __builtin_aarch64_smlslv4hi (__a, __b, __c);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
{
  return __builtin_aarch64_smlslv2si (__a, __b, __c);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_u8 (uint16x8_t __a, uint8x8_t __b, uint8x8_t __c)
{
  return __builtin_aarch64_umlslv8qi_uuuu (__a, __b, __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c)
{
  return __builtin_aarch64_umlslv4hi_uuuu (__a, __b, __c);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c)
{
  return __builtin_aarch64_umlslv2si_uuuu (__a, __b, __c);
}
| |
/* vmlsq_n: non-widening multiply-subtract of a quadword vector by a
   scalar: elementwise __a - __b * __c.  Unsigned forms reuse the signed
   builtin through casts (exact under wrap-around arithmetic).  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
{
  return __builtin_aarch64_float_mls_nv4sf (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_n_s16 (int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return __builtin_aarch64_mls_nv8hi (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_n_s32 (int32x4_t __a, int32x4_t __b, int32_t __c)
{
  return __builtin_aarch64_mls_nv4si (__a, __b, __c);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_n_u16 (uint16x8_t __a, uint16x8_t __b, uint16_t __c)
{
  return (uint16x8_t) __builtin_aarch64_mls_nv8hi ((int16x8_t) __a,
						   (int16x8_t) __b,
						   (int16_t) __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_n_u32 (uint32x4_t __a, uint32x4_t __b, uint32_t __c)
{
  return (uint32x4_t) __builtin_aarch64_mls_nv4si ((int32x4_t) __a,
						   (int32x4_t) __b,
						   (int32_t) __c);
}
| |
/* vmlsq: non-widening elementwise multiply-subtract on quadword
   vectors: __a - __b * __c (MLS).  Unsigned forms reuse the signed
   builtin through casts (exact under wrap-around arithmetic).  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c)
{
  return __builtin_aarch64_mlsv16qi (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
{
  return __builtin_aarch64_mlsv8hi (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
{
  return __builtin_aarch64_mlsv4si (__a, __b, __c);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
{
  return (uint8x16_t) __builtin_aarch64_mlsv16qi ((int8x16_t) __a,
						  (int8x16_t) __b,
						  (int8x16_t) __c);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
{
  return (uint16x8_t) __builtin_aarch64_mlsv8hi ((int16x8_t) __a,
						 (int16x8_t) __b,
						 (int16x8_t) __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return (uint32x4_t) __builtin_aarch64_mlsv4si ((int32x4_t) __a,
						 (int32x4_t) __b,
						 (int32x4_t) __c);
}
| |
/* vmovl_high: lengthen the upper half of a quadword vector, widening
   each element to twice its width (sign-extend for signed types,
   zero-extend for unsigned: SSHLL2/USHLL2 #0 forms).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovl_high_s8 (int8x16_t __a)
{
  return __builtin_aarch64_vec_unpacks_hi_v16qi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovl_high_s16 (int16x8_t __a)
{
  return __builtin_aarch64_vec_unpacks_hi_v8hi (__a);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovl_high_s32 (int32x4_t __a)
{
  return __builtin_aarch64_vec_unpacks_hi_v4si (__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovl_high_u8 (uint8x16_t __a)
{
  return __builtin_aarch64_vec_unpacku_hi_v16qi_uu (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovl_high_u16 (uint16x8_t __a)
{
  return __builtin_aarch64_vec_unpacku_hi_v8hi_uu (__a);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovl_high_u32 (uint32x4_t __a)
{
  return __builtin_aarch64_vec_unpacku_hi_v4si_uu (__a);
}
| |
/* vmovl: lengthen a doubleword vector, widening each element to twice
   its width (sign-extend for signed types, zero-extend for unsigned).  */
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovl_s8 (int8x8_t __a)
{
  return __builtin_aarch64_sxtlv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovl_s16 (int16x4_t __a)
{
  return __builtin_aarch64_sxtlv4si (__a);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovl_s32 (int32x2_t __a)
{
  return __builtin_aarch64_sxtlv2di (__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovl_u8 (uint8x8_t __a)
{
  return __builtin_aarch64_uxtlv8hi_uu (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovl_u16 (uint16x4_t __a)
{
  return __builtin_aarch64_uxtlv4si_uu (__a);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovl_u32 (uint32x2_t __a)
{
  return __builtin_aarch64_uxtlv2di_uu (__a);
}
| |
/* vmovn_high: narrow __b to half-width elements (XTN2) and place the
   result in the upper half of the returned quadword vector; __a supplies
   the lower half.  Unsigned forms reuse the signed builtin through
   casts, which only reinterprets the bit patterns.  */
__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_high_s16 (int8x8_t __a, int16x8_t __b)
{
  return __builtin_aarch64_xtn2v8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_high_s32 (int16x4_t __a, int32x4_t __b)
{
  return __builtin_aarch64_xtn2v4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_high_s64 (int32x2_t __a, int64x2_t __b)
{
  return __builtin_aarch64_xtn2v2di (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_high_u16 (uint8x8_t __a, uint16x8_t __b)
{
  return (uint8x16_t)
    __builtin_aarch64_xtn2v8hi ((int8x8_t) __a, (int16x8_t) __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_high_u32 (uint16x4_t __a, uint32x4_t __b)
{
  return (uint16x8_t)
    __builtin_aarch64_xtn2v4si ((int16x4_t) __a, (int32x4_t) __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmovn_high_u64 (uint32x2_t __a, uint64x2_t __b)
{
  return (uint32x4_t)
    __builtin_aarch64_xtn2v2di ((int32x2_t) __a, (int64x2_t) __b);
}
| |
| __extension__ extern __inline int8x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vmovn_s16 (int16x8_t __a) |
| { |
| return __builtin_aarch64_xtnv8hi (__a); |
| } |
| |
| __extension__ extern __inline int16x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vmovn_s32 (int32x4_t __a) |
| { |
| return __builtin_aarch64_xtnv4si (__a); |
| } |
| |
| __extension__ extern __inline int32x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vmovn_s64 (int64x2_t __a) |
| { |
| return __builtin_aarch64_xtnv2di (__a); |
| } |
| |
| __extension__ extern __inline uint8x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vmovn_u16 (uint16x8_t __a) |
| { |
| return (uint8x8_t)__builtin_aarch64_xtnv8hi ((int16x8_t) __a); |
| } |
| |
| __extension__ extern __inline uint16x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vmovn_u32 (uint32x4_t __a) |
| { |
| return (uint16x4_t) __builtin_aarch64_xtnv4si ((int32x4_t )__a); |
| } |
| |
| __extension__ extern __inline uint32x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vmovn_u64 (uint64x2_t __a) |
| { |
| return (uint32x2_t) __builtin_aarch64_xtnv2di ((int64x2_t) __a); |
| } |
| |
| __extension__ extern __inline int8x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vshrn_n_s16 (int16x8_t __a, const int __b) |
| { |
| return __builtin_aarch64_shrnv8hi (__a, __b); |
| } |
| |
| __extension__ extern __inline int16x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vshrn_n_s32 (int32x4_t __a, const int __b) |
| { |
| return __builtin_aarch64_shrnv4si (__a, __b); |
| } |
| |
| __extension__ extern __inline int32x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vshrn_n_s64 (int64x2_t __a, const int __b) |
| { |
| return __builtin_aarch64_shrnv2di (__a, __b); |
| } |
| |
| __extension__ extern __inline uint8x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vshrn_n_u16 (uint16x8_t __a, const int __b) |
| { |
| return (uint8x8_t)__builtin_aarch64_shrnv8hi ((int16x8_t)__a, __b); |
| } |
| |
| __extension__ extern __inline uint16x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vshrn_n_u32 (uint32x4_t __a, const int __b) |
| { |
| return (uint16x4_t)__builtin_aarch64_shrnv4si ((int32x4_t)__a, __b); |
| } |
| |
| __extension__ extern __inline uint32x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vshrn_n_u64 (uint64x2_t __a, const int __b) |
| { |
| return (uint32x2_t)__builtin_aarch64_shrnv2di ((int64x2_t)__a, __b); |
| } |
| |
/* vmull_high_lane{,q}: widening multiply by element on the high half
   (SMULL2/UMULL2 by element).  Returns the lengthening product of the
   upper half of __a and lane __lane of __v; the "laneq" forms select
   the lane from a 128-bit vector.  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_lane_s16 (int16x8_t __a, int16x4_t __v, const int __lane)
{
  return __builtin_aarch64_smull_hi_lanev8hi (__a, __v, __lane);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_lane_s32 (int32x4_t __a, int32x2_t __v, const int __lane)
{
  return __builtin_aarch64_smull_hi_lanev4si (__a, __v, __lane);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_lane_u16 (uint16x8_t __a, uint16x4_t __v, const int __lane)
{
  return __builtin_aarch64_umull_hi_lanev8hi_uuus (__a, __v, __lane);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_lane_u32 (uint32x4_t __a, uint32x2_t __v, const int __lane)
{
  return __builtin_aarch64_umull_hi_lanev4si_uuus (__a, __v, __lane);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_laneq_s16 (int16x8_t __a, int16x8_t __v, const int __lane)
{
  return __builtin_aarch64_smull_hi_laneqv8hi (__a, __v, __lane);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_laneq_s32 (int32x4_t __a, int32x4_t __v, const int __lane)
{
  return __builtin_aarch64_smull_hi_laneqv4si (__a, __v, __lane);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_laneq_u16 (uint16x8_t __a, uint16x8_t __v, const int __lane)
{
  return __builtin_aarch64_umull_hi_laneqv8hi_uuus (__a, __v, __lane);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_laneq_u32 (uint32x4_t __a, uint32x4_t __v, const int __lane)
{
  return __builtin_aarch64_umull_hi_laneqv4si_uuus (__a, __v, __lane);
}
| |
/* vmull_high_n: widening multiply of the high half by a scalar
   (SMULL2/UMULL2 with a duplicated scalar operand).  */
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_aarch64_smull_hi_nv8hi (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_aarch64_smull_hi_nv4si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_n_u16 (uint16x8_t __a, uint16_t __b)
{
  return __builtin_aarch64_umull_hi_nv8hi_uuu (__a, __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_n_u32 (uint32x4_t __a, uint32_t __b)
{
  return __builtin_aarch64_umull_hi_nv4si_uuu (__a, __b);
}
| |
| __extension__ extern __inline poly16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vmull_high_p8 (poly8x16_t __a, poly8x16_t __b) |
| { |
| return (poly16x8_t) __builtin_aarch64_pmull_hiv16qi ((int8x16_t) __a, |
| (int8x16_t) __b); |
| } |
| |
/* Element-wise widening multiply of the high halves of __a and __b
   (SMULL2/UMULL2, vector-by-vector forms).  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_aarch64_vec_widen_smult_hi_v16qi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_aarch64_vec_widen_smult_hi_v8hi (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_aarch64_vec_widen_smult_hi_v4si (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return __builtin_aarch64_vec_widen_umult_hi_v16qi_uuu (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return __builtin_aarch64_vec_widen_umult_hi_v8hi_uuu (__a, __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return __builtin_aarch64_vec_widen_umult_hi_v4si_uuu (__a, __b);
}
| |
/* Widening multiply of 64-bit __a by one element of __b (SMULL/UMULL,
   by-element forms).  The _lane_ variants index a 64-bit vector, the
   _laneq_ variants index a full 128-bit vector.  */

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
{
  return __builtin_aarch64_vec_smult_lane_v4hi (__a, __b, __c);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
{
  return __builtin_aarch64_vec_smult_lane_v2si (__a, __b, __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_lane_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
{
  return __builtin_aarch64_vec_umult_lane_v4hi_uuus (__a, __b, __c);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
{
  return __builtin_aarch64_vec_umult_lane_v2si_uuus (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __c)
{
  return __builtin_aarch64_vec_smult_laneq_v4hi (__a, __b, __c);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __c)
{
  return __builtin_aarch64_vec_smult_laneq_v2si (__a, __b, __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_laneq_u16 (uint16x4_t __a, uint16x8_t __b, const int __c)
{
  return __builtin_aarch64_vec_umult_laneq_v4hi_uuus (__a, __b, __c);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_laneq_u32 (uint32x2_t __a, uint32x4_t __b, const int __c)
{
  return __builtin_aarch64_vec_umult_laneq_v2si_uuus (__a, __b, __c);
}
| |
/* Widening multiply of 64-bit __a by the scalar __b (SMULL/UMULL,
   by-scalar forms).  */

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_n_s16 (int16x4_t __a, int16_t __b)
{
  return __builtin_aarch64_smull_nv4hi (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_n_s32 (int32x2_t __a, int32_t __b)
{
  return __builtin_aarch64_smull_nv2si (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_n_u16 (uint16x4_t __a, uint16_t __b)
{
  return __builtin_aarch64_umull_nv4hi_uuu (__a, __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_n_u32 (uint32x2_t __a, uint32_t __b)
{
  return __builtin_aarch64_umull_nv2si_uuu (__a, __b);
}

/* Polynomial widening multiply (PMULL).  Poly types are cast through the
   signed vector modes the builtin is declared with.  */
__extension__ extern __inline poly16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_p8 (poly8x8_t __a, poly8x8_t __b)
{
  return (poly16x8_t) __builtin_aarch64_pmullv8qi ((int8x8_t) __a,
						   (int8x8_t) __b);
}
| |
/* Element-wise widening multiply of two 64-bit vectors (SMULL/UMULL,
   low-half vector-by-vector forms).  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_s8 (int8x8_t __a, int8x8_t __b)
{
  return __builtin_aarch64_intrinsic_vec_smult_lo_v8qi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_s16 (int16x4_t __a, int16x4_t __b)
{
  return __builtin_aarch64_intrinsic_vec_smult_lo_v4hi (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_s32 (int32x2_t __a, int32x2_t __b)
{
  return __builtin_aarch64_intrinsic_vec_smult_lo_v2si (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return __builtin_aarch64_intrinsic_vec_umult_lo_v8qi_uuu (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return __builtin_aarch64_intrinsic_vec_umult_lo_v4hi_uuu (__a, __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmull_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return __builtin_aarch64_intrinsic_vec_umult_lo_v2si_uuu (__a, __b);
}
| |
/* Pairwise add-accumulate long (SADALP/UADALP): adjacent pairs of __b are
   summed into wider elements and accumulated into __a.  */

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadal_s8 (int16x4_t __a, int8x8_t __b)
{
  return __builtin_aarch64_sadalpv8qi (__a, __b);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadal_s16 (int32x2_t __a, int16x4_t __b)
{
  return __builtin_aarch64_sadalpv4hi (__a, __b);
}

/* The single-element 64-bit accumulator is passed to the builtin as a
   scalar (__a[0]) and the scalar result re-wrapped as int64x1_t.  */
__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadal_s32 (int64x1_t __a, int32x2_t __b)
{
  return (int64x1_t) __builtin_aarch64_sadalpv2si (__a[0], __b);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadal_u8 (uint16x4_t __a, uint8x8_t __b)
{
  return __builtin_aarch64_uadalpv8qi_uuu (__a, __b);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadal_u16 (uint32x2_t __a, uint16x4_t __b)
{
  return __builtin_aarch64_uadalpv4hi_uuu (__a, __b);
}

/* As for vpadal_s32, the uint64x1_t accumulator is unwrapped to a scalar
   for the builtin call.  */
__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadal_u32 (uint64x1_t __a, uint32x2_t __b)
{
  return (uint64x1_t) __builtin_aarch64_uadalpv2si_uuu (__a[0], __b);
}

/* 128-bit variants of the pairwise add-accumulate long operations.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadalq_s8 (int16x8_t __a, int8x16_t __b)
{
  return __builtin_aarch64_sadalpv16qi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadalq_s16 (int32x4_t __a, int16x8_t __b)
{
  return __builtin_aarch64_sadalpv8hi (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadalq_s32 (int64x2_t __a, int32x4_t __b)
{
  return __builtin_aarch64_sadalpv4si (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadalq_u8 (uint16x8_t __a, uint8x16_t __b)
{
  return __builtin_aarch64_uadalpv16qi_uuu (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadalq_u16 (uint32x4_t __a, uint16x8_t __b)
{
  return __builtin_aarch64_uadalpv8hi_uuu (__a, __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpadalq_u32 (uint64x2_t __a, uint32x4_t __b)
{
  return __builtin_aarch64_uadalpv4si_uuu (__a, __b);
}
| |
/* Pairwise add long (SADDLP/UADDLP): adjacent pairs of elements of __a are
   summed into elements twice as wide.  */

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddl_s8 (int8x8_t __a)
{
  return __builtin_aarch64_saddlpv8qi (__a);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddl_s16 (int16x4_t __a)
{
  return __builtin_aarch64_saddlpv4hi (__a);
}

/* The builtin returns a 64-bit scalar, re-wrapped as int64x1_t.  */
__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddl_s32 (int32x2_t __a)
{
  return (int64x1_t) __builtin_aarch64_saddlpv2si (__a);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddl_u8 (uint8x8_t __a)
{
  return __builtin_aarch64_uaddlpv8qi_uu (__a);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddl_u16 (uint16x4_t __a)
{
  return __builtin_aarch64_uaddlpv4hi_uu (__a);
}

/* As for vpaddl_s32, the scalar result is re-wrapped as uint64x1_t.  */
__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddl_u32 (uint32x2_t __a)
{
  return (uint64x1_t) __builtin_aarch64_uaddlpv2si_uu (__a);
}

/* 128-bit variants of the pairwise add long operations.  */

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddlq_s8 (int8x16_t __a)
{
  return __builtin_aarch64_saddlpv16qi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddlq_s16 (int16x8_t __a)
{
  return __builtin_aarch64_saddlpv8hi (__a);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddlq_s32 (int32x4_t __a)
{
  return __builtin_aarch64_saddlpv4si (__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddlq_u8 (uint8x16_t __a)
{
  return __builtin_aarch64_uaddlpv16qi_uu (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddlq_u16 (uint16x8_t __a)
{
  return __builtin_aarch64_uaddlpv8hi_uu (__a);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddlq_u32 (uint32x4_t __a)
{
  return __builtin_aarch64_uaddlpv4si_uu (__a);
}
| |
/* Pairwise add (ADDP, 128-bit): element i of the result is the sum of an
   adjacent pair drawn from the concatenation of __a and __b.  The unsigned
   variants cast through the signed builtin, which is safe because addition
   is sign-agnostic at the bit level.  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddq_s8 (int8x16_t __a, int8x16_t __b)
{
  return __builtin_aarch64_addpv16qi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddq_s16 (int16x8_t __a, int16x8_t __b)
{
  return __builtin_aarch64_addpv8hi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddq_s32 (int32x4_t __a, int32x4_t __b)
{
  return __builtin_aarch64_addpv4si (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddq_s64 (int64x2_t __a, int64x2_t __b)
{
  return __builtin_aarch64_addpv2di (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return (uint8x16_t) __builtin_aarch64_addpv16qi ((int8x16_t) __a,
						   (int8x16_t) __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return (uint16x8_t) __builtin_aarch64_addpv8hi ((int16x8_t) __a,
						  (int16x8_t) __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return (uint32x4_t) __builtin_aarch64_addpv4si ((int32x4_t) __a,
						  (int32x4_t) __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddq_u64 (uint64x2_t __a, uint64x2_t __b)
{
  return (uint64x2_t) __builtin_aarch64_addpv2di ((int64x2_t) __a,
						  (int64x2_t) __b);
}
| |
/* Saturating doubling multiply returning high half, by scalar
   (SQDMULH, by-element forms).  */

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulh_n_s16 (int16x4_t __a, int16_t __b)
{
  return __builtin_aarch64_sqdmulh_nv4hi (__a, __b);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulh_n_s32 (int32x2_t __a, int32_t __b)
{
  return __builtin_aarch64_sqdmulh_nv2si (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulhq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_aarch64_sqdmulh_nv8hi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulhq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_aarch64_sqdmulh_nv4si (__a, __b);
}
| |
/* Saturating narrow into the high half (SQXTN2/UQXTN2): __b is narrowed
   with saturation and written to the upper half of a result whose lower
   half is __a.  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovn_high_s16 (int8x8_t __a, int16x8_t __b)
{
  return __builtin_aarch64_sqxtn2v8hi (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovn_high_s32 (int16x4_t __a, int32x4_t __b)
{
  return __builtin_aarch64_sqxtn2v4si (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovn_high_s64 (int32x2_t __a, int64x2_t __b)
{
  return __builtin_aarch64_sqxtn2v2di (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovn_high_u16 (uint8x8_t __a, uint16x8_t __b)
{
  return __builtin_aarch64_uqxtn2v8hi_uuu (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovn_high_u32 (uint16x4_t __a, uint32x4_t __b)
{
  return __builtin_aarch64_uqxtn2v4si_uuu (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovn_high_u64 (uint32x2_t __a, uint64x2_t __b)
{
  return __builtin_aarch64_uqxtn2v2di_uuu (__a, __b);
}

/* Saturating narrow of signed input to unsigned output, into the high
   half (SQXTUN2).  */

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovun_high_s16 (uint8x8_t __a, int16x8_t __b)
{
  return __builtin_aarch64_sqxtun2v8hi_uus (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovun_high_s32 (uint16x4_t __a, int32x4_t __b)
{
  return __builtin_aarch64_sqxtun2v4si_uus (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqmovun_high_s64 (uint32x2_t __a, int64x2_t __b)
{
  return __builtin_aarch64_sqxtun2v2di_uus (__a, __b);
}
| |
/* Saturating rounding doubling multiply returning high half, by scalar
   (SQRDMULH, by-element forms).  */

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulh_n_s16 (int16x4_t __a, int16_t __b)
{
  return __builtin_aarch64_sqrdmulh_nv4hi (__a, __b);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulh_n_s32 (int32x2_t __a, int32_t __b)
{
  return __builtin_aarch64_sqrdmulh_nv2si (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulhq_n_s16 (int16x8_t __a, int16_t __b)
{
  return __builtin_aarch64_sqrdmulh_nv8hi (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulhq_n_s32 (int32x4_t __a, int32_t __b)
{
  return __builtin_aarch64_sqrdmulh_nv4si (__a, __b);
}
| |
/* Saturating rounding shift right narrow by immediate __c, into the high
   half (SQRSHRN2/UQRSHRN2): __b is shifted, rounded, saturated and
   narrowed into the upper half of a result whose lower half is __a.  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshrn_high_n_s16 (int8x8_t __a, int16x8_t __b, const int __c)
{
  return __builtin_aarch64_sqrshrn2_nv8hi (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshrn_high_n_s32 (int16x4_t __a, int32x4_t __b, const int __c)
{
  return __builtin_aarch64_sqrshrn2_nv4si (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshrn_high_n_s64 (int32x2_t __a, int64x2_t __b, const int __c)
{
  return __builtin_aarch64_sqrshrn2_nv2di (__a, __b, __c);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshrn_high_n_u16 (uint8x8_t __a, uint16x8_t __b, const int __c)
{
  return __builtin_aarch64_uqrshrn2_nv8hi_uuus (__a, __b, __c);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshrn_high_n_u32 (uint16x4_t __a, uint32x4_t __b, const int __c)
{
  return __builtin_aarch64_uqrshrn2_nv4si_uuus (__a, __b, __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshrn_high_n_u64 (uint32x2_t __a, uint64x2_t __b, const int __c)
{
  return __builtin_aarch64_uqrshrn2_nv2di_uuus (__a, __b, __c);
}

/* Signed-to-unsigned saturating rounding shift right narrow into the high
   half (SQRSHRUN2).  */

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshrun_high_n_s16 (uint8x8_t __a, int16x8_t __b, const int __c)
{
  return __builtin_aarch64_sqrshrun2_nv8hi_uuss (__a, __b, __c);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshrun_high_n_s32 (uint16x4_t __a, int32x4_t __b, const int __c)
{
  return __builtin_aarch64_sqrshrun2_nv4si_uuss (__a, __b, __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrshrun_high_n_s64 (uint32x2_t __a, int64x2_t __b, const int __c)
{
  return __builtin_aarch64_sqrshrun2_nv2di_uuss (__a, __b, __c);
}
| |
/* Saturating shift right narrow by immediate __c, into the high half
   (SQSHRN2/UQSHRN2); like the rounding variants above but truncating.  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrn_high_n_s16 (int8x8_t __a, int16x8_t __b, const int __c)
{
  return __builtin_aarch64_sqshrn2_nv8hi (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrn_high_n_s32 (int16x4_t __a, int32x4_t __b, const int __c)
{
  return __builtin_aarch64_sqshrn2_nv4si (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrn_high_n_s64 (int32x2_t __a, int64x2_t __b, const int __c)
{
  return __builtin_aarch64_sqshrn2_nv2di (__a, __b, __c);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrn_high_n_u16 (uint8x8_t __a, uint16x8_t __b, const int __c)
{
  return __builtin_aarch64_uqshrn2_nv8hi_uuus (__a, __b, __c);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrn_high_n_u32 (uint16x4_t __a, uint32x4_t __b, const int __c)
{
  return __builtin_aarch64_uqshrn2_nv4si_uuus (__a, __b, __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrn_high_n_u64 (uint32x2_t __a, uint64x2_t __b, const int __c)
{
  return __builtin_aarch64_uqshrn2_nv2di_uuus (__a, __b, __c);
}

/* Signed-to-unsigned saturating shift right narrow into the high half
   (SQSHRUN2).  */

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrun_high_n_s16 (uint8x8_t __a, int16x8_t __b, const int __c)
{
  return __builtin_aarch64_sqshrun2_nv8hi_uuss (__a, __b, __c);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrun_high_n_s32 (uint16x4_t __a, int32x4_t __b, const int __c)
{
  return __builtin_aarch64_sqshrun2_nv4si_uuss (__a, __b, __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqshrun_high_n_s64 (uint32x2_t __a, int64x2_t __b, const int __c)
{
  return __builtin_aarch64_sqshrun2_nv2di_uuss (__a, __b, __c);
}
| |
/* Rounding shift right narrow by immediate __c, into the high half
   (RSHRN2).  The unsigned variants cast through the signed builtin; a
   rounding narrowing shift is bit-identical for both signednesses.  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshrn_high_n_s16 (int8x8_t __a, int16x8_t __b, const int __c)
{
  return __builtin_aarch64_rshrn2v8hi (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshrn_high_n_s32 (int16x4_t __a, int32x4_t __b, const int __c)
{
  return __builtin_aarch64_rshrn2v4si (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshrn_high_n_s64 (int32x2_t __a, int64x2_t __b, const int __c)
{
  return __builtin_aarch64_rshrn2v2di (__a, __b, __c);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshrn_high_n_u16 (uint8x8_t __a, uint16x8_t __b, const int __c)
{
  return (uint8x16_t) __builtin_aarch64_rshrn2v8hi ((int8x8_t) __a,
						    (int16x8_t) __b, __c);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshrn_high_n_u32 (uint16x4_t __a, uint32x4_t __b, const int __c)
{
  return (uint16x8_t) __builtin_aarch64_rshrn2v4si ((int16x4_t) __a,
						    (int32x4_t) __b, __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshrn_high_n_u64 (uint32x2_t __a, uint64x2_t __b, const int __c)
{
  return (uint32x4_t) __builtin_aarch64_rshrn2v2di ((int32x2_t)__a,
						    (int64x2_t)__b, __c);
}

/* Rounding shift right narrow by immediate __b (RSHRN, low-half forms).  */

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshrn_n_s16 (int16x8_t __a, const int __b)
{
  return __builtin_aarch64_rshrnv8hi (__a, __b);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshrn_n_s32 (int32x4_t __a, const int __b)
{
  return __builtin_aarch64_rshrnv4si (__a, __b);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshrn_n_s64 (int64x2_t __a, const int __b)
{
  return __builtin_aarch64_rshrnv2di (__a, __b);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshrn_n_u16 (uint16x8_t __a, const int __b)
{
  return (uint8x8_t) __builtin_aarch64_rshrnv8hi ((int16x8_t) __a, __b);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshrn_n_u32 (uint32x4_t __a, const int __b)
{
  return (uint16x4_t) __builtin_aarch64_rshrnv4si ((int32x4_t) __a, __b);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrshrn_n_u64 (uint64x2_t __a, const int __b)
{
  return (uint32x2_t) __builtin_aarch64_rshrnv2di ((int64x2_t) __a, __b);
}
| |
/* Unsigned reciprocal square root estimate (URSQRTE).  */

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsqrte_u32 (uint32x2_t __a)
{
  return __builtin_aarch64_ursqrtev2si_uu (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vrsqrteq_u32 (uint32x4_t __a)
{
  return __builtin_aarch64_ursqrtev4si_uu (__a);
}
| |
/* Shift right narrow by immediate __c, into the high half (SHRN2).  The
   unsigned variants cast through the signed builtin; the truncating
   narrowing shift is bit-identical for both signednesses.  */

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrn_high_n_s16 (int8x8_t __a, int16x8_t __b, const int __c)
{
  return __builtin_aarch64_shrn2v8hi (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrn_high_n_s32 (int16x4_t __a, int32x4_t __b, const int __c)
{
  return __builtin_aarch64_shrn2v4si (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrn_high_n_s64 (int32x2_t __a, int64x2_t __b, const int __c)
{
  return __builtin_aarch64_shrn2v2di (__a, __b, __c);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrn_high_n_u16 (uint8x8_t __a, uint16x8_t __b, const int __c)
{
  return (uint8x16_t)
    __builtin_aarch64_shrn2v8hi ((int8x8_t) __a, (int16x8_t) __b, __c);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrn_high_n_u32 (uint16x4_t __a, uint32x4_t __b, const int __c)
{
  return (uint16x8_t)
    __builtin_aarch64_shrn2v4si ((int16x4_t) __a, (int32x4_t) __b, __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vshrn_high_n_u64 (uint32x2_t __a, uint64x2_t __b, const int __c)
{
  return (uint32x4_t)
    __builtin_aarch64_shrn2v2di ((int32x2_t) __a, (int64x2_t) __b, __c);
}
| |
/* Shift left and insert (SLI) on polynomial vectors: shifts __b left by
   immediate __c and inserts it into __a, keeping __a's low __c bits per
   element.  */

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsli_n_p8 (poly8x8_t __a, poly8x8_t __b, const int __c)
{
  return __builtin_aarch64_ssli_nv8qi_ppps (__a, __b, __c);
}

__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsli_n_p16 (poly16x4_t __a, poly16x4_t __b, const int __c)
{
  return __builtin_aarch64_ssli_nv4hi_ppps (__a, __b, __c);
}

__extension__ extern __inline poly8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsliq_n_p8 (poly8x16_t __a, poly8x16_t __b, const int __c)
{
  return __builtin_aarch64_ssli_nv16qi_ppps (__a, __b, __c);
}

__extension__ extern __inline poly16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsliq_n_p16 (poly16x8_t __a, poly16x8_t __b, const int __c)
{
  return __builtin_aarch64_ssli_nv8hi_ppps (__a, __b, __c);
}

/* Shift right and insert (SRI) on polynomial vectors: shifts __b right by
   immediate __c and inserts it into __a, keeping __a's high __c bits per
   element.  */

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsri_n_p8 (poly8x8_t __a, poly8x8_t __b, const int __c)
{
  return __builtin_aarch64_ssri_nv8qi_ppps (__a, __b, __c);
}

__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsri_n_p16 (poly16x4_t __a, poly16x4_t __b, const int __c)
{
  return __builtin_aarch64_ssri_nv4hi_ppps (__a, __b, __c);
}

/* The single-element operands are unwrapped to scalars for the DImode
   builtin and the scalar result re-wrapped as poly64x1_t.  */
__extension__ extern __inline poly64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsri_n_p64 (poly64x1_t __a, poly64x1_t __b, const int __c)
{
  return (poly64x1_t) __builtin_aarch64_ssri_ndi_ppps (__a[0], __b[0], __c);
}

__extension__ extern __inline poly8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsriq_n_p8 (poly8x16_t __a, poly8x16_t __b, const int __c)
{
  return __builtin_aarch64_ssri_nv16qi_ppps (__a, __b, __c);
}

__extension__ extern __inline poly16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsriq_n_p16 (poly16x8_t __a, poly16x8_t __b, const int __c)
{
  return __builtin_aarch64_ssri_nv8hi_ppps (__a, __b, __c);
}

__extension__ extern __inline poly64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vsriq_n_p64 (poly64x2_t __a, poly64x2_t __b, const int __c)
{
  return __builtin_aarch64_ssri_nv2di_ppps (__a, __b, __c);
}
| |
/* Bit test (CMTST) on polynomial vectors, expressed in plain C: each
   result element is all-ones where (__a & __b) is nonzero, else zero.
   GCC's vector != comparison yields exactly that mask.  */

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtst_p8 (poly8x8_t __a, poly8x8_t __b)
{
  return (uint8x8_t) ((((uint8x8_t) __a) & ((uint8x8_t) __b))
		       != 0);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtst_p16 (poly16x4_t __a, poly16x4_t __b)
{
  return (uint16x4_t) ((((uint16x4_t) __a) & ((uint16x4_t) __b))
		       != 0);
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtst_p64 (poly64x1_t __a, poly64x1_t __b)
{
  return (uint64x1_t) ((__a & __b) != __AARCH64_INT64_C (0));
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtstq_p8 (poly8x16_t __a, poly8x16_t __b)
{
  return (uint8x16_t) ((((uint8x16_t) __a) & ((uint8x16_t) __b))
		       != 0);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtstq_p16 (poly16x8_t __a, poly16x8_t __b)
{
  return (uint16x8_t) ((((uint16x8_t) __a) & ((uint16x8_t) __b))
		       != 0);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtstq_p64 (poly64x2_t __a, poly64x2_t __b)
{
  return (uint64x2_t) ((((uint64x2_t) __a) & ((uint64x2_t) __b))
		       != __AARCH64_INT64_C (0));
}
| |
| /* End of temporary inline asm implementations. */ |
| |
| /* Start of temporary inline asm for vldn, vstn and friends. */ |
| |
| /* Create struct element types for duplicating loads. |
| |
| Create 2 element structures of: |
| |
| +------+----+----+----+----+ |
| | | 8 | 16 | 32 | 64 | |
| +------+----+----+----+----+ |
| |int | Y | Y | N | N | |
| +------+----+----+----+----+ |
| |uint | Y | Y | N | N | |
| +------+----+----+----+----+ |
| |float | - | Y | N | N | |
| +------+----+----+----+----+ |
| |poly | Y | Y | - | - | |
| +------+----+----+----+----+ |
| |
| Create 3 element structures of: |
| |
| +------+----+----+----+----+ |
| | | 8 | 16 | 32 | 64 | |
| +------+----+----+----+----+ |
| |int | Y | Y | Y | Y | |
| +------+----+----+----+----+ |
| |uint | Y | Y | Y | Y | |
| +------+----+----+----+----+ |
| |float | - | Y | Y | Y | |
| +------+----+----+----+----+ |
| |poly | Y | Y | - | - | |
| +------+----+----+----+----+ |
| |
| Create 4 element structures of: |
| |
| +------+----+----+----+----+ |
| | | 8 | 16 | 32 | 64 | |
| +------+----+----+----+----+ |
| |int | Y | N | N | Y | |
| +------+----+----+----+----+ |
| |uint | Y | N | N | Y | |
| +------+----+----+----+----+ |
| |float | - | N | N | Y | |
| +------+----+----+----+----+ |
| |poly | Y | N | - | - | |
| +------+----+----+----+----+ |
| |
| This is required for casting memory reference. */ |
/* __STRUCTN (t, sz, nelem) expands to
     typedef struct t<sz>x<nelem>_t { t<sz>_t val[nelem]; } t<sz>x<nelem>_t;
   i.e. a struct of <nelem> *scalar* elements (e.g. int8x2_t holds two
   int8_t values, not vectors).  These types exist only for casting
   memory references, per the tables above; only the combinations the
   vldn/vstn implementations below actually need are instantiated.  */
#define __STRUCTN(t, sz, nelem)			\
  typedef struct t ## sz ## x ## nelem ## _t {	\
    t ## sz ## _t val[nelem];			\
  }  t ## sz ## x ## nelem ## _t;

/* 2-element structs.  */
__STRUCTN (int, 8, 2)
__STRUCTN (int, 16, 2)
__STRUCTN (uint, 8, 2)
__STRUCTN (uint, 16, 2)
__STRUCTN (float, 16, 2)
__STRUCTN (poly, 8, 2)
__STRUCTN (poly, 16, 2)
/* 3-element structs.  */
__STRUCTN (int, 8, 3)
__STRUCTN (int, 16, 3)
__STRUCTN (int, 32, 3)
__STRUCTN (int, 64, 3)
__STRUCTN (uint, 8, 3)
__STRUCTN (uint, 16, 3)
__STRUCTN (uint, 32, 3)
__STRUCTN (uint, 64, 3)
__STRUCTN (float, 16, 3)
__STRUCTN (float, 32, 3)
__STRUCTN (float, 64, 3)
__STRUCTN (poly, 8, 3)
__STRUCTN (poly, 16, 3)
/* 4-element structs.  */
__STRUCTN (int, 8, 4)
__STRUCTN (int, 64, 4)
__STRUCTN (uint, 8, 4)
__STRUCTN (uint, 64, 4)
__STRUCTN (poly, 8, 4)
__STRUCTN (float, 64, 4)
/* Scope the helper macro to this section only.  */
#undef __STRUCTN
| |
| |
/* vst2_lane_<T> (64-bit input forms): store lane __lane from each of
   the two 64-bit input vectors, interleaved, at __ptr.  Each input is
   widened to 128 bits via vcombine with a zeroed upper half; the zero
   half only pads the opaque register-tuple type __o (sized for two
   128-bit vectors) and the builtin stores just the selected lane.
   __lane indexes the 64-bit input type and is expected to be a
   compile-time constant.  The bitwise copy into __o via
   __builtin_memcpy is the sanctioned way to reinterpret the struct as
   the builtin's opaque tuple type.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_lane_f16 (float16_t *__ptr, float16x4x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  float16x8x2_t __temp;
  __temp.val[0] = vcombine_f16 (__val.val[0],
				vcreate_f16 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_f16 (__val.val[1],
				vcreate_f16 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st2_lanev4hf ((__builtin_aarch64_simd_hf *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_lane_f32 (float32_t *__ptr, float32x2x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  float32x4x2_t __temp;
  __temp.val[0] = vcombine_f32 (__val.val[0],
				vcreate_f32 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_f32 (__val.val[1],
				vcreate_f32 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st2_lanev2sf ((__builtin_aarch64_simd_sf *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_lane_f64 (float64_t *__ptr, float64x1x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  float64x2x2_t __temp;
  __temp.val[0] = vcombine_f64 (__val.val[0],
				vcreate_f64 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_f64 (__val.val[1],
				vcreate_f64 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st2_lanedf ((__builtin_aarch64_simd_df *) __ptr, __o,
				__lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_lane_p8 (poly8_t *__ptr, poly8x8x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  poly8x16x2_t __temp;
  __temp.val[0] = vcombine_p8 (__val.val[0],
			       vcreate_p8 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_p8 (__val.val[1],
			       vcreate_p8 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st2_lanev8qi ((__builtin_aarch64_simd_qi *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_lane_p16 (poly16_t *__ptr, poly16x4x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  poly16x8x2_t __temp;
  __temp.val[0] = vcombine_p16 (__val.val[0],
				vcreate_p16 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_p16 (__val.val[1],
				vcreate_p16 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st2_lanev4hi ((__builtin_aarch64_simd_hi *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_lane_p64 (poly64_t *__ptr, poly64x1x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  poly64x2x2_t __temp;
  __temp.val[0] = vcombine_p64 (__val.val[0],
				vcreate_p64 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_p64 (__val.val[1],
				vcreate_p64 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st2_lanedi ((__builtin_aarch64_simd_di *) __ptr, __o,
				__lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_lane_s8 (int8_t *__ptr, int8x8x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  int8x16x2_t __temp;
  __temp.val[0] = vcombine_s8 (__val.val[0],
			       vcreate_s8 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_s8 (__val.val[1],
			       vcreate_s8 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st2_lanev8qi ((__builtin_aarch64_simd_qi *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_lane_s16 (int16_t *__ptr, int16x4x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  int16x8x2_t __temp;
  __temp.val[0] = vcombine_s16 (__val.val[0],
				vcreate_s16 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_s16 (__val.val[1],
				vcreate_s16 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st2_lanev4hi ((__builtin_aarch64_simd_hi *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_lane_s32 (int32_t *__ptr, int32x2x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  int32x4x2_t __temp;
  __temp.val[0] = vcombine_s32 (__val.val[0],
				vcreate_s32 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_s32 (__val.val[1],
				vcreate_s32 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st2_lanev2si ((__builtin_aarch64_simd_si *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_lane_s64 (int64_t *__ptr, int64x1x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  int64x2x2_t __temp;
  __temp.val[0] = vcombine_s64 (__val.val[0],
				vcreate_s64 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_s64 (__val.val[1],
				vcreate_s64 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st2_lanedi ((__builtin_aarch64_simd_di *) __ptr, __o,
				__lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_lane_u8 (uint8_t *__ptr, uint8x8x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  uint8x16x2_t __temp;
  __temp.val[0] = vcombine_u8 (__val.val[0],
			       vcreate_u8 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_u8 (__val.val[1],
			       vcreate_u8 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st2_lanev8qi ((__builtin_aarch64_simd_qi *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_lane_u16 (uint16_t *__ptr, uint16x4x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  uint16x8x2_t __temp;
  __temp.val[0] = vcombine_u16 (__val.val[0],
				vcreate_u16 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_u16 (__val.val[1],
				vcreate_u16 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st2_lanev4hi ((__builtin_aarch64_simd_hi *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_lane_u32 (uint32_t *__ptr, uint32x2x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  uint32x4x2_t __temp;
  __temp.val[0] = vcombine_u32 (__val.val[0],
				vcreate_u32 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_u32 (__val.val[1],
				vcreate_u32 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st2_lanev2si ((__builtin_aarch64_simd_si *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2_lane_u64 (uint64_t *__ptr, uint64x1x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  uint64x2x2_t __temp;
  __temp.val[0] = vcombine_u64 (__val.val[0],
				vcreate_u64 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_u64 (__val.val[1],
				vcreate_u64 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st2_lanedi ((__builtin_aarch64_simd_di *) __ptr, __o,
				__lane);
}
| |
/* vst2q_lane_<T> (128-bit input forms): store lane __lane from each of
   the two 128-bit input vectors, interleaved, at __ptr.  The pair is
   already the size of the opaque register tuple __o, so it is copied
   in bitwise with __builtin_memcpy (the sanctioned reinterpretation)
   and handed to the lane-store builtin.  __lane is expected to be a
   compile-time constant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_lane_f16 (float16_t *__ptr, float16x8x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st2_lanev8hf ((__builtin_aarch64_simd_hf *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_lane_f32 (float32_t *__ptr, float32x4x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st2_lanev4sf ((__builtin_aarch64_simd_sf *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_lane_f64 (float64_t *__ptr, float64x2x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st2_lanev2df ((__builtin_aarch64_simd_df *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_lane_p8 (poly8_t *__ptr, poly8x16x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st2_lanev16qi ((__builtin_aarch64_simd_qi *) __ptr, __o,
				   __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_lane_p16 (poly16_t *__ptr, poly16x8x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st2_lanev8hi ((__builtin_aarch64_simd_hi *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_lane_p64 (poly64_t *__ptr, poly64x2x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st2_lanev2di ((__builtin_aarch64_simd_di *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_lane_s8 (int8_t *__ptr, int8x16x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st2_lanev16qi ((__builtin_aarch64_simd_qi *) __ptr, __o,
				   __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_lane_s16 (int16_t *__ptr, int16x8x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st2_lanev8hi ((__builtin_aarch64_simd_hi *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_lane_s32 (int32_t *__ptr, int32x4x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st2_lanev4si ((__builtin_aarch64_simd_si *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_lane_s64 (int64_t *__ptr, int64x2x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st2_lanev2di ((__builtin_aarch64_simd_di *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_lane_u8 (uint8_t *__ptr, uint8x16x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st2_lanev16qi ((__builtin_aarch64_simd_qi *) __ptr, __o,
				   __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_lane_u16 (uint16_t *__ptr, uint16x8x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st2_lanev8hi ((__builtin_aarch64_simd_hi *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_lane_u32 (uint32_t *__ptr, uint32x4x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st2_lanev4si ((__builtin_aarch64_simd_si *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst2q_lane_u64 (uint64_t *__ptr, uint64x2x2_t __val, const int __lane)
{
  __builtin_aarch64_simd_oi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st2_lanev2di ((__builtin_aarch64_simd_di *) __ptr, __o,
				  __lane);
}
| |
/* vst3_lane_<T> (64-bit input forms): store lane __lane from each of
   the three 64-bit input vectors, interleaved, at __ptr.  As in the
   vst2_lane family above, each input is widened to 128 bits with a
   zeroed upper half to fill the opaque three-vector tuple type __o
   (__builtin_aarch64_simd_ci); only the selected lane is stored.
   __lane indexes the 64-bit input type and is expected to be a
   compile-time constant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_lane_f16 (float16_t *__ptr, float16x4x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  float16x8x3_t __temp;
  __temp.val[0] = vcombine_f16 (__val.val[0],
				vcreate_f16 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_f16 (__val.val[1],
				vcreate_f16 (__AARCH64_UINT64_C (0)));
  __temp.val[2] = vcombine_f16 (__val.val[2],
				vcreate_f16 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st3_lanev4hf ((__builtin_aarch64_simd_hf *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_lane_f32 (float32_t *__ptr, float32x2x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  float32x4x3_t __temp;
  __temp.val[0] = vcombine_f32 (__val.val[0],
				vcreate_f32 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_f32 (__val.val[1],
				vcreate_f32 (__AARCH64_UINT64_C (0)));
  __temp.val[2] = vcombine_f32 (__val.val[2],
				vcreate_f32 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st3_lanev2sf ((__builtin_aarch64_simd_sf *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_lane_f64 (float64_t *__ptr, float64x1x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  float64x2x3_t __temp;
  __temp.val[0] = vcombine_f64 (__val.val[0],
				vcreate_f64 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_f64 (__val.val[1],
				vcreate_f64 (__AARCH64_UINT64_C (0)));
  __temp.val[2] = vcombine_f64 (__val.val[2],
				vcreate_f64 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st3_lanedf ((__builtin_aarch64_simd_df *) __ptr, __o,
				__lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_lane_p8 (poly8_t *__ptr, poly8x8x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  poly8x16x3_t __temp;
  __temp.val[0] = vcombine_p8 (__val.val[0],
			       vcreate_p8 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_p8 (__val.val[1],
			       vcreate_p8 (__AARCH64_UINT64_C (0)));
  __temp.val[2] = vcombine_p8 (__val.val[2],
			       vcreate_p8 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st3_lanev8qi ((__builtin_aarch64_simd_qi *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_lane_p16 (poly16_t *__ptr, poly16x4x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  poly16x8x3_t __temp;
  __temp.val[0] = vcombine_p16 (__val.val[0],
				vcreate_p16 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_p16 (__val.val[1],
				vcreate_p16 (__AARCH64_UINT64_C (0)));
  __temp.val[2] = vcombine_p16 (__val.val[2],
				vcreate_p16 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st3_lanev4hi ((__builtin_aarch64_simd_hi *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_lane_p64 (poly64_t *__ptr, poly64x1x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  poly64x2x3_t __temp;
  __temp.val[0] = vcombine_p64 (__val.val[0],
				vcreate_p64 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_p64 (__val.val[1],
				vcreate_p64 (__AARCH64_UINT64_C (0)));
  __temp.val[2] = vcombine_p64 (__val.val[2],
				vcreate_p64 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st3_lanedi ((__builtin_aarch64_simd_di *) __ptr, __o,
				__lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_lane_s8 (int8_t *__ptr, int8x8x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  int8x16x3_t __temp;
  __temp.val[0] = vcombine_s8 (__val.val[0],
			       vcreate_s8 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_s8 (__val.val[1],
			       vcreate_s8 (__AARCH64_UINT64_C (0)));
  __temp.val[2] = vcombine_s8 (__val.val[2],
			       vcreate_s8 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st3_lanev8qi ((__builtin_aarch64_simd_qi *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_lane_s16 (int16_t *__ptr, int16x4x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  int16x8x3_t __temp;
  __temp.val[0] = vcombine_s16 (__val.val[0],
				vcreate_s16 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_s16 (__val.val[1],
				vcreate_s16 (__AARCH64_UINT64_C (0)));
  __temp.val[2] = vcombine_s16 (__val.val[2],
				vcreate_s16 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st3_lanev4hi ((__builtin_aarch64_simd_hi *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_lane_s32 (int32_t *__ptr, int32x2x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  int32x4x3_t __temp;
  __temp.val[0] = vcombine_s32 (__val.val[0],
				vcreate_s32 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_s32 (__val.val[1],
				vcreate_s32 (__AARCH64_UINT64_C (0)));
  __temp.val[2] = vcombine_s32 (__val.val[2],
				vcreate_s32 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st3_lanev2si ((__builtin_aarch64_simd_si *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_lane_s64 (int64_t *__ptr, int64x1x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  int64x2x3_t __temp;
  __temp.val[0] = vcombine_s64 (__val.val[0],
				vcreate_s64 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_s64 (__val.val[1],
				vcreate_s64 (__AARCH64_UINT64_C (0)));
  __temp.val[2] = vcombine_s64 (__val.val[2],
				vcreate_s64 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st3_lanedi ((__builtin_aarch64_simd_di *) __ptr, __o,
				__lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_lane_u8 (uint8_t *__ptr, uint8x8x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  uint8x16x3_t __temp;
  __temp.val[0] = vcombine_u8 (__val.val[0],
			       vcreate_u8 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_u8 (__val.val[1],
			       vcreate_u8 (__AARCH64_UINT64_C (0)));
  __temp.val[2] = vcombine_u8 (__val.val[2],
			       vcreate_u8 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st3_lanev8qi ((__builtin_aarch64_simd_qi *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_lane_u16 (uint16_t *__ptr, uint16x4x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  uint16x8x3_t __temp;
  __temp.val[0] = vcombine_u16 (__val.val[0],
				vcreate_u16 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_u16 (__val.val[1],
				vcreate_u16 (__AARCH64_UINT64_C (0)));
  __temp.val[2] = vcombine_u16 (__val.val[2],
				vcreate_u16 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st3_lanev4hi ((__builtin_aarch64_simd_hi *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_lane_u32 (uint32_t *__ptr, uint32x2x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  uint32x4x3_t __temp;
  __temp.val[0] = vcombine_u32 (__val.val[0],
				vcreate_u32 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_u32 (__val.val[1],
				vcreate_u32 (__AARCH64_UINT64_C (0)));
  __temp.val[2] = vcombine_u32 (__val.val[2],
				vcreate_u32 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st3_lanev2si ((__builtin_aarch64_simd_si *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3_lane_u64 (uint64_t *__ptr, uint64x1x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  uint64x2x3_t __temp;
  __temp.val[0] = vcombine_u64 (__val.val[0],
				vcreate_u64 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_u64 (__val.val[1],
				vcreate_u64 (__AARCH64_UINT64_C (0)));
  __temp.val[2] = vcombine_u64 (__val.val[2],
				vcreate_u64 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st3_lanedi ((__builtin_aarch64_simd_di *) __ptr, __o,
				__lane);
}
| |
/* vst3q_lane_<T> (128-bit input forms): store lane __lane from each of
   the three 128-bit input vectors, interleaved, at __ptr.  The triple
   matches the size of the opaque tuple __o, so it is copied in bitwise
   with __builtin_memcpy and handed to the lane-store builtin.  __lane
   is expected to be a compile-time constant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_lane_f16 (float16_t *__ptr, float16x8x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st3_lanev8hf ((__builtin_aarch64_simd_hf *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_lane_f32 (float32_t *__ptr, float32x4x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st3_lanev4sf ((__builtin_aarch64_simd_sf *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_lane_f64 (float64_t *__ptr, float64x2x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st3_lanev2df ((__builtin_aarch64_simd_df *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_lane_p8 (poly8_t *__ptr, poly8x16x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st3_lanev16qi ((__builtin_aarch64_simd_qi *) __ptr, __o,
				   __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_lane_p16 (poly16_t *__ptr, poly16x8x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st3_lanev8hi ((__builtin_aarch64_simd_hi *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_lane_p64 (poly64_t *__ptr, poly64x2x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st3_lanev2di ((__builtin_aarch64_simd_di *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_lane_s8 (int8_t *__ptr, int8x16x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st3_lanev16qi ((__builtin_aarch64_simd_qi *) __ptr, __o,
				   __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_lane_s16 (int16_t *__ptr, int16x8x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st3_lanev8hi ((__builtin_aarch64_simd_hi *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_lane_s32 (int32_t *__ptr, int32x4x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st3_lanev4si ((__builtin_aarch64_simd_si *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_lane_s64 (int64_t *__ptr, int64x2x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st3_lanev2di ((__builtin_aarch64_simd_di *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_lane_u8 (uint8_t *__ptr, uint8x16x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st3_lanev16qi ((__builtin_aarch64_simd_qi *) __ptr, __o,
				   __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_lane_u16 (uint16_t *__ptr, uint16x8x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st3_lanev8hi ((__builtin_aarch64_simd_hi *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_lane_u32 (uint32_t *__ptr, uint32x4x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st3_lanev4si ((__builtin_aarch64_simd_si *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst3q_lane_u64 (uint64_t *__ptr, uint64x2x3_t __val, const int __lane)
{
  __builtin_aarch64_simd_ci __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st3_lanev2di ((__builtin_aarch64_simd_di *) __ptr, __o,
				  __lane);
}
| |
/* vst4_lane_<T> (64-bit input forms): store lane __lane from each of
   the four 64-bit input vectors, interleaved, at __ptr.  As in the
   vst2/vst3 lane families above, each input is widened to 128 bits
   with a zeroed upper half to fill the opaque four-vector tuple type
   __o (__builtin_aarch64_simd_xi); only the selected lane is stored.
   __lane indexes the 64-bit input type and is expected to be a
   compile-time constant.  */
__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4_lane_f16 (float16_t *__ptr, float16x4x4_t __val, const int __lane)
{
  __builtin_aarch64_simd_xi __o;
  float16x8x4_t __temp;
  __temp.val[0] = vcombine_f16 (__val.val[0],
				vcreate_f16 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_f16 (__val.val[1],
				vcreate_f16 (__AARCH64_UINT64_C (0)));
  __temp.val[2] = vcombine_f16 (__val.val[2],
				vcreate_f16 (__AARCH64_UINT64_C (0)));
  __temp.val[3] = vcombine_f16 (__val.val[3],
				vcreate_f16 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st4_lanev4hf ((__builtin_aarch64_simd_hf *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4_lane_f32 (float32_t *__ptr, float32x2x4_t __val, const int __lane)
{
  __builtin_aarch64_simd_xi __o;
  float32x4x4_t __temp;
  __temp.val[0] = vcombine_f32 (__val.val[0],
				vcreate_f32 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_f32 (__val.val[1],
				vcreate_f32 (__AARCH64_UINT64_C (0)));
  __temp.val[2] = vcombine_f32 (__val.val[2],
				vcreate_f32 (__AARCH64_UINT64_C (0)));
  __temp.val[3] = vcombine_f32 (__val.val[3],
				vcreate_f32 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st4_lanev2sf ((__builtin_aarch64_simd_sf *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4_lane_f64 (float64_t *__ptr, float64x1x4_t __val, const int __lane)
{
  __builtin_aarch64_simd_xi __o;
  float64x2x4_t __temp;
  __temp.val[0] = vcombine_f64 (__val.val[0],
				vcreate_f64 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_f64 (__val.val[1],
				vcreate_f64 (__AARCH64_UINT64_C (0)));
  __temp.val[2] = vcombine_f64 (__val.val[2],
				vcreate_f64 (__AARCH64_UINT64_C (0)));
  __temp.val[3] = vcombine_f64 (__val.val[3],
				vcreate_f64 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st4_lanedf ((__builtin_aarch64_simd_df *) __ptr, __o,
				__lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4_lane_p8 (poly8_t *__ptr, poly8x8x4_t __val, const int __lane)
{
  __builtin_aarch64_simd_xi __o;
  poly8x16x4_t __temp;
  __temp.val[0] = vcombine_p8 (__val.val[0],
			       vcreate_p8 (__AARCH64_UINT64_C (0)));
  __temp.val[1] = vcombine_p8 (__val.val[1],
			       vcreate_p8 (__AARCH64_UINT64_C (0)));
  __temp.val[2] = vcombine_p8 (__val.val[2],
			       vcreate_p8 (__AARCH64_UINT64_C (0)));
  __temp.val[3] = vcombine_p8 (__val.val[3],
			       vcreate_p8 (__AARCH64_UINT64_C (0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  __builtin_aarch64_st4_lanev8qi ((__builtin_aarch64_simd_qi *) __ptr, __o,
				  __lane);
}
| |
| __extension__ extern __inline void |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vst4_lane_p16 (poly16_t *__ptr, poly16x4x4_t __val, const int __lane) |
| { |
| __builtin_aarch64_simd_xi __o; |
| poly16x8x4_t __temp; |
| __temp.val[0] = vcombine_p16 (__val.val[0], |
| vcreate_p16 (__AARCH64_UINT64_C (0))); |
| __temp.val[1] = vcombine_p16 (__val.val[1], |
| vcreate_p16 (__AARCH64_UINT64_C (0))); |
| __temp.val[2] = vcombine_p16 (__val.val[2], |
| vcreate_p16 (__AARCH64_UINT64_C (0))); |
| __temp.val[3] = vcombine_p16 (__val.val[3], |
| vcreate_p16 (__AARCH64_UINT64_C (0))); |
| __builtin_memcpy (&__o, &__temp, sizeof (__temp)); |
| __builtin_aarch64_st4_lanev4hi ((__builtin_aarch64_simd_hi *) __ptr, __o, |
| __lane); |
| } |
| |
| __extension__ extern __inline void |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vst4_lane_p64 (poly64_t *__ptr, poly64x1x4_t __val, const int __lane) |
| { |
| __builtin_aarch64_simd_xi __o; |
| poly64x2x4_t __temp; |
| __temp.val[0] = vcombine_p64 (__val.val[0], |
| vcreate_p64 (__AARCH64_UINT64_C (0))); |
| __temp.val[1] = vcombine_p64 (__val.val[1], |
| vcreate_p64 (__AARCH64_UINT64_C (0))); |
| __temp.val[2] = vcombine_p64 (__val.val[2], |
| vcreate_p64 (__AARCH64_UINT64_C (0))); |
| __temp.val[3] = vcombine_p64 (__val.val[3], |
| vcreate_p64 (__AARCH64_UINT64_C (0))); |
| __builtin_memcpy (&__o, &__temp, sizeof (__temp)); |
| __builtin_aarch64_st4_lanedi ((__builtin_aarch64_simd_di *) __ptr, __o, |
| __lane); |
| } |
| |
| __extension__ extern __inline void |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vst4_lane_s8 (int8_t *__ptr, int8x8x4_t __val, const int __lane) |
| { |
| __builtin_aarch64_simd_xi __o; |
| int8x16x4_t __temp; |
| __temp.val[0] = vcombine_s8 (__val.val[0], |
| vcreate_s8 (__AARCH64_UINT64_C (0))); |
| __temp.val[1] = vcombine_s8 (__val.val[1], |
| vcreate_s8 (__AARCH64_UINT64_C (0))); |
| __temp.val[2] = vcombine_s8 (__val.val[2], |
| vcreate_s8 (__AARCH64_UINT64_C (0))); |
| __temp.val[3] = vcombine_s8 (__val.val[3], |
| vcreate_s8 (__AARCH64_UINT64_C (0))); |
| __builtin_memcpy (&__o, &__temp, sizeof (__temp)); |
| __builtin_aarch64_st4_lanev8qi ((__builtin_aarch64_simd_qi *) __ptr, __o, |
| __lane); |
| } |
| |
| __extension__ extern __inline void |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vst4_lane_s16 (int16_t *__ptr, int16x4x4_t __val, const int __lane) |
| { |
| __builtin_aarch64_simd_xi __o; |
| int16x8x4_t __temp; |
| __temp.val[0] = vcombine_s16 (__val.val[0], |
| vcreate_s16 (__AARCH64_UINT64_C (0))); |
| __temp.val[1] = vcombine_s16 (__val.val[1], |
| vcreate_s16 (__AARCH64_UINT64_C (0))); |
| __temp.val[2] = vcombine_s16 (__val.val[2], |
| vcreate_s16 (__AARCH64_UINT64_C (0))); |
| __temp.val[3] = vcombine_s16 (__val.val[3], |
| vcreate_s16 (__AARCH64_UINT64_C (0))); |
| __builtin_memcpy (&__o, &__temp, sizeof (__temp)); |
| __builtin_aarch64_st4_lanev4hi ((__builtin_aarch64_simd_hi *) __ptr, __o, |
| __lane); |
| } |
| |
| __extension__ extern __inline void |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vst4_lane_s32 (int32_t *__ptr, int32x2x4_t __val, const int __lane) |
| { |
| __builtin_aarch64_simd_xi __o; |
| int32x4x4_t __temp; |
| __temp.val[0] = vcombine_s32 (__val.val[0], |
| vcreate_s32 (__AARCH64_UINT64_C (0))); |
| __temp.val[1] = vcombine_s32 (__val.val[1], |
| vcreate_s32 (__AARCH64_UINT64_C (0))); |
| __temp.val[2] = vcombine_s32 (__val.val[2], |
| vcreate_s32 (__AARCH64_UINT64_C (0))); |
| __temp.val[3] = vcombine_s32 (__val.val[3], |
| vcreate_s32 (__AARCH64_UINT64_C (0))); |
| __builtin_memcpy (&__o, &__temp, sizeof (__temp)); |
| __builtin_aarch64_st4_lanev2si ((__builtin_aarch64_simd_si *) __ptr, __o, |
| __lane); |
| } |
| |
| __extension__ extern __inline void |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vst4_lane_s64 (int64_t *__ptr, int64x1x4_t __val, const int __lane) |
| { |
| __builtin_aarch64_simd_xi __o; |
| int64x2x4_t __temp; |
| __temp.val[0] = vcombine_s64 (__val.val[0], |
| vcreate_s64 (__AARCH64_UINT64_C (0))); |
| __temp.val[1] = vcombine_s64 (__val.val[1], |
| vcreate_s64 (__AARCH64_UINT64_C (0))); |
| __temp.val[2] = vcombine_s64 (__val.val[2], |
| vcreate_s64 (__AARCH64_UINT64_C (0))); |
| __temp.val[3] = vcombine_s64 (__val.val[3], |
| vcreate_s64 (__AARCH64_UINT64_C (0))); |
| __builtin_memcpy (&__o, &__temp, sizeof (__temp)); |
| __builtin_aarch64_st4_lanedi ((__builtin_aarch64_simd_di *) __ptr, __o, |
| __lane); |
| } |
| |
| __extension__ extern __inline void |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vst4_lane_u8 (uint8_t *__ptr, uint8x8x4_t __val, const int __lane) |
| { |
| __builtin_aarch64_simd_xi __o; |
| uint8x16x4_t __temp; |
| __temp.val[0] = vcombine_u8 (__val.val[0], |
| vcreate_u8 (__AARCH64_UINT64_C (0))); |
| __temp.val[1] = vcombine_u8 (__val.val[1], |
| vcreate_u8 (__AARCH64_UINT64_C (0))); |
| __temp.val[2] = vcombine_u8 (__val.val[2], |
| vcreate_u8 (__AARCH64_UINT64_C (0))); |
| __temp.val[3] = vcombine_u8 (__val.val[3], |
| vcreate_u8 (__AARCH64_UINT64_C (0))); |
| __builtin_memcpy (&__o, &__temp, sizeof (__temp)); |
| __builtin_aarch64_st4_lanev8qi ((__builtin_aarch64_simd_qi *) __ptr, __o, |
| __lane); |
| } |
| |
| __extension__ extern __inline void |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vst4_lane_u16 (uint16_t *__ptr, uint16x4x4_t __val, const int __lane) |
| { |
| __builtin_aarch64_simd_xi __o; |
| uint16x8x4_t __temp; |
| __temp.val[0] = vcombine_u16 (__val.val[0], |
| vcreate_u16 (__AARCH64_UINT64_C (0))); |
| __temp.val[1] = vcombine_u16 (__val.val[1], |
| vcreate_u16 (__AARCH64_UINT64_C (0))); |
| __temp.val[2] = vcombine_u16 (__val.val[2], |
| vcreate_u16 (__AARCH64_UINT64_C (0))); |
| __temp.val[3] = vcombine_u16 (__val.val[3], |
| vcreate_u16 (__AARCH64_UINT64_C (0))); |
| __builtin_memcpy (&__o, &__temp, sizeof (__temp)); |
| __builtin_aarch64_st4_lanev4hi ((__builtin_aarch64_simd_hi *) __ptr, __o, |
| __lane); |
| } |
| |
| __extension__ extern __inline void |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vst4_lane_u32 (uint32_t *__ptr, uint32x2x4_t __val, const int __lane) |
| { |
| __builtin_aarch64_simd_xi __o; |
| uint32x4x4_t __temp; |
| __temp.val[0] = vcombine_u32 (__val.val[0], |
| vcreate_u32 (__AARCH64_UINT64_C (0))); |
| __temp.val[1] = vcombine_u32 (__val.val[1], |
| vcreate_u32 (__AARCH64_UINT64_C (0))); |
| __temp.val[2] = vcombine_u32 (__val.val[2], |
| vcreate_u32 (__AARCH64_UINT64_C (0))); |
| __temp.val[3] = vcombine_u32 (__val.val[3], |
| vcreate_u32 (__AARCH64_UINT64_C (0))); |
| __builtin_memcpy (&__o, &__temp, sizeof (__temp)); |
| __builtin_aarch64_st4_lanev2si ((__builtin_aarch64_simd_si *) __ptr, __o, |
| __lane); |
| } |
| |
| __extension__ extern __inline void |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vst4_lane_u64 (uint64_t *__ptr, uint64x1x4_t __val, const int __lane) |
| { |
| __builtin_aarch64_simd_xi __o; |
| uint64x2x4_t __temp; |
| __temp.val[0] = vcombine_u64 (__val.val[0], |
| vcreate_u64 (__AARCH64_UINT64_C (0))); |
| __temp.val[1] = vcombine_u64 (__val.val[1], |
| vcreate_u64 (__AARCH64_UINT64_C (0))); |
| __temp.val[2] = vcombine_u64 (__val.val[2], |
| vcreate_u64 (__AARCH64_UINT64_C (0))); |
| __temp.val[3] = vcombine_u64 (__val.val[3], |
| vcreate_u64 (__AARCH64_UINT64_C (0))); |
| __builtin_memcpy (&__o, &__temp, sizeof (__temp)); |
| __builtin_aarch64_st4_lanedi ((__builtin_aarch64_simd_di *) __ptr, __o, |
| __lane); |
| } |
| |
/* vst4q_lane: store the selected lane of a four-vector interleaved
   structure (128-bit input vectors).  The x4 struct is already the
   right size for the XImode pseudo, so it is packed directly with
   memcpy and handed to the st4_lane builtin.  */

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_lane_f16 (float16_t *__ptr, float16x8x4_t __val, const int __lane)
{
  __builtin_aarch64_simd_xi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st4_lanev8hf ((__builtin_aarch64_simd_hf *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_lane_f32 (float32_t *__ptr, float32x4x4_t __val, const int __lane)
{
  __builtin_aarch64_simd_xi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st4_lanev4sf ((__builtin_aarch64_simd_sf *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_lane_f64 (float64_t *__ptr, float64x2x4_t __val, const int __lane)
{
  __builtin_aarch64_simd_xi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st4_lanev2df ((__builtin_aarch64_simd_df *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_lane_p8 (poly8_t *__ptr, poly8x16x4_t __val, const int __lane)
{
  __builtin_aarch64_simd_xi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st4_lanev16qi ((__builtin_aarch64_simd_qi *) __ptr, __o,
				   __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_lane_p16 (poly16_t *__ptr, poly16x8x4_t __val, const int __lane)
{
  __builtin_aarch64_simd_xi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st4_lanev8hi ((__builtin_aarch64_simd_hi *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_lane_p64 (poly64_t *__ptr, poly64x2x4_t __val, const int __lane)
{
  __builtin_aarch64_simd_xi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st4_lanev2di ((__builtin_aarch64_simd_di *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_lane_s8 (int8_t *__ptr, int8x16x4_t __val, const int __lane)
{
  __builtin_aarch64_simd_xi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st4_lanev16qi ((__builtin_aarch64_simd_qi *) __ptr, __o,
				   __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_lane_s16 (int16_t *__ptr, int16x8x4_t __val, const int __lane)
{
  __builtin_aarch64_simd_xi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st4_lanev8hi ((__builtin_aarch64_simd_hi *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_lane_s32 (int32_t *__ptr, int32x4x4_t __val, const int __lane)
{
  __builtin_aarch64_simd_xi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st4_lanev4si ((__builtin_aarch64_simd_si *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_lane_s64 (int64_t *__ptr, int64x2x4_t __val, const int __lane)
{
  __builtin_aarch64_simd_xi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st4_lanev2di ((__builtin_aarch64_simd_di *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_lane_u8 (uint8_t *__ptr, uint8x16x4_t __val, const int __lane)
{
  __builtin_aarch64_simd_xi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st4_lanev16qi ((__builtin_aarch64_simd_qi *) __ptr, __o,
				   __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_lane_u16 (uint16_t *__ptr, uint16x8x4_t __val, const int __lane)
{
  __builtin_aarch64_simd_xi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st4_lanev8hi ((__builtin_aarch64_simd_hi *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_lane_u32 (uint32_t *__ptr, uint32x4x4_t __val, const int __lane)
{
  __builtin_aarch64_simd_xi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st4_lanev4si ((__builtin_aarch64_simd_si *) __ptr, __o,
				  __lane);
}

__extension__ extern __inline void
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vst4q_lane_u64 (uint64_t *__ptr, uint64x2x4_t __val, const int __lane)
{
  __builtin_aarch64_simd_xi __o;
  __builtin_memcpy (&__o, &__val, sizeof (__val));
  __builtin_aarch64_st4_lanev2di ((__builtin_aarch64_simd_di *) __ptr, __o,
				  __lane);
}
| |
/* vaddlv: long add across vector — sum the two 32-bit lanes into a
   widened 64-bit scalar (SADDLV / UADDLV builtins).  */

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddlv_s32 (int32x2_t __a)
{
  return __builtin_aarch64_saddlvv2si (__a);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddlv_u32 (uint32x2_t __a)
{
  return __builtin_aarch64_uaddlvv2si_uu (__a);
}
| |
/* vqdmulh_laneq / vqrdmulh_laneq: saturating doubling multiply
   returning high half (the "r" variants round), where the second
   operand is a single lane of a 128-bit vector selected by __c.
   Thin wrappers over the sqdmulh/sqrdmulh laneq builtins.  */

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulh_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __c)
{
  return __builtin_aarch64_sqdmulh_laneqv4hi (__a, __b, __c);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulh_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __c)
{
  return __builtin_aarch64_sqdmulh_laneqv2si (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulhq_laneq_s16 (int16x8_t __a, int16x8_t __b, const int __c)
{
  return __builtin_aarch64_sqdmulh_laneqv8hi (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqdmulhq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __c)
{
  return __builtin_aarch64_sqdmulh_laneqv4si (__a, __b, __c);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulh_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __c)
{
  return __builtin_aarch64_sqrdmulh_laneqv4hi (__a, __b, __c);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulh_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __c)
{
  return __builtin_aarch64_sqrdmulh_laneqv2si (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulhq_laneq_s16 (int16x8_t __a, int16x8_t __b, const int __c)
{
  return __builtin_aarch64_sqrdmulh_laneqv8hi (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulhq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __c)
{
  return __builtin_aarch64_sqrdmulh_laneqv4si (__a, __b, __c);
}
| |
| /* Table intrinsics. */ |
| |
/* vqtbl1: single-register table lookup — each byte of __idx selects a
   byte of the 128-bit table __tab.  vqtbx1 is the extension form: the
   corresponding byte of __r is kept where the index is out of range.
   The builtin suffixes (_ppu, _ssu, ...) encode the poly/signed/
   unsigned typing of result, table and index.  */

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl1_p8 (poly8x16_t __tab, uint8x8_t __idx)
{
  return __builtin_aarch64_qtbl1v8qi_ppu (__tab, __idx);
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl1_s8 (int8x16_t __tab, uint8x8_t __idx)
{
  return __builtin_aarch64_qtbl1v8qi_ssu (__tab, __idx);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl1_u8 (uint8x16_t __tab, uint8x8_t __idx)
{
  return __builtin_aarch64_qtbl1v8qi_uuu (__tab, __idx);
}

__extension__ extern __inline poly8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl1q_p8 (poly8x16_t __tab, uint8x16_t __idx)
{
  return __builtin_aarch64_qtbl1v16qi_ppu (__tab, __idx);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl1q_s8 (int8x16_t __tab, uint8x16_t __idx)
{
  return __builtin_aarch64_qtbl1v16qi_ssu (__tab, __idx);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbl1q_u8 (uint8x16_t __tab, uint8x16_t __idx)
{
  return __builtin_aarch64_qtbl1v16qi_uuu (__tab, __idx);
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx1_s8 (int8x8_t __r, int8x16_t __tab, uint8x8_t __idx)
{
  return __builtin_aarch64_qtbx1v8qi_sssu (__r, __tab, __idx);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx1_u8 (uint8x8_t __r, uint8x16_t __tab, uint8x8_t __idx)
{
  return __builtin_aarch64_qtbx1v8qi_uuuu (__r, __tab, __idx);
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx1_p8 (poly8x8_t __r, poly8x16_t __tab, uint8x8_t __idx)
{
  return __builtin_aarch64_qtbx1v8qi_pppu (__r, __tab, __idx);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx1q_s8 (int8x16_t __r, int8x16_t __tab, uint8x16_t __idx)
{
  return __builtin_aarch64_qtbx1v16qi_sssu (__r, __tab, __idx);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx1q_u8 (uint8x16_t __r, uint8x16_t __tab, uint8x16_t __idx)
{
  return __builtin_aarch64_qtbx1v16qi_uuuu (__r, __tab, __idx);
}

__extension__ extern __inline poly8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqtbx1q_p8 (poly8x16_t __r, poly8x16_t __tab, uint8x16_t __idx)
{
  return __builtin_aarch64_qtbx1v16qi_pppu (__r, __tab, __idx);
}
| |
| /* V7 legacy table intrinsics. */ |
| |
/* vtbl1..vtbl4: Armv7-compatible table lookups built on the AArch64
   TBL builtins.  The 64-bit table registers are concatenated into
   128-bit vectors: vtbl1 pads with a zero upper half, vtbl2 joins its
   two halves, and vtbl3/vtbl4 build a two-vector (OImode) table for
   qtbl2 (vtbl3 pads the second vector with zeros).  Out-of-range
   indices yield zero bytes, matching the v7 vtbl behaviour for the
   padded positions as well.  */

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl1_s8 (int8x8_t __tab, int8x8_t __idx)
{
  int8x16_t __temp = vcombine_s8 (__tab,
				  vcreate_s8 (__AARCH64_UINT64_C (0x0)));
  return __builtin_aarch64_qtbl1v8qi (__temp, __idx);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl1_u8 (uint8x8_t __tab, uint8x8_t __idx)
{
  uint8x16_t __temp = vcombine_u8 (__tab,
				   vcreate_u8 (__AARCH64_UINT64_C (0x0)));
  return __builtin_aarch64_qtbl1v8qi_uuu (__temp, __idx);
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl1_p8 (poly8x8_t __tab, uint8x8_t __idx)
{
  poly8x16_t __temp = vcombine_p8 (__tab,
				   vcreate_p8 (__AARCH64_UINT64_C (0x0)));
  return __builtin_aarch64_qtbl1v8qi_ppu (__temp, __idx);
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl2_s8 (int8x8x2_t __tab, int8x8_t __idx)
{
  int8x16_t __temp = vcombine_s8 (__tab.val[0], __tab.val[1]);
  return __builtin_aarch64_qtbl1v8qi (__temp, __idx);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl2_u8 (uint8x8x2_t __tab, uint8x8_t __idx)
{
  uint8x16_t __temp = vcombine_u8 (__tab.val[0], __tab.val[1]);
  return __builtin_aarch64_qtbl1v8qi_uuu (__temp, __idx);
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl2_p8 (poly8x8x2_t __tab, uint8x8_t __idx)
{
  poly8x16_t __temp = vcombine_p8 (__tab.val[0], __tab.val[1]);
  return __builtin_aarch64_qtbl1v8qi_ppu (__temp, __idx);
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl3_s8 (int8x8x3_t __tab, int8x8_t __idx)
{
  int8x16x2_t __temp;
  __builtin_aarch64_simd_oi __o;
  __temp.val[0] = vcombine_s8 (__tab.val[0], __tab.val[1]);
  __temp.val[1] = vcombine_s8 (__tab.val[2],
			       vcreate_s8 (__AARCH64_UINT64_C (0x0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  return __builtin_aarch64_qtbl2v8qi (__o, __idx);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl3_u8 (uint8x8x3_t __tab, uint8x8_t __idx)
{
  uint8x16x2_t __temp;
  __builtin_aarch64_simd_oi __o;
  __temp.val[0] = vcombine_u8 (__tab.val[0], __tab.val[1]);
  __temp.val[1] = vcombine_u8 (__tab.val[2],
			       vcreate_u8 (__AARCH64_UINT64_C (0x0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  return (uint8x8_t)__builtin_aarch64_qtbl2v8qi (__o, (int8x8_t)__idx);
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl3_p8 (poly8x8x3_t __tab, uint8x8_t __idx)
{
  poly8x16x2_t __temp;
  __builtin_aarch64_simd_oi __o;
  __temp.val[0] = vcombine_p8 (__tab.val[0], __tab.val[1]);
  __temp.val[1] = vcombine_p8 (__tab.val[2],
			       vcreate_p8 (__AARCH64_UINT64_C (0x0)));
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  return (poly8x8_t)__builtin_aarch64_qtbl2v8qi (__o, (int8x8_t)__idx);
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl4_s8 (int8x8x4_t __tab, int8x8_t __idx)
{
  int8x16x2_t __temp;
  __builtin_aarch64_simd_oi __o;
  __temp.val[0] = vcombine_s8 (__tab.val[0], __tab.val[1]);
  __temp.val[1] = vcombine_s8 (__tab.val[2], __tab.val[3]);
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  return __builtin_aarch64_qtbl2v8qi (__o, __idx);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbl4_u8 (uint8x8x4_t __tab, uint8x8_t __idx)
{
  uint8x16x2_t __temp;
  __builtin_aarch64_simd_oi __o;
  __temp.val[0] = vcombine_u8 (__tab.val[0], __tab.val[1]);
  __temp.val[1] = vcombine_u8 (__tab.val[2], __tab.val[3]);
  __builtin_memcpy (&__o, &__temp, sizeof (__temp));
  return (uint8x8_t)__builtin_aarch64_qtbl2v8qi (__o, (int8x8_t)__idx);
}
| |
| __extension__ extern __inline poly8x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vtbl4_p8 (poly8x8x4_t __tab, uint8x8_t __idx) |
| { |
| poly8x16x2_t __temp; |
| __builtin_aarch64_simd_oi __o; |
| __temp.val[0] = vcombine_p8 (__tab.val[0], __tab.val[1]); |
| __temp.val[1] = vcombine_p8 (__tab.val[2], __tab.val[3]); |
| __builtin_memcpy (&__o, &__temp, sizeof (__temp)); |
| return(poly8x8_t)__builtin_aarch64_qtbl2v8qi (__o, (int8x8_t)__idx); |
| } |
| |
/* vtbx2: v7-style two-register table lookup with extension — bytes of
   __r are preserved where __idx is out of range.  The two 64-bit table
   halves are joined into one 128-bit vector for the AArch64 qtbx1
   builtin; the range semantics match v7 exactly, so no fixup of
   out-of-range indices is needed.  */

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbx2_s8 (int8x8_t __r, int8x8x2_t __tab, int8x8_t __idx)
{
  int8x16_t __temp = vcombine_s8 (__tab.val[0], __tab.val[1]);
  return __builtin_aarch64_qtbx1v8qi (__r, __temp, __idx);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbx2_u8 (uint8x8_t __r, uint8x8x2_t __tab, uint8x8_t __idx)
{
  uint8x16_t __temp = vcombine_u8 (__tab.val[0], __tab.val[1]);
  return __builtin_aarch64_qtbx1v8qi_uuuu (__r, __temp, __idx);
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vtbx2_p8 (poly8x8_t __r, poly8x8x2_t __tab, uint8x8_t __idx)
{
  poly8x16_t __temp = vcombine_p8 (__tab.val[0], __tab.val[1]);
  return __builtin_aarch64_qtbx1v8qi_pppu (__r, __temp, __idx);
}
| |
| /* End of temporary inline asm. */ |
| |
| /* Start of optimal implementations in approved order. */ |
| |
| /* vabd. */ |
| |
/* vabd: floating-point absolute difference, |__a - __b| (FABD).  The
   64-bit float64x1_t form is implemented on the scalar vabdd_f64 since
   it holds a single element.  */

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabds_f32 (float32_t __a, float32_t __b)
{
  return __builtin_aarch64_fabdsf (__a, __b);
}

__extension__ extern __inline float64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdd_f64 (float64_t __a, float64_t __b)
{
  return __builtin_aarch64_fabddf (__a, __b);
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabd_f32 (float32x2_t __a, float32x2_t __b)
{
  return __builtin_aarch64_fabdv2sf (__a, __b);
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabd_f64 (float64x1_t __a, float64x1_t __b)
{
  return (float64x1_t) {vabdd_f64 (vget_lane_f64 (__a, 0),
				   vget_lane_f64 (__b, 0))};
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdq_f32 (float32x4_t __a, float32x4_t __b)
{
  return __builtin_aarch64_fabdv4sf (__a, __b);
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabdq_f64 (float64x2_t __a, float64x2_t __b)
{
  return __builtin_aarch64_fabdv2df (__a, __b);
}
| |
| /* vabs */ |
| |
/* vabs: element-wise absolute value.  Single-element 64-bit forms
   (float64x1_t, int64x1_t) are computed on the scalar lane and
   rebuilt with a compound literal; the rest map directly to the
   vector abs builtins.  */

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabs_f32 (float32x2_t __a)
{
  return __builtin_aarch64_absv2sf (__a);
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabs_f64 (float64x1_t __a)
{
  return (float64x1_t) {__builtin_fabs (__a[0])};
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabs_s8 (int8x8_t __a)
{
  return __builtin_aarch64_absv8qi (__a);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabs_s16 (int16x4_t __a)
{
  return __builtin_aarch64_absv4hi (__a);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabs_s32 (int32x2_t __a)
{
  return __builtin_aarch64_absv2si (__a);
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabs_s64 (int64x1_t __a)
{
  return (int64x1_t) {__builtin_aarch64_absdi (__a[0])};
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabsq_f32 (float32x4_t __a)
{
  return __builtin_aarch64_absv4sf (__a);
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabsq_f64 (float64x2_t __a)
{
  return __builtin_aarch64_absv2df (__a);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabsq_s8 (int8x16_t __a)
{
  return __builtin_aarch64_absv16qi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabsq_s16 (int16x8_t __a)
{
  return __builtin_aarch64_absv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabsq_s32 (int32x4_t __a)
{
  return __builtin_aarch64_absv4si (__a);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabsq_s64 (int64x2_t __a)
{
  return __builtin_aarch64_absv2di (__a);
}
| |
| /* Try to avoid moving between integer and vector registers. |
| For why the cast to unsigned is needed check the vnegd_s64 intrinsic. |
| There is a testcase related to this issue: |
| gcc.target/aarch64/vabsd_s64.c. */ |
| |
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vabsd_s64 (int64_t __a)
{
  /* Scalar absolute value.  The negation is done in the unsigned
     domain so that negating INT64_MIN is well defined (plain signed
     negation of INT64_MIN would overflow).  */
  if (__a < 0)
    return - (uint64_t) __a;
  return __a;
}
| |
| /* vadd */ |
| |
__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddd_s64 (int64_t __a, int64_t __b)
{
  /* Scalar 64-bit signed addition.  */
  int64_t __sum = __a + __b;
  return __sum;
}
| |
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddd_u64 (uint64_t __a, uint64_t __b)
{
  /* Scalar 64-bit unsigned addition (wraps modulo 2^64).  */
  uint64_t __sum = __a + __b;
  return __sum;
}
| |
| /* vaddv */ |
| |
/* vaddv: add across vector — sum all lanes into a scalar of the
   element type (ADDV).  The unsigned forms reuse the signed builtins
   through casts; two's-complement addition makes the bit pattern of
   the sum identical either way.  */

__extension__ extern __inline int8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddv_s8 (int8x8_t __a)
{
  return __builtin_aarch64_reduc_plus_scal_v8qi (__a);
}

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddv_s16 (int16x4_t __a)
{
  return __builtin_aarch64_reduc_plus_scal_v4hi (__a);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddv_s32 (int32x2_t __a)
{
  return __builtin_aarch64_reduc_plus_scal_v2si (__a);
}

__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddv_u8 (uint8x8_t __a)
{
  return (uint8_t) __builtin_aarch64_reduc_plus_scal_v8qi ((int8x8_t) __a);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddv_u16 (uint16x4_t __a)
{
  return (uint16_t) __builtin_aarch64_reduc_plus_scal_v4hi ((int16x4_t) __a);
}
| |
| __extension__ extern __inline uint32_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vaddv_u32 (uint32x2_t __a) |
| { |
| return (int32_t) __builtin_aarch64_reduc_plus_scal_v2si ((int32x2_t) __a); |
| } |
| |
/* vaddvq_*: across-lanes add reduction of a 128-bit vector.  As with
   the 64-bit forms, the unsigned variants cast through the signed
   builtin; the bit pattern of the sum is unaffected.  */
__extension__ extern __inline int8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddvq_s8 (int8x16_t __a)
{
  return __builtin_aarch64_reduc_plus_scal_v16qi (__a);
}

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddvq_s16 (int16x8_t __a)
{
  return __builtin_aarch64_reduc_plus_scal_v8hi (__a);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddvq_s32 (int32x4_t __a)
{
  return __builtin_aarch64_reduc_plus_scal_v4si (__a);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddvq_s64 (int64x2_t __a)
{
  return __builtin_aarch64_reduc_plus_scal_v2di (__a);
}

__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddvq_u8 (uint8x16_t __a)
{
  return (uint8_t) __builtin_aarch64_reduc_plus_scal_v16qi ((int8x16_t) __a);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddvq_u16 (uint16x8_t __a)
{
  return (uint16_t) __builtin_aarch64_reduc_plus_scal_v8hi ((int16x8_t) __a);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddvq_u32 (uint32x4_t __a)
{
  return (uint32_t) __builtin_aarch64_reduc_plus_scal_v4si ((int32x4_t) __a);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddvq_u64 (uint64x2_t __a)
{
  return (uint64_t) __builtin_aarch64_reduc_plus_scal_v2di ((int64x2_t) __a);
}
| |
/* Floating-point across-lanes add reduction: sum of all lanes,
   returned as a scalar.  */
__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddv_f32 (float32x2_t __a)
{
  return __builtin_aarch64_reduc_plus_scal_v2sf (__a);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddvq_f32 (float32x4_t __a)
{
  return __builtin_aarch64_reduc_plus_scal_v4sf (__a);
}

__extension__ extern __inline float64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaddvq_f64 (float64x2_t __a)
{
  return __builtin_aarch64_reduc_plus_scal_v2df (__a);
}
| |
| /* vbsl */ |
| |
/* vbsl_*: bitwise select (BSL) on 64-bit vectors.  Per the ACLE, each
   result bit is taken from __b where the corresponding bit of the mask
   __a is set, otherwise from __c.  The builtin suffix encodes the
   signedness of the operands (mask is always unsigned).  The 64x1
   variants operate on element 0 and rebuild the one-element vector
   from the scalar builtin's result.  */
__extension__ extern __inline float16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_f16 (uint16x4_t __a, float16x4_t __b, float16x4_t __c)
{
  return __builtin_aarch64_simd_bslv4hf_suss (__a, __b, __c);
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_f32 (uint32x2_t __a, float32x2_t __b, float32x2_t __c)
{
  return __builtin_aarch64_simd_bslv2sf_suss (__a, __b, __c);
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_f64 (uint64x1_t __a, float64x1_t __b, float64x1_t __c)
{
  return (float64x1_t)
    { __builtin_aarch64_simd_bsldf_suss (__a[0], __b[0], __c[0]) };
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_p8 (uint8x8_t __a, poly8x8_t __b, poly8x8_t __c)
{
  return __builtin_aarch64_simd_bslv8qi_pupp (__a, __b, __c);
}

__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_p16 (uint16x4_t __a, poly16x4_t __b, poly16x4_t __c)
{
  return __builtin_aarch64_simd_bslv4hi_pupp (__a, __b, __c);
}
__extension__ extern __inline poly64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_p64 (uint64x1_t __a, poly64x1_t __b, poly64x1_t __c)
{
  return (poly64x1_t)
    {__builtin_aarch64_simd_bsldi_pupp (__a[0], __b[0], __c[0])};
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_s8 (uint8x8_t __a, int8x8_t __b, int8x8_t __c)
{
  return __builtin_aarch64_simd_bslv8qi_suss (__a, __b, __c);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_s16 (uint16x4_t __a, int16x4_t __b, int16x4_t __c)
{
  return __builtin_aarch64_simd_bslv4hi_suss (__a, __b, __c);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_s32 (uint32x2_t __a, int32x2_t __b, int32x2_t __c)
{
  return __builtin_aarch64_simd_bslv2si_suss (__a, __b, __c);
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_s64 (uint64x1_t __a, int64x1_t __b, int64x1_t __c)
{
  return (int64x1_t)
    {__builtin_aarch64_simd_bsldi_suss (__a[0], __b[0], __c[0])};
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
{
  return __builtin_aarch64_simd_bslv8qi_uuuu (__a, __b, __c);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
{
  return __builtin_aarch64_simd_bslv4hi_uuuu (__a, __b, __c);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
{
  return __builtin_aarch64_simd_bslv2si_uuuu (__a, __b, __c);
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbsl_u64 (uint64x1_t __a, uint64x1_t __b, uint64x1_t __c)
{
  return (uint64x1_t)
    {__builtin_aarch64_simd_bsldi_uuuu (__a[0], __b[0], __c[0])};
}
| |
/* vbslq_*: bitwise select (BSL) on 128-bit vectors; same semantics as
   the vbsl_* forms above -- result bits come from __b where the mask
   __a is set, else from __c.  */
__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_f16 (uint16x8_t __a, float16x8_t __b, float16x8_t __c)
{
  return __builtin_aarch64_simd_bslv8hf_suss (__a, __b, __c);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_f32 (uint32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return __builtin_aarch64_simd_bslv4sf_suss (__a, __b, __c);
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_f64 (uint64x2_t __a, float64x2_t __b, float64x2_t __c)
{
  return __builtin_aarch64_simd_bslv2df_suss (__a, __b, __c);
}

__extension__ extern __inline poly8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_p8 (uint8x16_t __a, poly8x16_t __b, poly8x16_t __c)
{
  return __builtin_aarch64_simd_bslv16qi_pupp (__a, __b, __c);
}

__extension__ extern __inline poly16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_p16 (uint16x8_t __a, poly16x8_t __b, poly16x8_t __c)
{
  return __builtin_aarch64_simd_bslv8hi_pupp (__a, __b, __c);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_s8 (uint8x16_t __a, int8x16_t __b, int8x16_t __c)
{
  return __builtin_aarch64_simd_bslv16qi_suss (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_s16 (uint16x8_t __a, int16x8_t __b, int16x8_t __c)
{
  return __builtin_aarch64_simd_bslv8hi_suss (__a, __b, __c);
}

__extension__ extern __inline poly64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_p64 (uint64x2_t __a, poly64x2_t __b, poly64x2_t __c)
{
  return __builtin_aarch64_simd_bslv2di_pupp (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_s32 (uint32x4_t __a, int32x4_t __b, int32x4_t __c)
{
  return __builtin_aarch64_simd_bslv4si_suss (__a, __b, __c);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_s64 (uint64x2_t __a, int64x2_t __b, int64x2_t __c)
{
  return __builtin_aarch64_simd_bslv2di_suss (__a, __b, __c);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
{
  return __builtin_aarch64_simd_bslv16qi_uuuu (__a, __b, __c);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
{
  return __builtin_aarch64_simd_bslv8hi_uuuu (__a, __b, __c);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return __builtin_aarch64_simd_bslv4si_uuuu (__a, __b, __c);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vbslq_u64 (uint64x2_t __a, uint64x2_t __b, uint64x2_t __c)
{
  return __builtin_aarch64_simd_bslv2di_uuuu (__a, __b, __c);
}
| |
/* ARMv8.1-A intrinsics.  */
#pragma GCC push_options
#pragma GCC target ("+nothing+rdma")

/* vqrdmlah / vqrdmlsh: saturating rounding doubling multiply
   accumulate (SQRDMLAH) and subtract (SQRDMLSH), gated on the RDMA
   (ARMv8.1-A) extension via the target pragma above.  Naming scheme:
   plain = vector x vector; _lane = multiplicand lane __d taken from a
   64-bit vector __c; _laneq = lane taken from a 128-bit vector;
   h/s-suffixed forms are the 16-bit / 32-bit scalar variants.  */
__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlah_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
{
  return __builtin_aarch64_sqrdmlahv4hi (__a, __b, __c);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlah_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
{
  return __builtin_aarch64_sqrdmlahv2si (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
{
  return __builtin_aarch64_sqrdmlahv8hi (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
{
  return __builtin_aarch64_sqrdmlahv4si (__a, __b, __c);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlsh_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
{
  return __builtin_aarch64_sqrdmlshv4hi (__a, __b, __c);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlsh_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
{
  return __builtin_aarch64_sqrdmlshv2si (__a, __b, __c);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
{
  return __builtin_aarch64_sqrdmlshv8hi (__a, __b, __c);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
{
  return __builtin_aarch64_sqrdmlshv4si (__a, __b, __c);
}

/* _laneq forms: multiply by lane __d of the 128-bit vector __c.  */
__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlah_laneq_s16 (int16x4_t __a, int16x4_t __b, int16x8_t __c, const int __d)
{
  return __builtin_aarch64_sqrdmlah_laneqv4hi (__a, __b, __c, __d);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlah_laneq_s32 (int32x2_t __a, int32x2_t __b, int32x4_t __c, const int __d)
{
  return __builtin_aarch64_sqrdmlah_laneqv2si (__a, __b, __c, __d);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahq_laneq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c, const int __d)
{
  return __builtin_aarch64_sqrdmlah_laneqv8hi (__a, __b, __c, __d);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahq_laneq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c, const int __d)
{
  return __builtin_aarch64_sqrdmlah_laneqv4si (__a, __b, __c, __d);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlsh_laneq_s16 (int16x4_t __a, int16x4_t __b, int16x8_t __c, const int __d)
{
  return __builtin_aarch64_sqrdmlsh_laneqv4hi (__a, __b, __c, __d);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlsh_laneq_s32 (int32x2_t __a, int32x2_t __b, int32x4_t __c, const int __d)
{
  return __builtin_aarch64_sqrdmlsh_laneqv2si (__a, __b, __c, __d);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshq_laneq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c, const int __d)
{
  return __builtin_aarch64_sqrdmlsh_laneqv8hi (__a, __b, __c, __d);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshq_laneq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c, const int __d)
{
  return __builtin_aarch64_sqrdmlsh_laneqv4si (__a, __b, __c, __d);
}

/* _lane forms: multiply by lane __d of the 64-bit vector __c.  */
__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlah_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
{
  return __builtin_aarch64_sqrdmlah_lanev4hi (__a, __b, __c, __d);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlah_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
{
  return __builtin_aarch64_sqrdmlah_lanev2si (__a, __b, __c, __d);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __d)
{
  return __builtin_aarch64_sqrdmlah_lanev8hi (__a, __b, __c, __d);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __d)
{
  return __builtin_aarch64_sqrdmlah_lanev4si (__a, __b, __c, __d);
}

/* Scalar variants.  */
__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahh_s16 (int16_t __a, int16_t __b, int16_t __c)
{
  return (int16_t) __builtin_aarch64_sqrdmlahhi (__a, __b, __c);
}

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahh_lane_s16 (int16_t __a, int16_t __b, int16x4_t __c, const int __d)
{
  return __builtin_aarch64_sqrdmlah_lanehi (__a, __b, __c, __d);
}

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahh_laneq_s16 (int16_t __a, int16_t __b, int16x8_t __c, const int __d)
{
  return __builtin_aarch64_sqrdmlah_laneqhi (__a, __b, __c, __d);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahs_s32 (int32_t __a, int32_t __b, int32_t __c)
{
  return (int32_t) __builtin_aarch64_sqrdmlahsi (__a, __b, __c);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahs_lane_s32 (int32_t __a, int32_t __b, int32x2_t __c, const int __d)
{
  return __builtin_aarch64_sqrdmlah_lanesi (__a, __b, __c, __d);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahs_laneq_s32 (int32_t __a, int32_t __b, int32x4_t __c, const int __d)
{
  return __builtin_aarch64_sqrdmlah_laneqsi (__a, __b, __c, __d);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlsh_lane_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
{
  return __builtin_aarch64_sqrdmlsh_lanev4hi (__a, __b, __c, __d);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlsh_lane_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
{
  return __builtin_aarch64_sqrdmlsh_lanev2si (__a, __b, __c, __d);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshq_lane_s16 (int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __d)
{
  return __builtin_aarch64_sqrdmlsh_lanev8hi (__a, __b, __c, __d);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshq_lane_s32 (int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __d)
{
  return __builtin_aarch64_sqrdmlsh_lanev4si (__a, __b, __c, __d);
}

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshh_s16 (int16_t __a, int16_t __b, int16_t __c)
{
  return (int16_t) __builtin_aarch64_sqrdmlshhi (__a, __b, __c);
}

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshh_lane_s16 (int16_t __a, int16_t __b, int16x4_t __c, const int __d)
{
  return __builtin_aarch64_sqrdmlsh_lanehi (__a, __b, __c, __d);
}

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshh_laneq_s16 (int16_t __a, int16_t __b, int16x8_t __c, const int __d)
{
  return __builtin_aarch64_sqrdmlsh_laneqhi (__a, __b, __c, __d);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshs_s32 (int32_t __a, int32_t __b, int32_t __c)
{
  return (int32_t) __builtin_aarch64_sqrdmlshsi (__a, __b, __c);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshs_lane_s32 (int32_t __a, int32_t __b, int32x2_t __c, const int __d)
{
  return __builtin_aarch64_sqrdmlsh_lanesi (__a, __b, __c, __d);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshs_laneq_s32 (int32_t __a, int32_t __b, int32x4_t __c, const int __d)
{
  return __builtin_aarch64_sqrdmlsh_laneqsi (__a, __b, __c, __d);
}
#pragma GCC pop_options
| |
#pragma GCC push_options
#pragma GCC target ("+nothing+crypto")
/* vaes: AES primitives, gated on the crypto extension.
   vaeseq/vaesdq = single round of encryption/decryption (AESE/AESD);
   vaesmcq/vaesimcq = (inverse) mix columns (AESMC/AESIMC).  */

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaeseq_u8 (uint8x16_t data, uint8x16_t key)
{
  return __builtin_aarch64_crypto_aesev16qi_uuu (data, key);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaesdq_u8 (uint8x16_t data, uint8x16_t key)
{
  return __builtin_aarch64_crypto_aesdv16qi_uuu (data, key);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaesmcq_u8 (uint8x16_t data)
{
  return __builtin_aarch64_crypto_aesmcv16qi_uu (data);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vaesimcq_u8 (uint8x16_t data)
{
  return __builtin_aarch64_crypto_aesimcv16qi_uu (data);
}
#pragma GCC pop_options
| |
| /* vcage */ |
| |
/* vcage: absolute compare greater than or equal, |__a| >= |__b|.
   Vector forms build on vabs*/
/* plus the GCC vector >= operator, which yields all-ones lanes on
   true; scalar forms return all-ones (-1 converted to unsigned) on
   true, zero otherwise.  */
__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcage_f64 (float64x1_t __a, float64x1_t __b)
{
  return vabs_f64 (__a) >= vabs_f64 (__b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcages_f32 (float32_t __a, float32_t __b)
{
  return __builtin_fabsf (__a) >= __builtin_fabsf (__b) ? -1 : 0;
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcage_f32 (float32x2_t __a, float32x2_t __b)
{
  return vabs_f32 (__a) >= vabs_f32 (__b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcageq_f32 (float32x4_t __a, float32x4_t __b)
{
  return vabsq_f32 (__a) >= vabsq_f32 (__b);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcaged_f64 (float64_t __a, float64_t __b)
{
  return __builtin_fabs (__a) >= __builtin_fabs (__b) ? -1 : 0;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcageq_f64 (float64x2_t __a, float64x2_t __b)
{
  return vabsq_f64 (__a) >= vabsq_f64 (__b);
}
| |
| /* vcagt */ |
| |
/* vcagt: absolute compare greater than, |__a| > |__b|; all-ones lanes
   (or an all-ones scalar) on true, zero otherwise.  */
__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcagts_f32 (float32_t __a, float32_t __b)
{
  return __builtin_fabsf (__a) > __builtin_fabsf (__b) ? -1 : 0;
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcagt_f32 (float32x2_t __a, float32x2_t __b)
{
  return vabs_f32 (__a) > vabs_f32 (__b);
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcagt_f64 (float64x1_t __a, float64x1_t __b)
{
  return vabs_f64 (__a) > vabs_f64 (__b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcagtq_f32 (float32x4_t __a, float32x4_t __b)
{
  return vabsq_f32 (__a) > vabsq_f32 (__b);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcagtd_f64 (float64_t __a, float64_t __b)
{
  return __builtin_fabs (__a) > __builtin_fabs (__b) ? -1 : 0;
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcagtq_f64 (float64x2_t __a, float64x2_t __b)
{
  return vabsq_f64 (__a) > vabsq_f64 (__b);
}
| |
| /* vcale */ |
| |
/* vcale: absolute compare less than or equal, |__a| <= |__b|; all-ones
   lanes (or an all-ones scalar) on true, zero otherwise.  */
__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcale_f32 (float32x2_t __a, float32x2_t __b)
{
  return vabs_f32 (__a) <= vabs_f32 (__b);
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcale_f64 (float64x1_t __a, float64x1_t __b)
{
  return vabs_f64 (__a) <= vabs_f64 (__b);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcaled_f64 (float64_t __a, float64_t __b)
{
  return __builtin_fabs (__a) <= __builtin_fabs (__b) ? -1 : 0;
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcales_f32 (float32_t __a, float32_t __b)
{
  return __builtin_fabsf (__a) <= __builtin_fabsf (__b) ? -1 : 0;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcaleq_f32 (float32x4_t __a, float32x4_t __b)
{
  return vabsq_f32 (__a) <= vabsq_f32 (__b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcaleq_f64 (float64x2_t __a, float64x2_t __b)
{
  return vabsq_f64 (__a) <= vabsq_f64 (__b);
}
| |
| /* vcalt */ |
| |
/* vcalt: absolute compare less than, |__a| < |__b|; all-ones lanes
   (or an all-ones scalar) on true, zero otherwise.  */
__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcalt_f32 (float32x2_t __a, float32x2_t __b)
{
  return vabs_f32 (__a) < vabs_f32 (__b);
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcalt_f64 (float64x1_t __a, float64x1_t __b)
{
  return vabs_f64 (__a) < vabs_f64 (__b);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcaltd_f64 (float64_t __a, float64_t __b)
{
  return __builtin_fabs (__a) < __builtin_fabs (__b) ? -1 : 0;
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcaltq_f32 (float32x4_t __a, float32x4_t __b)
{
  return vabsq_f32 (__a) < vabsq_f32 (__b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcaltq_f64 (float64x2_t __a, float64x2_t __b)
{
  return vabsq_f64 (__a) < vabsq_f64 (__b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcalts_f32 (float32_t __a, float32_t __b)
{
  return __builtin_fabsf (__a) < __builtin_fabsf (__b) ? -1 : 0;
}
| |
| /* vceq - vector. */ |
| |
/* vceq / vceqq: lane-wise equality compare (CMEQ).  Implemented with
   the GCC vector == operator, which yields an all-ones lane on equal
   and zero otherwise; signed, poly and float variants cast the signed
   boolean vector to the unsigned result type (same width).  */
__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceq_f32 (float32x2_t __a, float32x2_t __b)
{
  return (uint32x2_t) (__a == __b);
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceq_f64 (float64x1_t __a, float64x1_t __b)
{
  return (uint64x1_t) (__a == __b);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceq_p8 (poly8x8_t __a, poly8x8_t __b)
{
  return (uint8x8_t) (__a == __b);
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceq_p64 (poly64x1_t __a, poly64x1_t __b)
{
  return (uint64x1_t) (__a == __b);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceq_s8 (int8x8_t __a, int8x8_t __b)
{
  return (uint8x8_t) (__a == __b);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceq_s16 (int16x4_t __a, int16x4_t __b)
{
  return (uint16x4_t) (__a == __b);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceq_s32 (int32x2_t __a, int32x2_t __b)
{
  return (uint32x2_t) (__a == __b);
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceq_s64 (int64x1_t __a, int64x1_t __b)
{
  return (uint64x1_t) (__a == __b);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceq_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return (__a == __b);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceq_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return (__a == __b);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceq_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return (__a == __b);
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceq_u64 (uint64x1_t __a, uint64x1_t __b)
{
  return (__a == __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqq_f32 (float32x4_t __a, float32x4_t __b)
{
  return (uint32x4_t) (__a == __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqq_f64 (float64x2_t __a, float64x2_t __b)
{
  return (uint64x2_t) (__a == __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqq_p8 (poly8x16_t __a, poly8x16_t __b)
{
  return (uint8x16_t) (__a == __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqq_s8 (int8x16_t __a, int8x16_t __b)
{
  return (uint8x16_t) (__a == __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqq_s16 (int16x8_t __a, int16x8_t __b)
{
  return (uint16x8_t) (__a == __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqq_s32 (int32x4_t __a, int32x4_t __b)
{
  return (uint32x4_t) (__a == __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqq_s64 (int64x2_t __a, int64x2_t __b)
{
  return (uint64x2_t) (__a == __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return (__a == __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return (__a == __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return (__a == __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqq_u64 (uint64x2_t __a, uint64x2_t __b)
{
  return (__a == __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqq_p64 (poly64x2_t __a, poly64x2_t __b)
{
  return (__a == __b);
}
| |
| /* vceq - scalar. */ |
| |
| __extension__ extern __inline uint32_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqs_f32 (float32_t __a, float32_t __b) |
| { |
| return __a == __b ? -1 : 0; |
| } |
| |
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqd_s64 (int64_t __a, int64_t __b)
{
  /* Scalar CMEQ semantics: all-ones mask on equality, all-zeros
     otherwise.  (uint64_t) (cond) is 0 or 1; negating yields the mask.  */
  return -(uint64_t) (__a == __b);
}
| |
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqd_u64 (uint64_t __a, uint64_t __b)
{
  /* All-ones mask when the operands compare equal, zero otherwise.  */
  if (__a == __b)
    return ~(uint64_t) 0;
  return 0;
}
| |
| __extension__ extern __inline uint64_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqd_f64 (float64_t __a, float64_t __b) |
| { |
| return __a == __b ? -1ll : 0ll; |
| } |
| |
| /* vceqz - vector. */ |
| |
| __extension__ extern __inline uint32x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqz_f32 (float32x2_t __a) |
| { |
| return (uint32x2_t) (__a == 0.0f); |
| } |
| |
| __extension__ extern __inline uint64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqz_f64 (float64x1_t __a) |
| { |
| return (uint64x1_t) (__a == (float64x1_t) {0.0}); |
| } |
| |
| __extension__ extern __inline uint8x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqz_p8 (poly8x8_t __a) |
| { |
| return (uint8x8_t) (__a == 0); |
| } |
| |
| __extension__ extern __inline uint8x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqz_s8 (int8x8_t __a) |
| { |
| return (uint8x8_t) (__a == 0); |
| } |
| |
| __extension__ extern __inline uint16x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqz_s16 (int16x4_t __a) |
| { |
| return (uint16x4_t) (__a == 0); |
| } |
| |
| __extension__ extern __inline uint32x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqz_s32 (int32x2_t __a) |
| { |
| return (uint32x2_t) (__a == 0); |
| } |
| |
| __extension__ extern __inline uint64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqz_s64 (int64x1_t __a) |
| { |
| return (uint64x1_t) (__a == __AARCH64_INT64_C (0)); |
| } |
| |
| __extension__ extern __inline uint8x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqz_u8 (uint8x8_t __a) |
| { |
| return (__a == 0); |
| } |
| |
| __extension__ extern __inline uint16x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqz_u16 (uint16x4_t __a) |
| { |
| return (__a == 0); |
| } |
| |
| __extension__ extern __inline uint32x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqz_u32 (uint32x2_t __a) |
| { |
| return (__a == 0); |
| } |
| |
| __extension__ extern __inline uint64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqz_u64 (uint64x1_t __a) |
| { |
| return (__a == __AARCH64_UINT64_C (0)); |
| } |
| |
| __extension__ extern __inline uint64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqz_p64 (poly64x1_t __a) |
| { |
| return (__a == __AARCH64_UINT64_C (0)); |
| } |
| |
| __extension__ extern __inline uint32x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqzq_f32 (float32x4_t __a) |
| { |
| return (uint32x4_t) (__a == 0.0f); |
| } |
| |
| __extension__ extern __inline uint64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqzq_f64 (float64x2_t __a) |
| { |
| return (uint64x2_t) (__a == 0.0f); |
| } |
| |
| __extension__ extern __inline uint8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqzq_p8 (poly8x16_t __a) |
| { |
| return (uint8x16_t) (__a == 0); |
| } |
| |
| __extension__ extern __inline uint8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqzq_s8 (int8x16_t __a) |
| { |
| return (uint8x16_t) (__a == 0); |
| } |
| |
| __extension__ extern __inline uint16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqzq_s16 (int16x8_t __a) |
| { |
| return (uint16x8_t) (__a == 0); |
| } |
| |
| __extension__ extern __inline uint32x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqzq_s32 (int32x4_t __a) |
| { |
| return (uint32x4_t) (__a == 0); |
| } |
| |
| __extension__ extern __inline uint64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqzq_s64 (int64x2_t __a) |
| { |
| return (uint64x2_t) (__a == __AARCH64_INT64_C (0)); |
| } |
| |
| __extension__ extern __inline uint8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqzq_u8 (uint8x16_t __a) |
| { |
| return (__a == 0); |
| } |
| |
| __extension__ extern __inline uint16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqzq_u16 (uint16x8_t __a) |
| { |
| return (__a == 0); |
| } |
| |
| __extension__ extern __inline uint32x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqzq_u32 (uint32x4_t __a) |
| { |
| return (__a == 0); |
| } |
| |
| __extension__ extern __inline uint64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqzq_u64 (uint64x2_t __a) |
| { |
| return (__a == __AARCH64_UINT64_C (0)); |
| } |
| |
| __extension__ extern __inline uint64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqzq_p64 (poly64x2_t __a) |
| { |
| return (__a == __AARCH64_UINT64_C (0)); |
| } |
| |
| /* vceqz - scalar. */ |
| |
| __extension__ extern __inline uint32_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqzs_f32 (float32_t __a) |
| { |
| return __a == 0.0f ? -1 : 0; |
| } |
| |
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqzd_s64 (int64_t __a)
{
  /* All-ones mask when the input is zero, all-zeros otherwise.  */
  return (__a == 0) ? ~(uint64_t) 0 : (uint64_t) 0;
}
| |
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vceqzd_u64 (uint64_t __a)
{
  /* Zero test: returns the all-ones mask iff __a is zero.  */
  return -(uint64_t) (__a == 0);
}
| |
| __extension__ extern __inline uint64_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vceqzd_f64 (float64_t __a) |
| { |
| return __a == 0.0 ? -1ll : 0ll; |
| } |
| |
| /* vcge - vector. */ |
| |
| __extension__ extern __inline uint32x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcge_f32 (float32x2_t __a, float32x2_t __b) |
| { |
| return (uint32x2_t) (__a >= __b); |
| } |
| |
| __extension__ extern __inline uint64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcge_f64 (float64x1_t __a, float64x1_t __b) |
| { |
| return (uint64x1_t) (__a >= __b); |
| } |
| |
| __extension__ extern __inline uint8x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcge_s8 (int8x8_t __a, int8x8_t __b) |
| { |
| return (uint8x8_t) (__a >= __b); |
| } |
| |
| __extension__ extern __inline uint16x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcge_s16 (int16x4_t __a, int16x4_t __b) |
| { |
| return (uint16x4_t) (__a >= __b); |
| } |
| |
| __extension__ extern __inline uint32x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcge_s32 (int32x2_t __a, int32x2_t __b) |
| { |
| return (uint32x2_t) (__a >= __b); |
| } |
| |
| __extension__ extern __inline uint64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcge_s64 (int64x1_t __a, int64x1_t __b) |
| { |
| return (uint64x1_t) (__a >= __b); |
| } |
| |
| __extension__ extern __inline uint8x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcge_u8 (uint8x8_t __a, uint8x8_t __b) |
| { |
| return (__a >= __b); |
| } |
| |
| __extension__ extern __inline uint16x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcge_u16 (uint16x4_t __a, uint16x4_t __b) |
| { |
| return (__a >= __b); |
| } |
| |
| __extension__ extern __inline uint32x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcge_u32 (uint32x2_t __a, uint32x2_t __b) |
| { |
| return (__a >= __b); |
| } |
| |
| __extension__ extern __inline uint64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcge_u64 (uint64x1_t __a, uint64x1_t __b) |
| { |
| return (__a >= __b); |
| } |
| |
| __extension__ extern __inline uint32x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgeq_f32 (float32x4_t __a, float32x4_t __b) |
| { |
| return (uint32x4_t) (__a >= __b); |
| } |
| |
| __extension__ extern __inline uint64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgeq_f64 (float64x2_t __a, float64x2_t __b) |
| { |
| return (uint64x2_t) (__a >= __b); |
| } |
| |
| __extension__ extern __inline uint8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgeq_s8 (int8x16_t __a, int8x16_t __b) |
| { |
| return (uint8x16_t) (__a >= __b); |
| } |
| |
| __extension__ extern __inline uint16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgeq_s16 (int16x8_t __a, int16x8_t __b) |
| { |
| return (uint16x8_t) (__a >= __b); |
| } |
| |
| __extension__ extern __inline uint32x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgeq_s32 (int32x4_t __a, int32x4_t __b) |
| { |
| return (uint32x4_t) (__a >= __b); |
| } |
| |
| __extension__ extern __inline uint64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgeq_s64 (int64x2_t __a, int64x2_t __b) |
| { |
| return (uint64x2_t) (__a >= __b); |
| } |
| |
| __extension__ extern __inline uint8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgeq_u8 (uint8x16_t __a, uint8x16_t __b) |
| { |
| return (__a >= __b); |
| } |
| |
| __extension__ extern __inline uint16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgeq_u16 (uint16x8_t __a, uint16x8_t __b) |
| { |
| return (__a >= __b); |
| } |
| |
| __extension__ extern __inline uint32x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgeq_u32 (uint32x4_t __a, uint32x4_t __b) |
| { |
| return (__a >= __b); |
| } |
| |
| __extension__ extern __inline uint64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgeq_u64 (uint64x2_t __a, uint64x2_t __b) |
| { |
| return (__a >= __b); |
| } |
| |
| /* vcge - scalar. */ |
| |
| __extension__ extern __inline uint32_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcges_f32 (float32_t __a, float32_t __b) |
| { |
| return __a >= __b ? -1 : 0; |
| } |
| |
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcged_s64 (int64_t __a, int64_t __b)
{
  /* Signed greater-than-or-equal: all-ones mask on success.  */
  if (__a >= __b)
    return ~(uint64_t) 0;
  return 0;
}
| |
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcged_u64 (uint64_t __a, uint64_t __b)
{
  /* Unsigned greater-than-or-equal: all-ones mask on success.  */
  return -(uint64_t) (__a >= __b);
}
| |
| __extension__ extern __inline uint64_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcged_f64 (float64_t __a, float64_t __b) |
| { |
| return __a >= __b ? -1ll : 0ll; |
| } |
| |
| /* vcgez - vector. */ |
| |
| __extension__ extern __inline uint32x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgez_f32 (float32x2_t __a) |
| { |
| return (uint32x2_t) (__a >= 0.0f); |
| } |
| |
| __extension__ extern __inline uint64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgez_f64 (float64x1_t __a) |
| { |
| return (uint64x1_t) (__a[0] >= (float64x1_t) {0.0}); |
| } |
| |
| __extension__ extern __inline uint8x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgez_s8 (int8x8_t __a) |
| { |
| return (uint8x8_t) (__a >= 0); |
| } |
| |
| __extension__ extern __inline uint16x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgez_s16 (int16x4_t __a) |
| { |
| return (uint16x4_t) (__a >= 0); |
| } |
| |
| __extension__ extern __inline uint32x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgez_s32 (int32x2_t __a) |
| { |
| return (uint32x2_t) (__a >= 0); |
| } |
| |
| __extension__ extern __inline uint64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgez_s64 (int64x1_t __a) |
| { |
| return (uint64x1_t) (__a >= __AARCH64_INT64_C (0)); |
| } |
| |
| __extension__ extern __inline uint32x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgezq_f32 (float32x4_t __a) |
| { |
| return (uint32x4_t) (__a >= 0.0f); |
| } |
| |
| __extension__ extern __inline uint64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgezq_f64 (float64x2_t __a) |
| { |
| return (uint64x2_t) (__a >= 0.0); |
| } |
| |
| __extension__ extern __inline uint8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgezq_s8 (int8x16_t __a) |
| { |
| return (uint8x16_t) (__a >= 0); |
| } |
| |
| __extension__ extern __inline uint16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgezq_s16 (int16x8_t __a) |
| { |
| return (uint16x8_t) (__a >= 0); |
| } |
| |
| __extension__ extern __inline uint32x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgezq_s32 (int32x4_t __a) |
| { |
| return (uint32x4_t) (__a >= 0); |
| } |
| |
| __extension__ extern __inline uint64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgezq_s64 (int64x2_t __a) |
| { |
| return (uint64x2_t) (__a >= __AARCH64_INT64_C (0)); |
| } |
| |
| /* vcgez - scalar. */ |
| |
| __extension__ extern __inline uint32_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgezs_f32 (float32_t __a) |
| { |
| return __a >= 0.0f ? -1 : 0; |
| } |
| |
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgezd_s64 (int64_t __a)
{
  /* Non-negative test: all-ones mask when __a >= 0.  */
  return (__a >= 0) ? ~(uint64_t) 0 : (uint64_t) 0;
}
| |
| __extension__ extern __inline uint64_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgezd_f64 (float64_t __a) |
| { |
| return __a >= 0.0 ? -1ll : 0ll; |
| } |
| |
| /* vcgt - vector. */ |
| |
| __extension__ extern __inline uint32x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgt_f32 (float32x2_t __a, float32x2_t __b) |
| { |
| return (uint32x2_t) (__a > __b); |
| } |
| |
| __extension__ extern __inline uint64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgt_f64 (float64x1_t __a, float64x1_t __b) |
| { |
| return (uint64x1_t) (__a > __b); |
| } |
| |
| __extension__ extern __inline uint8x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgt_s8 (int8x8_t __a, int8x8_t __b) |
| { |
| return (uint8x8_t) (__a > __b); |
| } |
| |
| __extension__ extern __inline uint16x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgt_s16 (int16x4_t __a, int16x4_t __b) |
| { |
| return (uint16x4_t) (__a > __b); |
| } |
| |
| __extension__ extern __inline uint32x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgt_s32 (int32x2_t __a, int32x2_t __b) |
| { |
| return (uint32x2_t) (__a > __b); |
| } |
| |
| __extension__ extern __inline uint64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgt_s64 (int64x1_t __a, int64x1_t __b) |
| { |
| return (uint64x1_t) (__a > __b); |
| } |
| |
| __extension__ extern __inline uint8x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgt_u8 (uint8x8_t __a, uint8x8_t __b) |
| { |
| return (__a > __b); |
| } |
| |
| __extension__ extern __inline uint16x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgt_u16 (uint16x4_t __a, uint16x4_t __b) |
| { |
| return (__a > __b); |
| } |
| |
| __extension__ extern __inline uint32x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgt_u32 (uint32x2_t __a, uint32x2_t __b) |
| { |
| return (__a > __b); |
| } |
| |
| __extension__ extern __inline uint64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgt_u64 (uint64x1_t __a, uint64x1_t __b) |
| { |
| return (__a > __b); |
| } |
| |
| __extension__ extern __inline uint32x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgtq_f32 (float32x4_t __a, float32x4_t __b) |
| { |
| return (uint32x4_t) (__a > __b); |
| } |
| |
| __extension__ extern __inline uint64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgtq_f64 (float64x2_t __a, float64x2_t __b) |
| { |
| return (uint64x2_t) (__a > __b); |
| } |
| |
| __extension__ extern __inline uint8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgtq_s8 (int8x16_t __a, int8x16_t __b) |
| { |
| return (uint8x16_t) (__a > __b); |
| } |
| |
| __extension__ extern __inline uint16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgtq_s16 (int16x8_t __a, int16x8_t __b) |
| { |
| return (uint16x8_t) (__a > __b); |
| } |
| |
| __extension__ extern __inline uint32x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgtq_s32 (int32x4_t __a, int32x4_t __b) |
| { |
| return (uint32x4_t) (__a > __b); |
| } |
| |
| __extension__ extern __inline uint64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgtq_s64 (int64x2_t __a, int64x2_t __b) |
| { |
| return (uint64x2_t) (__a > __b); |
| } |
| |
| __extension__ extern __inline uint8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgtq_u8 (uint8x16_t __a, uint8x16_t __b) |
| { |
| return (__a > __b); |
| } |
| |
| __extension__ extern __inline uint16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgtq_u16 (uint16x8_t __a, uint16x8_t __b) |
| { |
| return (__a > __b); |
| } |
| |
| __extension__ extern __inline uint32x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgtq_u32 (uint32x4_t __a, uint32x4_t __b) |
| { |
| return (__a > __b); |
| } |
| |
| __extension__ extern __inline uint64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgtq_u64 (uint64x2_t __a, uint64x2_t __b) |
| { |
| return (__a > __b); |
| } |
| |
| /* vcgt - scalar. */ |
| |
| __extension__ extern __inline uint32_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgts_f32 (float32_t __a, float32_t __b) |
| { |
| return __a > __b ? -1 : 0; |
| } |
| |
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtd_s64 (int64_t __a, int64_t __b)
{
  /* Signed greater-than: all-ones mask on success.  */
  return -(uint64_t) (__a > __b);
}
| |
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtd_u64 (uint64_t __a, uint64_t __b)
{
  /* Unsigned greater-than: all-ones mask on success.  */
  if (__a > __b)
    return ~(uint64_t) 0;
  return 0;
}
| |
| __extension__ extern __inline uint64_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgtd_f64 (float64_t __a, float64_t __b) |
| { |
| return __a > __b ? -1ll : 0ll; |
| } |
| |
| /* vcgtz - vector. */ |
| |
| __extension__ extern __inline uint32x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgtz_f32 (float32x2_t __a) |
| { |
| return (uint32x2_t) (__a > 0.0f); |
| } |
| |
| __extension__ extern __inline uint64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgtz_f64 (float64x1_t __a) |
| { |
| return (uint64x1_t) (__a > (float64x1_t) {0.0}); |
| } |
| |
| __extension__ extern __inline uint8x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgtz_s8 (int8x8_t __a) |
| { |
| return (uint8x8_t) (__a > 0); |
| } |
| |
| __extension__ extern __inline uint16x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgtz_s16 (int16x4_t __a) |
| { |
| return (uint16x4_t) (__a > 0); |
| } |
| |
| __extension__ extern __inline uint32x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgtz_s32 (int32x2_t __a) |
| { |
| return (uint32x2_t) (__a > 0); |
| } |
| |
| __extension__ extern __inline uint64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgtz_s64 (int64x1_t __a) |
| { |
| return (uint64x1_t) (__a > __AARCH64_INT64_C (0)); |
| } |
| |
| __extension__ extern __inline uint32x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgtzq_f32 (float32x4_t __a) |
| { |
| return (uint32x4_t) (__a > 0.0f); |
| } |
| |
| __extension__ extern __inline uint64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgtzq_f64 (float64x2_t __a) |
| { |
| return (uint64x2_t) (__a > 0.0); |
| } |
| |
| __extension__ extern __inline uint8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgtzq_s8 (int8x16_t __a) |
| { |
| return (uint8x16_t) (__a > 0); |
| } |
| |
| __extension__ extern __inline uint16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgtzq_s16 (int16x8_t __a) |
| { |
| return (uint16x8_t) (__a > 0); |
| } |
| |
| __extension__ extern __inline uint32x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgtzq_s32 (int32x4_t __a) |
| { |
| return (uint32x4_t) (__a > 0); |
| } |
| |
| __extension__ extern __inline uint64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgtzq_s64 (int64x2_t __a) |
| { |
| return (uint64x2_t) (__a > __AARCH64_INT64_C (0)); |
| } |
| |
| /* vcgtz - scalar. */ |
| |
| __extension__ extern __inline uint32_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgtzs_f32 (float32_t __a) |
| { |
| return __a > 0.0f ? -1 : 0; |
| } |
| |
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcgtzd_s64 (int64_t __a)
{
  /* Strictly-positive test: all-ones mask when __a > 0.  */
  return -(uint64_t) (__a > 0);
}
| |
| __extension__ extern __inline uint64_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcgtzd_f64 (float64_t __a) |
| { |
| return __a > 0.0 ? -1ll : 0ll; |
| } |
| |
| /* vcle - vector. */ |
| |
| __extension__ extern __inline uint32x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcle_f32 (float32x2_t __a, float32x2_t __b) |
| { |
| return (uint32x2_t) (__a <= __b); |
| } |
| |
| __extension__ extern __inline uint64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcle_f64 (float64x1_t __a, float64x1_t __b) |
| { |
| return (uint64x1_t) (__a <= __b); |
| } |
| |
| __extension__ extern __inline uint8x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcle_s8 (int8x8_t __a, int8x8_t __b) |
| { |
| return (uint8x8_t) (__a <= __b); |
| } |
| |
| __extension__ extern __inline uint16x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcle_s16 (int16x4_t __a, int16x4_t __b) |
| { |
| return (uint16x4_t) (__a <= __b); |
| } |
| |
| __extension__ extern __inline uint32x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcle_s32 (int32x2_t __a, int32x2_t __b) |
| { |
| return (uint32x2_t) (__a <= __b); |
| } |
| |
| __extension__ extern __inline uint64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcle_s64 (int64x1_t __a, int64x1_t __b) |
| { |
| return (uint64x1_t) (__a <= __b); |
| } |
| |
| __extension__ extern __inline uint8x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcle_u8 (uint8x8_t __a, uint8x8_t __b) |
| { |
| return (__a <= __b); |
| } |
| |
| __extension__ extern __inline uint16x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcle_u16 (uint16x4_t __a, uint16x4_t __b) |
| { |
| return (__a <= __b); |
| } |
| |
| __extension__ extern __inline uint32x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcle_u32 (uint32x2_t __a, uint32x2_t __b) |
| { |
| return (__a <= __b); |
| } |
| |
| __extension__ extern __inline uint64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcle_u64 (uint64x1_t __a, uint64x1_t __b) |
| { |
| return (__a <= __b); |
| } |
| |
| __extension__ extern __inline uint32x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcleq_f32 (float32x4_t __a, float32x4_t __b) |
| { |
| return (uint32x4_t) (__a <= __b); |
| } |
| |
| __extension__ extern __inline uint64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcleq_f64 (float64x2_t __a, float64x2_t __b) |
| { |
| return (uint64x2_t) (__a <= __b); |
| } |
| |
| __extension__ extern __inline uint8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcleq_s8 (int8x16_t __a, int8x16_t __b) |
| { |
| return (uint8x16_t) (__a <= __b); |
| } |
| |
| __extension__ extern __inline uint16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcleq_s16 (int16x8_t __a, int16x8_t __b) |
| { |
| return (uint16x8_t) (__a <= __b); |
| } |
| |
| __extension__ extern __inline uint32x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcleq_s32 (int32x4_t __a, int32x4_t __b) |
| { |
| return (uint32x4_t) (__a <= __b); |
| } |
| |
| __extension__ extern __inline uint64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcleq_s64 (int64x2_t __a, int64x2_t __b) |
| { |
| return (uint64x2_t) (__a <= __b); |
| } |
| |
| __extension__ extern __inline uint8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcleq_u8 (uint8x16_t __a, uint8x16_t __b) |
| { |
| return (__a <= __b); |
| } |
| |
| __extension__ extern __inline uint16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcleq_u16 (uint16x8_t __a, uint16x8_t __b) |
| { |
| return (__a <= __b); |
| } |
| |
| __extension__ extern __inline uint32x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcleq_u32 (uint32x4_t __a, uint32x4_t __b) |
| { |
| return (__a <= __b); |
| } |
| |
| __extension__ extern __inline uint64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcleq_u64 (uint64x2_t __a, uint64x2_t __b) |
| { |
| return (__a <= __b); |
| } |
| |
| /* vcle - scalar. */ |
| |
| __extension__ extern __inline uint32_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcles_f32 (float32_t __a, float32_t __b) |
| { |
| return __a <= __b ? -1 : 0; |
| } |
| |
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcled_s64 (int64_t __a, int64_t __b)
{
  /* Signed less-than-or-equal: all-ones mask on success.  */
  return (__a <= __b) ? ~(uint64_t) 0 : (uint64_t) 0;
}
| |
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcled_u64 (uint64_t __a, uint64_t __b)
{
  /* Unsigned less-than-or-equal: all-ones mask on success.  */
  if (__a <= __b)
    return ~(uint64_t) 0;
  return 0;
}
| |
| __extension__ extern __inline uint64_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcled_f64 (float64_t __a, float64_t __b) |
| { |
| return __a <= __b ? -1ll : 0ll; |
| } |
| |
| /* vclez - vector. */ |
| |
| __extension__ extern __inline uint32x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vclez_f32 (float32x2_t __a) |
| { |
| return (uint32x2_t) (__a <= 0.0f); |
| } |
| |
| __extension__ extern __inline uint64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vclez_f64 (float64x1_t __a) |
| { |
| return (uint64x1_t) (__a <= (float64x1_t) {0.0}); |
| } |
| |
| __extension__ extern __inline uint8x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vclez_s8 (int8x8_t __a) |
| { |
| return (uint8x8_t) (__a <= 0); |
| } |
| |
| __extension__ extern __inline uint16x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vclez_s16 (int16x4_t __a) |
| { |
| return (uint16x4_t) (__a <= 0); |
| } |
| |
| __extension__ extern __inline uint32x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vclez_s32 (int32x2_t __a) |
| { |
| return (uint32x2_t) (__a <= 0); |
| } |
| |
| __extension__ extern __inline uint64x1_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vclez_s64 (int64x1_t __a) |
| { |
| return (uint64x1_t) (__a <= __AARCH64_INT64_C (0)); |
| } |
| |
| __extension__ extern __inline uint32x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vclezq_f32 (float32x4_t __a) |
| { |
| return (uint32x4_t) (__a <= 0.0f); |
| } |
| |
| __extension__ extern __inline uint64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vclezq_f64 (float64x2_t __a) |
| { |
| return (uint64x2_t) (__a <= 0.0); |
| } |
| |
| __extension__ extern __inline uint8x16_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vclezq_s8 (int8x16_t __a) |
| { |
| return (uint8x16_t) (__a <= 0); |
| } |
| |
| __extension__ extern __inline uint16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vclezq_s16 (int16x8_t __a) |
| { |
| return (uint16x8_t) (__a <= 0); |
| } |
| |
| __extension__ extern __inline uint32x4_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vclezq_s32 (int32x4_t __a) |
| { |
| return (uint32x4_t) (__a <= 0); |
| } |
| |
| __extension__ extern __inline uint64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vclezq_s64 (int64x2_t __a) |
| { |
| return (uint64x2_t) (__a <= __AARCH64_INT64_C (0)); |
| } |
| |
| /* vclez - scalar. */ |
| |
| __extension__ extern __inline uint32_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vclezs_f32 (float32_t __a) |
| { |
| return __a <= 0.0f ? -1 : 0; |
| } |
| |
__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclezd_s64 (int64_t __a)
{
  /* Non-positive test: all-ones mask when __a <= 0.  */
  return -(uint64_t) (__a <= 0);
}
| |
| __extension__ extern __inline uint64_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vclezd_f64 (float64_t __a) |
| { |
| return __a <= 0.0 ? -1ll : 0ll; |
| } |
| |
/* vclt - vector.  Per-lane "__a < __b"; each true lane is all-ones, each
   false lane all-zeros.  Signed and floating-point inputs require a cast to
   the unsigned mask result type; unsigned inputs already compare to the
   correct unsigned vector type, so no cast is needed.  */

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclt_f32 (float32x2_t __a, float32x2_t __b)
{
  return (uint32x2_t) (__a < __b);
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclt_f64 (float64x1_t __a, float64x1_t __b)
{
  return (uint64x1_t) (__a < __b);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclt_s8 (int8x8_t __a, int8x8_t __b)
{
  return (uint8x8_t) (__a < __b);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclt_s16 (int16x4_t __a, int16x4_t __b)
{
  return (uint16x4_t) (__a < __b);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclt_s32 (int32x2_t __a, int32x2_t __b)
{
  return (uint32x2_t) (__a < __b);
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclt_s64 (int64x1_t __a, int64x1_t __b)
{
  return (uint64x1_t) (__a < __b);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclt_u8 (uint8x8_t __a, uint8x8_t __b)
{
  return (__a < __b);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclt_u16 (uint16x4_t __a, uint16x4_t __b)
{
  return (__a < __b);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclt_u32 (uint32x2_t __a, uint32x2_t __b)
{
  return (__a < __b);
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclt_u64 (uint64x1_t __a, uint64x1_t __b)
{
  return (__a < __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltq_f32 (float32x4_t __a, float32x4_t __b)
{
  return (uint32x4_t) (__a < __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltq_f64 (float64x2_t __a, float64x2_t __b)
{
  return (uint64x2_t) (__a < __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltq_s8 (int8x16_t __a, int8x16_t __b)
{
  return (uint8x16_t) (__a < __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltq_s16 (int16x8_t __a, int16x8_t __b)
{
  return (uint16x8_t) (__a < __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltq_s32 (int32x4_t __a, int32x4_t __b)
{
  return (uint32x4_t) (__a < __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltq_s64 (int64x2_t __a, int64x2_t __b)
{
  return (uint64x2_t) (__a < __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltq_u8 (uint8x16_t __a, uint8x16_t __b)
{
  return (__a < __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltq_u16 (uint16x8_t __a, uint16x8_t __b)
{
  return (__a < __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltq_u32 (uint32x4_t __a, uint32x4_t __b)
{
  return (__a < __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltq_u64 (uint64x2_t __a, uint64x2_t __b)
{
  return (__a < __b);
}
| |
/* vclt - scalar.  Returns -1 (all bits set) when __a < __b, else 0, matching
   the vector-form lane convention.  */

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclts_f32 (float32_t __a, float32_t __b)
{
  return __a < __b ? -1 : 0;
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltd_s64 (int64_t __a, int64_t __b)
{
  return __a < __b ? -1ll : 0ll;
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltd_u64 (uint64_t __a, uint64_t __b)
{
  return __a < __b ? -1ll : 0ll;
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltd_f64 (float64_t __a, float64_t __b)
{
  return __a < __b ? -1ll : 0ll;
}
| |
/* vcltz - vector.  Per-lane "__a < 0" test, producing all-ones/all-zeros
   lane masks cast to the matching unsigned type.  */

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltz_f32 (float32x2_t __a)
{
  return (uint32x2_t) (__a < 0.0f);
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltz_f64 (float64x1_t __a)
{
  /* NOTE(review): zero is spelled as a one-lane vector literal here, unlike
     the scalar 0.0f used by vcltz_f32 — presumably needed for the single-lane
     float64x1_t comparison; confirm against GCC vector-extension rules.  */
  return (uint64x1_t) (__a < (float64x1_t) {0.0});
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltz_s8 (int8x8_t __a)
{
  return (uint8x8_t) (__a < 0);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltz_s16 (int16x4_t __a)
{
  return (uint16x4_t) (__a < 0);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltz_s32 (int32x2_t __a)
{
  return (uint32x2_t) (__a < 0);
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltz_s64 (int64x1_t __a)
{
  /* 64-bit element comparand, as in vclezq_s64.  */
  return (uint64x1_t) (__a < __AARCH64_INT64_C (0));
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltzq_f32 (float32x4_t __a)
{
  return (uint32x4_t) (__a < 0.0f);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltzq_f64 (float64x2_t __a)
{
  return (uint64x2_t) (__a < 0.0);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltzq_s8 (int8x16_t __a)
{
  return (uint8x16_t) (__a < 0);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltzq_s16 (int16x8_t __a)
{
  return (uint16x8_t) (__a < 0);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltzq_s32 (int32x4_t __a)
{
  return (uint32x4_t) (__a < 0);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltzq_s64 (int64x2_t __a)
{
  return (uint64x2_t) (__a < __AARCH64_INT64_C (0));
}
| |
/* vcltz - scalar.  Returns -1 (all bits set) when __a < 0, else 0.  */

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltzs_f32 (float32_t __a)
{
  return __a < 0.0f ? -1 : 0;
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltzd_s64 (int64_t __a)
{
  return __a < 0 ? -1ll : 0ll;
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcltzd_f64 (float64_t __a)
{
  return __a < 0.0 ? -1ll : 0ll;
}
| |
/* vcls.  Count leading sign bits per lane, via GCC's __builtin_clrsb
   (count leading redundant sign bits).  The unsigned-input variants
   reinterpret the bits as signed before counting, and still return a
   signed result vector.  */

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcls_s8 (int8x8_t __a)
{
  return __builtin_aarch64_clrsbv8qi (__a);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcls_s16 (int16x4_t __a)
{
  return __builtin_aarch64_clrsbv4hi (__a);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcls_s32 (int32x2_t __a)
{
  return __builtin_aarch64_clrsbv2si (__a);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclsq_s8 (int8x16_t __a)
{
  return __builtin_aarch64_clrsbv16qi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclsq_s16 (int16x8_t __a)
{
  return __builtin_aarch64_clrsbv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclsq_s32 (int32x4_t __a)
{
  return __builtin_aarch64_clrsbv4si (__a);
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcls_u8 (uint8x8_t __a)
{
  /* Reinterpret the unsigned lanes as signed; the builtin only takes the
     signed vector type.  */
  return __builtin_aarch64_clrsbv8qi ((int8x8_t) __a);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcls_u16 (uint16x4_t __a)
{
  return __builtin_aarch64_clrsbv4hi ((int16x4_t) __a);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcls_u32 (uint32x2_t __a)
{
  return __builtin_aarch64_clrsbv2si ((int32x2_t) __a);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclsq_u8 (uint8x16_t __a)
{
  return __builtin_aarch64_clrsbv16qi ((int8x16_t) __a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclsq_u16 (uint16x8_t __a)
{
  return __builtin_aarch64_clrsbv8hi ((int16x8_t) __a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclsq_u32 (uint32x4_t __a)
{
  return __builtin_aarch64_clrsbv4si ((int32x4_t) __a);
}
| |
/* vclz.  Count leading zero bits per lane.  Unsigned variants cast to the
   signed vector type for the builtin and cast the result back; the bit
   pattern is unchanged either way.  */

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclz_s8 (int8x8_t __a)
{
  return __builtin_aarch64_clzv8qi (__a);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclz_s16 (int16x4_t __a)
{
  return __builtin_aarch64_clzv4hi (__a);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclz_s32 (int32x2_t __a)
{
  return __builtin_aarch64_clzv2si (__a);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclz_u8 (uint8x8_t __a)
{
  return (uint8x8_t)__builtin_aarch64_clzv8qi ((int8x8_t)__a);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclz_u16 (uint16x4_t __a)
{
  return (uint16x4_t)__builtin_aarch64_clzv4hi ((int16x4_t)__a);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclz_u32 (uint32x2_t __a)
{
  return (uint32x2_t)__builtin_aarch64_clzv2si ((int32x2_t)__a);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclzq_s8 (int8x16_t __a)
{
  return __builtin_aarch64_clzv16qi (__a);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclzq_s16 (int16x8_t __a)
{
  return __builtin_aarch64_clzv8hi (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclzq_s32 (int32x4_t __a)
{
  return __builtin_aarch64_clzv4si (__a);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclzq_u8 (uint8x16_t __a)
{
  return (uint8x16_t)__builtin_aarch64_clzv16qi ((int8x16_t)__a);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclzq_u16 (uint16x8_t __a)
{
  return (uint16x8_t)__builtin_aarch64_clzv8hi ((int16x8_t)__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vclzq_u32 (uint32x4_t __a)
{
  return (uint32x4_t)__builtin_aarch64_clzv4si ((int32x4_t)__a);
}
| |
/* vcnt.  Population count (number of set bits) per byte lane, via the
   aarch64 popcount builtin.  Poly and unsigned variants reinterpret to/from
   the signed byte vector type the builtin requires.  */

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcnt_p8 (poly8x8_t __a)
{
  return (poly8x8_t) __builtin_aarch64_popcountv8qi ((int8x8_t) __a);
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcnt_s8 (int8x8_t __a)
{
  return __builtin_aarch64_popcountv8qi (__a);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcnt_u8 (uint8x8_t __a)
{
  return (uint8x8_t) __builtin_aarch64_popcountv8qi ((int8x8_t) __a);
}

__extension__ extern __inline poly8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcntq_p8 (poly8x16_t __a)
{
  return (poly8x16_t) __builtin_aarch64_popcountv16qi ((int8x16_t) __a);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcntq_s8 (int8x16_t __a)
{
  return __builtin_aarch64_popcountv16qi (__a);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcntq_u8 (uint8x16_t __a)
{
  return (uint8x16_t) __builtin_aarch64_popcountv16qi ((int8x16_t) __a);
}
| |
/* vcopy_lane.  Copy lane __lane2 of the 64-bit vector __b into lane __lane1
   of the 64-bit vector __a, returning the updated __a.  Implemented as an
   extract (__aarch64_vget_lane_any) followed by an insert
   (__aarch64_vset_lane_any); both lane indices must be compile-time
   constants.  */

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopy_lane_f32 (float32x2_t __a, const int __lane1,
		float32x2_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopy_lane_f64 (float64x1_t __a, const int __lane1,
		float64x1_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopy_lane_p8 (poly8x8_t __a, const int __lane1,
	       poly8x8_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopy_lane_p16 (poly16x4_t __a, const int __lane1,
		poly16x4_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline poly64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopy_lane_p64 (poly64x1_t __a, const int __lane1,
		poly64x1_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopy_lane_s8 (int8x8_t __a, const int __lane1,
	       int8x8_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopy_lane_s16 (int16x4_t __a, const int __lane1,
		int16x4_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopy_lane_s32 (int32x2_t __a, const int __lane1,
		int32x2_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopy_lane_s64 (int64x1_t __a, const int __lane1,
		int64x1_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopy_lane_u8 (uint8x8_t __a, const int __lane1,
	       uint8x8_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopy_lane_u16 (uint16x4_t __a, const int __lane1,
		uint16x4_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopy_lane_u32 (uint32x2_t __a, const int __lane1,
		uint32x2_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopy_lane_u64 (uint64x1_t __a, const int __lane1,
		uint64x1_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}
| |
/* vcopy_laneq.  As vcopy_lane, but the source __b is a 128-bit (Q) vector:
   copy lane __lane2 of __b into lane __lane1 of the 64-bit vector __a.  */

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopy_laneq_f32 (float32x2_t __a, const int __lane1,
		 float32x4_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopy_laneq_f64 (float64x1_t __a, const int __lane1,
		 float64x2_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopy_laneq_p8 (poly8x8_t __a, const int __lane1,
		poly8x16_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopy_laneq_p16 (poly16x4_t __a, const int __lane1,
		 poly16x8_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline poly64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopy_laneq_p64 (poly64x1_t __a, const int __lane1,
		 poly64x2_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopy_laneq_s8 (int8x8_t __a, const int __lane1,
		int8x16_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopy_laneq_s16 (int16x4_t __a, const int __lane1,
		 int16x8_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopy_laneq_s32 (int32x2_t __a, const int __lane1,
		 int32x4_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopy_laneq_s64 (int64x1_t __a, const int __lane1,
		 int64x2_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopy_laneq_u8 (uint8x8_t __a, const int __lane1,
		uint8x16_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopy_laneq_u16 (uint16x4_t __a, const int __lane1,
		 uint16x8_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopy_laneq_u32 (uint32x2_t __a, const int __lane1,
		 uint32x4_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopy_laneq_u64 (uint64x1_t __a, const int __lane1,
		 uint64x2_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}
| |
/* vcopyq_lane.  Copy lane __lane2 of the 64-bit vector __b into lane
   __lane1 of the 128-bit (Q) vector __a.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopyq_lane_f32 (float32x4_t __a, const int __lane1,
		 float32x2_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopyq_lane_f64 (float64x2_t __a, const int __lane1,
		 float64x1_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline poly8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopyq_lane_p8 (poly8x16_t __a, const int __lane1,
		poly8x8_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline poly16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopyq_lane_p16 (poly16x8_t __a, const int __lane1,
		 poly16x4_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline poly64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopyq_lane_p64 (poly64x2_t __a, const int __lane1,
		 poly64x1_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopyq_lane_s8 (int8x16_t __a, const int __lane1,
		int8x8_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopyq_lane_s16 (int16x8_t __a, const int __lane1,
		 int16x4_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopyq_lane_s32 (int32x4_t __a, const int __lane1,
		 int32x2_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopyq_lane_s64 (int64x2_t __a, const int __lane1,
		 int64x1_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopyq_lane_u8 (uint8x16_t __a, const int __lane1,
		uint8x8_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopyq_lane_u16 (uint16x8_t __a, const int __lane1,
		 uint16x4_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopyq_lane_u32 (uint32x4_t __a, const int __lane1,
		 uint32x2_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopyq_lane_u64 (uint64x2_t __a, const int __lane1,
		 uint64x1_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}
| |
/* vcopyq_laneq.  Copy lane __lane2 of the 128-bit (Q) vector __b into lane
   __lane1 of the 128-bit vector __a.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopyq_laneq_f32 (float32x4_t __a, const int __lane1,
		  float32x4_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopyq_laneq_f64 (float64x2_t __a, const int __lane1,
		  float64x2_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline poly8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopyq_laneq_p8 (poly8x16_t __a, const int __lane1,
		 poly8x16_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline poly16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopyq_laneq_p16 (poly16x8_t __a, const int __lane1,
		  poly16x8_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline poly64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopyq_laneq_p64 (poly64x2_t __a, const int __lane1,
		  poly64x2_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopyq_laneq_s8 (int8x16_t __a, const int __lane1,
		 int8x16_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopyq_laneq_s16 (int16x8_t __a, const int __lane1,
		  int16x8_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopyq_laneq_s32 (int32x4_t __a, const int __lane1,
		  int32x4_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopyq_laneq_s64 (int64x2_t __a, const int __lane1,
		  int64x2_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopyq_laneq_u8 (uint8x16_t __a, const int __lane1,
		 uint8x16_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopyq_laneq_u16 (uint16x8_t __a, const int __lane1,
		  uint16x8_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopyq_laneq_u32 (uint32x4_t __a, const int __lane1,
		  uint32x4_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcopyq_laneq_u64 (uint64x2_t __a, const int __lane1,
		  uint64x2_t __b, const int __lane2)
{
  return __aarch64_vset_lane_any (__aarch64_vget_lane_any (__b, __lane2),
				  __a, __lane1);
}
| |
/* vcvt (double -> float).  Narrowing floating-point conversions.  The
   "_high_" forms convert __b into the upper half of the result, keeping
   __a (an already-narrowed low half) in the lower half.  */

__extension__ extern __inline float16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_f16_f32 (float32x4_t __a)
{
  return __builtin_aarch64_float_truncate_lo_v4hf (__a);
}

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_high_f16_f32 (float16x4_t __a, float32x4_t __b)
{
  return __builtin_aarch64_float_truncate_hi_v8hf (__a, __b);
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_f32_f64 (float64x2_t __a)
{
  return __builtin_aarch64_float_truncate_lo_v2sf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_high_f32_f64 (float32x2_t __a, float64x2_t __b)
{
  return __builtin_aarch64_float_truncate_hi_v4sf (__a, __b);
}
| |
/* vcvt, widening conversions (half -> float, float -> double).  */

/* Widen each half-precision element of __a to single precision.  */
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_f32_f16 (float16x4_t __a)
{
  return __builtin_aarch64_float_extend_lo_v4sf (__a);
}
| __extension__ extern __inline float64x2_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
| vcvt_f64_f32 (float32x2_t __a) |
| { |
| |
| return __builtin_aarch64_float_extend_lo_v2df (__a); |
| } |
| |
/* "_high" widening variants: convert the upper half of the input
   vector to the wider element type.  */

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_high_f32_f16 (float16x8_t __a)
{
  return __builtin_aarch64_vec_unpacks_hi_v8hf (__a);
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_high_f64_f32 (float32x4_t __a)
{
  return __builtin_aarch64_vec_unpacks_hi_v4sf (__a);
}
| |
/* vcvt (<u>fixed-point -> float).  __b is the fixed-point scaling
   argument passed straight to the SCVTF/UCVTF builtins (per ACLE, the
   number of fractional bits); it must be an integer constant.  The
   64x1 vector forms route through the scalar DI builtin on lane 0.  */

__extension__ extern __inline float64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtd_n_f64_s64 (int64_t __a, const int __b)
{
  return __builtin_aarch64_scvtfdi (__a, __b);
}

__extension__ extern __inline float64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtd_n_f64_u64 (uint64_t __a, const int __b)
{
  return __builtin_aarch64_ucvtfdi_sus (__a, __b);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvts_n_f32_s32 (int32_t __a, const int __b)
{
  return __builtin_aarch64_scvtfsi (__a, __b);
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvts_n_f32_u32 (uint32_t __a, const int __b)
{
  return __builtin_aarch64_ucvtfsi_sus (__a, __b);
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_n_f32_s32 (int32x2_t __a, const int __b)
{
  return __builtin_aarch64_scvtfv2si (__a, __b);
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_n_f32_u32 (uint32x2_t __a, const int __b)
{
  return __builtin_aarch64_ucvtfv2si_sus (__a, __b);
}

/* No v1di builtin exists, so convert lane 0 with the scalar builtin
   and rebuild the one-element vector.  */
__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_n_f64_s64 (int64x1_t __a, const int __b)
{
  return (float64x1_t)
    { __builtin_aarch64_scvtfdi (vget_lane_s64 (__a, 0), __b) };
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_n_f64_u64 (uint64x1_t __a, const int __b)
{
  return (float64x1_t)
    { __builtin_aarch64_ucvtfdi_sus (vget_lane_u64 (__a, 0), __b) };
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_n_f32_s32 (int32x4_t __a, const int __b)
{
  return __builtin_aarch64_scvtfv4si (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_n_f32_u32 (uint32x4_t __a, const int __b)
{
  return __builtin_aarch64_ucvtfv4si_sus (__a, __b);
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_n_f64_s64 (int64x2_t __a, const int __b)
{
  return __builtin_aarch64_scvtfv2di (__a, __b);
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_n_f64_u64 (uint64x2_t __a, const int __b)
{
  return __builtin_aarch64_ucvtfv2di_sus (__a, __b);
}
| |
/* vcvt (float -> <u>fixed-point).  __b is the fixed-point scaling
   argument forwarded to the FCVTZS/FCVTZU builtins (per ACLE, the
   number of fractional bits); it must be an integer constant.  The
   64x1 vector forms route through the scalar DF builtin on lane 0.  */

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtd_n_s64_f64 (float64_t __a, const int __b)
{
  return __builtin_aarch64_fcvtzsdf (__a, __b);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtd_n_u64_f64 (float64_t __a, const int __b)
{
  return __builtin_aarch64_fcvtzudf_uss (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvts_n_s32_f32 (float32_t __a, const int __b)
{
  return __builtin_aarch64_fcvtzssf (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvts_n_u32_f32 (float32_t __a, const int __b)
{
  return __builtin_aarch64_fcvtzusf_uss (__a, __b);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_n_s32_f32 (float32x2_t __a, const int __b)
{
  return __builtin_aarch64_fcvtzsv2sf (__a, __b);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_n_u32_f32 (float32x2_t __a, const int __b)
{
  return __builtin_aarch64_fcvtzuv2sf_uss (__a, __b);
}

/* No v1df builtin exists, so convert lane 0 with the scalar builtin
   and rebuild the one-element vector.  */
__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_n_s64_f64 (float64x1_t __a, const int __b)
{
  return (int64x1_t)
    { __builtin_aarch64_fcvtzsdf (vget_lane_f64 (__a, 0), __b) };
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_n_u64_f64 (float64x1_t __a, const int __b)
{
  return (uint64x1_t)
    { __builtin_aarch64_fcvtzudf_uss (vget_lane_f64 (__a, 0), __b) };
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_n_s32_f32 (float32x4_t __a, const int __b)
{
  return __builtin_aarch64_fcvtzsv4sf (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_n_u32_f32 (float32x4_t __a, const int __b)
{
  return __builtin_aarch64_fcvtzuv4sf_uss (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_n_s64_f64 (float64x2_t __a, const int __b)
{
  return __builtin_aarch64_fcvtzsv2df (__a, __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_n_u64_f64 (float64x2_t __a, const int __b)
{
  return __builtin_aarch64_fcvtzuv2df_uss (__a, __b);
}
| |
/* vcvt (<u>int -> float).  Scalar forms are plain C conversions; the
   unsigned vector forms cast to the signed vector type because the
   floatuns builtins take signed vector arguments.  */

__extension__ extern __inline float64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtd_f64_s64 (int64_t __a)
{
  return (float64_t) __a;
}

__extension__ extern __inline float64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtd_f64_u64 (uint64_t __a)
{
  return (float64_t) __a;
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvts_f32_s32 (int32_t __a)
{
  return (float32_t) __a;
}

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvts_f32_u32 (uint32_t __a)
{
  return (float32_t) __a;
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_f32_s32 (int32x2_t __a)
{
  return __builtin_aarch64_floatv2siv2sf (__a);
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_f32_u32 (uint32x2_t __a)
{
  return __builtin_aarch64_floatunsv2siv2sf ((int32x2_t) __a);
}

/* 64x1 forms convert lane 0 via an implicit scalar conversion inside
   the initializer.  */
__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_f64_s64 (int64x1_t __a)
{
  return (float64x1_t) { vget_lane_s64 (__a, 0) };
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_f64_u64 (uint64x1_t __a)
{
  return (float64x1_t) { vget_lane_u64 (__a, 0) };
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_f32_s32 (int32x4_t __a)
{
  return __builtin_aarch64_floatv4siv4sf (__a);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_f32_u32 (uint32x4_t __a)
{
  return __builtin_aarch64_floatunsv4siv4sf ((int32x4_t) __a);
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_f64_s64 (int64x2_t __a)
{
  return __builtin_aarch64_floatv2div2df (__a);
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_f64_u64 (uint64x2_t __a)
{
  return __builtin_aarch64_floatunsv2div2df ((int64x2_t) __a);
}
| |
/* vcvt (float -> <u>int), rounding toward zero: scalar forms are plain
   C conversions (which truncate), vector forms use the lbtrunc
   builtins.  64x1 forms reuse the scalar intrinsic on lane 0.  */

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtd_s64_f64 (float64_t __a)
{
  return (int64_t) __a;
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtd_u64_f64 (float64_t __a)
{
  return (uint64_t) __a;
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvts_s32_f32 (float32_t __a)
{
  return (int32_t) __a;
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvts_u32_f32 (float32_t __a)
{
  return (uint32_t) __a;
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_s32_f32 (float32x2_t __a)
{
  return __builtin_aarch64_lbtruncv2sfv2si (__a);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_u32_f32 (float32x2_t __a)
{
  return __builtin_aarch64_lbtruncuv2sfv2si_us (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_s32_f32 (float32x4_t __a)
{
  return __builtin_aarch64_lbtruncv4sfv4si (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_u32_f32 (float32x4_t __a)
{
  return __builtin_aarch64_lbtruncuv4sfv4si_us (__a);
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_s64_f64 (float64x1_t __a)
{
  return (int64x1_t) {vcvtd_s64_f64 (__a[0])};
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvt_u64_f64 (float64x1_t __a)
{
  return (uint64x1_t) {vcvtd_u64_f64 (__a[0])};
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_s64_f64 (float64x2_t __a)
{
  return __builtin_aarch64_lbtruncv2dfv2di (__a);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_u64_f64 (float64x2_t __a)
{
  return __builtin_aarch64_lbtruncuv2dfv2di_us (__a);
}
| |
/* vcvta: float -> <u>int, rounding to nearest with ties away from
   zero (the lround family of builtins).  64x1 forms reuse the scalar
   intrinsic on lane 0.  */

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtad_s64_f64 (float64_t __a)
{
  return __builtin_aarch64_lrounddfdi (__a);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtad_u64_f64 (float64_t __a)
{
  return __builtin_aarch64_lroundudfdi_us (__a);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtas_s32_f32 (float32_t __a)
{
  return __builtin_aarch64_lroundsfsi (__a);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtas_u32_f32 (float32_t __a)
{
  return __builtin_aarch64_lroundusfsi_us (__a);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvta_s32_f32 (float32x2_t __a)
{
  return __builtin_aarch64_lroundv2sfv2si (__a);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvta_u32_f32 (float32x2_t __a)
{
  return __builtin_aarch64_lrounduv2sfv2si_us (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtaq_s32_f32 (float32x4_t __a)
{
  return __builtin_aarch64_lroundv4sfv4si (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtaq_u32_f32 (float32x4_t __a)
{
  return __builtin_aarch64_lrounduv4sfv4si_us (__a);
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvta_s64_f64 (float64x1_t __a)
{
  return (int64x1_t) {vcvtad_s64_f64 (__a[0])};
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvta_u64_f64 (float64x1_t __a)
{
  return (uint64x1_t) {vcvtad_u64_f64 (__a[0])};
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtaq_s64_f64 (float64x2_t __a)
{
  return __builtin_aarch64_lroundv2dfv2di (__a);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtaq_u64_f64 (float64x2_t __a)
{
  return __builtin_aarch64_lrounduv2dfv2di_us (__a);
}
| |
/* vcvtm: float -> <u>int, rounding toward minus infinity (floor).
   Signed scalar forms use the generic llfloor/ifloorf builtins; the
   rest use the aarch64 lfloor builtins.  64x1 forms reuse the scalar
   intrinsic on lane 0.  */

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtmd_s64_f64 (float64_t __a)
{
  return __builtin_llfloor (__a);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtmd_u64_f64 (float64_t __a)
{
  return __builtin_aarch64_lfloorudfdi_us (__a);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtms_s32_f32 (float32_t __a)
{
  return __builtin_ifloorf (__a);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtms_u32_f32 (float32_t __a)
{
  return __builtin_aarch64_lfloorusfsi_us (__a);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtm_s32_f32 (float32x2_t __a)
{
  return __builtin_aarch64_lfloorv2sfv2si (__a);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtm_u32_f32 (float32x2_t __a)
{
  return __builtin_aarch64_lflooruv2sfv2si_us (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtmq_s32_f32 (float32x4_t __a)
{
  return __builtin_aarch64_lfloorv4sfv4si (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtmq_u32_f32 (float32x4_t __a)
{
  return __builtin_aarch64_lflooruv4sfv4si_us (__a);
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtm_s64_f64 (float64x1_t __a)
{
  return (int64x1_t) {vcvtmd_s64_f64 (__a[0])};
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtm_u64_f64 (float64x1_t __a)
{
  return (uint64x1_t) {vcvtmd_u64_f64 (__a[0])};
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtmq_s64_f64 (float64x2_t __a)
{
  return __builtin_aarch64_lfloorv2dfv2di (__a);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtmq_u64_f64 (float64x2_t __a)
{
  return __builtin_aarch64_lflooruv2dfv2di_us (__a);
}
| |
/* vcvtn: float -> <u>int, rounding to nearest with ties to even (the
   lfrintn family of builtins).  64x1 forms reuse the scalar intrinsic
   on lane 0.  */

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtnd_s64_f64 (float64_t __a)
{
  return __builtin_aarch64_lfrintndfdi (__a);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtnd_u64_f64 (float64_t __a)
{
  return __builtin_aarch64_lfrintnudfdi_us (__a);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtns_s32_f32 (float32_t __a)
{
  return __builtin_aarch64_lfrintnsfsi (__a);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtns_u32_f32 (float32_t __a)
{
  return __builtin_aarch64_lfrintnusfsi_us (__a);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtn_s32_f32 (float32x2_t __a)
{
  return __builtin_aarch64_lfrintnv2sfv2si (__a);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtn_u32_f32 (float32x2_t __a)
{
  return __builtin_aarch64_lfrintnuv2sfv2si_us (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtnq_s32_f32 (float32x4_t __a)
{
  return __builtin_aarch64_lfrintnv4sfv4si (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtnq_u32_f32 (float32x4_t __a)
{
  return __builtin_aarch64_lfrintnuv4sfv4si_us (__a);
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtn_s64_f64 (float64x1_t __a)
{
  return (int64x1_t) {vcvtnd_s64_f64 (__a[0])};
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtn_u64_f64 (float64x1_t __a)
{
  return (uint64x1_t) {vcvtnd_u64_f64 (__a[0])};
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtnq_s64_f64 (float64x2_t __a)
{
  return __builtin_aarch64_lfrintnv2dfv2di (__a);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtnq_u64_f64 (float64x2_t __a)
{
  return __builtin_aarch64_lfrintnuv2dfv2di_us (__a);
}
| |
/* vcvtp: float -> <u>int, rounding toward plus infinity (ceiling).
   Signed scalar forms use the generic llceil/iceilf builtins; the
   rest use the aarch64 lceil builtins.  64x1 forms reuse the scalar
   intrinsic on lane 0.  */

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtpd_s64_f64 (float64_t __a)
{
  return __builtin_llceil (__a);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtpd_u64_f64 (float64_t __a)
{
  return __builtin_aarch64_lceiludfdi_us (__a);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtps_s32_f32 (float32_t __a)
{
  return __builtin_iceilf (__a);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtps_u32_f32 (float32_t __a)
{
  return __builtin_aarch64_lceilusfsi_us (__a);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtp_s32_f32 (float32x2_t __a)
{
  return __builtin_aarch64_lceilv2sfv2si (__a);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtp_u32_f32 (float32x2_t __a)
{
  return __builtin_aarch64_lceiluv2sfv2si_us (__a);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtpq_s32_f32 (float32x4_t __a)
{
  return __builtin_aarch64_lceilv4sfv4si (__a);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtpq_u32_f32 (float32x4_t __a)
{
  return __builtin_aarch64_lceiluv4sfv4si_us (__a);
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtp_s64_f64 (float64x1_t __a)
{
  return (int64x1_t) {vcvtpd_s64_f64 (__a[0])};
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtp_u64_f64 (float64x1_t __a)
{
  return (uint64x1_t) {vcvtpd_u64_f64 (__a[0])};
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtpq_s64_f64 (float64x2_t __a)
{
  return __builtin_aarch64_lceilv2dfv2di (__a);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vcvtpq_u64_f64 (float64x2_t __a)
{
  return __builtin_aarch64_lceiluv2dfv2di_us (__a);
}
| |
/* vdup_n: broadcast the scalar __a into every lane of a 64-bit
   vector, built with a vector initializer.  */

__extension__ extern __inline float16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_f16 (float16_t __a)
{
  return (float16x4_t) {__a, __a, __a, __a};
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_f32 (float32_t __a)
{
  return (float32x2_t) {__a, __a};
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_f64 (float64_t __a)
{
  return (float64x1_t) {__a};
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_p8 (poly8_t __a)
{
  return (poly8x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
}

__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_p16 (poly16_t __a)
{
  return (poly16x4_t) {__a, __a, __a, __a};
}

__extension__ extern __inline poly64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_p64 (poly64_t __a)
{
  return (poly64x1_t) {__a};
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_s8 (int8_t __a)
{
  return (int8x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_s16 (int16_t __a)
{
  return (int16x4_t) {__a, __a, __a, __a};
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_s32 (int32_t __a)
{
  return (int32x2_t) {__a, __a};
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_s64 (int64_t __a)
{
  return (int64x1_t) {__a};
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_u8 (uint8_t __a)
{
  return (uint8x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_u16 (uint16_t __a)
{
  return (uint16x4_t) {__a, __a, __a, __a};
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_u32 (uint32_t __a)
{
  return (uint32x2_t) {__a, __a};
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_u64 (uint64_t __a)
{
  return (uint64x1_t) {__a};
}
| |
/* vdupq_n: broadcast the scalar __a into every lane of a 128-bit
   vector, built with a vector initializer.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_f16 (float16_t __a)
{
  return (float16x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_f32 (float32_t __a)
{
  return (float32x4_t) {__a, __a, __a, __a};
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_f64 (float64_t __a)
{
  return (float64x2_t) {__a, __a};
}

__extension__ extern __inline poly8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_p8 (poly8_t __a)
{
  return (poly8x16_t) {__a, __a, __a, __a, __a, __a, __a, __a,
		       __a, __a, __a, __a, __a, __a, __a, __a};
}

__extension__ extern __inline poly16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_p16 (poly16_t __a)
{
  return (poly16x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
}

__extension__ extern __inline poly64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_p64 (poly64_t __a)
{
  return (poly64x2_t) {__a, __a};
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_s8 (int8_t __a)
{
  return (int8x16_t) {__a, __a, __a, __a, __a, __a, __a, __a,
		      __a, __a, __a, __a, __a, __a, __a, __a};
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_s16 (int16_t __a)
{
  return (int16x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_s32 (int32_t __a)
{
  return (int32x4_t) {__a, __a, __a, __a};
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_s64 (int64_t __a)
{
  return (int64x2_t) {__a, __a};
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_u8 (uint8_t __a)
{
  return (uint8x16_t) {__a, __a, __a, __a, __a, __a, __a, __a,
		       __a, __a, __a, __a, __a, __a, __a, __a};
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_u16 (uint16_t __a)
{
  return (uint16x8_t) {__a, __a, __a, __a, __a, __a, __a, __a};
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_u32 (uint32_t __a)
{
  return (uint32x4_t) {__a, __a, __a, __a};
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_u64 (uint64_t __a)
{
  return (uint64x2_t) {__a, __a};
}
| |
/* vdup_lane: broadcast lane __b of the 64-bit vector __a into every
   lane of a 64-bit result, via the __aarch64_vdup_lane_* helper
   macros.  __b is a lane index for the element type.  */

__extension__ extern __inline float16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_f16 (float16x4_t __a, const int __b)
{
  return __aarch64_vdup_lane_f16 (__a, __b);
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_f32 (float32x2_t __a, const int __b)
{
  return __aarch64_vdup_lane_f32 (__a, __b);
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_f64 (float64x1_t __a, const int __b)
{
  return __aarch64_vdup_lane_f64 (__a, __b);
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_p8 (poly8x8_t __a, const int __b)
{
  return __aarch64_vdup_lane_p8 (__a, __b);
}

__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_p16 (poly16x4_t __a, const int __b)
{
  return __aarch64_vdup_lane_p16 (__a, __b);
}

__extension__ extern __inline poly64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_p64 (poly64x1_t __a, const int __b)
{
  return __aarch64_vdup_lane_p64 (__a, __b);
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_s8 (int8x8_t __a, const int __b)
{
  return __aarch64_vdup_lane_s8 (__a, __b);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_s16 (int16x4_t __a, const int __b)
{
  return __aarch64_vdup_lane_s16 (__a, __b);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_s32 (int32x2_t __a, const int __b)
{
  return __aarch64_vdup_lane_s32 (__a, __b);
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_s64 (int64x1_t __a, const int __b)
{
  return __aarch64_vdup_lane_s64 (__a, __b);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_u8 (uint8x8_t __a, const int __b)
{
  return __aarch64_vdup_lane_u8 (__a, __b);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_u16 (uint16x4_t __a, const int __b)
{
  return __aarch64_vdup_lane_u16 (__a, __b);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_u32 (uint32x2_t __a, const int __b)
{
  return __aarch64_vdup_lane_u32 (__a, __b);
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_u64 (uint64x1_t __a, const int __b)
{
  return __aarch64_vdup_lane_u64 (__a, __b);
}
| |
/* vdup_laneq: broadcast the constant lane __B of the 128-bit (Q-register)
   input vector __A to every lane of a 64-bit (D-register) result.  Each
   wrapper forwards to the matching __aarch64_vdup_laneq_<type> helper
   macro defined elsewhere in this header.  */

__extension__ extern __inline float16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_laneq_f16 (float16x8_t __a, const int __b)
{
  return __aarch64_vdup_laneq_f16 (__a, __b);
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_laneq_f32 (float32x4_t __a, const int __b)
{
  return __aarch64_vdup_laneq_f32 (__a, __b);
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_laneq_f64 (float64x2_t __a, const int __b)
{
  return __aarch64_vdup_laneq_f64 (__a, __b);
}

__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_laneq_p8 (poly8x16_t __a, const int __b)
{
  return __aarch64_vdup_laneq_p8 (__a, __b);
}

__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_laneq_p16 (poly16x8_t __a, const int __b)
{
  return __aarch64_vdup_laneq_p16 (__a, __b);
}

__extension__ extern __inline poly64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_laneq_p64 (poly64x2_t __a, const int __b)
{
  return __aarch64_vdup_laneq_p64 (__a, __b);
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_laneq_s8 (int8x16_t __a, const int __b)
{
  return __aarch64_vdup_laneq_s8 (__a, __b);
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_laneq_s16 (int16x8_t __a, const int __b)
{
  return __aarch64_vdup_laneq_s16 (__a, __b);
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_laneq_s32 (int32x4_t __a, const int __b)
{
  return __aarch64_vdup_laneq_s32 (__a, __b);
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_laneq_s64 (int64x2_t __a, const int __b)
{
  return __aarch64_vdup_laneq_s64 (__a, __b);
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_laneq_u8 (uint8x16_t __a, const int __b)
{
  return __aarch64_vdup_laneq_u8 (__a, __b);
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_laneq_u16 (uint16x8_t __a, const int __b)
{
  return __aarch64_vdup_laneq_u16 (__a, __b);
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_laneq_u32 (uint32x4_t __a, const int __b)
{
  return __aarch64_vdup_laneq_u32 (__a, __b);
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdup_laneq_u64 (uint64x2_t __a, const int __b)
{
  return __aarch64_vdup_laneq_u64 (__a, __b);
}
| |
/* vdupq_lane: broadcast the constant lane __B of the 64-bit (D-register)
   input vector __A to every lane of a 128-bit (Q-register) result.  Each
   wrapper forwards to the matching __aarch64_vdupq_lane_<type> helper
   macro defined elsewhere in this header.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_f16 (float16x4_t __a, const int __b)
{
  return __aarch64_vdupq_lane_f16 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_f32 (float32x2_t __a, const int __b)
{
  return __aarch64_vdupq_lane_f32 (__a, __b);
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_f64 (float64x1_t __a, const int __b)
{
  return __aarch64_vdupq_lane_f64 (__a, __b);
}

__extension__ extern __inline poly8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_p8 (poly8x8_t __a, const int __b)
{
  return __aarch64_vdupq_lane_p8 (__a, __b);
}

__extension__ extern __inline poly16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_p16 (poly16x4_t __a, const int __b)
{
  return __aarch64_vdupq_lane_p16 (__a, __b);
}

__extension__ extern __inline poly64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_p64 (poly64x1_t __a, const int __b)
{
  return __aarch64_vdupq_lane_p64 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_s8 (int8x8_t __a, const int __b)
{
  return __aarch64_vdupq_lane_s8 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_s16 (int16x4_t __a, const int __b)
{
  return __aarch64_vdupq_lane_s16 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_s32 (int32x2_t __a, const int __b)
{
  return __aarch64_vdupq_lane_s32 (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_s64 (int64x1_t __a, const int __b)
{
  return __aarch64_vdupq_lane_s64 (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_u8 (uint8x8_t __a, const int __b)
{
  return __aarch64_vdupq_lane_u8 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_u16 (uint16x4_t __a, const int __b)
{
  return __aarch64_vdupq_lane_u16 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_u32 (uint32x2_t __a, const int __b)
{
  return __aarch64_vdupq_lane_u32 (__a, __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_u64 (uint64x1_t __a, const int __b)
{
  return __aarch64_vdupq_lane_u64 (__a, __b);
}
| |
/* vdupq_laneq: broadcast the constant lane __B of the 128-bit
   (Q-register) input vector __A to every lane of a 128-bit result.
   Each wrapper forwards to the matching __aarch64_vdupq_laneq_<type>
   helper macro defined elsewhere in this header.  */

__extension__ extern __inline float16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_laneq_f16 (float16x8_t __a, const int __b)
{
  return __aarch64_vdupq_laneq_f16 (__a, __b);
}

__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_laneq_f32 (float32x4_t __a, const int __b)
{
  return __aarch64_vdupq_laneq_f32 (__a, __b);
}

__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_laneq_f64 (float64x2_t __a, const int __b)
{
  return __aarch64_vdupq_laneq_f64 (__a, __b);
}

__extension__ extern __inline poly8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_laneq_p8 (poly8x16_t __a, const int __b)
{
  return __aarch64_vdupq_laneq_p8 (__a, __b);
}

__extension__ extern __inline poly16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_laneq_p16 (poly16x8_t __a, const int __b)
{
  return __aarch64_vdupq_laneq_p16 (__a, __b);
}

__extension__ extern __inline poly64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_laneq_p64 (poly64x2_t __a, const int __b)
{
  return __aarch64_vdupq_laneq_p64 (__a, __b);
}

__extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_laneq_s8 (int8x16_t __a, const int __b)
{
  return __aarch64_vdupq_laneq_s8 (__a, __b);
}

__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_laneq_s16 (int16x8_t __a, const int __b)
{
  return __aarch64_vdupq_laneq_s16 (__a, __b);
}

__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_laneq_s32 (int32x4_t __a, const int __b)
{
  return __aarch64_vdupq_laneq_s32 (__a, __b);
}

__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_laneq_s64 (int64x2_t __a, const int __b)
{
  return __aarch64_vdupq_laneq_s64 (__a, __b);
}

__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_laneq_u8 (uint8x16_t __a, const int __b)
{
  return __aarch64_vdupq_laneq_u8 (__a, __b);
}

__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_laneq_u16 (uint16x8_t __a, const int __b)
{
  return __aarch64_vdupq_laneq_u16 (__a, __b);
}

__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_laneq_u32 (uint32x4_t __a, const int __b)
{
  return __aarch64_vdupq_laneq_u32 (__a, __b);
}

__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupq_laneq_u64 (uint64x2_t __a, const int __b)
{
  return __aarch64_vdupq_laneq_u64 (__a, __b);
}
| |
/* vdupb_lane: return byte lane __B of the 64-bit vector __A as a
   scalar.  Duplication to a scalar is simply a lane read, so these
   share the generic __aarch64_vget_lane_any helper with vget_lane.  */
__extension__ extern __inline poly8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupb_lane_p8 (poly8x8_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline int8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupb_lane_s8 (int8x8_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupb_lane_u8 (uint8x8_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}
| |
/* vduph_lane: return 16-bit lane __B of the 64-bit vector __A as a
   scalar, via the generic lane-read helper.  */

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vduph_lane_f16 (float16x4_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline poly16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vduph_lane_p16 (poly16x4_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vduph_lane_s16 (int16x4_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vduph_lane_u16 (uint16x4_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}
| |
/* vdups_lane: return 32-bit lane __B of the 64-bit vector __A as a
   scalar, via the generic lane-read helper.  */

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdups_lane_f32 (float32x2_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdups_lane_s32 (int32x2_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdups_lane_u32 (uint32x2_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}
| |
/* vdupd_lane: return 64-bit lane __B of the one-element vector __A as
   a scalar.  A 64x1 vector has a single lane, so after validating the
   index with __AARCH64_LANE_CHECK the result is always element 0.  */
__extension__ extern __inline float64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupd_lane_f64 (float64x1_t __a, const int __b)
{
  __AARCH64_LANE_CHECK (__a, __b);
  return __a[0];
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupd_lane_s64 (int64x1_t __a, const int __b)
{
  __AARCH64_LANE_CHECK (__a, __b);
  return __a[0];
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupd_lane_u64 (uint64x1_t __a, const int __b)
{
  __AARCH64_LANE_CHECK (__a, __b);
  return __a[0];
}
| |
/* vdupb_laneq: return byte lane __B of the 128-bit vector __A as a
   scalar, via the generic lane-read helper.  */
__extension__ extern __inline poly8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupb_laneq_p8 (poly8x16_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline int8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupb_laneq_s8 (int8x16_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline uint8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupb_laneq_u8 (uint8x16_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}
| |
/* vduph_laneq: return 16-bit lane __B of the 128-bit vector __A as a
   scalar, via the generic lane-read helper.  */

__extension__ extern __inline float16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vduph_laneq_f16 (float16x8_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline poly16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vduph_laneq_p16 (poly16x8_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline int16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vduph_laneq_s16 (int16x8_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline uint16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vduph_laneq_u16 (uint16x8_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}
| |
/* vdups_laneq: return 32-bit lane __B of the 128-bit vector __A as a
   scalar, via the generic lane-read helper.  */

__extension__ extern __inline float32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdups_laneq_f32 (float32x4_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline int32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdups_laneq_s32 (int32x4_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline uint32_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdups_laneq_u32 (uint32x4_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}
| |
/* vdupd_laneq: return 64-bit lane __B of the two-element 128-bit
   vector __A as a scalar, via the generic lane-read helper.  */
__extension__ extern __inline float64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupd_laneq_f64 (float64x2_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline int64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupd_laneq_s64 (int64x2_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}

__extension__ extern __inline uint64_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vdupd_laneq_u64 (uint64x2_t __a, const int __b)
{
  return __aarch64_vget_lane_any (__a, __b);
}
| |
/* vext: extract a vector from a pair of vectors (EXT instruction).
   Lane __i of the result is lane (__c + __i) of the concatenation
   {__a, __b}; __c must be a constant in [0, lanes-1], enforced by
   __AARCH64_LANE_CHECK.  On big-endian (__AARCH64EB__) GCC's vector
   element numbering runs opposite to the architectural lane order, so
   the operands are swapped and the shuffle indices mirrored to yield
   the same architectural result.  (__const is GCC's reserved alternate
   spelling of the const keyword, used for system-header hygiene.)  */

__extension__ extern __inline float16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_f16 (float16x4_t __a, float16x4_t __b, __const int __c)
{
  __AARCH64_LANE_CHECK (__a, __c);
#ifdef __AARCH64EB__
  return __builtin_shuffle (__b, __a,
			    (uint16x4_t) {4 - __c, 5 - __c, 6 - __c, 7 - __c});
#else
  return __builtin_shuffle (__a, __b,
			    (uint16x4_t) {__c, __c + 1, __c + 2, __c + 3});
#endif
}

__extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_f32 (float32x2_t __a, float32x2_t __b, __const int __c)
{
  __AARCH64_LANE_CHECK (__a, __c);
#ifdef __AARCH64EB__
  return __builtin_shuffle (__b, __a, (uint32x2_t) {2-__c, 3-__c});
#else
  return __builtin_shuffle (__a, __b, (uint32x2_t) {__c, __c+1});
#endif
}

__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_f64 (float64x1_t __a, float64x1_t __b, __const int __c)
{
  __AARCH64_LANE_CHECK (__a, __c);
  /* The only possible index to the assembler instruction returns element 0.  */
  return __a;
}
__extension__ extern __inline poly8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_p8 (poly8x8_t __a, poly8x8_t __b, __const int __c)
{
  __AARCH64_LANE_CHECK (__a, __c);
#ifdef __AARCH64EB__
  return __builtin_shuffle (__b, __a, (uint8x8_t)
      {8-__c, 9-__c, 10-__c, 11-__c, 12-__c, 13-__c, 14-__c, 15-__c});
#else
  return __builtin_shuffle (__a, __b,
      (uint8x8_t) {__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7});
#endif
}

__extension__ extern __inline poly16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_p16 (poly16x4_t __a, poly16x4_t __b, __const int __c)
{
  __AARCH64_LANE_CHECK (__a, __c);
#ifdef __AARCH64EB__
  return __builtin_shuffle (__b, __a,
			    (uint16x4_t) {4-__c, 5-__c, 6-__c, 7-__c});
#else
  return __builtin_shuffle (__a, __b, (uint16x4_t) {__c, __c+1, __c+2, __c+3});
#endif
}

__extension__ extern __inline poly64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_p64 (poly64x1_t __a, poly64x1_t __b, __const int __c)
{
  __AARCH64_LANE_CHECK (__a, __c);
  /* The only possible index to the assembler instruction returns element 0.  */
  return __a;
}

__extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_s8 (int8x8_t __a, int8x8_t __b, __const int __c)
{
  __AARCH64_LANE_CHECK (__a, __c);
#ifdef __AARCH64EB__
  return __builtin_shuffle (__b, __a, (uint8x8_t)
      {8-__c, 9-__c, 10-__c, 11-__c, 12-__c, 13-__c, 14-__c, 15-__c});
#else
  return __builtin_shuffle (__a, __b,
      (uint8x8_t) {__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7});
#endif
}

__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_s16 (int16x4_t __a, int16x4_t __b, __const int __c)
{
  __AARCH64_LANE_CHECK (__a, __c);
#ifdef __AARCH64EB__
  return __builtin_shuffle (__b, __a,
			    (uint16x4_t) {4-__c, 5-__c, 6-__c, 7-__c});
#else
  return __builtin_shuffle (__a, __b, (uint16x4_t) {__c, __c+1, __c+2, __c+3});
#endif
}

__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_s32 (int32x2_t __a, int32x2_t __b, __const int __c)
{
  __AARCH64_LANE_CHECK (__a, __c);
#ifdef __AARCH64EB__
  return __builtin_shuffle (__b, __a, (uint32x2_t) {2-__c, 3-__c});
#else
  return __builtin_shuffle (__a, __b, (uint32x2_t) {__c, __c+1});
#endif
}

__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_s64 (int64x1_t __a, int64x1_t __b, __const int __c)
{
  __AARCH64_LANE_CHECK (__a, __c);
  /* The only possible index to the assembler instruction returns element 0.  */
  return __a;
}

__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_u8 (uint8x8_t __a, uint8x8_t __b, __const int __c)
{
  __AARCH64_LANE_CHECK (__a, __c);
#ifdef __AARCH64EB__
  return __builtin_shuffle (__b, __a, (uint8x8_t)
      {8-__c, 9-__c, 10-__c, 11-__c, 12-__c, 13-__c, 14-__c, 15-__c});
#else
  return __builtin_shuffle (__a, __b,
      (uint8x8_t) {__c, __c+1, __c+2, __c+3, __c+4, __c+5, __c+6, __c+7});
#endif
}

__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_u16 (uint16x4_t __a, uint16x4_t __b, __const int __c)
{
  __AARCH64_LANE_CHECK (__a, __c);
#ifdef __AARCH64EB__
  return __builtin_shuffle (__b, __a,
			    (uint16x4_t) {4-__c, 5-__c, 6-__c, 7-__c});
#else
  return __builtin_shuffle (__a, __b, (uint16x4_t) {__c, __c+1, __c+2, __c+3});
#endif
}

__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_u32 (uint32x2_t __a, uint32x2_t __b, __const int __c)
{
  __AARCH64_LANE_CHECK (__a, __c);
#ifdef __AARCH64EB__
  return __builtin_shuffle (__b, __a, (uint32x2_t) {2-__c, 3-__c});
#else
  return __builtin_shuffle (__a, __b, (uint32x2_t) {__c, __c+1});
#endif
}

__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vext_u64 (uint64x1_t __a, uint64x1_t __b, __const int __c)
{
  __AARCH64_LANE_CHECK (__a, __c);
  /* The only possible index to the assembler instruction returns element 0.  */
  return __a;
}
| |
| __extension__ extern __inline float16x8_t |
| __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) |
|